| repo_id (string, 5-115 chars) | size (int64, 590-5.01M) | file_path (string, 4-212 chars) | content (string, 590-5.01M chars) |
|---|---|---|---|
| Admenri/urge | 4,981 | binding/mri/third_party/libffi/src/powerpc/linux64.S |
/* -----------------------------------------------------------------------
sysv.h - Copyright (c) 2003 Jakub Jelinek <jakub@redhat.com>
Copyright (c) 2008 Red Hat, Inc.
PowerPC64 Assembly glue.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
``Software''), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
----------------------------------------------------------------------- */
#define LIBFFI_ASM
#include <fficonfig.h>
#include <ffi.h>
#ifdef __powerpc64__
.hidden ffi_call_LINUX64, .ffi_call_LINUX64
.globl ffi_call_LINUX64, .ffi_call_LINUX64
.section ".opd","aw"
.align 3
ffi_call_LINUX64:
.quad .ffi_call_LINUX64,.TOC.@tocbase,0
.size ffi_call_LINUX64,24
.type .ffi_call_LINUX64,@function
.text
.ffi_call_LINUX64:
.LFB1:
mflr %r0
std %r28, -32(%r1)
std %r29, -24(%r1)
std %r30, -16(%r1)
std %r31, -8(%r1)
std %r0, 16(%r1)
mr %r28, %r1 /* our AP. */
.LCFI0:
stdux %r1, %r1, %r4
mr %r31, %r5 /* flags, */
mr %r30, %r6 /* rvalue, */
mr %r29, %r7 /* function address. */
std %r2, 40(%r1)
/* Call ffi_prep_args64. */
mr %r4, %r1
bl .ffi_prep_args64
ld %r0, 0(%r29)
ld %r2, 8(%r29)
ld %r11, 16(%r29)
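/* r29 points at an ELFv1 function descriptor: doubleword 0 is the entry
point, doubleword 1 the callee's TOC pointer, and doubleword 2 the
static chain/environment value, which the ABI passes in r11. */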
/* Now do the call. */
/* Set up cr1 with bits 4-7 of the flags. */
mtcrf 0x40, %r31
/* Get the address to call into CTR. */
mtctr %r0
/* Load all those argument registers. */
ld %r3, -32-(8*8)(%r28)
ld %r4, -32-(7*8)(%r28)
ld %r5, -32-(6*8)(%r28)
ld %r6, -32-(5*8)(%r28)
bf- 5, 1f
ld %r7, -32-(4*8)(%r28)
ld %r8, -32-(3*8)(%r28)
ld %r9, -32-(2*8)(%r28)
ld %r10, -32-(1*8)(%r28)
1:
/* Load all the FP registers. */
bf- 6, 2f
lfd %f1, -32-(21*8)(%r28)
lfd %f2, -32-(20*8)(%r28)
lfd %f3, -32-(19*8)(%r28)
lfd %f4, -32-(18*8)(%r28)
lfd %f5, -32-(17*8)(%r28)
lfd %f6, -32-(16*8)(%r28)
lfd %f7, -32-(15*8)(%r28)
lfd %f8, -32-(14*8)(%r28)
lfd %f9, -32-(13*8)(%r28)
lfd %f10, -32-(12*8)(%r28)
lfd %f11, -32-(11*8)(%r28)
lfd %f12, -32-(10*8)(%r28)
lfd %f13, -32-(9*8)(%r28)
2:
/* Make the call. */
bctrl
/* This must follow the call immediately; the unwinder
uses this to find out whether r2 has been saved or not. */
ld %r2, 40(%r1)
/* Now, deal with the return value. */
mtcrf 0x01, %r31
bt- 30, .Ldone_return_value
bt- 29, .Lfp_return_value
std %r3, 0(%r30)
/* Fall through... */
.Ldone_return_value:
/* Restore the registers we used and return. */
mr %r1, %r28
ld %r0, 16(%r28)
ld %r28, -32(%r1)
mtlr %r0
ld %r29, -24(%r1)
ld %r30, -16(%r1)
ld %r31, -8(%r1)
blr
.Lfp_return_value:
bf 28, .Lfloat_return_value
stfd %f1, 0(%r30)
mtcrf 0x02, %r31 /* cr6 */
bf 27, .Ldone_return_value
stfd %f2, 8(%r30)
b .Ldone_return_value
.Lfloat_return_value:
stfs %f1, 0(%r30)
b .Ldone_return_value
.LFE1:
.long 0
.byte 0,12,0,1,128,4,0,0
.size .ffi_call_LINUX64,.-.ffi_call_LINUX64
.section .eh_frame,EH_FRAME_FLAGS,@progbits
.Lframe1:
.4byte .LECIE1-.LSCIE1 # Length of Common Information Entry
.LSCIE1:
.4byte 0x0 # CIE Identifier Tag
.byte 0x1 # CIE Version
.ascii "zR\0" # CIE Augmentation
.uleb128 0x1 # CIE Code Alignment Factor
.sleb128 -8 # CIE Data Alignment Factor
.byte 0x41 # CIE RA Column
.uleb128 0x1 # Augmentation size
.byte 0x14 # FDE Encoding (pcrel udata8)
.byte 0xc # DW_CFA_def_cfa
.uleb128 0x1
.uleb128 0x0
.align 3
.LECIE1:
.LSFDE1:
.4byte .LEFDE1-.LASFDE1 # FDE Length
.LASFDE1:
.4byte .LASFDE1-.Lframe1 # FDE CIE offset
.8byte .LFB1-. # FDE initial location
.8byte .LFE1-.LFB1 # FDE address range
.uleb128 0x0 # Augmentation size
.byte 0x2 # DW_CFA_advance_loc1
.byte .LCFI0-.LFB1
.byte 0xd # DW_CFA_def_cfa_register
.uleb128 0x1c
.byte 0x11 # DW_CFA_offset_extended_sf
.uleb128 0x41
.sleb128 -2
.byte 0x9f # DW_CFA_offset, column 0x1f
.uleb128 0x1
.byte 0x9e # DW_CFA_offset, column 0x1e
.uleb128 0x2
.byte 0x9d # DW_CFA_offset, column 0x1d
.uleb128 0x3
.byte 0x9c # DW_CFA_offset, column 0x1c
.uleb128 0x4
.align 3
.LEFDE1:
#endif
#if defined __ELF__ && defined __linux__
.section .note.GNU-stack,"",@progbits
#endif
| Admenri/urge | 69,680 | third_party/dav1d/src/loongarch/cdef.S |
/*
* Copyright © 2024, VideoLAN and dav1d authors
* Copyright © 2024, Loongson Technology Corporation Limited
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/loongarch/loongson_asm.S"
// static int cdef_find_dir_lsx(const pixel *img, const ptrdiff_t stride,
// unsigned *const var HIGHBD_DECL_SUFFIX)
// param: img: a0, stride: a1, var: a2
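// Rough C sketch of the accumulation below (modelled on dav1d's generic
// cdef_find_dir; the array names are shorthand, not identifiers from this file):
//   for (y = 0; y < 8; y++, img += stride)
//     for (x = 0; x < 8; x++) {
//       int px = img[x] - 128;
//       hv[0][y] += px;                 hv[1][x] += px;
//       diag[0][y + x] += px;           diag[1][7 + y - x] += px;
//       alt[0][y + (x >> 1)] += px;     alt[1][3 + y - (x >> 1)] += px;
//       alt[2][3 - (y >> 1) + x] += px; alt[3][(y >> 1) + x] += px;
//     }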
function cdef_find_dir_8bpc_lsx
addi.d sp, sp, -64
fst.d f24, sp, 0
fst.d f25, sp, 8
fst.d f26, sp, 16
fst.d f27, sp, 24
fst.d f28, sp, 32
fst.d f29, sp, 40
fst.d f30, sp, 48
fst.d f31, sp, 56
li.d a3, 128
vreplgr2vr.w vr31, a3
// hv: vr0-vr3 diag: vr4-vr11 alt: vr12-vr23
.irp i, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, vr8, vr9, vr10, \
vr11, vr12, vr13, vr14, vr15, vr16, vr17, vr18, vr19, \
vr20, vr21, vr22, vr23
vxor.v \i, \i, \i
.endr
.CFDL01: // 8 rows, fully unrolled below
// 0
fld.d f24, a0, 0 //img
vpermi.w vr25, vr24, 0x01
vsllwil.hu.bu vr24, vr24, 0
vsllwil.wu.hu vr24, vr24, 0
vsllwil.hu.bu vr25, vr25, 0
vsllwil.wu.hu vr25, vr25, 0
vsub.w vr24, vr24, vr31 //px
vsub.w vr25, vr25, vr31
vadd.w vr4, vr4, vr24 //diag[0][y+x]
vadd.w vr5, vr5, vr25
vpackev.w vr26, vr25, vr24
vpackod.w vr27, vr25, vr24
vpermi.w vr26, vr26, 0xd8 //px0246
vpermi.w vr27, vr27, 0xd8 //px1357
vadd.w vr12, vr12, vr26
vadd.w vr12, vr12, vr27 //alt[0][y+(x>>1)]
vhaddw.d.w vr28, vr24, vr24
vhaddw.q.d vr28, vr28, vr28
vpickve2gr.d a3, vr28, 0
vhaddw.d.w vr28, vr25, vr25
vhaddw.q.d vr28, vr28, vr28
vpickve2gr.d a4, vr28, 0
add.d a3, a3, a4
vinsgr2vr.w vr0, a3, 0 //hv[0][y]
vadd.w vr15, vr15, vr26
vadd.w vr15, vr15, vr27 //alt[1][3+y-(x>>1)]
vpermi.w vr15, vr15, 0x1b
vadd.w vr9, vr9, vr24
vadd.w vr8, vr8, vr25
vpermi.w vr8, vr8, 0x1b
vpermi.w vr9, vr9, 0x1b //diag[1][7+y-x]
vxor.v vr28, vr28, vr28
vxor.v vr29, vr29, vr29
vadd.w vr28, vr28, vr24
vadd.w vr29, vr29, vr25
vextrins.w vr18, vr28, 0x30
vshuf4i.w vr19, vr28, 0x39
vextrins.w vr19, vr29, 0x30
vshuf4i.w vr20, vr29, 0x39 //alt[2][3-(y>>1)+x]
vinsgr2vr.w vr20, zero, 3
vadd.w vr2, vr2, vr24
vadd.w vr3, vr3, vr25 //hv[1][x]
vadd.w vr21, vr21, vr24
vadd.w vr22, vr22, vr25 //alt[3][(y>>1)+x]
add.d a0, a0, a1
// 1
fld.d f24, a0, 0 //img
vpermi.w vr25, vr24, 0x01
vsllwil.hu.bu vr24, vr24, 0
vsllwil.wu.hu vr24, vr24, 0
vsllwil.hu.bu vr25, vr25, 0
vsllwil.wu.hu vr25, vr25, 0
vsub.w vr24, vr24, vr31 //px
vsub.w vr25, vr25, vr31
vbsrl.v vr28, vr4, 4 //1-4
vbsrl.v vr29, vr5, 4 //5-8
vextrins.w vr28, vr5, 0x30
vadd.w vr28, vr28, vr24 //diag[0][y+x]
vadd.w vr29, vr29, vr25
vbsll.v vr5, vr29, 4
vextrins.w vr5, vr28, 0x03
vextrins.w vr6, vr29, 0x03
vextrins.w vr28, vr4, 0x30
vshuf4i.w vr4, vr28, 0x93
vbsrl.v vr28, vr12, 4
vextrins.w vr28, vr13, 0x30
vpackev.w vr26, vr25, vr24
vpackod.w vr27, vr25, vr24
vpermi.w vr26, vr26, 0xd8 //px0246
vpermi.w vr27, vr27, 0xd8 //px1357
vadd.w vr28, vr28, vr26
vadd.w vr28, vr28, vr27 //alt[0][y+(x>>1)]
vextrins.w vr13, vr28, 0x03
vextrins.w vr28, vr12, 0x30
vshuf4i.w vr12, vr28, 0x93
vhaddw.d.w vr28, vr24, vr24
vhaddw.q.d vr28, vr28, vr28
vpickve2gr.d a3, vr28, 0
vhaddw.d.w vr28, vr25, vr25
vhaddw.q.d vr28, vr28, vr28
vpickve2gr.d a4, vr28, 0
add.d a3, a3, a4
vinsgr2vr.w vr0, a3, 1 //hv[0][y]
vbsrl.v vr28, vr15, 4
vextrins.w vr28, vr16, 0x30
vpermi.w vr28, vr28, 0x1b
vadd.w vr28, vr28, vr26
vadd.w vr28, vr28, vr27 //alt[1][3+y-(x>>1)]
vextrins.w vr16, vr28, 0x00
vextrins.w vr28, vr15, 0x00
vshuf4i.w vr15, vr28, 0x6c
vbsrl.v vr28, vr8, 4 //4321
vbsrl.v vr29, vr9, 4 //8765
vextrins.w vr28, vr9, 0x30
vpermi.w vr28, vr28, 0x1b
vpermi.w vr29, vr29, 0x1b
vadd.w vr29, vr29, vr24
vadd.w vr28, vr28, vr25 //diag[1][7+y-x]
vextrins.w vr10, vr29, 0x00
vextrins.w vr29, vr28, 0x00
vshuf4i.w vr9, vr29, 0x6c
vextrins.w vr28, vr8, 0x00
vshuf4i.w vr8, vr28, 0x6c
vbsll.v vr28, vr19, 4
vextrins.w vr28, vr18, 0x03
vbsll.v vr29, vr20, 4
vextrins.w vr29, vr19, 0x03
vadd.w vr28, vr28, vr24
vadd.w vr29, vr29, vr25 //alt[2][3-(y>>1)+7]
vextrins.w vr18, vr28, 0x30
vextrins.w vr28, vr29, 0x00
vshuf4i.w vr19, vr28, 0x39
vbsrl.v vr20, vr29, 4
vadd.w vr2, vr2, vr24
vadd.w vr3, vr3, vr25 //hv[1][x]
vadd.w vr21, vr21, vr24
vadd.w vr22, vr22, vr25 //alt[3][(y>>1)+x]
add.d a0, a0, a1
// 2
fld.d f24, a0, 0 //img
vpermi.w vr25, vr24, 0x01
vsllwil.hu.bu vr24, vr24, 0
vsllwil.wu.hu vr24, vr24, 0
vsllwil.hu.bu vr25, vr25, 0
vsllwil.wu.hu vr25, vr25, 0
vsub.w vr24, vr24, vr31 //px
vsub.w vr25, vr25, vr31
vbsrl.v vr28, vr4, 8
vbsrl.v vr29, vr5, 8
vextrins.d vr28, vr5, 0x10 //2-5
vextrins.d vr29, vr6, 0x10 //6-9
vadd.w vr28, vr28, vr24 //diag[0][y+x]
vadd.w vr29, vr29, vr25
vextrins.d vr4, vr28, 0x10
vextrins.d vr5, vr28, 0x01
vextrins.d vr5, vr29, 0x10
vextrins.d vr6, vr29, 0x01
vbsrl.v vr28, vr12, 8
vextrins.d vr28, vr13, 0x10
vpackev.w vr26, vr25, vr24
vpackod.w vr27, vr25, vr24
vpermi.w vr26, vr26, 0xd8 //px0246
vpermi.w vr27, vr27, 0xd8 //px1357
vadd.w vr28, vr28, vr26
vadd.w vr28, vr28, vr27 //alt[0][y+(x>>1)]
vextrins.d vr12, vr28, 0x10
vextrins.d vr13, vr28, 0x01
vhaddw.d.w vr28, vr24, vr24
vhaddw.q.d vr28, vr28, vr28
vpickve2gr.d a3, vr28, 0
vhaddw.d.w vr28, vr25, vr25
vhaddw.q.d vr28, vr28, vr28
vpickve2gr.d a4, vr28, 0
add.d a3, a3, a4
vinsgr2vr.w vr0, a3, 2 //hv[0][y]
vbsrl.v vr28, vr15, 8
vextrins.d vr28, vr16, 0x10
vpermi.w vr28, vr28, 0x1b
vadd.w vr28, vr28, vr26
vadd.w vr28, vr28, vr27 //alt[1][3+y-(x>>1)]
vpermi.w vr28, vr28, 0x1b
vextrins.d vr15, vr28, 0x10
vextrins.d vr16, vr28, 0x01
vbsrl.v vr28, vr8, 8
vextrins.d vr28, vr9, 0x10
vbsrl.v vr29, vr9, 8
vextrins.d vr29, vr10, 0x10
vpermi.w vr28, vr28, 0x1b //5432
vpermi.w vr29, vr29, 0x1b //9876
vadd.w vr29, vr29, vr24
vadd.w vr28, vr28, vr25
vpermi.w vr28, vr28, 0x1b
vpermi.w vr29, vr29, 0x1b
vextrins.d vr8, vr28, 0x10
vextrins.d vr9, vr28, 0x01
vextrins.d vr9, vr29, 0x10
vextrins.d vr10, vr29, 0x01 //diag[1][7+y-x]
vbsrl.v vr28, vr18, 8
vextrins.d vr28, vr19, 0x10 //2345
vbsrl.v vr29, vr19, 8
vextrins.d vr29, vr20, 0x10 //6789
vadd.w vr28, vr28, vr24
vadd.w vr29, vr29, vr25
vextrins.d vr18, vr28, 0x10
vextrins.d vr19, vr28, 0x01
vextrins.d vr19, vr29, 0x10
vextrins.d vr20, vr29, 0x01 //alt[2][3-(y>>1)+x]
vadd.w vr2, vr2, vr24
vadd.w vr3, vr3, vr25 //hv[1][x]
vbsrl.v vr28, vr21, 4
vextrins.w vr28, vr22, 0x30 //1234
vbsrl.v vr29, vr22, 4 //5678
vadd.w vr28, vr28, vr24
vadd.w vr29, vr29, vr25 //alt[3][(y>>1)+x]
vextrins.w vr23, vr29, 0x03
vextrins.w vr29, vr28, 0x33
vshuf4i.w vr22, vr29, 0x93
vextrins.w vr28, vr21, 0x30
vshuf4i.w vr21, vr28, 0x93
add.d a0, a0, a1
// 3
fld.d f24, a0, 0 //img
vpermi.w vr25, vr24, 0x01
vsllwil.hu.bu vr24, vr24, 0
vsllwil.wu.hu vr24, vr24, 0
vsllwil.hu.bu vr25, vr25, 0
vsllwil.wu.hu vr25, vr25, 0
vsub.w vr24, vr24, vr31 //px
vsub.w vr25, vr25, vr31
vbsll.v vr28, vr5, 4
vextrins.w vr28, vr4, 0x03 //3456
vbsll.v vr29, vr6, 4
vextrins.w vr29, vr5, 0x03 //78910
vadd.w vr28, vr28, vr24 //diag[0][y+x]
vadd.w vr29, vr29, vr25
vextrins.w vr4, vr28, 0x30
vextrins.w vr28, vr29, 0x00
vshuf4i.w vr5, vr28, 0x39
vbsrl.v vr6, vr29, 4
vbsll.v vr28, vr13, 4
vextrins.w vr28, vr12, 0x03
vpackev.w vr26, vr25, vr24
vpackod.w vr27, vr25, vr24
vpermi.w vr26, vr26, 0xd8 //px0246
vpermi.w vr27, vr27, 0xd8 //px1357
vadd.w vr28, vr28, vr26
vadd.w vr28, vr28, vr27 //alt[0][y+(x>>1)]
vextrins.w vr12, vr28, 0x30
vbsrl.v vr13, vr28, 4
vhaddw.d.w vr28, vr24, vr24
vhaddw.q.d vr28, vr28, vr28
vpickve2gr.d a3, vr28, 0
vhaddw.d.w vr28, vr25, vr25
vhaddw.q.d vr28, vr28, vr28
vpickve2gr.d a4, vr28, 0
add.d a3, a3, a4
vinsgr2vr.w vr0, a3, 3 //hv[0][y]
vbsll.v vr28, vr16, 4
vextrins.w vr28, vr15, 0x03
vpermi.w vr28, vr28, 0x1b //6543
vadd.w vr28, vr28, vr26
vadd.w vr28, vr28, vr27 //alt[1][3+y-(x>>1)]
vextrins.w vr15, vr28, 0x33
vshuf4i.w vr16, vr28, 0xc6
vinsgr2vr.w vr16, zero, 3
vbsll.v vr28, vr9, 4
vextrins.w vr28, vr8, 0x03 //3456
vbsll.v vr29, vr10, 4
vextrins.w vr29, vr9, 0x03 //78910
vpermi.w vr28, vr28, 0x1b //6543
vpermi.w vr29, vr29, 0x1b //10987
vadd.w vr29, vr29, vr24
vadd.w vr28, vr28, vr25 //diag[1][7+y-x]
vextrins.w vr8, vr28, 0x33
vextrins.w vr28, vr29, 0x33
vshuf4i.w vr9, vr28, 0xc6
vshuf4i.w vr10, vr29, 0xc6
vinsgr2vr.w vr10, zero, 3
vbsrl.v vr28, vr18, 8
vextrins.d vr28, vr19, 0x10 //2345
vbsrl.v vr29, vr19, 8
vextrins.d vr29, vr20, 0x10 //6789
vadd.w vr28, vr28, vr24
vadd.w vr29, vr29, vr25
vextrins.d vr18, vr28, 0x10
vextrins.d vr19, vr28, 0x01
vextrins.d vr19, vr29, 0x10
vextrins.d vr20, vr29, 0x01 //alt[2][3-(y>>1)+x]
vadd.w vr2, vr2, vr24
vadd.w vr3, vr3, vr25 //hv[1][x]
vbsrl.v vr28, vr21, 4
vextrins.w vr28, vr22, 0x30 //1234
vbsrl.v vr29, vr22, 4 //5678
vextrins.w vr29, vr23, 0x30
vadd.w vr28, vr28, vr24
vadd.w vr29, vr29, vr25 //alt[3][(y>>1)+x]
vextrins.w vr23, vr29, 0x03
vextrins.w vr29, vr28, 0x33
vshuf4i.w vr22, vr29, 0x93
vextrins.w vr28, vr21, 0x30
vshuf4i.w vr21, vr28, 0x93
add.d a0, a0, a1
// 4
fld.d f24, a0, 0 //img
vpermi.w vr25, vr24, 0x01
vsllwil.hu.bu vr24, vr24, 0
vsllwil.wu.hu vr24, vr24, 0
vsllwil.hu.bu vr25, vr25, 0
vsllwil.wu.hu vr25, vr25, 0
vsub.w vr24, vr24, vr31 //px
vsub.w vr25, vr25, vr31
vadd.w vr5, vr5, vr24 //diag[0][y+x]
vadd.w vr6, vr6, vr25
vpackev.w vr26, vr25, vr24
vpackod.w vr27, vr25, vr24
vpermi.w vr26, vr26, 0xd8 //px0246
vpermi.w vr27, vr27, 0xd8 //px1357
vadd.w vr13, vr13, vr26
vadd.w vr13, vr13, vr27 //alt[0][y+(x>>1)]
vhaddw.d.w vr28, vr24, vr24
vhaddw.q.d vr28, vr28, vr28
vpickve2gr.d a3, vr28, 0
vhaddw.d.w vr28, vr25, vr25
vhaddw.q.d vr28, vr28, vr28
vpickve2gr.d a4, vr28, 0
add.d a3, a3, a4
vinsgr2vr.w vr1, a3, 0 //hv[0][y]
vpermi.w vr16, vr16, 0x1b
vadd.w vr16, vr16, vr26
vadd.w vr16, vr16, vr27 //alt[1][3+y-(x>>1)]
vpermi.w vr16, vr16, 0x1b
vpermi.w vr9, vr9, 0x1b
vpermi.w vr10, vr10, 0x1b
vadd.w vr10, vr10, vr24
vadd.w vr9, vr9, vr25
vpermi.w vr9, vr9, 0x1b
vpermi.w vr10, vr10, 0x1b //diag[1][7+y-x]
vbsrl.v vr28, vr18, 4
vextrins.w vr28, vr19, 0x30 //1234
vbsrl.v vr29, vr19, 4
vextrins.w vr29, vr20, 0x30 //5678
vadd.w vr28, vr28, vr24
vadd.w vr29, vr29, vr25 //alt[2][3-(y>>1)+x]
vextrins.w vr20, vr29, 0x03
vextrins.w vr29, vr28, 0x33
vshuf4i.w vr19, vr29, 0x93
vbsll.v vr18, vr28, 4
vadd.w vr2, vr2, vr24
vadd.w vr3, vr3, vr25 //hv[1][x]
vbsrl.v vr28, vr21, 8
vextrins.d vr28, vr22, 0x10
vbsrl.v vr29, vr22, 8
vextrins.d vr29, vr23, 0x10
vadd.w vr28, vr28, vr24
vadd.w vr29, vr29, vr25
vextrins.d vr21, vr28, 0x10
vextrins.d vr22, vr28, 0x01
vextrins.d vr22, vr29, 0x10
vextrins.d vr23, vr29, 0x01 //alt[3][(y>>1)+x]
add.d a0, a0, a1
// 5
fld.d f24, a0, 0 //img
vpermi.w vr25, vr24, 0x01
vsllwil.hu.bu vr24, vr24, 0
vsllwil.wu.hu vr24, vr24, 0
vsllwil.hu.bu vr25, vr25, 0
vsllwil.wu.hu vr25, vr25, 0
vsub.w vr24, vr24, vr31 //px
vsub.w vr25, vr25, vr31
vbsrl.v vr28, vr5, 4 //5-8
vbsrl.v vr29, vr6, 4 //9-12
vextrins.w vr28, vr6, 0x30
vadd.w vr28, vr28, vr24 //diag[0][y+x]
vadd.w vr29, vr29, vr25
vextrins.w vr7, vr29, 0x03
vextrins.w vr29, vr28, 0x33
vshuf4i.w vr6, vr29, 0x93
vextrins.w vr28, vr5, 0x30
vshuf4i.w vr5, vr28, 0x93
vbsrl.v vr28, vr13, 4
vextrins.w vr28, vr14, 0x30
vpackev.w vr26, vr25, vr24
vpackod.w vr27, vr25, vr24
vpermi.w vr26, vr26, 0xd8 //px0246
vpermi.w vr27, vr27, 0xd8 //px1357
vadd.w vr28, vr28, vr26
vadd.w vr28, vr28, vr27 //alt[0][y+(x>>1)]
vextrins.w vr14, vr28, 0x03
vextrins.w vr28, vr13, 0x30
vshuf4i.w vr13, vr28, 0x93
vhaddw.d.w vr28, vr24, vr24
vhaddw.q.d vr28, vr28, vr28
vpickve2gr.d a3, vr28, 0
vhaddw.d.w vr28, vr25, vr25
vhaddw.q.d vr28, vr28, vr28
vpickve2gr.d a4, vr28, 0
add.d a3, a3, a4
vinsgr2vr.w vr1, a3, 1 //hv[0][y]
vbsrl.v vr28, vr16, 4
vextrins.w vr28, vr17, 0x30
vpermi.w vr28, vr28, 0x1b
vadd.w vr28, vr28, vr26
vadd.w vr28, vr28, vr27 //alt[1][3+y-(x>>1)]
vextrins.w vr17, vr28, 0x00
vextrins.w vr28, vr16, 0x00
vshuf4i.w vr16, vr28, 0x6c
vbsrl.v vr28, vr9, 4
vbsrl.v vr29, vr10, 4
vextrins.w vr28, vr10, 0x30
vpermi.w vr28, vr28, 0x1b //8-5
vpermi.w vr29, vr29, 0x1b //12-9
vadd.w vr29, vr29, vr24
vadd.w vr28, vr28, vr25 //diag[1][7+y-x]
vextrins.w vr11, vr29, 0x00
vextrins.w vr29, vr28, 0x00
vshuf4i.w vr10, vr29, 0x6c
vextrins.w vr28, vr9, 0x00
vshuf4i.w vr9, vr28, 0x6c
vbsrl.v vr28, vr18, 4
vextrins.w vr28, vr19, 0x30 //1234
vbsrl.v vr29, vr19, 4
vextrins.w vr29, vr20, 0x30 //5678
vadd.w vr28, vr28, vr24
vadd.w vr29, vr29, vr25 //alt[2][3-(y>>1)+x]
vextrins.w vr20, vr29, 0x03
vextrins.w vr29, vr28, 0x33
vshuf4i.w vr19, vr29, 0x93
vbsll.v vr18, vr28, 4
vadd.w vr2, vr2, vr24
vadd.w vr3, vr3, vr25 //hv[1][x]
vbsrl.v vr28, vr21, 8
vextrins.d vr28, vr22, 0x10
vbsrl.v vr29, vr22, 8
vextrins.d vr29, vr23, 0x10
vadd.w vr28, vr28, vr24
vadd.w vr29, vr29, vr25
vextrins.d vr21, vr28, 0x10
vextrins.d vr22, vr28, 0x01
vextrins.d vr22, vr29, 0x10
vextrins.d vr23, vr29, 0x01 //alt[3][(y>>1)+x]
add.d a0, a0, a1
// 6
fld.d f24, a0, 0 //img
vpermi.w vr25, vr24, 0x01
vsllwil.hu.bu vr24, vr24, 0
vsllwil.wu.hu vr24, vr24, 0
vsllwil.hu.bu vr25, vr25, 0
vsllwil.wu.hu vr25, vr25, 0
vsub.w vr24, vr24, vr31 //px
vsub.w vr25, vr25, vr31
vbsrl.v vr28, vr5, 8
vbsrl.v vr29, vr6, 8
vextrins.d vr28, vr6, 0x10 //6-9
vextrins.d vr29, vr7, 0x10 //10-13
vadd.w vr28, vr28, vr24 //diag[0][y+x]
vadd.w vr29, vr29, vr25
vextrins.d vr5, vr28, 0x10
vextrins.d vr6, vr28, 0x01
vextrins.d vr6, vr29, 0x10
vextrins.d vr7, vr29, 0x01
vbsrl.v vr28, vr13, 8
vextrins.d vr28, vr14, 0x10
vpackev.w vr26, vr25, vr24
vpackod.w vr27, vr25, vr24
vpermi.w vr26, vr26, 0xd8 //px0246
vpermi.w vr27, vr27, 0xd8 //px1357
vadd.w vr28, vr28, vr26
vadd.w vr28, vr28, vr27 //alt[0][y+(x>>1)]
vextrins.d vr13, vr28, 0x10
vextrins.d vr14, vr28, 0x01
vhaddw.d.w vr28, vr24, vr24
vhaddw.q.d vr28, vr28, vr28
vpickve2gr.d a3, vr28, 0
vhaddw.d.w vr28, vr25, vr25
vhaddw.q.d vr28, vr28, vr28
vpickve2gr.d a4, vr28, 0
add.d a3, a3, a4
vinsgr2vr.w vr1, a3, 2 //hv[0][y]
vbsrl.v vr28, vr16, 8
vextrins.d vr28, vr17, 0x10
vpermi.w vr28, vr28, 0x1b
vadd.w vr28, vr28, vr26
vadd.w vr28, vr28, vr27 //alt[1][3+y-(x>>1)]
vpermi.w vr28, vr28, 0x1b
vextrins.d vr16, vr28, 0x10
vextrins.d vr17, vr28, 0x01
vbsrl.v vr28, vr9, 8
vextrins.d vr28, vr10, 0x10
vbsrl.v vr29, vr10, 8
vextrins.d vr29, vr11, 0x10
vpermi.w vr28, vr28, 0x1b //9876
vpermi.w vr29, vr29, 0x1b //13-10
vadd.w vr29, vr29, vr24
vadd.w vr28, vr28, vr25
vpermi.w vr28, vr28, 0x1b
vpermi.w vr29, vr29, 0x1b
vextrins.d vr9, vr28, 0x10
vextrins.d vr10, vr28, 0x01
vextrins.d vr10, vr29, 0x10
vextrins.d vr11, vr29, 0x01 //diag[1][7+y-x]
vadd.w vr18, vr18, vr24 //0123
vadd.w vr19, vr19, vr25 //4567 alt[2][3-(y>>1)+x]
vadd.w vr2, vr2, vr24
vadd.w vr3, vr3, vr25 //hv[1][x]
vbsll.v vr28, vr22, 4
vextrins.w vr28, vr21, 0x03 //3456
vbsll.v vr29, vr23, 4
vextrins.w vr29, vr22, 0x03 //78910
vadd.w vr28, vr28, vr24
vadd.w vr29, vr29, vr25 //alt[3][(y>>1)+x]
vextrins.w vr21, vr28, 0x30
vextrins.w vr28, vr29, 0x00
vshuf4i.w vr22, vr28, 0x39
vbsrl.v vr23, vr29, 4
add.d a0, a0, a1
// 7
fld.d f24, a0, 0 //img
vpermi.w vr25, vr24, 0x01
vsllwil.hu.bu vr24, vr24, 0
vsllwil.wu.hu vr24, vr24, 0
vsllwil.hu.bu vr25, vr25, 0
vsllwil.wu.hu vr25, vr25, 0
vsub.w vr24, vr24, vr31 //px
vsub.w vr25, vr25, vr31
vbsll.v vr28, vr6, 4
vextrins.w vr28, vr5, 0x03 //78910
vbsll.v vr29, vr7, 4
vextrins.w vr29, vr6, 0x03 //11-14
vadd.w vr28, vr28, vr24 //diag[0][y+x]
vadd.w vr29, vr29, vr25
vextrins.w vr5, vr28, 0x30
vextrins.w vr28, vr29, 0x00
vshuf4i.w vr6, vr28, 0x39
vbsrl.v vr7, vr29, 4
vbsll.v vr28, vr14, 4
vextrins.w vr28, vr13, 0x03
vpackev.w vr26, vr25, vr24
vpackod.w vr27, vr25, vr24
vpermi.w vr26, vr26, 0xd8 //px0246
vpermi.w vr27, vr27, 0xd8 //px1357
vadd.w vr28, vr28, vr26
vadd.w vr28, vr28, vr27 //alt[0][y+(x>>1)]
vextrins.w vr13, vr28, 0x30
vbsrl.v vr14, vr28, 4
vhaddw.d.w vr28, vr24, vr24
vhaddw.q.d vr28, vr28, vr28
vpickve2gr.d a3, vr28, 0
vhaddw.d.w vr28, vr25, vr25
vhaddw.q.d vr28, vr28, vr28
vpickve2gr.d a4, vr28, 0
add.d a3, a3, a4
vinsgr2vr.w vr1, a3, 3 //hv[0][y]
vbsll.v vr28, vr17, 4
vextrins.w vr28, vr16, 0x03
vpermi.w vr28, vr28, 0x1b //10987
vadd.w vr28, vr28, vr26
vadd.w vr28, vr28, vr27 //alt[1][3+y-(x>>1)]
vextrins.w vr16, vr28, 0x33
vshuf4i.w vr17, vr28, 0xc6
vinsgr2vr.w vr17, zero, 3
vbsll.v vr28, vr10, 4
vextrins.w vr28, vr9, 0x03 //7-10
vbsll.v vr29, vr11, 4
vextrins.w vr29, vr10, 0x03 //11-14
vpermi.w vr28, vr28, 0x1b //10-7
vpermi.w vr29, vr29, 0x1b //14-11
vadd.w vr29, vr29, vr24
vadd.w vr28, vr28, vr25 //diag[1][7+y-x]
vextrins.w vr9, vr28, 0x33
vextrins.w vr28, vr29, 0x33
vshuf4i.w vr10, vr28, 0xc6
vshuf4i.w vr11, vr29, 0xc6
vinsgr2vr.w vr11, zero, 3
vadd.w vr18, vr18, vr24 //0123
vadd.w vr19, vr19, vr25 //4567 alt[2][3-(y>>1)+x]
vadd.w vr2, vr2, vr24
vadd.w vr3, vr3, vr25 //hv[1][x]
vbsll.v vr28, vr22, 4
vextrins.w vr28, vr21, 0x03 //3456
vbsll.v vr29, vr23, 4
vextrins.w vr29, vr22, 0x03 //78910
vadd.w vr28, vr28, vr24
vadd.w vr29, vr29, vr25 //alt[3][(y>>1)+x]
vextrins.w vr21, vr28, 0x30
vextrins.w vr28, vr29, 0x00
vshuf4i.w vr22, vr28, 0x39
vbsrl.v vr23, vr29, 4
add.d a0, a0, a1
vxor.v vr24, vr24, vr24 //unsigned cost[8]
vxor.v vr25, vr25, vr25
vmul.w vr26, vr0, vr0
vmul.w vr27, vr1, vr1
vhaddw.d.w vr28, vr26, vr26
vhaddw.q.d vr28, vr28, vr28
vpickve2gr.d a3, vr28, 0
vhaddw.d.w vr28, vr27, vr27
vhaddw.q.d vr28, vr28, vr28
vpickve2gr.d a4, vr28, 0
add.d a3, a3, a4
vmul.w vr26, vr2, vr2
vmul.w vr27, vr3, vr3
vhaddw.d.w vr28, vr26, vr26
vhaddw.q.d vr28, vr28, vr28
vpickve2gr.d a4, vr28, 0
vhaddw.d.w vr28, vr27, vr27
vhaddw.q.d vr28, vr28, vr28
vpickve2gr.d a5, vr28, 0
add.d a4, a4, a5
li.d a6, 105
mul.w a3, a3, a6
mul.w a4, a4, a6
vinsgr2vr.w vr24, a3, 2
vinsgr2vr.w vr25, a4, 2
vxor.v vr30, vr30, vr30 //div_table
vxor.v vr31, vr31, vr31
li.d t0, 840
vinsgr2vr.w vr30, t0, 0
li.d t0, 420
vinsgr2vr.w vr30, t0, 1
li.d t0, 280
vinsgr2vr.w vr30, t0, 2
li.d t0, 210
vinsgr2vr.w vr30, t0, 3
li.d t0, 168
vinsgr2vr.w vr31, t0, 0
li.d t0, 140
vinsgr2vr.w vr31, t0, 1
li.d t0, 120
vinsgr2vr.w vr31, t0, 2
vbsll.v vr27, vr7, 4
vextrins.w vr27, vr6, 0x03
vpermi.w vr27, vr27, 0x1b
vmul.w vr26, vr4, vr4
vmadd.w vr26, vr27, vr27
vmul.w vr26, vr26, vr30
vhaddw.d.w vr28, vr26, vr26
vhaddw.q.d vr28, vr28, vr28
vpickve2gr.d a3, vr28, 0
vbsll.v vr27, vr6, 4
vpermi.w vr27, vr27, 0x1b
vmul.w vr26, vr5, vr5
vmadd.w vr26, vr27, vr27
vmul.w vr26, vr26, vr31
vextrins.w vr26, vr31, 0x33
vhaddw.d.w vr28, vr26, vr26
vhaddw.q.d vr28, vr28, vr28
vpickve2gr.d a4, vr28, 0
add.d a3, a3, a4 //cost[0]
vbsll.v vr27, vr11, 4
vextrins.w vr27, vr10, 0x03
vpermi.w vr27, vr27, 0x1b
vmul.w vr26, vr8, vr8
vmadd.w vr26, vr27, vr27
vmul.w vr26, vr26, vr30
vhaddw.d.w vr28, vr26, vr26
vhaddw.q.d vr28, vr28, vr28
vpickve2gr.d a4, vr28, 0
vbsll.v vr27, vr10, 4
vpermi.w vr27, vr27, 0x1b
vmul.w vr26, vr9, vr9
vmadd.w vr26, vr27, vr27
vmul.w vr26, vr26, vr31
vextrins.w vr26, vr31, 0x33
vhaddw.d.w vr28, vr26, vr26
vhaddw.q.d vr28, vr28, vr28
vpickve2gr.d a5, vr28, 0
add.d a4, a4, a5 //cost[4]
vpickve2gr.w a5, vr5, 3
mul.w a5, a5, a5
mul.w a5, a5, a6
add.w a3, a3, a5
vinsgr2vr.w vr24, a3, 0
vpickve2gr.w a5, vr9, 3
mul.w a5, a5, a5
mul.w a5, a5, a6
add.w a4, a4, a5
vinsgr2vr.w vr25, a4, 0
//n=0
vpickve2gr.w a3, vr24, 1
vmul.w vr26, vr13, vr13
vhaddw.d.w vr28, vr26, vr26
vhaddw.q.d vr28, vr28, vr28
vpickve2gr.d a4, vr28, 0
vpickve2gr.w a5, vr12, 3
mul.w a5, a5, a5
add.d a3, a3, a4
add.d a3, a3, a5
mul.w a3, a3, a6 //*cost_ptr
vextrins.w vr29, vr30, 0x01
vextrins.w vr29, vr30, 0x13
vextrins.w vr29, vr31, 0x21
vextrins.w vr29, vr31, 0x33
vbsll.v vr27, vr14, 4
vpermi.w vr27, vr27, 0x1b
vmul.w vr28, vr12, vr12
vextrins.w vr28, vr31, 0x33
vmadd.w vr28, vr27, vr27
vmul.w vr26, vr28, vr29
vhaddw.d.w vr28, vr26, vr26
vhaddw.q.d vr28, vr28, vr28
vpickve2gr.d a4, vr28, 0
add.d a3, a3, a4
vinsgr2vr.w vr24, a3, 1
//n=1
vpickve2gr.w a3, vr24, 3
vmul.w vr26, vr16, vr16
vhaddw.d.w vr28, vr26, vr26
vhaddw.q.d vr28, vr28, vr28
vpickve2gr.d a4, vr28, 0
vpickve2gr.w a5, vr15, 3
mul.w a5, a5, a5
add.d a3, a3, a4
add.d a3, a3, a5
mul.w a3, a3, a6 //*cost_ptr
vbsll.v vr27, vr17, 4
vpermi.w vr27, vr27, 0x1b
vmul.w vr28, vr15, vr15
vextrins.w vr28, vr31, 0x33
vmadd.w vr28, vr27, vr27
vmul.w vr26, vr28, vr29
vhaddw.d.w vr28, vr26, vr26
vhaddw.q.d vr28, vr28, vr28
vpickve2gr.d a4, vr28, 0
add.d a3, a3, a4
vinsgr2vr.w vr24, a3, 3
//n=2
vpickve2gr.w a3, vr25, 1
vmul.w vr26, vr19, vr19
vhaddw.d.w vr28, vr26, vr26
vhaddw.q.d vr28, vr28, vr28
vpickve2gr.d a4, vr28, 0
vpickve2gr.w a5, vr18, 3
mul.w a5, a5, a5
add.d a3, a3, a4
add.d a3, a3, a5
mul.w a3, a3, a6 //*cost_ptr
vbsll.v vr27, vr20, 4
vpermi.w vr27, vr27, 0x1b
vmul.w vr28, vr18, vr18
vextrins.w vr28, vr31, 0x33
vmadd.w vr28, vr27, vr27
vmul.w vr26, vr28, vr29
vhaddw.d.w vr28, vr26, vr26
vhaddw.q.d vr28, vr28, vr28
vpickve2gr.d a4, vr28, 0
add.d a3, a3, a4
vinsgr2vr.w vr25, a3, 1
//n=3
vpickve2gr.w a3, vr25, 3
vmul.w vr26, vr22, vr22
vhaddw.d.w vr28, vr26, vr26
vhaddw.q.d vr28, vr28, vr28
vpickve2gr.d a4, vr28, 0
vpickve2gr.w a5, vr21, 3
mul.w a5, a5, a5
add.d a3, a3, a4
add.d a3, a3, a5
mul.w a3, a3, a6 //*cost_ptr
vbsll.v vr27, vr23, 4
vpermi.w vr27, vr27, 0x1b
vmul.w vr28, vr21, vr21
vextrins.w vr28, vr31, 0x33
vmadd.w vr28, vr27, vr27
vmul.w vr26, vr28, vr29
vhaddw.d.w vr28, vr26, vr26
vhaddw.q.d vr28, vr28, vr28
vpickve2gr.d a4, vr28, 0
add.d a3, a3, a4
vinsgr2vr.w vr25, a3, 3
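// Selection below, roughly:
//   best_dir = 0; best_cost = cost[0];
//   for (n = 1; n < 8; n++)
//     if (cost[n] > best_cost) { best_cost = cost[n]; best_dir = n; }
//   *var = (best_cost - cost[best_dir ^ 4]) >> 10;  // cost of the orthogonal direction
//   return best_dir;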
xor a3, a3, a3 //best_dir
vpickve2gr.w a4, vr24, 0 //best_cost
.BSETDIR01:
vpickve2gr.w a5, vr24, 1
bge a4, a5, .BSETDIR02
or a4, a5, a5
ori a3, zero, 1
.BSETDIR02:
vpickve2gr.w a5, vr24, 2
bge a4, a5, .BSETDIR03
or a4, a5, a5
ori a3, zero, 2
.BSETDIR03:
vpickve2gr.w a5, vr24, 3
bge a4, a5, .BSETDIR04
or a4, a5, a5
ori a3, zero, 3
.BSETDIR04:
vpickve2gr.w a5, vr25, 0
bge a4, a5, .BSETDIR05
or a4, a5, a5
ori a3, zero, 4
.BSETDIR05:
vpickve2gr.w a5, vr25, 1
bge a4, a5, .BSETDIR06
or a4, a5, a5
ori a3, zero, 5
.BSETDIR06:
vpickve2gr.w a5, vr25, 2
bge a4, a5, .BSETDIR07
or a4, a5, a5
ori a3, zero, 6
.BSETDIR07:
vpickve2gr.w a5, vr25, 3
bge a4, a5, .BSETDIREND
or a4, a5, a5
ori a3, zero, 7
.BSETDIREND:
xori a5, a3, 4
li.d a1, 4
bge a5, a1, .GETCOST01
vreplve.w vr26, vr24, a5
b .GETCOST02
.GETCOST01:
vreplve.w vr26, vr25, a5
.GETCOST02:
vpickve2gr.w a5, vr26, 0
sub.w a5, a4, a5
srai.d a5, a5, 10
st.w a5, a2, 0
or a0, a3, a3
fld.d f24, sp, 0
fld.d f25, sp, 8
fld.d f26, sp, 16
fld.d f27, sp, 24
fld.d f28, sp, 32
fld.d f29, sp, 40
fld.d f30, sp, 48
fld.d f31, sp, 56
addi.d sp, sp, 64
endfunc
.macro cdef_fill tmp, stride, w, h
beqz \h, 700f //h
or t0, zero, zero //y
100:
or t1, zero, zero //xx
srai.d s6, \w, 3 //x
beqz s6, 300f
200:
vstx vr18, \tmp, t1
addi.d t1, t1, 16
addi.d s6, s6, -1
bnez s6, 200b
300:
andi s6, \w, 4
beqz s6, 400f
fstx.d f18, \tmp, t1
addi.d t1, t1, 8
400:
andi s6, \w, 2
beqz s6, 500f
fstx.s f18, \tmp, t1
addi.d t1, t1, 4
500:
andi s6, \w, 1
beqz s6, 600f
li.w s6, -16384
stx.h s6, \tmp, t1
addi.d t1, t1, 2
600:
add.d \tmp, \tmp, \stride
add.d \tmp, \tmp, \stride
addi.d t0, t0, 1
blt t0, \h, 100b
700:
.endm
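// A minimal C sketch of what cdef_fill does (the sentinel value is assumed
// from the vr18 / -16384 setup in the callers):
//   static void fill(int16_t *tmp, ptrdiff_t stride, int w, int h) {
//     for (int y = 0; y < h; y++, tmp += stride)
//       for (int x = 0; x < w; x++)
//         tmp[x] = -16384; // large negative signed, large positive unsigned
//   }
// so padded edges never win the signed max / unsigned min clamping later on.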
const dav1d_cdef_directions
.byte 1 * 12 + 0, 2 * 12 + 0
.byte 1 * 12 + 0, 2 * 12 - 1
.byte -1 * 12 + 1, -2 * 12 + 2
.byte 0 * 12 + 1, -1 * 12 + 2
.byte 0 * 12 + 1, 0 * 12 + 2
.byte 0 * 12 + 1, 1 * 12 + 2
.byte 1 * 12 + 1, 2 * 12 + 2
.byte 1 * 12 + 0, 2 * 12 + 1
.byte 1 * 12 + 0, 2 * 12 + 0
.byte 1 * 12 + 0, 2 * 12 - 1
.byte -1 * 12 + 1, -2 * 12 + 2
.byte 0 * 12 + 1, -1 * 12 + 2
endconst
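// Each byte above is a tap offset into the padded tmp buffer, encoded as
// dy * 12 + dx (tmp_stride is 12 halfwords). Every pair holds the k = 0 and
// k = 1 taps of one direction, and the table is long enough to be indexed at
// dir + 0, dir + 2 and dir + 4 without wrapping.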
.macro constrain_vrh in0, in1, in2, tmp0, tmp1, out
vabsd.h \tmp0, \in0, vr23 //adiff
vsra.h \tmp1, \tmp0, \in2
vsub.h \tmp1, \in1, \tmp1
vmax.h \tmp1, vr23, \tmp1 //imax
vmin.h \tmp0, \tmp0, \tmp1 //imin
//apply_sign
vslt.h \tmp1, \in0, vr23
vandn.v \in0, \tmp1, \tmp0
vsigncov.h \tmp0, \tmp1, \tmp0
vor.v \out, \in0, \tmp0
.endm
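// Vector form of dav1d's scalar constrain() (a sketch; vr23 is all-zero here):
//   static int constrain(int diff, int threshold, int shift) {
//     int adiff = abs(diff);
//     return apply_sign(imin(adiff, imax(threshold - (adiff >> shift), 0)), diff);
//   }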
.macro iclip_vrh in0, in1, in2, tmp0, tmp1, out
vmin.h \tmp0, \in2, \in0
vslt.h \in0, \in0, \in1
vand.v \tmp1, \in0, \in1
vandn.v \tmp0, \in0, \tmp0
vor.v \out, \tmp1, \tmp0
.endm
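// Per-lane clamp: out = (in0 < in1) ? in1 : min(in0, in2), i.e. iclip of each
// halfword of in0 to the [in1, in2] range.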
.macro cdef_padding_data
//y < 0
beqz t7, 90f
4:
or t4, t5, t5 //data index xx
slli.d t0, t4, 1
mul.w t2, t7, s5
slli.d t2, t2, 1
add.d t2, s4, t2
sub.d t3, t6, t5 //loop param x
srai.d t3, t3, 3
add.d t3, t3, t5
beq t5, t3, 6f
5: // /8
fldx.d f18, a3, t4
vsllwil.hu.bu vr18, vr18, 0
vstx vr18, t2, t0
addi.d t0, t0, 16
addi.d t4, t4, 8
addi.d t3, t3, -1
bne t5, t3, 5b
6: // &4
sub.d t1, t6, t5
andi t1, t1, 4
beqz t1, 7f
fldx.s f18, a3, t4
vsllwil.hu.bu vr18, vr18, 0
fstx.d f18, t2, t0
addi.d t0, t0, 8
addi.d t4, t4, 4
7: // &2
sub.d t1, t6, t5
andi t1, t1, 2
beqz t1, 9f
ldx.bu t1, a3, t4
stx.h t1, t2, t0
addi.d t0, t0, 2
addi.d t4, t4, 1
ldx.bu t1, a3, t4
stx.h t1, t2, t0
addi.d t0, t0, 2
addi.d t4, t4, 1
9:
add.d a3, a3, a1
addi.d t7, t7, 1
bnez t7, 4b
90:
// y < h
beqz s1, 12f
beqz t5, 12f
or t7, zero, zero //y
10:
or t4, t5, t5 //data index x
11:
slli.d t3, t7, 1
addi.d t3, t3, 2
add.d t3, t3, t4
ldx.bu t1, a2, t3
mul.w t3, t7, s5
add.d t3, t3, t4
slli.d t3, t3, 1
stx.h t1, s4, t3
addi.d t4, t4, 1
bnez t4, 11b
addi.d t7, t7, 1
bne t7, s1, 10b
12:
// y = 0 ; y < h
or s0, s4, s4
beqz s1, 20f
or s6, a0, a0
or t7, zero, zero //y
srai.d t4, t6, 3 //loop max
13:
or t0, zero, zero //loop param
or t3, t0, t0 //data index src
or t1, t0, t0 //data index tmp
beqz t4, 16f
15: // /8
fldx.d f18, s6, t3
vsllwil.hu.bu vr18, vr18, 0
vstx vr18, s0, t1
addi.d t3, t3, 8
addi.d t1, t1, 16
addi.d t0, t0, 1
blt t0, t4, 15b
16: // &4
andi t0, t6, 4
beqz t0, 17f
fldx.s f18, s6, t3
vsllwil.hu.bu vr18, vr18, 0
fstx.d f18, s0, t1
addi.d t3, t3, 4
addi.d t1, t1, 8
17: // &2
andi t0, t6, 2
beqz t0, 19f
ldx.bu t2, s6, t3
stx.h t2, s0, t1
addi.d t3, t3, 1
addi.d t1, t1, 2
ldx.bu t2, s6, t3
stx.h t2, s0, t1
addi.d t3, t3, 1
addi.d t1, t1, 2
19: // src+ tmp+
add.d s6, s6, a1
add.d s0, s0, s5
add.d s0, s0, s5
addi.d t7, t7, 1
blt t7, s1, 13b
// y = h ; y < y_end
20:
beq s1, t8, 27f
or t7, s1, s1 //y
sub.d t4, t6, t5
srai.d t4, t4, 3
add.d t4, t4, t5 //8 loop max
21:
or t0, t5, t5 //xx
or t3, t0, t0 //data index bottom
slli.d t1, t0, 1 //data index tmp
beq t5, t4, 23f
22: // /8
fldx.d f18, a4, t3
vsllwil.hu.bu vr18, vr18, 0
vstx vr18, s0, t1
addi.d t3, t3, 8
addi.d t1, t1, 16
addi.d t0, t0, 1
blt t0, t4, 22b
23: // &4
sub.d t0, t6, t5
andi t0, t0, 4
beqz t0, 24f
fldx.s f18, a4, t3
vsllwil.hu.bu vr18, vr18, 0
fstx.d f18, s0, t1
addi.d t3, t3, 4
addi.d t1, t1, 8
24: // &2
sub.d t0, t6, t5
andi t0, t0, 2
beqz t0, 26f
ldx.bu t2, a4, t3
stx.h t2, s0, t1
addi.d t3, t3, 1
addi.d t1, t1, 2
ldx.bu t2, a4, t3
stx.h t2, s0, t1
addi.d t3, t3, 1
addi.d t1, t1, 2
26: // bottom+ tmp+
add.d a4, a4, a1
add.d s0, s0, s5
add.d s0, s0, s5
addi.d t7, t7, 1
blt t7, t8, 21b
27:
// padding end
.endm
.macro cdef_pri_sec_init
clz.w t3, a6
sub.w t3, t2, t3
sub.w t3, s7, t3 //sec_shift
vreplgr2vr.h vr4, t0 //pri_tap_k
vreplgr2vr.h vr9, a5 //pri_strength
vreplgr2vr.h vr10, t1 //pri_shift
vreplgr2vr.h vr18, a6 //sec_strength
vreplgr2vr.h vr19, t3 //sec_shift
or t2, s1, s1 //dowhile loop param
addi.d s1, a7, 2
slli.d s1, s1, 1 //directions dir+2
addi.d s2, a7, 4
slli.d s2, s2, 1 //directions dir+4
slli.d s3, a7, 1 //directions dir+0
la.local t0, dav1d_cdef_directions
add.d s1, t0, s1
ld.b a2, s1, 0 //off01
ld.b a3, s1, 1 //off11
add.d s2, t0, s2
ld.b s1, s2, 0 //off02
ld.b s2, s2, 1 //off12
add.d s3, t0, s3
ld.b t0, s3, 0 //off03
ld.b s3, s3, 1 //off13
slli.d a2, a2, 1
slli.d a3, a3, 1
slli.d s1, s1, 1
slli.d s2, s2, 1
slli.d t0, t0, 1
slli.d s3, s3, 1
.endm
.macro cdef_pri_init
vreplgr2vr.h vr4, t0 //pri_tap_k
vreplgr2vr.h vr9, a5 //pri_strength
vreplgr2vr.h vr10, t1 //pri_shift
or t2, s1, s1 //dowhile loop param
addi.d s1, a7, 2
slli.d s1, s1, 1 //directions dir+2
la.local t0, dav1d_cdef_directions
add.d s1, t0, s1
ld.b a2, s1, 0 //off01
ld.b a3, s1, 1 //off11
slli.d a2, a2, 1
slli.d a3, a3, 1
.endm
.macro cdef_sec_init
clz.w t3, a6
li.w t2, 31
sub.w t3, t2, t3
sub.w t3, s7, t3 //sec_shift
vreplgr2vr.h vr18, a6 //sec_strength
vreplgr2vr.h vr19, t3 //sec_shift
or t2, s1, s1 //dowhile loop param
addi.d s2, a7, 4
slli.d s2, s2, 1 //directions dir+4
slli.d s3, a7, 1 //directions dir+0
la.local t0, dav1d_cdef_directions
add.d s1, t0, s1
add.d s2, t0, s2
ld.b s1, s2, 0 //off02
ld.b s2, s2, 1 //off12
add.d s3, t0, s3
ld.b t0, s3, 0 //off03
ld.b s3, s3, 1 //off13
slli.d s1, s1, 1
slli.d s2, s2, 1
slli.d t0, t0, 1
slli.d s3, s3, 1
.endm
.macro cdef_process_data_w8 in0, in1
vsub.h vr11, vr5, vr0
vsub.h vr12, vr6, vr0
vsub.h vr13, vr7, vr0
vsub.h vr14, vr8, vr0
constrain_vrh vr11, \in0, \in1, vr16, vr17, vr11
constrain_vrh vr12, \in0, \in1, vr16, vr17, vr12
constrain_vrh vr13, \in0, \in1, vr16, vr17, vr13
constrain_vrh vr14, \in0, \in1, vr16, vr17, vr14
.endm
.macro cdef_process_data_w4 in0, in1
vpermi.w vr6, vr5, 0x44
vpermi.w vr8, vr7, 0x44
vsub.h vr12, vr6, vr0
vsub.h vr14, vr8, vr0
constrain_vrh vr12, \in0, \in1, vr16, vr17, vr12
constrain_vrh vr14, \in0, \in1, vr16, vr17, vr14
.endm
.macro cdef_calc_sum_tapchange_w8
vmul.h vr1, vr15, vr11 //sum
vmadd.h vr1, vr15, vr12 //sum
vand.v vr15, vr15, vr21
vor.v vr15, vr15, vr22
vmadd.h vr1, vr15, vr13 //sum
vmadd.h vr1, vr15, vr14 //sum
.endm
.macro cdef_calc_sum_tapchange_w4
vmul.h vr1, vr15, vr12 //sum
vand.v vr15, vr15, vr21
vor.v vr15, vr15, vr22
vmadd.h vr1, vr15, vr14 //sum
.endm
.macro cdef_calc_sum_no_tapchange_w4 in0
vmadd.h vr1, \in0, vr12
vmadd.h vr1, \in0, vr14
.endm
.macro cdef_calc_sum_no_tapchange_w8 in0
vmadd.h vr1, \in0, vr11 //sum
vmadd.h vr1, \in0, vr12
vmadd.h vr1, \in0, vr13
vmadd.h vr1, \in0, vr14
.endm
.macro cdef_calc_maxmin_w4
vmin.hu vr3, vr6, vr3
vmax.h vr2, vr6, vr2
vmin.hu vr3, vr8, vr3 //min
vmax.h vr2, vr8, vr2 //max
.endm
.macro cdef_calc_maxmin_w8
vmin.hu vr3, vr5, vr3
vmax.h vr2, vr5, vr2
vmin.hu vr3, vr6, vr3
vmax.h vr2, vr6, vr2
vmin.hu vr3, vr7, vr3
vmax.h vr2, vr7, vr2
vmin.hu vr3, vr8, vr3 //min
vmax.h vr2, vr8, vr2 //max
.endm
.macro cdef_calc_dst
vslti.h vr5, vr1, 0
vand.v vr5, vr5, vr20
vsub.h vr5, vr1, vr5
vaddi.hu vr5, vr5, 8
vsrai.h vr5, vr5, 4
vadd.h vr5, vr0, vr5
.endm
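// Rounding step of the C reference (sketch): dst = px + ((8 + sum - (sum < 0)) >> 4);
// vr20 holds 1, so vr5 first becomes (sum < 0) as 0/1, and the rounded,
// shifted correction is then added to px in vr0.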
//static NOINLINE void cdef_filter_block_lsx
// (pixel *dst, const ptrdiff_t dst_stride,
// const pixel (*left)[2], const pixel *const top, const pixel *const bottom,
// const int pri_strength, const int sec_strength,
// const int dir, const int damping, const int w, int h,
// const enum CdefEdgeFlags edges HIGHBD_DECL_SUFFIX)
// w=4 h=4
//param: dst:a0, dst_stride:a1, left:a2, top:a3, bottom:a4, pri_strength:a5
//sec_strength:a6, dir:a7, damping:s7, w:s0, h:s1, edges:s2
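// Rough per-pixel C sketch of the kernel below (after dav1d's generic CDEF
// filter; helper names are approximations):
//   for (k = 0; k < 2; k++) {
//     int off1 = dav1d_cdef_directions[dir + 2][k];
//     sum += pri_tap[k] * constrain(tmp[x + off1] - px, pri_strength, pri_shift);
//     sum += pri_tap[k] * constrain(tmp[x - off1] - px, pri_strength, pri_shift);
//     int off2 = dav1d_cdef_directions[dir + 4][k];
//     int off3 = dav1d_cdef_directions[dir + 0][k];
//     sum += sec_tap[k] * constrain(tmp[x + off2] - px, sec_strength, sec_shift);
//     sum += sec_tap[k] * constrain(tmp[x - off2] - px, sec_strength, sec_shift);
//     sum += sec_tap[k] * constrain(tmp[x + off3] - px, sec_strength, sec_shift);
//     sum += sec_tap[k] * constrain(tmp[x - off3] - px, sec_strength, sec_shift);
//   }
//   dst = px + ((8 + sum - (sum < 0)) >> 4);
// with pri_tap = {4, 2} or {3, 3} (selected by pri_strength & 1, see the
// vr21/vr22 trick) and sec_tap = {2, 1}; the pri+sec path additionally clamps
// dst to the min/max of px and the contributing taps.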
function cdef_filter_block_4x4_8bpc_lsx
ld.w t0, sp, 0
ld.w t1, sp, 8
addi.d sp, sp, -(64+288)
st.d s0, sp, 0
st.d s1, sp, 8
st.d s2, sp, 16
st.d s3, sp, 24
st.d s4, sp, 32
st.d s5, sp, 40
st.d s6, sp, 48
st.d s7, sp, 56
li.w s0, 4 //w
li.w s1, 4 //h
or s2, t1, t1 //edges
or s7, t0, t0 //damping
li.d s5, 12 //tmp_stride
addi.d s4, sp, 64
slli.d t0, s5, 1
addi.d t0, t0, 2
slli.d t0, t0, 1
add.d s4, s4, t0 //ptr tmp
vxor.v vr23, vr23, vr23
li.w t2, 1
vreplgr2vr.h vr20, t2
vaddi.hu vr21, vr20, 2
vaddi.hu vr22, vr20, 1
li.w t0, -16384
vreplgr2vr.h vr18, t0
//padding
li.w t5, -2 //x_start
addi.d t6, s0, 2 //x_end
li.w t7, -2 //y_start
addi.d t8, s1, 2 //y_end
li.w t2, 2
andi t4, s2, 4
bnez t4, 1f
//CDEF_HAVE_TOP
slli.d t3, s5, 2
addi.d t4, s4, -4
sub.d t4, t4, t3
addi.d t3, s0, 4
cdef_fill t4, s5, t3, t2
or t7, zero, zero
1: //CDEF_HAVE_BOTTOM
andi t4, s2,8
bnez t4, 2f
mul.w t3, s1, s5
slli.d t3, t3, 1
add.d t4, s4, t3
addi.d t4, t4, -4
li.d t3, 8
cdef_fill t4, s5, t3, t2
addi.d t8, t8, -2
2: //CDEF_HAVE_LEFT
andi t4, s2,1
bnez t4, 3f
mul.w t3, t7, s5
slli.d t3, t3, 1
add.d t4, s4, t3
addi.d t4, t4, -4
sub.d t3, t8, t7
cdef_fill t4, s5, t2, t3
or t5, zero, zero
3: //CDEF_HAVE_RIGHT
andi t4, s2,2
bnez t4, 40f
mul.w t3, t7, s5
slli.d t3, t3, 1
add.d t4, s4, t3
addi.d t4, t4, 8
sub.d t3, t8, t7
cdef_fill t4, s5, t2, t3
addi.d t6, t6, -2
40:
cdef_padding_data
beqz a5, 33f
28: //if (pri_strength)
li.w t0, 4
andi t1, a5, 1
sub.d t0, t0, t1 //pri_tap
clz.w t1, a5
li.d t2, 31
sub.w t1, t2, t1
sub.w t1, s7, t1
blt t1, zero, 281f
or t1, t1, t1
b 282f
281:
or t1, zero, zero //t1: pri_shift
282:
beqz a6, 31f
29: //if (sec_strength)
cdef_pri_sec_init
30:
fld.s f0, a0, 0 //px
vsllwil.hu.bu vr0, vr0, 0
vpermi.w vr0, vr0, 0x44
vxor.v vr1, vr1, vr1 //sum
vor.v vr2, vr0, vr0 //max
vor.v vr3, vr0, vr0 //min
vor.v vr15, vr4, vr4 //pri_tap_k
sub.d t4, s4, a2
sub.d t5, s4, a3
fldx.d f5, s4, a2 //p0_00
fld.d f6, t4, 0 //p0_01
fldx.d f7, s4, a3 //p0_10
fld.d f8, t5, 0 //p0_11
cdef_process_data_w4 vr9, vr10
cdef_calc_sum_tapchange_w4
cdef_calc_maxmin_w4
sub.d t4, s4, s1 //tmp[-off02]
sub.d t5, s4, t0 //tmp[-off03]
fldx.d f5, s4, s1 //s0_00
fld.d f6, t4, 0 //s0_01
fldx.d f7, s4, t0 //s0_02
fld.d f8, t5, 0 //s0_03
cdef_process_data_w4 vr18, vr19
cdef_calc_sum_no_tapchange_w4 vr22
cdef_calc_maxmin_w4
sub.d t4, s4, s2 //tmp[-off12]
sub.d t5, s4, s3 //tmp[-off13]
fldx.d f5, s4, s2 //s0_10
fld.d f6, t4, 0 //s0_11
fldx.d f7, s4, s3 //s0_12
fld.d f8, t5, 0 //s0_13
cdef_process_data_w4 vr18, vr19
cdef_calc_sum_no_tapchange_w4 vr20
cdef_calc_maxmin_w4
vshuf4i.w vr5, vr1, 0x0e
vshuf4i.w vr6, vr3, 0x0e
vshuf4i.w vr7, vr2, 0x0e
vadd.h vr1, vr1, vr5
vmin.hu vr3, vr6, vr3
vmax.h vr2, vr7, vr2
cdef_calc_dst
iclip_vrh vr5, vr3, vr2, vr16, vr17, vr5
vsrlni.b.h vr5, vr5, 0
fst.s f5, a0, 0
add.d a0, a0, a1
add.d s4, s4, s5
add.d s4, s4, s5
addi.d t2, t2, -1
blt zero, t2, 30b
b 35f
31: // pri_strength only
cdef_pri_init
32:
fld.s f0, a0, 0 //px
vsllwil.hu.bu vr0, vr0, 0
vpermi.w vr0, vr0, 0x44
vxor.v vr1, vr1, vr1 //sum
vor.v vr15, vr4, vr4 //pri_tap_k
sub.d t4, s4, a2
sub.d t5, s4, a3
fldx.d f5, s4, a2 //p0_00
fld.d f6, t4, 0 //p0_01
fldx.d f7, s4, a3 //p0_10
fld.d f8, t5, 0 //p0_11
cdef_process_data_w4 vr9, vr10
cdef_calc_sum_tapchange_w4
vshuf4i.w vr5, vr1, 0x0e
vadd.h vr1, vr1, vr5
cdef_calc_dst
vsrlni.b.h vr5, vr5, 0
fst.s f5, a0, 0
add.d a0, a0, a1
add.d s4, s4, s5
add.d s4, s4, s5
addi.d t2, t2, -1
blt zero, t2, 32b
b 35f
33: // sec_strength only
cdef_sec_init
34:
fld.s f0, a0, 0 //px
vsllwil.hu.bu vr0, vr0, 0
vpermi.w vr0, vr0, 0x44
vxor.v vr1, vr1, vr1 //sum
sub.d t4, s4, s1 //tmp[-off02]
sub.d t5, s4, t0 //tmp[-off03]
fldx.d f5, s4, s1 //s0_00
fld.d f6, t4, 0 //s0_01
fldx.d f7, s4, t0 //s0_02
fld.d f8, t5, 0 //s0_03
cdef_process_data_w4 vr18, vr19
cdef_calc_sum_no_tapchange_w4 vr22
sub.d t4, s4, s2 //tmp[-off12]
sub.d t5, s4, s3 //tmp[-off13]
fldx.d f5, s4, s2 //s0_10
fld.d f6, t4, 0 //s0_11
fldx.d f7, s4, s3 //s0_12
fld.d f8, t5, 0 //s0_13
cdef_process_data_w4 vr18, vr19
cdef_calc_sum_no_tapchange_w4 vr20
vshuf4i.w vr5, vr1, 0x0e
vadd.h vr1, vr1, vr5
cdef_calc_dst
vsrlni.b.h vr5, vr5, 0
fst.s f5, a0, 0
add.d a0, a0, a1
add.d s4, s4, s5
add.d s4, s4, s5
addi.d t2, t2, -1
blt zero, t2, 34b
35:
ld.d s0, sp, 0
ld.d s1, sp, 8
ld.d s2, sp, 16
ld.d s3, sp, 24
ld.d s4, sp, 32
ld.d s5, sp, 40
ld.d s6, sp, 48
ld.d s7, sp, 56
addi.d sp, sp, (64+288)
endfunc
function cdef_filter_block_4x8_8bpc_lsx
ld.w t0, sp, 0
ld.w t1, sp, 8
addi.d sp, sp, -(64+288)
st.d s0, sp, 0
st.d s1, sp, 8
st.d s2, sp, 16
st.d s3, sp, 24
st.d s4, sp, 32
st.d s5, sp, 40
st.d s6, sp, 48
st.d s7, sp, 56
li.w s0, 4 //w
li.w s1, 8 //h
or s2, t1, t1 //edges
or s7, t0, t0 //damping
li.d s5, 12 //tmp_stride
addi.d s4, sp, 64
slli.d t0, s5, 1
addi.d t0, t0, 2
slli.d t0, t0, 1
add.d s4, s4, t0 //ptr tmp
vxor.v vr23, vr23, vr23
li.w t2, 1
vreplgr2vr.h vr20, t2
vaddi.hu vr21, vr20, 2
vaddi.hu vr22, vr20, 1
li.w t0, -16384
vreplgr2vr.h vr18, t0
//padding
li.w t5, -2 //x_start
addi.d t6, s0, 2 //x_end
li.w t7, -2 //y_start
addi.d t8, s1, 2 //y_end
li.w t2, 2
andi t4, s2, 4
bnez t4, 1f
//CDEF_HAVE_TOP
slli.d t3, s5, 2
addi.d t4, s4, -4
sub.d t4, t4, t3
addi.d t3, s0, 4
cdef_fill t4, s5, t3, t2
or t7, zero, zero
1: //CDEF_HAVE_BOTTOM
andi t4, s2,8
bnez t4, 2f
mul.w t3, s1, s5
slli.d t3, t3, 1
add.d t4, s4, t3
addi.d t4, t4, -4
li.d t3, 8
cdef_fill t4, s5, t3, t2
addi.d t8, t8, -2
2: //CDEF_HAVE_LEFT
andi t4, s2,1
bnez t4, 3f
mul.w t3, t7, s5
slli.d t3, t3, 1
add.d t4, s4, t3
addi.d t4, t4, -4
sub.d t3, t8, t7
cdef_fill t4, s5, t2, t3
or t5, zero, zero
3: //CDEF_HAVE_RIGHT
andi t4, s2,2
bnez t4, 40f
mul.w t3, t7, s5
slli.d t3, t3, 1
add.d t4, s4, t3
addi.d t4, t4, 8
sub.d t3, t8, t7
cdef_fill t4, s5, t2, t3
addi.d t6, t6, -2
40:
cdef_padding_data
beqz a5, 33f
28: //if (pri_strength)
li.w t0, 4
andi t1, a5, 1
sub.d t0, t0, t1 //pri_tap
clz.w t1, a5
li.d t2, 31
sub.w t1, t2, t1
sub.w t1, s7, t1
blt t1, zero, 281f
or t1, t1, t1
b 282f
281:
or t1, zero, zero //t1: pri_shift
282:
beqz a6, 31f
29: //if (sec_strength)
cdef_pri_sec_init
30:
fld.s f0, a0, 0 //px
vsllwil.hu.bu vr0, vr0, 0
vpermi.w vr0, vr0, 0x44
vxor.v vr1, vr1, vr1 //sum
vor.v vr2, vr0, vr0 //max
vor.v vr3, vr0, vr0 //min
vor.v vr15, vr4, vr4 //pri_tap_k
sub.d t4, s4, a2
sub.d t5, s4, a3
fldx.d f5, s4, a2 //p0_00
fld.d f6, t4, 0 //p0_01
fldx.d f7, s4, a3 //p0_10
fld.d f8, t5, 0 //p0_11
cdef_process_data_w4 vr9, vr10
cdef_calc_sum_tapchange_w4
cdef_calc_maxmin_w4
sub.d t4, s4, s1 //tmp[-off02]
sub.d t5, s4, t0 //tmp[-off03]
fldx.d f5, s4, s1 //s0_00
fld.d f6, t4, 0 //s0_01
fldx.d f7, s4, t0 //s0_02
fld.d f8, t5, 0 //s0_03
cdef_process_data_w4 vr18, vr19
cdef_calc_sum_no_tapchange_w4 vr22
cdef_calc_maxmin_w4
sub.d t4, s4, s2 //tmp[-off12]
sub.d t5, s4, s3 //tmp[-off13]
fldx.d f5, s4, s2 //s0_10
fld.d f6, t4, 0 //s0_11
fldx.d f7, s4, s3 //s0_12
fld.d f8, t5, 0 //s0_13
cdef_process_data_w4 vr18, vr19
cdef_calc_sum_no_tapchange_w4 vr20
cdef_calc_maxmin_w4
vshuf4i.w vr5, vr1, 0x0e
vshuf4i.w vr6, vr3, 0x0e
vshuf4i.w vr7, vr2, 0x0e
vadd.h vr1, vr1, vr5
vmin.hu vr3, vr6, vr3
vmax.h vr2, vr7, vr2
cdef_calc_dst
iclip_vrh vr5, vr3, vr2, vr16, vr17, vr5
vsrlni.b.h vr5, vr5, 0
fst.s f5, a0, 0
add.d a0, a0, a1
add.d s4, s4, s5
add.d s4, s4, s5
addi.d t2, t2, -1
blt zero, t2, 30b
b 35f
31: // pri_strength only
cdef_pri_init
32:
fld.s f0, a0, 0 //px
vsllwil.hu.bu vr0, vr0, 0
vpermi.w vr0, vr0, 0x44
vxor.v vr1, vr1, vr1 //sum
vor.v vr15, vr4, vr4 //pri_tap_k
sub.d t4, s4, a2
sub.d t5, s4, a3
fldx.d f5, s4, a2 //p0_00
fld.d f6, t4, 0 //p0_01
fldx.d f7, s4, a3 //p0_10
fld.d f8, t5, 0 //p0_11
cdef_process_data_w4 vr9, vr10
cdef_calc_sum_tapchange_w4
vshuf4i.w vr5, vr1, 0x0e
vadd.h vr1, vr1, vr5
cdef_calc_dst
vsrlni.b.h vr5, vr5, 0
fst.s f5, a0, 0
add.d a0, a0, a1
add.d s4, s4, s5
add.d s4, s4, s5
addi.d t2, t2, -1
blt zero, t2, 32b
b 35f
33: // sec_strength only
cdef_sec_init
34:
fld.s f0, a0, 0 //px
vsllwil.hu.bu vr0, vr0, 0
vpermi.w vr0, vr0, 0x44
vxor.v vr1, vr1, vr1 //sum
sub.d t4, s4, s1 //tmp[-off02]
sub.d t5, s4, t0 //tmp[-off03]
fldx.d f5, s4, s1 //s0_00
fld.d f6, t4, 0 //s0_01
fldx.d f7, s4, t0 //s0_02
fld.d f8, t5, 0 //s0_03
cdef_process_data_w4 vr18, vr19
cdef_calc_sum_no_tapchange_w4 vr22
sub.d t4, s4, s2 //tmp[-off12]
sub.d t5, s4, s3 //tmp[-off13]
fldx.d f5, s4, s2 //s0_10
fld.d f6, t4, 0 //s0_11
fldx.d f7, s4, s3 //s0_12
fld.d f8, t5, 0 //s0_13
cdef_process_data_w4 vr18, vr19
cdef_calc_sum_no_tapchange_w4 vr20
vshuf4i.w vr5, vr1, 0x0e
vadd.h vr1, vr1, vr5
cdef_calc_dst
vsrlni.b.h vr5, vr5, 0
fst.s f5, a0, 0
add.d a0, a0, a1
add.d s4, s4, s5
add.d s4, s4, s5
addi.d t2, t2, -1
blt zero, t2, 34b
35:
ld.d s0, sp, 0
ld.d s1, sp, 8
ld.d s2, sp, 16
ld.d s3, sp, 24
ld.d s4, sp, 32
ld.d s5, sp, 40
ld.d s6, sp, 48
ld.d s7, sp, 56
addi.d sp, sp, (64+288)
endfunc
function cdef_filter_block_8x8_8bpc_lsx
ld.w t0, sp, 0
ld.w t1, sp, 8
addi.d sp, sp, -(64+288)
st.d s0, sp, 0
st.d s1, sp, 8
st.d s2, sp, 16
st.d s3, sp, 24
st.d s4, sp, 32
st.d s5, sp, 40
st.d s6, sp, 48
st.d s7, sp, 56
li.w s0, 8 //w
li.w s1, 8 //h
or s2, t1, t1 //edges
or s7, t0, t0 //damping
// cdef_filter_block_kernel
li.d s5, 12 //tmp_stride
addi.d s4, sp, 64
slli.d t0, s5, 1
addi.d t0, t0, 2
slli.d t0, t0, 1
add.d s4, s4, t0 //ptr tmp
vxor.v vr23, vr23, vr23
li.w t2, 1
vreplgr2vr.h vr20, t2
vaddi.hu vr21, vr20, 2
vaddi.hu vr22, vr20, 1
li.w t0, -16384
vreplgr2vr.h vr18, t0
//padding
li.w t5, -2 //x_start
addi.d t6, s0, 2 //x_end
li.w t7, -2 //y_start
addi.d t8, s1, 2 //y_end
li.w t2, 2
andi t4, s2, 4
bnez t4, 1f
//CDEF_HAVE_TOP
slli.d t3, s5, 2
addi.d t4, s4, -4
sub.d t4, t4, t3
addi.d t3, s0, 4
cdef_fill t4, s5, t3, t2
or t7, zero, zero
1: //CDEF_HAVE_BOTTOM
andi t4, s2,8
bnez t4, 2f
mul.w t3, s1, s5
slli.d t3, t3, 1
add.d t4, s4, t3
addi.d t4, t4, -4
li.d t3, 12
cdef_fill t4, s5, t3, t2
addi.d t8, t8, -2
2: //CDEF_HAVE_LEFT
andi t4, s2,1
bnez t4, 3f
mul.w t3, t7, s5
slli.d t3, t3, 1
add.d t4, s4, t3
addi.d t4, t4, -4
sub.d t3, t8, t7
li.d t2, 2
cdef_fill t4, s5, t2, t3
or t5, zero, zero
3: //CDEF_HAVE_RIGHT
andi t4, s2,2
bnez t4, 40f
mul.w t3, t7, s5
slli.d t3, t3, 1
add.d t4, s4, t3
addi.d t4, t4, 16
sub.d t3, t8, t7
li.d t2, 2
cdef_fill t4, s5, t2, t3
addi.d t6, t6, -2
40:
cdef_padding_data
beqz a5, 33f
28: //if (pri_strength)
li.w t0, 4
andi t1, a5, 1
sub.d t0, t0, t1 //pri_tap
// pri_shift = imax(0, damping - ulog2(pri_strength)), computed branch-free below
clz.w t1, a5
li.d t2, 31
sub.w t3, t2, t1
sub.w t3, s7, t3
or t1, zero, zero //t1: pri_shift
blt t3, zero, 281f
or t1, t3, t3
281:
beqz a6, 31f
29: //if (sec_strength)
cdef_pri_sec_init
301:
fld.d f0, a0, 0 //px
vsllwil.hu.bu vr0, vr0, 0
vxor.v vr1, vr1, vr1 //sum
vor.v vr2, vr0, vr0 //max
vor.v vr3, vr0, vr0 //min
vor.v vr15, vr4, vr4 //pri_tap_k
sub.d t4, s4, a2
sub.d t5, s4, a3
vldx vr5, s4, a2
vld vr6, t4, 0
vldx vr7, s4, a3
vld vr8, t5, 0
cdef_process_data_w8 vr9, vr10
cdef_calc_sum_tapchange_w8
cdef_calc_maxmin_w8
//s 00-03
sub.d t4, s4, s1 //tmp[-off02]
sub.d t5, s4, t0 //tmp[-off03]
vldx vr5, s4, s1
vld vr6, t4, 0
vldx vr7, s4, t0
vld vr8, t5, 0
cdef_process_data_w8 vr18, vr19
cdef_calc_sum_no_tapchange_w8 vr22
cdef_calc_maxmin_w8
//s 10-13
sub.d t4, s4, s2 //tmp[-off12]
sub.d t5, s4, s3 //tmp[-off13]
vldx vr5, s4, s2
vld vr6, t4, 0
vldx vr7, s4, s3
vld vr8, t5, 0
cdef_process_data_w8 vr18, vr19
cdef_calc_sum_no_tapchange_w8 vr20
cdef_calc_maxmin_w8
cdef_calc_dst
iclip_vrh vr5, vr3, vr2, vr16, vr17, vr5
vsrlni.b.h vr5, vr5, 0
fst.d f5, a0, 0
add.d a0, a0, a1
add.d s4, s4, s5
add.d s4, s4, s5
addi.d t2, t2, -1
blt zero, t2, 301b
b 35f
31: // pri_strength only
cdef_pri_init
32:
fld.d f0, a0, 0 //px
vsllwil.hu.bu vr0, vr0, 0
vxor.v vr1, vr1, vr1 //sum
vor.v vr15, vr4, vr4 //pri_tap_k
sub.d t4, s4, a2
sub.d t5, s4, a3
vldx vr5, s4, a2
vld vr6, t4, 0
vldx vr7, s4, a3
vld vr8, t5, 0
cdef_process_data_w8 vr9, vr10
cdef_calc_sum_tapchange_w8
cdef_calc_dst
vsrlni.b.h vr5, vr5, 0
fst.d f5, a0, 0
add.d a0, a0, a1
add.d s4, s4, s5
add.d s4, s4, s5
addi.d t2, t2, -1
blt zero, t2, 32b
b 35f
33: // sec_strength only
cdef_sec_init
34:
fld.d f0, a0, 0 //px
vsllwil.hu.bu vr0, vr0, 0
vxor.v vr1, vr1, vr1 //sum
sub.d t4, s4, s1 //tmp[-off02]
sub.d t5, s4, t0 //tmp[-off03]
vldx vr5, s4, s1
vld vr6, t4, 0
vldx vr7, s4, t0
vld vr8, t5, 0
cdef_process_data_w8 vr18, vr19
cdef_calc_sum_no_tapchange_w8 vr22
sub.d t4, s4, s2 //tmp[-off12]
sub.d t5, s4, s3 //tmp[-off13]
vldx vr5, s4, s2
vld vr6, t4, 0
vldx vr7, s4, s3
vld vr8, t5, 0
cdef_process_data_w8 vr18, vr19
cdef_calc_sum_no_tapchange_w8 vr20
cdef_calc_dst
vsrlni.b.h vr5, vr5, 0
fst.d f5, a0, 0
add.d a0, a0, a1
add.d s4, s4, s5
add.d s4, s4, s5
addi.d t2, t2, -1
blt zero, t2, 34b
35:
ld.d s0, sp, 0
ld.d s1, sp, 8
ld.d s2, sp, 16
ld.d s3, sp, 24
ld.d s4, sp, 32
ld.d s5, sp, 40
ld.d s6, sp, 48
ld.d s7, sp, 56
addi.d sp, sp, (64+288)
endfunc
| Admenri/urge | 44,160 | third_party/dav1d/src/loongarch/loopfilter.S |
/*
* Copyright © 2023, VideoLAN and dav1d authors
* Copyright © 2023, Loongson Technology Corporation Limited
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/loongarch/loongson_asm.S"
#include "src/loongarch/loongson_util.S"
// depending on how many pixels need to be stored, returns:
// t4 = (1 << 0) : 0 pixels
// t4 = (1 << 4) : inner 4 pixels
// t4 = (1 << 6) : inner 6 pixels
// t4 = 0 : all pixels
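// Decision logic, roughly (following the generic C loop filter):
//   fm       = abs(p1 - p0) <= I && abs(q1 - q0) <= I &&
//              abs(p0 - q0) * 2 + (abs(p1 - q1) >> 1) <= E
//              (wider filters also bound abs(p2 - p1), abs(p3 - p2), ...);
//   flat8in  = max(abs(p2 - p0), abs(p1 - p0), abs(q1 - q0), abs(q2 - q0), ...) <= 1;
//   flat8out = max(abs(p6 - p0), abs(p5 - p0), ..., abs(q6 - q0)) <= 1;
// fm alone selects the 4-tap filter, fm && flat8in the wider filters, and
// fm && flat8in && flat8out the full 16-wide filter.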
.macro FILTER wd
functionl lpf_16_wd\wd\()_lsx
vabsd.bu vr0, vr22, vr23 // abs(p1 - p0)
vabsd.bu vr1, vr25, vr24 // abs(q1 - q0)
vabsd.bu vr2, vr23, vr24 // abs(p0 - q0)
vabsd.bu vr3, vr22, vr25 // abs(p1 - q1)
.if \wd >= 6
vabsd.bu vr4, vr21, vr22 // abs(p2 - p1)
vabsd.bu vr5, vr26, vr25 // abs(q2 - q1)
.endif
.if \wd >= 8
vabsd.bu vr6, vr20, vr21 // abs(p3 - p2)
vabsd.bu vr7, vr27, vr26 // abs(q3 - q2)
.endif
.if \wd >= 6
vmax.bu vr4, vr4, vr5
.endif
vsadd.bu vr2, vr2, vr2 // abs(p0 - q0) * 2
.if \wd >= 8
vmax.bu vr6, vr6, vr7
.endif
vsrli.b vr3, vr3, 1 // abs(p1 - q1) >> 1
.if \wd >= 8
vmax.bu vr4, vr4, vr6
.endif
.if \wd >= 6
vand.v vr4, vr4, vr14
.endif
vmax.bu vr0, vr0, vr1 // max(abs(p1 - p0), abs(q1 - q0))
vsadd.bu vr2, vr2, vr3 // abs(p0 - q0) * 2 + (abs(p1 - q1) >> 1)
.if \wd >= 6
vmax.bu vr4, vr0, vr4
vsle.bu vr1, vr4, vr11 // max(abs(p1 - p0), abs(q1 - q0), abs(p2 - p1), abs(q2 - q1), ...) <= I
.else
vsle.bu vr1, vr0, vr11 // max(abs(p1 - p0), abs(q1 - q0)) <= I
.endif
vsle.bu vr2, vr2, vr10 // abs(p0 - q0) * 2 + (abs(p1 - q1) >> 1) <= E
vand.v vr1, vr1, vr2 // fm
vand.v vr1, vr1, vr13 // fm && wd >= 4
.if \wd >= 6
vand.v vr14, vr14, vr1 // fm && wd > 4
.endif
.if \wd >= 16
vand.v vr15, vr15, vr1 // fm && wd == 16
.endif
vhaddw.qu.du vr8, vr1, vr1
vpickve2gr.du t6, vr8, 0
bnez t6, 9f // if (!fm || wd < 4) return;
li.w t4, 1 << 0
jirl zero, ra, 0x00
9:
.if \wd >= 6
vabsd.bu vr2, vr21, vr23 // abs(p2 - p0)
vabsd.bu vr3, vr22, vr23 // abs(p1 - p0)
vabsd.bu vr4, vr25, vr24 // abs(q1 - q0)
vabsd.bu vr5, vr26, vr24 // abs(q2 - q0)
.if \wd >= 8
vabsd.bu vr6, vr20, vr23 // abs(p3 - p0)
vabsd.bu vr7, vr27, vr24 // abs(q3 - q0)
.endif
vmax.bu vr2, vr2, vr3
vmax.bu vr4, vr4, vr5
.if \wd >= 8
vmax.bu vr6, vr6, vr7
.endif
vmax.bu vr2, vr2, vr4
.if \wd >= 8
vmax.bu vr2, vr2, vr6
.endif
.if \wd == 16
vabsd.bu vr3, vr17, vr23 // abs(p6 - p0)
vabsd.bu vr4, vr18, vr23 // abs(p5 - p0)
vabsd.bu vr5, vr19, vr23 // abs(p4 - p0)
.endif
vslei.bu vr2, vr2, 1 // flat8in
.if \wd == 16
vabsd.bu vr6, vr28, vr24 // abs(q4 - q0)
vabsd.bu vr7, vr29, vr24 // abs(q5 - q0)
vabsd.bu vr8, vr30, vr24 // abs(q6 - q0)
.endif
vand.v vr14, vr2, vr14 // flat8in && fm && wd > 4
vandn.v vr1, vr14, vr1 // fm && wd >= 4 && !flat8in
.if \wd == 16
vmax.bu vr3, vr3, vr4
vmax.bu vr5, vr5, vr6
.endif
vhaddw.qu.du vr9, vr1, vr1
.if \wd == 16
vmax.bu vr7, vr7, vr8
vmax.bu vr3, vr3, vr5
vmax.bu vr3, vr3, vr7
vslei.bu vr3, vr3, 1 // flat8out
.endif
vpickve2gr.du t6, vr9, 0
.if \wd == 16
vand.v vr15, vr15, vr3 // flat8out && fm && wd == 16
vand.v vr15, vr15, vr14 // flat8out && flat8in && fm && wd == 16
vandn.v vr14, vr15, vr14 // flat8in && fm && wd >= 4 && !flat8out
.endif
beqz t6, 1f // skip wd == 4 case
.endif
vxori.b vr2, vr22, 128 // p1 - 128
vxori.b vr3, vr25, 128 // q1 - 128
vslt.bu vr0, vr12, vr0 // hev
vssub.b vr2, vr2, vr3 // iclip_diff(p1 - q1)
vand.v vr4, vr2, vr0 // if (hev) iclip_diff(p1 - q1)
vandn.v vr0, vr0, vr1 // (fm && wd >= 4 && !hev)
vxor.v vr5, vr5, vr5
vaddi.hu vr5, vr5, 3
vsubwev.h.bu vr2, vr24, vr23
vsubwod.h.bu vr3, vr24, vr23
vmul.h vr2, vr2, vr5
vmul.h vr3, vr3, vr5
vxor.v vr6, vr6, vr6
vaddwev.h.b vr7, vr4, vr6
vaddwod.h.b vr6, vr4, vr6
vadd.h vr2, vr2, vr7
vadd.h vr3, vr3, vr6
vssrani.b.h vr2, vr2, 0
vssrani.b.h vr3, vr3, 0
vilvl.b vr2, vr3, vr2 // f
vxor.v vr6, vr6, vr6
vaddi.bu vr5, vr6, 3
vaddi.bu vr6, vr6, 4 // 4
vsadd.b vr4, vr6, vr2 // imin(f + 4, 127)
vsadd.b vr5, vr5, vr2 // imin(f + 3, 127)
vsrai.b vr4, vr4, 3 // f1
vsrai.b vr5, vr5, 3 // f2
vaddi.bu vr2, vr23, 0 // p0
vaddi.bu vr3, vr24, 0 // q0
vxori.b vr2, vr2, 128
vxori.b vr3, vr3, 128
vsadd.b vr2, vr2, vr5 // p0 + f2 out p0
vssub.b vr3, vr3, vr4 // q0 - f1 out q0
vxori.b vr2, vr2, 128
vxori.b vr3, vr3, 128
vsrari.b vr4, vr4, 1 // (f1 + 1) >> 1
vbitsel.v vr23, vr23, vr2, vr1 // if (fm && wd >= 4)
vbitsel.v vr24, vr24, vr3, vr1 // if (fm && wd >= 4)
vaddi.bu vr2, vr22, 0 // p1
vaddi.bu vr3, vr25, 0 // q1
vxori.b vr2, vr2, 128
vxori.b vr3, vr3, 128
vsadd.b vr2, vr2, vr4 // out p1
vssub.b vr3, vr3, vr4 // out q1
vxori.b vr2, vr2, 128
vxori.b vr3, vr3, 128
vbitsel.v vr22, vr22, vr2, vr0 // if (fm && wd >= 4 && !hev)
vbitsel.v vr25, vr25, vr3, vr0 // if (fm && wd >= 4 && !hev)
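// The block above is the 4-tap filter, roughly:
//   f  = iclip_diff(3 * (q0 - p0) + (hev ? iclip_diff(p1 - q1) : 0));
//   f1 = imin(f + 4, 127) >> 3;   f2 = imin(f + 3, 127) >> 3;
//   p0 += f2;   q0 -= f1;
//   if (!hev) { f = (f1 + 1) >> 1;   p1 += f;   q1 -= f; }
// with saturating signed-byte arithmetic; the xori 128 pairs move pixel
// values between unsigned and signed representations.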
1:
.if \wd == 6
vhaddw.qu.du vr0, vr14, vr14
vpickve2gr.du t6, vr0, 0
beqz t6, 2f // skip if there's no flat8in
vaddwev.h.bu vr0, vr21, vr21
vaddwod.h.bu vr1, vr21, vr21 // p2 * 2
vaddwev.h.bu vr2, vr21, vr22
vaddwod.h.bu vr3, vr21, vr22 // p2 + p1
vaddwev.h.bu vr4, vr22, vr23
vaddwod.h.bu vr5, vr22, vr23 // p1 + p0
vaddwev.h.bu vr6, vr23, vr24
vaddwod.h.bu vr7, vr23, vr24 // p0 + q0
vadd.h vr8, vr0, vr2
vadd.h vr9, vr1, vr3
vadd.h vr10, vr4, vr6
vadd.h vr11, vr5, vr7
vaddwev.h.bu vr12, vr24, vr25
vaddwod.h.bu vr13, vr24, vr25 // q0 + q1
vadd.h vr8, vr8, vr10
vadd.h vr9, vr9, vr11
vsub.h vr12, vr12, vr0
vsub.h vr13, vr13, vr1
vaddwev.h.bu vr10, vr25, vr26
vaddwod.h.bu vr11, vr25, vr26 // q1 + q2
vssrlrni.bu.h vr0, vr8, 3
vssrlrni.bu.h vr1, vr9, 3
vilvl.b vr0, vr1, vr0 // out p1
vadd.h vr8, vr8, vr12
vadd.h vr9, vr9, vr13
vsub.h vr10, vr10, vr2
vsub.h vr11, vr11, vr3
vaddwev.h.bu vr12, vr26, vr26 // q2 + q2
vaddwod.h.bu vr13, vr26, vr26
vssrlrni.bu.h vr1, vr8, 3
vssrlrni.bu.h vr2, vr9, 3
vilvl.b vr1, vr2, vr1 // out p0
vadd.h vr8, vr8, vr10
vadd.h vr9, vr9, vr11
vsub.h vr12, vr12, vr4
vsub.h vr13, vr13, vr5
vssrlrni.bu.h vr2, vr8, 3
vssrlrni.bu.h vr3, vr9, 3
vilvl.b vr2, vr3, vr2 // out q0
vbitsel.v vr22, vr22, vr0, vr14
vadd.h vr8, vr8, vr12
vadd.h vr9, vr9, vr13
vbitsel.v vr23, vr23, vr1, vr14
vssrlrni.bu.h vr3, vr8, 3
vssrlrni.bu.h vr4, vr9, 3
vilvl.b vr3, vr4, vr3
vbitsel.v vr24, vr24, vr2, vr14
vbitsel.v vr25, vr25, vr3, vr14
.elseif \wd >= 8
vhaddw.qu.du vr0, vr14, vr14
vpickve2gr.du t6, vr0, 0
.if \wd == 8
beqz t6, 8f // skip if there's no flat8in
.else
beqz t6, 2f // skip if there's no flat8in
.endif
vaddwev.h.bu vr0, vr20, vr21
vaddwod.h.bu vr1, vr20, vr21 // p3 + p2
vaddwev.h.bu vr2, vr22, vr25
vaddwod.h.bu vr3, vr22, vr25 // p1 + q1
vaddwev.h.bu vr4, vr20, vr22
vaddwod.h.bu vr5, vr20, vr22 // p3 + p1
vaddwev.h.bu vr6, vr23, vr26
vaddwod.h.bu vr7, vr23, vr26 // p0 + q2
vadd.h vr8, vr0, vr0
vadd.h vr9, vr1, vr1 // 2 * (p3 + p2)
vxor.v vr10, vr10, vr10
vaddwev.h.bu vr11, vr23, vr10
vaddwod.h.bu vr12, vr23, vr10
vaddwev.h.bu vr13, vr24, vr10
vaddwod.h.bu vr10, vr24, vr10
vadd.h vr8, vr8, vr11 // + p0
vadd.h vr9, vr9, vr12
vadd.h vr8, vr8, vr13 // + q0
vadd.h vr9, vr9, vr10
vadd.h vr8, vr8, vr4
vadd.h vr9, vr9, vr5 // + p3 + p1
vsub.h vr2, vr2, vr0
vsub.h vr3, vr3, vr1 // p1 + q1 - p3 - p2
vsub.h vr6, vr6, vr4
vsub.h vr7, vr7, vr5 // p0 + q2 - p3 - p1
vssrlrni.bu.h vr10, vr8, 3
vssrlrni.bu.h vr11, vr9, 3
vilvl.b vr10, vr11, vr10 // out p2
vadd.h vr8, vr8, vr2
vadd.h vr9, vr9, vr3
vaddwev.h.bu vr0, vr20, vr23
vaddwod.h.bu vr1, vr20, vr23 // p3 + p0
vaddwev.h.bu vr2, vr24, vr27
vaddwod.h.bu vr3, vr24, vr27 // q0 + q3
vssrlrni.bu.h vr11, vr8, 3
vssrlrni.bu.h vr12, vr9, 3
vilvl.b vr11, vr12, vr11 // out p1
vadd.h vr8, vr8, vr6
vadd.h vr9, vr9, vr7
vsub.h vr2, vr2, vr0 // q0 + q3 - p3 - p0
vsub.h vr3, vr3, vr1
vaddwev.h.bu vr4, vr21, vr24 // p2 + q0
vaddwod.h.bu vr5, vr21, vr24
vaddwev.h.bu vr6, vr25, vr27 // q1 + q3
vaddwod.h.bu vr7, vr25, vr27
vssrlrni.bu.h vr12, vr8, 3
vssrlrni.bu.h vr13, vr9, 3
vilvl.b vr12, vr13, vr12 // out p0
vadd.h vr8, vr8, vr2
vadd.h vr9, vr9, vr3
vsub.h vr6, vr6, vr4 // q1 + q3 - p2 - q0
vsub.h vr7, vr7, vr5
vaddwev.h.bu vr0, vr22, vr25 // p1 + q1
vaddwod.h.bu vr1, vr22, vr25
vaddwev.h.bu vr2, vr26, vr27
vaddwod.h.bu vr3, vr26, vr27 // q2 + q3
vssrlrni.bu.h vr13, vr8, 3
vssrlrni.bu.h vr4, vr9, 3
vilvl.b vr13, vr4, vr13 // out q0
vadd.h vr8, vr8, vr6
vadd.h vr9, vr9, vr7
vsub.h vr2, vr2, vr0 // q2 + q3 - p1 - q1
vsub.h vr3, vr3, vr1
vssrlrni.bu.h vr0, vr8, 3
vssrlrni.bu.h vr1, vr9, 3
vilvl.b vr0, vr1, vr0 // out q1
vadd.h vr8, vr8, vr2
vadd.h vr9, vr9, vr3
vbitsel.v vr21, vr21, vr10, vr14
vbitsel.v vr22, vr22, vr11, vr14
vbitsel.v vr23, vr23, vr12, vr14
vbitsel.v vr24, vr24, vr13, vr14
vssrlrni.bu.h vr1, vr8, 3
vssrlrni.bu.h vr2, vr9, 3
vilvl.b vr1, vr2, vr1 // out q2
vbitsel.v vr25, vr25, vr0, vr14
vbitsel.v vr26, vr26, vr1, vr14
.endif
2:
.if \wd == 16
vhaddw.qu.du vr2, vr15, vr15
vpickve2gr.du t6, vr2, 0
bnez t6, 1f // check if flat8out is needed
vhaddw.qu.du vr2, vr14, vr14
vpickve2gr.du t6, vr2, 0
beqz t6, 8f // if there was no flat8in, just write the inner 4 pixels
b 7f // if flat8in was used, write the inner 6 pixels
1:
vaddwev.h.bu vr2, vr17, vr17 // p6 + p6
vaddwod.h.bu vr3, vr17, vr17
vaddwev.h.bu vr4, vr17, vr18
vaddwod.h.bu vr5, vr17, vr18 // p6 + p5
vaddwev.h.bu vr6, vr17, vr19
vaddwod.h.bu vr7, vr17, vr19 // p6 + p4
vaddwev.h.bu vr8, vr17, vr20
vaddwod.h.bu vr9, vr17, vr20 // p6 + p3
vadd.h vr12, vr2, vr4
vadd.h vr13, vr3, vr5
vadd.h vr10, vr6, vr8
vadd.h vr11, vr7, vr9
vaddwev.h.bu vr6, vr17, vr21
vaddwod.h.bu vr7, vr17, vr21 // p6 + p2
vadd.h vr12, vr12, vr10
vadd.h vr13, vr13, vr11
vaddwev.h.bu vr8, vr17, vr22
vaddwod.h.bu vr9, vr17, vr22 // p6 + p1
vaddwev.h.bu vr10, vr18, vr23
vaddwod.h.bu vr11, vr18, vr23 // p5 + p0
vadd.h vr6, vr6, vr8
vadd.h vr7, vr7, vr9
vaddwev.h.bu vr8, vr19, vr24
vaddwod.h.bu vr9, vr19, vr24 // p4 + q0
vadd.h vr12, vr12, vr6
vadd.h vr13, vr13, vr7
vadd.h vr10, vr10, vr8
vadd.h vr11, vr11, vr9
vaddwev.h.bu vr6, vr20, vr25
vaddwod.h.bu vr7, vr20, vr25 // p3 + q1
vadd.h vr12, vr12, vr10
vadd.h vr13, vr13, vr11
vsub.h vr6, vr6, vr2
vsub.h vr7, vr7, vr3
vaddwev.h.bu vr2, vr21, vr26
vaddwod.h.bu vr3, vr21, vr26 // p2 + q2
vssrlrni.bu.h vr0, vr12, 4
vssrlrni.bu.h vr1, vr13, 4
vilvl.b vr0, vr1, vr0 // out p5
vadd.h vr12, vr12, vr6
vadd.h vr13, vr13, vr7 // - (p6 + p6) + (p3 + q1)
vsub.h vr2, vr2, vr4
vsub.h vr3, vr3, vr5
vaddwev.h.bu vr4, vr22, vr27
vaddwod.h.bu vr5, vr22, vr27 // p1 + q3
vaddwev.h.bu vr6, vr17, vr19
vaddwod.h.bu vr7, vr17, vr19 // p6 + p4
vssrlrni.bu.h vr1, vr12, 4
vssrlrni.bu.h vr8, vr13, 4
vilvl.b vr1, vr8, vr1 // out p4
vadd.h vr12, vr12, vr2
vadd.h vr13, vr13, vr3 // - (p6 + p5) + (p2 + q2)
vsub.h vr4, vr4, vr6
vsub.h vr5, vr5, vr7
vaddwev.h.bu vr6, vr23, vr28
vaddwod.h.bu vr7, vr23, vr28 // p0 + q4
vaddwev.h.bu vr8, vr17, vr20
vaddwod.h.bu vr9, vr17, vr20 // p6 + p3
vssrlrni.bu.h vr2, vr12, 4
vssrlrni.bu.h vr10, vr13, 4
vilvl.b vr2, vr10, vr2 // out p3
vadd.h vr12, vr12, vr4
vadd.h vr13, vr13, vr5 // - (p6 + p4) + (p1 + q3)
vsub.h vr6, vr6, vr8
vsub.h vr7, vr7, vr9
vaddwev.h.bu vr8, vr24, vr29
vaddwod.h.bu vr9, vr24, vr29 // q0 + q5
vaddwev.h.bu vr4, vr17, vr21
vaddwod.h.bu vr5, vr17, vr21 // p6 + p2
vssrlrni.bu.h vr3, vr12, 4
vssrlrni.bu.h vr11, vr13, 4
vilvl.b vr3, vr11, vr3 // out p2
vadd.h vr12, vr12, vr6
vadd.h vr13, vr13, vr7 // - (p6 + p3) + (p0 + q4)
vsub.h vr8, vr8, vr4
vsub.h vr9, vr9, vr5
vaddwev.h.bu vr6, vr25, vr30
vaddwod.h.bu vr7, vr25, vr30 // q1 + q6
vaddwev.h.bu vr10, vr17, vr22
vaddwod.h.bu vr11, vr17, vr22 // p6 + p1
vssrlrni.bu.h vr4, vr12, 4
vssrlrni.bu.h vr5, vr13, 4
vilvl.b vr4, vr5, vr4 // out p1
vadd.h vr12, vr12, vr8
vadd.h vr13, vr13, vr9 // - (p6 + p2) + (q0 + q5)
vsub.h vr6, vr6, vr10
vsub.h vr7, vr7, vr11
vaddwev.h.bu vr8, vr26, vr30
vaddwod.h.bu vr9, vr26, vr30 // q2 + q6
vbitsel.v vr0, vr18, vr0, vr15 // out p5
vaddwev.h.bu vr10, vr18, vr23
vaddwod.h.bu vr11, vr18, vr23 // p5 + p0
vssrlrni.bu.h vr5, vr12, 4
vssrlrni.bu.h vr18, vr13, 4
vilvl.b vr5, vr18, vr5 // out p0
vadd.h vr12, vr12, vr6
vadd.h vr13, vr13, vr7 // - (p6 + p1) + (q1 + q6)
vsub.h vr8, vr8, vr10
vsub.h vr9, vr9, vr11
vaddwev.h.bu vr10, vr27, vr30
vaddwod.h.bu vr11, vr27, vr30 // q3 + q6
vbitsel.v vr1, vr19, vr1, vr15 // out p4
vaddwev.h.bu vr18, vr19, vr24
vaddwod.h.bu vr19, vr19, vr24 // p4 + q0
vssrlrni.bu.h vr6, vr12, 4
vssrlrni.bu.h vr7, vr13, 4
vilvl.b vr6, vr7, vr6 // out q0
vadd.h vr12, vr12, vr8
vadd.h vr13, vr13, vr9 // - (p5 + p0) + (q2 + q6)
vsub.h vr10, vr10, vr18
vsub.h vr11, vr11, vr19
vaddwev.h.bu vr8, vr28, vr30
vaddwod.h.bu vr9, vr28, vr30 // q4 + q6
vbitsel.v vr2, vr20, vr2, vr15 // out p3
vaddwev.h.bu vr18, vr20, vr25
vaddwod.h.bu vr19, vr20, vr25 // p3 + q1
vssrlrni.bu.h vr7, vr12, 4
vssrlrni.bu.h vr20, vr13, 4
vilvl.b vr7, vr20, vr7 // out q1
vadd.h vr12, vr12, vr10
vadd.h vr13, vr13, vr11 // - (p4 + q0) + (q3 + q6)
vsub.h vr18, vr8, vr18
vsub.h vr19, vr9, vr19
vaddwev.h.bu vr10, vr29, vr30
vaddwod.h.bu vr11, vr29, vr30 // q5 + q6
vbitsel.v vr3, vr21, vr3, vr15 // out p2
vaddwev.h.bu vr20, vr21, vr26
vaddwod.h.bu vr21, vr21, vr26 // p2 + q2
vssrlrni.bu.h vr8, vr12, 4
vssrlrni.bu.h vr9, vr13, 4
vilvl.b vr8, vr9, vr8 // out q2
vadd.h vr12, vr12, vr18
vadd.h vr13, vr13, vr19 // - (p3 + q1) + (q4 + q6)
vsub.h vr10, vr10, vr20
vsub.h vr11, vr11, vr21
vaddwev.h.bu vr18, vr30, vr30
vaddwod.h.bu vr19, vr30, vr30 // q6 + q6
vbitsel.v vr4, vr22, vr4, vr15 // out p1
vaddwev.h.bu vr20, vr22, vr27
vaddwod.h.bu vr21, vr22, vr27 // p1 + q3
vssrlrni.bu.h vr9, vr12, 4
vssrlrni.bu.h vr22, vr13, 4
vilvl.b vr9, vr22, vr9 // out q3
vadd.h vr12, vr12, vr10
vadd.h vr13, vr13, vr11 // - (p2 + q2) + (q5 + q6)
vsub.h vr18, vr18, vr20
vsub.h vr19, vr19, vr21
vbitsel.v vr5, vr23, vr5, vr15 // out p0
vssrlrni.bu.h vr10, vr12, 4
vssrlrni.bu.h vr23, vr13, 4
vilvl.b vr10, vr23, vr10 // out q4
vadd.h vr12, vr12, vr18
vadd.h vr13, vr13, vr19 // - (p1 + q3) + (q6 + q6)
vssrlrni.bu.h vr11, vr12, 4
vssrlrni.bu.h vr12, vr13, 4
vilvl.b vr11, vr12, vr11 // out q5
vbitsel.v vr6, vr24, vr6, vr15
vbitsel.v vr7, vr25, vr7, vr15
vbitsel.v vr8, vr26, vr8, vr15
vbitsel.v vr9, vr27, vr9, vr15
vbitsel.v vr10, vr28, vr10, vr15
vbitsel.v vr11, vr29, vr11, vr15
.endif
li.w t4, 0
jirl zero, ra, 0x00
.if \wd == 16
7:
// Return to a shorter epilogue, writing only the inner 6 pixels
li.w t4, 1 << 6
jirl zero, ra, 0x00
.endif
.if \wd >= 8
8:
// Return to a shorter epilogue, writing only the inner 4 pixels
li.w t4, 1 << 4
jirl zero, ra, 0x00
.endif
endfuncl
.endm
FILTER 16
FILTER 8
FILTER 6
FILTER 4
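// Calling convention for the lpf_16_wd{4,6,8,16}_lsx filters generated above:
// the pixels around the edge are passed in vr17..vr30 (p6..q6; the narrower
// filters only use the middle subset), and t4 reports which store epilogue the
// caller needs: 0 = store everything the filter touched, 1 << 6 = only the
// inner 6 pixels changed (no flat8out), 1 << 4 = only the inner 4 pixels
// changed (no flat8in).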
.macro LPF_16_WD16
move t7, ra
bl lpf_16_wd16_lsx
move ra, t7
beqz t4, 1f
andi t5, t4, 1 << 6
bnez t5, 7f
andi t5, t4, 1 << 4
bnez t5, 8f
jirl zero, ra, 0x00
1:
.endm
.macro LPF_16_WD8
move t7, ra
bl lpf_16_wd8_lsx
move ra, t7
beqz t4, 1f
andi t5, t4, 1 << 4
bnez t5, 8f
jirl zero, ra, 0x00
1:
.endm
.macro LPF_16_WD6
move t7, ra
bl lpf_16_wd6_lsx
move ra, t7
beqz t4, 1f
jirl zero, ra, 0x00
1:
.endm
.macro LPF_16_WD4
move t7, ra
bl lpf_16_wd4_lsx
move ra, t7
beqz t4, 1f
jirl zero, ra, 0x00
1:
.endm
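// The lpf_{v,h}_{4,6,8,16}_16_lsx helpers below load the p/q rows around the
// edge (transposing first for the horizontal variants), run the shared filter
// through the LPF_16_WD* wrappers, and store back only the pixels that may
// have changed, falling back to the shorter epilogues at local labels 7 and 8
// in the wider variants when t4 says so.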
functionl lpf_v_4_16_lsx
slli.d t3, a1, 1
sub.d t3, a0, t3
vld vr22, t3, 0 // p1
vldx vr23, t3, a1 // p0
vld vr24, a0, 0 // q0
vldx vr25, a0, a1 // q1
LPF_16_WD4
vst vr22, t3, 0 // p1
vstx vr23, t3, a1 // p0
vst vr24, a0, 0 // q0
vstx vr25, a0, a1 // q1
endfuncl
functionl lpf_h_4_16_lsx
addi.d t3, a0, -2
fld.s f22, t3, 0
fldx.s f23, t3, a1
alsl.d t3, a1, t3, 1
fld.s f24, t3, 0
fldx.s f25, t3, a1
alsl.d t3, a1, t3, 1
fld.s f17, t3, 0
fldx.s f18, t3, a1
alsl.d t3, a1, t3, 1
fld.s f19, t3, 0
fldx.s f20, t3, a1
alsl.d t3, a1, t3, 1
vilvl.w vr22, vr17, vr22
vilvl.w vr23, vr18, vr23
vilvl.w vr24, vr19, vr24
vilvl.w vr25, vr20, vr25
fld.s f17, t3, 0
fldx.s f18, t3, a1
alsl.d t3, a1, t3, 1
fld.s f19, t3, 0
fldx.s f20, t3, a1
alsl.d t3, a1, t3, 1
fld.s f26, t3, 0
fldx.s f27, t3, a1
alsl.d t3, a1, t3, 1
fld.s f28, t3, 0
fldx.s f29, t3, a1
alsl.d t3, a1, t3, 1
vilvl.w vr17, vr26, vr17
vilvl.w vr18, vr27, vr18
vilvl.w vr19, vr28, vr19
vilvl.w vr20, vr29, vr20
vilvl.d vr22, vr17, vr22
vilvl.d vr23, vr18, vr23
vilvl.d vr24, vr19, vr24
vilvl.d vr25, vr20, vr25
addi.d a0, t3, 2
TRANSPOSE_4x16B vr22, vr23, vr24, vr25, vr26, vr27, vr28, vr29
LPF_16_WD4
slli.d t3, a1, 4
sub.d a0, a0, t3
TRANSPOSE_4x16B vr22, vr23, vr24, vr25, vr26, vr27, vr28, vr29
addi.d a0, a0, -2
.irp i, vr22, vr23, vr24, vr25
vstelm.w \i, a0, 0, 0
add.d a0, a0, a1
.endr
.irp i, vr22, vr23, vr24, vr25
vstelm.w \i, a0, 0, 1
add.d a0, a0, a1
.endr
.irp i, vr22, vr23, vr24, vr25
vstelm.w \i, a0, 0, 2
add.d a0, a0, a1
.endr
.irp i, vr22, vr23, vr24, vr25
vstelm.w \i, a0, 0, 3
add.d a0, a0, a1
.endr
addi.d a0, a0, 2
endfuncl
functionl lpf_v_6_16_lsx
slli.d t3, a1, 1
sub.d t3, a0, t3
sub.d s0, t3, a1
vld vr21, s0, 0 // p2
vldx vr22, s0, a1 // p1
alsl.d s0, a1, s0, 1
vld vr23, s0, 0 // p0
vldx vr24, s0, a1 // q0
alsl.d s0, a1, s0, 1
vld vr25, s0, 0 // q1
vldx vr26, s0, a1 // q2
LPF_16_WD6
vst vr22, t3, 0 // p1
vstx vr23, t3, a1 // p0
vst vr24, a0, 0 // q0
vstx vr25, a0, a1 // q1
endfuncl
functionl lpf_h_6_16_lsx
addi.d t3, a0, -4
fld.d f20, t3, 0
fldx.d f21, t3, a1
alsl.d t3, a1, t3, 1
fld.d f22, t3, 0
fldx.d f23, t3, a1
alsl.d t3, a1, t3, 1
fld.d f24, t3, 0
fldx.d f25, t3, a1
alsl.d t3, a1, t3, 1
fld.d f26, t3, 0
fldx.d f27, t3, a1
alsl.d t3, a1, t3, 1
fld.d f16, t3, 0
fldx.d f17, t3, a1
alsl.d t3, a1, t3, 1
fld.d f18, t3, 0
fldx.d f19, t3, a1
alsl.d t3, a1, t3, 1
fld.d f28, t3, 0
fldx.d f29, t3, a1
alsl.d t3, a1, t3, 1
fld.d f30, t3, 0
fldx.d f31, t3, a1
alsl.d t3, a1, t3, 1
vilvl.d vr20, vr16, vr20
vilvl.d vr21, vr17, vr21
vilvl.d vr22, vr18, vr22
vilvl.d vr23, vr19, vr23
vilvl.d vr24, vr28, vr24
vilvl.d vr25, vr29, vr25
vilvl.d vr26, vr30, vr26
vilvl.d vr27, vr31, vr27
addi.d a0, t3, 4
TRANSPOSE_8x16B vr20, vr21, vr22, vr23, vr24, vr25, vr26, vr27, vr28, vr29
LPF_16_WD6
slli.d t3, a1, 4
sub.d a0, a0, t3
TRANSPOSE_4x16B vr22, vr23, vr24, vr25, vr26, vr27, vr28, vr29
addi.d a0, a0, -2
.irp i, vr22, vr23, vr24, vr25
vstelm.w \i, a0, 0, 0
add.d a0, a0, a1
.endr
.irp i, vr22, vr23, vr24, vr25
vstelm.w \i, a0, 0, 1
add.d a0, a0, a1
.endr
.irp i, vr22, vr23, vr24, vr25
vstelm.w \i, a0, 0, 2
add.d a0, a0, a1
.endr
.irp i, vr22, vr23, vr24, vr25
vstelm.w \i, a0, 0, 3
add.d a0, a0, a1
.endr
addi.d a0, a0, 2
endfuncl
functionl lpf_v_8_16_lsx
slli.d t3, a1, 2
sub.d s0, a0, t3
vld vr20, s0, 0 // p3
vldx vr21, s0, a1 // p2
alsl.d s0, a1, s0, 1
vld vr22, s0, 0 // p1
vldx vr23, s0, a1 // p0
alsl.d s0, a1, s0, 1
vld vr24, s0, 0 // q0
vldx vr25, s0, a1 // q1
alsl.d s0, a1, s0, 1
vld vr26, s0, 0 // q2
vldx vr27, s0, a1 // q3
LPF_16_WD8
sub.d t3, a0, t3
add.d t3, t3, a1 // a0 - 3 * stride
vst vr21, t3, 0 // p2
vstx vr22, t3, a1 // p1
alsl.d t3, a1, t3, 1
vst vr23, t3, 0 // p0
vstx vr24, t3, a1 // q0
alsl.d t3, a1, t3, 1
vst vr25, t3, 0 // q1
vstx vr26, t3, a1 // q2
jirl zero, ra, 0x00
8:
slli.d t3, a1, 1
sub.d t3, a0, t3
vst vr22, t3, 0 // p1
vstx vr23, t3, a1 // p0
alsl.d t3, a1, t3, 1
vst vr24, t3, 0 // q0
vstx vr25, t3, a1 // q1
endfuncl
functionl lpf_h_8_16_lsx
addi.d t3, a0, -4
fld.d f20, t3, 0
fldx.d f21, t3, a1
alsl.d t3, a1, t3, 1
fld.d f22, t3, 0
fldx.d f23, t3, a1
alsl.d t3, a1, t3, 1
fld.d f24, t3, 0
fldx.d f25, t3, a1
alsl.d t3, a1, t3, 1
fld.d f26, t3, 0
fldx.d f27, t3, a1
alsl.d t3, a1, t3, 1
fld.d f16, t3, 0
fldx.d f17, t3, a1
alsl.d t3, a1, t3, 1
fld.d f18, t3, 0
fldx.d f19, t3, a1
alsl.d t3, a1, t3, 1
fld.d f28, t3, 0
fldx.d f29, t3, a1
alsl.d t3, a1, t3, 1
fld.d f30, t3, 0
fldx.d f31, t3, a1
alsl.d t3, a1, t3, 1
vilvl.d vr20, vr16, vr20
vilvl.d vr21, vr17, vr21
vilvl.d vr22, vr18, vr22
vilvl.d vr23, vr19, vr23
vilvl.d vr24, vr28, vr24
vilvl.d vr25, vr29, vr25
vilvl.d vr26, vr30, vr26
vilvl.d vr27, vr31, vr27
addi.d a0, t3, 4
TRANSPOSE_8x16B vr20, vr21, vr22, vr23, vr24, vr25, vr26, vr27, vr28, vr29
LPF_16_WD8
slli.d t3, a1, 4
sub.d a0, a0, t3
TRANSPOSE_8x16B vr20, vr21, vr22, vr23, vr24, vr25, vr26, vr27, vr28, vr29
addi.d a0, a0, -4
.irp i, vr20, vr21, vr22, vr23, vr24, vr25, vr26, vr27
vstelm.d \i, a0, 0, 0
add.d a0, a0, a1
.endr
.irp i, vr20, vr21, vr22, vr23, vr24, vr25, vr26, vr27
vstelm.d \i, a0, 0, 1
add.d a0, a0, a1
.endr
addi.d a0, a0, 4
jirl zero, ra, 0x00
8:
slli.d t3, a1, 4
sub.d a0, a0, t3
TRANSPOSE_4x16B vr22, vr23, vr24, vr25, vr26, vr27, vr28, vr29
addi.d a0, a0, -2
.irp i, vr22, vr23, vr24, vr25
vstelm.w \i, a0, 0, 0
add.d a0, a0, a1
.endr
.irp i, vr22, vr23, vr24, vr25
vstelm.w \i, a0, 0, 1
add.d a0, a0, a1
.endr
.irp i, vr22, vr23, vr24, vr25
vstelm.w \i, a0, 0, 2
add.d a0, a0, a1
.endr
.irp i, vr22, vr23, vr24, vr25
vstelm.w \i, a0, 0, 3
add.d a0, a0, a1
.endr
addi.d a0, a0, 2
endfuncl
functionl lpf_v_16_16_lsx
slli.d t3, a1, 3
sub.d s0, a0, t3
add.d s0, s0, a1
vld vr17, s0, 0 // p6
vldx vr18, s0, a1 // p5
alsl.d s0, a1, s0, 1
vld vr19, s0, 0 // p4
vldx vr20, s0, a1 // p3
alsl.d s0, a1, s0, 1
vld vr21, s0, 0 // p2
vldx vr22, s0, a1 // p1
alsl.d s0, a1, s0, 1
vld vr23, s0, 0 // p0
vldx vr24, s0, a1 // q0
alsl.d s0, a1, s0, 1
vld vr25, s0, 0 // q1
vldx vr26, s0, a1 // q2
alsl.d s0, a1, s0, 1
vld vr27, s0, 0 // q3
vldx vr28, s0, a1 // q4
alsl.d s0, a1, s0, 1
vld vr29, s0, 0 // q5
vldx vr30, s0, a1 // q6
LPF_16_WD16
sub.d s0, a0, t3
alsl.d s0, a1, s0, 1
vst vr0, s0, 0 // p5
vstx vr1, s0, a1 // p4
alsl.d s0, a1, s0, 1
vst vr2, s0, 0 // p3
vstx vr3, s0, a1 // p2
alsl.d s0, a1, s0, 1
vst vr4, s0, 0 // p1
vstx vr5, s0, a1 // p0
alsl.d s0, a1, s0, 1
vst vr6, s0, 0 // q0
vstx vr7, s0, a1 // q1
alsl.d s0, a1, s0, 1
vst vr8, s0, 0 // q2
vstx vr9, s0, a1 // q3
alsl.d s0, a1, s0, 1
vst vr10, s0, 0 // q4
vstx vr11, s0, a1 // q5
jirl zero, ra, 0x00
7:
slli.d t3, a1, 1
add.d t3, t3, a1
sub.d s0, a0, t3
vst vr21, s0, 0 // p2
vstx vr22, s0, a1 // p1
alsl.d s0, a1, s0, 1
vst vr23, s0, 0 // p0
vstx vr24, s0, a1 // q0
alsl.d s0, a1, s0, 1
vst vr25, s0, 0 // q1
vstx vr26, s0, a1 // q2
jirl zero, ra, 0x00
8:
slli.d t3, a1, 1
sub.d s0, a0, t3
vst vr22, s0, 0 // p1
vstx vr23, s0, a1 // p0
alsl.d s0, a1, s0, 1
vst vr24, s0, 0 // q0
vstx vr25, s0, a1 // q1
endfuncl
functionl lpf_h_16_16_lsx
addi.d t3, a0, -8
vld vr16, t3, 0
vldx vr17, t3, a1
alsl.d t3, a1, t3, 1
vld vr18, t3, 0
vldx vr19, t3, a1
alsl.d t3, a1, t3, 1
vld vr20, t3, 0
vldx vr21, t3, a1
alsl.d t3, a1, t3, 1
vld vr22, t3, 0
vldx vr23, t3, a1
alsl.d t3, a1, t3, 1
vld vr24, t3, 0
vldx vr25, t3, a1
alsl.d t3, a1, t3, 1
vld vr26, t3, 0
vldx vr27, t3, a1
alsl.d t3, a1, t3, 1
vld vr28, t3, 0
vldx vr29, t3, a1
alsl.d t3, a1, t3, 1
vld vr30, t3, 0
vldx vr31, t3, a1
alsl.d t3, a1, t3, 1
.macro SWAPD in0, in1
vaddi.bu vr0, \in0, 0
vilvl.d \in0, \in1, \in0
vilvh.d \in1, \in1, vr0
.endm
SWAPD vr16, vr24
SWAPD vr17, vr25
SWAPD vr18, vr26
SWAPD vr19, vr27
SWAPD vr20, vr28
SWAPD vr21, vr29
SWAPD vr22, vr30
SWAPD vr23, vr31
addi.d a0, t3, 8
TRANSPOSE_8x16B vr16, vr17, vr18, vr19, vr20, vr21, vr22, vr23, vr0, vr1
TRANSPOSE_8x16B vr24, vr25, vr26, vr27, vr28, vr29, vr30, vr31, vr0, vr1
LPF_16_WD16
slli.d t3, a1, 4
sub.d a0, a0, t3
TRANSPOSE_8x16B vr16, vr17, vr0, vr1, vr2, vr3, vr4, vr5, vr18, vr19
TRANSPOSE_8x16B vr6, vr7, vr8, vr9, vr10, vr11, vr30, vr31, vr18, vr19
addi.d t3, a0, -8
.irp i, vr16, vr17, vr0, vr1, vr2, vr3, vr4, vr5
vstelm.d \i, t3, 0, 0
add.d t3, t3, a1
.endr
.irp i, vr16, vr17, vr0, vr1, vr2, vr3, vr4, vr5
vstelm.d \i, t3, 0, 1
add.d t3, t3, a1
.endr
.irp i, vr6, vr7, vr8, vr9, vr10, vr11, vr30, vr31
vstelm.d \i, a0, 0, 0
add.d a0, a0, a1
.endr
.irp i, vr6, vr7, vr8, vr9, vr10, vr11, vr30, vr31
vstelm.d \i, a0, 0, 1
add.d a0, a0, a1
.endr
jirl zero, ra, 0x00
7:
slli.d t3, a1, 4
sub.d a0, a0, t3
TRANSPOSE_8x16B vr20, vr21, vr22, vr23, vr24, vr25, vr26, vr27, vr28, vr29
addi.d a0, a0, -4
.irp i, vr20, vr21, vr22, vr23, vr24, vr25, vr26, vr27
vstelm.d \i, a0, 0, 0
add.d a0, a0, a1
.endr
.irp i, vr20, vr21, vr22, vr23, vr24, vr25, vr26, vr27
vstelm.d \i, a0, 0, 1
add.d a0, a0, a1
.endr
addi.d a0, a0, 4
jirl zero, ra, 0x00
8:
slli.d t3, a1, 4
sub.d a0, a0, t3
TRANSPOSE_4x16B vr22, vr23, vr24, vr25, vr26, vr27, vr28, vr29
addi.d a0, a0, -2
.irp i, 0, 1, 2, 3
vstelm.w vr22, a0, 0, \i
add.d a0, a0, a1
vstelm.w vr23, a0, 0, \i
add.d a0, a0, a1
vstelm.w vr24, a0, 0, \i
add.d a0, a0, a1
vstelm.w vr25, a0, 0, \i
add.d a0, a0, a1
.endr
addi.d a0, a0, 2
endfuncl
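// PUSH_REG/POP_REG spill the callee-saved f24-f31 (which alias the low halves
// of vr24-vr31 clobbered by the filters above) plus s0, 72 bytes in total.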
.macro PUSH_REG
addi.d sp, sp, -64-8
fst.d f24, sp, 0
fst.d f25, sp, 8
fst.d f26, sp, 16
fst.d f27, sp, 24
fst.d f28, sp, 32
fst.d f29, sp, 40
fst.d f30, sp, 48
fst.d f31, sp, 56
st.d s0, sp, 64
.endm
.macro POP_REG
fld.d f24, sp, 0
fld.d f25, sp, 8
fld.d f26, sp, 16
fld.d f27, sp, 24
fld.d f28, sp, 32
fld.d f29, sp, 40
fld.d f30, sp, 48
fld.d f31, sp, 56
ld.d s0, sp, 64
addi.d sp, sp, 64+8
.endm
const mask_1248
.word 1, 2, 4, 8
endconst
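// Per-column bit masks: vmask[i] & {1, 2, 4, 8} tells, for each of the four
// 4-pixel columns in a 16-pixel unit, whether it needs any filtering
// (vmask[0]), the wider wd6/wd8 filter (vmask[1]) or the wd16 filter
// (vmask[2], y planes only).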
.macro LPF_FUNC DIR, TYPE
function lpf_\DIR\()_sb_\TYPE\()_8bpc_lsx
PUSH_REG
move t8, ra
vld vr0, a2, 0 //vmask
vpickve2gr.wu t0, vr0, 0
vpickve2gr.wu t1, vr0, 1
.ifc \TYPE, y
vpickve2gr.wu t2, vr0, 2
.endif
addi.d a5, a5, 128 // Move to sharp part of lut
.ifc \TYPE, y
or t1, t1, t2 // vmask[1] |= vmask[2]
.endif
slli.d a4, a4, 2
.ifc \DIR, v
sub.d a4, a3, a4
.else
addi.d a3, a3, -4
.endif
or t0, t0, t1 // vmask[0] |= vmask[1]
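// Main loop: each iteration processes one 16-pixel unit (four 4-pixel
// columns). The vmask words are consumed 4 bits at a time (srli.d by 4 at the
// bottom of the loop) and a5 points at the sharpness half of the lut.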
1:
andi t3, t0, 0x0f
.ifc \DIR, v
vld vr0, a4, 0 // l[-b4_stride][]
addi.d a4, a4, 16
vld vr1, a3, 0 // l[0][]
addi.d a3, a3, 16
.else
fld.d f0, a3, 0
fldx.d f1, a3, a4
alsl.d a3, a4, a3, 1
fld.d f2, a3, 0
fldx.d f3, a3, a4
alsl.d a3, a4, a3, 1
vilvl.w vr1, vr1, vr0
vilvl.w vr2, vr3, vr2
vilvl.d vr0, vr2, vr1
vilvh.d vr1, vr2, vr1
.endif
beqz t3, 7f
//l[0][] ? l[0][] : l[-b4_stride][]
vseqi.b vr2, vr1, 0
vbitsel.v vr1, vr1, vr0, vr2
li.w t3, 0xff
vreplgr2vr.w vr3, t3
vand.v vr1, vr1, vr3
vshuf4i.b vr1, vr1, 0x00 // L -- 1 0 2 0
vseqi.w vr2, vr1, 0 // 0 -1 0 -1
vseqi.w vr2, vr2, 0 // L != 0 -- -1 0 -1 0
vhaddw.qu.du vr3, vr2, vr2
vpickve2gr.du t4, vr3, 0
beqz t4, 7f // if (!L) continue
la.local t3, mask_1248 // bits x
vld vr16, t3, 0
vreplgr2vr.w vr13, t0 // vmask[0]
vreplgr2vr.w vr14, t1 // vmask[1]
vand.v vr13, vr13, vr16
vseqi.w vr13, vr13, 0
vseqi.w vr13, vr13, 0 // if (vmask[0] & x)
vand.v vr13, vr13, vr2 // vmask[0] &= L != 0
vand.v vr14, vr14, vr16
vseqi.w vr14, vr14, 0
vseqi.w vr14, vr14, 0 // if (vmask[1] & x)
.ifc \TYPE, y
vreplgr2vr.w vr15, t2 // vmask[2]
vand.v vr15, vr15, vr16
vseqi.w vr15, vr15, 0
vseqi.w vr15, vr15, 0 // if (vmask[2] & x)
.endif
vldrepl.b vr5, a5, 0 // sharp[0]
addi.d t5, a5, 8
vldrepl.b vr6, t5, 0 // sharp[1]
vsrl.b vr3, vr1, vr5 // L >> sharp[0]
vsrli.b vr12, vr1, 4 // H
vmin.bu vr3, vr3, vr6 // imin(L >> sharp[0], sharp[1])
vaddi.bu vr0, vr1, 2 // L + 2
vmaxi.bu vr11, vr3, 1 // imax(imin(), 1) = limit = I
vslli.b vr0, vr0, 1 // 2*(L + 2)
vadd.b vr10, vr0, vr11 // 2*(L + 2) + limit = E
.ifc \TYPE, y
andi t3, t2, 0x0f
beqz t3, 2f
//wd16
bl lpf_\DIR\()_16_16_lsx
b 8f
2:
.endif
andi t3, t1, 0x0f
beqz t3, 3f
.ifc \TYPE, y
// wd8
bl lpf_\DIR\()_8_16_lsx
.else
// wd6
bl lpf_\DIR\()_6_16_lsx
.endif
b 8f
3:
// wd4
bl lpf_\DIR\()_4_16_lsx
.ifc \DIR, h
b 8f
7:
// For dir h, the functions above increment a0.
// If the whole function is skipped, increment it here instead.
alsl.d a0, a1, a0, 4
.else
7:
.endif
8:
srli.d t0, t0, 4
srli.d t1, t1, 4
.ifc \TYPE, y
srli.d t2, t2, 4
.endif
.ifc \DIR, v
addi.d a0, a0, 16
.else
// For dir h, a0 is returned incremented
.endif
bnez t0, 1b
move ra, t8
POP_REG
endfunc
.endm
LPF_FUNC h, y
LPF_FUNC v, y
LPF_FUNC h, uv
LPF_FUNC v, uv
|
Admenri/urge
| 22,175
|
third_party/dav1d/src/loongarch/loongson_asm.S
|
/*********************************************************************
* Copyright (c) 2022 Loongson Technology Corporation Limited
* Contributed by Gu Xiwei(guxiwei-hf@loongson.cn)
* Shiyou Yin(yinshiyou-hf@loongson.cn)
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*********************************************************************/
/*
* This file is a LoongArch assembly helper file, available under the ISC
* license. It provides a large number of macros and aliases to simplify
* writing assembly code, especially for LSX and LASX optimizations.
*
* Anyone can modify it or add new features for their own purposes.
* Contributing a patch will be appreciated as it might be useful for
* others as well. Send patches to the Loongson contributors mentioned above.
*
* MAJOR version: Usage changes, incompatible with previous version.
* MINOR version: Add new macros/functions, or bug fixes.
* MICRO version: Comment changes or implementation changes.
*/
#define LML_VERSION_MAJOR 0
#define LML_VERSION_MINOR 4
#define LML_VERSION_MICRO 0
#define DEFAULT_ALIGN 5
/* Set prefix as needed. */
#ifndef PRIVATE_PREFIX
#define PRIVATE_PREFIX dav1d_
#endif
#define PASTE(a,b) a ## b
#define CONCAT(a,b) PASTE(a,b)
#ifdef PREFIX
#define ASM_PREF CONCAT(_,PRIVATE_PREFIX)
#else
#define ASM_PREF PRIVATE_PREFIX
#endif
.macro function name, align=DEFAULT_ALIGN
.macro endfunc
jirl $r0, $r1, 0x0
.size ASM_PREF\name, . - ASM_PREF\name
.purgem endfunc
.endm
.text ;
.align \align ;
.globl ASM_PREF\name ;
.type ASM_PREF\name, @function ;
ASM_PREF\name: ;
.endm
.macro const name, align=DEFAULT_ALIGN
.macro endconst
.size \name, . - \name
.purgem endconst
.endm
.section .rodata
.align \align
\name:
.endm
/*
*============================================================================
* LoongArch register alias
*============================================================================
*/
#define a0 $a0
#define a1 $a1
#define a2 $a2
#define a3 $a3
#define a4 $a4
#define a5 $a5
#define a6 $a6
#define a7 $a7
#define t0 $t0
#define t1 $t1
#define t2 $t2
#define t3 $t3
#define t4 $t4
#define t5 $t5
#define t6 $t6
#define t7 $t7
#define t8 $t8
#define s0 $s0
#define s1 $s1
#define s2 $s2
#define s3 $s3
#define s4 $s4
#define s5 $s5
#define s6 $s6
#define s7 $s7
#define s8 $s8
#define zero $zero
#define sp $sp
#define ra $ra
#define fa0 $fa0
#define fa1 $fa1
#define fa2 $fa2
#define fa3 $fa3
#define fa4 $fa4
#define fa5 $fa5
#define fa6 $fa6
#define fa7 $fa7
#define ft0 $ft0
#define ft1 $ft1
#define ft2 $ft2
#define ft3 $ft3
#define ft4 $ft4
#define ft5 $ft5
#define ft6 $ft6
#define ft7 $ft7
#define ft8 $ft8
#define ft9 $ft9
#define ft10 $ft10
#define ft11 $ft11
#define ft12 $ft12
#define ft13 $ft13
#define ft14 $ft14
#define ft15 $ft15
#define fs0 $fs0
#define fs1 $fs1
#define fs2 $fs2
#define fs3 $fs3
#define fs4 $fs4
#define fs5 $fs5
#define fs6 $fs6
#define fs7 $fs7
#define f0 $f0
#define f1 $f1
#define f2 $f2
#define f3 $f3
#define f4 $f4
#define f5 $f5
#define f6 $f6
#define f7 $f7
#define f8 $f8
#define f9 $f9
#define f10 $f10
#define f11 $f11
#define f12 $f12
#define f13 $f13
#define f14 $f14
#define f15 $f15
#define f16 $f16
#define f17 $f17
#define f18 $f18
#define f19 $f19
#define f20 $f20
#define f21 $f21
#define f22 $f22
#define f23 $f23
#define f24 $f24
#define f25 $f25
#define f26 $f26
#define f27 $f27
#define f28 $f28
#define f29 $f29
#define f30 $f30
#define f31 $f31
#define vr0 $vr0
#define vr1 $vr1
#define vr2 $vr2
#define vr3 $vr3
#define vr4 $vr4
#define vr5 $vr5
#define vr6 $vr6
#define vr7 $vr7
#define vr8 $vr8
#define vr9 $vr9
#define vr10 $vr10
#define vr11 $vr11
#define vr12 $vr12
#define vr13 $vr13
#define vr14 $vr14
#define vr15 $vr15
#define vr16 $vr16
#define vr17 $vr17
#define vr18 $vr18
#define vr19 $vr19
#define vr20 $vr20
#define vr21 $vr21
#define vr22 $vr22
#define vr23 $vr23
#define vr24 $vr24
#define vr25 $vr25
#define vr26 $vr26
#define vr27 $vr27
#define vr28 $vr28
#define vr29 $vr29
#define vr30 $vr30
#define vr31 $vr31
#define xr0 $xr0
#define xr1 $xr1
#define xr2 $xr2
#define xr3 $xr3
#define xr4 $xr4
#define xr5 $xr5
#define xr6 $xr6
#define xr7 $xr7
#define xr8 $xr8
#define xr9 $xr9
#define xr10 $xr10
#define xr11 $xr11
#define xr12 $xr12
#define xr13 $xr13
#define xr14 $xr14
#define xr15 $xr15
#define xr16 $xr16
#define xr17 $xr17
#define xr18 $xr18
#define xr19 $xr19
#define xr20 $xr20
#define xr21 $xr21
#define xr22 $xr22
#define xr23 $xr23
#define xr24 $xr24
#define xr25 $xr25
#define xr26 $xr26
#define xr27 $xr27
#define xr28 $xr28
#define xr29 $xr29
#define xr30 $xr30
#define xr31 $xr31
/*
*============================================================================
* LSX/LASX synthesize instructions
*============================================================================
*/
/*
* Description : Dot product of byte vector elements
* Arguments : Inputs - vj, vk
* Outputs - vd
* Return Type - halfword
*/
.macro vdp2.h.bu vd, vj, vk
vmulwev.h.bu \vd, \vj, \vk
vmaddwod.h.bu \vd, \vj, \vk
.endm
.macro vdp2.h.bu.b vd, vj, vk
vmulwev.h.bu.b \vd, \vj, \vk
vmaddwod.h.bu.b \vd, \vj, \vk
.endm
.macro vdp2.w.h vd, vj, vk
vmulwev.w.h \vd, \vj, \vk
vmaddwod.w.h \vd, \vj, \vk
.endm
.macro xvdp2.h.bu xd, xj, xk
xvmulwev.h.bu \xd, \xj, \xk
xvmaddwod.h.bu \xd, \xj, \xk
.endm
.macro xvdp2.h.bu.b xd, xj, xk
xvmulwev.h.bu.b \xd, \xj, \xk
xvmaddwod.h.bu.b \xd, \xj, \xk
.endm
.macro xvdp2.w.h xd, xj, xk
xvmulwev.w.h \xd, \xj, \xk
xvmaddwod.w.h \xd, \xj, \xk
.endm
/*
* Description : Dot product & addition of halfword vector elements
* Arguments : Inputs - vj, vk
* Outputs - vd
* Return Type - twice size of input
*/
.macro vdp2add.h.bu vd, vj, vk
vmaddwev.h.bu \vd, \vj, \vk
vmaddwod.h.bu \vd, \vj, \vk
.endm
.macro vdp2add.h.bu.b vd, vj, vk
vmaddwev.h.bu.b \vd, \vj, \vk
vmaddwod.h.bu.b \vd, \vj, \vk
.endm
.macro vdp2add.w.h vd, vj, vk
vmaddwev.w.h \vd, \vj, \vk
vmaddwod.w.h \vd, \vj, \vk
.endm
.macro xvdp2add.h.bu.b xd, xj, xk
xvmaddwev.h.bu.b \xd, \xj, \xk
xvmaddwod.h.bu.b \xd, \xj, \xk
.endm
.macro xvdp2add.w.h xd, xj, xk
xvmaddwev.w.h \xd, \xj, \xk
xvmaddwod.w.h \xd, \xj, \xk
.endm
/*
* Description : Clip each element of vj to the range vk[i] ~ va[i]
* clip: vd[i] = min(max(vj[i], vk[i]), va[i])
*/
.macro vclip.h vd, vj, vk, va
vmax.h \vd, \vj, \vk
vmin.h \vd, \vd, \va
.endm
.macro vclip.w vd, vj, vk, va
vmax.w \vd, \vj, \vk
vmin.w \vd, \vd, \va
.endm
.macro xvclip.h xd, xj, xk, xa
xvmax.h \xd, \xj, \xk
xvmin.h \xd, \xd, \xa
.endm
.macro xvclip.w xd, xj, xk, xa
xvmax.w \xd, \xj, \xk
xvmin.w \xd, \xd, \xa
.endm
/*
* Description : Clip each element of vj to the range 0 ~ 255
* clip255: vd[i] = min(max(vj[i], 0), 255)
*/
.macro vclip255.h vd, vj
vmaxi.h \vd, \vj, 0
vsat.hu \vd, \vd, 7
.endm
.macro vclip255.w vd, vj
vmaxi.w \vd, \vj, 0
vsat.wu \vd, \vd, 7
.endm
.macro xvclip255.h xd, xj
xvmaxi.h \xd, \xj, 0
xvsat.hu \xd, \xd, 7
.endm
.macro xvclip255.w xd, xj
xvmaxi.w \xd, \xj, 0
xvsat.wu \xd, \xd, 7
.endm
/*
* Description : Store elements of vector
* vd : Data vector to be stored
* rk : Address of data storage
* ra : Offset of address
* si : Index of data in vd
*/
.macro vstelmx.b vd, rk, ra, si
add.d \rk, \rk, \ra
vstelm.b \vd, \rk, 0, \si
.endm
.macro vstelmx.h vd, rk, ra, si
add.d \rk, \rk, \ra
vstelm.h \vd, \rk, 0, \si
.endm
.macro vstelmx.w vd, rk, ra, si
add.d \rk, \rk, \ra
vstelm.w \vd, \rk, 0, \si
.endm
.macro vstelmx.d vd, rk, ra, si
add.d \rk, \rk, \ra
vstelm.d \vd, \rk, 0, \si
.endm
.macro vmov xd, xj
vor.v \xd, \xj, \xj
.endm
.macro xmov xd, xj
xvor.v \xd, \xj, \xj
.endm
.macro xvstelmx.d xd, rk, ra, si
add.d \rk, \rk, \ra
xvstelm.d \xd, \rk, 0, \si
.endm
/*
*============================================================================
* LSX/LASX custom macros
*============================================================================
*/
/*
* Load 4 float, double, 128-bit or 256-bit vector elements with stride.
*/
.macro FLDS_LOADX_4 src, stride, stride2, stride3, out0, out1, out2, out3
fld.s \out0, \src, 0
fldx.s \out1, \src, \stride
fldx.s \out2, \src, \stride2
fldx.s \out3, \src, \stride3
.endm
.macro FLDD_LOADX_4 src, stride, stride2, stride3, out0, out1, out2, out3
fld.d \out0, \src, 0
fldx.d \out1, \src, \stride
fldx.d \out2, \src, \stride2
fldx.d \out3, \src, \stride3
.endm
.macro LSX_LOADX_4 src, stride, stride2, stride3, out0, out1, out2, out3
vld \out0, \src, 0
vldx \out1, \src, \stride
vldx \out2, \src, \stride2
vldx \out3, \src, \stride3
.endm
.macro LASX_LOADX_4 src, stride, stride2, stride3, out0, out1, out2, out3
xvld \out0, \src, 0
xvldx \out1, \src, \stride
xvldx \out2, \src, \stride2
xvldx \out3, \src, \stride3
.endm
/*
* Description : Transpose 4x4 block with half-word elements in vectors
* Arguments : Inputs - in0, in1, in2, in3
* Outputs - out0, out1, out2, out3
*/
.macro LSX_TRANSPOSE4x4_H in0, in1, in2, in3, out0, out1, out2, out3, \
tmp0, tmp1
vilvl.h \tmp0, \in1, \in0
vilvl.h \tmp1, \in3, \in2
vilvl.w \out0, \tmp1, \tmp0
vilvh.w \out2, \tmp1, \tmp0
vilvh.d \out1, \out0, \out0
vilvh.d \out3, \out0, \out2
.endm
/*
* Description : Transpose 4x4 block with word elements in vectors
* Arguments : Inputs - in0, in1, in2, in3
* Outputs - out0, out1, out2, out3
* Details :
* Example :
* 1, 2, 3, 4 1, 5, 9,13
* 5, 6, 7, 8 to 2, 6,10,14
* 9,10,11,12 =====> 3, 7,11,15
* 13,14,15,16 4, 8,12,16
*/
.macro LSX_TRANSPOSE4x4_W in0, in1, in2, in3, out0, out1, out2, out3, \
tmp0, tmp1
vilvl.w \tmp0, \in1, \in0
vilvh.w \out1, \in1, \in0
vilvl.w \tmp1, \in3, \in2
vilvh.w \out3, \in3, \in2
vilvl.d \out0, \tmp1, \tmp0
vilvl.d \out2, \out3, \out1
vilvh.d \out3, \out3, \out1
vilvh.d \out1, \tmp1, \tmp0
.endm
/*
* Description : Transpose 8x8 block with half-word elements in vectors
* Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7
* Outputs - out0, out1, out2, out3, out4, out5, out6, out7
*/
.macro LSX_TRANSPOSE8x8_H in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
out2, out3, out4, out5, out6, out7, tmp0, tmp1, tmp2, \
tmp3, tmp4, tmp5, tmp6, tmp7
vilvl.h \tmp0, \in6, \in4
vilvl.h \tmp1, \in7, \in5
vilvl.h \tmp2, \in2, \in0
vilvl.h \tmp3, \in3, \in1
vilvl.h \tmp4, \tmp1, \tmp0
vilvh.h \tmp5, \tmp1, \tmp0
vilvl.h \tmp6, \tmp3, \tmp2
vilvh.h \tmp7, \tmp3, \tmp2
vilvh.h \tmp0, \in6, \in4
vilvh.h \tmp1, \in7, \in5
vilvh.h \tmp2, \in2, \in0
vilvh.h \tmp3, \in3, \in1
vpickev.d \out0, \tmp4, \tmp6
vpickod.d \out1, \tmp4, \tmp6
vpickev.d \out2, \tmp5, \tmp7
vpickod.d \out3, \tmp5, \tmp7
vilvl.h \tmp4, \tmp1, \tmp0
vilvh.h \tmp5, \tmp1, \tmp0
vilvl.h \tmp6, \tmp3, \tmp2
vilvh.h \tmp7, \tmp3, \tmp2
vpickev.d \out4, \tmp4, \tmp6
vpickod.d \out5, \tmp4, \tmp6
vpickev.d \out6, \tmp5, \tmp7
vpickod.d \out7, \tmp5, \tmp7
.endm
/*
* Description : Transpose 16x8 block with byte elements in vectors
* Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7
* Outputs - out0, out1, out2, out3, out4, out5, out6, out7
*/
.macro LASX_TRANSPOSE16X8_B in0, in1, in2, in3, in4, in5, in6, in7, \
in8, in9, in10, in11, in12, in13, in14, in15, \
out0, out1, out2, out3, out4, out5, out6, out7,\
tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7
xvilvl.b \tmp0, \in2, \in0
xvilvl.b \tmp1, \in3, \in1
xvilvl.b \tmp2, \in6, \in4
xvilvl.b \tmp3, \in7, \in5
xvilvl.b \tmp4, \in10, \in8
xvilvl.b \tmp5, \in11, \in9
xvilvl.b \tmp6, \in14, \in12
xvilvl.b \tmp7, \in15, \in13
xvilvl.b \out0, \tmp1, \tmp0
xvilvh.b \out1, \tmp1, \tmp0
xvilvl.b \out2, \tmp3, \tmp2
xvilvh.b \out3, \tmp3, \tmp2
xvilvl.b \out4, \tmp5, \tmp4
xvilvh.b \out5, \tmp5, \tmp4
xvilvl.b \out6, \tmp7, \tmp6
xvilvh.b \out7, \tmp7, \tmp6
xvilvl.w \tmp0, \out2, \out0
xvilvh.w \tmp2, \out2, \out0
xvilvl.w \tmp4, \out3, \out1
xvilvh.w \tmp6, \out3, \out1
xvilvl.w \tmp1, \out6, \out4
xvilvh.w \tmp3, \out6, \out4
xvilvl.w \tmp5, \out7, \out5
xvilvh.w \tmp7, \out7, \out5
xvilvl.d \out0, \tmp1, \tmp0
xvilvh.d \out1, \tmp1, \tmp0
xvilvl.d \out2, \tmp3, \tmp2
xvilvh.d \out3, \tmp3, \tmp2
xvilvl.d \out4, \tmp5, \tmp4
xvilvh.d \out5, \tmp5, \tmp4
xvilvl.d \out6, \tmp7, \tmp6
xvilvh.d \out7, \tmp7, \tmp6
.endm
/*
* Description : Transpose 4x4 block with half-word elements in vectors
* Arguments : Inputs - in0, in1, in2, in3
* Outputs - out0, out1, out2, out3
*/
.macro LASX_TRANSPOSE4x4_H in0, in1, in2, in3, out0, out1, out2, out3, \
tmp0, tmp1
xvilvl.h \tmp0, \in1, \in0
xvilvl.h \tmp1, \in3, \in2
xvilvl.w \out0, \tmp1, \tmp0
xvilvh.w \out2, \tmp1, \tmp0
xvilvh.d \out1, \out0, \out0
xvilvh.d \out3, \out0, \out2
.endm
/*
* Description : Transpose 4x8 block with half-word elements in vectors
* Arguments : Inputs - in0, in1, in2, in3
* Outputs - out0, out1, out2, out3
*/
.macro LASX_TRANSPOSE4x8_H in0, in1, in2, in3, out0, out1, out2, out3, \
tmp0, tmp1
xvilvl.h \tmp0, \in2, \in0
xvilvl.h \tmp1, \in3, \in1
xvilvl.h \out2, \tmp1, \tmp0
xvilvh.h \out3, \tmp1, \tmp0
xvilvl.d \out0, \out2, \out2
xvilvh.d \out1, \out2, \out2
xvilvl.d \out2, \out3, \out3
xvilvh.d \out3, \out3, \out3
.endm
/*
* Description : Transpose 8x8 block with half-word elements in vectors
* Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7
* Outputs - out0, out1, out2, out3, out4, out5, out6, out7
*/
.macro LASX_TRANSPOSE8x8_H in0, in1, in2, in3, in4, in5, in6, in7, \
out0, out1, out2, out3, out4, out5, out6, out7, \
tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7
xvilvl.h \tmp0, \in6, \in4
xvilvl.h \tmp1, \in7, \in5
xvilvl.h \tmp2, \in2, \in0
xvilvl.h \tmp3, \in3, \in1
xvilvl.h \tmp4, \tmp1, \tmp0
xvilvh.h \tmp5, \tmp1, \tmp0
xvilvl.h \tmp6, \tmp3, \tmp2
xvilvh.h \tmp7, \tmp3, \tmp2
xvilvh.h \tmp0, \in6, \in4
xvilvh.h \tmp1, \in7, \in5
xvilvh.h \tmp2, \in2, \in0
xvilvh.h \tmp3, \in3, \in1
xvpickev.d \out0, \tmp4, \tmp6
xvpickod.d \out1, \tmp4, \tmp6
xvpickev.d \out2, \tmp5, \tmp7
xvpickod.d \out3, \tmp5, \tmp7
xvilvl.h \tmp4, \tmp1, \tmp0
xvilvh.h \tmp5, \tmp1, \tmp0
xvilvl.h \tmp6, \tmp3, \tmp2
xvilvh.h \tmp7, \tmp3, \tmp2
xvpickev.d \out4, \tmp4, \tmp6
xvpickod.d \out5, \tmp4, \tmp6
xvpickev.d \out6, \tmp5, \tmp7
xvpickod.d \out7, \tmp5, \tmp7
.endm
/*
* Description : Transpose 2x4x4 block with half-word elements in vectors
* Arguments : Inputs - in0, in1, in2, in3
* Outputs - out0, out1, out2, out3
*/
.macro LASX_TRANSPOSE2x4x4_H in0, in1, in2, in3, out0, out1, out2, out3, \
tmp0, tmp1, tmp2
xvilvh.h \tmp1, \in0, \in1
xvilvl.h \out1, \in0, \in1
xvilvh.h \tmp0, \in2, \in3
xvilvl.h \out3, \in2, \in3
xvilvh.w \tmp2, \out3, \out1
xvilvl.w \out3, \out3, \out1
xvilvl.w \out2, \tmp0, \tmp1
xvilvh.w \tmp1, \tmp0, \tmp1
xvilvh.d \out0, \out2, \out3
xvilvl.d \out2, \out2, \out3
xvilvh.d \out1, \tmp1, \tmp2
xvilvl.d \out3, \tmp1, \tmp2
.endm
/*
* Description : Transpose 4x4 block with word elements in vectors
* Arguments : Inputs - in0, in1, in2, in3
* Outputs - out0, out1, out2, out3
* Details :
* Example :
* 1, 2, 3, 4, 1, 2, 3, 4 1,5, 9,13, 1,5, 9,13
* 5, 6, 7, 8, 5, 6, 7, 8 to 2,6,10,14, 2,6,10,14
* 9,10,11,12, 9,10,11,12 =====> 3,7,11,15, 3,7,11,15
* 13,14,15,16, 13,14,15,16 4,8,12,16, 4,8,12,16
*/
.macro LASX_TRANSPOSE4x4_W in0, in1, in2, in3, out0, out1, out2, out3, \
tmp0, tmp1
xvilvl.w \tmp0, \in1, \in0
xvilvh.w \out1, \in1, \in0
xvilvl.w \tmp1, \in3, \in2
xvilvh.w \out3, \in3, \in2
xvilvl.d \out0, \tmp1, \tmp0
xvilvl.d \out2, \out3, \out1
xvilvh.d \out3, \out3, \out1
xvilvh.d \out1, \tmp1, \tmp0
.endm
/*
* Description : Transpose 8x8 block with word elements in vectors
* Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7
* Outputs - out0, out1, out2, out3, out4, out5, out6, out7
* Example : LASX_TRANSPOSE8x8_W
* in0 : 1,2,3,4,5,6,7,8
* in1 : 2,2,3,4,5,6,7,8
* in2 : 3,2,3,4,5,6,7,8
* in3 : 4,2,3,4,5,6,7,8
* in4 : 5,2,3,4,5,6,7,8
* in5 : 6,2,3,4,5,6,7,8
* in6 : 7,2,3,4,5,6,7,8
* in7 : 8,2,3,4,5,6,7,8
*
* out0 : 1,2,3,4,5,6,7,8
* out1 : 2,2,2,2,2,2,2,2
* out2 : 3,3,3,3,3,3,3,3
* out3 : 4,4,4,4,4,4,4,4
* out4 : 5,5,5,5,5,5,5,5
* out5 : 6,6,6,6,6,6,6,6
* out6 : 7,7,7,7,7,7,7,7
* out7 : 8,8,8,8,8,8,8,8
*/
.macro LASX_TRANSPOSE8x8_W in0, in1, in2, in3, in4, in5, in6, in7,\
out0, out1, out2, out3, out4, out5, out6, out7,\
tmp0, tmp1, tmp2, tmp3
xvilvl.w \tmp0, \in2, \in0
xvilvl.w \tmp1, \in3, \in1
xvilvh.w \tmp2, \in2, \in0
xvilvh.w \tmp3, \in3, \in1
xvilvl.w \out0, \tmp1, \tmp0
xvilvh.w \out1, \tmp1, \tmp0
xvilvl.w \out2, \tmp3, \tmp2
xvilvh.w \out3, \tmp3, \tmp2
xvilvl.w \tmp0, \in6, \in4
xvilvl.w \tmp1, \in7, \in5
xvilvh.w \tmp2, \in6, \in4
xvilvh.w \tmp3, \in7, \in5
xvilvl.w \out4, \tmp1, \tmp0
xvilvh.w \out5, \tmp1, \tmp0
xvilvl.w \out6, \tmp3, \tmp2
xvilvh.w \out7, \tmp3, \tmp2
xmov \tmp0, \out0
xmov \tmp1, \out1
xmov \tmp2, \out2
xmov \tmp3, \out3
xvpermi.q \out0, \out4, 0x02
xvpermi.q \out1, \out5, 0x02
xvpermi.q \out2, \out6, 0x02
xvpermi.q \out3, \out7, 0x02
xvpermi.q \out4, \tmp0, 0x31
xvpermi.q \out5, \tmp1, 0x31
xvpermi.q \out6, \tmp2, 0x31
xvpermi.q \out7, \tmp3, 0x31
.endm
/*
* Description : Transpose 4x4 block with double-word elements in vectors
* Arguments : Inputs - in0, in1, in2, in3
* Outputs - out0, out1, out2, out3
* Example : LASX_TRANSPOSE4x4_D
* in0 : 1,2,3,4
* in1 : 1,2,3,4
* in2 : 1,2,3,4
* in3 : 1,2,3,4
*
* out0 : 1,1,1,1
* out1 : 2,2,2,2
* out2 : 3,3,3,3
* out3 : 4,4,4,4
*/
.macro LASX_TRANSPOSE4x4_D in0, in1, in2, in3, out0, out1, out2, out3, \
tmp0, tmp1
xvilvl.d \tmp0, \in1, \in0
xvilvh.d \out1, \in1, \in0
xvilvh.d \tmp1, \in3, \in2
xvilvl.d \out2, \in3, \in2
xvor.v \out0, \tmp0, \tmp0
xvor.v \out3, \tmp1, \tmp1
xvpermi.q \out0, \out2, 0x02
xvpermi.q \out2, \tmp0, 0x31
xvpermi.q \out3, \out1, 0x31
xvpermi.q \out1, \tmp1, 0x02
.endm
|
Admenri/urge
| 182,398
|
third_party/dav1d/src/loongarch/itx.S
|
/*
* Copyright © 2023, VideoLAN and dav1d authors
* Copyright © 2023, Loongson Technology Corporation Limited
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/loongarch/loongson_asm.S"
#include "src/loongarch/loongson_util.S"
.macro PUSH_REG
addi.d sp, sp, -64
fst.d f24, sp, 0
fst.d f25, sp, 8
fst.d f26, sp, 16
fst.d f27, sp, 24
fst.d f28, sp, 32
fst.d f29, sp, 40
fst.d f30, sp, 48
fst.d f31, sp, 56
.endm
.macro POP_REG
fld.d f24, sp, 0
fld.d f25, sp, 8
fld.d f26, sp, 16
fld.d f27, sp, 24
fld.d f28, sp, 32
fld.d f29, sp, 40
fld.d f30, sp, 48
fld.d f31, sp, 56
addi.d sp, sp, 64
.endm
.macro malloc_space number
li.w t0, \number
sub.d sp, sp, t0
addi.d sp, sp, -64
PUSH_REG
.endm
.macro free_space number
POP_REG
li.w t0, \number
add.d sp, sp, t0
addi.d sp, sp, 64
.endm
.macro iwht4
vadd.h vr0, vr0, vr1
vsub.h vr4, vr2, vr3
vsub.h vr5, vr0, vr4
vsrai.h vr5, vr5, 1
vsub.h vr2, vr5, vr1
vsub.h vr1, vr5, vr3
vadd.h vr3, vr4, vr2
vsub.h vr0, vr0, vr1
.endm
.macro DST_ADD_W4 in0, in1, in2, in3, in4, in5
vilvl.w \in0, \in1, \in0 // 0 1 2 3 4 5 6 7 x ...
vilvl.w \in2, \in3, \in2 // 8 9 10 11 12 13 14 15 x ...
vsllwil.hu.bu \in0, \in0, 0
vsllwil.hu.bu \in2, \in2, 0
vadd.h \in0, \in4, \in0
vadd.h \in2, \in5, \in2
vssrani.bu.h \in2, \in0, 0
vstelm.w \in2, a0, 0, 0
vstelmx.w \in2, a0, a1, 1
vstelmx.w \in2, a0, a1, 2
vstelmx.w \in2, a0, a1, 3
.endm
.macro VLD_DST_ADD_W4 in0, in1
vld vr0, a0, 0
vldx vr1, a0, a1
vld vr2, t2, 0
vldx vr3, t2, a1
DST_ADD_W4 vr0, vr1, vr2, vr3, \in0, \in1
.endm
function inv_txfm_add_wht_wht_4x4_8bpc_lsx
vld vr0, a2, 0
vld vr2, a2, 16
vxor.v vr20, vr20, vr20
vsrai.h vr0, vr0, 2
vsrai.h vr2, vr2, 2
vst vr20, a2, 0
vpickod.d vr1, vr0, vr0
vpickod.d vr3, vr2, vr2
vst vr20, a2, 16
iwht4
LSX_TRANSPOSE4x4_H vr0, vr1, vr2, vr3, vr0, vr1, vr2, vr3, vr4, vr5
iwht4
vilvl.d vr4, vr1, vr0
vilvl.d vr5, vr3, vr2
alsl.d t2, a1, a0, 1
VLD_DST_ADD_W4 vr4, vr5
endfunc
const idct_coeffs, align=4
.word 2896, 2896*8, 1567, 3784
.word 799, 4017, 3406, 2276
.word 401, 4076, 3166, 2598
.word 1931, 3612, 3920, 1189
.word 201, 4091, 3035, 2751
.word 1751, 3703, 3857, 1380
.word 995, 3973, 3513, 2106
.word 2440, 3290, 4052, 601
endconst
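// Inverse transform trig constants in 12-bit fixed point (scaled by 4096,
// 2896 ~= 4096/sqrt(2)); idct_coeffs_h below carries the same values as
// halfwords for vldrepl.h.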
.macro vsrari_h_x4 in0, in1, in2, in3, out0, out1, out2, out3, shift
vsrari.h \out0, \in0, \shift
vsrari.h \out1, \in1, \shift
vsrari.h \out2, \in2, \shift
vsrari.h \out3, \in3, \shift
.endm
.macro vsrari_h_x8 in0, in1, in2, in3, in4, in5, in6, in7, out0, \
out1, out2, out3, out4, out5, out6, out7, shift
vsrari.h \out0, \in0, \shift
vsrari.h \out1, \in1, \shift
vsrari.h \out2, \in2, \shift
vsrari.h \out3, \in3, \shift
vsrari.h \out4, \in4, \shift
vsrari.h \out5, \in5, \shift
vsrari.h \out6, \in6, \shift
vsrari.h \out7, \in7, \shift
.endm
.macro vmulev_vmaddod_lsx in0, in1, in2, in3, out0, out1, sz
vmulwev.w.h \out0, \in0, \in2
vmulwod.w.h \out1, \in0, \in2
vmaddwev.w.h \out0, \in1, \in3
vmaddwod.w.h \out1, \in1, \in3
.ifc \sz, .4h
vilvl.w \out0, \out1, \out0
.else
vilvl.w vr22, \out1, \out0
vilvh.w \out1, \out1, \out0
vor.v \out0, vr22, vr22
.endif
.endm
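// vmulev_vmaddod_lsx computes out = in0 * in2 + in1 * in3 as 32-bit words:
// out0 collects the even halfword lanes and out1 the odd ones, then the
// results are re-interleaved into natural lane order (low half in out0 and
// high half in out1 for .8h, only the low half for .4h).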
const idct_coeffs_h, align=4
.short 2896, 2896*8, 1567, 3784
.short 799, 4017, 3406, 2276
.short 401, 4076, 3166, 2598
.short 1931, 3612, 3920, 1189
.short 201, 4091, 3035, 2751
.short 1751, 3703, 3857, 1380
.short 995, 3973, 3513, 2106
.short 2440, 3290, 4052, 601
endconst
const iadst4_coeffs, align=4
.word 1321, 3803, 2482, 3344
endconst
.macro inv_dct4_lsx in0, in1, in2, in3, out0, out1, out2, out3, sz
la.local t0, idct_coeffs_h
vldrepl.h vr20, t0, 0 // 2896
vmulev_vmaddod_lsx \in0, \in2, vr20, vr20, vr16, vr18, \sz
vneg.h vr21, vr20
vmulev_vmaddod_lsx \in0, \in2, vr20, vr21, vr17, vr19, \sz
vssrarni.h.w vr18, vr16, 12 // t0
vssrarni.h.w vr19, vr17, 12 // t1
vldrepl.h vr20, t0, 4 // 1567
vldrepl.h vr21, t0, 6 // 3784
vmulev_vmaddod_lsx \in1, \in3, vr21, vr20, \in0, vr16, \sz
vneg.h vr21, vr21
vmulev_vmaddod_lsx \in1, \in3, vr20, vr21, \in2, vr17, \sz
vssrarni.h.w vr16, \in0, 12 // t3
vssrarni.h.w vr17, \in2, 12 // t2
vsadd.h \out0, vr18, vr16
vsadd.h \out1, vr19, vr17
vssub.h \out2, vr19, vr17
vssub.h \out3, vr18, vr16
.endm
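// inv_dct4_lsx is the 4-point inverse DCT: t0/t1 = (in0 +/- in2) * 2896 >> 12,
// t2/t3 = the 1567/3784 rotation of in1/in3, and the outputs are the
// saturating butterflies t0+t3, t1+t2, t1-t2, t0-t3.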
functionl inv_dct_4h_x4_lsx
inv_dct4_lsx vr0, vr1, vr2, vr3, vr0, vr1, vr2, vr3, .4h
endfuncl
functionl inv_dct_8h_x4_lsx
inv_dct4_lsx vr0, vr1, vr2, vr3, vr0, vr1, vr2, vr3, .8h
endfuncl
.macro inv_adst4_core_lsx in0, in1, in2, in3, out0, out1, out2, out3
vsub.w vr16, \in0, \in2 // in0-in2
vmul.w vr17, \in0, vr20 // in0*1321
vmul.w vr19, \in0, vr22 // in0*2482
vmul.w vr18, \in1, vr23 // in1*3344
vmadd.w vr17, \in2, vr21 // in0*1321+in2*3803
vmsub.w vr19, \in2, vr20 // in2*1321
vadd.w vr16, vr16, \in3 // in0-in2+in3
vmadd.w vr17, \in3, vr22 // in0*1321+in2*3803+in3*2482
vmsub.w vr19, \in3, vr21 // in0*2482-in2*1321-in3*3803
vadd.w vr15, vr17, vr19
vmul.w \out2, vr16, vr23 // out[2] 8 9 10 11
vadd.w \out0, vr17, vr18 // out[0] 0 1 2 3
vadd.w \out1, vr19, vr18 // out[1] 4 5 6 7
vsub.w \out3, vr15, vr18 // out[3] 12 13 14 15
.endm
.macro inv_adst4_lsx in0, in1, in2, in3, out0, out1, out2, out3
la.local t0, iadst4_coeffs
vldrepl.w vr20, t0, 0 // 1321
vldrepl.w vr21, t0, 4 // 3803
vldrepl.w vr22, t0, 8 // 2482
vldrepl.w vr23, t0, 12 // 3344
vsllwil.w.h vr0, \in0, 0
vsllwil.w.h vr1, \in1, 0
vsllwil.w.h vr2, \in2, 0
vsllwil.w.h vr3, \in3, 0
inv_adst4_core_lsx vr0, vr1, vr2, vr3, \out0, \out1, \out2, \out3
vssrarni.h.w \out0, \out0, 12
vssrarni.h.w \out1, \out1, 12
vssrarni.h.w \out2, \out2, 12
vssrarni.h.w \out3, \out3, 12
.endm
functionl inv_adst_4h_x4_lsx
inv_adst4_lsx vr0, vr1, vr2, vr3, vr0, vr1, vr2, vr3
endfuncl
functionl inv_flipadst_4h_x4_lsx
inv_adst4_lsx vr0, vr1, vr2, vr3, vr3, vr2, vr1, vr0
endfuncl
.macro inv_adst_8x4_lsx in0, in1, in2, in3, out0, out1, out2, out3
la.local t0, iadst4_coeffs
vldrepl.w vr20, t0, 0 // 1321
vldrepl.w vr21, t0, 4 // 3803
vldrepl.w vr22, t0, 8 // 2482
vldrepl.w vr23, t0, 12 // 3344
vsllwil.w.h vr10, \in0, 0 // in0
vsllwil.w.h vr11, \in1, 0 // in1
vsllwil.w.h vr12, \in2, 0 // in2
vsllwil.w.h vr13, \in3, 0 // in3
inv_adst4_core_lsx vr10, vr11, vr12, vr13, vr10, vr11, vr12, vr13
vexth.w.h \in0, \in0 // in0
vexth.w.h \in1, \in1 // in1
vexth.w.h \in2, \in2 // in2
vexth.w.h \in3, \in3 // in3
inv_adst4_core_lsx \in0, \in1, \in2, \in3, \out0, \out1, \out2, \out3
vssrarni.h.w \out0, vr10, 12
vssrarni.h.w \out1, vr11, 12
vssrarni.h.w \out2, vr12, 12
vssrarni.h.w \out3, vr13, 12
.endm
functionl inv_adst_8h_x4_lsx
inv_adst_8x4_lsx vr0, vr1, vr2, vr3, vr0, vr1, vr2, vr3
endfuncl
functionl inv_flipadst_8h_x4_lsx
inv_adst_8x4_lsx vr0, vr1, vr2, vr3, vr3, vr2, vr1, vr0
endfuncl
functionl inv_identity_4h_x4_lsx
li.w t0, 1697
vreplgr2vr.h vr20, t0
vilvl.d vr0, vr1, vr0
vilvl.d vr2, vr3, vr2
vmulwev.w.h vr16, vr0, vr20
vmulwod.w.h vr17, vr0, vr20
vmulwev.w.h vr18, vr2, vr20
vmulwod.w.h vr19, vr2, vr20
vilvl.w vr1, vr17, vr16
vilvh.w vr3, vr17, vr16
vilvl.w vr22, vr19, vr18
vilvh.w vr23, vr19, vr18
vssrarni.h.w vr3, vr1, 12
vssrarni.h.w vr23, vr22, 12
vsadd.h vr0, vr3, vr0 // t0
vsadd.h vr2, vr23, vr2 // t2
vilvh.d vr1, vr0, vr0 // t1
vilvh.d vr3, vr2, vr2 // t3
endfuncl
.macro inv_identity4_lsx1 in0, in1, in2, out0, out1
vsllwil.w.h vr16, \in0, 0
vexth.w.h vr17, \in1
vmul.w vr18, vr16, \in2
vmul.w vr19, vr17, \in2
vsrari.w vr18, vr18, 12
vsrari.w vr19, vr19, 12
vadd.w \out0, vr18, vr16
vadd.w \out1, vr19, vr17
vssrarni.h.w \out1, \out0, 1
.endm
functionl inv_identity_8h_x4_lsx
li.w t0, 1697
vreplgr2vr.h vr20, t0
vmulwev.w.h vr16, vr0, vr20
vmulwod.w.h vr17, vr0, vr20
vmulwev.w.h vr18, vr1, vr20
vmulwod.w.h vr19, vr1, vr20
vilvl.w vr21, vr17, vr16
vilvh.w vr22, vr17, vr16
vilvl.w vr23, vr19, vr18
vilvh.w vr16, vr19, vr18
vssrarni.h.w vr22, vr21, 12
vssrarni.h.w vr16, vr23, 12
vsadd.h vr0, vr22, vr0 // t0
vsadd.h vr1, vr16, vr1 // t1
vmulwev.w.h vr16, vr2, vr20
vmulwod.w.h vr17, vr2, vr20
vmulwev.w.h vr18, vr3, vr20
vmulwod.w.h vr19, vr3, vr20
vilvl.w vr21, vr17, vr16
vilvh.w vr22, vr17, vr16
vilvl.w vr23, vr19, vr18
vilvh.w vr16, vr19, vr18
vssrarni.h.w vr22, vr21, 12
vssrarni.h.w vr16, vr23, 12
vsadd.h vr2, vr22, vr2 // t2
vsadd.h vr3, vr16, vr3 // t3
endfuncl
functionl inv_identity_8h_x4_lsx1
li.w t0, 1697
vreplgr2vr.w vr20, t0
.irp i, vr0, vr1, vr2, vr3
inv_identity4_lsx1 \i, \i, vr20, vr21, \i
.endr
endfuncl
functionl inv_txfm_add_4x4_lsx
vxor.v vr23, vr23, vr23
vld vr0, a2, 0
vld vr2, a2, 16
vilvh.d vr1, vr0, vr0
vilvh.d vr3, vr2, vr2
vst vr23, a2, 0
vst vr23, a2, 16
move t6, ra
jirl ra, t7, 0
move ra, t6
LSX_TRANSPOSE4x4_H vr0, vr1, vr2, vr3, vr0, vr1, vr2, vr3, vr4, vr5
move t6, ra
jirl ra, t8, 0
move ra, t6
vilvl.d vr4, vr1, vr0
vilvl.d vr5, vr3, vr2
vsrari.h vr4, vr4, 4
vsrari.h vr5, vr5, 4
alsl.d t2, a1, a0, 1
VLD_DST_ADD_W4 vr4, vr5
endfuncl
.macro idct_dc w, h, shift
ld.h t2, a2, 0 // dc
vldi vr0, 0x8b5 // 181
vreplgr2vr.w vr1, t2
vldi vr20, 0x880 // 128
vmul.w vr2, vr0, vr1 // dc * 181
st.h zero, a2, 0
vsrari.w vr2, vr2, 8 // (dc * 181 + 128) >> 8
vld vr10, a0, 0 // 0 1 2 3 4 5 6 7
.if (2*\w == \h) || (2*\h == \w)
vmul.w vr2, vr0, vr2
vsrari.w vr2, vr2, 8 // (dc * 181 + 128) >> 8
.endif
.if \shift>0
vsrari.w vr2, vr2, \shift // (dc + rnd) >> shift
.endif
vldx vr11, a0, a1 // 8 9 10 11 12 13 14 15
alsl.d t2, a1, a0, 1
vmadd.w vr20, vr2, vr0
vld vr12, t2, 0 // 16 17 18 19 20 21 22 23
vssrarni.h.w vr20, vr20, 12
vldx vr13, t2, a1 // 24 25 26 27 28 29 30 31
.endm
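// dc-only path: dc = (dc * 181 + 128) >> 8 (applied twice for rectangular
// blocks where one side is double the other), optionally rounded down by
// \shift, then (dc * 181 + 128 + 2048) >> 12 is splat into vr20 and added to
// the destination rows by DST_ADD_W*.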
.macro fun4x4 txfm1, txfm2
function inv_txfm_add_\txfm1\()_\txfm2\()_4x4_8bpc_lsx
.ifc \txfm1\()_\txfm2, dct_dct
bnez a3, 1f
idct_dc 4, 4, 0
DST_ADD_W4 vr10, vr11, vr12, vr13, vr20, vr20
b .\txfm1\()_\txfm2\()_4X4_END
1:
.endif
la.local t7, inv_\txfm1\()_4h_x4_lsx
la.local t8, inv_\txfm2\()_4h_x4_lsx
b inv_txfm_add_4x4_lsx
.\txfm1\()_\txfm2\()_4X4_END:
endfunc
.endm
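// fun4x4 emits one inv_txfm_add_<txfm1>_<txfm2>_4x4_8bpc_lsx entry per
// transform pair: dct_dct takes the dc-only shortcut when a3 (eob) is zero,
// otherwise the first-pass transform goes into t7, the second-pass transform
// into t8, and it tail-branches to the shared inv_txfm_add_4x4_lsx.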
fun4x4 dct, dct
fun4x4 identity, identity
fun4x4 adst, dct
fun4x4 dct, adst
fun4x4 adst, adst
fun4x4 dct, flipadst
fun4x4 flipadst, adst
fun4x4 adst, flipadst
fun4x4 flipadst, dct
fun4x4 flipadst, flipadst
fun4x4 dct, identity
fun4x4 identity, dct
fun4x4 flipadst, identity
fun4x4 identity, flipadst
fun4x4 identity, adst
fun4x4 adst, identity
const iadst8_coeffs_h, align=4
.short 4076, 401, 3612, 1931
.short 2598, 3166, 1189, 3920
.short 2896, 0, 1567, 3784, 0, 0, 0, 0
endconst
.macro inv_adst8_lsx out0, out1, out2, out3, out4, out5, out6, out7, sz
la.local t0, iadst8_coeffs_h
vldrepl.h vr20, t0, 0 // 4076
vldrepl.h vr21, t0, 2 // 401
vmulev_vmaddod_lsx vr7, vr0, vr20, vr21, vr16, vr17, \sz
vneg.h vr20, vr20
vmulev_vmaddod_lsx vr7, vr0, vr21, vr20, vr18, vr19, \sz
vssrarni.h.w vr17, vr16, 12 // t0a
vssrarni.h.w vr19, vr18, 12 // t1a
vldrepl.h vr20, t0, 4 // 3612
vldrepl.h vr21, t0, 6 // 1931
vmulev_vmaddod_lsx vr5, vr2, vr20, vr21, vr0, vr16, \sz
vneg.h vr20, vr20
vmulev_vmaddod_lsx vr5, vr2, vr21, vr20, vr7, vr18, \sz
vssrarni.h.w vr16, vr0, 12 // t2a
vssrarni.h.w vr18, vr7, 12 // t3a
vldrepl.h vr20, t0, 8 // 2598
vldrepl.h vr21, t0, 10 // 3166
vmulev_vmaddod_lsx vr3, vr4, vr20, vr21, vr2, vr0, \sz
vneg.h vr20, vr20
vmulev_vmaddod_lsx vr3, vr4, vr21, vr20, vr5, vr7, \sz
vssrarni.h.w vr0, vr2, 12 // t4a
vssrarni.h.w vr7, vr5, 12 // t5a
vldrepl.h vr20, t0, 12 // 1189
vldrepl.h vr21, t0, 14 // 3920
vmulev_vmaddod_lsx vr1, vr6, vr20, vr21, vr3, vr2, \sz
vneg.h vr20, vr20
vmulev_vmaddod_lsx vr1, vr6, vr21, vr20, vr4, vr5, \sz
vssrarni.h.w vr2, vr3, 12 // t6a
vssrarni.h.w vr5, vr4, 12 // t7a
vsadd.h vr3, vr17, vr0 // t0
vssub.h vr4, vr17, vr0 // t4
vsadd.h vr1, vr19, vr7 // t1
vssub.h vr6, vr19, vr7 // t5
vsadd.h vr17, vr16, vr2 // t2
vssub.h vr19, vr16, vr2 // t6
vsadd.h vr0, vr18, vr5 // t3
vssub.h vr7, vr18, vr5 // t7
la.local t0, idct_coeffs_h
vldrepl.h vr20, t0, 4 // 1567
vldrepl.h vr21, t0, 6 // 3784
vmulev_vmaddod_lsx vr4, vr6, vr21, vr20, vr16, vr5, \sz
vneg.h vr21, vr21
vmulev_vmaddod_lsx vr4, vr6, vr20, vr21, vr18, vr2, \sz
vssrarni.h.w vr5, vr16, 12 // t4a
vssrarni.h.w vr2, vr18, 12 // t5a
vneg.h vr21, vr21
vmulev_vmaddod_lsx vr7, vr19, vr20, vr21, vr4, vr16, \sz
vneg.h vr20, vr20
vmulev_vmaddod_lsx vr7, vr19, vr21, vr20, vr6, vr18, \sz
vssrarni.h.w vr16, vr4, 12 // t7a
vssrarni.h.w vr18, vr6, 12 // t6a
vsadd.h vr4, vr5, vr18 // out1
vssub.h vr19, vr5, vr18 // t6
vsadd.h vr20, vr1, vr0 // out7
vssub.h vr18, vr1, vr0 // t3
vsadd.h \out0, vr3, vr17 // out0
vssub.h vr5, vr3, vr17 // t2
vsadd.h \out6, vr2, vr16 // out6
vssub.h vr23, vr2, vr16 // t7
vsllwil.w.h vr3, vr20, 0 // out7
vexth.w.h \out7, vr20 // out7
vsllwil.w.h vr21, vr4, 0 // out1
vexth.w.h \out1, vr4 // out1
vneg.w vr3, vr3
vneg.w \out7, \out7
vneg.w vr21, vr21
vneg.w \out1, \out1
vssrarni.h.w \out7, vr3, 0
vssrarni.h.w \out1, vr21, 0
la.local t0, idct_coeffs_h
vldrepl.h vr20, t0, 0 // 2896
vmulev_vmaddod_lsx vr5, vr18, vr20, vr20, vr16, \out3, \sz
vneg.h vr21, vr20
vmulev_vmaddod_lsx vr5, vr18, vr20, vr21, vr17, \out4, \sz
vsrari.w vr16, vr16, 12
vsrari.w \out3, \out3, 12
vneg.w vr16, vr16
vneg.w \out3, \out3
vssrarni.h.w \out3, vr16, 0 // out3
vssrarni.h.w \out4, vr17, 12 // out4
vmulev_vmaddod_lsx vr19, vr23, vr20, vr20, vr16, \out2, \sz
vmulev_vmaddod_lsx vr19, vr23, vr20, vr21, vr17, \out5, \sz
vssrarni.h.w \out2, vr16, 12 // out2
vsrari.w vr17, vr17, 12
vsrari.w \out5, \out5, 12
vneg.w vr17, vr17
vneg.w \out5, \out5
vssrarni.h.w \out5, vr17, 0 // out5
.endm
functionl inv_adst_8h_x8_lsx
inv_adst8_lsx vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, .8h
endfuncl
functionl inv_flipadst_8h_x8_lsx
inv_adst8_lsx vr7, vr6, vr5, vr4, vr3, vr2, vr1, vr0, .8h
endfuncl
functionl inv_adst_4h_x8_lsx
inv_adst8_lsx vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, .8h
endfuncl
functionl inv_flipadst_4h_x8_lsx
inv_adst8_lsx vr7, vr6, vr5, vr4, vr3, vr2, vr1, vr0, .8h
endfuncl
.macro inv_dct8_lsx in0, in1, in2, in3, in4, in5, in6, in7, sz
inv_dct4_lsx \in0, \in2, \in4, \in6, \in0, \in2, \in4, \in6, \sz
la.local t0, idct_coeffs_h
vldrepl.h vr20, t0, 8 // 799
vldrepl.h vr21, t0, 10 // 4017
vmulev_vmaddod_lsx \in1, \in7, vr21, vr20, vr16, vr17, \sz
vneg.h vr21, vr21
vmulev_vmaddod_lsx \in1, \in7, vr20, vr21, vr18, vr19, \sz
vssrarni.h.w vr17, vr16, 12 // t7a
vssrarni.h.w vr19, vr18, 12 // t4a
vldrepl.h vr20, t0, 12 // 3406
vldrepl.h vr21, t0, 14 // 2276
vmulev_vmaddod_lsx \in5, \in3, vr21, vr20, \in1, vr16, \sz
vneg.h vr21, vr21
vmulev_vmaddod_lsx \in5, \in3, vr20, vr21, \in7, vr18, \sz
vssrarni.h.w vr16, \in1, 12 // t6a
vssrarni.h.w vr18, \in7, 12 // t5a
vssub.h \in7, vr19, vr18 // t5a
vsadd.h vr18, vr19, vr18 // t4
vssub.h \in5, vr17, vr16 // t6a
vsadd.h vr16, vr17, vr16 // t7
vldrepl.h vr20, t0, 0 // 2896
vmulev_vmaddod_lsx \in5, \in7, vr20, vr20, \in1, vr17, \sz
vneg.h vr21, vr20
vmulev_vmaddod_lsx \in5, \in7, vr20, vr21, vr23, vr19, \sz
vssrarni.h.w vr17, \in1, 12 // t6
vssrarni.h.w vr19, vr23, 12 // t5
vssub.h \in7, \in0, vr16 //c[7]
vsadd.h \in0, \in0, vr16 //c[0]
vssub.h \in5, \in4, vr19 //c[5]
vsadd.h vr23, \in4, vr19 //c[2]
vssub.h \in4, \in6, vr18 //c[4]
vsadd.h \in3, \in6, vr18 //c[3]
vssub.h \in6, \in2, vr17 //c[6]
vsadd.h \in1, \in2, vr17 //c[1]
vor.v \in2, vr23, vr23
.endm
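// inv_dct8_lsx builds the 8-point inverse DCT from inv_dct4_lsx on the even
// inputs plus two rotations of the odd inputs (799/4017 and 3406/2276),
// followed by the t4..t7 butterflies, a 2896-based half-butterfly for t5/t6,
// and the final add/sub stage producing c[0]..c[7].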
functionl inv_dct_8h_x8_lsx
inv_dct8_lsx vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, .8h
endfuncl
functionl inv_dct_4h_x8_lsx
inv_dct8_lsx vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, .4h
endfuncl
.macro DST_ADD_W8 in0, in1, in2, in3, in4, in5, in6, in7
vsllwil.hu.bu vr0, \in0, 0
vsllwil.hu.bu vr1, \in1, 0
vsllwil.hu.bu vr2, \in2, 0
vsllwil.hu.bu vr3, \in3, 0
vadd.h vr0, \in4, vr0
vadd.h vr1, \in5, vr1
vadd.h vr2, \in6, vr2
vadd.h vr3, \in7, vr3
vssrani.bu.h vr1, vr0, 0
vssrani.bu.h vr3, vr2, 0
vstelm.d vr1, a0, 0, 0
vstelmx.d vr1, a0, a1, 1
vstelmx.d vr3, a0, a1, 0
vstelmx.d vr3, a0, a1, 1
.endm
.macro VLD_DST_ADD_W8 in0, in1, in2, in3
vld vr0, a0, 0
vldx vr1, a0, a1
vld vr2, t2, 0
vldx vr3, t2, a1
DST_ADD_W8 vr0, vr1, vr2, vr3, \in0, \in1, \in2, \in3
.endm
functionl inv_identity_8h_x8_lsx
.irp i, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7
vsadd.h \i, \i, \i
.endr
endfuncl
functionl inv_identity_4h_x8_lsx
.irp i, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7
vsadd.h \i, \i, \i
.endr
endfuncl
.macro def_fn_8x8_base variant
functionl inv_txfm_\variant\()add_8x8_lsx
vxor.v vr23, vr23, vr23
vld_x8 a2, 0, 16, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7
.irp i, 0, 16, 32, 48, 64, 80, 96, 112
vst vr23, a2, \i
.endr
.ifc \variant, identity_
// The identity left-shift by 1 and the rounding right-shift by 1 cancel out
b .itx_8x8_epilog
.else
move t6, ra
jirl ra, t7, 0
move ra, t6
.irp i, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7
vsrari.h \i, \i, 1
.endr
.itx_8x8_epilog:
LSX_TRANSPOSE8x8_H vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr16, vr17, vr18, vr19, vr20, vr21, vr22, vr23
move t6, ra
jirl ra, t8, 0
move ra, t6
vsrari_h_x8 vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr16, vr17, vr18, vr19, vr20, vr21, vr22, vr23, 4
alsl.d t2, a1, a0, 1
VLD_DST_ADD_W8 vr16, vr17, vr18, vr19
add.d a0, a0, a1
alsl.d t2, a1, a0, 1
VLD_DST_ADD_W8 vr20, vr21, vr22, vr23
.endif
endfuncl
.endm
def_fn_8x8_base identity_
def_fn_8x8_base
.macro fn8x8 txfm1, txfm2
function inv_txfm_add_\txfm1\()_\txfm2\()_8x8_8bpc_lsx
.ifc \txfm1\()_\txfm2, dct_dct
bnez a3, .NO_HAS_DCONLY_8x8
idct_dc 8, 8, 1
DST_ADD_W8 vr10, vr11, vr12, vr13, vr20, vr20, vr20, vr20
add.d a0, a1, a0
alsl.d t2, a1, a0, 1
VLD_DST_ADD_W8 vr20, vr20, vr20, vr20
b .\txfm1\()_\txfm2\()_8X8_END
.NO_HAS_DCONLY_8x8:
.endif
la.local t8, inv_\txfm2\()_8h_x8_lsx
.ifc \txfm1, identity
b inv_txfm_identity_add_8x8_lsx
.else
la.local t7, inv_\txfm1\()_8h_x8_lsx
b inv_txfm_add_8x8_lsx
.endif
.\txfm1\()_\txfm2\()_8X8_END:
endfunc
.endm
fn8x8 dct, dct
fn8x8 identity, identity
fn8x8 dct, adst
fn8x8 dct, flipadst
fn8x8 dct, identity
fn8x8 adst, dct
fn8x8 adst, adst
fn8x8 adst, flipadst
fn8x8 flipadst, dct
fn8x8 flipadst, adst
fn8x8 flipadst, flipadst
fn8x8 identity, dct
fn8x8 adst, identity
fn8x8 flipadst, identity
fn8x8 identity, adst
fn8x8 identity, flipadst
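// Rectangular-transform pre-scale: multiply each 16-bit coefficient by \in1
// (2896 replicated per 32-bit lane) in 32 bits and round back with >> 12,
// i.e. a scale of 2896/4096 ~= 1/sqrt(2).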
.macro rect2_lsx in0, in1, out0
vsllwil.w.h vr22, \in0, 0 // in1
vexth.w.h \in0, \in0 // in1
vmul.w vr22, vr22, \in1
vmul.w \out0, \in0, \in1
vssrarni.h.w \out0, vr22, 12
.endm
.macro LSX_TRANSPOSE8x4_H in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
out2, out3, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5
vilvl.h \tmp0, \in1, \in0
vilvl.h \tmp1, \in3, \in2
vilvl.w \tmp2, \tmp1, \tmp0
vilvh.w \tmp3, \tmp1, \tmp0
vilvl.h \tmp0, \in5, \in4
vilvl.h \tmp1, \in7, \in6
vilvl.w \tmp4, \tmp1, \tmp0
vilvh.w \tmp5, \tmp1, \tmp0
vilvl.d \out0, \tmp4, \tmp2
vilvh.d \out1, \tmp4, \tmp2
vilvl.d \out2, \tmp5, \tmp3
vilvh.d \out3, \tmp5, \tmp3
.endm
functionl inv_txfm_add_8x4_lsx
vxor.v vr23, vr23, vr23
vld vr0, a2, 0
vld vr2, a2, 16
vld vr4, a2, 32
vld vr6, a2, 48
.irp i, 0, 16, 32, 48
vst vr23, a2, \i
.endr
li.w t0, 2896
vreplgr2vr.w vr23, t0
rect2_lsx vr0, vr23, vr0
rect2_lsx vr2, vr23, vr2
rect2_lsx vr4, vr23, vr4
rect2_lsx vr6, vr23, vr6
vilvh.d vr1, vr0, vr0
vilvh.d vr3, vr2, vr2
vilvh.d vr5, vr4, vr4
vilvh.d vr7, vr6, vr6
move t6, ra
jirl ra, t7, 0
move ra, t6
LSX_TRANSPOSE8x4_H vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, vr0, vr1, \
vr2, vr3, vr16, vr17, vr18, vr19, vr20, vr21
move t6, ra
jirl ra, t8, 0
move ra, t6
vsrari_h_x4 vr0, vr1, vr2, vr3, vr16, vr17, vr18, vr19, 4
alsl.d t2, a1, a0, 1
VLD_DST_ADD_W8 vr16, vr17, vr18, vr19
endfuncl
.macro LSX_TRANSPOSE4x8_H in0, in1, in2, in3, out0, out1, out2, out3, out4, \
out5, out6, out7, tmp0, tmp1, tmp2, tmp3
vilvl.h \tmp0, \in1, \in0
vilvl.h \tmp1, \in3, \in2
vilvh.h \tmp2, \in1, \in0
vilvh.h \tmp3, \in3, \in2
vilvl.w \out0, \tmp1, \tmp0
vilvh.w \out2, \tmp1, \tmp0
vilvl.w \out4, \tmp3, \tmp2
vilvh.w \out6, \tmp3, \tmp2
vbsrl.v \out1, \out0, 8
vbsrl.v \out3, \out2, 8
vbsrl.v \out5, \out4, 8
vbsrl.v \out7, \out6, 8
vinsgr2vr.d \out0, zero, 1
vinsgr2vr.d \out2, zero, 1
vinsgr2vr.d \out4, zero, 1
vinsgr2vr.d \out6, zero, 1
.endm
functionl inv_txfm_add_4x8_lsx
vxor.v vr23, vr23, vr23
vld vr0, a2, 0
vld vr1, a2, 16
vld vr2, a2, 32
vld vr3, a2, 48
.irp i, 0, 16, 32, 48
vst vr23, a2, \i
.endr
li.w t0, 2896
vreplgr2vr.w vr23, t0
rect2_lsx vr0, vr23, vr0
rect2_lsx vr1, vr23, vr1
rect2_lsx vr2, vr23, vr2
rect2_lsx vr3, vr23, vr3
move t6, ra
jirl ra, t7, 0
move ra, t6
LSX_TRANSPOSE4x8_H vr0, vr1, vr2, vr3, vr0, vr1, vr2, vr3, vr4, vr5, \
vr6, vr7, vr16, vr17, vr18, vr19
move t6, ra
jirl ra, t8, 0
move ra, t6
vilvl.d vr0, vr1, vr0
vilvl.d vr1, vr3, vr2
vilvl.d vr2, vr5, vr4
vilvl.d vr3, vr7, vr6
vsrari_h_x4 vr0, vr1, vr2, vr3, vr16, vr17, vr18, vr19, 4
alsl.d t2, a1, a0, 1
VLD_DST_ADD_W4 vr16, vr17
add.d a0, a1, a0
alsl.d t2, a1, a0, 1
VLD_DST_ADD_W4 vr18, vr19
endfuncl
.macro fn8x4 txfm1, txfm2
function inv_txfm_add_\txfm1\()_\txfm2\()_8x4_8bpc_lsx
.ifc \txfm1\()_\txfm2, dct_dct
bnez a3, .NO_HAS_DCONLY_8x4
idct_dc 8, 4, 0
DST_ADD_W8 vr10, vr11, vr12, vr13, vr5, vr5, vr5, vr5
b .\txfm1\()_\txfm2\()_8X4_END
.NO_HAS_DCONLY_8x4:
.endif
la.local t7, inv_\txfm1\()_4h_x8_lsx
la.local t8, inv_\txfm2\()_8h_x4_lsx
b inv_txfm_add_8x4_lsx
.\txfm1\()_\txfm2\()_8X4_END:
endfunc
.endm
fn8x4 dct, dct
fn8x4 identity, identity
fn8x4 dct, adst
fn8x4 dct, flipadst
fn8x4 dct, identity
fn8x4 adst, dct
fn8x4 adst, adst
fn8x4 adst, flipadst
fn8x4 flipadst, dct
fn8x4 flipadst, adst
fn8x4 flipadst, flipadst
fn8x4 identity, dct
fn8x4 adst, identity
fn8x4 flipadst, identity
fn8x4 identity, adst
fn8x4 identity, flipadst
.macro fn4x8 txfm1, txfm2
function inv_txfm_add_\txfm1\()_\txfm2\()_4x8_8bpc_lsx
.ifc \txfm1\()_\txfm2, dct_dct
bnez a3, .NO_HAS_DCONLY_4x8
idct_dc 4, 8, 0
DST_ADD_W4 vr10, vr11, vr12, vr13, vr20, vr20
add.d a0, a0, a1
alsl.d t2, a1, a0, 1
VLD_DST_ADD_W4 vr5, vr5
b .\txfm1\()_\txfm2\()_4X8_END
.NO_HAS_DCONLY_4x8:
.endif
la.local t7, inv_\txfm1\()_8h_x4_lsx
la.local t8, inv_\txfm2\()_4h_x8_lsx
b inv_txfm_add_4x8_lsx
.\txfm1\()_\txfm2\()_4X8_END:
endfunc
.endm
fn4x8 dct, dct
fn4x8 identity, identity
fn4x8 dct, adst
fn4x8 dct, flipadst
fn4x8 dct, identity
fn4x8 adst, dct
fn4x8 adst, adst
fn4x8 adst, flipadst
fn4x8 flipadst, dct
fn4x8 flipadst, adst
fn4x8 flipadst, flipadst
fn4x8 identity, dct
fn4x8 adst, identity
fn4x8 flipadst, identity
fn4x8 identity, adst
fn4x8 identity, flipadst
.macro inv_identity4_lsx_x2 in0, in1, in2, in3, in4, out0, out1
vsllwil.w.h vr4, \in0, 0
vexth.w.h vr5, \in0
vsllwil.w.h vr6, \in1, 0
vexth.w.h vr7, \in1
vmul.w vr4, vr4, \in2
vmul.w vr5, vr5, \in2
vmul.w vr6, vr6, \in2
vmul.w vr7, vr7, \in2
vssrarni.h.w vr5, vr4, 12
vssrarni.h.w vr7, vr6, 12
vsadd.h \out0, vr5, \in3
vsadd.h \out1, vr7, \in4
.endm
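// Widening multiply-accumulate helpers: \out0/\out1 receive the 32-bit results
// for the low/high halves of \in0 * \in2 +/- \in1 * \in3 (vmul_vmadd_w adds,
// vmul_vmsub_w subtracts). vr22 and vr23 are clobbered.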
.macro vmul_vmadd_w in0, in1, in2, in3, out0, out1
vsllwil.w.h vr22, \in0, 0
vexth.w.h vr23, \in0
vmul.w \out0, vr22, \in2
vmul.w \out1, vr23, \in2
vsllwil.w.h vr22, \in1, 0
vexth.w.h vr23, \in1
vmadd.w \out0, vr22, \in3
vmadd.w \out1, vr23, \in3
.endm
.macro vmul_vmsub_w in0, in1, in2, in3, out0, out1
vsllwil.w.h vr22, \in0, 0
vexth.w.h vr23, \in0
vmul.w \out0, vr22, \in2
vmul.w \out1, vr23, \in2
vsllwil.w.h vr22, \in1, 0
vexth.w.h vr23, \in1
vmsub.w \out0, vr22, \in3
vmsub.w \out1, vr23, \in3
.endm
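// 16-point inverse DCT on vr0-vr15: the even inputs go through inv_dct8_lsx,
// the odd inputs are combined with the idct_coeffs_h constants, and the final
// butterflies leave c[0]-c[15] in vr0-vr15.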
.macro inv_dct16_lsx sz
inv_dct8_lsx vr0, vr2, vr4, vr6, vr8, vr10, vr12, vr14, \sz
la.local t0, idct_coeffs_h
vldrepl.h vr20, t0, 16 // 401
vldrepl.h vr21, t0, 18 // 4076
vmulev_vmaddod_lsx vr1, vr15, vr21, vr20, vr16, vr17, \sz
vneg.h vr21, vr21
vmulev_vmaddod_lsx vr1, vr15, vr20, vr21, vr18, vr19, \sz
vssrarni.h.w vr17, vr16, 12 // t15a
vssrarni.h.w vr19, vr18, 12 // t8a
vldrepl.h vr20, t0, 20 // 3166 -> 1583
vldrepl.h vr21, t0, 22 // 2598 -> 1299
vmulev_vmaddod_lsx vr9, vr7, vr21, vr20, vr1, vr16, \sz
vneg.h vr21, vr21
vmulev_vmaddod_lsx vr9, vr7, vr20, vr21, vr15, vr18, \sz
vssrarni.h.w vr16, vr1, 12 // t14a
vssrarni.h.w vr18, vr15, 12 // t9a
vldrepl.h vr20, t0, 24 // 1931
vldrepl.h vr21, t0, 26 // 3612
vmulev_vmaddod_lsx vr5, vr11, vr21, vr20, vr7, vr1, \sz
vneg.h vr21, vr21
vmulev_vmaddod_lsx vr5, vr11, vr20, vr21, vr9, vr15, \sz
vssrarni.h.w vr1, vr7, 12 // t13a
vssrarni.h.w vr15, vr9, 12 // t10a
vldrepl.h vr20, t0, 28 // 3920
vldrepl.h vr21, t0, 30 // 1189
vmulev_vmaddod_lsx vr13, vr3, vr21, vr20, vr5, vr7, \sz
vneg.h vr21, vr21
vmulev_vmaddod_lsx vr13, vr3, vr20, vr21, vr11, vr9, \sz
vssrarni.h.w vr7, vr5, 12 // t12a
vssrarni.h.w vr9, vr11, 12 // t11a
vsadd.h vr5, vr19, vr18 // t8
vssub.h vr11, vr19, vr18 // t9
vssub.h vr3, vr9, vr15 // t10
vsadd.h vr13, vr9, vr15 // t11
vsadd.h vr18, vr7, vr1 // t12
vssub.h vr19, vr7, vr1 // t13
vssub.h vr9, vr17, vr16 // t14
vsadd.h vr15, vr17, vr16 // t15
vldrepl.h vr20, t0, 4 // 1567
vldrepl.h vr21, t0, 6 // 3784
vmulev_vmaddod_lsx vr9, vr11, vr21, vr20, vr1, vr16, \sz
vneg.h vr21, vr21
vmulev_vmaddod_lsx vr9, vr11, vr20, vr21, vr7, vr17, \sz
vssrarni.h.w vr16, vr1, 12 // t14a
vssrarni.h.w vr17, vr7, 12 // t9a
vneg.h vr21, vr21
vmulev_vmaddod_lsx vr19, vr3, vr21, vr20, vr9, vr1, \sz
vneg.h vr21, vr21
vmulev_vmaddod_lsx vr19, vr3, vr20, vr21, vr11, vr7, \sz
vneg.w vr1, vr1
vneg.w vr9, vr9
vssrarni.h.w vr7, vr11, 12 // t13a
vssrarni.h.w vr1, vr9, 12 // t10a
vsadd.h vr9, vr5, vr13 // t8a
vssub.h vr11, vr5, vr13 // t11a
vssub.h vr3, vr15, vr18 // t12a
vsadd.h vr19, vr15, vr18 // t15a
vsadd.h vr5, vr17, vr1 // t9
vssub.h vr13, vr17, vr1 // t10
vssub.h vr15, vr16, vr7 // t13
vsadd.h vr18, vr16, vr7 // t14
vldrepl.h vr20, t0, 0 // 2896
vmulev_vmaddod_lsx vr15, vr13, vr20, vr20, vr1, vr7, \sz
vneg.h vr21, vr20
vmulev_vmaddod_lsx vr15, vr13, vr20, vr21, vr17, vr16, \sz
vssrarni.h.w vr7, vr1, 12 // t13a
vssrarni.h.w vr16, vr17, 12 // t10a
vmulev_vmaddod_lsx vr3, vr11, vr20, vr20, vr13, vr23, \sz
vmulev_vmaddod_lsx vr3, vr11, vr20, vr21, vr15, vr17, \sz
vssrarni.h.w vr23, vr13, 12 // t12
vssrarni.h.w vr17, vr15, 12 // t11
vssub.h vr15, vr0, vr19 // c[15]
vsadd.h vr0, vr0, vr19 // c[0]
vsadd.h vr1, vr2, vr18 // c[1]
vssub.h vr20, vr2, vr18 // c[14]
vsadd.h vr2, vr4, vr7 // c[2]
vssub.h vr13, vr4, vr7 // c[13]
vsadd.h vr3, vr6, vr23 // c[3]
vssub.h vr21, vr6, vr23 // c[12]
vsadd.h vr4, vr8, vr17 // c[4]
vssub.h vr11, vr8, vr17 // c[11]
vsadd.h vr7, vr14, vr9 // c[7]
vssub.h vr8, vr14, vr9 // c[8]
vsadd.h vr6, vr12, vr5 // c[6]
vssub.h vr9, vr12, vr5 // c[9]
vsadd.h vr5, vr10, vr16 // c[5]
vssub.h vr10, vr10, vr16 // c[10]
vor.v vr14, vr20, vr20
vor.v vr12, vr21, vr21
.endm
functionl inv_dct_8h_x16_lsx
inv_dct16_lsx .8h
endfuncl
functionl inv_dct_4h_x16_lsx
inv_dct16_lsx .4h
endfuncl
.macro VLD_DST_ADD_W4_x4 in0, in1, in2, in3, in4, in5, in6, in7
alsl.d t2, a1, a0, 1
VLD_DST_ADD_W4 \in0, \in1
add.d a0, a1, a0
alsl.d t2, a1, a0, 1
VLD_DST_ADD_W4 \in2, \in3
add.d a0, a1, a0
alsl.d t2, a1, a0, 1
VLD_DST_ADD_W4 \in4, \in5
add.d a0, a1, a0
alsl.d t2, a1, a0, 1
VLD_DST_ADD_W4 \in6, \in7
.endm
.macro def_fn_4x16_base txfm
functionl inv_txfm_\txfm\()add_4x16_lsx
PUSH_REG
blt a3, t5, 416f
vld vr0, a2, 16
vld vr1, a2, 48
vld vr2, a2, 80
vld vr3, a2, 112
vxor.v vr23, vr23, vr23
.irp i, 16, 48, 80, 112
vst vr23, a2, \i
.endr
move t6, ra
jirl ra, t7, 0
move ra, t6
.ifnc \txfm, identity_
vsrari.h vr0, vr0, 1
vsrari.h vr1, vr1, 1
vsrari.h vr2, vr2, 1
vsrari.h vr3, vr3, 1
.endif
LSX_TRANSPOSE4x8_H vr0, vr1, vr2, vr3, vr8, vr9, vr24, vr25, vr26, \
vr27, vr14, vr28, vr10, vr11, vr12, vr13
416:
ble t5, a3, 416416f
.irp i, vr8, vr9, vr24, vr25, vr26, vr27, vr14, vr28
vxor.v \i, \i, \i
.endr
416416:
vld vr0, a2, 0
vld vr1, a2, 32
vld vr2, a2, 64
vld vr3, a2, 96
vxor.v vr23, vr23, vr23
.irp i, 0, 32, 64, 96
vst vr23, a2, \i
.endr
move t6, ra
jirl ra, t7, 0
move ra, t6
.ifnc \txfm, identity_
vsrari.h vr0, vr0, 1
vsrari.h vr1, vr1, 1
vsrari.h vr2, vr2, 1
vsrari.h vr3, vr3, 1
.endif
LSX_TRANSPOSE4x8_H vr0, vr1, vr2, vr3, vr0, vr1, vr2, vr3, vr4, vr5, \
vr6, vr7, vr16, vr17, vr18, vr19
vor.v vr10, vr24, vr24
vor.v vr11, vr25, vr25
vor.v vr12, vr26, vr26
vor.v vr13, vr27, vr27
vor.v vr15, vr28, vr28
move t6, ra
jirl ra, t8, 0
move ra, t6
vilvl.d vr16, vr1, vr0
vilvl.d vr17, vr3, vr2
vilvl.d vr18, vr5, vr4
vilvl.d vr19, vr7, vr6
vilvl.d vr20, vr9, vr8
vilvl.d vr21, vr11, vr10
vilvl.d vr22, vr13, vr12
vilvl.d vr23, vr15, vr14
.irp i, vr16, vr17, vr18, vr19, vr20, vr21, vr22, vr23
vsrari.h \i, \i, 4
.endr
VLD_DST_ADD_W4_x4 vr16, vr17, vr18, vr19, vr20, vr21, vr22, vr23
POP_REG
endfuncl
.endm
def_fn_4x16_base identity_
def_fn_4x16_base
.macro fn4x16 txfm1, txfm2, eob_half
function inv_txfm_add_\txfm1\()_\txfm2\()_4x16_8bpc_lsx
.ifc \txfm1\()_\txfm2, dct_dct
bnez a3, .NO_HAS_DCONLY_4x16
idct_dc 4, 16, 1
DST_ADD_W4 vr10, vr11, vr12, vr13, vr5, vr5
.rept 3
add.d a0, a1, a0
alsl.d t2, a1, a0, 1
VLD_DST_ADD_W4 vr5, vr5
.endr
b .\txfm1\()_\txfm2\()_4X16_END
.NO_HAS_DCONLY_4x16:
.endif
li.w t5, \eob_half
la.local t7, inv_\txfm1\()_8h_x4_lsx
.ifc \txfm1, identity
la.local t7, inv_\txfm1\()_8h_x4_lsx1
.endif
la.local t8, inv_\txfm2\()_4h_x16_lsx
.ifc \txfm1, identity
b inv_txfm_identity_add_4x16_lsx
.else
b inv_txfm_add_4x16_lsx
.endif
.\txfm1\()_\txfm2\()_4X16_END:
endfunc
.endm
fn4x16 dct, dct, 29
fn4x16 identity, identity, 29
fn4x16 dct, adst, 29
fn4x16 dct, flipadst, 29
fn4x16 dct, identity, 8
fn4x16 adst, dct, 29
fn4x16 adst, adst, 29
fn4x16 adst, flipadst, 29
fn4x16 flipadst, dct, 29
fn4x16 flipadst, adst, 29
fn4x16 flipadst, flipadst, 29
fn4x16 identity, dct, 32
fn4x16 adst, identity, 8
fn4x16 flipadst, identity, 8
fn4x16 identity, adst, 32
fn4x16 identity, flipadst, 32
.macro inv_identity16_lsx in0, in1, in2, out0, sz
.ifc \sz, .8h
vsllwil.w.h vr16, \in0, 0
vexth.w.h vr17, \in0
vmul.w vr16, vr16, \in1
vmul.w vr17, vr17, \in1
vsadd.h \in2, \in2, \in2
vssrarni.h.w vr17, vr16, 11
vsadd.h \out0, vr17, \in2
.else
vsllwil.w.h vr16, \in0, 0
vmul.w vr16, vr16, \in1
vsadd.h \in2, \in2, \in2
vssrarni.h.w vr16, vr16, 11
vsadd.h \out0, vr16, \in2
.endif
.endm
.macro inv_identity16_lsx1 in0, in1, in2, out0
vsllwil.w.h vr16, \in0, 0
vexth.w.h vr17, \in1
vmul.w vr18, vr16, \in2
vmul.w vr19, vr17, \in2
vsrari.w vr18, vr18, 11
vsrari.w vr19, vr19, 11
vslli.w vr16, vr16, 1
vslli.w vr17, vr17, 1
vadd.w vr16, vr18, vr16
vadd.w \out0, vr19, vr17
vssrarni.h.w \out0, vr16, 1
.endm
functionl inv_identity_8h_x16_lsx
li.w t0, 1697
vreplgr2vr.w vr20, t0
.irp i, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, vr8, \
vr9, vr10, vr11, vr12, vr13, vr14, vr15
inv_identity16_lsx \i, vr20, \i, \i, .8h
.endr
endfuncl
functionl inv_identity_4h_x16_lsx
li.w t0, 1697
vreplgr2vr.w vr20, t0
.irp i, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, vr8, \
vr9, vr10, vr11, vr12, vr13, vr14, vr15
inv_identity16_lsx \i, vr20, \i, \i, .4h
.endr
endfuncl
functionl inv_identity_8h_x16_lsx1
li.w t0, 1697
vreplgr2vr.w vr20, t0
.irp i, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, vr8, \
vr9, vr10, vr11, vr12, vr13, vr14, vr15
inv_identity16_lsx1 \i, \i, vr20, \i
.endr
endfuncl
const iadst16_coeffs_h, align=4
.short 4091, 201, 3973, 995
.short 3703, 1751, 3290, 2440
.short 2751, 3035, 2106, 3513
.short 1380, 3857, 601, 4052
endconst
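// 16-point inverse ADST. The adst and flipadst variants share the whole
// computation; only the final register permutation that places out[0]-out[15]
// into vr0-vr15 differs between the two .ifc branches at the end.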
.macro inv_adst16_lsx txfm, sz
la.local t0, iadst16_coeffs_h
vldrepl.h vr20, t0, 0 // 4091
vldrepl.h vr21, t0, 2 // 201
vmulev_vmaddod_lsx vr15, vr0, vr20, vr21, vr16, vr18, \sz
vneg.h vr20, vr20
vmulev_vmaddod_lsx vr15, vr0, vr21, vr20, vr17, vr19, \sz
vssrarni.h.w vr18, vr16, 12 // t0
vssrarni.h.w vr19, vr17, 12 // t1
vldrepl.h vr20, t0, 4 // 3973
vldrepl.h vr21, t0, 6 // 995
vmulev_vmaddod_lsx vr13, vr2, vr20, vr21, vr16, vr0, \sz
vneg.h vr20, vr20
vmulev_vmaddod_lsx vr13, vr2, vr21, vr20, vr17, vr15, \sz
vssrarni.h.w vr0, vr16, 12 // t2
vssrarni.h.w vr15, vr17, 12 // t3
vldrepl.h vr20, t0, 8 // 3703
vldrepl.h vr21, t0, 10 // 1751
vmulev_vmaddod_lsx vr11, vr4, vr20, vr21, vr16, vr2, \sz
vneg.h vr20, vr20
vmulev_vmaddod_lsx vr11, vr4, vr21, vr20, vr17, vr13, \sz
vssrarni.h.w vr2, vr16, 12 // t4
vssrarni.h.w vr13, vr17, 12 // t5
vldrepl.h vr20, t0, 12 // 3290 -> 1645
vldrepl.h vr21, t0, 14 // 2440 -> 1220
vmulev_vmaddod_lsx vr9, vr6, vr20, vr21, vr16, vr4, \sz
vneg.h vr20, vr20
vmulev_vmaddod_lsx vr9, vr6, vr21, vr20, vr17, vr11, \sz
vssrarni.h.w vr4, vr16, 12 // t6
vssrarni.h.w vr11, vr17, 12 // t7
vldrepl.h vr20, t0, 16 // 2751
vldrepl.h vr21, t0, 18 // 3035
vmulev_vmaddod_lsx vr7, vr8, vr20, vr21, vr16, vr6, \sz
vneg.h vr20, vr20
vmulev_vmaddod_lsx vr7, vr8, vr21, vr20, vr17, vr9, \sz
vssrarni.h.w vr6, vr16, 12 // t8
vssrarni.h.w vr9, vr17, 12 // t9
vldrepl.h vr20, t0, 20 // 2106
vldrepl.h vr21, t0, 22 // 3513
vmulev_vmaddod_lsx vr5, vr10, vr20, vr21, vr16, vr7, \sz
vneg.h vr20, vr20
vmulev_vmaddod_lsx vr5, vr10, vr21, vr20, vr17, vr8, \sz
vssrarni.h.w vr7, vr16, 12 // t10
vssrarni.h.w vr8, vr17, 12 // t11
vldrepl.h vr20, t0, 24 // 1380
vldrepl.h vr21, t0, 26 // 3857
vmulev_vmaddod_lsx vr3, vr12, vr20, vr21, vr16, vr5, \sz
vneg.h vr20, vr20
vmulev_vmaddod_lsx vr3, vr12, vr21, vr20, vr17, vr10, \sz
vssrarni.h.w vr5, vr16, 12 // t12
vssrarni.h.w vr10, vr17, 12 // t13
vldrepl.h vr20, t0, 28 // 601
vldrepl.h vr21, t0, 30 // 4052
vmulev_vmaddod_lsx vr1, vr14, vr20, vr21, vr16, vr3, \sz
vneg.h vr20, vr20
vmulev_vmaddod_lsx vr1, vr14, vr21, vr20, vr17, vr12, \sz
vssrarni.h.w vr3, vr16, 12 // t14
vssrarni.h.w vr12, vr17, 12 // t15
vsadd.h vr1, vr18, vr6 // t0a
vssub.h vr14, vr18, vr6 // t8a
vsadd.h vr16, vr19, vr9 // t1a
vssub.h vr17, vr19, vr9 // t9a
vsadd.h vr6, vr0, vr7 // t2a
vssub.h vr18, vr0, vr7 // t10a
vsadd.h vr9, vr15, vr8 // t3a
vssub.h vr19, vr15, vr8 // t11a
vsadd.h vr0, vr2, vr5 // t4a
vssub.h vr7, vr2, vr5 // t12a
vsadd.h vr8, vr13, vr10 // t5a
vssub.h vr15, vr13, vr10 // t13a
vsadd.h vr2, vr4, vr3 // t6a
vssub.h vr5, vr4, vr3 // t14a
vsadd.h vr10, vr11, vr12 // t7a
vssub.h vr13, vr11, vr12 // t15a
la.local t0, idct_coeffs_h
vldrepl.h vr20, t0, 8 // 799
vldrepl.h vr21, t0, 10 // 4017
vmulev_vmaddod_lsx vr14, vr17, vr21, vr20, vr3, vr11, \sz
vneg.h vr21, vr21
vmulev_vmaddod_lsx vr14, vr17, vr20, vr21, vr4, vr12, \sz
vssrarni.h.w vr11, vr3, 12 // t8
vssrarni.h.w vr12, vr4, 12 // t9
vneg.h vr21, vr21
vmulev_vmaddod_lsx vr15, vr7, vr20, vr21, vr3, vr14, \sz
vneg.h vr20, vr20
vmulev_vmaddod_lsx vr15, vr7, vr21, vr20, vr4, vr17, \sz
vssrarni.h.w vr14, vr3, 12 // t13
vssrarni.h.w vr17, vr4, 12 // t12
vldrepl.h vr20, t0, 12 // 3406
vldrepl.h vr21, t0, 14 // 2276
vmulev_vmaddod_lsx vr18, vr19, vr21, vr20, vr3, vr7, \sz
vneg.h vr21, vr21
vmulev_vmaddod_lsx vr18, vr19, vr20, vr21, vr4, vr15, \sz
vssrarni.h.w vr7, vr3, 12 // t10
vssrarni.h.w vr15, vr4, 12 // t11
vneg.h vr21, vr21
vmulev_vmaddod_lsx vr13, vr5, vr20, vr21, vr3, vr18, \sz
vneg.h vr20, vr20
vmulev_vmaddod_lsx vr13, vr5, vr21, vr20, vr4, vr19, \sz
vssrarni.h.w vr18, vr3, 12 // t15
vssrarni.h.w vr19, vr4, 12 // t14
vsadd.h vr5, vr1, vr0 // t0
vssub.h vr13, vr1, vr0 // t4
vsadd.h vr3, vr16, vr8 // t1
vssub.h vr4, vr16, vr8 // t5
vsadd.h vr0, vr6, vr2 // t2
vssub.h vr1, vr6, vr2 // t6
vsadd.h vr8, vr9, vr10 // t3
vssub.h vr16, vr9, vr10 // t7
vsadd.h vr2, vr11, vr17 // t8a
vssub.h vr6, vr11, vr17 // t12a
vsadd.h vr9, vr12, vr14 // t9a
vssub.h vr10, vr12, vr14 // t13a
vsadd.h vr11, vr7, vr19 // t10a
vssub.h vr17, vr7, vr19 // t14a
vsadd.h vr12, vr15, vr18 // t11a
vssub.h vr14, vr15, vr18 // t15a
vldrepl.h vr20, t0, 4 // 1567
vldrepl.h vr21, t0, 6 // 3784
vmulev_vmaddod_lsx vr13, vr4, vr21, vr20, vr7, vr18, \sz
vneg.h vr21, vr21
vmulev_vmaddod_lsx vr13, vr4, vr20, vr21, vr15, vr19, \sz
vssrarni.h.w vr18, vr7, 12 // t4a
vssrarni.h.w vr19, vr15, 12 // t5a
vneg.h vr21, vr21
vmulev_vmaddod_lsx vr16, vr1, vr20, vr21, vr7, vr4, \sz
vneg.h vr20, vr20
vmulev_vmaddod_lsx vr16, vr1, vr21, vr20, vr15, vr13, \sz
vssrarni.h.w vr4, vr7, 12 // t7a
vssrarni.h.w vr13, vr15, 12 // t6a
vneg.h vr20, vr20
vmulev_vmaddod_lsx vr6, vr10, vr21, vr20, vr7, vr1, \sz
vneg.h vr21, vr21
vmulev_vmaddod_lsx vr6, vr10, vr20, vr21, vr15, vr16, \sz
vssrarni.h.w vr1, vr7, 12 // t12
vssrarni.h.w vr16, vr15, 12 // t13
vneg.h vr21, vr21
vmulev_vmaddod_lsx vr14, vr17, vr20, vr21, vr7, vr6, \sz
vneg.h vr20, vr20
vmulev_vmaddod_lsx vr14, vr17, vr21, vr20, vr15, vr10, \sz
vssrarni.h.w vr6, vr7, 12 // t15
vssrarni.h.w vr10, vr15, 12 // t14
vssub.h vr17, vr5, vr0 // t2a
vsadd.h vr14, vr5, vr0 // out[0]
vssub.h vr7, vr3, vr8 // t3a
vsadd.h vr15, vr3, vr8 // out[15]
vsllwil.w.h vr22, vr15, 0
vexth.w.h vr15, vr15
vneg.w vr22, vr22
vneg.w vr15, vr15
vssrarni.h.w vr15, vr22, 0 // out[15]
vsadd.h vr3, vr19, vr4 // out[12]
vssub.h vr8, vr19, vr4 // t7
vssub.h vr0, vr18, vr13 // t6
vsadd.h vr5, vr18, vr13 // out[3]
vsllwil.w.h vr22, vr5, 0
vexth.w.h vr5, vr5
vneg.w vr22, vr22
vneg.w vr5, vr5
vssrarni.h.w vr5, vr22, 0 // out[3]
vsadd.h vr13, vr9, vr12 // out[14]
vssub.h vr19, vr9, vr12 // t11
vssub.h vr4, vr2, vr11 // t10
vsadd.h vr18, vr2, vr11 // out[1]
vsllwil.w.h vr22, vr18, 0
vexth.w.h vr18, vr18
vneg.w vr22, vr22
vneg.w vr18, vr18
vssrarni.h.w vr18, vr22, 0 // out[1]
vsadd.h vr2, vr1, vr10 // out[2]
vssub.h vr11, vr1, vr10 // t14a
vssub.h vr12, vr16, vr6 // t15a
vsadd.h vr9, vr16, vr6 // out[13]
vsllwil.w.h vr22, vr9, 0
vexth.w.h vr9, vr9
vneg.w vr22, vr22
vneg.w vr9, vr9
vssrarni.h.w vr9, vr22, 0 // out[13]
vldrepl.h vr20, t0, 0 // 2896
vmulev_vmaddod_lsx vr17, vr7, vr20, vr20, vr6, vr10, \sz
vneg.h vr21, vr20
vmulev_vmaddod_lsx vr17, vr7, vr20, vr21, vr16, vr1, \sz
vssrarni.h.w vr1, vr16, 12 // out[8]
vsrari.w vr6, vr6, 12
vsrari.w vr10, vr10, 12
vneg.w vr6, vr6
vneg.w vr10, vr10
vssrarni.h.w vr10, vr6, 0 // out[7]
vmulev_vmaddod_lsx vr0, vr8, vr20, vr21, vr16, vr17, \sz
vmulev_vmaddod_lsx vr0, vr8, vr20, vr20, vr6, vr7, \sz
vssrarni.h.w vr7, vr6, 12 // out[4]
vsrari.w vr16, vr16, 12
vsrari.w vr17, vr17, 12
vneg.w vr16, vr16
vneg.w vr17, vr17
vssrarni.h.w vr17, vr16, 0 // out[11]
vmulev_vmaddod_lsx vr4, vr19, vr20, vr21, vr16, vr0, \sz
vmulev_vmaddod_lsx vr4, vr19, vr20, vr20, vr6, vr8, \sz
vssrarni.h.w vr8, vr6, 12 // out[6]
vsrari.w vr16, vr16, 12
vsrari.w vr0, vr0, 12
vneg.w vr16, vr16
vneg.w vr0, vr0
vssrarni.h.w vr0, vr16, 0 // out[9]
vmulev_vmaddod_lsx vr11, vr12, vr20, vr20, vr6, vr4, \sz
vmulev_vmaddod_lsx vr11, vr12, vr20, vr21, vr16, vr19, \sz
vssrarni.h.w vr19, vr16, 12 // out[10]
vsrari.w vr6, vr6, 12
vsrari.w vr4, vr4, 12
vneg.w vr6, vr6
vneg.w vr4, vr4
vssrarni.h.w vr4, vr6, 0 // out[5]
.ifc \txfm, adst
vor.v vr12, vr3, vr3
vor.v vr3, vr5, vr5
vor.v vr5, vr4, vr4
vor.v vr4, vr7, vr7
vor.v vr7, vr10, vr10
vor.v vr10, vr19, vr19
vor.v vr6, vr8, vr8
vor.v vr8, vr1, vr1
vor.v vr11, vr17, vr17
vor.v vr20, vr13, vr13
vor.v vr13, vr9, vr9
vor.v vr9, vr0, vr0
vor.v vr0, vr14, vr14
vor.v vr14, vr20, vr20
vor.v vr1, vr18, vr18
.else
vor.v vr6, vr0, vr0
vor.v vr0, vr15, vr15
vor.v vr15, vr14, vr14
vor.v vr14, vr18, vr18
vor.v vr11, vr7, vr7
vor.v vr7, vr1, vr1
vor.v vr1, vr13, vr13
vor.v vr13, vr2, vr2
vor.v vr2, vr9, vr9
vor.v vr9, vr8, vr8
vor.v vr8, vr10, vr10
vor.v vr10, vr4, vr4
vor.v vr4, vr17, vr17
vor.v vr12, vr5, vr5
vor.v vr5, vr19, vr19
.endif
.endm // inv_adst16_lsx
functionl inv_adst_8h_x16_lsx
inv_adst16_lsx adst, .8h
endfuncl
functionl inv_flipadst_8h_x16_lsx
inv_adst16_lsx flipadst, .8h
endfuncl
functionl inv_adst_4h_x16_lsx
inv_adst16_lsx adst, .4h
endfuncl
functionl inv_flipadst_4h_x16_lsx
inv_adst16_lsx flipadst, .4h
endfuncl
.macro VLD_DST_ADD_W8_x4 in0, in1, in2, in3, in4, in5, in6, in7, in8, \
in9, in10, in11, in12, in13, in14, in15
alsl.d t2, a1, a0, 1
VLD_DST_ADD_W8 \in0, \in1, \in2, \in3
add.d a0, a1, a0
alsl.d t2, a1, a0, 1
VLD_DST_ADD_W8 \in4, \in5, \in6, \in7
add.d a0, a1, a0
alsl.d t2, a1, a0, 1
VLD_DST_ADD_W8 \in8, \in9, \in10, \in11
add.d a0, a1, a0
alsl.d t2, a1, a0, 1
VLD_DST_ADD_W8 \in12, \in13, \in14, \in15
.endm
.macro def_base_8x16 txfm1
functionl inv_txfm_\txfm1\()add_8x16_lsx
blt a3, t5, 816f
vld_x8 a2, 16, 32, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7
vxor.v vr23, vr23, vr23
.irp i, 16, 48, 80, 112, 144, 176, 208, 240
vst vr23, a2, \i
.endr
li.w t0, 2896
vreplgr2vr.w vr23, t0
.irp i, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7
rect2_lsx \i, vr23, \i
.endr
.ifc \txfm1, identity_
LSX_TRANSPOSE8x8_H vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15, \
vr16, vr17, vr18, vr19, vr20, vr21, vr22, vr23
.else
move t6, ra
jirl ra, t7, 0
move ra, t6
vsrari_h_x8 vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15, 1
LSX_TRANSPOSE8x8_H vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15, \
vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15, \
vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7
.endif
816:
ble t5, a3, 816816f
.irp i, vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15
vxor.v \i, \i, \i
.endr
816816:
vld_x8 a2, 0, 32, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7
vxor.v vr23, vr23, vr23
.irp i, 0, 32, 64, 96, 128, 160, 192, 224
vst vr23, a2, \i
.endr
li.w t0, 2896
vreplgr2vr.w vr23, t0
.irp i, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7
rect2_lsx \i, vr23, \i
.endr
.ifc \txfm1, identity_
.else
move t6, ra
jirl ra, t7, 0
move ra, t6
.irp i, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7
vsrari.h \i, \i, 1
.endr
.endif
LSX_TRANSPOSE8x8_H vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr16, vr17, vr18, vr19, vr20, vr21, vr22, vr23
move t6, ra
jirl ra, t8, 0
move ra, t6
vor.v vr0, vr0, vr0
vsrari_h_x8 vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15, \
vr16, vr17, vr18, vr19, vr20, vr21, vr22, vr23, 4
vsrari_h_x8 vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15, 4
VLD_DST_ADD_W8_x4 vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15, \
vr16, vr17, vr18, vr19, vr20, vr21, vr22, vr23
endfuncl
.endm
def_base_8x16 identity_
def_base_8x16
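// Same idea as DST_ADD_W8, but for 16-pixel rows: each destination vector is
// split into low/high halves, the eight residual vectors \in4-\in11 are added,
// and the results are saturate-packed and stored as four 16-byte rows.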
.macro DST_ADD_W16 in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11
vsllwil.hu.bu vr4, \in0, 0
vexth.hu.bu vr0, \in0
vsllwil.hu.bu vr5, \in1, 0
vexth.hu.bu vr1, \in1
vsllwil.hu.bu vr6, \in2, 0
vexth.hu.bu vr2, \in2
vsllwil.hu.bu vr7, \in3, 0
vexth.hu.bu vr3, \in3
vadd.h vr4, vr4, \in4
vadd.h vr0, vr0, \in5
vadd.h vr5, vr5, \in6
vadd.h vr1, vr1, \in7
vadd.h vr6, vr6, \in8
vadd.h vr2, vr2, \in9
vadd.h vr7, vr7, \in10
vadd.h vr3, vr3, \in11
vssrani.bu.h vr0, vr4, 0
vssrani.bu.h vr1, vr5, 0
vssrani.bu.h vr2, vr6, 0
vssrani.bu.h vr3, vr7, 0
vst vr0, a0, 0
vstx vr1, a0, a1
vst vr2, t2, 0
vstx vr3, t2, a1
.endm
.macro VLD_DST_ADD_W16 in0, in1, in2, in3, in4, in5, in6, in7
vld vr0, a0, 0
vldx vr1, a0, a1
vld vr2, t2, 0
vldx vr3, t2, a1
DST_ADD_W16 vr0, vr1, vr2, vr3, \in0, \in1, \in2, \in3, \
\in4, \in5, \in6, \in7
.endm
.macro def_fn_16x8 txfm1
functionl inv_txfm_\txfm1\()add_16x8_lsx
PUSH_REG
vld_x16 a2, 0, 16, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15
vxor.v vr23, vr23, vr23
.irp i, 0, 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, \
176, 192, 208, 224, 240
vst vr23, a2, \i
.endr
li.w t0, 2896
vreplgr2vr.w vr23, t0
.irp i, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15
rect2_lsx \i, vr23, \i
.endr
move t6, ra
jirl ra, t7, 0
move ra, t6
.ifnc \txfm1, identity_
.irp i, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15
vsrari.h \i, \i, 1
.endr
.endif
LSX_TRANSPOSE8x8_H vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr16, vr17, vr18, vr19, vr20, vr21, vr22, vr23
move t6, ra
jirl ra, t8, 0
move ra, t6
vsrari_h_x8 vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr24, vr25, vr26, vr27, vr28, vr29, vr30, vr31, 4
LSX_TRANSPOSE8x8_H vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15, \
vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr16, vr17, vr18, vr19, vr20, vr21, vr22, vr23
move t6, ra
jirl ra, t8, 0
move ra, t6
vsrari_h_x8 vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15, 4
alsl.d t2, a1, a0, 1
VLD_DST_ADD_W16 vr24, vr8, vr25, vr9, vr26, vr10, vr27, vr11
alsl.d a0, a1, a0, 2
alsl.d t2, a1, a0, 1
VLD_DST_ADD_W16 vr28, vr12, vr29, vr13, vr30, vr14, vr31, vr15
POP_REG
endfuncl
.endm
def_fn_16x8 identity_
def_fn_16x8
.macro fun16x8 txfm1, txfm2
function inv_txfm_add_\txfm1\()_\txfm2\()_16x8_8bpc_lsx
.ifc \txfm1\()_\txfm2, dct_dct
bnez a3, .NO_HAS_DCONLY_16x8
idct_dc 16, 8, 1
DST_ADD_W16 vr10, vr11, vr12, vr13, vr20, vr20, vr20, \
vr20, vr20, vr20, vr20, vr20
alsl.d a0, a1, a0, 2
alsl.d t2, a1, a0, 1
VLD_DST_ADD_W16 vr20, vr20, vr20, vr20, vr20, vr20, vr20, vr20
b .\txfm1\()_\txfm2\()_16x8_END
.NO_HAS_DCONLY_16x8:
.endif
la.local t7, inv_\txfm1\()_8h_x16_lsx
.ifc \txfm1, identity
la.local t7, inv_identity_8h_x16_lsx1
.endif
la.local t8, inv_\txfm2\()_8h_x8_lsx
.ifc \txfm1, identity
b inv_txfm_identity_add_16x8_lsx
.else
b inv_txfm_add_16x8_lsx
.endif
.\txfm1\()_\txfm2\()_16x8_END:
endfunc
.endm
fun16x8 dct, dct
fun16x8 identity, identity
fun16x8 dct, adst
fun16x8 dct, flipadst
fun16x8 dct, identity
fun16x8 adst, dct
fun16x8 adst, adst
fun16x8 adst, flipadst
fun16x8 flipadst, dct
fun16x8 flipadst, adst
fun16x8 flipadst, flipadst
fun16x8 identity, dct
fun16x8 adst, identity
fun16x8 flipadst, identity
fun16x8 identity, adst
fun16x8 identity, flipadst
.macro fun8x16 txfm1, txfm2, eob_half
function inv_txfm_add_\txfm1\()_\txfm2\()_8x16_8bpc_lsx
.ifc \txfm1\()_\txfm2, dct_dct
bnez a3, .NO_HAS_DCONLY_8x16
idct_dc 8, 16, 1
DST_ADD_W8 vr10, vr11, vr12, vr13, vr20, vr20, vr20, vr20
.rept 3
add.d a0, a1, a0
alsl.d t2, a1, a0, 1
VLD_DST_ADD_W8 vr20, vr20, vr20, vr20
.endr
b .\txfm1\()_\txfm2\()_8x16_END
.NO_HAS_DCONLY_8x16:
.endif
li.w t5, \eob_half
.ifnc \txfm1, identity
la.local t7, inv_\txfm1\()_8h_x8_lsx
.endif
la.local t8, inv_\txfm2\()_8h_x16_lsx
.ifc \txfm1, identity
b inv_txfm_identity_add_8x16_lsx
.else
b inv_txfm_add_8x16_lsx
.endif
.\txfm1\()_\txfm2\()_8x16_END:
endfunc
.endm
fun8x16 dct, dct, 43
fun8x16 identity, identity, 43
fun8x16 dct, adst, 43
fun8x16 dct, flipadst, 43
fun8x16 dct, identity, 8
fun8x16 adst, dct, 43
fun8x16 adst, adst, 43
fun8x16 adst, flipadst, 43
fun8x16 flipadst, dct, 43
fun8x16 flipadst, adst, 43
fun8x16 flipadst, flipadst, 43
fun8x16 identity, dct, 64
fun8x16 adst, identity, 8
fun8x16 flipadst, identity, 8
fun8x16 identity, adst, 64
fun8x16 identity, flipadst, 64
functionl inv_txfm_add_16x16_lsx
malloc_space 512
addi.d t1, sp, 64
addi.d t2, a2, 0
.rept 2
vld_x16 a2, 0, 32, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15
vxor.v vr23, vr23, vr23
.irp i, 0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, \
384, 416, 448, 480
vst vr23, a2, \i
.endr
move t6, ra
jirl ra, t7, 0
move ra, t6
LSX_TRANSPOSE8x8_H vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr16, vr17, vr18, vr19, vr20, vr21, vr22, vr23
LSX_TRANSPOSE8x8_H vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15, \
vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15, \
vr16, vr17, vr18, vr19, vr20, vr21, vr22, vr23
.irp i, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15
vsrari.h \i, \i, 2
.endr
vst_x8 t1, 0, 32, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7
vst_x8 t1, 16, 32, vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15
addi.d t1, t1, 256
addi.d a2, a2, 16
blt a3, t5, 1616f
.endr
1616:
ble t5, a3, 16161616f
addi.d t1, sp, 320
vxor.v vr23, vr23, vr23
.irp i, 0, 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, \
240
vst vr23, t1, \i
.endr
16161616:
addi.d t1, sp, 64
.rept 2
vld_x16 t1, 0, 32, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15
move t6, ra
jirl ra, t8, 0
move ra, t6
vst_x16 t1, 0, 32, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15
addi.d t1, t1, 16
.endr
alsl.d t2, a1, a0, 1
addi.d t1, sp, 64
.rept 4
vld_x8 t1, 0, 16, vr16, vr17, vr18, vr19, vr20, vr21, vr22, vr23
vsrari_h_x8 vr16, vr17, vr18, vr19, vr20, vr21, vr22, vr23, \
vr16, vr17, vr18, vr19, vr20, vr21, vr22, vr23, 4
VLD_DST_ADD_W16 vr16, vr17, vr18, vr19, vr20, vr21, vr22, vr23
alsl.d a0, a1, a0, 2
alsl.d t2, a1, a0, 1
addi.d t1, t1, 128
.endr
free_space 512
endfuncl
.macro fun16x16 txfm1, txfm2, eob_half
function inv_txfm_add_\txfm1\()_\txfm2\()_16x16_8bpc_lsx
.ifc \txfm1\()_\txfm2, dct_dct
bnez a3, .NO_HAS_DCONLY_16x16
idct_dc 16, 16, 2
DST_ADD_W16 vr10, vr11, vr12, vr13, vr20, vr20, vr20, \
vr20, vr20, vr20, vr20, vr20
.rept 3
alsl.d a0, a1, a0, 2
alsl.d t2, a1, a0, 1
VLD_DST_ADD_W16 vr20, vr20, vr20, vr20, vr20, vr20, vr20, vr20
.endr
b .\txfm1\()_\txfm2\()_16x16_END
.NO_HAS_DCONLY_16x16:
.endif
li.w t5, \eob_half
la.local t7, inv_\txfm1\()_8h_x16_lsx
la.local t8, inv_\txfm2\()_8h_x16_lsx
b inv_txfm_add_16x16_lsx
.\txfm1\()_\txfm2\()_16x16_END:
endfunc
.endm
fun16x16 dct, dct, 36
fun16x16 adst, adst, 36
fun16x16 adst, dct, 36
fun16x16 dct, adst, 36
fun16x16 flipadst, dct, 36
fun16x16 dct, flipadst, 36
fun16x16 adst, flipadst, 36
fun16x16 flipadst, adst, 36
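// Odd half of the 32-point inverse DCT. On entry vr0-vr7/vr19/vr24-vr30 hold
// the odd-frequency inputs; the even-half (16-point DCT) results are reloaded
// from \in2 and the combined c[0]-c[31] are stored to \in1, with optional 8x8
// transpose and right shift controlled by \transpose8x8 and \shift.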
.macro dct_8x32_core_lsx in1, in2, vld_st0, vld_st1, vld_stride, \
vst_st0, vst_st1, vst_st2, vst_st3, vst_stride, \
transpose8x8, shift
la.local t0, idct_coeffs
vldrepl.w vr20, t0, 64 // 201
vldrepl.w vr21, t0, 68 // 4091
vmul_vmadd_w vr0, vr30, vr21, vr20, vr8, vr9
vmul_vmsub_w vr0, vr30, vr20, vr21, vr11, vr10
vssrarni.h.w vr9, vr8, 12 // t31a
vssrarni.h.w vr10, vr11, 12 // t16a
vldrepl.w vr20, t0, 72 // 3035
vldrepl.w vr21, t0, 76 // 2751
vmul_vmadd_w vr19, vr7, vr21, vr20, vr8, vr0
vmul_vmsub_w vr19, vr7, vr20, vr21, vr11, vr30
vssrarni.h.w vr0, vr8, 12 // t30a
vssrarni.h.w vr30, vr11, 12 // t17a
vldrepl.w vr20, t0, 80 // 1751
vldrepl.w vr21, t0, 84 // 3703
vmul_vmadd_w vr4, vr26, vr21, vr20, vr8, vr7
vmul_vmsub_w vr4, vr26, vr20, vr21, vr11, vr19
vssrarni.h.w vr7, vr8, 12 // t29a
vssrarni.h.w vr19, vr11, 12 // t18a
vldrepl.w vr20, t0, 88 // 3857
vldrepl.w vr21, t0, 92 // 1380
vmul_vmadd_w vr27, vr3, vr21, vr20, vr8, vr4
vmul_vmsub_w vr27, vr3, vr20, vr21, vr11, vr26
vssrarni.h.w vr4, vr8, 12 // t28a
vssrarni.h.w vr26, vr11, 12 // t19a
vldrepl.w vr20, t0, 96 // 995
vldrepl.w vr21, t0, 100 // 3973
vmul_vmadd_w vr2, vr28, vr21, vr20, vr8, vr3
vmul_vmsub_w vr2, vr28, vr20, vr21, vr11, vr27
vssrarni.h.w vr3, vr8, 12 // t27a
vssrarni.h.w vr27, vr11, 12 // t20a
vldrepl.w vr20, t0, 104 // 3513
vldrepl.w vr21, t0, 108 // 2106
vmul_vmadd_w vr25, vr5, vr21, vr20, vr8, vr2
vmul_vmsub_w vr25, vr5, vr20, vr21, vr11, vr28
vssrarni.h.w vr2, vr8, 12 // t26a
vssrarni.h.w vr28, vr11, 12 // t21a
vldrepl.w vr20, t0, 112 // 2440 -> 1220
vldrepl.w vr21, t0, 116 // 3290 -> 1645
vmul_vmadd_w vr6, vr24, vr21, vr20, vr8, vr5
vmul_vmsub_w vr6, vr24, vr20, vr21, vr11, vr25
vssrarni.h.w vr5, vr8, 12 // t25a
vssrarni.h.w vr25, vr11, 12 // t22a
vldrepl.w vr20, t0, 120 // 4052
vldrepl.w vr21, t0, 124 // 601
vmul_vmadd_w vr29, vr1, vr21, vr20, vr8, vr6
vmul_vmsub_w vr29, vr1, vr20, vr21, vr11, vr24
vssrarni.h.w vr6, vr8, 12 // t24a
vssrarni.h.w vr24, vr11, 12 // t23a
vsadd.h vr1, vr10, vr30 // t16
vssub.h vr29, vr10, vr30 // t17
vssub.h vr8, vr26, vr19 // t18
vsadd.h vr31, vr26, vr19 // t19
vsadd.h vr10, vr27, vr28 // t20
vssub.h vr30, vr27, vr28 // t21
vssub.h vr19, vr24, vr25 // t22
vsadd.h vr26, vr24, vr25 // t23
vsadd.h vr27, vr6, vr5 // t24
vssub.h vr28, vr6, vr5 // t25
vssub.h vr24, vr3, vr2 // t26
vsadd.h vr25, vr3, vr2 // t27
vsadd.h vr5, vr4, vr7 // t28
vssub.h vr6, vr4, vr7 // t29
vssub.h vr2, vr9, vr0 // t30
vsadd.h vr3, vr9, vr0 // t31
vldrepl.w vr20, t0, 16 // 799
vldrepl.w vr21, t0, 20 // 4017
vmul_vmadd_w vr2, vr29, vr21, vr20, vr4, vr7
vmul_vmsub_w vr2, vr29, vr20, vr21, vr11, vr0
vssrarni.h.w vr7, vr4, 12 // t30a
vssrarni.h.w vr0, vr11, 12 // t17a
vmul_vmadd_w vr6, vr8, vr21, vr20, vr4, vr9
vneg.w vr4, vr4
vneg.w vr9, vr9
vmul_vmsub_w vr6, vr8, vr20, vr21, vr11, vr2
vssrarni.h.w vr9, vr4, 12 // t18a
vssrarni.h.w vr2, vr11, 12 // t29a
vldrepl.w vr20, t0, 24 // 3406 -> 1703
vldrepl.w vr21, t0, 28 // 2276 -> 1138
vmul_vmadd_w vr24, vr30, vr21, vr20, vr4, vr29
vmul_vmsub_w vr24, vr30, vr20, vr21, vr11, vr6
vssrarni.h.w vr29, vr4, 12 // t26a
vssrarni.h.w vr6, vr11, 12 // t21a
vmul_vmadd_w vr28, vr19, vr21, vr20, vr4, vr8
vneg.w vr4, vr4
vneg.w vr8, vr8
vmul_vmsub_w vr28, vr19, vr20, vr21, vr11, vr24
vssrarni.h.w vr8, vr4, 12 // t22a
vssrarni.h.w vr24, vr11, 12 // t25a
vsadd.h vr4, vr1, vr31 // t16a
vssub.h vr30, vr1, vr31 // t19a
vsadd.h vr19, vr0, vr9 // t17
vssub.h vr28, vr0, vr9 // t18
vssub.h vr1, vr26, vr10 // t20a
vsadd.h vr31, vr26, vr10 // t23a
vssub.h vr0, vr8, vr6 // t21
vsadd.h vr9, vr8, vr6 // t22
vsadd.h vr10, vr27, vr25 // t24a
vssub.h vr26, vr27, vr25 // t27a
vsadd.h vr6, vr24, vr29 // t25
vssub.h vr8, vr24, vr29 // t26
vssub.h vr25, vr3, vr5 // t28a
vsadd.h vr27, vr3, vr5 // t31a
vssub.h vr24, vr7, vr2 // t29
vsadd.h vr29, vr7, vr2 // t30
vldrepl.w vr20, t0, 8 // 1567
vldrepl.w vr21, t0, 12 // 3784
vmul_vmadd_w vr24, vr28, vr21, vr20, vr3, vr5
vmul_vmsub_w vr24, vr28, vr20, vr21, vr11, vr2
vssrarni.h.w vr5, vr3, 12 // t29a
vssrarni.h.w vr2, vr11, 12 // t18a
vmul_vmadd_w vr25, vr30, vr21, vr20, vr3, vr7
vmul_vmsub_w vr25, vr30, vr20, vr21, vr11, vr24
vssrarni.h.w vr7, vr3, 12 // t28
vssrarni.h.w vr24, vr11, 12 // t19
vmul_vmadd_w vr26, vr1, vr21, vr20, vr3, vr28
vneg.w vr3, vr3
vneg.w vr28, vr28
vmul_vmsub_w vr26, vr1, vr20, vr21, vr11, vr25
vssrarni.h.w vr28, vr3, 12 // t20
vssrarni.h.w vr25, vr11, 12 // t27
vmul_vmadd_w vr8, vr0, vr21, vr20, vr3, vr30
vneg.w vr3, vr3
vneg.w vr30, vr30
vmul_vmsub_w vr8, vr0, vr20, vr21, vr11, vr1
vssrarni.h.w vr30, vr3, 12 // t21a
vssrarni.h.w vr1, vr11, 12 // t26a
vsadd.h vr3, vr4, vr31 // t16
vssub.h vr26, vr4, vr31 // t23
vsadd.h vr0, vr19, vr9 // t17a
vssub.h vr8, vr19, vr9 // t22a
vsadd.h vr4, vr2, vr30 // t18
vssub.h vr31, vr2, vr30 // t21
vsadd.h vr9, vr24, vr28 // t19a
vssub.h vr19, vr24, vr28 // t20a
vssub.h vr2, vr27, vr10 // t24
vsadd.h vr30, vr27, vr10 // t31
vssub.h vr24, vr29, vr6 // t25a
vsadd.h vr28, vr29, vr6 // t30a
vssub.h vr10, vr5, vr1 // t26
vsadd.h vr27, vr5, vr1 // t29
vssub.h vr6, vr7, vr25 // t27a
vsadd.h vr29, vr7, vr25 // t28a
vldrepl.w vr20, t0, 0 // 2896
vmul_vmsub_w vr6, vr19, vr20, vr20, vr1, vr5
vmul_vmadd_w vr6, vr19, vr20, vr20, vr11, vr7
vssrarni.h.w vr5, vr1, 12 // t20
vssrarni.h.w vr7, vr11, 12 // t27
vmul_vmsub_w vr10, vr31, vr20, vr20, vr1, vr25
vmul_vmadd_w vr10, vr31, vr20, vr20, vr11, vr6
vssrarni.h.w vr25, vr1, 12 // t21a
vssrarni.h.w vr6, vr11, 12 // t26a
vmul_vmsub_w vr24, vr8, vr20, vr20, vr1, vr19
vmul_vmadd_w vr24, vr8, vr20, vr20, vr11, vr10
vssrarni.h.w vr19, vr1, 12 // t22
vssrarni.h.w vr10, vr11, 12 // t25
vmul_vmsub_w vr2, vr26, vr20, vr20, vr1, vr31
vmul_vmadd_w vr2, vr26, vr20, vr20, vr11, vr8
vssrarni.h.w vr31, vr1, 12 // t23a
vssrarni.h.w vr8, vr11, 12 // t24a
// t31 t30a t29 t28a t27 t26a t25 t24a t23a t22 t21a t20 t19a t18 t17a t16
// vr30 vr28 vr27 vr29 vr7 vr6 vr10 vr8 vr31 vr19 vr25 vr5 vr9 vr4 vr0 vr3
vld_x8 \in2, \vld_st0, \vld_stride, vr11, vr12, vr13, vr14, vr15, vr16, vr17, vr18
vsadd.h vr1, vr11, vr30 // c[0]
vssub.h vr2, vr11, vr30 // c[31]
vsadd.h vr24, vr12, vr28 // c[1]
vssub.h vr26, vr12, vr28 // c[30]
vsadd.h vr11, vr13, vr27 // c[2]
vssub.h vr30, vr13, vr27 // c[29]
vsadd.h vr12, vr14, vr29 // c[3]
vssub.h vr28, vr14, vr29 // c[28]
vsadd.h vr13, vr15, vr7 // c[4]
vssub.h vr27, vr15, vr7 // c[27]
vsadd.h vr14, vr16, vr6 // c[5]
vssub.h vr29, vr16, vr6 // c[26]
vsadd.h vr7, vr17, vr10 // c[6]
vssub.h vr15, vr17, vr10 // c[25]
vsadd.h vr6, vr18, vr8 // c[7]
vssub.h vr16, vr18, vr8 // c[24]
.ifnb \transpose8x8
LSX_TRANSPOSE8x8_H vr1, vr24, vr11, vr12, vr13, vr14, vr7, vr6, \
vr1, vr24, vr11, vr12, vr13, vr14, vr7, vr6, \
vr8, vr10, vr17, vr18, vr20, vr21, vr22, vr23
.endif
.ifnb \shift
.irp i, vr1, vr24, vr11, vr12, vr13, vr14, vr7, vr6
vsrari.h \i, \i, \shift
.endr
.endif
vst_x8 \in1, \vst_st0, \vst_stride, vr1, vr24, vr11, vr12, vr13, vr14, vr7, vr6
.ifnb \transpose8x8
LSX_TRANSPOSE8x8_H vr16, vr15, vr29, vr27, vr28, vr30, vr26, vr2, \
vr16, vr15, vr29, vr27, vr28, vr30, vr26, vr2, \
vr8, vr10, vr17, vr18, vr20, vr21, vr22, vr23
.endif
.ifnb \shift
.irp i, vr16, vr15, vr29, vr27, vr28, vr30, vr26, vr2
vsrari.h \i, \i, \shift
.endr
.endif
vst_x8 \in1, \vst_st1, \vst_stride, vr16, vr15, vr29, vr27, vr28, vr30, vr26, vr2
vld_x8 \in2, \vld_st1, \vld_stride, vr11, vr12, vr13, vr14, vr15, vr16, vr17, vr18
vsadd.h vr1, vr11, vr31 // c[8]
vssub.h vr2, vr11, vr31 // c[23]
vsadd.h vr24, vr12, vr19 // c[9]
vssub.h vr26, vr12, vr19 // c[22]
vsadd.h vr11, vr13, vr25 // c[10]
vssub.h vr30, vr13, vr25 // c[21]
vsadd.h vr12, vr14, vr5 // c[11]
vssub.h vr28, vr14, vr5 // c[20]
vsadd.h vr13, vr15, vr9 // c[12]
vssub.h vr27, vr15, vr9 // c[19]
vsadd.h vr14, vr16, vr4 // c[13]
vssub.h vr29, vr16, vr4 // c[18]
vsadd.h vr7, vr17, vr0 // c[14]
vssub.h vr15, vr17, vr0 // c[17]
vsadd.h vr6, vr18, vr3 // c[15]
vssub.h vr16, vr18, vr3 // c[16]
.ifnb \transpose8x8
LSX_TRANSPOSE8x8_H vr1, vr24, vr11, vr12, vr13, vr14, vr7, vr6, \
vr1, vr24, vr11, vr12, vr13, vr14, vr7, vr6, \
vr8, vr10, vr17, vr18, vr20, vr21, vr22, vr23
.endif
.ifnb \shift
.irp i, vr1, vr24, vr11, vr12, vr13, vr14, vr7, vr6
vsrari.h \i, \i, \shift
.endr
.endif
vst_x8 \in1, \vst_st2, \vst_stride, vr1, vr24, vr11, vr12, vr13, vr14, vr7, vr6
.ifnb \transpose8x8
LSX_TRANSPOSE8x8_H vr16, vr15, vr29, vr27, vr28, vr30, vr26, vr2, \
vr16, vr15, vr29, vr27, vr28, vr30, vr26, vr2, \
vr8, vr10, vr17, vr18, vr20, vr21, vr22, vr23
.endif
.ifnb \shift
.irp i, vr16, vr15, vr29, vr27, vr28, vr30, vr26, vr2
vsrari.h \i, \i, \shift
.endr
.endif
vst_x8 \in1, \vst_st3, \vst_stride, vr16, vr15, vr29, vr27, vr28, vr30, vr26, vr2
.endm
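// End-of-block thresholds: the first pass handles one 8-column strip of
// coefficients per iteration, and these values decide how many strips must be
// processed for a given eob (a3); strips beyond that are treated as all-zero.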
const eob_32x32
.short 36, 136, 300, 1024
endconst
const eob_8x32
.short 43, 107, 171, 256
endconst
const eob_16x32
.short 36, 151, 279, 512
endconst
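// Add eight residual vectors (\in0-\in7, two per 16 pixels) to one pair of
// 32-pixel destination rows previously loaded into vr10-vr13, then
// saturate-pack and store the two rows at a0 and t2.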
.macro DST_ADD_W32 in0, in1, in2, in3, in4, in5, in6, in7
vsllwil.hu.bu vr4, vr10, 0
vsllwil.hu.bu vr5, vr11, 0
vsllwil.hu.bu vr6, vr12, 0
vsllwil.hu.bu vr7, vr13, 0
vexth.hu.bu vr10, vr10
vexth.hu.bu vr11, vr11
vexth.hu.bu vr12, vr12
vexth.hu.bu vr13, vr13
vadd.h vr4, vr4, \in0
vadd.h vr10, vr10, \in1
vadd.h vr5, vr5, \in2
vadd.h vr11, vr11, \in3
vadd.h vr6, vr6, \in4
vadd.h vr12, vr12, \in5
vadd.h vr7, vr7, \in6
vadd.h vr13, vr13, \in7
vssrani.bu.h vr10, vr4, 0
vssrani.bu.h vr11, vr5, 0
vssrani.bu.h vr12, vr6, 0
vssrani.bu.h vr13, vr7, 0
vst vr10, a0, 0
vst vr11, a0, 16
vst vr12, t2, 0
vst vr13, t2, 16
.endm
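// DC-only path for 32-wide blocks: read the DC coefficient, clear it in the
// coefficient buffer, scale by 181 with rounding (twice for 2:1 rectangular
// sizes), apply the optional extra shift, and leave the broadcast pixel
// offsets in vr20 while pre-loading the first two destination rows into
// vr10-vr13.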
.macro idct_dc_w32 w, h, shift
ld.h t2, a2, 0 // dc
vldi vr0, 0x8b5 // 181
vreplgr2vr.w vr1, t2
vldi vr20, 0x880 // 128
vmul.w vr2, vr0, vr1 // dc * 181
st.h zero, a2, 0
add.d t2, a0, a1
vsrari.w vr2, vr2, 8 // (dc * 181 + 128) >> 8
vld vr13, t2, 16
.if (2*\w == \h) || (2*\h == \w)
vmul.w vr2, vr2, vr0
vsrari.w vr2, vr2, 8
.endif
.if \shift>0
vsrari.w vr2, vr2, \shift // (dc + rnd) >> shift
.endif
vld vr11, a0, 16
vmadd.w vr20, vr2, vr0
vld vr12, t2, 0
vssrarni.h.w vr20, vr20, 12
vld vr10, a0, 0
.endm
function inv_txfm_add_dct_dct_32x8_8bpc_lsx
bnez a3, .NO_HAS_DCONLY_32x8
idct_dc_w32 32, 8, 2
DST_ADD_W32 vr20, vr20, vr20, vr20, vr20, vr20, vr20, vr20
.rept 3
alsl.d a0, a1, a0, 1
add.d t2, a0, a1
vld vr10, a0, 0
vld vr11, a0, 16
vld vr12, t2, 0
vld vr13, t2, 16
DST_ADD_W32 vr20, vr20, vr20, vr20, vr20, vr20, vr20, vr20
.endr
b .DCT_DCT_32X8_END
.NO_HAS_DCONLY_32x8:
malloc_space 512+256
addi.d t1, sp, 64
addi.d t2, a2, 0
addi.d t3, sp, 64
addi.d t3, t3, 512
vld_x16 t2, 0, 32, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15
vxor.v vr31, vr31, vr31
vst_x16 t2, 0, 32, vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31, \
vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31
inv_dct16_lsx .8h
vst_x16 t3, 0, 16, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15
vld_x16 t2, 16, 32, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr19, vr24, vr25, vr26, vr27, vr28, vr29, vr30
vxor.v vr31, vr31, vr31
vst_x16 t2, 16, 32, vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31, \
vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31
dct_8x32_core_lsx t1, t3, 0, 128, 16, 0, 48, 16, 32, 64, transpose8x8, 2
addi.d t2, sp, 64
.rept 4
vld_x8 t2, 0, 64, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7
inv_dct8_lsx vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, .8h
.irp i, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7
vsrari.h \i, \i, 4
.endr
vst_x8 t2, 0, 64, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7
addi.d t2, t2, 16
.endr
addi.d t0, sp, 64
.rept 4
add.d t2, a0, a1
vld vr10, a0, 0
vld vr11, a0, 16
vld vr12, t2, 0
vld vr13, t2, 16
vld_x8 t0, 0, 16, vr16, vr17, vr18, vr19, vr20, vr21, vr22, vr23
DST_ADD_W32 vr16, vr17, vr18, vr19, vr20, vr21, vr22, vr23
alsl.d a0, a1, a0, 1
addi.d t0, t0, 128
.endr
free_space 512+256
.DCT_DCT_32X8_END:
endfunc
function inv_txfm_add_dct_dct_32x16_8bpc_lsx
bnez a3, .NO_HAS_DCONLY_32x16
idct_dc_w32 32, 16, 1
DST_ADD_W32 vr20, vr20, vr20, vr20, vr20, vr20, vr20, vr20
.rept 7
alsl.d a0, a1, a0, 1
add.d t2, a0, a1
vld vr10, a0, 0
vld vr11, a0, 16
vld vr12, t2, 0
vld vr13, t2, 16
DST_ADD_W32 vr20, vr20, vr20, vr20, vr20, vr20, vr20, vr20
.endr
b .DCT_DCT_32X16_END
.NO_HAS_DCONLY_32x16:
malloc_space 1024+256 // 32*32*2+512
addi.d t1, sp, 64
addi.d t2, a2, 0
addi.d t3, sp, 64
addi.d t3, t3, 1024
.rept 2
vld_x16 t2, 0, 64, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15
vxor.v vr31, vr31, vr31
vst_x16 t2, 0, 64, vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31, \
vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31
li.w t0, 2896
vreplgr2vr.w vr23, t0
.irp i, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15
rect2_lsx \i, vr23, \i
.endr
inv_dct16_lsx .8h
vst_x16 t3, 0, 16, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15
vld_x16 t2, 32, 64, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr19, vr24, vr25, vr26, vr27, vr28, vr29, vr30
la.local t0, idct_coeffs
vldrepl.w vr23, t0, 0 // 2896
.irp i, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr19, vr24, vr25, vr26, vr27, vr28, vr29, vr30
rect2_lsx \i, vr23, \i
.endr
vxor.v vr31, vr31, vr31
vst_x16 t2, 32, 64, vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31, \
vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31
dct_8x32_core_lsx t1, t3, 0, 128, 16, 0, 48, 16, 32, 64, transpose8x8, 1
addi.d t2, t2, 16
addi.d t1, t1, 512
.endr
addi.d t2, sp, 64
.rept 4
vld_x16 t2, 0, 64, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15
inv_dct16_lsx .8h
.irp i, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15
vsrari.h \i, \i, 4
.endr
vst_x16 t2, 0, 64, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15
addi.d t2, t2, 16
.endr
addi.d t0, sp, 64
.rept 8
add.d t2, a0, a1
vld vr10, a0, 0
vld vr11, a0, 16
vld vr12, t2, 0
vld vr13, t2, 16
vld_x8 t0, 0, 16, vr16, vr17, vr18, vr19, vr20, vr21, vr22, vr23
DST_ADD_W32 vr16, vr17, vr18, vr19, vr20, vr21, vr22, vr23
alsl.d a0, a1, a0, 1
addi.d t0, t0, 128
.endr
free_space 1024+256
.DCT_DCT_32X16_END:
endfunc
function inv_txfm_add_dct_dct_32x32_8bpc_lsx
bnez a3, .NO_HAS_DCONLY_32x32
idct_dc_w32 32, 32, 2
DST_ADD_W32 vr20, vr20, vr20, vr20, vr20, vr20, vr20, vr20
.rept 15
alsl.d a0, a1, a0, 1
add.d t2, a0, a1
vld vr10, a0, 0
vld vr11, a0, 16
vld vr12, t2, 0
vld vr13, t2, 16
DST_ADD_W32 vr20, vr20, vr20, vr20, vr20, vr20, vr20, vr20
.endr
b .DCT_DCT_32X32_END
.NO_HAS_DCONLY_32x32:
malloc_space 2560 // 32*32*2+512
addi.d t1, sp, 64
addi.d t2, a2, 0
addi.d t3, sp, 1024
addi.d t3, t3, 1024
addi.d t3, t3, 64
la.local t8, eob_32x32
.DCT_DCT_EOB_32x32:
ld.h t7, t8, 0
addi.d t8, t8, 2
vld_x16 t2, 0, 128, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15
vxor.v vr31, vr31, vr31
vst_x16 t2, 0, 128, vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31, \
vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31
inv_dct16_lsx .8h
vst_x16 t3, 0, 16, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15
vld_x16 t2, 64, 128, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr19, vr24, vr25, vr26, vr27, vr28, vr29, vr30
vxor.v vr31, vr31, vr31
vst_x16 t2, 64, 128, vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31, \
vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31
dct_8x32_core_lsx t1, t3, 0, 128, 16, 0, 48, 16, 32, 64, transpose8x8, 2
addi.d t2, t2, 16
addi.d t1, t1, 512
bge a3, t7, .DCT_DCT_EOB_32x32
la.local t8, eob_32x32
vxor.v vr31, vr31, vr31
ld.h t7, t8, 4
bge a3, t7, .DCT_DCT_EOB_32x32_END // a3>=t7
vst_x16 sp, 64+1536, 16, vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31, \
vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31
addi.d t1, sp, 256+64
vst_x16 t1, 1536, 16, vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31, \
vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31
ld.h t7, t8, 2
bge a3, t7, .DCT_DCT_EOB_32x32_END
vst_x16 sp, 64+1024, 16, vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31, \
vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31
vst_x16 t1, 1024, 16, vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31, \
vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31
ld.h t7, t8, 0
bge a3, t7, .DCT_DCT_EOB_32x32_END
vst_x16 sp, 64+512, 16, vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31, \
vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31
vst_x16 t1, 512, 16, vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31, \
vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31
.DCT_DCT_EOB_32x32_END:
addi.d t2, sp, 64
addi.d t1, sp, 64
.rept 4
vld_x16 t2, 0, 128, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15
inv_dct16_lsx .8h
vst_x16 t3, 0, 16, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15
vld_x16 t2, 64, 128, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr19, vr24, vr25, vr26, vr27, vr28, vr29, vr30
dct_8x32_core_lsx t1, t3, 0, 128, 16, 0, 1536, 512, 1024, 64, , 4
addi.d t2, t2, 16
addi.d t1, t1, 16
.endr
addi.d t0, sp, 64
.rept 16
add.d t2, a0, a1
vld vr10, a0, 0
vld vr11, a0, 16
vld vr12, t2, 0
vld vr13, t2, 16
vld_x8 t0, 0, 16, vr16, vr17, vr18, vr19, vr20, vr21, vr22, vr23
DST_ADD_W32 vr16, vr17, vr18, vr19, vr20, vr21, vr22, vr23
alsl.d a0, a1, a0, 1
addi.d t0, t0, 128
.endr
free_space 2560 // 32*32*2+512
.DCT_DCT_32X32_END:
endfunc
/*
* temp: vr8, vr9, vr10, vr12, vr20, vr21, vr22, vr23
*/
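// The _tx64 helpers assume the missing (high-frequency) partner inputs are
// zero, so every first-stage butterfly collapses into a single widening
// multiply of one input rather than a multiply-add pair.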
.macro dct_8x8_tx64_core_lsx in0, in1, in2, in3, in4, in5, in6, in7, out0, \
out1, out2, out3, out4, out5, out6, out7, rect2
la.local t0, idct_coeffs
.ifc \rect2, rect2_lsx
vldrepl.w vr23, t0, 0 // 2896
.irp i, \in0, \in1, \in2, \in3, \in4, \in5, \in6, \in7
rect2_lsx \i, vr23, \i
.endr
.endif
la.local t0, idct_coeffs
vldrepl.w vr20, t0, 8 // 1567
vldrepl.w vr21, t0, 12 // 3784
vsllwil.w.h vr22, \in2, 0
vexth.w.h vr23, \in2
vmul.w vr8, vr22, vr20
vmul.w vr10, vr23, vr20
vmul.w \in2, vr22, vr21
vmul.w vr9, vr23, vr21
vssrarni.h.w vr10, vr8, 12 // t2
vssrarni.h.w vr9, \in2, 12 // t3
vldrepl.w vr20, t0, 0 // 2896
vsllwil.w.h vr22, \in0, 0
vexth.w.h vr23, \in0
vmul.w vr8, vr22, vr20
vmul.w \in2, vr23, vr20
vssrarni.h.w \in2, vr8, 12
vsadd.h vr8, \in2, vr9 // c[0]
vssub.h vr9, \in2, vr9 // c[3]
vsadd.h \in0, \in2, vr10 // c[1]
vssub.h vr10, \in2, vr10 // c[2]
// inv_dct8_1d_internal_c tx64
// in1 in3
vldrepl.w vr20, t0, 16 // 799
vldrepl.w vr21, t0, 20 // 4017
vsllwil.w.h vr22, \in1, 0
vexth.w.h vr23, \in1
vmul.w \in2, vr22, vr21
vmul.w \in4, vr23, vr21
vmul.w \in1, vr22, vr20
vmul.w \in6, vr23, vr20
vssrarni.h.w \in4, \in2, 12 // t7a
vssrarni.h.w \in6, \in1, 12 // t4a
vldrepl.w vr20, t0, 24 // 3406
vldrepl.w vr21, t0, 28 // 2276
vsllwil.w.h vr22, \in3, 0
vexth.w.h vr23, \in3
vneg.w vr21, vr21
vmul.w \in2, vr22, vr20
vmul.w \in1, vr23, vr20
vmul.w \in3, vr22, vr21
vmul.w \in7, vr23, vr21
vssrarni.h.w \in1, \in2, 12 // t6a
vssrarni.h.w \in7, \in3, 12 // t5a
vsadd.h \in3, \in6, \in7 // t4
vssub.h \in6, \in6, \in7 // t5a
vsadd.h \in5, \in4, \in1 // t7
vssub.h \in4, \in4, \in1 // t6a
vldrepl.w vr20, t0, 0 // 2896
vmul_vmadd_w \in4, \in6, vr20, vr20, vr21, \in1
vmul_vmsub_w \in4, \in6, vr20, vr20, \in2, \in7
vssrarni.h.w \in1, vr21, 12 // t6
vssrarni.h.w \in7, \in2, 12 // t5
vsadd.h \out0, vr8, \in5 // c[0]
vssub.h \out7, vr8, \in5 // c[7]
vsadd.h \out1, \in0, \in1 // c[1]
vssub.h \out6, \in0, \in1 // c[6]
vsadd.h \out2, vr10, \in7 // c[2]
vssub.h \out5, vr10, \in7 // c[5]
vsadd.h \out3, vr9, \in3 // c[3]
vssub.h \out4, vr9, \in3 // c[4]
.endm
/*
* input: in0, in1, in2, in3, in4, in5, in6, in7 (fixed)
* vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7
* in8, in9, in10, in11, in12, in13, in14, in15
* vr19, vr24, vr25, vr26, vr27, vr28, vr29, vr30
* output: out0, out1, out2, out3, out4, out5, out6, out7 (fixed)
* vr22, vr18, vr17, vr28, vr20, vr14, vr15, vr16
* out8, out9, out10, out11, out12, out13, out14, out15
* vr27, vr30, vr23, vr21, vr29, vr26, vr25, vr24
*/
.macro dct_8x16_tx64_core_lsx rect2
dct_8x8_tx64_core_lsx vr0, vr2, vr4, vr6, vr19, vr25, vr27, vr29, vr11, \
vr12, vr13, vr14, vr15, vr16, vr17, vr18, \rect2
// in1 in3 in5 in7 in9 in11 in13 in15
// vr1 vr3 vr5 vr7 vr24 vr26 vr28 vr30
la.local t0, idct_coeffs
.ifc \rect2, rect2_lsx
vldrepl.w vr23, t0, 0 // 2896
.irp i, vr1, vr3, vr5, vr7, vr24, vr26, vr28, vr30
rect2_lsx \i, vr23, \i
.endr
.endif
vldrepl.w vr20, t0, 32 // 401
vldrepl.w vr21, t0, 36 // 4076
vsllwil.w.h vr22, vr1, 0
vexth.w.h vr23, vr1
vmul.w vr0, vr22, vr21
vmul.w vr10, vr23, vr21
vmul.w vr1, vr22, vr20
vmul.w vr29, vr23, vr20
vssrarni.h.w vr10, vr0, 12 // t15a
vssrarni.h.w vr29, vr1, 12 // t8a
vldrepl.w vr20, t0, 40 // 3166 -> 1583
vldrepl.w vr21, t0, 44 // 2598 -> 1299
vsllwil.w.h vr22, vr7, 0
vexth.w.h vr23, vr7
vneg.w vr21, vr21
vmul.w vr0, vr22, vr20
vmul.w vr30, vr23, vr20
vmul.w vr7, vr22, vr21
vmul.w vr31, vr23, vr21
vssrarni.h.w vr30, vr0, 12 // t14a
vssrarni.h.w vr31, vr7, 12 // t9a
vldrepl.w vr20, t0, 48 // 1931
vldrepl.w vr21, t0, 52 // 3612
vsllwil.w.h vr22, vr5, 0
vexth.w.h vr23, vr5
vmul.w vr0, vr22, vr21
vmul.w vr24, vr23, vr21
vmul.w vr5, vr22, vr20
vmul.w vr25, vr23, vr20
vssrarni.h.w vr24, vr0, 12 // t13a
vssrarni.h.w vr25, vr5, 12 // t10a
vldrepl.w vr20, t0, 56 // 3920
vldrepl.w vr21, t0, 60 // 1189
vsllwil.w.h vr22, vr3, 0
vexth.w.h vr23, vr3
vneg.w vr21, vr21
vmul.w vr0, vr22, vr20
vmul.w vr26, vr23, vr20
vmul.w vr3, vr22, vr21
vmul.w vr27, vr23, vr21
vssrarni.h.w vr26, vr0, 12 // t12a
vssrarni.h.w vr27, vr3, 12 // t11a
// vr22 vr23 vr30 vr31 vr24 vr25 vr26 vr27
vsadd.h vr28, vr29, vr31 // t8
vssub.h vr19, vr29, vr31 // t9
vssub.h vr29, vr27, vr25 // t10
vsadd.h vr9, vr27, vr25 // t11
vsadd.h vr31, vr26, vr24 // t12
vssub.h vr25, vr26, vr24 // t13
vssub.h vr27, vr10, vr30 // t14
vsadd.h vr24, vr10, vr30 // t15
vldrepl.w vr20, t0, 8 // 1567
vldrepl.w vr21, t0, 12 // 3784
vmul_vmadd_w vr27, vr19, vr21, vr20, vr0, vr26
vmul_vmsub_w vr27, vr19, vr20, vr21, vr1, vr30
vssrarni.h.w vr26, vr0, 12 // t14a
vssrarni.h.w vr30, vr1, 12 // t9a
vmul_vmadd_w vr25, vr29, vr21, vr20, vr0, vr19
vneg.w vr0, vr0
vneg.w vr19, vr19
vmul_vmsub_w vr25, vr29, vr20, vr21, vr1, vr27
vssrarni.h.w vr19, vr0, 12 // t10a
vssrarni.h.w vr27, vr1, 12 // t13a
vsadd.h vr25, vr28, vr9 // t8a
vssub.h vr29, vr28, vr9 // t11a
vssub.h vr28, vr24, vr31 // t12a
vsadd.h vr10, vr24, vr31 // t15a
vsadd.h vr9, vr30, vr19 // t9
vssub.h vr31, vr30, vr19 // t10
vssub.h vr30, vr26, vr27 // t13
vsadd.h vr24, vr26, vr27 // t14
vldrepl.w vr20, t0, 0 // 2896
vmul_vmadd_w vr30, vr31, vr20, vr20, vr0, vr26
vmul_vmsub_w vr30, vr31, vr20, vr20, vr1, vr27
vssrarni.h.w vr26, vr0, 12 // t13a
vssrarni.h.w vr27, vr1, 12 // t10a
vmul_vmadd_w vr28, vr29, vr20, vr20, vr0, vr31
vmul_vmsub_w vr28, vr29, vr20, vr20, vr1, vr30
vssrarni.h.w vr31, vr0, 12 // t12
vssrarni.h.w vr30, vr1, 12 // t11
// vr11 vr12 ... vr18
vsadd.h vr28, vr14, vr31 // c[3]
vssub.h vr29, vr14, vr31 // c[12]
vsadd.h vr20, vr15, vr30 // c[4]
vssub.h vr21, vr15, vr30 // c[11]
vsadd.h vr14, vr16, vr27 // c[5]
vssub.h vr23, vr16, vr27 // c[10]
vsadd.h vr15, vr17, vr9 // c[6]
vssub.h vr30, vr17, vr9 // c[9]
vsadd.h vr16, vr18, vr25 // c[7]
vssub.h vr27, vr18, vr25 // c[8]
vsadd.h vr17, vr13, vr26 // c[2]
vssub.h vr26, vr13, vr26 // c[13]
vsadd.h vr18, vr12, vr24 // c[1]
vssub.h vr25, vr12, vr24 // c[14]
vsadd.h vr22, vr11, vr10 // c[0]
vssub.h vr24, vr11, vr10 // c[15]
.endm // dct_8x16_tx64_core_lsx
.macro vmul_vssrarni_hw in0, in1, in2, tmp0, tmp1, out0, out1
vsllwil.w.h vr22, \in0, 0
vexth.w.h vr23, \in0
vmul.w \tmp0, vr22, \in1
vmul.w \out0, vr23, \in1
vmul.w \tmp1, vr22, \in2
vmul.w \out1, vr23, \in2
vssrarni.h.w \out0, \tmp0, 12
vssrarni.h.w \out1, \tmp1, 12
.endm
const idct64_coeffs, align=4
.word 101, 4095, 2967, -2824
.word 1660, 3745, 3822, -1474
.word 4076, 401, 4017, 799
.word 4036, -700, 2359, 3349
.word 3461, -2191, 897, 3996
.word -3166, -2598, -799, -4017
.word 501, 4065, 3229, -2520
.word 2019, 3564, 3948, -1092
.word 3612, 1931, 2276, 3406
.word 4085, -301, 2675, 3102
.word 3659, -1842, 1285, 3889
.word -3920, -1189, -3406, -2276
endconst
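// dct64_step1: expands one group of four input rows (vr0-vr3) with the twelve
// idct64_coeffs words at t0 into eight t32..t63-range intermediates and
// stores them at t6.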
.macro dct64_step1_lsx
vldrepl.w vr20, t0, 0 // 101
vldrepl.w vr21, t0, 4 // 4095
vmul_vssrarni_hw vr0, vr20, vr21, vr16, vr0, vr8, vr9 // vr8 t32a vr9 t63a
vldrepl.w vr20, t0, 8 // 2967
vldrepl.w vr21, t0, 12 // -2824
vmul_vssrarni_hw vr1, vr20, vr21, vr16, vr1, vr10, vr11 // vr10 t62a vr11 t33a
vldrepl.w vr20, t0, 16 // 1660
vldrepl.w vr21, t0, 20 // 3745
vmul_vssrarni_hw vr2, vr20, vr21, vr16, vr2, vr12, vr13 // vr12 t34a vr13 t61a
vldrepl.w vr20, t0, 24 // 3822
vldrepl.w vr21, t0, 28 // -1474
vmul_vssrarni_hw vr3, vr20, vr21, vr16, vr3, vr14, vr15 // vr14 t60a vr15 t35a
vsadd.h vr0, vr8, vr11 // t32
vssub.h vr1, vr8, vr11 // t33
vssub.h vr2, vr15, vr12 // t34
vsadd.h vr3, vr15, vr12 // t35
vsadd.h vr4, vr14, vr13 // t60
vssub.h vr5, vr14, vr13 // t61
vssub.h vr6, vr9, vr10 // t62
vsadd.h vr7, vr9, vr10 // t63
vldrepl.w vr20, t0, 32 // 4076
vldrepl.w vr21, t0, 36 // 401
vmul_vmadd_w vr6, vr1, vr20, vr21, vr9, vr10
vmul_vmsub_w vr6, vr1, vr21, vr20, vr13, vr11
vssrarni.h.w vr10, vr9, 12 // t62a
vssrarni.h.w vr11, vr13, 12 // t33a
vmul_vmadd_w vr5, vr2, vr20, vr21, vr9, vr1
vmul_vmsub_w vr5, vr2, vr21, vr20, vr13, vr6
vneg.w vr9, vr9
vneg.w vr1, vr1
vssrarni.h.w vr6, vr13, 12 // t61a
vssrarni.h.w vr1, vr9, 12 // t34a
vsadd.h vr2, vr0, vr3 // t32a
vssub.h vr5, vr0, vr3 // t35a
vsadd.h vr9, vr11, vr1 // t33
vssub.h vr13, vr11, vr1 // t34
vssub.h vr0, vr7, vr4 // t60a
vsadd.h vr3, vr7, vr4 // t63a
vssub.h vr1, vr10, vr6 // t61
vsadd.h vr11, vr10, vr6 // t62
vldrepl.w vr20, t0, 40 // 4017
vldrepl.w vr21, t0, 44 // 799
vmul_vmadd_w vr1, vr13, vr20, vr21, vr8, vr4
vmul_vmsub_w vr1, vr13, vr21, vr20, vr12, vr7
vssrarni.h.w vr4, vr8, 12 // t61a
vssrarni.h.w vr7, vr12, 12 // t34a
vmul_vmadd_w vr0, vr5, vr20, vr21, vr8, vr6
vmul_vmsub_w vr0, vr5, vr21, vr20, vr12, vr10
vssrarni.h.w vr6, vr8, 12 // t60
vssrarni.h.w vr10, vr12, 12 // t35
vst_x8 t6, 0, 16, vr2, vr9, vr7, vr10, vr6, vr4, vr11, vr3
.endm // dct64_step1
// in1/31/17/15 -> t32a/33/34a/35/60/61a/62/63a
// in7/25/23/ 9 -> t56a/57/58a/59/36/37a/38/39a
// in5/27/21/11 -> t40a/41/42a/43/52/53a/54/55a
// in3/29/19/13 -> t48a/49/50a/51/44/45a/46/47a
.macro dct64_step2_lsx
vld vr0, t5, 0 // t32a
vld vr2, t4, 0 // t63a
vld vr3, t5, 16*8 // t56a
vld vr1, t4, 16*8 // t39a
vld vr4, t5, 16*16 // t40a
vld vr6, t4, 16*16 // t55a
vld vr7, t5, 16*24 // t48a
vld vr5, t4, 16*24 // t47a
vsadd.h vr8, vr0, vr1 // t32
vssub.h vr9, vr0, vr1 // t39
vsadd.h vr10, vr2, vr3 // t63
vssub.h vr11, vr2, vr3 // t56
vssub.h vr12, vr5, vr4 // t40
vsadd.h vr13, vr5, vr4 // t47
vsadd.h vr14, vr7, vr6 // t48
vssub.h vr15, vr7, vr6 // t55
vldrepl.w vr20, t0, 8 // 1567
vldrepl.w vr21, t0, 12 // 3784
vmul_vmadd_w vr11, vr9, vr21, vr20, vr0, vr2
vmul_vmsub_w vr11, vr9, vr20, vr21, vr1, vr3
vssrarni.h.w vr2, vr0, 12 // t56a
vssrarni.h.w vr3, vr1, 12 // t39a
vmul_vmadd_w vr15, vr12, vr21, vr20, vr0, vr4
vmul_vmsub_w vr15, vr12, vr20, vr21, vr1, vr5
vneg.w vr0, vr0
vneg.w vr4, vr4
vssrarni.h.w vr5, vr1, 12 // t55a
vssrarni.h.w vr4, vr0, 12 // t40a
vsadd.h vr9, vr8, vr13 // t32a
vssub.h vr11, vr8, vr13 // t47a
vsadd.h vr6, vr3, vr4 // t39
vssub.h vr7, vr3, vr4 // t40
vssub.h vr12, vr10, vr14 // t48a
vsadd.h vr15, vr10, vr14 // t63a
vssub.h vr0, vr2, vr5 // t55
vsadd.h vr1, vr2, vr5 // t56
vldrepl.w vr20, t0, 0 // 2896
vmul_vmsub_w vr0, vr7, vr20, vr20, vr8, vr13
vmul_vmadd_w vr0, vr7, vr20, vr20, vr3, vr4
vssrarni.h.w vr13, vr8, 12 // t40a
vssrarni.h.w vr4, vr3, 12 // t55a
vmul_vmsub_w vr12, vr11, vr20, vr20, vr8, vr10
vmul_vmadd_w vr12, vr11, vr20, vr20, vr3, vr14
vssrarni.h.w vr10, vr8, 12 // t47
vssrarni.h.w vr14, vr3, 12 // t48
// t32a t39 t40a t47 t48 t55a t56 t63a
// vr9 vr6 vr13 vr10 vr14 vr4 vr1 vr15
vst vr9, t5, 0 // t32a
vst vr6, t4, 0 // t39
vst vr13, t5, 16*8 // t40a
vst vr10, t4, 16*8 // t47
vst vr14, t5, 16*16 // t48
vst vr4, t4, 16*16 // t55a
vst vr1, t5, 16*24 // t56
vst vr15, t4, 16*24 // t63a
.endm // dct64_step2_lsx
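// Combine the first-half (dct32) output rows at t3 with the t56..t63
// intermediates from the scratch buffer into the final columns c[0]..c[7]
// and c[56]..c[63].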
.macro dct64_step3_lsx
// t0 t1 t2 t3 t4 t5 t6 t7
vld_x8 t3, 0, 16, vr2, vr3, vr7, vr8, vr11, vr12, vr16, vr17
vld vr9, t5, 16*24 // t56
vld vr6, t5, 16*24+16 // t57a
vld vr13, t5, 16*24+32 // t58
vld vr10, t5, 16*24+48 // t59a
vld vr14, t4, 16*24-48 // t60
vld vr4, t4, 16*24-32 // t61a
vld vr1, t4, 16*24-16 // t62
vld vr15, t4, 16*24 // t63a
vsadd.h vr20, vr2, vr15 // c[0]
vssub.h vr21, vr2, vr15 // c[63]
vsadd.h vr22, vr3, vr1 // c[1]
vssub.h vr23, vr3, vr1 // c[62]
vsadd.h vr24, vr7, vr4 // c[2]
vssub.h vr25, vr7, vr4 // c[61]
vsadd.h vr26, vr8, vr14 // c[3]
vssub.h vr27, vr8, vr14 // c[60]
vsadd.h vr28, vr11, vr10 // c[4]
vssub.h vr29, vr11, vr10 // c[59]
vsadd.h vr30, vr12, vr13 // c[5]
vssub.h vr31, vr12, vr13 // c[58]
vsadd.h vr2, vr16, vr6 // c[6]
vssub.h vr15, vr16, vr6 // c[57]
vsadd.h vr1, vr17, vr9 // c[7]
vssub.h vr3, vr17, vr9 // c[56]
.endm // dct64_step3_lsx
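// Run dct64_step3_lsx, optionally transpose the two 8x8 result blocks and
// apply a rounding shift, then store both halves to t7 at the given
// start offsets and strides.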
.macro dct64_step4_lsx transpose8x8, shift, start0, stride0, start1, stride1
dct64_step3_lsx
.ifnb \transpose8x8
LSX_TRANSPOSE8x8_H vr20, vr22, vr24, vr26, vr28, vr30, vr2, vr1, \
vr20, vr22, vr24, vr26, vr28, vr30, vr2, vr1, \
vr4, vr7, vr8, vr14, vr10, vr11, vr12, vr13
LSX_TRANSPOSE8x8_H vr3, vr15, vr31, vr29, vr27, vr25, vr23, vr21, \
vr3, vr15, vr31, vr29, vr27, vr25, vr23, vr21, \
vr4, vr7, vr8, vr14, vr10, vr11, vr12, vr13
.endif
.ifnb \shift
.irp i, vr20, vr22, vr24, vr26, vr28, vr30, vr2, vr1, \
vr3, vr15, vr31, vr29, vr27, vr25, vr23, vr21
vsrari.h \i, \i, \shift
.endr
.endif
vst_x8 t7, \start0, \stride0, vr20, vr22, vr24, vr26, vr28, vr30, vr2, vr1
vst_x8 t7, \start1, \stride1, vr3, vr15, vr31, vr29, vr27, vr25, vr23, vr21
.endm // dct64_step4_lsx
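// Final add-back: load 8 rows of 8 destination pixels, widen them to 16 bits,
// add the coefficients rounded right by 4, and store back with saturation to bytes.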
.macro dct64_step5_lsx in0, in1, in2, in3, in4, in5, in6, in7
fld.d f4, t0, 0
fldx.d f5, t0, a1
fld.d f6, t6, 0
fldx.d f7, t6, a1
alsl.d t0, a1, t0, 2
alsl.d t6, a1, t6, 2
fld.d f8, t0, 0
fldx.d f9, t0, a1
fld.d f10, t6, 0
fldx.d f11, t6, a1
.irp i, vr4, vr5, vr6, vr7, vr8, vr9, vr10, vr11
vsllwil.hu.bu \i, \i, 0
.endr
vsrari.h vr20, \in0, 4
vsrari.h vr22, \in1, 4
vsrari.h vr24, \in2, 4
vsrari.h vr26, \in3, 4
vsrari.h vr28, \in4, 4
vsrari.h vr30, \in5, 4
vsrari.h vr2, \in6, 4
vsrari.h vr1, \in7, 4
vadd.h vr4, vr4, vr20
vadd.h vr5, vr5, vr22
vadd.h vr6, vr6, vr24
vadd.h vr7, vr7, vr26
vadd.h vr8, vr8, vr28
vadd.h vr9, vr9, vr30
vadd.h vr10, vr10, vr2
vadd.h vr11, vr11, vr1
vssrani.bu.h vr5, vr4, 0
vssrani.bu.h vr7, vr6, 0
vssrani.bu.h vr9, vr8, 0
vssrani.bu.h vr11, vr10, 0
vstelm.d vr5, t1, 0, 0
vstelm.d vr5, t2, 0, 1
alsl.d t1, a1, t1, 1
alsl.d t2, a1, t2, 1
vstelm.d vr7, t1, 0, 0
vstelm.d vr7, t2, 0, 1
alsl.d t1, a1, t1, 1
alsl.d t2, a1, t2, 1
vstelm.d vr9, t1, 0, 0
vstelm.d vr9, t2, 0, 1
alsl.d t1, a1, t1, 1
alsl.d t2, a1, t2, 1
vstelm.d vr11, t1, 0, 0
vstelm.d vr11, t2, 0, 1
.endm // dct64_step5_lsx
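// 8x32 DCT pass used by the 64-point transforms. For tx64 only the first 32
// input rows can be non-zero, so the first odd-stage butterflies collapse into
// single multiplies against idct_coeffs words 16..31.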
.macro dct_8x32_tx64_new_lsx vld_loc0, stride0, vld_loc1, stride1, rect2
vld_x8 t2, \vld_loc0, \stride0, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7
dct_8x16_tx64_core_lsx \rect2
vst_x16 t3, 0, 16, vr22, vr18, vr17, vr28, vr20, vr14, vr15, vr16, \
vr27, vr30, vr23, vr21, vr29, vr26, vr25, vr24
vxor.v vr31, vr31, vr31
vst_x8 t2, \vld_loc0, \stride0, vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31
vld_x8 t2, \vld_loc1, \stride1, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7
vst_x8 t2, \vld_loc1, \stride1, vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31
la.local t0, idct_coeffs
.ifc \rect2, rect2_lsx
vldrepl.w vr23, t0, 0 // 2896
.irp i, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7
rect2_lsx \i, vr23, \i
.endr
.endif
vldrepl.w vr20, t0, 64 // 201
vldrepl.w vr21, t0, 68 // 4091
vsllwil.w.h vr22, vr0, 0
vexth.w.h vr23, vr0
vmul.w vr8, vr22, vr21
vmul.w vr9, vr23, vr21
vmul.w vr0, vr22, vr20
vmul.w vr10, vr23, vr20
vssrarni.h.w vr9, vr8, 12 // t31a
vssrarni.h.w vr10, vr0, 12 // t16a
vldrepl.w vr20, t0, 72 // 3035
vldrepl.w vr21, t0, 76 // 2751
vsllwil.w.h vr22, vr7, 0
vexth.w.h vr23, vr7
vneg.w vr21, vr21
vmul.w vr8, vr22, vr20
vmul.w vr0, vr23, vr20
vmul.w vr7, vr22, vr21
vmul.w vr30, vr23, vr21
vssrarni.h.w vr0, vr8, 12 // t30a
vssrarni.h.w vr30, vr7, 12 // t17a
vldrepl.w vr20, t0, 80 // 1751
vldrepl.w vr21, t0, 84 // 3703
vsllwil.w.h vr22, vr4, 0
vexth.w.h vr23, vr4
vmul.w vr8, vr22, vr21
vmul.w vr7, vr23, vr21
vmul.w vr4, vr22, vr20
vmul.w vr19, vr23, vr20
vssrarni.h.w vr7, vr8, 12 // t29a
vssrarni.h.w vr19, vr4, 12 // t18a
vldrepl.w vr20, t0, 88 // 3857
vldrepl.w vr21, t0, 92 // 1380
vsllwil.w.h vr22, vr3, 0
vexth.w.h vr23, vr3
vneg.w vr21, vr21
vmul.w vr8, vr22, vr20
vmul.w vr4, vr23, vr20
vmul.w vr3, vr22, vr21
vmul.w vr26, vr23, vr21
vssrarni.h.w vr4, vr8, 12 // t28a
vssrarni.h.w vr26, vr3, 12 // t19a
vldrepl.w vr20, t0, 96 // 995
vldrepl.w vr21, t0, 100 // 3973
vsllwil.w.h vr22, vr2, 0
vexth.w.h vr23, vr2
vmul.w vr8, vr22, vr21
vmul.w vr3, vr23, vr21
vmul.w vr2, vr22, vr20
vmul.w vr27, vr23, vr20
vssrarni.h.w vr3, vr8, 12 // t27a
vssrarni.h.w vr27, vr2, 12 // t20a
vldrepl.w vr20, t0, 104 // 3513
vldrepl.w vr21, t0, 108 // 2106
vsllwil.w.h vr22, vr5, 0
vexth.w.h vr23, vr5
vneg.w vr21, vr21
vmul.w vr8, vr22, vr20
vmul.w vr2, vr23, vr20
vmul.w vr5, vr22, vr21
vmul.w vr28, vr23, vr21
vssrarni.h.w vr2, vr8, 12 // t26a
vssrarni.h.w vr28, vr5, 12 // t21a
vldrepl.w vr20, t0, 112 // 2440 -> 1220
vldrepl.w vr21, t0, 116 // 3290 -> 1645
vsllwil.w.h vr22, vr6, 0
vexth.w.h vr23, vr6
vmul.w vr8, vr22, vr21
vmul.w vr5, vr23, vr21
vmul.w vr6, vr22, vr20
vmul.w vr25, vr23, vr20
vssrarni.h.w vr5, vr8, 12 // t25a
vssrarni.h.w vr25, vr6, 12 // t22a
vldrepl.w vr20, t0, 120 // 4052
vldrepl.w vr21, t0, 124 // 601
vsllwil.w.h vr22, vr1, 0
vexth.w.h vr23, vr1
vneg.w vr21, vr21
vmul.w vr8, vr22, vr20
vmul.w vr6, vr23, vr20
vmul.w vr1, vr22, vr21
vmul.w vr24, vr23, vr21
vssrarni.h.w vr6, vr8, 12 // t24a
vssrarni.h.w vr24, vr1, 12 // t23a
vsadd.h vr1, vr10, vr30 // t16
vssub.h vr29, vr10, vr30 // t17
vssub.h vr8, vr26, vr19 // t18
vsadd.h vr31, vr26, vr19 // t19
vsadd.h vr10, vr27, vr28 // t20
vssub.h vr30, vr27, vr28 // t21
vssub.h vr19, vr24, vr25 // t22
vsadd.h vr26, vr24, vr25 // t23
vsadd.h vr27, vr6, vr5 // t24
vssub.h vr28, vr6, vr5 // t25
vssub.h vr24, vr3, vr2 // t26
vsadd.h vr25, vr3, vr2 // t27
vsadd.h vr5, vr4, vr7 // t28
vssub.h vr6, vr4, vr7 // t29
vssub.h vr2, vr9, vr0 // t30
vsadd.h vr3, vr9, vr0 // t31
vldrepl.w vr20, t0, 16 // 799
vldrepl.w vr21, t0, 20 // 4017
vmul_vmadd_w vr2, vr29, vr21, vr20, vr4, vr7
vmul_vmsub_w vr2, vr29, vr20, vr21, vr11, vr0
vssrarni.h.w vr7, vr4, 12 // t30a
vssrarni.h.w vr0, vr11, 12 // t17a
vmul_vmadd_w vr6, vr8, vr21, vr20, vr4, vr9
vneg.w vr4, vr4
vneg.w vr9, vr9
vmul_vmsub_w vr6, vr8, vr20, vr21, vr11, vr2
vssrarni.h.w vr9, vr4, 12 // t18a
vssrarni.h.w vr2, vr11, 12 // t29a
vldrepl.w vr20, t0, 24 // 3406 -> 1703
vldrepl.w vr21, t0, 28 // 2276 -> 1138
vmul_vmadd_w vr24, vr30, vr21, vr20, vr4, vr29
vmul_vmsub_w vr24, vr30, vr20, vr21, vr11, vr6
vssrarni.h.w vr29, vr4, 12 // t26a
vssrarni.h.w vr6, vr11, 12 // t21a
vmul_vmadd_w vr28, vr19, vr21, vr20, vr4, vr8
vneg.w vr4, vr4
vneg.w vr8, vr8
vmul_vmsub_w vr28, vr19, vr20, vr21, vr11, vr24
vssrarni.h.w vr8, vr4, 12 // t22a
vssrarni.h.w vr24, vr11, 12 // t25a
vsadd.h vr4, vr1, vr31 // t16a
vssub.h vr30, vr1, vr31 // t19a
vsadd.h vr19, vr0, vr9 // t17
vssub.h vr28, vr0, vr9 // t18
vssub.h vr1, vr26, vr10 // t20a
vsadd.h vr31, vr26, vr10 // t23a
vssub.h vr0, vr8, vr6 // t21
vsadd.h vr9, vr8, vr6 // t22
vsadd.h vr10, vr27, vr25 // t24a
vssub.h vr26, vr27, vr25 // t27a
vsadd.h vr6, vr24, vr29 // t25
vssub.h vr8, vr24, vr29 // t26
vssub.h vr25, vr3, vr5 // t28a
vsadd.h vr27, vr3, vr5 // t31a
vssub.h vr24, vr7, vr2 // t29
vsadd.h vr29, vr7, vr2 // t30
vldrepl.w vr20, t0, 8 // 1567
vldrepl.w vr21, t0, 12 // 3784
vmul_vmadd_w vr24, vr28, vr21, vr20, vr3, vr5
vmul_vmsub_w vr24, vr28, vr20, vr21, vr11, vr2
vssrarni.h.w vr5, vr3, 12 // t29a
vssrarni.h.w vr2, vr11, 12 // t18a
vssrarni.h.w vr2, vr11, 12 // t18a
vmul_vmadd_w vr25, vr30, vr21, vr20, vr3, vr7
vmul_vmsub_w vr25, vr30, vr20, vr21, vr11, vr24
vssrarni.h.w vr7, vr3, 12 // t28
vssrarni.h.w vr24, vr11, 12 // t19
vmul_vmadd_w vr26, vr1, vr21, vr20, vr3, vr28
vneg.w vr3, vr3
vneg.w vr28, vr28
vmul_vmsub_w vr26, vr1, vr20, vr21, vr11, vr25
vssrarni.h.w vr28, vr3, 12 // t20
vssrarni.h.w vr25, vr11, 12 // t27
vmul_vmadd_w vr8, vr0, vr21, vr20, vr3, vr30
vneg.w vr3, vr3
vneg.w vr30, vr30
vmul_vmsub_w vr8, vr0, vr20, vr21, vr11, vr1
vssrarni.h.w vr30, vr3, 12 // t21a
vssrarni.h.w vr1, vr11, 12 // t26a
vsadd.h vr3, vr4, vr31 // t16
vssub.h vr26, vr4, vr31 // t23
vsadd.h vr0, vr19, vr9 // t17a
vssub.h vr8, vr19, vr9 // t22a
vsadd.h vr4, vr2, vr30 // t18
vssub.h vr31, vr2, vr30 // t21
vsadd.h vr9, vr24, vr28 // t19a
vssub.h vr19, vr24, vr28 // t20a
vssub.h vr2, vr27, vr10 // t24
vsadd.h vr30, vr27, vr10 // t31
vssub.h vr24, vr29, vr6 // t25a
vsadd.h vr28, vr29, vr6 // t30a
vssub.h vr10, vr5, vr1 // t26
vsadd.h vr27, vr5, vr1 // t29
vssub.h vr6, vr7, vr25 // t27a
vsadd.h vr29, vr7, vr25 // t28a
vldrepl.w vr20, t0, 0 // 2896
vmul_vmsub_w vr6, vr19, vr20, vr20, vr1, vr5
vmul_vmadd_w vr6, vr19, vr20, vr20, vr11, vr7
vssrarni.h.w vr5, vr1, 12 // t20
vssrarni.h.w vr7, vr11, 12 // t27
vmul_vmsub_w vr10, vr31, vr20, vr20, vr1, vr25
vmul_vmadd_w vr10, vr31, vr20, vr20, vr11, vr6
vssrarni.h.w vr25, vr1, 12 // t21a
vssrarni.h.w vr6, vr11, 12 // t26a
vmul_vmsub_w vr24, vr8, vr20, vr20, vr1, vr19
vmul_vmadd_w vr24, vr8, vr20, vr20, vr11, vr10
vssrarni.h.w vr19, vr1, 12 // t22
vssrarni.h.w vr10, vr11, 12 // t25
vmul_vmsub_w vr2, vr26, vr20, vr20, vr1, vr31
vmul_vmadd_w vr2, vr26, vr20, vr20, vr11, vr8
vssrarni.h.w vr31, vr1, 12 // t23a
vssrarni.h.w vr8, vr11, 12 // t24a
// t31 t30a t29 t28a t27 t26a t25 t24a t23a t22 t21a t20 t19a t18 t17a t16
// vr30 vr28 vr27 vr29 vr7 vr6 vr10 vr8 vr31 vr19 vr25 vr5 vr9 vr4 vr0 vr3
vld_x8 t3, 0, 16, vr11, vr12, vr13, vr14, vr15, vr16, vr17, vr18
vsadd.h vr1, vr11, vr30 // c[0]
vssub.h vr2, vr11, vr30 // c[31]
vsadd.h vr24, vr12, vr28 // c[1]
vssub.h vr26, vr12, vr28 // c[30]
vsadd.h vr11, vr13, vr27 // c[2]
vssub.h vr30, vr13, vr27 // c[29]
vsadd.h vr12, vr14, vr29 // c[3]
vssub.h vr28, vr14, vr29 // c[28]
vsadd.h vr13, vr15, vr7 // c[4]
vssub.h vr27, vr15, vr7 // c[27]
vsadd.h vr14, vr16, vr6 // c[5]
vssub.h vr29, vr16, vr6 // c[26]
vsadd.h vr7, vr17, vr10 // c[6]
vssub.h vr15, vr17, vr10 // c[25]
vsadd.h vr6, vr18, vr8 // c[7]
vssub.h vr16, vr18, vr8 // c[24]
vst_x8 t3, 0, 16, vr1, vr24, vr11, vr12, vr13, vr14, vr7, vr6
vst_x8 t3, 384, 16, vr16, vr15, vr29, vr27, vr28, vr30, vr26, vr2
vld_x8 t3, 128, 16, vr11, vr12, vr13, vr14, vr15, vr16, vr17, vr18
vsadd.h vr1, vr11, vr31 // c[8]
vssub.h vr2, vr11, vr31 // c[23]
vsadd.h vr24, vr12, vr19 // c[9]
vssub.h vr26, vr12, vr19 // c[22]
vsadd.h vr11, vr13, vr25 // c[10]
vssub.h vr30, vr13, vr25 // c[21]
vsadd.h vr12, vr14, vr5 // c[11]
vssub.h vr28, vr14, vr5 // c[20]
vsadd.h vr13, vr15, vr9 // c[12]
vssub.h vr27, vr15, vr9 // c[19]
vsadd.h vr14, vr16, vr4 // c[13]
vssub.h vr29, vr16, vr4 // c[18]
vsadd.h vr7, vr17, vr0 // c[14]
vssub.h vr15, vr17, vr0 // c[17]
vsadd.h vr6, vr18, vr3 // c[15]
vssub.h vr16, vr18, vr3 // c[16]
vst_x8 t3, 128, 16, vr1, vr24, vr11, vr12, vr13, vr14, vr7, vr6
vst_x8 t3, 256, 16, vr16, vr15, vr29, vr27, vr28, vr30, vr26, vr2
.endm // dct_8x32_tx64_new_lsx
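// Add eight 16-bit residual vectors \in0..\in7 to one 64-pixel destination row
// held in vr10-vr13 and store it back with saturation to 8 bits.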
.macro DST_ADD_W64 in0, in1, in2, in3, in4, in5, in6, in7
vsllwil.hu.bu vr4, vr10, 0
vsllwil.hu.bu vr5, vr11, 0
vsllwil.hu.bu vr6, vr12, 0
vsllwil.hu.bu vr7, vr13, 0
vexth.hu.bu vr10, vr10
vexth.hu.bu vr11, vr11
vexth.hu.bu vr12, vr12
vexth.hu.bu vr13, vr13
vadd.h vr4, vr4, \in0
vadd.h vr10, vr10, \in1
vadd.h vr5, vr5, \in2
vadd.h vr11, vr11, \in3
vadd.h vr6, vr6, \in4
vadd.h vr12, vr12, \in5
vadd.h vr7, vr7, \in6
vadd.h vr13, vr13, \in7
vssrani.bu.h vr10, vr4, 0
vssrani.bu.h vr11, vr5, 0
vssrani.bu.h vr12, vr6, 0
vssrani.bu.h vr13, vr7, 0
vst vr10, a0, 0
vst vr11, a0, 16
vst vr12, a0, 32
vst vr13, a0, 48
.endm
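// DC-only path for 64-pixel-wide blocks: scale the single DC coefficient,
// broadcast it into vr20 and preload the first destination row into vr10-vr13.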
.macro idct_dc_w64 w, h, shift
ld.h t2, a2, 0
vldi vr0, 0x8b5
vreplgr2vr.w vr1, t2
vldi vr20, 0x880
vmul.w vr2, vr0, vr1
st.h zero, a2, 0
vsrari.w vr2, vr2, 8
vld vr13, a0, 48
.if (2*\w == \h) || (2*\h == \w)
vmul.w vr2, vr2, vr0
vsrari.w vr2, vr2, 8
.endif
.if \shift>0
vsrari.w vr2, vr2, \shift
.endif
vld vr11, a0, 16
vmadd.w vr20, vr2, vr0
vld vr12, a0, 32
vssrarni.h.w vr20, vr20, 12
vld vr10, a0, 0
.endm
function inv_txfm_add_dct_dct_64x64_8bpc_lsx
bnez a3, .NO_HAS_DCONLY_64x64
idct_dc_w64 64, 64, 2
DST_ADD_W64 vr20, vr20, vr20, vr20, vr20, vr20, vr20, vr20
li.w t3, 63
.loop63:
add.d a0, a0, a1
vld vr10, a0, 0
vld vr11, a0, 16
vld vr12, a0, 32
vld vr13, a0, 48
DST_ADD_W64 vr20, vr20, vr20, vr20, vr20, vr20, vr20, vr20
addi.d t3, t3, -1
blt zero, t3, .loop63
b .DCT_DCT_64X64_END
.NO_HAS_DCONLY_64x64:
malloc_space 64*32*2+512+512
.macro dct64x64_core1_lsx shift, rect2
//addi.d t2, a2, \in0
//addi.d t7, t7, \in1
li.w t4, 64*32*2+64
add.d t3, sp, t4
addi.d t6, t3, 512
add.d t5, t6, zero
dct_8x32_tx64_new_lsx 0, 256, 128, 256, \rect2
la.local t0, idct64_coeffs
vxor.v vr31, vr31, vr31
//addi.d a4, a2, \in2 // 32 ...
// in1/31/17/15 -> t32a/33/34a/35/60/61a/62/63a
vld vr0, a4, 128*0 // in1
vld vr1, a4, 128*15 // in31
vld vr2, a4, 128*8 // in17
vld vr3, a4, 128*7 // in15
la.local a6, idct_coeffs
.ifc \rect2, rect2_lsx
vldrepl.w vr23, a6, 0 // 2896
.irp i, vr0, vr1, vr2, vr3
rect2_lsx \i, vr23, \i
.endr
.endif
vst vr31, a4, 128*0
vst vr31, a4, 128*15
vst vr31, a4, 128*8
vst vr31, a4, 128*7
dct64_step1_lsx
addi.d t0, t0, 48
addi.d t6, t6, 128
// in7/25/23/ 9 -> t56a/57/58a/59/36/37a/38/39a
vld vr0, a4, 128*3 // in7
vld vr1, a4, 128*12 // in25
vld vr2, a4, 128*11 // in23
vld vr3, a4, 128*4 // in9
la.local a6, idct_coeffs
.ifc \rect2, rect2_lsx
vldrepl.w vr23, a6, 0 // 2896
.irp i, vr0, vr1, vr2, vr3
rect2_lsx \i, vr23, \i
.endr
.endif
vst vr31, a4, 128*3
vst vr31, a4, 128*12
vst vr31, a4, 128*11
vst vr31, a4, 128*4
dct64_step1_lsx
addi.d t0, t0, 48
addi.d t6, t6, 128
// in5/27/21/11 -> t40a/41/42a/43/52/53a/54/55a
vld vr0, a4, 128*2 // in5
vld vr1, a4, 128*13 // in27
vld vr2, a4, 128*10 // in21
vld vr3, a4, 128*5 // in11
la.local a6, idct_coeffs
.ifc \rect2, rect2_lsx
vldrepl.w vr23, a6, 0 // 2896
.irp i, vr0, vr1, vr2, vr3
rect2_lsx \i, vr23, \i
.endr
.endif
vst vr31, a4, 128*2
vst vr31, a4, 128*13
vst vr31, a4, 128*10
vst vr31, a4, 128*5
dct64_step1_lsx
addi.d t0, t0, 48
addi.d t6, t6, 128
// in3/29/19/13 -> t48a/49/50a/51/44/45a/46/47a
vld vr0, a4, 128*1 // in3
vld vr1, a4, 128*14 // in29
vld vr2, a4, 128*9 // in19
vld vr3, a4, 128*6 // in13
la.local a6, idct_coeffs
.ifc \rect2, rect2_lsx
vldrepl.w vr23, a6, 0 // 2896
.irp i, vr0, vr1, vr2, vr3
rect2_lsx \i, vr23, \i
.endr
.endif
vst vr31, a4, 128*1
vst vr31, a4, 128*14
vst vr31, a4, 128*9
vst vr31, a4, 128*6
dct64_step1_lsx
la.local t0, idct_coeffs
addi.d t4, t5, 16*7
// t32a/t39/t40a/t47/t48/t55a/t56/t63a
dct64_step2_lsx
addi.d t5, t5, 16
addi.d t4, t4, -16
// t33/t38a/t41/t46a/t49a/t54/t57a/t62
dct64_step2_lsx
addi.d t5, t5, 16
addi.d t4, t4, -16
// t34a/t37/t42a/t45/t50/t53a/t58/t61a
dct64_step2_lsx
addi.d t5, t5, 16
addi.d t4, t4, -16
// t35/t36a/t43/t44a/t51a/t52/t59a/t60
dct64_step2_lsx
li.w t4, 64*32*2+64+512
add.d t5, t4, sp
addi.d t4, t5, 16*7
dct64_step4_lsx transpose8x8, \shift, 0, 128, 112, 128
addi.d t3, t3, 128
addi.d t4, t4, -16*8
addi.d t5, t5, -16*8
dct64_step4_lsx transpose8x8, \shift, 16, 128, 96, 128
addi.d t5, t5, -16*8
addi.d t4, t4, -16*8
addi.d t3, t3, 128
dct64_step4_lsx transpose8x8, \shift, 32, 128, 80, 128
addi.d t5, t5, -16*8
addi.d t4, t4, -16*8
addi.d t3, t3, 128
dct64_step4_lsx transpose8x8, \shift, 48, 128, 64, 128
.endm
la.local t8, eob_32x32
addi.d t2, a2, 0
addi.d t7, sp, 64
addi.d t7, t7, 0
addi.d a4, a2, 64
.DCT_DCT_EOB_64x64:
ld.h a5, t8, 0
addi.d t8, t8, 2
dct64x64_core1_lsx 2, no_rect2
addi.d t2, t2, 16
addi.d t7, t7, 128*8
addi.d a4, a4, 16
bge a3, a5, .DCT_DCT_EOB_64x64
la.local t8, eob_32x32
vxor.v vr31, vr31, vr31
ld.h t7, t8, 4
bge a3, t7, .DCT_DCT_EOB_64x64_END
li.d t1, 1024*3+64
add.d t0, sp, t1
.rept 4
vst_x16 t0, 0, 16, vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31, \
vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31
addi.d t0, t0, 256
.endr
ld.h t7, t8, 2
bge a3, t7, .DCT_DCT_EOB_64x64_END
li.d t1, 1024*2+64
add.d t0, sp, t1
.rept 4
vst_x16 t0, 0, 16, vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31, \
vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31
addi.d t0, t0, 256
.endr
ld.h t7, t8, 0
bge a3, t7, .DCT_DCT_EOB_64x64_END
li.d t1, 1024*1+64
add.d t0, sp, t1
.rept 4
vst_x16 t0, 0, 16, vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31, \
vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31
addi.d t0, t0, 256
.endr
.DCT_DCT_EOB_64x64_END:
.macro dct64x64_core2_lsx in0, in1, rect2
addi.d t2, sp, 64+\in0
addi.d t7, sp, 64+\in0
li.w t4, 64*32*2+64
add.d t3, sp, t4
addi.d t6, t3, 512
add.d t5, t6, zero
addi.d t2, t2, 1024
addi.d t2, t2, 1024
dct_8x32_tx64_new_lsx -2048, 512, 256-2048, 512, \rect2
la.local t0, idct64_coeffs
addi.d t2, sp, 64+64*2+\in0
addi.d t4, t2, 256*7
addi.d t4, t4, 256
vld vr0, t2, 256*0 // in1
vld vr1, t4, 256*7 // in31
vld vr2, t4, 256*0 // in17
vld vr3, t2, 256*7 // in15
dct64_step1_lsx
addi.d t0, t0, 48
addi.d t6, t6, 128
vld vr0, t2, 256*3 // in7
vld vr1, t4, 256*4 // in25
vld vr2, t4, 256*3 // in23
vld vr3, t2, 256*4 // in9
dct64_step1_lsx
addi.d t0, t0, 48
addi.d t6, t6, 128
vld vr0, t2, 256*2 // in5
vld vr1, t4, 256*5 // in27
vld vr2, t4, 256*2 // in21
vld vr3, t2, 256*5 // in11
dct64_step1_lsx
addi.d t0, t0, 48
addi.d t6, t6, 128
vld vr0, t2, 256*1 // in3
vld vr1, t4, 256*6 // in29
vld vr2, t4, 256*1 // in19
vld vr3, t2, 256*6 // in13
dct64_step1_lsx
la.local t0, idct_coeffs
addi.d t4, t5, 16*7
// t32a/t39/t40a/t47/t48/t55a/t56/t63a
dct64_step2_lsx
addi.d t5, t5, 16
addi.d t4, t4, -16
// t33/t38a/t41/t46a/t49a/t54/t57a/t62
dct64_step2_lsx
addi.d t5, t5, 16
addi.d t4, t4, -16
// t34a/t37/t42a/t45/t50/t53a/t58/t61a
dct64_step2_lsx
addi.d t5, t5, 16
addi.d t4, t4, -16
// t35/t36a/t43/t44a/t51a/t52/t59a/t60
dct64_step2_lsx
li.w t4, 64*32*2+64+512
add.d t5, t4, sp
addi.d t4, t5, 16*7
addi.d a0, a0, \in1
// 0 - 7, 56 -63
dct64_step3_lsx
li.w t8, 0
mul.w t0, t8, a1
add.d t0, a0, t0
alsl.d t6, a1, t0, 1
addi.d t1, t0, 0
add.d t2, t0, a1
dct64_step5_lsx vr20, vr22, vr24, vr26, vr28, vr30, vr2, vr1
li.w t8, 56
mul.w t0, t8, a1
add.d t0, a0, t0
alsl.d t6, a1, t0, 1
addi.d t1, t0, 0
add.d t2, t0, a1
dct64_step5_lsx vr3, vr15, vr31, vr29, vr27, vr25, vr23, vr21
// 8 - 15, 48 - 55
addi.d t3, t3, 128
addi.d t4, t4, -16*8
addi.d t5, t5, -16*8
dct64_step3_lsx
li.w t8, 8
mul.w t0, t8, a1
add.d t0, t0, a0
alsl.d t6, a1, t0, 1
addi.d t1, t0, 0
add.d t2, t0, a1
dct64_step5_lsx vr20, vr22, vr24, vr26, vr28, vr30, vr2, vr1
li.w t8, 48
mul.w t0, t8, a1
add.d t0, t0, a0
alsl.d t6, a1, t0, 1
addi.d t1, t0, 0
add.d t2, t0, a1
dct64_step5_lsx vr3, vr15, vr31, vr29, vr27, vr25, vr23, vr21
// 16 - 23, 40 - 47
addi.d t3, t3, 128
addi.d t4, t4, -16*8
addi.d t5, t5, -16*8
dct64_step3_lsx
li.w t8, 16
mul.w t0, t8, a1
add.d t0, t0, a0
alsl.d t6, a1, t0, 1
addi.d t1, t0, 0
add.d t2, t0, a1
dct64_step5_lsx vr20, vr22, vr24, vr26, vr28, vr30, vr2, vr1
li.w t8, 40
mul.w t0, t8, a1
add.d t0, t0, a0
alsl.d t6, a1, t0, 1
addi.d t1, t0, 0
add.d t2, t0, a1
dct64_step5_lsx vr3, vr15, vr31, vr29, vr27, vr25, vr23, vr21
// 24 - 31, 32 - 39
addi.d t3, t3, 128
addi.d t4, t4, -16*8
addi.d t5, t5, -16*8
dct64_step3_lsx
li.w t8, 24
mul.w t0, t8, a1
add.d t0, t0, a0
alsl.d t6, a1, t0, 1
addi.d t1, t0, 0
add.d t2, t0, a1
dct64_step5_lsx vr20, vr22, vr24, vr26, vr28, vr30, vr2, vr1
li.w t8, 32
mul.w t0, t8, a1
add.d t0, t0, a0
alsl.d t6, a1, t0, 1
addi.d t1, t0, 0
add.d t2, t0, a1
dct64_step5_lsx vr3, vr15, vr31, vr29, vr27, vr25, vr23, vr21
.endm
dct64x64_core2_lsx 16*0, 0, no_rect2
dct64x64_core2_lsx 16*1, 8, no_rect2
dct64x64_core2_lsx 16*2, 8, no_rect2
dct64x64_core2_lsx 16*3, 8, no_rect2
dct64x64_core2_lsx 16*4, 8, no_rect2
dct64x64_core2_lsx 16*5, 8, no_rect2
dct64x64_core2_lsx 16*6, 8, no_rect2
dct64x64_core2_lsx 16*7, 8, no_rect2
free_space 64*32*2+512+512
.DCT_DCT_64X64_END:
endfunc
function inv_txfm_add_dct_dct_64x32_8bpc_lsx
bnez a3, .NO_HAS_DCONLY_64x32
idct_dc_w64 64, 32, 1
DST_ADD_W64 vr20, vr20, vr20, vr20, vr20, vr20, vr20, vr20
li.w t3, 31
.loop31:
add.d a0, a0, a1
vld vr10, a0, 0
vld vr11, a0, 16
vld vr12, a0, 32
vld vr13, a0, 48
DST_ADD_W64 vr20, vr20, vr20, vr20, vr20, vr20, vr20, vr20
addi.d t3, t3, -1
blt zero, t3, .loop31
b .DCT_DCT_64X32_END
.NO_HAS_DCONLY_64x32:
malloc_space 64*32*2+512+512
la.local t8, eob_32x32
addi.d t2, a2, 0
addi.d t7, sp, 64
addi.d t7, t7, 0
addi.d a4, a2, 64
.DCT_DCT_EOB_64x32:
ld.h a5, t8, 0
addi.d t8, t8, 2
dct64x64_core1_lsx 1, rect2_lsx
addi.d t2, t2, 16
addi.d t7, t7, 128*8
addi.d a4, a4, 16
bge a3, a5, .DCT_DCT_EOB_64x32
la.local t8, eob_32x32
vxor.v vr31, vr31, vr31
ld.h t7, t8, 4
bge a3, t7, .DCT_DCT_EOB_64x32_END
li.d t1, 1024*3+64
add.d t0, sp, t1
.rept 4
vst_x16 t0, 0, 16, vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31, \
vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31
addi.d t0, t0, 256
.endr
ld.h t7, t8, 2
bge a3, t7, .DCT_DCT_EOB_64x32_END
li.d t1, 1024*2+64
add.d t0, sp, t1
.rept 4
vst_x16 t0, 0, 16, vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31, \
vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31
addi.d t0, t0, 256
.endr
ld.h t7, t8, 0
bge a3, t7, .DCT_DCT_EOB_64x32_END
li.d t1, 1024*1+64
add.d t0, sp, t1
.rept 4
vst_x16 t0, 0, 16, vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31, \
vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31
addi.d t0, t0, 256
.endr
.DCT_DCT_EOB_64x32_END:
addi.d t2, sp, 64
li.w t4, 64*32*2+64
add.d t3, sp, t4
addi.d t5, sp, 64
addi.d t5, t5, 1024
addi.d t5, t5, 1024
.rept 8
vld_x8 t2, 0, 256, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7
addi.d t4, t2, 1024
addi.d t4, t4, 1024
vld_x8 t4, 0, 256, vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15
inv_dct16_lsx no_rect2
vst_x16 t3, 0, 16, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15
addi.d t4, t2, 128
vld_x8 t4, 0, 256, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7
addi.d t4, t4, 1024
addi.d t4, t4, 1024
vld_x8 t4, 0, 256, vr19, vr24, vr25, vr26, vr27, vr28, vr29, vr30
dct_8x32_core_lsx t5, t3, 0, 128, 16, -2048, 1024, -1024, 0, 128, , 4
addi.d t2, t2, 16
addi.d t5, t5, 16
addi.d t1, t1, 16
.endr
addi.d t2, sp, 64
li.w t3, 32
.loop32:
vld vr10, a0, 0
vld vr11, a0, 16
vld vr12, a0, 32
vld vr13, a0, 48
vld_x8 t2, 0, 16, vr16, vr17, vr18, vr19, vr20, vr21, vr22, vr23
DST_ADD_W64 vr16, vr17, vr18, vr19, vr20, vr21, vr22, vr23
add.d a0, a0, a1
addi.d t2, t2, 128
addi.d t3, t3, -1
blt zero, t3, .loop32
free_space 64*32*2+512+512
.DCT_DCT_64X32_END:
endfunc
.macro VLD_DST_ADD_W8_H32 in0
vld vr4, t3, 0
vld vr5, t3, 16
vld vr6, t3, 32
vld vr7, t3, 48
VLD_DST_ADD_W8 vr4, vr5, vr6, vr7
addi.d t3, t3, 64
add.d a0, a1, a0
alsl.d t2, a1, t2, 2
vld vr4, t3, 0
vld vr5, t3, 16
vld vr6, t3, 32
vld vr7, t3, 48
VLD_DST_ADD_W8 vr4, vr5, vr6, vr7
addi.d t3, sp, \in0
add.d a0, a1, a0
alsl.d t2, a1, t2, 2
.endm
function inv_txfm_add_dct_dct_8x32_8bpc_lsx
bnez a3, .NO_HAS_DCONLY_8x32
idct_dc 8, 32, 2
DST_ADD_W8 vr10, vr11, vr12, vr13, vr20, vr20, vr20, vr20
.rept 7
add.d a0, a1, a0
alsl.d t2, a1, a0, 1
VLD_DST_ADD_W8 vr20, vr20, vr20, vr20
.endr
b .DCT_DCT_8X32_END
.NO_HAS_DCONLY_8x32:
malloc_space 512
la.local t8, eob_8x32
addi.d t3, sp, 64
addi.d t2, a2, 0
.DCT_DCT_EOB_8x32:
ld.h t7, t8, 0
addi.d t8, t8, 2
vld_x8 a2, 0, 64, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7
inv_dct8_lsx vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, .8h
.irp i, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7
vsrari.h \i, \i, 2
.endr
vxor.v vr31, vr31, vr31
vst_x8 a2, 0, 64, vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31
LSX_TRANSPOSE8x8_H vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr16, vr17, vr18, vr19, vr20, vr21, vr22, vr23
vst_x8 t3, 0, 16, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7
addi.d a2, a2, 16
addi.d t3, t3, 128
bge a3, t7, .DCT_DCT_EOB_8x32
la.local t8, eob_8x32
vxor.v vr31, vr31, vr31
ld.h t7, t8, 4
bge a3, t7, .DCT_DCT_EOB_8x32_END
vst_x8 sp, 64+384, 16, vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31
ld.h t7, t8, 2
bge a3, t7, .DCT_DCT_EOB_8x32_END
vst_x8 sp, 64+256, 16, vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31
ld.h t7, t8, 0
bge a3, t7, .DCT_DCT_EOB_8x32_END
vst_x8 sp, 64+128, 16, vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31
.DCT_DCT_EOB_8x32_END:
addi.d t2, sp, 64
addi.d t3, sp, 64
vld_x16 t2, 0, 32, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15
inv_dct16_lsx .8h
vst_x16 t3, 0, 32, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15
vld_x16 t2, 16, 32, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr19, vr24, vr25, vr26, vr27, vr28, vr29, vr30
dct_8x32_core_lsx t2, t3, 0, 256, 32, 0, 128, 256, 384, 16, , 4
alsl.d t2, a1, a0, 1
addi.d t3, sp, 64
VLD_DST_ADD_W8_H32 320
VLD_DST_ADD_W8_H32 448
VLD_DST_ADD_W8_H32 192
VLD_DST_ADD_W8_H32 0
free_space 512
.DCT_DCT_8X32_END:
endfunc
function inv_txfm_add_identity_identity_8x32_8bpc_lsx
la.local t7, eob_8x32
alsl.d t2, a1, a0, 1
.IDENTITY_IDENTITY_EOB_8x32:
ld.h t6, t7, 0
addi.d t7, t7, 2
vld_x8 a2, 0, 64, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7
vxor.v vr23, vr23, vr23
vst_x8 a2, 0, 64, vr23, vr23, vr23, vr23, vr23, vr23, vr23, vr23
.irp i, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7
vsrari.h \i, \i, 1
.endr
LSX_TRANSPOSE8x8_H vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr16, vr17, vr18, vr19, vr20, vr21, vr22, vr23, \
vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15
.irp i, vr16, vr17, vr18, vr19, vr20, vr21, vr22, vr23
vsrari.h \i, \i, 2
.endr
VLD_DST_ADD_W8 vr16, vr17, vr18, vr19
add.d a0, a1, a0
alsl.d t2, a1, a0, 1
VLD_DST_ADD_W8 vr20, vr21, vr22, vr23
add.d a0, a1, a0
alsl.d t2, a1, a0, 1
addi.d a2, a2, 16
bge a3, t6, .IDENTITY_IDENTITY_EOB_8x32
endfunc
.macro def_fn_16x4_base txfm
functionl inv_txfm_\txfm\()add_16x4_lsx
vld_x8 a2, 0, 16, vr0, vr2, vr4, vr6, vr8, vr10, vr12, vr14
.ifc \txfm, identity_
li.w t0, 1697
vreplgr2vr.w vr20, t0
.irp i, vr0, vr2, vr4, vr6, vr8, vr10, vr12, vr14
inv_identity16_lsx \i, vr20, \i, \i, .8h
.endr
vilvh.d vr1, vr0, vr0
vilvh.d vr3, vr2, vr2
vilvh.d vr5, vr4, vr4
vilvh.d vr7, vr6, vr6
vilvh.d vr9, vr8, vr8
vilvh.d vr11, vr10, vr10
vilvh.d vr13, vr12, vr12
vilvh.d vr15, vr14, vr14
.else
vilvh.d vr1, vr0, vr0
vilvh.d vr3, vr2, vr2
vilvh.d vr5, vr4, vr4
vilvh.d vr7, vr6, vr6
vilvh.d vr9, vr8, vr8
vilvh.d vr11, vr10, vr10
vilvh.d vr13, vr12, vr12
vilvh.d vr15, vr14, vr14
move t6, ra
jirl ra, t7, 0
move ra, t6
.endif
vxor.v vr23, vr23, vr23
vst_x8 a2, 0, 16, vr23, vr23, vr23, vr23, vr23, vr23, vr23, vr23
LSX_TRANSPOSE8x4_H vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, vr0, vr1, \
vr2, vr3, vr16, vr17, vr18, vr19, vr20, vr21
LSX_TRANSPOSE8x4_H vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15, vr4, \
vr5, vr6, vr7, vr16, vr17, vr18, vr19, vr20, vr21
vsrari.h vr0, vr0, 1
vsrari.h vr1, vr1, 1
vsrari.h vr2, vr2, 1
vsrari.h vr3, vr3, 1
move t6, ra
jirl ra, t8, 0
move ra, t6
vsrari.h vr8, vr0, 4
vsrari.h vr9, vr1, 4
vsrari.h vr10, vr2, 4
vsrari.h vr11, vr3, 4
vsrari.h vr0, vr4, 1
vsrari.h vr1, vr5, 1
vsrari.h vr2, vr6, 1
vsrari.h vr3, vr7, 1
move t6, ra
jirl ra, t8, 0
move ra, t6
vsrari.h vr16, vr0, 4
vsrari.h vr17, vr1, 4
vsrari.h vr18, vr2, 4
vsrari.h vr19, vr3, 4
alsl.d t2, a1, a0, 1
VLD_DST_ADD_W16 vr8, vr16, vr9, vr17, vr10, vr18, vr11, vr19
endfuncl
.endm
def_fn_16x4_base identity_
def_fn_16x4_base
.macro fn_16x4 txfm1, txfm2
function inv_txfm_add_\txfm1\()_\txfm2\()_16x4_8bpc_lsx
.ifc \txfm1\()_\txfm2, dct_dct
bnez a3, .NO_HAS_DCONLY_16x4
idct_dc 16, 4, 1
DST_ADD_W16 vr10, vr11, vr12, vr13, vr20, vr20, vr20, \
vr20, vr20, vr20, vr20, vr20
b .\txfm1\()_\txfm2\()_16x4_END
.NO_HAS_DCONLY_16x4:
.endif
.ifnc \txfm1, identity
la.local t7, inv_\txfm1\()_4h_x16_lsx
.endif
la.local t8, inv_\txfm2\()_8h_x4_lsx
.ifc \txfm1, identity
b inv_txfm_identity_add_16x4_lsx
.else
b inv_txfm_add_16x4_lsx
.endif
.\txfm1\()_\txfm2\()_16x4_END:
endfunc
.endm
fn_16x4 dct, dct
fn_16x4 identity, identity
fn_16x4 adst, dct
.macro VLD_DST_ADD_W16_H32 in0
vld vr14, t3, 0
vld vr15, t3, 16
vld vr16, t3, 32
vld vr17, t3, 48
vld vr18, t5, 0
vld vr19, t5, 16
vld vr20, t5, 32
vld vr21, t5, 48
vsrari_h_x8 vr14, vr18, vr15, vr19, vr16, vr20, vr17, vr21, \
vr14, vr18, vr15, vr19, vr16, vr20, vr17, vr21, 4
VLD_DST_ADD_W16 vr14, vr18, vr15, vr19, vr16, vr20, vr17, vr21
alsl.d a0, a1, a0, 2
alsl.d t2, a1, t2, 2
addi.d t3, t3, 64
addi.d t5, t5, 64
vld vr14, t3, 0
vld vr15, t3, 16
vld vr16, t3, 32
vld vr17, t3, 48
vld vr18, t5, 0
vld vr19, t5, 16
vld vr20, t5, 32
vld vr21, t5, 48
vsrari_h_x8 vr14, vr18, vr15, vr19, vr16, vr20, vr17, vr21, \
vr14, vr18, vr15, vr19, vr16, vr20, vr17, vr21, 4
VLD_DST_ADD_W16 vr14, vr18, vr15, vr19, vr16, vr20, vr17, vr21
alsl.d a0, a1, a0, 2
alsl.d t2, a1, t2, 2
addi.d t3, sp, \in0
addi.d t5, sp, \in0+512
.endm
function inv_txfm_add_dct_dct_16x32_8bpc_lsx
bnez a3, .NO_HAS_DCONLY_16x32
idct_dc 16, 32, 1
DST_ADD_W16 vr10, vr11, vr12, vr13, vr20, vr20, vr20, \
vr20, vr20, vr20, vr20, vr20
.rept 7
alsl.d a0, a1, a0, 2
alsl.d t2, a1, a0, 1
VLD_DST_ADD_W16 vr20, vr20, vr20, vr20, vr20, vr20, vr20, vr20
.endr
b .DCT_DCT_16x32_END
.NO_HAS_DCONLY_16x32:
malloc_space 512+512
addi.d t3, sp, 64
la.local t8, eob_16x32
.DCT_DCT_EOB_16x32:
ld.h t7, t8, 0
addi.d t8, t8, 2
vld_x16 a2, 0, 64, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15
vxor.v vr31, vr31, vr31
.irp i, 0, 64, 128, 192, 256, 320, 384, 448, 512, 576, 640, 704, 768, 832, 896, 960
vst vr31, a2, \i
.endr
li.w t0, 2896
vreplgr2vr.w vr23, t0
.irp i, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15
rect2_lsx \i, vr23, \i
.endr
inv_dct16_lsx .8h
LSX_TRANSPOSE8x8_H vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr16, vr17, vr18, vr19, vr20, vr21, vr22, vr23
LSX_TRANSPOSE8x8_H vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15, \
vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15, \
vr16, vr17, vr18, vr19, vr20, vr21, vr22, vr23
.irp i, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15
vsrari.h \i, \i, 1
.endr
vst_x8 t3, 0, 16, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7
vst_x8 t3, 512, 16, vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15
addi.d a2, a2, 16
addi.d t3, t3, 128
bge a3, t7, .DCT_DCT_EOB_16x32
la.local t8, eob_16x32
vxor.v vr31, vr31, vr31
ld.h t7, t8, 4
bge a3, t7, .DCT_DCT_EOB_16x32_END
vst_x8 sp, 64+384, 16, vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31
vst_x8 sp, 64+896, 16, vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31
ld.h t7, t8, 2
bge a3, t7, .DCT_DCT_EOB_16x32_END
vst_x8 sp, 64+256, 16, vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31
vst_x8 sp, 64+768, 16, vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31
ld.h t7, t8, 0
bge a3, t7, .DCT_DCT_EOB_16x32_END
vst_x8 sp, 64+128, 16, vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31
vst_x8 sp, 64+512+128, 16, vr31, vr31, vr31, vr31, vr31, vr31, vr31, vr31
.DCT_DCT_EOB_16x32_END:
addi.d t7, sp, 64
.rept 2
vld_x16 t7, 0, 32, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15
inv_dct16_lsx .8h
vst_x16 t7, 0, 32, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr8, vr9, vr10, vr11, vr12, vr13, vr14, vr15
vld_x16 t7, 16, 32, vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, \
vr19, vr24, vr25, vr26, vr27, vr28, vr29, vr30
dct_8x32_core_lsx t7, t7, 0, 256, 32, 0, 128, 256, 384, 16, ,
addi.d t7, t7, 512
.endr
alsl.d t2, a1, a0, 1
addi.d t3, sp, 64
addi.d t5, sp, 512+64
VLD_DST_ADD_W16_H32 320
VLD_DST_ADD_W16_H32 448
VLD_DST_ADD_W16_H32 192
VLD_DST_ADD_W16_H32 0
free_space 512+512
.DCT_DCT_16x32_END:
endfunc
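// LASX (256-bit) helpers. \out0/\out1 receive the 32-bit even/odd-lane products
// of \in0 * \in2 accumulated with \in1 * \in3.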
.macro xvmulev_xvmaddod_lasx in0, in1, in2, in3, out0, out1
xvmulwev.w.h \out0, \in0, \in2
xvmulwod.w.h \out1, \in0, \in2
xvmaddwev.w.h \out0, \in1, \in3
xvmaddwod.w.h \out1, \in1, \in3
.endm
.macro xvsrari_h_x16 in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, \
in11, in12, in13, in14, in15, out0, out1, out2, out3, \
out4, out5, out6, out7, out8, out9, out10, out11, out12, \
out13, out14, out15, shift
xvsrari.h \out0, \in0, \shift
xvsrari.h \out1, \in1, \shift
xvsrari.h \out2, \in2, \shift
xvsrari.h \out3, \in3, \shift
xvsrari.h \out4, \in4, \shift
xvsrari.h \out5, \in5, \shift
xvsrari.h \out6, \in6, \shift
xvsrari.h \out7, \in7, \shift
xvsrari.h \out8, \in8, \shift
xvsrari.h \out9, \in9, \shift
xvsrari.h \out10, \in10, \shift
xvsrari.h \out11, \in11, \shift
xvsrari.h \out12, \in12, \shift
xvsrari.h \out13, \in13, \shift
xvsrari.h \out14, \in14, \shift
xvsrari.h \out15, \in15, \shift
.endm
.macro xvpermi_q_x2 in0, in1, in2, in3, out0, out1, out2, out3, tmp0, tmp1
xvor.v \tmp0, \in0, \in0
xvor.v \tmp1, \in1, \in1
xvpermi.q \out0, \in2, 0x02
xvpermi.q \out1, \in3, 0x02
xvpermi.q \out2, \tmp0, 0x31
xvpermi.q \out3, \tmp1, 0x31
.endm
.macro DST_ADD_W16_LASX in0, in1, in2, in3, in4, in5, in6, in7
vext2xv.hu.bu xr0, \in0
vext2xv.hu.bu xr1, \in1
vext2xv.hu.bu xr2, \in2
vext2xv.hu.bu xr3, \in3
xvadd.h xr0, xr0, \in4
xvadd.h xr1, xr1, \in5
xvadd.h xr2, xr2, \in6
xvadd.h xr3, xr3, \in7
xvssrani.bu.h xr1, xr0, 0
xvssrani.bu.h xr3, xr2, 0
xvpermi.d xr0, xr1, 0b11011000
xvpermi.d xr2, xr3, 0b11011000
xvpermi.d xr1, xr0, 0b00001110
xvpermi.d xr3, xr2, 0b00001110
vst vr0, a0, 0
vstx vr1, a0, a1
vst vr2, t2, 0
vstx vr3, t2, a1
.endm
.macro XVLD_DST_ADD_W16 in0, in1, in2, in3
vld vr0, a0, 0
vldx vr1, a0, a1
vld vr2, t2, 0
vldx vr3, t2, a1
DST_ADD_W16_LASX xr0, xr1, xr2, xr3, \in0, \in1, \in2, \in3
.endm
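// 16-point inverse ADST over 16 columns at once; the results stay scattered
// across xr0..xr19 (see the out[n] comments below) and are reordered by the caller.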
.macro inv_adst16_lasx
la.local t0, iadst16_coeffs_h
xvldrepl.h xr20, t0, 0 // 4091
xvldrepl.h xr21, t0, 2 // 201
xvmulev_xvmaddod_lasx xr15, xr0, xr20, xr21, xr16, xr18
xvneg.h xr20, xr20
xvmulev_xvmaddod_lasx xr15, xr0, xr21, xr20, xr17, xr19
xvilvl.w xr15, xr18, xr16
xvilvl.w xr0, xr19, xr17
xvilvh.w xr18, xr18, xr16
xvilvh.w xr19, xr19, xr17
xvssrarni.h.w xr18, xr15, 12 // t0
xvssrarni.h.w xr19, xr0, 12 // t1
xvldrepl.h xr20, t0, 4 // 3973
xvldrepl.h xr21, t0, 6 // 995
xvmulev_xvmaddod_lasx xr13, xr2, xr20, xr21, xr16, xr0
xvneg.h xr20, xr20
xvmulev_xvmaddod_lasx xr13, xr2, xr21, xr20, xr17, xr15
xvilvl.w xr13, xr0, xr16
xvilvl.w xr2, xr15, xr17
xvilvh.w xr0, xr0, xr16
xvilvh.w xr15, xr15, xr17
xvssrarni.h.w xr0, xr13, 12 // t2
xvssrarni.h.w xr15, xr2, 12 // t3
xvldrepl.h xr20, t0, 8 // 3703
xvldrepl.h xr21, t0, 10 // 1751
xvmulev_xvmaddod_lasx xr11, xr4, xr20, xr21, xr16, xr2
xvneg.h xr20, xr20
xvmulev_xvmaddod_lasx xr11, xr4, xr21, xr20, xr17, xr13
xvilvl.w xr11, xr2, xr16
xvilvl.w xr4, xr13, xr17
xvilvh.w xr2, xr2, xr16
xvilvh.w xr13, xr13, xr17
xvssrarni.h.w xr2, xr11, 12 // t4
xvssrarni.h.w xr13, xr4, 12 // t5
xvldrepl.h xr20, t0, 12 // 3290 -> 1645
xvldrepl.h xr21, t0, 14 // 2440 -> 1220
xvmulev_xvmaddod_lasx xr9, xr6, xr20, xr21, xr16, xr4
xvneg.h xr20, xr20
xvmulev_xvmaddod_lasx xr9, xr6, xr21, xr20, xr17, xr11
xvilvl.w xr9, xr4, xr16
xvilvl.w xr6, xr11, xr17
xvilvh.w xr4, xr4, xr16
xvilvh.w xr11, xr11, xr17
xvssrarni.h.w xr4, xr9, 12 // t6
xvssrarni.h.w xr11, xr6, 12 // t7
xvldrepl.h xr20, t0, 16 // 2751
xvldrepl.h xr21, t0, 18 // 3035
xvmulev_xvmaddod_lasx xr7, xr8, xr20, xr21, xr16, xr6
xvneg.h xr20, xr20
xvmulev_xvmaddod_lasx xr7, xr8, xr21, xr20, xr17, xr9
xvilvl.w xr7, xr6, xr16
xvilvl.w xr8, xr9, xr17
xvilvh.w xr6, xr6, xr16
xvilvh.w xr9, xr9, xr17
xvssrarni.h.w xr6, xr7, 12 // t8
xvssrarni.h.w xr9, xr8, 12 // t9
xvldrepl.h xr20, t0, 20 // 2106
xvldrepl.h xr21, t0, 22 // 3513
xvmulev_xvmaddod_lasx xr5, xr10, xr20, xr21, xr16, xr7
xvneg.h xr20, xr20
xvmulev_xvmaddod_lasx xr5, xr10, xr21, xr20, xr17, xr8
xvilvl.w xr5, xr7, xr16
xvilvl.w xr10, xr8, xr17
xvilvh.w xr7, xr7, xr16
xvilvh.w xr8, xr8, xr17
xvssrarni.h.w xr7, xr5, 12 // t10
xvssrarni.h.w xr8, xr10, 12 // t11
xvldrepl.h xr20, t0, 24 // 1380
xvldrepl.h xr21, t0, 26 // 3857
xvmulev_xvmaddod_lasx xr3, xr12, xr20, xr21, xr16, xr5
xvneg.h xr20, xr20
xvmulev_xvmaddod_lasx xr3, xr12, xr21, xr20, xr17, xr10
xvilvl.w xr3, xr5, xr16
xvilvl.w xr12, xr10, xr17
xvilvh.w xr5, xr5, xr16
xvilvh.w xr10, xr10, xr17
xvssrarni.h.w xr5, xr3, 12 // t12
xvssrarni.h.w xr10, xr12, 12 // t13
xvldrepl.h xr20, t0, 28 // 601
xvldrepl.h xr21, t0, 30 // 4052
xvmulev_xvmaddod_lasx xr1, xr14, xr20, xr21, xr16, xr3
xvneg.h xr20, xr20
xvmulev_xvmaddod_lasx xr1, xr14, xr21, xr20, xr17, xr12
xvilvl.w xr1, xr3, xr16
xvilvl.w xr14, xr12, xr17
xvilvh.w xr3, xr3, xr16
xvilvh.w xr12, xr12, xr17
xvssrarni.h.w xr3, xr1, 12 // t14
xvssrarni.h.w xr12, xr14, 12 // t15
xvsadd.h xr1, xr18, xr6 // t0a
xvssub.h xr14, xr18, xr6 // t8a
xvsadd.h xr16, xr19, xr9 // t1a
xvssub.h xr17, xr19, xr9 // t9a
xvsadd.h xr6, xr0, xr7 // t2a
xvssub.h xr18, xr0, xr7 // t10a
xvsadd.h xr9, xr15, xr8 // t3a
xvssub.h xr19, xr15, xr8 // t11a
xvsadd.h xr0, xr2, xr5 // t4a
xvssub.h xr7, xr2, xr5 // t12a
xvsadd.h xr8, xr13, xr10 // t5a
xvssub.h xr15, xr13, xr10 // t13a
xvsadd.h xr2, xr4, xr3 // t6a
xvssub.h xr5, xr4, xr3 // t14a
xvsadd.h xr10, xr11, xr12 // t7a
xvssub.h xr13, xr11, xr12 // t15a
la.local t0, idct_coeffs_h
xvldrepl.h xr20, t0, 8 // 799
xvldrepl.h xr21, t0, 10 // 4017
xvmulev_xvmaddod_lasx xr14, xr17, xr21, xr20, xr3, xr11
xvneg.h xr21, xr21
xvmulev_xvmaddod_lasx xr14, xr17, xr20, xr21, xr4, xr12
xvilvl.w xr14, xr11, xr3
xvilvl.w xr17, xr12, xr4
xvilvh.w xr11, xr11, xr3
xvilvh.w xr12, xr12, xr4
xvssrarni.h.w xr11, xr14, 12 // t8
xvssrarni.h.w xr12, xr17, 12 // t9
xvneg.h xr21, xr21
xvmulev_xvmaddod_lasx xr15, xr7, xr20, xr21, xr3, xr14
xvneg.h xr20, xr20
xvmulev_xvmaddod_lasx xr15, xr7, xr21, xr20, xr4, xr17
xvilvl.w xr15, xr14, xr3
xvilvl.w xr7, xr17, xr4
xvilvh.w xr14, xr14, xr3
xvilvh.w xr17, xr17, xr4
xvssrarni.h.w xr14, xr15, 12 // t13
xvssrarni.h.w xr17, xr7, 12 // t12
xvldrepl.h xr20, t0, 12 // 3406
xvldrepl.h xr21, t0, 14 // 2276
xvmulev_xvmaddod_lasx xr18, xr19, xr21, xr20, xr3, xr7
xvneg.h xr21, xr21
xvmulev_xvmaddod_lasx xr18, xr19, xr20, xr21, xr4, xr15
xvilvl.w xr18, xr7, xr3
xvilvl.w xr19, xr15, xr4
xvilvh.w xr7, xr7, xr3
xvilvh.w xr15, xr15, xr4
xvssrarni.h.w xr7, xr18, 12 // t10
xvssrarni.h.w xr15, xr19, 12 // t11
xvneg.h xr21, xr21
xvmulev_xvmaddod_lasx xr13, xr5, xr20, xr21, xr3, xr18
xvneg.h xr20, xr20
xvmulev_xvmaddod_lasx xr13, xr5, xr21, xr20, xr4, xr19
xvilvl.w xr13, xr18, xr3
xvilvl.w xr5, xr19, xr4
xvilvh.w xr18, xr18, xr3
xvilvh.w xr19, xr19, xr4
xvssrarni.h.w xr18, xr13, 12 // t15
xvssrarni.h.w xr19, xr5, 12 // t14
xvsadd.h xr5, xr1, xr0 // t0
xvssub.h xr13, xr1, xr0 // t4
xvsadd.h xr3, xr16, xr8 // t1
xvssub.h xr4, xr16, xr8 // t5
xvsadd.h xr0, xr6, xr2 // t2
xvssub.h xr1, xr6, xr2 // t6
xvsadd.h xr8, xr9, xr10 // t3
xvssub.h xr16, xr9, xr10 // t7
xvsadd.h xr2, xr11, xr17 // t8a
xvssub.h xr6, xr11, xr17 // t12a
xvsadd.h xr9, xr12, xr14 // t9a
xvssub.h xr10, xr12, xr14 // t13a
xvsadd.h xr11, xr7, xr19 // t10a
xvssub.h xr17, xr7, xr19 // t14a
xvsadd.h xr12, xr15, xr18 // t11a
xvssub.h xr14, xr15, xr18 // t15a
la.local t0, idct_coeffs_h
xvldrepl.h xr20, t0, 4 // 1567
xvldrepl.h xr21, t0, 6 // 3784
xvmulev_xvmaddod_lasx xr13, xr4, xr21, xr20, xr7, xr18
xvneg.h xr21, xr21
xvmulev_xvmaddod_lasx xr13, xr4, xr20, xr21, xr15, xr19
xvilvl.w xr13, xr18, xr7
xvilvl.w xr4, xr19, xr15
xvilvh.w xr18, xr18, xr7
xvilvh.w xr19, xr19, xr15
xvssrarni.h.w xr18, xr13, 12 // t4a
xvssrarni.h.w xr19, xr4, 12 // t5a
xvneg.h xr21, xr21
xvmulev_xvmaddod_lasx xr16, xr1, xr20, xr21, xr7, xr4
xvneg.h xr20, xr20
xvmulev_xvmaddod_lasx xr16, xr1, xr21, xr20, xr15, xr13
xvilvl.w xr16, xr4, xr7
xvilvl.w xr1, xr13, xr15
xvilvh.w xr4, xr4, xr7
xvilvh.w xr13, xr13, xr15
xvssrarni.h.w xr4, xr16, 12 // t7a
xvssrarni.h.w xr13, xr1, 12 // t6a
xvneg.h xr20, xr20
xvmulev_xvmaddod_lasx xr6, xr10, xr21, xr20, xr7, xr1
xvneg.h xr21, xr21
xvmulev_xvmaddod_lasx xr6, xr10, xr20, xr21, xr15, xr16
xvilvl.w xr6, xr1, xr7
xvilvl.w xr10, xr16, xr15
xvilvh.w xr1, xr1, xr7
xvilvh.w xr16, xr16, xr15
xvssrarni.h.w xr1, xr6, 12 // t12
xvssrarni.h.w xr16, xr10, 12 // t13
xvneg.h xr21, xr21
xvmulev_xvmaddod_lasx xr14, xr17, xr20, xr21, xr7, xr6
xvneg.h xr20, xr20
xvmulev_xvmaddod_lasx xr14, xr17, xr21, xr20, xr15, xr10
xvilvl.w xr14, xr6, xr7
xvilvl.w xr17, xr10, xr15
xvilvh.w xr6, xr6, xr7
xvilvh.w xr10, xr10, xr15
xvssrarni.h.w xr6, xr14, 12 // t15
xvssrarni.h.w xr10, xr17, 12 // t14
xvsadd.h xr14, xr5, xr0 // out[0]
xvssub.h xr17, xr5, xr0 // t2a
xvssub.h xr7, xr3, xr8 // t3a
xvsadd.h xr15, xr3, xr8 // out[15]
xvsllwil.w.h xr22, xr15, 0
xvexth.w.h xr15, xr15
xvneg.w xr22, xr22
xvneg.w xr15, xr15
xvssrarni.h.w xr15, xr22, 0 // out[15]
xvssub.h xr7, xr3, xr8 // t3a
xvsadd.h xr3, xr19, xr4 // out[12]
xvssub.h xr8, xr19, xr4 // t7
xvssub.h xr0, xr18, xr13 // t6
xvsadd.h xr5, xr18, xr13 // out[3]
xvsllwil.w.h xr22, xr5, 0
xvexth.w.h xr5, xr5
xvneg.w xr22, xr22
xvneg.w xr5, xr5
xvssrarni.h.w xr5, xr22, 0 // out[3]
xvsadd.h xr13, xr9, xr12 // out[14]
xvssub.h xr19, xr9, xr12 // t11
xvssub.h xr4, xr2, xr11 // t10
xvsadd.h xr18, xr2, xr11 // out[1]
xvsllwil.w.h xr22, xr18, 0
xvexth.w.h xr18, xr18
xvneg.w xr22, xr22
xvneg.w xr18, xr18
xvssrarni.h.w xr18, xr22, 0 // out[1]
xvsadd.h xr2, xr1, xr10 // out[2]
xvssub.h xr11, xr1, xr10 // t14a
xvssub.h xr12, xr16, xr6 // t15a
xvsadd.h xr9, xr16, xr6 // out[13]
xvsllwil.w.h xr22, xr9, 0
xvexth.w.h xr9, xr9
xvneg.w xr22, xr22
xvneg.w xr9, xr9
xvssrarni.h.w xr9, xr22, 0 // out[13]
xvldrepl.h xr20, t0, 0 // 2896
xvmulev_xvmaddod_lasx xr17, xr7, xr20, xr20, xr6, xr10
xvneg.h xr21, xr20
xvmulev_xvmaddod_lasx xr17, xr7, xr20, xr21, xr16, xr1
xvilvl.w xr17, xr10, xr6
xvilvl.w xr7, xr1, xr16
xvilvh.w xr10, xr10, xr6
xvilvh.w xr1, xr1, xr16
xvssrarni.h.w xr1, xr7, 12 // out[8]
xvsrari.w xr17, xr17, 12
xvsrari.w xr10, xr10, 12
xvneg.w xr17, xr17
xvneg.w xr10, xr10
xvssrarni.h.w xr10, xr17, 0 // out[7]
xvmulev_xvmaddod_lasx xr0, xr8, xr20, xr21, xr16, xr17
xvmulev_xvmaddod_lasx xr0, xr8, xr20, xr20, xr6, xr7
xvilvl.w xr0, xr17, xr16
xvilvl.w xr8, xr7, xr6
xvilvh.w xr17, xr17, xr16
xvilvh.w xr7, xr7, xr6
xvssrarni.h.w xr7, xr8, 12 // out[4]
xvsrari.w xr0, xr0, 12
xvsrari.w xr17, xr17, 12
xvneg.w xr0, xr0
xvneg.w xr17, xr17
xvssrarni.h.w xr17, xr0, 0 // out[11]
xvmulev_xvmaddod_lasx xr4, xr19, xr20, xr21, xr16, xr0
xvmulev_xvmaddod_lasx xr4, xr19, xr20, xr20, xr6, xr8
xvilvl.w xr4, xr0, xr16
xvilvl.w xr19, xr8, xr6
xvilvh.w xr0, xr0, xr16
xvilvh.w xr8, xr8, xr6
xvssrarni.h.w xr8, xr19, 12 // out[6]
xvsrari.w xr4, xr4, 12
xvsrari.w xr0, xr0, 12
xvneg.w xr4, xr4
xvneg.w xr0, xr0
xvssrarni.h.w xr0, xr4, 0 // out[9]
xvmulev_xvmaddod_lasx xr11, xr12, xr20, xr20, xr6, xr4
xvmulev_xvmaddod_lasx xr11, xr12, xr20, xr21, xr16, xr19
xvilvl.w xr11, xr4, xr6
xvilvl.w xr12, xr19, xr16
xvilvh.w xr4, xr4, xr6
xvilvh.w xr19, xr19, xr16
xvssrarni.h.w xr19, xr12, 12 // out[10]
xvsrari.w xr11, xr11, 12
xvsrari.w xr4, xr4, 12
xvneg.w xr11, xr11
xvneg.w xr4, xr4
xvssrarni.h.w xr4, xr11, 0 // out[5]
.endm
function inv_txfm_add_adst_adst_16x16_8bpc_lasx
PUSH_REG
xvld_x16 a2, 0, 32, xr0, xr1, xr2, xr3, xr4, xr5, xr6, xr7, \
xr8, xr9, xr10, xr11, xr12, xr13, xr14, xr15
inv_adst16_lasx
LASX_TRANSPOSE8x8_H xr14, xr18, xr2, xr5, xr7, xr4, xr8, xr10, \
xr14, xr18, xr2, xr5, xr7, xr28, xr6, xr10, \
xr20, xr21, xr22, xr23, xr24, xr25, xr26, xr27
LASX_TRANSPOSE8x8_H xr1, xr0, xr19, xr17, xr3, xr9, xr13, xr15, \
xr29, xr30, xr11, xr17, xr31, xr19, xr16, xr15, \
xr20, xr21, xr22, xr23, xr24, xr25, xr26, xr27
xvsrari_h_x16 xr14, xr18, xr2, xr5, xr7, xr28, xr6, xr10, \
xr29, xr30, xr11, xr17, xr31, xr19, xr16, xr15, \
xr0, xr1, xr2, xr3, xr4, xr5, xr6, xr7, \
xr8, xr9, xr10, xr11, xr12, xr13, xr14, xr15, 2
xvpermi_q_x2 xr0, xr1, xr8, xr9, xr0, xr1, xr8, xr9, xr20, xr21
xvpermi_q_x2 xr2, xr3, xr10, xr11, xr2, xr3, xr10, xr11, xr20, xr21
xvpermi_q_x2 xr4, xr5, xr12, xr13, xr4, xr5, xr12, xr13, xr20, xr21
xvpermi_q_x2 xr6, xr7, xr14, xr15, xr6, xr7, xr14, xr15, xr20, xr21
inv_adst16_lasx
xvsrari_h_x16 xr14, xr18, xr2, xr5, xr7, xr4, xr8, xr10, \
xr1, xr0, xr19, xr17, xr3, xr9, xr13, xr15, \
xr14, xr18, xr11, xr5, xr7, xr4, xr8, xr10, \
xr12, xr16, xr19, xr17, xr20, xr9, xr13, xr15, 4
xvxor.v xr23, xr23, xr23
.irp i, 0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448, 480
xvst xr23, a2, \i
.endr
alsl.d t2, a1, a0, 1
XVLD_DST_ADD_W16 xr14, xr18, xr11, xr5
alsl.d a0, a1, a0, 2
alsl.d t2, a1, a0, 1
XVLD_DST_ADD_W16 xr7, xr4, xr8, xr10
alsl.d a0, a1, a0, 2
alsl.d t2, a1, a0, 1
XVLD_DST_ADD_W16 xr12, xr16, xr19, xr17
alsl.d a0, a1, a0, 2
alsl.d t2, a1, a0, 1
XVLD_DST_ADD_W16 xr20, xr9, xr13, xr15
POP_REG
endfunc
// third_party/dav1d/src/loongarch/looprestoration.S
/*
* Copyright © 2023, VideoLAN and dav1d authors
* Copyright © 2023, Loongson Technology Corporation Limited
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/loongarch/loongson_asm.S"
#define REST_UNIT_STRIDE (400)
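// Widen the unsigned bytes of \in0 to halfwords and multiply-accumulate them
// with the filter tap \in1 into \out0 (low half) and \out1 (high half).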
.macro MADD_HU_BU in0, in1, out0, out1
vsllwil.hu.bu vr12, \in0, 0
vexth.hu.bu vr13, \in0
vmadd.h \out0, vr12, \in1
vmadd.h \out1, vr13, \in1
.endm
const wiener_shuf
.byte 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18
endconst
/*
void wiener_filter_h_lsx(int32_t *hor_ptr,
uint8_t *tmp_ptr,
const int16_t filterh[8],
const int w, const int h)
*/
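// 7-tap horizontal Wiener filter over the padded REST_UNIT_STRIDE buffer;
// each sum is offset, rounded right by 3 and clipped, then kept as a 32-bit
// intermediate in hor_ptr for the vertical pass.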
function wiener_filter_h_8bpc_lsx
addi.d sp, sp, -40
fst.d f24, sp, 0
fst.d f25, sp, 8
fst.d f26, sp, 16
fst.d f27, sp, 24
fst.d f28, sp, 32
li.w t7, 1<<14 // clip_limit
la.local t1, wiener_shuf
vld vr4, t1, 0
vld vr14, a2, 0 // filter[0][k]
vreplvei.h vr21, vr14, 0
vreplvei.h vr22, vr14, 1
vreplvei.h vr23, vr14, 2
vreplvei.h vr24, vr14, 3
vreplvei.h vr25, vr14, 4
vreplvei.h vr26, vr14, 5
vreplvei.h vr27, vr14, 6
vreplgr2vr.w vr0, t7
.WIENER_FILTER_H_H:
addi.w a4, a4, -1 // h
addi.w t0, a3, 0 // w
addi.d t1, a1, 0 // tmp_ptr
addi.d t2, a0, 0 // hor_ptr
.WIENER_FILTER_H_W:
addi.w t0, t0, -16
vld vr5, t1, 0
vld vr13, t1, 16
vsubi.bu vr14, vr4, 2
vsubi.bu vr15, vr4, 1
vshuf.b vr6, vr13, vr5, vr14 // 1 ... 8, 9 ... 16
vshuf.b vr7, vr13, vr5, vr15 // 2 ... 9, 10 ... 17
vshuf.b vr8, vr13, vr5, vr4 // 3 ... 10, 11 ... 18
vaddi.bu vr14, vr4, 1
vaddi.bu vr15, vr4, 2
vshuf.b vr9, vr13, vr5, vr14 // 4 ... 11, 12 ... 19
vshuf.b vr10, vr13, vr5, vr15 // 5 ... 12, 13 ... 20
vaddi.bu vr14, vr4, 3
vshuf.b vr11, vr13, vr5, vr14 // 6 ... 13, 14 ... 21
vsllwil.hu.bu vr15, vr8, 0 // 3 4 5 6 7 8 9 10
vexth.hu.bu vr16, vr8 // 11 12 13 14 15 16 17 18
vsllwil.wu.hu vr17, vr15, 7 // 3 4 5 6
vexth.wu.hu vr18, vr15 // 7 8 9 10
vsllwil.wu.hu vr19, vr16, 7 // 11 12 13 14
vexth.wu.hu vr20, vr16 // 15 16 17 18
vslli.w vr18, vr18, 7
vslli.w vr20, vr20, 7
vxor.v vr15, vr15, vr15
vxor.v vr14, vr14, vr14
MADD_HU_BU vr5, vr21, vr14, vr15
MADD_HU_BU vr6, vr22, vr14, vr15
MADD_HU_BU vr7, vr23, vr14, vr15
MADD_HU_BU vr8, vr24, vr14, vr15
MADD_HU_BU vr9, vr25, vr14, vr15
MADD_HU_BU vr10, vr26, vr14, vr15
MADD_HU_BU vr11, vr27, vr14, vr15
vsllwil.w.h vr5, vr14, 0 // 0 1 2 3
vexth.w.h vr6, vr14 // 4 5 6 7
vsllwil.w.h vr7, vr15, 0 // 8 9 10 11
vexth.w.h vr8, vr15 // 12 13 14 15
vadd.w vr17, vr17, vr5
vadd.w vr18, vr18, vr6
vadd.w vr19, vr19, vr7
vadd.w vr20, vr20, vr8
vadd.w vr17, vr17, vr0
vadd.w vr18, vr18, vr0
vadd.w vr19, vr19, vr0
vadd.w vr20, vr20, vr0
vsrli.w vr1, vr0, 1
vsubi.wu vr1, vr1, 1
vxor.v vr3, vr3, vr3
vsrari.w vr17, vr17, 3
vsrari.w vr18, vr18, 3
vsrari.w vr19, vr19, 3
vsrari.w vr20, vr20, 3
vclip.w vr17, vr17, vr3, vr1
vclip.w vr18, vr18, vr3, vr1
vclip.w vr19, vr19, vr3, vr1
vclip.w vr20, vr20, vr3, vr1
vst vr17, t2, 0
vst vr18, t2, 16
vst vr19, t2, 32
vst vr20, t2, 48
addi.d t1, t1, 16
addi.d t2, t2, 64
blt zero, t0, .WIENER_FILTER_H_W
addi.d a1, a1, REST_UNIT_STRIDE
addi.d a0, a0, (REST_UNIT_STRIDE << 2)
bnez a4, .WIENER_FILTER_H_H
fld.d f24, sp, 0
fld.d f25, sp, 8
fld.d f26, sp, 16
fld.d f27, sp, 24
fld.d f28, sp, 32
addi.d sp, sp, 40
endfunc
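// Load four vectors of 32-bit horizontal sums at \in1 + \in0*4 and
// multiply-accumulate them with the vertical tap \in2 into vr14-vr17.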
.macro APPLY_FILTER in0, in1, in2
alsl.d t7, \in0, \in1, 2
vld vr10, t7, 0
vld vr11, t7, 16
vld vr12, t7, 32
vld vr13, t7, 48
vmadd.w vr14, vr10, \in2
vmadd.w vr15, vr11, \in2
vmadd.w vr16, vr12, \in2
vmadd.w vr17, vr13, \in2
.endm
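// Vertical 7-tap core: seed the accumulators with -(1<<18), apply the seven
// taps via APPLY_FILTER, then narrow to unsigned bytes (saturating rounding
// shift by 11, then pack).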
.macro wiener_filter_v_8bpc_core_lsx
vreplgr2vr.w vr14, t6
vreplgr2vr.w vr15, t6
vreplgr2vr.w vr16, t6
vreplgr2vr.w vr17, t6
addi.w t7, t2, 0 // j + index k
mul.w t7, t7, t8 // (j + index) * REST_UNIT_STRIDE
add.w t7, t7, t4 // (j + index) * REST_UNIT_STRIDE + i
APPLY_FILTER t7, a2, vr2
APPLY_FILTER t8, t7, vr3
APPLY_FILTER t8, t7, vr4
APPLY_FILTER t8, t7, vr5
APPLY_FILTER t8, t7, vr6
APPLY_FILTER t8, t7, vr7
APPLY_FILTER t8, t7, vr8
vssrarni.hu.w vr15, vr14, 11
vssrarni.hu.w vr17, vr16, 11
vssrlni.bu.h vr17, vr15, 0
.endm
/*
void wiener_filter_v_lsx(uint8_t *p,
const ptrdiff_t p_stride,
const int32_t *hor,
const int16_t filterv[8],
const int w, const int h)
*/
function wiener_filter_v_8bpc_lsx
li.w t6, -(1 << 18)
li.w t8, REST_UNIT_STRIDE
ld.h t0, a3, 0
ld.h t1, a3, 2
vreplgr2vr.w vr2, t0
vreplgr2vr.w vr3, t1
ld.h t0, a3, 4
ld.h t1, a3, 6
vreplgr2vr.w vr4, t0
vreplgr2vr.w vr5, t1
ld.h t0, a3, 8
ld.h t1, a3, 10
vreplgr2vr.w vr6, t0
vreplgr2vr.w vr7, t1
ld.h t0, a3, 12
vreplgr2vr.w vr8, t0
andi t1, a4, 0xf
sub.w t0, a4, t1 // w-w%16
or t2, zero, zero // j
or t4, zero, zero
beqz t0, .WIENER_FILTER_V_W_LT16
.WIENER_FILTER_V_H:
andi t1, a4, 0xf
add.d t3, zero, a0 // p
or t4, zero, zero // i
.WIENER_FILTER_V_W:
wiener_filter_v_8bpc_core_lsx
mul.w t5, t2, a1 // j * stride
add.w t5, t5, t4 // j * stride + i
add.d t3, a0, t5
addi.w t4, t4, 16
vst vr17, t3, 0
bne t0, t4, .WIENER_FILTER_V_W
beqz t1, .WIENER_FILTER_V_W_EQ16
wiener_filter_v_8bpc_core_lsx
addi.d t3, t3, 16
andi t1, a4, 0xf
.WIENER_FILTER_V_ST_REM:
vstelm.b vr17, t3, 0, 0
vbsrl.v vr17, vr17, 1
addi.d t3, t3, 1
addi.w t1, t1, -1
bnez t1, .WIENER_FILTER_V_ST_REM
.WIENER_FILTER_V_W_EQ16:
addi.w t2, t2, 1
blt t2, a5, .WIENER_FILTER_V_H
b .WIENER_FILTER_V_END
.WIENER_FILTER_V_W_LT16:
andi t1, a4, 0xf
add.d t3, zero, a0
wiener_filter_v_8bpc_core_lsx
mul.w t5, t2, a1 // j * stride
add.d t3, a0, t5
.WIENER_FILTER_V_ST_REM_1:
vstelm.b vr17, t3, 0, 0
vbsrl.v vr17, vr17, 1
addi.d t3, t3, 1
addi.w t1, t1, -1
bnez t1, .WIENER_FILTER_V_ST_REM_1
addi.w t2, t2, 1
blt t2, a5, .WIENER_FILTER_V_W_LT16
.WIENER_FILTER_V_END:
endfunc
/*
void boxsum3_h(int32_t *sumsq, coef *sum, const pixel *src,
const int w, const int h)
*/
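// First boxsum3 pass: for each column, accumulate three vertically adjacent
// pixels (and their squares) into the sum / sumsq planes.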
function boxsum3_h_8bpc_lsx
addi.d a2, a2, REST_UNIT_STRIDE
li.w t0, 1
addi.w a3, a3, -2
addi.w a4, a4, -4
.LBS3_H_H:
alsl.d t1, t0, a1, 1 // sum_v *sum_v = sum + x
alsl.d t2, t0, a0, 2 // sumsq_v *sumsq_v = sumsq + x
add.d t3, t0, a2 // s
addi.w t5, a3, 0
.LBS3_H_W:
vld vr0, t3, 0
vld vr1, t3, REST_UNIT_STRIDE
vld vr2, t3, (REST_UNIT_STRIDE<<1)
vilvl.b vr3, vr1, vr0
vhaddw.hu.bu vr4, vr3, vr3
vilvh.b vr5, vr1, vr0
vhaddw.hu.bu vr6, vr5, vr5
vsllwil.hu.bu vr7, vr2, 0
vexth.hu.bu vr8, vr2
// sum_v
vadd.h vr4, vr4, vr7
vadd.h vr6, vr6, vr8
vst vr4, t1, REST_UNIT_STRIDE<<1
vst vr6, t1, (REST_UNIT_STRIDE<<1)+16
addi.d t1, t1, 32
// sumsq
vmulwev.h.bu vr9, vr3, vr3
vmulwod.h.bu vr10, vr3, vr3
vmulwev.h.bu vr11, vr5, vr5
vmulwod.h.bu vr12, vr5, vr5
vaddwev.w.hu vr13, vr10, vr9
vaddwod.w.hu vr14, vr10, vr9
vaddwev.w.hu vr15, vr12, vr11
vaddwod.w.hu vr16, vr12, vr11
vmaddwev.w.hu vr13, vr7, vr7
vmaddwod.w.hu vr14, vr7, vr7
vmaddwev.w.hu vr15, vr8, vr8
vmaddwod.w.hu vr16, vr8, vr8
vilvl.w vr9, vr14, vr13
vilvh.w vr10, vr14, vr13
vilvl.w vr11, vr16, vr15
vilvh.w vr12, vr16, vr15
vst vr9, t2, REST_UNIT_STRIDE<<2
vst vr10, t2, (REST_UNIT_STRIDE<<2)+16
vst vr11, t2, (REST_UNIT_STRIDE<<2)+32
vst vr12, t2, (REST_UNIT_STRIDE<<2)+48
addi.d t2, t2, 64
addi.w t5, t5, -16
addi.d t3, t3, 16
blt zero, t5, .LBS3_H_W
addi.d a0, a0, REST_UNIT_STRIDE<<2
addi.d a1, a1, REST_UNIT_STRIDE<<1
addi.d a2, a2, REST_UNIT_STRIDE
addi.d a4, a4, -1
blt zero, a4, .LBS3_H_H
endfunc
/*
void boxsum3_v(int32_t *sumsq, coef *sum,
const int w, const int h)
*/
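// Second boxsum3 pass: 3-tap horizontal sums (a + b + c of neighbouring
// entries) over the per-column results of boxsum3_h.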
function boxsum3_v_8bpc_lsx
addi.d a0, a0, (REST_UNIT_STRIDE<<2)
addi.d a1, a1, (REST_UNIT_STRIDE<<1)
addi.w a3, a3, -4
addi.w a2, a2, -4
.LBS3_V_H:
sub.w t3, a2, zero
addi.d t0, a0, 4
addi.d t1, a1, 2
addi.d t5, a0, 8
addi.d t6, a1, 4
vld vr0, t1, 0 // a 0 1 2 3 4 5 6 7
vld vr1, t1, 2 // b 1 2 3 4 5 6 7 8
vld vr2, t1, 4 // c 2 3 4 5 6 7 8 9
vld vr3, t0, 0 // a2 0 1 2 3
vld vr4, t0, 4 // b2 1 2 3 4
vld vr5, t0, 8 // c2 2 3 4 5
vld vr6, t0, 16 // 3 4 5 6
vld vr7, t0, 20 // 4 5 6 7
vld vr8, t0, 24 // 5 6 7 8
vadd.h vr9, vr0, vr1
vadd.w vr10, vr3, vr4
vadd.w vr11, vr6, vr7
vadd.h vr9, vr9, vr2
vadd.w vr10, vr10, vr5
vadd.w vr11, vr11, vr8
vpickve2gr.h t7, vr2, 6
vpickve2gr.w t8, vr8, 2
vst vr9, t6, 0
vst vr10, t5, 0
vst vr11, t5, 16
addi.d t1, t1, 16
addi.d t0, t0, 32
addi.d t5, t5, 32
addi.d t6, t6, 16
addi.d t3, t3, -8
ble t3, zero, .LBS3_V_H0
.LBS3_V_W8:
vld vr0, t1, 0 // a 0 1 2 3 4 5 6 7
vld vr1, t1, 2 // b 1 2 3 4 5 6 7 8
vld vr2, t1, 4 // c 2 3 4 5 6 7 8 9
vld vr3, t0, 0 // a2 0 1 2 3
vld vr4, t0, 4 // b2 1 2 3 4
vld vr5, t0, 8 // c2 2 3 4 5
vld vr6, t0, 16 // 3 4 5 6
vld vr7, t0, 20 // 4 5 6 7
vld vr8, t0, 24 // 5 6 7 8
vinsgr2vr.h vr0, t7, 0
vinsgr2vr.w vr3, t8, 0
vpickve2gr.h t7, vr2, 6
vpickve2gr.w t8, vr8, 2
vadd.h vr9, vr0, vr1
vadd.w vr10, vr3, vr4
vadd.w vr11, vr6, vr7
vadd.h vr9, vr9, vr2
vadd.w vr10, vr10, vr5
vadd.w vr11, vr11, vr8
vst vr9, t6, 0
vst vr10, t5, 0
vst vr11, t5, 16
addi.d t3, t3, -8
addi.d t1, t1, 16
addi.d t0, t0, 32
addi.d t5, t5, 32
addi.d t6, t6, 16
blt zero, t3, .LBS3_V_W8
.LBS3_V_H0:
addi.d a1, a1, REST_UNIT_STRIDE<<1
addi.d a0, a0, REST_UNIT_STRIDE<<2
addi.w a3, a3, -1
bnez a3, .LBS3_V_H
endfunc
/*
boxsum3_selfguided_filter(int32_t *sumsq, coef *sum,
const int w, const int h,
const unsigned s)
*/
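// Per-pixel SGR lookup: p = max(AA*9 - BB*BB, 0), z = round(p*s >> 20) capped
// at 255, x = dav1d_sgr_x_by_x[z]; then AA[i] = round(x*BB*455 >> 12) and
// BB[i] = 256 - x.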
function boxsum3_sgf_h_8bpc_lsx
addi.d a0, a0, REST_UNIT_STRIDE<<2
addi.d a0, a0, 12 // AA
addi.d a1, a1, REST_UNIT_STRIDE<<1
addi.d a1, a1, 6 // BB
la.local t8, dav1d_sgr_x_by_x
li.w t6, 455
vreplgr2vr.w vr20, t6
li.w t6, 255
vreplgr2vr.w vr22, t6
vaddi.wu vr21, vr22, 1 // 256
vreplgr2vr.w vr6, a4
vldi vr19, 0x809
addi.w a2, a2, 2 // w + 2
addi.w a3, a3, 2 // h + 2
.LBS3SGF_H_H:
addi.w t2, a2, 0
addi.d t0, a0, -4
addi.d t1, a1, -2
.LBS3SGF_H_W:
addi.w t2, t2, -8
vld vr0, t0, 0 // AA[i]
vld vr1, t0, 16
vld vr2, t1, 0 // BB[i]
vmul.w vr4, vr0, vr19 // a * n
vmul.w vr5, vr1, vr19 // a * n
vsllwil.w.h vr9, vr2, 0
vexth.w.h vr10, vr2
vmsub.w vr4, vr9, vr9 // p
vmsub.w vr5, vr10, vr10 // p
vmaxi.w vr4, vr4, 0
vmaxi.w vr5, vr5, 0 // p
vmul.w vr4, vr4, vr6 // p * s
vmul.w vr5, vr5, vr6 // p * s
vsrlri.w vr4, vr4, 20
vsrlri.w vr5, vr5, 20 // z
vmin.w vr4, vr4, vr22
vmin.w vr5, vr5, vr22
vpickve2gr.w t6, vr4, 0
ldx.bu t7, t8, t6
vinsgr2vr.w vr7, t7, 0
vpickve2gr.w t6, vr4, 1
ldx.bu t7, t8, t6
vinsgr2vr.w vr7, t7, 1
vpickve2gr.w t6, vr4, 2
ldx.bu t7, t8, t6
vinsgr2vr.w vr7, t7, 2
vpickve2gr.w t6, vr4, 3
ldx.bu t7, t8, t6
vinsgr2vr.w vr7, t7, 3
vpickve2gr.w t6, vr5, 0
ldx.bu t7, t8, t6
vinsgr2vr.w vr8, t7, 0
vpickve2gr.w t6, vr5, 1
ldx.bu t7, t8, t6
vinsgr2vr.w vr8, t7, 1
vpickve2gr.w t6, vr5, 2
ldx.bu t7, t8, t6
vinsgr2vr.w vr8, t7, 2
vpickve2gr.w t6, vr5, 3
ldx.bu t7, t8, t6
vinsgr2vr.w vr8, t7, 3 // x
vmul.w vr9, vr7, vr9 // x * BB[i]
vmul.w vr10, vr8, vr10
vmul.w vr9, vr9, vr20 // x * BB[i] * sgr_one_by_x
vmul.w vr10, vr10, vr20
vsrlri.w vr9, vr9, 12
vsrlri.w vr10, vr10, 12
vsub.w vr7, vr21, vr7
vsub.w vr8, vr21, vr8
vpickev.h vr8, vr8, vr7
vst vr9, t0, 0
vst vr10, t0, 16
vst vr8, t1, 0
addi.d t0, t0, 32
addi.d t1, t1, 16
blt zero, t2, .LBS3SGF_H_W
addi.d a0, a0, REST_UNIT_STRIDE<<2
addi.d a1, a1, REST_UNIT_STRIDE<<1
addi.w a3, a3, -1
bnez a3, .LBS3SGF_H_H
endfunc
/*
boxsum3_selfguided_filter(coef *dst, pixel *src,
int32_t *sumsq, coef *sum,
const int w, const int h)
*/
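/*
Rough per-pixel sketch of the loop below, reconstructed from the vector
code (an assumption; A is the sumsq plane, B the sum plane, stride is
REST_UNIT_STRIDE):

    b = 4 * (A[i - stride] + A[i - 1] + A[i] + A[i + 1] + A[i + stride])
      + 3 * (A[i - 1 - stride] + A[i + 1 - stride]
           + A[i - 1 + stride] + A[i + 1 + stride]);
    a = the same 4/3 weighting over B;
    dst[i] = (a * src[i] + b + (1 << 8)) >> 9;
*/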
function boxsum3_sgf_v_8bpc_lsx
addi.d a1, a1, (3*REST_UNIT_STRIDE+3) // src
addi.d a2, a2, REST_UNIT_STRIDE<<2
addi.d a2, a2, (REST_UNIT_STRIDE<<2)+12
addi.d a3, a3, REST_UNIT_STRIDE<<2
addi.d a3, a3, 6
.LBS3SGF_V_H:
// A int32_t *sumsq
addi.d t0, a2, -(REST_UNIT_STRIDE<<2) // -stride
addi.d t1, a2, 0 // sumsq
addi.d t2, a2, REST_UNIT_STRIDE<<2 // +stride
addi.d t6, a1, 0
addi.w t7, a4, 0
addi.d t8, a0, 0
// B coef *sum
addi.d t3, a3, -(REST_UNIT_STRIDE<<1) // -stride
addi.d t4, a3, 0
addi.d t5, a3, REST_UNIT_STRIDE<<1
.LBS3SGF_V_W:
vld vr0, t0, 0 // P[i - REST_UNIT_STRIDE]
vld vr1, t0, 16
vld vr2, t1, -4 // P[i-1] -1 0 1 2
vld vr3, t1, 12 // 3 4 5 6
vld vr4, t2, 0 // P[i + REST_UNIT_STRIDE]
vld vr5, t2, 16
vld vr6, t1, 0 // p[i] 0 1 2 3
vld vr7, t1, 16 // 4 5 6 7
vld vr8, t1, 4 // p[i+1] 1 2 3 4
vld vr9, t1, 20 // 5 6 7 8
vld vr10, t0, -4 // P[i - 1 - REST_UNIT_STRIDE]
vld vr11, t0, 12
vld vr12, t2, -4 // P[i - 1 + REST_UNIT_STRIDE]
vld vr13, t2, 12
vld vr14, t0, 4 // P[i + 1 - REST_UNIT_STRIDE]
vld vr15, t0, 20
vld vr16, t2, 4 // P[i + 1 + REST_UNIT_STRIDE]
vld vr17, t2, 20
vadd.w vr0, vr2, vr0
vadd.w vr4, vr6, vr4
vadd.w vr0, vr0, vr8
vadd.w vr20, vr0, vr4
vslli.w vr20, vr20, 2 // 0 1 2 3
vadd.w vr0, vr1, vr3
vadd.w vr4, vr5, vr7
vadd.w vr0, vr0, vr9
vadd.w vr21, vr0, vr4
vslli.w vr21, vr21, 2 // 4 5 6 7
vadd.w vr12, vr10, vr12
vadd.w vr16, vr14, vr16
vadd.w vr22, vr12, vr16
vslli.w vr23, vr22, 1
vadd.w vr22, vr23, vr22
vadd.w vr11, vr11, vr13
vadd.w vr15, vr15, vr17
vadd.w vr0, vr11, vr15
vslli.w vr23, vr0, 1
vadd.w vr23, vr23, vr0
vadd.w vr20, vr20, vr22 // b
vadd.w vr21, vr21, vr23
// B coef *sum
vld vr0, t3, 0 // P[i - REST_UNIT_STRIDE]
vld vr1, t4, -2 // p[i - 1]
vld vr2, t4, 0 // p[i]
vld vr3, t4, 2 // p[i + 1]
vld vr4, t5, 0 // P[i + REST_UNIT_STRIDE]
vld vr5, t3, -2 // P[i - 1 - REST_UNIT_STRIDE]
vld vr6, t5, -2 // P[i - 1 + REST_UNIT_STRIDE]
vld vr7, t3, 2 // P[i + 1 - REST_UNIT_STRIDE]
vld vr8, t5, 2 // P[i + 1 + REST_UNIT_STRIDE]
vaddwev.w.h vr9, vr0, vr1
vaddwod.w.h vr10, vr0, vr1
vaddwev.w.h vr11, vr2, vr3
vaddwod.w.h vr12, vr2, vr3
vadd.w vr9, vr11, vr9
vadd.w vr10, vr12, vr10
vilvl.w vr11, vr10, vr9 // 0 1 2 3
vilvh.w vr12, vr10, vr9 // 4 5 6 7
vsllwil.w.h vr0, vr4, 0
vexth.w.h vr1, vr4
vadd.w vr0, vr11, vr0
vadd.w vr1, vr12, vr1
vslli.w vr0, vr0, 2
vslli.w vr1, vr1, 2
vaddwev.w.h vr9, vr5, vr6
vaddwod.w.h vr10, vr5, vr6
vaddwev.w.h vr11, vr7, vr8
vaddwod.w.h vr12, vr7, vr8
vadd.w vr9, vr11, vr9
vadd.w vr10, vr12, vr10
vilvl.w vr13, vr10, vr9
vilvh.w vr14, vr10, vr9
vslli.w vr15, vr13, 1
vslli.w vr16, vr14, 1
vadd.w vr15, vr13, vr15 // a
vadd.w vr16, vr14, vr16
vadd.w vr22, vr0, vr15
vadd.w vr23, vr1, vr16
vld vr0, t6, 0 // src
vsllwil.hu.bu vr0, vr0, 0
vsllwil.wu.hu vr1, vr0, 0
vexth.wu.hu vr2, vr0
vmadd.w vr20, vr22, vr1
vmadd.w vr21, vr23, vr2
vssrlrni.h.w vr21, vr20, 9
vst vr21, t8, 0
addi.d t8, t8, 16
addi.d t0, t0, 32
addi.d t1, t1, 32
addi.d t2, t2, 32
addi.d t3, t3, 16
addi.d t4, t4, 16
addi.d t5, t5, 16
addi.d t6, t6, 8
addi.w t7, t7, -8
blt zero, t7, .LBS3SGF_V_W
addi.w a5, a5, -1
addi.d a0, a0, 384*2
addi.d a1, a1, REST_UNIT_STRIDE
addi.d a3, a3, REST_UNIT_STRIDE<<1
addi.d a2, a2, REST_UNIT_STRIDE<<2
bnez a5, .LBS3SGF_V_H
endfunc
function boxsum3_sgf_v_8bpc_lasx
addi.d a1, a1, (3*REST_UNIT_STRIDE+3) // src
addi.d a2, a2, REST_UNIT_STRIDE<<2
addi.d a2, a2, (REST_UNIT_STRIDE<<2)+12
addi.d a3, a3, REST_UNIT_STRIDE<<2
addi.d a3, a3, 6
.LBS3SGF_V_H_LASX:
// A int32_t *sumsq
addi.d t0, a2, -(REST_UNIT_STRIDE<<2) // -stride
addi.d t1, a2, 0 // sumsq
addi.d t2, a2, REST_UNIT_STRIDE<<2 // +stride
addi.d t6, a1, 0
addi.w t7, a4, 0
addi.d t8, a0, 0
// B coef *sum
addi.d t3, a3, -(REST_UNIT_STRIDE<<1) // -stride
addi.d t4, a3, 0
addi.d t5, a3, REST_UNIT_STRIDE<<1
.LBS3SGF_V_W_LASX:
xvld xr0, t0, 0 // P[i - REST_UNIT_STRIDE]
xvld xr1, t0, 32
xvld xr2, t1, -4 // P[i-1] -1 0 1 2
xvld xr3, t1, 28 // 3 4 5 6
xvld xr4, t2, 0 // P[i + REST_UNIT_STRIDE]
xvld xr5, t2, 32
xvld xr6, t1, 0 // p[i] 0 1 2 3
xvld xr7, t1, 32 // 4 5 6 7
xvld xr8, t1, 4 // p[i+1] 1 2 3 4
xvld xr9, t1, 36 // 5 6 7 8
xvld xr10, t0, -4 // P[i - 1 - REST_UNIT_STRIDE]
xvld xr11, t0, 28
xvld xr12, t2, -4 // P[i - 1 + REST_UNIT_STRIDE]
xvld xr13, t2, 28
xvld xr14, t0, 4 // P[i + 1 - REST_UNIT_STRIDE]
xvld xr15, t0, 36
xvld xr16, t2, 4 // P[i + 1 + REST_UNIT_STRIDE]
xvld xr17, t2, 36
xvadd.w xr0, xr2, xr0
xvadd.w xr4, xr6, xr4
xvadd.w xr0, xr0, xr8
xvadd.w xr20, xr0, xr4
xvslli.w xr20, xr20, 2 // 0 1 2 3
xvadd.w xr0, xr1, xr3
xvadd.w xr4, xr5, xr7
xvadd.w xr0, xr0, xr9
xvadd.w xr21, xr0, xr4
xvslli.w xr21, xr21, 2 // 4 5 6 7
xvadd.w xr12, xr10, xr12
xvadd.w xr16, xr14, xr16
xvadd.w xr22, xr12, xr16
xvslli.w xr23, xr22, 1
xvadd.w xr22, xr23, xr22
xvadd.w xr11, xr11, xr13
xvadd.w xr15, xr15, xr17
xvadd.w xr0, xr11, xr15
xvslli.w xr23, xr0, 1
xvadd.w xr23, xr23, xr0
xvadd.w xr20, xr20, xr22 // b
xvadd.w xr21, xr21, xr23
// B coef *sum
xvld xr0, t3, 0 // P[i - REST_UNIT_STRIDE]
xvld xr1, t4, -2 // p[i - 1]
xvld xr2, t4, 0 // p[i]
xvld xr3, t4, 2 // p[i + 1]
xvld xr4, t5, 0 // P[i + REST_UNIT_STRIDE]
xvld xr5, t3, -2 // P[i - 1 - REST_UNIT_STRIDE]
xvld xr6, t5, -2 // P[i - 1 + REST_UNIT_STRIDE]
xvld xr7, t3, 2 // P[i + 1 - REST_UNIT_STRIDE]
xvld xr8, t5, 2 // P[i + 1 + REST_UNIT_STRIDE]
xvaddwev.w.h xr9, xr0, xr1
xvaddwod.w.h xr10, xr0, xr1
xvaddwev.w.h xr11, xr2, xr3
xvaddwod.w.h xr12, xr2, xr3
xvadd.w xr9, xr11, xr9 // 0 2 4 6 8 10 12 14
xvadd.w xr10, xr12, xr10 // 1 3 5 7 9 11 13 15
xvilvl.w xr11, xr10, xr9 // 0 1 2 3 8 9 10 11
xvilvh.w xr12, xr10, xr9 // 4 5 6 7 12 13 14 15
xvsllwil.w.h xr0, xr4, 0 // 0 1 2 3 8 9 10 11
xvexth.w.h xr1, xr4 // 4 5 6 7 12 13 14 15
xvadd.w xr0, xr11, xr0
xvadd.w xr1, xr12, xr1
xvslli.w xr0, xr0, 2
xvslli.w xr1, xr1, 2
xvaddwev.w.h xr9, xr5, xr6
xvaddwod.w.h xr10, xr5, xr6
xvaddwev.w.h xr11, xr7, xr8
xvaddwod.w.h xr12, xr7, xr8
xvadd.w xr9, xr11, xr9
xvadd.w xr10, xr12, xr10
xvilvl.w xr13, xr10, xr9 // 0 1 2 3 8 9 10 11
xvilvh.w xr14, xr10, xr9 // 4 5 6 7 12 13 14 15
xvslli.w xr15, xr13, 1
xvslli.w xr16, xr14, 1
xvadd.w xr15, xr13, xr15 // a
xvadd.w xr16, xr14, xr16
xvadd.w xr22, xr0, xr15 // A B
xvadd.w xr23, xr1, xr16 // C D
vld vr0, t6, 0 // src
vilvh.d vr2, vr0, vr0
vext2xv.wu.bu xr1, xr0
vext2xv.wu.bu xr2, xr2
xvor.v xr15, xr22, xr22 // A B
xvpermi.q xr22, xr23, 0b00000010 // A C
xvpermi.q xr23, xr15, 0b00110001
xvmadd.w xr20, xr22, xr1
xvmadd.w xr21, xr23, xr2
xvssrlrni.h.w xr21, xr20, 9
xvpermi.d xr22, xr21, 0b11011000
xvst xr22, t8, 0
addi.d t8, t8, 32
addi.d t0, t0, 64
addi.d t1, t1, 64
addi.d t2, t2, 64
addi.d t3, t3, 32
addi.d t4, t4, 32
addi.d t5, t5, 32
addi.d t6, t6, 16
addi.w t7, t7, -16
blt zero, t7, .LBS3SGF_V_W_LASX
addi.w a5, a5, -1
addi.d a0, a0, 384*2
addi.d a1, a1, REST_UNIT_STRIDE
addi.d a3, a3, REST_UNIT_STRIDE<<1
addi.d a2, a2, REST_UNIT_STRIDE<<2
bnez a5, .LBS3SGF_V_H_LASX
endfunc
#define FILTER_OUT_STRIDE (384)
/*
sgr_3x3_finish_c(const pixel *p, const ptrdiff_t stride,
const int16_t *dst, const int w1,
const int w, const int h);
*/
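/*
Per-pixel sketch of the loop below, reconstructed from the shifts used in
the vector code (an assumption; iclip_u8 used informally as a clamp):

    u    = p[i] << 4;
    v    = (u << 7) + w1 * (dst[i] - u);
    p[i] = iclip_u8((v + (1 << 10)) >> 11);
*/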
function sgr_3x3_finish_8bpc_lsx
vreplgr2vr.w vr3, a3 // w1
andi t4, a4, 0x7
sub.w t5, a4, t4
beq zero, t5, .LSGR3X3_REM
.LSGR3X3_H:
addi.d t0, a0, 0
addi.d t1, a2, 0
addi.w t2, t5, 0
andi t4, a4, 0x7
.LSGR3X3_W:
vld vr0, t0, 0
vld vr1, t1, 0
vsllwil.hu.bu vr2, vr0, 4 // u 8 h
vsllwil.wu.hu vr4, vr2, 0 // p
vexth.wu.hu vr5, vr2 // p
vslli.w vr6, vr4, 7
vslli.w vr7, vr5, 7
vsllwil.w.h vr8, vr1, 0 // dst
vexth.w.h vr9, vr1 // dst
vsub.w vr8, vr8, vr4
vsub.w vr9, vr9, vr5
vmadd.w vr6, vr8, vr3 // v 0 - 3
vmadd.w vr7, vr9, vr3 // v 4 - 7
vssrarni.hu.w vr7, vr6, 11
vssrlni.bu.h vr7, vr7, 0
vstelm.d vr7, t0, 0, 0
addi.d t0, t0, 8
addi.d t1, t1, 16
addi.d t2, t2, -8
bne zero, t2, .LSGR3X3_W
beq t4, zero, .LSGR3X3_NOREM
vld vr0, t0, 0
vld vr1, t1, 0
vsllwil.hu.bu vr2, vr0, 4 // u 8 h
vsllwil.wu.hu vr4, vr2, 0 // p
vexth.wu.hu vr5, vr2 // p
vslli.w vr6, vr4, 7
vslli.w vr7, vr5, 7
vsllwil.w.h vr8, vr1, 0 // dst
vexth.w.h vr9, vr1 // dst
vsub.w vr8, vr8, vr4
vsub.w vr9, vr9, vr5
vmadd.w vr6, vr8, vr3 // v 0 - 3
vmadd.w vr7, vr9, vr3 // v 4 - 7
vssrarni.hu.w vr7, vr6, 11
vssrlni.bu.h vr7, vr7, 0
.LSGR3X3_ST:
vstelm.b vr7, t0, 0, 0
addi.d t0, t0, 1
vbsrl.v vr7, vr7, 1
addi.w t4, t4, -1
bnez t4, .LSGR3X3_ST
.LSGR3X3_NOREM:
addi.w a5, a5, -1
add.d a0, a0, a1
addi.d a2, a2, (FILTER_OUT_STRIDE<<1)
bnez a5, .LSGR3X3_H
b .LSGR3X3_END
.LSGR3X3_REM:
andi t4, a4, 0x7
addi.d t0, a0, 0
vld vr0, t0, 0
vld vr1, a2, 0
vsllwil.hu.bu vr2, vr0, 4 // u 8 h
vsllwil.wu.hu vr4, vr2, 0 // p
vexth.wu.hu vr5, vr2 // p
vslli.w vr6, vr4, 7
vslli.w vr7, vr5, 7
vsllwil.w.h vr8, vr1, 0 // dst
vexth.w.h vr9, vr1 // dst
vsub.w vr8, vr8, vr4
vsub.w vr9, vr9, vr5
vmadd.w vr6, vr8, vr3 // v 0 - 3
vmadd.w vr7, vr9, vr3 // v 4 - 7
vssrarni.hu.w vr7, vr6, 11
vssrlni.bu.h vr7, vr7, 0
.LSGR3X3_REM_ST:
vstelm.b vr7, t0, 0, 0
addi.d t0, t0, 1
vbsrl.v vr7, vr7, 1
addi.w t4, t4, -1
bnez t4, .LSGR3X3_REM_ST
addi.w a5, a5, -1
add.d a0, a0, a1
addi.d a2, a2, (FILTER_OUT_STRIDE<<1)
bnez a5, .LSGR3X3_REM
.LSGR3X3_END:
endfunc
/*
void boxsum5(int32_t *sumsq, coef *sum,
const pixel *const src,
const int w, const int h)
*/
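/*
Rough scalar sketch of the loop below, reconstructed from the vector code
(an assumption; S = REST_UNIT_STRIDE): every output column is the sum of
five vertically adjacent source pixels, and of their squares:

    sum[x]   = s[x] + s[x + S] + s[x + 2*S] + s[x + 3*S] + s[x + 4*S];
    sumsq[x] = s[x]*s[x] + s[x + S]*s[x + S] + ... + s[x + 4*S]*s[x + 4*S];
*/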
function boxsum5_h_8bpc_lsx
addi.w a4, a4, -4
addi.d a0, a0, REST_UNIT_STRIDE<<2
addi.d a1, a1, REST_UNIT_STRIDE<<1
li.w t6, 1
.LBOXSUM5_H_H:
addi.w t3, a3, 0
addi.d t2, a2, 0
addi.d t0, a0, 0
addi.d t1, a1, 0
.LBOXSUM5_H_W:
vld vr0, t2, 0 // a
vld vr1, t2, REST_UNIT_STRIDE // b
vld vr2, t2, REST_UNIT_STRIDE<<1 // c
vld vr3, t2, REST_UNIT_STRIDE*3 // d
vld vr4, t2, REST_UNIT_STRIDE<<2 // e
vilvl.b vr5, vr1, vr0
vilvh.b vr6, vr1, vr0
vilvl.b vr7, vr3, vr2
vilvh.b vr8, vr3, vr2
//sum_v
vhaddw.hu.bu vr9, vr5, vr5 // 0 1 2 3 4 5 6 7
vhaddw.hu.bu vr10, vr6, vr6 // 8 9 10 11 12 13 14 15 a+b
vhaddw.hu.bu vr11, vr7, vr7
vhaddw.hu.bu vr12, vr8, vr8
vadd.h vr9, vr9, vr11
vadd.h vr10, vr10, vr12 // a + b + c + d
vsllwil.hu.bu vr11, vr4, 0
vexth.hu.bu vr12, vr4
vadd.h vr9, vr9, vr11
vadd.h vr10, vr10, vr12
vst vr9, t1, 0
vst vr10, t1, 16
addi.d t1, t1, 32
// sumsq
vmulwev.h.bu vr9, vr5, vr5 // a*a 0 1 2 3 4 5 6 7
vmulwev.h.bu vr10, vr6, vr6 // a*a 8 9 10 11 12 13 14 15
vmulwod.h.bu vr13, vr5, vr5 // b*b 0 1 2 3 4 5 6 7
vmulwod.h.bu vr14, vr6, vr6 // b*b 8 9 10 11 12 13 14 15
vmulwev.h.bu vr15, vr7, vr7 // c*c 0 1 2 3 4 5 6 7
vmulwev.h.bu vr16, vr8, vr8 // c*c 8 9 10 11 12 13 14 15
vmulwod.h.bu vr17, vr7, vr7 // d*d 0 1 2 3 4 5 6 7
vmulwod.h.bu vr18, vr8, vr8 // d*d 8 9 10 11 12 13 14 15
vaddwev.w.hu vr5, vr9, vr13 // 0 2 4 6
vaddwod.w.hu vr6, vr9, vr13 // 1 3 5 7
vaddwev.w.hu vr7, vr10, vr14 // 8 10 12 14
vaddwod.w.hu vr8, vr10, vr14 // 9 11 13 15 a + b
vaddwev.w.hu vr19, vr15, vr17 // 0 2 4 6
vaddwod.w.hu vr20, vr15, vr17 // 1 3 5 7
vaddwev.w.hu vr21, vr16, vr18 // 8 10 12 14
vaddwod.w.hu vr22, vr16, vr18 // 9 11 13 15 c + d
vadd.w vr5, vr5, vr19
vadd.w vr6, vr6, vr20
vadd.w vr7, vr7, vr21
vadd.w vr8, vr8, vr22
vmaddwev.w.hu vr5, vr11, vr11
vmaddwod.w.hu vr6, vr11, vr11
vmaddwev.w.hu vr7, vr12, vr12
vmaddwod.w.hu vr8, vr12, vr12
vilvl.w vr19, vr6, vr5
vilvh.w vr20, vr6, vr5
vilvl.w vr21, vr8, vr7
vilvh.w vr22, vr8, vr7
vst vr19, t0, 0
vst vr20, t0, 16
vst vr21, t0, 32
vst vr22, t0, 48
addi.d t0, t0, 64
addi.d t2, t2, 16
addi.w t3, t3, -16
blt zero, t3, .LBOXSUM5_H_W
addi.d a0, a0, REST_UNIT_STRIDE<<2
addi.d a1, a1, REST_UNIT_STRIDE<<1
addi.d a2, a2, REST_UNIT_STRIDE
addi.d a4, a4, -1
bnez a4, .LBOXSUM5_H_H
endfunc
/*
void boxsum5_v(int32_t *sumsq, coef *sum,
const int w, const int h)
*/
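/*
Rough scalar sketch of the loop below, reconstructed from the vector code
(an assumption): an in-place, centred 5-tap horizontal running sum of the
column sums produced by boxsum5_h above, with t5 carrying the overlapping
scalar between chunks:

    sum[x]   = sum[x-2]   + sum[x-1]   + sum[x]   + sum[x+1]   + sum[x+2];
    sumsq[x] = sumsq[x-2] + sumsq[x-1] + sumsq[x] + sumsq[x+1] + sumsq[x+2];
*/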
function boxsum5_v_8bpc_lsx
addi.d a0, a0, (REST_UNIT_STRIDE<<2)
addi.d a1, a1, (REST_UNIT_STRIDE<<1)
addi.w a3, a3, -4
addi.w a2, a2, -4
.LBOXSUM5_V_H:
addi.w t3, a2, 0
addi.d t0, a0, 0
addi.d t1, a1, 0
addi.d t2, a0, 8
addi.d t3, a1, 4
addi.d t4, a2, 0
vld vr0, t1, 0 // a 0 1 2 3 4 5 6 7
vld vr1, t1, 2 // b 1 2 3 4 5 6 7 8
vld vr2, t1, 4 // c 2
vld vr3, t1, 6 // d 3
vld vr4, t1, 8 // e 4 5 6 7 8 9 10 11
vadd.h vr5, vr0, vr1
vadd.h vr6, vr2, vr3
vpickve2gr.w t5, vr4, 2
vadd.h vr5, vr5, vr6
vadd.h vr5, vr5, vr4
vst vr5, t3, 0
vld vr0, t0, 0 // 0 1 2 3 a
vld vr1, t0, 4 // 1 2 3 4 b
vld vr2, t0, 8 // 2 3 4 5 c
vld vr3, t0, 12 // 3 4 5 6 d
vld vr4, t0, 16 // 4 5 6 7 e a
vld vr5, t0, 20 // 5 6 7 8 b
vld vr6, t0, 24 // 6 7 8 9 c
vld vr7, t0, 28 // 7 8 9 10 d
vld vr8, t0, 32 // 8 9 10 11 e
vadd.w vr9, vr0, vr1
vadd.w vr10, vr2, vr3
vadd.w vr9, vr9, vr10
vadd.w vr9, vr9, vr4
vadd.w vr10, vr4, vr5
vadd.w vr11, vr6, vr7
vadd.w vr10, vr10, vr8
vadd.w vr10, vr10, vr11
vst vr9, t2, 0
vst vr10, t2, 16
addi.d t3, t3, 16
addi.d t1, t1, 16
addi.d t0, t0, 32
addi.d t2, t2, 32
addi.w t4, t4, -8
ble t4, zero, .LBOXSUM5_V_H1
.LBOXSUM5_V_W:
vld vr0, t1, 0 // a 0 1 2 3 4 5 6 7
vld vr1, t1, 2 // b 1 2 3 4 5 6 7 8
vld vr2, t1, 4 // c 2
vld vr3, t1, 6 // d 3
vld vr4, t1, 8 // e 4 5 6 7 8 9 10 11
vinsgr2vr.w vr0, t5, 0
vpickve2gr.w t5, vr4, 2
vextrins.h vr1, vr0, 0x01
vadd.h vr5, vr0, vr1
vadd.h vr6, vr2, vr3
vadd.h vr5, vr5, vr6
vadd.h vr5, vr5, vr4
vst vr5, t3, 0
vaddi.hu vr0, vr8, 0 // 8 9 10 11 a
vld vr1, t0, 4 // 9 10 11 12 b
vld vr2, t0, 8 // 10 11 12 13 c
vld vr3, t0, 12 // 11 12 13 14 d
vld vr4, t0, 16 // 12 13 14 15 e a
vld vr5, t0, 20 // 13 14 15 16 b
vld vr6, t0, 24 // 14 15 16 17 c
vld vr7, t0, 28 // 15 16 17 18 d
vld vr8, t0, 32 // 16 17 18 19 e
vextrins.w vr1, vr0, 0x01
vadd.w vr9, vr0, vr1
vadd.w vr10, vr2, vr3
vadd.w vr9, vr9, vr10
vadd.w vr9, vr9, vr4
vadd.w vr10, vr4, vr5
vadd.w vr11, vr6, vr7
vadd.w vr10, vr10, vr8
vadd.w vr10, vr10, vr11
vst vr9, t2, 0
vst vr10, t2, 16
addi.d t3, t3, 16
addi.d t1, t1, 16
addi.d t0, t0, 32
addi.d t2, t2, 32
addi.w t4, t4, -8
blt zero, t4, .LBOXSUM5_V_W
.LBOXSUM5_V_H1:
addi.d a1, a1, REST_UNIT_STRIDE<<1
addi.d a0, a0, REST_UNIT_STRIDE<<2
addi.w a3, a3, -1
bnez a3, .LBOXSUM5_V_H
endfunc
/*
selfguided_filter(int32_t *sumsq, coef *sum,
const int w, const int h,
const unsigned s)
*/
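/*
Same per-element update as boxsum3_sgf_h above, assumed to differ only in
the 5x5 box constants: n = 25 (vldi 0x819) and a reciprocal constant of
164 instead of 455, applied to every second row (the loop steps two rows
at a time).
*/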
function boxsum5_sgf_h_8bpc_lsx
addi.d a0, a0, REST_UNIT_STRIDE<<2
addi.d a0, a0, 12 // AA
addi.d a1, a1, REST_UNIT_STRIDE<<1
addi.d a1, a1, 6 // BB
la.local t8, dav1d_sgr_x_by_x
li.w t6, 164
vreplgr2vr.w vr20, t6
li.w t6, 255
vreplgr2vr.w vr22, t6
vaddi.wu vr21, vr22, 1 // 256
vreplgr2vr.w vr6, a4
vldi vr19, 0x819
addi.w a2, a2, 2 // w + 2
addi.w a3, a3, 2 // h + 2
.LBS5SGF_H_H:
addi.w t2, a2, 0
addi.d t0, a0, -4
addi.d t1, a1, -2
.LBS5SGF_H_W:
vld vr0, t0, 0 // AA[i]
vld vr1, t0, 16
vld vr2, t1, 0 // BB[i]
vmul.w vr4, vr0, vr19 // a * n
vmul.w vr5, vr1, vr19 // a * n
vsllwil.w.h vr9, vr2, 0
vexth.w.h vr10, vr2
vmsub.w vr4, vr9, vr9 // p
vmsub.w vr5, vr10, vr10 // p
vmaxi.w vr4, vr4, 0
vmaxi.w vr5, vr5, 0 // p
vmul.w vr4, vr4, vr6 // p * s
vmul.w vr5, vr5, vr6 // p * s
vsrlri.w vr4, vr4, 20
vsrlri.w vr5, vr5, 20 // z
vmin.w vr4, vr4, vr22
vmin.w vr5, vr5, vr22
// load table data
vpickve2gr.w t6, vr4, 0
ldx.bu t7, t8, t6
vinsgr2vr.w vr7, t7, 0
vpickve2gr.w t6, vr4, 1
ldx.bu t7, t8, t6
vinsgr2vr.w vr7, t7, 1
vpickve2gr.w t6, vr4, 2
ldx.bu t7, t8, t6
vinsgr2vr.w vr7, t7, 2
vpickve2gr.w t6, vr4, 3
ldx.bu t7, t8, t6
vinsgr2vr.w vr7, t7, 3
vpickve2gr.w t6, vr5, 0
ldx.bu t7, t8, t6
vinsgr2vr.w vr8, t7, 0
vpickve2gr.w t6, vr5, 1
ldx.bu t7, t8, t6
vinsgr2vr.w vr8, t7, 1
vpickve2gr.w t6, vr5, 2
ldx.bu t7, t8, t6
vinsgr2vr.w vr8, t7, 2
vpickve2gr.w t6, vr5, 3
ldx.bu t7, t8, t6
vinsgr2vr.w vr8, t7, 3 // x
vmul.w vr9, vr7, vr9 // x * BB[i]
vmul.w vr10, vr8, vr10
vmul.w vr9, vr9, vr20 // x * BB[i] * sgr_one_by_x
vmul.w vr10, vr10, vr20
vsrlri.w vr9, vr9, 12
vsrlri.w vr10, vr10, 12
vsub.w vr7, vr21, vr7
vsub.w vr8, vr21, vr8
vpickev.h vr8, vr8, vr7
vst vr9, t0, 0
vst vr10, t0, 16
vst vr8, t1, 0
addi.d t0, t0, 32
addi.d t1, t1, 16
addi.w t2, t2, -8
blt zero, t2, .LBS5SGF_H_W
addi.d a0, a0, REST_UNIT_STRIDE<<2
addi.d a0, a0, REST_UNIT_STRIDE<<2
addi.d a1, a1, REST_UNIT_STRIDE<<2
addi.w a3, a3, -2
blt zero, a3, .LBS5SGF_H_H
endfunc
/*
selfguided_filter(coef *dst, pixel *src,
int32_t *sumsq, coef *sum,
const int w, const int h)
*/
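/*
Rough per-pixel sketch of the loop below, reconstructed from the vector
code (an assumption; A is the sumsq plane, B the sum plane, stride is
REST_UNIT_STRIDE). Two output rows are produced per iteration:

    // even row: 6/5-weighted taps from the rows above and below
    a = 6 * (B[i - stride] + B[i + stride])
      + 5 * (B[i - 1 - stride] + B[i + 1 - stride]
           + B[i - 1 + stride] + B[i + 1 + stride]);
    b = the same 6/5 weighting over A;
    dst[i] = (a * src[i] + b + (1 << 8)) >> 9;

    // odd row (the row in between), using the next row of A and B
    a = 6 * B[i + stride] + 5 * (B[i - 1 + stride] + B[i + 1 + stride]);
    b = 6 * A[i + stride] + 5 * (A[i - 1 + stride] + A[i + 1 + stride]);
    dst[i + 384] = (a * src[i + stride] + b + (1 << 7)) >> 8;
*/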
function boxsum5_sgf_v_8bpc_lsx
addi.d a1, a1, 3*REST_UNIT_STRIDE+3 // src
addi.d a2, a2, (2*REST_UNIT_STRIDE+3)<<1 // A
addi.d a2, a2, (2*REST_UNIT_STRIDE+3)<<1
addi.d a3, a3, (2*REST_UNIT_STRIDE+3)<<1 // B
addi.w a5, a5, -1
vldi vr10, 0x806
vldi vr11, 0x805
vldi vr22, 0x406
.LBS5SGF_V_H:
addi.d t0, a0, 0
addi.d t1, a1, 0
addi.d t2, a2, 0
addi.d t3, a3, 0
addi.w t4, a4, 0
addi.d t5, a0, 384*2
addi.d t6, a1, REST_UNIT_STRIDE
addi.d t7, a2, REST_UNIT_STRIDE<<2
addi.d t8, a3, REST_UNIT_STRIDE<<1 // B
.LBS5SGF_V_W:
// a
vld vr0, t3, -REST_UNIT_STRIDE*2
vld vr1, t3, REST_UNIT_STRIDE*2
vld vr2, t3, (-REST_UNIT_STRIDE-1)*2
vld vr3, t3, (REST_UNIT_STRIDE-1)*2
vld vr4, t3, (1-REST_UNIT_STRIDE)*2
vld vr5, t3, (1+REST_UNIT_STRIDE)*2
vaddwev.w.h vr6, vr0, vr1
vaddwod.w.h vr7, vr0, vr1
vmul.w vr6, vr6, vr10
vmul.w vr7, vr7, vr10
vaddwev.w.h vr8, vr2, vr3
vaddwod.w.h vr9, vr2, vr3
vaddwev.w.h vr12, vr4, vr5
vaddwod.w.h vr13, vr4, vr5
vadd.w vr8, vr8, vr12
vadd.w vr9, vr9, vr13
vmadd.w vr6, vr8, vr11
vmadd.w vr7, vr9, vr11
vilvl.w vr18, vr7, vr6
vilvh.w vr19, vr7, vr6
// b
vld vr0, t2, -REST_UNIT_STRIDE*4
vld vr1, t2, -REST_UNIT_STRIDE*4+16
vld vr2, t2, REST_UNIT_STRIDE*4
vld vr3, t2, REST_UNIT_STRIDE*4+16
vld vr4, t2, (-REST_UNIT_STRIDE-1)*4
vld vr5, t2, (-REST_UNIT_STRIDE-1)*4+16
vld vr8, t2, (REST_UNIT_STRIDE-1)*4
vld vr9, t2, (REST_UNIT_STRIDE-1)*4+16
vld vr12, t2, (1-REST_UNIT_STRIDE)*4
vld vr13, t2, (1-REST_UNIT_STRIDE)*4+16
vld vr14, t2, (1+REST_UNIT_STRIDE)*4
vld vr15, t2, (1+REST_UNIT_STRIDE)*4+16
vadd.w vr0, vr0, vr2 // 0 1 2 3
vadd.w vr1, vr1, vr3 // 4 5 6 7
vmul.w vr20, vr0, vr10
vmul.w vr21, vr1, vr10
vadd.w vr4, vr4, vr8 // 0 1 2 3
vadd.w vr5, vr5, vr9 // 4 5 6 7
vadd.w vr12, vr12, vr14
vadd.w vr13, vr13, vr15
vadd.w vr12, vr12, vr4
vadd.w vr13, vr13, vr5
vmadd.w vr20, vr12, vr11
vmadd.w vr21, vr13, vr11
vld vr2, t1, 0
vsllwil.hu.bu vr2, vr2, 0
vsllwil.wu.hu vr3, vr2, 0
vexth.wu.hu vr4, vr2
vmadd.w vr20, vr18, vr3
vmadd.w vr21, vr19, vr4
vssrlrni.h.w vr21, vr20, 9
vst vr21, t0, 0
addi.d t1, t1, 8
addi.d t2, t2, 32
addi.d t3, t3, 16
// a
vld vr0, t8, 0
vld vr1, t8, -2
vld vr2, t8, 2
vmulwev.w.h vr3, vr0, vr22
vmulwod.w.h vr4, vr0, vr22
vaddwev.w.h vr5, vr1, vr2
vaddwod.w.h vr6, vr1, vr2
vmadd.w vr3, vr5, vr11
vmadd.w vr4, vr6, vr11
vilvl.w vr19, vr4, vr3
vilvh.w vr20, vr4, vr3
// b
vld vr0, t7, 0
vld vr1, t7, -4
vld vr2, t7, 4
vld vr5, t7, 16
vld vr6, t7, 12
vld vr7, t7, 20
vmul.w vr8, vr0, vr10
vmul.w vr9, vr5, vr10
vadd.w vr12, vr1, vr2
vadd.w vr13, vr6, vr7
vmadd.w vr8, vr12, vr11
vmadd.w vr9, vr13, vr11
vld vr2, t6, 0
vsllwil.hu.bu vr2, vr2, 0
vsllwil.wu.hu vr3, vr2, 0
vexth.wu.hu vr4, vr2
vmadd.w vr8, vr19, vr3
vmadd.w vr9, vr20, vr4
vssrlrni.h.w vr9, vr8, 8
vst vr9, t0, 384*2
addi.d t0, t0, 16
addi.d t8, t8, 16
addi.d t7, t7, 32
addi.d t6, t6, 8
addi.w t4, t4, -8
blt zero, t4, .LBS5SGF_V_W
addi.w a5, a5, -2
addi.d a0, a0, 384*4 // dst
addi.d a1, a1, REST_UNIT_STRIDE<<1 // src
addi.d a2, a2, REST_UNIT_STRIDE<<2 //
addi.d a2, a2, REST_UNIT_STRIDE<<2
addi.d a3, a3, REST_UNIT_STRIDE<<2 //
blt zero, a5, .LBS5SGF_V_H
bnez a5, .LBS5SGF_END
.LBS5SGF_V_W1:
// a
vld vr0, a3, -REST_UNIT_STRIDE*2
vld vr1, a3, REST_UNIT_STRIDE*2
vld vr2, a3, (-REST_UNIT_STRIDE-1)*2
vld vr3, a3, (REST_UNIT_STRIDE-1)*2
vld vr4, a3, (1-REST_UNIT_STRIDE)*2
vld vr5, a3, (1+REST_UNIT_STRIDE)*2
vaddwev.w.h vr6, vr0, vr1
vaddwod.w.h vr7, vr0, vr1
vmul.w vr6, vr6, vr10
vmul.w vr7, vr7, vr10
vaddwev.w.h vr8, vr2, vr3
vaddwod.w.h vr9, vr2, vr3
vaddwev.w.h vr12, vr4, vr5
vaddwod.w.h vr13, vr4, vr5
vadd.w vr8, vr8, vr12
vadd.w vr9, vr9, vr13
vmadd.w vr6, vr8, vr11
vmadd.w vr7, vr9, vr11
vilvl.w vr18, vr7, vr6
vilvh.w vr19, vr7, vr6
// b
vld vr0, a2, -REST_UNIT_STRIDE*4
vld vr1, a2, -REST_UNIT_STRIDE*4+16
vld vr2, a2, REST_UNIT_STRIDE*4
vld vr3, a2, REST_UNIT_STRIDE*4+16
vld vr4, a2, (-REST_UNIT_STRIDE-1)*4
vld vr5, a2, (-REST_UNIT_STRIDE-1)*4+16
vld vr8, a2, (REST_UNIT_STRIDE-1)*4
vld vr9, a2, (REST_UNIT_STRIDE-1)*4+16
vld vr12, a2, (1-REST_UNIT_STRIDE)*4
vld vr13, a2, (1-REST_UNIT_STRIDE)*4+16
vld vr14, a2, (1+REST_UNIT_STRIDE)*4
vld vr15, a2, (1+REST_UNIT_STRIDE)*4+16
vadd.w vr0, vr0, vr2 // 0 1 2 3
vadd.w vr1, vr1, vr3 // 4 5 6 7
vmul.w vr20, vr0, vr10
vmul.w vr21, vr1, vr10
vadd.w vr4, vr4, vr8 // 0 1 2 3
vadd.w vr5, vr5, vr9 // 4 5 6 7
vadd.w vr12, vr12, vr14
vadd.w vr13, vr13, vr15
vadd.w vr12, vr12, vr4
vadd.w vr13, vr13, vr5
vmadd.w vr20, vr12, vr11
vmadd.w vr21, vr13, vr11
vld vr2, a1, 0
vsllwil.hu.bu vr2, vr2, 0
vsllwil.wu.hu vr3, vr2, 0
vexth.wu.hu vr4, vr2
vmadd.w vr20, vr18, vr3
vmadd.w vr21, vr19, vr4
vssrlrni.h.w vr21, vr20, 9
vst vr21, a0, 0
addi.d a3, a3, 16
addi.d a2, a2, 32
addi.d a1, a1, 8
addi.d a0, a0, 16
addi.w a4, a4, -8
blt zero, a4, .LBS5SGF_V_W1
.LBS5SGF_END:
endfunc
/*
void dav1d_sgr_mix_finish_lsx(uint8_t *p, const ptrdiff_t stride,
const int16_t *dst0, const int16_t *dst1,
const int w0, const int w1,
const int w, const int h);
*/
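/*
Per-pixel sketch of the loop below, reconstructed from the vector code
(an assumption; mirrors sgr_3x3_finish above but blends two filtered
planes with weights w0 and w1):

    u    = p[i] << 4;
    v    = (u << 7) + w0 * (dst0[i] - u) + w1 * (dst1[i] - u);
    p[i] = iclip_u8((v + (1 << 10)) >> 11);
*/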
function sgr_mix_finish_8bpc_lsx
vreplgr2vr.w vr3, a4 // w0
vreplgr2vr.w vr13, a5 // w1
andi t4, a6, 0x7
sub.w t5, a6, t4
beq zero, t5, .LSGRMIX_REM
.LSGRMIX_H:
addi.d t0, a0, 0
addi.d t1, a2, 0 // dst0
addi.d t3, a3, 0 // dst1
addi.w t2, t5, 0
andi t4, a6, 0x7
.LSGRMIX_W:
vld vr0, t0, 0
vld vr1, t1, 0
vld vr10, t3, 0
vsllwil.hu.bu vr2, vr0, 4 // u 8 h
vsllwil.wu.hu vr4, vr2, 0 // u 0 1 2 3
vexth.wu.hu vr5, vr2 // u 4 5 6 7
vslli.w vr6, vr4, 7
vslli.w vr7, vr5, 7
vsllwil.w.h vr8, vr1, 0 // dst0
vexth.w.h vr9, vr1 // dst0
vsub.w vr8, vr8, vr4
vsub.w vr9, vr9, vr5
vmadd.w vr6, vr8, vr3 // v 0 - 3
vmadd.w vr7, vr9, vr3 // v 4 - 7
vsllwil.w.h vr11, vr10, 0 // dst1
vexth.w.h vr12, vr10 // dst1
vsub.w vr11, vr11, vr4
vsub.w vr12, vr12, vr5
vmadd.w vr6, vr11, vr13
vmadd.w vr7, vr12, vr13
vssrarni.hu.w vr7, vr6, 11
vssrlni.bu.h vr7, vr7, 0
vstelm.d vr7, t0, 0, 0
addi.d t0, t0, 8
addi.d t1, t1, 16
addi.d t3, t3, 16
addi.d t2, t2, -8
bne zero, t2, .LSGRMIX_W
beq t4, zero, .LSGRMIX_W8
vld vr0, t0, 0
vld vr1, t1, 0
vld vr10, t3, 0
vsllwil.hu.bu vr2, vr0, 4 // u 8 h
vsllwil.wu.hu vr4, vr2, 0 // p
vexth.wu.hu vr5, vr2 // p
vslli.w vr6, vr4, 7
vslli.w vr7, vr5, 7
vsllwil.w.h vr8, vr1, 0 // dst
vexth.w.h vr9, vr1 // dst
vsub.w vr8, vr8, vr4
vsub.w vr9, vr9, vr5
vmadd.w vr6, vr8, vr3 // v 0 - 3
vmadd.w vr7, vr9, vr3 // v 4 - 7
vsllwil.w.h vr11, vr10, 0 // dst1
vexth.w.h vr12, vr10 // dst1
vsub.w vr11, vr11, vr4
vsub.w vr12, vr12, vr5
vmadd.w vr6, vr11, vr13
vmadd.w vr7, vr12, vr13
vssrarni.hu.w vr7, vr6, 11
vssrlni.bu.h vr7, vr7, 0
.LSGRMIX_ST:
vstelm.b vr7, t0, 0, 0
addi.d t0, t0, 1
vbsrl.v vr7, vr7, 1
addi.w t4, t4, -1
bnez t4, .LSGRMIX_ST
.LSGRMIX_W8:
addi.w a7, a7, -1
add.d a0, a0, a1
addi.d a2, a2, (FILTER_OUT_STRIDE<<1)
addi.d a3, a3, (FILTER_OUT_STRIDE<<1)
bnez a7, .LSGRMIX_H
b .LSGR_MIX_END
.LSGRMIX_REM:
andi t4, a6, 0x7
vld vr0, a0, 0
vld vr1, a2, 0
vld vr10, a3, 0
vsllwil.hu.bu vr2, vr0, 4 // u 8 h
vsllwil.wu.hu vr4, vr2, 0 // p
vexth.wu.hu vr5, vr2 // p
vslli.w vr6, vr4, 7
vslli.w vr7, vr5, 7
vsllwil.w.h vr8, vr1, 0 // dst
vexth.w.h vr9, vr1 // dst
vsub.w vr8, vr8, vr4
vsub.w vr9, vr9, vr5
vmadd.w vr6, vr8, vr3 // v 0 - 3
vmadd.w vr7, vr9, vr3 // v 4 - 7
vsllwil.w.h vr11, vr10, 0 // dst1
vexth.w.h vr12, vr10 // dst1
vsub.w vr11, vr11, vr4
vsub.w vr12, vr12, vr5
vmadd.w vr6, vr11, vr13
vmadd.w vr7, vr12, vr13
vssrarni.hu.w vr7, vr6, 11
vssrlni.bu.h vr7, vr7, 0
addi.d t0, a0, 0
.LSGRMIX_REM_ST:
vstelm.b vr7, t0, 0, 0
addi.d t0, t0, 1
vbsrl.v vr7, vr7, 1
addi.w t4, t4, -1
bnez t4, .LSGRMIX_REM_ST
addi.w a7, a7, -1
add.d a0, a0, a1
addi.d a2, a2, (FILTER_OUT_STRIDE<<1)
addi.d a3, a3, (FILTER_OUT_STRIDE<<1)
bnez a7, .LSGRMIX_REM
.LSGR_MIX_END:
endfunc
.macro MADD_HU_BU_LASX in0, in1, out0, out1
xvsllwil.hu.bu xr12, \in0, 0
xvexth.hu.bu xr13, \in0
xvmadd.h \out0, xr12, \in1
xvmadd.h \out1, xr13, \in1
.endm
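// MADD_HU_BU_LASX above widens the unsigned bytes of \in0 to halfwords
// (low half via xvsllwil, high half via xvexth) and accumulates
// \in0 * \in1 into \out0 and \out1.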
const wiener_shuf_lasx
.byte 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18
.byte 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18
endconst
function wiener_filter_h_8bpc_lasx
addi.d sp, sp, -40
fst.d f24, sp, 0
fst.d f25, sp, 8
fst.d f26, sp, 16
fst.d f27, sp, 24
fst.d f28, sp, 32
li.w t7, 1<<14 // clip_limit
la.local t1, wiener_shuf_lasx
xvld xr4, t1, 0
vld vr27, a2, 0 // filter[0][k]
xvpermi.q xr14, xr27, 0b00000000
xvrepl128vei.h xr21, xr14, 0
xvrepl128vei.h xr22, xr14, 1
xvrepl128vei.h xr23, xr14, 2
xvrepl128vei.h xr24, xr14, 3
xvrepl128vei.h xr25, xr14, 4
xvrepl128vei.h xr26, xr14, 5
xvrepl128vei.h xr27, xr14, 6
xvreplgr2vr.w xr0, t7
.WIENER_FILTER_H_H_LASX:
addi.w a4, a4, -1 // h
addi.w t0, a3, 0 // w
addi.d t1, a1, 0 // tmp_ptr
addi.d t2, a0, 0 // hor_ptr
.WIENER_FILTER_H_W_LASX:
addi.w t0, t0, -32
xvld xr5, t1, 0
xvld xr13, t1, 16
xvsubi.bu xr14, xr4, 2
xvsubi.bu xr15, xr4, 1
xvshuf.b xr6, xr13, xr5, xr14 // 1 ... 8, 9 ... 16
xvshuf.b xr7, xr13, xr5, xr15 // 2 ... 9, 10 ... 17
xvshuf.b xr8, xr13, xr5, xr4 // 3 ... 10, 11 ... 18
xvaddi.bu xr14, xr4, 1
xvaddi.bu xr15, xr4, 2
xvshuf.b xr9, xr13, xr5, xr14 // 4 ... 11, 12 ... 19
xvshuf.b xr10, xr13, xr5, xr15 // 5 ... 12, 13 ... 20
xvaddi.bu xr14, xr4, 3
xvshuf.b xr11, xr13, xr5, xr14 // 6 ... 13, 14 ... 21
xvsllwil.hu.bu xr15, xr8, 0 // 3 4 5 6 7 8 9 10
xvexth.hu.bu xr16, xr8 // 11 12 13 14 15 16 17 18
xvsllwil.wu.hu xr17, xr15, 7 // 3 4 5 6
xvexth.wu.hu xr18, xr15 // 7 8 9 10
xvsllwil.wu.hu xr19, xr16, 7 // 11 12 13 14
xvexth.wu.hu xr20, xr16 // 15 16 17 18
xvslli.w xr18, xr18, 7
xvslli.w xr20, xr20, 7
xvxor.v xr15, xr15, xr15
xvxor.v xr14, xr14, xr14
MADD_HU_BU_LASX xr5, xr21, xr14, xr15
MADD_HU_BU_LASX xr6, xr22, xr14, xr15
MADD_HU_BU_LASX xr7, xr23, xr14, xr15
MADD_HU_BU_LASX xr8, xr24, xr14, xr15
MADD_HU_BU_LASX xr9, xr25, xr14, xr15
MADD_HU_BU_LASX xr10, xr26, xr14, xr15
MADD_HU_BU_LASX xr11, xr27, xr14, xr15
xvsllwil.w.h xr5, xr14, 0 // 0 1 2 3
xvexth.w.h xr6, xr14 // 4 5 6 7
xvsllwil.w.h xr7, xr15, 0 // 8 9 10 11
xvexth.w.h xr8, xr15 // 12 13 14 15
xvadd.w xr17, xr17, xr5
xvadd.w xr18, xr18, xr6
xvadd.w xr19, xr19, xr7
xvadd.w xr20, xr20, xr8
xvadd.w xr17, xr17, xr0
xvadd.w xr18, xr18, xr0
xvadd.w xr19, xr19, xr0
xvadd.w xr20, xr20, xr0
xvsrli.w xr1, xr0, 1
xvsubi.wu xr1, xr1, 1
xvxor.v xr3, xr3, xr3
xvsrari.w xr17, xr17, 3
xvsrari.w xr18, xr18, 3
xvsrari.w xr19, xr19, 3
xvsrari.w xr20, xr20, 3
xvclip.w xr17, xr17, xr3, xr1
xvclip.w xr18, xr18, xr3, xr1
xvclip.w xr19, xr19, xr3, xr1
xvclip.w xr20, xr20, xr3, xr1
xvor.v xr5, xr17, xr17
xvor.v xr6, xr19, xr19
xvpermi.q xr17, xr18, 0b00000010
xvpermi.q xr19, xr20, 0b00000010
xvst xr17, t2, 0
xvst xr19, t2, 32
xvpermi.q xr18, xr5, 0b00110001
xvpermi.q xr20, xr6, 0b00110001
xvst xr18, t2, 64
xvst xr20, t2, 96
addi.d t1, t1, 32
addi.d t2, t2, 128
blt zero, t0, .WIENER_FILTER_H_W_LASX
addi.d a1, a1, REST_UNIT_STRIDE
addi.d a0, a0, (REST_UNIT_STRIDE << 2)
bnez a4, .WIENER_FILTER_H_H_LASX
fld.d f24, sp, 0
fld.d f25, sp, 8
fld.d f26, sp, 16
fld.d f27, sp, 24
fld.d f28, sp, 32
addi.d sp, sp, 40
endfunc
.macro APPLY_FILTER_LASX in0, in1, in2
alsl.d t7, \in0, \in1, 2
xvld xr10, t7, 0
xvld xr12, t7, 32
xvmadd.w xr14, xr10, \in2
xvmadd.w xr16, xr12, \in2
.endm
.macro wiener_filter_v_8bpc_core_lasx
xvreplgr2vr.w xr14, t6
xvreplgr2vr.w xr16, t6
addi.w t7, t2, 0 // j + index k
mul.w t7, t7, t8 // (j + index) * REST_UNIT_STRIDE
add.w t7, t7, t4 // (j + index) * REST_UNIT_STRIDE + i
APPLY_FILTER_LASX t7, a2, xr2
APPLY_FILTER_LASX t8, t7, xr3
APPLY_FILTER_LASX t8, t7, xr4
APPLY_FILTER_LASX t8, t7, xr5
APPLY_FILTER_LASX t8, t7, xr6
APPLY_FILTER_LASX t8, t7, xr7
APPLY_FILTER_LASX t8, t7, xr8
xvssrarni.hu.w xr16, xr14, 11
xvpermi.d xr17, xr16, 0b11011000
xvssrlni.bu.h xr17, xr17, 0
xvpermi.d xr17, xr17, 0b00001000
.endm
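// wiener_filter_v_8bpc_core_lasx above seeds two accumulators with the
// -(1 << 18) bias held in t6, accumulates the seven vertical Wiener taps
// over rows j..j+6 of the intermediate buffer, then rounds by 11 and
// narrows the 16 results to bytes in vr17.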
function wiener_filter_v_8bpc_lasx
li.w t6, -(1 << 18)
li.w t8, REST_UNIT_STRIDE
ld.h t0, a3, 0
ld.h t1, a3, 2
xvreplgr2vr.w xr2, t0
xvreplgr2vr.w xr3, t1
ld.h t0, a3, 4
ld.h t1, a3, 6
xvreplgr2vr.w xr4, t0
xvreplgr2vr.w xr5, t1
ld.h t0, a3, 8
ld.h t1, a3, 10
xvreplgr2vr.w xr6, t0
xvreplgr2vr.w xr7, t1
ld.h t0, a3, 12
xvreplgr2vr.w xr8, t0
andi t1, a4, 0xf
sub.w t0, a4, t1 // w-w%16
or t2, zero, zero // j
or t4, zero, zero
beqz t0, .WIENER_FILTER_V_W_LT16_LASX
.WIENER_FILTER_V_H_LASX:
andi t1, a4, 0xf
add.d t3, zero, a0 // p
or t4, zero, zero // i
.WIENER_FILTER_V_W_LASX:
wiener_filter_v_8bpc_core_lasx
mul.w t5, t2, a1 // j * stride
add.w t5, t5, t4 // j * stride + i
add.d t3, a0, t5
addi.w t4, t4, 16
vst vr17, t3, 0
bne t0, t4, .WIENER_FILTER_V_W_LASX
beqz t1, .WIENER_FILTER_V_W_EQ16_LASX
wiener_filter_v_8bpc_core_lsx
addi.d t3, t3, 16
andi t1, a4, 0xf
.WIENER_FILTER_V_ST_REM_LASX:
vstelm.b vr17, t3, 0, 0
vbsrl.v vr17, vr17, 1
addi.d t3, t3, 1
addi.w t1, t1, -1
bnez t1, .WIENER_FILTER_V_ST_REM_LASX
.WIENER_FILTER_V_W_EQ16_LASX:
addi.w t2, t2, 1
blt t2, a5, .WIENER_FILTER_V_H_LASX
b .WIENER_FILTER_V_LASX_END
.WIENER_FILTER_V_W_LT16_LASX:
andi t1, a4, 0xf
add.d t3, zero, a0
wiener_filter_v_8bpc_core_lsx
mul.w t5, t2, a1 // j * stride
add.d t3, a0, t5
.WIENER_FILTER_V_ST_REM_1_LASX:
vstelm.b vr17, t3, 0, 0
vbsrl.v vr17, vr17, 1
addi.d t3, t3, 1
addi.w t1, t1, -1
bnez t1, .WIENER_FILTER_V_ST_REM_1_LASX
addi.w t2, t2, 1
blt t2, a5, .WIENER_FILTER_V_W_LT16_LASX
.WIENER_FILTER_V_LASX_END:
endfunc
function boxsum3_sgf_h_8bpc_lasx
addi.d a0, a0, (REST_UNIT_STRIDE<<2)+12 // AA
//addi.d a0, a0, 12 // AA
addi.d a1, a1, (REST_UNIT_STRIDE<<1)+6 // BB
//addi.d a1, a1, 6 // BB
la.local t8, dav1d_sgr_x_by_x
li.w t6, 455
xvreplgr2vr.w xr20, t6
li.w t6, 255
xvreplgr2vr.w xr22, t6
xvaddi.wu xr21, xr22, 1 // 256
xvreplgr2vr.w xr6, a4
xvldi xr19, 0x809
addi.w a2, a2, 2 // w + 2
addi.w a3, a3, 2 // h + 2
.LBS3SGF_H_H_LASX:
addi.w t2, a2, 0
addi.d t0, a0, -4
addi.d t1, a1, -2
.LBS3SGF_H_W_LASX:
addi.w t2, t2, -16
xvld xr0, t0, 0 // AA[i]
xvld xr1, t0, 32
xvld xr2, t1, 0 // BB[i]
xvmul.w xr4, xr0, xr19 // a * n
xvmul.w xr5, xr1, xr19
vext2xv.w.h xr9, xr2
xvpermi.q xr10, xr2, 0b00000001
vext2xv.w.h xr10, xr10
xvmsub.w xr4, xr9, xr9 // p
xvmsub.w xr5, xr10, xr10
xvmaxi.w xr4, xr4, 0
xvmaxi.w xr5, xr5, 0
xvmul.w xr4, xr4, xr6 // p * s
xvmul.w xr5, xr5, xr6
xvsrlri.w xr4, xr4, 20
xvsrlri.w xr5, xr5, 20
xvmin.w xr4, xr4, xr22
xvmin.w xr5, xr5, xr22
vpickve2gr.w t6, vr4, 0
ldx.bu t7, t8, t6
vinsgr2vr.w vr7, t7, 0
vpickve2gr.w t6, vr4, 1
ldx.bu t7, t8, t6
vinsgr2vr.w vr7, t7, 1
vpickve2gr.w t6, vr4, 2
ldx.bu t7, t8, t6
vinsgr2vr.w vr7, t7, 2
vpickve2gr.w t6, vr4, 3
ldx.bu t7, t8, t6
vinsgr2vr.w vr7, t7, 3
xvpickve2gr.w t6, xr4, 4
ldx.bu t7, t8, t6
xvinsgr2vr.w xr7, t7, 4
xvpickve2gr.w t6, xr4, 5
ldx.bu t7, t8, t6
xvinsgr2vr.w xr7, t7, 5
xvpickve2gr.w t6, xr4, 6
ldx.bu t7, t8, t6
xvinsgr2vr.w xr7, t7, 6
xvpickve2gr.w t6, xr4, 7
ldx.bu t7, t8, t6
xvinsgr2vr.w xr7, t7, 7 // x
vpickve2gr.w t6, vr5, 0
ldx.bu t7, t8, t6
vinsgr2vr.w vr8, t7, 0
vpickve2gr.w t6, vr5, 1
ldx.bu t7, t8, t6
vinsgr2vr.w vr8, t7, 1
vpickve2gr.w t6, vr5, 2
ldx.bu t7, t8, t6
vinsgr2vr.w vr8, t7, 2
vpickve2gr.w t6, vr5, 3
ldx.bu t7, t8, t6
vinsgr2vr.w vr8, t7, 3
xvpickve2gr.w t6, xr5, 4
ldx.bu t7, t8, t6
xvinsgr2vr.w xr8, t7, 4
xvpickve2gr.w t6, xr5, 5
ldx.bu t7, t8, t6
xvinsgr2vr.w xr8, t7, 5
xvpickve2gr.w t6, xr5, 6
ldx.bu t7, t8, t6
xvinsgr2vr.w xr8, t7, 6
xvpickve2gr.w t6, xr5, 7
ldx.bu t7, t8, t6
xvinsgr2vr.w xr8, t7, 7 // x
xvmul.w xr9, xr7, xr9 // x * BB[i]
xvmul.w xr10, xr8, xr10
xvmul.w xr9, xr9, xr20 // x * BB[i] * sgr_one_by_x
xvmul.w xr10, xr10, xr20
xvsrlri.w xr9, xr9, 12
xvsrlri.w xr10, xr10, 12
xvsub.w xr7, xr21, xr7
xvsub.w xr8, xr21, xr8
xvpickev.h xr12, xr8, xr7
xvpermi.d xr11, xr12, 0b11011000
xvst xr9, t0, 0
xvst xr10, t0, 32
xvst xr11, t1, 0
addi.d t0, t0, 64
addi.d t1, t1, 32
blt zero, t2, .LBS3SGF_H_W_LASX
addi.d a0, a0, REST_UNIT_STRIDE<<2
addi.d a1, a1, REST_UNIT_STRIDE<<1
addi.w a3, a3, -1
bnez a3, .LBS3SGF_H_H_LASX
endfunc
function boxsum3_h_8bpc_lasx
addi.d a2, a2, REST_UNIT_STRIDE
li.w t0, 1
addi.w a3, a3, -2
addi.w a4, a4, -4
.LBS3_H_H_LASX:
alsl.d t1, t0, a1, 1 // sum_v *sum_v = sum + x
alsl.d t2, t0, a0, 2 // sumsq_v *sumsq_v = sumsq + x
add.d t3, t0, a2 // s
addi.w t5, a3, 0
.LBS3_H_W_LASX:
xvld xr0, t3, 0
xvld xr1, t3, REST_UNIT_STRIDE
xvld xr2, t3, (REST_UNIT_STRIDE<<1)
xvilvl.b xr3, xr1, xr0
xvhaddw.hu.bu xr4, xr3, xr3
xvilvh.b xr5, xr1, xr0
xvhaddw.hu.bu xr6, xr5, xr5
xvsllwil.hu.bu xr7, xr2, 0
xvexth.hu.bu xr8, xr2
// sum_v
xvadd.h xr4, xr4, xr7 // 0 2
xvadd.h xr6, xr6, xr8 // 1 3
xvor.v xr9, xr4, xr4
xvpermi.q xr4, xr6, 0b00000010
xvpermi.q xr6, xr9, 0b00110001
xvst xr4, t1, REST_UNIT_STRIDE<<1
xvst xr6, t1, (REST_UNIT_STRIDE<<1)+32
addi.d t1, t1, 64
// sumsq
xvmulwev.h.bu xr9, xr3, xr3
xvmulwod.h.bu xr10, xr3, xr3
xvmulwev.h.bu xr11, xr5, xr5
xvmulwod.h.bu xr12, xr5, xr5
xvaddwev.w.hu xr13, xr10, xr9
xvaddwod.w.hu xr14, xr10, xr9
xvaddwev.w.hu xr15, xr12, xr11
xvaddwod.w.hu xr16, xr12, xr11
xvmaddwev.w.hu xr13, xr7, xr7
xvmaddwod.w.hu xr14, xr7, xr7
xvmaddwev.w.hu xr15, xr8, xr8
xvmaddwod.w.hu xr16, xr8, xr8
xvilvl.w xr9, xr14, xr13
xvilvh.w xr10, xr14, xr13
xvilvl.w xr11, xr16, xr15
xvilvh.w xr12, xr16, xr15
xvor.v xr7, xr9, xr9
xvor.v xr8, xr11, xr11
xvpermi.q xr9, xr10, 0b00000010
xvpermi.q xr10, xr7, 0b00110001
xvpermi.q xr11, xr12, 0b00000010
xvpermi.q xr12, xr8, 0b00110001
xvst xr9, t2, REST_UNIT_STRIDE<<2
xvst xr11, t2, (REST_UNIT_STRIDE<<2)+32
xvst xr10, t2, (REST_UNIT_STRIDE<<2)+64
xvst xr12, t2, (REST_UNIT_STRIDE<<2)+96
addi.d t2, t2, 128
addi.w t5, t5, -32
addi.d t3, t3, 32
blt zero, t5, .LBS3_H_W_LASX
addi.d a0, a0, REST_UNIT_STRIDE<<2
addi.d a1, a1, REST_UNIT_STRIDE<<1
addi.d a2, a2, REST_UNIT_STRIDE
addi.d a4, a4, -1
blt zero, a4, .LBS3_H_H_LASX
endfunc
/* third_party/dav1d/src/loongarch/mc.S */
/*
* Copyright © 2023, VideoLAN and dav1d authors
* Copyright © 2023, Loongson Technology Corporation Limited
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/loongarch/loongson_asm.S"
/*
static void warp_affine_8x8_c(pixel *dst, const ptrdiff_t dst_stride,
const pixel *src, const ptrdiff_t src_stride,
const int16_t *const abcd, int mx, int my
HIGHBD_DECL_SUFFIX)
*/
.macro vld_filter_row dst, src, inc
addi.w t3, \src, 512
srai.w t3, t3, 10
add.w \src, \src, \inc
addi.w t3, t3, 64
slli.w t3, t3, 3
fldx.d \dst, t4, t3
.endm
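// vld_filter_row above fetches one 8-byte row of dav1d_mc_warp_filter:
// the row index is ((\src + 512) >> 10) + 64, computed before \src is
// advanced by \inc for the next position.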
.macro warp_filter_horz_lsx
addi.w t5, a5, 0
vld vr10, a2, 0
add.d a2, a2, a3
vld_filter_row f0, t5, t0
vld_filter_row f1, t5, t0
vld_filter_row f2, t5, t0
vld_filter_row f3, t5, t0
vld_filter_row f4, t5, t0
vld_filter_row f5, t5, t0
vld_filter_row f6, t5, t0
vld_filter_row f7, t5, t0
vxor.v vr10, vr10, vr20
vbsrl.v vr8, vr10, 1
vbsrl.v vr9, vr10, 2
vilvl.d vr8, vr8, vr10
vilvl.d vr0, vr1, vr0
vmulwev.h.b vr11, vr8, vr0
vmulwod.h.b vr12, vr8, vr0
vbsrl.v vr8, vr10, 3
vbsrl.v vr19, vr10, 4
vilvl.d vr8, vr8, vr9
vilvl.d vr2, vr3, vr2
vmulwev.h.b vr13, vr8, vr2
vmulwod.h.b vr14, vr8, vr2
vbsrl.v vr8, vr10, 5
vbsrl.v vr9, vr10, 6
vilvl.d vr8, vr8, vr19
vilvl.d vr4, vr5, vr4
vmulwev.h.b vr15, vr8, vr4
vmulwod.h.b vr16, vr8, vr4
vbsrl.v vr8, vr10, 7
vilvl.d vr8, vr8, vr9
vilvl.d vr6, vr7, vr6
vmulwev.h.b vr17, vr8, vr6
vmulwod.h.b vr18, vr8, vr6
vadd.h vr11, vr11, vr12
vadd.h vr13, vr13, vr14
vadd.h vr15, vr15, vr16
vadd.h vr17, vr17, vr18
vpickev.h vr12, vr13, vr11
vpickod.h vr14, vr13, vr11
vpickev.h vr16, vr17, vr15
vpickod.h vr18, vr17, vr15
vadd.h vr11, vr12, vr14
vadd.h vr15, vr16, vr18
vpickev.h vr12, vr15, vr11
vpickod.h vr14, vr15, vr11
vadd.h vr11, vr12, vr14
add.d a5, a5, t1
.endm
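// warp_filter_horz_lsx above applies the eight 8-tap horizontal warp
// filters to one source row: the pixels are biased by 128 (vxor with vr20)
// so they can be multiplied as signed bytes, the eight horizontal sums end
// up in vr11, and a5 (mx) is advanced by abcd[1] for the next row.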
.macro transpose_8x8b_extend_lsx in0, in1, in2, in3, in4, in5, in6, in7
vilvl.b \in0, \in1, \in0
vilvl.b \in2, \in3, \in2
vilvl.b \in4, \in5, \in4
vilvl.b \in6, \in7, \in6
vpackev.h \in1, \in2, \in0
vpackod.h \in3, \in2, \in0
vpackev.h \in5, \in6, \in4
vpackod.h \in7, \in6, \in4
vpackev.w \in0, \in5, \in1
vpackod.w \in2, \in5, \in1
vpackev.w \in1, \in7, \in3
vpackod.w \in3, \in7, \in3
vexth.h.b \in4, \in0
vsllwil.h.b \in0, \in0, 0
vexth.h.b \in5, \in1
vsllwil.h.b \in1, \in1, 0
vexth.h.b \in6, \in2
vsllwil.h.b \in2, \in2, 0
vexth.h.b \in7, \in3
vsllwil.h.b \in3, \in3, 0
.endm
.macro warp t, shift
function warp_affine_8x8\t\()_8bpc_lsx
addi.d sp, sp, -64
fst.d f24, sp, 0
fst.d f25, sp, 8
fst.d f26, sp, 16
fst.d f27, sp, 24
fst.d f28, sp, 32
fst.d f29, sp, 40
fst.d f30, sp, 48
fst.d f31, sp, 56
ld.h t0, a4, 0
ld.h t1, a4, 2
ld.h t2, a4, 4
ld.h a4, a4, 6
li.d t7, 8
alsl.w t3, a3, a3, 1
sub.d a2, a2, t3
addi.d a2, a2, -3
la.local t4, dav1d_mc_warp_filter
.ifnb \t
slli.d a1, a1, 1
.endif
li.w t3, 128
vreplgr2vr.b vr20, t3
.ifb \t
vreplgr2vr.h vr21, t3
.else
li.w t3, 2048
vreplgr2vr.h vr21, t3
.endif
warp_filter_horz_lsx
vsrari.h vr24, vr11, 3
warp_filter_horz_lsx
vsrari.h vr25, vr11, 3
warp_filter_horz_lsx
vsrari.h vr26, vr11, 3
warp_filter_horz_lsx
vsrari.h vr27, vr11, 3
warp_filter_horz_lsx
vsrari.h vr28, vr11, 3
warp_filter_horz_lsx
vsrari.h vr29, vr11, 3
warp_filter_horz_lsx
vsrari.h vr30, vr11, 3
1:
addi.d t6, a6, 0
warp_filter_horz_lsx
vsrari.h vr31, vr11, 3
vld_filter_row f0, t6, t2
vld_filter_row f1, t6, t2
vld_filter_row f2, t6, t2
vld_filter_row f3, t6, t2
vld_filter_row f4, t6, t2
vld_filter_row f5, t6, t2
vld_filter_row f6, t6, t2
vld_filter_row f7, t6, t2
transpose_8x8b_extend_lsx vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7
vmulwev.w.h vr16, vr24, vr0
vmulwod.w.h vr17, vr24, vr0
vmaddwev.w.h vr16, vr25, vr1
vmaddwod.w.h vr17, vr25, vr1
vmaddwev.w.h vr16, vr26, vr2
vmaddwod.w.h vr17, vr26, vr2
vmaddwev.w.h vr16, vr27, vr3
vmaddwod.w.h vr17, vr27, vr3
vmaddwev.w.h vr16, vr28, vr4
vmaddwod.w.h vr17, vr28, vr4
vmaddwev.w.h vr16, vr29, vr5
vmaddwod.w.h vr17, vr29, vr5
vmaddwev.w.h vr16, vr30, vr6
vmaddwod.w.h vr17, vr30, vr6
vmaddwev.w.h vr16, vr31, vr7
vmaddwod.w.h vr17, vr31, vr7
vssrarni.h.w vr16, vr16, \shift
vssrarni.h.w vr17, vr17, \shift
vilvl.h vr16, vr17, vr16
vadd.h vr16, vr16, vr21
vor.v vr24, vr25, vr25
vor.v vr25, vr26, vr26
vor.v vr26, vr27, vr27
vor.v vr27, vr28, vr28
vor.v vr28, vr29, vr29
vor.v vr29, vr30, vr30
vor.v vr30, vr31, vr31
.ifb \t
vssrarni.bu.h vr16, vr16, 0
.endif
addi.d t7, t7, -1
.ifnb \t
vst vr16, a0, 0
.else
vstelm.d vr16, a0, 0, 0
.endif
add.d a0, a1, a0
add.d a6, a6, a4
blt zero, t7, 1b
fld.d f24, sp, 0
fld.d f25, sp, 8
fld.d f26, sp, 16
fld.d f27, sp, 24
fld.d f28, sp, 32
fld.d f29, sp, 40
fld.d f30, sp, 48
fld.d f31, sp, 56
addi.d sp, sp, 64
endfunc
.endm
warp , 11
warp t, 7
.macro FILTER_WARP_RND_P_LASX in0, in1, in2, out0, out1, out2, out3
xvshuf.b xr2, \in0, \in0, \in2
addi.w t4, \in1, 512
srai.w t4, t4, 10
addi.w t4, t4, 64
slli.w t4, t4, 3
vldx vr3, t5, t4
add.w t3, t3, t0 // tmx += abcd[0]
addi.w t4, t3, 512
srai.w t4, t4, 10
addi.w t4, t4, 64
slli.w t4, t4, 3
vldx vr4, t5, t4
add.w t3, t3, t0 // tmx += abcd[0]
addi.w t4, t3, 512
srai.w t4, t4, 10
addi.w t4, t4, 64
slli.w t4, t4, 3
vldx vr5, t5, t4
add.w t3, t3, t0 // tmx += abcd[0]
addi.w t4, t3, 512
srai.w t4, t4, 10
addi.w t4, t4, 64
slli.w t4, t4, 3
vldx vr6, t5, t4
add.w t3, t3, t0 // tmx += abcd[0]
xvinsve0.d xr3, xr5, 1
xvinsve0.d xr3, xr4, 2
xvinsve0.d xr3, xr6, 3
xvmulwev.h.bu.b xr4, xr2, xr3
xvmulwod.h.bu.b xr5, xr2, xr3
xvilvl.d xr2, xr5, xr4
xvilvh.d xr3, xr5, xr4
xvhaddw.w.h xr2, xr2, xr2
xvhaddw.w.h xr3, xr3, xr3
xvhaddw.d.w xr2, xr2, xr2
xvhaddw.d.w xr3, xr3, xr3
xvhaddw.q.d xr2, xr2, xr2
xvhaddw.q.d xr3, xr3, xr3
xvextrins.w \out0, xr2, \out1
xvextrins.w \out2, xr3, \out3
.endm
.macro FILTER_WARP_CLIP_LASX in0, in1, in2, out0, out1
add.w \in0, \in0, \in1
addi.w t6, \in0, 512
srai.w t6, t6, 10
addi.w t6, t6, 64
slli.w t6, t6, 3
fldx.d f1, t5, t6
add.w t2, t2, t7
addi.w t6, t2, 512
srai.w t6, t6, 10
addi.w t6, t6, 64
slli.w t6, t6, 3
fldx.d f2, t5, t6
vilvl.d vr0, vr2, vr1
vext2xv.h.b xr0, xr0
xvmulwev.w.h xr3, \in2, xr0
xvmaddwod.w.h xr3, \in2, xr0
xvhaddw.d.w xr3, xr3, xr3
xvhaddw.q.d xr3, xr3, xr3
xvextrins.w \out0, xr3, \out1
.endm
const shuf0
.byte 0, 1, 2, 3, 4, 5, 6, 7, 2, 3, 4, 5, 6, 7, 8, 9
.byte 1, 2, 3, 4, 5, 6, 7, 8, 3, 4, 5, 6, 7, 8, 9, 10
endconst
const warp_sh
.rept 2
.byte 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17
.endr
.rept 2
.byte 18, 19, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
.endr
endconst
.macro warp_lasx t, shift
function warp_affine_8x8\t\()_8bpc_lasx
addi.d sp, sp, -16
ld.h t0, a4, 0 // abcd[0]
ld.h t1, a4, 2 // abcd[1]
fst.d f24, sp, 0
fst.d f25, sp, 8
alsl.w t2, a3, a3, 1
addi.w t3, a5, 0
la.local t4, warp_sh
la.local t5, dav1d_mc_warp_filter
sub.d a2, a2, t2
addi.d a2, a2, -3
vld vr0, a2, 0
xvld xr24, t4, 0
xvld xr25, t4, 32
la.local t2, shuf0
xvld xr1, t2, 0
xvpermi.q xr0, xr0, 0x00
xvaddi.bu xr9, xr1, 4
FILTER_WARP_RND_P_LASX xr0, a5, xr1, xr7, 0x00, xr8, 0x00
FILTER_WARP_RND_P_LASX xr0, t3, xr9, xr10, 0x00, xr11, 0x00
add.w a5, a5, t1
or t3, a5, a5
add.d a2, a2, a3
vld vr0, a2, 0
xvpermi.q xr0, xr0, 0x00
FILTER_WARP_RND_P_LASX xr0, a5, xr1, xr7, 0x10, xr8, 0x10
FILTER_WARP_RND_P_LASX xr0, t3, xr9, xr10, 0x10, xr11, 0x10
add.w a5, a5, t1
or t3, a5, a5
add.d a2, a2, a3
vld vr0, a2, 0
xvpermi.q xr0, xr0, 0x00
FILTER_WARP_RND_P_LASX xr0, a5, xr1, xr7, 0x20, xr8, 0x20
FILTER_WARP_RND_P_LASX xr0, t3, xr9, xr10, 0x20, xr11, 0x20
add.w a5, a5, t1
or t3, a5, a5
add.d a2, a2, a3
vld vr0, a2, 0
xvpermi.q xr0, xr0, 0x00
FILTER_WARP_RND_P_LASX xr0, a5, xr1, xr7, 0x30, xr8, 0x30
FILTER_WARP_RND_P_LASX xr0, t3, xr9, xr10, 0x30, xr11, 0x30
add.w a5, a5, t1
or t3, a5, a5
add.d a2, a2, a3
vld vr0, a2, 0
xvpermi.q xr0, xr0, 0x00
FILTER_WARP_RND_P_LASX xr0, a5, xr1, xr12, 0x00, xr13, 0x00
FILTER_WARP_RND_P_LASX xr0, t3, xr9, xr14, 0x00, xr15, 0x00
add.w a5, a5, t1
or t3, a5, a5
add.d a2, a2, a3
vld vr0, a2, 0
xvpermi.q xr0, xr0, 0x00
FILTER_WARP_RND_P_LASX xr0, a5, xr1, xr12, 0x10, xr13, 0x10
FILTER_WARP_RND_P_LASX xr0, t3, xr9, xr14, 0x10, xr15, 0x10
add.w a5, a5, t1
or t3, a5, a5
add.d a2, a2, a3
vld vr0, a2, 0
xvpermi.q xr0, xr0, 0x00
FILTER_WARP_RND_P_LASX xr0, a5, xr1, xr12, 0x20, xr13, 0x20
FILTER_WARP_RND_P_LASX xr0, t3, xr9, xr14, 0x20, xr15, 0x20
add.w a5, a5, t1
or t3, a5, a5
add.d a2, a2, a3
vld vr0, a2, 0
xvpermi.q xr0, xr0, 0x00
FILTER_WARP_RND_P_LASX xr0, a5, xr1, xr12, 0x30, xr13, 0x30
FILTER_WARP_RND_P_LASX xr0, t3, xr9, xr14, 0x30, xr15, 0x30
xvsrarni.h.w xr12, xr7, 3
xvsrarni.h.w xr13, xr8, 3
xvsrarni.h.w xr14, xr10, 3
xvsrarni.h.w xr15, xr11, 3
add.w a5, a5, t1
or t3, a5, a5
add.d a2, a2, a3
vld vr0, a2, 0
xvpermi.q xr0, xr0, 0x00
FILTER_WARP_RND_P_LASX xr0, a5, xr1, xr7, 0x00, xr8, 0x00
FILTER_WARP_RND_P_LASX xr0, t3, xr9, xr10, 0x00, xr11, 0x00
add.w a5, a5, t1
or t3, a5, a5
add.d a2, a2, a3
vld vr0, a2, 0
xvpermi.q xr0, xr0, 0x00
FILTER_WARP_RND_P_LASX xr0, a5, xr1, xr7, 0x10, xr8, 0x10
FILTER_WARP_RND_P_LASX xr0, t3, xr9, xr10, 0x10, xr11, 0x10
add.w a5, a5, t1
or t3, a5, a5
add.d a2, a2, a3
vld vr0, a2, 0
xvpermi.q xr0, xr0, 0x00
FILTER_WARP_RND_P_LASX xr0, a5, xr1, xr7, 0x20, xr8, 0x20
FILTER_WARP_RND_P_LASX xr0, t3, xr9, xr10, 0x20, xr11, 0x20
add.w a5, a5, t1
or t3, a5, a5
add.d a2, a2, a3
vld vr0, a2, 0
xvpermi.q xr0, xr0, 0x00
FILTER_WARP_RND_P_LASX xr0, a5, xr1, xr7, 0x30, xr8, 0x30
FILTER_WARP_RND_P_LASX xr0, t3, xr9, xr10, 0x30, xr11, 0x30
add.w a5, a5, t1
or t3, a5, a5
add.d a2, a2, a3
vld vr0, a2, 0
xvpermi.q xr0, xr0, 0x00
FILTER_WARP_RND_P_LASX xr0, a5, xr1, xr16, 0x00, xr17, 0x00
FILTER_WARP_RND_P_LASX xr0, t3, xr9, xr18, 0x00, xr19, 0x00
add.w a5, a5, t1
or t3, a5, a5
add.d a2, a2, a3
vld vr0, a2, 0
xvpermi.q xr0, xr0, 0x00
FILTER_WARP_RND_P_LASX xr0, a5, xr1, xr16, 0x10, xr17, 0x10
FILTER_WARP_RND_P_LASX xr0, t3, xr9, xr18, 0x10, xr19, 0x10
add.w a5, a5, t1
or t3, a5, a5
add.d a2, a2, a3
vld vr0, a2, 0
xvpermi.q xr0, xr0, 0x00
FILTER_WARP_RND_P_LASX xr0, a5, xr1, xr16, 0x20, xr17, 0x20
FILTER_WARP_RND_P_LASX xr0, t3, xr9, xr18, 0x20, xr19, 0x20
xvsrarni.h.w xr16, xr7, 3
xvsrarni.h.w xr17, xr8, 3
xvsrarni.h.w xr18, xr10, 3
xvsrarni.h.w xr19, xr11, 3
addi.w t2, a6, 0 // my
ld.h t7, a4, 4 // abcd[2]
ld.h t8, a4, 6 // abcd[3]
.ifnb \t
slli.d a1, a1, 1
.endif
// y = 0
FILTER_WARP_CLIP_LASX t2, zero, xr12, xr20, 0x00
FILTER_WARP_CLIP_LASX t2, t7, xr13, xr20, 0x10
FILTER_WARP_CLIP_LASX t2, t7, xr14, xr20, 0x20
FILTER_WARP_CLIP_LASX t2, t7, xr15, xr20, 0x30
xvshuf.b xr12, xr16, xr12, xr24
xvshuf.b xr13, xr17, xr13, xr24
xvshuf.b xr14, xr18, xr14, xr24
xvshuf.b xr15, xr19, xr15, xr24
xvextrins.h xr24, xr25, 0x70
add.w a6, a6, t8
addi.w t2, a6, 0
FILTER_WARP_CLIP_LASX t2, zero, xr12, xr21, 0x00
FILTER_WARP_CLIP_LASX t2, t7, xr13, xr21, 0x10
FILTER_WARP_CLIP_LASX t2, t7, xr14, xr21, 0x20
FILTER_WARP_CLIP_LASX t2, t7, xr15, xr21, 0x30
.ifnb \t
xvssrarni.h.w xr21, xr20, \shift
xvpermi.q xr22, xr21, 0x01
vilvl.h vr23, vr22, vr21
vilvh.h vr21, vr22, vr21
vst vr23, a0, 0
vstx vr21, a0, a1
.else
xvssrarni.hu.w xr21, xr20, \shift
xvssrlni.bu.h xr22, xr21, 0
xvpermi.q xr23, xr22, 0x01
vilvl.b vr21, vr23, vr22
fst.d f21, a0, 0
add.d a0, a0, a1
vstelm.d vr21, a0, 0, 1
.endif
xvaddi.bu xr25, xr25, 2
xvshuf.b xr12, xr16, xr12, xr24
xvshuf.b xr13, xr17, xr13, xr24
xvshuf.b xr14, xr18, xr14, xr24
xvshuf.b xr15, xr19, xr15, xr24
xvextrins.h xr24, xr25, 0x70
add.w a6, a6, t8
addi.w t2, a6, 0
FILTER_WARP_CLIP_LASX t2, zero, xr12, xr20, 0x00
FILTER_WARP_CLIP_LASX t2, t7, xr13, xr20, 0x10
FILTER_WARP_CLIP_LASX t2, t7, xr14, xr20, 0x20
FILTER_WARP_CLIP_LASX t2, t7, xr15, xr20, 0x30
xvaddi.bu xr25, xr25, 2
xvshuf.b xr12, xr16, xr12, xr24
xvshuf.b xr13, xr17, xr13, xr24
xvshuf.b xr14, xr18, xr14, xr24
xvshuf.b xr15, xr19, xr15, xr24
xvextrins.h xr24, xr25, 0x70
add.w a6, a6, t8
addi.w t2, a6, 0
FILTER_WARP_CLIP_LASX t2, zero, xr12, xr21, 0x00
FILTER_WARP_CLIP_LASX t2, t7, xr13, xr21, 0x10
FILTER_WARP_CLIP_LASX t2, t7, xr14, xr21, 0x20
FILTER_WARP_CLIP_LASX t2, t7, xr15, xr21, 0x30
.ifnb \t
xvssrarni.h.w xr21, xr20, \shift
alsl.d a0, a1, a0, 1
xvpermi.q xr22, xr21, 0x01
vilvl.h vr23, vr22, vr21
vilvh.h vr21, vr22, vr21
vst vr23, a0, 0
vstx vr21, a0, a1
.else
xvssrarni.hu.w xr21, xr20, 11
xvssrlni.bu.h xr22, xr21, 0
xvpermi.q xr23, xr22, 0x01
vilvl.b vr21, vr23, vr22
add.d a0, a0, a1
fst.d f21, a0, 0
add.d a0, a0, a1
vstelm.d vr21, a0, 0, 1
.endif
xvaddi.bu xr25, xr25, 2
xvshuf.b xr12, xr16, xr12, xr24
xvshuf.b xr13, xr17, xr13, xr24
xvshuf.b xr14, xr18, xr14, xr24
xvshuf.b xr15, xr19, xr15, xr24
xvextrins.h xr24, xr25, 0x70
add.w a6, a6, t8
addi.w t2, a6, 0
FILTER_WARP_CLIP_LASX t2, zero, xr12, xr20, 0x00
FILTER_WARP_CLIP_LASX t2, t7, xr13, xr20, 0x10
FILTER_WARP_CLIP_LASX t2, t7, xr14, xr20, 0x20
FILTER_WARP_CLIP_LASX t2, t7, xr15, xr20, 0x30
xvaddi.bu xr25, xr25, 2
xvshuf.b xr12, xr16, xr12, xr24
xvshuf.b xr13, xr17, xr13, xr24
xvshuf.b xr14, xr18, xr14, xr24
xvshuf.b xr15, xr19, xr15, xr24
xvextrins.h xr24, xr25, 0x70
add.w a6, a6, t8
addi.w t2, a6, 0
FILTER_WARP_CLIP_LASX t2, zero, xr12, xr21, 0x00
FILTER_WARP_CLIP_LASX t2, t7, xr13, xr21, 0x10
FILTER_WARP_CLIP_LASX t2, t7, xr14, xr21, 0x20
FILTER_WARP_CLIP_LASX t2, t7, xr15, xr21, 0x30
.ifnb \t
xvssrarni.h.w xr21, xr20, \shift
alsl.d a0, a1, a0, 1
xvpermi.q xr22, xr21, 0x01
vilvl.h vr23, vr22, vr21
vilvh.h vr21, vr22, vr21
vst vr23, a0, 0
vstx vr21, a0, a1
.else
xvssrarni.hu.w xr21, xr20, 11
xvssrlni.bu.h xr22, xr21, 0
xvpermi.q xr23, xr22, 0x01
vilvl.b vr21, vr23, vr22
add.d a0, a0, a1
fst.d f21, a0, 0
add.d a0, a0, a1
vstelm.d vr21, a0, 0, 1
.endif
xvaddi.bu xr25, xr25, 2
xvshuf.b xr12, xr16, xr12, xr24
xvshuf.b xr13, xr17, xr13, xr24
xvshuf.b xr14, xr18, xr14, xr24
xvshuf.b xr15, xr19, xr15, xr24
xvextrins.h xr24, xr25, 0x70
add.w a6, a6, t8
addi.w t2, a6, 0
FILTER_WARP_CLIP_LASX t2, zero, xr12, xr20, 0x00
FILTER_WARP_CLIP_LASX t2, t7, xr13, xr20, 0x10
FILTER_WARP_CLIP_LASX t2, t7, xr14, xr20, 0x20
FILTER_WARP_CLIP_LASX t2, t7, xr15, xr20, 0x30
xvshuf.b xr12, xr16, xr12, xr24
xvshuf.b xr13, xr17, xr13, xr24
xvshuf.b xr14, xr18, xr14, xr24
xvshuf.b xr15, xr19, xr15, xr24
add.w a6, a6, t8
addi.w t2, a6, 0
FILTER_WARP_CLIP_LASX t2, zero, xr12, xr21, 0x00
FILTER_WARP_CLIP_LASX t2, t7, xr13, xr21, 0x10
FILTER_WARP_CLIP_LASX t2, t7, xr14, xr21, 0x20
FILTER_WARP_CLIP_LASX t2, t7, xr15, xr21, 0x30
.ifnb \t
xvssrarni.h.w xr21, xr20, \shift
alsl.d a0, a1, a0, 1
xvpermi.q xr22, xr21, 0x01
vilvl.h vr23, vr22, vr21
vilvh.h vr21, vr22, vr21
vst vr23, a0, 0
vstx vr21, a0, a1
.else
xvssrarni.hu.w xr21, xr20, 11
xvssrlni.bu.h xr22, xr21, 0
xvpermi.q xr23, xr22, 0x01
vilvl.b vr21, vr23, vr22
add.d a0, a0, a1
fst.d f21, a0, 0
add.d a0, a0, a1
vstelm.d vr21, a0, 0, 1
.endif
fld.d f24, sp, 0
fld.d f25, sp, 8
addi.d sp, sp, 16
endfunc
.endm
warp_lasx , 11
warp_lasx t, 7
/*
static void w_avg_c(pixel *dst, const ptrdiff_t dst_stride,
const int16_t *tmp1, const int16_t *tmp2,
const int w, int h,
const int weight HIGHBD_DECL_SUFFIX)
*/
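/*
8bpc rounding sketch for the two averaging functions below, reconstructed
from the shift constants bpc_sh = 5 and bpcw_sh = 8 (an assumption;
iclip_u8 used informally as a clamp):

    avg:   dst[x] = iclip_u8((tmp1[x] + tmp2[x] + (1 << 4)) >> 5);
    w_avg: dst[x] = iclip_u8((tmp1[x] * weight
                              + tmp2[x] * (16 - weight) + (1 << 7)) >> 8);
*/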
#define bpc8_sh 5 // sh = intermediate_bits + 1
#define bpcw8_sh 8 // sh = intermediate_bits + 4
#define bpc_sh bpc8_sh
#define bpcw_sh bpcw8_sh
function avg_8bpc_lsx
addi.d t8, a0, 0
clz.w t0, a4
li.w t1, 24
sub.w t0, t0, t1
la.local t1, .AVG_LSX_JRTABLE
alsl.d t0, t0, t1, 1
ld.h t2, t0, 0 // The jump addresses are relative to AVG_LSX_JRTABLE
add.d t1, t1, t2 // Get absolute address
jirl $r0, t1, 0
.align 3
.AVG_LSX_JRTABLE:
.hword .AVG_W128_LSX - .AVG_LSX_JRTABLE
.hword .AVG_W64_LSX - .AVG_LSX_JRTABLE
.hword .AVG_W32_LSX - .AVG_LSX_JRTABLE
.hword .AVG_W16_LSX - .AVG_LSX_JRTABLE
.hword .AVG_W8_LSX - .AVG_LSX_JRTABLE
.hword .AVG_W4_LSX - .AVG_LSX_JRTABLE
.AVG_W4_LSX:
vld vr0, a2, 0
vld vr1, a3, 0
vadd.h vr2, vr0, vr1
vssrarni.bu.h vr3, vr2, bpc_sh
vstelm.w vr3, a0, 0, 0
add.d a0, a0, a1
vstelm.w vr3, a0, 0, 1
addi.w a5, a5, -2
addi.d a2, a2, 16
addi.d a3, a3, 16
add.d a0, a0, a1
blt zero, a5, .AVG_W4_LSX
b .AVG_END_LSX
.AVG_W8_LSX:
vld vr0, a2, 0
vld vr2, a2, 16
vld vr1, a3, 0
vld vr3, a3, 16
vadd.h vr4, vr0, vr1
vadd.h vr5, vr2, vr3
vssrarni.bu.h vr5, vr4, bpc_sh
addi.w a5, a5, -2
addi.d a2, a2, 32
vstelm.d vr5, a0, 0, 0
add.d a0, a0, a1
vstelm.d vr5, a0, 0, 1
addi.d a3, a3, 32
add.d a0, a0, a1
blt zero, a5, .AVG_W8_LSX
b .AVG_END_LSX
.AVG_W16_LSX:
vld vr0, a2, 0
vld vr2, a2, 16
vld vr1, a3, 0
vld vr3, a3, 16
vadd.h vr4, vr0, vr1
vadd.h vr5, vr2, vr3
vssrarni.bu.h vr5, vr4, bpc_sh
addi.w a5, a5, -1
addi.d a2, a2, 32
vst vr5, a0, 0
addi.d a3, a3, 32
add.d a0, a0, a1
blt zero, a5, .AVG_W16_LSX
b .AVG_END_LSX
.AVG_W32_LSX:
vld vr0, a2, 0
vld vr2, a2, 16
vld vr4, a2, 32
vld vr6, a2, 48
vld vr1, a3, 0
vld vr3, a3, 16
vld vr5, a3, 32
vld vr7, a3, 48
vadd.h vr0, vr0, vr1
vadd.h vr2, vr2, vr3
vadd.h vr4, vr4, vr5
vadd.h vr6, vr6, vr7
vssrarni.bu.h vr2, vr0, bpc_sh
vssrarni.bu.h vr6, vr4, bpc_sh
addi.w a5, a5, -1
addi.d a2, a2, 64
vst vr2, a0, 0
vst vr6, a0, 16
addi.d a3, a3, 64
add.d a0, a0, a1
blt zero, a5, .AVG_W32_LSX
b .AVG_END_LSX
.AVG_W64_LSX:
.rept 4
vld vr0, a2, 0
vld vr2, a2, 16
vld vr1, a3, 0
vld vr3, a3, 16
vadd.h vr0, vr0, vr1
vadd.h vr2, vr2, vr3
vssrarni.bu.h vr2, vr0, bpc_sh
addi.d a2, a2, 32
addi.d a3, a3, 32
vst vr2, a0, 0
addi.d a0, a0, 16
.endr
addi.w a5, a5, -1
add.d t8, t8, a1
add.d a0, t8, zero
blt zero, a5, .AVG_W64_LSX
b .AVG_END_LSX
.AVG_W128_LSX:
.rept 8
vld vr0, a2, 0
vld vr2, a2, 16
vld vr1, a3, 0
vld vr3, a3, 16
vadd.h vr0, vr0, vr1
vadd.h vr2, vr2, vr3
vssrarni.bu.h vr2, vr0, bpc_sh
addi.d a2, a2, 32
addi.d a3, a3, 32
vst vr2, a0, 0
addi.d a0, a0, 16
.endr
addi.w a5, a5, -1
add.d t8, t8, a1
add.d a0, t8, zero
blt zero, a5, .AVG_W128_LSX
.AVG_END_LSX:
endfunc
function avg_8bpc_lasx
clz.w t0, a4
li.w t1, 24
sub.w t0, t0, t1
la.local t1, .AVG_LASX_JRTABLE
alsl.d t0, t0, t1, 1
ld.h t2, t0, 0
add.d t1, t1, t2
jirl $r0, t1, 0
.align 3
.AVG_LASX_JRTABLE:
.hword .AVG_W128_LASX - .AVG_LASX_JRTABLE
.hword .AVG_W64_LASX - .AVG_LASX_JRTABLE
.hword .AVG_W32_LASX - .AVG_LASX_JRTABLE
.hword .AVG_W16_LASX - .AVG_LASX_JRTABLE
.hword .AVG_W8_LASX - .AVG_LASX_JRTABLE
.hword .AVG_W4_LASX - .AVG_LASX_JRTABLE
.AVG_W4_LASX:
vld vr0, a2, 0
vld vr1, a3, 0
vadd.h vr0, vr0, vr1
vssrarni.bu.h vr1, vr0, bpc_sh
vstelm.w vr1, a0, 0, 0
add.d a0, a0, a1
vstelm.w vr1, a0, 0, 1
addi.w a5, a5, -2
addi.d a2, a2, 16
addi.d a3, a3, 16
add.d a0, a0, a1
blt zero, a5, .AVG_W4_LASX
b .AVG_END_LASX
.AVG_W8_LASX:
xvld xr0, a2, 0
xvld xr1, a3, 0
xvadd.h xr2, xr0, xr1
xvssrarni.bu.h xr1, xr2, bpc_sh
xvstelm.d xr1, a0, 0, 0
add.d a0, a0, a1
xvstelm.d xr1, a0, 0, 2
addi.w a5, a5, -2
addi.d a2, a2, 32
addi.d a3, a3, 32
add.d a0, a1, a0
blt zero, a5, .AVG_W8_LASX
b .AVG_END_LASX
.AVG_W16_LASX:
xvld xr0, a2, 0
xvld xr2, a2, 32
xvld xr1, a3, 0
xvld xr3, a3, 32
xvadd.h xr4, xr0, xr1
xvadd.h xr5, xr2, xr3
xvssrarni.bu.h xr5, xr4, bpc_sh
xvpermi.d xr2, xr5, 0xd8
xvpermi.d xr3, xr5, 0x8d
vst vr2, a0, 0
vstx vr3, a0, a1
addi.w a5, a5, -2
addi.d a2, a2, 64
addi.d a3, a3, 64
alsl.d a0, a1, a0, 1
blt zero, a5, .AVG_W16_LASX
b .AVG_END_LASX
.AVG_W32_LASX:
xvld xr0, a2, 0
xvld xr2, a2, 32
xvld xr1, a3, 0
xvld xr3, a3, 32
xvadd.h xr4, xr0, xr1
xvadd.h xr5, xr2, xr3
xvssrarni.bu.h xr5, xr4, bpc_sh
xvpermi.d xr6, xr5, 0xd8
xvst xr6, a0, 0
addi.w a5, a5, -1
addi.d a2, a2, 64
addi.d a3, a3, 64
add.d a0, a0, a1
blt zero, a5, .AVG_W32_LASX
b .AVG_END_LASX
.AVG_W64_LASX:
xvld xr0, a2, 0
xvld xr2, a2, 32
xvld xr4, a2, 64
xvld xr6, a2, 96
xvld xr1, a3, 0
xvld xr3, a3, 32
xvld xr5, a3, 64
xvld xr7, a3, 96
xvadd.h xr0, xr0, xr1
xvadd.h xr2, xr2, xr3
xvadd.h xr4, xr4, xr5
xvadd.h xr6, xr6, xr7
xvssrarni.bu.h xr2, xr0, bpc_sh
xvssrarni.bu.h xr6, xr4, bpc_sh
xvpermi.d xr1, xr2, 0xd8
xvpermi.d xr3, xr6, 0xd8
xvst xr1, a0, 0
xvst xr3, a0, 32
addi.w a5, a5, -1
addi.d a2, a2, 128
addi.d a3, a3, 128
add.d a0, a0, a1
blt zero, a5, .AVG_W64_LASX
b .AVG_END_LASX
.AVG_W128_LASX:
xvld xr0, a2, 0
xvld xr2, a2, 32
xvld xr4, a2, 64
xvld xr6, a2, 96
xvld xr8, a2, 128
xvld xr10, a2, 160
xvld xr12, a2, 192
xvld xr14, a2, 224
xvld xr1, a3, 0
xvld xr3, a3, 32
xvld xr5, a3, 64
xvld xr7, a3, 96
xvld xr9, a3, 128
xvld xr11, a3, 160
xvld xr13, a3, 192
xvld xr15, a3, 224
xvadd.h xr0, xr0, xr1
xvadd.h xr2, xr2, xr3
xvadd.h xr4, xr4, xr5
xvadd.h xr6, xr6, xr7
xvadd.h xr8, xr8, xr9
xvadd.h xr10, xr10, xr11
xvadd.h xr12, xr12, xr13
xvadd.h xr14, xr14, xr15
xvssrarni.bu.h xr2, xr0, bpc_sh
xvssrarni.bu.h xr6, xr4, bpc_sh
xvssrarni.bu.h xr10, xr8, bpc_sh
xvssrarni.bu.h xr14, xr12, bpc_sh
xvpermi.d xr1, xr2, 0xd8
xvpermi.d xr3, xr6, 0xd8
xvpermi.d xr5, xr10, 0xd8
xvpermi.d xr7, xr14, 0xd8
xvst xr1, a0, 0
xvst xr3, a0, 32
xvst xr5, a0, 64
xvst xr7, a0, 96
addi.w a5, a5, -1
addi.d a2, a2, 256
addi.d a3, a3, 256
add.d a0, a0, a1
blt zero, a5, .AVG_W128_LASX
.AVG_END_LASX:
endfunc
function w_avg_8bpc_lsx
addi.d t8, a0, 0
li.w t2, 16
sub.w t2, t2, a6 // 16 - weight
vreplgr2vr.h vr21, a6
vreplgr2vr.h vr22, t2
clz.w t0, a4
li.w t1, 24
sub.w t0, t0, t1
la.local t1, .W_AVG_LSX_JRTABLE
alsl.d t0, t0, t1, 1
ld.h t2, t0, 0
add.d t1, t1, t2
jirl $r0, t1, 0
.align 3
.W_AVG_LSX_JRTABLE:
.hword .W_AVG_W128_LSX - .W_AVG_LSX_JRTABLE
.hword .W_AVG_W64_LSX - .W_AVG_LSX_JRTABLE
.hword .W_AVG_W32_LSX - .W_AVG_LSX_JRTABLE
.hword .W_AVG_W16_LSX - .W_AVG_LSX_JRTABLE
.hword .W_AVG_W8_LSX - .W_AVG_LSX_JRTABLE
.hword .W_AVG_W4_LSX - .W_AVG_LSX_JRTABLE
.W_AVG_W4_LSX:
vld vr0, a2, 0
vld vr1, a3, 0
vmulwev.w.h vr2, vr0, vr21
vmulwod.w.h vr3, vr0, vr21
vmaddwev.w.h vr2, vr1, vr22
vmaddwod.w.h vr3, vr1, vr22
vssrarni.hu.w vr3, vr2, bpcw_sh
vssrlni.bu.h vr1, vr3, 0
vpickod.w vr4, vr2, vr1
vilvl.b vr0, vr4, vr1
fst.s f0, a0, 0
add.d a0, a0, a1
vstelm.w vr0, a0, 0, 1
addi.w a5, a5, -2
addi.d a2, a2, 16
addi.d a3, a3, 16
add.d a0, a1, a0
blt zero, a5, .W_AVG_W4_LSX
b .W_AVG_END_LSX
.W_AVG_W8_LSX:
vld vr0, a2, 0
vld vr1, a3, 0
vmulwev.w.h vr2, vr0, vr21
vmulwod.w.h vr3, vr0, vr21
vmaddwev.w.h vr2, vr1, vr22
vmaddwod.w.h vr3, vr1, vr22
vssrarni.hu.w vr3, vr2, bpcw_sh
vssrlni.bu.h vr1, vr3, 0
vpickod.w vr4, vr2, vr1
vilvl.b vr0, vr4, vr1
fst.d f0, a0, 0
addi.w a5, a5, -1
addi.d a2, a2, 16
addi.d a3, a3, 16
add.d a0, a0, a1
blt zero, a5, .W_AVG_W8_LSX
b .W_AVG_END_LSX
.W_AVG_W16_LSX:
vld vr0, a2, 0
vld vr2, a2, 16
vld vr1, a3, 0
vld vr3, a3, 16
vmulwev.w.h vr4, vr0, vr21
vmulwod.w.h vr5, vr0, vr21
vmulwev.w.h vr6, vr2, vr21
vmulwod.w.h vr7, vr2, vr21
vmaddwev.w.h vr4, vr1, vr22
vmaddwod.w.h vr5, vr1, vr22
vmaddwev.w.h vr6, vr3, vr22
vmaddwod.w.h vr7, vr3, vr22
vssrarni.hu.w vr6, vr4, bpcw_sh
vssrarni.hu.w vr7, vr5, bpcw_sh
vssrlrni.bu.h vr7, vr6, 0
vshuf4i.w vr8, vr7, 0x4E
vilvl.b vr0, vr8, vr7
vst vr0, a0, 0
addi.w a5, a5, -1
addi.d a2, a2, 32
addi.d a3, a3, 32
add.d a0, a0, a1
blt zero, a5, .W_AVG_W16_LSX
b .W_AVG_END_LSX
.W_AVG_W32_LSX:
.rept 2
vld vr0, a2, 0
vld vr2, a2, 16
vld vr1, a3, 0
vld vr3, a3, 16
vmulwev.w.h vr4, vr0, vr21
vmulwod.w.h vr5, vr0, vr21
vmulwev.w.h vr6, vr2, vr21
vmulwod.w.h vr7, vr2, vr21
vmaddwev.w.h vr4, vr1, vr22
vmaddwod.w.h vr5, vr1, vr22
vmaddwev.w.h vr6, vr3, vr22
vmaddwod.w.h vr7, vr3, vr22
vssrarni.hu.w vr6, vr4, bpcw_sh
vssrarni.hu.w vr7, vr5, bpcw_sh
vssrlrni.bu.h vr7, vr6, 0
vshuf4i.w vr8, vr7, 0x4E
vilvl.b vr0, vr8, vr7
vst vr0, a0, 0
addi.d a2, a2, 32
addi.d a3, a3, 32
addi.d a0, a0, 16
.endr
addi.w a5, a5, -1
add.d t8, t8, a1
add.d a0, t8, zero
blt zero, a5, .W_AVG_W32_LSX
b .W_AVG_END_LSX
.W_AVG_W64_LSX:
.rept 4
vld vr0, a2, 0
vld vr2, a2, 16
vld vr1, a3, 0
vld vr3, a3, 16
vmulwev.w.h vr4, vr0, vr21
vmulwod.w.h vr5, vr0, vr21
vmulwev.w.h vr6, vr2, vr21
vmulwod.w.h vr7, vr2, vr21
vmaddwev.w.h vr4, vr1, vr22
vmaddwod.w.h vr5, vr1, vr22
vmaddwev.w.h vr6, vr3, vr22
vmaddwod.w.h vr7, vr3, vr22
vssrarni.hu.w vr6, vr4, bpcw_sh
vssrarni.hu.w vr7, vr5, bpcw_sh
vssrlrni.bu.h vr7, vr6, 0
vshuf4i.w vr8, vr7, 0x4E
vilvl.b vr0, vr8, vr7
vst vr0, a0, 0
addi.d a2, a2, 32
addi.d a3, a3, 32
addi.d a0, a0, 16
.endr
addi.w a5, a5, -1
add.d t8, t8, a1
add.d a0, t8, zero
blt zero, a5, .W_AVG_W64_LSX
b .W_AVG_END_LSX
.W_AVG_W128_LSX:
.rept 8
vld vr0, a2, 0
vld vr2, a2, 16
vld vr1, a3, 0
vld vr3, a3, 16
vmulwev.w.h vr4, vr0, vr21
vmulwod.w.h vr5, vr0, vr21
vmulwev.w.h vr6, vr2, vr21
vmulwod.w.h vr7, vr2, vr21
vmaddwev.w.h vr4, vr1, vr22
vmaddwod.w.h vr5, vr1, vr22
vmaddwev.w.h vr6, vr3, vr22
vmaddwod.w.h vr7, vr3, vr22
vssrarni.hu.w vr6, vr4, bpcw_sh
vssrarni.hu.w vr7, vr5, bpcw_sh
vssrlrni.bu.h vr7, vr6, 0
vshuf4i.w vr8, vr7, 0x4E
vilvl.b vr0, vr8, vr7
vst vr0, a0, 0
addi.d a2, a2, 32
addi.d a3, a3, 32
addi.d a0, a0, 16
.endr
addi.w a5, a5, -1
add.d t8, t8, a1
add.d a0, t8, zero
blt zero, a5, .W_AVG_W128_LSX
.W_AVG_END_LSX:
endfunc
function w_avg_8bpc_lasx
addi.d t8, a0, 0
li.w t2, 16
sub.w t2, t2, a6 // 16 - weight
xvreplgr2vr.h xr21, a6
xvreplgr2vr.h xr22, t2
clz.w t0, a4
li.w t1, 24
sub.w t0, t0, t1
la.local t1, .W_AVG_LASX_JRTABLE
alsl.d t0, t0, t1, 1
ld.h t2, t0, 0
add.d t1, t1, t2
jirl $r0, t1, 0
.align 3
.W_AVG_LASX_JRTABLE:
.hword .W_AVG_W128_LASX - .W_AVG_LASX_JRTABLE
.hword .W_AVG_W64_LASX - .W_AVG_LASX_JRTABLE
.hword .W_AVG_W32_LASX - .W_AVG_LASX_JRTABLE
.hword .W_AVG_W16_LASX - .W_AVG_LASX_JRTABLE
.hword .W_AVG_W8_LASX - .W_AVG_LASX_JRTABLE
.hword .W_AVG_W4_LASX - .W_AVG_LASX_JRTABLE
.W_AVG_W4_LASX:
vld vr0, a2, 0
vld vr1, a3, 0
xvpermi.d xr2, xr0, 0xD8
xvpermi.d xr3, xr1, 0xD8
xvilvl.h xr4, xr3, xr2
xvmulwev.w.h xr0, xr4, xr21
xvmaddwod.w.h xr0, xr4, xr22
xvssrarni.hu.w xr1, xr0, bpcw_sh
xvssrlni.bu.h xr0, xr1, 0
fst.s f0, a0, 0
add.d a0, a0, a1
xvstelm.w xr0, a0, 0, 4
addi.w a5, a5, -2
addi.d a2, a2, 16
addi.d a3, a3, 16
add.d a0, a1, a0
blt zero, a5, .W_AVG_W4_LASX
b .W_AVG_END_LASX
.W_AVG_W8_LASX:
xvld xr0, a2, 0
xvld xr1, a3, 0
xvmulwev.w.h xr2, xr0, xr21
xvmulwod.w.h xr3, xr0, xr21
xvmaddwev.w.h xr2, xr1, xr22
xvmaddwod.w.h xr3, xr1, xr22
xvssrarni.hu.w xr3, xr2, bpcw_sh
xvssrlni.bu.h xr1, xr3, 0
xvpickod.w xr4, xr2, xr1
xvilvl.b xr0, xr4, xr1
xvstelm.d xr0, a0, 0, 0
add.d a0, a0, a1
xvstelm.d xr0, a0, 0, 2
addi.w a5, a5, -2
addi.d a2, a2, 32
addi.d a3, a3, 32
add.d a0, a0, a1
blt zero, a5, .W_AVG_W8_LASX
b .W_AVG_END_LASX
.W_AVG_W16_LASX:
xvld xr0, a2, 0
xvld xr1, a3, 0
xvmulwev.w.h xr2, xr0, xr21
xvmulwod.w.h xr3, xr0, xr21
xvmaddwev.w.h xr2, xr1, xr22
xvmaddwod.w.h xr3, xr1, xr22
xvssrarni.hu.w xr3, xr2, bpcw_sh
xvssrlni.bu.h xr1, xr3, 0
xvpickod.w xr4, xr2, xr1
xvilvl.b xr0, xr4, xr1
xvpermi.d xr1, xr0, 0xD8
vst vr1, a0, 0
addi.w a5, a5, -1
addi.d a2, a2, 32
addi.d a3, a3, 32
add.d a0, a0, a1
blt zero, a5, .W_AVG_W16_LASX
b .W_AVG_END_LASX
.W_AVG_W32_LASX:
xvld xr0, a2, 0
xvld xr2, a2, 32
xvld xr1, a3, 0
xvld xr3, a3, 32
xvmulwev.w.h xr4, xr0, xr21
xvmulwod.w.h xr5, xr0, xr21
xvmulwev.w.h xr6, xr2, xr21
xvmulwod.w.h xr7, xr2, xr21
xvmaddwev.w.h xr4, xr1, xr22
xvmaddwod.w.h xr5, xr1, xr22
xvmaddwev.w.h xr6, xr3, xr22
xvmaddwod.w.h xr7, xr3, xr22
xvssrarni.hu.w xr6, xr4, bpcw_sh
xvssrarni.hu.w xr7, xr5, bpcw_sh
xvssrlni.bu.h xr7, xr6, 0
xvshuf4i.w xr8, xr7, 0x4E
xvilvl.b xr9, xr8, xr7
xvpermi.d xr0, xr9, 0xD8
xvst xr0, a0, 0
addi.w a5, a5, -1
addi.d a2, a2, 64
addi.d a3, a3, 64
add.d a0, a0, a1
blt zero, a5, .W_AVG_W32_LASX
b .W_AVG_END_LASX
.W_AVG_W64_LASX:
.rept 2
xvld xr0, a2, 0
xvld xr2, a2, 32
xvld xr1, a3, 0
xvld xr3, a3, 32
xvmulwev.w.h xr4, xr0, xr21
xvmulwod.w.h xr5, xr0, xr21
xvmulwev.w.h xr6, xr2, xr21
xvmulwod.w.h xr7, xr2, xr21
xvmaddwev.w.h xr4, xr1, xr22
xvmaddwod.w.h xr5, xr1, xr22
xvmaddwev.w.h xr6, xr3, xr22
xvmaddwod.w.h xr7, xr3, xr22
xvssrarni.hu.w xr6, xr4, bpcw_sh
xvssrarni.hu.w xr7, xr5, bpcw_sh
xvssrlni.bu.h xr7, xr6, 0
xvshuf4i.w xr8, xr7, 0x4E
xvilvl.b xr9, xr8, xr7
xvpermi.d xr0, xr9, 0xD8
xvst xr0, a0, 0
addi.d a2, a2, 64
addi.d a3, a3, 64
addi.d a0, a0, 32
.endr
addi.w a5, a5, -1
add.d t8, t8, a1
add.d a0, t8, zero
blt zero, a5, .W_AVG_W64_LASX
b .W_AVG_END_LASX
.W_AVG_W128_LASX:
.rept 4
xvld xr0, a2, 0
xvld xr2, a2, 32
xvld xr1, a3, 0
xvld xr3, a3, 32
xvmulwev.w.h xr4, xr0, xr21
xvmulwod.w.h xr5, xr0, xr21
xvmulwev.w.h xr6, xr2, xr21
xvmulwod.w.h xr7, xr2, xr21
xvmaddwev.w.h xr4, xr1, xr22
xvmaddwod.w.h xr5, xr1, xr22
xvmaddwev.w.h xr6, xr3, xr22
xvmaddwod.w.h xr7, xr3, xr22
xvssrarni.hu.w xr6, xr4, bpcw_sh
xvssrarni.hu.w xr7, xr5, bpcw_sh
xvssrlni.bu.h xr7, xr6, 0
xvshuf4i.w xr8, xr7, 0x4E
xvilvl.b xr9, xr8, xr7
xvpermi.d xr0, xr9, 0xD8
xvst xr0, a0, 0
addi.d a2, a2, 64
addi.d a3, a3, 64
addi.d a0, a0, 32
.endr
addi.w a5, a5, -1
add.d t8, t8, a1
add.d a0, t8, zero
blt zero, a5, .W_AVG_W128_LASX
.W_AVG_END_LASX:
endfunc
#undef bpc_sh
#undef bpcw_sh
#define mask_sh 10
/*
static void mask_c(pixel *dst, const ptrdiff_t dst_stride,
const int16_t *tmp1, const int16_t *tmp2, const int w, int h,
const uint8_t *mask HIGHBD_DECL_SUFFIX)
*/
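/*
Per-pixel masked blend: m = mask[x] (0..64). vr19 is kept zero to widen the
mask bytes, vr21 holds 64, and each pixel is computed as
    dst[x] = sat((tmp1[x] * m + tmp2[x] * (64 - m) + rnd) >> mask_sh)
Registers as used below: a0=dst, a1=dst_stride, a2=tmp1, a3=tmp2, a4=w,
a5=h, a6=mask.
*/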
function mask_8bpc_lsx
vldi vr21, 0x440 // 64
vxor.v vr19, vr19, vr19
addi.d t8, a0, 0
clz.w t0, a4
li.w t1, 24
sub.w t0, t0, t1
la.local t1, .MASK_LSX_JRTABLE
alsl.d t0, t0, t1, 1
ld.h t2, t0, 0
add.d t1, t1, t2
jirl $r0, t1, 0
.align 3
.MASK_LSX_JRTABLE:
.hword .MASK_W128_LSX - .MASK_LSX_JRTABLE
.hword .MASK_W64_LSX - .MASK_LSX_JRTABLE
.hword .MASK_W32_LSX - .MASK_LSX_JRTABLE
.hword .MASK_W16_LSX - .MASK_LSX_JRTABLE
.hword .MASK_W8_LSX - .MASK_LSX_JRTABLE
.hword .MASK_W4_LSX - .MASK_LSX_JRTABLE
.MASK_W4_LSX:
vld vr0, a2, 0
vld vr1, a3, 0
fld.d f22, a6, 0
vilvl.b vr2, vr19, vr22
vsub.h vr3, vr21, vr2
vmulwev.w.h vr4, vr0, vr2
vmulwod.w.h vr5, vr0, vr2
vmaddwev.w.h vr4, vr1, vr3
vmaddwod.w.h vr5, vr1, vr3
vssrarni.hu.w vr5, vr4, mask_sh
vssrlrni.bu.h vr1, vr5, 0
vpickod.w vr4, vr2, vr1
vilvl.b vr0, vr4, vr1
fst.s f0, a0, 0
add.d a0, a0, a1
vstelm.w vr0, a0, 0, 1
addi.d a2, a2, 16
addi.d a3, a3, 16
addi.d a6, a6, 8
add.d a0, a0, a1
addi.w a5, a5, -2
blt zero, a5, .MASK_W4_LSX
b .MASK_END_LSX
.MASK_W8_LSX:
vld vr0, a2, 0
vld vr10, a2, 16
vld vr1, a3, 0
vld vr11, a3, 16
vld vr22, a6, 0
vilvl.b vr2, vr19, vr22
vilvh.b vr12, vr19, vr22
vsub.h vr3, vr21, vr2
vsub.h vr13, vr21, vr12
vmulwev.w.h vr4, vr0, vr2
vmulwod.w.h vr5, vr0, vr2
vmulwev.w.h vr14, vr10, vr12
vmulwod.w.h vr15, vr10, vr12
vmaddwev.w.h vr4, vr1, vr3
vmaddwod.w.h vr5, vr1, vr3
vmaddwev.w.h vr14, vr11, vr13
vmaddwod.w.h vr15, vr11, vr13
vssrarni.hu.w vr14, vr4, mask_sh
vssrarni.hu.w vr15, vr5, mask_sh
vssrlrni.bu.h vr15, vr14, 0
vshuf4i.w vr6, vr15, 0x4E
vilvl.b vr0, vr6, vr15
fst.d f0, a0, 0
add.d a0, a0, a1
vstelm.d vr0, a0, 0, 1
addi.d a2, a2, 32
addi.d a3, a3, 32
addi.d a6, a6, 16
add.d a0, a0, a1
addi.w a5, a5, -2
blt zero, a5, .MASK_W8_LSX
b .MASK_END_LSX
.MASK_W16_LSX:
vld vr0, a2, 0
vld vr10, a2, 16
vld vr1, a3, 0
vld vr11, a3, 16
vld vr22, a6, 0
vilvl.b vr2, vr19, vr22
vilvh.b vr12, vr19, vr22
vsub.h vr3, vr21, vr2
vsub.h vr13, vr21, vr12
vmulwev.w.h vr4, vr0, vr2
vmulwod.w.h vr5, vr0, vr2
vmulwev.w.h vr14, vr10, vr12
vmulwod.w.h vr15, vr10, vr12
vmaddwev.w.h vr4, vr1, vr3
vmaddwod.w.h vr5, vr1, vr3
vmaddwev.w.h vr14, vr11, vr13
vmaddwod.w.h vr15, vr11, vr13
vssrarni.hu.w vr14, vr4, mask_sh
vssrarni.hu.w vr15, vr5, mask_sh
vssrlrni.bu.h vr15, vr14, 0
vshuf4i.w vr6, vr15, 0x4E
vilvl.b vr0, vr6, vr15
vst vr0, a0, 0
addi.d a2, a2, 32
addi.d a3, a3, 32
addi.d a6, a6, 16
add.d a0, a0, a1
addi.w a5, a5, -1
blt zero, a5, .MASK_W16_LSX
b .MASK_END_LSX
.MASK_W32_LSX:
.rept 2
vld vr0, a2, 0
vld vr10, a2, 16
vld vr1, a3, 0
vld vr11, a3, 16
vld vr22, a6, 0
vilvl.b vr2, vr19, vr22
vilvh.b vr12, vr19, vr22
vsub.h vr3, vr21, vr2
vsub.h vr13, vr21, vr12
vmulwev.w.h vr4, vr0, vr2
vmulwod.w.h vr5, vr0, vr2
vmulwev.w.h vr14, vr10, vr12
vmulwod.w.h vr15, vr10, vr12
vmaddwev.w.h vr4, vr1, vr3
vmaddwod.w.h vr5, vr1, vr3
vmaddwev.w.h vr14, vr11, vr13
vmaddwod.w.h vr15, vr11, vr13
vssrarni.hu.w vr14, vr4, mask_sh
vssrarni.hu.w vr15, vr5, mask_sh
vssrlrni.bu.h vr15, vr14, 0
vshuf4i.w vr6, vr15, 0x4E
vilvl.b vr0, vr6, vr15
vst vr0, a0, 0
addi.d a2, a2, 32
addi.d a3, a3, 32
addi.d a6, a6, 16
addi.d a0, a0, 16
.endr
add.d t8, t8, a1
add.d a0, t8, zero
addi.w a5, a5, -1
blt zero, a5, .MASK_W32_LSX
b .MASK_END_LSX
.MASK_W64_LSX:
.rept 4
vld vr0, a2, 0
vld vr10, a2, 16
vld vr1, a3, 0
vld vr11, a3, 16
vld vr22, a6, 0
vilvl.b vr2, vr19, vr22
vilvh.b vr12, vr19, vr22
vsub.h vr3, vr21, vr2
vsub.h vr13, vr21, vr12
vmulwev.w.h vr4, vr0, vr2
vmulwod.w.h vr5, vr0, vr2
vmulwev.w.h vr14, vr10, vr12
vmulwod.w.h vr15, vr10, vr12
vmaddwev.w.h vr4, vr1, vr3
vmaddwod.w.h vr5, vr1, vr3
vmaddwev.w.h vr14, vr11, vr13
vmaddwod.w.h vr15, vr11, vr13
vssrarni.hu.w vr14, vr4, mask_sh
vssrarni.hu.w vr15, vr5, mask_sh
vssrlrni.bu.h vr15, vr14, 0
vshuf4i.w vr6, vr15, 0x4E
vilvl.b vr0, vr6, vr15
vst vr0, a0, 0
addi.d a2, a2, 32
addi.d a3, a3, 32
addi.d a6, a6, 16
addi.d a0, a0, 16
.endr
add.d t8, t8, a1
add.d a0, t8, zero
addi.w a5, a5, -1
blt zero, a5, .MASK_W64_LSX
b .MASK_END_LSX
.MASK_W128_LSX:
.rept 8
vld vr0, a2, 0
vld vr10, a2, 16
vld vr1, a3, 0
vld vr11, a3, 16
vld vr22, a6, 0
vilvl.b vr2, vr19, vr22
vilvh.b vr12, vr19, vr22
vsub.h vr3, vr21, vr2
vsub.h vr13, vr21, vr12
vmulwev.w.h vr4, vr0, vr2
vmulwod.w.h vr5, vr0, vr2
vmulwev.w.h vr14, vr10, vr12
vmulwod.w.h vr15, vr10, vr12
vmaddwev.w.h vr4, vr1, vr3
vmaddwod.w.h vr5, vr1, vr3
vmaddwev.w.h vr14, vr11, vr13
vmaddwod.w.h vr15, vr11, vr13
vssrarni.hu.w vr14, vr4, mask_sh
vssrarni.hu.w vr15, vr5, mask_sh
vssrlrni.bu.h vr15, vr14, 0
vshuf4i.w vr6, vr15, 0x4E
vilvl.b vr0, vr6, vr15
vst vr0, a0, 0
addi.d a2, a2, 32
addi.d a3, a3, 32
addi.d a6, a6, 16
addi.d a0, a0, 16
.endr
add.d t8, t8, a1
add.d a0, t8, zero
addi.w a5, a5, -1
blt zero, a5, .MASK_W128_LSX
.MASK_END_LSX:
endfunc
function mask_8bpc_lasx
xvldi xr21, 0x440 // 64
xvxor.v xr19, xr19, xr19
addi.d t8, a0, 0
clz.w t0, a4
li.w t1, 24
sub.w t0, t0, t1
la.local t1, .MASK_LASX_JRTABLE
alsl.d t0, t0, t1, 1
ld.h t2, t0, 0
add.d t1, t1, t2
jirl $r0, t1, 0
.align 3
.MASK_LASX_JRTABLE:
.hword .MASK_W128_LASX - .MASK_LASX_JRTABLE
.hword .MASK_W64_LASX - .MASK_LASX_JRTABLE
.hword .MASK_W32_LASX - .MASK_LASX_JRTABLE
.hword .MASK_W16_LASX - .MASK_LASX_JRTABLE
.hword .MASK_W8_LASX - .MASK_LASX_JRTABLE
.hword .MASK_W4_LASX - .MASK_LASX_JRTABLE
.MASK_W4_LASX:
vld vr0, a2, 0
vld vr1, a3, 0
fld.d f22, a6, 0
vilvl.h vr4, vr1, vr0
vilvh.h vr14, vr1, vr0
vilvl.b vr2, vr19, vr22
vsub.h vr3, vr21, vr2
xvpermi.q xr14, xr4, 0x20
vilvl.h vr5, vr3, vr2
vilvh.h vr15, vr3, vr2
xvpermi.q xr15, xr5, 0x20
xvmulwev.w.h xr0, xr14, xr15
xvmaddwod.w.h xr0, xr14, xr15
xvssrarni.hu.w xr1, xr0, mask_sh
xvssrlni.bu.h xr2, xr1, 0
fst.s f2, a0, 0
add.d a0, a0, a1
xvstelm.w xr2, a0, 0, 4
addi.d a2, a2, 16
addi.d a3, a3, 16
addi.d a6, a6, 8
add.d a0, a0, a1
addi.w a5, a5, -2
blt zero, a5, .MASK_W4_LASX
b .MASK_END_LASX
.MASK_W8_LASX:
xvld xr0, a2, 0
xvld xr1, a3, 0
vld vr22, a6, 0
vext2xv.hu.bu xr2, xr22
xvsub.h xr3, xr21, xr2
xvmulwev.w.h xr4, xr0, xr2
xvmulwod.w.h xr5, xr0, xr2
xvmaddwev.w.h xr4, xr1, xr3
xvmaddwod.w.h xr5, xr1, xr3
xvssrarni.hu.w xr5, xr4, mask_sh
xvssrlni.bu.h xr1, xr5, 0
xvpickod.w xr4, xr2, xr1
xvilvl.b xr0, xr4, xr1
fst.d f0, a0, 0
add.d a0, a0, a1
xvstelm.d xr0, a0, 0, 2
addi.d a2, a2, 32
addi.d a3, a3, 32
addi.d a6, a6, 16
add.d a0, a0, a1
addi.w a5, a5, -2
blt zero, a5, .MASK_W8_LASX
b .MASK_END_LASX
.MASK_W16_LASX:
xvld xr0, a2, 0
xvld xr1, a3, 0
vld vr22, a6, 0
vext2xv.hu.bu xr2, xr22
xvsub.h xr3, xr21, xr2
xvmulwev.w.h xr4, xr0, xr2
xvmulwod.w.h xr5, xr0, xr2
xvmaddwev.w.h xr4, xr1, xr3
xvmaddwod.w.h xr5, xr1, xr3
xvssrarni.hu.w xr5, xr4, mask_sh
xvssrlni.bu.h xr1, xr5, 0
xvpickod.w xr4, xr2, xr1
xvilvl.b xr0, xr4, xr1
xvpermi.d xr1, xr0, 0xD8
vst vr1, a0, 0
addi.d a2, a2, 32
addi.d a3, a3, 32
addi.d a6, a6, 16
add.d a0, a0, a1
addi.w a5, a5, -1
blt zero, a5, .MASK_W16_LASX
b .MASK_END_LASX
.MASK_W32_LASX:
xvld xr0, a2, 0
xvld xr10, a2, 32
xvld xr1, a3, 0
xvld xr11, a3, 32
xvld xr22, a6, 0
vext2xv.hu.bu xr2, xr22
xvpermi.q xr4, xr22, 0x01
vext2xv.hu.bu xr12, xr4
xvsub.h xr3, xr21, xr2
xvsub.h xr13, xr21, xr12
xvmulwev.w.h xr4, xr0, xr2
xvmulwod.w.h xr5, xr0, xr2
xvmulwev.w.h xr14, xr10, xr12
xvmulwod.w.h xr15, xr10, xr12
xvmaddwev.w.h xr4, xr1, xr3
xvmaddwod.w.h xr5, xr1, xr3
xvmaddwev.w.h xr14, xr11, xr13
xvmaddwod.w.h xr15, xr11, xr13
xvssrarni.hu.w xr14, xr4, mask_sh
xvssrarni.hu.w xr15, xr5, mask_sh
xvssrlni.bu.h xr15, xr14, 0
xvshuf4i.w xr6, xr15, 0x4E
xvilvl.b xr1, xr6, xr15
xvpermi.d xr0, xr1, 0xD8
xvst xr0, a0, 0
addi.d a2, a2, 64
addi.d a3, a3, 64
addi.d a6, a6, 32
add.d a0, a0, a1
addi.w a5, a5, -1
blt zero, a5, .MASK_W32_LASX
b .MASK_END_LASX
.MASK_W64_LASX:
.rept 2
xvld xr0, a2, 0
xvld xr10, a2, 32
xvld xr1, a3, 0
xvld xr11, a3, 32
xvld xr22, a6, 0
vext2xv.hu.bu xr2, xr22
xvpermi.q xr4, xr22, 0x01
vext2xv.hu.bu xr12, xr4
xvsub.h xr3, xr21, xr2
xvsub.h xr13, xr21, xr12
xvmulwev.w.h xr4, xr0, xr2
xvmulwod.w.h xr5, xr0, xr2
xvmulwev.w.h xr14, xr10, xr12
xvmulwod.w.h xr15, xr10, xr12
xvmaddwev.w.h xr4, xr1, xr3
xvmaddwod.w.h xr5, xr1, xr3
xvmaddwev.w.h xr14, xr11, xr13
xvmaddwod.w.h xr15, xr11, xr13
xvssrarni.hu.w xr14, xr4, mask_sh
xvssrarni.hu.w xr15, xr5, mask_sh
xvssrlni.bu.h xr15, xr14, 0
xvshuf4i.w xr6, xr15, 0x4E
xvilvl.b xr1, xr6, xr15
xvpermi.d xr0, xr1, 0xD8
xvst xr0, a0, 0
addi.d a2, a2, 64
addi.d a3, a3, 64
addi.d a6, a6, 32
addi.d a0, a0, 32
.endr
add.d t8, t8, a1
add.d a0, t8, zero
addi.w a5, a5, -1
blt zero, a5, .MASK_W64_LASX
b .MASK_END_LASX
.MASK_W128_LASX:
.rept 4
xvld xr0, a2, 0
xvld xr10, a2, 32
xvld xr1, a3, 0
xvld xr11, a3, 32
xvld xr22, a6, 0
vext2xv.hu.bu xr2, xr22
xvpermi.q xr4, xr22, 0x01
vext2xv.hu.bu xr12, xr4
xvsub.h xr3, xr21, xr2
xvsub.h xr13, xr21, xr12
xvmulwev.w.h xr4, xr0, xr2
xvmulwod.w.h xr5, xr0, xr2
xvmulwev.w.h xr14, xr10, xr12
xvmulwod.w.h xr15, xr10, xr12
xvmaddwev.w.h xr4, xr1, xr3
xvmaddwod.w.h xr5, xr1, xr3
xvmaddwev.w.h xr14, xr11, xr13
xvmaddwod.w.h xr15, xr11, xr13
xvssrarni.hu.w xr14, xr4, mask_sh
xvssrarni.hu.w xr15, xr5, mask_sh
xvssrlni.bu.h xr15, xr14, 0
xvshuf4i.w xr6, xr15, 0x4E
xvilvl.b xr1, xr6, xr15
xvpermi.d xr0, xr1, 0xD8
xvst xr0, a0, 0
addi.d a2, a2, 64
addi.d a3, a3, 64
addi.d a6, a6, 32
addi.d a0, a0, 32
.endr
add.d t8, t8, a1
add.d a0, t8, zero
addi.w a5, a5, -1
blt zero, a5, .MASK_W128_LASX
.MASK_END_LASX:
endfunc
/*
static void w_mask_c(pixel *dst, const ptrdiff_t dst_stride,
const int16_t *tmp1, const int16_t *tmp2, const int w, int h,
uint8_t *mask, const int sign,
const int ss_hor, const int ss_ver HIGHBD_DECL_SUFFIX)
*/
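/*
w_mask_420: the blend mask is derived from the two intermediates,
    m = min(38 + ((abs(tmp1[x] - tmp2[x]) + 8) >> 8), 64)
(vr22 holds 38, vr20 holds 64); the pixels are blended as in mask above with
a rounding shift of 10. A 2x2-subsampled mask is also written to a6: the four
m values of each 2x2 block are summed, the sign bias in a7 (vr21) is
subtracted, and the result is rounded down to the 0..64 range with a shift of 2.
*/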
function w_mask_420_8bpc_lsx
addi.d sp, sp, -24
fst.d f24, sp, 0
fst.d f25, sp, 8
fst.d f26, sp, 16
vldi vr20, 0x440
vreplgr2vr.h vr21, a7
vldi vr22, 0x426
clz.w t0, a4
li.w t1, 24
sub.w t0, t0, t1
la.local t1, .WMASK420_LSX_JRTABLE
alsl.d t0, t0, t1, 1
ld.h t8, t0, 0
add.d t1, t1, t8
jirl $r0, t1, 0
.align 3
.WMASK420_LSX_JRTABLE:
.hword .WMASK420_W128_LSX - .WMASK420_LSX_JRTABLE
.hword .WMASK420_W64_LSX - .WMASK420_LSX_JRTABLE
.hword .WMASK420_W32_LSX - .WMASK420_LSX_JRTABLE
.hword .WMASK420_W16_LSX - .WMASK420_LSX_JRTABLE
.hword .WMASK420_W8_LSX - .WMASK420_LSX_JRTABLE
.hword .WMASK420_W4_LSX - .WMASK420_LSX_JRTABLE
.WMASK420_W4_LSX:
vld vr0, a2, 0
vld vr1, a2, 16
vld vr2, a3, 0
vld vr3, a3, 16
addi.w a5, a5, -4
vabsd.h vr4, vr0, vr2
vabsd.h vr5, vr1, vr3
vaddi.hu vr4, vr4, 8
vaddi.hu vr5, vr5, 8
vsrli.h vr4, vr4, 8
vsrli.h vr5, vr5, 8
vadd.h vr4, vr4, vr22
vadd.h vr5, vr5, vr22
vmin.hu vr6, vr4, vr20
vmin.hu vr7, vr5, vr20
vsub.h vr8, vr20, vr6
vsub.h vr9, vr20, vr7
vmulwev.w.h vr4, vr6, vr0
vmulwod.w.h vr5, vr6, vr0
vmulwev.w.h vr10, vr7, vr1
vmulwod.w.h vr11, vr7, vr1
vmaddwev.w.h vr4, vr8, vr2
vmaddwod.w.h vr5, vr8, vr2
vmaddwev.w.h vr10, vr9, vr3
vmaddwod.w.h vr11, vr9, vr3
vilvl.w vr0, vr5, vr4
vilvh.w vr1, vr5, vr4
vilvl.w vr2, vr11, vr10
vilvh.w vr3, vr11, vr10
vssrarni.hu.w vr1, vr0, 10
vssrarni.hu.w vr3, vr2, 10
vssrlni.bu.h vr3, vr1, 0
vstelm.w vr3, a0, 0, 0
add.d a0, a0, a1
vstelm.w vr3, a0, 0, 1
add.d a0, a0, a1
vstelm.w vr3, a0, 0, 2
add.d a0, a0, a1
vstelm.w vr3, a0, 0, 3
add.d a0, a0, a1
vpickev.h vr0, vr7, vr6
vpickod.h vr1, vr7, vr6
vadd.h vr0, vr0, vr1
vshuf4i.h vr0, vr0, 0xd8
vhaddw.w.h vr2, vr0, vr0
vpickev.h vr2, vr2, vr2
vsub.h vr2, vr2, vr21
vaddi.hu vr2, vr2, 2
vssrani.bu.h vr2, vr2, 2
vstelm.w vr2, a6, 0, 0
addi.d a2, a2, 32
addi.d a3, a3, 32
addi.d a6, a6, 4
blt zero, a5, .WMASK420_W4_LSX
b .END_W420
.WMASK420_W8_LSX:
vld vr0, a2, 0
vld vr1, a2, 16
vld vr2, a3, 0
vld vr3, a3, 16
addi.w a5, a5, -2
vabsd.h vr4, vr0, vr2
vabsd.h vr5, vr1, vr3
vaddi.hu vr4, vr4, 8
vaddi.hu vr5, vr5, 8
vsrli.h vr4, vr4, 8
vsrli.h vr5, vr5, 8
vadd.h vr4, vr4, vr22
vadd.h vr5, vr5, vr22
vmin.hu vr6, vr4, vr20
vmin.hu vr7, vr5, vr20
vsub.h vr8, vr20, vr6
vsub.h vr9, vr20, vr7
vmulwev.w.h vr4, vr6, vr0
vmulwod.w.h vr5, vr6, vr0
vmulwev.w.h vr10, vr7, vr1
vmulwod.w.h vr11, vr7, vr1
vmaddwev.w.h vr4, vr8, vr2
vmaddwod.w.h vr5, vr8, vr2
vmaddwev.w.h vr10, vr9, vr3
vmaddwod.w.h vr11, vr9, vr3
vssrarni.hu.w vr10, vr4, 10
vssrarni.hu.w vr11, vr5, 10
vssrlni.bu.h vr11, vr10, 0
vshuf4i.w vr0, vr11, 0x4E
vilvl.b vr3, vr0, vr11
vstelm.d vr3, a0, 0, 0
add.d a0, a0, a1
vstelm.d vr3, a0, 0, 1
add.d a0, a0, a1
vpickev.h vr0, vr7, vr6
vpickod.h vr1, vr7, vr6
vadd.h vr0, vr0, vr1
vilvh.d vr2, vr0, vr0
vadd.h vr2, vr2, vr0
vsub.h vr2, vr2, vr21
vaddi.hu vr2, vr2, 2
vssrani.bu.h vr2, vr2, 2
vstelm.w vr2, a6, 0, 0
addi.d a2, a2, 32
addi.d a3, a3, 32
addi.d a6, a6, 4
blt zero, a5, .WMASK420_W8_LSX
b .END_W420
.WMASK420_W16_LSX:
vld vr0, a2, 0
vld vr1, a2, 16
alsl.d a2, a4, a2, 1
vld vr2, a2, 0
vld vr3, a2, 16
vld vr4, a3, 0
vld vr5, a3, 16
alsl.d a3, a4, a3, 1
vld vr6, a3, 0
vld vr7, a3, 16
vabsd.h vr8, vr0, vr4
vabsd.h vr9, vr1, vr5
vabsd.h vr10, vr2, vr6
vabsd.h vr11, vr3, vr7
vaddi.hu vr8, vr8, 8
vaddi.hu vr9, vr9, 8
vaddi.hu vr10, vr10, 8
vaddi.hu vr11, vr11, 8
vsrli.h vr8, vr8, 8
vsrli.h vr9, vr9, 8
vsrli.h vr10, vr10, 8
vsrli.h vr11, vr11, 8
vadd.h vr8, vr8, vr22
vadd.h vr9, vr9, vr22
vadd.h vr10, vr10, vr22
vadd.h vr11, vr11, vr22
vmin.hu vr12, vr8, vr20
vmin.hu vr13, vr9, vr20
vmin.hu vr14, vr10, vr20
vmin.hu vr15, vr11, vr20
vsub.h vr16, vr20, vr12
vsub.h vr17, vr20, vr13
vsub.h vr18, vr20, vr14
vsub.h vr19, vr20, vr15
vmulwev.w.h vr8, vr12, vr0
vmulwod.w.h vr9, vr12, vr0
vmulwev.w.h vr10, vr13, vr1
vmulwod.w.h vr11, vr13, vr1
vmulwev.w.h vr23, vr14, vr2
vmulwod.w.h vr24, vr14, vr2
vmulwev.w.h vr25, vr15, vr3
vmulwod.w.h vr26, vr15, vr3
vmaddwev.w.h vr8, vr16, vr4
vmaddwod.w.h vr9, vr16, vr4
vmaddwev.w.h vr10, vr17, vr5
vmaddwod.w.h vr11, vr17, vr5
vmaddwev.w.h vr23, vr18, vr6
vmaddwod.w.h vr24, vr18, vr6
vmaddwev.w.h vr25, vr19, vr7
vmaddwod.w.h vr26, vr19, vr7
vssrarni.hu.w vr10, vr8, 10
vssrarni.hu.w vr11, vr9, 10
vssrarni.hu.w vr25, vr23, 10
vssrarni.hu.w vr26, vr24, 10
vssrlni.bu.h vr11, vr10, 0
vssrlni.bu.h vr26, vr25, 0
vshuf4i.w vr0, vr11, 0x4E
vshuf4i.w vr1, vr26, 0x4E
vilvl.b vr3, vr0, vr11
vilvl.b vr7, vr1, vr26
vst vr3, a0, 0
vstx vr7, a0, a1
vpickev.h vr0, vr13, vr12
vpickod.h vr1, vr13, vr12
vpickev.h vr2, vr15, vr14
vpickod.h vr3, vr15, vr14
vadd.h vr4, vr0, vr1
vadd.h vr5, vr2, vr3
vadd.h vr4, vr4, vr5
vsub.h vr4, vr4, vr21
vssrarni.bu.h vr4, vr4, 2
vstelm.d vr4, a6, 0, 0
alsl.d a2, a4, a2, 1
alsl.d a3, a4, a3, 1
alsl.d a0, a1, a0, 1
addi.d a6, a6, 8
addi.w a5, a5, -2
blt zero, a5, .WMASK420_W16_LSX
b .END_W420
.WMASK420_W32_LSX:
.WMASK420_W64_LSX:
.WMASK420_W128_LSX:
.LOOP_W32_420_LSX:
add.d t1, a2, zero
add.d t2, a3, zero
add.d t3, a0, zero
add.d t4, a6, zero
alsl.d t5, a4, t1, 1
alsl.d t6, a4, t2, 1
or t7, a4, a4
.W32_420_LSX:
vld vr0, t1, 0
vld vr1, t1, 16
vld vr2, t2, 0
vld vr3, t2, 16
vld vr4, t5, 0
vld vr5, t5, 16
vld vr6, t6, 0
vld vr7, t6, 16
addi.d t1, t1, 32
addi.d t2, t2, 32
addi.d t5, t5, 32
addi.d t6, t6, 32
addi.w t7, t7, -16
vabsd.h vr8, vr0, vr2
vabsd.h vr9, vr1, vr3
vabsd.h vr10, vr4, vr6
vabsd.h vr11, vr5, vr7
vaddi.hu vr8, vr8, 8
vaddi.hu vr9, vr9, 8
vaddi.hu vr10, vr10, 8
vaddi.hu vr11, vr11, 8
vsrli.h vr8, vr8, 8
vsrli.h vr9, vr9, 8
vsrli.h vr10, vr10, 8
vsrli.h vr11, vr11, 8
vadd.h vr8, vr8, vr22
vadd.h vr9, vr9, vr22
vadd.h vr10, vr10, vr22
vadd.h vr11, vr11, vr22
vmin.hu vr12, vr8, vr20
vmin.hu vr13, vr9, vr20
vmin.hu vr14, vr10, vr20
vmin.hu vr15, vr11, vr20
vsub.h vr16, vr20, vr12
vsub.h vr17, vr20, vr13
vsub.h vr18, vr20, vr14
vsub.h vr19, vr20, vr15
vmulwev.w.h vr8, vr12, vr0
vmulwod.w.h vr9, vr12, vr0
vmulwev.w.h vr10, vr13, vr1
vmulwod.w.h vr11, vr13, vr1
vmulwev.w.h vr23, vr14, vr4
vmulwod.w.h vr24, vr14, vr4
vmulwev.w.h vr25, vr15, vr5
vmulwod.w.h vr26, vr15, vr5
vmaddwev.w.h vr8, vr16, vr2
vmaddwod.w.h vr9, vr16, vr2
vmaddwev.w.h vr10, vr17, vr3
vmaddwod.w.h vr11, vr17, vr3
vmaddwev.w.h vr23, vr18, vr6
vmaddwod.w.h vr24, vr18, vr6
vmaddwev.w.h vr25, vr19, vr7
vmaddwod.w.h vr26, vr19, vr7
vssrarni.hu.w vr10, vr8, 10
vssrarni.hu.w vr11, vr9, 10
vssrarni.hu.w vr25, vr23, 10
vssrarni.hu.w vr26, vr24, 10
vssrlni.bu.h vr11, vr10, 0
vssrlni.bu.h vr26, vr25, 0
vshuf4i.w vr8, vr11, 0x4E
vshuf4i.w vr9, vr26, 0x4E
vilvl.b vr3, vr8, vr11
vilvl.b vr7, vr9, vr26
vst vr3, t3, 0
vstx vr7, a1, t3
addi.d t3, t3, 16
vpickev.h vr8, vr13, vr12
vpickod.h vr9, vr13, vr12
vpickev.h vr10, vr15, vr14
vpickod.h vr11, vr15, vr14
vadd.h vr8, vr8, vr9
vadd.h vr10, vr10, vr11
vadd.h vr12, vr8, vr10
vsub.h vr12, vr12, vr21
vssrarni.bu.h vr12, vr12, 2
vstelm.d vr12, t4, 0, 0
addi.d t4, t4, 8
bne t7, zero, .W32_420_LSX
alsl.d a2, a4, a2, 2
alsl.d a3, a4, a3, 2
alsl.d a0, a1, a0, 1
srai.w t8, a4, 1
add.d a6, a6, t8
addi.w a5, a5, -2
blt zero, a5, .LOOP_W32_420_LSX
.END_W420:
fld.d f24, sp, 0
fld.d f25, sp, 8
fld.d f26, sp, 16
addi.d sp, sp, 24
endfunc
function w_mask_420_8bpc_lasx
xvldi xr20, 0x440
xvreplgr2vr.h xr21, a7
xvldi xr22, 0x426
clz.w t0, a4
li.w t1, 24
sub.w t0, t0, t1
la.local t1, .WMASK420_LASX_JRTABLE
alsl.d t0, t0, t1, 1
ld.h t8, t0, 0
add.d t1, t1, t8
jirl $r0, t1, 0
.align 3
.WMASK420_LASX_JRTABLE:
.hword .WMASK420_W128_LASX - .WMASK420_LASX_JRTABLE
.hword .WMASK420_W64_LASX - .WMASK420_LASX_JRTABLE
.hword .WMASK420_W32_LASX - .WMASK420_LASX_JRTABLE
.hword .WMASK420_W16_LASX - .WMASK420_LASX_JRTABLE
.hword .WMASK420_W8_LASX - .WMASK420_LASX_JRTABLE
.hword .WMASK420_W4_LASX - .WMASK420_LASX_JRTABLE
.WMASK420_W4_LASX:
xvld xr0, a2, 0
xvld xr1, a3, 0
addi.w a5, a5, -4
xvabsd.h xr2, xr0, xr1
xvaddi.hu xr2, xr2, 8
xvsrli.h xr2, xr2, 8
xvadd.h xr2, xr2, xr22
xvmin.hu xr3, xr2, xr20
xvsub.h xr4, xr20, xr3
xvmulwev.w.h xr5, xr3, xr0
xvmulwod.w.h xr6, xr3, xr0
xvmaddwev.w.h xr5, xr4, xr1
xvmaddwod.w.h xr6, xr4, xr1
xvilvl.w xr7, xr6, xr5
xvilvh.w xr8, xr6, xr5
xvssrarni.hu.w xr8, xr7, 10
xvssrlni.bu.h xr9, xr8, 0
vstelm.w vr9, a0, 0, 0
add.d a0, a0, a1
vstelm.w vr9, a0, 0, 1
add.d a0, a0, a1
xvstelm.w xr9, a0, 0, 4
add.d a0, a0, a1
xvstelm.w xr9, a0, 0, 5
add.d a0, a0, a1
xvhaddw.w.h xr3, xr3, xr3
xvpermi.d xr4, xr3, 0xb1
xvadd.h xr3, xr3, xr4
xvpickev.h xr3, xr3, xr3
xvsub.h xr3, xr3, xr21
xvssrarni.bu.h xr3, xr3, 2
vstelm.h vr3, a6, 0, 0
xvstelm.h xr3, a6, 2, 8
addi.d a2, a2, 32
addi.d a3, a3, 32
addi.d a6, a6, 4
blt zero, a5, .WMASK420_W4_LASX
b .END_W420_LASX
.WMASK420_W8_LASX:
xvld xr0, a2, 0
xvld xr1, a2, 32
xvld xr2, a3, 0
xvld xr3, a3, 32
addi.w a5, a5, -4
xvabsd.h xr4, xr0, xr2
xvabsd.h xr5, xr1, xr3
xvaddi.hu xr4, xr4, 8
xvaddi.hu xr5, xr5, 8
xvsrli.h xr4, xr4, 8
xvsrli.h xr5, xr5, 8
xvadd.h xr4, xr4, xr22
xvadd.h xr5, xr5, xr22
xvmin.hu xr6, xr4, xr20
xvmin.hu xr7, xr5, xr20
xvsub.h xr8, xr20, xr6
xvsub.h xr9, xr20, xr7
xvmulwev.w.h xr10, xr6, xr0
xvmulwod.w.h xr11, xr6, xr0
xvmulwev.w.h xr12, xr7, xr1
xvmulwod.w.h xr13, xr7, xr1
xvmaddwev.w.h xr10, xr8, xr2
xvmaddwod.w.h xr11, xr8, xr2
xvmaddwev.w.h xr12, xr9, xr3
xvmaddwod.w.h xr13, xr9, xr3
xvssrarni.hu.w xr12, xr10, 10
xvssrarni.hu.w xr13, xr11, 10
xvssrlni.bu.h xr13, xr12, 0
xvshuf4i.w xr1, xr13, 0x4E
xvilvl.b xr17, xr1, xr13
vstelm.d vr17, a0, 0, 0
add.d a0, a0, a1
xvstelm.d xr17, a0, 0, 2
add.d a0, a0, a1
xvstelm.d xr17, a0, 0, 1
add.d a0, a0, a1
xvstelm.d xr17, a0, 0, 3
add.d a0, a0, a1
xvhaddw.w.h xr6, xr6, xr6
xvhaddw.w.h xr7, xr7, xr7
xvpickev.h xr8, xr7, xr6
xvpermi.q xr9, xr8, 0x01
vadd.h vr8, vr8, vr9
vsub.h vr8, vr8, vr21
vssrarni.bu.h vr8, vr8, 2
vstelm.d vr8, a6, 0, 0
addi.d a2, a2, 64
addi.d a3, a3, 64
addi.d a6, a6, 8
blt zero, a5, .WMASK420_W8_LASX
b .END_W420_LASX
.WMASK420_W16_LASX:
xvld xr0, a2, 0
xvld xr1, a2, 32
xvld xr2, a3, 0
xvld xr3, a3, 32
addi.w a5, a5, -2
xvabsd.h xr4, xr0, xr2
xvabsd.h xr5, xr1, xr3
xvaddi.hu xr4, xr4, 8
xvaddi.hu xr5, xr5, 8
xvsrli.h xr4, xr4, 8
xvsrli.h xr5, xr5, 8
xvadd.h xr4, xr4, xr22
xvadd.h xr5, xr5, xr22
xvmin.hu xr4, xr4, xr20
xvmin.hu xr5, xr5, xr20
xvsub.h xr6, xr20, xr4
xvsub.h xr7, xr20, xr5
xvmulwev.w.h xr8, xr4, xr0
xvmulwod.w.h xr9, xr4, xr0
xvmulwev.w.h xr10, xr5, xr1
xvmulwod.w.h xr11, xr5, xr1
xvmaddwev.w.h xr8, xr6, xr2
xvmaddwod.w.h xr9, xr6, xr2
xvmaddwev.w.h xr10, xr7, xr3
xvmaddwod.w.h xr11, xr7, xr3
xvssrarni.hu.w xr10, xr8, 10
xvssrarni.hu.w xr11, xr9, 10
xvssrlni.bu.h xr11, xr10, 0
xvshuf4i.w xr8, xr11, 0x4E
xvilvl.b xr15, xr8, xr11
xvpermi.d xr16, xr15, 0xd8
vst vr16, a0, 0
add.d a0, a0, a1
xvpermi.q xr16, xr16, 0x01
vst vr16, a0, 0
add.d a0, a0, a1
xvhaddw.w.h xr4, xr4, xr4
xvhaddw.w.h xr5, xr5, xr5
xvadd.h xr4, xr5, xr4
xvpickev.h xr6, xr4, xr4
xvpermi.d xr7, xr6, 0x08
vsub.h vr7, vr7, vr21
vssrarni.bu.h vr7, vr7, 2
vstelm.d vr7, a6, 0, 0
addi.d a2, a2, 64
addi.d a3, a3, 64
addi.d a6, a6, 8
blt zero, a5, .WMASK420_W16_LASX
b .END_W420_LASX
.WMASK420_W32_LASX:
.WMASK420_W64_LASX:
.WMASK420_W128_LASX:
.LOOP_W32_420_LASX:
add.d t1, a2, zero
add.d t2, a3, zero
add.d t3, a0, zero
add.d t4, a6, zero
alsl.d t5, a4, t1, 1
alsl.d t6, a4, t2, 1
or t7, a4, a4
.W32_420_LASX:
xvld xr0, t1, 0
xvld xr1, t2, 0
xvld xr2, t5, 0
xvld xr3, t6, 0
addi.d t1, t1, 32
addi.d t2, t2, 32
addi.d t5, t5, 32
addi.d t6, t6, 32
addi.w t7, t7, -16
xvabsd.h xr4, xr0, xr1
xvabsd.h xr5, xr2, xr3
xvaddi.hu xr4, xr4, 8
xvaddi.hu xr5, xr5, 8
xvsrli.h xr4, xr4, 8
xvsrli.h xr5, xr5, 8
xvadd.h xr4, xr4, xr22
xvadd.h xr5, xr5, xr22
xvmin.hu xr6, xr4, xr20
xvmin.hu xr7, xr5, xr20
xvsub.h xr8, xr20, xr6
xvsub.h xr9, xr20, xr7
xvmulwev.w.h xr10, xr6, xr0
xvmulwod.w.h xr11, xr6, xr0
xvmulwev.w.h xr12, xr7, xr2
xvmulwod.w.h xr13, xr7, xr2
xvmaddwev.w.h xr10, xr8, xr1
xvmaddwod.w.h xr11, xr8, xr1
xvmaddwev.w.h xr12, xr9, xr3
xvmaddwod.w.h xr13, xr9, xr3
xvssrarni.hu.w xr12, xr10, 10
xvssrarni.hu.w xr13, xr11, 10
xvssrlni.bu.h xr13, xr12, 0
xvshuf4i.w xr10, xr13, 0x4E
xvilvl.b xr17, xr10, xr13
xvpermi.d xr18, xr17, 0x08
xvpermi.d xr19, xr17, 0x0d
vst vr18, t3, 0
vstx vr19, t3, a1
addi.d t3, t3, 16
xvhaddw.w.h xr6, xr6, xr6
xvhaddw.w.h xr7, xr7, xr7
xvadd.h xr6, xr7, xr6
xvpickev.h xr7, xr6, xr6
xvpermi.d xr8, xr7, 0x08
vsub.h vr9, vr8, vr21
vssrarni.bu.h vr9, vr9, 2
vstelm.d vr9, t4, 0, 0
addi.d t4, t4, 8
bne t7, zero, .W32_420_LASX
alsl.d a2, a4, a2, 2
alsl.d a3, a4, a3, 2
alsl.d a0, a1, a0, 1
srai.w t8, a4, 1
add.d a6, a6, t8
addi.w a5, a5, -2
blt zero, a5, .LOOP_W32_420_LASX
.END_W420_LASX:
endfunc
#undef mask_sh
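// Helper macros: accumulate all lanes of a vector by chaining the pairwise
// horizontal widening adds (halfword->word->doubleword, and
// word->doubleword->quadword respectively).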
.macro vhaddw.d.h in0
vhaddw.w.h \in0, \in0, \in0
vhaddw.d.w \in0, \in0, \in0
.endm
.macro vhaddw.q.w in0
vhaddw.d.w \in0, \in0, \in0
vhaddw.q.d \in0, \in0, \in0
.endm
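// PUT_H_8W: 8-tap horizontal filtering of 8 output pixels. vr6/vr7/vr8 hold
// the byte-shuffle patterns selecting the overlapping source windows,
// vr10/vr11 the two halves of the filter, and vr9 the rounding bias added
// before the final narrowing shift.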
.macro PUT_H_8W in0
vshuf.b vr2, \in0, \in0, vr6
vshuf.b vr3, \in0, \in0, vr7
vshuf.b vr4, \in0, \in0, vr8
vmulwev.h.bu.b vr12, vr2, vr10
vmulwev.h.bu.b vr13, vr3, vr11
vmulwev.h.bu.b vr14, vr3, vr10
vmulwev.h.bu.b vr15, vr4, vr11
vmaddwod.h.bu.b vr12, vr2, vr10
vmaddwod.h.bu.b vr13, vr3, vr11
vmaddwod.h.bu.b vr14, vr3, vr10
vmaddwod.h.bu.b vr15, vr4, vr11
vadd.h vr12, vr12, vr13
vadd.h vr14, vr14, vr15
vhaddw.w.h vr12, vr12, vr12
vhaddw.w.h vr14, vr14, vr14
vpickev.h \in0, vr14, vr12
vadd.h \in0, \in0, vr9
.endm
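// Byte-shuffle patterns used by the horizontal filters to gather the
// overlapping source windows for each output position.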
const subpel_h_shuf0
.byte 0, 1, 2, 3, 1, 2, 3, 4, 16, 17, 18, 19, 17, 18, 19, 20
endconst
const subpel_h_shuf1
.byte 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6
endconst
const subpel_h_shuf2
.byte 0, 1, 2, 3, 1, 2, 3, 4, 8, 9, 10, 11, 9, 10, 11, 12
.byte 2, 3, 4, 5, 3, 4, 5, 6, 10, 11, 12, 13, 11, 12, 13, 14
endconst
const subpel_h_shuf3
.byte 0, 4, 1, 5, 2, 6, 3, 7, 4, 8, 5, 9, 6, 10, 7, 11
.byte 0, 4, 1, 5, 2, 6, 3, 7, 4, 8, 5, 9, 6, 10, 7, 11
endconst
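// FILTER_8TAP_8W: horizontal 8-tap filter like PUT_H_8W, but it keeps the
// result as 16-bit intermediates (rounding shift by 2) for the h+v path.
// vr7/vr11/vr12 are the shuffle patterns, vr8/vr10 the filter halves.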
.macro FILTER_8TAP_8W in0
vshuf.b vr13, \in0, \in0, vr7
vshuf.b vr14, \in0, \in0, vr11
vshuf.b vr15, \in0, \in0, vr12
vmulwev.h.bu.b vr16, vr13, vr8
vmulwev.h.bu.b vr17, vr14, vr10
vmulwev.h.bu.b vr18, vr14, vr8
vmulwev.h.bu.b vr19, vr15, vr10
vmaddwod.h.bu.b vr16, vr13, vr8
vmaddwod.h.bu.b vr17, vr14, vr10
vmaddwod.h.bu.b vr18, vr14, vr8
vmaddwod.h.bu.b vr19, vr15, vr10
vadd.h vr16, vr16, vr17
vadd.h vr18, vr18, vr19
vhaddw.w.h vr16, vr16, vr16
vhaddw.w.h \in0, vr18, vr18
vssrarni.h.w \in0, vr16, 2
.endm
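// PUT_8TAP_8BPC_LSX: shared body of the put_8tap_*_8bpc_lsx functions below.
// The filter-type code stored on the stack by the wrapper is read back here;
// the code then dispatches to a plain copy (mx==0 && my==0), a horizontal-only
// (my==0), a vertical-only (mx==0) or a combined h+v path, each with its own
// width-indexed jump table.
// Registers as used below: a0=dst, a1=dst_stride, a2=src, a3=src_stride,
// a4=w, a5=h, a6=mx, a7=my.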
.macro PUT_8TAP_8BPC_LSX lable
li.w t0, 4
la.local t6, dav1d_mc_subpel_filters
slli.d t2, a3, 1 //src_stride*2
add.d t3, t2, a3 //src_stride*3
slli.d t4, t2, 1 //src_stride*4
bnez a6, .l_\lable\()put_h //mx
bnez a7, .l_\lable\()put_v //my
clz.w t1, a4
li.w t5, 24
sub.w t1, t1, t5
la.local t5, .l_\lable\()put_hv0_jtable
alsl.d t1, t1, t5, 3
ld.d t6, t1, 0
add.d t5, t5, t6
jirl $r0, t5, 0
.align 3
.l_\lable\()put_hv0_jtable:
.dword .l_\lable\()put_hv0_128w - .l_\lable\()put_hv0_jtable
.dword .l_\lable\()put_hv0_64w - .l_\lable\()put_hv0_jtable
.dword .l_\lable\()put_hv0_32w - .l_\lable\()put_hv0_jtable
.dword .l_\lable\()put_hv0_16w - .l_\lable\()put_hv0_jtable
.dword .l_\lable\()put_hv0_8w - .l_\lable\()put_hv0_jtable
.dword .l_\lable\()put_hv0_4w - .l_\lable\()put_hv0_jtable
.dword .l_\lable\()put_hv0_2w - .l_\lable\()put_hv0_jtable
.l_\lable\()put_hv0_2w:
vldrepl.h vr0, a2, 0
add.d a2, a2, a3
vldrepl.h vr1, a2, 0
vstelm.h vr0, a0, 0, 0
add.d a0, a0, a1
vstelm.h vr1, a0, 0, 0
add.d a2, a2, a3
add.d a0, a0, a1
addi.w a5, a5, -2
bnez a5, .l_\lable\()put_hv0_2w
b .l_\lable\()end_put_8tap
.l_\lable\()put_hv0_4w:
fld.s f0, a2, 0
fldx.s f1, a2, a3
fst.s f0, a0, 0
fstx.s f1, a0, a1
alsl.d a2, a3, a2, 1
alsl.d a0, a1, a0, 1
addi.w a5, a5, -2
bnez a5, .l_\lable\()put_hv0_4w
b .l_\lable\()end_put_8tap
.l_\lable\()put_hv0_8w:
fld.d f0, a2, 0
fldx.d f1, a2, a3
fst.d f0, a0, 0
fstx.d f1, a0, a1
alsl.d a2, a3, a2, 1
alsl.d a0, a1, a0, 1
addi.w a5, a5, -2
bnez a5, .l_\lable\()put_hv0_8w
b .l_\lable\()end_put_8tap
.l_\lable\()put_hv0_16w:
vld vr0, a2, 0
vldx vr1, a2, a3
vst vr0, a0, 0
vstx vr1, a0, a1
alsl.d a2, a3, a2, 1
alsl.d a0, a1, a0, 1
addi.w a5, a5, -2
bnez a5, .l_\lable\()put_hv0_16w
b .l_\lable\()end_put_8tap
.l_\lable\()put_hv0_32w:
vld vr0, a2, 0
vld vr1, a2, 16
add.d a2, a2, a3
vld vr2, a2, 0
vld vr3, a2, 16
vst vr0, a0, 0
vst vr1, a0, 16
add.d a0, a0, a1
vst vr2, a0, 0
vst vr3, a0, 16
add.d a2, a2, a3
add.d a0, a0, a1
addi.w a5, a5, -2
bnez a5, .l_\lable\()put_hv0_32w
b .l_\lable\()end_put_8tap
.l_\lable\()put_hv0_64w:
vld vr0, a2, 0
vld vr1, a2, 16
vld vr2, a2, 32
vld vr3, a2, 48
add.d a2, a2, a3
vld vr4, a2, 0
vld vr5, a2, 16
vld vr6, a2, 32
vld vr7, a2, 48
add.d a2, a2, a3
vst vr0, a0, 0
vst vr1, a0, 16
vst vr2, a0, 32
vst vr3, a0, 48
add.d a0, a0, a1
vst vr4, a0, 0
vst vr5, a0, 16
vst vr6, a0, 32
vst vr7, a0, 48
add.d a0, a0, a1
addi.w a5, a5, -2
bnez a5, .l_\lable\()put_hv0_64w
b .l_\lable\()end_put_8tap
.l_\lable\()put_hv0_128w:
vld vr0, a2, 0
vld vr1, a2, 16
vld vr2, a2, 32
vld vr3, a2, 48
vld vr4, a2, 64
vld vr5, a2, 80
vld vr6, a2, 96
vld vr7, a2, 112
add.d a2, a2, a3
vld vr8, a2, 0
vld vr9, a2, 16
vld vr10, a2, 32
vld vr11, a2, 48
vld vr12, a2, 64
vld vr13, a2, 80
vld vr14, a2, 96
vld vr15, a2, 112
add.d a2, a2, a3
vst vr0, a0, 0
vst vr1, a0, 16
vst vr2, a0, 32
vst vr3, a0, 48
vst vr4, a0, 64
vst vr5, a0, 80
vst vr6, a0, 96
vst vr7, a0, 112
add.d a0, a0, a1
vst vr8, a0, 0
vst vr9, a0, 16
vst vr10, a0, 32
vst vr11, a0, 48
vst vr12, a0, 64
vst vr13, a0, 80
vst vr14, a0, 96
vst vr15, a0, 112
add.d a0, a0, a1
addi.w a5, a5, -2
bnez a5, .l_\lable\()put_hv0_128w
b .l_\lable\()end_put_8tap
.l_\lable\()put_h:
bnez a7, .l_\lable\()put_hv //fh && fv -> h+v path
ld.d t5, sp, 0 //filter_type
andi t1, t5, 3
blt t0, a4, .l_\lable\()put_h_idx_fh
andi t1, t5, 1
addi.w t1, t1, 3
.l_\lable\()put_h_idx_fh:
addi.w t5, zero, 120
mul.w t1, t1, t5
addi.w t5, a6, -1
slli.w t5, t5, 3
add.w t1, t1, t5
add.d t7, t6, t1 //fh's offset
li.w t1, 34
vreplgr2vr.h vr9, t1
clz.w t1, a4
li.w t5, 24
sub.w t1, t1, t5
la.local t5, .l_\lable\()put_h_jtable
alsl.d t1, t1, t5, 3
ld.d t6, t1, 0
add.d t5, t5, t6
jirl $r0, t5, 0
.align 3
.l_\lable\()put_h_jtable:
.dword .l_\lable\()put_h_128w - .l_\lable\()put_h_jtable
.dword .l_\lable\()put_h_64w - .l_\lable\()put_h_jtable
.dword .l_\lable\()put_h_32w - .l_\lable\()put_h_jtable
.dword .l_\lable\()put_h_16w - .l_\lable\()put_h_jtable
.dword .l_\lable\()put_h_8w - .l_\lable\()put_h_jtable
.dword .l_\lable\()put_h_4w - .l_\lable\()put_h_jtable
.dword .l_\lable\()put_h_2w - .l_\lable\()put_h_jtable
.l_\lable\()put_h_2w:
addi.d t7, t7, 2
addi.d a2, a2, -1
vldrepl.w vr8, t7, 0
la.local t7, subpel_h_shuf0
vld vr7, t7, 0
.l_\lable\()put_h_2w_loop:
vld vr0, a2, 0
vldx vr1, a2, a3
add.d a2, a2, t2
vshuf.b vr0, vr1, vr0, vr7
vdp2.h.bu.b vr1, vr0, vr8
vhaddw.w.h vr0, vr1, vr1
vpickev.h vr0, vr0, vr0
vadd.h vr0, vr0, vr9
vssrani.bu.h vr0, vr0, 6
vstelm.h vr0, a0, 0, 0
add.d a0, a0, a1
vstelm.h vr0, a0, 0, 1
add.d a0, a0, a1
addi.w a5, a5, -2
bnez a5, .l_\lable\()put_h_2w_loop
b .l_\lable\()end_put_8tap
.l_\lable\()put_h_4w:
addi.d t7, t7, 2
addi.d a2, a2, -1
vldrepl.w vr8, t7, 0
la.local t7, subpel_h_shuf1
vld vr7, t7, 0
.l_\lable\()put_h_4w_loop:
vld vr0, a2, 0
vldx vr1, a2, a3
add.d a2, a2, t2
vshuf.b vr0, vr0, vr0, vr7
vshuf.b vr1, vr1, vr1, vr7
vmulwev.h.bu.b vr2, vr0, vr8
vmulwev.h.bu.b vr3, vr1, vr8
vmaddwod.h.bu.b vr2, vr0, vr8
vmaddwod.h.bu.b vr3, vr1, vr8
vhaddw.w.h vr0, vr2, vr2
vhaddw.w.h vr1, vr3, vr3
vpickev.h vr0, vr1, vr0
vadd.h vr0, vr0, vr9
vssrani.bu.h vr0, vr0, 6
vstelm.w vr0, a0, 0, 0
add.d a0, a0, a1
vstelm.w vr0, a0, 0, 1
add.d a0, a0, a1
addi.d a5, a5, -2
bnez a5, .l_\lable\()put_h_4w_loop
b .l_\lable\()end_put_8tap
.l_\lable\()put_h_8w:
fld.d f10, t7, 0
vreplvei.w vr11, vr10, 1
vreplvei.w vr10, vr10, 0
la.local t7, subpel_h_shuf1
vld vr6, t7, 0
vaddi.bu vr7, vr6, 4
vaddi.bu vr8, vr6, 8
addi.d a2, a2, -3
.l_\lable\()put_h_8w_loop:
vld vr0, a2, 0
vldx vr1, a2, a3
add.d a2, a2, t2
PUT_H_8W vr0
PUT_H_8W vr1
vssrani.bu.h vr1, vr0, 6
vstelm.d vr1, a0, 0, 0
add.d a0, a0, a1
vstelm.d vr1, a0, 0, 1
add.d a0, a0, a1
addi.w a5, a5, -2
bnez a5, .l_\lable\()put_h_8w_loop
b .l_\lable\()end_put_8tap
.l_\lable\()put_h_16w:
.l_\lable\()put_h_32w:
.l_\lable\()put_h_64w:
.l_\lable\()put_h_128w:
fld.d f10, t7, 0
vreplvei.w vr11, vr10, 1
vreplvei.w vr10, vr10, 0
la.local t7, subpel_h_shuf1
vld vr6, t7, 0
vaddi.bu vr7, vr6, 4
vaddi.bu vr8, vr6, 8
addi.d a2, a2, -3
addi.d t0, a2, 0 //src
addi.w t5, a5, 0 //h
addi.d t8, a0, 0 //dst
.l_\lable\()put_h_16w_loop:
vld vr0, a2, 0
vld vr1, a2, 8
add.d a2, a2, a3
PUT_H_8W vr0
PUT_H_8W vr1
vssrani.bu.h vr1, vr0, 6
vst vr1, a0, 0
add.d a0, a0, a1
addi.d a5, a5, -1
bnez a5, .l_\lable\()put_h_16w_loop
addi.d a2, t0, 16
addi.d t0, t0, 16
addi.d a0, t8, 16
addi.d t8, t8, 16
addi.w a5, t5, 0
addi.w a4, a4, -16
bnez a4, .l_\lable\()put_h_16w_loop
b .l_\lable\()end_put_8tap
.l_\lable\()put_v:
ld.d t1, sp, 0 //filter_type
srli.w t1, t1, 2
blt t0, a5, .l_\lable\()put_v_idx_fv
andi t1, t1, 1
addi.w t1, t1, 3
.l_\lable\()put_v_idx_fv:
addi.w t5, zero, 120
mul.w t1, t1, t5
addi.w t5, a7, -1
slli.w t5, t5, 3
add.w t1, t1, t5
add.d t1, t6, t1 //fv's offset
vldrepl.d vr8, t1, 0
sub.d a2, a2, t3
vilvl.h vr8, vr8, vr8
vreplvei.w vr9, vr8, 1
vreplvei.w vr10, vr8, 2
vreplvei.w vr11, vr8, 3
vreplvei.w vr8, vr8, 0
clz.w t1, a4
li.w t5, 24
sub.w t1, t1, t5
la.local t5, .l_\lable\()put_v_jtable
alsl.d t1, t1, t5, 3
ld.d t6, t1, 0
add.d t5, t5, t6
jirl $r0, t5, 0
.align 3
.l_\lable\()put_v_jtable:
.dword .l_\lable\()put_v_128w - .l_\lable\()put_v_jtable
.dword .l_\lable\()put_v_64w - .l_\lable\()put_v_jtable
.dword .l_\lable\()put_v_32w - .l_\lable\()put_v_jtable
.dword .l_\lable\()put_v_16w - .l_\lable\()put_v_jtable
.dword .l_\lable\()put_v_8w - .l_\lable\()put_v_jtable
.dword .l_\lable\()put_v_4w - .l_\lable\()put_v_jtable
.dword .l_\lable\()put_v_2w - .l_\lable\()put_v_jtable
.l_\lable\()put_v_2w:
fld.s f0, a2, 0
fldx.s f1, a2, a3
fldx.s f2, a2, t2
add.d a2, a2, t3
fld.s f3, a2, 0
fldx.s f4, a2, a3
fldx.s f5, a2, t2
fldx.s f6, a2, t3
add.d a2, a2, t4
vilvl.h vr0, vr1, vr0 //0 1
vilvl.h vr1, vr2, vr1 //1 2
vilvl.b vr0, vr1, vr0 //01 12
vilvl.h vr2, vr3, vr2 //2 3
vilvl.h vr3, vr4, vr3 //3 4
vilvl.b vr1, vr3, vr2 //23 34
vilvl.h vr2, vr5, vr4 //4 5
vilvl.h vr3, vr6, vr5 //5 6
vilvl.b vr2, vr3, vr2 //45 56
.l_\lable\()put_v_2w_loop:
fld.s f7, a2, 0
vilvl.h vr3, vr7, vr6 //6 7
fldx.s f6, a2, a3
add.d a2, a2, t2
vilvl.h vr4, vr6, vr7 //7 8
vilvl.b vr3, vr4, vr3 //67 78
vmulwev.h.bu.b vr12, vr0, vr8
vmulwev.h.bu.b vr13, vr1, vr9
vmulwev.h.bu.b vr14, vr2, vr10
vmulwev.h.bu.b vr15, vr3, vr11
vmaddwod.h.bu.b vr12, vr0, vr8
vmaddwod.h.bu.b vr13, vr1, vr9
vmaddwod.h.bu.b vr14, vr2, vr10
vmaddwod.h.bu.b vr15, vr3, vr11
vaddi.hu vr0, vr1, 0
vaddi.hu vr1, vr2, 0
vaddi.hu vr2, vr3, 0
vadd.h vr12, vr12, vr13
vadd.h vr12, vr12, vr14
vadd.h vr12, vr12, vr15
vssrarni.bu.h vr12, vr12, 6
vstelm.h vr12, a0, 0, 0
add.d a0, a0, a1
vstelm.h vr12, a0, 0, 1
add.d a0, a0, a1
addi.w a5, a5, -2
bnez a5, .l_\lable\()put_v_2w_loop
b .l_\lable\()end_put_8tap
.l_\lable\()put_v_4w:
fld.s f0, a2, 0
fldx.s f1, a2, a3
fldx.s f2, a2, t2
add.d a2, a2, t3
fld.s f3, a2, 0
fldx.s f4, a2, a3
fldx.s f5, a2, t2
fldx.s f6, a2, t3
add.d a2, a2, t4
vilvl.w vr0, vr1, vr0
vilvl.w vr1, vr2, vr1
vilvl.b vr0, vr1, vr0
vilvl.w vr1, vr3, vr2
vilvl.w vr2, vr4, vr3
vilvl.b vr1, vr2, vr1
vilvl.w vr2, vr5, vr4
vilvl.w vr3, vr6, vr5
vilvl.b vr2, vr3, vr2
.l_\lable\()put_v_4w_loop:
fld.s f7, a2, 0
vilvl.w vr3, vr7, vr6
fldx.s f6, a2, a3
add.d a2, a2, t2
vilvl.w vr4, vr6, vr7
vilvl.b vr3, vr4, vr3
vmulwev.h.bu.b vr12, vr0, vr8
vmulwev.h.bu.b vr13, vr1, vr9
vmulwev.h.bu.b vr14, vr2, vr10
vmulwev.h.bu.b vr15, vr3, vr11
vmaddwod.h.bu.b vr12, vr0, vr8
vmaddwod.h.bu.b vr13, vr1, vr9
vmaddwod.h.bu.b vr14, vr2, vr10
vmaddwod.h.bu.b vr15, vr3, vr11
vaddi.hu vr0, vr1, 0
vaddi.hu vr1, vr2, 0
vaddi.hu vr2, vr3, 0
vadd.h vr12, vr12, vr13
vadd.h vr12, vr12, vr14
vadd.h vr12, vr12, vr15
vssrarni.bu.h vr12, vr12, 6
vstelm.w vr12, a0, 0, 0
add.d a0, a0, a1
vstelm.w vr12, a0, 0, 1
add.d a0, a0, a1
addi.w a5, a5, -2
bnez a5, .l_\lable\()put_v_4w_loop
b .l_\lable\()end_put_8tap
.l_\lable\()put_v_8w:
.l_\lable\()put_v_16w:
.l_\lable\()put_v_32w:
.l_\lable\()put_v_64w:
.l_\lable\()put_v_128w:
addi.d t0, a2, 0 //src
addi.d t5, a5, 0 //h
addi.d t8, a0, 0 //dst
.l_\lable\()put_v_8w_loop0:
fld.d f0, a2, 0
fldx.d f1, a2, a3
fldx.d f2, a2, t2
add.d a2, a2, t3
fld.d f3, a2, 0
fldx.d f4, a2, a3
fldx.d f5, a2, t2
fldx.d f6, a2, t3
add.d a2, a2, t4
vilvl.b vr0, vr1, vr0 //0 1
vilvl.b vr1, vr2, vr1 //1 2
vilvl.b vr2, vr3, vr2 //2 3
vilvl.b vr3, vr4, vr3 //3 4
vilvl.b vr4, vr5, vr4 //4 5
vilvl.b vr5, vr6, vr5 //5 6
.l_\lable\()put_v_8w_loop:
fld.d f7, a2, 0
vilvl.b vr12, vr7, vr6 //6 7
fldx.d f6, a2, a3
add.d a2, a2, t2
vilvl.b vr13, vr6, vr7 //7 8
vmulwev.h.bu.b vr14, vr0, vr8
vmulwev.h.bu.b vr15, vr1, vr8
vmulwev.h.bu.b vr16, vr2, vr9
vmulwev.h.bu.b vr17, vr3, vr9
vmulwev.h.bu.b vr18, vr4, vr10
vmulwev.h.bu.b vr19, vr5, vr10
vmulwev.h.bu.b vr20, vr12, vr11
vmulwev.h.bu.b vr21, vr13, vr11
vmaddwod.h.bu.b vr14, vr0, vr8
vmaddwod.h.bu.b vr15, vr1, vr8
vmaddwod.h.bu.b vr16, vr2, vr9
vmaddwod.h.bu.b vr17, vr3, vr9
vmaddwod.h.bu.b vr18, vr4, vr10
vmaddwod.h.bu.b vr19, vr5, vr10
vmaddwod.h.bu.b vr20, vr12, vr11
vmaddwod.h.bu.b vr21, vr13, vr11
vaddi.hu vr0, vr2, 0
vaddi.hu vr1, vr3, 0
vaddi.hu vr2, vr4, 0
vaddi.hu vr3, vr5, 0
vaddi.hu vr4, vr12, 0
vaddi.hu vr5, vr13, 0
vadd.h vr14, vr14, vr16
vadd.h vr14, vr14, vr18
vadd.h vr14, vr14, vr20
vadd.h vr15, vr15, vr17
vadd.h vr15, vr15, vr19
vadd.h vr15, vr15, vr21
vssrarni.bu.h vr15, vr14, 6
vstelm.d vr15, a0, 0, 0
add.d a0, a0, a1
vstelm.d vr15, a0, 0, 1
add.d a0, a0, a1
addi.w a5, a5, -2
bnez a5, .l_\lable\()put_v_8w_loop
addi.d a2, t0, 8
addi.d t0, t0, 8
addi.d a0, t8, 8
addi.d t8, t8, 8
addi.d a5, t5, 0
addi.w a4, a4, -8
bnez a4, .l_\lable\()put_v_8w_loop0
b .l_\lable\()end_put_8tap
.l_\lable\()put_hv:
ld.d t5, sp, 0 //filter_type
andi t1, t5, 3
blt t0, a4, .l_\lable\()put_hv_idx_fh
andi t1, t5, 1
addi.w t1, t1, 3
.l_\lable\()put_hv_idx_fh:
addi.w t5, zero, 120
mul.w t1, t1, t5
addi.w t5, a6, -1
slli.w t5, t5, 3
add.w t1, t1, t5
add.d t1, t6, t1 //fh's offset
vldrepl.d vr8, t1, 0
ld.d t1, sp, 0 //filter_type
srli.w t1, t1, 2
blt t0, a5, .l_\lable\()put_hv_idx_fv
andi t1, t1, 1
addi.w t1, t1, 3
.l_\lable\()put_hv_idx_fv:
addi.w t5, zero, 120
mul.w t1, t1, t5
addi.w t5, a7, -1
slli.w t5, t5, 3
add.w t1, t1, t5
add.d t1, t6, t1 //fv's offset
vldrepl.d vr9, t1, 0
vexth.h.b vr9, vr9
sub.d a2, a2, t3
addi.d a2, a2, -3
clz.w t1, a4
li.w t5, 24
sub.w t1, t1, t5
la.local t5, .l_\lable\()put_hv_jtable
alsl.d t1, t1, t5, 3
ld.d t6, t1, 0
add.d t5, t5, t6
jirl $r0, t5, 0
.align 3
.l_\lable\()put_hv_jtable:
.dword .l_\lable\()put_hv_128w - .l_\lable\()put_hv_jtable
.dword .l_\lable\()put_hv_64w - .l_\lable\()put_hv_jtable
.dword .l_\lable\()put_hv_32w - .l_\lable\()put_hv_jtable
.dword .l_\lable\()put_hv_16w - .l_\lable\()put_hv_jtable
.dword .l_\lable\()put_hv_8w - .l_\lable\()put_hv_jtable
.dword .l_\lable\()put_hv_4w - .l_\lable\()put_hv_jtable
.dword .l_\lable\()put_hv_2w - .l_\lable\()put_hv_jtable
.l_\lable\()put_hv_2w:
addi.d a2, a2, 2
vld vr0, a2, 0
vldx vr1, a2, a3
vldx vr2, a2, t2
add.d a2, a2, t3
vld vr3, a2, 0
vldx vr4, a2, a3
vldx vr5, a2, t2
vldx vr6, a2, t3
add.d a2, a2, t4
la.local t1, subpel_h_shuf0
vld vr7, t1, 0
vbsrl.v vr8, vr8, 2
vreplvei.w vr8, vr8, 0
//fv
vreplvei.w vr14, vr9, 1
vreplvei.w vr15, vr9, 2
vreplvei.w vr16, vr9, 3
vreplvei.w vr9, vr9, 0
vshuf.b vr0, vr1, vr0, vr7
vshuf.b vr1, vr3, vr2, vr7
vshuf.b vr2, vr5, vr4, vr7
vshuf.b vr3, vr6, vr6, vr7
vmulwev.h.bu.b vr10, vr0, vr8
vmulwev.h.bu.b vr11, vr1, vr8
vmulwev.h.bu.b vr12, vr2, vr8
vmulwev.h.bu.b vr13, vr3, vr8
vmaddwod.h.bu.b vr10, vr0, vr8
vmaddwod.h.bu.b vr11, vr1, vr8
vmaddwod.h.bu.b vr12, vr2, vr8
vmaddwod.h.bu.b vr13, vr3, vr8
vhaddw.w.h vr0, vr10, vr10
vhaddw.w.h vr1, vr11, vr11
vssrarni.h.w vr1, vr0, 2 //h0 h1 h2 h3
vhaddw.w.h vr2, vr12, vr12
vhaddw.w.h vr3, vr13, vr13
vssrarni.h.w vr3, vr2, 2 //h4 h5 h6 ~
vbsrl.v vr2, vr1, 4
vextrins.w vr2, vr3, 0x30 //h1 h2 h3 h4
vilvl.h vr4, vr2, vr1 //h0 h1 h1 h2 --
vilvh.h vr5, vr2, vr1 //h2 h3 h3 h4 --
vbsrl.v vr6, vr3, 4
vilvl.h vr6, vr6, vr3 //h4 h5 h5 h6 --
vbsrl.v vr3, vr3, 8 //h6 ~
.l_\lable\()put_hv_2w_loop:
vld vr0, a2, 0
vldx vr2, a2, a3
add.d a2, a2, t2
vshuf.b vr0, vr2, vr0, vr7
vdp2.h.bu.b vr17, vr0, vr8
vhaddw.w.h vr17, vr17, vr17
vssrarni.h.w vr17, vr17, 2 //h7 h8
vextrins.w vr3, vr17, 0x10 //h6 h7
vilvl.h vr3, vr17, vr3 //h6 h7 h7 h8 --
vmulwev.w.h vr18, vr4, vr9
vmulwev.w.h vr19, vr5, vr14
vmulwev.w.h vr20, vr6, vr15
vmulwev.w.h vr21, vr3, vr16
vmaddwod.w.h vr18, vr4, vr9
vmaddwod.w.h vr19, vr5, vr14
vmaddwod.w.h vr20, vr6, vr15
vmaddwod.w.h vr21, vr3, vr16
vaddi.hu vr4, vr5, 0
vaddi.hu vr5, vr6, 0
vaddi.hu vr6, vr3, 0
vbsrl.v vr3, vr17, 4 //h8 ~
vadd.w vr18, vr18, vr19
vadd.w vr18, vr18, vr20
vadd.w vr18, vr18, vr21
vssrarni.hu.w vr0, vr18, 10
vssrani.bu.h vr0, vr0, 0
vstelm.h vr0, a0, 0, 0
add.d a0, a0, a1
vstelm.h vr0, a0, 0, 1
add.d a0, a0, a1
addi.d a5, a5, -2
bnez a5, .l_\lable\()put_hv_2w_loop
b .l_\lable\()end_put_8tap
.l_\lable\()put_hv_4w:
addi.d a2, a2, 2 //skip the two leading zero taps of the 4-tap filter
vld vr0, a2, 0
vldx vr1, a2, a3
vldx vr2, a2, t2
add.d a2, a2, t3
vld vr3, a2, 0
vldx vr4, a2, a3
vldx vr5, a2, t2
vldx vr6, a2, t3
add.d a2, a2, t4
la.local t1, subpel_h_shuf1
vld vr7, t1, 0
vbsrl.v vr8, vr8, 2
vreplvei.w vr8, vr8, 0
//fv
vreplvei.w vr17, vr9, 0
vreplvei.w vr18, vr9, 1
vreplvei.w vr19, vr9, 2
vreplvei.w vr20, vr9, 3
//DAV1D_FILTER_8TAP_RND
vshuf.b vr0, vr0, vr0, vr7
vshuf.b vr1, vr1, vr1, vr7
vshuf.b vr2, vr2, vr2, vr7
vshuf.b vr3, vr3, vr3, vr7
vshuf.b vr4, vr4, vr4, vr7
vshuf.b vr5, vr5, vr5, vr7
vshuf.b vr6, vr6, vr6, vr7
vmulwev.h.bu.b vr10, vr0, vr8
vmulwev.h.bu.b vr11, vr1, vr8
vmulwev.h.bu.b vr12, vr2, vr8
vmulwev.h.bu.b vr13, vr3, vr8
vmulwev.h.bu.b vr14, vr4, vr8
vmulwev.h.bu.b vr15, vr5, vr8
vmulwev.h.bu.b vr16, vr6, vr8
vmaddwod.h.bu.b vr10, vr0, vr8
vmaddwod.h.bu.b vr11, vr1, vr8
vmaddwod.h.bu.b vr12, vr2, vr8
vmaddwod.h.bu.b vr13, vr3, vr8
vmaddwod.h.bu.b vr14, vr4, vr8
vmaddwod.h.bu.b vr15, vr5, vr8
vmaddwod.h.bu.b vr16, vr6, vr8
vhaddw.w.h vr10, vr10, vr10
vhaddw.w.h vr11, vr11, vr11
vhaddw.w.h vr12, vr12, vr12
vhaddw.w.h vr13, vr13, vr13
vhaddw.w.h vr14, vr14, vr14
vhaddw.w.h vr15, vr15, vr15
vhaddw.w.h vr16, vr16, vr16
vssrarni.h.w vr10, vr10, 2 //h0
vssrarni.h.w vr11, vr11, 2 //h1
vssrarni.h.w vr12, vr12, 2 //h2
vssrarni.h.w vr13, vr13, 2 //h3
vssrarni.h.w vr14, vr14, 2 //h4
vssrarni.h.w vr15, vr15, 2 //h5
vssrarni.h.w vr16, vr16, 2 //h6
//h0
vilvl.h vr0, vr11, vr10 //01
vilvl.h vr1, vr13, vr12 //23
vilvl.h vr2, vr15, vr14 //45
//h1
vilvl.h vr4, vr12, vr11 //12
vilvl.h vr5, vr14, vr13 //34
vilvl.h vr6, vr16, vr15 //56
.l_\lable\()put_hv_4w_loop:
vld vr9, a2, 0
vldx vr10, a2, a3
add.d a2, a2, t2
//DAV1D_FILTER_8TAP_CLIP
vshuf.b vr9, vr9, vr9, vr7
vshuf.b vr10, vr10, vr10, vr7
vmulwev.h.bu.b vr11, vr9, vr8
vmulwev.h.bu.b vr12, vr10, vr8
vmaddwod.h.bu.b vr11, vr9, vr8
vmaddwod.h.bu.b vr12, vr10, vr8
vhaddw.w.h vr11, vr11, vr11
vhaddw.w.h vr12, vr12, vr12
vssrarni.h.w vr11, vr11, 2 //h7
vssrarni.h.w vr12, vr12, 2 //h8
vilvl.h vr3, vr11, vr16 //67
vilvl.h vr13, vr12, vr11 //78
vmulwev.w.h vr9, vr0, vr17
vmulwev.w.h vr10, vr1, vr18
vmulwev.w.h vr14, vr2, vr19
vmulwev.w.h vr15, vr3, vr20
vmaddwod.w.h vr9, vr0, vr17
vmaddwod.w.h vr10, vr1, vr18
vmaddwod.w.h vr14, vr2, vr19
vmaddwod.w.h vr15, vr3, vr20
vadd.w vr16, vr9, vr10
vadd.w vr16, vr16, vr14
vadd.w vr16, vr16, vr15
vmulwev.w.h vr9, vr4, vr17
vmulwev.w.h vr10, vr5, vr18
vmulwev.w.h vr14, vr6, vr19
vmulwev.w.h vr15, vr13, vr20
vmaddwod.w.h vr9, vr4, vr17
vmaddwod.w.h vr10, vr5, vr18
vmaddwod.w.h vr14, vr6, vr19
vmaddwod.w.h vr15, vr13, vr20
vadd.w vr21, vr9, vr10
vadd.w vr21, vr21, vr14
vadd.w vr21, vr21, vr15
vssrarni.hu.w vr21, vr16, 10
vssrani.bu.h vr21, vr21, 0
//cache
vaddi.hu vr0, vr1, 0
vaddi.hu vr1, vr2, 0
vaddi.hu vr2, vr3, 0
vaddi.hu vr4, vr5, 0
vaddi.hu vr5, vr6, 0
vaddi.hu vr6, vr13, 0
vaddi.hu vr16, vr12, 0
vstelm.w vr21, a0, 0, 0
add.d a0, a0, a1
vstelm.w vr21, a0, 0, 1
add.d a0, a0, a1
addi.w a5, a5, -2
bnez a5, .l_\lable\()put_hv_4w_loop
b .l_\lable\()end_put_8tap
.l_\lable\()put_hv_8w:
.l_\lable\()put_hv_16w:
.l_\lable\()put_hv_32w:
.l_\lable\()put_hv_64w:
.l_\lable\()put_hv_128w:
addi.d sp, sp, -8*8
fst.d f24, sp, 0
fst.d f25, sp, 8
fst.d f26, sp, 16
fst.d f27, sp, 24
fst.d f28, sp, 32
fst.d f29, sp, 40
fst.d f30, sp, 48
fst.d f31, sp, 56
addi.d t0, a2, 0 //src
addi.d t5, a5, 0 //h
addi.d t8, a0, 0 //dst
la.local t1, subpel_h_shuf1
vld vr7, t1, 0
vaddi.bu vr11, vr7, 4
vaddi.bu vr12, vr7, 8
vreplvei.w vr10, vr8, 1
vreplvei.w vr8, vr8, 0
vreplvei.w vr20, vr9, 1
vreplvei.w vr21, vr9, 2
vreplvei.w vr22, vr9, 3
vreplvei.w vr9, vr9, 0
.l_\lable\()put_hv_8w_loop0:
vld vr0, a2, 0
vldx vr1, a2, a3
vldx vr2, a2, t2
add.d a2, a2, t3
vld vr3, a2, 0
vldx vr4, a2, a3
vldx vr5, a2, t2
vldx vr6, a2, t3
add.d a2, a2, t4
FILTER_8TAP_8W vr0 //h0
FILTER_8TAP_8W vr1 //h1
FILTER_8TAP_8W vr2 //h2
FILTER_8TAP_8W vr3 //h3
FILTER_8TAP_8W vr4 //h4
FILTER_8TAP_8W vr5 //h5
FILTER_8TAP_8W vr6 //h6
//h0' low part
vilvl.h vr23, vr1, vr0 //01
vilvl.h vr24, vr3, vr2 //23
vilvl.h vr25, vr5, vr4 //45
//h0' high part
vilvh.h vr26, vr1, vr0 //01
vilvh.h vr27, vr3, vr2 //23
vilvh.h vr28, vr5, vr4 //45
//h1' low part
vilvl.h vr29, vr2, vr1 //12
vilvl.h vr30, vr4, vr3 //34
vilvl.h vr31, vr6, vr5 //56
//h1' high part
vilvh.h vr0, vr2, vr1 //12
vilvh.h vr1, vr4, vr3 //34
vilvh.h vr2, vr6, vr5 //56
.l_\lable\()put_hv_8w_loop:
vld vr3, a2, 0
vldx vr4, a2, a3
add.d a2, a2, t2
FILTER_8TAP_8W vr3 //h7
FILTER_8TAP_8W vr4 //h8
//h0' low part
vilvl.h vr16, vr3, vr6 //67 ~low
vmulwev.w.h vr13, vr23, vr9
vmulwev.w.h vr14, vr24, vr20
vmulwev.w.h vr15, vr25, vr21
vmulwev.w.h vr17, vr16, vr22
vmaddwod.w.h vr13, vr23, vr9
vmaddwod.w.h vr14, vr24, vr20
vmaddwod.w.h vr15, vr25, vr21
vmaddwod.w.h vr17, vr16, vr22
vadd.w vr13, vr13, vr14
vadd.w vr13, vr13, vr15
vadd.w vr13, vr13, vr17
//cache
vaddi.hu vr23, vr24, 0
vaddi.hu vr24, vr25, 0
vaddi.hu vr25, vr16, 0
//h0' high part
vilvh.h vr17, vr3, vr6 //67 ~high
vmulwev.w.h vr14, vr26, vr9
vmulwev.w.h vr15, vr27, vr20
vmulwev.w.h vr16, vr28, vr21
vmulwev.w.h vr18, vr17, vr22
vmaddwod.w.h vr14, vr26, vr9
vmaddwod.w.h vr15, vr27, vr20
vmaddwod.w.h vr16, vr28, vr21
vmaddwod.w.h vr18, vr17, vr22
vadd.w vr14, vr14, vr15
vadd.w vr14, vr14, vr16
vadd.w vr14, vr14, vr18
vssrarni.hu.w vr14, vr13, 10
vssrarni.bu.h vr5, vr14, 0
vstelm.d vr5, a0, 0, 0
add.d a0, a0, a1
//cache
vaddi.hu vr26, vr27, 0
vaddi.hu vr27, vr28, 0
vaddi.hu vr28, vr17, 0
vaddi.hu vr6, vr4, 0
vilvl.h vr5, vr4, vr3 //78 ~low
vilvh.h vr4, vr4, vr3 //78 ~high
//h1' low part
vmulwev.w.h vr13, vr29, vr9
vmulwev.w.h vr14, vr30, vr20
vmulwev.w.h vr15, vr31, vr21
vmulwev.w.h vr16, vr5, vr22
vmaddwod.w.h vr13, vr29, vr9
vmaddwod.w.h vr14, vr30, vr20
vmaddwod.w.h vr15, vr31, vr21
vmaddwod.w.h vr16, vr5, vr22
vadd.w vr13, vr13, vr14
vadd.w vr13, vr13, vr15
vadd.w vr13, vr13, vr16
//cache
vaddi.hu vr29, vr30, 0
vaddi.hu vr30, vr31, 0
vaddi.hu vr31, vr5, 0
//h1' high part
vmulwev.w.h vr14, vr0, vr9
vmulwev.w.h vr15, vr1, vr20
vmulwev.w.h vr16, vr2, vr21
vmulwev.w.h vr17, vr4, vr22
vmaddwod.w.h vr14, vr0, vr9
vmaddwod.w.h vr15, vr1, vr20
vmaddwod.w.h vr16, vr2, vr21
vmaddwod.w.h vr17, vr4, vr22
vadd.w vr14, vr14, vr15
vadd.w vr14, vr14, vr16
vadd.w vr14, vr14, vr17
vssrarni.hu.w vr14, vr13, 10
vssrarni.bu.h vr5, vr14, 0
vstelm.d vr5, a0, 0, 0
add.d a0, a0, a1
//cache
vaddi.hu vr0, vr1, 0
vaddi.hu vr1, vr2, 0
vaddi.hu vr2, vr4, 0
addi.w a5, a5, -2
bnez a5, .l_\lable\()put_hv_8w_loop
addi.d a2, t0, 8
addi.d t0, t0, 8
addi.d a0, t8, 8
addi.d t8, t8, 8
addi.d a5, t5, 0
addi.w a4, a4, -8
bnez a4, .l_\lable\()put_hv_8w_loop0
fld.d f24, sp, 0
fld.d f25, sp, 8
fld.d f26, sp, 16
fld.d f27, sp, 24
fld.d f28, sp, 32
fld.d f29, sp, 40
fld.d f30, sp, 48
fld.d f31, sp, 56
addi.d sp, sp, 8*8
.l_\lable\()end_put_8tap:
.endm
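// Entry points: each put_8tap_* variant stores its filter-type code on the
// stack (horizontal filter type in the low two bits, vertical type in the
// next two) and expands the shared macro body.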
function put_8tap_regular_8bpc_lsx
addi.d sp, sp, -16
st.d zero, sp, 0
PUT_8TAP_8BPC_LSX 0
addi.d sp, sp, 16
endfunc
function put_8tap_smooth_regular_8bpc_lsx
addi.d sp, sp, -16
li.w t0, 1
st.d t0, sp, 0
PUT_8TAP_8BPC_LSX 1
addi.d sp, sp, 16
endfunc
function put_8tap_sharp_regular_8bpc_lsx
addi.d sp, sp, -16
li.w t0, 2
st.d t0, sp, 0
PUT_8TAP_8BPC_LSX 2
addi.d sp, sp, 16
endfunc
function put_8tap_regular_smooth_8bpc_lsx
addi.d sp, sp, -16
li.w t0, 4
st.d t0, sp, 0
PUT_8TAP_8BPC_LSX 4
addi.d sp, sp, 16
endfunc
function put_8tap_smooth_8bpc_lsx
addi.d sp, sp, -16
li.w t0, 5
st.d t0, sp, 0
PUT_8TAP_8BPC_LSX 5
addi.d sp, sp, 16
endfunc
function put_8tap_sharp_smooth_8bpc_lsx
addi.d sp, sp, -16
li.w t0, 6
st.d t0, sp, 0
PUT_8TAP_8BPC_LSX 6
addi.d sp, sp, 16
endfunc
function put_8tap_regular_sharp_8bpc_lsx
addi.d sp, sp, -16
li.w t0, 8
st.d t0, sp, 0
PUT_8TAP_8BPC_LSX 8
addi.d sp, sp, 16
endfunc
function put_8tap_smooth_sharp_8bpc_lsx
addi.d sp, sp, -16
li.w t0, 9
st.d t0, sp, 0
PUT_8TAP_8BPC_LSX 9
addi.d sp, sp, 16
endfunc
function put_8tap_sharp_8bpc_lsx
addi.d sp, sp, -16
li.w t0, 10
st.d t0, sp, 0
PUT_8TAP_8BPC_LSX 10
addi.d sp, sp, 16
endfunc
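// shufb1: two overlapping 8-byte source windows (offsets 0 and 1).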
const shufb1
.byte 0,1,2,3,4,5,6,7,1,2,3,4,5,6,7,8
endconst
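// PREP_H_8W / PREP_HV_8W_LASX: 8-tap horizontal filtering that keeps the
// results as 16-bit "prep" intermediates (rounding shift by 2); LSX and
// LASX variants of the same operation.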
.macro PREP_H_8W in0
vshuf.b vr2, \in0, \in0, vr6
vshuf.b vr3, \in0, \in0, vr7
vshuf.b vr4, \in0, \in0, vr8
vmulwev.h.bu.b vr12, vr2, vr22
vmulwev.h.bu.b vr13, vr3, vr23
vmulwev.h.bu.b vr14, vr3, vr22
vmulwev.h.bu.b vr15, vr4, vr23
vmaddwod.h.bu.b vr12, vr2, vr22
vmaddwod.h.bu.b vr13, vr3, vr23
vmaddwod.h.bu.b vr14, vr3, vr22
vmaddwod.h.bu.b vr15, vr4, vr23
vadd.h vr12, vr12, vr13
vadd.h vr14, vr14, vr15
vhaddw.w.h vr12, vr12, vr12
vhaddw.w.h \in0, vr14, vr14
vssrarni.h.w \in0, vr12, 2
.endm
.macro PREP_HV_8W_LASX in0
xvshuf.b xr4, \in0, \in0, xr19
xvshuf.b xr5, \in0, \in0, xr20
xvshuf.b xr6, \in0, \in0, xr21
xvmulwev.h.bu.b xr7, xr4, xr22
xvmulwev.h.bu.b xr9, xr5, xr23
xvmulwev.h.bu.b xr10, xr5, xr22
xvmulwev.h.bu.b xr11, xr6, xr23
xvmaddwod.h.bu.b xr7, xr4, xr22
xvmaddwod.h.bu.b xr9, xr5, xr23
xvmaddwod.h.bu.b xr10, xr5, xr22
xvmaddwod.h.bu.b xr11, xr6, xr23
xvadd.h xr7, xr7, xr9
xvadd.h xr9, xr10, xr11
xvhaddw.w.h xr7, xr7, xr7
xvhaddw.w.h \in0, xr9, xr9
xvssrarni.h.w \in0, xr7, 2
.endm
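// PREP_8TAP_8BPC_LASX: shared body of the prep_8tap_*_8bpc_lasx functions.
// The output is the 16-bit intermediate buffer, so there is no dst_stride;
// registers as used below: a0=tmp, a1=src, a2=src_stride, a3=w, a4=h, a5=mx,
// a6=my, with the filter-type code in a7. As in the put macro, the code
// dispatches between the no-filter path (src << 4), the horizontal-only,
// vertical-only and h+v paths through width-indexed jump tables.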
.macro PREP_8TAP_8BPC_LASX lable
li.w t0, 4
la.local t6, dav1d_mc_subpel_filters
slli.d t2, a2, 1 //src_stride*2
add.d t3, t2, a2 //src_stride*3
slli.d t4, t2, 1 //src_stride*4
bnez a5, .l_\lable\()h_lasx //mx
bnez a6, .l_\lable\()v_lasx //my
clz.w t1, a3
li.w t5, 24
sub.w t1, t1, t5
la.local t5, .l_\lable\()prep_hv0_jtable_lasx
alsl.d t1, t1, t5, 1
ld.h t8, t1, 0
add.d t5, t5, t8
jirl $r0, t5, 0
.align 3
.l_\lable\()prep_hv0_jtable_lasx:
.hword .l_\lable\()hv0_128w_lasx - .l_\lable\()prep_hv0_jtable_lasx
.hword .l_\lable\()hv0_64w_lasx - .l_\lable\()prep_hv0_jtable_lasx
.hword .l_\lable\()hv0_32w_lasx - .l_\lable\()prep_hv0_jtable_lasx
.hword .l_\lable\()hv0_16w_lasx - .l_\lable\()prep_hv0_jtable_lasx
.hword .l_\lable\()hv0_8w_lasx - .l_\lable\()prep_hv0_jtable_lasx
.hword .l_\lable\()hv0_4w_lasx - .l_\lable\()prep_hv0_jtable_lasx
.l_\lable\()hv0_4w_lasx:
fld.s f0, a1, 0
fldx.s f1, a1, a2
fldx.s f2, a1, t2
fldx.s f3, a1, t3
add.d a1, a1, t4
xvpackev.w xr0, xr1, xr0
xvpackev.w xr1, xr3, xr2
xvpermi.q xr0, xr1, 0x02
xvsllwil.hu.bu xr0, xr0, 4
xvst xr0, a0, 0
addi.d a0, a0, 32
addi.d a4, a4, -4
bnez a4, .l_\lable\()hv0_4w_lasx
b .l_\lable\()end_pre_8tap_lasx
.l_\lable\()hv0_8w_lasx:
fld.d f0, a1, 0
fldx.d f1, a1, a2
fldx.d f2, a1, t2
fldx.d f3, a1, t3
add.d a1, a1, t4
xvpermi.q xr0, xr1, 0x02
xvpermi.q xr2, xr3, 0x02
xvsllwil.hu.bu xr0, xr0, 4
xvsllwil.hu.bu xr2, xr2, 4
xvst xr0, a0, 0
xvst xr2, a0, 32
addi.d a0, a0, 64
addi.d a4, a4, -4
bnez a4, .l_\lable\()hv0_8w_lasx
b .l_\lable\()end_pre_8tap_lasx
.l_\lable\()hv0_16w_lasx:
vld vr0, a1, 0
vldx vr1, a1, a2
vldx vr2, a1, t2
vldx vr3, a1, t3
add.d a1, a1, t4
vext2xv.hu.bu xr0, xr0
vext2xv.hu.bu xr1, xr1
vext2xv.hu.bu xr2, xr2
vext2xv.hu.bu xr3, xr3
xvslli.h xr0, xr0, 4
xvslli.h xr1, xr1, 4
xvslli.h xr2, xr2, 4
xvslli.h xr3, xr3, 4
xvst xr0, a0, 0
xvst xr1, a0, 32
xvst xr2, a0, 64
xvst xr3, a0, 96
addi.d a0, a0, 128
addi.d a4, a4, -4
bnez a4, .l_\lable\()hv0_16w_lasx
b .l_\lable\()end_pre_8tap_lasx
.l_\lable\()hv0_32w_lasx:
xvld xr0, a1, 0
xvldx xr1, a1, a2
xvldx xr2, a1, t2
xvldx xr3, a1, t3
add.d a1, a1, t4
xvpermi.d xr4, xr0, 0xD8
xvpermi.d xr5, xr1, 0xD8
xvpermi.d xr6, xr2, 0xD8
xvpermi.d xr7, xr3, 0xD8
xvpermi.d xr10, xr0, 0x32
xvpermi.d xr11, xr1, 0x32
xvpermi.d xr12, xr2, 0x32
xvpermi.d xr13, xr3, 0x32
xvsllwil.hu.bu xr0, xr4, 4
xvsllwil.hu.bu xr1, xr5, 4
xvsllwil.hu.bu xr2, xr6, 4
xvsllwil.hu.bu xr3, xr7, 4
xvsllwil.hu.bu xr4, xr10, 4
xvsllwil.hu.bu xr5, xr11, 4
xvsllwil.hu.bu xr6, xr12, 4
xvsllwil.hu.bu xr7, xr13, 4
xvst xr0, a0, 0
xvst xr4, a0, 32
xvst xr1, a0, 64
xvst xr5, a0, 96
xvst xr2, a0, 128
xvst xr6, a0, 160
xvst xr3, a0, 192
xvst xr7, a0, 224
addi.d a0, a0, 256
addi.d a4, a4, -4
bnez a4, .l_\lable\()hv0_32w_lasx
b .l_\lable\()end_pre_8tap_lasx
.l_\lable\()hv0_64w_lasx:
.l_\lable\()hv0_128w_lasx:
addi.d t0, a1, 0
addi.d t5, a4, 0
srli.w t7, a3, 5
slli.w t7, t7, 6
addi.d t8, a0, 0
.l_\lable\()hv0_32_loop_lasx:
xvld xr0, a1, 0
xvldx xr1, a1, a2
xvldx xr2, a1, t2
xvldx xr3, a1, t3
add.d a1, a1, t4
xvpermi.d xr4, xr0, 0xD8
xvpermi.d xr5, xr1, 0xD8
xvpermi.d xr6, xr2, 0xD8
xvpermi.d xr7, xr3, 0xD8
xvpermi.d xr10, xr0, 0x32
xvpermi.d xr11, xr1, 0x32
xvpermi.d xr12, xr2, 0x32
xvpermi.d xr13, xr3, 0x32
xvsllwil.hu.bu xr0, xr4, 4
xvsllwil.hu.bu xr1, xr5, 4
xvsllwil.hu.bu xr2, xr6, 4
xvsllwil.hu.bu xr3, xr7, 4
xvsllwil.hu.bu xr4, xr10, 4
xvsllwil.hu.bu xr5, xr11, 4
xvsllwil.hu.bu xr6, xr12, 4
xvsllwil.hu.bu xr7, xr13, 4
xvst xr0, a0, 0
xvst xr4, a0, 32
add.d t1, a0, t7
xvst xr1, t1, 0
xvst xr5, t1, 32
add.d t1, t1, t7
xvst xr2, t1, 0
xvst xr6, t1, 32
add.d t1, t1, t7
xvst xr3, t1, 0
xvst xr7, t1, 32
add.d a0, t1, t7
addi.d a4, a4, -4
bnez a4, .l_\lable\()hv0_32_loop_lasx
addi.d a1, t0, 32
addi.d t0, t0, 32
addi.d a0, t8, 64
addi.d t8, t8, 64
addi.d a4, t5, 0
addi.d a3, a3, -32
bnez a3, .l_\lable\()hv0_32_loop_lasx
b .l_\lable\()end_pre_8tap_lasx
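/*
 * Horizontal-only path. The filter row comes from dav1d_mc_subpel_filters,
 * assumed to store 120 bytes per filter family (15 fractional positions x
 * 8 int8 taps), hence the family*120 + (mx-1)*8 offset computed below;
 * blocks with w <= 4 switch to the 4-tap families instead.
 */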
.l_\lable\()h_lasx:
bnez a6, .l_\lable\()hv_lasx //if(fh) && if (fv)
andi t1, a7, 3
blt t0, a3, .l_\lable\()h_idx_fh_lasx
andi t1, a7, 1
addi.w t1, t1, 3
.l_\lable\()h_idx_fh_lasx:
addi.w t5, zero, 120
mul.w t1, t1, t5
addi.w t5, a5, -1
slli.w t5, t5, 3
add.w t1, t1, t5
add.d t1, t6, t1 //fh's offset
xvldrepl.d xr22, t1, 0
addi.d a1, a1, -3
clz.w t1, a3
li.w t5, 24
sub.w t1, t1, t5
la.local t5, .l_\lable\()prep_h_jtable_lasx
alsl.d t1, t1, t5, 1
ld.h t8, t1, 0
add.d t5, t5, t8
jirl $r0, t5, 0
.align 3
.l_\lable\()prep_h_jtable_lasx:
.hword .l_\lable\()h_128w_lasx - .l_\lable\()prep_h_jtable_lasx
.hword .l_\lable\()h_64w_lasx - .l_\lable\()prep_h_jtable_lasx
.hword .l_\lable\()h_32w_lasx - .l_\lable\()prep_h_jtable_lasx
.hword .l_\lable\()h_16w_lasx - .l_\lable\()prep_h_jtable_lasx
.hword .l_\lable\()h_8w_lasx - .l_\lable\()prep_h_jtable_lasx
.hword .l_\lable\()h_4w_lasx - .l_\lable\()prep_h_jtable_lasx
.l_\lable\()h_4w_lasx:
addi.d a1, a1, 2
la.local t7, subpel_h_shuf1
vld vr7, t7, 0
xvreplve0.q xr7, xr7
xvbsrl.v xr22, xr22, 2
xvreplve0.w xr22, xr22
.l_\lable\()h_4w_loop_lasx:
vld vr0, a1, 0
vldx vr1, a1, a2
vldx vr2, a1, t2
vldx vr3, a1, t3
add.d a1, a1, t4
xvpermi.q xr1, xr0, 0x20
xvpermi.q xr3, xr2, 0x20
xvshuf.b xr1, xr1, xr1, xr7
xvshuf.b xr3, xr3, xr3, xr7
xvmulwev.h.bu.b xr0, xr1, xr22
xvmulwev.h.bu.b xr2, xr3, xr22
xvmaddwod.h.bu.b xr0, xr1, xr22
xvmaddwod.h.bu.b xr2, xr3, xr22
xvhaddw.w.h xr0, xr0, xr0
xvhaddw.w.h xr2, xr2, xr2
xvssrarni.h.w xr2, xr0, 2
xvpermi.d xr2, xr2, 0xd8
xvst xr2, a0, 0
addi.d a0, a0, 32
addi.w a4, a4, -4
bnez a4, .l_\lable\()h_4w_loop_lasx
b .l_\lable\()end_pre_8tap_lasx
.l_\lable\()h_8w_lasx:
la.local t7, subpel_h_shuf1
vld vr6, t7, 0
vbsrl.v vr23, vr22, 4 //fh
xvreplve0.w xr23, xr23
xvreplve0.w xr22, xr22
xvreplve0.q xr19, xr6
xvaddi.bu xr20, xr19, 4
xvaddi.bu xr21, xr19, 8
.l_\lable\()h_8w_loop_lasx:
xvld xr0, a1, 0
xvldx xr1, a1, a2
add.d a1, a1, t2
xvpermi.q xr0, xr1, 0x02
PREP_HV_8W_LASX xr0
xvst xr0, a0, 0
addi.d a0, a0, 32
addi.d a4, a4, -2
bnez a4, .l_\lable\()h_8w_loop_lasx
b .l_\lable\()end_pre_8tap_lasx
.l_\lable\()h_16w_lasx:
la.local t7, subpel_h_shuf1
vld vr6, t7, 0
vbsrl.v vr23, vr22, 4 //fh
xvreplve0.w xr23, xr23
xvreplve0.w xr22, xr22
xvreplve0.q xr19, xr6
xvaddi.bu xr20, xr19, 4
xvaddi.bu xr21, xr19, 8
.l_\lable\()h_16w_loop_lasx:
xvld xr0, a1, 0
xvld xr1, a1, 8
add.d a1, a1, a2
xvpermi.q xr0, xr1, 0x02
PREP_HV_8W_LASX xr0
xvst xr0, a0, 0
xvld xr0, a1, 0
xvld xr1, a1, 8
add.d a1, a1, a2
xvpermi.q xr0, xr1, 0x02
PREP_HV_8W_LASX xr0
xvst xr0, a0, 32
addi.d a0, a0, 64
addi.w a4, a4, -2
bnez a4, .l_\lable\()h_16w_loop_lasx
b .l_\lable\()end_pre_8tap_lasx
.l_\lable\()h_32w_lasx:
.l_\lable\()h_64w_lasx:
.l_\lable\()h_128w_lasx:
la.local t7, subpel_h_shuf1
vld vr6, t7, 0
vbsrl.v vr23, vr22, 4 //fh
xvreplve0.w xr23, xr23
xvreplve0.w xr22, xr22
xvreplve0.q xr19, xr6
xvaddi.bu xr20, xr19, 4
xvaddi.bu xr21, xr19, 8
addi.d t5, a1, 0 //src
addi.d t6, a3, 0 //w
slli.w t7, a3, 1 //store offset
addi.d t8, a0, 0 //dst
.l_\lable\()h_16_loop_lasx:
xvld xr0, a1, 0
xvld xr1, a1, 8
xvpermi.q xr0, xr1, 0x02
PREP_HV_8W_LASX xr0
xvst xr0, a0, 0
xvld xr0, a1, 16
xvld xr1, a1, 24
xvpermi.q xr0, xr1, 0x02
PREP_HV_8W_LASX xr0
xvst xr0, a0, 32
addi.d a0, a0, 64
addi.d a1, a1, 32
addi.d a3, a3, -32
bnez a3, .l_\lable\()h_16_loop_lasx
add.d a1, t5, a2
add.d t5, t5, a2
add.d a0, t8, t7
add.d t8, t8, t7
addi.d a3, t6, 0
addi.d a4, a4, -1
bnez a4, .l_\lable\()h_16_loop_lasx
b .l_\lable\()end_pre_8tap_lasx
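/*
 * Combined h+v path: a separable filter. The horizontal pass keeps a 16-bit
 * intermediate rounded by >>2 and the vertical pass finishes with a >>6,
 * matching the ssrarni immediates used below.
 */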
.l_\lable\()hv_lasx:
andi t1, a7, 3
blt t0, a3, .l_\lable\()hv_idx_fh_lasx
andi t1, a7, 1
addi.w t1, t1, 3
.l_\lable\()hv_idx_fh_lasx:
addi.w t5, zero, 120
mul.w t1, t1, t5
addi.w t5, a5, -1
slli.w t5, t5, 3
add.w t1, t1, t5
add.d t1, t6, t1 //fh's offset
xvldrepl.d xr22, t1, 0
srli.w a7, a7, 2
blt t0, a4, .l_\lable\()hv_idx_fv_lasx
andi a7, a7, 1
addi.w a7, a7, 3
.l_\lable\()hv_idx_fv_lasx:
addi.w t5, zero, 120
mul.w a7, a7, t5
addi.w t5, a6, -1
slli.w t5, t5, 3
add.w a7, a7, t5
add.d a7, t6, a7 //fv's offset
xvldrepl.d xr8, a7, 0
xvsllwil.h.b xr8, xr8, 0
sub.d a1, a1, t3
addi.d a1, a1, -1 //ignore leading 0s
beq a3, t0, .l_\lable\()hv_4w_lasx
addi.d a1, a1, -2
b .l_\lable\()hv_8w_lasx
.l_\lable\()hv_4w_lasx:
xvld xr0, a1, 0
xvldx xr1, a1, a2
xvldx xr2, a1, t2
xvldx xr3, a1, t3
add.d a1, a1, t4
xvld xr4, a1, 0
xvldx xr5, a1, a2
xvldx xr6, a1, t2
la.local t1, subpel_h_shuf2
xvld xr7, t1, 0
vbsrl.v vr22, vr22, 2
xvreplve0.w xr22, xr22
xvreplve0.q xr8, xr8
xvrepl128vei.w xr12, xr8, 0
xvrepl128vei.w xr13, xr8, 1
xvrepl128vei.w xr14, xr8, 2
xvrepl128vei.w xr15, xr8, 3
xvilvl.d xr0, xr1, xr0
xvilvl.d xr2, xr3, xr2
xvilvl.d xr4, xr5, xr4
xvreplve0.q xr0, xr0
xvreplve0.q xr2, xr2
xvreplve0.q xr4, xr4
xvreplve0.q xr6, xr6
xvshuf.b xr0, xr0, xr0, xr7
xvshuf.b xr2, xr2, xr2, xr7
xvshuf.b xr4, xr4, xr4, xr7
xvshuf.b xr6, xr6, xr6, xr7
xvmulwev.h.bu.b xr1, xr0, xr22
xvmulwev.h.bu.b xr3, xr2, xr22
xvmulwev.h.bu.b xr5, xr4, xr22
xvmulwev.h.bu.b xr9, xr6, xr22
xvmaddwod.h.bu.b xr1, xr0, xr22
xvmaddwod.h.bu.b xr3, xr2, xr22
xvmaddwod.h.bu.b xr5, xr4, xr22
xvmaddwod.h.bu.b xr9, xr6, xr22
xvhaddw.w.h xr1, xr1, xr1 // a0 b0 a1 b1 c0 d0 c1 d1
xvhaddw.w.h xr3, xr3, xr3 // a2 b2 a3 b3 c2 d2 c3 d3
xvhaddw.w.h xr5, xr5, xr5 // a4 b4 a5 b5 c4 d4 c5 d5
xvhaddw.w.h xr9, xr9, xr9 // a6 b6 - - c6 d6 - -
xvssrarni.h.w xr3, xr1, 2 // a0 b0 a1 b1 a2 b2 a3 b3 c0 d0 c1 d1 c2 d2 c3 d3
xvssrarni.h.w xr9, xr5, 2 // a4 b4 a5 b5 a6 b6 - - c4 d4 c5 d5 c6 d6 - -
xvbsrl.v xr4, xr3, 4
xvextrins.w xr4, xr9, 0x30 // a1 b1 a2 b2 a3 b3 a4 b4 c1 d1 c2 d2 c3 d3 c4 d4
xvilvl.h xr5, xr4, xr3 // a0 a1 b0 b1 a1 a2 b1 b2 c0 c1 d0 d1 c1 c2 d1 d2
xvilvh.h xr6, xr4, xr3 // a2 a3 b2 b3 a3 a4 b3 b4 c2 c3 d2 d3 c3 c4 d3 d4
xvbsrl.v xr10, xr9, 4 // a5 b5 a6 b6 - - - - c5 d5 c6 d6 - - - -
xvilvl.h xr11, xr10, xr9 // a4 a5 b4 b5 a5 a6 b5 b6 c4 c5 d4 d5 c5 c6 d5 d6
.l_\lable\()hv_w4_loop_lasx:
xvmulwev.w.h xr16, xr5, xr12 //a0 a1 (h0)
xvmulwev.w.h xr17, xr6, xr12 //a2 a3 (h1)
xvmulwev.w.h xr18, xr6, xr13 //a2 a3 (h0)
xvmulwev.w.h xr19, xr11, xr13 //a4 a5 (h1)
xvmulwev.w.h xr20, xr11, xr14 //a4 a5 (h0)
xvmaddwod.w.h xr16, xr5, xr12 //
xvmaddwod.w.h xr17, xr6, xr12 //
xvmaddwod.w.h xr18, xr6, xr13 //
xvmaddwod.w.h xr19, xr11, xr13 //
xvmaddwod.w.h xr20, xr11, xr14 //
xvaddi.wu xr5, xr11, 0
xvadd.w xr16, xr16, xr18 //a0 a1 + a2 a3
xvldx xr18, a1, t3 //a7 b7 c7 d7
add.d a1, a1, t4
xvadd.w xr17, xr17, xr19 //a2 a3 + a4 a5
xvld xr19, a1, 0 //a8 b8 c8 d8
xvadd.w xr16, xr16, xr20 //a0 a1 + a2 a3 + a4 a5
xvldx xr20, a1, a2 //a9 b9 c9 d9
xvilvl.d xr18, xr19, xr18
xvreplve0.q xr18, xr18
xvldx xr19, a1, t2 //aa ba ca da
xvilvl.d xr20, xr19, xr20
xvreplve0.q xr20, xr20
xvshuf.b xr18, xr18, xr18, xr7
xvshuf.b xr20, xr20, xr20, xr7
xvmulwev.h.bu.b xr21, xr18, xr22
xvmulwev.h.bu.b xr23, xr20, xr22
xvmaddwod.h.bu.b xr21, xr18, xr22
xvmaddwod.h.bu.b xr23, xr20, xr22
xvhaddw.w.h xr21, xr21, xr21 //a7 b7 a8 b8 c7 d7 c8 d8
xvhaddw.w.h xr23, xr23, xr23 //a9 b9 aa ba c9 d9 ca da
xvssrarni.h.w xr23, xr21, 2 //a7 b7 a8 b8 a9 b9 aa ba c7 d7 c8 d8 c9 d9 ca da
xvbsll.v xr0, xr23, 4
xvextrins.w xr0, xr9, 0x02 //a6 b6 a7 b7 a8 b8 a9 b9 c6 d6 c7 d7 c8 d8 c9 d9
xvilvl.h xr6, xr23, xr0 //a6 a7 b6 b7 a7 a8 b7 b8 c6 c7 d6 d7 c7 c8 d7 d8
xvilvh.h xr11, xr23, xr0 //a8 a9 b8 b9 a9 aa b9 ba c8 c9 d8 d9 c9 ca d9 da
xvbsrl.v xr9, xr23, 4
xvmulwev.w.h xr1 , xr6, xr14 //a6 a7 (h0)
xvmulwev.w.h xr2 , xr6, xr15 //a6 a7 (h1)
xvmulwev.w.h xr3 , xr11, xr15 //a8 a9 (h1)
xvmaddwod.w.h xr1 , xr6, xr14
xvmaddwod.w.h xr2 , xr6, xr15
xvmaddwod.w.h xr3 , xr11, xr15
xvadd.w xr17, xr17, xr1 //a2 a3 + a4 a5 + a6 a7
xvadd.w xr16, xr16, xr2 //a0 a1 + a2 a3 + a4 a5 + a6 a7
xvadd.w xr17, xr17, xr3 //a2 a3 + a4 a5 + a6 a7 + a8 a9
xvssrarni.h.w xr17, xr16, 6 //a01 b01 a12 b12 a23 b23 a34 b34 c01 d01 c12 d12 c23 d23 c34 d34
xvpermi.d xr17, xr17, 0xd8 //a01 b01 a12 b12 c01 d01 c12 d12 a23 b23 a34 b34 c23 d23 c34 d34
xvshuf4i.w xr17, xr17, 0xd8
xvst xr17, a0, 0
addi.d a0, a0, 32
addi.d a4, a4, -4
bnez a4, .l_\lable\()hv_w4_loop_lasx
b .l_\lable\()end_pre_8tap_lasx
.l_\lable\()hv_8w_lasx:
addi.d sp, sp, -4*8
fst.d f24, sp, 0
fst.d f25, sp, 8
fst.d f26, sp, 16
fst.d f27, sp, 24
la.local t1, subpel_h_shuf1
vld vr19, t1, 0
addi.d t0, a1, 0
addi.d t5, a4, 0
slli.w t7, a3, 1 // store offset
addi.d t8, a0, 0
xvreplve0.q xr19, xr19
xvaddi.bu xr20, xr19, 4
xvaddi.bu xr21, xr19, 8
vbsrl.v vr23, vr22, 4
xvreplve0.w xr22, xr22 //f0f1f2f3
xvreplve0.w xr23, xr23 //f4f5f6f7
xvreplve0.q xr8, xr8
xvrepl128vei.w xr24, xr8, 0
xvrepl128vei.w xr25, xr8, 1
xvrepl128vei.w xr26, xr8, 2
xvrepl128vei.w xr27, xr8, 3
.l_\lable\()hv_8w_loop0_lasx:
xvld xr0, a1, 0
xvldx xr1, a1, a2
xvldx xr2, a1, t2
add.d a1, a1, t3
xvld xr3, a1, 0
xvldx xr4, a1, a2
xvldx xr5, a1, t2
xvldx xr6, a1, t3
add.d a1, a1, t4
xvpermi.q xr0, xr3, 0x02 //0 3
xvpermi.q xr1, xr4, 0x02 //1 4
xvpermi.q xr2, xr5, 0x02 //2 5
xvpermi.q xr3, xr6, 0x02 //3 6
PREP_HV_8W_LASX xr0 //a0b0c0d0 e0f0g0h0 a3b3c3d3 e3f3g3h3
PREP_HV_8W_LASX xr1 //a1b1c1d1 e1f1g1h1 a4b4c4d4 e4f4g4h4
PREP_HV_8W_LASX xr2 //a2b2c2d2 e2f2g2h2 a5b5c5d5 e5f5g5h5
PREP_HV_8W_LASX xr3 //a3b3c3d3 e3f3g3h3 a6b6c6d6 e6f6g6h6
xvpermi.d xr0, xr0, 0xd8
xvpermi.d xr1, xr1, 0xd8
xvpermi.d xr2, xr2, 0xd8
xvpermi.d xr18, xr3, 0xd8
xvilvl.h xr12, xr1, xr0 //a0a1b0b1c0c1d0d1 e0e1f0f1g0g1h0h1
xvilvh.h xr13, xr1, xr0 //a3a4b3b4c3c4d3d4 e3e4f3f4g3g4h3h4
xvilvl.h xr14, xr2, xr1 //a1a2b1b2c1c2d1d2 e1e2f1f2g1g2h1h2
xvilvh.h xr15, xr2, xr1 //a4a5b4b5c4c5d4d5 e4e5f4f5g4g5h4h5
xvilvl.h xr16, xr18, xr2 //a2a3b2b3c2c3d2d3 e2e3f2f3g2g3h2h3
xvilvh.h xr17, xr18, xr2 //a5a6b5b6c5c6d5d6 e5e6f5f6g5g6h5h6
.l_\lable\()hv_8w_loop_lasx:
xvld xr0, a1, 0
xvldx xr1, a1, a2
add.d a1, a1, t2
xvpermi.q xr0, xr1, 0x02 //7 8
PREP_HV_8W_LASX xr0 //a7b7c7d7e7f7g7h7 a8b8c8d8e8f8g8h8
xvpermi.q xr3, xr0, 0x03 //a6b6c6d6e6f6g6h6 a7b7c7d7e7f7g7h7
xvpermi.d xr3, xr3, 0xd8 //a6b6c6d6a7b7c7d7 e6f6g6h6e7f7g7h7
xvpermi.d xr1, xr0, 0xd8 //a7b7c7d7a8b8c8d8 e7f7g7h7e8f8g8h8
xvilvl.h xr18, xr1, xr3 //a6a7b6b7c6c7d6d7 e6e7f6f7g6g7h6h7
xvilvh.h xr2, xr1, xr3 //a7a8b7b8c7c8d7d8 e7e8f7f8g7g8h7h8
xvaddi.hu xr3, xr0, 0
xvmulwev.w.h xr4, xr12, xr24 //01
xvmulwev.w.h xr5, xr14, xr24 //12
xvmulwev.w.h xr6, xr16, xr25 //23
xvmulwev.w.h xr7, xr13, xr25 //34
xvmulwev.w.h xr8, xr15, xr26 //45
xvmulwev.w.h xr9, xr17, xr26 //56
xvmulwev.w.h xr10, xr18, xr27 //67
xvmulwev.w.h xr11, xr2, xr27 //78
xvmaddwod.w.h xr4, xr12, xr24 //01
xvmaddwod.w.h xr5, xr14, xr24 //12
xvmaddwod.w.h xr6, xr16, xr25 //23
xvmaddwod.w.h xr7, xr13, xr25 //34
xvmaddwod.w.h xr8, xr15, xr26 //45
xvmaddwod.w.h xr9, xr17, xr26 //56
xvmaddwod.w.h xr10, xr18, xr27 //67
xvmaddwod.w.h xr11, xr2, xr27 //78
xvadd.w xr4, xr4, xr6
xvadd.w xr5, xr5, xr7
xvadd.w xr4, xr4, xr8
xvadd.w xr5, xr5, xr9
xvadd.w xr4, xr4, xr10
xvadd.w xr5, xr5, xr11
xvaddi.hu xr12, xr16, 0 //01 <-- 23
xvaddi.hu xr14, xr13, 0 //12 <-- 34
xvaddi.hu xr16, xr15, 0 //23 <-- 45
xvaddi.hu xr13, xr17, 0 //34 <-- 56
xvaddi.hu xr15, xr18, 0 //45 <-- 67
xvaddi.hu xr17, xr2, 0 //56 <-- 78
xvssrarni.h.w xr5, xr4, 6
xvpermi.d xr5, xr5, 0xd8
vst vr5, a0, 0
xvpermi.q xr5, xr5, 0x11
vstx vr5, a0, t7
alsl.d a0, t7, a0, 1
addi.d a4, a4, -2
bnez a4, .l_\lable\()hv_8w_loop_lasx
addi.d a1, t0, 8
addi.d t0, t0, 8
addi.d a0, t8, 16
addi.d t8, t8, 16
addi.d a4, t5, 0
addi.d a3, a3, -8
bnez a3, .l_\lable\()hv_8w_loop0_lasx
fld.d f24, sp, 0
fld.d f25, sp, 8
fld.d f26, sp, 16
fld.d f27, sp, 24
addi.d sp, sp, 4*8
b .l_\lable\()end_pre_8tap_lasx
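/*
 * Vertical-only path: fv is selected from bits 2-3 of a7 (with the 4-tap
 * family when h <= 4) and the source pointer is rewound by three rows so
 * the first output row sees its full tap window.
 */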
.l_\lable\()v_lasx:
srli.w a7, a7, 2
blt t0, a4, .l_\lable\()v_idx_fv_lasx
andi a7, a7, 1
addi.w a7, a7, 3
.l_\lable\()v_idx_fv_lasx:
addi.w t5, zero, 120
mul.w a7, a7, t5
addi.w t5, a6, -1
slli.w t5, t5, 3
add.w a7, a7, t5
add.d a7, t6, a7 //fv's offset
xvldrepl.d xr8, a7, 0
xvrepl128vei.h xr12, xr8, 0
xvrepl128vei.h xr13, xr8, 1
xvrepl128vei.h xr14, xr8, 2
xvrepl128vei.h xr15, xr8, 3
sub.d a1, a1, t3
beq a3, t0, .l_\lable\()v_4w_lasx
addi.w t0, t0, 4
beq a3, t0, .l_\lable\()v_8w_lasx
blt t0, a3, .l_\lable\()v_16w_lasx
.l_\lable\()v_4w_lasx:
la.local t6, subpel_h_shuf3
xvld xr11, t6, 0
fld.s f0, a1, 0 //a0b0c0d0
fldx.s f1, a1, a2 //a1b1c1d1
fldx.s f2, a1, t2 //a2b2c2d2
add.d a1, a1, t3
fld.s f3, a1, 0 //a3b3c3d3
fldx.s f4, a1, a2 //a4b4c4d4
fldx.s f5, a1, t2 //a5b5c5d5
fldx.s f6, a1, t3 //a6b6c6d6
vilvl.w vr0, vr1, vr0 //01
vilvl.w vr1, vr3, vr2 //23
vilvl.d vr0, vr1, vr0 //0123
vilvl.w vr2, vr5, vr4 //45
vilvl.d vr1, vr2, vr1 //2345
xvpermi.q xr0, xr1, 0x02 //0123 2345
xvbsrl.v xr1, xr0, 4 //123- 345-
xvpermi.q xr4, xr6, 0x02
xvextrins.w xr1, xr4, 0x30 //1234 3456
xvilvl.b xr2, xr1, xr0 //0112 2334 //a0a1b0b1c0c1d0d1 a1a2b1b2c1c2d1d2 a2a3b2b3c2c3d2d3 a3a4b3b4c3c4d3d4
xvilvh.b xr3, xr1, xr0 //2334 4556 //a2a3b2b3c2c3d2d3 a3a4b3b4c3c4d3d4 a4a5b4b5c4c5d4d5 a5a6b5b6c5c6d5d6
.l_\lable\()v_4w_loop_lasx:
add.d a1, a1, t4
fld.s f0, a1, 0 //a7b7c7d7
fldx.s f1, a1, a2 //a8b8c8d8
fldx.s f4, a1, t2 //a9b9c9d9
fldx.s f5, a1, t3 //aabacada
vilvl.w vr7, vr0, vr6 //67
vilvl.w vr10, vr4, vr1 //89
vextrins.w vr7, vr1, 0x20//678-
vextrins.w vr10, vr5, 0x20//89a-
xvpermi.q xr7, xr10, 0x02//678- 89a-
xvshuf.b xr4, xr7, xr7, xr11 //67 78 89 9a //a6a7b6b7c6c7d6d7 a7a8b7b8c7c8d7d8 a8a9b8b9c8c9d8d9 a9aab9bac9cad9da
xvpermi.q xr7, xr3, 0x11 //4556
xvpermi.q xr7, xr4, 0x02 //45 56 67 78 //a4a5b4b5c4c5d4d5 a5a6b5b6c5c6d5d6 a6a7b6b7c6c7d6d7 a7a8b7b8c7c8d7d8
xvmulwev.h.bu.b xr16, xr2, xr12
xvmulwev.h.bu.b xr17, xr3, xr13
xvmulwev.h.bu.b xr18, xr7, xr14
xvmulwev.h.bu.b xr19, xr4, xr15
xvmaddwod.h.bu.b xr16, xr2, xr12
xvmaddwod.h.bu.b xr17, xr3, xr13
xvmaddwod.h.bu.b xr18, xr7, xr14
xvmaddwod.h.bu.b xr19, xr4, xr15
xvadd.h xr16, xr16, xr17
xvadd.h xr16, xr16, xr18
xvadd.h xr16, xr16, xr19
xvsrari.h xr16, xr16, 2
xvaddi.bu xr2, xr7, 0
xvaddi.bu xr3, xr4, 0
xvaddi.bu xr6, xr5, 0
xvst xr16, a0, 0
addi.d a0, a0, 32
addi.w a4, a4, -4
bnez a4, .l_\lable\()v_4w_loop_lasx
b .l_\lable\()end_pre_8tap_lasx
.l_\lable\()v_8w_lasx:
fld.d f0, a1, 0
fldx.d f1, a1, a2
fldx.d f2, a1, t2
add.d a1, a1, t3
fld.d f3, a1, 0
fldx.d f4, a1, a2
fldx.d f5, a1, t2
fldx.d f6, a1, t3
xvpermi.q xr0, xr1, 0x02
xvpermi.q xr1, xr2, 0x02
xvilvl.b xr0, xr1, xr0 //01 12
xvpermi.q xr2, xr3, 0x02
xvpermi.q xr3, xr4, 0x02
xvilvl.b xr2, xr3, xr2 //23 34
xvpermi.q xr4, xr5, 0x02
xvpermi.q xr5, xr6, 0x02
xvilvl.b xr4, xr5, xr4 //45 56
.l_\lable\()v_8w_loop_lasx:
add.d a1, a1, t4
fld.d f7, a1, 0 //7
fldx.d f10, a1, a2 //8
fldx.d f11, a1, t2 //9
fldx.d f18, a1, t3 //a
xvpermi.q xr6, xr7, 0x02
xvpermi.q xr7, xr10, 0x02
xvilvl.b xr6, xr7, xr6 //67 78
xvpermi.q xr10, xr11, 0x02
xvpermi.q xr11, xr18, 0x02
xvilvl.b xr10, xr11, xr10 //89 9a
xvmulwev.h.bu.b xr1, xr0, xr12
xvmulwev.h.bu.b xr3, xr2, xr13
xvmulwev.h.bu.b xr5, xr4, xr14
xvmulwev.h.bu.b xr7, xr6, xr15
xvmulwev.h.bu.b xr9, xr2, xr12
xvmulwev.h.bu.b xr11, xr4, xr13
xvmulwev.h.bu.b xr16, xr6, xr14
xvmulwev.h.bu.b xr17, xr10, xr15
xvmaddwod.h.bu.b xr1, xr0, xr12
xvmaddwod.h.bu.b xr3, xr2, xr13
xvmaddwod.h.bu.b xr5, xr4, xr14
xvmaddwod.h.bu.b xr7, xr6, xr15
xvmaddwod.h.bu.b xr9, xr2, xr12
xvmaddwod.h.bu.b xr11, xr4, xr13
xvmaddwod.h.bu.b xr16, xr6, xr14
xvmaddwod.h.bu.b xr17, xr10, xr15
xvadd.h xr1, xr1, xr3
xvadd.h xr1, xr1, xr5
xvadd.h xr1, xr1, xr7
xvadd.h xr9, xr9, xr11
xvadd.h xr9, xr9, xr16
xvadd.h xr9, xr9, xr17
xvaddi.bu xr0, xr4, 0
xvaddi.bu xr2, xr6, 0
xvaddi.bu xr4, xr10, 0
xvaddi.bu xr6, xr18, 0
xvsrari.h xr1, xr1, 2
xvsrari.h xr9, xr9, 2
xvst xr1, a0, 0
xvst xr9, a0, 32
addi.d a0, a0, 64
addi.w a4, a4, -4
bnez a4, .l_\lable\()v_8w_loop_lasx
b .l_\lable\()end_pre_8tap_lasx
.l_\lable\()v_16w_lasx:
addi.d t0, a0, 0 //dst
addi.d t5, a1, 0 //src
slli.w t7, a3, 1 //w
addi.d t8, a4, 0 //h
.l_\lable\()v_16w_loop0_lasx:
vld vr0, a1, 0
vldx vr1, a1, a2
vldx vr2, a1, t2
add.d a1, a1, t3
vld vr3, a1, 0
vldx vr4, a1, a2
vldx vr5, a1, t2
vldx vr6, a1, t3
add.d a1, a1, t4
xvpermi.d xr0, xr0, 0xd8
xvpermi.d xr1, xr1, 0xd8
xvpermi.d xr2, xr2, 0xd8
xvpermi.d xr3, xr3, 0xd8
xvpermi.d xr4, xr4, 0xd8
xvpermi.d xr5, xr5, 0xd8
xvpermi.d xr6, xr6, 0xd8
xvilvl.b xr0, xr1, xr0 //01
xvilvl.b xr1, xr2, xr1 //12
xvilvl.b xr2, xr3, xr2 //23
xvilvl.b xr3, xr4, xr3 //34
xvilvl.b xr4, xr5, xr4 //45
xvilvl.b xr5, xr6, xr5 //56
.l_\lable\()v_16w_loop_lasx:
vld vr7, a1, 0 //7
vldx vr10, a1, a2 //8
add.d a1, a1, t2
xvpermi.d xr7, xr7, 0xd8
xvpermi.d xr10, xr10, 0xd8
xvilvl.b xr6, xr7, xr6 //67
xvilvl.b xr7, xr10, xr7 //78
xvmulwev.h.bu.b xr9, xr0, xr12
xvmulwev.h.bu.b xr11, xr2, xr13
xvmulwev.h.bu.b xr16, xr4, xr14
xvmulwev.h.bu.b xr17, xr6, xr15
xvmulwev.h.bu.b xr18, xr1, xr12
xvmulwev.h.bu.b xr19, xr3, xr13
xvmulwev.h.bu.b xr20, xr5, xr14
xvmulwev.h.bu.b xr21, xr7, xr15
xvmaddwod.h.bu.b xr9, xr0, xr12
xvmaddwod.h.bu.b xr11, xr2, xr13
xvmaddwod.h.bu.b xr16, xr4, xr14
xvmaddwod.h.bu.b xr17, xr6, xr15
xvmaddwod.h.bu.b xr18, xr1, xr12
xvmaddwod.h.bu.b xr19, xr3, xr13
xvmaddwod.h.bu.b xr20, xr5, xr14
xvmaddwod.h.bu.b xr21, xr7, xr15
xvadd.h xr9, xr9, xr11
xvadd.h xr9, xr9, xr16
xvadd.h xr9, xr9, xr17
xvadd.h xr11, xr18, xr19
xvadd.h xr11, xr11, xr20
xvadd.h xr11, xr11, xr21
xvsrari.h xr9, xr9, 2
xvsrari.h xr11, xr11, 2
xvaddi.bu xr0, xr2, 0
xvaddi.bu xr1, xr3, 0
xvaddi.bu xr2, xr4, 0
xvaddi.bu xr3, xr5, 0
xvaddi.bu xr4, xr6, 0
xvaddi.bu xr5, xr7, 0
xvaddi.bu xr6, xr10, 0
xvst xr9, a0, 0
xvstx xr11, a0, t7
alsl.d a0, t7, a0, 1
addi.d a4, a4, -2
bnez a4, .l_\lable\()v_16w_loop_lasx
addi.d a3, a3, -16
addi.d a0, t0, 32
addi.d t0, t0, 32
addi.d a1, t5, 16
addi.d t5, t5, 16
addi.d a4, t8, 0
bnez a3, .l_\lable\()v_16w_loop0_lasx
.l_\lable\()end_pre_8tap_lasx:
.endm
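/*
 * The wrappers below only load the filter-type selector into a7 and expand
 * the macro; the numeric codes follow the (vertical << 2) | horizontal
 * encoding consumed above.
 */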
function prep_8tap_regular_8bpc_lasx
addi.w a7, zero, 0
PREP_8TAP_8BPC_LASX 0
endfunc
function prep_8tap_smooth_regular_8bpc_lasx
addi.w a7, zero, 1
PREP_8TAP_8BPC_LASX 1
endfunc
function prep_8tap_sharp_regular_8bpc_lasx
addi.w a7, zero, 2
PREP_8TAP_8BPC_LASX 2
endfunc
function prep_8tap_regular_smooth_8bpc_lasx
addi.w a7, zero, 4
PREP_8TAP_8BPC_LASX 4
endfunc
function prep_8tap_smooth_8bpc_lasx
addi.w a7, zero, 5
PREP_8TAP_8BPC_LASX 5
endfunc
function prep_8tap_sharp_smooth_8bpc_lasx
addi.w a7, zero, 6
PREP_8TAP_8BPC_LASX 6
endfunc
function prep_8tap_regular_sharp_8bpc_lasx
addi.w a7, zero, 8
PREP_8TAP_8BPC_LASX 8
endfunc
function prep_8tap_smooth_sharp_8bpc_lasx
addi.w a7, zero, 9
PREP_8TAP_8BPC_LASX 9
endfunc
function prep_8tap_sharp_8bpc_lasx
addi.w a7, zero, 10
PREP_8TAP_8BPC_LASX 10
endfunc
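/*
 * PREP_8TAP_8BPC_LSX mirrors the LASX macro above using 128-bit vectors,
 * with the same argument and filter-selection conventions.
 */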
.macro PREP_8TAP_8BPC_LSX lable
li.w t0, 4
la.local t6, dav1d_mc_subpel_filters
la.local t7, shufb1
vld vr23, t7, 0
slli.d t2, a2, 1 //src_stride*2
add.d t3, t2, a2 //src_stride*3
slli.d t4, t2, 1
bnez a5, .l_\lable\()h_lsx //mx
bnez a6, .l_\lable\()v_lsx
clz.w t1, a3
li.w t5, 24
sub.w t1, t1, t5
la.local t5, .l_\lable\()prep_hv0_jtable_lsx
alsl.d t1, t1, t5, 1
ld.h t8, t1, 0
add.d t5, t5, t8
jirl $r0, t5, 0
.align 3
.l_\lable\()prep_hv0_jtable_lsx:
.hword .l_\lable\()hv0_128w_lsx - .l_\lable\()prep_hv0_jtable_lsx
.hword .l_\lable\()hv0_64w_lsx - .l_\lable\()prep_hv0_jtable_lsx
.hword .l_\lable\()hv0_32w_lsx - .l_\lable\()prep_hv0_jtable_lsx
.hword .l_\lable\()hv0_16w_lsx - .l_\lable\()prep_hv0_jtable_lsx
.hword .l_\lable\()hv0_8w_lsx - .l_\lable\()prep_hv0_jtable_lsx
.hword .l_\lable\()hv0_4w_lsx - .l_\lable\()prep_hv0_jtable_lsx
.l_\lable\()hv0_4w_lsx:
fld.s f0, a1, 0
fldx.s f1, a1, a2
add.d a1, a1, t2
vilvl.w vr0, vr1, vr0
vsllwil.hu.bu vr0, vr0, 4
vst vr0, a0, 0
addi.d a0, a0, 16
addi.d a4, a4, -2
bnez a4, .l_\lable\()hv0_4w_lsx
b .l_\lable\()end_pre_8tap_lsx
.l_\lable\()hv0_8w_lsx:
fld.d f0, a1, 0
fldx.d f1, a1, a2
add.d a1, a1, t2
vsllwil.hu.bu vr0, vr0, 4
vsllwil.hu.bu vr1, vr1, 4
vst vr0, a0, 0
vst vr1, a0, 16
addi.d a0, a0, 32
addi.d a4, a4, -2
bnez a4, .l_\lable\()hv0_8w_lsx
b .l_\lable\()end_pre_8tap_lsx
.l_\lable\()hv0_16w_lsx:
vld vr0, a1, 0
vldx vr1, a1, a2
add.d a1, a1, t2
vsllwil.hu.bu vr2, vr0, 4
vsllwil.hu.bu vr4, vr1, 4
vexth.hu.bu vr3, vr0
vexth.hu.bu vr5, vr1
vslli.h vr3, vr3, 4
vslli.h vr5, vr5, 4
vst vr2, a0, 0
vst vr3, a0, 16
vst vr4, a0, 32
vst vr5, a0, 48
addi.d a0, a0, 64
addi.d a4, a4, -2
bnez a4, .l_\lable\()hv0_16w_lsx
b .l_\lable\()end_pre_8tap_lsx
.l_\lable\()hv0_32w_lsx:
.l_\lable\()hv0_64w_lsx:
.l_\lable\()hv0_128w_lsx:
addi.d t0, a1, 0
addi.d t5, a4, 0
srli.w t7, a3, 4
slli.w t7, t7, 5
addi.d t8, a0, 0
.l_\lable\()hv0_16_loop_lsx:
vld vr0, a1, 0
vldx vr1, a1, a2
add.d a1, a1, t2
vsllwil.hu.bu vr2, vr0, 4
vsllwil.hu.bu vr3, vr1, 4
vexth.hu.bu vr0, vr0
vexth.hu.bu vr1, vr1
vslli.h vr0, vr0, 4
vslli.h vr1, vr1, 4
vst vr2, a0, 0
vst vr0, a0, 16
add.d a0, a0, t7
vst vr3, a0, 0
vst vr1, a0, 16
add.d a0, a0, t7
addi.d a4, a4, -2
bnez a4, .l_\lable\()hv0_16_loop_lsx
addi.d a1, t0, 16
addi.d t0, t0, 16
addi.d a0, t8, 32
addi.d t8, t8, 32
addi.d a4, t5, 0
addi.d a3, a3, -16
bnez a3, .l_\lable\()hv0_16_loop_lsx
b .l_\lable\()end_pre_8tap_lsx
.l_\lable\()h_lsx:
bnez a6, .l_\lable\()hv_lsx //if(fh) && if (fv)
andi t1, a7, 3
blt t0, a3, .l_\lable\()h_idx_fh_lsx
andi t1, a7, 1
addi.w t1, t1, 3
.l_\lable\()h_idx_fh_lsx:
addi.w t5, zero, 120
mul.w t1, t1, t5
addi.w t5, a5, -1
slli.w t5, t5, 3
add.w t1, t1, t5
add.d t1, t6, t1 //fh's offset
vldrepl.d vr23, t1, 0
addi.d a1, a1, -3
clz.w t1, a3
li.w t5, 24
sub.w t1, t1, t5
la.local t5, .l_\lable\()prep_h_jtable_lsx
alsl.d t1, t1, t5, 1
ld.h t8, t1, 0
add.d t5, t5, t8
jirl $r0, t5, 0
.align 3
.l_\lable\()prep_h_jtable_lsx:
.hword .l_\lable\()h_128w_lsx - .l_\lable\()prep_h_jtable_lsx
.hword .l_\lable\()h_64w_lsx - .l_\lable\()prep_h_jtable_lsx
.hword .l_\lable\()h_32w_lsx - .l_\lable\()prep_h_jtable_lsx
.hword .l_\lable\()h_16w_lsx - .l_\lable\()prep_h_jtable_lsx
.hword .l_\lable\()h_8w_lsx - .l_\lable\()prep_h_jtable_lsx
.hword .l_\lable\()h_4w_lsx - .l_\lable\()prep_h_jtable_lsx
.l_\lable\()h_4w_lsx:
addi.d a1, a1, 2
la.local t7, subpel_h_shuf1
vld vr7, t7, 0
vbsrl.v vr23, vr23, 2
vreplvei.w vr23, vr23, 0
.l_\lable\()h_4w_loop_lsx:
vld vr0, a1, 0
vldx vr1, a1, a2
add.d a1, a1, t2
vshuf.b vr0, vr0, vr0, vr7
vshuf.b vr1, vr1, vr1, vr7
vmulwev.h.bu.b vr2, vr0, vr23
vmulwev.h.bu.b vr3, vr1, vr23
vmaddwod.h.bu.b vr2, vr0, vr23
vmaddwod.h.bu.b vr3, vr1, vr23
vhaddw.w.h vr0, vr2, vr2
vhaddw.w.h vr1, vr3, vr3
vssrarni.h.w vr1, vr0, 2
vst vr1, a0, 0
addi.d a0, a0, 16
addi.w a4, a4, -2
bnez a4, .l_\lable\()h_4w_loop_lsx
b .l_\lable\()end_pre_8tap_lsx
.l_\lable\()h_8w_lsx:
vreplvei.w vr22, vr23, 0 //fh
vreplvei.w vr23, vr23, 1
la.local t7, subpel_h_shuf1
vld vr6, t7, 0
vaddi.bu vr7, vr6, 4
vaddi.bu vr8, vr6, 8
.l_\lable\()h_8w_loop_lsx:
vld vr0, a1, 0
vldx vr1, a1, a2
add.d a1, a1, t2
PREP_H_8W vr0
PREP_H_8W vr1
vst vr0, a0, 0
vst vr1, a0, 16
addi.d a0, a0, 32
addi.d a4, a4, -2
bnez a4, .l_\lable\()h_8w_loop_lsx
b .l_\lable\()end_pre_8tap_lsx
.l_\lable\()h_16w_lsx:
.l_\lable\()h_32w_lsx:
.l_\lable\()h_64w_lsx:
.l_\lable\()h_128w_lsx:
vreplvei.w vr22, vr23, 0 //fh
vreplvei.w vr23, vr23, 1
la.local t7, subpel_h_shuf1
vld vr6, t7, 0
vaddi.bu vr7, vr6, 4
vaddi.bu vr8, vr6, 8
srli.w t7, a3, 4
slli.w t6, t7, 5
.l_\lable\()h_16w_loop0_lsx:
addi.d t0, a1, 0 //src
addi.d t5, a4, 0 //h
addi.d t8, a0, 0 //dst
.l_\lable\()h_16w_loop_lsx:
vld vr0, a1, 0
vld vr1, a1, 8
add.d a1, a1, a2
PREP_H_8W vr0
PREP_H_8W vr1
vst vr0, a0, 0
vst vr1, a0, 16
add.d a0, a0, t6
addi.d t5, t5, -1
bnez t5, .l_\lable\()h_16w_loop_lsx
addi.d a1, t0, 16
addi.d a0, t8, 32
addi.w t7, t7, -1
bnez t7, .l_\lable\()h_16w_loop0_lsx
b .l_\lable\()end_pre_8tap_lsx
.l_\lable\()hv_lsx:
andi t1, a7, 3
blt t0, a3, .l_\lable\()hv_idx_fh_lsx
andi t1, a7, 1
addi.w t1, t1, 3
.l_\lable\()hv_idx_fh_lsx:
addi.w t5, zero, 120
mul.w t1, t1, t5
addi.w t5, a5, -1
slli.w t5, t5, 3
add.w t1, t1, t5
add.d t1, t6, t1 //fh's offset
vldrepl.d vr8, t1, 0
srli.w a7, a7, 2
blt t0, a4, .l_\lable\()hv_idx_fv_lsx
andi a7, a7, 1
addi.w a7, a7, 3
.l_\lable\()hv_idx_fv_lsx:
addi.w t5, zero, 120
mul.w a7, a7, t5
addi.w t5, a6, -1
slli.w t5, t5, 3
add.w a7, a7, t5
add.d a7, t6, a7 //fv's offset
vldrepl.d vr9, a7, 0
vsllwil.h.b vr9, vr9, 0
sub.d a1, a1, t3
addi.d a1, a1, -3
beq a3, t0, .l_\lable\()hv_4w_lsx
b .l_\lable\()hv_8w_lsx
.l_\lable\()hv_4w_lsx:
addi.d a1, a1, 2 //ignore leading 0s
vld vr0, a1, 0
vldx vr1, a1, a2
vldx vr2, a1, t2
add.d a1, a1, t3
vld vr3, a1, 0
vldx vr4, a1, a2
vldx vr5, a1, t2
vldx vr6, a1, t3
add.d a1, a1, t4
la.local t1, subpel_h_shuf1
vld vr7, t1, 0
vbsrl.v vr8, vr8, 2
vreplvei.w vr8, vr8, 0
//fv
vreplvei.w vr17, vr9, 0
vreplvei.w vr18, vr9, 1
vreplvei.w vr19, vr9, 2
vreplvei.w vr20, vr9, 3
//DAV1D_FILTER_8TAP_RND
vshuf.b vr0, vr0, vr0, vr7
vshuf.b vr1, vr1, vr1, vr7
vshuf.b vr2, vr2, vr2, vr7
vshuf.b vr3, vr3, vr3, vr7
vshuf.b vr4, vr4, vr4, vr7
vshuf.b vr5, vr5, vr5, vr7
vshuf.b vr6, vr6, vr6, vr7
vmulwev.h.bu.b vr10, vr0, vr8
vmulwev.h.bu.b vr11, vr1, vr8
vmulwev.h.bu.b vr12, vr2, vr8
vmulwev.h.bu.b vr13, vr3, vr8
vmulwev.h.bu.b vr14, vr4, vr8
vmulwev.h.bu.b vr15, vr5, vr8
vmulwev.h.bu.b vr16, vr6, vr8
vmaddwod.h.bu.b vr10, vr0, vr8
vmaddwod.h.bu.b vr11, vr1, vr8
vmaddwod.h.bu.b vr12, vr2, vr8
vmaddwod.h.bu.b vr13, vr3, vr8
vmaddwod.h.bu.b vr14, vr4, vr8
vmaddwod.h.bu.b vr15, vr5, vr8
vmaddwod.h.bu.b vr16, vr6, vr8
vhaddw.w.h vr10, vr10, vr10
vhaddw.w.h vr11, vr11, vr11
vhaddw.w.h vr12, vr12, vr12
vhaddw.w.h vr13, vr13, vr13
vhaddw.w.h vr14, vr14, vr14
vhaddw.w.h vr15, vr15, vr15
vhaddw.w.h vr16, vr16, vr16
vssrarni.h.w vr10, vr10, 2 //h0
vssrarni.h.w vr11, vr11, 2 //h1
vssrarni.h.w vr12, vr12, 2 //h2
vssrarni.h.w vr13, vr13, 2 //h3
vssrarni.h.w vr14, vr14, 2 //h4
vssrarni.h.w vr15, vr15, 2 //h5
vssrarni.h.w vr16, vr16, 2 //h6
//h0
vilvl.h vr0, vr11, vr10 //01
vilvl.h vr1, vr13, vr12 //23
vilvl.h vr2, vr15, vr14 //45
//h1
vilvl.h vr4, vr12, vr11 //12
vilvl.h vr5, vr14, vr13 //34
vilvl.h vr6, vr16, vr15 //56
.l_\lable\()hv_w4_loop_lsx:
vld vr9, a1, 0
vldx vr10, a1, a2
add.d a1, a1, t2
//DAV1D_FILTER_8TAP_CLIP
vshuf.b vr9, vr9, vr9, vr7
vshuf.b vr10, vr10, vr10, vr7
vmulwev.h.bu.b vr11, vr9, vr8
vmulwev.h.bu.b vr12, vr10, vr8
vmaddwod.h.bu.b vr11, vr9, vr8
vmaddwod.h.bu.b vr12, vr10, vr8
vhaddw.w.h vr11, vr11, vr11
vhaddw.w.h vr12, vr12, vr12
vssrarni.h.w vr11, vr11, 2 //7h
vssrarni.h.w vr12, vr12, 2 //h8
vilvl.h vr3, vr11, vr16 //67
vilvl.h vr13, vr12, vr11 //78
vmulwev.w.h vr9, vr0, vr17
vmulwev.w.h vr10, vr1, vr18
vmulwev.w.h vr14, vr2, vr19
vmulwev.w.h vr15, vr3, vr20
vmaddwod.w.h vr9, vr0, vr17
vmaddwod.w.h vr10, vr1, vr18
vmaddwod.w.h vr14, vr2, vr19
vmaddwod.w.h vr15, vr3, vr20
vadd.w vr16, vr9, vr10
vadd.w vr16, vr16, vr14
vadd.w vr16, vr16, vr15
vmulwev.w.h vr9, vr4, vr17
vmulwev.w.h vr10, vr5, vr18
vmulwev.w.h vr14, vr6, vr19
vmulwev.w.h vr15, vr13, vr20
vmaddwod.w.h vr9, vr4, vr17
vmaddwod.w.h vr10, vr5, vr18
vmaddwod.w.h vr14, vr6, vr19
vmaddwod.w.h vr15, vr13, vr20
vadd.w vr21, vr9, vr10
vadd.w vr21, vr21, vr14
vadd.w vr21, vr21, vr15
vssrarni.h.w vr21, vr16, 6
//cache
vaddi.hu vr0, vr1, 0
vaddi.hu vr1, vr2, 0
vaddi.hu vr2, vr3, 0
vaddi.hu vr4, vr5, 0
vaddi.hu vr5, vr6, 0
vaddi.hu vr6, vr13, 0
vaddi.hu vr16, vr12, 0
vst vr21, a0, 0
addi.d a0, a0, 16
addi.d a4, a4, -2
bnez a4, .l_\lable\()hv_w4_loop_lsx
b .l_\lable\()end_pre_8tap_lsx
.l_\lable\()hv_8w_lsx:
.l_\lable\()hv_16w_lsx:
.l_\lable\()hv_32w_lsx:
.l_\lable\()hv_64w_lsx:
.l_\lable\()hv_128w_lsx:
addi.d sp, sp, -8*8
fst.d f24, sp, 0
fst.d f25, sp, 8
fst.d f26, sp, 16
fst.d f27, sp, 24
fst.d f28, sp, 32
fst.d f29, sp, 40
fst.d f30, sp, 48
fst.d f31, sp, 56
addi.d t0, a1, 0 //src
addi.d t5, a4, 0 //h
addi.d t8, a0, 0 //dst
slli.w t6, a3, 1
la.local t1, subpel_h_shuf1
vld vr7, t1, 0
vaddi.bu vr11, vr7, 4
vaddi.bu vr12, vr7, 8
vreplvei.w vr10, vr8, 1
vreplvei.w vr8, vr8, 0
vreplvei.w vr20, vr9, 1
vreplvei.w vr21, vr9, 2
vreplvei.w vr22, vr9, 3
vreplvei.w vr9, vr9, 0
.l_\lable\()prep_hv_8w_loop0_lsx:
vld vr0, a1, 0
vldx vr1, a1, a2
vldx vr2, a1, t2
add.d a1, a1, t3
vld vr3, a1, 0
vldx vr4, a1, a2
vldx vr5, a1, t2
vldx vr6, a1, t3
add.d a1, a1, t4
FILTER_8TAP_8W vr0 //h0
FILTER_8TAP_8W vr1 //h1
FILTER_8TAP_8W vr2 //h2
FILTER_8TAP_8W vr3 //h3
FILTER_8TAP_8W vr4 //h4
FILTER_8TAP_8W vr5 //h5
FILTER_8TAP_8W vr6 //h6
//h0' low part
vilvl.h vr23, vr1, vr0 //01
vilvl.h vr24, vr3, vr2 //23
vilvl.h vr25, vr5, vr4 //45
//h0' high part
vilvh.h vr26, vr1, vr0 //01
vilvh.h vr27, vr3, vr2 //23
vilvh.h vr28, vr5, vr4 //45
//h1' low part
vilvl.h vr29, vr2, vr1 //12
vilvl.h vr30, vr4, vr3 //34
vilvl.h vr31, vr6, vr5 //56
//h1' high part
vilvh.h vr0, vr2, vr1 //12
vilvh.h vr1, vr4, vr3 //34
vilvh.h vr2, vr6, vr5 //56
.l_\lable\()prep_hv_8w_loop_lsx:
vld vr3, a1, 0
vldx vr4, a1, a2
add.d a1, a1, t2
FILTER_8TAP_8W vr3 //h7
FILTER_8TAP_8W vr4 //h8
//h0' low part
vilvl.h vr16, vr3, vr6 //67 ~low
vmulwev.w.h vr13, vr23, vr9
vmulwev.w.h vr14, vr24, vr20
vmulwev.w.h vr15, vr25, vr21
vmulwev.w.h vr17, vr16, vr22
vmaddwod.w.h vr13, vr23, vr9
vmaddwod.w.h vr14, vr24, vr20
vmaddwod.w.h vr15, vr25, vr21
vmaddwod.w.h vr17, vr16, vr22
vadd.w vr13, vr13, vr14
vadd.w vr13, vr13, vr15
vadd.w vr13, vr13, vr17
//cache
vaddi.hu vr23, vr24, 0
vaddi.hu vr24, vr25, 0
vaddi.hu vr25, vr16, 0
//h0' high part
vilvh.h vr17, vr3, vr6 //67 ~high
vmulwev.w.h vr14, vr26, vr9
vmulwev.w.h vr15, vr27, vr20
vmulwev.w.h vr16, vr28, vr21
vmulwev.w.h vr18, vr17, vr22
vmaddwod.w.h vr14, vr26, vr9
vmaddwod.w.h vr15, vr27, vr20
vmaddwod.w.h vr16, vr28, vr21
vmaddwod.w.h vr18, vr17, vr22
vadd.w vr14, vr14, vr15
vadd.w vr14, vr14, vr16
vadd.w vr14, vr14, vr18
vssrarni.h.w vr14, vr13, 6
vst vr14, a0, 0
add.d a0, a0, t6
//cache
vaddi.hu vr26, vr27, 0
vaddi.hu vr27, vr28, 0
vaddi.hu vr28, vr17, 0
vaddi.hu vr6, vr4, 0
vilvl.h vr5, vr4, vr3 //78 ~low
vilvh.h vr4, vr4, vr3 //78 ~high
//h1' low part
vmulwev.w.h vr13, vr29, vr9
vmulwev.w.h vr14, vr30, vr20
vmulwev.w.h vr15, vr31, vr21
vmulwev.w.h vr16, vr5, vr22
vmaddwod.w.h vr13, vr29, vr9
vmaddwod.w.h vr14, vr30, vr20
vmaddwod.w.h vr15, vr31, vr21
vmaddwod.w.h vr16, vr5, vr22
vadd.w vr13, vr13, vr14
vadd.w vr13, vr13, vr15
vadd.w vr13, vr13, vr16
//cache
vaddi.hu vr29, vr30, 0
vaddi.hu vr30, vr31, 0
vaddi.hu vr31, vr5, 0
//h1' high part
vmulwev.w.h vr14, vr0, vr9
vmulwev.w.h vr15, vr1, vr20
vmulwev.w.h vr16, vr2, vr21
vmulwev.w.h vr17, vr4, vr22
vmaddwod.w.h vr14, vr0, vr9
vmaddwod.w.h vr15, vr1, vr20
vmaddwod.w.h vr16, vr2, vr21
vmaddwod.w.h vr17, vr4, vr22
vadd.w vr14, vr14, vr15
vadd.w vr14, vr14, vr16
vadd.w vr14, vr14, vr17
vssrarni.h.w vr14, vr13, 6
vst vr14, a0, 0
add.d a0, a0, t6
//cache
vaddi.hu vr0, vr1, 0
vaddi.hu vr1, vr2, 0
vaddi.hu vr2, vr4, 0
addi.w a4, a4, -2
bnez a4, .l_\lable\()prep_hv_8w_loop_lsx
addi.d a1, t0, 8
addi.d t0, t0, 8
addi.d a0, t8, 16
addi.d t8, t8, 16
addi.d a4, t5, 0
addi.w a3, a3, -8
bnez a3, .l_\lable\()prep_hv_8w_loop0_lsx
fld.d f24, sp, 0
fld.d f25, sp, 8
fld.d f26, sp, 16
fld.d f27, sp, 24
fld.d f28, sp, 32
fld.d f29, sp, 40
fld.d f30, sp, 48
fld.d f31, sp, 56
addi.d sp, sp, 8*8
b .l_\lable\()end_pre_8tap_lsx
.l_\lable\()v_lsx:
srli.w a7, a7, 2
blt t0, a4, .l_\lable\()v_idx_fv_lsx
andi a7, a7, 1
addi.w a7, a7, 3
.l_\lable\()v_idx_fv_lsx:
addi.w t5, zero, 120
mul.w a7, a7, t5
addi.w t5, a6, -1
slli.w t5, t5, 3
add.w a7, a7, t5
add.d a7, t6, a7 //fv's offset
vldrepl.d vr8, a7, 0
vilvl.h vr8, vr8, vr8
vreplvei.w vr9, vr8, 1
vreplvei.w vr10, vr8, 2
vreplvei.w vr11, vr8, 3
vreplvei.w vr8, vr8, 0
sub.d a1, a1, t3
beq a3, t0, .l_\lable\()v_4w_lsx
blt t0, a3, .l_\lable\()v_8w_lsx
.l_\lable\()v_4w_lsx:
fld.s f0, a1, 0
fldx.s f1, a1, a2
fldx.s f2, a1, t2
add.d a1, a1, t3
fld.s f3, a1, 0
fldx.s f4, a1, a2
fldx.s f5, a1, t2
fldx.s f6, a1, t3
add.d a1, a1, t4
vilvl.w vr0, vr1, vr0
vilvl.w vr1, vr2, vr1
vilvl.b vr0, vr1, vr0 //0 1 1 2
vilvl.w vr1, vr3, vr2
vilvl.w vr2, vr4, vr3
vilvl.b vr1, vr2, vr1 //2 3 3 4
vilvl.w vr2, vr5, vr4
vilvl.w vr3, vr6, vr5
vilvl.b vr2, vr3, vr2 //4 5 5 6
.l_\lable\()v_4w_loop_lsx:
fld.s f7, a1, 0
vilvl.w vr3, vr7, vr6
fldx.s f6, a1, a2
add.d a1, a1, t2
vilvl.w vr4, vr6, vr7
vilvl.b vr3, vr4, vr3 //6 7 7 8
vmulwev.h.bu.b vr12, vr0, vr8
vmulwev.h.bu.b vr13, vr1, vr9
vmulwev.h.bu.b vr14, vr2, vr10
vmulwev.h.bu.b vr15, vr3, vr11
vmaddwod.h.bu.b vr12, vr0, vr8
vmaddwod.h.bu.b vr13, vr1, vr9
vmaddwod.h.bu.b vr14, vr2, vr10
vmaddwod.h.bu.b vr15, vr3, vr11
vaddi.hu vr0, vr1, 0
vaddi.hu vr1, vr2, 0
vaddi.hu vr2, vr3, 0
vadd.h vr12, vr12, vr13
vadd.h vr12, vr12, vr14
vadd.h vr12, vr12, vr15
vsrari.h vr12, vr12, 2
vst vr12, a0, 0
addi.d a0, a0, 16
addi.w a4, a4, -2
bnez a4, .l_\lable\()v_4w_loop_lsx
b .l_\lable\()end_pre_8tap_lsx
.l_\lable\()v_8w_lsx:
addi.d t0, a1, 0
addi.d t5, a4, 0
addi.d t8, a0, 0
slli.w t6, a3, 1
.l_\lable\()v_8w_loop0_lsx:
fld.d f0, a1, 0
fldx.d f1, a1, a2
fldx.d f2, a1, t2
add.d a1, a1, t3
fld.d f3, a1, 0
fldx.d f4, a1, a2
fldx.d f5, a1, t2
fldx.d f6, a1, t3
add.d a1, a1, t4
vilvl.b vr0, vr1, vr0 //0 1
vilvl.b vr1, vr2, vr1 //1 2
vilvl.b vr2, vr3, vr2 //2 3
vilvl.b vr3, vr4, vr3 //3 4
vilvl.b vr4, vr5, vr4 //4 5
vilvl.b vr5, vr6, vr5 //5 6
.l_\lable\()v_8w_loop_lsx:
fld.d f7, a1, 0
vilvl.b vr12, vr7, vr6 //6 7
fldx.d f6, a1, a2
add.d a1, a1, t2
vilvl.b vr13, vr6, vr7 //7 8
vmulwev.h.bu.b vr14, vr0, vr8
vmulwev.h.bu.b vr15, vr1, vr8
vmulwev.h.bu.b vr16, vr2, vr9
vmulwev.h.bu.b vr17, vr3, vr9
vmulwev.h.bu.b vr18, vr4, vr10
vmulwev.h.bu.b vr19, vr5, vr10
vmulwev.h.bu.b vr20, vr12, vr11
vmulwev.h.bu.b vr21, vr13, vr11
vmaddwod.h.bu.b vr14, vr0, vr8
vmaddwod.h.bu.b vr15, vr1, vr8
vmaddwod.h.bu.b vr16, vr2, vr9
vmaddwod.h.bu.b vr17, vr3, vr9
vmaddwod.h.bu.b vr18, vr4, vr10
vmaddwod.h.bu.b vr19, vr5, vr10
vmaddwod.h.bu.b vr20, vr12, vr11
vmaddwod.h.bu.b vr21, vr13, vr11
vaddi.hu vr0, vr2, 0
vaddi.hu vr1, vr3, 0
vaddi.hu vr2, vr4, 0
vaddi.hu vr3, vr5, 0
vaddi.hu vr4, vr12, 0
vaddi.hu vr5, vr13, 0
vadd.h vr14, vr14, vr16
vadd.h vr14, vr14, vr18
vadd.h vr14, vr14, vr20
vadd.h vr15, vr15, vr17
vadd.h vr15, vr15, vr19
vadd.h vr15, vr15, vr21
vsrari.h vr14, vr14, 2
vsrari.h vr15, vr15, 2
vst vr14, a0, 0
add.d a0, a0, t6
vst vr15, a0, 0
add.d a0, a0, t6
addi.w a4, a4, -2
bnez a4, .l_\lable\()v_8w_loop_lsx
addi.d a1, t0, 8
addi.d t0, t0, 8
addi.d a0, t8, 16
addi.d t8, t8, 16
addi.d a4, t5, 0
addi.d a3, a3, -8
bnez a3, .l_\lable\()v_8w_loop0_lsx
.l_\lable\()end_pre_8tap_lsx:
.endm
function prep_8tap_regular_8bpc_lsx
addi.w a7, zero, 0
PREP_8TAP_8BPC_LSX 0
endfunc
function prep_8tap_smooth_regular_8bpc_lsx
addi.w a7, zero, 1
PREP_8TAP_8BPC_LSX 1
endfunc
function prep_8tap_sharp_regular_8bpc_lsx
addi.w a7, zero, 2
PREP_8TAP_8BPC_LSX 2
endfunc
function prep_8tap_regular_smooth_8bpc_lsx
addi.w a7, zero, 4
PREP_8TAP_8BPC_LSX 4
endfunc
function prep_8tap_smooth_8bpc_lsx
addi.w a7, zero, 5
PREP_8TAP_8BPC_LSX 5
endfunc
function prep_8tap_sharp_smooth_8bpc_lsx
addi.w a7, zero, 6
PREP_8TAP_8BPC_LSX 6
endfunc
function prep_8tap_regular_sharp_8bpc_lsx
addi.w a7, zero, 8
PREP_8TAP_8BPC_LSX 8
endfunc
function prep_8tap_smooth_sharp_8bpc_lsx
addi.w a7, zero, 9
PREP_8TAP_8BPC_LSX 9
endfunc
function prep_8tap_sharp_8bpc_lsx
addi.w a7, zero, 10
PREP_8TAP_8BPC_LSX 10
endfunc
/*
* static void blend_lsx(pixel *dst, const ptrdiff_t dst_stride, const pixel *tmp,
const int w, int h, const uint8_t *mask)
*/
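/*
 * Scalar sketch of the per-pixel operation implemented below, assuming the
 * usual 6-bit blend weighting (mask values in 0..64); blend_c is a
 * hypothetical reference, not part of this file:
 *
 *   static void blend_c(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *tmp,
 *                       int w, int h, const uint8_t *mask)
 *   {
 *       for (int y = 0; y < h; y++) {
 *           for (int x = 0; x < w; x++)
 *               dst[x] = (dst[x] * (64 - mask[x]) + tmp[x] * mask[x] + 32) >> 6;
 *           dst += dst_stride;
 *           tmp += w;
 *           mask += w;
 *       }
 *   }
 */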
function blend_8bpc_lsx
addi.d t8, zero, 64
vreplgr2vr.b vr23, t8
clz.w t0, a3
li.w t1, 26
sub.w t0, t0, t1
la.local t1, .BLEND_LSX_JRTABLE
alsl.d t0, t0, t1, 1
ld.h t2, t0, 0 // The jump addresses are relative to JRTABLE
add.d t1, t1, t2 // Get absolute address
jirl $r0, t1, 0
.align 3
.BLEND_LSX_JRTABLE:
.hword .BLEND_W32_LSX - .BLEND_LSX_JRTABLE
.hword .BLEND_W16_LSX - .BLEND_LSX_JRTABLE
.hword .BLEND_W8_LSX - .BLEND_LSX_JRTABLE
.hword .BLEND_W4_LSX - .BLEND_LSX_JRTABLE
.BLEND_W4_LSX:
vld vr0, a0, 0
vld vr1, a2, 0
vld vr2, a5, 0
vsllwil.hu.bu vr1, vr1, 0
vsllwil.hu.bu vr4, vr2, 0
vmul.h vr1, vr1, vr4 //b*m
vsub.b vr3, vr23, vr2
vsllwil.hu.bu vr0, vr0, 0
vsllwil.hu.bu vr3, vr3, 0
vmadd.h vr1, vr0, vr3
vssrarni.bu.h vr1, vr1, 6
vstelm.w vr1, a0, 0, 0
addi.w a4, a4, -1
add.d a0, a0, a1
addi.d a2, a2, 4
addi.d a5, a5, 4
blt zero, a4, .BLEND_W4_LSX
b .BLEND_END_LSX
.BLEND_W8_LSX:
vld vr0, a0, 0
vld vr1, a2, 0
vld vr2, a5, 0
vsllwil.hu.bu vr1, vr1, 0
vsllwil.hu.bu vr4, vr2, 0
vmul.h vr1, vr1, vr4 //b*m
vsub.b vr3, vr23, vr2
vsllwil.hu.bu vr0, vr0, 0
vsllwil.hu.bu vr3, vr3, 0
vmadd.h vr1, vr0, vr3
vssrarni.bu.h vr1, vr1, 6
vstelm.d vr1, a0, 0, 0
addi.w a4, a4, -1
add.d a0, a0, a1
addi.d a2, a2, 8
addi.d a5, a5, 8
blt zero, a4, .BLEND_W8_LSX
b .BLEND_END_LSX
.BLEND_W16_LSX:
vld vr0, a0, 0
vld vr1, a2, 0
vld vr2, a5, 0
vexth.hu.bu vr5, vr1
vsllwil.hu.bu vr1, vr1, 0
vexth.hu.bu vr6, vr2
vsllwil.hu.bu vr4, vr2, 0
vmul.h vr1, vr1, vr4 //b*m
vmul.h vr5, vr5, vr6 //b*m
vsub.b vr3, vr23, vr2
vexth.hu.bu vr7, vr0
vexth.hu.bu vr8, vr3
vmadd.h vr5, vr7, vr8
vsllwil.hu.bu vr0, vr0, 0
vsllwil.hu.bu vr3, vr3, 0
vmadd.h vr1, vr0, vr3
vssrarni.bu.h vr5, vr1, 6
vst vr5, a0, 0
addi.w a4, a4, -1
add.d a0, a0, a1
addi.d a2, a2, 16
addi.d a5, a5, 16
blt zero, a4, .BLEND_W16_LSX
b .BLEND_END_LSX
.BLEND_W32_LSX:
vld vr0, a0, 0
vld vr1, a2, 0
vld vr2, a5, 0
vexth.hu.bu vr5, vr1
vsllwil.hu.bu vr1, vr1, 0
vexth.hu.bu vr6, vr2
vsllwil.hu.bu vr4, vr2, 0
vmul.h vr1, vr1, vr4 //b*m
vmul.h vr5, vr5, vr6 //b*m
vsub.b vr3, vr23, vr2
vexth.hu.bu vr7, vr0
vexth.hu.bu vr8, vr3
vmadd.h vr5, vr7, vr8
vsllwil.hu.bu vr0, vr0, 0
vsllwil.hu.bu vr3, vr3, 0
vmadd.h vr1, vr0, vr3
vssrarni.bu.h vr5, vr1, 6
vst vr5, a0, 0
/* second */
vld vr0, a0, 16
vld vr1, a2, 16
vld vr2, a5, 16
vexth.hu.bu vr5, vr1
vsllwil.hu.bu vr1, vr1, 0
vexth.hu.bu vr6, vr2
vsllwil.hu.bu vr4, vr2, 0
vmul.h vr1, vr1, vr4 //b*m
vmul.h vr5, vr5, vr6 //b*m
vsub.b vr3, vr23, vr2
vexth.hu.bu vr7, vr0
vexth.hu.bu vr8, vr3
vmadd.h vr5, vr7, vr8
vsllwil.hu.bu vr0, vr0, 0
vsllwil.hu.bu vr3, vr3, 0
vmadd.h vr1, vr0, vr3
vssrarni.bu.h vr5, vr1, 6
vst vr5, a0, 16
addi.w a4, a4, -1
add.d a0, a0, a1
addi.d a2, a2, 32
addi.d a5, a5, 32
blt zero, a4, .BLEND_W32_LSX
.BLEND_END_LSX:
endfunc
const obmc_masks_la
/* Unused */
.byte 0, 0, 0, 0
/* 2 */
.byte 45, 19, 64, 0
/* 4 */
.byte 39, 25, 50, 14, 59, 5, 64, 0
/* 8 */
.byte 36, 28, 42, 22, 48, 16, 53, 11, 57, 7, 61, 3, 64, 0, 64, 0
/* 16 */
.byte 34, 30, 37, 27, 40, 24, 43, 21, 46, 18, 49, 15, 52, 12, 54, 10
.byte 56, 8, 58, 6, 60, 4, 61, 3, 64, 0, 64, 0, 64, 0, 64, 0
/* 32 */
.byte 33, 31, 35, 29, 36, 28, 38, 26, 40, 24, 41, 23, 43, 21, 44, 20
.byte 45, 19, 47, 17, 48, 16, 50, 14, 51, 13, 52, 12, 53, 11, 55, 9
.byte 56, 8, 57, 7, 58, 6, 59, 5, 60, 4, 60, 4, 61, 3, 62, 2
endconst
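/*
 * Each byte pair above sums to 64: the first byte of a pair weights the
 * existing dst pixel and the second weights the tmp pixel, so after vilvl.b
 * interleaves dst and tmp bytes, a single vdp2.h.bu against a pair produces
 * the whole weighted sum for one output pixel.
 */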
/*
* static void blend_v_lsx(pixel *dst, const ptrdiff_t dst_stride, const pixel *tmp,
const int w, int h)
*/
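/*
 * Only the leftmost (w * 3) >> 2 pixels of each row are rewritten (1 of 2,
 * 3 of 4, 6 of 8, ...), which is why the stores below use vstelm with
 * partial widths; the weight pairs come from the matching row of
 * obmc_masks_la.
 */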
function blend_v_8bpc_lsx
la.local t8, obmc_masks_la
clz.w t0, a3
li.w t1, 26
sub.w t0, t0, t1
la.local t1, .BLEND_V_LSX_JRTABLE
alsl.d t0, t0, t1, 1
ld.h t2, t0, 0 // The jump addresses are relative to JRTABLE
add.d t1, t1, t2 // Get absolute address
jirl $r0, t1, 0
.align 3
.BLEND_V_LSX_JRTABLE:
.hword .BLEND_V_W32_LSX - .BLEND_V_LSX_JRTABLE
.hword .BLEND_V_W16_LSX - .BLEND_V_LSX_JRTABLE
.hword .BLEND_V_W8_LSX - .BLEND_V_LSX_JRTABLE
.hword .BLEND_V_W4_LSX - .BLEND_V_LSX_JRTABLE
.hword .BLEND_V_W2_LSX - .BLEND_V_LSX_JRTABLE
.hword .BLEND_V_W2_LSX_1 - .BLEND_V_LSX_JRTABLE //Instructions must be 4-byte aligned
.BLEND_V_W2_LSX:
ld.bu t6, t8, 4
ld.bu t7, t8, 5
.BLEND_V_W2_LSX_1:
ld.bu t0, a0, 0
ld.bu t1, a2, 0
mul.d t0, t0, t6
mul.d t1, t1, t7
addi.d t0, t0, 32
add.d t0, t0, t1
srli.d t0, t0, 6
st.b t0, a0, 0
addi.w a4, a4, -1
add.d a0, a0, a1
addi.d a2, a2, 2
addi.d a5, a5, 2
blt zero, a4, .BLEND_V_W2_LSX_1
b .BLEND_V_END_LSX
.BLEND_V_W4_LSX:
vld vr20, t8, 8
.BLEND_V_W4_LSX_1:
vld vr0, a0, 0
vld vr1, a2, 0
vilvl.b vr0, vr1, vr0
vdp2.h.bu vr1, vr0, vr20
vssrarni.bu.h vr1, vr1, 6
vstelm.h vr1, a0, 0, 0
vstelm.b vr1, a0, 2, 2
addi.w a4, a4, -1
add.d a0, a0, a1
addi.d a2, a2, 4
blt zero, a4, .BLEND_V_W4_LSX_1
b .BLEND_V_END_LSX
.BLEND_V_W8_LSX:
vld vr20, t8, 16
.BLEND_V_W8_LSX_1:
vld vr0, a0, 0
vld vr1, a2, 0
vilvl.b vr0, vr1, vr0
vdp2.h.bu vr1, vr0, vr20
vssrarni.bu.h vr1, vr1, 6
vstelm.w vr1, a0, 0, 0
vstelm.h vr1, a0, 4, 2
addi.w a4, a4, -1
add.d a0, a0, a1
addi.d a2, a2, 8
blt zero, a4, .BLEND_V_W8_LSX_1
b .BLEND_V_END_LSX
.BLEND_V_W16_LSX:
vld vr20, t8, 32
vld vr21, t8, 48
.BLEND_V_W16_LSX_1:
vld vr0, a0, 0
vld vr1, a2, 0
vilvl.b vr2, vr1, vr0
vilvh.b vr3, vr1, vr0
vmulwev.h.bu vr4, vr2, vr20
vmulwev.h.bu vr5, vr3, vr21
vmaddwod.h.bu vr4, vr2, vr20
vmaddwod.h.bu vr5, vr3, vr21
vssrarni.bu.h vr5, vr4, 6
vstelm.d vr5, a0, 0, 0
vstelm.w vr5, a0, 8, 2
addi.w a4, a4, -1
add.d a0, a0, a1
addi.d a2, a2, 16
blt zero, a4, .BLEND_V_W16_LSX_1
b .BLEND_V_END_LSX
.BLEND_V_W32_LSX:
vld vr20, t8, 64
vld vr21, t8, 80
vld vr22, t8, 96
.BLEND_V_W32_LSX_1:
vld vr0, a0, 0
vld vr1, a0, 16
vld vr2, a2, 0
vld vr3, a2, 16
vilvl.b vr4, vr2, vr0
vmulwev.h.bu vr7, vr4, vr20
vilvh.b vr5, vr2, vr0
vmulwev.h.bu vr8, vr5, vr21
vilvl.b vr6, vr3, vr1
vmulwev.h.bu vr9, vr6, vr22
vmaddwod.h.bu vr7, vr4, vr20
vmaddwod.h.bu vr8, vr5, vr21
vmaddwod.h.bu vr9, vr6, vr22
vssrarni.bu.h vr8, vr7, 6
vssrarni.bu.h vr9, vr9, 6
vst vr8, a0, 0
vstelm.d vr9, a0, 16, 0
addi.w a4, a4, -1
add.d a0, a0, a1
addi.d a2, a2, 32
blt zero, a4, .BLEND_V_W32_LSX_1
.BLEND_V_END_LSX:
endfunc
/*
* static void blend_h_lsx(pixel *dst, const ptrdiff_t dst_stride, const pixel *tmp,
const int w, int h)
*/
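/*
 * Only the top (h * 3) >> 2 rows are rewritten. t8 walks the per-row weight
 * pairs starting at obmc_masks_la + 2*h, and a4 is converted into the end
 * pointer of that walk, so the row loops below terminate on a pointer
 * comparison instead of a counter.
 */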
function blend_h_8bpc_lsx
la.local t8, obmc_masks_la
alsl.d t8, a4, t8, 1
srli.d t0, a4, 1
srli.d t1, a4, 2
add.d a4, t0, t1 // h = (h * 3) >> 2;
slli.d a4, a4, 1
add.d a4, a4, t8
clz.w t0, a3
li.w t1, 24
sub.w t0, t0, t1
la.local t1, .BLEND_H_LSX_JRTABLE
alsl.d t0, t0, t1, 1
ld.h t2, t0, 0 // The jump addresses are relative to JRTABLE
add.d t1, t1, t2 // Get absolute address
jirl $r0, t1, 0
.align 3
.BLEND_H_LSX_JRTABLE:
.hword .BLEND_H_W128_LSX - .BLEND_H_LSX_JRTABLE
.hword .BLEND_H_W64_LSX - .BLEND_H_LSX_JRTABLE
.hword .BLEND_H_W32_LSX - .BLEND_H_LSX_JRTABLE
.hword .BLEND_H_W16_LSX - .BLEND_H_LSX_JRTABLE
.hword .BLEND_H_W8_LSX - .BLEND_H_LSX_JRTABLE
.hword .BLEND_H_W4_LSX - .BLEND_H_LSX_JRTABLE
.hword .BLEND_H_W2_LSX - .BLEND_H_LSX_JRTABLE
.hword .BLEND_H_END_LSX - .BLEND_H_LSX_JRTABLE //Instructions must be 4-byte aligned
.BLEND_H_W2_LSX:
vldrepl.h vr20, t8, 0
vld vr0, a0, 0
vld vr1, a2, 0
vilvl.b vr0, vr1, vr0
vdp2.h.bu vr1, vr0, vr20
vssrarni.bu.h vr1, vr1, 6
vstelm.h vr1, a0, 0, 0
addi.d t8, t8, 2
add.d a0, a0, a1
addi.d a2, a2, 2
blt t8, a4, .BLEND_H_W2_LSX
b .BLEND_H_END_LSX
.BLEND_H_W4_LSX:
vldrepl.h vr20, t8, 0
vld vr0, a0, 0
vld vr1, a2, 0
vilvl.b vr0, vr1, vr0
vdp2.h.bu vr1, vr0, vr20
vssrarni.bu.h vr1, vr1, 6
vstelm.w vr1, a0, 0, 0
addi.d t8, t8, 2
add.d a0, a0, a1
addi.d a2, a2, 4
blt t8, a4, .BLEND_H_W4_LSX
b .BLEND_H_END_LSX
.BLEND_H_W8_LSX:
vldrepl.h vr20, t8, 0
vld vr0, a0, 0
vld vr1, a2, 0
vilvl.b vr0, vr1, vr0
vdp2.h.bu vr1, vr0, vr20
vssrarni.bu.h vr1, vr1, 6
vstelm.d vr1, a0, 0, 0
addi.d t8, t8, 2
add.d a0, a0, a1
addi.d a2, a2, 8
blt t8, a4, .BLEND_H_W8_LSX
b .BLEND_H_END_LSX
.BLEND_H_W16_LSX:
vldrepl.h vr20, t8, 0
vld vr0, a0, 0
vld vr1, a2, 0
vilvl.b vr2, vr1, vr0
vilvh.b vr3, vr1, vr0
vmulwev.h.bu vr4, vr2, vr20
vmulwev.h.bu vr5, vr3, vr20
vmaddwod.h.bu vr4, vr2, vr20
vmaddwod.h.bu vr5, vr3, vr20
vssrarni.bu.h vr5, vr4, 6
vst vr5, a0, 0
addi.d t8, t8, 2
add.d a0, a0, a1
addi.d a2, a2, 16
blt t8, a4, .BLEND_H_W16_LSX
b .BLEND_H_END_LSX
.BLEND_H_W32_LSX:
vldrepl.h vr20, t8, 0
vld vr0, a0, 0
vld vr1, a0, 16
vld vr2, a2, 0
vld vr3, a2, 16
vilvl.b vr4, vr2, vr0
vilvh.b vr5, vr2, vr0
vilvl.b vr6, vr3, vr1
vilvh.b vr3, vr3, vr1
vmulwev.h.bu vr7, vr4, vr20
vmulwev.h.bu vr8, vr5, vr20
vmulwev.h.bu vr9, vr6, vr20
vmulwev.h.bu vr0, vr3, vr20
vmaddwod.h.bu vr7, vr4, vr20
vmaddwod.h.bu vr8, vr5, vr20
vmaddwod.h.bu vr9, vr6, vr20
vmaddwod.h.bu vr0, vr3, vr20
vssrarni.bu.h vr8, vr7, 6
vssrarni.bu.h vr0, vr9, 6
vst vr8, a0, 0
vst vr0, a0, 16
addi.d t8, t8, 2
add.d a0, a0, a1
addi.d a2, a2, 32
blt t8, a4, .BLEND_H_W32_LSX
b .BLEND_H_END_LSX
.BLEND_H_W64_LSX:
vldrepl.h vr20, t8, 0
vld vr0, a0, 0
vld vr1, a0, 16
vld vr2, a0, 32
vld vr3, a0, 48
vld vr4, a2, 0
vld vr5, a2, 16
vld vr6, a2, 32
vld vr7, a2, 48
vilvl.b vr8, vr4, vr0
vilvh.b vr9, vr4, vr0
vilvl.b vr10, vr5, vr1
vilvh.b vr11, vr5, vr1
vilvl.b vr12, vr6, vr2
vilvh.b vr13, vr6, vr2
vilvl.b vr14, vr7, vr3
vilvh.b vr15, vr7, vr3
vmulwev.h.bu vr0, vr8, vr20
vmulwev.h.bu vr1, vr9, vr20
vmulwev.h.bu vr2, vr10, vr20
vmulwev.h.bu vr3, vr11, vr20
vmulwev.h.bu vr4, vr12, vr20
vmulwev.h.bu vr5, vr13, vr20
vmulwev.h.bu vr6, vr14, vr20
vmulwev.h.bu vr7, vr15, vr20
vmaddwod.h.bu vr0, vr8, vr20
vmaddwod.h.bu vr1, vr9, vr20
vmaddwod.h.bu vr2, vr10, vr20
vmaddwod.h.bu vr3, vr11, vr20
vmaddwod.h.bu vr4, vr12, vr20
vmaddwod.h.bu vr5, vr13, vr20
vmaddwod.h.bu vr6, vr14, vr20
vmaddwod.h.bu vr7, vr15, vr20
vssrarni.bu.h vr1, vr0, 6
vssrarni.bu.h vr3, vr2, 6
vssrarni.bu.h vr5, vr4, 6
vssrarni.bu.h vr7, vr6, 6
vst vr1, a0, 0
vst vr3, a0, 16
vst vr5, a0, 32
vst vr7, a0, 48
addi.d t8, t8, 2
add.d a0, a0, a1
addi.d a2, a2, 64
blt t8, a4, .BLEND_H_W64_LSX
b .BLEND_H_END_LSX
.BLEND_H_W128_LSX:
vldrepl.h vr20, t8, 0
vld vr0, a0, 0
vld vr1, a0, 16
vld vr2, a0, 32
vld vr3, a0, 48
vld vr4, a2, 0
vld vr5, a2, 16
vld vr6, a2, 32
vld vr7, a2, 48
vilvl.b vr8, vr4, vr0
vilvh.b vr9, vr4, vr0
vilvl.b vr10, vr5, vr1
vilvh.b vr11, vr5, vr1
vilvl.b vr12, vr6, vr2
vilvh.b vr13, vr6, vr2
vilvl.b vr14, vr7, vr3
vilvh.b vr15, vr7, vr3
vmulwev.h.bu vr0, vr8, vr20
vmulwev.h.bu vr1, vr9, vr20
vmulwev.h.bu vr2, vr10, vr20
vmulwev.h.bu vr3, vr11, vr20
vmulwev.h.bu vr4, vr12, vr20
vmulwev.h.bu vr5, vr13, vr20
vmulwev.h.bu vr6, vr14, vr20
vmulwev.h.bu vr7, vr15, vr20
vmaddwod.h.bu vr0, vr8, vr20
vmaddwod.h.bu vr1, vr9, vr20
vmaddwod.h.bu vr2, vr10, vr20
vmaddwod.h.bu vr3, vr11, vr20
vmaddwod.h.bu vr4, vr12, vr20
vmaddwod.h.bu vr5, vr13, vr20
vmaddwod.h.bu vr6, vr14, vr20
vmaddwod.h.bu vr7, vr15, vr20
vssrarni.bu.h vr1, vr0, 6
vssrarni.bu.h vr3, vr2, 6
vssrarni.bu.h vr5, vr4, 6
vssrarni.bu.h vr7, vr6, 6
vst vr1, a0, 0
vst vr3, a0, 16
vst vr5, a0, 32
vst vr7, a0, 48
/* second */
vld vr0, a0, 64
vld vr1, a0, 80
vld vr2, a0, 96
vld vr3, a0, 112
vld vr4, a2, 64
vld vr5, a2, 80
vld vr6, a2, 96
vld vr7, a2, 112
vilvl.b vr8, vr4, vr0
vilvh.b vr9, vr4, vr0
vilvl.b vr10, vr5, vr1
vilvh.b vr11, vr5, vr1
vilvl.b vr12, vr6, vr2
vilvh.b vr13, vr6, vr2
vilvl.b vr14, vr7, vr3
vilvh.b vr15, vr7, vr3
vmulwev.h.bu vr0, vr8, vr20
vmulwev.h.bu vr1, vr9, vr20
vmulwev.h.bu vr2, vr10, vr20
vmulwev.h.bu vr3, vr11, vr20
vmulwev.h.bu vr4, vr12, vr20
vmulwev.h.bu vr5, vr13, vr20
vmulwev.h.bu vr6, vr14, vr20
vmulwev.h.bu vr7, vr15, vr20
vmaddwod.h.bu vr0, vr8, vr20
vmaddwod.h.bu vr1, vr9, vr20
vmaddwod.h.bu vr2, vr10, vr20
vmaddwod.h.bu vr3, vr11, vr20
vmaddwod.h.bu vr4, vr12, vr20
vmaddwod.h.bu vr5, vr13, vr20
vmaddwod.h.bu vr6, vr14, vr20
vmaddwod.h.bu vr7, vr15, vr20
vssrarni.bu.h vr1, vr0, 6
vssrarni.bu.h vr3, vr2, 6
vssrarni.bu.h vr5, vr4, 6
vssrarni.bu.h vr7, vr6, 6
vst vr1, a0, 64
vst vr3, a0, 80
vst vr5, a0, 96
vst vr7, a0, 112
addi.d t8, t8, 2
add.d a0, a0, a1
addi.d a2, a2, 128
blt t8, a4, .BLEND_H_W128_LSX
b .BLEND_H_END_LSX
.BLEND_H_END_LSX:
endfunc
/*
 * static void blend_h_lasx(pixel *dst, const ptrdiff_t dst_stride, const pixel *tmp,
const int w, int h)
*/
function blend_h_8bpc_lasx
la.local t8, obmc_masks_la
alsl.d t8, a4, t8, 1
srli.d t0, a4, 1
srli.d t1, a4, 2
add.d a4, t0, t1 // h = (h * 3) >> 2;
slli.d a4, a4, 1
add.d a4, a4, t8
clz.w t0, a3
li.w t1, 24
sub.w t0, t0, t1
la.local t1, .BLEND_H_LASX_JRTABLE
alsl.d t0, t0, t1, 1
ld.h t2, t0, 0 // The jump addresses are relative to JRTABLE
add.d t1, t1, t2 // Get absolute address
jirl $r0, t1, 0
.align 3
.BLEND_H_LASX_JRTABLE:
.hword .BLEND_H_W128_LASX - .BLEND_H_LASX_JRTABLE
.hword .BLEND_H_W64_LASX - .BLEND_H_LASX_JRTABLE
.hword .BLEND_H_W32_LASX - .BLEND_H_LASX_JRTABLE
.hword .BLEND_H_W16_LASX - .BLEND_H_LASX_JRTABLE
.hword .BLEND_H_W8_LASX - .BLEND_H_LASX_JRTABLE
.hword .BLEND_H_W4_LASX - .BLEND_H_LASX_JRTABLE
.hword .BLEND_H_W2_LASX - .BLEND_H_LASX_JRTABLE
.hword .BLEND_H_END_LASX - .BLEND_H_LASX_JRTABLE //Instructions must be 4-byte aligned
.BLEND_H_W2_LASX:
vldrepl.h vr20, t8, 0
vld vr0, a0, 0
vld vr1, a2, 0
vilvl.b vr0, vr1, vr0
vdp2.h.bu vr1, vr0, vr20
vssrarni.bu.h vr1, vr1, 6
vstelm.h vr1, a0, 0, 0
addi.d t8, t8, 2
add.d a0, a0, a1
addi.d a2, a2, 2
blt t8, a4, .BLEND_H_W2_LASX
b .BLEND_H_END_LASX
.BLEND_H_W4_LASX:
vldrepl.h vr20, t8, 0
vld vr0, a0, 0
vld vr1, a2, 0
vilvl.b vr0, vr1, vr0
vdp2.h.bu vr1, vr0, vr20
vssrarni.bu.h vr1, vr1, 6
vstelm.w vr1, a0, 0, 0
addi.d t8, t8, 2
add.d a0, a0, a1
addi.d a2, a2, 4
blt t8, a4, .BLEND_H_W4_LASX
b .BLEND_H_END_LASX
.BLEND_H_W8_LASX:
vldrepl.h vr20, t8, 0
vld vr0, a0, 0
vld vr1, a2, 0
vilvl.b vr0, vr1, vr0
vdp2.h.bu vr1, vr0, vr20
vssrarni.bu.h vr1, vr1, 6
vstelm.d vr1, a0, 0, 0
addi.d t8, t8, 2
add.d a0, a0, a1
addi.d a2, a2, 8
blt t8, a4, .BLEND_H_W8_LASX
b .BLEND_H_END_LASX
.BLEND_H_W16_LASX:
vldrepl.h vr20, t8, 0
vld vr0, a0, 0
vld vr1, a2, 0
vilvl.b vr2, vr1, vr0
vilvh.b vr3, vr1, vr0
vmulwev.h.bu vr4, vr2, vr20
vmulwev.h.bu vr5, vr3, vr20
vmaddwod.h.bu vr4, vr2, vr20
vmaddwod.h.bu vr5, vr3, vr20
vssrarni.bu.h vr5, vr4, 6
vst vr5, a0, 0
addi.d t8, t8, 2
add.d a0, a0, a1
addi.d a2, a2, 16
    blt t8, a4, .BLEND_H_W16_LASX
    b .BLEND_H_END_LASX
.BLEND_H_W32_LASX:
xvldrepl.h xr20, t8, 0
xvld xr0, a0, 0
xvld xr1, a2, 0
xvilvl.b xr2, xr1, xr0
xvilvh.b xr3, xr1, xr0
xvmulwev.h.bu xr4, xr2, xr20
xvmulwev.h.bu xr5, xr3, xr20
xvmaddwod.h.bu xr4, xr2, xr20
xvmaddwod.h.bu xr5, xr3, xr20
xvssrarni.bu.h xr5, xr4, 6
xvst xr5, a0, 0
addi.d t8, t8, 2
add.d a0, a0, a1
addi.d a2, a2, 32
blt t8, a4, .BLEND_H_W32_LASX
b .BLEND_H_END_LASX
.BLEND_H_W64_LASX:
xvldrepl.h xr20, t8, 0
xvld xr0, a0, 0
xvld xr1, a0, 32
xvld xr2, a2, 0
xvld xr3, a2, 32
xvilvl.b xr4, xr2, xr0
xvilvh.b xr5, xr2, xr0
xvilvl.b xr6, xr3, xr1
xvilvh.b xr7, xr3, xr1
xvmulwev.h.bu xr0, xr4, xr20
xvmulwev.h.bu xr1, xr5, xr20
xvmulwev.h.bu xr2, xr6, xr20
xvmulwev.h.bu xr3, xr7, xr20
xvmaddwod.h.bu xr0, xr4, xr20
xvmaddwod.h.bu xr1, xr5, xr20
xvmaddwod.h.bu xr2, xr6, xr20
xvmaddwod.h.bu xr3, xr7, xr20
xvssrarni.bu.h xr1, xr0, 6
xvssrarni.bu.h xr3, xr2, 6
xvst xr1, a0, 0
xvst xr3, a0, 32
addi.d t8, t8, 2
add.d a0, a0, a1
addi.d a2, a2, 64
blt t8, a4, .BLEND_H_W64_LASX
b .BLEND_H_END_LASX
.BLEND_H_W128_LASX:
xvldrepl.h xr20, t8, 0
xvld xr0, a0, 0
xvld xr1, a0, 32
xvld xr2, a0, 64
xvld xr3, a0, 96
xvld xr4, a2, 0
xvld xr5, a2, 32
xvld xr6, a2, 64
xvld xr7, a2, 96
xvilvl.b xr8, xr4, xr0
xvilvh.b xr9, xr4, xr0
xvilvl.b xr10, xr5, xr1
xvilvh.b xr11, xr5, xr1
xvilvl.b xr12, xr6, xr2
xvilvh.b xr13, xr6, xr2
xvilvl.b xr14, xr7, xr3
xvilvh.b xr15, xr7, xr3
xvmulwev.h.bu xr0, xr8, xr20
xvmulwev.h.bu xr1, xr9, xr20
xvmulwev.h.bu xr2, xr10, xr20
xvmulwev.h.bu xr3, xr11, xr20
xvmulwev.h.bu xr4, xr12, xr20
xvmulwev.h.bu xr5, xr13, xr20
xvmulwev.h.bu xr6, xr14, xr20
xvmulwev.h.bu xr7, xr15, xr20
xvmaddwod.h.bu xr0, xr8, xr20
xvmaddwod.h.bu xr1, xr9, xr20
xvmaddwod.h.bu xr2, xr10, xr20
xvmaddwod.h.bu xr3, xr11, xr20
xvmaddwod.h.bu xr4, xr12, xr20
xvmaddwod.h.bu xr5, xr13, xr20
xvmaddwod.h.bu xr6, xr14, xr20
xvmaddwod.h.bu xr7, xr15, xr20
xvssrarni.bu.h xr1, xr0, 6
xvssrarni.bu.h xr3, xr2, 6
xvssrarni.bu.h xr5, xr4, 6
xvssrarni.bu.h xr7, xr6, 6
xvst xr1, a0, 0
xvst xr3, a0, 32
xvst xr5, a0, 64
xvst xr7, a0, 96
addi.d t8, t8, 2
add.d a0, a0, a1
addi.d a2, a2, 128
blt t8, a4, .BLEND_H_W128_LASX
b .BLEND_H_END_LASX
.BLEND_H_END_LASX:
endfunc
/*
* a1=16 | a2=8 | a3=4
* temp reg: a4
*/
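/*
 * PIXEL_COPY_LSX copies \_size bytes from \_src to \_dst, draining the count
 * in 16/8/4/1-byte chunks; a1, a2 and a3 are expected to already hold the
 * constants 16, 8 and 4 (set up once in emu_edge_8bpc_lsx below).
 */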
.macro PIXEL_COPY_LSX _dst, _src, _size
blt \_size, a1, 8f
16:
vld vr0, \_src, 0
vst vr0, \_dst, 0
addi.d \_size, \_size, -16
addi.d \_dst, \_dst, 16
addi.d \_src, \_src, 16
blt a1, \_size, 16b
8:
blt \_size, a2, 14f
ld.d a4, \_src, 0
st.d a4, \_dst, 0
addi.d \_size, \_size, -8
addi.d \_dst, \_dst, 8
addi.d \_src, \_src, 8
14:
blt \_size, a3, 11f
ld.w a4, \_src, 0
st.w a4, \_dst, 0
addi.d \_size, \_size, -4
addi.d \_dst, \_dst, 4
addi.d \_src, \_src, 4
11:
beqz \_size, 110f
111:
ld.b a4, \_src, 0
st.b a4, \_dst, 0
addi.d \_size, \_size, -1
addi.d \_dst, \_dst, 1
addi.d \_src, \_src, 1
bnez \_size, 111b
110:
.endm
/*
* a1=16 | a2=8 | a3=4
*/
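/*
 * PIXEL_SET_LSX writes \_size copies of the fill byte to \_dst; it assumes
 * \_vsrc already holds that byte replicated across all lanes (vldrepl.b in
 * DEGE_LOOP) and uses the same 16/8/4/1 chunking as PIXEL_COPY_LSX.
 */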
.macro PIXEL_SET_LSX _dst, _vsrc, _size
blt \_size, a1, 8f
16:
vst \_vsrc, \_dst, 0
addi.d \_size, \_size, -16
addi.d \_dst, \_dst, 16
blt a1, \_size, 16b
8:
blt \_size, a2, 14f
vstelm.d \_vsrc, \_dst, 0, 0
addi.d \_size, \_size, -8
addi.d \_dst, \_dst, 8
14:
blt \_size, a3, 11f
vstelm.w \_vsrc, \_dst, 0, 0
addi.d \_size, \_size, -4
addi.d \_dst, \_dst, 4
11:
beqz \_size, 110f
111:
vstelm.b \_vsrc, \_dst, 0, 0
addi.d \_size, \_size, -1
addi.d \_dst, \_dst, 1
bnez \_size, 111b
110:
.endm
/*
* temp reg: a4 a5 t2 t3 vr0
*/
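/*
 * DEGE_LOOP emits the per-row edge-extension loop: for each of the t5 rows
 * it optionally fills t0 bytes of left extension with the first source
 * pixel, copies t4 center bytes, optionally fills t1 bytes of right
 * extension with the last copied pixel, then advances src (t7) by t8 and
 * dst (t6) by a7.
 */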
.macro DEGE_LOOP need_left, need_right
0:
addi.d t2, t6, 0 // dst
addi.d t3, t7, 0 // src
.if \need_left
vldrepl.b vr0, t3, 0
addi.d a5, t0, 0
PIXEL_SET_LSX t2, vr0, a5
.endif
addi.d a5, t4, 0
PIXEL_COPY_LSX t2, t3, a5
.if \need_right
vldrepl.b vr0, t3, -1
addi.d a5, t1, 0
PIXEL_SET_LSX t2, vr0, a5
.endif
addi.d t5, t5, -1
add.d t7, t7, t8
add.d t6, t6, a7
bnez t5, 0b
.endm
/*
* static void emu_edge_c(const intptr_t bw, const intptr_t bh,
* const intptr_t iw, const intptr_t ih,
* const intptr_t x, const intptr_t y,
* pixel *dst, const ptrdiff_t dst_stride,
* const pixel *ref, const ptrdiff_t ref_stride)
*/
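/*
 * Sketch of the approach, assuming the usual emu_edge semantics: (x, y) is
 * clamped into [0, iw-1] x [0, ih-1] to locate the reference pointer, the
 * left/right/top/bottom extension widths are clamped into [0, bw-1] /
 * [0, bh-1], the center rows are produced by DEGE_LOOP with edge-pixel
 * replication, and the top/bottom extension rows are then filled by copying
 * the first/last produced row.
 */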
function emu_edge_8bpc_lsx
vxor.v vr23, vr23, vr23 // zero
addi.d t0, a3, -1 // ih - 1
addi.d t1, a2, -1 // iw - 1
vreplgr2vr.w vr22, t0
vinsgr2vr.w vr22, t1, 1
vreplgr2vr.w vr0, a5
vinsgr2vr.w vr0, a4, 1 // [0] - h | [1] - w
vclip.w vr2, vr0, vr23, vr22
vpickve2gr.w t0, vr2, 0
ld.d t2, sp, 0
ld.d t8, sp, 8 // ref_stride
mul.w t0, t0, t8
vpickve2gr.w t1, vr2, 1
add.d t2, t2, t1
add.d t7, t0, t2 // ref
addi.d t0, a0, -1 // bw - 1
addi.d t1, a1, -1 // bh - 1
vreplgr2vr.w vr21, t0
vreplgr2vr.w vr22, t1
vilvl.d vr21, vr22, vr21
sub.d t2, zero, a4 // -x
add.d t3, a0, a4
sub.d t3, t3, a2 // x + bw - iw
sub.d t4, zero, a5 // -y
add.d t5, a1, a5
sub.d t5, t5, a3 // y + bh - ih
vreplgr2vr.w vr0, t2
vinsgr2vr.w vr0, t3, 1
vinsgr2vr.w vr0, t4, 2
vinsgr2vr.w vr0, t5, 3
vclip.w vr2, vr0, vr23, vr21
vpickve2gr.w t0, vr2, 0 // left_ext
vpickve2gr.w t1, vr2, 1 // right_ext
vpickve2gr.w t2, vr2, 2 // top_ext
vpickve2gr.w t3, vr2, 3 // bottom_ext
mul.w t6, t2, a7
add.d t4, t0, t1
add.d t5, t2, t3
sub.d t4, a0, t4 // center_w
sub.d t5, a1, t5 // center_h
addi.d a1, zero, 16
addi.d a2, zero, 8
addi.d a3, zero, 4
add.d t6, t6, a6 // blk
beqz t0, 2f
// need_left
beqz t1, 3f
// need_left + need_right
DEGE_LOOP 1, 1
b 5f
2:
// !need_left
beqz t1, 4f
// !need_left + need_right
DEGE_LOOP 0, 1
b 5f
3:
// need_left + !need_right
DEGE_LOOP 1, 0
b 5f
4:
// !need_left + !need_right
DEGE_LOOP 0, 0
5:
vpickve2gr.w t2, vr2, 2 // top_ext
vpickve2gr.w t3, vr2, 3 // bottom_ext
sub.d t7, a7, a0 // dst_stride - bw
mul.w t8, t2, a7
beqz t3, 2f
// need_bottom
sub.d t0, t6, a7 // &dst[-PXSTRIDE(dst_stride)]
1:
addi.d t1, t0, 0
addi.d a5, a0, 0
PIXEL_COPY_LSX t6, t1, a5
add.d t6, t6, t7
addi.d t3, t3, -1
bnez t3, 1b
2:
beqz t2, 3f
// need_top
add.d t8, t8, a6 // blk
1:
addi.d t1, t8, 0
addi.d a5, a0, 0
PIXEL_COPY_LSX a6, t1, a5
add.d a6, a6, t7
addi.d t2, t2, -1
bnez t2, 1b
3:
endfunc
/* third_party/dav1d/src/loongarch/loongson_util.S */
/******************************************************************************
* Copyright © 2024, VideoLAN and dav1d authors
* Copyright © 2024, Loongson Technology Corporation Limited
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#ifndef DAV1D_SRC_LOONGSON_UTIL_S
#define DAV1D_SRC_LOONGSON_UTIL_S
#ifndef DEFAULT_ALIGN
#define DEFAULT_ALIGN 5
#endif
// The trailing 'l' means local: functionl declares a local (hidden) function and is closed with endfuncl.
.macro functionl name, align=DEFAULT_ALIGN
.macro endfuncl
jirl $r0, $r1, 0x0
.size \name, . - \name
.purgem endfuncl
.endm
.text ;
.align \align ;
.hidden \name ;
.type \name, @function ;
\name: ;
.endm
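// Usage sketch (the helper name is hypothetical, shown only for illustration):
//     functionl some_local_helper
//         // ... body ...
//     endfuncl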
.macro TRANSPOSE_4x16B in0, in1 ,in2, in3, in4, in5, in6, in7
vpackev.b \in4, \in1, \in0
vpackod.b \in5, \in1, \in0
vpackev.b \in6, \in3, \in2
vpackod.b \in7, \in3, \in2
vpackev.h \in0, \in6, \in4
vpackod.h \in2, \in6, \in4
vpackev.h \in1, \in7, \in5
vpackod.h \in3, \in7, \in5
.endm
.macro TRANSPOSE_8x16B in0, in1, in2, in3, in4, in5, in6, in7, in8, in9
vpackev.b \in8, \in1, \in0
vpackod.b \in9, \in1, \in0
vpackev.b \in1, \in3, \in2
vpackod.b \in3, \in3, \in2
vpackev.b \in0, \in5, \in4
vpackod.b \in5, \in5, \in4
vpackev.b \in2, \in7, \in6
vpackod.b \in7, \in7, \in6
vpackev.h \in4, \in2, \in0
vpackod.h \in2, \in2, \in0
vpackev.h \in6, \in7, \in5
vpackod.h \in7, \in7, \in5
vpackev.h \in5, \in3, \in9
vpackod.h \in9, \in3, \in9
vpackev.h \in3, \in1, \in8
vpackod.h \in8, \in1, \in8
vpackev.w \in0, \in4, \in3
vpackod.w \in4, \in4, \in3
vpackev.w \in1, \in6, \in5
vpackod.w \in5, \in6, \in5
vpackod.w \in6, \in2, \in8
vpackev.w \in2, \in2, \in8
vpackev.w \in3, \in7, \in9
vpackod.w \in7, \in7, \in9
.endm
.macro vld_x8 src, start, stride, in0, in1, in2, in3, in4, in5, in6, in7
vld \in0, \src, \start
vld \in1, \src, \start+(\stride*1)
vld \in2, \src, \start+(\stride*2)
vld \in3, \src, \start+(\stride*3)
vld \in4, \src, \start+(\stride*4)
vld \in5, \src, \start+(\stride*5)
vld \in6, \src, \start+(\stride*6)
vld \in7, \src, \start+(\stride*7)
.endm
.macro vst_x8 src, start, stride, in0, in1, in2, in3, in4, in5, in6, in7
vst \in0, \src, \start
vst \in1, \src, \start+(\stride*1)
vst \in2, \src, \start+(\stride*2)
vst \in3, \src, \start+(\stride*3)
vst \in4, \src, \start+(\stride*4)
vst \in5, \src, \start+(\stride*5)
vst \in6, \src, \start+(\stride*6)
vst \in7, \src, \start+(\stride*7)
.endm
.macro vld_x16 src, start, stride, in0, in1, in2, in3, in4, in5, in6, in7, \
in8, in9, in10, in11, in12, in13, in14, in15
vld_x8 \src, \start, \stride, \in0, \in1, \in2, \in3, \in4, \in5, \in6, \in7
vld \in8, \src, \start+(\stride*8)
vld \in9, \src, \start+(\stride*9)
vld \in10, \src, \start+(\stride*10)
vld \in11, \src, \start+(\stride*11)
vld \in12, \src, \start+(\stride*12)
vld \in13, \src, \start+(\stride*13)
vld \in14, \src, \start+(\stride*14)
vld \in15, \src, \start+(\stride*15)
.endm
.macro vst_x16 src, start, stride, in0, in1, in2, in3, in4, in5, in6, in7, \
in8, in9, in10, in11, in12, in13, in14, in15
vst_x8 \src, \start, \stride, \in0, \in1, \in2, \in3, \in4, \in5, \in6, \in7
vst \in8, \src, \start+(\stride*8)
vst \in9, \src, \start+(\stride*9)
vst \in10, \src, \start+(\stride*10)
vst \in11, \src, \start+(\stride*11)
vst \in12, \src, \start+(\stride*12)
vst \in13, \src, \start+(\stride*13)
vst \in14, \src, \start+(\stride*14)
vst \in15, \src, \start+(\stride*15)
.endm
.macro xvld_x8 src, start, stride, in0, in1, in2, in3, in4, in5, in6, in7
xvld \in0, \src, \start
xvld \in1, \src, \start+(\stride)
xvld \in2, \src, \start+(\stride<<1)
xvld \in3, \src, \start+(\stride<<1)+(\stride)
xvld \in4, \src, \start+(\stride<<2)
xvld \in5, \src, \start+(\stride<<2)+(\stride)
xvld \in6, \src, \start+(\stride*6)
xvld \in7, \src, \start+(\stride<<3)-(\stride)
.endm
.macro xvst_x8 src, start, stride, in0, in1, in2, in3, in4, in5, in6, in7
xvst \in0, \src, \start
xvst \in1, \src, \start+(\stride)
xvst \in2, \src, \start+(\stride<<1)
xvst \in3, \src, \start+(\stride<<1)+(\stride)
xvst \in4, \src, \start+(\stride<<2)
xvst \in5, \src, \start+(\stride<<2)+(\stride)
xvst \in6, \src, \start+(\stride*6)
xvst \in7, \src, \start+(\stride<<3)-(\stride)
.endm
.macro xvld_x16 src, start, stride, in0, in1, in2, in3, in4, in5, in6, in7, \
in8, in9, in10, in11, in12, in13, in14, in15
xvld_x8 \src, \start, \stride, \in0, \in1, \in2, \in3, \in4, \in5, \in6, \in7
xvld \in8, \src, \start+(\stride<<3)
xvld \in9, \src, \start+(\stride<<3)+(\stride)
xvld \in10, \src, \start+(\stride*10)
xvld \in11, \src, \start+(\stride*11)
xvld \in12, \src, \start+(\stride*12)
xvld \in13, \src, \start+(\stride*13)
xvld \in14, \src, \start+(\stride*14)
xvld \in15, \src, \start+(\stride<<4)-(\stride)
.endm
.macro xvst_x16 src, start, stride, in0, in1, in2, in3, in4, in5, in6, in7, \
in8, in9, in10, in11, in12, in13, in14, in15
xvst_x8 \src, \start, \stride, \in0, \in1, \in2, \in3, \in4, \in5, \in6, \in7
xvst \in8, \src, \start+(\stride<<3)
xvst \in9, \src, \start+(\stride<<3)+(\stride)
xvst \in10, \src, \start+(\stride*10)
xvst \in11, \src, \start+(\stride*11)
xvst \in12, \src, \start+(\stride*12)
xvst \in13, \src, \start+(\stride*13)
xvst \in14, \src, \start+(\stride*14)
xvst \in15, \src, \start+(\stride<<4)-(\stride)
.endm
#endif /* DAV1D_SRC_LOONGSON_UTIL_S */
third_party/dav1d/src/loongarch/ipred.S
/*
* Copyright © 2024, VideoLAN and dav1d authors
* Copyright © 2024, Loongson Technology Corporation Limited
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/loongarch/loongson_asm.S"
.macro ipred_dc_gen topleft, width, height
add.d t0, \width, \height //dc
srai.d t0, t0, 1
addi.d t3, \topleft, 1
or t1, zero, zero //data index
srai.d t2, \width, 4 //loop param
beqz t2, 2f
1: // width/16
vldx vr0, t3, t1
vhaddw.hu.bu vr0, vr0, vr0
vhaddw.wu.hu vr0, vr0, vr0
vhaddw.du.wu vr0, vr0, vr0
vhaddw.qu.du vr0, vr0, vr0
vpickve2gr.du t4, vr0, 0
add.d t0, t0, t4
addi.d t1, t1, 16
addi.d t2, t2, -1
bnez t2, 1b
b 4f
2: // &8
andi t2, \width, 8
beqz t2, 3f
vxor.v vr0, vr0, vr0
fldx.d f0, t3, t1
vhaddw.hu.bu vr0, vr0, vr0
vhaddw.wu.hu vr0, vr0, vr0
vhaddw.du.wu vr0, vr0, vr0
vpickve2gr.du t4, vr0, 0
add.d t0, t0, t4
addi.d t1, t1, 8
b 4f
3: // &4
andi t2, \width, 4
beqz t2, 4f
vxor.v vr0, vr0, vr0
fldx.s f0, t3, t1
vhaddw.hu.bu vr0, vr0, vr0
vhaddw.wu.hu vr0, vr0, vr0
vpickve2gr.wu t4, vr0, 0
add.d t0, t0, t4
addi.d t1, t1, 4
4:
addi.d t3, \topleft, 0
srai.d t2, \height, 4 //loop param
beqz t2, 8f
7: // height/16
addi.d t3, t3, -16
vld vr0, t3, 0
vhaddw.hu.bu vr0, vr0, vr0
vhaddw.wu.hu vr0, vr0, vr0
vhaddw.du.wu vr0, vr0, vr0
vhaddw.qu.du vr0, vr0, vr0
vpickve2gr.du t4, vr0, 0
add.d t0, t0, t4
addi.d t2, t2, -1
bnez t2, 7b
b 10f
8: // &8
andi t2, \height, 8
beqz t2, 9f
addi.d t3, t3, -8
vxor.v vr0, vr0, vr0
fld.d f0, t3, 0
vhaddw.hu.bu vr0, vr0, vr0
vhaddw.wu.hu vr0, vr0, vr0
vhaddw.du.wu vr0, vr0, vr0
vpickve2gr.du t4, vr0, 0
add.d t0, t0, t4
b 10f
9: // &4
andi t2, \height, 4
beqz t2, 10f
addi.d t3, t3, -4
vxor.v vr0, vr0, vr0
fld.s f0, t3, 0
vhaddw.hu.bu vr0, vr0, vr0
vhaddw.wu.hu vr0, vr0, vr0
vpickve2gr.wu t4, vr0, 0
add.d t0, t0, t4
10:
add.d t1, \width, \height
ctz.w t1, t1
sra.w t0, t0, t1
// w != h
beq \width, \height, 16f
add.d t2, \height, \height
add.d t3, \width, \width
slt t2, t2, \width
slt t3, t3, \height
or t2, t2, t3
li.w t3, 0x3334
maskeqz t1, t3, t2
li.w t3, 0x5556
masknez t2, t3, t2
or t1, t1, t2
mul.w t0, t0, t1
srai.w t0, t0, 16
16:
.endm
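// What the ipred_dc_gen macro above computes, sketched in C (derived from the
// assembly; the iclip/ctz naming is illustrative):
//
//     int dc = (width + height) >> 1;
//     for (int i = 0; i < width; i++)  dc += topleft[1 + i];    // top row
//     for (int i = 0; i < height; i++) dc += topleft[-(1 + i)]; // left column
//     dc >>= ctz(width + height);
//     if (width != height) {   // correction when w+h is not a power of two
//         dc *= (height * 2 < width || width * 2 < height) ? 0x3334 : 0x5556;
//         dc >>= 16;
//     }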
.macro ipred_splat_dc dst, stride, width, height, dc
li.w t1, 4
blt t1, \width, 2f
li.w t1, 0x01010101
mulw.d.wu t1, \dc, t1
beqz \height, 7f
or t2, \dst, \dst
1: // width <= 4
st.w t1, t2, 0
add.d t2, t2, \stride
addi.d \height, \height, -1
bnez \height, 1b
b 7f
2: //width > 4
li.d t1, 0x0101010101010101
mul.d t1, \dc, t1
vreplgr2vr.d vr0, t1
or t4, \dst, \dst
beqz \height, 7f
3:
andi t5, \width, 64
beqz t5, 4f
vst vr0, t4, 0
vst vr0, t4, 16
vst vr0, t4, 32
vst vr0, t4, 48
b 6f
4:
andi t5, \width, 32
beqz t5, 41f
vst vr0, t4, 0
vst vr0, t4, 16
b 6f
41:
andi t5, \width, 16
beqz t5, 5f
vst vr0, t4, 0
b 6f
5:
fst.d f0, t4, 0
6:
add.d t4, t4, \stride
addi.d \height, \height, -1
bnez \height, 3b
7:
.endm
.macro ipred_dc_gen_top topleft, width
srai.d t0, \width, 1
addi.d t1, \topleft, 1
srai.d t2, \width, 4
beqz t2, 2f
1:
vld vr0, t1, 0
vhaddw.hu.bu vr0, vr0, vr0
vhaddw.wu.hu vr0, vr0, vr0
vhaddw.du.wu vr0, vr0, vr0
vhaddw.qu.du vr0, vr0, vr0
vpickve2gr.du t3, vr0, 0
add.d t0, t0, t3
addi.d t1, t1, 16
addi.d t2, t2, -1
bnez t2, 1b
b 4f
2: // &8
andi t2, \width, 8
beqz t2, 3f
vxor.v vr0, vr0, vr0
fld.d f0, t1, 0
vhaddw.hu.bu vr0, vr0, vr0
vhaddw.wu.hu vr0, vr0, vr0
vhaddw.du.wu vr0, vr0, vr0
vpickve2gr.du t2, vr0, 0
add.d t0, t0, t2
addi.d t1, t1, 8
b 4f
3: // &4
andi t2, \width, 4
beqz t2, 4f
vxor.v vr0, vr0, vr0
fld.s f0, t1, 0
vhaddw.hu.bu vr0, vr0, vr0
vhaddw.wu.hu vr0, vr0, vr0
vpickve2gr.du t2, vr0, 0
add.d t0, t0, t2
addi.d t1, t1, 4
4:
ctz.w t1, \width
sra.w t0, t0, t1
.endm
.macro ipred_dc_gen_left topleft, height
srai.d t0, \height, 1
srai.d t2, \height, 4 //loop param
beqz t2, 8f
7: // height/16
addi.d \topleft, \topleft, -16
vld vr0, \topleft, 0
vhaddw.hu.bu vr0, vr0, vr0
vhaddw.wu.hu vr0, vr0, vr0
vhaddw.du.wu vr0, vr0, vr0
vhaddw.qu.du vr0, vr0, vr0
vpickve2gr.du t4, vr0, 0
add.d t0, t0, t4
addi.d t2, t2, -1
bnez t2, 7b
b 10f
8: // &8
andi t2, \height, 8
beqz t2, 9f
addi.d \topleft, \topleft, -8
vxor.v vr0, vr0, vr0
fld.d f0, \topleft, 0
vhaddw.hu.bu vr0, vr0, vr0
vhaddw.wu.hu vr0, vr0, vr0
vhaddw.du.wu vr0, vr0, vr0
vpickve2gr.du t4, vr0, 0
add.d t0, t0, t4
b 10f
9: // &4
andi t2, \height, 4
beqz t2, 10f
addi.d \topleft, \topleft, -4
vxor.v vr0, vr0, vr0
fld.s f0, \topleft, 0
vhaddw.hu.bu vr0, vr0, vr0
vhaddw.wu.hu vr0, vr0, vr0
vpickve2gr.wu t4, vr0, 0
add.d t0, t0, t4
10:
ctz.w t1, \height
sra.w t0, t0, t1
.endm
// void ipred_dc_lsx(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height
// HIGHBD_DECL_SUFFIX)
function ipred_dc_8bpc_lsx
ipred_dc_gen a2, a3, a4
ipred_splat_dc a0, a1, a3, a4, t0
endfunc
// void ipred_dc_128_lsx(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height
// HIGHBD_DECL_SUFFIX)
function ipred_dc_128_8bpc_lsx
li.w t0, 128
ipred_splat_dc a0, a1, a3, a4, t0
endfunc
// void ipred_dc_top_c(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height
// HIGHBD_DECL_SUFFIX)
function ipred_dc_top_8bpc_lsx
ipred_dc_gen_top a2, a3
ipred_splat_dc a0, a1, a3, a4, t0
endfunc
// void ipred_dc_left_c(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height
// HIGHBD_DECL_SUFFIX)
function ipred_dc_left_8bpc_lsx
ipred_dc_gen_left a2, a4
ipred_splat_dc a0, a1, a3, a4, t0
endfunc
.macro pixel_set_8bpc dst_ptr, src_ptr, width
vldrepl.b vr0, \src_ptr, 0
1:
andi a5, \width, 64
beqz a5, 2f
vst vr0, \dst_ptr, 0
vst vr0, \dst_ptr, 16
vst vr0, \dst_ptr, 32
vst vr0, \dst_ptr, 48
b 6f
2:
andi a5, \width, 32
beqz a5, 3f
vst vr0, \dst_ptr, 0
vst vr0, \dst_ptr, 16
b 6f
3:
andi a5, \width, 16
beqz a5, 4f
vst vr0, \dst_ptr, 0
b 6f
4:
andi a5, \width, 8
beqz a5, 5f
fst.d f0, \dst_ptr, 0
b 6f
5:
andi a5, \width, 4
beqz a5, 6f
fst.s f0, \dst_ptr, 0
6:
.endm
// void ipred_h_c(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height
// HIGHBD_DECL_SUFFIX)
function ipred_h_8bpc_lsx
beqz a4, .IPRED_H_END
.IPRED_H_LOOP:
addi.d a2, a2, -1
pixel_set_8bpc a0, a2, a3
add.d a0, a0, a1
addi.d a4, a4, -1
bnez a4, .IPRED_H_LOOP
.IPRED_H_END:
endfunc
.macro pixel_copy_8bpc dst_ptr, src_ptr, width
1:
andi a5, \width, 64
beqz a5, 2f
vld vr0, \src_ptr, 0
vld vr1, \src_ptr, 16
vld vr2, \src_ptr, 32
vld vr3, \src_ptr, 48
vst vr0, \dst_ptr, 0
vst vr1, \dst_ptr, 16
vst vr2, \dst_ptr, 32
vst vr3, \dst_ptr, 48
b 6f
2:
andi a5, \width, 32
beqz a5, 3f
vld vr0, \src_ptr, 0
vld vr1, \src_ptr, 16
vst vr0, \dst_ptr, 0
vst vr1, \dst_ptr, 16
b 6f
3:
andi a5, \width, 16
beqz a5, 4f
vld vr0, \src_ptr, 0
vst vr0, \dst_ptr, 0
b 6f
4:
andi a5, \width, 8
beqz a5, 5f
fld.d f0, \src_ptr, 0
fst.d f0, \dst_ptr, 0
b 6f
5:
andi a5, \width, 4
beqz a5, 6f
fld.s f0, \src_ptr, 0
fst.s f0, \dst_ptr, 0
6:
.endm
// void ipred_v_lsx(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height
// HIGHBD_DECL_SUFFIX)
function ipred_v_8bpc_lsx
beqz a4, .IPRED_V_END
addi.d a2, a2, 1
.IPRED_V_LOOP:
pixel_copy_8bpc a0, a2, a3
add.d a0, a0, a1
addi.d a4, a4, -1
bnez a4, .IPRED_V_LOOP
.IPRED_V_END:
endfunc
// void ipred_paeth_lsx(pixel *dst, const ptrdiff_t stride,
// const pixel *const tl_ptr,
// const int width, const int height, const int a,
// const int max_width, const int max_height
// HIGHBD_DECL_SUFFIX)
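// The Paeth selection vectorized below, sketched in C for one pixel (names are
// illustrative):
//
//     const int tl = topleft[0], left = topleft[-(1 + y)], top = topleft[1 + x];
//     const int ldiff  = abs(tl - top);             // |left - (left + top - tl)|
//     const int tdiff  = abs(tl - left);            // |top  - (left + top - tl)|
//     const int tldiff = abs(2 * tl - (left + top));
//     dst[x] = (ldiff <= tdiff && ldiff <= tldiff) ? left
//            : (tdiff <= tldiff)                   ? top : tl;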
function ipred_paeth_8bpc_lsx
vldrepl.b vr0, a2, 0 //topleft
vsllwil.hu.bu vr0, vr0, 0
or a6, a2, a2
addi.d a7, a2, 1
.IPRED_PAETH_H_LOOP:
addi.d a6, a6, -1
vldrepl.b vr1, a6, 0 //left
vsllwil.hu.bu vr1, vr1, 0
.IPRED_PAETH_W_LOOP64:
andi a5, a3, 64
beqz a5, .IPRED_PAETH_W_LOOP32
vld vr2, a7, 0 //top
vpermi.w vr9, vr2, 0x0e
vsllwil.hu.bu vr2, vr2, 0
vsllwil.hu.bu vr9, vr9, 0
vabsd.hu vr5, vr0, vr1 //tdiff
vabsd.hu vr4, vr0, vr2 //ldiff
vabsd.hu vr10, vr0, vr9
vadd.h vr3, vr0, vr0
vadd.h vr6, vr1, vr2
vadd.h vr11, vr1, vr9
vabsd.hu vr6, vr3, vr6 //tldiff
vabsd.hu vr11, vr3, vr11 //tldiff
vsle.hu vr3, vr5, vr6
vbitsel.v vr7, vr0, vr2, vr3
vsle.hu vr3, vr4, vr5
vsle.hu vr8, vr4, vr6
vand.v vr3, vr3, vr8
vbitsel.v vr3, vr7, vr1, vr3
vsrlni.b.h vr3, vr3, 0
vsle.hu vr12, vr5, vr11
vbitsel.v vr7, vr0, vr9, vr12
vsle.hu vr12, vr10, vr5
vsle.hu vr8, vr10, vr11
vand.v vr12, vr12, vr8
vbitsel.v vr12, vr7, vr1, vr12
vsrlni.b.h vr12, vr12, 0
vpermi.w vr12, vr3, 0x44
vst vr12, a0, 0
vld vr2, a7, 16 //top
vpermi.w vr9, vr2, 0x0e
vsllwil.hu.bu vr2, vr2, 0
vsllwil.hu.bu vr9, vr9, 0
vabsd.hu vr5, vr0, vr1 //tdiff
vabsd.hu vr4, vr0, vr2 //ldiff
vabsd.hu vr10, vr0, vr9
vadd.h vr3, vr0, vr0
vadd.h vr6, vr1, vr2
vadd.h vr11, vr1, vr9
vabsd.hu vr6, vr3, vr6 //tldiff
vabsd.hu vr11, vr3, vr11 //tldiff
vsle.hu vr3, vr5, vr6
vbitsel.v vr7, vr0, vr2, vr3
vsle.hu vr3, vr4, vr5
vsle.hu vr8, vr4, vr6
vand.v vr3, vr3, vr8
vbitsel.v vr3, vr7, vr1, vr3
vsrlni.b.h vr3, vr3, 0
vsle.hu vr12, vr5, vr11
vbitsel.v vr7, vr0, vr9, vr12
vsle.hu vr12, vr10, vr5
vsle.hu vr8, vr10, vr11
vand.v vr12, vr12, vr8
vbitsel.v vr12, vr7, vr1, vr12
vsrlni.b.h vr12, vr12, 0
vpermi.w vr12, vr3, 0x44
vst vr12, a0, 16
vld vr2, a7, 32 //top
vpermi.w vr9, vr2, 0x0e
vsllwil.hu.bu vr2, vr2, 0
vsllwil.hu.bu vr9, vr9, 0
vabsd.hu vr5, vr0, vr1 //tdiff
vabsd.hu vr4, vr0, vr2 //ldiff
vabsd.hu vr10, vr0, vr9
vadd.h vr3, vr0, vr0
vadd.h vr6, vr1, vr2
vadd.h vr11, vr1, vr9
vabsd.hu vr6, vr3, vr6 //tldiff
vabsd.hu vr11, vr3, vr11 //tldiff
vsle.hu vr3, vr5, vr6
vbitsel.v vr7, vr0, vr2, vr3
vsle.hu vr3, vr4, vr5
vsle.hu vr8, vr4, vr6
vand.v vr3, vr3, vr8
vbitsel.v vr3, vr7, vr1, vr3
vsrlni.b.h vr3, vr3, 0
vsle.hu vr12, vr5, vr11
vbitsel.v vr7, vr0, vr9, vr12
vsle.hu vr12, vr10, vr5
vsle.hu vr8, vr10, vr11
vand.v vr12, vr12, vr8
vbitsel.v vr12, vr7, vr1, vr12
vsrlni.b.h vr12, vr12, 0
vpermi.w vr12, vr3, 0x44
vst vr12, a0, 32
vld vr2, a7, 48 //top
vpermi.w vr9, vr2, 0x0e
vsllwil.hu.bu vr2, vr2, 0
vsllwil.hu.bu vr9, vr9, 0
vabsd.hu vr5, vr0, vr1 //tdiff
vabsd.hu vr4, vr0, vr2 //ldiff
vabsd.hu vr10, vr0, vr9
vadd.h vr3, vr0, vr0
vadd.h vr6, vr1, vr2
vadd.h vr11, vr1, vr9
vabsd.hu vr6, vr3, vr6 //tldiff
vabsd.hu vr11, vr3, vr11 //tldiff
vsle.hu vr3, vr5, vr6
vbitsel.v vr7, vr0, vr2, vr3
vsle.hu vr3, vr4, vr5
vsle.hu vr8, vr4, vr6
vand.v vr3, vr3, vr8
vbitsel.v vr3, vr7, vr1, vr3
vsrlni.b.h vr3, vr3, 0
vsle.hu vr12, vr5, vr11
vbitsel.v vr7, vr0, vr9, vr12
vsle.hu vr12, vr10, vr5
vsle.hu vr8, vr10, vr11
vand.v vr12, vr12, vr8
vbitsel.v vr12, vr7, vr1, vr12
vsrlni.b.h vr12, vr12, 0
vpermi.w vr12, vr3, 0x44
vst vr12, a0, 48
b .IPRED_PAETH_W_LOOPEND
.IPRED_PAETH_W_LOOP32:
andi a5, a3, 32
beqz a5, .IPRED_PAETH_W_LOOP16
vld vr2, a7, 0 //top
vpermi.w vr9, vr2, 0x0e
vsllwil.hu.bu vr2, vr2, 0
vsllwil.hu.bu vr9, vr9, 0
vabsd.hu vr5, vr0, vr1 //tdiff
vabsd.hu vr4, vr0, vr2 //ldiff
vabsd.hu vr10, vr0, vr9
vadd.h vr3, vr0, vr0
vadd.h vr6, vr1, vr2
vadd.h vr11, vr1, vr9
vabsd.hu vr6, vr3, vr6 //tldiff
vabsd.hu vr11, vr3, vr11 //tldiff
vsle.hu vr3, vr5, vr6
vbitsel.v vr7, vr0, vr2, vr3
vsle.hu vr3, vr4, vr5
vsle.hu vr8, vr4, vr6
vand.v vr3, vr3, vr8
vbitsel.v vr3, vr7, vr1, vr3
vsrlni.b.h vr3, vr3, 0
vsle.hu vr12, vr5, vr11
vbitsel.v vr7, vr0, vr9, vr12
vsle.hu vr12, vr10, vr5
vsle.hu vr8, vr10, vr11
vand.v vr12, vr12, vr8
vbitsel.v vr12, vr7, vr1, vr12
vsrlni.b.h vr12, vr12, 0
vpermi.w vr12, vr3, 0x44
vst vr12, a0, 0
vld vr2, a7, 16 //top
vpermi.w vr9, vr2, 0x0e
vsllwil.hu.bu vr2, vr2, 0
vsllwil.hu.bu vr9, vr9, 0
vabsd.hu vr5, vr0, vr1 //tdiff
vabsd.hu vr4, vr0, vr2 //ldiff
vabsd.hu vr10, vr0, vr9
vadd.h vr3, vr0, vr0
vadd.h vr6, vr1, vr2
vadd.h vr11, vr1, vr9
vabsd.hu vr6, vr3, vr6 //tldiff
vabsd.hu vr11, vr3, vr11 //tldiff
vsle.hu vr3, vr5, vr6
vbitsel.v vr7, vr0, vr2, vr3
vsle.hu vr3, vr4, vr5
vsle.hu vr8, vr4, vr6
vand.v vr3, vr3, vr8
vbitsel.v vr3, vr7, vr1, vr3
vsrlni.b.h vr3, vr3, 0
vsle.hu vr12, vr5, vr11
vbitsel.v vr7, vr0, vr9, vr12
vsle.hu vr12, vr10, vr5
vsle.hu vr8, vr10, vr11
vand.v vr12, vr12, vr8
vbitsel.v vr12, vr7, vr1, vr12
vsrlni.b.h vr12, vr12, 0
vpermi.w vr12, vr3, 0x44
vst vr12, a0, 16
b .IPRED_PAETH_W_LOOPEND
.IPRED_PAETH_W_LOOP16:
andi a5, a3, 16
beqz a5, .IPRED_PAETH_W_LOOP8
vld vr2, a7, 0 //top
vpermi.w vr9, vr2, 0x0e
vsllwil.hu.bu vr2, vr2, 0
vsllwil.hu.bu vr9, vr9, 0
vabsd.hu vr5, vr0, vr1 //tdiff
vabsd.hu vr4, vr0, vr2 //ldiff
vabsd.hu vr10, vr0, vr9
vadd.h vr3, vr0, vr0
vadd.h vr6, vr1, vr2
vadd.h vr11, vr1, vr9
vabsd.hu vr6, vr3, vr6 //tldiff
vabsd.hu vr11, vr3, vr11 //tldiff
vsle.hu vr3, vr5, vr6
vbitsel.v vr7, vr0, vr2, vr3
vsle.hu vr3, vr4, vr5
vsle.hu vr8, vr4, vr6
vand.v vr3, vr3, vr8
vbitsel.v vr3, vr7, vr1, vr3
vsrlni.b.h vr3, vr3, 0
vsle.hu vr12, vr5, vr11
vbitsel.v vr7, vr0, vr9, vr12
vsle.hu vr12, vr10, vr5
vsle.hu vr8, vr10, vr11
vand.v vr12, vr12, vr8
vbitsel.v vr12, vr7, vr1, vr12
vsrlni.b.h vr12, vr12, 0
vpermi.w vr12, vr3, 0x44
vst vr12, a0, 0
b .IPRED_PAETH_W_LOOPEND
.IPRED_PAETH_W_LOOP8:
andi a5, a3, 8
beqz a5, .IPRED_PAETH_W_LOOP4
fld.d f2, a7, 0 //top
vsllwil.hu.bu vr2, vr2, 0
vabsd.hu vr5, vr0, vr1 //tdiff
vabsd.hu vr4, vr0, vr2 //ldiff
vadd.h vr3, vr0, vr0
vadd.h vr6, vr1, vr2
vabsd.hu vr6, vr3, vr6 //tldiff
vsle.hu vr3, vr5, vr6
vbitsel.v vr7, vr0, vr2, vr3
vsle.hu vr3, vr4, vr5
vsle.hu vr8, vr4, vr6
vand.v vr3, vr3, vr8
vbitsel.v vr3, vr7, vr1, vr3
vsrlni.b.h vr3, vr3, 0
fst.d f3, a0, 0
b .IPRED_PAETH_W_LOOPEND
.IPRED_PAETH_W_LOOP4:
andi a5, a3, 4
beqz a5, .IPRED_PAETH_W_LOOPEND
fld.s f2, a7, 0 //top
vsllwil.hu.bu vr2, vr2, 0
vabsd.hu vr5, vr0, vr1 //tdiff
vabsd.hu vr4, vr0, vr2 //ldiff
vadd.h vr3, vr0, vr0
vadd.h vr6, vr1, vr2
vabsd.hu vr6, vr3, vr6 //tldiff
vsle.hu vr3, vr5, vr6
vbitsel.v vr7, vr0, vr2, vr3
vsle.hu vr3, vr4, vr5
vsle.hu vr8, vr4, vr6
vand.v vr3, vr3, vr8
vbitsel.v vr3, vr7, vr1, vr3
vsrlni.b.h vr3, vr3, 0
fst.s f3, a0, 0
b .IPRED_PAETH_W_LOOPEND
.IPRED_PAETH_W_LOOPEND:
add.d a0, a0, a1
addi.d a4, a4, -1
bnez a4, .IPRED_PAETH_H_LOOP
endfunc
const dav1d_sm_weights
.byte 0, 0
// bs = 2
.byte 255, 128
// bs = 4
.byte 255, 149, 85, 64
// bs = 8
.byte 255, 197, 146, 105, 73, 50, 37, 32
// bs = 16
.byte 255, 225, 196, 170, 145, 123, 102, 84
.byte 68, 54, 43, 33, 26, 20, 17, 16
// bs = 32
.byte 255, 240, 225, 210, 196, 182, 169, 157
.byte 145, 133, 122, 111, 101, 92, 83, 74
.byte 66, 59, 52, 45, 39, 34, 29, 25
.byte 21, 17, 14, 12, 10, 9, 8, 8
// bs = 64
.byte 255, 248, 240, 233, 225, 218, 210, 203
.byte 196, 189, 182, 176, 169, 163, 156, 150
.byte 144, 138, 133, 127, 121, 116, 111, 106
.byte 101, 96, 91, 86, 82, 77, 73, 69
.byte 65, 61, 57, 54, 50, 47, 44, 41
.byte 38, 35, 32, 29, 27, 25, 22, 20
.byte 18, 16, 15, 13, 12, 10, 9, 8
.byte 7, 6, 6, 5, 5, 4, 4, 4
endconst
// void ipred_smooth_lsx(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height
// HIGHBD_DECL_SUFFIX)
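// In C terms the smooth prediction below is roughly (a sketch; the weights come
// from dav1d_sm_weights, right = topleft[width], bottom = topleft[-height]):
//
//     const int hor = weights_hor[x], ver = weights_ver[y];
//     const int pred = hor * topleft[-(1 + y)] + (256 - hor) * right
//                    + ver * topleft[1 + x]    + (256 - ver) * bottom;
//     dst[x] = (pred + 256) >> 9;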
function ipred_smooth_8bpc_lsx
la.local a5, dav1d_sm_weights
add.d a6, a5, a3 //hor
add.d a5, a5, a4 //ver
add.d a7, a2, a3
sub.d t0, a2, a4
vldrepl.b vr0, a7, 0 //right
vldrepl.b vr1, t0, 0 //bottom
vsllwil.hu.bu vr0, vr0, 0
vsllwil.wu.hu vr0, vr0, 0
vsllwil.hu.bu vr1, vr1, 0
vsllwil.wu.hu vr1, vr1, 0
li.w t0, 256
vreplgr2vr.w vr6, t0
addi.d t0, a2, 1 //ptr topleft[x]
addi.d t3, a2, -1 //ptr topleft[y]
.IPRED_SMOOTH_H_LOOP:
vldrepl.b vr2, a5, 0 //ver[y]
vldrepl.b vr3, t3, 0 //topleft[y]
vsllwil.hu.bu vr2, vr2, 0
vsllwil.wu.hu vr2, vr2, 0
vsllwil.hu.bu vr3, vr3, 0
vsllwil.wu.hu vr3, vr3, 0
vsub.w vr7, vr6, vr2 //256-ver[y]
or t1, zero, zero //xx
srai.d t2, a3, 2 //loop max
.IPRED_SMOOTH_W_LOOP:
fldx.s f4, t0, t1 //topleft[x]
fldx.s f5, a6, t1 //hor[x]
vsllwil.hu.bu vr4, vr4, 0
vsllwil.wu.hu vr4, vr4, 0
vsllwil.hu.bu vr5, vr5, 0
vsllwil.wu.hu vr5, vr5, 0
vsub.w vr8, vr6, vr5 //256-hor[x]
vmul.w vr9, vr8, vr0
vmadd.w vr9, vr5, vr3
vmadd.w vr9, vr7, vr1
vmadd.w vr9, vr2, vr4 //pred
vadd.w vr9, vr9, vr6
vsrlni.h.w vr9, vr9, 9
vsrlni.b.h vr9, vr9, 0
fstx.s f9, a0, t1
addi.d t1, t1, 4
addi.d t2, t2, -1
bnez t2, .IPRED_SMOOTH_W_LOOP
.IPRED_SMOOTH_W_LOOP_END:
addi.d t3, t3, -1
addi.d a5, a5, 1
add.d a0, a0, a1
addi.d a4, a4, -1
bnez a4, .IPRED_SMOOTH_H_LOOP
endfunc
// void ipred_smooth_v_lsx(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height
// HIGHBD_DECL_SUFFIX)
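// Per pixel (sketch): dst[x] = (ver[y] * topleft[1 + x] + (256 - ver[y]) * bottom + 128) >> 8,
// with bottom = topleft[-height] and ver[] taken from dav1d_sm_weights.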
function ipred_smooth_v_8bpc_lsx
la.local a5, dav1d_sm_weights
add.d a5, a5, a4 //ver
sub.d t0, a2, a4
vldrepl.b vr0, t0, 0 //bottom
vsllwil.hu.bu vr0, vr0, 0
li.w t0, 256
vreplgr2vr.h vr2, t0
li.w t0, 128
vreplgr2vr.h vr3, t0
addi.d t0, a2, 1 //ptr topleft[x]
.IPRED_SMOOTH_V_H_LOOP:
vldrepl.b vr1, a5, 0 //ver[y]
vsllwil.hu.bu vr1, vr1, 0
vsub.h vr5, vr2, vr1 //256-ver[y]
or t1, zero, zero //xx
srai.d t2, a3, 3 //loop max
beqz t2, .IPRED_SMOOTH_V_W_LOOP4
.IPRED_SMOOTH_V_W_LOOP8:
fldx.d f4, t0, t1 //topleft[x]
vsllwil.hu.bu vr4, vr4, 0
vmul.h vr6, vr5, vr0
vmadd.h vr6, vr1, vr4 //pred
vadd.h vr6, vr6, vr3
vsrlni.b.h vr6, vr6, 8
fstx.d f6, a0, t1
addi.d t1, t1, 8
addi.d t2, t2, -1
bnez t2, .IPRED_SMOOTH_V_W_LOOP8
b .IPRED_SMOOTH_V_W_LOOP_END
.IPRED_SMOOTH_V_W_LOOP4:
fldx.s f4, t0, t1 //topleft[x]
vsllwil.hu.bu vr4, vr4, 0
vmul.h vr6, vr5, vr0
vmadd.h vr6, vr1, vr4 //pred
vadd.h vr6, vr6, vr3
vsrai.h vr6, vr6, 8
vsrlni.b.h vr6, vr6, 0
fstx.s f6, a0, t1
addi.d t1, t1, 4
.IPRED_SMOOTH_V_W_LOOP_END:
addi.d a5, a5, 1
add.d a0, a0, a1
addi.d a4, a4, -1
bnez a4, .IPRED_SMOOTH_V_H_LOOP
endfunc
// void ipred_smooth_h_lsx(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height
// HIGHBD_DECL_SUFFIX)
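// Per pixel (sketch): dst[x] = (hor[x] * topleft[-(1 + y)] + (256 - hor[x]) * right + 128) >> 8,
// with right = topleft[width] and hor[] taken from dav1d_sm_weights.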
function ipred_smooth_h_8bpc_lsx
la.local a5, dav1d_sm_weights
add.d a6, a5, a3 //hor
add.d a7, a2, a3
vldrepl.b vr0, a7, 0 //right
vsllwil.hu.bu vr0, vr0, 0
li.w t0, 256
vreplgr2vr.h vr1, t0
li.w t0, 128
vreplgr2vr.h vr2, t0
addi.d t3, a2, -1 //ptr topleft[y]
.IPRED_SMOOTH_H_H_LOOP:
vldrepl.b vr3, t3, 0 //topleft[y]
vsllwil.hu.bu vr3, vr3, 0
or t1, zero, zero //xx
srai.d t2, a3, 3 //loop max
beqz t2, .IPRED_SMOOTH_H_W_LOOP4
.IPRED_SMOOTH_H_W_LOOP8:
fldx.d f5, a6, t1 //hor[x]
vsllwil.hu.bu vr5, vr5, 0
vsub.h vr4, vr1, vr5 //256-hor[x]
vmul.h vr6, vr4, vr0
vmadd.h vr6, vr5, vr3 //pred
vadd.h vr6, vr6, vr2
vsrlni.b.h vr6, vr6, 8
fstx.d f6, a0, t1
addi.d t1, t1, 8
addi.d t2, t2, -1
bnez t2, .IPRED_SMOOTH_H_W_LOOP8
b .IPRED_SMOOTH_W_H_LOOP_END
.IPRED_SMOOTH_H_W_LOOP4:
fldx.s f5, a6, t1 //hor[x]
vsllwil.hu.bu vr5, vr5, 0
vsub.h vr4, vr1, vr5 //256-hor[x]
vmul.h vr6, vr4, vr0
vmadd.h vr6, vr5, vr3 //pred
vadd.h vr6, vr6, vr2
vsrai.h vr6, vr6, 8
vsrlni.b.h vr6, vr6, 0
fstx.s f6, a0, t1
addi.d t1, t1, 4
.IPRED_SMOOTH_W_H_LOOP_END:
addi.d t3, t3, -1
add.d a0, a0, a1
addi.d a4, a4, -1
bnez a4, .IPRED_SMOOTH_H_H_LOOP
endfunc
// void pal_pred_lsx(pixel *dst, const ptrdiff_t stride,
// const pixel *const pal, const uint8_t *idx,
// const int w, const int h)
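// Each idx byte packs two palette indices (low nibble first, values 0..7); a C
// sketch of the expansion performed below (illustrative, not the reference code
// verbatim):
//
//     for (int y = 0; y < h; y++, dst += stride)
//         for (int x = 0; x < w; x += 2) {
//             const int b = *idx++;
//             dst[x + 0] = pal[b & 0x7];
//             dst[x + 1] = pal[b >> 4];
//         }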
function pal_pred_8bpc_lsx
srai.d a7, a5, 2
.PAL_PRED_WLOOP4:
andi a6, a4, 4
beqz a6, .PAL_PRED_WLOOP8
fld.d f0, a3, 0
vsrli.b vr1, vr0, 4
vandi.b vr2, vr0, 7
vilvl.b vr0, vr1, vr2
fld.d f1, a2, 0
vshuf.b vr2, vr1, vr1, vr0
vstelm.w vr2, a0, 0, 0
add.d a0, a0, a1
vstelm.w vr2, a0, 0, 1
add.d a0, a0, a1
vstelm.w vr2, a0, 0, 2
add.d a0, a0, a1
vstelm.w vr2, a0, 0, 3
add.d a0, a0, a1
addi.d a3, a3, 8
addi.d a7, a7, -1
bnez a7, .PAL_PRED_WLOOP4
b .PAL_PRED_END
.PAL_PRED_WLOOP8:
andi a6, a4, 8
beqz a6, .PAL_PRED_WLOOP16
vld vr0, a3, 0
vsrli.b vr1, vr0, 4
vandi.b vr2, vr0, 7
vilvl.b vr0, vr1, vr2
vilvh.b vr3, vr1, vr2
fld.d f1, a2, 0
vshuf.b vr0, vr1, vr1, vr0
vshuf.b vr3, vr1, vr1, vr3
vstelm.d vr0, a0, 0, 0
add.d a0, a0, a1
vstelm.d vr0, a0, 0, 1
add.d a0, a0, a1
vstelm.d vr3, a0, 0, 0
add.d a0, a0, a1
vstelm.d vr3, a0, 0, 1
add.d a0, a0, a1
addi.d a3, a3, 16
addi.d a7, a7, -1
bnez a7, .PAL_PRED_WLOOP8
b .PAL_PRED_END
.PAL_PRED_WLOOP16:
andi a6, a4, 16
beqz a6, .PAL_PRED_WLOOP32
vld vr0, a3, 0
vld vr1, a3, 16
fld.d f6, a2, 0
vsrli.b vr2, vr0, 4
vandi.b vr3, vr0, 7
vsrli.b vr4, vr1, 4
vandi.b vr5, vr1, 7
vilvl.b vr0, vr2, vr3
vilvh.b vr1, vr2, vr3
vilvl.b vr2, vr4, vr5
vilvh.b vr3, vr4, vr5
vshuf.b vr0, vr6, vr6, vr0
vshuf.b vr1, vr6, vr6, vr1
vshuf.b vr2, vr6, vr6, vr2
vshuf.b vr3, vr6, vr6, vr3
vst vr0, a0, 0
add.d a0, a0, a1
vst vr1, a0, 0
add.d a0, a0, a1
vst vr2, a0, 0
add.d a0, a0, a1
vst vr3, a0, 0
add.d a0, a0, a1
addi.d a3, a3, 32
addi.d a7, a7, -1
bnez a7, .PAL_PRED_WLOOP16
b .PAL_PRED_END
.PAL_PRED_WLOOP32:
andi a6, a4, 32
beqz a6, .PAL_PRED_WLOOP64
vld vr0, a3, 0
vld vr1, a3, 16
vld vr2, a3, 32
vld vr3, a3, 48
fld.d f4, a2, 0
vsrli.b vr5, vr0, 4
vandi.b vr6, vr0, 7
vsrli.b vr7, vr1, 4
vandi.b vr8, vr1, 7
vsrli.b vr9, vr2, 4
vandi.b vr10, vr2, 7
vsrli.b vr11, vr3, 4
vandi.b vr12, vr3, 7
vilvl.b vr0, vr5, vr6
vilvh.b vr1, vr5, vr6
vilvl.b vr2, vr7, vr8
vilvh.b vr3, vr7, vr8
vilvl.b vr5, vr9, vr10
vilvh.b vr6, vr9, vr10
vilvl.b vr7, vr11, vr12
vilvh.b vr8, vr11, vr12
vshuf.b vr0, vr4, vr4, vr0
vshuf.b vr1, vr4, vr4, vr1
vshuf.b vr2, vr4, vr4, vr2
vshuf.b vr3, vr4, vr4, vr3
vshuf.b vr5, vr4, vr4, vr5
vshuf.b vr6, vr4, vr4, vr6
vshuf.b vr7, vr4, vr4, vr7
vshuf.b vr8, vr4, vr4, vr8
vst vr0, a0, 0
vst vr1, a0, 16
add.d a0, a0, a1
vst vr2, a0, 0
vst vr3, a0, 16
add.d a0, a0, a1
vst vr5, a0, 0
vst vr6, a0, 16
add.d a0, a0, a1
vst vr7, a0, 0
vst vr8, a0, 16
add.d a0, a0, a1
addi.d a3, a3, 64
addi.d a7, a7, -1
bnez a7, .PAL_PRED_WLOOP32
b .PAL_PRED_END
.PAL_PRED_WLOOP64:
vld vr0, a3, 0
vld vr1, a3, 16
fld.d f2, a2, 0
vsrli.b vr3, vr0, 4
vandi.b vr4, vr0, 7
vsrli.b vr5, vr1, 4
vandi.b vr6, vr1, 7
vilvl.b vr0, vr3, vr4
vilvh.b vr1, vr3, vr4
vilvl.b vr3, vr5, vr6
vilvh.b vr4, vr5, vr6
vshuf.b vr0, vr2, vr2, vr0
vshuf.b vr1, vr2, vr2, vr1
vshuf.b vr3, vr2, vr2, vr3
vshuf.b vr4, vr2, vr2, vr4
vst vr0, a0, 0
vst vr1, a0, 16
vst vr3, a0, 32
vst vr4, a0, 48
add.d a0, a0, a1
addi.d a3, a3, 32
addi.d a5, a5, -1
bnez a5, .PAL_PRED_WLOOP64
.PAL_PRED_END:
endfunc
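// apply_sign_vrh: per halfword, \out = (\s < 0) ? -\v : \v, i.e. the sign of \s is
// copied onto \v (note that \s, \v and \vrt0 are clobbered).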
.macro apply_sign_vrh v, s, vrzero, vrt0 ,out
vslt.h \vrt0, \s, \vrzero
vandn.v \s, \vrt0, \v
vsigncov.h \v, \vrt0, \v
vor.v \out, \s, \v
.endm
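// iclip_pixel_vrh: per halfword, \out = (\in0 < \in1) ? \in1 : min(\in0, \in2),
// i.e. clamp \in0 into [\in1, \in2] (typically [0, 255]); clobbers \in0 and the tmps.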
.macro iclip_pixel_vrh in0, in1, in2, tmp0, tmp1, out
vmin.h \tmp0, \in2, \in0
vslt.h \in0, \in0, \in1
vand.v \tmp1, \in0, \in1
vandn.v \tmp0, \in0, \tmp0
vor.v \out, \tmp1, \tmp0
.endm
.macro ipred_cfl_pred dst, stride, w, h, dc, ac, alpha
vreplgr2vr.h vr2, \alpha
vreplgr2vr.h vr7, \dc
li.w t1, 32
vreplgr2vr.h vr3, t1
vxor.v vr4, vr4, vr4
li.w t1, 255
vreplgr2vr.h vr6, t1
add.d t4, \w, \w
1:
or t1, zero, zero
or t2, zero, zero
srai.d t3, \w, 3
beqz t3, 3f
2:
vldx vr0, \ac, t1
vmul.h vr1, vr2, vr0
vadda.h vr0, vr1, vr3
vsrai.h vr0, vr0, 6
apply_sign_vrh vr0, vr1, vr4, vr5, vr0
vadd.h vr1, vr0, vr7
iclip_pixel_vrh vr1, vr4, vr6, vr5, vr8, vr0
vsrlni.b.h vr0, vr0, 0
fstx.d f0, \dst, t2
addi.d t1, t1, 16
addi.d t2, t2, 8
addi.d t3, t3, -1
bnez t3, 2b
b 4f
3:
fld.d f0, \ac, 0
vmul.h vr1, vr2, vr0
vadda.h vr0, vr1, vr3
vsrai.h vr0, vr0, 6
apply_sign_vrh vr0, vr1, vr4, vr5, vr0
vadd.h vr1, vr0, vr7
iclip_pixel_vrh vr1, vr4, vr6, vr5, vr8, vr0
vsrlni.b.h vr0, vr0, 0
fst.s f0, \dst, 0
4:
add.d \ac, \ac, t4
add.d \dst, \dst, \stride
addi.d \h, \h, -1
bnez \h, 1b
.endm
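// The ipred_cfl_pred macro above computes, per pixel, roughly (a C sketch; ac is
// the 16-bit AC buffer, alpha the CfL scaling factor, dc the DC value):
//
//     const int diff = alpha * ac[x];
//     const int v = apply_sign((abs(diff) + 32) >> 6, diff);
//     dst[x] = iclip(dc + v, 0, 255);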
function ipred_cfl_8bpc_lsx
ipred_dc_gen a2, a3, a4
ipred_cfl_pred a0, a1, a3, a4, t0, a5, a6
endfunc
function ipred_cfl_top_8bpc_lsx
ipred_dc_gen_top a2, a3
ipred_cfl_pred a0, a1, a3, a4, t0, a5, a6
endfunc
function ipred_cfl_left_8bpc_lsx
ipred_dc_gen_left a2, a4
ipred_cfl_pred a0, a1, a3, a4, t0, a5, a6
endfunc
function ipred_cfl_128_8bpc_lsx
li.w t0, 128
ipred_cfl_pred a0, a1, a3, a4, t0, a5, a6
endfunc
const dav1d_filter_intra_taps_lsx
//arr0 8*7
.byte -6, -5, -3, -3, -4, -3, -3, -3
.byte 10, 2, 1, 1, 6, 2, 2, 1
.byte 0, 10, 1, 1, 0, 6, 2, 2
.byte 0, 0, 10, 2, 0, 0, 6, 2
.byte 0, 0, 0, 10, 0, 0, 0, 6
.byte 12, 9, 7, 5, 2, 2, 2, 3
.byte 0, 0, 0, 0, 12, 9, 7, 5
//arr1
.byte -10, -6, -4, -2, -10, -6, -4, -2
.byte 16, 0, 0, 0, 16, 0, 0, 0
.byte 0, 16, 0, 0, 0, 16, 0, 0
.byte 0, 0, 16, 0, 0, 0, 16, 0
.byte 0, 0, 0, 16, 0, 0, 0, 16
.byte 10, 6, 4, 2, 0, 0, 0, 0
.byte 0, 0, 0, 0, 10, 6, 4, 2
//arr2
.byte -8, -8, -8, -8, -4, -4, -4, -4
.byte 8, 0, 0, 0, 4, 0, 0, 0
.byte 0, 8, 0, 0, 0, 4, 0, 0
.byte 0, 0, 8, 0, 0, 0, 4, 0
.byte 0, 0, 0, 8, 0, 0, 0, 4
.byte 16, 16, 16, 16, 0, 0, 0, 0
.byte 0, 0, 0, 0, 16, 16, 16, 16
//arr3
.byte -2, -1, -1, 0, -1, -1, -1, -1
.byte 8, 3, 2, 1, 4, 3, 2, 2
.byte 0, 8, 3, 2, 0, 4, 3, 2
.byte 0, 0, 8, 3, 0, 0, 4, 3
.byte 0, 0, 0, 8, 0, 0, 0, 4
.byte 10, 6, 4, 2, 3, 4, 4, 3
.byte 0, 0, 0, 0, 10, 6, 4, 3
//arr4
.byte -12, -10, -9, -8, -10, -9, -8, -7
.byte 14, 0, 0, 0, 12, 1, 0, 0
.byte 0, 14, 0, 0, 0, 12, 0, 0
.byte 0, 0, 14, 0, 0, 0, 12, 1
.byte 0, 0, 0, 14, 0, 0, 0, 12
.byte 14, 12, 11, 10, 0, 0, 1, 1
.byte 0, 0, 0, 0, 14, 12, 11, 9
endconst
.macro ipred_filter_load_p
vldrepl.b vr0, t0, 0
vldrepl.b vr1, a7, 0
vldrepl.b vr2, a7, 1
vldrepl.b vr3, a7, 2
vldrepl.b vr4, a7, 3
vldrepl.b vr5, t1, 0
vldrepl.b vr6, t1, -1
vsllwil.hu.bu vr0, vr0, 0
vsllwil.hu.bu vr1, vr1, 0
vsllwil.hu.bu vr2, vr2, 0
vsllwil.hu.bu vr3, vr3, 0
vsllwil.hu.bu vr4, vr4, 0
vsllwil.hu.bu vr5, vr5, 0
vsllwil.hu.bu vr6, vr6, 0
.endm
.macro ipred_filter_loadx_p
vldrepl.b vr0, t0, 0
vldrepl.b vr1, a7, 0
vldrepl.b vr2, a7, 1
vldrepl.b vr3, a7, 2
vldrepl.b vr4, a7, 3
vldrepl.b vr5, t1, 0
ldx.bu t3, t1, a1
vreplgr2vr.b vr6, t3
vsllwil.hu.bu vr0, vr0, 0
vsllwil.hu.bu vr1, vr1, 0
vsllwil.hu.bu vr2, vr2, 0
vsllwil.hu.bu vr3, vr3, 0
vsllwil.hu.bu vr4, vr4, 0
vsllwil.hu.bu vr5, vr5, 0
vsllwil.hu.bu vr6, vr6, 0
.endm
.macro ipred_filter_load_fltptr
fld.d f7, a6, 0
fld.d f8, a6, 8
fld.d f9, a6, 16
fld.d f10, a6, 24
fld.d f11, a6, 32
fld.d f12, a6, 40
fld.d f13, a6, 48
vsllwil.h.b vr7, vr7, 0
vsllwil.h.b vr8, vr8, 0
vsllwil.h.b vr9, vr9, 0
vsllwil.h.b vr10, vr10, 0
vsllwil.h.b vr11, vr11, 0
vsllwil.h.b vr12, vr12, 0
vsllwil.h.b vr13, vr13, 0
.endm
.macro ipred_filter_calc_acc
vmul.h vr7, vr7, vr0
vmadd.h vr7, vr8, vr1
vmadd.h vr7, vr9, vr2
vmadd.h vr7, vr10, vr3
vmadd.h vr7, vr11, vr4
vmadd.h vr7, vr12, vr5
vmadd.h vr7, vr13, vr6
vaddi.hu vr7, vr7, 8
vsrai.h vr7, vr7, 4
iclip_pixel_vrh vr7, vr14, vr15, vr9, vr10, vr8
vsrlni.b.h vr8, vr8, 0
.endm
// void ipred_filter_lsx(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft_in,
// const int width, const int height, int filt_idx,
// const int max_width, const int max_height
// HIGHBD_DECL_SUFFIX)
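// For each 4x2 output block the code below accumulates, roughly (a C sketch; flt
// points at one 7x8 tap set in dav1d_filter_intra_taps_lsx, p0..p6 are the pixels
// loaded by ipred_filter_load_p):
//
//     const int p0 = topleft, p1 = top[0], p2 = top[1], p3 = top[2], p4 = top[3],
//               p5 = left[0], p6 = left[1];
//     for (int i = 0; i < 8; i++) {   // 2 rows x 4 columns
//         int acc = flt[0*8+i]*p0 + flt[1*8+i]*p1 + flt[2*8+i]*p2 + flt[3*8+i]*p3
//                 + flt[4*8+i]*p4 + flt[5*8+i]*p5 + flt[6*8+i]*p6;
//         out[i] = iclip((acc + 8) >> 4, 0, 255);
//     }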
function ipred_filter_8bpc_lsx
andi a5, a5, 511
la.local a6, dav1d_filter_intra_taps_lsx
li.w a7, 56
mul.w a7, a7, a5
add.d a6, a6, a7 //*filter
addi.d a7, a2, 1 //*top
or a5, zero, zero //y
vxor.v vr14, vr14, vr14
li.w t0, 255
vreplgr2vr.h vr15, t0
.FILTER_LOOP_H:
sub.d t0, a2, a5 //*topleft
addi.d t1, t0, -1 //left
ctz.w t2, a3
addi.d t3, t2, -2
beqz t3, .FILTER_LOOP_W4
addi.d t3, t2, -3
beqz t3, .FILTER_LOOP_W8
addi.d t3, t2, -4
beqz t3, .FILTER_LOOP_W16
addi.d t3, t2, -5
beqz t3, .FILTER_LOOP_W32
.FILTER_LOOP_W4:
ipred_filter_load_p
or t3, a0, a0 //*ptr
ipred_filter_load_fltptr
ipred_filter_calc_acc
fst.s f8, t3, 0
add.d t3, t3, a1
vstelm.w vr8, t3, 0, 1
add.d t3, t3, a1
b .FILTER_LOOP_W_END
.FILTER_LOOP_W8:
ipred_filter_load_p
or t3, a0, a0
ipred_filter_load_fltptr
ipred_filter_calc_acc
fst.s f8, t3, 0
add.d t3, t3, a1
vstelm.w vr8, t3, 0, 1
add.d t3, t3, a1
addi.d t1, a0, 3
addi.d a7, a7, 4
addi.d t0, a7, -1
ipred_filter_loadx_p
addi.d t3, a0, 4
ipred_filter_load_fltptr
ipred_filter_calc_acc
fst.s f8, t3, 0
add.d t3, t3, a1
vstelm.w vr8, t3, 0, 1
add.d t3, t3, a1
b .FILTER_LOOP_W_END
.FILTER_LOOP_W16:
ipred_filter_load_p
or t3, a0, a0
ipred_filter_load_fltptr
ipred_filter_calc_acc
fst.s f8, t3, 0
add.d t3, t3, a1
vstelm.w vr8, t3, 0, 1
add.d t3, t3, a1
addi.d t1, a0, 3
addi.d a7, a7, 4
addi.d t0, a7, -1
ipred_filter_loadx_p
addi.d t3, a0, 4
ipred_filter_load_fltptr
ipred_filter_calc_acc
fst.s f8, t3, 0
add.d t3, t3, a1
vstelm.w vr8, t3, 0, 1
add.d t3, t3, a1
addi.d t1, a0, 7
addi.d a7, a7, 4
addi.d t0, a7, -1
ipred_filter_loadx_p
addi.d t3, a0, 8
ipred_filter_load_fltptr
ipred_filter_calc_acc
fst.s f8, t3, 0
add.d t3, t3, a1
vstelm.w vr8, t3, 0, 1
add.d t3, t3, a1
addi.d t1, a0, 11
addi.d a7, a7, 4
addi.d t0, a7, -1
ipred_filter_loadx_p
addi.d t3, a0, 12
ipred_filter_load_fltptr
ipred_filter_calc_acc
fst.s f8, t3, 0
add.d t3, t3, a1
vstelm.w vr8, t3, 0, 1
add.d t3, t3, a1
b .FILTER_LOOP_W_END
.FILTER_LOOP_W32:
ipred_filter_load_p
or t3, a0, a0
ipred_filter_load_fltptr
ipred_filter_calc_acc
fst.s f8, t3, 0
add.d t3, t3, a1
vstelm.w vr8, t3, 0, 1
add.d t3, t3, a1
addi.d t1, a0, 3
addi.d a7, a7, 4
addi.d t0, a7, -1
ipred_filter_loadx_p
addi.d t3, a0, 4
ipred_filter_load_fltptr
ipred_filter_calc_acc
fst.s f8, t3, 0
add.d t3, t3, a1
vstelm.w vr8, t3, 0, 1
add.d t3, t3, a1
addi.d t1, a0, 7
addi.d a7, a7, 4
addi.d t0, a7, -1
ipred_filter_loadx_p
addi.d t3, a0, 8
ipred_filter_load_fltptr
ipred_filter_calc_acc
fst.s f8, t3, 0
add.d t3, t3, a1
vstelm.w vr8, t3, 0, 1
add.d t3, t3, a1
addi.d t1, a0, 11
addi.d a7, a7, 4
addi.d t0, a7, -1
ipred_filter_loadx_p
addi.d t3, a0, 12
ipred_filter_load_fltptr
ipred_filter_calc_acc
fst.s f8, t3, 0
add.d t3, t3, a1
vstelm.w vr8, t3, 0, 1
add.d t3, t3, a1
addi.d t1, a0, 15
addi.d a7, a7, 4
addi.d t0, a7, -1
ipred_filter_loadx_p
addi.d t3, a0, 16
ipred_filter_load_fltptr
ipred_filter_calc_acc
fst.s f8, t3, 0
add.d t3, t3, a1
vstelm.w vr8, t3, 0, 1
add.d t3, t3, a1
addi.d t1, a0, 19
addi.d a7, a7, 4
addi.d t0, a7, -1
ipred_filter_loadx_p
addi.d t3, a0, 20
ipred_filter_load_fltptr
ipred_filter_calc_acc
fst.s f8, t3, 0
add.d t3, t3, a1
vstelm.w vr8, t3, 0, 1
add.d t3, t3, a1
addi.d t1, a0, 23
addi.d a7, a7, 4
addi.d t0, a7, -1
ipred_filter_loadx_p
addi.d t3, a0, 24
ipred_filter_load_fltptr
ipred_filter_calc_acc
fst.s f8, t3, 0
add.d t3, t3, a1
vstelm.w vr8, t3, 0, 1
add.d t3, t3, a1
addi.d t1, a0, 27
addi.d a7, a7, 4
addi.d t0, a7, -1
ipred_filter_loadx_p
addi.d t3, a0, 28
ipred_filter_load_fltptr
ipred_filter_calc_acc
fst.s f8, t3, 0
add.d t3, t3, a1
vstelm.w vr8, t3, 0, 1
add.d t3, t3, a1
.FILTER_LOOP_W_END:
add.d a7, a0, a1
add.d t2, a1, a1
add.d a0, a0, t2
addi.d a5, a5, 2
blt a5, a4, .FILTER_LOOP_H
endfunc
const dav1d_dr_intra_derivative
// Values that are 0 will never be used
.short 0 // Angles:
.short 1023, 0 // 3, 93, 183
.short 547 // 6, 96, 186
.short 372, 0, 0 // 9, 99, 189
.short 273 // 14, 104, 194
.short 215, 0 // 17, 107, 197
.short 178 // 20, 110, 200
.short 151, 0 // 23, 113, 203 (113 & 203 are base angles)
.short 132 // 26, 116, 206
.short 116, 0 // 29, 119, 209
.short 102, 0 // 32, 122, 212
.short 90 // 36, 126, 216
.short 80, 0 // 39, 129, 219
.short 71 // 42, 132, 222
.short 64, 0 // 45, 135, 225 (45 & 135 are base angles)
.short 57 // 48, 138, 228
.short 51, 0 // 51, 141, 231
.short 45, 0 // 54, 144, 234
.short 40 // 58, 148, 238
.short 35, 0 // 61, 151, 241
.short 31 // 64, 154, 244
.short 27, 0 // 67, 157, 247 (67 & 157 are base angles)
.short 23 // 70, 160, 250
.short 19, 0 // 73, 163, 253
.short 15, 0 // 76, 166, 256
.short 11, 0 // 81, 171, 261
.short 7 // 84, 174, 264
.short 3 // 87, 177, 267
endconst
const z1_upsample_edge_kernel
.short -1, 9, 9, -1, -1, 9, 9, -1
endconst
const ipred_filter_edge_kernel1
.short 0, 4, 8, 4, 0, 4, 8, 4
.short 0, 5, 6, 5, 0, 5, 6, 5
.short 2, 4, 4, 4, 2, 4, 4, 4
endconst
const ipred_filter_edge_kernel2
.short 0, 0, 0, 0, 0, 0, 0, 0
.short 0, 0, 0, 0, 0, 0, 0, 0
.short 2, 2, 2, 2, 2, 2, 2, 2
endconst
.macro z1_upsample_edge_calc_loop
vsllwil.hu.bu vr10, vr7, 0
vsllwil.hu.bu vr11, vr11, 0
vsllwil.hu.bu vr12, vr12, 0
vsllwil.hu.bu vr13, vr13, 0
vmul.h vr10, vr10, vr0
vmul.h vr11, vr11, vr0
vmul.h vr12, vr12, vr0
vmul.h vr13, vr13, vr0
vhaddw.w.h vr10, vr10, vr10
vhaddw.w.h vr11, vr11, vr11
vhaddw.w.h vr12, vr12, vr12
vhaddw.w.h vr13, vr13, vr13
vhaddw.d.w vr10, vr10, vr10
vhaddw.d.w vr11, vr11, vr11
vhaddw.d.w vr12, vr12, vr12
vhaddw.d.w vr13, vr13, vr13
vpackev.h vr10, vr11, vr10
vpackev.h vr11, vr13, vr12
vpackev.w vr12, vr11, vr10 //s:01234567
vsrari.h vr12, vr12, 4
iclip_pixel_vrh vr12, vr15, vr16, vr10, vr11, vr12
vsrlni.b.h vr12, vr12, 0 //out: 13579...
vbsrl.v vr11, vr7, 1 //out:02468...
vilvl.b vr13, vr12, vr11
.endm
.macro z1_upsample_edge_data_init1
vbsrl.v vr11, vr7, 1
vbsrl.v vr12, vr7, 2
vbsrl.v vr13, vr7, 3
z1_upsample_edge_calc_loop
.endm
.macro z1_upsample_edge_data_init2
vbsrl.v vr11, vr7, 1
vbsrl.v vr12, vr7, 2
vextrins.b vr12, vr12, 0x76
vbsrl.v vr13, vr7, 3
vextrins.b vr13, vr13, 0x65
vextrins.b vr13, vr13, 0x75
z1_upsample_edge_calc_loop
.endm
.macro z1_upsample_edge_calc_other
vsllwil.hu.bu vr10, vr7, 0
vmul.h vr10, vr10, vr0
vhaddw.w.h vr10, vr10, vr10
vhaddw.d.w vr10, vr10, vr10
vreplvei.h vr12, vr10, 0 //s0-s7
vsrari.h vr12, vr12, 4
iclip_pixel_vrh vr12, vr15, vr16, vr10, vr11, vr12
vsrlni.b.h vr12, vr12, 0
vilvl.b vr13, vr12, vr7
.endm
.macro z1_filter_edge_calc_loop1
vmul.h vr10, vr10, vr1
vmul.h vr11, vr11, vr1
vmul.h vr12, vr12, vr1
vmul.h vr13, vr13, vr1
vhaddw.w.h vr10, vr10, vr10
vhaddw.w.h vr11, vr11, vr11
vhaddw.w.h vr12, vr12, vr12
vhaddw.w.h vr13, vr13, vr13
vhaddw.d.w vr10, vr10, vr10
vhaddw.d.w vr11, vr11, vr11
vhaddw.d.w vr12, vr12, vr12
vhaddw.d.w vr13, vr13, vr13
vpackev.h vr10, vr11, vr10
vpackev.h vr11, vr13, vr12
vpackev.w vr10, vr11, vr10 //s:01234567
.endm
.macro z1_filter_edge_calc_loop2
vsllwil.hu.bu vr13, vr13, 0
vmadd.h vr10, vr13, vr6
vsrari.h vr12, vr10, 4
vsrlni.b.h vr12, vr12, 0 //out: 0-7
.endm
.macro z1_filter_edge_calc_other
vsllwil.hu.bu vr10, vr10, 0
vmul.h vr11, vr10, vr1
vhaddw.w.h vr11, vr11, vr11
vhaddw.d.w vr11, vr11, vr11
vreplvei.h vr12, vr11, 4
vextrins.h vr12, vr11, 0x00
vreplvei.h vr13, vr10, 1
vmadd.h vr12, vr13, vr6
vsrari.h vr12, vr12, 4
vsrlni.b.h vr12, vr12, 0 //out: 0-7
.endm
.macro z1_filter_edge_data_init1
vbsll.v vr10, vr7, 1
vextrins.b vr10, vr10, 0x01
vbsrl.v vr12, vr7, 1
vbsrl.v vr13, vr7, 2
vsllwil.hu.bu vr10, vr10, 0
vsllwil.hu.bu vr11, vr7, 0
vsllwil.hu.bu vr12, vr12, 0
vsllwil.hu.bu vr13, vr13, 0
z1_filter_edge_calc_loop1
.endm
.macro z1_filter_edge_data_init2
vbsrl.v vr11, vr7, 1
vbsrl.v vr12, vr7, 2
vbsrl.v vr13, vr7, 3
vsllwil.hu.bu vr10, vr7, 0
vsllwil.hu.bu vr11, vr11, 0
vsllwil.hu.bu vr12, vr12, 0
vsllwil.hu.bu vr13, vr13, 0
z1_filter_edge_calc_loop1
.endm
.macro z1_filter_edge_data_init3
vbsrl.v vr11, vr7, 1
vbsrl.v vr12, vr7, 2
vbsrl.v vr13, vr7, 3
vextrins.b vr13, vr13, 0x76
vsllwil.hu.bu vr10, vr7, 0
vsllwil.hu.bu vr11, vr11, 0
vsllwil.hu.bu vr12, vr12, 0
vsllwil.hu.bu vr13, vr13, 0
z1_filter_edge_calc_loop1
.endm
.macro z1_filter_edge_data_init4
vbsll.v vr10, vr7, 1
vextrins.b vr10, vr10, 0x01
vbsrl.v vr12, vr7, 1
vbsrl.v vr13, vr7, 2
vextrins.b vr13, vr13, 0x76
vsllwil.hu.bu vr10, vr10, 0
vsllwil.hu.bu vr11, vr7, 0
vsllwil.hu.bu vr12, vr12, 0
vsllwil.hu.bu vr13, vr13, 0
z1_filter_edge_calc_loop1
.endm
.macro pixel_set_8bpc_allw dst_ptr, src_ptr, width, tmp0, tmp1
vldrepl.b vr10, \src_ptr, 0
or \tmp1, zero, zero
srai.d \tmp0, \width, 4
beqz \tmp0, 2f
1:
vstx vr10, \dst_ptr, \tmp1
addi.d \tmp1, \tmp1, 16
addi.d \tmp0, \tmp0, -1
bnez \tmp0, 1b
2:
andi \tmp0, \width, 8
beqz \tmp0, 3f
fstx.d f10, \dst_ptr, \tmp1
addi.d \tmp1, \tmp1, 8
3:
andi \tmp0, \width, 4
beqz \tmp0, 4f
fstx.s f10, \dst_ptr, \tmp1
addi.d \tmp1, \tmp1, 4
4:
andi \tmp0, \width, 2
beqz \tmp0, 5f
ldx.bu \tmp0, \src_ptr, zero
stx.b \tmp0, \dst_ptr, \tmp1
addi.d \tmp1, \tmp1, 1
stx.b \tmp0, \dst_ptr, \tmp1
addi.d \tmp1, \tmp1, 1
5:
andi \tmp0, \width, 1
beqz \tmp0, 6f
ldx.bu \tmp0, \src_ptr, zero
stx.b \tmp0, \dst_ptr, \tmp1
6:
.endm
// void ipred_z1_lsx(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft_in,
// const int width, const int height, int angle,
// const int max_width, const int max_height
// HIGHBD_DECL_SUFFIX)
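// After the optional edge upsampling/filtering prepared below, a z1 directional
// fill over the resulting top_out buffer conventionally looks like this in C
// (a sketch of the usual formulation, not code taken from this file; dx comes
// from dav1d_dr_intra_derivative):
//
//     const int base_inc = 1 + upsample_above;
//     for (int y = 0, xpos = dx; y < height; y++, dst += stride, xpos += dx) {
//         const int frac = xpos & 0x3e;
//         for (int x = 0, base = xpos >> 6; x < width; x++, base += base_inc) {
//             if (base < max_base_x) {
//                 const int v = top[base] * (64 - frac) + top[base + 1] * frac;
//                 dst[x] = (v + 32) >> 6;
//             } else {
//                 memset(&dst[x], top[max_base_x], width - x);
//                 break;
//             }
//         }
//     }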
function ipred_z1_8bpc_lsx
addi.d a2, a2, 1 //&topleft_in[1]
addi.d sp, sp, -128
or t2, sp, sp //top_out
srai.d a6, a5, 9
andi a6, a6, 1 //is_sum
srai.d a7, a5, 10 //enable_intra_edge_filter
andi a5, a5, 511
la.local t0, dav1d_dr_intra_derivative
andi t1, a5, 0xFFE
ldx.hu t1, t0, t1 //dx
beqz a7, .IPRED_Z1_NOTUA
add.d t3, a3, a4
li.w t4, 90
sub.w t4, t4, a5
// ipred_get_upsample t5:upsample_above
li.w t6, 16
sra.d t6, t6, a6
bge t6, t3, .Z1_GETUS1
addi.d t5, zero, 0
b .Z1_GETUS2
.Z1_GETUS1:
addi.d t5, zero, 1
.Z1_GETUS2:
li.w t6, 40
blt t4, t6, .Z1_GETUS3
addi.d t6, zero, 0
b .Z1_GETUS4
.Z1_GETUS3:
addi.d t6, zero, 1
.Z1_GETUS4:
and t5, t5, t6
beqz t5, .IPRED_Z1_NOTUA
la.local t0, z1_upsample_edge_kernel
vld vr0, t0, 0 //kernel
vxor.v vr15, vr15, vr15
li.w t0, 255
vreplgr2vr.h vr16, t0
.Z1_UEDGE_W4:
andi t6, a3, 4
beqz t6, .Z1_UEDGE_W8
.Z1_UEDGE_W4_H4:
andi t6, a4, 4
beqz t6, .Z1_UEDGE_W4_H8
//0-6
vld vr7, a2, -1
vbsrl.v vr11, vr7, 1
vbsrl.v vr12, vr7, 2
vextrins.b vr12, vr12, 0x76
vbsrl.v vr13, vr7, 3
z1_upsample_edge_calc_loop
fst.d f13, t2, 0
vstelm.w vr13, t2, 8, 2
vstelm.h vr13, t2, 12, 6
ld.bu t7, a2, 7
st.b t7, t2, 14
b .Z1_UEDGE_END
.Z1_UEDGE_W4_H8:
andi t6, a4, 8
beqz t6, .Z1_UEDGE_W4_H16
//0-7
vld vr7, a2, -1
z1_upsample_edge_data_init2
vst vr13, t2, 0
//8-10
vldrepl.b vr7, a2, 7
z1_upsample_edge_calc_other
vstelm.w vr13, t2, 16, 0
vstelm.h vr13, t2, 20, 2
ld.bu t7, a2, 7
st.b t7, t2, 22
b .Z1_UEDGE_END
.Z1_UEDGE_W4_H16:
andi t6, a4, 16
beqz t6, .Z1_UEDGE_W4_H32
//0-7
vld vr7, a2, -1
z1_upsample_edge_data_init2
vst vr13, t2, 0
//8-15
vldrepl.b vr7, a2, 7
z1_upsample_edge_calc_other
vst vr13, t2, 16
//16-18
vstelm.w vr13, t2, 32, 0
vstelm.h vr13, t2, 36, 2
ld.bu t7, a2, 7
st.b t7, t2, 38
b .Z1_UEDGE_END
.Z1_UEDGE_W4_H32:
andi t6, a4, 32
beqz t6, .Z1_UEDGE_W4_H64
//0-7
vld vr7, a2, -1
z1_upsample_edge_data_init2
vst vr13, t2, 0
//8-15
vldrepl.b vr7, a2, 7
z1_upsample_edge_calc_other
vst vr13, t2, 16
vst vr13, t2, 32 //16-23
vst vr13, t2, 48 //24-31
//32-34
vstelm.w vr13, t2, 64, 0
vstelm.h vr13, t2, 68, 2
ld.bu t7, a2, 7
st.b t7, t2, 70
b .Z1_UEDGE_END
.Z1_UEDGE_W4_H64:
//0-7
vld vr7, a2, -1
z1_upsample_edge_data_init2
vst vr13, t2, 0
//8-15
vldrepl.b vr7, a2, 7
z1_upsample_edge_calc_other
vst vr13, t2, 16
vst vr13, t2, 32 //16-23
vst vr13, t2, 48 //24-31
vst vr13, t2, 64 //32-39
vst vr13, t2, 80 //40-47
vst vr13, t2, 96 //48-55
vst vr13, t2, 112 //56-63
//64-66
vstelm.w vr13, t2, 128, 0
vstelm.h vr13, t2, 132, 2
ld.bu t7, a2, 7
st.b t7, t2, 134
b .Z1_UEDGE_END
.Z1_UEDGE_W8:
andi t6, a3, 8
beqz t6, .Z1_UEDGE_W16
.Z1_UEDGE_W8_H4:
andi t6, a4, 4
beqz t6, .Z1_UEDGE_W8_H8
//0-7
vld vr7, a2, -1
z1_upsample_edge_data_init1
vst vr13, t2, 0
//8-15
vld vr7, a2, 7
vbsrl.v vr11, vr7, 1
vbsrl.v vr12, vr7, 2
vextrins.b vr12, vr12, 0x32
vbsrl.v vr13, vr7, 3
vextrins.b vr13, vr13, 0x21
vextrins.b vr13, vr13, 0x31
z1_upsample_edge_calc_loop
vstelm.w vr13, t2, 16, 0
vstelm.h vr13, t2, 20, 2
ld.bu t7, a2, 11
st.b t7, t2, 22
b .Z1_UEDGE_END
.Z1_UEDGE_W8_H8:
andi t6, a4, 8
beqz t6, .Z1_UEDGE_W8_H16
//0-7
vld vr7, a2, -1
z1_upsample_edge_data_init1
vst vr13, t2, 0
//8-14
vld vr7, a2, 7
vbsrl.v vr11, vr7, 1
vbsrl.v vr12, vr7, 2
vextrins.b vr12, vr12, 0x76
vbsrl.v vr13, vr7, 3
z1_upsample_edge_calc_loop
fst.d f13, t2, 16
vstelm.w vr13, t2, 24, 2
vstelm.h vr13, t2, 28, 6
ld.bu t7, a2, 15
st.b t7, t2, 30
b .Z1_UEDGE_END
.Z1_UEDGE_W8_H16:
andi t6, a4, 16
beqz t6, .Z1_UEDGE_W8_H32
//0-7
vld vr7, a2, -1
z1_upsample_edge_data_init1
vst vr13, t2, 0
//8-15
vld vr7, a2, 7
z1_upsample_edge_data_init2
vst vr13, t2, 16
//16-22
vldrepl.b vr7, a2, 15
z1_upsample_edge_calc_other
fst.d f13, t2, 32
vstelm.w vr13, t2, 40, 2
vstelm.h vr13, t2, 44, 6
ld.bu t7, a2, 15
st.b t7, t2, 46
b .Z1_UEDGE_END
.Z1_UEDGE_W8_H32:
andi t6, a4, 32
beqz t6, .Z1_UEDGE_W8_H64
//0-7
vld vr7, a2, -1
z1_upsample_edge_data_init1
vst vr13, t2, 0
//8-15
vld vr7, a2, 7
z1_upsample_edge_data_init2
vst vr13, t2, 16
//16-23
vldrepl.b vr7, a2, 15
z1_upsample_edge_calc_other
vst vr13, t2, 32
vst vr13, t2, 48 //24-31
//32-38
fst.d f13, t2, 64
vstelm.w vr13, t2, 72, 2
vstelm.h vr13, t2, 76, 6
ld.bu t7, a2, 15
st.b t7, t2, 78
b .Z1_UEDGE_END
.Z1_UEDGE_W8_H64:
//0-7
vld vr7, a2, -1
z1_upsample_edge_data_init1
vst vr13, t2, 0
//8-15
vld vr7, a2, 7
z1_upsample_edge_data_init2
vst vr13, t2, 16
//16-23
vldrepl.b vr7, a2, 15
z1_upsample_edge_calc_other
vst vr13, t2, 32
vst vr13, t2, 48 //24-31
vst vr13, t2, 64 //32-39
vst vr13, t2, 80 //40-47
vst vr13, t2, 96 //48-55
vst vr13, t2, 112 //56-63
//64-70
fst.d f13, t2, 128
vstelm.w vr13, t2, 136, 2
vstelm.h vr13, t2, 140, 6
ld.bu t7, a2, 15
st.b t7, t2, 142
b .Z1_UEDGE_END
.Z1_UEDGE_W16:
andi t6, a3, 16
beqz t6, .Z1_UEDGE_W32
.Z1_UEDGE_W16_H4:
andi t6, a4, 4
beqz t6, .Z1_UEDGE_W16_H8
//0-7
vld vr7, a2, -1
z1_upsample_edge_data_init1
vst vr13, t2, 0
//8-15
vld vr7, a2, 7
z1_upsample_edge_data_init1
vst vr13, t2, 16
//16-18
vld vr7, a2, 15
z1_upsample_edge_data_init1
vstelm.w vr13, t2, 32, 0
vstelm.h vr13, t2, 36, 2
ld.bu t7, a2, 19
st.b t7, t2, 38
b .Z1_UEDGE_END
.Z1_UEDGE_W16_H8:
andi t6, a4, 8
beqz t6, .Z1_UEDGE_W16_H16
//0-7
vld vr7, a2, -1
z1_upsample_edge_data_init1
vst vr13, t2, 0
//8-15
vld vr7, a2, 7
z1_upsample_edge_data_init1
vst vr13, t2, 16
//16-22
vld vr7, a2, 15
vbsrl.v vr11, vr7, 1
vbsrl.v vr12, vr7, 2
vextrins.b vr12, vr12, 0x76
vbsrl.v vr13, vr7, 3
z1_upsample_edge_calc_loop
fst.d f13, t2, 32
vstelm.w vr13, t2, 40, 2
vstelm.h vr13, t2, 44, 6
ld.bu t7, a2, 23
st.b t7, t2, 46
b .Z1_UEDGE_END
.Z1_UEDGE_W16_H16:
andi t6, a4, 16
beqz t6, .Z1_UEDGE_W16_H32
//0-7
vld vr7, a2, -1
z1_upsample_edge_data_init1
vst vr13, t2, 0
//8-15
vld vr7, a2, 7
z1_upsample_edge_data_init1
vst vr13, t2, 16
//16-23
vld vr7, a2, 15
z1_upsample_edge_data_init1
vst vr13, t2, 32
//24-30
vld vr7, a2, 23
vbsrl.v vr11, vr7, 1
vbsrl.v vr12, vr7, 2
vextrins.b vr12, vr12, 0x76
vbsrl.v vr13, vr7, 3
z1_upsample_edge_calc_loop
fst.d f13, t2, 48
vstelm.w vr13, t2, 56, 2
vstelm.h vr13, t2, 60, 6
ld.bu t7, a2, 31
st.b t7, t2, 62
b .Z1_UEDGE_END
.Z1_UEDGE_W16_H32:
andi t6, a4, 32
beqz t6, .Z1_UEDGE_W16_H64
//0-7
vld vr7, a2, -1
z1_upsample_edge_data_init1
vst vr13, t2, 0
//8-15
vld vr7, a2, 7
z1_upsample_edge_data_init1
vst vr13, t2, 16
//16-23
vld vr7, a2, 15
z1_upsample_edge_data_init1
vst vr13, t2, 32
//24-31
vld vr7, a2, 23
z1_upsample_edge_data_init2
vst vr13, t2, 48
//32-39
vldrepl.b vr7, a2, 31
z1_upsample_edge_calc_other
vst vr13, t2, 64
//40-46
fst.d f13, t2, 80
vstelm.w vr13, t2, 88, 2
vstelm.h vr13, t2, 92, 6
ld.bu t7, a2, 31
st.b t7, t2, 94
b .Z1_UEDGE_END
.Z1_UEDGE_W16_H64:
//0-7
vld vr7, a2, -1
z1_upsample_edge_data_init1
vst vr13, t2, 0
//8-15
vld vr7, a2, 7
z1_upsample_edge_data_init1
vst vr13, t2, 16
//16-23
vld vr7, a2, 15
z1_upsample_edge_data_init1
vst vr13, t2, 32
//24-31
vld vr7, a2, 23
z1_upsample_edge_data_init2
vst vr13, t2, 48
//32-39
vldrepl.b vr7, a2, 31
z1_upsample_edge_calc_other
vst vr13, t2, 64
vst vr13, t2, 80 //40-47
vst vr13, t2, 96 //48-55
vst vr13, t2, 112 //56-63
vst vr13, t2, 128 //64-71
//72-78
fst.d f13, t2, 144
vstelm.w vr13, t2, 152, 2
vstelm.h vr13, t2, 156, 6
ld.bu t7, a2, 31
st.b t7, t2, 158
b .Z1_UEDGE_END
.Z1_UEDGE_W32:
andi t6, a3, 32
beqz t6, .Z1_UEDGE_W64
.Z1_UEDGE_W32_H8:
andi t6, a4, 8
beqz t6, .Z1_UEDGE_W32_H16
//0-7
vld vr7, a2, -1
z1_upsample_edge_data_init1
vst vr13, t2, 0
//8-15
vld vr7, a2, 7
z1_upsample_edge_data_init1
vst vr13, t2, 16
//16-23
vld vr7, a2, 15
z1_upsample_edge_data_init1
vst vr13, t2, 32
//24-31
vld vr7, a2, 23
z1_upsample_edge_data_init1
vst vr13, t2, 48
//32-38
vld vr7, a2, 31
vbsrl.v vr11, vr7, 1
vbsrl.v vr12, vr7, 2
vextrins.b vr12, vr12, 0x76
vbsrl.v vr13, vr7, 3
z1_upsample_edge_calc_loop
fst.d f13, t2, 64
vstelm.w vr13, t2, 72, 2
vstelm.h vr13, t2, 76, 6
ld.bu t7, a2, 39
st.b t7, t2, 78
b .Z1_UEDGE_END
.Z1_UEDGE_W32_H16:
andi t6, a4, 16
beqz t6, .Z1_UEDGE_W32_H32
//0-7
vld vr7, a2, -1
z1_upsample_edge_data_init1
vst vr13, t2, 0
//8-15
vld vr7, a2, 7
z1_upsample_edge_data_init1
vst vr13, t2, 16
//16-23
vld vr7, a2, 15
z1_upsample_edge_data_init1
vst vr13, t2, 32
//24-31
vld vr7, a2, 23
z1_upsample_edge_data_init1
vst vr13, t2, 48
//32-39
vld vr7, a2, 31
z1_upsample_edge_data_init1
vst vr13, t2, 64
//40-46
vld vr7, a2, 39
vbsrl.v vr11, vr7, 1
vbsrl.v vr12, vr7, 2
vextrins.b vr12, vr12, 0x76
vbsrl.v vr13, vr7, 3
z1_upsample_edge_calc_loop
fst.d f13, t2, 80
vstelm.w vr13, t2, 88, 2
vstelm.h vr13, t2, 92, 6
ld.bu t7, a2, 47
st.b t7, t2, 94
b .Z1_UEDGE_END
.Z1_UEDGE_W32_H32:
andi t6, a4, 32
beqz t6, .Z1_UEDGE_W32_H64
//0-7
vld vr7, a2, -1
z1_upsample_edge_data_init1
vst vr13, t2, 0
//8-15
vld vr7, a2, 7
z1_upsample_edge_data_init1
vst vr13, t2, 16
//16-23
vld vr7, a2, 15
z1_upsample_edge_data_init1
vst vr13, t2, 32
//24-31
vld vr7, a2, 23
z1_upsample_edge_data_init1
vst vr13, t2, 48
//32-39
vld vr7, a2, 31
z1_upsample_edge_data_init1
vst vr13, t2, 64
//40-47
vld vr7, a2, 39
z1_upsample_edge_data_init1
vst vr13, t2, 80
//48-55
vld vr7, a2, 47
z1_upsample_edge_data_init1
vst vr13, t2, 96
//56-62
vld vr7, a2, 55
vbsrl.v vr11, vr7, 1
vbsrl.v vr12, vr7, 2
vextrins.b vr12, vr12, 0x76
vbsrl.v vr13, vr7, 3
z1_upsample_edge_calc_loop
fst.d f13, t2, 112
vstelm.w vr13, t2, 120, 2
vstelm.h vr13, t2, 124, 6
ld.bu t7, a2, 63
st.b t7, t2, 126
b .Z1_UEDGE_END
.Z1_UEDGE_W32_H64:
//0-7
vld vr7, a2, -1
z1_upsample_edge_data_init1
vst vr13, t2, 0
//8-15
vld vr7, a2, 7
z1_upsample_edge_data_init1
vst vr13, t2, 16
//16-23
vld vr7, a2, 15
z1_upsample_edge_data_init1
vst vr13, t2, 32
//24-31
vld vr7, a2, 23
z1_upsample_edge_data_init1
vst vr13, t2, 48
//32-39
vld vr7, a2, 31
z1_upsample_edge_data_init1
vst vr13, t2, 64
//40-47
vld vr7, a2, 39
z1_upsample_edge_data_init1
vst vr13, t2, 80
//48-55
vld vr7, a2, 47
z1_upsample_edge_data_init1
vst vr13, t2, 96
//56-63
vld vr7, a2, 55
z1_upsample_edge_data_init2
vst vr13, t2, 112
//64-71
vldrepl.b vr7, a2, 63
z1_upsample_edge_calc_other
vst vr13, t2, 128
vst vr13, t2, 144 //72-79
vst vr13, t2, 160 //80-87
//88-94
fst.d f13, t2, 176
vstelm.w vr13, t2, 184, 2
vstelm.h vr13, t2, 188, 6
ld.bu t7, a2, 63
st.b t7, t2, 190
b .Z1_UEDGE_END
.Z1_UEDGE_W64:
.Z1_UEDGE_W64_H16:
andi t6, a4, 16
beqz t6, .Z1_UEDGE_W64_H32
//0-7
vld vr7, a2, -1
z1_upsample_edge_data_init1
vst vr13, t2, 0
//8-15
vld vr7, a2, 7
z1_upsample_edge_data_init1
vst vr13, t2, 16
//16-23
vld vr7, a2, 15
z1_upsample_edge_data_init1
vst vr13, t2, 32
//24-31
vld vr7, a2, 23
z1_upsample_edge_data_init1
vst vr13, t2, 48
//32-39
vld vr7, a2, 31
z1_upsample_edge_data_init1
vst vr13, t2, 64
//40-47
vld vr7, a2, 39
z1_upsample_edge_data_init1
vst vr13, t2, 80
//48-55
vld vr7, a2, 47
z1_upsample_edge_data_init1
vst vr13, t2, 96
//56-63
vld vr7, a2, 55
z1_upsample_edge_data_init1
vst vr13, t2, 112
//64-71
vld vr7, a2, 63
z1_upsample_edge_data_init1
vst vr13, t2, 128
//72-78
vld vr7, a2, 71
z1_upsample_edge_data_init2
fst.d f13, t2, 144
vstelm.w vr13, t2, 152, 2
vstelm.h vr13, t2, 156, 6
ld.bu t7, a2, 79
st.b t7, t2, 158
b .Z1_UEDGE_END
.Z1_UEDGE_W64_H32:
andi t6, a4, 32
beqz t6, .Z1_UEDGE_W64_H64
//0-7
vld vr7, a2, -1
z1_upsample_edge_data_init1
vst vr13, t2, 0
//8-15
vld vr7, a2, 7
z1_upsample_edge_data_init1
vst vr13, t2, 16
//16-23
vld vr7, a2, 15
z1_upsample_edge_data_init1
vst vr13, t2, 32
//24-31
vld vr7, a2, 23
z1_upsample_edge_data_init1
vst vr13, t2, 48
//32-39
vld vr7, a2, 31
z1_upsample_edge_data_init1
vst vr13, t2, 64
//40-47
vld vr7, a2, 39
z1_upsample_edge_data_init1
vst vr13, t2, 80
//48-55
vld vr7, a2, 47
z1_upsample_edge_data_init1
vst vr13, t2, 96
//56-63
vld vr7, a2, 55
z1_upsample_edge_data_init1
vst vr13, t2, 112
//64-71
vld vr7, a2, 63
z1_upsample_edge_data_init1
vst vr13, t2, 128
//72-79
vld vr7, a2, 71
z1_upsample_edge_data_init1
vst vr13, t2, 144
//80-87
vld vr7, a2, 79
z1_upsample_edge_data_init1
vst vr13, t2, 160
//88-94
vld vr7, a2, 87
z1_upsample_edge_data_init2
fst.d f13, t2, 176
vstelm.w vr13, t2, 184, 2
vstelm.h vr13, t2, 188, 6
ld.bu t7, a2, 95
st.b t7, t2, 190
b .Z1_UEDGE_END
.Z1_UEDGE_W64_H64:
//0-7
vld vr7, a2, -1
z1_upsample_edge_data_init1
vst vr13, t2, 0
//8-15
vld vr7, a2, 7
z1_upsample_edge_data_init1
vst vr13, t2, 16
//16-23
vld vr7, a2, 15
z1_upsample_edge_data_init1
vst vr13, t2, 32
//24-31
vld vr7, a2, 23
z1_upsample_edge_data_init1
vst vr13, t2, 48
//32-39
vld vr7, a2, 31
z1_upsample_edge_data_init1
vst vr13, t2, 64
//40-47
vld vr7, a2, 39
z1_upsample_edge_data_init1
vst vr13, t2, 80
//48-55
vld vr7, a2, 47
z1_upsample_edge_data_init1
vst vr13, t2, 96
//56-63
vld vr7, a2, 55
z1_upsample_edge_data_init1
vst vr13, t2, 112
//64-71
vld vr7, a2, 63
z1_upsample_edge_data_init1
vst vr13, t2, 128
//72-79
vld vr7, a2, 71
z1_upsample_edge_data_init1
vst vr13, t2, 144
//80-87
vld vr7, a2, 79
z1_upsample_edge_data_init1
vst vr13, t2, 160
//88-95
vld vr7, a2, 87
z1_upsample_edge_data_init1
vst vr13, t2, 176
//96-103
vld vr7, a2, 95
z1_upsample_edge_data_init1
vst vr13, t2, 192
//104-111
vld vr7, a2, 103
z1_upsample_edge_data_init1
vst vr13, t2, 208
//112-119
vld vr7, a2, 111
z1_upsample_edge_data_init1
vst vr13, t2, 224
//120-126
vld vr7, a2, 119
z1_upsample_edge_data_init2
fst.d f13, t2, 240
vstelm.w vr13, t2, 248, 2
vstelm.h vr13, t2, 252, 6
ld.bu t7, a2, 127
st.b t7, t2, 254
b .Z1_UEDGE_END
.Z1_UEDGE_END:
//upsample_edge end
or a7, t2, t2 //top
add.d t0, a3, a4
slli.d t0, t0, 1
addi.d t0, t0, -2 //max_base_x
slli.d t1, t1, 1
b .IPRED_Z1_UA_END
.IPRED_Z1_NOTUA:
or t5, zero, zero //upsample_above=0
beqz a7, .IPRED_Z1_NOTFS
add.d a7, a3, a4 //w+h
li.w t4, 90
sub.d t4, t4, a5
// ipred_get_filter_strength a6:filter_strength
beqz a6, .Z1_GETFS20
.Z1_GETFS10: //wh<=8
addi.d t6, a7, -8
blt zero, t6, .Z1_GETFS11
addi.d t6, t4, -64
blt t6, zero, .Z1_GETFS101
ori a6, zero, 2
b .Z1_GETFS40
.Z1_GETFS101:
addi.d t6, t4, -40
blt t6, zero, .Z1_GETFS30
ori a6, zero, 1
b .Z1_GETFS40
.Z1_GETFS11: //wh<=16
addi.d t6, a7, -16
blt zero, t6, .Z1_GETFS12
addi.d t6, t4, -48
blt t6, zero, .Z1_GETFS111
ori a6, zero, 2
b .Z1_GETFS40
.Z1_GETFS111:
addi.d t6, t4, -20
blt t6, zero, .Z1_GETFS30
ori a6, zero, 1
b .Z1_GETFS40
.Z1_GETFS12: //wh<=24
addi.d t6, a7, -24
blt zero, t6, .Z1_GETFS13
addi.d t6, t4, -4
blt t6, zero, .Z1_GETFS30
ori a6, zero, 3
b .Z1_GETFS40
.Z1_GETFS13:
ori a6, zero, 3
b .Z1_GETFS40
.Z1_GETFS20: //wh<=8
addi.d t6, a7, -8
blt zero, t6, .Z1_GETFS21
addi.d t6, t4, -56
blt t6, zero, .Z1_GETFS30
ori a6, zero, 1
b .Z1_GETFS40
.Z1_GETFS21: //wh<=16
addi.d t6, a7, -16
blt zero, t6, .Z1_GETFS22
addi.d t6, t4, -40
blt t6, zero, .Z1_GETFS30
ori a6, zero, 1
b .Z1_GETFS40
.Z1_GETFS22: //wh<=24
addi.d t6, a7, -24
blt zero, t6, .Z1_GETFS23
addi.d t6, t4, -32
blt t6, zero, .Z1_GETFS221
ori a6, zero, 3
b .Z1_GETFS40
.Z1_GETFS221:
addi.d t6, t4, -16
blt t6, zero, .Z1_GETFS222
ori a6, zero, 2
b .Z1_GETFS40
.Z1_GETFS222:
addi.d t6, t4, -8
blt t6, zero, .Z1_GETFS30
ori a6, zero, 1
b .Z1_GETFS40
.Z1_GETFS23: //wh<=32
addi.d t6, a7, -32
blt zero, t6, .Z1_GETFS24
addi.d t6, t4, -32
blt t6, zero, .Z1_GETFS231
ori a6, zero, 3
b .Z1_GETFS40
.Z1_GETFS231:
addi.d t6, t4, -4
blt t6, zero, .Z1_GETFS232
ori a6, zero, 2
b .Z1_GETFS40
.Z1_GETFS232:
ori a6, zero, 1
b .Z1_GETFS40
.Z1_GETFS24:
ori a6, zero, 3
b .Z1_GETFS40
.Z1_GETFS30:
or a6, zero, zero
.Z1_GETFS40:
beqz a6, .IPRED_Z1_NOTFS
.IPRED_Z1_IFFS:
// filter_edge
addi.d a6, a6, -1
slli.d a6, a6, 4
la.local t0, ipred_filter_edge_kernel1
vldx vr1, t0, a6 //kernel[0-3]
la.local t0, ipred_filter_edge_kernel2
vldx vr6, t0, a6 //kernel[4]
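/* The two tables loaded above appear to provide a 5-tap edge-smoothing kernel
 * per strength (taps 0-3 in vr1, tap 4 in vr6).  A hedged scalar sketch of
 * the filtering the z1_filter_edge_* macros vectorize, assuming the usual
 * clamp-and-accumulate form (clamp(), sz and the rounding are assumptions):
 *
 *   for (int i = 0; i < sz; i++) {
 *       int s = 0;
 *       for (int j = 0; j < 5; j++)
 *           s += edge[clamp(i - 2 + j, 0, sz - 1)] * kernel[j];
 *       out[i] = (s + 8) >> 4;
 *   }
 */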
.IPRED_Z1_FS_W4:
andi t0, a3, 4
beqz t0, .IPRED_Z1_FS_W8
.IPRED_Z1_FS_W4_H4:
andi t0, a4, 4
beqz t0, .IPRED_Z1_FS_W4_H8
//0-7
vld vr7, a2, -1
z1_filter_edge_data_init4
vbsrl.v vr13, vr7, 3
vextrins.b vr13, vr13, 0x65
vextrins.b vr13, vr13, 0x75
z1_filter_edge_calc_loop2
fst.d f12, t2, 0
b .IPRED_Z1_FS_END
.IPRED_Z1_FS_W4_H8:
andi t0, a4, 8
beqz t0, .IPRED_Z1_FS_W4_H16
//0-7
vld vr7, a2, -1
z1_filter_edge_data_init4
vbsrl.v vr13, vr7, 3
vextrins.b vr13, vr13, 0x65
vextrins.b vr13, vr13, 0x75
z1_filter_edge_calc_loop2
fst.d f12, t2, 0
//8-11
vreplvei.b vr10, vr7, 8
vextrins.b vr10, vr7, 0x07
z1_filter_edge_calc_other
fst.s f12, t2, 8
b .IPRED_Z1_FS_END
.IPRED_Z1_FS_W4_H16:
andi t0, a4, 16
beqz t0, .IPRED_Z1_FS_W4_H32
//0-7
vld vr7, a2, -1
z1_filter_edge_data_init4
vbsrl.v vr13, vr7, 3
vextrins.b vr13, vr13, 0x65
vextrins.b vr13, vr13, 0x75
z1_filter_edge_calc_loop2
fst.d f12, t2, 0
//8-15
vreplvei.b vr10, vr7, 8
vextrins.b vr10, vr7, 0x07
z1_filter_edge_calc_other
fst.d f12, t2, 8
//16-19
vreplvei.b vr12, vr12, 1
fst.s f12, t2, 16
b .IPRED_Z1_FS_END
.IPRED_Z1_FS_W4_H32:
andi t0, a4, 32
beqz t0, .IPRED_Z1_FS_W4_H64
//0-7
vld vr7, a2, -1
z1_filter_edge_data_init4
vbsrl.v vr13, vr7, 3
vextrins.b vr13, vr13, 0x65
vextrins.b vr13, vr13, 0x75
z1_filter_edge_calc_loop2
fst.d f12, t2, 0
//8-15
vreplvei.b vr10, vr7, 8
vextrins.b vr10, vr7, 0x07
z1_filter_edge_calc_other
fst.d f12, t2, 8
//16-23
vreplvei.b vr12, vr12, 1
fst.d f12, t2, 16
fst.d f12, t2, 24 //24-31
fst.s f12, t2, 32 //32-35
b .IPRED_Z1_FS_END
.IPRED_Z1_FS_W4_H64:
//0-7
vld vr7, a2, -1
z1_filter_edge_data_init4
vbsrl.v vr13, vr7, 3
vextrins.b vr13, vr13, 0x65
vextrins.b vr13, vr13, 0x75
z1_filter_edge_calc_loop2
fst.d f12, t2, 0
//8-15
vreplvei.b vr10, vr7, 8
vextrins.b vr10, vr7, 0x07
z1_filter_edge_calc_other
fst.d f12, t2, 8
//16-23
vreplvei.b vr12, vr12, 1
fst.d f12, t2, 16
fst.d f12, t2, 24 //24-31
fst.d f12, t2, 32 //32-39
fst.d f12, t2, 40 //40-47
fst.d f12, t2, 48 //48-55
fst.d f12, t2, 56 //56-63
fst.s f12, t2, 64 //64-67
b .IPRED_Z1_FS_END
.IPRED_Z1_FS_W8:
andi t0, a3, 8
beqz t0, .IPRED_Z1_FS_W16
.IPRED_Z1_FS_W8_H4:
andi t0, a4, 4
beqz t0, .IPRED_Z1_FS_W8_H8
//0-7
vld vr7, a2, -1
z1_filter_edge_data_init1
vbsrl.v vr13, vr7, 3
z1_filter_edge_calc_loop2
fst.d f12, t2, 0
//8-11
vld vr7, a2, 6
vbsrl.v vr11, vr7, 1
vbsrl.v vr12, vr7, 2
vbsrl.v vr13, vr7, 3
vextrins.b vr13, vr13, 0x32
vsllwil.hu.bu vr10, vr7, 0
vsllwil.hu.bu vr11, vr11, 0
vsllwil.hu.bu vr12, vr12, 0
vsllwil.hu.bu vr13, vr13, 0
z1_filter_edge_calc_loop1
vbsrl.v vr13, vr7, 4
vextrins.b vr13, vr13, 0x21
vextrins.b vr13, vr13, 0x31
z1_filter_edge_calc_loop2
fst.s f12, t2, 8
b .IPRED_Z1_FS_END
.IPRED_Z1_FS_W8_H8:
andi t0, a4, 8
beqz t0, .IPRED_Z1_FS_W8_H16
//0-7
vld vr7, a2, -1
z1_filter_edge_data_init1
vbsrl.v vr13, vr7, 3
z1_filter_edge_calc_loop2
fst.d f12, t2, 0
//8-15
vld vr7, a2, 6
z1_filter_edge_data_init3
vbsrl.v vr13, vr7, 4
vextrins.b vr13, vr13, 0x65
vextrins.b vr13, vr13, 0x75
z1_filter_edge_calc_loop2
fst.d f12, t2, 8
b .IPRED_Z1_FS_END
.IPRED_Z1_FS_W8_H16:
andi t0, a4, 16
beqz t0, .IPRED_Z1_FS_W8_H32
//0-7
vld vr7, a2, -1
z1_filter_edge_data_init1
vbsrl.v vr13, vr7, 3
z1_filter_edge_calc_loop2
fst.d f12, t2, 0
//8-15
vld vr7, a2, 6
z1_filter_edge_data_init3
vbsrl.v vr13, vr7, 4
vextrins.b vr13, vr13, 0x65
vextrins.b vr13, vr13, 0x75
z1_filter_edge_calc_loop2
fst.d f12, t2, 8
//16-23
vreplvei.b vr10, vr7, 9
vextrins.b vr10, vr7, 0x08
z1_filter_edge_calc_other
fst.d f12, t2, 16
b .IPRED_Z1_FS_END
.IPRED_Z1_FS_W8_H32:
andi t0, a4, 32
beqz t0, .IPRED_Z1_FS_W8_H64
//0-7
vld vr7, a2, -1
z1_filter_edge_data_init1
vbsrl.v vr13, vr7, 3
z1_filter_edge_calc_loop2
fst.d f12, t2, 0
//8-15
vld vr7, a2, 6
z1_filter_edge_data_init3
vbsrl.v vr13, vr7, 4
vextrins.b vr13, vr13, 0x65
vextrins.b vr13, vr13, 0x75
z1_filter_edge_calc_loop2
fst.d f12, t2, 8
//16-23
vreplvei.b vr10, vr7, 9
vextrins.b vr10, vr7, 0x08
z1_filter_edge_calc_other
fst.d f12, t2, 16
//24-31
vreplvei.b vr12, vr12, 1
fst.d f12, t2, 24
//32-39
fst.d f12, t2, 32
b .IPRED_Z1_FS_END
.IPRED_Z1_FS_W8_H64:
//0-7
vld vr7, a2, -1
z1_filter_edge_data_init1
vbsrl.v vr13, vr7, 3
z1_filter_edge_calc_loop2
fst.d f12, t2, 0
//8-15
vld vr7, a2, 6
z1_filter_edge_data_init3
vbsrl.v vr13, vr7, 4
vextrins.b vr13, vr13, 0x65
vextrins.b vr13, vr13, 0x75
z1_filter_edge_calc_loop2
fst.d f12, t2, 8
//16-23
vreplvei.b vr10, vr7, 9
vextrins.b vr10, vr7, 0x08
z1_filter_edge_calc_other
fst.d f12, t2, 16
//24-31
vreplvei.b vr12, vr12, 1
fst.d f12, t2, 24
fst.d f12, t2, 32 //32-39
fst.d f12, t2, 40 //40-47
fst.d f12, t2, 48 //48-55
fst.d f12, t2, 56 //56-63
fst.d f12, t2, 64 //64-71
b .IPRED_Z1_FS_END
.IPRED_Z1_FS_W16:
andi t0, a3, 16
beqz t0, .IPRED_Z1_FS_W32
.IPRED_Z1_FS_W16_H4:
andi t0, a4, 4
beqz t0, .IPRED_Z1_FS_W16_H8
//0-7
vld vr7, a2, -1
z1_filter_edge_data_init1
vbsrl.v vr13, vr7, 3
z1_filter_edge_calc_loop2
fst.d f12, t2, 0
//8-15
vld vr7, a2, 6
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 8
//16-19
vld vr7, a2, 14
vbsrl.v vr11, vr7, 1
vbsrl.v vr12, vr7, 2
vbsrl.v vr13, vr7, 3
vextrins.b vr13, vr13, 0x32
vsllwil.hu.bu vr10, vr7, 0
vsllwil.hu.bu vr11, vr11, 0
vsllwil.hu.bu vr12, vr12, 0
vsllwil.hu.bu vr13, vr13, 0
z1_filter_edge_calc_loop1
vbsrl.v vr13, vr7, 4
vextrins.b vr13, vr13, 0x21
vextrins.b vr13, vr13, 0x31
z1_filter_edge_calc_loop2
fst.s f12, t2, 16
b .IPRED_Z1_FS_END
.IPRED_Z1_FS_W16_H8:
andi t0, a4, 8
beqz t0, .IPRED_Z1_FS_W16_H16
//0-7
vld vr7, a2, -1
z1_filter_edge_data_init1
vbsrl.v vr13, vr7, 3
z1_filter_edge_calc_loop2
fst.d f12, t2, 0
//8-15
vld vr7, a2, 6
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 8
//16-23
vld vr7, a2, 14
z1_filter_edge_data_init3
vbsrl.v vr13, vr7, 4
vextrins.b vr13, vr13, 0x65
vextrins.b vr13, vr13, 0x75
z1_filter_edge_calc_loop2
fst.d f12, t2, 16
b .IPRED_Z1_FS_END
.IPRED_Z1_FS_W16_H16:
andi t0, a4, 16
beqz t0, .IPRED_Z1_FS_W16_H32
//0-7
vld vr7, a2, -1
z1_filter_edge_data_init1
vbsrl.v vr13, vr7, 3
z1_filter_edge_calc_loop2
fst.d f12, t2, 0
//8-15
vld vr7, a2, 6
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 8
//16-23
vld vr7, a2, 14
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 16
//24-31
vld vr7, a2, 22
z1_filter_edge_data_init3
vbsrl.v vr13, vr7, 4
vextrins.b vr13, vr13, 0x65
vextrins.b vr13, vr13, 0x75
z1_filter_edge_calc_loop2
fst.d f12, t2, 24
b .IPRED_Z1_FS_END
.IPRED_Z1_FS_W16_H32:
andi t0, a4, 32
beqz t0, .IPRED_Z1_FS_W16_H64
//0-7
vld vr7, a2, -1
z1_filter_edge_data_init1
vbsrl.v vr13, vr7, 3
z1_filter_edge_calc_loop2
fst.d f12, t2, 0
//8-15
vld vr7, a2, 6
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 8
//16-23
vld vr7, a2, 14
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 16
//24-31
vld vr7, a2, 22
z1_filter_edge_data_init3
vbsrl.v vr13, vr7, 4
vextrins.b vr13, vr13, 0x65
vextrins.b vr13, vr13, 0x75
z1_filter_edge_calc_loop2
fst.d f12, t2, 24
//32-39
vreplvei.b vr10, vr7, 9
vextrins.b vr10, vr7, 0x08
z1_filter_edge_calc_other
fst.d f12, t2, 32
//40-47
vreplvei.b vr12, vr12, 1
fst.d f12, t2, 40
b .IPRED_Z1_FS_END
.IPRED_Z1_FS_W16_H64:
//0-7
vld vr7, a2, -1
z1_filter_edge_data_init1
vbsrl.v vr13, vr7, 3
z1_filter_edge_calc_loop2
fst.d f12, t2, 0
//8-15
vld vr7, a2, 6
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 8
//16-23
vld vr7, a2, 14
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 16
//24-31
vld vr7, a2, 22
z1_filter_edge_data_init3
vbsrl.v vr13, vr7, 4
vextrins.b vr13, vr13, 0x65
vextrins.b vr13, vr13, 0x75
z1_filter_edge_calc_loop2
fst.d f12, t2, 24
//32-39
vreplvei.b vr10, vr7, 9
vextrins.b vr10, vr7, 0x08
z1_filter_edge_calc_other
fst.d f12, t2, 32
//40-47
vreplvei.b vr12, vr12, 1
fst.d f12, t2, 40
fst.d f12, t2, 48 //48-55
fst.d f12, t2, 56 //56-63
fst.d f12, t2, 64 //64-71
fst.d f12, t2, 72 //72-79
b .IPRED_Z1_FS_END
.IPRED_Z1_FS_W32:
andi t0, a3, 32
beqz t0, .IPRED_Z1_FS_W64
.IPRED_Z1_FS_W32_H8:
andi t0, a4, 8
beqz t0, .IPRED_Z1_FS_W32_H16
//0-7
vld vr7, a2, -1
z1_filter_edge_data_init1
vbsrl.v vr13, vr7, 3
z1_filter_edge_calc_loop2
fst.d f12, t2, 0
//8-15
vld vr7, a2, 6
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 8
//16-23
vld vr7, a2, 14
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 16
//24-31
vld vr7, a2, 22
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 24
//32-39
vld vr7, a2, 30
z1_filter_edge_data_init3
vbsrl.v vr13, vr7, 4
vextrins.b vr13, vr13, 0x65
vextrins.b vr13, vr13, 0x75
z1_filter_edge_calc_loop2
fst.d f12, t2, 32
b .IPRED_Z1_FS_END
.IPRED_Z1_FS_W32_H16:
andi t0, a4, 16
beqz t0, .IPRED_Z1_FS_W32_H32
//0-7
vld vr7, a2, -1
z1_filter_edge_data_init1
vbsrl.v vr13, vr7, 3
z1_filter_edge_calc_loop2
fst.d f12, t2, 0
//8-15
vld vr7, a2, 6
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 8
//16-23
vld vr7, a2, 14
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 16
//24-31
vld vr7, a2, 22
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 24
//32-39
vld vr7, a2, 30
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 32
//40-47
vld vr7, a2, 38
z1_filter_edge_data_init3
vbsrl.v vr13, vr7, 4
vextrins.b vr13, vr13, 0x65
vextrins.b vr13, vr13, 0x75
z1_filter_edge_calc_loop2
fst.d f12, t2, 40
b .IPRED_Z1_FS_END
.IPRED_Z1_FS_W32_H32:
andi t0, a4, 32
beqz t0, .IPRED_Z1_FS_W32_H64
//0-7
vld vr7, a2, -1
z1_filter_edge_data_init1
vbsrl.v vr13, vr7, 3
z1_filter_edge_calc_loop2
fst.d f12, t2, 0
//8-15
vld vr7, a2, 6
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 8
//16-23
vld vr7, a2, 14
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 16
//24-31
vld vr7, a2, 22
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 24
//32-39
vld vr7, a2, 30
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 32
//40-47
vld vr7, a2, 38
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 40
//48-55
vld vr7, a2, 46
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 48
//56-63
vld vr7, a2, 54
z1_filter_edge_data_init3
vbsrl.v vr13, vr7, 4
vextrins.b vr13, vr13, 0x65
vextrins.b vr13, vr13, 0x75
z1_filter_edge_calc_loop2
fst.d f12, t2, 56
b .IPRED_Z1_FS_END
.IPRED_Z1_FS_W32_H64:
//0-7
vld vr7, a2, -1
z1_filter_edge_data_init1
vbsrl.v vr13, vr7, 3
z1_filter_edge_calc_loop2
fst.d f12, t2, 0
//8-15
vld vr7, a2, 6
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 8
//16-23
vld vr7, a2, 14
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 16
//24-31
vld vr7, a2, 22
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 24
//32-39
vld vr7, a2, 30
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 32
//40-47
vld vr7, a2, 38
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 40
//48-55
vld vr7, a2, 46
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 48
//56-63
vld vr7, a2, 54
z1_filter_edge_data_init3
vbsrl.v vr13, vr7, 4
vextrins.b vr13, vr13, 0x65
vextrins.b vr13, vr13, 0x75
z1_filter_edge_calc_loop2
fst.d f12, t2, 56
//64-71
vreplvei.b vr10, vr7, 9
vextrins.b vr10, vr7, 0x08
z1_filter_edge_calc_other
fst.d f12, t2, 64
//72-79
vreplvei.b vr12, vr12, 1
fst.d f12, t2, 72
fst.d f12, t2, 80 //80-87
fst.d f12, t2, 88 //88-95
b .IPRED_Z1_FS_END
.IPRED_Z1_FS_W64:
.IPRED_Z1_FS_W64_H16:
andi t0, a4, 16
beqz t0, .IPRED_Z1_FS_W64_H32
//0-7
vld vr7, a2, -1
z1_filter_edge_data_init1
vbsrl.v vr13, vr7, 3
z1_filter_edge_calc_loop2
fst.d f12, t2, 0
//8-15
vld vr7, a2, 6
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 8
//16-23
vld vr7, a2, 14
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 16
//24-31
vld vr7, a2, 22
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 24
//32-39
vld vr7, a2, 30
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 32
//40-47
vld vr7, a2, 38
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 40
//48-55
vld vr7, a2, 46
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 48
//56-63
vld vr7, a2, 54
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 56
//64-71
vld vr7, a2, 62
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 64
//72-79
vld vr7, a2, 70
z1_filter_edge_data_init3
vbsrl.v vr13, vr7, 4
vextrins.b vr13, vr13, 0x65
vextrins.b vr13, vr13, 0x75
z1_filter_edge_calc_loop2
fst.d f12, t2, 72
b .IPRED_Z1_FS_END
.IPRED_Z1_FS_W64_H32:
andi t0, a4, 32
beqz t0, .IPRED_Z1_FS_W64_H64
//0-7
vld vr7, a2, -1
z1_filter_edge_data_init1
vbsrl.v vr13, vr7, 3
z1_filter_edge_calc_loop2
fst.d f12, t2, 0
//8-15
vld vr7, a2, 6
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 8
//16-23
vld vr7, a2, 14
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 16
//24-31
vld vr7, a2, 22
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 24
//32-39
vld vr7, a2, 30
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 32
//40-47
vld vr7, a2, 38
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 40
//48-55
vld vr7, a2, 46
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 48
//56-63
vld vr7, a2, 54
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 56
//64-71
vld vr7, a2, 62
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 64
//72-79
vld vr7, a2, 70
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 72
//80-87
vld vr7, a2, 78
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 80
//88-95
vld vr7, a2, 86
z1_filter_edge_data_init3
vbsrl.v vr13, vr7, 4
vextrins.b vr13, vr13, 0x65
vextrins.b vr13, vr13, 0x75
z1_filter_edge_calc_loop2
fst.d f12, t2, 88
b .IPRED_Z1_FS_END
.IPRED_Z1_FS_W64_H64:
//0-7
vld vr7, a2, -1
z1_filter_edge_data_init1
vbsrl.v vr13, vr7, 3
z1_filter_edge_calc_loop2
fst.d f12, t2, 0
//8-15
vld vr7, a2, 6
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 8
//16-23
vld vr7, a2, 14
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 16
//24-31
vld vr7, a2, 22
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 24
//32-39
vld vr7, a2, 30
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 32
//40-47
vld vr7, a2, 38
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 40
//48-55
vld vr7, a2, 46
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 48
//56-63
vld vr7, a2, 54
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 56
//64-71
vld vr7, a2, 62
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 64
//72-79
vld vr7, a2, 70
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 72
//80-87
vld vr7, a2, 78
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 80
//88-95
vld vr7, a2, 86
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 88
//96-103
vld vr7, a2, 94
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 96
//104-111
vld vr7, a2, 102
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 104
//112-119
vld vr7, a2, 110
z1_filter_edge_data_init2
vbsrl.v vr13, vr7, 4
z1_filter_edge_calc_loop2
fst.d f12, t2, 112
//120-127
vld vr7, a2, 118
z1_filter_edge_data_init3
vbsrl.v vr13, vr7, 4
vextrins.b vr13, vr13, 0x65
vextrins.b vr13, vr13, 0x75
z1_filter_edge_calc_loop2
fst.d f12, t2, 120
.IPRED_Z1_FS_END:
addi.d t0, a7, -1 //max_base_x
or a7, t2, t2 //top
b .IPRED_Z1_UA_END
.IPRED_Z1_NOTFS:
or a7, a2, a2 //top
// imin_gr
blt a3, a4, .Z1_IMIN1
or t0, a4, a4
b .Z1_IMIN2
.Z1_IMIN1:
or t0, a3, a3
.Z1_IMIN2:
add.d t0, a3, t0
addi.d t0, t0, -1 //max_base_x
.IPRED_Z1_UA_END:
//st dst, t1:dx a2 a6 t6 t7
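/* Hedged scalar sketch of the per-pixel blend that both the upsampled
 * (.Z1_LOOPY) and non-upsampled (.Z1_UA0_LOOPY) loops below vectorize,
 * taken from the scalar tail at .Z1_LOOPX_BASEMAX1 (variable names are ours):
 *
 *   int xpos = dx;                               // t3; dx was doubled in the upsampled path
 *   for (int y = 0; y < h; y++, xpos += dx) {
 *       const int frac = xpos & 0x3e;            // t4
 *       int base = xpos >> 6;                    // t6
 *       for (int x = 0; x < w; x++, base += upsample_above ? 2 : 1) {
 *           if (base < max_base_x)
 *               dst[x] = (top[base] * (64 - frac) + top[base + 1] * frac + 32) >> 6;
 *           else
 *               dst[x] = top[max_base_x];        // tail done in bulk via pixel_set_8bpc_allw
 *       }
 *       dst += stride;
 *   }
 */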
beqz t5, .Z1_UA0
li.w a5, 64
vreplgr2vr.h vr0, a5
vsrai.h vr7, vr0, 1
or t2, zero, zero //y
or t3, t1, t1 //xpos
.Z1_LOOPY:
andi t4, t3, 0x3e //frac
vreplgr2vr.h vr1, t4
vsub.h vr2, vr0, vr1
or a6, zero, zero //x
or a2, zero, zero //base_num
srai.d t6, t3, 6 //base
or t7, t6, t6
bge t7, t0, .Z1_LOOPX
.Z1_BASENUM:
addi.d a2, a2, 1
addi.d t7, t7, 2
blt t7, t0, .Z1_BASENUM
.Z1_LOOPX:
blt a2, a3, .Z1_LOOPX_BASEMAX
srai.d t8, a3, 3 //loop param
beqz t8, .Z1_LOOPX_W4
.Z1_LOOPX_W8:
add.d t5, a7, t6
vld vr3, t5, 0
vpickev.b vr5, vr3, vr3 //0 2 4 6...
vpickod.b vr6, vr3, vr3 //1 3 5 7...
vsllwil.hu.bu vr5, vr5, 0
vsllwil.hu.bu vr6, vr6, 0
vmul.h vr3, vr5, vr2
vmadd.h vr3, vr6, vr1
vadd.h vr3, vr3, vr7
vsrai.h vr3, vr3, 6
vsrlni.b.h vr3, vr3, 0
fstx.d f3, a0, a6
addi.d a6, a6, 8
addi.d t6, t6, 16
addi.d t8, t8, -1
bnez t8, .Z1_LOOPX_W8
b .Z1_LOOPY_END
.Z1_LOOPX_W4:
vldx vr3, a7, t6
vsllwil.hu.bu vr3, vr3, 0
vpickev.h vr5, vr3, vr3 //0 2 4 6...
vpickod.h vr6, vr3, vr3 //1 3 5 7...
vmul.h vr3, vr5, vr2
vmadd.h vr3, vr6, vr1
vadd.h vr3, vr3, vr7
vsrai.h vr3, vr3, 6
vsrlni.b.h vr3, vr3, 0
fstx.s f3, a0, a6
b .Z1_LOOPY_END
.Z1_LOOPX_BASEMAX:
srai.d t8, a2, 3 //loop param
beqz t8, .Z1_LOOPX_BASEMAX4
.Z1_LOOPX_BASEMAX8:
add.d t5, a7, t6
vld vr3, t5, 0
vpickev.b vr5, vr3, vr3 //0 2 4 6...
vpickod.b vr6, vr3, vr3 //1 3 5 7...
vsllwil.hu.bu vr5, vr5, 0
vsllwil.hu.bu vr6, vr6, 0
vmul.h vr3, vr5, vr2
vmadd.h vr3, vr6, vr1
vadd.h vr3, vr3, vr7
vsrai.h vr3, vr3, 6
vsrlni.b.h vr3, vr3, 0
fstx.d f3, a0, a6
addi.d a6, a6, 8
addi.d t6, t6, 16
addi.d t8, t8, -1
bnez t8, .Z1_LOOPX_BASEMAX8
.Z1_LOOPX_BASEMAX4:
andi t8, a2, 4
beqz t8, .Z1_LOOPX_BASEMAX2
vldx vr3, a7, t6
vsllwil.hu.bu vr3, vr3, 0
vpickev.h vr5, vr3, vr3 //0 2 4 6...
vpickod.h vr6, vr3, vr3 //1 3 5 7...
vmul.h vr3, vr5, vr2
vmadd.h vr3, vr6, vr1
vadd.h vr3, vr3, vr7
vsrai.h vr3, vr3, 6
vsrlni.b.h vr3, vr3, 0
fstx.s f3, a0, a6
addi.d a6, a6, 4
addi.d t6, t6, 8
.Z1_LOOPX_BASEMAX2:
andi t8, a2, 2
beqz t8, .Z1_LOOPX_BASEMAX1
vldx vr3, a7, t6
vsllwil.hu.bu vr3, vr3, 0
vpickev.h vr5, vr3, vr3 //0 2 4 6...
vpickod.h vr6, vr3, vr3 //1 3 5 7...
vmul.h vr3, vr5, vr2
vmadd.h vr3, vr6, vr1
vadd.h vr3, vr3, vr7
vsrai.h vr3, vr3, 6
vsrlni.b.h vr3, vr3, 0
vpickve2gr.bu t7, vr3, 0
vpickve2gr.bu t8, vr3, 1
stx.b t7, a0, a6
addi.d a6, a6, 1
stx.b t8, a0, a6
addi.d a6, a6, 1
addi.d t6, t6, 4
.Z1_LOOPX_BASEMAX1:
andi t8, a2, 1
beqz t8, .Z1_LOOPX_BASEMAX_MSET
add.d a2, a7, t6
sub.d t7, a5, t4
ld.bu t8, a2, 0
mul.w t7, t7, t8
ld.bu t8, a2, 1
mul.w t8, t8, t4
add.d t7, t7, t8
addi.d t7, t7, 32
srai.d t7, t7, 6
stx.b t7, a0, a6
addi.d a6, a6, 1
.Z1_LOOPX_BASEMAX_MSET: //memset
add.d t6, a0, a6 //dst
add.d t7, a7, t0 //src
sub.d a2, a3, a6 //size
pixel_set_8bpc_allw t6, t7, a2, t8, t4
.Z1_LOOPY_END:
addi.d t2, t2, 1
add.d a0, a0, a1
add.d t3, t3, t1
blt t2, a4, .Z1_LOOPY
b .Z1_END
.Z1_UA0:
li.w a5, 64
vreplgr2vr.h vr0, a5
vsrai.h vr7, vr0, 1
or t2, zero, zero //y
or t3, t1, t1 //xpos
.Z1_UA0_LOOPY:
andi t4, t3, 0x3e //frac
vreplgr2vr.h vr1, t4
vsub.h vr2, vr0, vr1
or a6, zero, zero //x
srai.d t6, t3, 6 //base
sub.d a2, t0, t6 //a2:base_num
blt a2, zero, .Z1_UA0_BASENUM
b .Z1_UA0_LOOPX
.Z1_UA0_BASENUM:
or a2, zero, zero
.Z1_UA0_LOOPX:
blt a2, a3, .Z1_UA0_LOOPX_BASEMAX
srai.d t8, a3, 3 //loop param
beqz t8, .Z1_UA0_LOOPX_W4
.Z1_UA0_LOOPX_W8:
add.d t5, a7, t6
vld vr5, t5, 0
vld vr6, t5, 1
vsllwil.hu.bu vr5, vr5, 0
vsllwil.hu.bu vr6, vr6, 0
vmul.h vr3, vr5, vr2
vmadd.h vr3, vr6, vr1
vadd.h vr3, vr3, vr7
vsrai.h vr3, vr3, 6
vsrlni.b.h vr3, vr3, 0
fstx.d f3, a0, a6
addi.d a6, a6, 8
addi.d t6, t6, 8
addi.d t8, t8, -1
bnez t8, .Z1_UA0_LOOPX_W8
b .Z1_UA0_LOOPY_END
.Z1_UA0_LOOPX_W4:
vldx vr5, a7, t6
vsllwil.hu.bu vr5, vr5, 0
vbsrl.v vr6, vr5, 2
vmul.h vr3, vr5, vr2
vmadd.h vr3, vr6, vr1
vadd.h vr3, vr3, vr7
vsrai.h vr3, vr3, 6
vsrlni.b.h vr3, vr3, 0
fstx.s f3, a0, a6
b .Z1_UA0_LOOPY_END
.Z1_UA0_LOOPX_BASEMAX:
srai.d t8, a2, 3 //loop param
beqz t8, .Z1_UA0_LOOPX_BASEMAX4
.Z1_UA0_LOOPX_BASEMAX8:
add.d t5, a7, t6
vld vr5, t5, 0
vld vr6, t5, 1
vsllwil.hu.bu vr5, vr5, 0
vsllwil.hu.bu vr6, vr6, 0
vmul.h vr3, vr5, vr2
vmadd.h vr3, vr6, vr1
vadd.h vr3, vr3, vr7
vsrai.h vr3, vr3, 6
vsrlni.b.h vr3, vr3, 0
fstx.d f3, a0, a6
addi.d a6, a6, 8
addi.d t6, t6, 8
addi.d t8, t8, -1
bnez t8, .Z1_UA0_LOOPX_BASEMAX8
.Z1_UA0_LOOPX_BASEMAX4:
andi t8, a2, 4
beqz t8, .Z1_UA0_LOOPX_BASEMAX2
vldx vr5, a7, t6
vsllwil.hu.bu vr5, vr5, 0
vbsrl.v vr6, vr5, 2
vmul.h vr3, vr5, vr2
vmadd.h vr3, vr6, vr1
vadd.h vr3, vr3, vr7
vsrai.h vr3, vr3, 6
vsrlni.b.h vr3, vr3, 0
fstx.s f3, a0, a6
addi.d a6, a6, 4
addi.d t6, t6, 4
.Z1_UA0_LOOPX_BASEMAX2:
andi t8, a2, 2
beqz t8, .Z1_UA0_LOOPX_BASEMAX1
vldx vr5, a7, t6
vsllwil.hu.bu vr5, vr5, 0
vbsrl.v vr6, vr5, 2
vmul.h vr3, vr5, vr2
vmadd.h vr3, vr6, vr1
vadd.h vr3, vr3, vr7
vsrai.h vr3, vr3, 6
vsrlni.b.h vr3, vr3, 0
vpickve2gr.bu t7, vr3, 0
vpickve2gr.bu t8, vr3, 1
stx.b t7, a0, a6
addi.d a6, a6, 1
stx.b t8, a0, a6
addi.d a6, a6, 1
addi.d t6, t6, 2
.Z1_UA0_LOOPX_BASEMAX1:
andi t8, a2, 1
beqz t8, .Z1_UA0_LOOPX_BASEMAX_MSET
add.d a2, a7, t6
sub.d t7, a5, t4
ld.bu t8, a2, 0
mul.w t7, t7, t8
ld.bu t8, a2, 1
mul.w t8, t8, t4
add.d t7, t7, t8
addi.d t7, t7, 32
srai.d t7, t7, 6
stx.b t7, a0, a6
addi.d a6, a6, 1
.Z1_UA0_LOOPX_BASEMAX_MSET: //memset
add.d t6, a0, a6 //dst
add.d t7, a7, t0 //src
sub.d a2, a3, a6 //size
pixel_set_8bpc_allw t6, t7, a2, t8, t4
.Z1_UA0_LOOPY_END:
addi.d t2, t2, 1
add.d a0, a0, a1
add.d t3, t3, t1
blt t2, a4, .Z1_UA0_LOOPY
.Z1_END:
addi.d sp, sp, 128
endfunc
third_party/dav1d/src/loongarch/refmvs.S
/*
* Copyright © 2023, VideoLAN and dav1d authors
* Copyright © 2023, Loongson Technology Corporation Limited
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/loongarch/loongson_asm.S"
/*
static void splat_mv_c(refmvs_block **rr, const refmvs_block *const rmv,
const int bx4, const int bw4, int bh4)
*/
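/* Hedged scalar sketch of what the LSX routine below implements: one 12-byte
 * refmvs_block is replicated across bw4 entries of each of bh4 rows, starting
 * at column bx4.  vr1/vr2/vr3 hold the 12-byte pattern staggered across three
 * 16-byte vectors so whole vectors can be stored, and the clz-based jump
 * table dispatches on bw4.
 *
 *   static void splat_mv_scalar(refmvs_block **rr, const refmvs_block *rmv,
 *                               int bx4, int bw4, int bh4)
 *   {
 *       do {
 *           refmvs_block *rb = *rr++ + bx4;
 *           for (int x = 0; x < bw4; x++)
 *               rb[x] = *rmv;
 *       } while (--bh4);
 *   }
 */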
function splat_mv_lsx
vld vr0, a1, 0 // 0 1 ... 11 ...
clz.w t4, a3
vaddi.bu vr1, vr0, 0
addi.w t4, t4, -26
vextrins.w vr1, vr0, 0x30 // 0 1 2 ... 11 0 1 2 3
la.local t5, .SPLAT_LSX_JRTABLE
vbsrl.v vr2, vr1, 4 // 4 5 6 7...11 0 1 2 3 0 0 0 0
alsl.d t6, t4, t5, 1
vextrins.w vr2, vr0, 0x31 // 4 5 6 7...11 0 1 2 3 4 5 6 7
ld.h t7, t6, 0
vbsrl.v vr3, vr2, 4 // 8 9 10 11 0 1 2 3 4 5 6 7 0 0 0 0
add.d t8, t5, t7
alsl.d a2, a2, a2, 1
vextrins.w vr3, vr0, 0x32 // 8 9 10 11 0 1 2 3 4 5 6 7 8 9 10 11
slli.w a2, a2, 2
jirl $r0, t8, 0
.SPLAT_LSX_JRTABLE:
.hword .SPLAT_W32_LSX - .SPLAT_LSX_JRTABLE
.hword .SPLAT_W16_LSX - .SPLAT_LSX_JRTABLE
.hword .SPLAT_W8_LSX - .SPLAT_LSX_JRTABLE
.hword .SPLAT_W4_LSX - .SPLAT_LSX_JRTABLE
.hword .SPLAT_W2_LSX - .SPLAT_LSX_JRTABLE
.hword .SPLAT_W1_LSX - .SPLAT_LSX_JRTABLE
.SPLAT_W1_LSX:
ld.d t3, a0, 0
addi.d a0, a0, 8
addi.d a4, a4, -1
add.d t3, t3, a2
fst.d f1, t3, 0
fst.s f3, t3, 8
blt zero, a4, .SPLAT_W1_LSX
b .splat_end
.SPLAT_W2_LSX:
ld.d t3, a0, 0
addi.d a0, a0, 8
addi.d a4, a4, -1
add.d t3, t3, a2
vst vr1, t3, 0
fst.d f2, t3, 16
blt zero, a4, .SPLAT_W2_LSX
b .splat_end
.SPLAT_W4_LSX:
ld.d t3, a0, 0
addi.d a0, a0, 8
addi.d a4, a4, -1
add.d t3, t3, a2
vst vr1, t3, 0
vst vr2, t3, 16
vst vr3, t3, 32
blt zero, a4, .SPLAT_W4_LSX
b .splat_end
.SPLAT_W8_LSX:
ld.d t3, a0, 0
addi.d a0, a0, 8
addi.d a4, a4, -1
add.d t3, t3, a2
vst vr1, t3, 0
vst vr2, t3, 16
vst vr3, t3, 32
vst vr1, t3, 48
vst vr2, t3, 64
vst vr3, t3, 80
blt zero, a4, .SPLAT_W8_LSX
b .splat_end
.SPLAT_W16_LSX:
ld.d t3, a0, 0
addi.d a0, a0, 8
addi.d a4, a4, -1
add.d t3, t3, a2
.rept 2
vst vr1, t3, 0
vst vr2, t3, 16
vst vr3, t3, 32
vst vr1, t3, 48
vst vr2, t3, 64
vst vr3, t3, 80
addi.d t3, t3, 96
.endr
blt zero, a4, .SPLAT_W16_LSX
b .splat_end
.SPLAT_W32_LSX:
ld.d t3, a0, 0
addi.d a0, a0, 8
addi.d a4, a4, -1
add.d t3, t3, a2
.rept 4
vst vr1, t3, 0
vst vr2, t3, 16
vst vr3, t3, 32
vst vr1, t3, 48
vst vr2, t3, 64
vst vr3, t3, 80
addi.d t3, t3, 96
.endr
blt zero, a4, .SPLAT_W32_LSX
.splat_end:
endfunc
const la_div_mult
.short 0, 16384, 8192, 5461, 4096, 3276, 2730, 2340
.short 2048, 1820, 1638, 1489, 1365, 1260, 1170, 1092
.short 1024, 963, 910, 862, 819, 780, 744, 712
.short 682, 655, 630, 606, 585, 564, 546, 528
endconst
/*
* temp reg: a6 a7
*/
.macro LOAD_SET_LOOP is_odd
slli.d a6, t6, 2
add.d a6, a6, t6 // col_w * 5
0:
addi.d a7, zero, 0 // x
.if \is_odd
stx.w t7, t3, a7
addi.d a7, a7, 5
bge a7, a6, 2f
.endif
1:
stx.w t7, t3, a7
addi.d a7, a7, 5
stx.w t7, t3, a7
addi.d a7, a7, 5
blt a7, a6, 1b
2:
add.d t3, t3, t2
addi.d t5, t5, 1
blt t5, a5, 0b
.endm
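/* Hedged scalar sketch of LOAD_SET_LOOP: every 5-byte temporal-MV entry in
 * the covered rows gets its 4-byte mv field set to the invalid marker
 * 0x80008000 (t7), col_w entries per row, stride * 5 bytes between rows.
 * The is_odd variant only peels one store so the main loop can stay
 * 2x unrolled.
 *
 *   uint8_t *row = rp_proj;                          // t3
 *   for (int y = row_start8; y < row_end8; y++) {    // t5 .. a5
 *       for (ptrdiff_t x = 0; x < (ptrdiff_t)col_w * 5; x += 5)
 *           memcpy(row + x, &(uint32_t){ 0x80008000u }, 4);
 *       row += stride * 5;                           // t2
 *   }
 */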
/*
* static void load_tmvs_c(const refmvs_frame *const rf, int tile_row_idx,
* const int col_start8, const int col_end8,
* const int row_start8, int row_end8)
*/
function load_tmvs_lsx
addi.d sp, sp, -80
st.d s0, sp, 0
st.d s1, sp, 8
st.d s2, sp, 16
st.d s3, sp, 24
st.d s4, sp, 32
st.d s5, sp, 40
st.d s6, sp, 48
st.d s7, sp, 56
st.d s8, sp, 64
vld vr16, a0, 16
vld vr0, a0, 52 // rf->mfmv_ref
ld.w s8, a0, 152 // [0] - rf->n_mfmvs
vld vr17, a0, 168 // [0] - rp_ref| [1]- rp_proj
ld.d t1, a0, 184 // stride
ld.w t0, a0, 200
addi.w t0, t0, -1
bnez t0, 1f
addi.w a1, zero, 0
1:
addi.d t0, a3, 8
vinsgr2vr.w vr1, t0, 0
vinsgr2vr.w vr1, a5, 1
vmin.w vr1, vr1, vr16 // [0] col_end8i [1] row_end8
addi.d t0, a2, -8
bge t0, zero, 2f
addi.w t0, zero, 0 // t0 col_start8i
2:
vpickve2gr.d t4, vr17, 1 // rf->rp_proj
slli.d t2, t1, 2
add.d t2, t2, t1 // stride * 5
slli.d a1, a1, 4 // tile_row_idx * 16
andi t3, a4, 0xf
add.d t3, t3, a1 // tile_row_idx * 16 + row_start8 & 15
mul.w t3, t3, t2
mul.w t8, a1, t2
vpickve2gr.w a5, vr1, 1
addi.d t5, a4, 0
sub.d t6, a3, a2 // col_end8 - col_start8
li.w t7, 0x80008000
slli.d a7, a2, 2
add.d t3, t3, a2
add.d t3, t3, a7
add.d t3, t3, t4 // rp_proj
andi a6, t6, 1
bnez a6, 3f
LOAD_SET_LOOP 0
b 4f
3:
LOAD_SET_LOOP 1
4:
addi.d a6, zero, 0 // n
bge a6, s8, .end_load
add.d t3, t8, t4 // rp_proj
mul.w t6, a4, t2
addi.d s7, zero, 40
vpickve2gr.w t1, vr1, 0 // col_end8i
vbsrl.v vr2, vr0, 4 // rf->mfmv_ref2cur
addi.d t5, a0, 64 // rf->mfmv_ref2ref
la.local t8, la_div_mult
vld vr6, t8, 0
vld vr7, t8, 16
vld vr8, t8, 32
vld vr9, t8, 48
li.w t8, 0x3fff
vreplgr2vr.h vr21, t8
vxor.v vr18, vr18, vr18 // zero
vsub.h vr20, vr18, vr21
vpickev.b vr12, vr7, vr6
vpickod.b vr13, vr7, vr6
vpickev.b vr14, vr9, vr8
vpickod.b vr15, vr9, vr8
vpickve2gr.d s6, vr17, 0 // rf->rp_ref
5:
vld vr10, t5, 0
vld vr11, t5, 16
vpickev.h vr10, vr11, vr10
vpickev.b vr10, vr11, vr10 // [1...7]
vbsrl.v vr0, vr0, 1
vpickve2gr.wu t8, vr2, 0 // ref2cur
vbsrl.v vr2, vr2, 4
srli.d t4, t8, 24
xori t4, t4, 0x80
beqz t4, 8f
vreplgr2vr.h vr23, t8
vshuf.b vr6, vr14, vr12, vr10
vshuf.b vr7, vr15, vr13, vr10
vilvl.b vr8, vr7, vr6
vmulwev.w.h vr6, vr8, vr23
vmulwod.w.h vr7, vr8, vr23
vpickve2gr.b s0, vr0, 0 // ref
slli.d t8, s0, 3
ldx.d s1, s6, t8 // rf->rp_ref[ref]
addi.d s0, s0, -4 // ref_sign
vreplgr2vr.h vr19, s0
add.d s1, s1, t6 // &rf->rp_ref[ref][row_start8 * stride]
addi.d s2, a4, 0 // y
vilvl.w vr8, vr7, vr6
vilvh.w vr9, vr7, vr6
6: // for (int y = row_start8;
andi s3, s2, 0xff8
addi.d s4, s3, 8
blt a4, s3, 0f
addi.d s3, a4, 0 // y_proj_start
0:
blt s4, a5, 0f
addi.d s4, a5, 0 // y_proj_end
0:
addi.d s5, t0, 0 // x
7: // for (int x = col_start8i;
slli.d a7, s5, 2
add.d a7, a7, s5
add.d a7, s1, a7 // rb
vld vr3, a7, 0 // [rb]
vpickve2gr.b t4, vr3, 4 // b_ref
beqz t4, .end_x
vreplve.b vr11, vr10, t4
vpickve2gr.b t7, vr11, 4 // ref2ref
beqz t7, .end_x
vsllwil.w.h vr4, vr3, 0
vreplgr2vr.w vr6, t4
vshuf.w vr6, vr9, vr8 // frac
vmul.w vr5, vr6, vr4
vsrai.w vr4, vr5, 31
vadd.w vr4, vr4, vr5
vssrarni.h.w vr4, vr4, 14
vclip.h vr4, vr4, vr20, vr21 // offset
vxor.v vr5, vr4, vr19 // offset.x ^ ref_sign
vori.b vr5, vr5, 0x1 // offset.x ^ ref_sign
vabsd.h vr4, vr4, vr18
vsrli.h vr4, vr4, 6 // abs(offset.x) >> 6
vsigncov.h vr4, vr5, vr4 // apply_sign
vpickve2gr.h s0, vr4, 0
add.d s0, s2, s0 // pos_y
blt s0, s3, .n_posy
bge s0, s4, .n_posy
andi s0, s0, 0xf
mul.w s0, s0, t2 // pos
vpickve2gr.h t7, vr4, 1
add.d t7, t7, s5 // pos_x
add.d s0, t3, s0 // rp_proj + pos
.loop_posx:
andi t4, s5, 0xff8 // x_sb_align
blt t7, a2, .n_posx
addi.d t8, t4, -8
blt t7, t8, .n_posx
bge t7, a3, .n_posx
addi.d t4, t4, 16
bge t7, t4, .n_posx
slli.d t4, t7, 2
add.d t4, t4, t7 // pos_x * 5
add.d t4, s0, t4 // rp_proj[pos + pos_x]
vstelm.w vr3, t4, 0, 0
vstelm.b vr11, t4, 4, 4
.n_posx:
addi.d s5, s5, 1 // x + 1
bge s5, t1, .ret_posx
addi.d a7, a7, 5 // rb + 1
vld vr4, a7, 0 // [rb]
vseq.b vr5, vr4, vr3
vpickve2gr.d t8, vr5, 0
cto.d t8, t8
blt t8, s7, 7b
addi.d t7, t7, 1 // pos_x + 1
/* Core computing loop expansion (second) */
andi t4, s5, 0xff8 // x_sb_align
blt t7, a2, .n_posx
addi.d t8, t4, -8
blt t7, t8, .n_posx
bge t7, a3, .n_posx
addi.d t4, t4, 16
bge t7, t4, .n_posx
slli.d t4, t7, 2
add.d t4, t4, t7 // pos_x * 5
add.d t4, s0, t4 // rp_proj[pos + pos_x]
vstelm.w vr3, t4, 0, 0
vstelm.b vr11, t4, 4, 4
addi.d s5, s5, 1 // x + 1
bge s5, t1, .ret_posx
addi.d a7, a7, 5 // rb + 1
vld vr4, a7, 0 // [rb]
vseq.b vr5, vr4, vr3
vpickve2gr.d t8, vr5, 0
cto.d t8, t8
blt t8, s7, 7b
addi.d t7, t7, 1 // pos_x + 1
/* Core computing loop expansion (third) */
andi t4, s5, 0xff8 // x_sb_align
blt t7, a2, .n_posx
addi.d t8, t4, -8
blt t7, t8, .n_posx
bge t7, a3, .n_posx
addi.d t4, t4, 16
bge t7, t4, .n_posx
slli.d t4, t7, 2
add.d t4, t4, t7 // pos_x * 5
add.d t4, s0, t4 // rp_proj[pos + pos_x]
vstelm.w vr3, t4, 0, 0
vstelm.b vr11, t4, 4, 4
addi.d s5, s5, 1 // x + 1
bge s5, t1, .ret_posx
addi.d a7, a7, 5 // rb + 1
vld vr4, a7, 0 // [rb]
vseq.b vr5, vr4, vr3
vpickve2gr.d t8, vr5, 0
cto.d t8, t8
blt t8, s7, 7b
addi.d t7, t7, 1 // pos_x + 1
b .loop_posx
.n_posy:
addi.d s5, s5, 1 // x + 1
bge s5, t1, .ret_posx
addi.d a7, a7, 5 // rb + 1
vld vr4, a7, 0 // [rb]
vseq.b vr5, vr4, vr3
vpickve2gr.d t8, vr5, 0
cto.d t8, t8
blt t8, s7, 7b
addi.d s5, s5, 1 // x + 1
bge s5, t1, .ret_posx
addi.d a7, a7, 5 // rb + 1
vld vr4, a7, 0 // [rb]
vseq.b vr5, vr4, vr3
vpickve2gr.d t8, vr5, 0
cto.d t8, t8
blt t8, s7, 7b
b .n_posy
.end_x:
addi.d s5, s5, 1 // x + 1
blt s5, t1, 7b
.ret_posx:
add.d s1, s1, t2 // r + stride
addi.d s2, s2, 1 // y + 1
blt s2, a5, 6b
8:
addi.d a6, a6, 1 // n + 1
addi.d t5, t5, 28 // mfmv_ref2ref(offset) + 28
blt a6, s8, 5b
.end_load:
ld.d s0, sp, 0
ld.d s1, sp, 8
ld.d s2, sp, 16
ld.d s3, sp, 24
ld.d s4, sp, 32
ld.d s5, sp, 40
ld.d s6, sp, 48
ld.d s7, sp, 56
ld.d s8, sp, 64
addi.d sp, sp, 80
endfunc
const mv_tbls
.byte 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255
.byte 0, 1, 2, 3, 8, 0, 1, 2, 3, 8, 0, 1, 2, 3, 8, 0
.byte 4, 5, 6, 7, 9, 4, 5, 6, 7, 9, 4, 5, 6, 7, 9, 4
.byte 4, 5, 6, 7, 9, 4, 5, 6, 7, 9, 4, 5, 6, 7, 9, 4
endconst
const mask_mult
.byte 1, 0, 2, 0, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0
endconst
const mask_mv0
.byte 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
endconst
const mask_mv1
.byte 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19
endconst
// void dav1d_save_tmvs_lsx(refmvs_temporal_block *rp, ptrdiff_t stride,
// refmvs_block **rr, const uint8_t *ref_sign,
// int col_end8, int row_end8,
// int col_start8, int row_start8)
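/* Layout notes (hedged; the field names and 5-byte packing are assumptions
 * inferred from the li.d s0, 5 scaling and the stores in the 10:/20: stubs):
 *
 *   typedef struct { int32_t mv; uint8_t ref; } tmv_entry;   // 5 bytes, packed
 *
 * Each .save_tevs_tbl pair, indexed by cand_b->bs, appears to hold
 *   [0] n4 * 12 : n4 entries to emit, scaled by sizeof(refmvs_block) == 12;
 *                 the code advances cand_b by twice this many bytes
 *                 (two refmvs_block per emitted entry),
 *   [1] offset  : distance from .save_tevs_tbl back to the store stub
 *                 (10:/20:/40:/80:/160:) that writes n4 tmv_entry records,
 *                 the wider ones via overlapping 16-byte vector stores.
 */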
function save_tmvs_lsx
addi.d sp, sp, -0x28
st.d s0, sp, 0x00
st.d s1, sp, 0x08
st.d s2, sp, 0x10
st.d s3, sp, 0x18
st.d s4, sp, 0x20
move t0, ra
vxor.v vr10, vr10, vr10
vld vr11, a3, 0 // Load ref_sign[0] ~ Load ref_sign[7]
la.local t2, .save_tevs_tbl
la.local s1, mask_mult
la.local t7, mv_tbls
vld vr9, s1, 0 // Load mask_mult
vslli.d vr11, vr11, 8 // 0, ref_sign[0], ..., ref_sign[6]
la.local s3, mask_mv0
vld vr8, s3, 0 // Load mask_mv0
la.local s4, mask_mv1
vld vr7, s4, 0 // Load mask_mv1
li.d s0, 5
li.d t8, 12 * 2
mul.d a1, a1, s0 // stride *= 5
sub.d a5, a5, a7 // h = row_end8 - row_start8
slli.d a7, a7, 1 // row_start8 <<= 1
1:
li.d s0, 5
andi t3, a7, 30 // (y & 15) * 2
slli.d s4, t3, 3
ldx.d t3, a2, s4 // b = rr[(y & 15) * 2]
addi.d t3, t3, 12 // &b[... + 1]
mul.d s4, a4, t8
add.d t4, s4, t3 // end_cand_b = &b[col_end8*2 + 1]
mul.d s3, a6, t8
add.d t3, s3, t3 // cand_b = &b[x*2 + 1]
mul.d s4, a6, s0
add.d a3, s4, a0 // &rp[x]
2:
/* First cand_b */
ld.b t5, t3, 10 // cand_b->bs
vld vr0, t3, 0 // cand_b->mv and ref
alsl.d t5, t5, t2, 2 // bt2 index
ld.h s3, t3, 8 // cand_b->ref
ld.h t6, t5, 0 // bt2
move s0, t2
alsl.d t3, t6, t3, 1 // Next cand_b += bt2 * 2
vor.v vr2, vr0, vr0
vinsgr2vr.h vr1, s3, 0
move t1, t3
bge t3, t4, 3f
/* Next cand_b */
ld.b s0, t3, 10 // cand_b->bs
vld vr4, t3, 0 // cand_b->mv and ref
alsl.d s0, s0, t2, 2 // bt2 index
ld.h s4, t3, 8 // cand_b->ref
ld.h t6, s0, 0 // bt2
alsl.d t3, t6, t3, 1 // Next cand_b += bt2*2
vpackev.d vr2, vr4, vr0 // a0.mv[0] a0.mv[1] a1.mv[0], a1.mv[1]
vinsgr2vr.h vr1, s4, 1 // a0.ref[0] a0.ref[1], a1.ref[0], a1.ref[1]
3:
vabsd.h vr2, vr2, vr10 // abs(mv[].xy)
vsle.b vr16, vr10, vr1
vand.v vr1, vr16, vr1
vshuf.b vr1, vr11, vr11, vr1 // ref_sign[ref]
vsrli.h vr2, vr2, 12 // abs(mv[].xy) >> 12
vilvl.b vr1, vr1, vr1
vmulwev.h.bu vr1, vr1, vr9 // ref_sign[ref] * {1, 2}
vseqi.w vr2, vr2, 0 // abs(mv[].xy) <= 4096
vpickev.h vr2, vr2, vr2 // abs() condition to 16 bit
vand.v vr1, vr2, vr1 // h[0-3] contains conditions for mv[0-1]
vhaddw.wu.hu vr1, vr1, vr1 // Combine condition for [1] and [0]
vpickve2gr.wu s1, vr1, 0 // Extract case for first block
vpickve2gr.wu s2, vr1, 1
ld.hu t5, t5, 2 // Fetch jump table entry
ld.hu s0, s0, 2
alsl.d s3, s1, t7, 4 // Load permutation table base on case
vld vr1, s3, 0
alsl.d s4, s2, t7, 4
vld vr5, s4, 0
sub.d t5, t2, t5 // Find jump table target
sub.d s0, t2, s0
vshuf.b vr0, vr0, vr0, vr1 // Permute cand_b to output refmvs_temporal_block
vshuf.b vr4, vr4, vr4, vr5
vsle.b vr16, vr10, vr1
vand.v vr0, vr16, vr0
vsle.b vr17, vr10, vr5
vand.v vr4, vr17, vr4
// vr1 follows on vr0, with another 3 full repetitions of the pattern.
vshuf.b vr1, vr0, vr0, vr8 // 1, 2, 3, ... , 15, 16
vshuf.b vr5, vr4, vr4, vr8 // 1, 2, 3, ... , 15, 16
// vr2 ends with 3 complete repetitions of the pattern.
vshuf.b vr2, vr1, vr0, vr7
vshuf.b vr6, vr5, vr4, vr7 // 4, 5, 6, 7, ... , 12, 13, 14, 15, 16, 17, 18, 19
jirl ra, t5, 0
bge t1, t4, 4f // if (cand_b >= end)
vor.v vr0, vr4, vr4
vor.v vr1, vr5, vr5
vor.v vr2, vr6, vr6
jirl ra, s0, 0
blt t3, t4, 2b // if (cand_b < end)
4:
addi.d a5, a5, -1 // h--
addi.d a7, a7, 2 // y += 2
add.d a0, a0, a1 // rp += stride
blt zero, a5, 1b
ld.d s0, sp, 0x00
ld.d s1, sp, 0x08
ld.d s2, sp, 0x10
ld.d s3, sp, 0x18
ld.d s4, sp, 0x20
addi.d sp, sp, 0x28
move ra, t0
jirl zero, ra, 0x00
10:
addi.d s1, a3, 4
vstelm.w vr0, a3, 0, 0 // .mv
vstelm.b vr0, s1, 0, 4 // .ref
addi.d a3, a3, 5
jirl zero, ra, 0x00
20:
addi.d s1, a3, 8
vstelm.d vr0, a3, 0, 0 // .mv
vstelm.h vr0, s1, 0, 4 // .ref
addi.d a3, a3, 2 * 5
jirl zero, ra, 0x00
40:
vst vr0, a3, 0
vstelm.w vr1, a3, 0x10, 0
addi.d a3, a3, 4 * 5
jirl zero, ra, 0x00
80:
vst vr0, a3, 0
vst vr1, a3, 0x10 // This writes 6 full entries plus 2 extra bytes
vst vr2, a3, 5 * 8 - 16 // Write the last few, overlapping with the first write.
addi.d a3, a3, 8 * 5
jirl zero, ra, 0x00
160:
addi.d s1, a3, 6 * 5
addi.d s2, a3, 12 * 5
vst vr0, a3, 0
vst vr1, a3, 0x10 // This writes 6 full entries plus 2 extra bytes
vst vr0, a3, 6 * 5
vst vr1, a3, 6 * 5 + 16 // Write another 6 full entries, slightly overlapping with the first set
vstelm.d vr0, s2, 0, 0 // Write 8 bytes (one full entry) after the first 12
vst vr2, a3, 5 * 16 - 16 // Write the last 3 entries
addi.d a3, a3, 16 * 5
jirl zero, ra, 0x00
.save_tevs_tbl:
.hword 16 * 12 // bt2 * 12, 12 is sizeof(refmvs_block)
.hword .save_tevs_tbl - 160b
.hword 16 * 12
.hword .save_tevs_tbl - 160b
.hword 8 * 12
.hword .save_tevs_tbl - 80b
.hword 8 * 12
.hword .save_tevs_tbl - 80b
.hword 8 * 12
.hword .save_tevs_tbl - 80b
.hword 8 * 12
.hword .save_tevs_tbl - 80b
.hword 4 * 12
.hword .save_tevs_tbl - 40b
.hword 4 * 12
.hword .save_tevs_tbl - 40b
.hword 4 * 12
.hword .save_tevs_tbl - 40b
.hword 4 * 12
.hword .save_tevs_tbl - 40b
.hword 2 * 12
.hword .save_tevs_tbl - 20b
.hword 2 * 12
.hword .save_tevs_tbl - 20b
.hword 2 * 12
.hword .save_tevs_tbl - 20b
.hword 2 * 12
.hword .save_tevs_tbl - 20b
.hword 2 * 12
.hword .save_tevs_tbl - 20b
.hword 1 * 12
.hword .save_tevs_tbl - 10b
.hword 1 * 12
.hword .save_tevs_tbl - 10b
.hword 1 * 12
.hword .save_tevs_tbl - 10b
.hword 1 * 12
.hword .save_tevs_tbl - 10b
.hword 1 * 12
.hword .save_tevs_tbl - 10b
.hword 1 * 12
.hword .save_tevs_tbl - 10b
.hword 1 * 12
.hword .save_tevs_tbl - 10b
endfunc
third_party/dav1d/src/loongarch/msac.S
/*
* Copyright © 2023, VideoLAN and dav1d authors
* Copyright © 2023, Loongson Technology Corporation Limited
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "loongson_asm.S"
const min_prob
.short 60, 56, 52, 48, 44, 40, 36, 32, 28, 24, 20, 16, 12, 8, 4, 0
endconst
const ph_0xff00
.rept 8
.short 0xff00
.endr
endconst
.macro decode_symbol_adapt w
addi.d sp, sp, -48
vldrepl.h vr0, a0, 24 //rng
fst.s f0, sp, 0 //val==0
vld vr1, a1, 0 //cdf
.if \w == 16
vld vr11, a1, 16
.endif
vldrepl.d vr2, a0, 16 //dif
ld.w t1, a0, 32 //allow_update_cdf
la.local t2, min_prob
addi.d t2, t2, 30
slli.w t3, a2, 1
sub.d t2, t2, t3
vld vr3, t2, 0 //min_prob
.if \w == 16
vld vr13, t2, 16
.endif
vsrli.h vr4, vr0, 8 //r = s->rng >> 8
vslli.h vr4, vr4, 8 //r << 8
vsrli.h vr5, vr1, 6
vslli.h vr5, vr5, 7
.if \w == 16
vsrli.h vr15, vr11, 6
vslli.h vr15, vr15, 7
.endif
vmuh.hu vr5, vr4, vr5
vadd.h vr5, vr5, vr3 //v
.if \w == 16
vmuh.hu vr15, vr4, vr15
vadd.h vr15, vr15, vr13
.endif
addi.d t8, sp, 2
vst vr5, t8, 0 //store v
.if \w == 16
vst vr15, t8, 16
.endif
vreplvei.h vr20, vr2, 3 //c
vsle.hu vr6, vr5, vr20
.if \w == 16
vsle.hu vr16, vr15, vr20
vpickev.b vr21, vr16, vr6
.endif
.if \w <= 8
vmskltz.h vr10, vr6
.else
vmskltz.b vr10, vr21
.endif
beqz t1, .renorm\()\w
// update_cdf
alsl.d t1, a2, a1, 1
ld.h t2, t1, 0 //count
srli.w t3, t2, 4 //count >> 4
.if \w == 16
addi.w t3, t3, 5 //rate
.else
addi.w t3, t3, 4
li.w t5, 2
sltu t5, t5, a2
add.w t3, t3, t5 //rate
.endif
sltui t5, t2, 32
add.w t2, t2, t5 //count + (count < 32)
vreplgr2vr.h vr9, t3
vseq.h vr7, vr7, vr7
vavgr.hu vr5, vr6, vr7 //i >= val ? -1 : 32768
vsub.h vr5, vr5, vr1
vsub.h vr8, vr1, vr6
.if \w == 16
vavgr.hu vr15, vr16, vr7
vsub.h vr15, vr15, vr11
vsub.h vr18, vr11, vr16
.endif
vsra.h vr5, vr5, vr9
vadd.h vr8, vr8, vr5
.if \w == 4
fst.d f8, a1, 0
.else
vst vr8, a1, 0
.endif
.if \w == 16
vsra.h vr15, vr15, vr9
vadd.h vr18, vr18, vr15
vst vr18, a1, 16
.endif
st.h t2, t1, 0
.renorm\()\w:
vpickve2gr.h t3, vr10, 0
ctz.w a7, t3 // ret
alsl.d t3, a7, t8, 1
ld.hu t4, t3, 0 // v
ld.hu t5, t3, -2 // u
sub.w t5, t5, t4 // rng
slli.d t4, t4, 48
vpickve2gr.d t6, vr2, 0
sub.d t6, t6, t4 // dif
clz.w t4, t5 // d
xori t4, t4, 16 // d
sll.d t6, t6, t4
ld.w t0, a0, 28 //cnt
sll.w t5, t5, t4
sub.w t7, t0, t4 // cnt-d
st.w t5, a0, 24 // store rng
bgeu t0, t4, 9f
// refill
ld.d t0, a0, 0 // buf_pos
ld.d t1, a0, 8 // buf_end
addi.d t2, t0, 8
bltu t1, t2, 2f
ld.d t3, t0, 0 // next_bits
addi.w t1, t7, -48 // shift_bits = cnt + 16 (- 64)
nor t3, t3, t3
sub.w t2, zero, t1
revb.d t3, t3 // next_bits = bswap(next_bits)
srli.w t2, t2, 3 // num_bytes_read
srl.d t3, t3, t1 // next_bits >>= (shift_bits & 63)
b 3f
1:
addi.w t3, t7, -48
srl.d t3, t3, t3 // pad with ones
b 4f
2:
bgeu t0, t1, 1b
ld.d t3, t1, -8 // next_bits
sub.w t2, t2, t1
sub.w t1, t1, t0 // num_bytes_left
slli.w t2, t2, 3
srl.d t3, t3, t2
addi.w t2, t7, -48
nor t3, t3, t3
sub.w t4, zero, t2
revb.d t3, t3
srli.w t4, t4, 3
srl.d t3, t3, t2
sltu t2, t1, t4
maskeqz t1, t1, t2
masknez t2, t4, t2
or t2, t2, t1 // num_bytes_read
3:
slli.w t1, t2, 3
add.d t0, t0, t2
add.w t7, t7, t1 // cnt += num_bits_read
st.d t0, a0, 0
4:
or t6, t6, t3 // dif |= next_bits
9:
st.w t7, a0, 28 // store cnt
st.d t6, a0, 16 // store dif
move a0, a7
addi.d sp, sp, 48
.endm
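/* Hedged scalar sketch of the symbol search the macro above vectorizes,
 * assuming EC_PROB_SHIFT == 6 and EC_MIN_PROB == 4 (consistent with the
 * min_prob table at the top of this file); names are ours:
 *
 *   const unsigned c = s->dif >> 48;    // top 16 bits of dif
 *   unsigned r = s->rng, u = r, v, val = 0;
 *   for (;;) {
 *       v = ((r >> 8) * (cdf[val] >> 6) >> 1) + 4 * (n_symbols - val);
 *       if (v <= c) break;              // vsle.hu + vmskltz find this lane
 *       u = v;
 *       val++;
 *   }
 *   s->dif -= (uint64_t)v << 48;
 *   // new rng = u - v, then renormalize and refill as in the code above;
 *   // if allow_update_cdf: rate = 4 + (count >> 4) + (n_symbols > 2),
 *   //   cdf[i] += (32768 - cdf[i]) >> rate   for i <  val,
 *   //   cdf[i] -=  cdf[i]          >> rate   for i >= val,
 *   //   and the counter at cdf[n_symbols] saturates upward at 32.
 */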
function msac_decode_symbol_adapt4_lsx
decode_symbol_adapt 4
endfunc
function msac_decode_symbol_adapt8_lsx
decode_symbol_adapt 8
endfunc
function msac_decode_symbol_adapt16_lsx
decode_symbol_adapt 16
endfunc
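/* Hedged byte-at-a-time equivalent of the refill block shared by the
 * functions in this file (the 1:-4: labels); the assembly reads up to eight
 * bytes at once, inverts and byte-swaps them, and pads with ones past the end
 * of the buffer.  A 64-bit window is assumed; names are ours:
 *
 *   int c = 64 - cnt - 24;                        // bit position of the next byte
 *   do {
 *       if (buf_pos >= buf_end) {
 *           dif |= ~(~(uint64_t)0xff << c);       // pad with ones
 *           break;
 *       }
 *       dif |= (uint64_t)(*buf_pos++ ^ 0xff) << c;
 *       c -= 8;
 *   } while (c >= 0);
 *   cnt = 64 - c - 24;
 */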
function msac_decode_bool_lsx
ld.w t0, a0, 24 // rng
srli.w a1, a1, 6
ld.d t1, a0, 16 // dif
srli.w t2, t0, 8 // r >> 8
mul.w t2, t2, a1
ld.w a5, a0, 28 // cnt
srli.w t2, t2, 1
addi.w t2, t2, 4 // v
slli.d t3, t2, 48 // vw
sltu t4, t1, t3
move t8, t4 // ret
xori t4, t4, 1
maskeqz t6, t3, t4 // if (ret) vw
sub.d t6, t1, t6 // dif
slli.w t5, t2, 1
sub.w t5, t0, t5 // r - 2v
maskeqz t7, t5, t4 // if (ret) r - 2v
add.w t5, t2, t7 // v(rng)
// renorm
clz.w t4, t5 // d
xori t4, t4, 16 // d
sll.d t6, t6, t4
sll.w t5, t5, t4
sub.w t7, a5, t4 // cnt-d
st.w t5, a0, 24 // store rng
bgeu a5, t4, 9f
// refill
ld.d t0, a0, 0 // buf_pos
ld.d t1, a0, 8 // buf_end
addi.d t2, t0, 8
bltu t1, t2, 2f
ld.d t3, t0, 0 // next_bits
addi.w t1, t7, -48 // shift_bits = cnt + 16 (- 64)
nor t3, t3, t3
sub.w t2, zero, t1
revb.d t3, t3 // next_bits = bswap(next_bits)
srli.w t2, t2, 3 // num_bytes_read
srl.d t3, t3, t1 // next_bits >>= (shift_bits & 63)
b 3f
1:
addi.w t3, t7, -48
srl.d t3, t3, t3 // pad with ones
b 4f
2:
bgeu t0, t1, 1b
ld.d t3, t1, -8 // next_bits
sub.w t2, t2, t1
sub.w t1, t1, t0 // num_bytes_left
slli.w t2, t2, 3
srl.d t3, t3, t2
addi.w t2, t7, -48
nor t3, t3, t3
sub.w t4, zero, t2
revb.d t3, t3
srli.w t4, t4, 3
srl.d t3, t3, t2
sltu t2, t1, t4
maskeqz t1, t1, t2
masknez t2, t4, t2
or t2, t2, t1 // num_bytes_read
3:
slli.w t1, t2, 3
add.d t0, t0, t2
add.w t7, t7, t1 // cnt += num_bits_read
st.d t0, a0, 0
4:
or t6, t6, t3 // dif |= next_bits
9:
st.w t7, a0, 28 // store cnt
st.d t6, a0, 16 // store dif
move a0, t8
endfunc
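/* Hedged scalar sketch of the bool decode pattern shared by this function and
 * its neighbours: a fixed-point split v is derived from rng and the
 * probability, then dif/rng are folded and renormalized (names are ours):
 *
 *   unsigned v   = (((s->rng >> 8) * (f >> 6)) >> 1) + 4;  // _equi uses ((rng >> 8) << 7) + 4
 *   uint64_t vw  = (uint64_t)v << 48;
 *   unsigned bit = s->dif < vw;
 *   if (!bit) s->dif -= vw;
 *   s->rng = bit ? v : s->rng - v;
 *   // renorm: d = clz32(rng) ^ 16, rng <<= d, dif <<= d, cnt -= d,
 *   // refilling dif from the buffer when cnt < d.
 */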
function msac_decode_bool_equi_lsx
ld.w t0, a0, 24 // rng
ld.d t1, a0, 16 // dif
ld.w a5, a0, 28 // cnt
srli.w t2, t0, 8 // r >> 8
slli.w t2, t2, 7
addi.w t2, t2, 4 // v
slli.d t3, t2, 48 // vw
sltu t4, t1, t3
move t8, t4 // ret
xori t4, t4, 1
maskeqz t6, t3, t4 // if (ret) vw
sub.d t6, t1, t6 // dif
slli.w t5, t2, 1
sub.w t5, t0, t5 // r - 2v
maskeqz t7, t5, t4 // if (ret) r - 2v
add.w t5, t2, t7 // v(rng)
// renorm
clz.w t4, t5 // d
xori t4, t4, 16 // d
sll.d t6, t6, t4
sll.w t5, t5, t4
sub.w t7, a5, t4 // cnt-d
st.w t5, a0, 24 // store rng
bgeu a5, t4, 9f
// refill
ld.d t0, a0, 0 // buf_pos
ld.d t1, a0, 8 // buf_end
addi.d t2, t0, 8
bltu t1, t2, 2f
ld.d t3, t0, 0 // next_bits
addi.w t1, t7, -48 // shift_bits = cnt + 16 (- 64)
nor t3, t3, t3
sub.w t2, zero, t1
revb.d t3, t3 // next_bits = bswap(next_bits)
srli.w t2, t2, 3 // num_bytes_read
srl.d t3, t3, t1 // next_bits >>= (shift_bits & 63)
b 3f
1:
addi.w t3, t7, -48
srl.d t3, t3, t3 // pad with ones
b 4f
2:
bgeu t0, t1, 1b
ld.d t3, t1, -8 // next_bits
sub.w t2, t2, t1
sub.w t1, t1, t0 // num_bytes_left
slli.w t2, t2, 3
srl.d t3, t3, t2
addi.w t2, t7, -48
nor t3, t3, t3
sub.w t4, zero, t2
revb.d t3, t3
srli.w t4, t4, 3
srl.d t3, t3, t2
sltu t2, t1, t4
maskeqz t1, t1, t2
masknez t2, t4, t2
or t2, t2, t1 // num_bytes_read
3:
slli.w t1, t2, 3
add.d t0, t0, t2
add.w t7, t7, t1 // cnt += num_bits_read
st.d t0, a0, 0
4:
or t6, t6, t3 // dif |= next_bits
9:
st.w t7, a0, 28 // store cnt
st.d t6, a0, 16 // store dif
move a0, t8
endfunc
function msac_decode_bool_adapt_lsx
ld.hu a3, a1, 0 // cdf[0] (f)
ld.w t0, a0, 24 // rng
ld.d t1, a0, 16 // dif
srli.w t2, t0, 8 // r >> 8
srli.w a7, a3, 6
mul.w t2, t2, a7
ld.w a4, a0, 32 // allow_update_cdf
ld.w a5, a0, 28 // cnt
srli.w t2, t2, 1
addi.w t2, t2, 4 // v
slli.d t3, t2, 48 // vw
sltu t4, t1, t3
move t8, t4 // bit
xori t4, t4, 1
maskeqz t6, t3, t4 // if (ret) vw
sub.d t6, t1, t6 // dif
slli.w t5, t2, 1
sub.w t5, t0, t5 // r - 2v
maskeqz t7, t5, t4 // if (ret) r - 2v
add.w t5, t2, t7 // v(rng)
beqz a4, .renorm
// update_cdf
ld.hu t0, a1, 2 // cdf[1]
srli.w t1, t0, 4
addi.w t1, t1, 4 // rate
sltui t2, t0, 32 // count < 32
add.w t0, t0, t2 // count + (count < 32)
sub.w a3, a3, t8 // cdf[0] -= bit
slli.w t4, t8, 15
sub.w t7, a3, t4 // cdf[0] - bit - 32768
sra.w t7, t7, t1 // (cdf[0] - bit - 32768) >> rate
sub.w t7, a3, t7 // cdf[0]
st.h t7, a1, 0
st.h t0, a1, 2
.renorm:
clz.w t4, t5 // d
xori t4, t4, 16 // d
sll.d t6, t6, t4
sll.w t5, t5, t4
sub.w t7, a5, t4 // cnt-d
st.w t5, a0, 24 // store rng
bgeu a5, t4, 9f
// refill
ld.d t0, a0, 0 // buf_pos
ld.d t1, a0, 8 // buf_end
addi.d t2, t0, 8
bltu t1, t2, 2f
ld.d t3, t0, 0 // next_bits
addi.w t1, t7, -48 // shift_bits = cnt + 16 (- 64)
nor t3, t3, t3
sub.w t2, zero, t1
revb.d t3, t3 // next_bits = bswap(next_bits)
srli.w t2, t2, 3 // num_bytes_read
srl.d t3, t3, t1 // next_bits >>= (shift_bits & 63)
b 3f
1:
addi.w t3, t7, -48
srl.d t3, t3, t3 // pad with ones
b 4f
2:
bgeu t0, t1, 1b
ld.d t3, t1, -8 // next_bits
sub.w t2, t2, t1
sub.w t1, t1, t0 // num_bytes_left
slli.w t2, t2, 3
srl.d t3, t3, t2
addi.w t2, t7, -48
nor t3, t3, t3
sub.w t4, zero, t2
revb.d t3, t3
srli.w t4, t4, 3
srl.d t3, t3, t2
sltu t2, t1, t4
maskeqz t1, t1, t2
masknez t2, t4, t2
or t2, t2, t1 // num_bytes_read
3:
slli.w t1, t2, 3
add.d t0, t0, t2
add.w t7, t7, t1 // cnt += num_bits_read
st.d t0, a0, 0
4:
or t6, t6, t3 // dif |= next_bits
9:
st.w t7, a0, 28 // store cnt
st.d t6, a0, 16 // store dif
move a0, t8
endfunc
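/* Hedged scalar sketch of the update_cdf step in msac_decode_bool_adapt_lsx
 * above (cdf[1] is the adaptation counter, loaded from offset 2):
 *
 *   const unsigned count = cdf[1];
 *   const unsigned rate  = 4 + (count >> 4);
 *   cdf[1] = count + (count < 32);
 *   int c0 = cdf[0] - bit;
 *   cdf[0] = c0 - ((c0 - (bit << 15)) >> rate);
 */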
.macro HI_TOK allow_update_cdf
.\allow_update_cdf\()_hi_tok_lsx_start:
.if \allow_update_cdf == 1
ld.hu a4, a1, 0x06 // cdf[3]
.endif
vor.v vr1, vr0, vr0
vsrli.h vr1, vr1, 0x06 // cdf[val] >> EC_PROB_SHIFT
vstelm.h vr2, sp, 0, 0 // -0x1a
vand.v vr2, vr2, vr4 // (8 x rng) & 0xff00
vslli.h vr1, vr1, 0x07
vmuh.hu vr1, vr1, vr2
vadd.h vr1, vr1, vr5 // v += EC_MIN_PROB/* 4 */ * ((unsigned)n_symbols/* 3 */ - val);
vst vr1, sp, 0x02 // -0x18
vssub.hu vr1, vr1, vr3 // v - c
vseqi.h vr1, vr1, 0
.if \allow_update_cdf == 1
addi.d t4, a4, 0x50
srli.d t4, t4, 0x04
sltui t7, a4, 32
add.w a4, a4, t7
vreplgr2vr.h vr7, t4
vavgr.hu vr9, vr8, vr1
vsub.h vr9, vr9, vr0
vsub.h vr0, vr0, vr1
vsra.h vr9, vr9, vr7
vadd.h vr0, vr0, vr9
vstelm.d vr0, a1, 0, 0
st.h a4, a1, 0x06
.endif
vmsknz.b vr7, vr1
movfr2gr.s t4, f7
ctz.w t4, t4 // loop_times * 2
addi.d t7, t4, 2
ldx.hu t6, sp, t4 // u
ldx.hu t5, sp, t7 // v
addi.w t3, t3, 0x05
addi.w t4, t4, -0x05 // if t4 == 3, continue
sub.w t6, t6, t5 // u - v , rng for ctx_norm
slli.d t5, t5, 0x30 // (ec_win)v << (EC_WIN_SIZE - 16)
sub.d t1, t1, t5 // s->dif - ((ec_win)v << (EC_WIN_SIZE - 16))
// Init ctx_norm param
clz.w t7, t6
xori t7, t7, 0x1f
xori t7, t7, 0x0f // d = 15 ^ (31 ^ clz(rng));
sll.d t1, t1, t7 // dif << d
sll.d t6, t6, t7 // rng << d
// update vr2 8 x rng
vreplgr2vr.h vr2, t6
vreplvei.h vr2, vr2, 0
st.w t6, a0, 0x18 // store rng
move t0, t2
sub.w t2, t2, t7 // cnt - d
bgeu t0, t7, .\allow_update_cdf\()_hi_tok_lsx_ctx_norm_end // if ((unsigned)cnt < (unsigned)d) goto ctx_norm_end
// Step into ctx_fill
ld.d t5, a0, 0x00 // buf_pos
ld.d t6, a0, 0x08 // end_pos
addi.d t7, t5, 0x08 // buf_pos + 8
sub.d t7, t7, t6 // (buf_pos + 8) - end_pos
blt zero, t7, .\allow_update_cdf\()_hi_tok_lsx_ctx_refill_eob
// (end_pos - buf_pos) >= 8
ld.d t6, t5, 0x00 // load buf_pos[0]~buf_pos[7]
addi.w t7, t2, -0x30 // cnt - 0x30
nor t6, t6, t6 // not buf data
revb.d t6, t6 // Byte reversal
srl.d t6, t6, t7 // Replace left shift with right shift
sub.w t7, zero, t7 // neg
srli.w t7, t7, 0x03 // Loop times
or t1, t1, t6 // dif |= (ec_win)(*buf_pos++ ^ 0xff) << c
b .\allow_update_cdf\()_hi_tok_lsx_ctx_refill_end
.\allow_update_cdf\()_hi_tok_lsx_ctx_refill_eob:
bge t5, t6, .\allow_update_cdf\()_hi_tok_lsx_ctx_refill_one
// end_pos - buf_pos < 8 && buf_pos < end_pos
ld.d t0, t6, -0x08
slli.d t7, t7, 0x03
srl.d t6, t0, t7 // Retrieve the buf data and remove the excess data
addi.w t7, t2, -0x30 // cnt - 0x30
nor t6, t6, t6 // not
revb.d t6, t6 // Byte reversal
srl.d t6, t6, t7 // Replace left shift with right shift
sub.w t7, zero, t7 // neg
or t1, t1, t6 // dif |= (ec_win)(*buf_pos++ ^ 0xff) << c
ld.d t6, a0, 0x08 // end_pos
srli.w t7, t7, 0x03 // Loop times
sub.d t6, t6, t5 // end_pos - buf_pos
slt t0, t6, t7
maskeqz a3, t6, t0 // min(loop_times, end_pos - buf_pos)
masknez t0, t7, t0
or t7, a3, t0
b .\allow_update_cdf\()_hi_tok_lsx_ctx_refill_end
.\allow_update_cdf\()_hi_tok_lsx_ctx_refill_one:
// buf_pos >= end_pos
addi.w t7, t2, -0x10
andi t7, t7, 0xf
nor t0, zero, zero
srl.d t0, t0, t7
or t1, t1, t0 // dif |= ~(~(ec_win)0xff << c);
b .\allow_update_cdf\()_hi_tok_lsx_ctx_norm_end
.\allow_update_cdf\()_hi_tok_lsx_ctx_refill_end:
add.d t5, t5, t7 // buf_pos + Loop_times
st.d t5, a0, 0x00 // Store buf_pos
alsl.w t2, t7, t2, 0x03 // update cnt
.\allow_update_cdf\()_hi_tok_lsx_ctx_norm_end:
srli.d t7, t1, 0x30
vreplgr2vr.h vr3, t7 // broadcast the high 16 bits of dif
add.w t3, t4, t3 // update control parameter
beqz t3, .\allow_update_cdf\()_hi_tok_lsx_end // control loop for at most 4 times.
blt zero, t4, .\allow_update_cdf\()_hi_tok_lsx_start // tok_br == 3
.\allow_update_cdf\()_hi_tok_lsx_end:
addi.d t3, t3, 0x1e
st.d t1, a0, 0x10 // store dif
st.w t2, a0, 0x1c // store cnt
srli.w a0, t3, 0x01 // tok
addi.d sp, sp, 0x1a
.endm
/**
* @param unsigned dav1d_msac_decode_hi_tok_c(MsacContext *const s, uint16_t *const cdf)
* * Reg Allocation
* * vr0: cdf;
* * vr1: temp;
* * vr2: rng;
* * vr3: dif;
* * vr4: const 0xff00ff00...ff00ff00;
* * vr5: const 0x0004080c;
* * vr6: const 0;
* * t0: allow_update_cdf, tmp;
* * t1: dif;
* * t2: cnt;
* * t3: 0xffffffe8, outermost control parameter;
* * t4: loop time
* * t5: v, buf_pos, temp;
* * t6: u, rng, end_pos, buf, temp;
* * t7: temp;
*/
function msac_decode_hi_tok_lsx
fld.d f0, a1, 0 // Load cdf[0]~cdf[3]
vldrepl.h vr2, a0, 0x18 // 8 x rng, assert(rng <= 65535U), only the lower 16 bits are valid
vldrepl.h vr3, a0, 0x16 // broadcast the high 16 bits of dif, c = s->dif >> (EC_WIN_SIZE - 16)
ld.w t0, a0, 0x20 // allow_update_cdf
la.local t7, ph_0xff00
vld vr4, t7, 0x00 // 0xff00ff00...ff00ff00
la.local t7, min_prob
vld vr5, t7, 12 * 2 // 0x0004080c
vxor.v vr6, vr6, vr6 // const 0
ld.d t1, a0, 0x10 // dif
ld.w t2, a0, 0x1c // cnt
orn t3, t3, t3
srli.d t3, t3, 32
addi.d t3, t3, -0x17 // 0xffffffe8
vseq.h vr8, vr8, vr8
addi.d sp, sp, -0x1a // alloc stack
beqz t0, .hi_tok_lsx_no_update_cdf
HI_TOK 1
jirl zero, ra, 0x0
.hi_tok_lsx_no_update_cdf:
HI_TOK 0
endfunc
third_party/dav1d/src/riscv/asm.S
/*
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2023, Nathan Egge
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef DAV1D_SRC_RISCV_ASM_S
#define DAV1D_SRC_RISCV_ASM_S
#include "config.h"
#if !defined(PIC)
#if defined(__PIC__)
#define PIC __PIC__
#elif defined(__pic__)
#define PIC __pic__
#endif
#endif
#ifndef PRIVATE_PREFIX
#define PRIVATE_PREFIX dav1d_
#endif
#define PASTE(a,b) a ## b
#define CONCAT(a,b) PASTE(a,b)
#ifdef PREFIX
#define EXTERN CONCAT(_,PRIVATE_PREFIX)
#else
#define EXTERN PRIVATE_PREFIX
#endif
.macro arch ext:req, more:vararg
.option arch, +\ext
.ifnb \more
arch \more
.endif
.endm
.macro function name, export=0, ext=
.macro endfunc
#ifdef __ELF__
.size \name, . - \name
#endif
.option pop
.purgem endfunc
.endm
.text
.option push
.ifnb \ext
arch \ext
.endif
.if \export
.global EXTERN\name
#ifdef __ELF__
.type EXTERN\name, %function
.hidden EXTERN\name
#elif defined(__MACH__)
.private_extern EXTERN\name
#endif
EXTERN\name:
.else
#ifdef __ELF__
.type \name, %function
#endif
.endif
\name:
.endm
.macro const name, export=0, align=2
.macro endconst
#ifdef __ELF__
.size \name, . - \name
#endif
.purgem endconst
.endm
#if defined(_WIN32)
.section .rdata
#elif !defined(__MACH__)
.section .rodata
#else
.const_data
#endif
.align \align
.if \export
.global EXTERN\name
#ifdef __ELF__
.hidden EXTERN\name
#elif defined(__MACH__)
.private_extern EXTERN\name
#endif
EXTERN\name:
.endif
\name:
.endm
.macro thread_local name, align=3, quads=1
.macro end_thread_local
.size \name, . - \name
.purgem end_thread_local
.endm
.section .tbss, "waT"
.align \align
.hidden \name
\name:
.rept \quads
.quad 0
.endr
end_thread_local
.endm
#define L(x) .L ## x
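/* The defines below are assumed to mirror the layout of the RVV vtype CSR
 * (vma = bit 7, vta = bit 6, vsew = bits 5:3, vlmul = bits 2:0), so that a
 * vtype operand for a register-form vsetvl can be assembled by OR-ing them
 * together; for example MA | TA | E16 | M1 evaluates to 0xc8. */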
#define MA (1 << 7)
#define TA (1 << 6)
#define E8 (0 << 3)
#define E16 (1 << 3)
#define E32 (2 << 3)
#define E64 (3 << 3)
#define M1 0
#define M2 1
#define M4 2
#define M8 3
#define MF2 7
#define MF4 6
#define MF8 5
#endif /* DAV1D_SRC_RISCV_ASM_S */
|
Admenri/urge
| 10,907
|
third_party/dav1d/src/arm/asm.S
|
/*
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2018, Janne Grunau
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef DAV1D_SRC_ARM_ASM_S
#define DAV1D_SRC_ARM_ASM_S
#include "config.h"
#if ARCH_AARCH64
#define x18 do_not_use_x18
#define w18 do_not_use_w18
#if HAVE_AS_ARCH_DIRECTIVE
.arch AS_ARCH_LEVEL
#endif
#if HAVE_AS_ARCHEXT_DOTPROD_DIRECTIVE
#define ENABLE_DOTPROD .arch_extension dotprod
#define DISABLE_DOTPROD .arch_extension nodotprod
#else
#define ENABLE_DOTPROD
#define DISABLE_DOTPROD
#endif
#if HAVE_AS_ARCHEXT_I8MM_DIRECTIVE
#define ENABLE_I8MM .arch_extension i8mm
#define DISABLE_I8MM .arch_extension noi8mm
#else
#define ENABLE_I8MM
#define DISABLE_I8MM
#endif
#if HAVE_AS_ARCHEXT_SVE_DIRECTIVE
#define ENABLE_SVE .arch_extension sve
#define DISABLE_SVE .arch_extension nosve
#else
#define ENABLE_SVE
#define DISABLE_SVE
#endif
#if HAVE_AS_ARCHEXT_SVE2_DIRECTIVE
#define ENABLE_SVE2 .arch_extension sve2
#define DISABLE_SVE2 .arch_extension nosve2
#else
#define ENABLE_SVE2
#define DISABLE_SVE2
#endif
/* If we do support the .arch_extension directives, disable support for all
* the extensions that we may use, in case they were implicitly enabled by
* the .arch level. This makes it clear if we try to assemble an instruction
* from an unintended extension set; we only allow assembling such instructions
* within regions where we explicitly enable those extensions. */
DISABLE_DOTPROD
DISABLE_I8MM
DISABLE_SVE
DISABLE_SVE2
/* Support macros for
* - Armv8.3-A Pointer Authentication and
* - Armv8.5-A Branch Target Identification
* features which require emitting a .note.gnu.property section with the
* appropriate architecture-dependent feature bits set.
*
* |AARCH64_SIGN_LINK_REGISTER| and |AARCH64_VALIDATE_LINK_REGISTER| expand to
* PACIxSP and AUTIxSP, respectively. |AARCH64_SIGN_LINK_REGISTER| should be
* used immediately before saving the LR register (x30) to the stack.
* |AARCH64_VALIDATE_LINK_REGISTER| should be used immediately after restoring
* it. Note |AARCH64_SIGN_LINK_REGISTER|'s modifications to LR must be undone
* with |AARCH64_VALIDATE_LINK_REGISTER| before RET. The SP register must also
* have the same value at the two points. For example:
*
* .global f
* f:
* AARCH64_SIGN_LINK_REGISTER
* stp x29, x30, [sp, #-96]!
* mov x29, sp
* ...
* ldp x29, x30, [sp], #96
* AARCH64_VALIDATE_LINK_REGISTER
* ret
*
* |AARCH64_VALID_CALL_TARGET| expands to BTI 'c'. Either it, or
* |AARCH64_SIGN_LINK_REGISTER|, must be used at every point that may be an
* indirect call target. In particular, all symbols exported from a file must
* begin with one of these macros. For example, a leaf function that does not
* save LR can instead use |AARCH64_VALID_CALL_TARGET|:
*
* .globl return_zero
* return_zero:
* AARCH64_VALID_CALL_TARGET
* mov x0, #0
* ret
*
* A non-leaf function which does not immediately save LR may need both macros
* because |AARCH64_SIGN_LINK_REGISTER| appears late. For example, the function
* may jump to an alternate implementation before setting up the stack:
*
* .globl with_early_jump
* with_early_jump:
* AARCH64_VALID_CALL_TARGET
* cmp x0, #128
* b.lt .Lwith_early_jump_128
* AARCH64_SIGN_LINK_REGISTER
* stp x29, x30, [sp, #-96]!
* mov x29, sp
* ...
* ldp x29, x30, [sp], #96
* AARCH64_VALIDATE_LINK_REGISTER
* ret
*
* .Lwith_early_jump_128:
* ...
* ret
*
* These annotations are only required with indirect calls. Private symbols that
* are only the target of direct calls do not require annotations. Also note
* that |AARCH64_VALID_CALL_TARGET| is only valid for indirect calls (BLR), not
* indirect jumps (BR). Indirect jumps in assembly are supported through
* |AARCH64_VALID_JUMP_TARGET|. Landing Pads which shall serve for jumps and
* calls can be created using |AARCH64_VALID_JUMP_CALL_TARGET|.
*
* Although not necessary, it is safe to use these macros in 32-bit ARM
* assembly. This may be used to simplify dual 32-bit and 64-bit files.
*
* References:
* - "ELF for the Arm® 64-bit Architecture"
* https://github.com/ARM-software/abi-aa/blob/master/aaelf64/aaelf64.rst
* - "Providing protection for complex software"
* https://developer.arm.com/architectures/learn-the-architecture/providing-protection-for-complex-software
*/
#if defined(__ARM_FEATURE_BTI_DEFAULT) && (__ARM_FEATURE_BTI_DEFAULT == 1)
#define GNU_PROPERTY_AARCH64_BTI (1 << 0) // Has Branch Target Identification
#define AARCH64_VALID_JUMP_CALL_TARGET hint #38 // BTI 'jc'
#define AARCH64_VALID_CALL_TARGET hint #34 // BTI 'c'
#define AARCH64_VALID_JUMP_TARGET hint #36 // BTI 'j'
#else
#define GNU_PROPERTY_AARCH64_BTI 0 // No Branch Target Identification
#define AARCH64_VALID_JUMP_CALL_TARGET
#define AARCH64_VALID_CALL_TARGET
#define AARCH64_VALID_JUMP_TARGET
#endif
#if defined(__ARM_FEATURE_PAC_DEFAULT)
#if ((__ARM_FEATURE_PAC_DEFAULT & (1 << 0)) != 0) // authentication using key A
#define AARCH64_SIGN_LINK_REGISTER paciasp
#define AARCH64_VALIDATE_LINK_REGISTER autiasp
#elif ((__ARM_FEATURE_PAC_DEFAULT & (1 << 1)) != 0) // authentication using key B
#define AARCH64_SIGN_LINK_REGISTER pacibsp
#define AARCH64_VALIDATE_LINK_REGISTER autibsp
#else
#error Pointer authentication defines no valid key!
#endif
#if ((__ARM_FEATURE_PAC_DEFAULT & (1 << 2)) != 0) // authentication of leaf functions
#error Authentication of leaf functions is enabled but not supported in dav1d!
#endif
#define GNU_PROPERTY_AARCH64_PAC (1 << 1)
#elif defined(__APPLE__) && defined(__arm64e__)
#define GNU_PROPERTY_AARCH64_PAC 0
#define AARCH64_SIGN_LINK_REGISTER pacibsp
#define AARCH64_VALIDATE_LINK_REGISTER autibsp
#else /* __ARM_FEATURE_PAC_DEFAULT */
#define GNU_PROPERTY_AARCH64_PAC 0
#define AARCH64_SIGN_LINK_REGISTER
#define AARCH64_VALIDATE_LINK_REGISTER
#endif /* !__ARM_FEATURE_PAC_DEFAULT */
#if (GNU_PROPERTY_AARCH64_BTI != 0 || GNU_PROPERTY_AARCH64_PAC != 0) && defined(__ELF__)
.pushsection .note.gnu.property, "a"
.balign 8
.long 4
.long 0x10
.long 0x5
.asciz "GNU"
.long 0xc0000000 /* GNU_PROPERTY_AARCH64_FEATURE_1_AND */
.long 4
.long (GNU_PROPERTY_AARCH64_BTI | GNU_PROPERTY_AARCH64_PAC)
.long 0
.popsection
#endif /* (GNU_PROPERTY_AARCH64_BTI != 0 || GNU_PROPERTY_AARCH64_PAC != 0) && defined(__ELF__) */
#endif /* ARCH_AARCH64 */
#if ARCH_ARM
.syntax unified
#ifdef __ELF__
.arch armv7-a
.fpu neon
.eabi_attribute 10, 0 // suppress Tag_FP_arch
.eabi_attribute 12, 0 // suppress Tag_Advanced_SIMD_arch
.section .note.GNU-stack,"",%progbits // Mark stack as non-executable
#endif /* __ELF__ */
#ifdef _WIN32
#define CONFIG_THUMB 1
#else
#define CONFIG_THUMB 0
#endif
#if CONFIG_THUMB
.thumb
#define A @
#define T
#else
#define A
#define T @
#endif /* CONFIG_THUMB */
#endif /* ARCH_ARM */
#if !defined(PIC)
#if defined(__PIC__)
#define PIC __PIC__
#elif defined(__pic__)
#define PIC __pic__
#endif
#endif
#ifndef PRIVATE_PREFIX
#define PRIVATE_PREFIX dav1d_
#endif
#define PASTE(a,b) a ## b
#define CONCAT(a,b) PASTE(a,b)
#ifdef PREFIX
#define EXTERN CONCAT(_,PRIVATE_PREFIX)
#else
#define EXTERN PRIVATE_PREFIX
#endif
.macro function name, export=0, align=2
.macro endfunc
#ifdef __ELF__
.size \name, . - \name
#endif
#if HAVE_AS_FUNC
.endfunc
#endif
.purgem endfunc
.endm
.text
.align \align
.if \export
.global EXTERN\name
#ifdef __ELF__
.type EXTERN\name, %function
.hidden EXTERN\name
#elif defined(__MACH__)
.private_extern EXTERN\name
#endif
#if HAVE_AS_FUNC
.func EXTERN\name
#endif
EXTERN\name:
.else
#ifdef __ELF__
.type \name, %function
#endif
#if HAVE_AS_FUNC
.func \name
#endif
.endif
\name:
#if ARCH_AARCH64
.if \export
AARCH64_VALID_CALL_TARGET
.endif
#endif
.endm
.macro const name, export=0, align=2
.macro endconst
#ifdef __ELF__
.size \name, . - \name
#endif
.purgem endconst
.endm
#if defined(_WIN32)
.section .rdata
#elif !defined(__MACH__)
.section .rodata
#else
.const_data
#endif
.align \align
.if \export
.global EXTERN\name
#ifdef __ELF__
.hidden EXTERN\name
#elif defined(__MACH__)
.private_extern EXTERN\name
#endif
EXTERN\name:
.endif
\name:
.endm
.macro jumptable name
#ifdef _WIN32
// MS armasm64 doesn't seem to be able to create relocations for subtraction
// of labels in different sections; for armasm64 (and all of Windows for
// simplicity), write the jump table in the text section, to allow calculating
// differences at assembly time. See
// https://developercommunity.visualstudio.com/t/armasm64-unable-to-create-cross-section/10722340
// for reference. (LLVM can create such relocations, but checking for _WIN32
// for simplicity, as execute-only memory isn't relevant on Windows at the
// moment.)
function \name
#else
// For other platforms, write jump tables in a const data section, to allow
// working in environments where executable memory isn't readable.
const \name
#endif
.endm
.macro endjumptable
#ifdef _WIN32
endfunc
#else
endconst
#endif
.endm
#ifdef __APPLE__
#define L(x) L ## x
#else
#define L(x) .L ## x
#endif
#define X(x) CONCAT(EXTERN, x)
#endif /* DAV1D_SRC_ARM_ASM_S */
|
Admenri/urge
| 13,262
|
third_party/dav1d/src/riscv/64/cdef.S
|
/******************************************************************************
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2024, Bogdan Gligorijevic
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#include "src/riscv/asm.S"
.macro constrain_vectors vec1, vec2, vec_sub, strength, shift, vec_tmp1, vec_tmp2
vmslt.vx v0, \vec_tmp1, zero
vneg.v \vec_tmp1, \vec_tmp1, v0.t
vmmv.m v1, v0
vmslt.vx v0, \vec_tmp2, zero
vneg.v \vec_tmp2, \vec_tmp2, v0.t
vsra.vx \vec1, \vec_tmp1, \shift
vsra.vx \vec2, \vec_tmp2, \shift
vrsub.vx \vec1, \vec1, \strength
vrsub.vx \vec2, \vec2, \strength
vmax.vx \vec1, \vec1, zero
vmax.vx \vec2, \vec2, zero
vmin.vv \vec_tmp1, \vec1, \vec_tmp1
vmin.vv \vec_tmp2, \vec2, \vec_tmp2
vneg.v \vec_tmp2, \vec_tmp2, v0.t
vmmv.m v0, v1
vneg.v \vec_tmp1, \vec_tmp1, v0.t
.endm
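/* padding_fn expands a w x h source block into the int16_t tmp buffer with
 * a 2-pixel border on every side.  Sides that have no valid neighbours,
 * signalled by a cleared bit in the edges word (a7, assumed bit 0 = left,
 * bit 1 = right, bit 2 = top, bit 3 = bottom), are filled with INT16_MIN so
 * the filter ignores those taps; the remaining borders are copied from the
 * left/top/bottom/src pointers. */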
.macro padding_fn w, h
li t5, -32768 # INT16_MIN
andi t4, a7, 4
li t2, -2 # y_start
.if \w == 4
vsetivli zero, \w + 4, e16, m1, ta, ma
.else
vsetivli zero, \w + 4, e16, m2, ta, ma
.endif
vmv.v.x v0, t5
bnez t4, L(top_done_\w\()x\h)
slli t5, a1, 1
addi t5, t5, 2
slli t5, t5, 1
sub t5, a0, t5
sh1add t4, a1, t5
vse16.v v0, (t5)
vse16.v v0, (t4)
li t2, 0
L(top_done_\w\()x\h):
andi t4, a7, 8
li t3, 2 + \h # y_end
bnez t4, L(bottom_done_\w\()x\h)
li t5, \h
mul t5, a1, t5
addi t5, t5, -2
sh1add t5, t5, a0
sh1add t4, a1, t5
vse16.v v0, (t5)
vse16.v v0, (t4)
addi t3, t3, -2
L(bottom_done_\w\()x\h):
andi t4, a7, 1
li t0, -2 # x_start
.if \w == 4
vsetivli zero, 2, e16, m1, ta, ma
.else
vsetivli zero, 2, e16, m2, ta, ma
.endif
bnez t4, L(left_done_\w\()x\h)
mul t5, a1, t2
addi t5, t5, -2
sh1add t5, t5, a0
sub t0, t3, t2
3:
vse16.v v0, (t5)
sh1add t5, a1, t5
addi t0, t0, -1
bnez t0, 3b
L(left_done_\w\()x\h):
andi t4, a7, 2
li t1, 2 + \w # x_end
bnez t4, L(right_done_\w\()x\h)
mul t5, t2, a1
addi t5, t5, \w
sh1add t5, t5, a0
sub t1, t3, t2
4:
vse16.v v0, (t5)
sh1add t5, a1, t5
addi t1, t1, -1
bnez t1, 4b
li t1, \w
L(right_done_\w\()x\h):
beqz t2, L(top_skip_\w\()x\h)
mul t5, a1, t2
add t5, t0, t5
sh1add a0, t5, a0 # tmp += y_start * tmp_stride + x_start
add a5, a5, t0
sub t5, t1, t0 # x_end - x_start
slli t6, t0, 1
.if \w == 4
vsetvli zero, t5, e16, m1, ta, ma
.else
vsetvli zero, t5, e16, m2, ta, ma
.endif
5:
vle8.v v0, (a5)
addi t2, t2, 1
vzext.vf2 v2, v0
add a5, a3, a5
vse16.v v2, (a0)
sh1add a0, a1, a0
bnez t2, 5b
sub a0, a0, t6 # tmp -= x_start
L(top_skip_\w\()x\h):
li a5, \h
beqz t0, L(left_skip_\w\()x\h)
sh1add a0, t0, a0 # tmp += x_start
7:
.if \w == 4
vsetivli zero, 2, e16, m1, ta, ma
.else
vsetivli zero, 2, e16, m2, ta, ma
.endif
vle8.v v0, (a4)
addi a5, a5, -1
vzext.vf2 v2, v0
addi a4, a4, 2
vse16.v v2, (a0)
sh1add a0, a1, a0
bnez a5, 7b
li a5, \h
mul t5, a1, a5
add t5, t5, t0
slli t5, t5, 1
sub a0, a0, t5 # tmp -= h * tmp_stride + x_start
L(left_skip_\w\()x\h):
8:
.if \w == 4
vsetvli zero, t1, e16, m1, ta, ma
.else
vsetvli zero, t1, e16, m2, ta, ma
.endif
vle8.v v0, (a2)
vzext.vf2 v2, v0
vse16.v v2, (a0)
add a2, a3, a2
sh1add a0, a1, a0
addi a5, a5, -1
bnez a5, 8b
li a5, \h
sh1add a0, t0, a0 # tmp += x_start
add a6, a6, t0 # bottom += x_start
beq a5, t3, L(bottom_skip_\w\()x\h)
sub t5, t1, t0
.if \w == 4
vsetvli zero, t5, e16, m1, ta, ma
.else
vsetvli zero, t5, e16, m2, ta, ma
.endif
9:
vle8.v v0, (a6)
add a6, a3, a6
vzext.vf2 v2, v0
addi a5, a5, 1
vse16.v v2, (a0)
sh1add a0, a1, a0
bne a5, t3, 9b
L(bottom_skip_\w\()x\h):
li t6, \h
mul t6, a3, t6
sub a2, a2, t6 # src -= h * src_stride
mul t5, a1, t3
add t5, t5, t0
slli t5, t5, 1
sub a0, a0, t5 # tmp -= y_end * tmp_stride + x_start
.endm
.macro cdef_fn w, h
function cdef_filter_block_\w\()x\h\()_8bpc_rvv, export=1, ext="v,zba,zbb"
csrw vxrm, zero
addi sp, sp, -32 - 144*2
sd a5, 24(sp) # pri_strength
sd a6, 16(sp) # sec_strength
sd a7, 8(sp) # dir
ld a7, 8 + 32 + 144*2(sp) # edges
mv a6, a4 # bottom
mv a5, a3 # top
mv a4, a2 # left
mv a3, a1 # dst_stride
mv a2, a0 # dst
li a1, 12 # tmp_stride
addi a0, sp, 32 + 2*(2*12+2)
padding_fn \w, \h
ld a4, 32 + 2*144(sp) # damping
ld a5, 24(sp) # pri_strength
ld a6, 16(sp) # sec_strength
ld a7, 8(sp) # dir
beqz a5, cdef_filter_sec_only_\w\()x\h
bnez a6, cdef_filter_pri_sec_\w\()x\h
andi t0, a5, 1
li t1, 4
sub t4, t1, t0
li t1, 63
clz t2, a5
sub t1, t1, t2
sub t1, a4, t1
li t0, \h
la t2, dav1d_cdef_directions
addi t3, a7, 2
sh1add t2, t3, t2
blt zero, t1, 1f
mv t1, zero
1:
vsetivli zero, \w, e16, m1, ta, mu
lb t3, 0(t2)
vle8.v v0, (a2)
vzext.vf2 v2, v0
sh1add t6, t3, a0
slli t3, t3, 1
sub t3, a0, t3
vle16.v v4, (t6)
vle16.v v6, (t3)
vwsub.vv v8, v4, v2
vwsub.vv v16, v6, v2
vsetvli zero, zero, e32, m2, ta, mu
constrain_vectors v4, v6, v12, a5, t1, v8, v16
vmul.vx v28, v16, t4
vmacc.vx v28, t4, v8
lb t3, 1(t2)
andi t5, t4, 3
ori t5, t5, 2
sh1add t6, t3, a0
slli t3, t3, 1
sub t3, a0, t3
vsetvli zero, zero, e16, m1, ta, mu
vle16.v v4, (t6)
vle16.v v6, (t3)
vwsub.vv v8, v4, v2
vwsub.vv v16, v6, v2
vsetvli zero, zero, e32, m2, ta, mu
constrain_vectors v4, v6, v12, a5, t1, v8, v16
vmacc.vx v28, t5, v16
vmacc.vx v28, t5, v8
vmslt.vx v0, v28, zero
vadd.vi v28, v28, -1, v0.t
vsetvli zero, zero, e16, m1, ta, ma
vnclip.wi v24, v28, 4
vadd.vv v28, v2, v24
vsetvli zero, zero, e8, mf2, ta, ma
vnclipu.wi v24, v28, 0
vse8.v v24, (a2)
addi t0, t0, -1
add a2, a2, a3
sh1add a0, a1, a0
bnez t0, 1b
addi sp, sp, 32 + 144*2
ret
cdef_filter_sec_only_\w\()x\h:
li t1, 63
clz t2, a6
sub t1, t1, t2
sub t1, a4, t1
li t0, \h
la t2, dav1d_cdef_directions
addi t3, a7, 4
sh1add t3, t3, t2
sh1add t2, a7, t2
2:
vsetivli zero, \w, e16, m1, ta, mu
lb t4, 0(t3)
lb t5, 0(t2)
vle8.v v0, (a2)
vzext.vf2 v2, v0
sh1add t6, t4, a0
slli t4, t4, 1
sub t4, a0, t4
vle16.v v4, (t6)
vle16.v v6, (t4)
sh1add t4, t5, a0
slli t5, t5, 1
sub t5, a0, t5
vle16.v v8, (t4)
vle16.v v10, (t5)
vwsub.vv v12, v4, v2
vwsub.vv v14, v6, v2
vwsub.vv v16, v8, v2
vwsub.vv v18, v10, v2
vsetvli zero, zero, e32, m2, ta, mu
li t4, 2
constrain_vectors v4, v6, v12, a6, t1, v12, v14
constrain_vectors v8, v10, v14, a6, t1, v16, v18
vmul.vx v28, v18, t4
vmacc.vx v28, t4, v16
vmacc.vx v28, t4, v14
vmacc.vx v28, t4, v12
lb t4, 1(t3)
lb t5, 1(t2)
sh1add t6, t4, a0
slli t4, t4, 1
sub t4, a0, t4
vsetvli zero, zero, e16, m1, ta, mu
vle16.v v4, (t6)
vle16.v v6, (t4)
sh1add t4, t5, a0
slli t5, t5, 1
sub t5, a0, t5
vle16.v v8, (t4)
vle16.v v10, (t5)
vwsub.vv v12, v4, v2
vwsub.vv v14, v6, v2
vwsub.vv v16, v8, v2
vwsub.vv v18, v10, v2
vsetvli zero, zero, e32, m2, ta, mu
constrain_vectors v4, v6, v12, a6, t1, v12, v14
constrain_vectors v8, v10, v14, a6, t1, v16, v18
vadd.vv v4, v28, v12
vadd.vv v28, v4, v14
vadd.vv v4, v28, v16
vadd.vv v28, v4, v18
vmslt.vx v0, v28, zero
vadd.vi v28, v28, -1, v0.t
vsetvli zero, zero, e16, m1, ta, ma
vnclip.wi v24, v28, 4
vadd.vv v28, v2, v24
vsetvli zero, zero, e8, mf2, ta, ma
vnclipu.wi v24, v28, 0
vse8.v v24, (a2)
addi t0, t0, -1
add a2, a2, a3
sh1add a0, a1, a0
bnez t0, 2b
addi sp, sp, 32 + 144*2
ret
cdef_filter_pri_sec_\w\()x\h:
li t1, 63
clz t2, a5
clz t3, a6
sub t2, t1, t2
sub t3, t1, t3
sub t1, a4, t2
sub t2, a4, t3
li t0, \h
la t3, dav1d_cdef_directions
blt zero, t1, 3f
mv t1, zero
3:
vsetivli zero, \w, e16, m1, ta, ma
li t4, 4
andi t6, a5, 1
addi t5, a7, 2
sub t4, t4, t6
sh1add t5, t5, t3
vle8.v v0, (a2)
lb t6, 0(t5)
vzext.vf2 v2, v0
sh1add a4, t6, a0
slli t6, t6, 1
sub t6, a0, t6
vle16.v v4, (a4)
vle16.v v6, (t6)
vminu.vv v20, v4, v2
vmax.vv v24, v4, v2
vminu.vv v20, v6, v20
vmax.vv v24, v6, v24
vwsub.vv v8, v4, v2
vwsub.vv v16, v6, v2
vsetvli zero, zero, e32, m2, ta, mu
constrain_vectors v4, v6, v12, a5, t1, v8, v16
vmul.vx v28, v16, t4
vmacc.vx v28, t4, v8
lb t6, 1(t5)
andi t4, t4, 3
ori t4, t4, 2
sh1add a4, t6, a0
slli t6, t6, 1
sub t6, a0, t6
vsetvli zero, zero, e16, m1, ta, ma
vle16.v v4, (a4)
vle16.v v6, (t6)
vminu.vv v20, v4, v20
vmax.vv v24, v4, v24
vminu.vv v20, v6, v20
vmax.vv v24, v6, v24
vwsub.vv v8, v4, v2
vwsub.vv v16, v6, v2
vsetvli zero, zero, e32, m2, ta, mu
constrain_vectors v4, v6, v12, a5, t1, v8, v16
addi t5, a7, 4
vmacc.vx v28, t4, v16
vmacc.vx v28, t4, v8
sh1add t5, t5, t3
lb t6, 0(t5)
sh1add a4, t6, a0
slli t6, t6, 1
sub t6, a0, t6
vsetvli zero, zero, e16, m1, ta, ma
vle16.v v4, (a4)
vle16.v v6, (t6)
vminu.vv v20, v4, v20
vmax.vv v24, v4, v24
vminu.vv v20, v6, v20
vmax.vv v24, v6, v24
vwsub.vv v8, v4, v2
vwsub.vv v16, v6, v2
vsetvli zero, zero, e32, m2, ta, mu
li t6, 2
constrain_vectors v4, v6, v12, a6, t2, v8, v16
vmacc.vx v28, t6, v16
vmacc.vx v28, t6, v8
lb t6, 1(t5)
sh1add a4, t6, a0
slli t6, t6, 1
sub t6, a0, t6
vsetvli zero, zero, e16, m1, ta, ma
vle16.v v4, (a4)
vle16.v v6, (t6)
vminu.vv v20, v4, v20
vmax.vv v24, v4, v24
vminu.vv v20, v6, v20
vmax.vv v24, v6, v24
vwsub.vv v8, v4, v2
vwsub.vv v16, v6, v2
vsetvli zero, zero, e32, m2, ta, mu
constrain_vectors v4, v6, v12, a6, t2, v8, v16
sh1add t5, a7, t3
vadd.vv v4, v28, v8
vadd.vv v28, v4, v16
vsetvli zero, zero, e16, m1, ta, ma
lb t6, 0(t5)
sh1add a4, t6, a0
slli t6, t6, 1
sub t6, a0, t6
vle16.v v4, (a4)
vle16.v v6, (t6)
vminu.vv v20, v4, v20
vmax.vv v24, v4, v24
vminu.vv v20, v6, v20
vmax.vv v24, v6, v24
vwsub.vv v8, v4, v2
vwsub.vv v16, v6, v2
vsetvli zero, zero, e32, m2, ta, mu
li t6, 2
constrain_vectors v4, v6, v12, a6, t2, v8, v16
vmacc.vx v28, t6, v16
vmacc.vx v28, t6, v8
lb t6, 1(t5)
sh1add a4, t6, a0
slli t6, t6, 1
sub t6, a0, t6
vsetvli zero, zero, e16, m1, ta, ma
vle16.v v4, (a4)
vle16.v v6, (t6)
vminu.vv v20, v4, v20
vmax.vv v24, v4, v24
vminu.vv v20, v6, v20
vmax.vv v24, v6, v24
vwsub.vv v8, v4, v2
vwsub.vv v16, v6, v2
vsetvli zero, zero, e32, m2, ta, mu
constrain_vectors v4, v6, v12, a6, t2, v8, v16
vadd.vv v4, v28, v8
vadd.vv v28, v4, v16
vmslt.vx v0, v28, zero
vadd.vi v28, v28, -1, v0.t
vsetvli zero, zero, e16, m1, ta, mu
vnclip.wi v16, v28, 4
vadd.vv v28, v2, v16
vmslt.vv v0, v20, v28
vmerge.vvm v4, v20, v28, v0
vmslt.vv v0, v4, v24
vmerge.vvm v28, v24, v4, v0
vsetvli zero, zero, e8, mf2, ta, ma
vnclipu.wi v24, v28, 0
vse8.v v24, (a2)
addi t0, t0, -1
add a2, a2, a3
sh1add a0, a1, a0
bnez t0, 3b
addi sp, sp, 32 + 144*2
ret
endfunc
.endm
cdef_fn 4, 4
cdef_fn 4, 8
cdef_fn 8, 8
|
Admenri/urge
| 3,317
|
third_party/dav1d/src/riscv/64/mc16.S
|
/******************************************************************************
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2024, Nathan Egge
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#include "src/riscv/asm.S"
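/* The blend kernels below are assumed to evaluate, per pixel and with
 * round-to-nearest (vxrm = 0),
 *
 *     dst[x] = (dst[x] * (64 - m[x]) + tmp[x] * m[x] + 32) >> 6
 *
 * where m[] is the per-pixel mask for blend and the per-column
 * dav1d_obmc_masks entry for blend_v. */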
function blend_vl256_16bpc_rvv, export=1, ext=zbb
ctz t0, a3
addi t0, t0, 0xc4
j L(blend_epilog)
endfunc
function blend_16bpc_rvv, export=1, ext="v,zbb"
ctz t0, a3
addi t0, t0, 0xc5
L(blend_epilog):
csrw vxrm, zero
andi t0, t0, 0xc7
li t1, 64
ori t0, t0, 8
add a6, a3, a3
vsetvl zero, a3, t0
1:
addi a4, a4, -2
vle8.v v24, (a5)
add a5, a5, a3
vle8.v v28, (a5)
add a5, a5, a3
vle16.v v8, (a2)
add a2, a2, a6
vle16.v v12, (a2)
add a2, a2, a6
vzext.vf2 v16, v24
vzext.vf2 v20, v28
vle16.v v0, (a0)
add t0, a0, a1
vle16.v v4, (t0)
vwmulu.vv v24, v8, v16
vwmulu.vv v8, v12, v20
vrsub.vx v16, v16, t1
vrsub.vx v20, v20, t1
vwmaccu.vv v24, v0, v16
vwmaccu.vv v8, v4, v20
vnclipu.wi v0, v24, 6
vnclipu.wi v4, v8, 6
vse16.v v0, (a0)
vse16.v v4, (t0)
add a0, t0, a1
bnez a4, 1b
ret
endfunc
function blend_v_vl256_16bpc_rvv, export=1, ext=zbb
srai t0, a3, 2
ctz t0, t0
addi t0, t0, 0xc6
j L(blend_v_epilog)
endfunc
function blend_v_16bpc_rvv, export=1, ext="v,zbb"
ctz t0, a3
addi t0, t0, 0xc5
L(blend_v_epilog):
andi t0, t0, 0xc7
ori t0, t0, 8
srai t1, a3, 2
sub t1, a3, t1
vsetvl zero, t1, t0
csrw vxrm, zero
la t1, dav1d_obmc_masks
add t1, t1, a3
vle8.v v20, (t1)
li t0, 64
vzext.vf2 v16, v20
add a3, a3, a3
vrsub.vx v20, v16, t0
1:
addi a4, a4, -2
vle16.v v8, (a2)
add a2, a2, a3
vle16.v v12, (a2)
add a2, a2, a3
vle16.v v0, (a0)
add t0, a0, a1
vle16.v v4, (t0)
vwmulu.vv v24, v8, v16
vwmulu.vv v8, v12, v16
vwmaccu.vv v24, v0, v20
vwmaccu.vv v8, v4, v20
vnclipu.wi v0, v24, 6
vnclipu.wi v4, v8, 6
vse16.v v0, (a0)
vse16.v v4, (t0)
add a0, t0, a1
bnez a4, 1b
ret
endfunc
|
Admenri/urge
| 2,745
|
third_party/dav1d/src/riscv/64/pal.S
|
/******************************************************************************
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2024, Bogdan Gligorijevic
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#include "src/riscv/asm.S"
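/* pal_idx_finish packs two 4-bit palette indices per output byte and pads
 * the block out to its full size.  Per output byte the strided loads plus
 * "vmacc.vx v0, a7, v8" (a7 = 16) are assumed to compute
 *
 *     dst[x] = src[2*x] | (src[2*x + 1] << 4)
 *
 * with trailing columns/rows replicated from the last written values. */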
function pal_idx_finish_rvv, export=1, ext="v,zba,zbb"
csrw vxrm, zero
srl t0, a2, 1
sub a2, a2, a4
srl t1, a4, 1
mv t2, a5
csrr t6, vlenb
li t4, -3
ctz a6, t0
ctz t6, t6
li a7, 16
sub a6, a6, t6
li t6, 1<<4+1 // 17: replicates a 4-bit index into both nibbles of a byte
// a6 is never > 3 for VLEN >= 128; larger values would have required
// strip-mining, with a6 clamped to 3
max a6, a6, t4
li t5, 2
andi a6, a6, 7
addi t4, a1, 1
ori a6, a6, 0xc0
1:
sub t3, t0, t1
vsetvl zero, t1, a6
vlse8.v v0, (a1), t5
sh1add a1, t1, a1
vlse8.v v8, (t4), t5
sh1add t4, t1, t4
vmacc.vx v0, a7, v8
vse8.v v0, (a0)
add a0, a0, t1
ble t3, zero, 4f
lbu a4, -1(a1)
mul a4, a4, t6
vsetvl zero, t3, a6
vmv.v.x v0, a4
vse8.v v0, (a0)
add a0, a0, t3
4:
addi t2, t2, -1
add a1, a1, a2
add t4, t4, a2
bnez t2, 1b
sub t1, a3, a5
sub t2, a0, t0
ble t1, zero, 7f
vsetvl zero, t0, a6
vle8.v v0, (t2)
add t2, a0, t0
5:
addi t1, t1, -2
vse8.v v0, (a0)
vse8.v v0, (t2)
sh1add a0, t0, a0
sh1add t2, t0, t2
bnez t1, 5b
7:
ret
endfunc
|
Admenri/urge
| 40,947
|
third_party/dav1d/src/riscv/64/itx.S
|
/******************************************************************************
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2023, Nathan Egge
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#include "src/riscv/asm.S"
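/* inv_txfm_add_4x4 follows the usual two-pass scheme: run the first 1-D
 * transform (a4), transpose by storing with vsseg4e16.v and reloading,
 * zero the coefficient buffer, run the second 1-D transform (a5), downshift
 * by 4 and add the result to the destination with unsigned saturation. */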
function inv_txfm_add_4x4_rvv, export=1, ext=v
csrw vxrm, zero
vsetivli zero, 4, e16, mf2, ta, ma
vle16.v v0, (a2)
addi t0, a2, 8
vle16.v v1, (t0)
addi t0, t0, 8
vle16.v v2, (t0)
addi t0, t0, 8
vle16.v v3, (t0)
jalr t0, a4
vmv.v.x v4, zero
vsseg4e16.v v0, (a2)
vle16.v v0, (a2)
vse16.v v4, (a2)
addi t0, a2, 8
vle16.v v1, (t0)
vse16.v v4, (t0)
addi t0, t0, 8
vle16.v v2, (t0)
vse16.v v4, (t0)
addi t0, t0, 8
vle16.v v3, (t0)
vse16.v v4, (t0)
jalr t0, a5
vssra.vi v0, v0, 4
vssra.vi v1, v1, 4
vssra.vi v2, v2, 4
vssra.vi v3, v3, 4
itx_4x4_end:
vsetvli zero, zero, e8, mf4, ta, ma
vle8.v v4, (a0)
add t0, a0, a1
vle8.v v5, (t0)
add t0, t0, a1
vle8.v v6, (t0)
add t0, t0, a1
vle8.v v7, (t0)
vwaddu.wv v0, v0, v4
vwaddu.wv v1, v1, v5
vwaddu.wv v2, v2, v6
vwaddu.wv v3, v3, v7
vsetvli zero, zero, e16, mf2, ta, ma
vmax.vx v0, v0, zero
vmax.vx v1, v1, zero
vmax.vx v2, v2, zero
vmax.vx v3, v3, zero
vsetvli zero, zero, e8, mf4, ta, ma
vnclipu.wi v4, v0, 0
vnclipu.wi v5, v1, 0
vnclipu.wi v6, v2, 0
vnclipu.wi v7, v3, 0
vse8.v v4, (a0)
add a0, a0, a1
vse8.v v5, (a0)
add a0, a0, a1
vse8.v v6, (a0)
add a0, a0, a1
vse8.v v7, (a0)
ret
endfunc
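/* Identity4 scales each coefficient by sqrt(2) ~= 5793/4096.  vsmul.vx is a
 * Q15 fractional multiply, and (5793-4096)*8 / 32768 == 1697/4096, so
 * x + vsmul(x, t1) is assumed to evaluate x * 5793/4096 with saturation. */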
function inv_identity_e16_x4_rvv, export=1, ext=v
li t1, (5793-4096)*8
vsmul.vx v4, v0, t1
vsmul.vx v5, v1, t1
vsmul.vx v6, v2, t1
vsmul.vx v7, v3, t1
vsadd.vv v0, v0, v4
vsadd.vv v1, v1, v5
vsadd.vv v2, v2, v6
vsadd.vv v3, v3, v7
jr t0
endfunc
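/* 4-point inverse Walsh-Hadamard butterfly, used by the lossless
 * wht_wht 4x4 path below. */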
.macro iwht_4
vadd.vv v0, v0, v1
vsub.vv v5, v2, v3
vsub.vv v4, v0, v5
vsra.vi v4, v4, 1
vsub.vv v2, v4, v1
vsub.vv v1, v4, v3
vadd.vv v3, v5, v2
vsub.vv v0, v0, v1
.endm
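/* idct_4 is the AV1 4-point inverse DCT butterfly.  The immediates are the
 * usual 12-bit fixed-point trig constants, 2896 ~= 4096/sqrt(2),
 * 1567 ~= 4096*cos(3*pi/8) and 3784 ~= 4096*sin(3*pi/8); each vwmul/vwmacc
 * pair followed by "vnclip.wi ..., 12" computes round(a*c0 + b*c1) >> 12
 * in 32-bit intermediates with saturation back to 16 bits. */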
.macro idct_4 o0, o1, o2, o3
li t1, 2896
li t2, 1567
li t3, 3784
vwmul.vx v16, \o0, t1
vwmul.vx v18, \o0, t1
vwmacc.vx v16, t1, \o2
neg t1, t1
vwmacc.vx v18, t1, \o2
vwmul.vx v20, \o1, t3
neg t3, t3
vwmul.vx v22, \o1, t2
vwmacc.vx v20, t2, \o3
vwmacc.vx v22, t3, \o3
vnclip.wi v16, v16, 12
vnclip.wi v18, v18, 12
vnclip.wi v20, v20, 12
vnclip.wi v22, v22, 12
vsadd.vv \o0, v16, v20
vsadd.vv \o1, v18, v22
vssub.vv \o2, v18, v22
vssub.vv \o3, v16, v20
.endm
.macro iadst_4 o0, o1, o2, o3, lm2, lm
li t1, 1321
li t2, 3803
li t3, 2482
vwmul.vx v16, v0, t1
vwmul.vx v18, v0, t3
neg t1, t1
vwmacc.vx v16, t2, v2
vwmacc.vx v18, t1, v2
neg t2, t2
vwmacc.vx v16, t3, v3
vwmacc.vx v18, t2, v3
vwsub.vv v20, v0, v2
vwadd.wv v20, v20, v3
li t1, 3344
vwmul.vx v22, v1, t1
vsetvli zero, zero, e32, \lm2, ta, ma
vmul.vx v20, v20, t1
vadd.vv v24, v16, v18
vadd.vv v16, v16, v22
vadd.vv v18, v18, v22
vsub.vv v22, v24, v22
vsetvli zero, zero, e16, \lm, ta, ma
vnclip.wi \o0, v16, 12
vnclip.wi \o1, v18, 12
vnclip.wi \o2, v20, 12
vnclip.wi \o3, v22, 12
.endm
function inv_dct_e16_x4_rvv, export=1, ext=v
idct_4 v0, v1, v2, v3
jr t0
endfunc
function inv_adst_e16_x4_rvv, export=1, ext=v
iadst_4 v0, v1, v2, v3, m1, mf2
jr t0
endfunc
function inv_flipadst_e16_x4_rvv, export=1, ext=v
iadst_4 v3, v2, v1, v0, m1, mf2
jr t0
endfunc
function inv_adst_e16_x4w_rvv, export=1, ext=v
iadst_4 v0, v1, v2, v3, m2, m1
jr t0
endfunc
function inv_flipadst_e16_x4w_rvv, export=1, ext=v
iadst_4 v3, v2, v1, v0, m2, m1
jr t0
endfunc
function inv_txfm_add_wht_wht_4x4_8bpc_rvv, export=1, ext=v
csrw vxrm, zero
vsetivli zero, 4, e16, mf2, ta, ma
vle16.v v0, (a2)
addi t0, a2, 8
vle16.v v1, (t0)
addi t0, t0, 8
vle16.v v2, (t0)
addi t0, t0, 8
vle16.v v3, (t0)
vsra.vi v0, v0, 2
vsra.vi v1, v1, 2
vsra.vi v2, v2, 2
vsra.vi v3, v3, 2
iwht_4
vmv.v.x v4, zero
vsseg4e16.v v0, (a2)
vle16.v v0, (a2)
vse16.v v4, (a2)
addi t0, a2, 8
vle16.v v1, (t0)
vse16.v v4, (t0)
addi t0, t0, 8
vle16.v v2, (t0)
vse16.v v4, (t0)
addi t0, t0, 8
vle16.v v3, (t0)
vse16.v v4, (t0)
iwht_4
j itx_4x4_end
endfunc
.macro def_fn_4x4 txfm1, txfm2
function inv_txfm_add_\txfm1\()_\txfm2\()_4x4_8bpc_rvv, export=1, ext=v
.ifc \txfm1\()_\txfm2, dct_dct
beqz a3, 1f
.endif
la a4, inv_\txfm1\()_e16_x4_rvv
la a5, inv_\txfm2\()_e16_x4_rvv
j inv_txfm_add_4x4_rvv
.ifc \txfm1\()_\txfm2, dct_dct
1:
csrw vxrm, zero
vsetivli zero, 4, e16, mf2, ta, ma
ld t2, (a2)
li t1, 2896*8
vmv.v.x v0, t2
vsmul.vx v0, v0, t1
sd x0, (a2)
vsmul.vx v0, v0, t1
vssra.vi v0, v0, 4
vmv.v.v v1, v0
vmv.v.v v2, v0
vmv.v.v v3, v0
j itx_4x4_end
.endif
endfunc
.endm
def_fn_4x4 dct, dct
def_fn_4x4 identity, identity
def_fn_4x4 dct, adst
def_fn_4x4 dct, flipadst
def_fn_4x4 dct, identity
def_fn_4x4 adst, dct
def_fn_4x4 adst, adst
def_fn_4x4 adst, flipadst
def_fn_4x4 flipadst, dct
def_fn_4x4 flipadst, adst
def_fn_4x4 flipadst, flipadst
def_fn_4x4 identity, dct
def_fn_4x4 adst, identity
def_fn_4x4 flipadst, identity
def_fn_4x4 identity, adst
def_fn_4x4 identity, flipadst
.macro def_fn_8x8_base variant
function inv_txfm_\variant\()add_8x8_rvv, export=1, ext=v
csrw vxrm, zero
vsetivli zero, 8, e16, m1, ta, ma
vle16.v v0, (a2)
addi t0, a2, 16
vle16.v v1, (t0)
addi t0, t0, 16
vle16.v v2, (t0)
addi t0, t0, 16
vle16.v v3, (t0)
addi t0, t0, 16
vle16.v v4, (t0)
addi t0, t0, 16
vle16.v v5, (t0)
addi t0, t0, 16
vle16.v v6, (t0)
addi t0, t0, 16
vle16.v v7, (t0)
.ifc \variant, identity_
// Identity8 would double each coefficient (vsadd.vv) and the inter-pass
// downshift (vssra.vi 1) would halve it again, so both are skipped
j L(itx_8x8_epilog)
.else
jalr t0, a4
vssra.vi v0, v0, 1
vssra.vi v1, v1, 1
vssra.vi v2, v2, 1
vssra.vi v3, v3, 1
vssra.vi v4, v4, 1
vssra.vi v5, v5, 1
vssra.vi v6, v6, 1
vssra.vi v7, v7, 1
L(itx_8x8_epilog):
vsseg8e16.v v0, (a2)
vle16.v v0, (a2)
addi t0, a2, 16
vle16.v v1, (t0)
addi t0, t0, 16
vle16.v v2, (t0)
addi t0, t0, 16
vle16.v v3, (t0)
addi t0, t0, 16
vle16.v v4, (t0)
addi t0, t0, 16
vle16.v v5, (t0)
addi t0, t0, 16
vle16.v v6, (t0)
addi t0, t0, 16
vle16.v v7, (t0)
jalr t0, a5
vssra.vi v0, v0, 4
vssra.vi v1, v1, 4
vssra.vi v2, v2, 4
vssra.vi v3, v3, 4
vssra.vi v4, v4, 4
vssra.vi v5, v5, 4
vssra.vi v6, v6, 4
vssra.vi v7, v7, 4
li t1, 64
vsetvli zero, t1, e16, m8, ta, ma
vmv.v.x v8, zero
vse16.v v8, (a2)
itx_8x8_end:
vsetivli zero, 8, e8, mf2, ta, ma
vle8.v v8, (a0)
add t0, a0, a1
vle8.v v9, (t0)
add t0, t0, a1
vle8.v v10, (t0)
add t0, t0, a1
vle8.v v11, (t0)
add t0, t0, a1
vle8.v v12, (t0)
add t0, t0, a1
vle8.v v13, (t0)
add t0, t0, a1
vle8.v v14, (t0)
add t0, t0, a1
vle8.v v15, (t0)
vwaddu.wv v0, v0, v8
vwaddu.wv v1, v1, v9
vwaddu.wv v2, v2, v10
vwaddu.wv v3, v3, v11
vwaddu.wv v4, v4, v12
vwaddu.wv v5, v5, v13
vwaddu.wv v6, v6, v14
vwaddu.wv v7, v7, v15
vsetvli zero, zero, e16, m1, ta, ma
vmax.vx v0, v0, zero
vmax.vx v1, v1, zero
vmax.vx v2, v2, zero
vmax.vx v3, v3, zero
vmax.vx v4, v4, zero
vmax.vx v5, v5, zero
vmax.vx v6, v6, zero
vmax.vx v7, v7, zero
vsetvli zero, zero, e8, mf2, ta, ma
vnclipu.wi v8, v0, 0
vnclipu.wi v9, v1, 0
vnclipu.wi v10, v2, 0
vnclipu.wi v11, v3, 0
vnclipu.wi v12, v4, 0
vnclipu.wi v13, v5, 0
vnclipu.wi v14, v6, 0
vnclipu.wi v15, v7, 0
vse8.v v8, (a0)
add a0, a0, a1
vse8.v v9, (a0)
add a0, a0, a1
vse8.v v10, (a0)
add a0, a0, a1
vse8.v v11, (a0)
add a0, a0, a1
vse8.v v12, (a0)
add a0, a0, a1
vse8.v v13, (a0)
add a0, a0, a1
vse8.v v14, (a0)
add a0, a0, a1
vse8.v v15, (a0)
ret
.endif
endfunc
.endm
def_fn_8x8_base identity_
def_fn_8x8_base
function inv_identity_e16_x8_rvv, export=1, ext=v
vsadd.vv v0, v0, v0
vsadd.vv v1, v1, v1
vsadd.vv v2, v2, v2
vsadd.vv v3, v3, v3
vsadd.vv v4, v4, v4
vsadd.vv v5, v5, v5
vsadd.vv v6, v6, v6
vsadd.vv v7, v7, v7
jr t0
endfunc
.macro idct_8 o0, o1, o2, o3, o4, o5, o6, o7
idct_4 \o0, \o2, \o4, \o6
li t1, 799
li t2, 4017
li t3, 3406
li t4, 2276
vwmul.vx v22, \o1, t2
neg t2, t2
vwmul.vx v16, \o1, t1
vwmacc.vx v22, t1, \o7
vwmacc.vx v16, t2, \o7
vwmul.vx v20, \o5, t4
neg t4, t4
vwmul.vx v18, \o5, t3
vwmacc.vx v20, t3, \o3
vwmacc.vx v18, t4, \o3
vnclip.wi v16, v16, 12
vnclip.wi v18, v18, 12
vnclip.wi v20, v20, 12
vnclip.wi v22, v22, 12
vssub.vv \o7, v22, v20
vsadd.vv v22, v22, v20
vssub.vv \o1, v16, v18
vsadd.vv v16, v16, v18
li t2, 2896
vwmul.vx v18, \o7, t2
vwmul.vx v20, \o7, t2
vwmacc.vx v20, t2, \o1
neg t2, t2
vwmacc.vx v18, t2, \o1
vnclip.wi v18, v18, 12
vnclip.wi v20, v20, 12
vssub.vv \o7, \o0, v22
vsadd.vv \o0, \o0, v22
vssub.vv v17, \o2, v20
vsadd.vv \o1, \o2, v20
vssub.vv \o5, \o4, v18
vsadd.vv \o2, \o4, v18
vssub.vv \o4, \o6, v16
vsadd.vv \o3, \o6, v16
vmv.v.v \o6, v17
.endm
.macro iadst_8 o0, o1, o2, o3, o4, o5, o6, o7
li t1, 4076
li t2, 401
li t3, 3612
li t4, 1931
li t5, 2598
li t6, 3166
vwmul.vx v16, v7, t1
neg t1, t1
vwmul.vx v18, v7, t2
vwmacc.vx v16, t2, v0
vwmacc.vx v18, t1, v0
vwmul.vx v20, v5, t3
neg t3, t3
vwmul.vx v22, v5, t4
vwmacc.vx v20, t4, v2
vwmacc.vx v22, t3, v2
vwmul.vx v24, v3, t5
neg t5, t5
vwmul.vx v26, v3, t6
vwmacc.vx v24, t6, v4
vwmacc.vx v26, t5, v4
li t2, 1189
li t3, 3920
li t4, 1567
li t5, 3784
li t6, 2896
vwmul.vx v28, v1, t2
neg t2, t2
vwmul.vx v30, v1, t3
vwmacc.vx v28, t3, v6
vwmacc.vx v30, t2, v6
vnclip.wi v16, v16, 12
vnclip.wi v18, v18, 12
vnclip.wi v20, v20, 12
vnclip.wi v22, v22, 12
vnclip.wi v24, v24, 12
vnclip.wi v26, v26, 12
vnclip.wi v28, v28, 12
vnclip.wi v30, v30, 12
vssub.vv v4, v16, v24
vsadd.vv v16, v16, v24
vsadd.vv v1, v18, v26
vsadd.vv v2, v20, v28
vsadd.vv v3, v22, v30
vssub.vv v5, v18, v26
vssub.vv v6, v20, v28
vssub.vv v30, v22, v30
vsadd.vv \o0, v16, v2
vsadd.vv \o7, v1, v3
vssub.vv v2, v16, v2
vssub.vv v3, v1, v3
vwmul.vx v16, v4, t5
vwmul.vx v18, v4, t4
vwmul.vx v20, v30, t5
vwmul.vx v22, v30, t4
vwmacc.vx v16, t4, v5
neg t4, t4
vwmacc.vx v22, t5, v6
neg t5, t5
vwmacc.vx v20, t4, v6
vwmacc.vx v18, t5, v5
vnclip.wi v16, v16, 12
vnclip.wi v18, v18, 12
vnclip.wi v20, v20, 12
vnclip.wi v22, v22, 12
vsadd.vv \o1, v16, v20
vsadd.vv \o6, v18, v22
vssub.vv v16, v16, v20
vssub.vv v17, v18, v22
vwmul.vx v18, v2, t6
vwmul.vx v20, v2, t6
vwmul.vx v22, v16, t6
vwmul.vx v24, v16, t6
vwmacc.vx v18, t6, v3
vwmacc.vx v22, t6, v17
neg t6, t6
vwmacc.vx v20, t6, v3
vwmacc.vx v24, t6, v17
vnclip.wi \o3, v18, 12
vnclip.wi \o4, v20, 12
vnclip.wi \o2, v22, 12
vnclip.wi \o5, v24, 12
vmv.v.x v16, zero
vssub.vv \o1, v16, \o1
vssub.vv \o3, v16, \o3
vssub.vv \o5, v16, \o5
vssub.vv \o7, v16, \o7
.endm
function inv_dct_e16_x8_rvv, export=1, ext=v
idct_8 v0, v1, v2, v3, v4, v5, v6, v7
jr t0
endfunc
function inv_adst_e16_x8_rvv, export=1, ext=v
iadst_8 v0, v1, v2, v3, v4, v5, v6, v7
jr t0
endfunc
function inv_flipadst_e16_x8_rvv, export=1, ext=v
iadst_8 v7, v6, v5, v4, v3, v2, v1, v0
jr t0
endfunc
.macro def_fn_8x8 txfm1, txfm2
function inv_txfm_add_\txfm1\()_\txfm2\()_8x8_8bpc_rvv, export=1, ext=v
.ifc \txfm1\()_\txfm2, dct_dct
beqz a3, 1f
.endif
la a5, inv_\txfm2\()_e16_x8_rvv
.ifc \txfm1, identity
j inv_txfm_identity_add_8x8_rvv
.else
la a4, inv_\txfm1\()_e16_x8_rvv
j inv_txfm_add_8x8_rvv
.endif
.ifc \txfm1\()_\txfm2, dct_dct
1:
csrw vxrm, zero
vsetivli zero, 8, e16, m1, ta, ma
ld t2, (a2)
li t1, 2896*8
vmv.v.x v0, t2
vsmul.vx v0, v0, t1
sd x0, (a2)
vssra.vi v0, v0, 1
vsmul.vx v0, v0, t1
vssra.vi v0, v0, 4
vmv.v.v v1, v0
vmv.v.v v2, v0
vmv.v.v v3, v0
vmv.v.v v4, v0
vmv.v.v v5, v0
vmv.v.v v6, v0
vmv.v.v v7, v0
j itx_8x8_end
.endif
endfunc
.endm
def_fn_8x8 dct, dct
def_fn_8x8 identity, identity
def_fn_8x8 dct, adst
def_fn_8x8 dct, flipadst
def_fn_8x8 dct, identity
def_fn_8x8 adst, dct
def_fn_8x8 adst, adst
def_fn_8x8 adst, flipadst
def_fn_8x8 flipadst, dct
def_fn_8x8 flipadst, adst
def_fn_8x8 flipadst, flipadst
def_fn_8x8 identity, dct
def_fn_8x8 adst, identity
def_fn_8x8 flipadst, identity
def_fn_8x8 identity, adst
def_fn_8x8 identity, flipadst
function inv_txfm_add_4x8_rvv, export=1, ext=v
csrw vxrm, zero
vsetivli zero, 8, e16, m1, ta, ma
vle16.v v0, (a2)
addi t0, a2, 16
vle16.v v1, (t0)
addi t0, t0, 16
vle16.v v2, (t0)
addi t0, t0, 16
vle16.v v3, (t0)
li t1, 2896*8
.irp i, 0, 1, 2, 3
vsmul.vx v\i, v\i, t1
.endr
jalr t0, a4
vsseg4e16.v v0, (a2)
vsetivli zero, 4, e16, mf2, ta, ma
vmv.v.x v8, zero
vle16.v v0, (a2)
vse16.v v8, (a2)
.irp i, 1, 2, 3, 4, 5, 6, 7
addi a2, a2, 8
vle16.v v\i, (a2)
vse16.v v8, (a2)
.endr
jalr t0, a5
.irp i, 0, 1, 2, 3, 4, 5, 6, 7
vssra.vi v\i, v\i, 4
.endr
vsetvli zero, zero, e8, mf4, ta, ma
vle8.v v8, (a0)
add t0, a0, a1
vle8.v v9, (t0)
.irp i, 10, 11, 12, 13, 14, 15
add t0, t0, a1
vle8.v v\i, (t0)
.endr
vwaddu.wv v0, v0, v8
vwaddu.wv v1, v1, v9
vwaddu.wv v2, v2, v10
vwaddu.wv v3, v3, v11
vwaddu.wv v4, v4, v12
vwaddu.wv v5, v5, v13
vwaddu.wv v6, v6, v14
vwaddu.wv v7, v7, v15
vsetvli zero, zero, e16, mf2, ta, ma
.irp i, 0, 1, 2, 3, 4, 5, 6, 7
vmax.vx v\i, v\i, zero
.endr
vsetvli zero, zero, e8, mf4, ta, ma
vnclipu.wi v8, v0, 0
vnclipu.wi v9, v1, 0
vnclipu.wi v10, v2, 0
vnclipu.wi v11, v3, 0
vnclipu.wi v12, v4, 0
vnclipu.wi v13, v5, 0
vnclipu.wi v14, v6, 0
vnclipu.wi v15, v7, 0
vse8.v v8, (a0)
.irp i, 9, 10, 11, 12, 13, 14, 15
add a0, a0, a1
vse8.v v\i, (a0)
.endr
ret
endfunc
function inv_txfm_add_8x4_rvv, export=1, ext=v
csrw vxrm, zero
vsetivli zero, 4, e16, mf2, ta, ma
vle16.v v0, (a2)
addi t0, a2, 8
vle16.v v1, (t0)
.irp i, 2, 3, 4, 5, 6, 7
addi t0, t0, 8
vle16.v v\i, (t0)
.endr
li t1, 2896*8
.irp i, 0, 1, 2, 3, 4, 5, 6, 7
vsmul.vx v\i, v\i, t1
.endr
jalr t0, a4
vsseg8e16.v v0, (a2)
vsetivli zero, 8, e16, m1, ta, ma
vmv.v.x v4, zero
vle16.v v0, (a2)
vse16.v v4, (a2)
.irp i, 1, 2, 3
addi a2, a2, 16
vle16.v v\i, (a2)
vse16.v v4, (a2)
.endr
jalr t0, a5
vssra.vi v0, v0, 4
vssra.vi v1, v1, 4
vssra.vi v2, v2, 4
vssra.vi v3, v3, 4
vsetvli zero, zero, e8, mf2, ta, ma
vle8.v v4, (a0)
add t0, a0, a1
vle8.v v5, (t0)
add t0, t0, a1
vle8.v v6, (t0)
add t0, t0, a1
vle8.v v7, (t0)
vwaddu.wv v0, v0, v4
vwaddu.wv v1, v1, v5
vwaddu.wv v2, v2, v6
vwaddu.wv v3, v3, v7
vsetvli zero, zero, e16, m1, ta, ma
vmax.vx v0, v0, zero
vmax.vx v1, v1, zero
vmax.vx v2, v2, zero
vmax.vx v3, v3, zero
vsetvli zero, zero, e8, mf2, ta, ma
vnclipu.wi v4, v0, 0
vnclipu.wi v5, v1, 0
vnclipu.wi v6, v2, 0
vnclipu.wi v7, v3, 0
vse8.v v4, (a0)
add a0, a0, a1
vse8.v v5, (a0)
add a0, a0, a1
vse8.v v6, (a0)
add a0, a0, a1
vse8.v v7, (a0)
ret
endfunc
/* Define symbols so the transform names can be compared in the .if statements below */
.equ dct, 1
.equ identity, 2
.equ adst, 3
.equ flipadst, 4
.macro def_fn_48 w, h, txfm1, txfm2
function inv_txfm_add_\txfm1\()_\txfm2\()_\w\()x\h\()_8bpc_rvv, export=1
.if \w == 4 && (\txfm1 == adst || \txfm1 == flipadst)
la a4, inv_\txfm1\()_e16_x\w\()w_rvv
.else
la a4, inv_\txfm1\()_e16_x\w\()_rvv
.endif
.if \h == 4 && (\txfm2 == adst || \txfm2 == flipadst)
la a5, inv_\txfm2\()_e16_x\h\()w_rvv
.else
la a5, inv_\txfm2\()_e16_x\h\()_rvv
.endif
j inv_txfm_add_\w\()x\h\()_rvv
endfunc
.endm
.macro def_fns_48 w, h
def_fn_48 \w, \h, dct, dct
def_fn_48 \w, \h, identity, identity
def_fn_48 \w, \h, dct, adst
def_fn_48 \w, \h, dct, flipadst
def_fn_48 \w, \h, dct, identity
def_fn_48 \w, \h, adst, dct
def_fn_48 \w, \h, adst, adst
def_fn_48 \w, \h, adst, flipadst
def_fn_48 \w, \h, flipadst, dct
def_fn_48 \w, \h, flipadst, adst
def_fn_48 \w, \h, flipadst, flipadst
def_fn_48 \w, \h, identity, dct
def_fn_48 \w, \h, adst, identity
def_fn_48 \w, \h, flipadst, identity
def_fn_48 \w, \h, identity, adst
def_fn_48 \w, \h, identity, flipadst
.endm
def_fns_48 4, 8
def_fns_48 8, 4
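/* Identity16 scales by 2*sqrt(2) ~= 2*5793/4096.  With t1 = 2*(5793-4096)*8
 * the Q15 vsmul contributes x*2*1697/4096, the doubling vsadd contributes
 * 2*x, and the final vsadd sums them, giving x*2*5793/4096 with saturation. */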
function inv_identity_e16_x16_rvv, export=1, ext=v
li t1, 2*(5793-4096)*8
.irp i, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
vsmul.vx v16, v\i, t1
vsadd.vv v\i, v\i, v\i
vsadd.vv v\i, v\i, v16
.endr
jr t0
endfunc
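/* The odd-stage immediates below (401, 4076, 3166, 2598, 1931, 3612, 3920,
 * 1189) are assumed to be the 12-bit fixed-point values 4096*cos(k*pi/32)
 * and 4096*sin(k*pi/32) for k in {1, 3, 5, 7}, i.e. the rotation constants
 * of the AV1 16-point inverse DCT. */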
function inv_dct_e16_x16_rvv, export=1, ext=v
idct_8 v0, v2, v4, v6, v8, v10, v12, v14
li t1, 401
li t2, 4076
li t3, 3166
li t4, 2598
vwmul.vx v30, v1, t2
neg t2, t2
vwmul.vx v16, v1, t1
vwmacc.vx v30, t1, v15
vwmacc.vx v16, t2, v15
vwmul.vx v28, v9, t4
neg t4, t4
vwmul.vx v18, v9, t3
vwmacc.vx v28, t3, v7
vwmacc.vx v18, t4, v7
li t1, 1931
li t2, 3612
li t3, 3920
li t4, 1189
vwmul.vx v26, v5, t2
neg t2, t2
vwmul.vx v20, v5, t1
vwmacc.vx v26, t1, v11
vwmacc.vx v20, t2, v11
vwmul.vx v24, v13, t4
neg t4, t4
vwmul.vx v22, v13, t3
vwmacc.vx v24, t3, v3
vwmacc.vx v22, t4, v3
li t2, 2896
li t3, 1567
li t4, 3784
vnclip.wi v16, v16, 12
vnclip.wi v18, v18, 12
vnclip.wi v20, v20, 12
vnclip.wi v22, v22, 12
vnclip.wi v24, v24, 12
vnclip.wi v26, v26, 12
vnclip.wi v28, v28, 12
vnclip.wi v30, v30, 12
vssub.vv v3, v16, v18
vsadd.vv v16, v16, v18
vssub.vv v5, v22, v20
vsadd.vv v22, v22, v20
vssub.vv v11, v24, v26
vsadd.vv v24, v24, v26
vssub.vv v13, v30, v28
vsadd.vv v30, v30, v28
vwmul.vx v28, v13, t4
neg t4, t4
vwmul.vx v18, v13, t3
vwmul.vx v26, v11, t3
vwmacc.vx v28, t3, v3
neg t3, t3
vwmul.vx v20, v11, t4
vwmacc.vx v18, t4, v3
vwmacc.vx v20, t3, v5
vwmacc.vx v26, t4, v5
vnclip.wi v18, v18, 12
vnclip.wi v20, v20, 12
vnclip.wi v26, v26, 12
vnclip.wi v28, v28, 12
vssub.vv v5, v18, v20
vsadd.vv v18, v18, v20
vssub.vv v11, v28, v26
vsadd.vv v28, v28, v26
vssub.vv v7, v16, v22
vsadd.vv v16, v16, v22
vssub.vv v9, v30, v24
vsadd.vv v30, v30, v24
vwmul.vx v20, v11, t2
vwmul.vx v22, v9, t2
vwmul.vx v24, v9, t2
vwmul.vx v26, v11, t2
vwmacc.vx v24, t2, v7
vwmacc.vx v26, t2, v5
neg t2, t2
vwmacc.vx v20, t2, v5
vwmacc.vx v22, t2, v7
vnclip.wi v20, v20, 12
vnclip.wi v22, v22, 12
vnclip.wi v24, v24, 12
vnclip.wi v26, v26, 12
vssub.vv v15, v0, v30
vsadd.vv v0, v0, v30
vssub.vv v17, v2, v28
vsadd.vv v1, v2, v28
vssub.vv v13, v4, v26
vsadd.vv v2, v4, v26
vssub.vv v19, v6, v24
vsadd.vv v3, v6, v24
vssub.vv v11, v8, v22
vsadd.vv v4, v8, v22
vsadd.vv v5, v10, v20
vssub.vv v10, v10, v20
vssub.vv v9, v12, v18
vsadd.vv v6, v12, v18
vssub.vv v8, v14, v16
vsadd.vv v7, v14, v16
vmv.v.v v14, v17
vmv.v.v v12, v19
jr t0
endfunc
.macro iadst_16 o0, o1, o2, o3, o4, o5, o6, o7, o8, o9, o10, o11, o12, o13, o14, o15
li t1, 4091
li t2, 201
li t3, 3973
li t4, 995
vwmul.vx v16, v15, t1
neg t1, t1
vwmul.vx v18, v15, t2
vwmacc.vx v16, t2, v0
vwmacc.vx v18, t1, v0
vwmul.vx v20, v13, t3
neg t3, t3
vwmul.vx v22, v13, t4
vwmacc.vx v20, t4, v2
vwmacc.vx v22, t3, v2
li t1, 3703
li t2, 1751
li t3, 3290
li t4, 2440
vwmul.vx v24, v11, t1
neg t1, t1
vwmul.vx v26, v11, t2
vwmacc.vx v24, t2, v4
vwmacc.vx v26, t1, v4
vwmul.vx v28, v9, t3
neg t3, t3
vwmul.vx v30, v9, t4
vwmacc.vx v28, t4, v6
vwmacc.vx v30, t3, v6
vnclip.wi v0, v16, 12
vnclip.wi v18, v18, 12
vnclip.wi v2, v20, 12
vnclip.wi v22, v22, 12
vnclip.wi v4, v24, 12
vnclip.wi v26, v26, 12
vnclip.wi v6, v28, 12
vnclip.wi v30, v30, 12
li t1, 2751
li t2, 3035
li t3, 2106
li t4, 3513
vwmul.vx v16, v7, t1
neg t1, t1
vwmul.vx v20, v7, t2
vwmacc.vx v16, t2, v8
vwmacc.vx v20, t1, v8
vwmul.vx v24, v5, t3
neg t3, t3
vwmul.vx v28, v5, t4
vwmacc.vx v24, t4, v10
vwmacc.vx v28, t3, v10
vnclip.wi v16, v16, 12
vnclip.wi v9, v20, 12
vnclip.wi v24, v24, 12
vnclip.wi v11, v28, 12
vssub.vv v8, v0, v16
vsadd.vv v0, v0, v16
vssub.vv v10, v2, v24
vsadd.vv v2, v2, v24
li t1, 1380
li t2, 3857
li t3, 601
li t4, 4052
vwmul.vx v16, v3, t1
neg t1, t1
vwmul.vx v20, v3, t2
vwmacc.vx v16, t2, v12
vwmacc.vx v20, t1, v12
vwmul.vx v24, v1, t3
neg t3, t3
vwmul.vx v28, v1, t4
vwmacc.vx v24, t4, v14
vwmacc.vx v28, t3, v14
vnclip.wi v16, v16, 12
vnclip.wi v13, v20, 12
vnclip.wi v24, v24, 12
vnclip.wi v15, v28, 12
vssub.vv v12, v4, v16
vsadd.vv v16, v4, v16
vssub.vv v14, v6, v24
vsadd.vv v20, v6, v24
vsadd.vv v1, v18, v9
vssub.vv v9, v18, v9
vsadd.vv v3, v22, v11
vssub.vv v11, v22, v11
vsadd.vv v18, v26, v13
vssub.vv v13, v26, v13
vsadd.vv v22, v30, v15
vssub.vv v15, v30, v15
vssub.vv v4, v0, v16
vsadd.vv v0, v0, v16
vssub.vv v5, v1, v18
vsadd.vv v1, v1, v18
vssub.vv v6, v2, v20
vsadd.vv v2, v2, v20
vssub.vv v7, v3, v22
vsadd.vv v3, v3, v22
li t1, 799
li t2, 4017
li t3, 3406
li t4, 2276
vwmul.vx v16, v8, t2
vwmul.vx v18, v8, t1
vwmul.vx v20, v10, t4
vwmul.vx v22, v10, t3
vwmul.vx v24, v13, t2
vwmul.vx v26, v13, t1
vwmul.vx v28, v15, t4
vwmul.vx v30, v15, t3
vwmacc.vx v16, t1, v9
neg t1, t1
vwmacc.vx v20, t3, v11
neg t3, t3
vwmacc.vx v26, t2, v12
neg t2, t2
vwmacc.vx v30, t4, v14
neg t4, t4
vwmacc.vx v18, t2, v9
vwmacc.vx v22, t4, v11
vwmacc.vx v24, t1, v12
vwmacc.vx v28, t3, v14
li t2, 2896
li t3, 1567
li t4, 3784
vnclip.wi v16, v16, 12
vnclip.wi v18, v18, 12
vnclip.wi v20, v20, 12
vnclip.wi v22, v22, 12
vnclip.wi v24, v24, 12
vnclip.wi v26, v26, 12
vnclip.wi v28, v28, 12
vnclip.wi v30, v30, 12
vsadd.vv v8, v16, v24
vsadd.vv v9, v18, v26
vsadd.vv v10, v20, v28
vsadd.vv v11, v22, v30
vssub.vv v12, v16, v24
vssub.vv v13, v18, v26
vssub.vv v14, v20, v28
vssub.vv v15, v22, v30
vwmul.vx v16, v4, t4
vwmul.vx v18, v4, t3
vwmul.vx v20, v7, t4
vwmul.vx v22, v7, t3
vwmul.vx v24, v12, t4
vwmul.vx v26, v12, t3
vwmul.vx v28, v15, t4
vwmul.vx v30, v15, t3
vwmacc.vx v16, t3, v5
vwmacc.vx v22, t4, v6
vwmacc.vx v24, t3, v13
neg t3, t3
vwmacc.vx v30, t4, v14
neg t4, t4
vwmacc.vx v20, t3, v6
vwmacc.vx v28, t3, v14
vwmacc.vx v18, t4, v5
vwmacc.vx v26, t4, v13
vnclip.wi v16, v16, 12
vnclip.wi v18, v18, 12
vnclip.wi v20, v20, 12
vnclip.wi v22, v22, 12
vnclip.wi v24, v24, 12
vnclip.wi v26, v26, 12
vnclip.wi v28, v28, 12
vnclip.wi v30, v30, 12
.ifc \o0, v0
vsadd.vv \o14, v9, v11
vssub.vv v11, v9, v11
vssub.vv v9, v1, v3
vsadd.vv \o15, v1, v3
vsadd.vv \o1, v8, v10
vssub.vv v10, v8, v10
vssub.vv v8, v0, v2
vsadd.vv \o0, v0, v2
.else
vsadd.vv \o1, v8, v10
vssub.vv v10, v8, v10
vssub.vv v8, v0, v2
vsadd.vv \o0, v0, v2
vsadd.vv v2, v9, v11
vssub.vv v11, v9, v11
vssub.vv v9, v1, v3
vsadd.vv \o15, v1, v3
vmv.v.v \o14, v2
.endif
vsadd.vv \o3, v16, v20
vssub.vv v6, v16, v20
vsadd.vv \o12, v18, v22
vssub.vv v7, v18, v22
vsadd.vv \o2, v24, v28
vssub.vv v24, v24, v28
vsadd.vv \o13, v26, v30
vssub.vv v26, v26, v30
neg t3, t2
vwmul.vx v28, v24, t2
vwmul.vx v30, v24, t2
vwmacc.vx v28, t2, v26
vwmacc.vx v30, t3, v26
vwmul.vx v24, v10, t2
vwmul.vx v26, v10, t2
vwmacc.vx v24, t2, v11
vwmacc.vx v26, t3, v11
vwmul.vx v20, v6, t2
vwmul.vx v22, v6, t2
vwmacc.vx v20, t2, v7
vwmacc.vx v22, t3, v7
vwmul.vx v16, v8, t2
vwmul.vx v18, v8, t2
vwmacc.vx v16, t2, v9
vwmacc.vx v18, t3, v9
vnclip.wi \o7, v16, 12
vnclip.wi \o8, v18, 12
vnclip.wi \o4, v20, 12
vnclip.wi \o11, v22, 12
vnclip.wi \o6, v24, 12
vnclip.wi \o9, v26, 12
vnclip.wi \o5, v28, 12
vnclip.wi \o10, v30, 12
vmv.v.x v16, zero
vssub.vv \o1, v16, \o1
vssub.vv \o3, v16, \o3
vssub.vv \o5, v16, \o5
vssub.vv \o7, v16, \o7
vssub.vv \o9, v16, \o9
vssub.vv \o11, v16, \o11
vssub.vv \o13, v16, \o13
vssub.vv \o15, v16, \o15
.endm
function inv_adst_e16_x16_rvv, export=1, ext=v
iadst_16 v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15
jr t0
endfunc
function inv_flipadst_e16_x16_rvv, export=1, ext=v
iadst_16 v15, v14, v13, v12, v11, v10, v9, v8, v7, v6, v5, v4, v3, v2, v1, v0
jr t0
endfunc
.macro def_horz_16 variant
function inv_txfm_horz\variant\()_16x8_rvv, export=1, ext=v
vmv.v.x v16, zero
vle16.v v0, (t4)
vse16.v v16, (t4)
.irp i, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
add t4, t4, t6
vle16.v v\i, (t4)
vse16.v v16, (t4)
.endr
.ifc \variant, _identity
li t1, 2*(5793-4096)*8
.irp i, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
vsmul.vx v16, v\i, t1
vsra.vi v16, v16, 1
vaadd.vv v\i, v\i, v16
.endr
j L(horz_16x8_epilog)
.else
jalr t0, a4
.irp i, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
vssra.vi v\i, v\i, 2
.endr
L(horz_16x8_epilog):
vsse16.v v0, (t5), t6
.irp i, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
addi t5, t5, 2
vsse16.v v\i, (t5), t6
.endr
jr a7
.endif
endfunc
.endm
def_horz_16 _identity
def_horz_16
function inv_txfm_add_vert_8x16_rvv, export=1, ext=v
vsetivli zero, 8, e16, m1, ta, ma
vle16.v v0, (t4)
.irp i, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
add t4, t4, t6
vle16.v v\i, (t4)
.endr
jalr t0, a5
.irp i, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
vssra.vi v\i, v\i, 4
.endr
vsetivli zero, 8, e8, mf2, ta, ma
vle8.v v16, (t5)
add t0, t5, a1
vle8.v v17, (t0)
.irp i, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
add t0, t0, a1
vle8.v v\i, (t0)
.endr
vwaddu.wv v0, v0, v16
vwaddu.wv v1, v1, v17
vwaddu.wv v2, v2, v18
vwaddu.wv v3, v3, v19
vwaddu.wv v4, v4, v20
vwaddu.wv v5, v5, v21
vwaddu.wv v6, v6, v22
vwaddu.wv v7, v7, v23
vwaddu.wv v8, v8, v24
vwaddu.wv v9, v9, v25
vwaddu.wv v10, v10, v26
vwaddu.wv v11, v11, v27
vwaddu.wv v12, v12, v28
vwaddu.wv v13, v13, v29
vwaddu.wv v14, v14, v30
vwaddu.wv v15, v15, v31
vsetvli zero, zero, e16, m1, ta, ma
.irp i, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
vmax.vx v\i, v\i, zero
.endr
vsetvli zero, zero, e8, mf2, ta, ma
vnclipu.wi v16, v0, 0
vnclipu.wi v17, v1, 0
vnclipu.wi v18, v2, 0
vnclipu.wi v19, v3, 0
vnclipu.wi v20, v4, 0
vnclipu.wi v21, v5, 0
vnclipu.wi v22, v6, 0
vnclipu.wi v23, v7, 0
vnclipu.wi v24, v8, 0
vnclipu.wi v25, v9, 0
vnclipu.wi v26, v10, 0
vnclipu.wi v27, v11, 0
vnclipu.wi v28, v12, 0
vnclipu.wi v29, v13, 0
vnclipu.wi v30, v14, 0
vnclipu.wi v31, v15, 0
vse8.v v16, (t5)
.irp i, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
add t5, t5, a1
vse8.v v\i, (t5)
.endr
jr a7
endfunc
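/* The 16x16 add runs the horizontal pass on two 16x8 halves.  If the
 * end-of-block index (a3) is below the eob_half threshold passed in a7,
 * the second half contains no nonzero coefficients, so its slice of the
 * intermediate buffer is simply zeroed instead of being transformed. */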
function inv_txfm_add_16x16_rvv, export=1, ext=v
csrw vxrm, zero
vsetivli zero, 8, e16, m1, ta, ma
addi sp, sp, -16*32
.irp i, 8, 0
addi t4, a2, \i*2
addi t5, sp, \i*16*2
.if \i == 8
blt a3, a7, 1f
.endif
li t6, 16*2
jalr a7, a6
.if \i == 8
j 2f
1:
li t1, 64
vsetvli zero, t1, e16, m8, ta, ma
vmv.v.x v0, zero
vse16.v v0, (t5)
addi t5, t5, 128
vse16.v v0, (t5)
vsetivli zero, 8, e16, m1, ta, ma
2:
.endif
.endr
.irp i, 0, 8
addi t4, sp, \i*2
addi t5, a0, \i
li t6, 16*2
jal a7, inv_txfm_add_vert_8x16_rvv
.endr
addi sp, sp, 16*32
ret
endfunc
.macro def_fn_16x16 txfm1, txfm2, eob_half
function inv_txfm_add_\txfm1\()_\txfm2\()_16x16_8bpc_rvv, export=1, ext=v
.ifc \txfm1\()_\txfm2, dct_dct
beqz a3, 1f
.endif
.ifc \txfm1, identity
la a6, inv_txfm_horz_identity_16x8_rvv
.else
la a6, inv_txfm_horz_16x8_rvv
la a4, inv_\txfm1\()_e16_x16_rvv
.endif
la a5, inv_\txfm2\()_e16_x16_rvv
li a7, \eob_half
j inv_txfm_add_16x16_rvv
.ifc \txfm1\()_\txfm2, dct_dct
1:
csrw vxrm, zero
vsetivli zero, 16, e16, m2, ta, ma
lh t2, (a2)
li t3, 2896*8
li t4, 1<<14
li t5, 0xFFFF
li t6, -0x10000
sh x0, (a2)
mul t2, t2, t3
add t2, t2, t4
srai t2, t2, 15
ble t2, t5, 3f
mv t2, t5
3:
ble t6, t2, 4f
mv t2, t6
4:
addi t2, t2, 2
srai t2, t2, 2
mul t2, t2, t3
add t2, t2, t4
srai t2, t2, 15
ble t2, t5, 5f
mv t2, t5
5:
ble t6, t2, 6f
mv t2, t6
6:
addi t2, t2, 8
srai t2, t2, 4
vmv.v.x v24, t2
vsetvli zero, zero, e8, m1, ta, ma
add t2, a1, a1
li t3, 16
2:
add t0, a0, a1
vle8.v v16, (a0)
vle8.v v17, (t0)
vwaddu.wv v0, v24, v16
vwaddu.wv v2, v24, v17
addi t3, t3, -2 # loop counter
vsetvli zero, zero, e16, m2, ta, ma
.irp i, 0, 2
vmax.vx v\i, v\i, zero
.endr
vsetvli zero, zero, e8, m1, ta, ma
vnclipu.wi v16, v0, 0
vnclipu.wi v17, v2, 0
add t0, a0, a1
vse8.v v16, (a0)
add a0, a0, t2
vse8.v v17, (t0)
bnez t3, 2b
ret
.endif
endfunc
.endm
def_fn_16x16 dct, dct, 36
def_fn_16x16 identity, identity, 36
def_fn_16x16 dct, adst, 36
def_fn_16x16 dct, flipadst, 36
def_fn_16x16 dct, identity, 8
def_fn_16x16 adst, dct, 36
def_fn_16x16 adst, adst, 36
def_fn_16x16 adst, flipadst, 36
def_fn_16x16 flipadst, dct, 36
def_fn_16x16 flipadst, adst, 36
def_fn_16x16 flipadst, flipadst, 36
def_fn_16x16 identity, dct, 8
.macro def_fn_416_base variant
function inv_txfm_\variant\()add_4x16_rvv, export=1, ext=v
csrw vxrm, zero
vsetivli zero, 8, e16, m1, ta, ma
blt a3, a6, 1f
addi t0, a2, 16
vle16.v v0, (t0)
addi t0, t0, 32
vle16.v v1, (t0)
addi t0, t0, 32
vle16.v v2, (t0)
addi t0, t0, 32
vle16.v v3, (t0)
.ifc \variant, identity_
li t1, (5793-4096)*8
vsmul.vx v8, v0, t1
vaadd.vv v4, v0, v8
vsmul.vx v8, v1, t1
vaadd.vv v5, v1, v8
vsmul.vx v8, v2, t1
vaadd.vv v6, v2, v8
vsmul.vx v8, v3, t1
vaadd.vv v7, v3, v8
.else
jalr t0, a4
vssra.vi v4, v0, 1
vssra.vi v5, v1, 1
vssra.vi v6, v2, 1
vssra.vi v7, v3, 1
.endif
j 2f
1:
.irp i, 4, 5, 6, 7
vmv.v.x v\i, zero
.endr
2:
vle16.v v0, (a2)
addi t0, a2, 32
vle16.v v1, (t0)
addi t0, t0, 32
vle16.v v2, (t0)
addi t0, t0, 32
vle16.v v3, (t0)
.ifc \variant, identity_
li t1, (5793-4096)*8
.irp i, 0, 1, 2, 3
vsmul.vx v8, v\i, t1
vaadd.vv v\i, v\i, v8
.endr
j L(itx_4x16_epilog)
.else
jalr t0, a4
vssra.vi v0, v0, 1
vssra.vi v1, v1, 1
vssra.vi v2, v2, 1
vssra.vi v3, v3, 1
L(itx_4x16_epilog):
vsseg4e16.v v0, (a2)
addi t0, a2, 64
vsseg4e16.v v4, (t0)
vsetivli zero, 4, e16, mf2, ta, ma
vmv.v.x v16, zero
vle16.v v0, (a2)
vse16.v v16, (a2)
addi t0, a2, 8
vle16.v v1, (t0)
vse16.v v16, (t0)
.irp i, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
addi t0, t0, 8
vle16.v v\i, (t0)
vse16.v v16, (t0)
.endr
jalr t0, a5
.irp i, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
vssra.vi v\i, v\i, 4
.endr
vsetvli zero, zero, e8, mf4, ta, ma
vle8.v v16, (a0)
add t0, a0, a1
vle8.v v17, (t0)
.irp i, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
add t0, t0, a1
vle8.v v\i, (t0)
.endr
vwaddu.wv v0, v0, v16
vwaddu.wv v1, v1, v17
vwaddu.wv v2, v2, v18
vwaddu.wv v3, v3, v19
vwaddu.wv v4, v4, v20
vwaddu.wv v5, v5, v21
vwaddu.wv v6, v6, v22
vwaddu.wv v7, v7, v23
vwaddu.wv v8, v8, v24
vwaddu.wv v9, v9, v25
vwaddu.wv v10, v10, v26
vwaddu.wv v11, v11, v27
vwaddu.wv v12, v12, v28
vwaddu.wv v13, v13, v29
vwaddu.wv v14, v14, v30
vwaddu.wv v15, v15, v31
vsetvli zero, zero, e16, mf2, ta, ma
.irp i, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
vmax.vx v\i, v\i, zero
.endr
vsetvli zero, zero, e8, mf4, ta, ma
vnclipu.wi v16, v0, 0
vnclipu.wi v17, v1, 0
vnclipu.wi v18, v2, 0
vnclipu.wi v19, v3, 0
vnclipu.wi v20, v4, 0
vnclipu.wi v21, v5, 0
vnclipu.wi v22, v6, 0
vnclipu.wi v23, v7, 0
vnclipu.wi v24, v8, 0
vnclipu.wi v25, v9, 0
vnclipu.wi v26, v10, 0
vnclipu.wi v27, v11, 0
vnclipu.wi v28, v12, 0
vnclipu.wi v29, v13, 0
vnclipu.wi v30, v14, 0
vnclipu.wi v31, v15, 0
vse8.v v16, (a0)
.irp i, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
add a0, a0, a1
vse8.v v\i, (a0)
.endr
ret
.endif
endfunc
function inv_txfm_\variant\()add_16x4_rvv, export=1, ext=v
csrw vxrm, zero
vsetivli zero, 4, e16, mf2, ta, ma
vle16.v v0, (a2)
addi t0, a2, 8
vle16.v v1, (t0)
.irp i, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
addi t0, t0, 8
vle16.v v\i, (t0)
.endr
.ifc \variant, identity_
li t1, 2*(5793-4096)*8
.irp i, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
vsmul.vx v16, v\i, t1
vssra.vi v16, v16, 1
vsadd.vv v\i, v\i, v16
.endr
j L(itx_16x4_epilog)
.else
jalr t0, a4
.irp i, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
vssra.vi v\i, v\i, 1
.endr
L(itx_16x4_epilog):
li t0, 32
vssseg8e16.v v0, (a2), t0
addi t1, a2, 16
vssseg8e16.v v8, (t1), t0
.irp j, 0, 8
vsetivli zero, 8, e16, m1, ta, ma
vmv.v.x v4, zero
addi t0, a2, \j*2
vle16.v v0, (t0)
vse16.v v4, (t0)
.irp i, 1, 2, 3
addi t0, t0, 32
vle16.v v\i, (t0)
vse16.v v4, (t0)
.endr
jalr t0, a5
vssra.vi v0, v0, 4
vssra.vi v1, v1, 4
vssra.vi v2, v2, 4
vssra.vi v3, v3, 4
vsetvli zero, zero, e8, mf2, ta, ma
addi t0, a0, \j
vle8.v v4, (t0)
add t0, t0, a1
vle8.v v5, (t0)
add t0, t0, a1
vle8.v v6, (t0)
add t0, t0, a1
vle8.v v7, (t0)
vwaddu.wv v0, v0, v4
vwaddu.wv v1, v1, v5
vwaddu.wv v2, v2, v6
vwaddu.wv v3, v3, v7
vsetvli zero, zero, e16, m1, ta, ma
vmax.vx v0, v0, zero
vmax.vx v1, v1, zero
vmax.vx v2, v2, zero
vmax.vx v3, v3, zero
vsetvli zero, zero, e8, mf2, ta, ma
vnclipu.wi v4, v0, 0
vnclipu.wi v5, v1, 0
vnclipu.wi v6, v2, 0
vnclipu.wi v7, v3, 0
addi t0, a0, \j
vse8.v v4, (t0)
add t0, t0, a1
vse8.v v5, (t0)
add t0, t0, a1
vse8.v v6, (t0)
add t0, t0, a1
vse8.v v7, (t0)
.endr
ret
.endif
endfunc
.endm
def_fn_416_base identity_
def_fn_416_base
.macro def_fn_416 w, h, txfm1, txfm2, eob_half
function inv_txfm_add_\txfm1\()_\txfm2\()_\w\()x\h\()_8bpc_rvv, export=1
.if \w == 4 && (\txfm1 == adst || \txfm1 == flipadst)
la a4, inv_\txfm1\()_e16_x\w\()w_rvv
.elseif \txfm1 != identity
la a4, inv_\txfm1\()_e16_x\w\()_rvv
.endif
.if \h == 4 && (\txfm2 == adst || \txfm2 == flipadst)
la a5, inv_\txfm2\()_e16_x\h\()w_rvv
.else
la a5, inv_\txfm2\()_e16_x\h\()_rvv
.endif
.if \w == 4
li a6, \eob_half
.endif
.ifc \txfm1, identity
j inv_txfm_identity_add_\w\()x\h\()_rvv
.else
j inv_txfm_add_\w\()x\h\()_rvv
.endif
endfunc
.endm
.macro def_fns_416 w, h
def_fn_416 \w, \h, dct, dct, 29
def_fn_416 \w, \h, identity, identity, 29
def_fn_416 \w, \h, dct, adst, 29
def_fn_416 \w, \h, dct, flipadst, 29
def_fn_416 \w, \h, dct, identity, 8
def_fn_416 \w, \h, adst, dct, 29
def_fn_416 \w, \h, adst, adst, 29
def_fn_416 \w, \h, adst, flipadst, 29
def_fn_416 \w, \h, flipadst, dct, 29
def_fn_416 \w, \h, flipadst, adst, 29
def_fn_416 \w, \h, flipadst, flipadst, 29
def_fn_416 \w, \h, identity, dct, 32
def_fn_416 \w, \h, adst, identity, 8
def_fn_416 \w, \h, flipadst, identity, 8
def_fn_416 \w, \h, identity, adst, 32
def_fn_416 \w, \h, identity, flipadst, 32
.endm
def_fns_416 4, 16
def_fns_416 16, 4
.macro def_fn_816_base variant
function inv_txfm_\variant\()add_8x16_rvv, export=1, ext=v
csrw vxrm, zero
vsetivli zero, 8, e16, m1, ta, ma
blt a3, a6, 1f
vmv.v.x v16, zero
addi t0, a2, 16
vle16.v v0, (t0)
vse16.v v16, (t0)
.irp i, 1, 2, 3, 4, 5, 6, 7
addi t0, t0, 32
vle16.v v\i, (t0)
vse16.v v16, (t0)
.endr
li t1, 2896*8
.ifc \variant, identity_
vsmul.vx v8, v0, t1
vsmul.vx v9, v1, t1
vsmul.vx v10, v2, t1
vsmul.vx v11, v3, t1
vsmul.vx v12, v4, t1
vsmul.vx v13, v5, t1
vsmul.vx v14, v6, t1
vsmul.vx v15, v7, t1
.else
.irp i, 0, 1, 2, 3, 4, 5, 6, 7
vsmul.vx v\i, v\i, t1
.endr
jalr t0, a4
vssra.vi v8, v0, 1
vssra.vi v9, v1, 1
vssra.vi v10, v2, 1
vssra.vi v11, v3, 1
vssra.vi v12, v4, 1
vssra.vi v13, v5, 1
vssra.vi v14, v6, 1
vssra.vi v15, v7, 1
.endif
j 2f
1:
.irp i, 8, 9, 10, 11, 12, 13, 14, 15
vmv.v.x v\i, zero
.endr
2:
vmv.v.x v16, zero
vle16.v v0, (a2)
vse16.v v16, (a2)
addi t0, a2, 32
vle16.v v1, (t0)
vse16.v v16, (t0)
.irp i, 2, 3, 4, 5, 6, 7
addi t0, t0, 32
vle16.v v\i, (t0)
vse16.v v16, (t0)
.endr
li t1, 2896*8
.irp i, 0, 1, 2, 3, 4, 5, 6, 7
vsmul.vx v\i, v\i, t1
.endr
.ifc \variant, identity_
j L(itx_8x16_epilog)
.else
jalr t0, a4
.irp i, 0, 1, 2, 3, 4, 5, 6, 7
vssra.vi v\i, v\i, 1
.endr
L(itx_8x16_epilog):
addi t4, sp, -8*32
vsseg8e16.v v0, (t4)
addi t0, t4, 8*16
vsseg8e16.v v8, (t0)
mv t5, a0
li t6, 16
jal a7, inv_txfm_add_vert_8x16_rvv
ret
.endif
endfunc
function inv_txfm_\variant\()add_16x8_rvv, export=1, ext=v
csrw vxrm, zero
vsetivli zero, 8, e16, m1, ta, ma
vle16.v v0, (a2)
addi t0, a2, 16
vle16.v v1, (t0)
.irp i, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
addi t0, t0, 16
vle16.v v\i, (t0)
.endr
li t1, 2896*8
.irp i, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
vsmul.vx v\i, v\i, t1
.endr
.ifc \variant, identity_
li t1, 2*(5793-4096)*8
.irp i, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
vsmul.vx v16, v\i, t1
vssra.vi v16, v16, 1
vsadd.vv v\i, v\i, v16
.endr
j L(itx_16x8_epilog)
.else
jalr t0, a4
.irp i, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
vssra.vi v\i, v\i, 1
.endr
L(itx_16x8_epilog):
li t0, 32
vssseg8e16.v v0, (a2), t0
addi t1, a2, 16
vssseg8e16.v v8, (t1), t0
.irp j, 0, 8
vsetivli zero, 8, e16, m1, ta, ma
vmv.v.x v8, zero
addi t0, a2, \j*2
vle16.v v0, (t0)
vse16.v v8, (t0)
.irp i, 1, 2, 3, 4, 5, 6, 7
addi t0, t0, 32
vle16.v v\i, (t0)
vse16.v v8, (t0)
.endr
jalr t0, a5
.irp i, 0, 1, 2, 3, 4, 5, 6, 7
vssra.vi v\i, v\i, 4
.endr
vsetvli zero, zero, e8, mf2, ta, ma
addi t0, a0, \j
vle8.v v8, (t0)
.irp i, 9, 10, 11, 12, 13, 14, 15
add t0, t0, a1
vle8.v v\i, (t0)
.endr
vwaddu.wv v0, v0, v8
vwaddu.wv v1, v1, v9
vwaddu.wv v2, v2, v10
vwaddu.wv v3, v3, v11
vwaddu.wv v4, v4, v12
vwaddu.wv v5, v5, v13
vwaddu.wv v6, v6, v14
vwaddu.wv v7, v7, v15
vsetvli zero, zero, e16, m1, ta, ma
.irp i, 0, 1, 2, 3, 4, 5, 6, 7
vmax.vx v\i, v\i, zero
.endr
vsetvli zero, zero, e8, mf2, ta, ma
vnclipu.wi v8, v0, 0
vnclipu.wi v9, v1, 0
vnclipu.wi v10, v2, 0
vnclipu.wi v11, v3, 0
vnclipu.wi v12, v4, 0
vnclipu.wi v13, v5, 0
vnclipu.wi v14, v6, 0
vnclipu.wi v15, v7, 0
addi t0, a0, \j
vse8.v v8, (t0)
.irp i, 9, 10, 11, 12, 13, 14, 15
add t0, t0, a1
vse8.v v\i, (t0)
.endr
.endr
ret
.endif
endfunc
.endm
def_fn_816_base identity_
def_fn_816_base
.macro def_fn_816 w, h, txfm1, txfm2, eob_half
function inv_txfm_add_\txfm1\()_\txfm2\()_\w\()x\h\()_8bpc_rvv, export=1
.ifnc \txfm1, identity
la a4, inv_\txfm1\()_e16_x\w\()_rvv
.endif
la a5, inv_\txfm2\()_e16_x\h\()_rvv
.if \w == 8
li a6, \eob_half
.endif
.ifc \txfm1, identity
j inv_txfm_identity_add_\w\()x\h\()_rvv
.else
j inv_txfm_add_\w\()x\h\()_rvv
.endif
endfunc
.endm
.macro def_fns_816 w, h
def_fn_816 \w, \h, dct, dct, 43
def_fn_816 \w, \h, identity, identity, 43
def_fn_816 \w, \h, dct, adst, 43
def_fn_816 \w, \h, dct, flipadst, 43
def_fn_816 \w, \h, dct, identity, 8
def_fn_816 \w, \h, adst, dct, 43
def_fn_816 \w, \h, adst, adst, 43
def_fn_816 \w, \h, adst, flipadst, 43
def_fn_816 \w, \h, flipadst, dct, 43
def_fn_816 \w, \h, flipadst, adst, 43
def_fn_816 \w, \h, flipadst, flipadst, 43
def_fn_816 \w, \h, identity, dct, 64
def_fn_816 \w, \h, adst, identity, 8
def_fn_816 \w, \h, flipadst, identity, 8
def_fn_816 \w, \h, identity, adst, 64
def_fn_816 \w, \h, identity, flipadst, 64
.endm
def_fns_816 8, 16
def_fns_816 16, 8
Admenri/urge | 10,063 | third_party/dav1d/src/riscv/64/ipred16.S
/******************************************************************************
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2024, Bogdan Gligorijevic
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#include "src/riscv/asm.S"
function dc_gen_16bpc_rvv, export=1, ext="v,zba,zbb"
.variant_cc dav1d_dc_gen_16bpc_rvv
add t1, a1, a2
srli t5, t1, 1
mv t1, a1
addi t2, a0, 2
vsetvli zero, t1, e32, m8, ta, ma
vmv.v.x v0, zero
1:
vsetvli t3, t1, e16, m4, tu, ma
vle16.v v8, (t2)
vwaddu.wv v0, v0, v8
sub t1, t1, t3
sh1add t2, t3, t2
bnez t1, 1b
mv t1, a2
mv t2, a0
vsetvli zero, t1, e32, m8, ta, ma
vmv.v.x v16, zero
2:
vsetvli t3, t1, e16, m4, tu, ma
sub t1, t1, t3
sll t3, t3, 1
sub t2, t2, t3
vle16.v v8, (t2)
vwaddu.wv v16, v16, v8
bnez t1, 2b
vsetvli zero, a1, e32, m8, ta, ma
vmv.s.x v24, t5
vmv.s.x v25, zero
vredsum.vs v8, v0, v24
vsetvli zero, a2, e32, m8, ta, ma
vredsum.vs v0, v16, v25
vmv.x.s t5, v8
vmv.x.s t1, v0
add t5, t5, t1
add t1, a1, a2
ctz t1, t1
srl a0, t5, t1
beq a1, a2, 5f
slli t1, a1, 1
sltu t2, t1, a2
slli t3, a2, 1
sltu t1, t3, a1
or t1, t1, t2
bnez t1, 3f
li t1, 0xAAAB # ~(1<<17)/3: extra divide by 3 for 2:1 aspect ratios
j 4f
3:
li t1, 0x6667 # ~(1<<17)/5: extra divide by 5 for 4:1 aspect ratios
4:
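# e.g. an 8x4 block: a0 = (sum+6)/4 here; the fixed-point multiply below (~1/3) gives ~(sum+6)/12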
mul a0, a0, t1
li t1, 17
srl a0, a0, t1
5:
jr t0
endfunc
function dc_gen_top_16bpc_rvv, export=1, ext="v,zba,zbb"
.variant_cc dav1d_dc_gen_top_16bpc_rvv
mv t1, a1
srli t5, a1, 1
addi a0, a0, 2
vsetvli zero, t1, e32, m2, ta, ma
vmv.v.x v0, zero
1:
vsetvli t3, t1, e16, m1, tu, ma
vle16.v v4, (a0)
vwaddu.wv v0, v0, v4
sh1add a0, t3, a0
sub t1, t1, t3
bnez t1, 1b
j dc_gen_sum_up_16bpc_rvv
endfunc
function dc_gen_left_16bpc_rvv, export=1, ext="v,zba,zbb"
.variant_cc dav1d_dc_gen_left_16bpc_rvv
mv t1, a1
srli t5, a1, 1
vsetvli zero, t1, e32, m2, ta, ma
vmv.v.x v0, zero
1:
vsetvli t3, t1, e16, m1, tu, ma
sub t1, t1, t3
slli t3, t3, 1
sub a0, a0, t3
vle16.v v4, (a0)
vwaddu.wv v0, v0, v4
bnez t1, 1b
j dc_gen_sum_up_16bpc_rvv
endfunc
function dc_gen_sum_up_16bpc_rvv, export=1, ext="v,zba,zbb"
.variant_cc dav1d_dc_gen_sum_up_16bpc_rvv
vsetvli zero, a1, e32, m2, ta, ma
vmv.s.x v4, t5
vredsum.vs v8, v0, v4
vmv.x.s t5, v8
ctz t1, a1
srl a0, t5, t1
jr t0
endfunc
function cfl_pred_16bpc_rvv, export=1, ext="v,zba"
csrw vxrm, zero
1:
li t2, 0
mv t3, a2
2:
vsetvli t0, t3, e16, m2, ta, ma
sh1add t4, t2, a0
vle16.v v0, (a5)
sh1add a5, t0, a5
vwmul.vx v4, v0, a6
vsetvli zero, zero, e32, m4, ta, mu
vneg.v v8, v4
vmslt.vx v0, v4, x0
vmax.vv v12, v8, v4
vssra.vi v16, v12, 6
vneg.v v16, v16, v0.t
vadd.vx v20, v16, a4
vmax.vx v0, v20, zero
vmin.vx v0, v0, a7
vsetvli zero, zero, e16, m2, ta, ma
vnclipu.wi v4, v0, 0
vse16.v v4, (t4)
add t2, t0, t2
sub t3, t3, t0
bnez t3, 2b
addi a3, a3, -1
add a0, a0, a1
bnez a3, 1b
ret
endfunc
function ipred_cfl_16bpc_rvv, export=1, ext=v
mv t6, a0 # dst
mv a0, a2 # topleft
mv t4, a1 # stride
mv a1, a3 # width
mv a2, a4 # height
jal t0, dc_gen_16bpc_rvv
mv a2, a3 # width
mv a3, a4 # height
mv a4, a0 # dc_get_top
mv a0, t6 # dst
mv a1, t4 # stride
j cfl_pred_16bpc_rvv
endfunc
function ipred_cfl_128_16bpc_rvv, export=1, ext="v,zba"
# dc = (bitdepth_max + 1) >> 1, then just rearrange registers
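# (e.g. 10 bpc: bitdepth_max = 1023 -> dc = 512)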
mv a2, a3
mv a3, a4
addi a4, a7, 1
srli a4, a4, 1
j cfl_pred_16bpc_rvv
endfunc
function ipred_cfl_top_16bpc_rvv, export=1, ext=v
mv t6, a0 # dst
mv a0, a2 # topleft
mv t4, a1 # stride
mv a1, a3 # width
jal t0, dc_gen_top_16bpc_rvv
mv a3, a4 # height
mv a4, a0 # dc_get_top
mv a0, t6 # dst
mv a2, a1 # width
mv a1, t4 # stride
j cfl_pred_16bpc_rvv
endfunc
function ipred_cfl_left_16bpc_rvv, export=1, ext=v
mv t6, a0 # dst
mv a0, a2 # topleft
mv t4, a1 # stride
mv a1, a4 # height
mv a2, a3 # width
jal t0, dc_gen_left_16bpc_rvv
mv a3, a4 # height
mv a4, a0 # dc_get_left
mv a1, t4 # stride
mv a0, t6 # dst
j cfl_pred_16bpc_rvv
endfunc
function ipred_paeth_16bpc_rvv, export=1, ext="v,zba"
csrw vxrm, zero
li t0, 0
mv t3, a2
lhu t1, (a2)
addi a6, a2, -2
addi a2, a2, 2
1:
lhu t2, (a6)
mv t3, a3
2:
sub t5, a3, t3
sh1add t5, t5, a2
vsetvli t6, t3, e16, m2, ta, ma
vle16.v v2, (t5)
vwaddu.vx v4, v2, t2
vsetvli zero, zero, e32, m4, ta, mu
vsub.vx v8, v4, t1
vzext.vf2 v24, v2
vsub.vx v12, v8, t1
vmslt.vx v0, v12, zero
vneg.v v12, v12, v0.t
vsub.vx v16, v8, t2
vmslt.vx v0, v16, zero
vneg.v v16, v16, v0.t
vsub.vv v20, v8, v24
vmslt.vx v0, v20, zero
vneg.v v20, v20, v0.t
sub t5, a3, t3
vmsleu.vv v4, v16, v20
vmsleu.vv v5, v16, v12
vmsgtu.vv v0, v20, v12
vmand.mm v6, v4, v5
vsetvli zero, zero, e16, m2, ta, ma
vmerge.vxm v8, v2, t1, v0
vmmv.m v0, v6
sh1add t5, t5, a0
sub t3, t3, t6
vmerge.vxm v4, v8, t2, v0
vse16.v v4, (t5)
bnez t3, 2b
addi a4, a4, -1
addi a6, a6, -2
add a0, a0, a1
bnez a4, 1b
ret
endfunc
function ipred_smooth_16bpc_rvv, export=1, ext="v,zba"
csrw vxrm, zero
la t0, dav1d_sm_weights
add t1, t0, a3
sh1add t2, a3, a2
slli t3, a4, 1
add t0, t0, a4
lhu t2, (t2)
sub t3, a2, t3
addi a6, a2, -2
addi a2, a2, 2
lhu t3, (t3)
1:
mv t6, a3
lhu a7, (a6)
lbu t4, (t0)
2:
li a5, 256
vsetvli t5, t6, e16, m2, ta, ma
vle8.v v2, (t1)
add t1, t1, t5
vle16.v v4, (a2)
sh1add a2, t5, a2
sub a5, a5, t4
vwmul.vx v8, v4, t4
mul a5, a5, t3
vsetvli zero, zero, e32, m4, ta, ma
vadd.vx v4, v8, a5
li a5, 256
vzext.vf4 v12, v2
vmul.vx v8, v12, a7
vrsub.vx v12, v12, a5
vmacc.vx v8, t2, v12
vadd.vv v12, v4, v8
vsetvli zero, zero, e32, m4, ta, ma
sub a5, a3, t6
sub t6, t6, t5
sh1add a5, a5, a0
vsetvli zero, zero, e16, m2, ta, ma
vnclipu.wi v2, v12, 9
vse16.v v2, (a5)
bnez t6, 2b
sub t1, t1, a3
slli t6, a3, 1
add a0, a0, a1
sub a2, a2, t6
addi a4, a4, -1
addi t0, t0, 1
addi a6, a6, -2
bnez a4, 1b
ret
endfunc
function ipred_smooth_v_16bpc_rvv, export=1, ext="v,zba"
csrw vxrm, zero
la t0, dav1d_sm_weights
slli t3, a4, 1
add t0, t0, a4
sub t3, a2, t3
addi a2, a2, 2
lhu t3, (t3)
1:
mv t6, a3
lbu t4, (t0)
2:
li a5, 256
vsetvli t5, t6, e16, m2, ta, ma
vle16.v v4, (a2)
sh1add a2, t5, a2
sub a5, a5, t4
vwmul.vx v8, v4, t4
mul a5, a5, t3
vsetvli zero, zero, e32, m4, ta, ma
vadd.vx v4, v8, a5
vsetvli zero, zero, e32, m4, ta, ma
sub a5, a3, t6
sub t6, t6, t5
sh1add a5, a5, a0
vsetvli zero, zero, e16, m2, ta, ma
vnclipu.wi v2, v4, 8
vse16.v v2, (a5)
bnez t6, 2b
slli t6, a3, 1
add a0, a0, a1
sub a2, a2, t6
addi a4, a4, -1
addi t0, t0, 1
bnez a4, 1b
ret
endfunc
function ipred_smooth_h_16bpc_rvv, export=1, ext="v,zba"
csrw vxrm, zero
la t0, dav1d_sm_weights
add t1, t0, a3
sh1add t2, a3, a2
lhu t2, (t2)
addi a6, a2, -2
1:
mv t6, a3
lhu a7, (a6)
2:
vsetvli t5, t6, e16, m2, ta, ma
vle8.v v2, (t1)
add t1, t1, t5
li a5, 256
vsetvli zero, zero, e32, m4, ta, ma
vzext.vf4 v12, v2
vmul.vx v8, v12, a7
vrsub.vx v12, v12, a5
vmacc.vx v8, t2, v12
sub a5, a3, t6
sub t6, t6, t5
sh1add a5, a5, a0
vsetvli zero, zero, e16, m2, ta, ma
vnclipu.wi v2, v8, 8
vse16.v v2, (a5)
bnez t6, 2b
sub t1, t1, a3
add a0, a0, a1
addi a4, a4, -1
addi a6, a6, -2
bnez a4, 1b
ret
endfunc
function pal_pred_16bpc_rvv, export=1, ext="v,zba"
csrw vxrm, zero
vsetivli t5, 8, e16, m1, ta, ma
vle16.v v30, (a2)
li t0, 4
srli t1, a4, 1
li t2, 1
1:
mv t4, a4
2:
vsetvli t5, t1, e8, mf2, ta, ma
vle8.v v0, (a3)
add a3, a3, t5
vand.vi v1, v0, 7 # low 3 bits: palette index of the even pixel
sub t6, a4, t4
vsrl.vi v2, v0, 4 # high nibble: palette index of the odd pixel
vwmul.vx v4, v1, t2
vwmul.vx v6, v2, t2
vsetvli zero, zero, e16, m1, ta, ma
sh1add t6, t6, a0
vrgather.vv v8, v30, v4
addi t3, t6, 2
vrgather.vv v10, v30, v6
slli t5, t5, 1
vsse16.v v8, (t6), t0
vsse16.v v10, (t3), t0
sub t4, t4, t5
bnez t4, 2b
add a0, a0, a1
addi a5, a5, -1
bnez a5, 1b
ret
endfunc
Admenri/urge | 10,672 | third_party/dav1d/src/riscv/64/mc.S
/******************************************************************************
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2024, Nathan Egge, Niklas Haas, Bogdan Gligorijevic
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#include "src/riscv/asm.S"
function blend_vl256_8bpc_rvv, export=1, ext=zbb
ctz t0, a3
addi t0, t0, 0xc3
j L(blend_epilog)
endfunc
function blend_8bpc_rvv, export=1, ext="v,zbb"
ctz t0, a3 # log2(width)
addi t0, t0, 0xc4 # vtype for vsetvl below: e8, ta, ma, LMUL scaled to the width
L(blend_epilog):
csrw vxrm, zero
andi t0, t0, 0xc7
vsetvl zero, a3, t0
li t1, 64
1:
addi a4, a4, -2
vle8.v v4, (a2)
add a2, a2, a3
vle8.v v6, (a2)
add a2, a2, a3
vle8.v v8, (a5)
add a5, a5, a3
vle8.v v10, (a5)
add a5, a5, a3
vle8.v v0, (a0)
add t0, a0, a1
vle8.v v2, (t0)
vwmulu.vv v16, v4, v8
vwmulu.vv v20, v6, v10
vrsub.vx v8, v8, t1
vrsub.vx v10, v10, t1
vwmaccu.vv v16, v0, v8
vwmaccu.vv v20, v2, v10
vnclipu.wi v0, v16, 6
vnclipu.wi v2, v20, 6
vse8.v v0, (a0)
vse8.v v2, (t0)
add a0, t0, a1
bnez a4, 1b
ret
endfunc
function blend_h_vl256_8bpc_rvv, export=1, ext=zbb
srai t0, a3, 2
li t2, 64
ctz t0, t0
addi t0, t0, 0xc5
j L(blend_h_epilog)
endfunc
function blend_h_8bpc_rvv, export=1, ext="v,zbb"
li t2, 64
bgt a3, t2, 128f
ctz t0, a3
addi t0, t0, 0xc4
L(blend_h_epilog):
csrw vxrm, zero
andi t0, t0, 0xc7
vsetvl zero, a3, t0
la t1, dav1d_obmc_masks
srai t0, a4, 2
add t1, t1, a4
sub a4, a4, t0
0:
mv t5, ra
1:
addi a4, a4, -2
lbu t3, (t1)
addi t1, t1, 1
lbu t4, (t1)
addi t1, t1, 1
vle8.v v8, (a2)
add a2, a2, a3
vle8.v v12, (a2)
add a2, a2, a3
vle8.v v0, (a0)
add t0, a0, a1
vle8.v v4, (t0)
vwmulu.vx v16, v8, t3
vwmulu.vx v24, v12, t4
sub t3, t2, t3
sub t4, t2, t4
vwmaccu.vx v16, t3, v0
vwmaccu.vx v24, t4, v4
vnclipu.wi v0, v16, 6
vnclipu.wi v4, v24, 6
vse8.v v0, (a0)
vse8.v v4, (t0)
add a0, t0, a1
bgtz a4, 1b
jr t5
128:
csrw vxrm, zero
vsetvli zero, t2, e8, m4, ta, ma
la t1, dav1d_obmc_masks
srai t0, a4, 2
add t1, t1, a4
sub a4, a4, t0
mv a5, a0
mv a6, a2
mv a7, a4
jal t5, 1b
add t1, t1, a4
add a0, a5, t2
add a2, a6, t2
mv a4, a7
sub t1, t1, a4
j 0b
endfunc
function blend_v_vl256_8bpc_rvv, export=1, ext=zbb
srai t0, a3, 2
ctz t0, t0
addi t0, t0, 0xc5
j L(blend_v_epilog)
endfunc
function blend_v_8bpc_rvv, export=1, ext="v,zbb"
ctz t0, a3
addi t0, t0, 0xc4
L(blend_v_epilog):
andi t0, t0, 0xc7
srai t1, a3, 2
sub t1, a3, t1
vsetvl zero, t1, t0
csrw vxrm, zero
la t1, dav1d_obmc_masks
add t1, t1, a3
vle8.v v8, (t1)
li t0, 64
vrsub.vx v10, v8, t0
1:
addi a4, a4, -2
vle8.v v4, (a2)
add a2, a2, a3
vle8.v v6, (a2)
add a2, a2, a3
vle8.v v0, (a0)
add t0, a0, a1
vle8.v v2, (t0)
vwmulu.vv v12, v4, v8
vwmulu.vv v16, v6, v8
vwmaccu.vv v12, v0, v10
vwmaccu.vv v16, v2, v10
vnclipu.wi v0, v12, 6
vnclipu.wi v2, v16, 6
vse8.v v0, (a0)
vse8.v v2, (t0)
add a0, t0, a1
bnez a4, 1b
ret
endfunc
.macro avg va, vb, vm
vadd.vv \va, \va, \vb
.endm
.macro w_avg va, vb, vm
vwmul.vx v24, \va, a6
vwmacc.vx v24, a7, \vb
vnclip.wi \va, v24, 8
.endm
.macro mask va, vb, vm
vwmul.vv v24, \va, \vm
vrsub.vx \vm, \vm, a7
vwmacc.vv v24, \vb, \vm
vnclip.wi \va, v24, 10
.endm
.macro bidir_fn type, shift
function \type\()_8bpc_rvv, export=1, ext="v,zba,zbb"
.ifc \type, w_avg
li a7, 16
sub a7, a7, a6
.endif
.ifc \type, mask
li a7, 64
.endif
li t0, 4
csrw vxrm, zero
beq t0, a4, 4f
csrr t0, vlenb
ctz t1, a4
ctz t0, t0
li t2, 1
sub t0, t1, t0
li t4, -3
bgt t0, t2, 2f
max t0, t0, t4
andi t1, t0, 0x7
addi t0, t1, 1 # may overflow into E16 bit
ori t0, t0, MA | TA | E16
ori t1, t1, MA | TA | E8
1:
addi a5, a5, -4
.rept 2
vsetvl zero, a4, t0
sh1add t3, a4, a2
vle16.v v0, (a2)
sh1add a2, a4, t3
vle16.v v4, (t3)
sh1add t3, a4, a3
vle16.v v8, (a3)
sh1add a3, a4, t3
vle16.v v12, (t3)
.ifc \type, mask
add t3, a4, a6
vle8.v v24, (a6)
add a6, a4, t3
vle8.v v26, (t3)
vzext.vf2 v16, v24
vzext.vf2 v20, v26
.endif
\type v0, v8, v16
\type v4, v12, v20
vmax.vx v8, v0, zero
vmax.vx v12, v4, zero
vsetvl zero, zero, t1
vnclipu.wi v0, v8, \shift
vnclipu.wi v2, v12, \shift
add t3, a1, a0
vse8.v v0, (a0)
add a0, a1, t3
vse8.v v2, (t3)
.endr
bnez a5, 1b
ret
2:
mv t0, a0
neg t4, a4
add a0, a1, a0
addi a5, a5, -1
20:
vsetvli t2, a4, e16, m4, ta, ma
sh1add t4, t2, t4
sh1add t3, t2, a2
vle16.v v0, (a2)
sh1add a2, t2, t3
vle16.v v4, (t3)
sh1add t3, t2, a3
vle16.v v8, (a3)
sh1add a3, t2, t3
vle16.v v12, (t3)
.ifc \type, mask
add t3, t2, a6
vle8.v v24, (a6)
add a6, t2, t3
vle8.v v26, (t3)
vzext.vf2 v16, v24
vzext.vf2 v20, v26
.endif
\type v0, v8, v16
\type v4, v12, v20
vmax.vx v8, v0, zero
vmax.vx v12, v4, zero
vsetvli zero, zero, e8, m2, ta, ma
vnclipu.wi v0, v8, \shift
vnclipu.wi v2, v12, \shift
add t3, t2, t0
vse8.v v0, (t0)
add t0, t2, t3
vse8.v v2, (t3)
bnez t4, 20b
bnez a5, 2b
ret
4:
slli t0, a5, 2
vsetvli t1, t0, e16, m4, ta, ma
vle16.v v0, (a2)
sh1add a2, t1, a2
vle16.v v4, (a3)
sh1add a3, t1, a3
.ifc \type, mask
vle8.v v16, (a6)
add a6, t1, a6
vzext.vf2 v8, v16
.endif
\type v0, v4, v8
vmax.vx v8, v0, zero
vsetvli zero, zero, e8, m2, ta, ma
vnclipu.wi v0, v8, \shift
vsetvli t1, a5, e32, m2, ta, ma
vsse32.v v0, (a0), a1
ctz t0, t1
sub a5, a5, t1
sll t0, a1, t0
add a0, t0, a0
bnez a5, 4b
ret
endfunc
.endm
bidir_fn avg, 5
bidir_fn w_avg, 0
bidir_fn mask, 0
function warp_8x8_8bpc_rvv, export=1, ext="v"
csrw vxrm, zero
vsetivli zero, 8, e16, m1, ta, ma
addi sp, sp, -2*15*8
mv t5, sp
li t0, 3
mul t0, a3, t0
sub a2, a2, t0
addi a2, a2, -3
li t0, 64
addi a3, a3, -8
li t1, 15
la t2, dav1d_mc_warp_filter
lh t6, (a4)
lh t4, 2(a4)
vid.v v30
vwmul.vx v28, v30, t6
1:
addi t1, t1, -1
vsetvli zero, zero, e32, m2, ta, ma
vadd.vx v4, v28, a5
add a5, a5, t4
vssra.vi v2, v4, 10
vadd.vx v2, v2, t0
vsll.vi v24, v2, 3
vsetvli zero, zero, e8, mf2, ta, ma
vluxseg8ei32.v v2, (t2), v24
vsetvli zero, zero, e16, m1, ta, ma
.irp i, 2, 3, 4, 5, 6, 7, 8, 9
vle8.v v10, (a2)
addi a2, a2, 1
vsext.vf2 v14, v\i
vzext.vf2 v16, v10
.if \i == 2
vwmulsu.vv v12, v14, v16
.else
vwmaccsu.vv v12, v14, v16
.endif
.endr
vnclip.wi v10, v12, 3
add a2, a2, a3
vse16.v v10, (t5)
addi t5, t5, 16
bnez t1, 1b
mv t5, sp
li t1, 8
lh t6, 4(a4)
lh t4, 6(a4)
vwmul.vx v28, v30, t6
2:
addi t1, t1, -1
vsetvli zero, zero, e32, m2, ta, ma
vadd.vx v4, v28, a6
add a6, a6, t4
vssra.vi v2, v4, 10
vadd.vx v2, v2, t0
vsll.vi v24, v2, 3
vsetvli zero, zero, e8, mf2, ta, ma
vluxseg8ei32.v v2, (t2), v24
vsetvli zero, zero, e16, m1, ta, ma
.irp i, 2, 3, 4, 5, 6, 7, 8, 9
vle16.v v10, (t5)
addi t5, t5, 16
vsext.vf2 v14, v\i
.if \i == 2
vwmul.vv v12, v14, v10
.else
vwmacc.vv v12, v14, v10
.endif
.endr
addi t5, t5, -16*7
vnclip.wi v10, v12, 11
vmax.vx v10, v10, zero
vsetvli zero, zero, e8, mf2, ta, ma
vnclipu.wi v12, v10, 0
vse8.v v12, (a0)
add a0, a0, a1
bnez t1, 2b
addi sp, sp, 2*15*8
ret
endfunc
function warp_8x8t_8bpc_rvv, export=1, ext="v,zba"
csrw vxrm, zero
vsetivli zero, 8, e16, m1, ta, ma
addi sp, sp, -2*15*8
mv t5, sp
li t0, 3
mul t0, a3, t0
sub a2, a2, t0
addi a2, a2, -3
li t0, 64
addi a3, a3, -8
li t1, 15
la t2, dav1d_mc_warp_filter
lh t6, (a4)
lh t4, 2(a4)
vid.v v30
vwmul.vx v28, v30, t6
1:
addi t1, t1, -1
vsetvli zero, zero, e32, m2, ta, ma
vadd.vx v4, v28, a5
add a5, a5, t4
vssra.vi v2, v4, 10
vadd.vx v2, v2, t0
vsll.vi v24, v2, 3
vsetvli zero, zero, e8, mf2, ta, ma
vluxseg8ei32.v v2, (t2), v24
vsetvli zero, zero, e16, m1, ta, ma
.irp i, 2, 3, 4, 5, 6, 7, 8, 9
vle8.v v10, (a2)
addi a2, a2, 1
vsext.vf2 v14, v\i
vzext.vf2 v16, v10
.if \i == 2
vwmulsu.vv v12, v14, v16
.else
vwmaccsu.vv v12, v14, v16
.endif
.endr
vnclip.wi v10, v12, 3
add a2, a2, a3
vse16.v v10, (t5)
addi t5, t5, 16
bnez t1, 1b
mv t5, sp
li t1, 8
lh t6, 4(a4)
lh t4, 6(a4)
vwmul.vx v28, v30, t6
2:
addi t1, t1, -1
vsetvli zero, zero, e32, m2, ta, ma
vadd.vx v4, v28, a6
add a6, a6, t4
vssra.vi v2, v4, 10
vadd.vx v2, v2, t0
vsll.vi v24, v2, 3
vsetvli zero, zero, e8, mf2, ta, ma
vluxseg8ei32.v v2, (t2), v24
vsetvli zero, zero, e16, m1, ta, ma
.irp i, 2, 3, 4, 5, 6, 7, 8, 9
vle16.v v10, (t5)
addi t5, t5, 16
vsext.vf2 v14, v\i
.if \i == 2
vwmul.vv v12, v14, v10
.else
vwmacc.vv v12, v14, v10
.endif
.endr
addi t5, t5, -16*7
vnclip.wi v10, v12, 7
vse16.v v10, (a0)
sh1add a0, a1, a0
bnez t1, 2b
addi sp, sp, 2*15*8
ret
endfunc
Admenri/urge | 2,431 | third_party/dav1d/src/riscv/64/cpu.S
/******************************************************************************
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2024, Nathan Egge
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#include "src/riscv/asm.S"
// This function detects non-compliant RVV 0.7.1 hardware which reports support
// for the V extension through HWCAP, by intentionally setting tail and mask
// agnostic vector configurations that were only introduced in RVV 0.9 spec.
// Existing non-compliant (pre RVV 1.0) hardware will set the VILL bit in VTYPE
// (indicating an illegal vector configuration) which is stored in the XLEN-1
// bit position, thus a simple sign check is sufficient for detection.
// This workaround is inexpensive and harmless on compliant hardware, but we
// should still consider removing it once all non-compliant RVV 0.7.1 hardware
// is out of service.
function has_compliant_rvv, export=1, ext=v
vsetvli t0, zero, e8, m1, ta, ma
csrr a0, vtype
sgtz a0, a0
ret
endfunc
function get_vlenb, export=1
csrr a0, vlenb
ret
endfunc
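The probe above is meant to be driven from C during CPU feature detection. A minimal sketch of such a caller follows; it is an illustration only, not dav1d's actual cpu.c code, and it assumes the dav1d_-prefixed symbol names produced by export=1 in asm.S together with plain integer return types for both helpers. Note that the probe itself executes a vector instruction, so it may only run once the kernel has already reported the V extension (e.g. through HWCAP), as the comment above explains.

#include <stdio.h>

/* Assumed prototypes: both routines take no arguments and return in a0. */
int dav1d_has_compliant_rvv(void);    /* nonzero iff an RVV 1.0 ta/ma vsetvli is accepted */
unsigned long dav1d_get_vlenb(void);  /* vector register length in bytes (csrr vlenb) */

int main(void)
{
    /* Only call this after HWCAP (or equivalent) has advertised the V extension. */
    if (dav1d_has_compliant_rvv())
        printf("RVV 1.0 usable, VLEN = %lu bits\n", dav1d_get_vlenb() * 8);
    else
        printf("non-compliant (pre-1.0) vector unit: keep to scalar paths\n");
    return 0;
}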
Admenri/urge | 9,919 | third_party/dav1d/src/riscv/64/ipred.S
/******************************************************************************
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2024, Bogdan Gligorijevic
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#include "src/riscv/asm.S"
function dc_gen_8bpc_rvv, export=1, ext="v,zbb"
.variant_cc dav1d_dc_gen_8bpc_rvv
add t1, a1, a2
srli t5, t1, 1
mv t1, a1
addi t2, a0, 1
vsetvli zero, t1, e16, m4, ta, ma
vmv.v.x v0, zero
1:
vsetvli t3, t1, e8, m2, tu, ma
vle8.v v4, (t2)
vwaddu.wv v0, v0, v4
sub t1, t1, t3
add t2, t2, t3
bnez t1, 1b
mv t1, a2
mv t2, a0
vsetvli zero, t1, e16, m4, ta, ma
vmv.v.x v8, zero
2:
vsetvli t3, t1, e8, m2, tu, ma
sub t2, t2, t3
vle8.v v4, (t2)
vwaddu.wv v8, v8, v4
sub t1, t1, t3
bnez t1, 2b
vsetvli zero, zero, e32, m8, ta, ma
vmv.s.x v16, t5
vmv.s.x v12, zero
vsetvli zero, a1, e16, m4, ta, ma
vwredsum.vs v24, v0, v16
vsetvli zero, a2, e16, m4, ta, ma
vwredsum.vs v16, v8, v12
vsetvli zero, zero, e32, m8, ta, ma
vmv.x.s t5, v24
vmv.x.s t1, v16
add t5, t5, t1
add t1, a1, a2
ctz t1, t1
srl a0, t5, t1
beq a1, a2, 5f
slli t1, a1, 1
sltu t2, t1, a2
slli t3, a2, 1
sltu t1, t3, a1
or t1, t1, t2
bnez t1, 3f
li t1, 0x5556 # ~(1<<16)/3: extra divide by 3 for 2:1 aspect ratios
j 4f
3:
li t1, 0x3334 # ~(1<<16)/5: extra divide by 5 for 4:1 aspect ratios
4:
mul a0, a0, t1
srli a0, a0, 16
5:
jr t0
endfunc
function dc_gen_top_8bpc_rvv, export=1, ext="v,zbb"
.variant_cc dav1d_dc_gen_top_8bpc_rvv
mv t1, a1
srli t5, a1, 1
addi a0, a0, 1
vsetvli zero, t1, e16, m4, ta, ma
vmv.v.x v0, zero
1:
vsetvli t3, t1, e8, m2, tu, ma
vle8.v v4, (a0)
vwaddu.wv v0, v0, v4
sub t1, t1, t3
add a0, a0, t3
bnez t1, 1b
j dc_gen_sum_up_8bpc_rvv
endfunc
function dc_gen_left_8bpc_rvv, export=1, ext="v,zbb"
.variant_cc dav1d_dc_gen_left_8bpc_rvv
mv t1, a1
srli t5, a1, 1
vsetvli t2, t1, e16, m4, ta, ma
vmv.v.x v0, zero
1:
vsetvli t3, t1, e8, m2, tu, ma
sub a0, a0, t3
vle8.v v4, (a0)
vwaddu.wv v0, v0, v4
sub t1, t1, t3
bnez t1, 1b
j dc_gen_sum_up_8bpc_rvv
endfunc
function dc_gen_sum_up_8bpc_rvv, export=1, ext="v,zbb"
.variant_cc dav1d_dc_gen_sum_up_8bpc_rvv
vsetvli zero, a1, e32, m8, ta, ma
vmv.s.x v4, t5
vsetvli zero, zero, e16, m4, ta, ma
vwredsum.vs v8, v0, v4
vsetvli zero, zero, e32, m8, ta, ma
vmv.x.s t5, v8
ctz t1, a1
srl a0, t5, t1
jr t0
endfunc
function cfl_pred_8bpc_rvv, export=1, ext="v,zba"
csrw vxrm, zero
1:
li t2, 0
mv t3, a2
2:
vsetvli t0, t3, e16, m2, ta, ma
add t4, a0, t2
vle16.v v0, (a5)
sh1add a5, t0, a5
vwmul.vx v4, v0, a6
vsetvli zero, zero, e32, m4, ta, mu
vneg.v v8, v4
vmslt.vx v0, v4, x0
vmax.vv v12, v8, v4
vssra.vi v16, v12, 6
vneg.v v16, v16, v0.t
vadd.vx v20, v16, a4
vmax.vx v0, v20, zero
vsetvli zero, zero, e16, m2, ta, ma
vnclipu.wi v4, v0, 0
vsetvli zero, zero, e8, m1, ta, ma
vnclipu.wi v0, v4, 0
vse8.v v0, (t4)
add t2, t0, t2
sub t3, t3, t0
bnez t3, 2b
addi a3, a3, -1
add a0, a0, a1
bnez a3, 1b
ret
endfunc
function ipred_cfl_8bpc_rvv, export=1, ext=v
mv t6, a0 # dst
mv a0, a2 # topleft
mv t4, a1 # stride
mv a1, a3 # width
mv a2, a4 # height
jal t0, dc_gen_8bpc_rvv
mv a2, a3 # width
mv a3, a4 # height
mv a4, a0 # dc_get_top
mv a0, t6 # dst
mv a1, t4 # stride
j cfl_pred_8bpc_rvv
endfunc
function ipred_cfl_128_8bpc_rvv, export=1, ext="v,zba"
# dc = 128, then just rearrange registers
mv a2, a3
mv a3, a4
li a4, 128
j cfl_pred_8bpc_rvv
endfunc
function ipred_cfl_top_8bpc_rvv, export=1, ext=v
mv t6, a0 # dst
mv a0, a2 # topleft
mv t4, a1 # stride
mv a1, a3 # width
jal t0, dc_gen_top_8bpc_rvv
mv a3, a4 # height
mv a4, a0 # dc_get_top
mv a0, t6 # dst
mv a2, a1 # width
mv a1, t4 # stride
j cfl_pred_8bpc_rvv
endfunc
function ipred_cfl_left_8bpc_rvv, export=1, ext="v,zba"
mv t6, a0 # dst
mv a0, a2 # topleft
mv t4, a1 # stride
mv a1, a4 # height
mv a2, a3 # width
jal t0, dc_gen_left_8bpc_rvv
mv a3, a4 # height
mv a4, a0 # dc_get_left
mv a1, t4 # stride
mv a0, t6 # dst
j cfl_pred_8bpc_rvv
endfunc
function ipred_paeth_8bpc_rvv, export=1, ext="v,zba"
csrw vxrm, zero
li t0, 0
mv t3, a2
lbu t1, (a2)
addi a6, a2, -1
addi a2, a2, 1
1:
lbu t2, (a6)
mv t3, a3
2:
sub t5, a3, t3
add t5, a2, t5
vsetvli t6, t3, e8, m1, ta, ma
vle8.v v2, (t5)
vwaddu.vx v4, v2, t2
vsetvli zero, zero, e16, m2, ta, ma
vwsub.vx v8, v4, t1
vsetvli zero, zero, e32, m4, ta, mu
vzext.vf4 v24, v2
vsub.vx v12, v8, t1
vmslt.vx v0, v12, zero
vneg.v v12, v12, v0.t
vsub.vx v16, v8, t2
vmslt.vx v0, v16, zero
vneg.v v16, v16, v0.t
vsub.vv v20, v8, v24
vmslt.vx v0, v20, zero
vneg.v v20, v20, v0.t
sub t5, a3, t3
vmsleu.vv v4, v16, v20
vmsleu.vv v5, v16, v12
vmsgtu.vv v0, v20, v12
vmand.mm v6, v4, v5
vsetvli zero, zero, e8, m1, ta, ma
vmerge.vxm v8, v2, t1, v0
vmmv.m v0, v6
add t5, a0, t5
sub t3, t3, t6
vmerge.vxm v4, v8, t2, v0
vse8.v v4, (t5)
bnez t3, 2b
addi a4, a4, -1
addi a6, a6, -1
add a0, a0, a1
bnez a4, 1b
ret
endfunc
function ipred_smooth_8bpc_rvv, export=1, ext="v,zba"
csrw vxrm, zero
la t0, dav1d_sm_weights
add t1, t0, a3
add t2, a2, a3
add t0, t0, a4
lbu t2, (t2)
sub t3, a2, a4
addi a6, a2, -1
addi a2, a2, 1
lbu t3, (t3)
1:
mv t6, a3
lbu a7, (a6)
lbu t4, (t0)
2:
li a5, 256
vsetvli t5, t6, e8, m1, ta, ma
vle8.v v2, (t1)
add t1, t1, t5
vle8.v v4, (a2)
add a2, a2, t5
sub a5, a5, t4
vwmulu.vx v8, v4, t4
vsetvli zero, zero, e16, m2, ta, ma
mul a5, a5, t3
vadd.vx v4, v8, a5
vsetvli zero, zero, e8, m1, ta, ma
vwmulu.vx v8, v2, a7
vneg.v v12, v2
vwmaccu.vx v8, t2, v12
vsetvli zero, zero, e16, m2, ta, ma
vwaddu.vv v12, v4, v8
sub a5, a3, t6
sub t6, t6, t5
add a5, a5, a0
vnclipu.wi v2, v12, 9
vsetvli zero, zero, e8, m1, ta, ma
vnclipu.wi v0, v2, 0
vse8.v v0, (a5)
bnez t6, 2b
sub t1, t1, a3
add a0, a0, a1
sub a2, a2, a3
addi a4, a4, -1
addi t0, t0, 1
addi a6, a6, -1
bnez a4, 1b
ret
endfunc
function ipred_smooth_v_8bpc_rvv, export=1, ext="v,zba"
csrw vxrm, zero
la t0, dav1d_sm_weights
add t2, a2, a3
add t0, t0, a4
sub t3, a2, a4
addi a2, a2, 1
lbu t3, (t3)
1:
mv t6, a3
lbu t4, (t0)
2:
li a5, 256
vsetvli t5, t6, e8, m1, ta, ma
vle8.v v4, (a2)
add a2, a2, t5
sub a5, a5, t4
vwmulu.vx v8, v4, t4
vsetvli zero, zero, e16, m2, ta, ma
mul a5, a5, t3
vwaddu.vx v4, v8, a5
sub a5, a3, t6
sub t6, t6, t5
add a5, a5, a0
vsetvli zero, zero, e16, m2, ta, ma
vnclipu.wi v2, v4, 8
vsetvli zero, zero, e8, m1, ta, ma
vnclipu.wi v0, v2, 0
vse8.v v0, (a5)
bnez t6, 2b
add a0, a0, a1
sub a2, a2, a3
addi a4, a4, -1
addi t0, t0, 1
bnez a4, 1b
ret
endfunc
function ipred_smooth_h_8bpc_rvv, export=1, ext="v,zba"
csrw vxrm, zero
la t0, dav1d_sm_weights
add t1, t0, a3
add t2, a2, a3
lbu t2, (t2)
addi a6, a2, -1
1:
mv t6, a3
lbu a7, (a6)
2:
vsetvli t5, t6, e8, m1, ta, ma
vle8.v v2, (t1)
add t1, t1, t5
vwmulu.vx v8, v2, a7
vneg.v v12, v2
vwmaccu.vx v8, t2, v12
sub a5, a3, t6
sub t6, t6, t5
add a5, a5, a0
vsetvli zero, zero, e8, m1, ta, ma
vnclipu.wi v0, v8, 8
vse8.v v0, (a5)
bnez t6, 2b
sub t1, t1, a3
add a0, a0, a1
addi a4, a4, -1
addi a6, a6, -1
bnez a4, 1b
ret
endfunc
function pal_pred_8bpc_rvv, export=1, ext="v,zba"
csrw vxrm, zero
vsetivli t5, 8, e8, m1, ta, ma
vle8.v v30, (a2)
li t0, 2
srli t1, a4, 1
1:
mv t4, a4
2:
vsetvli t5, t1, e8, m1, ta, ma
vle8.v v0, (a3)
add a3, a3, t5
vsrl.vi v2, v0, 4
sub t6, a4, t4
vand.vi v1, v0, 7
add t6, a0, t6
vrgather.vv v3, v30, v1
addi t2, t6, 1
vrgather.vv v4, v30, v2
slli t5, t5, 1
vsse8.v v3, (t6), t0
sub t4, t4, t5
vsse8.v v4, (t2), t0
bnez t4, 2b
addi a5, a5, -1
add a0, a0, a1
bnez a5, 1b
ret
endfunc
Admenri/urge | 13,147 | third_party/dav1d/src/riscv/64/cdef16.S
/******************************************************************************
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2024, Bogdan Gligorijevic
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#include "src/riscv/asm.S"
.macro constrain_vectors vec1, vec2, vec_sub, strength, shift, vec_tmp1, vec_tmp2
vmslt.vx v0, \vec_tmp1, zero
vneg.v \vec_tmp1, \vec_tmp1, v0.t
vmmv.m v1, v0
vmslt.vx v0, \vec_tmp2, zero
vneg.v \vec_tmp2, \vec_tmp2, v0.t
vsra.vx \vec1, \vec_tmp1, \shift
vsra.vx \vec2, \vec_tmp2, \shift
vrsub.vx \vec1, \vec1, \strength
vrsub.vx \vec2, \vec2, \strength
vmax.vx \vec1, \vec1, zero
vmax.vx \vec2, \vec2, zero
vmin.vv \vec_tmp1, \vec1, \vec_tmp1
vmin.vv \vec_tmp2, \vec2, \vec_tmp2
vneg.v \vec_tmp2, \vec_tmp2, v0.t
vmmv.m v0, v1
vneg.v \vec_tmp1, \vec_tmp1, v0.t
.endm
.macro padding_fn w, h
li t5, -32768 # INT16_MIN
andi t4, a7, 4
li t2, -2 # y_start
.if \w == 4
vsetivli zero, \w + 4, e16, m1, ta, ma
.else
vsetivli zero, \w + 4, e16, m2, ta, ma
.endif
vmv.v.x v0, t5
bnez t4, L(top_done_\w\()x\h)
slli t5, a1, 1
addi t5, t5, 2
slli t5, t5, 1
sub t5, a0, t5
sh1add t4, a1, t5
vse16.v v0, (t5)
vse16.v v0, (t4)
li t2, 0
L(top_done_\w\()x\h):
andi t4, a7, 8
li t3, 2 + \h # y_end
bnez t4, L(bottom_done_\w\()x\h)
li t5, \h
mul t5, a1, t5
addi t5, t5, -2
sh1add t5, t5, a0
sh1add t4, a1, t5
vse16.v v0, (t5)
vse16.v v0, (t4)
addi t3, t3, -2
L(bottom_done_\w\()x\h):
andi t4, a7, 1
li t0, -2 # x_start
.if \w == 4
vsetivli zero, 2, e16, m1, ta, ma
.else
vsetivli zero, 2, e16, m2, ta, ma
.endif
bnez t4, L(left_done_\w\()x\h)
mul t5, a1, t2
addi t5, t5, -2
sh1add t5, t5, a0
sub t0, t3, t2
3:
vse16.v v0, (t5)
sh1add t5, a1, t5
addi t0, t0, -1
bnez t0, 3b
L(left_done_\w\()x\h):
andi t4, a7, 2
li t1, 2 + \w # x_end
bnez t4, L(right_done_\w\()x\h)
mul t5, t2, a1
addi t5, t5, \w
sh1add t5, t5, a0
sub t1, t3, t2
4:
vse16.v v0, (t5)
sh1add t5, a1, t5
addi t1, t1, -1
bnez t1, 4b
li t1, \w
L(right_done_\w\()x\h):
beqz t2, L(top_skip_\w\()x\h)
mul t5, a1, t2
add t5, t0, t5
sh1add a0, t5, a0 # tmp += y_start * tmp_stride + x_start
sh1add a5, t0, a5 # top += x_start
sub t5, t1, t0
slli t6, t0, 1
.if \w == 4
vsetvli zero, t5, e16, m1, ta, ma
.else
vsetvli zero, t5, e16, m2, ta, ma
.endif
5:
vle16.v v2, (a5)
addi t2, t2, 1
add a5, a3, a5
vse16.v v2, (a0)
sh1add a0, a1, a0
bnez t2, 5b
sub a0, a0, t6 # tmp -= x_start
L(top_skip_\w\()x\h):
li a5, \h
beqz t0, L(left_skip_\w\()x\h)
sh1add a0, t0, a0 # tmp += x_start
7:
.if \w == 4
vsetivli zero, 2, e16, m1, ta, ma
.else
vsetivli zero, 2, e16, m2, ta, ma
.endif
vle16.v v2, (a4)
addi a5, a5, -1
addi a4, a4, 4
vse16.v v2, (a0)
sh1add a0, a1, a0
bnez a5, 7b
li a5, \h
mul t5, a1, a5
add t5, t5, t0
slli t5, t5, 1
sub a0, a0, t5 # tmp -= h * tmp_stride + x_start
L(left_skip_\w\()x\h):
8:
.if \w == 4
vsetvli zero, t1, e16, m1, ta, ma
.else
vsetvli zero, t1, e16, m2, ta, ma
.endif
vle16.v v2, (a2)
add a2, a3, a2
vse16.v v2, (a0)
sh1add a0, a1, a0
addi a5, a5, -1
bnez a5, 8b
li a5, \h
sh1add a0, t0, a0 # tmp += x_start
sh1add a6, t0, a6 # bottom += x_start
beq a5, t3, L(bottom_skip_\w\()x\h)
sub t5, t1, t0
.if \w == 4
vsetvli zero, t5, e16, m1, ta, ma
.else
vsetvli zero, t5, e16, m2, ta, ma
.endif
9:
vle16.v v2, (a6)
add a6, a3, a6
addi a5, a5, 1
vse16.v v2, (a0)
sh1add a0, a1, a0
bne a5, t3, 9b
L(bottom_skip_\w\()x\h):
li t6, \h
mul t6, a3, t6
sub a2, a2, t6 # src -= h * PXSTRIDE(src_stride)
mul t5, a1, t3
add t5, t5, t0
slli t5, t5, 1
sub a0, a0, t5 # tmp -= y_end * tmp_stride + x_start
.endm
.macro cdef_fn w, h
function cdef_filter_block_\w\()x\h\()_16bpc_rvv, export=1, ext="v,zba,zbb"
csrw vxrm, zero
addi sp, sp, -32 - 144*2
sd a5, 24(sp) # pri_strength
sd a6, 16(sp) # sec_strength
sd a7, 8(sp) # dir
ld a7, 8 + 32 + 144*2(sp) # edges
mv a6, a4 # bottom
mv a5, a3 # top
mv a4, a2 # left
mv a3, a1 # dst_stride
mv a2, a0 # dst
li a1, 12 # tmp_stride
addi a0, sp, 32 + 2*(2*12+2)
padding_fn \w, \h
ld a4, 32 + 2*144(sp) # damping
ld a5, 24(sp) # pri_strength
ld a6, 16(sp) # sec_strength
ld a7, 8(sp) # dir
beqz a5, cdef_filter_sec_only_\w\()x\h
bnez a6, cdef_filter_pri_sec_\w\()x\h
li t1, 64-8
ld t4, 32 + 2*144 + 16(sp) # bitdepth_max
clz t4, t4
sub t4, t1, t4
sra t4, a5, t4
andi t0, t4, 1
li t1, 4
sub t4, t1, t0
li t1, 63
clz t2, a5
sub t1, t1, t2
sub t1, a4, t1
li t0, \h
la t2, dav1d_cdef_directions
addi t3, a7, 2
sh1add t2, t3, t2
vsetivli zero, \w, e16, m1, ta, ma
blt zero, t1, 1f
mv t1, zero
1:
lb t3, 0(t2)
vle16.v v2, (a2)
sh1add t6, t3, a0
slli t3, t3, 1
sub t3, a0, t3
vle16.v v4, (t6)
vle16.v v6, (t3)
vwsub.vv v8, v4, v2
vwsub.vv v16, v6, v2
vsetvli zero, zero, e32, m2, ta, mu
constrain_vectors v4, v6, v2, a5, t1, v8, v16
vmul.vx v28, v16, t4
vmacc.vx v28, t4, v8
lb t3, 1(t2)
andi t5, t4, 3
ori t5, t5, 2
sh1add t6, t3, a0
slli t3, t3, 1
sub t3, a0, t3
vsetvli zero, zero, e16, m1, ta, ma
vle16.v v4, (t6)
vle16.v v6, (t3)
vwsub.vv v8, v4, v2
vwsub.vv v16, v6, v2
vsetvli zero, zero, e32, m2, ta, mu
constrain_vectors v4, v6, v2, a5, t1, v8, v16
vmacc.vx v28, t5, v16
vmacc.vx v28, t5, v8
vmslt.vx v0, v28, zero
vadd.vi v28, v28, -1, v0.t
vsetvli zero, zero, e16, m1, ta, ma
vnclip.wi v24, v28, 4
vadd.vv v28, v2, v24
vse16.v v28, (a2)
add a2, a2, a3
sh1add a0, a1, a0
addi t0, t0, -1
bnez t0, 1b
addi sp, sp, 32 + 144*2
ret
cdef_filter_sec_only_\w\()x\h:
li t1, 63
clz t2, a6
sub t1, t1, t2
sub t1, a4, t1
li t0, \h
la t2, dav1d_cdef_directions
addi t3, a7, 4
sh1add t3, t3, t2
sh1add t2, a7, t2
vsetivli zero, \w, e16, m1, ta, ma
2:
lb t4, 0(t3)
lb t5, 0(t2)
vle16.v v2, (a2)
sh1add t6, t4, a0
slli t4, t4, 1
sub t4, a0, t4
vle16.v v4, (t6)
vle16.v v6, (t4)
sh1add t4, t5, a0
slli t5, t5, 1
sub t5, a0, t5
vle16.v v8, (t4)
vle16.v v10, (t5)
vwsub.vv v12, v4, v2
vwsub.vv v14, v6, v2
vwsub.vv v16, v8, v2
vwsub.vv v18, v10, v2
vsetvli zero, zero, e32, m2, ta, mu
li t4, 2
constrain_vectors v4, v6, v2, a6, t1, v12, v14
constrain_vectors v8, v10, v2, a6, t1, v16, v18
vmul.vx v28, v18, t4
vmacc.vx v28, t4, v16
vmacc.vx v28, t4, v14
vmacc.vx v28, t4, v12
lb t4, 1(t3)
lb t5, 1(t2)
sh1add t6, t4, a0
slli t4, t4, 1
sub t4, a0, t4
vsetvli zero, zero, e16, m1, ta, ma
vle16.v v4, (t6)
vle16.v v6, (t4)
sh1add t4, t5, a0
slli t5, t5, 1
sub t5, a0, t5
vle16.v v8, (t4)
vle16.v v10, (t5)
vwsub.vv v12, v4, v2
vwsub.vv v14, v6, v2
vwsub.vv v16, v8, v2
vwsub.vv v18, v10, v2
vsetvli zero, zero, e32, m2, ta, mu
constrain_vectors v4, v6, v2, a6, t1, v12, v14
constrain_vectors v8, v10, v2, a6, t1, v16, v18
vadd.vv v4, v28, v12
vadd.vv v28, v4, v14
vadd.vv v4, v28, v16
vadd.vv v28, v4, v18
vmslt.vx v0, v28, zero
vadd.vi v28, v28, -1, v0.t
vsetvli zero, zero, e16, m1, ta, ma
vnclip.wi v24, v28, 4
vadd.vv v28, v2, v24
vse16.v v28, (a2)
add a2, a2, a3
sh1add a0, a1, a0
addi t0, t0, -1
bnez t0, 2b
addi sp, sp, 32 + 144*2
ret
cdef_filter_pri_sec_\w\()x\h:
li t1, 63
clz t2, a5
clz t3, a6
sub t2, t1, t2
sub t3, t1, t3
sub t1, a4, t2
sub t2, a4, t3
li t0, \h
la t3, dav1d_cdef_directions
vsetivli zero, \w, e16, m1, ta, ma
blt zero, t1, 3f
mv t1, zero
3:
li t5, 64-8
ld t4, 32 + 2*144 + 16(sp) # bitdepth_max
clz t4, t4
sub t4, t5, t4
sra t4, a5, t4
li t6, 4
andi t5, t4, 1
sub t4, t6, t5
addi t5, a7, 2
sh1add t5, t5, t3
vle16.v v2, (a2)
lb t6, 0(t5)
sh1add a4, t6, a0
slli t6, t6, 1
sub t6, a0, t6
vle16.v v4, (a4)
vle16.v v6, (t6)
vminu.vv v20, v4, v2
vmax.vv v24, v4, v2
vminu.vv v20, v6, v20
vmax.vv v24, v6, v24
vwsub.vv v8, v4, v2
vwsub.vv v16, v6, v2
vsetvli zero, zero, e32, m2, ta, mu
constrain_vectors v4, v6, v2, a5, t1, v8, v16
vmul.vx v28, v16, t4
vmacc.vx v28, t4, v8
andi t4, t4, 3
ori t4, t4, 2
lb t6, 1(t5)
sh1add a4, t6, a0
slli t6, t6, 1
sub t6, a0, t6
vsetvli zero, zero, e16, m1, ta, ma
vle16.v v4, (a4)
vle16.v v6, (t6)
vminu.vv v20, v4, v20
vmax.vv v24, v4, v24
vminu.vv v20, v6, v20
vmax.vv v24, v6, v24
vwsub.vv v8, v4, v2
vwsub.vv v16, v6, v2
vsetvli zero, zero, e32, m2, ta, mu
constrain_vectors v4, v6, v2, a5, t1, v8, v16
addi t5, a7, 4
vmacc.vx v28, t4, v16
vmacc.vx v28, t4, v8
sh1add t5, t5, t3
lb t6, 0(t5)
sh1add a4, t6, a0
slli t6, t6, 1
sub t6, a0, t6
vsetvli zero, zero, e16, m1, ta, ma
vle16.v v4, (a4)
vle16.v v6, (t6)
vminu.vv v20, v4, v20
vmax.vv v24, v4, v24
vminu.vv v20, v6, v20
vmax.vv v24, v6, v24
vwsub.vv v8, v4, v2
vwsub.vv v16, v6, v2
vsetvli zero, zero, e32, m2, ta, mu
li t6, 2
constrain_vectors v4, v6, v2, a6, t2, v8, v16
vmacc.vx v28, t6, v16
vmacc.vx v28, t6, v8
lb t6, 1(t5)
sh1add a4, t6, a0
slli t6, t6, 1
sub t6, a0, t6
vsetvli zero, zero, e16, m1, ta, ma
vle16.v v4, (a4)
vle16.v v6, (t6)
vminu.vv v20, v4, v20
vmax.vv v24, v4, v24
vminu.vv v20, v6, v20
vmax.vv v24, v6, v24
vwsub.vv v8, v4, v2
vwsub.vv v16, v6, v2
vsetvli zero, zero, e32, m2, ta, mu
constrain_vectors v4, v6, v2, a6, t2, v8, v16
sh1add t5, a7, t3
vadd.vv v4, v28, v8
vadd.vv v28, v4, v16
vsetvli zero, zero, e16, m1, ta, ma
lb t6, 0(t5)
sh1add a4, t6, a0
slli t6, t6, 1
sub t6, a0, t6
vle16.v v4, (a4)
vle16.v v6, (t6)
vminu.vv v20, v4, v20
vmax.vv v24, v4, v24
vminu.vv v20, v6, v20
vmax.vv v24, v6, v24
vwsub.vv v8, v4, v2
vwsub.vv v16, v6, v2
vsetvli zero, zero, e32, m2, ta, mu
li t6, 2
constrain_vectors v4, v6, v2, a6, t2, v8, v16
vmacc.vx v28, t6, v16
vmacc.vx v28, t6, v8
lb t6, 1(t5)
sh1add a4, t6, a0
slli t6, t6, 1
sub t6, a0, t6
vsetvli zero, zero, e16, m1, ta, ma
vle16.v v4, (a4)
vle16.v v6, (t6)
vminu.vv v20, v4, v20
vmax.vv v24, v4, v24
vminu.vv v20, v6, v20
vmax.vv v24, v6, v24
vwsub.vv v8, v4, v2
vwsub.vv v16, v6, v2
vsetvli zero, zero, e32, m2, ta, mu
constrain_vectors v4, v6, v2, a6, t2, v8, v16
vadd.vv v4, v28, v8
vadd.vv v28, v4, v16
vmslt.vx v0, v28, zero
vadd.vi v28, v28, -1, v0.t
vsetvli zero, zero, e16, m1, ta, ma
vnclip.wi v16, v28, 4
vadd.vv v28, v2, v16
vmslt.vv v0, v20, v28
vmerge.vvm v4, v20, v28, v0
vmslt.vv v0, v4, v24
vmerge.vvm v28, v24, v4, v0
vse16.v v28, (a2)
add a2, a2, a3
sh1add a0, a1, a0
addi t0, t0, -1
bnez t0, 3b
addi sp, sp, 32 + 144*2
ret
endfunc
.endm
cdef_fn 4, 4
cdef_fn 4, 8
cdef_fn 8, 8
Admenri/urge | 19,099 | third_party/dav1d/src/arm/64/cdef.S
/*
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2019, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm.S"
#include "util.S"
#include "cdef_tmpl.S"
.macro pad_top_bottom s1, s2, w, stride, rn, rw, ret
tst w7, #1 // CDEF_HAVE_LEFT
b.eq 2f
// CDEF_HAVE_LEFT
sub \s1, \s1, #2
sub \s2, \s2, #2
tst w7, #2 // CDEF_HAVE_RIGHT
b.eq 1f
// CDEF_HAVE_LEFT+CDEF_HAVE_RIGHT
ldr \rn\()0, [\s1]
ldr s1, [\s1, #\w]
ldr \rn\()2, [\s2]
ldr s3, [\s2, #\w]
uxtl v0.8h, v0.8b
uxtl v1.8h, v1.8b
uxtl v2.8h, v2.8b
uxtl v3.8h, v3.8b
str \rw\()0, [x0]
str d1, [x0, #2*\w]
add x0, x0, #2*\stride
str \rw\()2, [x0]
str d3, [x0, #2*\w]
.if \ret
ret
.else
add x0, x0, #2*\stride
b 3f
.endif
1:
// CDEF_HAVE_LEFT+!CDEF_HAVE_RIGHT
ldr \rn\()0, [\s1]
ldr h1, [\s1, #\w]
ldr \rn\()2, [\s2]
ldr h3, [\s2, #\w]
uxtl v0.8h, v0.8b
uxtl v1.8h, v1.8b
uxtl v2.8h, v2.8b
uxtl v3.8h, v3.8b
str \rw\()0, [x0]
str s1, [x0, #2*\w]
str s31, [x0, #2*\w+4]
add x0, x0, #2*\stride
str \rw\()2, [x0]
str s3, [x0, #2*\w]
str s31, [x0, #2*\w+4]
.if \ret
ret
.else
add x0, x0, #2*\stride
b 3f
.endif
2:
// !CDEF_HAVE_LEFT
tst w7, #2 // CDEF_HAVE_RIGHT
b.eq 1f
// !CDEF_HAVE_LEFT+CDEF_HAVE_RIGHT
ldr \rn\()0, [\s1]
ldr h1, [\s1, #\w]
ldr \rn\()2, [\s2]
ldr h3, [\s2, #\w]
uxtl v0.8h, v0.8b
uxtl v1.8h, v1.8b
uxtl v2.8h, v2.8b
uxtl v3.8h, v3.8b
str s31, [x0]
stur \rw\()0, [x0, #4]
str s1, [x0, #4+2*\w]
add x0, x0, #2*\stride
str s31, [x0]
stur \rw\()2, [x0, #4]
str s3, [x0, #4+2*\w]
.if \ret
ret
.else
add x0, x0, #2*\stride
b 3f
.endif
1:
// !CDEF_HAVE_LEFT+!CDEF_HAVE_RIGHT
ldr \rn\()0, [\s1]
ldr \rn\()1, [\s2]
uxtl v0.8h, v0.8b
uxtl v1.8h, v1.8b
str s31, [x0]
stur \rw\()0, [x0, #4]
str s31, [x0, #4+2*\w]
add x0, x0, #2*\stride
str s31, [x0]
stur \rw\()1, [x0, #4]
str s31, [x0, #4+2*\w]
.if \ret
ret
.else
add x0, x0, #2*\stride
.endif
3:
.endm
.macro load_n_incr dst, src, incr, w
.if \w == 4
ld1 {\dst\().s}[0], [\src], \incr
.else
ld1 {\dst\().8b}, [\src], \incr
.endif
.endm
// void dav1d_cdef_paddingX_8bpc_neon(uint16_t *tmp, const pixel *src,
// ptrdiff_t src_stride, const pixel (*left)[2],
// const pixel *const top,
// const pixel *const bottom, int h,
// enum CdefEdgeFlags edges);
.macro padding_func w, stride, rn, rw
function cdef_padding\w\()_8bpc_neon, export=1
cmp w7, #0xf // fully edged
b.eq cdef_padding\w\()_edged_8bpc_neon
movi v30.8h, #0x80, lsl #8
mov v31.16b, v30.16b
sub x0, x0, #2*(2*\stride+2)
tst w7, #4 // CDEF_HAVE_TOP
b.ne 1f
// !CDEF_HAVE_TOP
st1 {v30.8h, v31.8h}, [x0], #32
.if \w == 8
st1 {v30.8h, v31.8h}, [x0], #32
.endif
b 3f
1:
// CDEF_HAVE_TOP
add x9, x4, x2
pad_top_bottom x4, x9, \w, \stride, \rn, \rw, 0
// Middle section
3:
tst w7, #1 // CDEF_HAVE_LEFT
b.eq 2f
// CDEF_HAVE_LEFT
tst w7, #2 // CDEF_HAVE_RIGHT
b.eq 1f
// CDEF_HAVE_LEFT+CDEF_HAVE_RIGHT
0:
ld1 {v0.h}[0], [x3], #2
ldr h2, [x1, #\w]
load_n_incr v1, x1, x2, \w
subs w6, w6, #1
uxtl v0.8h, v0.8b
uxtl v1.8h, v1.8b
uxtl v2.8h, v2.8b
str s0, [x0]
stur \rw\()1, [x0, #4]
str s2, [x0, #4+2*\w]
add x0, x0, #2*\stride
b.gt 0b
b 3f
1:
// CDEF_HAVE_LEFT+!CDEF_HAVE_RIGHT
ld1 {v0.h}[0], [x3], #2
load_n_incr v1, x1, x2, \w
subs w6, w6, #1
uxtl v0.8h, v0.8b
uxtl v1.8h, v1.8b
str s0, [x0]
stur \rw\()1, [x0, #4]
str s31, [x0, #4+2*\w]
add x0, x0, #2*\stride
b.gt 1b
b 3f
2:
tst w7, #2 // CDEF_HAVE_RIGHT
b.eq 1f
// !CDEF_HAVE_LEFT+CDEF_HAVE_RIGHT
0:
ldr h1, [x1, #\w]
load_n_incr v0, x1, x2, \w
subs w6, w6, #1
uxtl v0.8h, v0.8b
uxtl v1.8h, v1.8b
str s31, [x0]
stur \rw\()0, [x0, #4]
str s1, [x0, #4+2*\w]
add x0, x0, #2*\stride
b.gt 0b
b 3f
1:
// !CDEF_HAVE_LEFT+!CDEF_HAVE_RIGHT
load_n_incr v0, x1, x2, \w
subs w6, w6, #1
uxtl v0.8h, v0.8b
str s31, [x0]
stur \rw\()0, [x0, #4]
str s31, [x0, #4+2*\w]
add x0, x0, #2*\stride
b.gt 1b
3:
tst w7, #8 // CDEF_HAVE_BOTTOM
b.ne 1f
// !CDEF_HAVE_BOTTOM
st1 {v30.8h, v31.8h}, [x0], #32
.if \w == 8
st1 {v30.8h, v31.8h}, [x0], #32
.endif
ret
1:
// CDEF_HAVE_BOTTOM
add x9, x5, x2
pad_top_bottom x5, x9, \w, \stride, \rn, \rw, 1
endfunc
.endm
padding_func 8, 16, d, q
padding_func 4, 8, s, d
// void cdef_paddingX_edged_8bpc_neon(uint8_t *tmp, const pixel *src,
// ptrdiff_t src_stride, const pixel (*left)[2],
// const pixel *const top,
// const pixel *const bottom, int h,
// enum CdefEdgeFlags edges);
.macro padding_func_edged w, stride, reg
function cdef_padding\w\()_edged_8bpc_neon, export=1
sub x4, x4, #2
sub x5, x5, #2
sub x0, x0, #(2*\stride+2)
.if \w == 4
ldr d0, [x4]
ldr d1, [x4, x2]
st1 {v0.8b, v1.8b}, [x0], #16
.else
add x9, x4, x2
ldr d0, [x4]
ldr s1, [x4, #8]
ldr d2, [x9]
ldr s3, [x9, #8]
str d0, [x0]
str s1, [x0, #8]
str d2, [x0, #\stride]
str s3, [x0, #\stride+8]
add x0, x0, #2*\stride
.endif
0:
ld1 {v0.h}[0], [x3], #2
ldr h2, [x1, #\w]
load_n_incr v1, x1, x2, \w
subs w6, w6, #1
str h0, [x0]
stur \reg\()1, [x0, #2]
str h2, [x0, #2+\w]
add x0, x0, #\stride
b.gt 0b
.if \w == 4
ldr d0, [x5]
ldr d1, [x5, x2]
st1 {v0.8b, v1.8b}, [x0], #16
.else
add x9, x5, x2
ldr d0, [x5]
ldr s1, [x5, #8]
ldr d2, [x9]
ldr s3, [x9, #8]
str d0, [x0]
str s1, [x0, #8]
str d2, [x0, #\stride]
str s3, [x0, #\stride+8]
.endif
ret
endfunc
.endm
padding_func_edged 8, 16, d
padding_func_edged 4, 8, s
tables
filter 8, 8
filter 4, 8
find_dir 8
.macro load_px_8 d1, d2, w
.if \w == 8
add x6, x2, w9, sxtb // x + off
sub x9, x2, w9, sxtb // x - off
ld1 {\d1\().d}[0], [x6] // p0
add x6, x6, #16 // += stride
ld1 {\d2\().d}[0], [x9] // p1
add x9, x9, #16 // += stride
ld1 {\d1\().d}[1], [x6] // p0
ld1 {\d2\().d}[1], [x9] // p1
.else
add x6, x2, w9, sxtb // x + off
sub x9, x2, w9, sxtb // x - off
ld1 {\d1\().s}[0], [x6] // p0
add x6, x6, #8 // += stride
ld1 {\d2\().s}[0], [x9] // p1
add x9, x9, #8 // += stride
ld1 {\d1\().s}[1], [x6] // p0
add x6, x6, #8 // += stride
ld1 {\d2\().s}[1], [x9] // p1
add x9, x9, #8 // += stride
ld1 {\d1\().s}[2], [x6] // p0
add x6, x6, #8 // += stride
ld1 {\d2\().s}[2], [x9] // p1
add x9, x9, #8 // += stride
ld1 {\d1\().s}[3], [x6] // p0
ld1 {\d2\().s}[3], [x9] // p1
.endif
.endm
.macro handle_pixel_8 s1, s2, thresh_vec, shift, tap, min
.if \min
umin v3.16b, v3.16b, \s1\().16b
umax v4.16b, v4.16b, \s1\().16b
umin v3.16b, v3.16b, \s2\().16b
umax v4.16b, v4.16b, \s2\().16b
.endif
uabd v16.16b, v0.16b, \s1\().16b // abs(diff)
uabd v20.16b, v0.16b, \s2\().16b // abs(diff)
ushl v17.16b, v16.16b, \shift // abs(diff) >> shift
ushl v21.16b, v20.16b, \shift // abs(diff) >> shift
uqsub v17.16b, \thresh_vec, v17.16b // clip = imax(0, threshold - (abs(diff) >> shift))
uqsub v21.16b, \thresh_vec, v21.16b // clip = imax(0, threshold - (abs(diff) >> shift))
cmhi v18.16b, v0.16b, \s1\().16b // px > p0
cmhi v22.16b, v0.16b, \s2\().16b // px > p1
umin v17.16b, v17.16b, v16.16b // imin(abs(diff), clip)
umin v21.16b, v21.16b, v20.16b // imin(abs(diff), clip)
dup v19.16b, \tap // taps[k]
neg v16.16b, v17.16b // -imin()
neg v20.16b, v21.16b // -imin()
bsl v18.16b, v16.16b, v17.16b // constrain() = apply_sign()
bsl v22.16b, v20.16b, v21.16b // constrain() = apply_sign()
mla v1.16b, v18.16b, v19.16b // sum += taps[k] * constrain()
mla v2.16b, v22.16b, v19.16b // sum += taps[k] * constrain()
.endm
// void cdef_filterX_edged_8bpc_neon(pixel *dst, ptrdiff_t dst_stride,
// const uint8_t *tmp, int pri_strength,
// int sec_strength, int dir, int damping,
// int h);
.macro filter_func_8 w, pri, sec, min, suffix
function cdef_filter\w\suffix\()_edged_8bpc_neon
.if \pri
movrel x8, pri_taps
and w9, w3, #1
add x8, x8, w9, uxtw #1
.endif
movrel x9, directions\w
add x5, x9, w5, uxtw #1
movi v30.8b, #7
dup v28.8b, w6 // damping
.if \pri
dup v25.16b, w3 // threshold
.endif
.if \sec
dup v27.16b, w4 // threshold
.endif
trn1 v24.8b, v25.8b, v27.8b
clz v24.8b, v24.8b // clz(threshold)
sub v24.8b, v30.8b, v24.8b // ulog2(threshold)
uqsub v24.8b, v28.8b, v24.8b // shift = imax(0, damping - ulog2(threshold))
neg v24.8b, v24.8b // -shift
.if \sec
dup v26.16b, v24.b[1]
.endif
.if \pri
dup v24.16b, v24.b[0]
.endif
1:
.if \w == 8
add x12, x2, #16
ld1 {v0.d}[0], [x2] // px
ld1 {v0.d}[1], [x12] // px
.else
add x12, x2, #1*8
add x13, x2, #2*8
add x14, x2, #3*8
ld1 {v0.s}[0], [x2] // px
ld1 {v0.s}[1], [x12] // px
ld1 {v0.s}[2], [x13] // px
ld1 {v0.s}[3], [x14] // px
.endif
// We need 9 bits or two 8-bit accumulators to fit the sum.
// Max |sum| = 15*2*6 (pri) + 4*4*3 (sec) = 228.
// Start sum at -1 instead of 0 to help handle rounding later.
movi v1.16b, #255 // sum
movi v2.16b, #0 // sum
.if \min
mov v3.16b, v0.16b // min
mov v4.16b, v0.16b // max
.endif
// Instead of loading sec_taps 2, 1 from memory, just set it
// to 2 initially and decrease for the second round.
// This is also used as loop counter.
mov w11, #2 // sec_taps[0]
2:
.if \pri
ldrb w9, [x5] // off1
load_px_8 v5, v6, \w
.endif
.if \sec
add x5, x5, #4 // +2*2
ldrb w9, [x5] // off2
load_px_8 v28, v29, \w
.endif
.if \pri
ldrb w10, [x8] // *pri_taps
handle_pixel_8 v5, v6, v25.16b, v24.16b, w10, \min
.endif
.if \sec
add x5, x5, #8 // +2*4
ldrb w9, [x5] // off3
load_px_8 v5, v6, \w
handle_pixel_8 v28, v29, v27.16b, v26.16b, w11, \min
handle_pixel_8 v5, v6, v27.16b, v26.16b, w11, \min
sub x5, x5, #11 // x5 -= 2*(2+4); x5 += 1;
.else
add x5, x5, #1 // x5 += 1
.endif
subs w11, w11, #1 // sec_tap-- (value)
.if \pri
add x8, x8, #1 // pri_taps++ (pointer)
.endif
b.ne 2b
// Perform halving adds since the value won't fit otherwise.
// To handle the offset for negative values, use both halving w/ and w/o rounding.
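// v1 was seeded with -1 (0xff) above, so v1 + v2 equals sum - 1: the rounding
// halving add therefore yields exactly sum >> 1, the plain halving add yields
// (sum - 1) >> 1, and selecting the latter when sum is negative reconstructs
// (sum - (sum < 0)) >> 1 without ever materializing the 9-bit sum.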
srhadd v5.16b, v1.16b, v2.16b // sum >> 1
shadd v6.16b, v1.16b, v2.16b // (sum - 1) >> 1
cmlt v1.16b, v5.16b, #0 // sum < 0
bsl v1.16b, v6.16b, v5.16b // (sum - (sum < 0)) >> 1
srshr v1.16b, v1.16b, #3 // (8 + sum - (sum < 0)) >> 4
usqadd v0.16b, v1.16b // px + (8 + sum ...) >> 4
.if \min
umin v0.16b, v0.16b, v4.16b
umax v0.16b, v0.16b, v3.16b // iclip(px + .., min, max)
.endif
.if \w == 8
st1 {v0.d}[0], [x0], x1
add x2, x2, #2*16 // tmp += 2*tmp_stride
subs w7, w7, #2 // h -= 2
st1 {v0.d}[1], [x0], x1
.else
st1 {v0.s}[0], [x0], x1
add x2, x2, #4*8 // tmp += 4*tmp_stride
st1 {v0.s}[1], [x0], x1
subs w7, w7, #4 // h -= 4
st1 {v0.s}[2], [x0], x1
st1 {v0.s}[3], [x0], x1
.endif
// Reset pri_taps and directions back to the original point
sub x5, x5, #2
.if \pri
sub x8, x8, #2
.endif
b.gt 1b
ret
endfunc
.endm
.macro filter_8 w
filter_func_8 \w, pri=1, sec=0, min=0, suffix=_pri
filter_func_8 \w, pri=0, sec=1, min=0, suffix=_sec
filter_func_8 \w, pri=1, sec=1, min=1, suffix=_pri_sec
.endm
filter_8 8
filter_8 4
Admenri/urge | 134,853 | third_party/dav1d/src/arm/64/itx16.S
/******************************************************************************
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2020, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#include "src/arm/asm.S"
#include "util.S"
// The exported functions in this file have got the following signature:
// void itxfm_add(pixel *dst, ptrdiff_t dst_stride, coef *coeff, int eob,
// int bitdepth_max);
// Most of the functions use the following register layout:
// x0-x3 external parameters
// x4 function pointer to first transform
// x5 function pointer to second transform
// x6 output parameter for helper function
// x7 input parameter for helper function
// x8 input stride for helper function
// x9-x12 scratch variables for helper functions
// x13 pointer to list of eob thresholds
// x14 return pointer for helper function
// x15 return pointer for main function
// The SIMD registers most often use the following layout:
// v0-v1 multiplication coefficients
// v2-v7 scratch registers
// v8-v15 unused
// v16-v31 inputs/outputs of transforms
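// Coefficient convention used throughout this file (added note): the tables
// hold cos/sin constants scaled by 4096 (1 << 12). Entries written as
// coef*8*(1<<16) are pre-scaled for sqrdmulh, which computes
// round(a*b / (1 << 31)); with b = coef*8*65536 = coef << 19 this works out to
// round(a*coef / (1 << 12)), i.e. the same multiply-and-downshift as
// mul_mla/mul_mls followed by srshr #12.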
const idct_coeffs, align=4
// idct4
.int 2896, 2896*8*(1<<16), 1567, 3784
// idct8
.int 799, 4017, 3406, 2276
// idct16
.int 401, 4076, 3166, 2598
.int 1931, 3612, 3920, 1189
// idct32
.int 201, 4091, 3035, 2751
.int 1751, 3703, 3857, 1380
.int 995, 3973, 3513, 2106
.int 2440, 3290, 4052, 601
endconst
const idct64_coeffs, align=4
.int 101*8*(1<<16), 4095*8*(1<<16), 2967*8*(1<<16), -2824*8*(1<<16)
.int 1660*8*(1<<16), 3745*8*(1<<16), 3822*8*(1<<16), -1474*8*(1<<16)
.int 4076, 401, 4017, 799
.int 4036*8*(1<<16), -700*8*(1<<16), 2359*8*(1<<16), 3349*8*(1<<16)
.int 3461*8*(1<<16), -2191*8*(1<<16), 897*8*(1<<16), 3996*8*(1<<16)
.int -3166, -2598, -799, -4017
.int 501*8*(1<<16), 4065*8*(1<<16), 3229*8*(1<<16), -2520*8*(1<<16)
.int 2019*8*(1<<16), 3564*8*(1<<16), 3948*8*(1<<16), -1092*8*(1<<16)
.int 3612, 1931, 2276, 3406
.int 4085*8*(1<<16), -301*8*(1<<16), 2675*8*(1<<16), 3102*8*(1<<16)
.int 3659*8*(1<<16), -1842*8*(1<<16), 1285*8*(1<<16), 3889*8*(1<<16)
.int -3920, -1189, -3406, -2276
endconst
const iadst4_coeffs, align=4
.int 1321, 3803, 2482, 3344
endconst
const iadst8_coeffs, align=4
.int 4076, 401, 3612, 1931
.int 2598, 3166, 1189, 3920
// idct_coeffs
.int 2896, 0, 1567, 3784
endconst
const iadst16_coeffs, align=4
.int 4091, 201, 3973, 995
.int 3703, 1751, 3290, 2440
.int 2751, 3035, 2106, 3513
.int 1380, 3857, 601, 4052
endconst
.macro mul_mla d, s0, s1, c0, c1
mul \d\().4s, \s0\().4s, \c0
mla \d\().4s, \s1\().4s, \c1
.endm
.macro mul_mls d, s0, s1, c0, c1
mul \d\().4s, \s0\().4s, \c0
mls \d\().4s, \s1\().4s, \c1
.endm
.macro scale_input sz, c, r0, r1, r2, r3, r4, r5, r6, r7
sqrdmulh \r0\sz, \r0\sz, \c
sqrdmulh \r1\sz, \r1\sz, \c
sqrdmulh \r2\sz, \r2\sz, \c
sqrdmulh \r3\sz, \r3\sz, \c
.ifnb \r4
sqrdmulh \r4\sz, \r4\sz, \c
sqrdmulh \r5\sz, \r5\sz, \c
sqrdmulh \r6\sz, \r6\sz, \c
sqrdmulh \r7\sz, \r7\sz, \c
.endif
.endm
.macro smin_4s r0, r1, r2
smin \r0\().4s, \r1\().4s, \r2\().4s
.endm
.macro smax_4s r0, r1, r2
smax \r0\().4s, \r1\().4s, \r2\().4s
.endm
.macro load_add_store load, shift, addsrc, adddst, min, store, dst, src, shiftbits=4
.ifnb \load
ld1 {\load}, [\src], x1
.endif
.ifnb \shift
srshr \shift, \shift, #\shiftbits
.endif
.ifnb \addsrc
usqadd \adddst, \addsrc
.endif
.ifnb \min
smin \min, \min, v7.8h
.endif
.ifnb \store
st1 {\store}, [\dst], x1
.endif
.endm
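// Note on the load_add_store_* wrappers below: each invocation passes
// different registers to the load, shift, add, clamp and store stages, so
// consecutive rows are processed in a software-pipelined, interleaved fashion
// rather than strictly one after another, overlapping memory accesses with
// the arithmetic of earlier rows.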
.macro load_add_store_8x16 dst, src
mov \src, \dst
mvni v7.8h, #0xfc, lsl #8 // 0x3ff
load_add_store v2.8h, v16.8h, , , , , \dst, \src
load_add_store v3.8h, v17.8h, , , , , \dst, \src
load_add_store v4.8h, v18.8h, v16.8h, v2.8h, , , \dst, \src
load_add_store v5.8h, v19.8h, v17.8h, v3.8h, v2.8h, , \dst, \src
load_add_store v16.8h, v20.8h, v18.8h, v4.8h, v3.8h, v2.8h, \dst, \src
load_add_store v17.8h, v21.8h, v19.8h, v5.8h, v4.8h, v3.8h, \dst, \src
load_add_store v18.8h, v22.8h, v20.8h, v16.8h, v5.8h, v4.8h, \dst, \src
load_add_store v19.8h, v23.8h, v21.8h, v17.8h, v16.8h, v5.8h, \dst, \src
load_add_store v20.8h, v24.8h, v22.8h, v18.8h, v17.8h, v16.8h, \dst, \src
load_add_store v21.8h, v25.8h, v23.8h, v19.8h, v18.8h, v17.8h, \dst, \src
load_add_store v22.8h, v26.8h, v24.8h, v20.8h, v19.8h, v18.8h, \dst, \src
load_add_store v23.8h, v27.8h, v25.8h, v21.8h, v20.8h, v19.8h, \dst, \src
load_add_store v24.8h, v28.8h, v26.8h, v22.8h, v21.8h, v20.8h, \dst, \src
load_add_store v25.8h, v29.8h, v27.8h, v23.8h, v22.8h, v21.8h, \dst, \src
load_add_store v26.8h, v30.8h, v28.8h, v24.8h, v23.8h, v22.8h, \dst, \src
load_add_store v27.8h, v31.8h, v29.8h, v25.8h, v24.8h, v23.8h, \dst, \src
load_add_store , , v30.8h, v26.8h, v25.8h, v24.8h, \dst, \src
load_add_store , , v31.8h, v27.8h, v26.8h, v25.8h, \dst, \src
load_add_store , , , , v27.8h, v26.8h, \dst, \src
load_add_store , , , , , v27.8h, \dst, \src
.endm
.macro load_add_store_8x8 dst, src, shiftbits=4
mov \src, \dst
mvni v7.8h, #0xfc, lsl #8 // 0x3ff
load_add_store v2.8h, v16.8h, , , , , \dst, \src, \shiftbits
load_add_store v3.8h, v17.8h, , , , , \dst, \src, \shiftbits
load_add_store v4.8h, v18.8h, v16.8h, v2.8h, , , \dst, \src, \shiftbits
load_add_store v5.8h, v19.8h, v17.8h, v3.8h, v2.8h, , \dst, \src, \shiftbits
load_add_store v16.8h, v20.8h, v18.8h, v4.8h, v3.8h, v2.8h, \dst, \src, \shiftbits
load_add_store v17.8h, v21.8h, v19.8h, v5.8h, v4.8h, v3.8h, \dst, \src, \shiftbits
load_add_store v18.8h, v22.8h, v20.8h, v16.8h, v5.8h, v4.8h, \dst, \src, \shiftbits
load_add_store v19.8h, v23.8h, v21.8h, v17.8h, v16.8h, v5.8h, \dst, \src, \shiftbits
load_add_store , , v22.8h, v18.8h, v17.8h, v16.8h, \dst, \src, \shiftbits
load_add_store , , v23.8h, v19.8h, v18.8h, v17.8h, \dst, \src, \shiftbits
load_add_store , , , , v19.8h, v18.8h, \dst, \src, \shiftbits
load_add_store , , , , , v19.8h, \dst, \src, \shiftbits
.endm
.macro load_add_store_8x4 dst, src, shiftbits=4
mov \src, \dst
mvni v7.8h, #0xfc, lsl #8 // 0x3ff
load_add_store v2.8h, v16.8h, , , , , \dst, \src, \shiftbits
load_add_store v3.8h, v17.8h, , , , , \dst, \src, \shiftbits
load_add_store v4.8h, v18.8h, v16.8h, v2.8h, , , \dst, \src, \shiftbits
load_add_store v5.8h, v19.8h, v17.8h, v3.8h, v2.8h, , \dst, \src, \shiftbits
load_add_store , , v18.8h, v4.8h, v3.8h, v2.8h, \dst, \src, \shiftbits
load_add_store , , v19.8h, v5.8h, v4.8h, v3.8h, \dst, \src, \shiftbits
load_add_store , , , , v5.8h, v4.8h, \dst, \src, \shiftbits
load_add_store , , , , , v5.8h, \dst, \src, \shiftbits
.endm
.macro load_add_store4 load, inssrc, insdst, shift, addsrc, adddst, min, store, dst, src
.ifnb \load
ld1 {\load}[0], [\src], x1
.endif
.ifnb \inssrc
ins \insdst\().d[1], \inssrc\().d[0]
.endif
.ifnb \shift
srshr \shift, \shift, #4
.endif
.ifnb \load
ld1 {\load}[1], [\src], x1
.endif
.ifnb \addsrc
usqadd \adddst, \addsrc
.endif
.ifnb \store
st1 {\store}[0], [\dst], x1
.endif
.ifnb \min
smin \min, \min, v7.8h
.endif
.ifnb \store
st1 {\store}[1], [\dst], x1
.endif
.endm
.macro load_add_store_4x16 dst, src
mov \src, \dst
mvni v7.8h, #0xfc, lsl #8 // 0x3ff
load_add_store4 v0.d, v17, v16, , , , , , \dst, \src
load_add_store4 v1.d, v19, v18, , , , , , \dst, \src
load_add_store4 v2.d, v21, v20, v16.8h, , , , , \dst, \src
load_add_store4 v3.d, v23, v22, v18.8h, v16.8h, v0.8h, , , \dst, \src
load_add_store4 v17.d, v25, v24, v20.8h, v18.8h, v1.8h, v0.8h, , \dst, \src
load_add_store4 v19.d, v27, v26, v22.8h, v20.8h, v2.8h, v1.8h, v0.d, \dst, \src
load_add_store4 v21.d, v29, v28, v24.8h, v22.8h, v3.8h, v2.8h, v1.d, \dst, \src
load_add_store4 v23.d, v31, v30, v26.8h, v24.8h, v17.8h, v3.8h, v2.d, \dst, \src
load_add_store4 , , , v28.8h, v26.8h, v19.8h, v17.8h, v3.d, \dst, \src
load_add_store4 , , , v30.8h, v28.8h, v21.8h, v19.8h, v17.d, \dst, \src
load_add_store4 , , , , v30.8h, v23.8h, v21.8h, v19.d, \dst, \src
load_add_store4 , , , , , , v23.8h, v21.d, \dst, \src
load_add_store4 , , , , , , , v23.d, \dst, \src
.endm
.macro load_add_store_4x8 dst, src
mov \src, \dst
mvni v7.8h, #0xfc, lsl #8 // 0x3ff
load_add_store4 v0.d, v17, v16, , , , , , \dst, \src
load_add_store4 v1.d, v19, v18, , , , , , \dst, \src
load_add_store4 v2.d, v21, v20, v16.8h, , , , , \dst, \src
load_add_store4 v3.d, v23, v22, v18.8h, v16.8h, v0.8h, , , \dst, \src
load_add_store4 , , , v20.8h, v18.8h, v1.8h, v0.8h, , \dst, \src
load_add_store4 , , , v22.8h, v20.8h, v2.8h, v1.8h, v0.d, \dst, \src
load_add_store4 , , , , v22.8h, v3.8h, v2.8h, v1.d, \dst, \src
load_add_store4 , , , , , , v3.8h, v2.d, \dst, \src
load_add_store4 , , , , , , , v3.d, \dst, \src
.endm
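// DC-only shortcut used by the dct_dct entry points: when eob (w3) is 0, only
// the DC coefficient can be nonzero and both transform passes collapse to a
// few scalings. Rough scalar equivalent (a sketch; rounding is done via
// sqrdmulh and the per-width store/clamp loops below are omitted):
//   dc = coeff[0];  coeff[0] = 0;
//   dc = (dc * 2896 + 2048) >> 12;              // first pass
//   if (w == 2*h || 2*w == h)
//       dc = (dc * 2896 + 2048) >> 12;          // extra 1/sqrt(2) for 2:1 rects
//   if (shift > 0) dc = (dc + (1 << (shift - 1))) >> shift;
//   dc = (dc * 2896 + 2048) >> 12;              // second pass
//   dc = (dc + 8) >> 4;                         // final downshift before adding to dst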
.macro idct_dc w, h, shift
cbnz w3, 1f
movz w16, #2896*8, lsl #16
ld1r {v16.4s}, [x2]
dup v0.2s, w16
sqrdmulh v20.4s, v16.4s, v0.s[0]
str wzr, [x2]
.if (\w == 2*\h) || (2*\w == \h)
sqrdmulh v20.4s, v20.4s, v0.s[0]
.endif
.if \shift > 0
sqrshrn v16.4h, v20.4s, #\shift
sqrshrn2 v16.8h, v20.4s, #\shift
.else
sqxtn v16.4h, v20.4s
sqxtn2 v16.8h, v20.4s
.endif
sqrdmulh v16.8h, v16.8h, v0.h[1]
srshr v16.8h, v16.8h, #4
mov w4, #\h
b idct_dc_w\w\()_neon
1:
.endm
function idct_dc_w4_neon
mvni v31.8h, #0xfc, lsl #8 // 0x3ff
1:
ld1 {v0.d}[0], [x0], x1
ld1 {v0.d}[1], [x0], x1
ld1 {v1.d}[0], [x0], x1
subs w4, w4, #4
ld1 {v1.d}[1], [x0], x1
usqadd v0.8h, v16.8h
sub x0, x0, x1, lsl #2
usqadd v1.8h, v16.8h
smin v0.8h, v0.8h, v31.8h
st1 {v0.d}[0], [x0], x1
smin v1.8h, v1.8h, v31.8h
st1 {v0.d}[1], [x0], x1
st1 {v1.d}[0], [x0], x1
st1 {v1.d}[1], [x0], x1
b.gt 1b
ret
endfunc
function idct_dc_w8_neon
mvni v31.8h, #0xfc, lsl #8 // 0x3ff
1:
ld1 {v0.8h}, [x0], x1
subs w4, w4, #4
ld1 {v1.8h}, [x0], x1
usqadd v0.8h, v16.8h
ld1 {v2.8h}, [x0], x1
usqadd v1.8h, v16.8h
ld1 {v3.8h}, [x0], x1
usqadd v2.8h, v16.8h
usqadd v3.8h, v16.8h
sub x0, x0, x1, lsl #2
smin v0.8h, v0.8h, v31.8h
smin v1.8h, v1.8h, v31.8h
st1 {v0.8h}, [x0], x1
smin v2.8h, v2.8h, v31.8h
st1 {v1.8h}, [x0], x1
smin v3.8h, v3.8h, v31.8h
st1 {v2.8h}, [x0], x1
st1 {v3.8h}, [x0], x1
b.gt 1b
ret
endfunc
function idct_dc_w16_neon
mvni v31.8h, #0xfc, lsl #8 // 0x3ff
1:
ld1 {v0.8h, v1.8h}, [x0], x1
subs w4, w4, #2
ld1 {v2.8h, v3.8h}, [x0], x1
usqadd v0.8h, v16.8h
usqadd v1.8h, v16.8h
sub x0, x0, x1, lsl #1
usqadd v2.8h, v16.8h
usqadd v3.8h, v16.8h
smin v0.8h, v0.8h, v31.8h
smin v1.8h, v1.8h, v31.8h
smin v2.8h, v2.8h, v31.8h
st1 {v0.8h, v1.8h}, [x0], x1
smin v3.8h, v3.8h, v31.8h
st1 {v2.8h, v3.8h}, [x0], x1
b.gt 1b
ret
endfunc
function idct_dc_w32_neon
mvni v31.8h, #0xfc, lsl #8 // 0x3ff
1:
ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0]
subs w4, w4, #1
usqadd v0.8h, v16.8h
usqadd v1.8h, v16.8h
usqadd v2.8h, v16.8h
usqadd v3.8h, v16.8h
smin v0.8h, v0.8h, v31.8h
smin v1.8h, v1.8h, v31.8h
smin v2.8h, v2.8h, v31.8h
smin v3.8h, v3.8h, v31.8h
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], x1
b.gt 1b
ret
endfunc
function idct_dc_w64_neon
mvni v31.8h, #0xfc, lsl #8 // 0x3ff
sub x1, x1, #64
1:
ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
subs w4, w4, #1
usqadd v0.8h, v16.8h
ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x0]
usqadd v1.8h, v16.8h
sub x0, x0, #64
usqadd v2.8h, v16.8h
usqadd v3.8h, v16.8h
usqadd v4.8h, v16.8h
usqadd v5.8h, v16.8h
usqadd v6.8h, v16.8h
usqadd v7.8h, v16.8h
smin v0.8h, v0.8h, v31.8h
smin v1.8h, v1.8h, v31.8h
smin v2.8h, v2.8h, v31.8h
smin v3.8h, v3.8h, v31.8h
smin v4.8h, v4.8h, v31.8h
smin v5.8h, v5.8h, v31.8h
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
smin v6.8h, v6.8h, v31.8h
smin v7.8h, v7.8h, v31.8h
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x0], x1
b.gt 1b
ret
endfunc
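// Scalar sketch of one iwht4 pass below (v16..v19 hold the four inputs;
// illustrative only):
//   a = in0 + in1;   d = in2 - in3;
//   e = (a - d) >> 1;
//   b = e - in3;     c = e - in1;
//   out0 = a - b;  out1 = b;  out2 = c;  out3 = d + c;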
.macro iwht4
add v16.4s, v16.4s, v17.4s
sub v21.4s, v18.4s, v19.4s
sub v20.4s, v16.4s, v21.4s
sshr v20.4s, v20.4s, #1
sub v18.4s, v20.4s, v17.4s
sub v17.4s, v20.4s, v19.4s
add v19.4s, v21.4s, v18.4s
sub v16.4s, v16.4s, v17.4s
.endm
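// Scalar sketch of idct_4 below. The coefficients are cos/sin values scaled
// by 4096: 2896 ~ cos(pi/4)*4096, 3784 ~ cos(pi/8)*4096, 1567 ~ sin(pi/8)*4096.
// (Sketch only; the saturating adds/subs are omitted.)
//   t0 = ((in0 + in2) * 2896 + 2048) >> 12;
//   t1 = ((in0 - in2) * 2896 + 2048) >> 12;
//   t2 = (in1*1567 - in3*3784 + 2048) >> 12;
//   t3 = (in1*3784 + in3*1567 + 2048) >> 12;
//   out0 = t0 + t3;  out1 = t1 + t2;  out2 = t1 - t2;  out3 = t0 - t3;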
.macro idct_4 r0, r1, r2, r3
mul_mla v6, \r1, \r3, v0.s[3], v0.s[2]
mul_mla v2, \r0, \r2, v0.s[0], v0.s[0]
mul_mls v4, \r1, \r3, v0.s[2], v0.s[3]
mul_mls v3, \r0, \r2, v0.s[0], v0.s[0]
srshr v6.4s, v6.4s, #12
srshr v2.4s, v2.4s, #12
srshr v7.4s, v4.4s, #12
srshr v3.4s, v3.4s, #12
sqadd \r0\().4s, v2.4s, v6.4s
sqsub \r3\().4s, v2.4s, v6.4s
sqadd \r1\().4s, v3.4s, v7.4s
sqsub \r2\().4s, v3.4s, v7.4s
.endm
function inv_dct_4s_x4_neon
AARCH64_VALID_CALL_TARGET
movrel x16, idct_coeffs
ld1 {v0.4s}, [x16]
idct_4 v16, v17, v18, v19
ret
endfunc
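// Scalar sketch of iadst_4x4 below (sinpi coefficients scaled by 4096;
// saturation omitted, illustrative only):
//   s0 = 1321*x0 + 3803*x2 + 2482*x3;
//   s1 = 2482*x0 - 1321*x2 - 3803*x3;
//   s2 = 3344*(x0 - x2 + x3);
//   s3 = 3344*x1;
//   out0 = (s0 + s3 + 2048) >> 12;  out1 = (s1 + s3 + 2048) >> 12;
//   out2 = (s2 + 2048) >> 12;       out3 = (s0 + s1 - s3 + 2048) >> 12;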
.macro iadst_4x4 o0, o1, o2, o3
movrel x16, iadst4_coeffs
ld1 {v0.4s}, [x16]
sub v3.4s, v16.4s, v18.4s
mul v4.4s, v16.4s, v0.s[0]
mla v4.4s, v18.4s, v0.s[1]
mla v4.4s, v19.4s, v0.s[2]
mul v7.4s, v17.4s, v0.s[3]
add v3.4s, v3.4s, v19.4s
mul v5.4s, v16.4s, v0.s[2]
mls v5.4s, v18.4s, v0.s[0]
mls v5.4s, v19.4s, v0.s[1]
add \o3\().4s, v4.4s, v5.4s
mul \o2\().4s, v3.4s, v0.s[3]
add \o0\().4s, v4.4s, v7.4s
add \o1\().4s, v5.4s, v7.4s
sub \o3\().4s, \o3\().4s, v7.4s
srshr \o0\().4s, \o0\().4s, #12
srshr \o2\().4s, \o2\().4s, #12
srshr \o1\().4s, \o1\().4s, #12
srshr \o3\().4s, \o3\().4s, #12
.endm
function inv_adst_4s_x4_neon
AARCH64_VALID_CALL_TARGET
iadst_4x4 v16, v17, v18, v19
ret
endfunc
function inv_flipadst_4s_x4_neon
AARCH64_VALID_CALL_TARGET
iadst_4x4 v19, v18, v17, v16
ret
endfunc
function inv_identity_4s_x4_neon
AARCH64_VALID_CALL_TARGET
movz w16, #(5793-4096)*8, lsl #16
dup v0.2s, w16
sqrdmulh v4.4s, v16.4s, v0.s[0]
sqrdmulh v5.4s, v17.4s, v0.s[0]
sqrdmulh v6.4s, v18.4s, v0.s[0]
sqrdmulh v7.4s, v19.4s, v0.s[0]
sqadd v16.4s, v16.4s, v4.4s
sqadd v17.4s, v17.4s, v5.4s
sqadd v18.4s, v18.4s, v6.4s
sqadd v19.4s, v19.4s, v7.4s
ret
endfunc
function inv_txfm_add_wht_wht_4x4_16bpc_neon, export=1
mov x15, x30
movi v30.4s, #0
movi v31.4s, #0
ld1 {v16.4s,v17.4s,v18.4s,v19.4s}, [x2]
st1 {v30.4s, v31.4s}, [x2], #32
sshr v16.4s, v16.4s, #2
sshr v17.4s, v17.4s, #2
sshr v18.4s, v18.4s, #2
sshr v19.4s, v19.4s, #2
iwht4
st1 {v30.4s, v31.4s}, [x2], #32
transpose_4x4s v16, v17, v18, v19, v20, v21, v22, v23
iwht4
ld1 {v0.d}[0], [x0], x1
sqxtn v16.4h, v16.4s
ld1 {v0.d}[1], [x0], x1
sqxtn2 v16.8h, v17.4s
ld1 {v1.d}[0], [x0], x1
sqxtn v18.4h, v18.4s
ld1 {v1.d}[1], [x0], x1
sqxtn2 v18.8h, v19.4s
b L(itx_4x4_end)
endfunc
// HBD inv_txfm_add_4x4_neon deviates from the common pattern with registers
// x0-x4 external parameters
// x5 function pointer to first transform
// x6 function pointer to second transform
function inv_txfm_add_4x4_neon
movi v30.4s, #0
movi v31.4s, #0
ld1 {v16.4s,v17.4s,v18.4s,v19.4s}, [x2]
st1 {v30.4s, v31.4s}, [x2], #32
blr x5
st1 {v30.4s, v31.4s}, [x2], #32
sqxtn v16.4h, v16.4s
sqxtn v17.4h, v17.4s
sqxtn v18.4h, v18.4s
sqxtn v19.4h, v19.4s
transpose_4x4h v16, v17, v18, v19, v20, v21, v22, v23
blr x6
ld1 {v0.d}[0], [x0], x1
ld1 {v0.d}[1], [x0], x1
ins v16.d[1], v17.d[0]
ins v18.d[1], v19.d[0]
ld1 {v1.d}[0], [x0], x1
ld1 {v1.d}[1], [x0], x1
srshr v16.8h, v16.8h, #4
srshr v18.8h, v18.8h, #4
L(itx_4x4_end):
dup v31.8h, w4
sub x0, x0, x1, lsl #2
usqadd v0.8h, v16.8h
usqadd v1.8h, v18.8h
smin v0.8h, v0.8h, v31.8h
st1 {v0.d}[0], [x0], x1
smin v1.8h, v1.8h, v31.8h
st1 {v0.d}[1], [x0], x1
st1 {v1.d}[0], [x0], x1
st1 {v1.d}[1], [x0], x1
ret x15
endfunc
.macro def_fn_4x4 txfm1, txfm2
function inv_txfm_add_\txfm1\()_\txfm2\()_4x4_16bpc_neon, export=1
mov x15, x30
.ifc \txfm1\()_\txfm2, dct_dct
cbnz w3, 1f
movz w16, #2896*8, lsl #16
ld1r {v16.4s}, [x2]
dup v4.2s, w16
str wzr, [x2]
sqrdmulh v16.4s, v16.4s, v4.s[0]
ld1 {v0.d}[0], [x0], x1
sqxtn v20.4h, v16.4s
sqxtn2 v20.8h, v16.4s
ld1 {v0.d}[1], [x0], x1
sqrdmulh v20.8h, v20.8h, v4.h[1]
ld1 {v1.d}[0], [x0], x1
srshr v16.8h, v20.8h, #4
ld1 {v1.d}[1], [x0], x1
srshr v18.8h, v20.8h, #4
movi v30.8h, #0
b L(itx_4x4_end)
1:
.endif
adr x5, inv_\txfm1\()_4s_x4_neon
movrel x6, X(inv_\txfm2\()_4h_x4_neon)
b inv_txfm_add_4x4_neon
endfunc
.endm
def_fn_4x4 dct, dct
def_fn_4x4 identity, identity
def_fn_4x4 dct, adst
def_fn_4x4 dct, flipadst
def_fn_4x4 dct, identity
def_fn_4x4 adst, dct
def_fn_4x4 adst, adst
def_fn_4x4 adst, flipadst
def_fn_4x4 flipadst, dct
def_fn_4x4 flipadst, adst
def_fn_4x4 flipadst, flipadst
def_fn_4x4 identity, dct
def_fn_4x4 adst, identity
def_fn_4x4 flipadst, identity
def_fn_4x4 identity, adst
def_fn_4x4 identity, flipadst
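// The 8-point and larger transforms below clamp their 32-bit intermediates to
// [0xfffe0000, 0x1ffff], i.e. a signed 18-bit range; as the inline comments
// note, these bounds are (~bdmax << 7) and ~(~bdmax << 7) for bdmax = 0x3ff.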
.macro idct_8 r0, r1, r2, r3, r4, r5, r6, r7
idct_4 \r0, \r2, \r4, \r6
movi v5.4s, #1, msl #16 // row_clip_max = ~(~bdmax << 7), 0x1ffff
mvni v4.4s, #1, msl #16 // row_clip_min = (~bdmax << 7), 0xfffe0000
.irp r, \r0, \r2, \r4, \r6
smin_4s \r, \r, v5
.endr
.irp r, \r0, \r2, \r4, \r6
smax_4s \r, \r, v4
.endr
mul_mls v2, \r1, \r7, v1.s[0], v1.s[1] // -> t4a
mul_mla v3, \r1, \r7, v1.s[1], v1.s[0] // -> t7a
mul_mls v6, \r5, \r3, v1.s[2], v1.s[3] // -> t5a
mul_mla v7, \r5, \r3, v1.s[3], v1.s[2] // -> t6a
srshr \r1\().4s, v2.4s, #12 // t4a
srshr \r7\().4s, v3.4s, #12 // t7a
srshr \r3\().4s, v6.4s, #12 // t5a
srshr \r5\().4s, v7.4s, #12 // t6a
sqadd v2.4s, \r1\().4s, \r3\().4s // t4
sqsub \r1\().4s, \r1\().4s, \r3\().4s // t5a
sqadd v3.4s, \r7\().4s, \r5\().4s // t7
sqsub \r3\().4s, \r7\().4s, \r5\().4s // t6a
.irp r, v2, \r1, v3, \r3
smin_4s \r, \r, v5
.endr
.irp r, v2, \r1, v3, \r3
smax_4s \r, \r, v4
.endr
mul_mls v7, \r3, \r1, v0.s[0], v0.s[0] // -> t5
mul_mla v6, \r3, \r1, v0.s[0], v0.s[0] // -> t6
srshr v7.4s, v7.4s, #12 // t5
srshr v6.4s, v6.4s, #12 // t6
sqsub \r7\().4s, \r0\().4s, v3.4s // out7
sqadd \r0\().4s, \r0\().4s, v3.4s // out0
sqadd \r1\().4s, \r2\().4s, v6.4s // out1
sqsub v6.4s, \r2\().4s, v6.4s // out6
sqadd \r2\().4s, \r4\().4s, v7.4s // out2
sqsub \r5\().4s, \r4\().4s, v7.4s // out5
sqadd \r3\().4s, \r6\().4s, v2.4s // out3
sqsub \r4\().4s, \r6\().4s, v2.4s // out4
mov \r6\().16b, v6.16b // out6
.endm
function inv_dct_4s_x8_neon
AARCH64_VALID_CALL_TARGET
movrel x16, idct_coeffs
ld1 {v0.4s, v1.4s}, [x16]
idct_8 v16, v17, v18, v19, v20, v21, v22, v23
ret
endfunc
.macro iadst_8 o0, o1, o2, o3, o4, o5, o6, o7
movrel x16, iadst8_coeffs
ld1 {v0.4s, v1.4s}, [x16], #32
mul_mla v2, v23, v16, v0.s[0], v0.s[1]
mul_mls v4, v23, v16, v0.s[1], v0.s[0]
mul_mla v6, v21, v18, v0.s[2], v0.s[3]
srshr v16.4s, v2.4s, #12 // t0a
srshr v23.4s, v4.4s, #12 // t1a
mul_mls v2, v21, v18, v0.s[3], v0.s[2]
mul_mla v4, v19, v20, v1.s[0], v1.s[1]
srshr v18.4s, v6.4s, #12 // t2a
srshr v21.4s, v2.4s, #12 // t3a
mul_mls v6, v19, v20, v1.s[1], v1.s[0]
mul_mla v2, v17, v22, v1.s[2], v1.s[3]
srshr v20.4s, v4.4s, #12 // t4a
srshr v19.4s, v6.4s, #12 // t5a
mul_mls v4, v17, v22, v1.s[3], v1.s[2]
srshr v22.4s, v2.4s, #12 // t6a
srshr v17.4s, v4.4s, #12 // t7a
ld1 {v0.4s}, [x16]
movi v1.4s, #1, msl #16 // row_clip_max = ~(~bdmax << 7), 0x1ffff
sqadd v2.4s, v16.4s, v20.4s // t0
sqsub v3.4s, v16.4s, v20.4s // t4
mvni v20.4s, #1, msl #16 // row_clip_min = (~bdmax << 7), 0xfffe0000
sqadd v4.4s, v23.4s, v19.4s // t1
sqsub v5.4s, v23.4s, v19.4s // t5
sqadd v6.4s, v18.4s, v22.4s // t2
sqsub v7.4s, v18.4s, v22.4s // t6
sqadd v18.4s, v21.4s, v17.4s // t3
sqsub v19.4s, v21.4s, v17.4s // t7
.irp r, v2, v3, v4, v5, v6, v7, v18, v19
smin_4s \r, \r, v1
.endr
.irp r, v2, v3, v4, v5, v6, v7, v18, v19
smax_4s \r, \r, v20
.endr
mul_mla v16, v3, v5, v0.s[3], v0.s[2]
mul_mls v20, v3, v5, v0.s[2], v0.s[3]
mul_mls v22, v19, v7, v0.s[3], v0.s[2]
srshr v3.4s, v16.4s, #12 // t4a
srshr v5.4s, v20.4s, #12 // t5a
mul_mla v16, v19, v7, v0.s[2], v0.s[3]
srshr v7.4s, v22.4s, #12 // t6a
srshr v19.4s, v16.4s, #12 // t7a
sqadd \o0\().4s, v2.4s, v6.4s // out0
sqsub v2.4s, v2.4s, v6.4s // t2
sqadd \o7\().4s, v4.4s, v18.4s // out7
sqsub v4.4s, v4.4s, v18.4s // t3
mvni v18.4s, #1, msl #16 // row_clip_min = (~bdmax << 7), 0xfffe0000
sqadd \o1\().4s, v3.4s, v7.4s // out1
sqsub v3.4s, v3.4s, v7.4s // t6
sqadd \o6\().4s, v5.4s, v19.4s // out6
sqsub v5.4s, v5.4s, v19.4s // t7
// Not clipping the output registers, as they will be downshifted and
// narrowed afterwards anyway.
.irp r, v2, v4, v3, v5
smin_4s \r, \r, v1
.endr
.irp r, v2, v4, v3, v5
smax_4s \r, \r, v18
.endr
sqneg \o7\().4s, \o7\().4s // out7
sqneg \o1\().4s, \o1\().4s // out1
mul_mla v18, v2, v4, v0.s[0], v0.s[0] // -> out3 (v19 or v20)
mul_mls v6, v2, v4, v0.s[0], v0.s[0] // -> out4 (v20 or v19)
mul_mls v20, v3, v5, v0.s[0], v0.s[0] // -> out5 (v21 or v18)
srshr v2.4s, v18.4s, #12 // out3
mul_mla v18, v3, v5, v0.s[0], v0.s[0] // -> out2 (v18 or v21)
srshr v3.4s, v20.4s, #12 // out5
srshr \o2\().4s, v18.4s, #12 // out2 (v18 or v21)
srshr \o4\().4s, v6.4s, #12 // out4 (v20 or v19)
sqneg \o3\().4s, v2.4s // out3
sqneg \o5\().4s, v3.4s // out5
.endm
function inv_adst_4s_x8_neon
AARCH64_VALID_CALL_TARGET
iadst_8 v16, v17, v18, v19, v20, v21, v22, v23
ret
endfunc
function inv_flipadst_4s_x8_neon
AARCH64_VALID_CALL_TARGET
iadst_8 v23, v22, v21, v20, v19, v18, v17, v16
ret
endfunc
function inv_identity_4s_x8_neon
AARCH64_VALID_CALL_TARGET
sqshl v16.4s, v16.4s, #1
sqshl v17.4s, v17.4s, #1
sqshl v18.4s, v18.4s, #1
sqshl v19.4s, v19.4s, #1
sqshl v20.4s, v20.4s, #1
sqshl v21.4s, v21.4s, #1
sqshl v22.4s, v22.4s, #1
sqshl v23.4s, v23.4s, #1
ret
endfunc
function inv_txfm_add_8x8_neon
movi v31.4s, #0
cmp w3, w13
mov x11, #32
b.lt 1f
add x6, x2, #16
.irp i, v16.4s, v17.4s, v18.4s, v19.4s, v20.4s, v21.4s, v22.4s, v23.4s
ld1 {\i}, [x6]
st1 {v31.4s}, [x6], x11
.endr
blr x4
sqrshrn v24.4h, v16.4s, #1
sqrshrn v25.4h, v17.4s, #1
sqrshrn v26.4h, v18.4s, #1
sqrshrn v27.4h, v19.4s, #1
sqrshrn2 v24.8h, v20.4s, #1
sqrshrn2 v25.8h, v21.4s, #1
sqrshrn2 v26.8h, v22.4s, #1
sqrshrn2 v27.8h, v23.4s, #1
transpose_4x8h v24, v25, v26, v27, v2, v3, v4, v5
b 2f
1:
.irp i, v24.8h, v25.8h, v26.8h, v27.8h
movi \i, #0
.endr
2:
.irp i, v16.4s, v17.4s, v18.4s, v19.4s, v20.4s, v21.4s, v22.4s, v23.4s
ld1 {\i}, [x2]
st1 {v31.4s}, [x2], x11
.endr
blr x4
sqrshrn v16.4h, v16.4s, #1
sqrshrn v17.4h, v17.4s, #1
sqrshrn v18.4h, v18.4s, #1
sqrshrn v19.4h, v19.4s, #1
sqrshrn2 v16.8h, v20.4s, #1
sqrshrn2 v17.8h, v21.4s, #1
sqrshrn2 v18.8h, v22.4s, #1
sqrshrn2 v19.8h, v23.4s, #1
transpose_4x8h v16, v17, v18, v19, v20, v21, v22, v23
mov v20.16b, v24.16b
mov v21.16b, v25.16b
mov v22.16b, v26.16b
mov v23.16b, v27.16b
blr x5
load_add_store_8x8 x0, x7
ret x15
endfunc
.macro def_fn_8x8 txfm1, txfm2, eob_half
function inv_txfm_add_\txfm1\()_\txfm2\()_8x8_16bpc_neon, export=1
mov x15, x30
.ifc \txfm1\()_\txfm2, dct_dct
idct_dc 8, 8, 1
.endif
movrel x5, X(inv_\txfm2\()_8h_x8_neon)
mov w13, #\eob_half
adr x4, inv_\txfm1\()_4s_x8_neon
b inv_txfm_add_8x8_neon
endfunc
.endm
def_fn_8x8 dct, dct, 10
def_fn_8x8 identity, identity, 10
def_fn_8x8 dct, adst, 10
def_fn_8x8 dct, flipadst, 10
def_fn_8x8 dct, identity, 4
def_fn_8x8 adst, dct, 10
def_fn_8x8 adst, adst, 10
def_fn_8x8 adst, flipadst, 10
def_fn_8x8 flipadst, dct, 10
def_fn_8x8 flipadst, adst, 10
def_fn_8x8 flipadst, flipadst, 10
def_fn_8x8 identity, dct, 4
def_fn_8x8 adst, identity, 4
def_fn_8x8 flipadst, identity, 4
def_fn_8x8 identity, adst, 4
def_fn_8x8 identity, flipadst, 4
function inv_txfm_add_8x4_neon
movi v28.4s, #0
movi v29.4s, #0
movi v30.4s, #0
movi v31.4s, #0
ld1 {v16.4s,v17.4s,v18.4s,v19.4s}, [x2]
st1 {v28.4s,v29.4s,v30.4s,v31.4s}, [x2], #64
movz w16, #2896*8, lsl #16
dup v0.2s, w16
ld1 {v20.4s,v21.4s,v22.4s,v23.4s}, [x2]
st1 {v28.4s,v29.4s,v30.4s,v31.4s}, [x2]
scale_input .4s, v0.s[0], v16, v17, v18, v19, v20, v21, v22, v23
blr x4
sqxtn v16.4h, v16.4s
sqxtn v17.4h, v17.4s
sqxtn v18.4h, v18.4s
sqxtn v19.4h, v19.4s
sqxtn v20.4h, v20.4s
sqxtn v21.4h, v21.4s
sqxtn v22.4h, v22.4s
sqxtn v23.4h, v23.4s
transpose_4x4h v16, v17, v18, v19, v4, v5, v6, v7
transpose_4x4h v20, v21, v22, v23, v4, v5, v6, v7
ins v16.d[1], v20.d[0]
ins v17.d[1], v21.d[0]
ins v18.d[1], v22.d[0]
ins v19.d[1], v23.d[0]
blr x5
load_add_store_8x4 x0, x7
ret x15
endfunc
function inv_txfm_add_4x8_neon
movz w16, #2896*8, lsl #16
movi v31.4s, #0
dup v30.2s, w16
cmp w3, w13
mov x11, #32
b.lt 1f
add x6, x2, #16
.irp i, v16.4s, v17.4s, v18.4s, v19.4s
ld1 {\i}, [x6]
st1 {v31.4s}, [x6], x11
.endr
scale_input .4s, v30.s[0], v16, v17, v18, v19
blr x4
sqxtn v20.4h, v16.4s
sqxtn v21.4h, v17.4s
sqxtn v22.4h, v18.4s
sqxtn v23.4h, v19.4s
transpose_4x4h v20, v21, v22, v23, v4, v5, v6, v7
b 2f
1:
.irp i, v20, v21, v22, v23
movi \i\().4h, #0
.endr
2:
.irp i, v16.4s, v17.4s, v18.4s, v19.4s
ld1 {\i}, [x2]
st1 {v31.4s}, [x2], x11
.endr
scale_input .4s, v30.s[0], v16, v17, v18, v19
blr x4
sqxtn v16.4h, v16.4s
sqxtn v17.4h, v17.4s
sqxtn v18.4h, v18.4s
sqxtn v19.4h, v19.4s
transpose_4x4h v16, v17, v18, v19, v4, v5, v6, v7
blr x5
load_add_store_4x8 x0, x7
ret x15
endfunc
.macro def_fn_48 w, h, txfm1, txfm2, eob_half
function inv_txfm_add_\txfm1\()_\txfm2\()_\w\()x\h\()_16bpc_neon, export=1
mov x15, x30
.ifc \txfm1\()_\txfm2, dct_dct
idct_dc \w, \h, 0
.endif
adr x4, inv_\txfm1\()_4s_x\w\()_neon
.if \w == 4
mov w13, #\eob_half
.endif
movrel x5, X(inv_\txfm2\()_\w\()h_x\h\()_neon)
b inv_txfm_add_\w\()x\h\()_neon
endfunc
.endm
.macro def_fns_48 w, h
def_fn_48 \w, \h, dct, dct, 13
def_fn_48 \w, \h, identity, identity, 13
def_fn_48 \w, \h, dct, adst, 13
def_fn_48 \w, \h, dct, flipadst, 13
def_fn_48 \w, \h, dct, identity, 4
def_fn_48 \w, \h, adst, dct, 13
def_fn_48 \w, \h, adst, adst, 13
def_fn_48 \w, \h, adst, flipadst, 13
def_fn_48 \w, \h, flipadst, dct, 13
def_fn_48 \w, \h, flipadst, adst, 13
def_fn_48 \w, \h, flipadst, flipadst, 13
def_fn_48 \w, \h, identity, dct, 16
def_fn_48 \w, \h, adst, identity, 4
def_fn_48 \w, \h, flipadst, identity, 4
def_fn_48 \w, \h, identity, adst, 16
def_fn_48 \w, \h, identity, flipadst, 16
.endm
def_fns_48 4, 8
def_fns_48 8, 4
function inv_dct_4s_x16_neon
AARCH64_VALID_CALL_TARGET
movrel x16, idct_coeffs
ld1 {v0.4s, v1.4s}, [x16], #32
idct_8 v16, v18, v20, v22, v24, v26, v28, v30
// idct_8 leaves the row_clip_max/min constants in v5 and v4
.irp r, v16, v18, v20, v22, v24, v26, v28, v30
smin \r\().4s, \r\().4s, v5.4s
.endr
.irp r, v16, v18, v20, v22, v24, v26, v28, v30
smax \r\().4s, \r\().4s, v4.4s
.endr
ld1 {v0.4s, v1.4s}, [x16]
sub x16, x16, #32
mul_mls v2, v17, v31, v0.s[0], v0.s[1] // -> t8a
mul_mla v3, v17, v31, v0.s[1], v0.s[0] // -> t15a
mul_mls v6, v25, v23, v0.s[2], v0.s[3] // -> t9a
srshr v17.4s, v2.4s, #12 // t8a
srshr v31.4s, v3.4s, #12 // t15a
mul_mla v2, v25, v23, v0.s[3], v0.s[2] // -> t14a
mul_mls v3, v21, v27, v1.s[0], v1.s[1] // -> t10a
srshr v23.4s, v6.4s, #12 // t9a
srshr v25.4s, v2.4s, #12 // t14a
mul_mla v6, v21, v27, v1.s[1], v1.s[0] // -> t13a
mul_mls v2, v29, v19, v1.s[2], v1.s[3] // -> t11a
srshr v21.4s, v3.4s, #12 // t10a
srshr v27.4s, v6.4s, #12 // t13a
mul_mla v3, v29, v19, v1.s[3], v1.s[2] // -> t12a
srshr v19.4s, v2.4s, #12 // t11a
srshr v29.4s, v3.4s, #12 // t12a
ld1 {v0.4s}, [x16]
sqsub v2.4s, v17.4s, v23.4s // t9
sqadd v17.4s, v17.4s, v23.4s // t8
sqsub v3.4s, v31.4s, v25.4s // t14
sqadd v31.4s, v31.4s, v25.4s // t15
sqsub v23.4s, v19.4s, v21.4s // t10
sqadd v19.4s, v19.4s, v21.4s // t11
sqadd v25.4s, v29.4s, v27.4s // t12
sqsub v29.4s, v29.4s, v27.4s // t13
.irp r, v2, v17, v3, v31, v23, v19, v25, v29
smin \r\().4s, \r\().4s, v5.4s
.endr
.irp r, v2, v17, v3, v31, v23, v19, v25, v29
smax \r\().4s, \r\().4s, v4.4s
.endr
mul_mls v7, v3, v2, v0.s[2], v0.s[3] // -> t9a
mul_mla v6, v3, v2, v0.s[3], v0.s[2] // -> t14a
srshr v21.4s, v7.4s, #12 // t9a
srshr v27.4s, v6.4s, #12 // t14a
mul_mls v7, v29, v23, v0.s[2], v0.s[3] // -> t13a
mul_mla v6, v29, v23, v0.s[3], v0.s[2] // -> t10a
srshr v29.4s, v7.4s, #12 // t13a
neg v6.4s, v6.4s
srshr v23.4s, v6.4s, #12 // t10a
sqsub v2.4s, v17.4s, v19.4s // t11a
sqadd v17.4s, v17.4s, v19.4s // t8a
sqsub v3.4s, v31.4s, v25.4s // t12a
sqadd v31.4s, v31.4s, v25.4s // t15a
sqadd v19.4s, v21.4s, v23.4s // t9
sqsub v21.4s, v21.4s, v23.4s // t10
sqsub v25.4s, v27.4s, v29.4s // t13
sqadd v27.4s, v27.4s, v29.4s // t14
.irp r, v2, v17, v3, v31, v19, v21, v25, v27
smin \r\().4s, \r\().4s, v5.4s
.endr
.irp r, v2, v17, v3, v31, v19, v21, v25, v27
smax \r\().4s, \r\().4s, v4.4s
.endr
mul_mls v7, v3, v2, v0.s[0], v0.s[0] // -> t11
mul_mla v6, v3, v2, v0.s[0], v0.s[0] // -> t12
mul_mls v2, v25, v21, v0.s[0], v0.s[0] // -> t10a
srshr v7.4s, v7.4s, #12 // t11
srshr v6.4s, v6.4s, #12 // t12
mul_mla v3, v25, v21, v0.s[0], v0.s[0] // -> t13a
srshr v2.4s, v2.4s, #12 // t10a
srshr v3.4s, v3.4s, #12 // t13a
sqadd v1.4s, v16.4s, v31.4s // out0
sqsub v31.4s, v16.4s, v31.4s // out15
mov v16.16b, v1.16b
sqadd v23.4s, v30.4s, v17.4s // out7
sqsub v1.4s, v30.4s, v17.4s // out8
sqadd v17.4s, v18.4s, v27.4s // out1
sqsub v30.4s, v18.4s, v27.4s // out14
sqadd v18.4s, v20.4s, v3.4s // out2
sqsub v29.4s, v20.4s, v3.4s // out13
sqadd v3.4s, v28.4s, v19.4s // out6
sqsub v25.4s, v28.4s, v19.4s // out9
sqadd v19.4s, v22.4s, v6.4s // out3
sqsub v28.4s, v22.4s, v6.4s // out12
sqadd v20.4s, v24.4s, v7.4s // out4
sqsub v27.4s, v24.4s, v7.4s // out11
sqadd v21.4s, v26.4s, v2.4s // out5
sqsub v26.4s, v26.4s, v2.4s // out10
mov v24.16b, v1.16b
mov v22.16b, v3.16b
ret
endfunc
.macro iadst_16 o0, o1, o2, o3, o4, o5, o6, o7, o8, o9, o10, o11, o12, o13, o14, o15
movrel x16, iadst16_coeffs
ld1 {v0.4s, v1.4s}, [x16], #32
mul_mla v2, v31, v16, v0.s[0], v0.s[1] // -> t0
mul_mls v4, v31, v16, v0.s[1], v0.s[0] // -> t1
mul_mla v6, v29, v18, v0.s[2], v0.s[3] // -> t2
srshr v16.4s, v2.4s, #12 // t0
srshr v31.4s, v4.4s, #12 // t1
mul_mls v2, v29, v18, v0.s[3], v0.s[2] // -> t3
mul_mla v4, v27, v20, v1.s[0], v1.s[1] // -> t4
srshr v18.4s, v6.4s, #12 // t2
srshr v29.4s, v2.4s, #12 // t3
mul_mls v6, v27, v20, v1.s[1], v1.s[0] // -> t5
mul_mla v2, v25, v22, v1.s[2], v1.s[3] // -> t6
srshr v20.4s, v4.4s, #12 // t4
srshr v27.4s, v6.4s, #12 // t5
mul_mls v4, v25, v22, v1.s[3], v1.s[2] // -> t7
ld1 {v0.4s, v1.4s}, [x16]
movrel x16, idct_coeffs
mul_mla v6, v23, v24, v0.s[0], v0.s[1] // -> t8
srshr v22.4s, v2.4s, #12 // t6
srshr v25.4s, v4.4s, #12 // t7
mul_mls v2, v23, v24, v0.s[1], v0.s[0] // -> t9
mul_mla v4, v21, v26, v0.s[2], v0.s[3] // -> t10
srshr v23.4s, v6.4s, #12 // t8
srshr v24.4s, v2.4s, #12 // t9
mul_mls v6, v21, v26, v0.s[3], v0.s[2] // -> t11
mul_mla v2, v19, v28, v1.s[0], v1.s[1] // -> t12
srshr v21.4s, v4.4s, #12 // t10
srshr v26.4s, v6.4s, #12 // t11
mul_mls v4, v19, v28, v1.s[1], v1.s[0] // -> t13
mul_mla v6, v17, v30, v1.s[2], v1.s[3] // -> t14
srshr v19.4s, v2.4s, #12 // t12
srshr v28.4s, v4.4s, #12 // t13
mul_mls v2, v17, v30, v1.s[3], v1.s[2] // -> t15
srshr v17.4s, v6.4s, #12 // t14
srshr v30.4s, v2.4s, #12 // t15
ld1 {v0.4s, v1.4s}, [x16]
movi v5.4s, #1, msl #16 // row_clip_max = ~(~bdmax << 7), 0x1ffff
mvni v7.4s, #1, msl #16 // row_clip_min = (~bdmax << 7), 0xfffe0000
sqsub v2.4s, v16.4s, v23.4s // t8a
sqadd v16.4s, v16.4s, v23.4s // t0a
sqsub v3.4s, v31.4s, v24.4s // t9a
sqadd v31.4s, v31.4s, v24.4s // t1a
sqadd v23.4s, v18.4s, v21.4s // t2a
sqsub v18.4s, v18.4s, v21.4s // t10a
sqadd v24.4s, v29.4s, v26.4s // t3a
sqsub v29.4s, v29.4s, v26.4s // t11a
sqadd v21.4s, v20.4s, v19.4s // t4a
sqsub v20.4s, v20.4s, v19.4s // t12a
sqadd v26.4s, v27.4s, v28.4s // t5a
sqsub v27.4s, v27.4s, v28.4s // t13a
sqadd v19.4s, v22.4s, v17.4s // t6a
sqsub v22.4s, v22.4s, v17.4s // t14a
sqadd v28.4s, v25.4s, v30.4s // t7a
sqsub v25.4s, v25.4s, v30.4s // t15a
.irp r, v2, v16, v3, v31, v23, v18, v24, v29, v21, v20, v26, v27, v19, v22, v28, v25
smin_4s \r, \r, v5
.endr
.irp r, v2, v16, v3, v31, v23, v18, v24, v29, v21, v20, v26, v27, v19, v22, v28, v25
smax_4s \r, \r, v7
.endr
mul_mla v4, v2, v3, v1.s[1], v1.s[0] // -> t8
mul_mls v6, v2, v3, v1.s[0], v1.s[1] // -> t9
mul_mla v2, v18, v29, v1.s[3], v1.s[2] // -> t10
srshr v17.4s, v4.4s, #12 // t8
srshr v30.4s, v6.4s, #12 // t9
mul_mls v4, v18, v29, v1.s[2], v1.s[3] // -> t11
mul_mls v6, v27, v20, v1.s[1], v1.s[0] // -> t12
srshr v18.4s, v2.4s, #12 // t10
srshr v29.4s, v4.4s, #12 // t11
mul_mla v2, v27, v20, v1.s[0], v1.s[1] // -> t13
mul_mls v4, v25, v22, v1.s[3], v1.s[2] // -> t14
srshr v27.4s, v6.4s, #12 // t12
srshr v20.4s, v2.4s, #12 // t13
mul_mla v6, v25, v22, v1.s[2], v1.s[3] // -> t15
srshr v25.4s, v4.4s, #12 // t14
srshr v22.4s, v6.4s, #12 // t15
sqsub v2.4s, v16.4s, v21.4s // t4
sqadd v16.4s, v16.4s, v21.4s // t0
sqsub v3.4s, v31.4s, v26.4s // t5
sqadd v31.4s, v31.4s, v26.4s // t1
sqadd v21.4s, v23.4s, v19.4s // t2
sqsub v23.4s, v23.4s, v19.4s // t6
sqadd v26.4s, v24.4s, v28.4s // t3
sqsub v24.4s, v24.4s, v28.4s // t7
sqadd v19.4s, v17.4s, v27.4s // t8a
sqsub v17.4s, v17.4s, v27.4s // t12a
sqadd v28.4s, v30.4s, v20.4s // t9a
sqsub v30.4s, v30.4s, v20.4s // t13a
sqadd v27.4s, v18.4s, v25.4s // t10a
sqsub v18.4s, v18.4s, v25.4s // t14a
sqadd v20.4s, v29.4s, v22.4s // t11a
sqsub v29.4s, v29.4s, v22.4s // t15a
.irp r, v2, v16, v3, v31, v21, v23, v26, v24, v19, v17, v28, v30, v27, v18, v20, v29
smin_4s \r, \r, v5
.endr
.irp r, v2, v16, v3, v31, v21, v23, v26, v24, v19, v17, v28, v30, v27, v18, v20, v29
smax_4s \r, \r, v7
.endr
mul_mla v4, v2, v3, v0.s[3], v0.s[2] // -> t4a
mul_mls v6, v2, v3, v0.s[2], v0.s[3] // -> t5a
mul_mls v2, v24, v23, v0.s[3], v0.s[2] // -> t6a
srshr v22.4s, v4.4s, #12 // t4a
srshr v25.4s, v6.4s, #12 // t5a
mul_mla v4, v24, v23, v0.s[2], v0.s[3] // -> t7a
mul_mla v6, v17, v30, v0.s[3], v0.s[2] // -> t12
srshr v24.4s, v2.4s, #12 // t6a
srshr v23.4s, v4.4s, #12 // t7a
mul_mls v2, v17, v30, v0.s[2], v0.s[3] // -> t13
mul_mls v4, v29, v18, v0.s[3], v0.s[2] // -> t14
srshr v17.4s, v6.4s, #12 // t12
mul_mla v6, v29, v18, v0.s[2], v0.s[3] // -> t15
srshr v29.4s, v2.4s, #12 // t13
srshr v30.4s, v4.4s, #12 // t14
srshr v18.4s, v6.4s, #12 // t15
sqsub v2.4s, v16.4s, v21.4s // t2a
.ifc \o0, v16
sqadd \o0\().4s, v16.4s, v21.4s // out0
sqsub v21.4s, v31.4s, v26.4s // t3a
sqadd \o15\().4s, v31.4s, v26.4s // out15
.else
sqadd v4.4s, v16.4s, v21.4s // out0
sqsub v21.4s, v31.4s, v26.4s // t3a
sqadd \o15\().4s, v31.4s, v26.4s // out15
mov \o0\().16b, v4.16b
.endif
sqsub v3.4s, v29.4s, v18.4s // t15a
sqadd \o13\().4s, v29.4s, v18.4s // out13
sqadd \o2\().4s, v17.4s, v30.4s // out2
sqsub v26.4s, v17.4s, v30.4s // t14a
sqadd \o1\().4s, v19.4s, v27.4s // out1
sqsub v27.4s, v19.4s, v27.4s // t10
sqadd \o14\().4s, v28.4s, v20.4s // out14
sqsub v20.4s, v28.4s, v20.4s // t11
sqadd \o3\().4s, v22.4s, v24.4s // out3
sqsub v22.4s, v22.4s, v24.4s // t6
sqadd \o12\().4s, v25.4s, v23.4s // out12
sqsub v23.4s, v25.4s, v23.4s // t7
// Not clipping the output registers, as they will be downshifted and
// narrowed afterwards anyway.
.irp r, v2, v21, v3, v26, v27, v20, v22, v23
smin_4s \r, \r, v5
.endr
.irp r, v2, v21, v3, v26, v27, v20, v22, v23
smax_4s \r, \r, v7
.endr
sqneg \o15\().4s, \o15\().4s // out15
sqneg \o13\().4s, \o13\().4s // out13
sqneg \o1\().4s, \o1\().4s // out1
sqneg \o3\().4s, \o3\().4s // out3
mul_mls v24, v2, v21, v0.s[0], v0.s[0] // -> out8 (v24 or v23)
mul_mla v4, v2, v21, v0.s[0], v0.s[0] // -> out7 (v23 or v24)
mul_mla v6, v26, v3, v0.s[0], v0.s[0] // -> out5 (v21 or v26)
srshr v24.4s, v24.4s, #12 // out8
srshr v4.4s, v4.4s, #12 // out7
srshr v5.4s, v6.4s, #12 // out5
mul_mls v6, v26, v3, v0.s[0], v0.s[0] // -> out10 (v26 or v21)
mul_mla v2, v22, v23, v0.s[0], v0.s[0] // -> out4 (v20 or v27)
srshr v26.4s, v6.4s, #12 // out10
mul_mls v6, v22, v23, v0.s[0], v0.s[0] // -> out11 (v27 or v20)
mul_mla v22, v27, v20, v0.s[0], v0.s[0] // -> out6 (v22 or v25)
mul_mls v21, v27, v20, v0.s[0], v0.s[0] // -> out9 (v25 or v22)
srshr \o4\().4s, v2.4s, #12 // out4
srshr v6.4s, v6.4s, #12 // out11
srshr v7.4s, v21.4s, #12 // out9
srshr \o6\().4s, v22.4s, #12 // out6
.ifc \o8, v23
mov \o8\().16b, v24.16b
mov \o10\().16b, v26.16b
.endif
sqneg \o7\().4s, v4.4s // out7
sqneg \o5\().4s, v5.4s // out5
sqneg \o11\().4s, v6.4s // out11
sqneg \o9\().4s, v7.4s // out9
.endm
function inv_adst_4s_x16_neon
AARCH64_VALID_CALL_TARGET
iadst_16 v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31
ret
endfunc
function inv_flipadst_4s_x16_neon
AARCH64_VALID_CALL_TARGET
iadst_16 v31, v30, v29, v28, v27, v26, v25, v24, v23, v22, v21, v20, v19, v18, v17, v16
ret
endfunc
function inv_identity_4s_x16_neon
AARCH64_VALID_CALL_TARGET
movz w16, #2*(5793-4096)*8, lsl #16
dup v0.2s, w16
.irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
sqrdmulh v2.4s, v\i\().4s, v0.s[0]
sqadd v\i\().4s, v\i\().4s, v\i\().4s
sqadd v\i\().4s, v\i\().4s, v2.4s
.endr
ret
endfunc
.macro identity_4x16_shift1 c
.irp i, v16.4s, v17.4s, v18.4s, v19.4s, v20.4s, v21.4s, v22.4s, v23.4s, v24.4s, v25.4s, v26.4s, v27.4s, v28.4s, v29.4s, v30.4s, v31.4s
sqrdmulh v3.4s, \i, \c
srshr v3.4s, v3.4s, #1
sqadd \i, \i, v3.4s
.endr
.endm
.macro identity_4x16 c
.irp i, v16.4s, v17.4s, v18.4s, v19.4s, v20.4s, v21.4s, v22.4s, v23.4s, v24.4s, v25.4s, v26.4s, v27.4s, v28.4s, v29.4s, v30.4s, v31.4s
sqrdmulh v3.4s, \i, \c
sqadd \i, \i, \i
sqadd \i, \i, v3.4s
.endr
.endm
.macro def_horz_16 scale=0, shift=2, suffix
function inv_txfm_horz\suffix\()_16x4_neon
mov x14, x30
movi v7.4s, #0
.if \scale
movz w16, #2896*8, lsl #16
dup v0.2s, w16
.endif
.irp i, v16.4s, v17.4s, v18.4s, v19.4s, v20.4s, v21.4s, v22.4s, v23.4s, v24.4s, v25.4s, v26.4s, v27.4s, v28.4s, v29.4s, v30.4s, v31.4s
ld1 {\i}, [x7]
st1 {v7.4s}, [x7], x8
.endr
.if \scale
scale_input .4s, v0.s[0], v16, v17, v18, v19, v20, v21, v22, v23
scale_input .4s, v0.s[0], v24, v25, v26, v27, v28, v29, v30, v31
.endif
blr x4
sqrshrn v16.4h, v16.4s, #\shift
sqrshrn v17.4h, v17.4s, #\shift
sqrshrn v18.4h, v18.4s, #\shift
sqrshrn v19.4h, v19.4s, #\shift
sqrshrn2 v16.8h, v20.4s, #\shift
sqrshrn2 v17.8h, v21.4s, #\shift
sqrshrn2 v18.8h, v22.4s, #\shift
sqrshrn2 v19.8h, v23.4s, #\shift
sqrshrn v20.4h, v24.4s, #\shift
sqrshrn v21.4h, v25.4s, #\shift
sqrshrn v22.4h, v26.4s, #\shift
sqrshrn v23.4h, v27.4s, #\shift
sqrshrn2 v20.8h, v28.4s, #\shift
sqrshrn2 v21.8h, v29.4s, #\shift
sqrshrn2 v22.8h, v30.4s, #\shift
sqrshrn2 v23.8h, v31.4s, #\shift
.if \scale
b L(horz_16x4_epilog)
.else
L(horz_16x4_epilog):
transpose_4x8h v16, v17, v18, v19, v4, v5, v6, v7
transpose_4x8h v20, v21, v22, v23, v4, v5, v6, v7
.irp i, v16.8h, v20.8h, v17.8h, v21.8h, v18.8h, v22.8h, v19.8h, v23.8h
st1 {\i}, [x6], #16
.endr
ret x14
.endif
endfunc
.endm
def_horz_16 scale=1, shift=1, suffix=_scale
def_horz_16 scale=0, shift=2
function inv_txfm_add_vert_8x16_neon
mov x14, x30
.irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
ld1 {v\i\().8h}, [x7], x8
.endr
blr x5
load_add_store_8x16 x6, x7
ret x14
endfunc
function inv_txfm_add_16x16_neon
mov x15, x30
sub sp, sp, #512
ldrh w12, [x13], #2
.irp i, 0, 4, 8, 12
add x6, sp, #(\i*16*2)
.if \i > 0
mov w8, #(16 - \i)
cmp w3, w12
b.lt 1f
.if \i < 12
ldrh w12, [x13], #2
.endif
.endif
add x7, x2, #(\i*4)
mov x8, #16*4
bl inv_txfm_horz_16x4_neon
.endr
b 3f
1:
movi v4.8h, #0
movi v5.8h, #0
movi v6.8h, #0
movi v7.8h, #0
2:
subs w8, w8, #4
.rept 2
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x6], #64
.endr
b.gt 2b
3:
.irp i, 0, 8
add x6, x0, #(\i*2)
add x7, sp, #(\i*2)
mov x8, #32
bl inv_txfm_add_vert_8x16_neon
.endr
add sp, sp, #512
ret x15
endfunc
const eob_16x16
.short 10, 36, 78, 256
endconst
const eob_16x16_identity
.short 4, 8, 12, 256
endconst
.macro def_fn_16x16 txfm1, txfm2
function inv_txfm_add_\txfm1\()_\txfm2\()_16x16_16bpc_neon, export=1
.ifc \txfm1\()_\txfm2, dct_dct
idct_dc 16, 16, 2
.endif
adr x4, inv_\txfm1\()_4s_x16_neon
movrel x5, X(inv_\txfm2\()_8h_x16_neon)
.ifc \txfm1, identity
.ifc \txfm2, identity
movrel x13, eob_16x16
.else
movrel x13, eob_16x16_identity
.endif
.else
.ifc \txfm2, identity
movrel x13, eob_16x16_identity
.else
movrel x13, eob_16x16
.endif
.endif
b inv_txfm_add_16x16_neon
endfunc
.endm
def_fn_16x16 dct, dct
def_fn_16x16 identity, identity
def_fn_16x16 dct, adst
def_fn_16x16 dct, flipadst
def_fn_16x16 dct, identity
def_fn_16x16 adst, dct
def_fn_16x16 adst, adst
def_fn_16x16 adst, flipadst
def_fn_16x16 flipadst, dct
def_fn_16x16 flipadst, adst
def_fn_16x16 flipadst, flipadst
def_fn_16x16 identity, dct
function inv_txfm_add_16x4_neon
mov x15, x30
movi v4.4s, #0
.irp i, v16.4s, v17.4s, v18.4s, v19.4s, v20.4s, v21.4s, v22.4s, v23.4s, v24.4s, v25.4s, v26.4s, v27.4s, v28.4s, v29.4s, v30.4s, v31.4s
ld1 {\i}, [x2]
st1 {v4.4s}, [x2], #16
.endr
blr x4
sqrshrn v16.4h, v16.4s, #1
sqrshrn v17.4h, v17.4s, #1
sqrshrn v18.4h, v18.4s, #1
sqrshrn v19.4h, v19.4s, #1
sqrshrn2 v16.8h, v20.4s, #1
sqrshrn2 v17.8h, v21.4s, #1
sqrshrn2 v18.8h, v22.4s, #1
sqrshrn2 v19.8h, v23.4s, #1
transpose_4x8h v16, v17, v18, v19, v2, v3, v4, v5
blr x5
mov x6, x0
load_add_store_8x4 x6, x7
sqrshrn v16.4h, v24.4s, #1
sqrshrn v17.4h, v25.4s, #1
sqrshrn v18.4h, v26.4s, #1
sqrshrn v19.4h, v27.4s, #1
sqrshrn2 v16.8h, v28.4s, #1
sqrshrn2 v17.8h, v29.4s, #1
sqrshrn2 v18.8h, v30.4s, #1
sqrshrn2 v19.8h, v31.4s, #1
transpose_4x8h v16, v17, v18, v19, v2, v3, v4, v5
blr x5
add x6, x0, #16
load_add_store_8x4 x6, x7
ret x15
endfunc
function inv_txfm_add_4x16_neon
ldrh w12, [x13, #4]
mov x15, x30
mov x11, #64
cmp w3, w12
ldrh w12, [x13, #2]
b.lt 1f
add x6, x2, #48
movi v2.4s, #0
.irp i, v16.4s, v17.4s, v18.4s, v19.4s
ld1 {\i}, [x6]
st1 {v2.4s}, [x6], x11
.endr
blr x4
sqrshrn v28.4h, v16.4s, #1
sqrshrn v29.4h, v17.4s, #1
sqrshrn v30.4h, v18.4s, #1
sqrshrn v31.4h, v19.4s, #1
transpose_4x4h v28, v29, v30, v31, v4, v5, v6, v7
b 2f
1:
.irp i, v28.4h, v29.4h, v30.4h, v31.4h
movi \i, #0
.endr
2:
cmp w3, w12
ldrh w12, [x13, #0]
b.lt 1f
add x6, x2, #32
movi v2.4s, #0
.irp i, v16.4s, v17.4s, v18.4s, v19.4s
ld1 {\i}, [x6]
st1 {v2.4s}, [x6], x11
.endr
blr x4
sqrshrn v24.4h, v16.4s, #1
sqrshrn v25.4h, v17.4s, #1
sqrshrn v26.4h, v18.4s, #1
sqrshrn v27.4h, v19.4s, #1
transpose_4x4h v24, v25, v26, v27, v4, v5, v6, v7
b 2f
1:
.irp i, v24.4h, v25.4h, v26.4h, v27.4h
movi \i, #0
.endr
2:
cmp w3, w12
b.lt 1f
add x6, x2, #16
movi v2.4s, #0
.irp i, v16.4s, v17.4s, v18.4s, v19.4s
ld1 {\i}, [x6]
st1 {v2.4s}, [x6], x11
.endr
blr x4
sqrshrn v20.4h, v16.4s, #1
sqrshrn v21.4h, v17.4s, #1
sqrshrn v22.4h, v18.4s, #1
sqrshrn v23.4h, v19.4s, #1
transpose_4x4h v20, v21, v22, v23, v4, v5, v6, v7
b 2f
1:
.irp i, v20.4h, v21.4h, v22.4h, v23.4h
movi \i, #0
.endr
2:
movi v2.4s, #0
.irp i, v16.4s, v17.4s, v18.4s, v19.4s
ld1 {\i}, [x2]
st1 {v2.4s}, [x2], x11
.endr
blr x4
sqrshrn v16.4h, v16.4s, #1
sqrshrn v17.4h, v17.4s, #1
sqrshrn v18.4h, v18.4s, #1
sqrshrn v19.4h, v19.4s, #1
transpose_4x8h v16, v17, v18, v19, v4, v5, v6, v7
blr x5
load_add_store_4x16 x0, x6
ret x15
endfunc
const eob_4x16
.short 13, 29, 45, 64
endconst
const eob_4x16_identity1
.short 16, 32, 48, 64
endconst
const eob_4x16_identity2
.short 4, 8, 12, 64
endconst
.macro def_fn_416 w, h, txfm1, txfm2
function inv_txfm_add_\txfm1\()_\txfm2\()_\w\()x\h\()_16bpc_neon, export=1
.ifc \txfm1\()_\txfm2, dct_dct
idct_dc \w, \h, 1
.endif
.if \w == 4
adr x4, inv_\txfm1\()_4s_x\w\()_neon
movrel x5, X(inv_\txfm2\()_4h_x\h\()_neon)
.ifc \txfm1, identity
.ifc \txfm2, identity
movrel x13, eob_4x16
.else
movrel x13, eob_4x16_identity1
.endif
.else
.ifc \txfm2, identity
movrel x13, eob_4x16_identity2
.else
movrel x13, eob_4x16
.endif
.endif
.else
adr x4, inv_\txfm1\()_4s_x\w\()_neon
movrel x5, X(inv_\txfm2\()_8h_x\h\()_neon)
.endif
b inv_txfm_add_\w\()x\h\()_neon
endfunc
.endm
.macro def_fns_416 w, h
def_fn_416 \w, \h, dct, dct
def_fn_416 \w, \h, identity, identity
def_fn_416 \w, \h, dct, adst
def_fn_416 \w, \h, dct, flipadst
def_fn_416 \w, \h, dct, identity
def_fn_416 \w, \h, adst, dct
def_fn_416 \w, \h, adst, adst
def_fn_416 \w, \h, adst, flipadst
def_fn_416 \w, \h, flipadst, dct
def_fn_416 \w, \h, flipadst, adst
def_fn_416 \w, \h, flipadst, flipadst
def_fn_416 \w, \h, identity, dct
def_fn_416 \w, \h, adst, identity
def_fn_416 \w, \h, flipadst, identity
def_fn_416 \w, \h, identity, adst
def_fn_416 \w, \h, identity, flipadst
.endm
def_fns_416 4, 16
def_fns_416 16, 4
function inv_txfm_add_16x8_neon
mov x15, x30
stp d8, d9, [sp, #-0x40]!
stp d10, d11, [sp, #0x10]
stp d12, d13, [sp, #0x20]
stp d14, d15, [sp, #0x30]
cmp w3, w13
mov x11, #32
b.lt 1f
movi v4.4s, #0
movz w16, #2896*8, lsl #16
dup v0.2s, w16
add x6, x2, #16
.irp i, v16.4s, v17.4s, v18.4s, v19.4s, v20.4s, v21.4s, v22.4s, v23.4s, v24.4s, v25.4s, v26.4s, v27.4s, v28.4s, v29.4s, v30.4s, v31.4s
ld1 {\i}, [x6]
st1 {v4.4s}, [x6], x11
.endr
scale_input .4s, v0.s[0], v16, v17, v18, v19, v20, v21, v22, v23
scale_input .4s, v0.s[0], v24, v25, v26, v27, v28, v29, v30, v31
blr x4
sqrshrn v8.4h, v16.4s, #1
sqrshrn v9.4h, v17.4s, #1
sqrshrn v10.4h, v18.4s, #1
sqrshrn v11.4h, v19.4s, #1
sqrshrn2 v8.8h, v20.4s, #1
sqrshrn2 v9.8h, v21.4s, #1
sqrshrn2 v10.8h, v22.4s, #1
sqrshrn2 v11.8h, v23.4s, #1
sqrshrn v12.4h, v24.4s, #1
sqrshrn v13.4h, v25.4s, #1
sqrshrn v14.4h, v26.4s, #1
sqrshrn v15.4h, v27.4s, #1
sqrshrn2 v12.8h, v28.4s, #1
sqrshrn2 v13.8h, v29.4s, #1
sqrshrn2 v14.8h, v30.4s, #1
sqrshrn2 v15.8h, v31.4s, #1
transpose_4x8h v8, v9, v10, v11, v2, v3, v4, v5
transpose_4x8h v12, v13, v14, v15, v2, v3, v4, v5
b 2f
1:
.irp i, v8.8h, v9.8h, v10.8h, v11.8h, v12.8h, v13.8h, v14.8h, v15.8h
movi \i, #0
.endr
2:
movz w16, #2896*8, lsl #16
dup v0.2s, w16
movi v4.4s, #0
.irp i, v16.4s, v17.4s, v18.4s, v19.4s, v20.4s, v21.4s, v22.4s, v23.4s, v24.4s, v25.4s, v26.4s, v27.4s, v28.4s, v29.4s, v30.4s, v31.4s
ld1 {\i}, [x2]
st1 {v4.4s}, [x2], x11
.endr
scale_input .4s, v0.s[0], v16, v17, v18, v19, v20, v21, v22, v23
scale_input .4s, v0.s[0], v24, v25, v26, v27, v28, v29, v30, v31
blr x4
sqrshrn v16.4h, v16.4s, #1
sqrshrn v17.4h, v17.4s, #1
sqrshrn v18.4h, v18.4s, #1
sqrshrn v19.4h, v19.4s, #1
sqrshrn2 v16.8h, v20.4s, #1
sqrshrn2 v17.8h, v21.4s, #1
sqrshrn2 v18.8h, v22.4s, #1
sqrshrn2 v19.8h, v23.4s, #1
mov v20.16b, v8.16b
mov v21.16b, v9.16b
mov v22.16b, v10.16b
mov v23.16b, v11.16b
transpose_4x8h v16, v17, v18, v19, v2, v3, v4, v5
sqrshrn v8.4h, v24.4s, #1
sqrshrn v9.4h, v25.4s, #1
sqrshrn v10.4h, v26.4s, #1
sqrshrn v11.4h, v27.4s, #1
sqrshrn2 v8.8h, v28.4s, #1
sqrshrn2 v9.8h, v29.4s, #1
sqrshrn2 v10.8h, v30.4s, #1
sqrshrn2 v11.8h, v31.4s, #1
transpose_4x8h v8, v9, v10, v11, v2, v3, v4, v5
blr x5
mov x6, x0
load_add_store_8x8 x6, x7
mov v16.16b, v8.16b
mov v17.16b, v9.16b
mov v18.16b, v10.16b
mov v19.16b, v11.16b
mov v20.16b, v12.16b
mov v21.16b, v13.16b
mov v22.16b, v14.16b
mov v23.16b, v15.16b
blr x5
add x0, x0, #16
load_add_store_8x8 x0, x7
ldp d14, d15, [sp, #0x30]
ldp d12, d13, [sp, #0x20]
ldp d10, d11, [sp, #0x10]
ldp d8, d9, [sp], 0x40
ret x15
endfunc
function inv_txfm_add_8x16_neon
mov x15, x30
stp d8, d9, [sp, #-0x20]!
stp d10, d11, [sp, #0x10]
ldrh w12, [x13, #4]
mov x11, #64
cmp w3, w12
ldrh w12, [x13, #2]
b.lt 1f
add x6, x2, #48
movi v4.4s, #0
movz w16, #2896*8, lsl #16
dup v0.2s, w16
.irp i, v16.4s, v17.4s, v18.4s, v19.4s, v20.4s, v21.4s, v22.4s, v23.4s
ld1 {\i}, [x6]
st1 {v4.4s}, [x6], x11
.endr
scale_input .4s, v0.s[0], v16, v17, v18, v19, v20, v21, v22, v23
blr x4
sqrshrn v28.4h, v16.4s, #1
sqrshrn v29.4h, v17.4s, #1
sqrshrn v30.4h, v18.4s, #1
sqrshrn v31.4h, v19.4s, #1
sqrshrn2 v28.8h, v20.4s, #1
sqrshrn2 v29.8h, v21.4s, #1
sqrshrn2 v30.8h, v22.4s, #1
sqrshrn2 v31.8h, v23.4s, #1
transpose_4x8h v28, v29, v30, v31, v2, v3, v4, v5
b 2f
1:
.irp i, v28.8h, v29.8h, v30.8h, v31.8h
movi \i, #0
.endr
2:
cmp w3, w12
ldrh w12, [x13, #0]
b.lt 1f
add x6, x2, #32
movi v4.4s, #0
movz w16, #2896*8, lsl #16
dup v0.2s, w16
.irp i, v16.4s, v17.4s, v18.4s, v19.4s, v20.4s, v21.4s, v22.4s, v23.4s
ld1 {\i}, [x6]
st1 {v4.4s}, [x6], x11
.endr
scale_input .4s, v0.s[0], v16, v17, v18, v19, v20, v21, v22, v23
blr x4
sqrshrn v24.4h, v16.4s, #1
sqrshrn v25.4h, v17.4s, #1
sqrshrn v26.4h, v18.4s, #1
sqrshrn v27.4h, v19.4s, #1
sqrshrn2 v24.8h, v20.4s, #1
sqrshrn2 v25.8h, v21.4s, #1
sqrshrn2 v26.8h, v22.4s, #1
sqrshrn2 v27.8h, v23.4s, #1
transpose_4x8h v24, v25, v26, v27, v2, v3, v4, v5
b 2f
1:
.irp i, v24.8h, v25.8h, v26.8h, v27.8h
movi \i, #0
.endr
2:
cmp w3, w12
b.lt 1f
add x6, x2, #16
movi v4.4s, #0
movz w16, #2896*8, lsl #16
dup v0.2s, w16
.irp i, v16.4s, v17.4s, v18.4s, v19.4s, v20.4s, v21.4s, v22.4s, v23.4s
ld1 {\i}, [x6]
st1 {v4.4s}, [x6], x11
.endr
scale_input .4s, v0.s[0], v16, v17, v18, v19, v20, v21, v22, v23
blr x4
sqrshrn v8.4h, v16.4s, #1
sqrshrn v9.4h, v17.4s, #1
sqrshrn v10.4h, v18.4s, #1
sqrshrn v11.4h, v19.4s, #1
sqrshrn2 v8.8h, v20.4s, #1
sqrshrn2 v9.8h, v21.4s, #1
sqrshrn2 v10.8h, v22.4s, #1
sqrshrn2 v11.8h, v23.4s, #1
transpose_4x8h v8, v9, v10, v11, v2, v3, v4, v5
b 2f
1:
.irp i, v8.8h, v9.8h, v10.8h, v11.8h
movi \i, #0
.endr
2:
movi v4.4s, #0
movz w16, #2896*8, lsl #16
dup v0.2s, w16
.irp i, v16.4s, v17.4s, v18.4s, v19.4s, v20.4s, v21.4s, v22.4s, v23.4s
ld1 {\i}, [x2]
st1 {v4.4s}, [x2], x11
.endr
scale_input .4s, v0.s[0], v16, v17, v18, v19, v20, v21, v22, v23
blr x4
sqrshrn v16.4h, v16.4s, #1
sqrshrn v17.4h, v17.4s, #1
sqrshrn v18.4h, v18.4s, #1
sqrshrn v19.4h, v19.4s, #1
sqrshrn2 v16.8h, v20.4s, #1
sqrshrn2 v17.8h, v21.4s, #1
sqrshrn2 v18.8h, v22.4s, #1
sqrshrn2 v19.8h, v23.4s, #1
transpose_4x8h v16, v17, v18, v19, v2, v3, v4, v5
mov v20.16b, v8.16b
mov v21.16b, v9.16b
mov v22.16b, v10.16b
mov v23.16b, v11.16b
blr x5
load_add_store_8x16 x0, x6
ldp d10, d11, [sp, #0x10]
ldp d8, d9, [sp], 0x20
ret x15
endfunc
const eob_8x16
.short 10, 43, 75, 128
endconst
const eob_8x16_identity1
.short 4, 64, 96, 128
endconst
const eob_8x16_identity2
.short 4, 8, 12, 128
endconst
.macro def_fn_816 w, h, txfm1, txfm2
function inv_txfm_add_\txfm1\()_\txfm2\()_\w\()x\h\()_16bpc_neon, export=1
.ifc \txfm1\()_\txfm2, dct_dct
idct_dc \w, \h, 1
.endif
adr x4, inv_\txfm1\()_4s_x\w\()_neon
movrel x5, X(inv_\txfm2\()_8h_x\h\()_neon)
.ifc \txfm1, identity
.ifc \txfm2, identity
movrel x13, eob_8x16
.else
movrel x13, eob_8x16_identity1
.endif
.else
.ifc \txfm2, identity
movrel x13, eob_8x16_identity2
.else
movrel x13, eob_8x16
.endif
.endif
.if \h == 8
ldrh w13, [x13]
.endif
b inv_txfm_add_\w\()x\h\()_neon
endfunc
.endm
.macro def_fns_816 w, h
def_fn_816 \w, \h, dct, dct
def_fn_816 \w, \h, identity, identity
def_fn_816 \w, \h, dct, adst
def_fn_816 \w, \h, dct, flipadst
def_fn_816 \w, \h, dct, identity
def_fn_816 \w, \h, adst, dct
def_fn_816 \w, \h, adst, adst
def_fn_816 \w, \h, adst, flipadst
def_fn_816 \w, \h, flipadst, dct
def_fn_816 \w, \h, flipadst, adst
def_fn_816 \w, \h, flipadst, flipadst
def_fn_816 \w, \h, identity, dct
def_fn_816 \w, \h, adst, identity
def_fn_816 \w, \h, flipadst, identity
def_fn_816 \w, \h, identity, adst
def_fn_816 \w, \h, identity, flipadst
.endm
def_fns_816 8, 16
def_fns_816 16, 8
function inv_dct32_odd_4s_x16_neon
movrel x16, idct_coeffs, 4*16
ld1 {v0.4s, v1.4s}, [x16], #32
mul_mls v2, v16, v31, v0.s[0], v0.s[1] // -> t16a
mul_mla v4, v16, v31, v0.s[1], v0.s[0] // -> t31a
mul_mls v6, v24, v23, v0.s[2], v0.s[3] // -> t17a
srshr v16.4s, v2.4s, #12 // t16a
srshr v31.4s, v4.4s, #12 // t31a
mul_mla v2, v24, v23, v0.s[3], v0.s[2] // -> t30a
mul_mls v4, v20, v27, v1.s[0], v1.s[1] // -> t18a
srshr v24.4s, v6.4s, #12 // t17a
srshr v23.4s, v2.4s, #12 // t30a
mul_mla v6, v20, v27, v1.s[1], v1.s[0] // -> t29a
mul_mls v2, v28, v19, v1.s[2], v1.s[3] // -> t19a
srshr v20.4s, v4.4s, #12 // t18a
srshr v27.4s, v6.4s, #12 // t29a
mul_mla v4, v28, v19, v1.s[3], v1.s[2] // -> t28a
ld1 {v0.4s, v1.4s}, [x16]
sub x16, x16, #4*24
mul_mls v6, v18, v29, v0.s[0], v0.s[1] // -> t20a
srshr v28.4s, v2.4s, #12 // t19a
srshr v19.4s, v4.4s, #12 // t28a
mul_mla v2, v18, v29, v0.s[1], v0.s[0] // -> t27a
mul_mls v4, v26, v21, v0.s[2], v0.s[3] // -> t21a
srshr v18.4s, v6.4s, #12 // t20a
srshr v29.4s, v2.4s, #12 // t27a
mul_mla v6, v26, v21, v0.s[3], v0.s[2] // -> t26a
mul_mls v2, v22, v25, v1.s[0], v1.s[1] // -> t22a
srshr v26.4s, v4.4s, #12 // t21a
srshr v21.4s, v6.4s, #12 // t26a
mul_mla v4, v22, v25, v1.s[1], v1.s[0] // -> t25a
mul_mls v6, v30, v17, v1.s[2], v1.s[3] // -> t23a
srshr v22.4s, v2.4s, #12 // t22a
srshr v25.4s, v4.4s, #12 // t25a
mul_mla v2, v30, v17, v1.s[3], v1.s[2] // -> t24a
srshr v30.4s, v6.4s, #12 // t23a
srshr v17.4s, v2.4s, #12 // t24a
ld1 {v0.4s, v1.4s}, [x16]
movi v5.4s, #1, msl #16 // row_clip_max = ~(~bdmax << 7), 0x1ffff
mvni v4.4s, #1, msl #16 // row_clip_min = (~bdmax << 7), 0xfffe0000
sqsub v2.4s, v16.4s, v24.4s // t17
sqadd v16.4s, v16.4s, v24.4s // t16
sqsub v3.4s, v31.4s, v23.4s // t30
sqadd v31.4s, v31.4s, v23.4s // t31
sqsub v24.4s, v28.4s, v20.4s // t18
sqadd v28.4s, v28.4s, v20.4s // t19
sqadd v23.4s, v18.4s, v26.4s // t20
sqsub v18.4s, v18.4s, v26.4s // t21
sqsub v20.4s, v30.4s, v22.4s // t22
sqadd v30.4s, v30.4s, v22.4s // t23
sqadd v26.4s, v17.4s, v25.4s // t24
sqsub v17.4s, v17.4s, v25.4s // t25
sqsub v22.4s, v29.4s, v21.4s // t26
sqadd v29.4s, v29.4s, v21.4s // t27
sqadd v25.4s, v19.4s, v27.4s // t28
sqsub v19.4s, v19.4s, v27.4s // t29
.irp r, v2, v16, v3, v31, v24, v28, v23, v18, v20, v30, v26, v17, v22, v29, v25, v19
smin \r\().4s, \r\().4s, v5.4s
.endr
.irp r, v2, v16, v3, v31, v24, v28, v23, v18, v20, v30, v26, v17, v22, v29, v25, v19
smax \r\().4s, \r\().4s, v4.4s
.endr
mul_mls v7, v3, v2, v1.s[0], v1.s[1] // -> t17a
mul_mla v6, v3, v2, v1.s[1], v1.s[0] // -> t30a
mul_mla v2, v19, v24, v1.s[1], v1.s[0] // -> t18a
srshr v21.4s, v7.4s, #12 // t17a
srshr v27.4s, v6.4s, #12 // t30a
neg v2.4s, v2.4s // -> t18a
mul_mls v7, v19, v24, v1.s[0], v1.s[1] // -> t29a
mul_mls v6, v22, v18, v1.s[2], v1.s[3] // -> t21a
srshr v19.4s, v2.4s, #12 // t18a
srshr v24.4s, v7.4s, #12 // t29a
mul_mla v2, v22, v18, v1.s[3], v1.s[2] // -> t26a
mul_mla v7, v17, v20, v1.s[3], v1.s[2] // -> t22a
srshr v22.4s, v6.4s, #12 // t21a
srshr v18.4s, v2.4s, #12 // t26a
neg v7.4s, v7.4s // -> t22a
mul_mls v6, v17, v20, v1.s[2], v1.s[3] // -> t25a
srshr v17.4s, v7.4s, #12 // t22a
srshr v20.4s, v6.4s, #12 // t25a
sqsub v2.4s, v27.4s, v24.4s // t29
sqadd v27.4s, v27.4s, v24.4s // t30
sqsub v3.4s, v21.4s, v19.4s // t18
sqadd v21.4s, v21.4s, v19.4s // t17
sqsub v24.4s, v16.4s, v28.4s // t19a
sqadd v16.4s, v16.4s, v28.4s // t16a
sqsub v19.4s, v30.4s, v23.4s // t20a
sqadd v30.4s, v30.4s, v23.4s // t23a
sqsub v28.4s, v17.4s, v22.4s // t21
sqadd v17.4s, v17.4s, v22.4s // t22
sqadd v23.4s, v26.4s, v29.4s // t24a
sqsub v26.4s, v26.4s, v29.4s // t27a
sqadd v22.4s, v20.4s, v18.4s // t25
sqsub v20.4s, v20.4s, v18.4s // t26
sqsub v29.4s, v31.4s, v25.4s // t28a
sqadd v31.4s, v31.4s, v25.4s // t31a
.irp r, v2, v27, v3, v21, v24, v16, v19, v30, v28, v17, v23, v26, v22, v20, v29, v31
smin \r\().4s, \r\().4s, v5.4s
.endr
.irp r, v2, v27, v3, v21, v24, v16, v19, v30, v28, v17, v23, v26, v22, v20, v29, v31
smax \r\().4s, \r\().4s, v4.4s
.endr
mul_mls v7, v2, v3, v0.s[2], v0.s[3] // -> t18a
mul_mla v6, v2, v3, v0.s[3], v0.s[2] // -> t29a
mul_mls v2, v29, v24, v0.s[2], v0.s[3] // -> t19
srshr v18.4s, v7.4s, #12 // t18a
srshr v25.4s, v6.4s, #12 // t29a
mul_mla v7, v29, v24, v0.s[3], v0.s[2] // -> t28
mul_mla v6, v26, v19, v0.s[3], v0.s[2] // -> t20
srshr v29.4s, v2.4s, #12 // t19
srshr v24.4s, v7.4s, #12 // t28
neg v6.4s, v6.4s // -> t20
mul_mls v2, v26, v19, v0.s[2], v0.s[3] // -> t27
mul_mla v7, v20, v28, v0.s[3], v0.s[2] // -> t21a
srshr v26.4s, v6.4s, #12 // t20
srshr v19.4s, v2.4s, #12 // t27
neg v7.4s, v7.4s // -> t21a
mul_mls v6, v20, v28, v0.s[2], v0.s[3] // -> t26a
srshr v20.4s, v7.4s, #12 // t21a
srshr v28.4s, v6.4s, #12 // t26a
sqsub v2.4s, v16.4s, v30.4s // t23
sqadd v16.4s, v16.4s, v30.4s // t16 = out16
sqsub v3.4s, v31.4s, v23.4s // t24
sqadd v31.4s, v31.4s, v23.4s // t31 = out31
sqsub v23.4s, v21.4s, v17.4s // t22a
sqadd v17.4s, v21.4s, v17.4s // t17a = out17
sqadd v30.4s, v27.4s, v22.4s // t30a = out30
sqsub v21.4s, v27.4s, v22.4s // t25a
sqsub v27.4s, v18.4s, v20.4s // t21
sqadd v18.4s, v18.4s, v20.4s // t18 = out18
sqadd v7.4s, v29.4s, v26.4s // t19a = out19
sqsub v26.4s, v29.4s, v26.4s // t20a
sqadd v29.4s, v25.4s, v28.4s // t29 = out29
sqsub v25.4s, v25.4s, v28.4s // t26
sqadd v28.4s, v24.4s, v19.4s // t28a = out28
sqsub v24.4s, v24.4s, v19.4s // t27a
mov v19.16b, v7.16b // out19
.irp r, v2, v16, v3, v31, v23, v17, v30, v21, v27, v18, v19, v26, v29, v25, v28, v24
smin \r\().4s, \r\().4s, v5.4s
.endr
.irp r, v2, v16, v3, v31, v23, v17, v30, v21, v27, v18, v19, v26, v29, v25, v28, v24
smax \r\().4s, \r\().4s, v4.4s
.endr
mul_mls v7, v24, v26, v0.s[0], v0.s[0] // -> t20
mul_mla v6, v24, v26, v0.s[0], v0.s[0] // -> t27
srshr v20.4s, v7.4s, #12 // t20
srshr v22.4s, v6.4s, #12 // t27
mul_mla v7, v25, v27, v0.s[0], v0.s[0] // -> t26a
mul_mls v6, v25, v27, v0.s[0], v0.s[0] // -> t21a
mov v27.16b, v22.16b // t27
srshr v26.4s, v7.4s, #12 // t26a
mul_mls v24, v21, v23, v0.s[0], v0.s[0] // -> t22
mul_mla v7, v21, v23, v0.s[0], v0.s[0] // -> t25
srshr v21.4s, v6.4s, #12 // t21a
srshr v22.4s, v24.4s, #12 // t22
srshr v25.4s, v7.4s, #12 // t25
mul_mls v7, v3, v2, v0.s[0], v0.s[0] // -> t23a
mul_mla v6, v3, v2, v0.s[0], v0.s[0] // -> t24a
srshr v23.4s, v7.4s, #12 // t23a
srshr v24.4s, v6.4s, #12 // t24a
ret
endfunc
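// Horizontal (first-pass) 32-point DCT over four rows: run the even 16-point
// DCT and the odd half above, combine the two halves with add/sub butterflies,
// round by \shift and store the transposed 16-bit rows to the buffer at x6.
// The _scale variant pre-multiplies the input by 2896/4096 (~1/sqrt(2)) as
// needed for the rectangular transform sizes.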
.macro def_horz_32 scale=0, shift=2, suffix
function inv_txfm_horz\suffix\()_dct_32x4_neon
mov x14, x30
movi v7.4s, #0
lsl x8, x8, #1
.if \scale
movz w16, #2896*8, lsl #16
dup v0.2s, w16
.endif
.irp i, v16.4s, v17.4s, v18.4s, v19.4s, v20.4s, v21.4s, v22.4s, v23.4s, v24.4s, v25.4s, v26.4s, v27.4s, v28.4s, v29.4s, v30.4s, v31.4s
ld1 {\i}, [x7]
st1 {v7.4s}, [x7], x8
.endr
sub x7, x7, x8, lsl #4
add x7, x7, x8, lsr #1
.if \scale
scale_input .4s, v0.s[0], v16, v17, v18, v19, v20, v21, v22, v23
scale_input .4s, v0.s[0], v24, v25, v26, v27, v28, v29, v30, v31
.endif
bl inv_dct_4s_x16_neon
// idct_16 leaves the row_clip_max/min constants in v5 and v4
.irp r, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31
smin_4s \r, \r, v5
.endr
.irp r, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31
smax_4s \r, \r, v4
.endr
transpose_4x4s v16, v17, v18, v19, v2, v3, v4, v5
transpose_4x4s v20, v21, v22, v23, v2, v3, v4, v5
transpose_4x4s v24, v25, v26, v27, v2, v3, v4, v5
transpose_4x4s v28, v29, v30, v31, v2, v3, v4, v5
.macro store1 r0, r1, r2, r3
st1 {\r0}, [x6], #16
st1 {\r1}, [x6], #16
st1 {\r2}, [x6], #16
st1 {\r3}, [x6], #16
.endm
store1 v16.4s, v20.4s, v24.4s, v28.4s
store1 v17.4s, v21.4s, v25.4s, v29.4s
store1 v18.4s, v22.4s, v26.4s, v30.4s
store1 v19.4s, v23.4s, v27.4s, v31.4s
.purgem store1
sub x6, x6, #64*4
movi v7.4s, #0
.irp i, v16.4s, v17.4s, v18.4s, v19.4s, v20.4s, v21.4s, v22.4s, v23.4s, v24.4s, v25.4s, v26.4s, v27.4s, v28.4s, v29.4s, v30.4s, v31.4s
ld1 {\i}, [x7]
st1 {v7.4s}, [x7], x8
.endr
.if \scale
// This relies on the fact that the idct also leaves the right coeff in v0.s[1]
scale_input .4s, v0.s[1], v16, v17, v18, v19, v20, v21, v22, v23
scale_input .4s, v0.s[1], v24, v25, v26, v27, v28, v29, v30, v31
.endif
bl inv_dct32_odd_4s_x16_neon
transpose_4x4s v31, v30, v29, v28, v2, v3, v4, v5
transpose_4x4s v27, v26, v25, v24, v2, v3, v4, v5
transpose_4x4s v23, v22, v21, v20, v2, v3, v4, v5
transpose_4x4s v19, v18, v17, v16, v2, v3, v4, v5
.macro store2 r0, r1, r2, r3, shift
ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [x6]
sqsub v4.4s, v0.4s, \r0
sqadd v0.4s, v0.4s, \r0
sqsub v5.4s, v1.4s, \r1
sqadd v1.4s, v1.4s, \r1
sqsub v6.4s, v2.4s, \r2
sqadd v2.4s, v2.4s, \r2
sqsub v7.4s, v3.4s, \r3
sqadd v3.4s, v3.4s, \r3
sqrshrn v0.4h, v0.4s, #\shift
sqrshrn2 v0.8h, v1.4s, #\shift
sqrshrn v1.4h, v2.4s, #\shift
sqrshrn2 v1.8h, v3.4s, #\shift
sqrshrn v2.4h, v7.4s, #\shift
sqrshrn2 v2.8h, v6.4s, #\shift
sqrshrn v3.4h, v5.4s, #\shift
sqrshrn2 v3.8h, v4.4s, #\shift
st1 {v0.8h, v1.8h}, [x6], #32
rev64 v2.8h, v2.8h
rev64 v3.8h, v3.8h
st1 {v2.8h, v3.8h}, [x6], #32
.endm
store2 v31.4s, v27.4s, v23.4s, v19.4s, \shift
store2 v30.4s, v26.4s, v22.4s, v18.4s, \shift
store2 v29.4s, v25.4s, v21.4s, v17.4s, \shift
store2 v28.4s, v24.4s, v20.4s, v16.4s, \shift
.purgem store2
ret x14
endfunc
.endm
def_horz_32 scale=0, shift=2
def_horz_32 scale=1, shift=1, suffix=_scale
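// Vertical (second-pass) 32-point DCT over an 8-column strip: run the even and
// odd 16-point halves (8h versions) on the coefficients at x7, then add/sub the
// two halves, round by 4 and accumulate into the destination at x6, clamping to
// the pixel maximum in v1.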
function inv_txfm_add_vert_dct_8x32_neon
mov x14, x30
lsl x8, x8, #1
.irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
ld1 {v\i\().8h}, [x7], x8
.endr
sub x7, x7, x8, lsl #4
bl X(inv_dct_8h_x16_neon)
.irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
st1 {v\i\().8h}, [x7], x8
.endr
sub x7, x7, x8, lsl #4
add x7, x7, x8, lsr #1
.irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
ld1 {v\i\().8h}, [x7], x8
.endr
sub x7, x7, x8, lsl #4
sub x7, x7, x8, lsr #1
bl X(inv_dct32_odd_8h_x16_neon)
neg x9, x8
mov x10, x6
mvni v1.8h, #0xfc, lsl #8 // 0x3ff
.macro combine r0, r1, r2, r3, op, stride
ld1 {v5.8h}, [x7], \stride
ld1 {v2.8h}, [x10], x1
ld1 {v6.8h}, [x7], \stride
ld1 {v3.8h}, [x10], x1
\op v5.8h, v5.8h, \r0
ld1 {v7.8h}, [x7], \stride
ld1 {v4.8h}, [x10], x1
srshr v5.8h, v5.8h, #4
\op v6.8h, v6.8h, \r1
usqadd v2.8h, v5.8h
srshr v6.8h, v6.8h, #4
\op v7.8h, v7.8h, \r2
ld1 {v5.8h}, [x7], \stride
usqadd v3.8h, v6.8h
smin v2.8h, v2.8h, v1.8h
srshr v7.8h, v7.8h, #4
\op v5.8h, v5.8h, \r3
st1 {v2.8h}, [x6], x1
ld1 {v2.8h}, [x10], x1
usqadd v4.8h, v7.8h
smin v3.8h, v3.8h, v1.8h
srshr v5.8h, v5.8h, #4
st1 {v3.8h}, [x6], x1
usqadd v2.8h, v5.8h
smin v4.8h, v4.8h, v1.8h
st1 {v4.8h}, [x6], x1
smin v2.8h, v2.8h, v1.8h
st1 {v2.8h}, [x6], x1
.endm
combine v31.8h, v30.8h, v29.8h, v28.8h, sqadd, x8
combine v27.8h, v26.8h, v25.8h, v24.8h, sqadd, x8
combine v23.8h, v22.8h, v21.8h, v20.8h, sqadd, x8
combine v19.8h, v18.8h, v17.8h, v16.8h, sqadd, x8
sub x7, x7, x8
combine v16.8h, v17.8h, v18.8h, v19.8h, sqsub, x9
combine v20.8h, v21.8h, v22.8h, v23.8h, sqsub, x9
combine v24.8h, v25.8h, v26.8h, v27.8h, sqsub, x9
combine v28.8h, v29.8h, v30.8h, v31.8h, sqsub, x9
.purgem combine
ret x14
endfunc
const eob_32x32
.short 10, 36, 78, 136, 210, 300, 406, 1024
endconst
const eob_16x32
.short 10, 36, 78, 151, 215, 279, 343, 512
endconst
const eob_16x32_shortside
.short 10, 36, 78, 512
endconst
const eob_8x32
.short 10, 43, 75, 107, 139, 171, 203, 256
endconst
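// identity/identity 32x32 needs no butterflies: coefficients are narrowed to
// 16 bits, transposed in 8x8 blocks and added to the destination with a
// rounding shift, zeroing the input as it is consumed. The eob tables bound
// how many 8x8 blocks actually contain coefficients.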
function inv_txfm_add_identity_identity_32x32_16bpc_neon, export=1
movi v0.8h, #0
movi v1.8h, #0
movrel x13, eob_32x32, 2
mov x8, #4*32
1:
mov w9, #0
movrel x12, eob_32x32, 2
2:
add w9, w9, #8
ld1 {v16.4s, v17.4s}, [x2]
st1 {v0.4s, v1.4s}, [x2], x8
ld1 {v18.4s, v19.4s}, [x2]
st1 {v0.4s, v1.4s}, [x2], x8
ld1 {v20.4s, v21.4s}, [x2]
st1 {v0.4s, v1.4s}, [x2], x8
ld1 {v22.4s, v23.4s}, [x2]
st1 {v0.4s, v1.4s}, [x2], x8
ld1 {v24.4s, v25.4s}, [x2]
st1 {v0.4s, v1.4s}, [x2], x8
ld1 {v26.4s, v27.4s}, [x2]
st1 {v0.4s, v1.4s}, [x2], x8
ld1 {v28.4s, v29.4s}, [x2]
st1 {v0.4s, v1.4s}, [x2], x8
ld1 {v30.4s, v31.4s}, [x2]
st1 {v0.4s, v1.4s}, [x2], x8
sqxtn v16.4h, v16.4s
sqxtn2 v16.8h, v17.4s
sqxtn v17.4h, v18.4s
sqxtn2 v17.8h, v19.4s
sqxtn v18.4h, v20.4s
sqxtn2 v18.8h, v21.4s
sqxtn v19.4h, v22.4s
sqxtn2 v19.8h, v23.4s
sqxtn v20.4h, v24.4s
sqxtn2 v20.8h, v25.4s
sqxtn v21.4h, v26.4s
sqxtn2 v21.8h, v27.4s
sqxtn v22.4h, v28.4s
sqxtn2 v22.8h, v29.4s
sqxtn v23.4h, v30.4s
sqxtn2 v23.8h, v31.4s
transpose_8x8h v16, v17, v18, v19, v20, v21, v22, v23, v4, v5
load_add_store_8x8 x0, x7, shiftbits=2
ldrh w11, [x12], #4
sub x0, x0, x1, lsl #3
add x0, x0, #2*8
cmp w3, w11
b.ge 2b
ldrh w11, [x13], #4
cmp w3, w11
b.lt 9f
sub x0, x0, w9, uxtw #1
add x0, x0, x1, lsl #3
msub x2, x8, x9, x2
add x2, x2, #4*8
b 1b
9:
ret
endfunc
.macro shift_16_regs op, shift
.irp i, v16.4s, v17.4s, v18.4s, v19.4s, v20.4s, v21.4s, v22.4s, v23.4s, v24.4s, v25.4s, v26.4s, v27.4s, v28.4s, v29.4s, v30.4s, v31.4s
\op \i, \i, #\shift
.endr
.endm
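// identity/identity 16x32 and 32x16: apply the rectangular 2896/4096 scale
// (w16) and the identity16 multiplier (w17), narrow to 16 bits, transpose 8x8
// blocks and add to the destination; the eob tables bound the blocks visited.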
.macro def_identity_1632 w, h, wshort, hshort
function inv_txfm_add_identity_identity_\w\()x\h\()_16bpc_neon, export=1
movz w16, #2896*8, lsl #16
movz w17, #2*(5793-4096)*8, lsl #16
movi v0.4s, #0
movi v1.4s, #0
movrel x13, eob_16x32\hshort, 2
mov x8, #4*\h
1:
mov w9, #0
movrel x12, eob_16x32\wshort, 2
2:
add w9, w9, #8
ld1 {v16.4s, v17.4s}, [x2]
st1 {v0.4s, v1.4s}, [x2], x8
dup v2.2s, w16
ld1 {v18.4s, v19.4s}, [x2]
st1 {v0.4s, v1.4s}, [x2], x8
mov v2.s[1], w17
ld1 {v20.4s, v21.4s}, [x2]
st1 {v0.4s, v1.4s}, [x2], x8
ld1 {v22.4s, v23.4s}, [x2]
st1 {v0.4s, v1.4s}, [x2], x8
ld1 {v24.4s, v25.4s}, [x2]
st1 {v0.4s, v1.4s}, [x2], x8
ld1 {v26.4s, v27.4s}, [x2]
st1 {v0.4s, v1.4s}, [x2], x8
ld1 {v28.4s, v29.4s}, [x2]
st1 {v0.4s, v1.4s}, [x2], x8
ld1 {v30.4s, v31.4s}, [x2]
st1 {v0.4s, v1.4s}, [x2], x8
scale_input .4s, v2.s[0], v16, v17, v18, v19, v20, v21, v22, v23
scale_input .4s, v2.s[0], v24, v25, v26, v27, v28, v29, v30, v31
.if \w == 16
// 16x32
identity_4x16_shift1 v2.s[1]
.else
// 32x16
shift_16_regs sqshl, 1
identity_4x16 v2.s[1]
.endif
sqxtn v16.4h, v16.4s
sqxtn2 v16.8h, v17.4s
sqxtn v17.4h, v18.4s
sqxtn2 v17.8h, v19.4s
sqxtn v18.4h, v20.4s
sqxtn2 v18.8h, v21.4s
sqxtn v19.4h, v22.4s
sqxtn2 v19.8h, v23.4s
sqxtn v20.4h, v24.4s
sqxtn2 v20.8h, v25.4s
sqxtn v21.4h, v26.4s
sqxtn2 v21.8h, v27.4s
sqxtn v22.4h, v28.4s
sqxtn2 v22.8h, v29.4s
sqxtn v23.4h, v30.4s
sqxtn2 v23.8h, v31.4s
transpose_8x8h v16, v17, v18, v19, v20, v21, v22, v23, v4, v5
.if \w == 16
load_add_store_8x8 x0, x7, shiftbits=2
.else
load_add_store_8x8 x0, x7, shiftbits=4
.endif
ldrh w11, [x12], #4
sub x0, x0, x1, lsl #3
add x0, x0, #16
cmp w3, w11
b.ge 2b
ldrh w11, [x13], #4
cmp w3, w11
b.lt 9f
sub x0, x0, w9, uxtw #1
add x0, x0, x1, lsl #3
msub x2, x8, x9, x2
add x2, x2, #4*8
b 1b
9:
ret
endfunc
.endm
def_identity_1632 16, 32, _shortside,
def_identity_1632 32, 16, , _shortside
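// identity/identity 8x32 and 32x8: no butterflies, only scaling shifts; handled
// in 8x8 blocks with the eob_8x32 thresholds read at every other entry.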
.macro def_identity_832 w, h
function inv_txfm_add_identity_identity_\w\()x\h\()_16bpc_neon, export=1
movi v0.4s, #0
movi v1.4s, #0
// Working on 8x8 blocks, read every other entry from eob_8x32
movrel x13, eob_8x32, 2
mov w8, #4*\h
1:
// Working on 8x8 blocks, read every other entry from eob_8x32
ldrh w12, [x13], #4
ld1 {v16.4s, v17.4s}, [x2]
st1 {v0.4s, v1.4s}, [x2], x8
ld1 {v18.4s, v19.4s}, [x2]
st1 {v0.4s, v1.4s}, [x2], x8
ld1 {v20.4s, v21.4s}, [x2]
st1 {v0.4s, v1.4s}, [x2], x8
ld1 {v22.4s, v23.4s}, [x2]
st1 {v0.4s, v1.4s}, [x2], x8
ld1 {v24.4s, v25.4s}, [x2]
st1 {v0.4s, v1.4s}, [x2], x8
ld1 {v26.4s, v27.4s}, [x2]
st1 {v0.4s, v1.4s}, [x2], x8
ld1 {v28.4s, v29.4s}, [x2]
st1 {v0.4s, v1.4s}, [x2], x8
ld1 {v30.4s, v31.4s}, [x2]
st1 {v0.4s, v1.4s}, [x2], x8
.if \w == 8
sqrshrn v16.4h, v16.4s, #1
sqrshrn2 v16.8h, v17.4s, #1
sqrshrn v17.4h, v18.4s, #1
sqrshrn2 v17.8h, v19.4s, #1
sqrshrn v18.4h, v20.4s, #1
sqrshrn2 v18.8h, v21.4s, #1
sqrshrn v19.4h, v22.4s, #1
sqrshrn2 v19.8h, v23.4s, #1
sqrshrn v20.4h, v24.4s, #1
sqrshrn2 v20.8h, v25.4s, #1
sqrshrn v21.4h, v26.4s, #1
sqrshrn2 v21.8h, v27.4s, #1
sqrshrn v22.4h, v28.4s, #1
sqrshrn2 v22.8h, v29.4s, #1
sqrshrn v23.4h, v30.4s, #1
sqrshrn2 v23.8h, v31.4s, #1
.else
sqxtn v16.4h, v16.4s
sqxtn2 v16.8h, v17.4s
sqxtn v17.4h, v18.4s
sqxtn2 v17.8h, v19.4s
sqxtn v18.4h, v20.4s
sqxtn2 v18.8h, v21.4s
sqxtn v19.4h, v22.4s
sqxtn2 v19.8h, v23.4s
sqxtn v20.4h, v24.4s
sqxtn2 v20.8h, v25.4s
sqxtn v21.4h, v26.4s
sqxtn2 v21.8h, v27.4s
sqxtn v22.4h, v28.4s
sqxtn2 v22.8h, v29.4s
sqxtn v23.4h, v30.4s
sqxtn2 v23.8h, v31.4s
.endif
transpose_8x8h v16, v17, v18, v19, v20, v21, v22, v23, v4, v5
cmp w3, w12
.if \w == 8
load_add_store_8x8 x0, x7, shiftbits=2
.else
load_add_store_8x8 x0, x7, shiftbits=3
.endif
b.lt 9f
.if \w == 8
sub x2, x2, x8, lsl #3
add x2, x2, #4*8
.else
sub x0, x0, x1, lsl #3
add x0, x0, #2*8
.endif
b 1b
9:
ret
endfunc
.endm
def_identity_832 8, 32
def_identity_832 32, 8
function inv_txfm_add_dct_dct_32x32_16bpc_neon, export=1
idct_dc 32, 32, 2
mov x15, x30
sub sp, sp, #2048
movrel x13, eob_32x32
ldrh w12, [x13], #2
.irp i, 0, 4, 8, 12, 16, 20, 24, 28
add x6, sp, #(\i*32*2)
.if \i > 0
mov w8, #(32 - \i)
cmp w3, w12
b.lt 1f
.if \i < 28
ldrh w12, [x13], #2
.endif
.endif
add x7, x2, #(\i*4)
mov x8, #32*4
bl inv_txfm_horz_dct_32x4_neon
.endr
b 3f
1:
movi v4.8h, #0
movi v5.8h, #0
movi v6.8h, #0
movi v7.8h, #0
2:
subs w8, w8, #4
.rept 4
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x6], #64
.endr
b.gt 2b
3:
.irp i, 0, 8, 16, 24
add x6, x0, #(\i*2)
add x7, sp, #(\i*2)
mov x8, #32*2
bl inv_txfm_add_vert_dct_8x32_neon
.endr
add sp, sp, #2048
ret x15
endfunc
function inv_txfm_add_dct_dct_16x32_16bpc_neon, export=1
idct_dc 16, 32, 1
mov x15, x30
sub sp, sp, #1024
movrel x13, eob_16x32
ldrh w12, [x13], #2
adr x4, inv_dct_4s_x16_neon
.irp i, 0, 4, 8, 12, 16, 20, 24, 28
add x6, sp, #(\i*16*2)
add x7, x2, #(\i*4)
.if \i > 0
mov w8, #(32 - \i)
cmp w3, w12
b.lt 1f
.if \i < 28
ldrh w12, [x13], #2
.endif
.endif
mov x8, #4*32
bl inv_txfm_horz_scale_16x4_neon
.endr
b 3f
1:
movi v4.8h, #0
movi v5.8h, #0
movi v6.8h, #0
movi v7.8h, #0
2:
subs w8, w8, #4
.rept 2
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x6], #64
.endr
b.gt 2b
3:
.irp i, 0, 8
add x6, x0, #(\i*2)
add x7, sp, #(\i*2)
mov x8, #16*2
bl inv_txfm_add_vert_dct_8x32_neon
.endr
add sp, sp, #1024
ret x15
endfunc
function inv_txfm_add_dct_dct_32x16_16bpc_neon, export=1
idct_dc 32, 16, 1
mov x15, x30
sub sp, sp, #1024
movrel x13, eob_16x32
movrel x5, X(inv_dct_8h_x16_neon)
ldrh w12, [x13], #2
.irp i, 0, 4, 8, 12
add x6, sp, #(\i*32*2)
add x7, x2, #(\i*4)
.if \i > 0
mov w8, #(16 - \i)
cmp w3, w12
b.lt 1f
.if \i < 12
ldrh w12, [x13], #2
.endif
.endif
mov x8, #4*16
bl inv_txfm_horz_scale_dct_32x4_neon
.endr
b 3f
1:
movi v4.8h, #0
movi v5.8h, #0
movi v6.8h, #0
movi v7.8h, #0
2:
subs w8, w8, #4
.rept 4
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x6], #64
.endr
b.gt 2b
3:
.irp i, 0, 8, 16, 24
add x6, x0, #(\i*2)
add x7, sp, #(\i*2)
mov x8, #32*2
bl inv_txfm_add_vert_8x16_neon
.endr
add sp, sp, #1024
ret x15
endfunc
function inv_txfm_add_dct_dct_8x32_16bpc_neon, export=1
idct_dc 8, 32, 2
mov x15, x30
sub sp, sp, #512
movrel x13, eob_8x32
movi v28.4s, #0
mov x8, #4*32
mov w9, #32
mov x6, sp
mov x7, x2
1:
.irp i, 16, 17, 18, 19, 20, 21, 22, 23
ld1 {v\i\().4s}, [x7]
st1 {v28.4s}, [x7], x8
.endr
ldrh w12, [x13], #2
sub w9, w9, #4
sub x7, x7, x8, lsl #3
add x7, x7, #4*4
bl inv_dct_4s_x8_neon
sqrshrn v16.4h, v16.4s, #2
sqrshrn v17.4h, v17.4s, #2
sqrshrn v18.4h, v18.4s, #2
sqrshrn v19.4h, v19.4s, #2
sqrshrn2 v16.8h, v20.4s, #2
sqrshrn2 v17.8h, v21.4s, #2
sqrshrn2 v18.8h, v22.4s, #2
sqrshrn2 v19.8h, v23.4s, #2
transpose_4x8h v16, v17, v18, v19, v2, v3, v4, v5
cmp w3, w12
st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [x6], #64
b.ge 1b
cbz w9, 3f
movi v29.8h, #0
movi v30.8h, #0
movi v31.8h, #0
2:
subs w9, w9, #4
st1 {v28.8h,v29.8h,v30.8h,v31.8h}, [x6], #64
b.gt 2b
3:
mov x6, x0
mov x7, sp
mov x8, #8*2
bl inv_txfm_add_vert_dct_8x32_neon
add sp, sp, #512
ret x15
endfunc
function inv_txfm_add_dct_dct_32x8_16bpc_neon, export=1
idct_dc 32, 8, 2
mov x15, x30
sub sp, sp, #512
.irp i, 0, 4
add x6, sp, #(\i*32*2)
add x7, x2, #(\i*4)
.if \i > 0
cmp w3, #10
b.lt 1f
.endif
mov x8, #8*4
bl inv_txfm_horz_dct_32x4_neon
.endr
b 2f
1:
movi v4.8h, #0
movi v5.8h, #0
movi v6.8h, #0
movi v7.8h, #0
.rept 4
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x6], #64
.endr
2:
mov x8, #2*32
mov w9, #0
1:
add x6, x0, x9, lsl #1
add x7, sp, x9, lsl #1 // sp + 2*x9 (column offset in bytes)
.irp i, 16, 17, 18, 19, 20, 21, 22, 23
ld1 {v\i\().8h}, [x7], x8
.endr
add w9, w9, #8
bl X(inv_dct_8h_x8_neon)
cmp w9, #32
load_add_store_8x8 x6, x7
b.lt 1b
add sp, sp, #512
ret x15
endfunc
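// 64-point DCT, first helper: one group of eight of the 32 odd-half values
// (t32..t63). Four input coefficients are scaled by rotation constants from
// idct64_coeffs (x17), run through the first butterfly/rotation stages with
// range clipping, and the eight results are stored to the buffer at x6.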
function inv_dct64_step1_neon
// in1/31/17/15 -> t32a/33/34a/35/60/61a/62/63a
// in7/25/23/ 9 -> t56a/57/58a/59/36/37a/38/39a
// in5/27/21/11 -> t40a/41/42a/43/52/53a/54/55a
// in3/29/19/13 -> t48a/49/50a/51/44/45a/46/47a
ld1 {v0.4s, v1.4s}, [x17], #32
sqrdmulh v23.4s, v16.4s, v0.s[1] // t63a
sqrdmulh v16.4s, v16.4s, v0.s[0] // t32a
sqrdmulh v22.4s, v17.4s, v0.s[2] // t62a
sqrdmulh v17.4s, v17.4s, v0.s[3] // t33a
sqrdmulh v21.4s, v18.4s, v1.s[1] // t61a
sqrdmulh v18.4s, v18.4s, v1.s[0] // t34a
sqrdmulh v20.4s, v19.4s, v1.s[2] // t60a
sqrdmulh v19.4s, v19.4s, v1.s[3] // t35a
ld1 {v0.4s}, [x17], #16
sqadd v24.4s, v16.4s, v17.4s // t32
sqsub v25.4s, v16.4s, v17.4s // t33
sqsub v26.4s, v19.4s, v18.4s // t34
sqadd v27.4s, v19.4s, v18.4s // t35
sqadd v28.4s, v20.4s, v21.4s // t60
sqsub v29.4s, v20.4s, v21.4s // t61
sqsub v30.4s, v23.4s, v22.4s // t62
sqadd v31.4s, v23.4s, v22.4s // t63
.irp r, v24, v25, v26, v27, v28, v29, v30, v31
smin_4s \r, \r, v5
.endr
.irp r, v24, v25, v26, v27, v28, v29, v30, v31
smax_4s \r, \r, v4
.endr
mul_mla v2, v29, v26, v0.s[0], v0.s[1] // -> t34a
mul_mls v7, v29, v26, v0.s[1], v0.s[0] // -> t61a
neg v2.4s, v2.4s // t34a
mul_mls v6, v30, v25, v0.s[1], v0.s[0] // -> t33a
srshr v26.4s, v2.4s, #12 // t34a
mul_mla v2, v30, v25, v0.s[0], v0.s[1] // -> t62a
srshr v29.4s, v7.4s, #12 // t61a
srshr v25.4s, v6.4s, #12 // t33a
srshr v30.4s, v2.4s, #12 // t62a
sqadd v16.4s, v24.4s, v27.4s // t32a
sqsub v19.4s, v24.4s, v27.4s // t35a
sqadd v17.4s, v25.4s, v26.4s // t33
sqsub v18.4s, v25.4s, v26.4s // t34
sqsub v20.4s, v31.4s, v28.4s // t60a
sqadd v23.4s, v31.4s, v28.4s // t63a
sqsub v21.4s, v30.4s, v29.4s // t61
sqadd v22.4s, v30.4s, v29.4s // t62
.irp r, v16, v19, v17, v18, v20, v23, v21, v22
smin_4s \r, \r, v5
.endr
.irp r, v16, v19, v17, v18, v20, v23, v21, v22
smax_4s \r, \r, v4
.endr
mul_mla v2, v21, v18, v0.s[2], v0.s[3] // -> t61a
mul_mls v7, v21, v18, v0.s[3], v0.s[2] // -> t34a
mul_mla v6, v20, v19, v0.s[2], v0.s[3] // -> t60
srshr v21.4s, v2.4s, #12 // t61a
srshr v18.4s, v7.4s, #12 // t34a
mul_mls v2, v20, v19, v0.s[3], v0.s[2] // -> t35
srshr v20.4s, v6.4s, #12 // t60
srshr v19.4s, v2.4s, #12 // t35
st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [x6], #64
st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [x6], #64
ret
endfunc
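// 64-point DCT, second helper: combine the values stored by step1 from mirrored
// offsets (x6 ascending, x9 descending), applying the remaining butterfly and
// rotation stages, and loop until the two pointers meet.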
function inv_dct64_step2_neon
movrel x16, idct_coeffs
ld1 {v0.4s}, [x16]
1:
// t32a/33/34a/35/60/61a/62/63a
// t56a/57/58a/59/36/37a/38/39a
// t40a/41/42a/43/52/53a/54/55a
// t48a/49/50a/51/44/45a/46/47a
ldr q16, [x6, #4*4*0] // t32a
ldr q17, [x9, #4*4*8] // t39a
ldr q18, [x9, #4*4*0] // t63a
ldr q19, [x6, #4*4*8] // t56a
ldr q20, [x6, #4*4*16] // t40a
ldr q21, [x9, #4*4*24] // t47a
ldr q22, [x9, #4*4*16] // t55a
ldr q23, [x6, #4*4*24] // t48a
sqadd v24.4s, v16.4s, v17.4s // t32
sqsub v25.4s, v16.4s, v17.4s // t39
sqadd v26.4s, v18.4s, v19.4s // t63
sqsub v27.4s, v18.4s, v19.4s // t56
sqsub v28.4s, v21.4s, v20.4s // t40
sqadd v29.4s, v21.4s, v20.4s // t47
sqadd v30.4s, v23.4s, v22.4s // t48
sqsub v31.4s, v23.4s, v22.4s // t55
.irp r, v24, v25, v26, v27, v28, v29, v30, v31
smin_4s \r, \r, v5
.endr
.irp r, v24, v25, v26, v27, v28, v29, v30, v31
smax_4s \r, \r, v4
.endr
mul_mla v2, v27, v25, v0.s[3], v0.s[2] // -> t56a
mul_mls v7, v27, v25, v0.s[2], v0.s[3] // -> t39a
mul_mla v6, v31, v28, v0.s[3], v0.s[2] // -> t40a
srshr v25.4s, v2.4s, #12 // t56a
srshr v27.4s, v7.4s, #12 // t39a
neg v6.4s, v6.4s // t40a
mul_mls v2, v31, v28, v0.s[2], v0.s[3] // -> t55a
srshr v31.4s, v6.4s, #12 // t40a
srshr v28.4s, v2.4s, #12 // t55a
sqadd v16.4s, v24.4s, v29.4s // t32a
sqsub v19.4s, v24.4s, v29.4s // t47a
sqadd v17.4s, v27.4s, v31.4s // t39
sqsub v18.4s, v27.4s, v31.4s // t40
sqsub v20.4s, v26.4s, v30.4s // t48a
sqadd v23.4s, v26.4s, v30.4s // t63a
sqsub v21.4s, v25.4s, v28.4s // t55
sqadd v22.4s, v25.4s, v28.4s // t56
.irp r, v16, v19, v17, v18, v20, v23, v21, v22
smin_4s \r, \r, v5
.endr
.irp r, v16, v19, v17, v18, v20, v23, v21, v22
smax_4s \r, \r, v4
.endr
mul_mls v2, v21, v18, v0.s[0], v0.s[0] // -> t40a
mul_mla v7, v21, v18, v0.s[0], v0.s[0] // -> t55a
mul_mls v6, v20, v19, v0.s[0], v0.s[0] // -> t47
srshr v18.4s, v2.4s, #12 // t40a
srshr v21.4s, v7.4s, #12 // t55a
mul_mla v2, v20, v19, v0.s[0], v0.s[0] // -> t48
srshr v19.4s, v6.4s, #12 // t47
srshr v20.4s, v2.4s, #12 // t48
str q16, [x6, #4*4*0] // t32a
str q17, [x9, #4*4*0] // t39
str q18, [x6, #4*4*8] // t40a
str q19, [x9, #4*4*8] // t47
str q20, [x6, #4*4*16] // t48
str q21, [x9, #4*4*16] // t55a
str q22, [x6, #4*4*24] // t56
str q23, [x9, #4*4*24] // t63a
add x6, x6, #4*4
sub x9, x9, #4*4
cmp x6, x9
b.lt 1b
ret
endfunc
.macro load8 src, strd, zero, clear
.irp i, v16.4s, v17.4s, v18.4s, v19.4s, v20.4s, v21.4s, v22.4s, v23.4s
.if \clear
ld1 {\i}, [\src]
st1 {\zero}, [\src], \strd
.else
ld1 {\i}, [\src], \strd
.endif
.endr
.endm
.macro store16 dst
.irp i, v16.4s, v17.4s, v18.4s, v19.4s, v20.4s, v21.4s, v22.4s, v23.4s, v24.4s, v25.4s, v26.4s, v27.4s, v28.4s, v29.4s, v30.4s, v31.4s
st1 {\i}, [\dst], #16
.endr
.endm
.macro clear_upper8
.irp i, v24.4s, v25.4s, v26.4s, v27.4s, v28.4s, v29.4s, v30.4s, v31.4s
movi \i, #0
.endr
.endm
.macro movi_if reg, val, cond
.if \cond
movi \reg, \val
.endif
.endm
.macro movz16dup_if reg, gpr, val, cond
.if \cond
movz \gpr, \val, lsl #16
dup \reg, \gpr
.endif
.endm
.macro st1_if regs, dst, cond
.if \cond
st1 \regs, \dst
.endif
.endm
.macro str_if reg, dst, cond
.if \cond
str \reg, \dst
.endif
.endm
.macro stroff_if reg, dst, dstoff, cond
.if \cond
str \reg, \dst, \dstoff
.endif
.endm
.macro scale_if cond, c, r0, r1, r2, r3, r4, r5, r6, r7
.if \cond
scale_input .4s, \c, \r0, \r1, \r2, \r3, \r4, \r5, \r6, \r7
.endif
.endm
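// First-pass 64-point DCT for four columns. Only the low 32 input rows are read
// (the upper half is assumed zero): the even 16-point DCT and the 32-point odd
// half are combined into 32 values on the stack, then t32..t63 are produced via
// inv_dct64_step1/step2. The _clear variants zero the coefficients as they are
// read; _scale pre-multiplies by 2896/4096 for the rectangular sizes.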
.macro def_dct64_func suffix, clear=0, scale=0
function inv_txfm_dct\suffix\()_4s_x64_neon
mov x14, x30
mov x6, sp
lsl x8, x8, #2
movz16dup_if v0.2s, w16, #2896*8, \scale
movi_if v7.4s, #0, \clear
load8 x7, x8, v7.4s, \clear
clear_upper8
sub x7, x7, x8, lsl #3
add x7, x7, x8, lsr #1
scale_if \scale, v0.s[0], v16, v17, v18, v19, v20, v21, v22, v23
bl inv_dct_4s_x16_neon
// idct_16 leaves the row_clip_max/min constants in v5 and v4
.irp r, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31
smin_4s \r, \r, v5
.endr
.irp r, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31
smax_4s \r, \r, v4
.endr
store16 x6
movz16dup_if v0.2s, w16, #2896*8, \scale
movi_if v7.8h, #0, \clear
load8 x7, x8, v7.4s, \clear
clear_upper8
sub x7, x7, x8, lsl #3
lsr x8, x8, #1
sub x7, x7, x8, lsr #1
scale_if \scale, v0.s[0], v16, v17, v18, v19, v20, v21, v22, v23
bl inv_dct32_odd_4s_x16_neon
add x10, x6, #16*15
sub x6, x6, #16*16
mov x9, #-16
movi v1.4s, #1, msl #16 // row_clip_max = ~(~bdmax << 7), 0x1ffff
mvni v0.4s, #1, msl #16 // row_clip_min = (~bdmax << 7), 0xfffe0000
.macro store_addsub r0, r1, r2, r3
ld1 {v2.4s}, [x6], #16
ld1 {v3.4s}, [x6], #16
sqadd v6.4s, v2.4s, \r0
sqsub \r0, v2.4s, \r0
ld1 {v4.4s}, [x6], #16
sqadd v7.4s, v3.4s, \r1
sqsub \r1, v3.4s, \r1
smin v6.4s, v6.4s, v1.4s
smin \r0, \r0, v1.4s
ld1 {v5.4s}, [x6], #16
sqadd v2.4s, v4.4s, \r2
sub x6, x6, #16*4
smax v6.4s, v6.4s, v0.4s
smax \r0, \r0, v0.4s
sqsub \r2, v4.4s, \r2
smin v7.4s, v7.4s, v1.4s
smin \r1, \r1, v1.4s
st1 {v6.4s}, [x6], #16
st1 {\r0}, [x10], x9
smin v2.4s, v2.4s, v1.4s
smin \r2, \r2, v1.4s
smax v7.4s, v7.4s, v0.4s
smax \r1, \r1, v0.4s
sqadd v3.4s, v5.4s, \r3
sqsub \r3, v5.4s, \r3
smax v2.4s, v2.4s, v0.4s
smax \r2, \r2, v0.4s
smin v3.4s, v3.4s, v1.4s
smin \r3, \r3, v1.4s
st1 {v7.4s}, [x6], #16
st1 {\r1}, [x10], x9
smax v3.4s, v3.4s, v0.4s
smax \r3, \r3, v0.4s
st1 {v2.4s}, [x6], #16
st1 {\r2}, [x10], x9
st1 {v3.4s}, [x6], #16
st1 {\r3}, [x10], x9
.endm
store_addsub v31.4s, v30.4s, v29.4s, v28.4s
store_addsub v27.4s, v26.4s, v25.4s, v24.4s
store_addsub v23.4s, v22.4s, v21.4s, v20.4s
store_addsub v19.4s, v18.4s, v17.4s, v16.4s
.purgem store_addsub
add x6, x6, #4*4*16
movrel x17, idct64_coeffs
movi v5.4s, #1, msl #16 // row_clip_max = ~(~bdmax << 7), 0x1ffff
mvni v4.4s, #1, msl #16 // row_clip_min = (~bdmax << 7), 0xfffe0000
movz16dup_if v0.2s, w16, #2896*8, \scale
movi_if v7.4s, #0, \clear
add x9, x7, x8, lsl #4 // offset 16
add x10, x7, x8, lsl #3 // offset 8
sub x9, x9, x8 // offset 15
sub x11, x10, x8 // offset 7
ld1 {v16.4s}, [x7] // in1 (offset 0)
ld1 {v17.4s}, [x9] // in31 (offset 15)
ld1 {v18.4s}, [x10] // in17 (offset 8)
ld1 {v19.4s}, [x11] // in15 (offset 7)
st1_if {v7.4s}, [x7], \clear
st1_if {v7.4s}, [x9], \clear
st1_if {v7.4s}, [x10], \clear
st1_if {v7.4s}, [x11], \clear
scale_if \scale, v0.s[0], v16, v17, v18, v19
bl inv_dct64_step1_neon
movz16dup_if v0.2s, w16, #2896*8, \scale
movi_if v7.4s, #0, \clear
add x7, x7, x8, lsl #2 // offset 4
sub x9, x9, x8, lsl #2 // offset 11
sub x10, x7, x8 // offset 3
add x11, x9, x8 // offset 12
ld1 {v16.4s}, [x10] // in7 (offset 3)
ld1 {v17.4s}, [x11] // in25 (offset 12)
ld1 {v18.4s}, [x9] // in23 (offset 11)
ld1 {v19.4s}, [x7] // in9 (offset 4)
st1_if {v7.4s}, [x7], \clear
st1_if {v7.4s}, [x9], \clear
st1_if {v7.4s}, [x10], \clear
st1_if {v7.4s}, [x11], \clear
scale_if \scale, v0.s[0], v16, v17, v18, v19
bl inv_dct64_step1_neon
movz16dup_if v0.2s, w16, #2896*8, \scale
movi_if v7.4s, #0, \clear
sub x10, x10, x8, lsl #1 // offset 1
sub x9, x9, x8, lsl #1 // offset 9
add x7, x7, x8 // offset 5
add x11, x11, x8 // offset 13
ldr q16, [x10, x8] // in5 (offset 2)
ldr q17, [x11] // in27 (offset 13)
ldr q18, [x9, x8] // in21 (offset 10)
ldr q19, [x7] // in11 (offset 5)
stroff_if q7, [x10, x8], \clear
str_if q7, [x11], \clear
stroff_if q7, [x9, x8], \clear
str_if q7, [x7], \clear
scale_if \scale, v0.s[0], v16, v17, v18, v19
bl inv_dct64_step1_neon
movz16dup_if v0.2s, w16, #2896*8, \scale
movi_if v7.4s, #0, \clear
ldr q16, [x10] // in3 (offset 1)
ldr q17, [x11, x8] // in29 (offset 14)
ldr q18, [x9] // in19 (offset 9)
ldr q19, [x7, x8] // in13 (offset 6)
str_if q7, [x10], \clear
stroff_if q7, [x11, x8], \clear
str_if q7, [x9], \clear
stroff_if q7, [x7, x8], \clear
scale_if \scale, v0.s[0], v16, v17, v18, v19
bl inv_dct64_step1_neon
sub x6, x6, #4*4*32
add x9, x6, #4*4*7
bl inv_dct64_step2_neon
ret x14
endfunc
.endm
def_dct64_func _clear, clear=1
def_dct64_func _clear_scale, clear=1, scale=1
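// Horizontal 64-point pass: read the first-pass output for four rows from the
// stack, combining element i with element 63-i (add/sub), apply the rounding
// shift given in w12 and store the narrowed 16-bit rows, mirrored, to the
// intermediate buffer at x6.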
function inv_txfm_horz_dct_64x4_neon
mov x14, x30
mov x7, sp
add x8, sp, #4*4*(64 - 4)
add x9, x6, #2*56
mov x10, #2*64
mov x11, #-4*4*4
dup v7.4s, w12
1:
ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [x7], #64
ld1 {v28.4s, v29.4s, v30.4s, v31.4s}, [x8], x11
ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [x7], #64
ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [x8], x11
transpose_4x4s v16, v17, v18, v19, v2, v3, v4, v5
transpose_4x4s v20, v21, v22, v23, v2, v3, v4, v5
transpose_4x4s v31, v30, v29, v28, v2, v3, v4, v5
transpose_4x4s v27, v26, v25, v24, v2, v3, v4, v5
.macro store_addsub src0, src1, src2, src3
sqsub v1.4s, \src0, \src1
sqadd v0.4s, \src0, \src1
sqsub v3.4s, \src2, \src3
srshl v1.4s, v1.4s, v7.4s
sqadd v2.4s, \src2, \src3
srshl v3.4s, v3.4s, v7.4s
srshl v0.4s, v0.4s, v7.4s
srshl v2.4s, v2.4s, v7.4s
sqxtn v3.4h, v3.4s
sqxtn2 v3.8h, v1.4s
sqxtn v0.4h, v0.4s
sqxtn2 v0.8h, v2.4s
rev64 v3.8h, v3.8h
st1 {v0.8h}, [x6], x10
st1 {v3.8h}, [x9], x10
.endm
store_addsub v16.4s, v31.4s, v20.4s, v27.4s
store_addsub v17.4s, v30.4s, v21.4s, v26.4s
store_addsub v18.4s, v29.4s, v22.4s, v25.4s
store_addsub v19.4s, v28.4s, v23.4s, v24.4s
.purgem store_addsub
sub x6, x6, x10, lsl #2
sub x9, x9, x10, lsl #2
add x6, x6, #16
sub x9, x9, #16
cmp x7, x8
b.lt 1b
ret x14
endfunc
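// Vertical 64-point pass over an 8-column strip: combine rows i and 63-i of the
// column transform output on the stack, shift right by 4 and accumulate into
// the destination, clamping to the pixel maximum in v7.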
function inv_txfm_add_vert_dct_8x64_neon
mov x14, x30
lsl x8, x8, #1
mov x7, sp
add x8, sp, #2*8*(64 - 4)
add x9, x6, x1, lsl #6
sub x9, x9, x1
neg x10, x1
mov x11, #-2*8*4
1:
ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [x7], #64
ld1 {v28.8h, v29.8h, v30.8h, v31.8h}, [x8], x11
ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [x7], #64
ld1 {v24.8h, v25.8h, v26.8h, v27.8h}, [x8], x11
mvni v7.8h, #0xfc, lsl #8 // 0x3ff
.macro add_dest_addsub src0, src1, src2, src3
ld1 {v0.8h}, [x6], x1
ld1 {v1.8h}, [x9], x10
sqadd v4.8h, \src0, \src1
ld1 {v2.8h}, [x6]
sqsub \src0, \src0, \src1
ld1 {v3.8h}, [x9]
sqadd v5.8h, \src2, \src3
sqsub \src2, \src2, \src3
sub x6, x6, x1
sub x9, x9, x10
srshr v4.8h, v4.8h, #4
srshr v5.8h, v5.8h, #4
srshr \src0, \src0, #4
usqadd v0.8h, v4.8h
srshr \src2, \src2, #4
usqadd v1.8h, \src0
usqadd v2.8h, v5.8h
smin v0.8h, v0.8h, v7.8h
usqadd v3.8h, \src2
smin v1.8h, v1.8h, v7.8h
st1 {v0.8h}, [x6], x1
smin v2.8h, v2.8h, v7.8h
st1 {v1.8h}, [x9], x10
smin v3.8h, v3.8h, v7.8h
st1 {v2.8h}, [x6], x1
st1 {v3.8h}, [x9], x10
.endm
add_dest_addsub v16.8h, v31.8h, v17.8h, v30.8h
add_dest_addsub v18.8h, v29.8h, v19.8h, v28.8h
add_dest_addsub v20.8h, v27.8h, v21.8h, v26.8h
add_dest_addsub v22.8h, v25.8h, v23.8h, v24.8h
.purgem add_dest_addsub
cmp x7, x8
b.lt 1b
ret x14
endfunc
function inv_txfm_add_dct_dct_64x64_16bpc_neon, export=1
idct_dc 64, 64, 2
mov x15, x30
sub_sp 64*32*2+64*4*4
add x5, sp, #64*4*4
movrel x13, eob_32x32
.irp i, 0, 4, 8, 12, 16, 20, 24, 28
add x6, x5, #(\i*64*2)
.if \i > 0
mov w8, #(32 - \i)
cmp w3, w12
b.lt 1f
.endif
add x7, x2, #(\i*4)
mov x8, #32*4
mov x12, #-2 // shift
bl inv_txfm_dct_clear_4s_x64_neon
add x6, x5, #(\i*64*2)
bl inv_txfm_horz_dct_64x4_neon
.if \i < 28
ldrh w12, [x13], #2
.endif
.endr
b 3f
1:
movi v4.8h, #0
movi v5.8h, #0
movi v6.8h, #0
movi v7.8h, #0
2:
subs w8, w8, #2
.rept 4
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x6], #64
.endr
b.gt 2b
3:
.irp i, 0, 8, 16, 24, 32, 40, 48, 56
add x7, x5, #(\i*2)
mov x8, #64*2
bl X(inv_txfm_dct_8h_x64_neon)
add x6, x0, #(\i*2)
bl inv_txfm_add_vert_dct_8x64_neon
.endr
add sp, x5, #64*32*2
ret x15
endfunc
function inv_txfm_add_dct_dct_64x32_16bpc_neon, export=1
idct_dc 64, 32, 1
mov x15, x30
sub_sp 64*32*2+64*4*4
add x5, sp, #64*4*4
movrel x13, eob_32x32
.irp i, 0, 4, 8, 12, 16, 20, 24, 28
add x6, x5, #(\i*64*2)
.if \i > 0
mov w8, #(32 - \i)
cmp w3, w12
b.lt 1f
.endif
add x7, x2, #(\i*4)
mov x8, #32*4
mov x12, #-1 // shift
bl inv_txfm_dct_clear_scale_4s_x64_neon
add x6, x5, #(\i*64*2)
bl inv_txfm_horz_dct_64x4_neon
.if \i < 28
ldrh w12, [x13], #2
.endif
.endr
b 3f
1:
movi v4.8h, #0
movi v5.8h, #0
movi v6.8h, #0
movi v7.8h, #0
2:
subs w8, w8, #2
.rept 4
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x6], #64
.endr
b.gt 2b
3:
.irp i, 0, 8, 16, 24, 32, 40, 48, 56
add x6, x0, #(\i*2)
add x7, x5, #(\i*2)
mov x8, #64*2
bl inv_txfm_add_vert_dct_8x32_neon
.endr
add sp, x5, #64*32*2
ret x15
endfunc
function inv_txfm_add_dct_dct_32x64_16bpc_neon, export=1
idct_dc 32, 64, 1
mov x15, x30
sub_sp 32*32*2+64*8*2
add x5, sp, #64*8*2
movrel x13, eob_32x32
ldrh w12, [x13], #2
.irp i, 0, 4, 8, 12, 16, 20, 24, 28
add x6, x5, #(\i*32*2)
.if \i > 0
mov w8, #(32 - \i)
cmp w3, w12
b.lt 1f
ldrh w12, [x13], #2
.endif
add x7, x2, #(\i*4)
mov x8, #32*4
bl inv_txfm_horz_scale_dct_32x4_neon
.endr
b 3f
1:
movi v4.8h, #0
movi v5.8h, #0
movi v6.8h, #0
movi v7.8h, #0
2:
subs w8, w8, #4
.rept 4
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x6], #64
.endr
b.gt 2b
3:
.irp i, 0, 8, 16, 24
add x7, x5, #(\i*2)
mov x8, #32*2
bl X(inv_txfm_dct_8h_x64_neon)
add x6, x0, #(\i*2)
bl inv_txfm_add_vert_dct_8x64_neon
.endr
add sp, x5, #32*32*2
ret x15
endfunc
function inv_txfm_add_dct_dct_64x16_16bpc_neon, export=1
idct_dc 64, 16, 2
mov x15, x30
sub_sp 64*16*2+64*4*4
add x4, sp, #64*4*4
movrel x13, eob_16x32
.irp i, 0, 4, 8, 12
add x6, x4, #(\i*64*2)
.if \i > 0
mov w8, #(16 - \i)
cmp w3, w12
b.lt 1f
.endif
add x7, x2, #(\i*4)
mov x8, #16*4
mov x12, #-2 // shift
bl inv_txfm_dct_clear_4s_x64_neon
add x6, x4, #(\i*64*2)
bl inv_txfm_horz_dct_64x4_neon
.if \i < 12
ldrh w12, [x13], #2
.endif
.endr
b 3f
1:
movi v4.8h, #0
movi v5.8h, #0
movi v6.8h, #0
movi v7.8h, #0
2:
subs w8, w8, #2
.rept 4
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x6], #64
.endr
b.gt 2b
3:
movrel x5, X(inv_dct_8h_x16_neon)
.irp i, 0, 8, 16, 24, 32, 40, 48, 56
add x6, x0, #(\i*2)
add x7, x4, #(\i*2)
mov x8, #64*2
bl inv_txfm_add_vert_8x16_neon
.endr
add sp, x4, #64*16*2
ret x15
endfunc
function inv_txfm_add_dct_dct_16x64_16bpc_neon, export=1
idct_dc 16, 64, 2
mov x15, x30
sub_sp 16*32*2+64*8*2
add x5, sp, #64*8*2
movrel x13, eob_16x32
ldrh w12, [x13], #2
adr x4, inv_dct_4s_x16_neon
.irp i, 0, 4, 8, 12, 16, 20, 24, 28
add x6, x5, #(\i*16*2)
.if \i > 0
mov w8, #(32 - \i)
cmp w3, w12
b.lt 1f
.if \i < 28
ldrh w12, [x13], #2
.endif
.endif
add x7, x2, #(\i*4)
mov x8, #32*4
bl inv_txfm_horz_16x4_neon
.endr
b 3f
1:
movi v4.8h, #0
movi v5.8h, #0
movi v6.8h, #0
movi v7.8h, #0
2:
subs w8, w8, #4
.rept 2
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x6], #64
.endr
b.gt 2b
3:
.irp i, 0, 8
add x7, x5, #(\i*2)
mov x8, #16*2
bl X(inv_txfm_dct_8h_x64_neon)
add x6, x0, #(\i*2)
bl inv_txfm_add_vert_dct_8x64_neon
.endr
add sp, x5, #16*32*2
ret x15
endfunc
Admenri/urge | 151,419 | third_party/dav1d/src/arm/64/mc16.S |
/*
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2018, Janne Grunau
* Copyright © 2020, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm.S"
#include "util.S"
#define PREP_BIAS 8192
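// avg: plain average of two prep() intermediates converted back to pixels:
// saturating add, remove the 2*PREP_BIAS offset and add rounding via v28, then
// arithmetic shift right by intermediate_bits+1 (v29). The smax clamp keeps the
// result from going below zero.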
.macro avg d0, d1, t0, t1, t2, t3
ld1 {\t0\().8h,\t1\().8h}, [x2], 32
ld1 {\t2\().8h,\t3\().8h}, [x3], 32
sqadd \t0\().8h, \t0\().8h, \t2\().8h
sqadd \t1\().8h, \t1\().8h, \t3\().8h
smax \t0\().8h, \t0\().8h, v28.8h // -2*PREP_BIAS - 1 << intermediate_bits
smax \t1\().8h, \t1\().8h, v28.8h // -2*PREP_BIAS - 1 << intermediate_bits
sqsub \t0\().8h, \t0\().8h, v28.8h // -2*PREP_BIAS - 1 << intermediate_bits
sqsub \t1\().8h, \t1\().8h, v28.8h // -2*PREP_BIAS - 1 << intermediate_bits
sshl \d0\().8h, \t0\().8h, v29.8h // -(intermediate_bits+1)
sshl \d1\().8h, \t1\().8h, v29.8h // -(intermediate_bits+1)
.endm
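// w_avg: weighted average d = (tmp1*w + tmp2*(16-w)) / 16, computed in 32 bits
// as tmp2 + (((tmp1 - tmp2)*w) >> 4) with v27 = -w, then converted back to the
// pixel range and clamped to [0, bitdepth_max].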
.macro w_avg d0, d1, t0, t1, t2, t3
ld1 {\t0\().8h,\t1\().8h}, [x2], 32
ld1 {\t2\().8h,\t3\().8h}, [x3], 32
// This difference requires a 17 bit range, and all bits are
// significant for the following multiplication.
ssubl \d0\().4s, \t2\().4h, \t0\().4h
ssubl2 \t0\().4s, \t2\().8h, \t0\().8h
ssubl \d1\().4s, \t3\().4h, \t1\().4h
ssubl2 \t1\().4s, \t3\().8h, \t1\().8h
mul \d0\().4s, \d0\().4s, v27.4s
mul \t0\().4s, \t0\().4s, v27.4s
mul \d1\().4s, \d1\().4s, v27.4s
mul \t1\().4s, \t1\().4s, v27.4s
sshr \d0\().4s, \d0\().4s, #4
sshr \t0\().4s, \t0\().4s, #4
sshr \d1\().4s, \d1\().4s, #4
sshr \t1\().4s, \t1\().4s, #4
saddw \d0\().4s, \d0\().4s, \t2\().4h
saddw2 \t0\().4s, \t0\().4s, \t2\().8h
saddw \d1\().4s, \d1\().4s, \t3\().4h
saddw2 \t1\().4s, \t1\().4s, \t3\().8h
uzp1 \d0\().8h, \d0\().8h, \t0\().8h // Same as xtn, xtn2
uzp1 \d1\().8h, \d1\().8h, \t1\().8h // Ditto
srshl \d0\().8h, \d0\().8h, v29.8h // -intermediate_bits
srshl \d1\().8h, \d1\().8h, v29.8h // -intermediate_bits
add \d0\().8h, \d0\().8h, v28.8h // PREP_BIAS >> intermediate_bits
add \d1\().8h, \d1\().8h, v28.8h // PREP_BIAS >> intermediate_bits
smin \d0\().8h, \d0\().8h, v31.8h // bitdepth_max
smin \d1\().8h, \d1\().8h, v31.8h // bitdepth_max
smax \d0\().8h, \d0\().8h, v30.8h // 0
smax \d1\().8h, \d1\().8h, v30.8h // 0
.endm
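// mask: per-pixel blend d = (tmp1*m + tmp2*(64-m)) / 64 with the 6-bit mask
// loaded from x6 (negated so the widening multiply uses -m), then converted
// back to the pixel range and clamped.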
.macro mask d0, d1, t0, t1, t2, t3
ld1 {v27.16b}, [x6], 16
ld1 {\t0\().8h,\t1\().8h}, [x2], 32
neg v27.16b, v27.16b
ld1 {\t2\().8h,\t3\().8h}, [x3], 32
sxtl v26.8h, v27.8b
sxtl2 v27.8h, v27.16b
sxtl v24.4s, v26.4h
sxtl2 v25.4s, v26.8h
sxtl v26.4s, v27.4h
sxtl2 v27.4s, v27.8h
ssubl \d0\().4s, \t2\().4h, \t0\().4h
ssubl2 \t0\().4s, \t2\().8h, \t0\().8h
ssubl \d1\().4s, \t3\().4h, \t1\().4h
ssubl2 \t1\().4s, \t3\().8h, \t1\().8h
mul \d0\().4s, \d0\().4s, v24.4s
mul \t0\().4s, \t0\().4s, v25.4s
mul \d1\().4s, \d1\().4s, v26.4s
mul \t1\().4s, \t1\().4s, v27.4s
sshr \d0\().4s, \d0\().4s, #6
sshr \t0\().4s, \t0\().4s, #6
sshr \d1\().4s, \d1\().4s, #6
sshr \t1\().4s, \t1\().4s, #6
saddw \d0\().4s, \d0\().4s, \t2\().4h
saddw2 \t0\().4s, \t0\().4s, \t2\().8h
saddw \d1\().4s, \d1\().4s, \t3\().4h
saddw2 \t1\().4s, \t1\().4s, \t3\().8h
uzp1 \d0\().8h, \d0\().8h, \t0\().8h // Same as xtn, xtn2
uzp1 \d1\().8h, \d1\().8h, \t1\().8h // Ditto
srshl \d0\().8h, \d0\().8h, v29.8h // -intermediate_bits
srshl \d1\().8h, \d1\().8h, v29.8h // -intermediate_bits
add \d0\().8h, \d0\().8h, v28.8h // PREP_BIAS >> intermediate_bits
add \d1\().8h, \d1\().8h, v28.8h // PREP_BIAS >> intermediate_bits
smin \d0\().8h, \d0\().8h, v31.8h // bitdepth_max
smin \d1\().8h, \d1\().8h, v31.8h // bitdepth_max
smax \d0\().8h, \d0\().8h, v30.8h // 0
smax \d1\().8h, \d1\().8h, v30.8h // 0
.endm
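// Shared driver for avg/w_avg/mask: derive intermediate_bits from bitdepth_max,
// set up the bias/shift constants used by the macros above, then dispatch on
// log2(width) through a jump table, producing one to four rows per iteration
// depending on width.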
.macro bidir_fn type, bdmax
function \type\()_16bpc_neon, export=1
clz w4, w4
.ifnc \type, avg
dup v31.8h, \bdmax // bitdepth_max
movi v30.8h, #0
.endif
clz w7, \bdmax
sub w7, w7, #18 // intermediate_bits = clz(bitdepth_max) - 18
.ifc \type, avg
mov w9, #1
mov w8, #-2*PREP_BIAS
lsl w9, w9, w7 // 1 << intermediate_bits
add w7, w7, #1
sub w8, w8, w9 // -2*PREP_BIAS - 1 << intermediate_bits
neg w7, w7 // -(intermediate_bits+1)
dup v28.8h, w8 // -2*PREP_BIAS - 1 << intermediate_bits
dup v29.8h, w7 // -(intermediate_bits+1)
.else
mov w8, #PREP_BIAS
lsr w8, w8, w7 // PREP_BIAS >> intermediate_bits
neg w7, w7 // -intermediate_bits
dup v28.8h, w8 // PREP_BIAS >> intermediate_bits
dup v29.8h, w7 // -intermediate_bits
.endif
.ifc \type, w_avg
dup v27.4s, w6
neg v27.4s, v27.4s
.endif
movrel x7, \type\()_tbl
sub w4, w4, #24
\type v4, v5, v0, v1, v2, v3
ldrsw x4, [x7, x4, lsl #2]
add x7, x7, x4
br x7
40:
AARCH64_VALID_JUMP_TARGET
add x7, x0, x1
lsl x1, x1, #1
4:
subs w5, w5, #4
st1 {v4.8b}, [x0], x1
st1 {v4.d}[1], [x7], x1
st1 {v5.8b}, [x0], x1
st1 {v5.d}[1], [x7], x1
b.le 0f
\type v4, v5, v0, v1, v2, v3
b 4b
80:
AARCH64_VALID_JUMP_TARGET
add x7, x0, x1
lsl x1, x1, #1
8:
st1 {v4.8h}, [x0], x1
subs w5, w5, #2
st1 {v5.8h}, [x7], x1
b.le 0f
\type v4, v5, v0, v1, v2, v3
b 8b
160:
AARCH64_VALID_JUMP_TARGET
16:
\type v6, v7, v0, v1, v2, v3
st1 {v4.8h, v5.8h}, [x0], x1
subs w5, w5, #2
st1 {v6.8h, v7.8h}, [x0], x1
b.le 0f
\type v4, v5, v0, v1, v2, v3
b 16b
320:
AARCH64_VALID_JUMP_TARGET
32:
\type v6, v7, v0, v1, v2, v3
subs w5, w5, #1
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x0], x1
b.le 0f
\type v4, v5, v0, v1, v2, v3
b 32b
640:
AARCH64_VALID_JUMP_TARGET
add x7, x0, #64
64:
\type v6, v7, v0, v1, v2, v3
\type v16, v17, v0, v1, v2, v3
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x0], x1
\type v18, v19, v0, v1, v2, v3
subs w5, w5, #1
st1 {v16.8h,v17.8h,v18.8h,v19.8h}, [x7], x1
b.le 0f
\type v4, v5, v0, v1, v2, v3
b 64b
1280:
AARCH64_VALID_JUMP_TARGET
add x7, x0, #64
mov x8, #128
sub x1, x1, #128
128:
\type v6, v7, v0, v1, v2, v3
\type v16, v17, v0, v1, v2, v3
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x0], x8
\type v18, v19, v0, v1, v2, v3
st1 {v16.8h,v17.8h,v18.8h,v19.8h}, [x7], x8
\type v4, v5, v0, v1, v2, v3
\type v6, v7, v0, v1, v2, v3
\type v16, v17, v0, v1, v2, v3
subs w5, w5, #1
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x0], x1
\type v18, v19, v0, v1, v2, v3
st1 {v16.8h,v17.8h,v18.8h,v19.8h}, [x7], x1
b.le 0f
\type v4, v5, v0, v1, v2, v3
b 128b
0:
ret
endfunc
jumptable \type\()_tbl
.word 1280b - \type\()_tbl
.word 640b - \type\()_tbl
.word 320b - \type\()_tbl
.word 160b - \type\()_tbl
.word 80b - \type\()_tbl
.word 40b - \type\()_tbl
endjumptable
.endm
bidir_fn avg, w6
bidir_fn w_avg, w7
bidir_fn mask, w7
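// w_mask: blend like mask, but the blend weight is derived per pixel from
// |tmp1 - tmp2|; the blended pixels are written to dst and the mask itself is
// stored for the chroma planes at full (444), half (422) or quarter (420)
// resolution.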
.macro w_mask_fn type
function w_mask_\type\()_16bpc_neon, export=1
ldr w8, [sp]
clz w9, w4
movrel x10, w_mask_\type\()_tbl
dup v31.8h, w8 // bitdepth_max
sub w9, w9, #24
clz w8, w8 // clz(bitdepth_max)
ldrsw x9, [x10, x9, lsl #2]
add x10, x10, x9
sub w8, w8, #12 // sh = intermediate_bits + 6 = clz(bitdepth_max) - 12
mov w9, #PREP_BIAS*64
neg w8, w8 // -sh
mov w11, #27615 // (64 + 1 - 38)<<mask_sh - 1 - mask_rnd
dup v30.4s, w9 // PREP_BIAS*64
dup v29.4s, w8 // -sh
dup v0.8h, w11
.if \type == 444
movi v1.16b, #64
.elseif \type == 422
dup v2.8b, w7
movi v3.8b, #129
sub v3.8b, v3.8b, v2.8b
.elseif \type == 420
dup v2.8h, w7
movi v3.8h, #1, lsl #8
sub v3.8h, v3.8h, v2.8h
.endif
add x12, x0, x1
lsl x1, x1, #1
br x10
40:
AARCH64_VALID_JUMP_TARGET
4:
ld1 {v4.8h, v5.8h}, [x2], #32 // tmp1 (four rows at once)
ld1 {v6.8h, v7.8h}, [x3], #32 // tmp2 (four rows at once)
subs w5, w5, #4
sabd v20.8h, v4.8h, v6.8h // abs(tmp1 - tmp2)
sabd v21.8h, v5.8h, v7.8h
ssubl v16.4s, v6.4h, v4.4h // tmp2 - tmp1 (requires 17 bit)
ssubl2 v17.4s, v6.8h, v4.8h
ssubl v18.4s, v7.4h, v5.4h
ssubl2 v19.4s, v7.8h, v5.8h
uqsub v20.8h, v0.8h, v20.8h // 27615 - abs()
uqsub v21.8h, v0.8h, v21.8h
sshll2 v7.4s, v5.8h, #6 // tmp1 << 6
sshll v6.4s, v5.4h, #6
sshll2 v5.4s, v4.8h, #6
sshll v4.4s, v4.4h, #6
ushr v20.8h, v20.8h, #10 // 64-m = (27615 - abs()) >> mask_sh
ushr v21.8h, v21.8h, #10
add v4.4s, v4.4s, v30.4s // += PREP_BIAS*64
add v5.4s, v5.4s, v30.4s
add v6.4s, v6.4s, v30.4s
add v7.4s, v7.4s, v30.4s
uxtl v22.4s, v20.4h
uxtl2 v23.4s, v20.8h
uxtl v24.4s, v21.4h
uxtl2 v25.4s, v21.8h
mla v4.4s, v16.4s, v22.4s // (tmp2-tmp1)*(64-m)
mla v5.4s, v17.4s, v23.4s
mla v6.4s, v18.4s, v24.4s
mla v7.4s, v19.4s, v25.4s
srshl v4.4s, v4.4s, v29.4s // (tmp1<<6 + (tmp2-tmp1)*(64-m) + (1 << (sh-1)) + PREP_BIAS*64) >> sh
srshl v5.4s, v5.4s, v29.4s
srshl v6.4s, v6.4s, v29.4s
srshl v7.4s, v7.4s, v29.4s
sqxtun v4.4h, v4.4s // iclip_pixel
sqxtun2 v4.8h, v5.4s
sqxtun v5.4h, v6.4s
sqxtun2 v5.8h, v7.4s
umin v4.8h, v4.8h, v31.8h // iclip_pixel
umin v5.8h, v5.8h, v31.8h
.if \type == 444
uzp1 v20.16b, v20.16b, v21.16b // 64 - m
sub v20.16b, v1.16b, v20.16b // m
st1 {v20.16b}, [x6], #16
.elseif \type == 422
addp v20.8h, v20.8h, v21.8h // (64 - m) + (64 - n) (column wise addition)
xtn v20.8b, v20.8h
uhsub v20.8b, v3.8b, v20.8b // ((129 - sign) - ((64 - m) + (64 - n))) >> 1
st1 {v20.8b}, [x6], #8
.elseif \type == 420
trn1 v24.2d, v20.2d, v21.2d
trn2 v25.2d, v20.2d, v21.2d
add v24.8h, v24.8h, v25.8h // (64 - my1) + (64 - my2) (row wise addition)
addp v20.8h, v24.8h, v24.8h // (128 - m) + (128 - n) (column wise addition)
sub v20.4h, v3.4h, v20.4h // (256 - sign) - ((128 - m) + (128 - n))
rshrn v20.8b, v20.8h, #2 // ((256 - sign) - ((128 - m) + (128 - n)) + 2) >> 2
str s20, [x6], #4
.endif
st1 {v4.8b}, [x0], x1
st1 {v4.d}[1], [x12], x1
st1 {v5.8b}, [x0], x1
st1 {v5.d}[1], [x12], x1
b.gt 4b
ret
80:
AARCH64_VALID_JUMP_TARGET
8:
ld1 {v4.8h, v5.8h}, [x2], #32 // tmp1
ld1 {v6.8h, v7.8h}, [x3], #32 // tmp2
subs w5, w5, #2
sabd v20.8h, v4.8h, v6.8h // abs(tmp1 - tmp2)
sabd v21.8h, v5.8h, v7.8h
ssubl v16.4s, v6.4h, v4.4h // tmp2 - tmp1 (requires 17 bit)
ssubl2 v17.4s, v6.8h, v4.8h
ssubl v18.4s, v7.4h, v5.4h
ssubl2 v19.4s, v7.8h, v5.8h
uqsub v20.8h, v0.8h, v20.8h // 27615 - abs()
uqsub v21.8h, v0.8h, v21.8h
sshll2 v7.4s, v5.8h, #6 // tmp1 << 6
sshll v6.4s, v5.4h, #6
sshll2 v5.4s, v4.8h, #6
sshll v4.4s, v4.4h, #6
ushr v20.8h, v20.8h, #10 // 64-m = (27615 - abs()) >> mask_sh
ushr v21.8h, v21.8h, #10
add v4.4s, v4.4s, v30.4s // += PREP_BIAS*64
add v5.4s, v5.4s, v30.4s
add v6.4s, v6.4s, v30.4s
add v7.4s, v7.4s, v30.4s
uxtl v22.4s, v20.4h
uxtl2 v23.4s, v20.8h
uxtl v24.4s, v21.4h
uxtl2 v25.4s, v21.8h
mla v4.4s, v16.4s, v22.4s // (tmp2-tmp1)*(64-m)
mla v5.4s, v17.4s, v23.4s
mla v6.4s, v18.4s, v24.4s
mla v7.4s, v19.4s, v25.4s
srshl v4.4s, v4.4s, v29.4s // (tmp1<<6 + (tmp2-tmp1)*(64-m) + (1 << (sh-1)) + PREP_BIAS*64) >> sh
srshl v5.4s, v5.4s, v29.4s
srshl v6.4s, v6.4s, v29.4s
srshl v7.4s, v7.4s, v29.4s
sqxtun v4.4h, v4.4s // iclip_pixel
sqxtun2 v4.8h, v5.4s
sqxtun v5.4h, v6.4s
sqxtun2 v5.8h, v7.4s
umin v4.8h, v4.8h, v31.8h // iclip_pixel
umin v5.8h, v5.8h, v31.8h
.if \type == 444
uzp1 v20.16b, v20.16b, v21.16b // 64 - m
sub v20.16b, v1.16b, v20.16b // m
st1 {v20.16b}, [x6], #16
.elseif \type == 422
addp v20.8h, v20.8h, v21.8h // (64 - m) + (64 - n) (column wise addition)
xtn v20.8b, v20.8h
uhsub v20.8b, v3.8b, v20.8b // ((129 - sign) - ((64 - m) + (64 - n))) >> 1
st1 {v20.8b}, [x6], #8
.elseif \type == 420
add v20.8h, v20.8h, v21.8h // (64 - my1) + (64 - my2) (row wise addition)
addp v20.8h, v20.8h, v20.8h // (128 - m) + (128 - n) (column wise addition)
sub v20.4h, v3.4h, v20.4h // (256 - sign) - ((128 - m) + (128 - n))
rshrn v20.8b, v20.8h, #2 // ((256 - sign) - ((128 - m) + (128 - n)) + 2) >> 2
str s20, [x6], #4
.endif
st1 {v4.8h}, [x0], x1
st1 {v5.8h}, [x12], x1
b.gt 8b
ret
1280:
640:
320:
160:
AARCH64_VALID_JUMP_TARGET
mov w11, w4
sub x1, x1, w4, uxtw #1
.if \type == 444
add x10, x6, w4, uxtw
.elseif \type == 422
add x10, x6, x11, lsr #1
.endif
add x9, x3, w4, uxtw #1
add x7, x2, w4, uxtw #1
161:
mov w8, w4
16:
ld1 {v4.8h, v5.8h}, [x2], #32 // tmp1
ld1 {v16.8h, v17.8h}, [x3], #32 // tmp2
ld1 {v6.8h, v7.8h}, [x7], #32
ld1 {v18.8h, v19.8h}, [x9], #32
subs w8, w8, #16
sabd v20.8h, v4.8h, v16.8h // abs(tmp1 - tmp2)
sabd v21.8h, v5.8h, v17.8h
ssubl v22.4s, v16.4h, v4.4h // tmp2 - tmp1 (requires 17 bit)
ssubl2 v23.4s, v16.8h, v4.8h
ssubl v24.4s, v17.4h, v5.4h
ssubl2 v25.4s, v17.8h, v5.8h
uqsub v20.8h, v0.8h, v20.8h // 27615 - abs()
uqsub v21.8h, v0.8h, v21.8h
sshll2 v27.4s, v5.8h, #6 // tmp1 << 6
sshll v26.4s, v5.4h, #6
sshll2 v5.4s, v4.8h, #6
sshll v4.4s, v4.4h, #6
ushr v20.8h, v20.8h, #10 // 64-m = (27615 - abs()) >> mask_sh
ushr v21.8h, v21.8h, #10
add v4.4s, v4.4s, v30.4s // += PREP_BIAS*64
add v5.4s, v5.4s, v30.4s
add v26.4s, v26.4s, v30.4s
add v27.4s, v27.4s, v30.4s
uxtl v16.4s, v20.4h
uxtl2 v17.4s, v20.8h
uxtl v28.4s, v21.4h
mla v4.4s, v22.4s, v16.4s // (tmp2-tmp1)*(64-m)
uxtl2 v16.4s, v21.8h
mla v5.4s, v23.4s, v17.4s
mla v26.4s, v24.4s, v28.4s
mla v27.4s, v25.4s, v16.4s
srshl v4.4s, v4.4s, v29.4s // (tmp1<<6 + (tmp2-tmp1)*(64-m) + (1 << (sh-1)) + PREP_BIAS*64) >> sh
srshl v5.4s, v5.4s, v29.4s
srshl v26.4s, v26.4s, v29.4s
srshl v27.4s, v27.4s, v29.4s
sqxtun v4.4h, v4.4s // iclip_pixel
sqxtun2 v4.8h, v5.4s
sqxtun v5.4h, v26.4s
sqxtun2 v5.8h, v27.4s
// Start of other half
sabd v22.8h, v6.8h, v18.8h // abs(tmp1 - tmp2)
sabd v23.8h, v7.8h, v19.8h
umin v4.8h, v4.8h, v31.8h // iclip_pixel
umin v5.8h, v5.8h, v31.8h
ssubl v16.4s, v18.4h, v6.4h // tmp2 - tmp1 (requires 17 bit)
ssubl2 v17.4s, v18.8h, v6.8h
ssubl v18.4s, v19.4h, v7.4h
ssubl2 v19.4s, v19.8h, v7.8h
uqsub v22.8h, v0.8h, v22.8h // 27615 - abs()
uqsub v23.8h, v0.8h, v23.8h
sshll v24.4s, v6.4h, #6 // tmp1 << 6
sshll2 v25.4s, v6.8h, #6
sshll v26.4s, v7.4h, #6
sshll2 v27.4s, v7.8h, #6
ushr v22.8h, v22.8h, #10 // 64-m = (27615 - abs()) >> mask_sh
ushr v23.8h, v23.8h, #10
add v24.4s, v24.4s, v30.4s // += PREP_BIAS*64
add v25.4s, v25.4s, v30.4s
add v26.4s, v26.4s, v30.4s
add v27.4s, v27.4s, v30.4s
uxtl v6.4s, v22.4h
uxtl2 v7.4s, v22.8h
uxtl v28.4s, v23.4h
mla v24.4s, v16.4s, v6.4s // (tmp2-tmp1)*(64-m)
uxtl2 v6.4s, v23.8h
mla v25.4s, v17.4s, v7.4s
mla v26.4s, v18.4s, v28.4s
mla v27.4s, v19.4s, v6.4s
srshl v24.4s, v24.4s, v29.4s // (tmp1<<6 + (tmp2-tmp1)*(64-m) + (1 << (sh-1)) + PREP_BIAS*64) >> sh
srshl v25.4s, v25.4s, v29.4s
srshl v26.4s, v26.4s, v29.4s
srshl v27.4s, v27.4s, v29.4s
sqxtun v6.4h, v24.4s // iclip_pixel
sqxtun2 v6.8h, v25.4s
sqxtun v7.4h, v26.4s
sqxtun2 v7.8h, v27.4s
umin v6.8h, v6.8h, v31.8h // iclip_pixel
umin v7.8h, v7.8h, v31.8h
.if \type == 444
uzp1 v20.16b, v20.16b, v21.16b // 64 - m
uzp1 v21.16b, v22.16b, v23.16b
sub v20.16b, v1.16b, v20.16b // m
sub v21.16b, v1.16b, v21.16b
st1 {v20.16b}, [x6], #16
st1 {v21.16b}, [x10], #16
.elseif \type == 422
addp v20.8h, v20.8h, v21.8h // (64 - m) + (64 - n) (column wise addition)
addp v21.8h, v22.8h, v23.8h
xtn v20.8b, v20.8h
xtn v21.8b, v21.8h
uhsub v20.8b, v3.8b, v20.8b // ((129 - sign) - ((64 - m) + (64 - n))) >> 1
uhsub v21.8b, v3.8b, v21.8b
st1 {v20.8b}, [x6], #8
st1 {v21.8b}, [x10], #8
.elseif \type == 420
add v20.8h, v20.8h, v22.8h // (64 - my1) + (64 - my2) (row wise addition)
add v21.8h, v21.8h, v23.8h
addp v20.8h, v20.8h, v21.8h // (128 - m) + (128 - n) (column wise addition)
sub v20.8h, v3.8h, v20.8h // (256 - sign) - ((128 - m) + (128 - n))
rshrn v20.8b, v20.8h, #2 // ((256 - sign) - ((128 - m) + (128 - n)) + 2) >> 2
st1 {v20.8b}, [x6], #8
.endif
st1 {v4.8h, v5.8h}, [x0], #32
st1 {v6.8h, v7.8h}, [x12], #32
b.gt 16b
subs w5, w5, #2
add x2, x2, w4, uxtw #1
add x3, x3, w4, uxtw #1
add x7, x7, w4, uxtw #1
add x9, x9, w4, uxtw #1
.if \type == 444
add x6, x6, w4, uxtw
add x10, x10, w4, uxtw
.elseif \type == 422
add x6, x6, x11, lsr #1
add x10, x10, x11, lsr #1
.endif
add x0, x0, x1
add x12, x12, x1
b.gt 161b
ret
endfunc
jumptable w_mask_\type\()_tbl
.word 1280b - w_mask_\type\()_tbl
.word 640b - w_mask_\type\()_tbl
.word 320b - w_mask_\type\()_tbl
.word 160b - w_mask_\type\()_tbl
.word 80b - w_mask_\type\()_tbl
.word 40b - w_mask_\type\()_tbl
endjumptable
.endm
w_mask_fn 444
w_mask_fn 422
w_mask_fn 420
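// blend: dst += ((tmp - dst) * m + 32) >> 6 with a per-pixel 6-bit mask,
// implemented with sqrdmulh against -m << 9 to avoid widening to 32 bits.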
function blend_16bpc_neon, export=1
movrel x6, blend_tbl
clz w3, w3
sub w3, w3, #26
ldrsw x3, [x6, x3, lsl #2]
add x6, x6, x3
add x8, x0, x1
br x6
40:
AARCH64_VALID_JUMP_TARGET
lsl x1, x1, #1
4:
ld1 {v2.8b}, [x5], #8
ld1 {v1.8h}, [x2], #16
ldr d0, [x0]
neg v2.8b, v2.8b // -m
subs w4, w4, #2
ld1 {v0.d}[1], [x8]
sxtl v2.8h, v2.8b
shl v2.8h, v2.8h, #9 // -m << 9
sub v1.8h, v0.8h, v1.8h // a - b
sqrdmulh v1.8h, v1.8h, v2.8h // ((a-b)*-m + 32) >> 6
add v0.8h, v0.8h, v1.8h
st1 {v0.8b}, [x0], x1
st1 {v0.d}[1], [x8], x1
b.gt 4b
ret
80:
AARCH64_VALID_JUMP_TARGET
lsl x1, x1, #1
8:
ld1 {v4.16b}, [x5], #16
ld1 {v2.8h, v3.8h}, [x2], #32
neg v5.16b, v4.16b // -m
ld1 {v0.8h}, [x0]
ld1 {v1.8h}, [x8]
sxtl v4.8h, v5.8b
sxtl2 v5.8h, v5.16b
shl v4.8h, v4.8h, #9 // -m << 9
shl v5.8h, v5.8h, #9
sub v2.8h, v0.8h, v2.8h // a - b
sub v3.8h, v1.8h, v3.8h
subs w4, w4, #2
sqrdmulh v2.8h, v2.8h, v4.8h // ((a-b)*-m + 32) >> 6
sqrdmulh v3.8h, v3.8h, v5.8h
add v0.8h, v0.8h, v2.8h
add v1.8h, v1.8h, v3.8h
st1 {v0.8h}, [x0], x1
st1 {v1.8h}, [x8], x1
b.gt 8b
ret
160:
AARCH64_VALID_JUMP_TARGET
lsl x1, x1, #1
16:
ld1 {v16.16b, v17.16b}, [x5], #32
ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x2], #64
subs w4, w4, #2
neg v18.16b, v16.16b // -m
neg v19.16b, v17.16b
ld1 {v0.8h, v1.8h}, [x0]
sxtl v16.8h, v18.8b
sxtl2 v17.8h, v18.16b
sxtl v18.8h, v19.8b
sxtl2 v19.8h, v19.16b
ld1 {v2.8h, v3.8h}, [x8]
shl v16.8h, v16.8h, #9 // -m << 9
shl v17.8h, v17.8h, #9
shl v18.8h, v18.8h, #9
shl v19.8h, v19.8h, #9
sub v4.8h, v0.8h, v4.8h // a - b
sub v5.8h, v1.8h, v5.8h
sub v6.8h, v2.8h, v6.8h
sub v7.8h, v3.8h, v7.8h
sqrdmulh v4.8h, v4.8h, v16.8h // ((a-b)*-m + 32) >> 6
sqrdmulh v5.8h, v5.8h, v17.8h
sqrdmulh v6.8h, v6.8h, v18.8h
sqrdmulh v7.8h, v7.8h, v19.8h
add v0.8h, v0.8h, v4.8h
add v1.8h, v1.8h, v5.8h
add v2.8h, v2.8h, v6.8h
add v3.8h, v3.8h, v7.8h
st1 {v0.8h, v1.8h}, [x0], x1
st1 {v2.8h, v3.8h}, [x8], x1
b.gt 16b
ret
320:
AARCH64_VALID_JUMP_TARGET
32:
ld1 {v16.16b, v17.16b}, [x5], #32
ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x2], #64
subs w4, w4, #1
neg v18.16b, v16.16b // -m
neg v19.16b, v17.16b
sxtl v16.8h, v18.8b
sxtl2 v17.8h, v18.16b
sxtl v18.8h, v19.8b
sxtl2 v19.8h, v19.16b
ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0]
shl v16.8h, v16.8h, #9 // -m << 9
shl v17.8h, v17.8h, #9
shl v18.8h, v18.8h, #9
shl v19.8h, v19.8h, #9
sub v4.8h, v0.8h, v4.8h // a - b
sub v5.8h, v1.8h, v5.8h
sub v6.8h, v2.8h, v6.8h
sub v7.8h, v3.8h, v7.8h
sqrdmulh v4.8h, v4.8h, v16.8h // ((a-b)*-m + 32) >> 6
sqrdmulh v5.8h, v5.8h, v17.8h
sqrdmulh v6.8h, v6.8h, v18.8h
sqrdmulh v7.8h, v7.8h, v19.8h
add v0.8h, v0.8h, v4.8h
add v1.8h, v1.8h, v5.8h
add v2.8h, v2.8h, v6.8h
add v3.8h, v3.8h, v7.8h
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], x1
b.gt 32b
ret
endfunc
jumptable blend_tbl
.word 320b - blend_tbl
.word 160b - blend_tbl
.word 80b - blend_tbl
.word 40b - blend_tbl
endjumptable
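// blend_h: OBMC blend with the block above; the mask is a per-row value from
// obmc_masks indexed by block height, and only the top 3/4 of the rows are
// blended (w4 -= w4/4).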
function blend_h_16bpc_neon, export=1
movrel x6, blend_h_tbl
movrel x5, X(obmc_masks)
add x5, x5, w4, uxtw
sub w4, w4, w4, lsr #2
clz w7, w3
add x8, x0, x1
lsl x1, x1, #1
sub w7, w7, #24
ldrsw x7, [x6, x7, lsl #2]
add x6, x6, x7
br x6
20:
AARCH64_VALID_JUMP_TARGET
2:
ld2r {v2.8b, v3.8b}, [x5], #2
ld1 {v1.4h}, [x2], #8
ext v2.8b, v2.8b, v3.8b, #6
subs w4, w4, #2
neg v2.8b, v2.8b // -m
ldr s0, [x0]
ld1 {v0.s}[1], [x8]
sxtl v2.8h, v2.8b
shl v2.4h, v2.4h, #9 // -m << 9
sub v1.4h, v0.4h, v1.4h // a - b
sqrdmulh v1.4h, v1.4h, v2.4h // ((a-b)*-m + 32) >> 6
add v0.4h, v0.4h, v1.4h
st1 {v0.s}[0], [x0], x1
st1 {v0.s}[1], [x8], x1
b.gt 2b
ret
40:
AARCH64_VALID_JUMP_TARGET
4:
ld2r {v2.8b, v3.8b}, [x5], #2
ld1 {v1.8h}, [x2], #16
ext v2.8b, v2.8b, v3.8b, #4
subs w4, w4, #2
neg v2.8b, v2.8b // -m
ldr d0, [x0]
ld1 {v0.d}[1], [x8]
sxtl v2.8h, v2.8b
shl v2.8h, v2.8h, #9 // -m << 9
sub v1.8h, v0.8h, v1.8h // a - b
sqrdmulh v1.8h, v1.8h, v2.8h // ((a-b)*-m + 32) >> 6
add v0.8h, v0.8h, v1.8h
st1 {v0.8b}, [x0], x1
st1 {v0.d}[1], [x8], x1
b.gt 4b
ret
80:
AARCH64_VALID_JUMP_TARGET
8:
ld2r {v4.8b, v5.8b}, [x5], #2
ld1 {v2.8h, v3.8h}, [x2], #32
neg v4.8b, v4.8b // -m
neg v5.8b, v5.8b
ld1 {v0.8h}, [x0]
subs w4, w4, #2
sxtl v4.8h, v4.8b
sxtl v5.8h, v5.8b
ld1 {v1.8h}, [x8]
shl v4.8h, v4.8h, #9 // -m << 9
shl v5.8h, v5.8h, #9
sub v2.8h, v0.8h, v2.8h // a - b
sub v3.8h, v1.8h, v3.8h
sqrdmulh v2.8h, v2.8h, v4.8h // ((a-b)*-m + 32) >> 6
sqrdmulh v3.8h, v3.8h, v5.8h
add v0.8h, v0.8h, v2.8h
add v1.8h, v1.8h, v3.8h
st1 {v0.8h}, [x0], x1
st1 {v1.8h}, [x8], x1
b.gt 8b
ret
160:
AARCH64_VALID_JUMP_TARGET
16:
ld2r {v16.8b, v17.8b}, [x5], #2
ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x2], #64
neg v16.8b, v16.8b // -m
neg v17.8b, v17.8b
ld1 {v0.8h, v1.8h}, [x0]
ld1 {v2.8h, v3.8h}, [x8]
subs w4, w4, #2
sxtl v16.8h, v16.8b
sxtl v17.8h, v17.8b
shl v16.8h, v16.8h, #9 // -m << 9
shl v17.8h, v17.8h, #9
sub v4.8h, v0.8h, v4.8h // a - b
sub v5.8h, v1.8h, v5.8h
sub v6.8h, v2.8h, v6.8h
sub v7.8h, v3.8h, v7.8h
sqrdmulh v4.8h, v4.8h, v16.8h // ((a-b)*-m + 32) >> 6
sqrdmulh v5.8h, v5.8h, v16.8h
sqrdmulh v6.8h, v6.8h, v17.8h
sqrdmulh v7.8h, v7.8h, v17.8h
add v0.8h, v0.8h, v4.8h
add v1.8h, v1.8h, v5.8h
add v2.8h, v2.8h, v6.8h
add v3.8h, v3.8h, v7.8h
st1 {v0.8h, v1.8h}, [x0], x1
st1 {v2.8h, v3.8h}, [x8], x1
b.gt 16b
ret
1280:
640:
320:
AARCH64_VALID_JUMP_TARGET
sub x1, x1, w3, uxtw #1
add x7, x2, w3, uxtw #1
321:
ld2r {v24.8b, v25.8b}, [x5], #2
mov w6, w3
neg v24.8b, v24.8b // -m
neg v25.8b, v25.8b
sxtl v24.8h, v24.8b
sxtl v25.8h, v25.8b
shl v24.8h, v24.8h, #9 // -m << 9
shl v25.8h, v25.8h, #9
32:
ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [x2], #64
ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0]
subs w6, w6, #32
sub v16.8h, v0.8h, v16.8h // a - b
sub v17.8h, v1.8h, v17.8h
sub v18.8h, v2.8h, v18.8h
sub v19.8h, v3.8h, v19.8h
ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [x7], #64
ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x8]
sqrdmulh v16.8h, v16.8h, v24.8h // ((a-b)*-m + 32) >> 6
sqrdmulh v17.8h, v17.8h, v24.8h
sqrdmulh v18.8h, v18.8h, v24.8h
sqrdmulh v19.8h, v19.8h, v24.8h
sub v20.8h, v4.8h, v20.8h // a - b
sub v21.8h, v5.8h, v21.8h
sub v22.8h, v6.8h, v22.8h
sub v23.8h, v7.8h, v23.8h
add v0.8h, v0.8h, v16.8h
add v1.8h, v1.8h, v17.8h
add v2.8h, v2.8h, v18.8h
add v3.8h, v3.8h, v19.8h
sqrdmulh v20.8h, v20.8h, v25.8h // ((a-b)*-m + 32) >> 6
sqrdmulh v21.8h, v21.8h, v25.8h
sqrdmulh v22.8h, v22.8h, v25.8h
sqrdmulh v23.8h, v23.8h, v25.8h
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
add v4.8h, v4.8h, v20.8h
add v5.8h, v5.8h, v21.8h
add v6.8h, v6.8h, v22.8h
add v7.8h, v7.8h, v23.8h
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x8], #64
b.gt 32b
subs w4, w4, #2
add x0, x0, x1
add x8, x8, x1
add x2, x2, w3, uxtw #1
add x7, x7, w3, uxtw #1
b.gt 321b
ret
endfunc
jumptable blend_h_tbl
.word 1280b - blend_h_tbl
.word 640b - blend_h_tbl
.word 320b - blend_h_tbl
.word 160b - blend_h_tbl
.word 80b - blend_h_tbl
.word 40b - blend_h_tbl
.word 20b - blend_h_tbl
endjumptable
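// blend_v only blends the left part of each row (1, 3, 6, 12 or 24 pixels
// for w = 2, 4, 8, 16, 32), which is why the stores below write partial
// rows and the mask expansion only covers that many lanes.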
function blend_v_16bpc_neon, export=1
movrel x6, blend_v_tbl
movrel x5, X(obmc_masks)
add x5, x5, w3, uxtw
clz w3, w3
add x8, x0, x1
lsl x1, x1, #1
sub w3, w3, #26
ldrsw x3, [x6, x3, lsl #2]
add x6, x6, x3
br x6
20:
AARCH64_VALID_JUMP_TARGET
ld1r {v2.8b}, [x5]
neg v2.8b, v2.8b // -m
sxtl v2.8h, v2.8b
shl v2.4h, v2.4h, #9 // -m << 9
2:
ldr s1, [x2], #4
ldr h0, [x0]
subs w4, w4, #2
ld1 {v1.h}[1], [x2]
ld1 {v0.h}[1], [x8]
add x2, x2, #4
sub v1.4h, v0.4h, v1.4h // a - b
sqrdmulh v1.4h, v1.4h, v2.4h // ((a-b)*-m + 32) >> 6
add v0.4h, v0.4h, v1.4h
st1 {v0.h}[0], [x0], x1
st1 {v0.h}[1], [x8], x1
b.gt 2b
ret
40:
AARCH64_VALID_JUMP_TARGET
ld1r {v2.2s}, [x5]
sub x1, x1, #4
neg v2.8b, v2.8b // -m
sxtl v2.8h, v2.8b
shl v2.8h, v2.8h, #9 // -m << 9
4:
ld1 {v1.8h}, [x2], #16
ldr d0, [x0]
ld1 {v0.d}[1], [x8]
subs w4, w4, #2
sub v1.8h, v0.8h, v1.8h // a - b
sqrdmulh v1.8h, v1.8h, v2.8h // ((a-b)*-m + 32) >> 6
add v0.8h, v0.8h, v1.8h
str s0, [x0], #4
st1 {v0.s}[2], [x8], #4
st1 {v0.h}[2], [x0], x1
st1 {v0.h}[6], [x8], x1
b.gt 4b
ret
80:
AARCH64_VALID_JUMP_TARGET
ld1 {v4.8b}, [x5]
sub x1, x1, #8
neg v4.8b, v4.8b // -m
sxtl v4.8h, v4.8b
shl v4.8h, v4.8h, #9 // -m << 9
8:
ld1 {v2.8h, v3.8h}, [x2], #32
ld1 {v0.8h}, [x0]
ld1 {v1.8h}, [x8]
subs w4, w4, #2
sub v2.8h, v0.8h, v2.8h // a - b
sub v3.8h, v1.8h, v3.8h
sqrdmulh v2.8h, v2.8h, v4.8h // ((a-b)*-m + 32) >> 6
sqrdmulh v3.8h, v3.8h, v4.8h
add v0.8h, v0.8h, v2.8h
add v1.8h, v1.8h, v3.8h
str d0, [x0], #8
str d1, [x8], #8
st1 {v0.s}[2], [x0], x1
st1 {v1.s}[2], [x8], x1
b.gt 8b
ret
160:
AARCH64_VALID_JUMP_TARGET
ld1 {v16.16b}, [x5]
sub x1, x1, #16
neg v17.16b, v16.16b // -m
sxtl v16.8h, v17.8b
sxtl2 v17.8h, v17.16b
shl v16.8h, v16.8h, #9 // -m << 9
shl v17.4h, v17.4h, #9
16:
ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x2], #64
ld1 {v0.8h, v1.8h}, [x0]
subs w4, w4, #2
ld1 {v2.8h, v3.8h}, [x8]
sub v4.8h, v0.8h, v4.8h // a - b
sub v5.4h, v1.4h, v5.4h
sub v6.8h, v2.8h, v6.8h
sub v7.4h, v3.4h, v7.4h
sqrdmulh v4.8h, v4.8h, v16.8h // ((a-b)*-m + 32) >> 6
sqrdmulh v5.4h, v5.4h, v17.4h
sqrdmulh v6.8h, v6.8h, v16.8h
sqrdmulh v7.4h, v7.4h, v17.4h
add v0.8h, v0.8h, v4.8h
add v1.4h, v1.4h, v5.4h
add v2.8h, v2.8h, v6.8h
add v3.4h, v3.4h, v7.4h
st1 {v0.8h}, [x0], #16
st1 {v2.8h}, [x8], #16
st1 {v1.4h}, [x0], x1
st1 {v3.4h}, [x8], x1
b.gt 16b
ret
320:
AARCH64_VALID_JUMP_TARGET
ld1 {v24.16b, v25.16b}, [x5]
neg v26.16b, v24.16b // -m
neg v27.8b, v25.8b
sxtl v24.8h, v26.8b
sxtl2 v25.8h, v26.16b
sxtl v26.8h, v27.8b
shl v24.8h, v24.8h, #9 // -m << 9
shl v25.8h, v25.8h, #9
shl v26.8h, v26.8h, #9
32:
ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [x2], #64
ld1 {v0.8h, v1.8h, v2.8h}, [x0]
ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [x2], #64
ld1 {v4.8h, v5.8h, v6.8h}, [x8]
subs w4, w4, #2
sub v16.8h, v0.8h, v16.8h // a - b
sub v17.8h, v1.8h, v17.8h
sub v18.8h, v2.8h, v18.8h
sub v20.8h, v4.8h, v20.8h
sub v21.8h, v5.8h, v21.8h
sub v22.8h, v6.8h, v22.8h
sqrdmulh v16.8h, v16.8h, v24.8h // ((a-b)*-m + 32) >> 6
sqrdmulh v17.8h, v17.8h, v25.8h
sqrdmulh v18.8h, v18.8h, v26.8h
sqrdmulh v20.8h, v20.8h, v24.8h
sqrdmulh v21.8h, v21.8h, v25.8h
sqrdmulh v22.8h, v22.8h, v26.8h
add v0.8h, v0.8h, v16.8h
add v1.8h, v1.8h, v17.8h
add v2.8h, v2.8h, v18.8h
add v4.8h, v4.8h, v20.8h
add v5.8h, v5.8h, v21.8h
add v6.8h, v6.8h, v22.8h
st1 {v0.8h, v1.8h, v2.8h}, [x0], x1
st1 {v4.8h, v5.8h, v6.8h}, [x8], x1
b.gt 32b
ret
endfunc
jumptable blend_v_tbl
.word 320b - blend_v_tbl
.word 160b - blend_v_tbl
.word 80b - blend_v_tbl
.word 40b - blend_v_tbl
.word 20b - blend_v_tbl
endjumptable
// This has the same signature as the put_8tap functions,
// and assumes that x9 is set to (clz(w)-24).
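// Each jumptable entry is the 32-bit offset of its target label from the
// table base; the dispatch loads it with ldrsw, adds it onto the table
// address and branches. With x9 = clz(w)-24, w = 128, 64, ..., 2 map to
// entries 0, 1, ..., 6.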
function put_16bpc_neon, export=1
movrel x10, put_16bpc_tbl
ldrsw x9, [x10, x9, lsl #2]
add x10, x10, x9
br x10
20:
AARCH64_VALID_JUMP_TARGET
2:
ld1r {v0.4s}, [x2], x3
ld1r {v1.4s}, [x2], x3
subs w5, w5, #2
st1 {v0.s}[0], [x0], x1
st1 {v1.s}[0], [x0], x1
b.gt 2b
ret
40:
AARCH64_VALID_JUMP_TARGET
4:
ld1 {v0.4h}, [x2], x3
ld1 {v1.4h}, [x2], x3
subs w5, w5, #2
st1 {v0.4h}, [x0], x1
st1 {v1.4h}, [x0], x1
b.gt 4b
ret
80:
AARCH64_VALID_JUMP_TARGET
add x8, x0, x1
lsl x1, x1, #1
add x9, x2, x3
lsl x3, x3, #1
8:
ld1 {v0.8h}, [x2], x3
ld1 {v1.8h}, [x9], x3
subs w5, w5, #2
st1 {v0.8h}, [x0], x1
st1 {v1.8h}, [x8], x1
b.gt 8b
ret
160:
AARCH64_VALID_JUMP_TARGET
16:
ldp x6, x7, [x2]
ldp x8, x9, [x2, #16]
stp x6, x7, [x0]
subs w5, w5, #1
stp x8, x9, [x0, #16]
add x2, x2, x3
add x0, x0, x1
b.gt 16b
ret
320:
AARCH64_VALID_JUMP_TARGET
32:
ldp x6, x7, [x2]
ldp x8, x9, [x2, #16]
stp x6, x7, [x0]
ldp x10, x11, [x2, #32]
stp x8, x9, [x0, #16]
subs w5, w5, #1
ldp x12, x13, [x2, #48]
stp x10, x11, [x0, #32]
stp x12, x13, [x0, #48]
add x2, x2, x3
add x0, x0, x1
b.gt 32b
ret
640:
AARCH64_VALID_JUMP_TARGET
64:
ldp q0, q1, [x2]
ldp q2, q3, [x2, #32]
stp q0, q1, [x0]
ldp q4, q5, [x2, #64]
stp q2, q3, [x0, #32]
ldp q6, q7, [x2, #96]
subs w5, w5, #1
stp q4, q5, [x0, #64]
stp q6, q7, [x0, #96]
add x2, x2, x3
add x0, x0, x1
b.gt 64b
ret
1280:
AARCH64_VALID_JUMP_TARGET
128:
ldp q0, q1, [x2]
ldp q2, q3, [x2, #32]
stp q0, q1, [x0]
ldp q4, q5, [x2, #64]
stp q2, q3, [x0, #32]
ldp q6, q7, [x2, #96]
subs w5, w5, #1
stp q4, q5, [x0, #64]
ldp q16, q17, [x2, #128]
stp q6, q7, [x0, #96]
ldp q18, q19, [x2, #160]
stp q16, q17, [x0, #128]
ldp q20, q21, [x2, #192]
stp q18, q19, [x0, #160]
ldp q22, q23, [x2, #224]
stp q20, q21, [x0, #192]
stp q22, q23, [x0, #224]
add x2, x2, x3
add x0, x0, x1
b.gt 128b
ret
endfunc
jumptable put_16bpc_tbl
.word 1280b - put_16bpc_tbl
.word 640b - put_16bpc_tbl
.word 320b - put_16bpc_tbl
.word 160b - put_16bpc_tbl
.word 80b - put_16bpc_tbl
.word 40b - put_16bpc_tbl
.word 20b - put_16bpc_tbl
endjumptable
// This has the same signature as the prep_8tap functions,
// and assumes that x9 is set to (clz(w)-24), w7 to intermediate_bits and
// x8 to w*2.
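// The prep output stays in int16: every pixel is shifted left by
// intermediate_bits (v31) and has PREP_BIAS subtracted (v30) so the
// biased intermediate fits in a signed 16-bit lane.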
function prep_16bpc_neon
movrel x10, prep_16bpc_tbl
ldrsw x9, [x10, x9, lsl #2]
dup v31.8h, w7 // intermediate_bits
movi v30.8h, #(PREP_BIAS >> 8), lsl #8
add x10, x10, x9
br x10
40:
AARCH64_VALID_JUMP_TARGET
add x9, x1, x2
lsl x2, x2, #1
4:
ld1 {v0.8b}, [x1], x2
ld1 {v0.d}[1], [x9], x2
subs w4, w4, #2
sshl v0.8h, v0.8h, v31.8h
sub v0.8h, v0.8h, v30.8h
st1 {v0.8h}, [x0], #16
b.gt 4b
ret
80:
AARCH64_VALID_JUMP_TARGET
add x9, x1, x2
lsl x2, x2, #1
8:
ld1 {v0.8h}, [x1], x2
ld1 {v1.8h}, [x9], x2
subs w4, w4, #2
sshl v0.8h, v0.8h, v31.8h
sshl v1.8h, v1.8h, v31.8h
sub v0.8h, v0.8h, v30.8h
sub v1.8h, v1.8h, v30.8h
st1 {v0.8h, v1.8h}, [x0], #32
b.gt 8b
ret
160:
AARCH64_VALID_JUMP_TARGET
16:
ldp q0, q1, [x1]
add x1, x1, x2
sshl v0.8h, v0.8h, v31.8h
ldp q2, q3, [x1]
add x1, x1, x2
subs w4, w4, #2
sshl v1.8h, v1.8h, v31.8h
sshl v2.8h, v2.8h, v31.8h
sshl v3.8h, v3.8h, v31.8h
sub v0.8h, v0.8h, v30.8h
sub v1.8h, v1.8h, v30.8h
sub v2.8h, v2.8h, v30.8h
sub v3.8h, v3.8h, v30.8h
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
b.gt 16b
ret
320:
AARCH64_VALID_JUMP_TARGET
32:
ldp q0, q1, [x1]
sshl v0.8h, v0.8h, v31.8h
ldp q2, q3, [x1, #32]
add x1, x1, x2
sshl v1.8h, v1.8h, v31.8h
sshl v2.8h, v2.8h, v31.8h
sshl v3.8h, v3.8h, v31.8h
subs w4, w4, #1
sub v0.8h, v0.8h, v30.8h
sub v1.8h, v1.8h, v30.8h
sub v2.8h, v2.8h, v30.8h
sub v3.8h, v3.8h, v30.8h
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
b.gt 32b
ret
640:
AARCH64_VALID_JUMP_TARGET
64:
ldp q0, q1, [x1]
subs w4, w4, #1
sshl v0.8h, v0.8h, v31.8h
ldp q2, q3, [x1, #32]
sshl v1.8h, v1.8h, v31.8h
ldp q4, q5, [x1, #64]
sshl v2.8h, v2.8h, v31.8h
sshl v3.8h, v3.8h, v31.8h
ldp q6, q7, [x1, #96]
add x1, x1, x2
sshl v4.8h, v4.8h, v31.8h
sshl v5.8h, v5.8h, v31.8h
sshl v6.8h, v6.8h, v31.8h
sshl v7.8h, v7.8h, v31.8h
sub v0.8h, v0.8h, v30.8h
sub v1.8h, v1.8h, v30.8h
sub v2.8h, v2.8h, v30.8h
sub v3.8h, v3.8h, v30.8h
stp q0, q1, [x0]
sub v4.8h, v4.8h, v30.8h
sub v5.8h, v5.8h, v30.8h
stp q2, q3, [x0, #32]
sub v6.8h, v6.8h, v30.8h
sub v7.8h, v7.8h, v30.8h
stp q4, q5, [x0, #64]
stp q6, q7, [x0, #96]
add x0, x0, x8
b.gt 64b
ret
1280:
AARCH64_VALID_JUMP_TARGET
128:
ldp q0, q1, [x1]
subs w4, w4, #1
sshl v0.8h, v0.8h, v31.8h
ldp q2, q3, [x1, #32]
sshl v1.8h, v1.8h, v31.8h
ldp q4, q5, [x1, #64]
sshl v2.8h, v2.8h, v31.8h
sshl v3.8h, v3.8h, v31.8h
ldp q6, q7, [x1, #96]
sshl v4.8h, v4.8h, v31.8h
sshl v5.8h, v5.8h, v31.8h
ldp q16, q17, [x1, #128]
sshl v6.8h, v6.8h, v31.8h
sshl v7.8h, v7.8h, v31.8h
ldp q18, q19, [x1, #160]
sshl v16.8h, v16.8h, v31.8h
sshl v17.8h, v17.8h, v31.8h
ldp q20, q21, [x1, #192]
sshl v18.8h, v18.8h, v31.8h
sshl v19.8h, v19.8h, v31.8h
ldp q22, q23, [x1, #224]
add x1, x1, x2
sshl v20.8h, v20.8h, v31.8h
sshl v21.8h, v21.8h, v31.8h
sshl v22.8h, v22.8h, v31.8h
sshl v23.8h, v23.8h, v31.8h
sub v0.8h, v0.8h, v30.8h
sub v1.8h, v1.8h, v30.8h
sub v2.8h, v2.8h, v30.8h
sub v3.8h, v3.8h, v30.8h
stp q0, q1, [x0]
sub v4.8h, v4.8h, v30.8h
sub v5.8h, v5.8h, v30.8h
stp q2, q3, [x0, #32]
sub v6.8h, v6.8h, v30.8h
sub v7.8h, v7.8h, v30.8h
stp q4, q5, [x0, #64]
sub v16.8h, v16.8h, v30.8h
sub v17.8h, v17.8h, v30.8h
stp q6, q7, [x0, #96]
sub v18.8h, v18.8h, v30.8h
sub v19.8h, v19.8h, v30.8h
stp q16, q17, [x0, #128]
sub v20.8h, v20.8h, v30.8h
sub v21.8h, v21.8h, v30.8h
stp q18, q19, [x0, #160]
sub v22.8h, v22.8h, v30.8h
sub v23.8h, v23.8h, v30.8h
stp q20, q21, [x0, #192]
stp q22, q23, [x0, #224]
add x0, x0, x8
b.gt 128b
ret
endfunc
jumptable prep_16bpc_tbl
.word 1280b - prep_16bpc_tbl
.word 640b - prep_16bpc_tbl
.word 320b - prep_16bpc_tbl
.word 160b - prep_16bpc_tbl
.word 80b - prep_16bpc_tbl
.word 40b - prep_16bpc_tbl
endjumptable
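// The macros below factor out the row loads/stores, lane interleaving and
// the 4/6/8-tap multiply-accumulate chains, so the per-width paths in the
// filter functions further down can share one body.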
.macro load_slice s0, s1, strd, wd, d0, d1, d2, d3, d4, d5, d6
ld1 {\d0\wd}[0], [\s0], \strd
ld1 {\d1\wd}[0], [\s1], \strd
.ifnb \d2
ld1 {\d2\wd}[0], [\s0], \strd
ld1 {\d3\wd}[0], [\s1], \strd
.endif
.ifnb \d4
ld1 {\d4\wd}[0], [\s0], \strd
.endif
.ifnb \d5
ld1 {\d5\wd}[0], [\s1], \strd
.endif
.ifnb \d6
ld1 {\d6\wd}[0], [\s0], \strd
.endif
.endm
.macro load_reg s0, s1, strd, wd, d0, d1, d2, d3, d4, d5, d6
ld1 {\d0\wd}, [\s0], \strd
ld1 {\d1\wd}, [\s1], \strd
.ifnb \d2
ld1 {\d2\wd}, [\s0], \strd
ld1 {\d3\wd}, [\s1], \strd
.endif
.ifnb \d4
ld1 {\d4\wd}, [\s0], \strd
.endif
.ifnb \d5
ld1 {\d5\wd}, [\s1], \strd
.endif
.ifnb \d6
ld1 {\d6\wd}, [\s0], \strd
.endif
.endm
.macro load_regpair s0, s1, strd, wd, d0, d1, d2, d3, d4, d5
ld1 {\d0\wd, \d1\wd}, [\s0], \strd
.ifnb \d2
ld1 {\d2\wd, \d3\wd}, [\s1], \strd
.endif
.ifnb \d4
ld1 {\d4\wd, \d5\wd}, [\s0], \strd
.endif
.endm
.macro load_s s0, s1, strd, d0, d1, d2, d3, d4, d5, d6
load_slice \s0, \s1, \strd, .s, \d0, \d1, \d2, \d3, \d4, \d5, \d6
.endm
.macro load_4h s0, s1, strd, d0, d1, d2, d3, d4, d5, d6
load_reg \s0, \s1, \strd, .4h, \d0, \d1, \d2, \d3, \d4, \d5, \d6
.endm
.macro load_8h s0, s1, strd, d0, d1, d2, d3, d4, d5, d6
load_reg \s0, \s1, \strd, .8h, \d0, \d1, \d2, \d3, \d4, \d5, \d6
.endm
.macro load_16h s0, s1, strd, d0, d1, d2, d3, d4, d5
load_regpair \s0, \s1, \strd, .8h, \d0, \d1, \d2, \d3, \d4, \d5
.endm
.macro interleave_1 wd, r0, r1, r2, r3, r4
trn1 \r0\wd, \r0\wd, \r1\wd
trn1 \r1\wd, \r1\wd, \r2\wd
.ifnb \r3
trn1 \r2\wd, \r2\wd, \r3\wd
trn1 \r3\wd, \r3\wd, \r4\wd
.endif
.endm
.macro interleave_1_s r0, r1, r2, r3, r4
interleave_1 .2s, \r0, \r1, \r2, \r3, \r4
.endm
.macro umin_h c, wd, r0, r1, r2, r3
umin \r0\wd, \r0\wd, \c\wd
.ifnb \r1
umin \r1\wd, \r1\wd, \c\wd
.endif
.ifnb \r2
umin \r2\wd, \r2\wd, \c\wd
umin \r3\wd, \r3\wd, \c\wd
.endif
.endm
.macro sub_h c, wd, r0, r1, r2, r3
sub \r0\wd, \r0\wd, \c\wd
.ifnb \r1
sub \r1\wd, \r1\wd, \c\wd
.endif
.ifnb \r2
sub \r2\wd, \r2\wd, \c\wd
sub \r3\wd, \r3\wd, \c\wd
.endif
.endm
.macro smull_smlal_4tap d, s0, s1, s2, s3
smull \d\().4s, \s0\().4h, v0.h[0]
smlal \d\().4s, \s1\().4h, v0.h[1]
smlal \d\().4s, \s2\().4h, v0.h[2]
smlal \d\().4s, \s3\().4h, v0.h[3]
.endm
.macro smull2_smlal2_4tap d, s0, s1, s2, s3
smull2 \d\().4s, \s0\().8h, v0.h[0]
smlal2 \d\().4s, \s1\().8h, v0.h[1]
smlal2 \d\().4s, \s2\().8h, v0.h[2]
smlal2 \d\().4s, \s3\().8h, v0.h[3]
.endm
.macro smull_smlal_6tap d, s0, s1, s2, s3, s4, s5, s6, s7
smull \d\().4s, \s1\().4h, v0.h[1]
smlal \d\().4s, \s2\().4h, v0.h[2]
smlal \d\().4s, \s3\().4h, v0.h[3]
smlal \d\().4s, \s4\().4h, v0.h[4]
smlal \d\().4s, \s5\().4h, v0.h[5]
smlal \d\().4s, \s6\().4h, v0.h[6]
.endm
.macro smull2_smlal2_6tap d, s0, s1, s2, s3, s4, s5, s6, s7
smull2 \d\().4s, \s1\().8h, v0.h[1]
smlal2 \d\().4s, \s2\().8h, v0.h[2]
smlal2 \d\().4s, \s3\().8h, v0.h[3]
smlal2 \d\().4s, \s4\().8h, v0.h[4]
smlal2 \d\().4s, \s5\().8h, v0.h[5]
smlal2 \d\().4s, \s6\().8h, v0.h[6]
.endm
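// The 6tap variants take the same eight source arguments as the 8tap ones
// but skip s0/s7 and coefficients 0/7, presumably because the outermost
// taps are zero for the filters routed through the 6tap path.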
.macro smull_smlal_8tap d, s0, s1, s2, s3, s4, s5, s6, s7
smull \d\().4s, \s0\().4h, v0.h[0]
smlal \d\().4s, \s1\().4h, v0.h[1]
smlal \d\().4s, \s2\().4h, v0.h[2]
smlal \d\().4s, \s3\().4h, v0.h[3]
smlal \d\().4s, \s4\().4h, v0.h[4]
smlal \d\().4s, \s5\().4h, v0.h[5]
smlal \d\().4s, \s6\().4h, v0.h[6]
smlal \d\().4s, \s7\().4h, v0.h[7]
.endm
.macro smull2_smlal2_8tap d, s0, s1, s2, s3, s4, s5, s6, s7
smull2 \d\().4s, \s0\().8h, v0.h[0]
smlal2 \d\().4s, \s1\().8h, v0.h[1]
smlal2 \d\().4s, \s2\().8h, v0.h[2]
smlal2 \d\().4s, \s3\().8h, v0.h[3]
smlal2 \d\().4s, \s4\().8h, v0.h[4]
smlal2 \d\().4s, \s5\().8h, v0.h[5]
smlal2 \d\().4s, \s6\().8h, v0.h[6]
smlal2 \d\().4s, \s7\().8h, v0.h[7]
.endm
.macro sqrshrun_h shift, r0, r1, r2, r3
sqrshrun \r0\().4h, \r0\().4s, #\shift
.ifnb \r1
sqrshrun2 \r0\().8h, \r1\().4s, #\shift
.endif
.ifnb \r2
sqrshrun \r2\().4h, \r2\().4s, #\shift
sqrshrun2 \r2\().8h, \r3\().4s, #\shift
.endif
.endm
.macro xtn_h r0, r1, r2, r3
uzp1 \r0\().8h, \r0\().8h, \r1\().8h // Same as xtn, xtn2
.ifnb \r2
uzp1 \r2\().8h, \r2\().8h, \r3\().8h // Ditto
.endif
.endm
.macro srshl_s shift, r0, r1, r2, r3
srshl \r0\().4s, \r0\().4s, \shift\().4s
srshl \r1\().4s, \r1\().4s, \shift\().4s
.ifnb \r2
srshl \r2\().4s, \r2\().4s, \shift\().4s
srshl \r3\().4s, \r3\().4s, \shift\().4s
.endif
.endm
.macro st_s strd, reg, lanes
st1 {\reg\().s}[0], [x0], \strd
st1 {\reg\().s}[1], [x9], \strd
.if \lanes > 2
st1 {\reg\().s}[2], [x0], \strd
st1 {\reg\().s}[3], [x9], \strd
.endif
.endm
.macro st_d strd, r0, r1
st1 {\r0\().8b}, [x0], \strd
st1 {\r0\().d}[1], [x9], \strd
.ifnb \r1
st1 {\r1\().8b}, [x0], \strd
st1 {\r1\().d}[1], [x9], \strd
.endif
.endm
.macro shift_store_4 type, strd, r0, r1, r2, r3
.ifc \type, put
sqrshrun_h 6, \r0, \r1, \r2, \r3
umin_h v31, .8h, \r0, \r2
.else
srshl_s v30, \r0, \r1, \r2, \r3 // -(6-intermediate_bits)
xtn_h \r0, \r1, \r2, \r3
sub_h v29, .8h, \r0, \r2 // PREP_BIAS
.endif
st_d \strd, \r0, \r2
.endm
.macro st_reg strd, wd, r0, r1, r2, r3, r4, r5, r6, r7
st1 {\r0\wd}, [x0], \strd
st1 {\r1\wd}, [x9], \strd
.ifnb \r2
st1 {\r2\wd}, [x0], \strd
st1 {\r3\wd}, [x9], \strd
.endif
.ifnb \r4
st1 {\r4\wd}, [x0], \strd
st1 {\r5\wd}, [x9], \strd
st1 {\r6\wd}, [x0], \strd
st1 {\r7\wd}, [x9], \strd
.endif
.endm
.macro st_8h strd, r0, r1, r2, r3, r4, r5, r6, r7
st_reg \strd, .8h, \r0, \r1, \r2, \r3, \r4, \r5, \r6, \r7
.endm
.macro shift_store_8 type, strd, r0, r1, r2, r3
.ifc \type, put
sqrshrun_h 6, \r0, \r1, \r2, \r3
umin_h v31, .8h, \r0, \r2
.else
srshl_s v30, \r0, \r1, \r2, \r3 // -(6-intermediate_bits)
xtn_h \r0, \r1, \r2, \r3
sub_h v29, .8h, \r0, \r2 // PREP_BIAS
.endif
st_8h \strd, \r0, \r2
.endm
.macro shift_store_16 type, strd, dst, r0, r1, r2, r3
.ifc \type, put
sqrshrun_h 6, \r0, \r1, \r2, \r3
umin \r0\().8h, \r0\().8h, v31.8h
umin \r1\().8h, \r2\().8h, v31.8h
.else
srshl_s v30, \r0, \r1, \r2, \r3 // -(6-intermediate_bits)
xtn_h \r0, \r1, \r2, \r3
sub \r0\().8h, \r0\().8h, v29.8h
sub \r1\().8h, \r2\().8h, v29.8h
.endif
st1 {\r0\().8h, \r1\().8h}, [\dst], \strd
.endm
.macro make_8tap_fn op, type, type_h, type_v, taps
function \op\()_8tap_\type\()_16bpc_neon, export=1
mov w9, \type_h
mov w10, \type_v
b \op\()_\taps\()_neon
endfunc
.endm
// No spaces in these expressions, due to gas-preprocessor.
#define REGULAR ((0*15<<7)|3*15)
#define SMOOTH ((1*15<<7)|4*15)
#define SHARP ((2*15<<7)|3*15)
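// Multiplying the subpel position by 0x4081 replicates it into bits 0, 7
// and 14; after adding one of the constants above, bits 0..6 hold the
// filter table index used when the relevant block dimension is <= 4,
// bits 7..13 the index used for larger sizes, and bits 14+ still hold the
// raw position, so tst #(0x7f << 14) can test whether any subpel offset is
// present at all.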
.macro filter_fn type, dst, d_strd, src, s_strd, w, h, mx, xmx, my, xmy, bdmax, ds2, sr2, taps
function \type\()_\taps\()_neon
.ifc \bdmax, w8
ldr w8, [sp]
.endif
mov w11, #0x4081 // (1 << 14) | (1 << 7) | (1 << 0)
mul \mx, \mx, w11
mul \my, \my, w11
add \mx, \mx, w9 // mx, 8tap_h, 4tap_h
add \my, \my, w10 // my, 8tap_v, 4tap_v
.ifc \type, prep
uxtw \d_strd, \w
lsl \d_strd, \d_strd, #1
.endif
dup v31.8h, \bdmax // bitdepth_max
clz \bdmax, \bdmax
clz w9, \w
sub \bdmax, \bdmax, #18 // intermediate_bits = clz(bitdepth_max) - 18
mov w12, #6
tst \mx, #(0x7f << 14)
sub w9, w9, #24
add w13, w12, \bdmax // 6 + intermediate_bits
sub w12, w12, \bdmax // 6 - intermediate_bits
movrel x11, X(mc_subpel_filters), -8
b.ne L(\type\()_\taps\()_h)
tst \my, #(0x7f << 14)
b.ne L(\type\()_\taps\()_v)
b \type\()_16bpc_neon
L(\type\()_\taps\()_h):
cmp \w, #4
ubfx w10, \mx, #7, #7
and \mx, \mx, #0x7f
b.le 4f
mov \mx, w10
4:
tst \my, #(0x7f << 14)
add \xmx, x11, \mx, uxtw #3
b.ne L(\type\()_\taps\()_hv)
movrel x10, \type\()_\taps\()_h_tbl
ldrsw x9, [x10, x9, lsl #2]
.ifc \type, put
mov w12, #34 // rounding for 10-bit
mov w13, #40 // rounding for 12-bit
cmp \bdmax, #2 // \bdmax holds intermediate_bits: 10-bit: 4, 12-bit: 2
csel w12, w12, w13, ne // select the rounding constant based on intermediate_bits
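// The seeded rounding presumably combines the 32 for the final >> 6 with
// the rounding of the skipped intermediate stage,
// 32 + ((1 << (6 - intermediate_bits)) >> 1), giving 34 for 10-bit and 40
// for 12-bit; the accumulators start out as v30 below, so a plain
// (non-rounding) sqshrun #6 finishes the filter.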
.else
neg w12, w12 // -(6 - intermediate_bits)
movi v28.8h, #(PREP_BIAS >> 8), lsl #8
.endif
add x10, x10, x9
dup v30.4s, w12 // rounding or shift amount
br x10
20: // 2xN h
AARCH64_VALID_JUMP_TARGET
.ifc \type, put
ldur s0, [\xmx, #2]
sub \src, \src, #2
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \d_strd, \d_strd, #1
lsl \s_strd, \s_strd, #1
sxtl v0.8h, v0.8b
2:
ld1 {v4.8h}, [\src], \s_strd
ld1 {v6.8h}, [\sr2], \s_strd
mov v2.16b, v30.16b
ext v5.16b, v4.16b, v4.16b, #2
ext v7.16b, v6.16b, v6.16b, #2
subs \h, \h, #2
trn1 v3.2s, v4.2s, v6.2s
trn2 v6.2s, v4.2s, v6.2s
trn1 v4.2s, v5.2s, v7.2s
trn2 v7.2s, v5.2s, v7.2s
smlal v2.4s, v3.4h, v0.h[0]
smlal v2.4s, v4.4h, v0.h[1]
smlal v2.4s, v6.4h, v0.h[2]
smlal v2.4s, v7.4h, v0.h[3]
sqshrun v2.4h, v2.4s, #6
umin v2.4h, v2.4h, v31.4h
st1 {v2.s}[0], [\dst], \d_strd
st1 {v2.s}[1], [\ds2], \d_strd
b.gt 2b
ret
.endif
40: // 4xN h
AARCH64_VALID_JUMP_TARGET
ldur s0, [\xmx, #2]
sub \src, \src, #2
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \d_strd, \d_strd, #1
lsl \s_strd, \s_strd, #1
sxtl v0.8h, v0.8b
4:
ld1 {v16.8h}, [\src], \s_strd
ld1 {v20.8h}, [\sr2], \s_strd
.ifc \type, put
mov v2.16b, v30.16b
mov v3.16b, v30.16b
.endif
ext v17.16b, v16.16b, v16.16b, #2
ext v18.16b, v16.16b, v16.16b, #4
ext v19.16b, v16.16b, v16.16b, #6
ext v21.16b, v20.16b, v20.16b, #2
ext v22.16b, v20.16b, v20.16b, #4
ext v23.16b, v20.16b, v20.16b, #6
subs \h, \h, #2
.ifc \type, put
smlal v2.4s, v16.4h, v0.h[0]
.else
smull v2.4s, v16.4h, v0.h[0]
.endif
smlal v2.4s, v17.4h, v0.h[1]
smlal v2.4s, v18.4h, v0.h[2]
smlal v2.4s, v19.4h, v0.h[3]
.ifc \type, put
smlal v3.4s, v20.4h, v0.h[0]
.else
smull v3.4s, v20.4h, v0.h[0]
.endif
smlal v3.4s, v21.4h, v0.h[1]
smlal v3.4s, v22.4h, v0.h[2]
smlal v3.4s, v23.4h, v0.h[3]
.ifc \type, put
sqshrun v16.4h, v2.4s, #6
sqshrun2 v16.8h, v3.4s, #6
umin v16.8h, v16.8h, v31.8h
.else
srshl v16.4s, v2.4s, v30.4s // -(6-intermediate_bits)
srshl v20.4s, v3.4s, v30.4s // -(6-intermediate_bits)
uzp1 v16.8h, v16.8h, v20.8h // Same as xtn, xtn2
sub v16.8h, v16.8h, v28.8h // PREP_BIAS
.endif
st1 {v16.8b}, [\dst], \d_strd
st1 {v16.d}[1], [\ds2], \d_strd
b.gt 4b
ret
80:
160:
320:
640:
1280: // 8xN, 16xN, 32xN, ... h
AARCH64_VALID_JUMP_TARGET
ld1 {v0.8b}, [\xmx]
.ifc \taps, 6tap
sub \src, \src, #4
.else
sub \src, \src, #6
.endif
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
sxtl v0.8h, v0.8b
sub \s_strd, \s_strd, \w, uxtw #1
sub \s_strd, \s_strd, #16
.ifc \type, put
lsl \d_strd, \d_strd, #1
sub \d_strd, \d_strd, \w, uxtw #1
.endif
81:
ld1 {v16.8h, v17.8h}, [\src], #32
ld1 {v20.8h, v21.8h}, [\sr2], #32
mov \mx, \w
8:
.ifc \taps, 6tap
.ifc \type, put
mov v18.16b, v30.16b
mov v19.16b, v30.16b
smlal v18.4s, v16.4h, v0.h[1]
smlal2 v19.4s, v16.8h, v0.h[1]
mov v22.16b, v30.16b
mov v23.16b, v30.16b
smlal v22.4s, v20.4h, v0.h[1]
smlal2 v23.4s, v20.8h, v0.h[1]
.else
smull v18.4s, v16.4h, v0.h[1]
smull2 v19.4s, v16.8h, v0.h[1]
smull v22.4s, v20.4h, v0.h[1]
smull2 v23.4s, v20.8h, v0.h[1]
.endif
.irpc i, 23456
ext v24.16b, v16.16b, v17.16b, #(2*\i-2)
ext v25.16b, v20.16b, v21.16b, #(2*\i-2)
smlal v18.4s, v24.4h, v0.h[\i]
smlal2 v19.4s, v24.8h, v0.h[\i]
smlal v22.4s, v25.4h, v0.h[\i]
smlal2 v23.4s, v25.8h, v0.h[\i]
.endr
.else // 8tap
.ifc \type, put
mov v18.16b, v30.16b
mov v19.16b, v30.16b
smlal v18.4s, v16.4h, v0.h[0]
smlal2 v19.4s, v16.8h, v0.h[0]
mov v22.16b, v30.16b
mov v23.16b, v30.16b
smlal v22.4s, v20.4h, v0.h[0]
smlal2 v23.4s, v20.8h, v0.h[0]
.else
smull v18.4s, v16.4h, v0.h[0]
smull2 v19.4s, v16.8h, v0.h[0]
smull v22.4s, v20.4h, v0.h[0]
smull2 v23.4s, v20.8h, v0.h[0]
.endif
.irpc i, 1234567
ext v24.16b, v16.16b, v17.16b, #(2*\i)
ext v25.16b, v20.16b, v21.16b, #(2*\i)
smlal v18.4s, v24.4h, v0.h[\i]
smlal2 v19.4s, v24.8h, v0.h[\i]
smlal v22.4s, v25.4h, v0.h[\i]
smlal2 v23.4s, v25.8h, v0.h[\i]
.endr
.endif
subs \mx, \mx, #8
.ifc \type, put
sqshrun v18.4h, v18.4s, #6
sqshrun2 v18.8h, v19.4s, #6
sqshrun v22.4h, v22.4s, #6
sqshrun2 v22.8h, v23.4s, #6
umin v18.8h, v18.8h, v31.8h
umin v22.8h, v22.8h, v31.8h
.else
srshl v18.4s, v18.4s, v30.4s // -(6-intermediate_bits)
srshl v19.4s, v19.4s, v30.4s // -(6-intermediate_bits)
srshl v22.4s, v22.4s, v30.4s // -(6-intermediate_bits)
srshl v23.4s, v23.4s, v30.4s // -(6-intermediate_bits)
uzp1 v18.8h, v18.8h, v19.8h // Same as xtn, xtn2
uzp1 v22.8h, v22.8h, v23.8h // Ditto
sub v18.8h, v18.8h, v28.8h // PREP_BIAS
sub v22.8h, v22.8h, v28.8h // PREP_BIAS
.endif
st1 {v18.8h}, [\dst], #16
st1 {v22.8h}, [\ds2], #16
b.le 9f
mov v16.16b, v17.16b
mov v20.16b, v21.16b
ld1 {v17.8h}, [\src], #16
ld1 {v21.8h}, [\sr2], #16
b 8b
9:
add \dst, \dst, \d_strd
add \ds2, \ds2, \d_strd
add \src, \src, \s_strd
add \sr2, \sr2, \s_strd
subs \h, \h, #2
b.gt 81b
ret
endfunc
jumptable \type\()_\taps\()_h_tbl
.word 1280b - \type\()_\taps\()_h_tbl
.word 640b - \type\()_\taps\()_h_tbl
.word 320b - \type\()_\taps\()_h_tbl
.word 160b - \type\()_\taps\()_h_tbl
.word 80b - \type\()_\taps\()_h_tbl
.word 40b - \type\()_\taps\()_h_tbl
.word 20b - \type\()_\taps\()_h_tbl
endjumptable
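// The vertical filters keep a sliding window of source rows in registers;
// after each batch of output rows, the mov chains shift the window down so
// only the newly needed rows get loaded.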
function L(\type\()_\taps\()_v)
cmp \h, #4
ubfx w10, \my, #7, #7
and \my, \my, #0x7f
b.le 4f
mov \my, w10
4:
add \xmy, x11, \my, uxtw #3
.ifc \type, prep
dup v30.4s, w12 // 6 - intermediate_bits
movi v29.8h, #(PREP_BIAS >> 8), lsl #8
.endif
movrel x10, \type\()_\taps\()_v_tbl
ldrsw x9, [x10, x9, lsl #2]
.ifc \type, prep
neg v30.4s, v30.4s // -(6-intermediate_bits)
.endif
add x10, x10, x9
br x10
20: // 2xN v
AARCH64_VALID_JUMP_TARGET
.ifc \type, put
b.gt 28f
cmp \h, #2
ldur s0, [\xmy, #2]
sub \src, \src, \s_strd
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
sxtl v0.8h, v0.8b
// 2x2 v
load_s \src, \sr2, \s_strd, v1, v2, v3, v4, v5
interleave_1_s v1, v2, v3, v4, v5
b.gt 24f
smull_smlal_4tap v6, v1, v2, v3, v4
sqrshrun_h 6, v6
umin_h v31, .8h, v6
st_s \d_strd, v6, 2
ret
24: // 2x4 v
load_s \sr2, \src, \s_strd, v6, v7
interleave_1_s v5, v6, v7
smull_smlal_4tap v16, v1, v2, v3, v4
smull_smlal_4tap v17, v3, v4, v5, v6
sqrshrun_h 6, v16, v17
umin_h v31, .8h, v16
st_s \d_strd, v16, 4
ret
28: // 2x6, 2x8, 2x12, 2x16 v
ld1 {v0.8b}, [\xmy]
sub \sr2, \src, \s_strd, lsl #1
add \ds2, \dst, \d_strd
sub \src, \sr2, \s_strd
lsl \d_strd, \d_strd, #1
lsl \s_strd, \s_strd, #1
sxtl v0.8h, v0.8b
load_s \src, \sr2, \s_strd, v1, v2, v3, v4, v5, v6, v7
interleave_1_s v1, v2, v3, v4, v5
interleave_1_s v5, v6, v7
216:
subs \h, \h, #4
load_s \sr2, \src, \s_strd, v16, v17, v18, v19
interleave_1_s v7, v16, v17, v18, v19
smull_smlal_\taps v24, v1, v2, v3, v4, v5, v6, v7, v16
smull_smlal_\taps v25, v3, v4, v5, v6, v7, v16, v17, v18
sqrshrun_h 6, v24, v25
umin_h v31, .8h, v24
st_s \d_strd, v24, 4
b.le 0f
cmp \h, #2
mov v1.16b, v5.16b
mov v2.16b, v6.16b
mov v3.16b, v7.16b
mov v4.16b, v16.16b
mov v5.16b, v17.16b
mov v6.16b, v18.16b
mov v7.16b, v19.16b
b.eq 26f
b 216b
26:
load_s \sr2, \src, \s_strd, v16, v17
interleave_1_s v7, v16, v17
smull_smlal_\taps v24, v1, v2, v3, v4, v5, v6, v7, v16
sqrshrun_h 6, v24
umin_h v31, .4h, v24
st_s \d_strd, v24, 2
0:
ret
.endif
40:
AARCH64_VALID_JUMP_TARGET
b.gt 480f
// 4x2, 4x4 v
cmp \h, #2
ldur s0, [\xmy, #2]
sub \src, \src, \s_strd
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
sxtl v0.8h, v0.8b
load_4h \src, \sr2, \s_strd, v1, v2, v3, v4, v5
smull_smlal_4tap v6, v1, v2, v3, v4
smull_smlal_4tap v7, v2, v3, v4, v5
shift_store_4 \type, \d_strd, v6, v7
b.le 0f
load_4h \sr2, \src, \s_strd, v6, v7
smull_smlal_4tap v1, v3, v4, v5, v6
smull_smlal_4tap v2, v4, v5, v6, v7
shift_store_4 \type, \d_strd, v1, v2
0:
ret
480: // 4x6, 4x8, 4x12, 4x16 v
ld1 {v0.8b}, [\xmy]
sub \sr2, \src, \s_strd, lsl #1
add \ds2, \dst, \d_strd
sub \src, \sr2, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
sxtl v0.8h, v0.8b
load_4h \src, \sr2, \s_strd, v16, v17, v18, v19, v20, v21, v22
48:
subs \h, \h, #4
load_4h \sr2, \src, \s_strd, v23, v24, v25, v26
smull_smlal_\taps v1, v16, v17, v18, v19, v20, v21, v22, v23
smull_smlal_\taps v2, v17, v18, v19, v20, v21, v22, v23, v24
smull_smlal_\taps v3, v18, v19, v20, v21, v22, v23, v24, v25
smull_smlal_\taps v4, v19, v20, v21, v22, v23, v24, v25, v26
shift_store_4 \type, \d_strd, v1, v2, v3, v4
b.le 0f
cmp \h, #2
mov v16.8b, v20.8b
mov v17.8b, v21.8b
mov v18.8b, v22.8b
mov v19.8b, v23.8b
mov v20.8b, v24.8b
mov v21.8b, v25.8b
mov v22.8b, v26.8b
b.eq 46f
b 48b
46:
load_4h \sr2, \src, \s_strd, v23, v24
smull_smlal_\taps v1, v16, v17, v18, v19, v20, v21, v22, v23
smull_smlal_\taps v2, v17, v18, v19, v20, v21, v22, v23, v24
shift_store_4 \type, \d_strd, v1, v2
0:
ret
80:
AARCH64_VALID_JUMP_TARGET
b.gt 880f
// 8x2, 8x4 v
cmp \h, #2
ldur s0, [\xmy, #2]
sub \src, \src, \s_strd
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
sxtl v0.8h, v0.8b
load_8h \src, \sr2, \s_strd, v1, v2, v3, v4, v5
smull_smlal_4tap v16, v1, v2, v3, v4
smull2_smlal2_4tap v17, v1, v2, v3, v4
smull_smlal_4tap v18, v2, v3, v4, v5
smull2_smlal2_4tap v19, v2, v3, v4, v5
shift_store_8 \type, \d_strd, v16, v17, v18, v19
b.le 0f
load_8h \sr2, \src, \s_strd, v6, v7
smull_smlal_4tap v16, v3, v4, v5, v6
smull2_smlal2_4tap v17, v3, v4, v5, v6
smull_smlal_4tap v18, v4, v5, v6, v7
smull2_smlal2_4tap v19, v4, v5, v6, v7
shift_store_8 \type, \d_strd, v16, v17, v18, v19
0:
ret
880: // 8x6, 8x8, 8x16, 8x32 v
1680: // 16x8, 16x16, ...
320: // 32x8, 32x16, ...
640:
1280:
AARCH64_VALID_JUMP_TARGET
ld1 {v0.8b}, [\xmy]
sub \src, \src, \s_strd
sub \src, \src, \s_strd, lsl #1
sxtl v0.8h, v0.8b
mov \my, \h
168:
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
load_8h \src, \sr2, \s_strd, v16, v17, v18, v19, v20, v21, v22
88:
subs \h, \h, #2
load_8h \sr2, \src, \s_strd, v23, v24
smull_smlal_\taps v1, v16, v17, v18, v19, v20, v21, v22, v23
smull2_smlal2_\taps v2, v16, v17, v18, v19, v20, v21, v22, v23
smull_smlal_\taps v3, v17, v18, v19, v20, v21, v22, v23, v24
smull2_smlal2_\taps v4, v17, v18, v19, v20, v21, v22, v23, v24
shift_store_8 \type, \d_strd, v1, v2, v3, v4
b.le 9f
subs \h, \h, #2
load_8h \sr2, \src, \s_strd, v25, v26
smull_smlal_\taps v1, v18, v19, v20, v21, v22, v23, v24, v25
smull2_smlal2_\taps v2, v18, v19, v20, v21, v22, v23, v24, v25
smull_smlal_\taps v3, v19, v20, v21, v22, v23, v24, v25, v26
smull2_smlal2_\taps v4, v19, v20, v21, v22, v23, v24, v25, v26
shift_store_8 \type, \d_strd, v1, v2, v3, v4
b.le 9f
mov v16.16b, v20.16b
mov v17.16b, v21.16b
mov v18.16b, v22.16b
mov v19.16b, v23.16b
mov v20.16b, v24.16b
mov v21.16b, v25.16b
mov v22.16b, v26.16b
b 88b
9:
subs \w, \w, #8
b.le 0f
asr \s_strd, \s_strd, #1
asr \d_strd, \d_strd, #1
msub \src, \s_strd, \xmy, \src
msub \dst, \d_strd, \xmy, \dst
sub \src, \src, \s_strd, lsl #3
mov \h, \my
add \src, \src, #16
add \dst, \dst, #16
b 168b
0:
ret
160:
AARCH64_VALID_JUMP_TARGET
b.gt 1680b
// 16x2, 16x4 v
ldur s0, [\xmy, #2]
sub \src, \src, \s_strd
sxtl v0.8h, v0.8b
load_16h \src, \src, \s_strd, v16, v17, v18, v19, v20, v21
16:
load_16h \src, \src, \s_strd, v22, v23
subs \h, \h, #1
smull_smlal_4tap v1, v16, v18, v20, v22
smull2_smlal2_4tap v2, v16, v18, v20, v22
smull_smlal_4tap v3, v17, v19, v21, v23
smull2_smlal2_4tap v4, v17, v19, v21, v23
shift_store_16 \type, \d_strd, x0, v1, v2, v3, v4
b.le 0f
mov v16.16b, v18.16b
mov v17.16b, v19.16b
mov v18.16b, v20.16b
mov v19.16b, v21.16b
mov v20.16b, v22.16b
mov v21.16b, v23.16b
b 16b
0:
ret
endfunc
jumptable \type\()_\taps\()_v_tbl
.word 1280b - \type\()_\taps\()_v_tbl
.word 640b - \type\()_\taps\()_v_tbl
.word 320b - \type\()_\taps\()_v_tbl
.word 160b - \type\()_\taps\()_v_tbl
.word 80b - \type\()_\taps\()_v_tbl
.word 40b - \type\()_\taps\()_v_tbl
.word 20b - \type\()_\taps\()_v_tbl
endjumptable
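// The hv paths run the horizontal filter through the L(..._filter_N)
// helpers (called via bl, with the return address kept in x15), narrow the
// intermediates to 16 bit and keep them as the sliding window for the
// vertical pass.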
function L(\type\()_\taps\()_hv)
cmp \h, #4
ubfx w10, \my, #7, #7
and \my, \my, #0x7f
b.le 4f
mov \my, w10
4:
add \xmy, x11, \my, uxtw #3
movrel x10, \type\()_\taps\()_hv_tbl
dup v30.4s, w12 // 6 - intermediate_bits
ldrsw x9, [x10, x9, lsl #2]
neg v30.4s, v30.4s // -(6-intermediate_bits)
.ifc \type, put
dup v29.4s, w13 // 6 + intermediate_bits
.else
movi v29.8h, #(PREP_BIAS >> 8), lsl #8
.endif
add x10, x10, x9
.ifc \type, put
neg v29.4s, v29.4s // -(6+intermediate_bits)
.endif
br x10
20:
AARCH64_VALID_JUMP_TARGET
.ifc \type, put
ldur s0, [\xmx, #2]
b.gt 280f
ldur s1, [\xmy, #2]
// 2x2, 2x4 hv
sub \sr2, \src, #2
sub \src, \sr2, \s_strd
add \ds2, \dst, \d_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
sxtl v0.8h, v0.8b
sxtl v1.8h, v1.8b
mov x15, x30
ld1 {v27.8h}, [\src], \s_strd
ext v28.16b, v27.16b, v27.16b, #2
smull v27.4s, v27.4h, v0.4h
smull v28.4s, v28.4h, v0.4h
addp v27.4s, v27.4s, v28.4s
addp v16.4s, v27.4s, v27.4s
srshl v16.2s, v16.2s, v30.2s // -(6-intermediate_bits)
bl L(\type\()_\taps\()_filter_2)
// The intermediates from the horizontal pass fit in 16 bit without
// any bias; we could just as well keep them as .4s, but narrowing
// them to .4h gives a significant speedup on out of order cores
// (at the cost of a smaller slowdown on in-order cores such as A53).
xtn v16.4h, v16.4s
trn1 v16.2s, v16.2s, v24.2s
mov v17.8b, v24.8b
2:
bl L(\type\()_\taps\()_filter_2)
ext v18.8b, v17.8b, v24.8b, #4
smull v2.4s, v16.4h, v1.h[0]
smlal v2.4s, v17.4h, v1.h[1]
smlal v2.4s, v18.4h, v1.h[2]
smlal v2.4s, v24.4h, v1.h[3]
srshl v2.4s, v2.4s, v29.4s // -(6+intermediate_bits)
sqxtun v2.4h, v2.4s
umin v2.4h, v2.4h, v31.4h
subs \h, \h, #2
st1 {v2.s}[0], [\dst], \d_strd
st1 {v2.s}[1], [\ds2], \d_strd
b.le 0f
mov v16.8b, v18.8b
mov v17.8b, v24.8b
b 2b
280: // 2x8, 2x16, 2x32 hv
ld1 {v1.8b}, [\xmy]
sub \src, \src, #2
sub \sr2, \src, \s_strd, lsl #1
sub \src, \sr2, \s_strd
add \ds2, \dst, \d_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
sxtl v0.8h, v0.8b
sxtl v1.8h, v1.8b
mov x15, x30
ld1 {v27.8h}, [\src], \s_strd
ext v28.16b, v27.16b, v27.16b, #2
smull v27.4s, v27.4h, v0.4h
smull v28.4s, v28.4h, v0.4h
addp v27.4s, v27.4s, v28.4s
addp v16.4s, v27.4s, v27.4s
srshl v16.2s, v16.2s, v30.2s // -(6-intermediate_bits)
// The intermediates from the horizontal pass fit in 16 bit without
// any bias; we could just as well keep them as .4s, but narrowing
// them to .4h gives a significant speedup on out of order cores
// (at the cost of a smaller slowdown on in-order cores such as A53).
bl L(\type\()_\taps\()_filter_2)
xtn v16.4h, v16.4s
trn1 v16.2s, v16.2s, v24.2s
mov v17.8b, v24.8b
bl L(\type\()_\taps\()_filter_2)
ext v18.8b, v17.8b, v24.8b, #4
mov v19.8b, v24.8b
bl L(\type\()_\taps\()_filter_2)
ext v20.8b, v19.8b, v24.8b, #4
mov v21.8b, v24.8b
28:
bl L(\type\()_\taps\()_filter_2)
ext v22.8b, v21.8b, v24.8b, #4
.ifc \taps, 6tap
smull v3.4s, v17.4h, v1.h[1]
smlal v3.4s, v18.4h, v1.h[2]
smlal v3.4s, v19.4h, v1.h[3]
smlal v3.4s, v20.4h, v1.h[4]
smlal v3.4s, v21.4h, v1.h[5]
smlal v3.4s, v22.4h, v1.h[6]
.else // 8tap
smull v3.4s, v16.4h, v1.h[0]
smlal v3.4s, v17.4h, v1.h[1]
smlal v3.4s, v18.4h, v1.h[2]
smlal v3.4s, v19.4h, v1.h[3]
smlal v3.4s, v20.4h, v1.h[4]
smlal v3.4s, v21.4h, v1.h[5]
smlal v3.4s, v22.4h, v1.h[6]
smlal v3.4s, v24.4h, v1.h[7]
.endif
srshl v3.4s, v3.4s, v29.4s // -(6+intermediate_bits)
sqxtun v3.4h, v3.4s
umin v3.4h, v3.4h, v31.4h
subs \h, \h, #2
st1 {v3.s}[0], [\dst], \d_strd
st1 {v3.s}[1], [\ds2], \d_strd
b.le 0f
mov v16.8b, v18.8b
mov v17.8b, v19.8b
mov v18.8b, v20.8b
mov v19.8b, v21.8b
mov v20.8b, v22.8b
mov v21.8b, v24.8b
b 28b
0:
ret x15
L(\type\()_\taps\()_filter_2):
ld1 {v25.8h}, [\sr2], \s_strd
ld1 {v27.8h}, [\src], \s_strd
ext v26.16b, v25.16b, v25.16b, #2
ext v28.16b, v27.16b, v27.16b, #2
trn1 v24.2s, v25.2s, v27.2s
trn2 v27.2s, v25.2s, v27.2s
trn1 v25.2s, v26.2s, v28.2s
trn2 v28.2s, v26.2s, v28.2s
smull v24.4s, v24.4h, v0.h[0]
smlal v24.4s, v25.4h, v0.h[1]
smlal v24.4s, v27.4h, v0.h[2]
smlal v24.4s, v28.4h, v0.h[3]
srshl v24.4s, v24.4s, v30.4s // -(6-intermediate_bits)
xtn v24.4h, v24.4s
ret
.endif
40:
AARCH64_VALID_JUMP_TARGET
ldur s0, [\xmx, #2]
b.gt 480f
ldur s1, [\xmy, #2]
sub \sr2, \src, #2
sub \src, \sr2, \s_strd
add \ds2, \dst, \d_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
sxtl v0.8h, v0.8b
sxtl v1.8h, v1.8b
mov x15, x30
// 4x2, 4x4 hv
ld1 {v25.8h}, [\src], \s_strd
ext v26.16b, v25.16b, v25.16b, #2
ext v27.16b, v25.16b, v25.16b, #4
ext v28.16b, v25.16b, v25.16b, #6
smull v25.4s, v25.4h, v0.h[0]
smlal v25.4s, v26.4h, v0.h[1]
smlal v25.4s, v27.4h, v0.h[2]
smlal v25.4s, v28.4h, v0.h[3]
srshl v16.4s, v25.4s, v30.4s // -(6-intermediate_bits)
// The intermediates from the horizontal pass fit in 16 bit without
// any bias; we could just as well keep them as .4s, but narrowing
// them to .4h gives a significant speedup on out of order cores
// (at the cost of a smaller slowdown on in-order cores such as A53).
xtn v16.4h, v16.4s
bl L(\type\()_\taps\()_filter_4)
mov v17.8b, v24.8b
mov v18.8b, v25.8b
4:
bl L(\type\()_\taps\()_filter_4)
smull v2.4s, v16.4h, v1.h[0]
smlal v2.4s, v17.4h, v1.h[1]
smlal v2.4s, v18.4h, v1.h[2]
smlal v2.4s, v24.4h, v1.h[3]
smull v3.4s, v17.4h, v1.h[0]
smlal v3.4s, v18.4h, v1.h[1]
smlal v3.4s, v24.4h, v1.h[2]
smlal v3.4s, v25.4h, v1.h[3]
.ifc \type, put
srshl v2.4s, v2.4s, v29.4s // -(6+intermediate_bits)
srshl v3.4s, v3.4s, v29.4s // -(6+intermediate_bits)
sqxtun v2.4h, v2.4s
sqxtun2 v2.8h, v3.4s
umin v2.8h, v2.8h, v31.8h
.else
rshrn v2.4h, v2.4s, #6
rshrn2 v2.8h, v3.4s, #6
sub v2.8h, v2.8h, v29.8h // PREP_BIAS
.endif
subs \h, \h, #2
st1 {v2.8b}, [\dst], \d_strd
st1 {v2.d}[1], [\ds2], \d_strd
b.le 0f
mov v16.8b, v18.8b
mov v17.8b, v24.8b
mov v18.8b, v25.8b
b 4b
480: // 4x8, 4x16, 4x32 hv
ld1 {v1.8b}, [\xmy]
sub \src, \src, #2
.ifc \taps, 6tap
sub \sr2, \src, \s_strd
sub \src, \src, \s_strd, lsl #1
.else
sub \sr2, \src, \s_strd, lsl #1
sub \src, \sr2, \s_strd
.endif
add \ds2, \dst, \d_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
sxtl v0.8h, v0.8b
sxtl v1.8h, v1.8b
mov x15, x30
ld1 {v25.8h}, [\src], \s_strd
ext v26.16b, v25.16b, v25.16b, #2
ext v27.16b, v25.16b, v25.16b, #4
ext v28.16b, v25.16b, v25.16b, #6
smull v25.4s, v25.4h, v0.h[0]
smlal v25.4s, v26.4h, v0.h[1]
smlal v25.4s, v27.4h, v0.h[2]
smlal v25.4s, v28.4h, v0.h[3]
srshl v16.4s, v25.4s, v30.4s // -(6-intermediate_bits)
// The intermediates from the horizontal pass fit in 16 bit without
// any bias; we could just as well keep them as .4s, but narrowing
// them to .4h gives a significant speedup on out of order cores
// (at the cost of a smaller slowdown on in-order cores such as A53).
.ifc \taps, 6tap
xtn v18.4h, v16.4s
.else
xtn v16.4h, v16.4s
bl L(\type\()_\taps\()_filter_4)
mov v17.8b, v24.8b
mov v18.8b, v25.8b
.endif
bl L(\type\()_\taps\()_filter_4)
mov v19.8b, v24.8b
mov v20.8b, v25.8b
bl L(\type\()_\taps\()_filter_4)
mov v21.8b, v24.8b
mov v22.8b, v25.8b
48:
bl L(\type\()_\taps\()_filter_4)
.ifc \taps, 6tap
smull v3.4s, v18.4h, v1.h[1]
smlal v3.4s, v19.4h, v1.h[2]
smlal v3.4s, v20.4h, v1.h[3]
smlal v3.4s, v21.4h, v1.h[4]
smlal v3.4s, v22.4h, v1.h[5]
smlal v3.4s, v24.4h, v1.h[6]
smull v4.4s, v19.4h, v1.h[1]
smlal v4.4s, v20.4h, v1.h[2]
smlal v4.4s, v21.4h, v1.h[3]
smlal v4.4s, v22.4h, v1.h[4]
smlal v4.4s, v24.4h, v1.h[5]
smlal v4.4s, v25.4h, v1.h[6]
.else // 8tap
smull v3.4s, v16.4h, v1.h[0]
smlal v3.4s, v17.4h, v1.h[1]
smlal v3.4s, v18.4h, v1.h[2]
smlal v3.4s, v19.4h, v1.h[3]
smlal v3.4s, v20.4h, v1.h[4]
smlal v3.4s, v21.4h, v1.h[5]
smlal v3.4s, v22.4h, v1.h[6]
smlal v3.4s, v24.4h, v1.h[7]
smull v4.4s, v17.4h, v1.h[0]
smlal v4.4s, v18.4h, v1.h[1]
smlal v4.4s, v19.4h, v1.h[2]
smlal v4.4s, v20.4h, v1.h[3]
smlal v4.4s, v21.4h, v1.h[4]
smlal v4.4s, v22.4h, v1.h[5]
smlal v4.4s, v24.4h, v1.h[6]
smlal v4.4s, v25.4h, v1.h[7]
.endif
.ifc \type, put
srshl v3.4s, v3.4s, v29.4s // -(6+intermediate_bits)
srshl v4.4s, v4.4s, v29.4s // -(6+intermediate_bits)
sqxtun v3.4h, v3.4s
sqxtun2 v3.8h, v4.4s
umin v3.8h, v3.8h, v31.8h
.else
rshrn v3.4h, v3.4s, #6
rshrn2 v3.8h, v4.4s, #6
sub v3.8h, v3.8h, v29.8h // PREP_BIAS
.endif
subs \h, \h, #2
st1 {v3.8b}, [\dst], \d_strd
st1 {v3.d}[1], [\ds2], \d_strd
b.le 0f
.ifc \taps, 8tap
mov v16.8b, v18.8b
mov v17.8b, v19.8b
.endif
mov v18.8b, v20.8b
mov v19.8b, v21.8b
mov v20.8b, v22.8b
mov v21.8b, v24.8b
mov v22.8b, v25.8b
b 48b
0:
ret x15
L(\type\()_\taps\()_filter_4):
ld1 {v24.8h}, [\sr2], \s_strd
ld1 {v25.8h}, [\src], \s_strd
ext v26.16b, v24.16b, v24.16b, #2
ext v27.16b, v24.16b, v24.16b, #4
ext v28.16b, v24.16b, v24.16b, #6
smull v24.4s, v24.4h, v0.h[0]
smlal v24.4s, v26.4h, v0.h[1]
smlal v24.4s, v27.4h, v0.h[2]
smlal v24.4s, v28.4h, v0.h[3]
ext v26.16b, v25.16b, v25.16b, #2
ext v27.16b, v25.16b, v25.16b, #4
ext v28.16b, v25.16b, v25.16b, #6
smull v25.4s, v25.4h, v0.h[0]
smlal v25.4s, v26.4h, v0.h[1]
smlal v25.4s, v27.4h, v0.h[2]
smlal v25.4s, v28.4h, v0.h[3]
srshl v24.4s, v24.4s, v30.4s // -(6-intermediate_bits)
srshl v25.4s, v25.4s, v30.4s // -(6-intermediate_bits)
xtn v24.4h, v24.4s
xtn v25.4h, v25.4s
ret
80:
160:
320:
AARCH64_VALID_JUMP_TARGET
b.gt 880f
ld1 {v0.8b}, [\xmx]
ldur s1, [\xmy, #2]
.ifc \taps, 6tap
sub \src, \src, #4
.else
sub \src, \src, #6
.endif
sub \src, \src, \s_strd
sxtl v0.8h, v0.8b
sxtl v1.8h, v1.8b
mov x15, x30
mov \my, \h
164: // 8x2, 8x4, 16x2, 16x4, 32x2, 32x4 hv
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \d_strd, \d_strd, #1
lsl \s_strd, \s_strd, #1
ld1 {v27.8h, v28.8h}, [\src], \s_strd
.ifc \taps, 6tap
smull v24.4s, v27.4h, v0.h[1]
smull2 v25.4s, v27.8h, v0.h[1]
.irpc i, 23456
ext v26.16b, v27.16b, v28.16b, #(2*\i-2)
smlal v24.4s, v26.4h, v0.h[\i]
smlal2 v25.4s, v26.8h, v0.h[\i]
.endr
.else
smull v24.4s, v27.4h, v0.h[0]
smull2 v25.4s, v27.8h, v0.h[0]
.irpc i, 1234567
ext v26.16b, v27.16b, v28.16b, #(2*\i)
smlal v24.4s, v26.4h, v0.h[\i]
smlal2 v25.4s, v26.8h, v0.h[\i]
.endr
.endif
srshl v24.4s, v24.4s, v30.4s // -(6-intermediate_bits)
srshl v25.4s, v25.4s, v30.4s // -(6-intermediate_bits)
// The intermediates from the horizontal pass fit in 16 bit without
// any bias; we could just as well keep them as .4s, but narrowing
// them to .4h gives a significant speedup on out of order cores
// (at the cost of a smaller slowdown on in-order cores such as A53),
// and conserves register space (no need to clobber v8-v15).
uzp1 v16.8h, v24.8h, v25.8h // Same as xtn, xtn2
bl L(\type\()_\taps\()_filter_8)
mov v17.16b, v23.16b
mov v18.16b, v24.16b
8:
smull v2.4s, v16.4h, v1.h[0]
smull2 v3.4s, v16.8h, v1.h[0]
bl L(\type\()_\taps\()_filter_8)
smull v4.4s, v17.4h, v1.h[0]
smull2 v5.4s, v17.8h, v1.h[0]
smlal v2.4s, v17.4h, v1.h[1]
smlal2 v3.4s, v17.8h, v1.h[1]
smlal v4.4s, v18.4h, v1.h[1]
smlal2 v5.4s, v18.8h, v1.h[1]
smlal v2.4s, v18.4h, v1.h[2]
smlal2 v3.4s, v18.8h, v1.h[2]
smlal v4.4s, v23.4h, v1.h[2]
smlal2 v5.4s, v23.8h, v1.h[2]
smlal v2.4s, v23.4h, v1.h[3]
smlal2 v3.4s, v23.8h, v1.h[3]
smlal v4.4s, v24.4h, v1.h[3]
smlal2 v5.4s, v24.8h, v1.h[3]
.ifc \type, put
srshl v2.4s, v2.4s, v29.4s // -(6+intermediate_bits)
srshl v3.4s, v3.4s, v29.4s // -(6+intermediate_bits)
srshl v4.4s, v4.4s, v29.4s // -(6+intermediate_bits)
srshl v5.4s, v5.4s, v29.4s // -(6+intermediate_bits)
sqxtun v2.4h, v2.4s
sqxtun2 v2.8h, v3.4s
sqxtun v3.4h, v4.4s
sqxtun2 v3.8h, v5.4s
umin v2.8h, v2.8h, v31.8h
umin v3.8h, v3.8h, v31.8h
.else
rshrn v2.4h, v2.4s, #6
rshrn2 v2.8h, v3.4s, #6
rshrn v3.4h, v4.4s, #6
rshrn2 v3.8h, v5.4s, #6
sub v2.8h, v2.8h, v29.8h // PREP_BIAS
sub v3.8h, v3.8h, v29.8h // PREP_BIAS
.endif
subs \h, \h, #2
st1 {v2.8h}, [\dst], \d_strd
st1 {v3.8h}, [\ds2], \d_strd
b.le 9f
mov v16.16b, v18.16b
mov v17.16b, v23.16b
mov v18.16b, v24.16b
b 8b
9:
subs \w, \w, #8
b.le 0f
asr \s_strd, \s_strd, #1
asr \d_strd, \d_strd, #1
msub \src, \s_strd, \xmy, \src
msub \dst, \d_strd, \xmy, \dst
sub \src, \src, \s_strd, lsl #2
mov \h, \my
add \src, \src, #16
add \dst, \dst, #16
b 164b
880: // 8x8, 8x16, ..., 16x8, ..., 32x8, ... hv
640:
1280:
AARCH64_VALID_JUMP_TARGET
ld1 {v0.8b}, [\xmx]
ld1 {v1.8b}, [\xmy]
.ifc \taps, 6tap
sub \src, \src, #4
.else
sub \src, \src, #6
sub \src, \src, \s_strd
.endif
sub \src, \src, \s_strd, lsl #1
sxtl v0.8h, v0.8b
sxtl v1.8h, v1.8b
mov x15, x30
mov \my, \h
168:
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \d_strd, \d_strd, #1
lsl \s_strd, \s_strd, #1
ld1 {v27.8h, v28.8h}, [\src], \s_strd
.ifc \taps, 6tap
smull v24.4s, v27.4h, v0.h[1]
smull2 v25.4s, v27.8h, v0.h[1]
.irpc i, 23456
ext v26.16b, v27.16b, v28.16b, #(2*\i-2)
smlal v24.4s, v26.4h, v0.h[\i]
smlal2 v25.4s, v26.8h, v0.h[\i]
.endr
.else // 8tap
smull v24.4s, v27.4h, v0.h[0]
smull2 v25.4s, v27.8h, v0.h[0]
.irpc i, 1234567
ext v26.16b, v27.16b, v28.16b, #(2*\i)
smlal v24.4s, v26.4h, v0.h[\i]
smlal2 v25.4s, v26.8h, v0.h[\i]
.endr
.endif
srshl v24.4s, v24.4s, v30.4s // -(6-intermediate_bits)
srshl v25.4s, v25.4s, v30.4s // -(6-intermediate_bits)
// The intermediates from the horizontal pass fit in 16 bit without
// any bias; we could just as well keep them as .4s, but narrowing
// them to .4h gives a significant speedup on out of order cores
// (at the cost of a smaller slowdown on in-order cores such as A53),
// and conserves register space (no need to clobber v8-v15).
.ifc \taps, 6tap
uzp1 v18.8h, v24.8h, v25.8h // Same as xtn, xtn2
.else
uzp1 v16.8h, v24.8h, v25.8h // Same as xtn, xtn2
bl L(\type\()_\taps\()_filter_8)
mov v17.16b, v23.16b
mov v18.16b, v24.16b
.endif
bl L(\type\()_\taps\()_filter_8)
mov v19.16b, v23.16b
mov v20.16b, v24.16b
bl L(\type\()_\taps\()_filter_8)
mov v21.16b, v23.16b
mov v22.16b, v24.16b
88:
.ifc \taps, 6tap
smull v2.4s, v18.4h, v1.h[1]
smull2 v3.4s, v18.8h, v1.h[1]
bl L(\type\()_\taps\()_filter_8)
smull v4.4s, v19.4h, v1.h[1]
smull2 v5.4s, v19.8h, v1.h[1]
smlal v2.4s, v19.4h, v1.h[2]
smlal2 v3.4s, v19.8h, v1.h[2]
smlal v4.4s, v20.4h, v1.h[2]
smlal2 v5.4s, v20.8h, v1.h[2]
smlal v2.4s, v20.4h, v1.h[3]
smlal2 v3.4s, v20.8h, v1.h[3]
smlal v4.4s, v21.4h, v1.h[3]
smlal2 v5.4s, v21.8h, v1.h[3]
smlal v2.4s, v21.4h, v1.h[4]
smlal2 v3.4s, v21.8h, v1.h[4]
smlal v4.4s, v22.4h, v1.h[4]
smlal2 v5.4s, v22.8h, v1.h[4]
smlal v2.4s, v22.4h, v1.h[5]
smlal2 v3.4s, v22.8h, v1.h[5]
smlal v4.4s, v23.4h, v1.h[5]
smlal2 v5.4s, v23.8h, v1.h[5]
smlal v2.4s, v23.4h, v1.h[6]
smlal2 v3.4s, v23.8h, v1.h[6]
smlal v4.4s, v24.4h, v1.h[6]
smlal2 v5.4s, v24.8h, v1.h[6]
.else // 8tap
smull v2.4s, v16.4h, v1.h[0]
smull2 v3.4s, v16.8h, v1.h[0]
bl L(\type\()_\taps\()_filter_8)
smull v4.4s, v17.4h, v1.h[0]
smull2 v5.4s, v17.8h, v1.h[0]
smlal v2.4s, v17.4h, v1.h[1]
smlal2 v3.4s, v17.8h, v1.h[1]
smlal v4.4s, v18.4h, v1.h[1]
smlal2 v5.4s, v18.8h, v1.h[1]
smlal v2.4s, v18.4h, v1.h[2]
smlal2 v3.4s, v18.8h, v1.h[2]
smlal v4.4s, v19.4h, v1.h[2]
smlal2 v5.4s, v19.8h, v1.h[2]
smlal v2.4s, v19.4h, v1.h[3]
smlal2 v3.4s, v19.8h, v1.h[3]
smlal v4.4s, v20.4h, v1.h[3]
smlal2 v5.4s, v20.8h, v1.h[3]
smlal v2.4s, v20.4h, v1.h[4]
smlal2 v3.4s, v20.8h, v1.h[4]
smlal v4.4s, v21.4h, v1.h[4]
smlal2 v5.4s, v21.8h, v1.h[4]
smlal v2.4s, v21.4h, v1.h[5]
smlal2 v3.4s, v21.8h, v1.h[5]
smlal v4.4s, v22.4h, v1.h[5]
smlal2 v5.4s, v22.8h, v1.h[5]
smlal v2.4s, v22.4h, v1.h[6]
smlal2 v3.4s, v22.8h, v1.h[6]
smlal v4.4s, v23.4h, v1.h[6]
smlal2 v5.4s, v23.8h, v1.h[6]
smlal v2.4s, v23.4h, v1.h[7]
smlal2 v3.4s, v23.8h, v1.h[7]
smlal v4.4s, v24.4h, v1.h[7]
smlal2 v5.4s, v24.8h, v1.h[7]
.endif
.ifc \type, put
srshl v2.4s, v2.4s, v29.4s // -(6+intermediate_bits)
srshl v3.4s, v3.4s, v29.4s // -(6+intermediate_bits)
srshl v4.4s, v4.4s, v29.4s // -(6+intermediate_bits)
srshl v5.4s, v5.4s, v29.4s // -(6+intermediate_bits)
sqxtun v2.4h, v2.4s
sqxtun2 v2.8h, v3.4s
sqxtun v3.4h, v4.4s
sqxtun2 v3.8h, v5.4s
umin v2.8h, v2.8h, v31.8h
umin v3.8h, v3.8h, v31.8h
.else
rshrn v2.4h, v2.4s, #6
rshrn2 v2.8h, v3.4s, #6
rshrn v3.4h, v4.4s, #6
rshrn2 v3.8h, v5.4s, #6
sub v2.8h, v2.8h, v29.8h // PREP_BIAS
sub v3.8h, v3.8h, v29.8h // PREP_BIAS
.endif
subs \h, \h, #2
st1 {v2.8h}, [\dst], \d_strd
st1 {v3.8h}, [\ds2], \d_strd
b.le 9f
.ifc \taps, 8tap
mov v16.16b, v18.16b
mov v17.16b, v19.16b
.endif
mov v18.16b, v20.16b
mov v19.16b, v21.16b
mov v20.16b, v22.16b
mov v21.16b, v23.16b
mov v22.16b, v24.16b
b 88b
9:
subs \w, \w, #8
b.le 0f
asr \s_strd, \s_strd, #1
asr \d_strd, \d_strd, #1
msub \src, \s_strd, \xmy, \src
msub \dst, \d_strd, \xmy, \dst
sub \src, \src, \s_strd, lsl #3
mov \h, \my
add \src, \src, #16
add \dst, \dst, #16
.ifc \taps, 6tap
add \src, \src, \s_strd, lsl #1
.endif
b 168b
0:
ret x15
L(\type\()_\taps\()_filter_8):
ld1 {v4.8h, v5.8h}, [\sr2], \s_strd
ld1 {v6.8h, v7.8h}, [\src], \s_strd
.ifc \taps, 6tap
smull v25.4s, v4.4h, v0.h[1]
smull2 v26.4s, v4.8h, v0.h[1]
smull v27.4s, v6.4h, v0.h[1]
smull2 v28.4s, v6.8h, v0.h[1]
.irpc i, 23456
ext v23.16b, v4.16b, v5.16b, #(2*\i-2)
ext v24.16b, v6.16b, v7.16b, #(2*\i-2)
smlal v25.4s, v23.4h, v0.h[\i]
smlal2 v26.4s, v23.8h, v0.h[\i]
smlal v27.4s, v24.4h, v0.h[\i]
smlal2 v28.4s, v24.8h, v0.h[\i]
.endr
.else // 8tap
smull v25.4s, v4.4h, v0.h[0]
smull2 v26.4s, v4.8h, v0.h[0]
smull v27.4s, v6.4h, v0.h[0]
smull2 v28.4s, v6.8h, v0.h[0]
.irpc i, 1234567
ext v23.16b, v4.16b, v5.16b, #(2*\i)
ext v24.16b, v6.16b, v7.16b, #(2*\i)
smlal v25.4s, v23.4h, v0.h[\i]
smlal2 v26.4s, v23.8h, v0.h[\i]
smlal v27.4s, v24.4h, v0.h[\i]
smlal2 v28.4s, v24.8h, v0.h[\i]
.endr
.endif
srshl v25.4s, v25.4s, v30.4s // -(6-intermediate_bits)
srshl v26.4s, v26.4s, v30.4s // -(6-intermediate_bits)
srshl v27.4s, v27.4s, v30.4s // -(6-intermediate_bits)
srshl v28.4s, v28.4s, v30.4s // -(6-intermediate_bits)
uzp1 v23.8h, v25.8h, v26.8h // Same as xtn, xtn2
uzp1 v24.8h, v27.8h, v28.8h // Ditto
ret
endfunc
jumptable \type\()_\taps\()_hv_tbl
.word 1280b - \type\()_\taps\()_hv_tbl
.word 640b - \type\()_\taps\()_hv_tbl
.word 320b - \type\()_\taps\()_hv_tbl
.word 160b - \type\()_\taps\()_hv_tbl
.word 80b - \type\()_\taps\()_hv_tbl
.word 40b - \type\()_\taps\()_hv_tbl
.word 20b - \type\()_\taps\()_hv_tbl
endjumptable
.endm
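// The bilinear variants below mirror the 8tap h/v/hv dispatch, but with
// per-direction weights 16-mx/mx (v0/v1) and 16-my/my (v2/v3) and 4-bit
// rounding shifts in place of the 6-bit 8tap ones.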
.macro filter_bilin_fn type, dst, d_strd, src, s_strd, w, h, mx, xmx, my, xmy, bdmax, ds2, sr2
function \type\()_bilin_16bpc_neon, export=1
.ifc \bdmax, w8
ldr w8, [sp]
.endif
dup v1.8h, \mx
dup v3.8h, \my
mov w10, #16
sub w9, w10, \mx
sub w10, w10, \my
dup v0.8h, w9
dup v2.8h, w10
.ifc \type, prep
uxtw \d_strd, \w
lsl \d_strd, \d_strd, #1
.endif
clz \bdmax, \bdmax // bitdepth_max
clz w9, \w
sub \bdmax, \bdmax, #18 // intermediate_bits = clz(bitdepth_max) - 18
mov w11, #4
sub w9, w9, #24
sub w11, w11, \bdmax // 4 - intermediate_bits
add w12, \bdmax, #4 // 4 + intermediate_bits
cbnz \mx, L(\type\()_bilin_h)
cbnz \my, L(\type\()_bilin_v)
b \type\()_16bpc_neon
L(\type\()_bilin_h):
cbnz \my, L(\type\()_bilin_hv)
movrel x10, \type\()_bilin_h_tbl
dup v31.8h, w11 // 4 - intermediate_bits
ldrsw x9, [x10, x9, lsl #2]
neg v31.8h, v31.8h // -(4-intermediate_bits)
.ifc \type, put
dup v30.8h, \bdmax // intermediate_bits
.else
movi v29.8h, #(PREP_BIAS >> 8), lsl #8
.endif
add x10, x10, x9
.ifc \type, put
neg v30.8h, v30.8h // -intermediate_bits
.endif
br x10
20: // 2xN h
AARCH64_VALID_JUMP_TARGET
.ifc \type, put
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \d_strd, \d_strd, #1
lsl \s_strd, \s_strd, #1
2:
ld1 {v4.4h}, [\src], \s_strd
ld1 {v6.4h}, [\sr2], \s_strd
ext v5.8b, v4.8b, v4.8b, #2
ext v7.8b, v6.8b, v6.8b, #2
trn1 v4.2s, v4.2s, v6.2s
trn1 v5.2s, v5.2s, v7.2s
subs \h, \h, #2
mul v4.4h, v4.4h, v0.4h
mla v4.4h, v5.4h, v1.4h
urshl v4.4h, v4.4h, v31.4h
urshl v4.4h, v4.4h, v30.4h
st1 {v4.s}[0], [\dst], \d_strd
st1 {v4.s}[1], [\ds2], \d_strd
b.gt 2b
ret
.endif
40: // 4xN h
AARCH64_VALID_JUMP_TARGET
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \d_strd, \d_strd, #1
lsl \s_strd, \s_strd, #1
4:
ld1 {v4.8h}, [\src], \s_strd
ld1 {v6.8h}, [\sr2], \s_strd
ext v5.16b, v4.16b, v4.16b, #2
ext v7.16b, v6.16b, v6.16b, #2
trn1 v4.2d, v4.2d, v6.2d
trn1 v5.2d, v5.2d, v7.2d
subs \h, \h, #2
mul v4.8h, v4.8h, v0.8h
mla v4.8h, v5.8h, v1.8h
urshl v4.8h, v4.8h, v31.8h
.ifc \type, put
urshl v4.8h, v4.8h, v30.8h
.else
sub v4.8h, v4.8h, v29.8h
.endif
st1 {v4.8b}, [\dst], \d_strd
st1 {v4.d}[1], [\ds2], \d_strd
b.gt 4b
ret
80: // 8xN h
AARCH64_VALID_JUMP_TARGET
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \d_strd, \d_strd, #1
lsl \s_strd, \s_strd, #1
8:
ldr h5, [\src, #16]
ldr h7, [\sr2, #16]
ld1 {v4.8h}, [\src], \s_strd
ld1 {v6.8h}, [\sr2], \s_strd
ext v5.16b, v4.16b, v5.16b, #2
ext v7.16b, v6.16b, v7.16b, #2
subs \h, \h, #2
mul v4.8h, v4.8h, v0.8h
mla v4.8h, v5.8h, v1.8h
mul v6.8h, v6.8h, v0.8h
mla v6.8h, v7.8h, v1.8h
urshl v4.8h, v4.8h, v31.8h
urshl v6.8h, v6.8h, v31.8h
.ifc \type, put
urshl v4.8h, v4.8h, v30.8h
urshl v6.8h, v6.8h, v30.8h
.else
sub v4.8h, v4.8h, v29.8h
sub v6.8h, v6.8h, v29.8h
.endif
st1 {v4.8h}, [\dst], \d_strd
st1 {v6.8h}, [\ds2], \d_strd
b.gt 8b
ret
160:
320:
640:
1280: // 16xN, 32xN, ... h
AARCH64_VALID_JUMP_TARGET
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
sub \s_strd, \s_strd, \w, uxtw #1
sub \s_strd, \s_strd, #16
.ifc \type, put
lsl \d_strd, \d_strd, #1
sub \d_strd, \d_strd, \w, uxtw #1
.endif
161:
ld1 {v16.8h}, [\src], #16
ld1 {v21.8h}, [\sr2], #16
mov \mx, \w
16:
ld1 {v17.8h, v18.8h}, [\src], #32
ld1 {v22.8h, v23.8h}, [\sr2], #32
ext v19.16b, v16.16b, v17.16b, #2
ext v20.16b, v17.16b, v18.16b, #2
ext v24.16b, v21.16b, v22.16b, #2
ext v25.16b, v22.16b, v23.16b, #2
mul v16.8h, v16.8h, v0.8h
mla v16.8h, v19.8h, v1.8h
mul v17.8h, v17.8h, v0.8h
mla v17.8h, v20.8h, v1.8h
mul v21.8h, v21.8h, v0.8h
mla v21.8h, v24.8h, v1.8h
mul v22.8h, v22.8h, v0.8h
mla v22.8h, v25.8h, v1.8h
urshl v16.8h, v16.8h, v31.8h
urshl v17.8h, v17.8h, v31.8h
urshl v21.8h, v21.8h, v31.8h
urshl v22.8h, v22.8h, v31.8h
subs \mx, \mx, #16
.ifc \type, put
urshl v16.8h, v16.8h, v30.8h
urshl v17.8h, v17.8h, v30.8h
urshl v21.8h, v21.8h, v30.8h
urshl v22.8h, v22.8h, v30.8h
.else
sub v16.8h, v16.8h, v29.8h
sub v17.8h, v17.8h, v29.8h
sub v21.8h, v21.8h, v29.8h
sub v22.8h, v22.8h, v29.8h
.endif
st1 {v16.8h, v17.8h}, [\dst], #32
st1 {v21.8h, v22.8h}, [\ds2], #32
b.le 9f
mov v16.16b, v18.16b
mov v21.16b, v23.16b
b 16b
9:
add \dst, \dst, \d_strd
add \ds2, \ds2, \d_strd
add \src, \src, \s_strd
add \sr2, \sr2, \s_strd
subs \h, \h, #2
b.gt 161b
ret
endfunc
jumptable \type\()_bilin_h_tbl
.word 1280b - \type\()_bilin_h_tbl
.word 640b - \type\()_bilin_h_tbl
.word 320b - \type\()_bilin_h_tbl
.word 160b - \type\()_bilin_h_tbl
.word 80b - \type\()_bilin_h_tbl
.word 40b - \type\()_bilin_h_tbl
.word 20b - \type\()_bilin_h_tbl
endjumptable
function L(\type\()_bilin_v)
cmp \h, #4
movrel x10, \type\()_bilin_v_tbl
.ifc \type, prep
dup v31.8h, w11 // 4 - intermediate_bits
.endif
ldrsw x9, [x10, x9, lsl #2]
.ifc \type, prep
movi v29.8h, #(PREP_BIAS >> 8), lsl #8
neg v31.8h, v31.8h // -(4-intermediate_bits)
.endif
add x10, x10, x9
br x10
20: // 2xN v
AARCH64_VALID_JUMP_TARGET
.ifc \type, put
cmp \h, #2
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
// 2x2 v
ld1r {v16.4s}, [\src], \s_strd
b.gt 24f
22:
ld1r {v17.4s}, [\sr2], \s_strd
ld1r {v18.4s}, [\src], \s_strd
trn1 v16.2s, v16.2s, v17.2s
trn1 v17.2s, v17.2s, v18.2s
mul v4.4h, v16.4h, v2.4h
mla v4.4h, v17.4h, v3.4h
urshr v4.8h, v4.8h, #4
str s4, [\dst]
st1 {v4.s}[1], [\ds2]
ret
24: // 2x4, 2x6, 2x8, ... v
ld1r {v17.4s}, [\sr2], \s_strd
ld1r {v18.4s}, [\src], \s_strd
ld1r {v19.4s}, [\sr2], \s_strd
ld1r {v20.4s}, [\src], \s_strd
sub \h, \h, #4
trn1 v16.2s, v16.2s, v17.2s
trn1 v17.2s, v17.2s, v18.2s
trn1 v18.2s, v18.2s, v19.2s
trn1 v19.2s, v19.2s, v20.2s
trn1 v16.2d, v16.2d, v18.2d
trn1 v17.2d, v17.2d, v19.2d
mul v4.8h, v16.8h, v2.8h
mla v4.8h, v17.8h, v3.8h
cmp \h, #2
urshr v4.8h, v4.8h, #4
st1 {v4.s}[0], [\dst], \d_strd
st1 {v4.s}[1], [\ds2], \d_strd
st1 {v4.s}[2], [\dst], \d_strd
st1 {v4.s}[3], [\ds2], \d_strd
b.lt 0f
mov v16.8b, v20.8b
b.eq 22b
b 24b
0:
ret
.endif
40: // 4xN v
AARCH64_VALID_JUMP_TARGET
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
ld1 {v16.4h}, [\src], \s_strd
4:
ld1 {v17.4h}, [\sr2], \s_strd
ld1 {v18.4h}, [\src], \s_strd
trn1 v16.2d, v16.2d, v17.2d
trn1 v17.2d, v17.2d, v18.2d
mul v4.8h, v16.8h, v2.8h
mla v4.8h, v17.8h, v3.8h
subs \h, \h, #2
.ifc \type, put
urshr v4.8h, v4.8h, #4
.else
urshl v4.8h, v4.8h, v31.8h
sub v4.8h, v4.8h, v29.8h
.endif
st1 {v4.8b}, [\dst], \d_strd
st1 {v4.d}[1], [\ds2], \d_strd
b.le 0f
mov v16.8b, v18.8b
b 4b
0:
ret
80: // 8xN v
AARCH64_VALID_JUMP_TARGET
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
ld1 {v16.8h}, [\src], \s_strd
8:
ld1 {v17.8h}, [\sr2], \s_strd
ld1 {v18.8h}, [\src], \s_strd
mul v4.8h, v16.8h, v2.8h
mla v4.8h, v17.8h, v3.8h
mul v5.8h, v17.8h, v2.8h
mla v5.8h, v18.8h, v3.8h
subs \h, \h, #2
.ifc \type, put
urshr v4.8h, v4.8h, #4
urshr v5.8h, v5.8h, #4
.else
urshl v4.8h, v4.8h, v31.8h
urshl v5.8h, v5.8h, v31.8h
sub v4.8h, v4.8h, v29.8h
sub v5.8h, v5.8h, v29.8h
.endif
st1 {v4.8h}, [\dst], \d_strd
st1 {v5.8h}, [\ds2], \d_strd
b.le 0f
mov v16.16b, v18.16b
b 8b
0:
ret
160: // 16xN, 32xN, ...
320:
640:
1280:
AARCH64_VALID_JUMP_TARGET
mov \my, \h
1:
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
ld1 {v16.8h, v17.8h}, [\src], \s_strd
2:
ld1 {v18.8h, v19.8h}, [\sr2], \s_strd
ld1 {v20.8h, v21.8h}, [\src], \s_strd
mul v4.8h, v16.8h, v2.8h
mla v4.8h, v18.8h, v3.8h
mul v5.8h, v17.8h, v2.8h
mla v5.8h, v19.8h, v3.8h
mul v6.8h, v18.8h, v2.8h
mla v6.8h, v20.8h, v3.8h
mul v7.8h, v19.8h, v2.8h
mla v7.8h, v21.8h, v3.8h
subs \h, \h, #2
.ifc \type, put
urshr v4.8h, v4.8h, #4
urshr v5.8h, v5.8h, #4
urshr v6.8h, v6.8h, #4
urshr v7.8h, v7.8h, #4
.else
urshl v4.8h, v4.8h, v31.8h
urshl v5.8h, v5.8h, v31.8h
urshl v6.8h, v6.8h, v31.8h
urshl v7.8h, v7.8h, v31.8h
sub v4.8h, v4.8h, v29.8h
sub v5.8h, v5.8h, v29.8h
sub v6.8h, v6.8h, v29.8h
sub v7.8h, v7.8h, v29.8h
.endif
st1 {v4.8h, v5.8h}, [\dst], \d_strd
st1 {v6.8h, v7.8h}, [\ds2], \d_strd
b.le 9f
mov v16.16b, v20.16b
mov v17.16b, v21.16b
b 2b
9:
subs \w, \w, #16
b.le 0f
asr \s_strd, \s_strd, #1
asr \d_strd, \d_strd, #1
msub \src, \s_strd, \xmy, \src
msub \dst, \d_strd, \xmy, \dst
sub \src, \src, \s_strd, lsl #1
mov \h, \my
add \src, \src, #32
add \dst, \dst, #32
b 1b
0:
ret
endfunc
jumptable \type\()_bilin_v_tbl
.word 1280b - \type\()_bilin_v_tbl
.word 640b - \type\()_bilin_v_tbl
.word 320b - \type\()_bilin_v_tbl
.word 160b - \type\()_bilin_v_tbl
.word 80b - \type\()_bilin_v_tbl
.word 40b - \type\()_bilin_v_tbl
.word 20b - \type\()_bilin_v_tbl
endjumptable
function L(\type\()_bilin_hv)
movrel x10, \type\()_bilin_hv_tbl
dup v31.8h, w11 // 4 - intermediate_bits
ldrsw x9, [x10, x9, lsl #2]
neg v31.8h, v31.8h // -(4-intermediate_bits)
.ifc \type, put
dup v30.4s, w12 // 4 + intermediate_bits
.else
movi v29.8h, #(PREP_BIAS >> 8), lsl #8
.endif
add x10, x10, x9
.ifc \type, put
neg v30.4s, v30.4s // -(4+intermediate_bits)
.endif
br x10
20: // 2xN hv
AARCH64_VALID_JUMP_TARGET
.ifc \type, put
add \sr2, \src, \s_strd
add \ds2, \dst, \d_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
ld1 {v20.4h}, [\src], \s_strd
ext v21.8b, v20.8b, v20.8b, #2
mul v16.4h, v20.4h, v0.4h
mla v16.4h, v21.4h, v1.4h
urshl v16.4h, v16.4h, v31.4h
2:
ld1 {v22.4h}, [\sr2], \s_strd
ld1 {v24.4h}, [\src], \s_strd
ext v23.8b, v22.8b, v22.8b, #2
ext v25.8b, v24.8b, v24.8b, #2
trn1 v22.2s, v22.2s, v24.2s
trn1 v23.2s, v23.2s, v25.2s
mul v17.4h, v22.4h, v0.4h
mla v17.4h, v23.4h, v1.4h
urshl v17.4h, v17.4h, v31.4h
trn1 v16.2s, v16.2s, v17.2s
umull v4.4s, v16.4h, v2.4h
umlal v4.4s, v17.4h, v3.4h
urshl v4.4s, v4.4s, v30.4s
xtn v4.4h, v4.4s
subs \h, \h, #2
st1 {v4.s}[0], [\dst], \d_strd
st1 {v4.s}[1], [\ds2], \d_strd
b.le 0f
trn2 v16.2s, v17.2s, v17.2s
b 2b
0:
ret
.endif
40: // 4xN hv
AARCH64_VALID_JUMP_TARGET
add \sr2, \src, \s_strd
add \ds2, \dst, \d_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
ld1 {v20.8h}, [\src], \s_strd
ext v21.16b, v20.16b, v20.16b, #2
mul v16.4h, v20.4h, v0.4h
mla v16.4h, v21.4h, v1.4h
urshl v16.4h, v16.4h, v31.4h
4:
ld1 {v22.8h}, [\sr2], \s_strd
ld1 {v24.8h}, [\src], \s_strd
ext v23.16b, v22.16b, v22.16b, #2
ext v25.16b, v24.16b, v24.16b, #2
trn1 v22.2d, v22.2d, v24.2d
trn1 v23.2d, v23.2d, v25.2d
mul v17.8h, v22.8h, v0.8h
mla v17.8h, v23.8h, v1.8h
urshl v17.8h, v17.8h, v31.8h
trn1 v16.2d, v16.2d, v17.2d
umull v4.4s, v16.4h, v2.4h
umlal v4.4s, v17.4h, v3.4h
umull2 v5.4s, v16.8h, v2.8h
umlal2 v5.4s, v17.8h, v3.8h
.ifc \type, put
urshl v4.4s, v4.4s, v30.4s
urshl v5.4s, v5.4s, v30.4s
uzp1 v4.8h, v4.8h, v5.8h // Same as xtn, xtn2
.else
rshrn v4.4h, v4.4s, #4
rshrn2 v4.8h, v5.4s, #4
sub v4.8h, v4.8h, v29.8h
.endif
subs \h, \h, #2
st1 {v4.8b}, [\dst], \d_strd
st1 {v4.d}[1], [\ds2], \d_strd
b.le 0f
trn2 v16.2d, v17.2d, v17.2d
b 4b
0:
ret
80: // 8xN, 16xN, ... hv
160:
320:
640:
1280:
AARCH64_VALID_JUMP_TARGET
mov \my, \h
1:
add \sr2, \src, \s_strd
add \ds2, \dst, \d_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
ldr h21, [\src, #16]
ld1 {v20.8h}, [\src], \s_strd
ext v21.16b, v20.16b, v21.16b, #2
mul v16.8h, v20.8h, v0.8h
mla v16.8h, v21.8h, v1.8h
urshl v16.8h, v16.8h, v31.8h
2:
ldr h23, [\sr2, #16]
ld1 {v22.8h}, [\sr2], \s_strd
ldr h25, [\src, #16]
ld1 {v24.8h}, [\src], \s_strd
ext v23.16b, v22.16b, v23.16b, #2
ext v25.16b, v24.16b, v25.16b, #2
mul v17.8h, v22.8h, v0.8h
mla v17.8h, v23.8h, v1.8h
mul v18.8h, v24.8h, v0.8h
mla v18.8h, v25.8h, v1.8h
urshl v17.8h, v17.8h, v31.8h
urshl v18.8h, v18.8h, v31.8h
umull v4.4s, v16.4h, v2.4h
umlal v4.4s, v17.4h, v3.4h
umull2 v5.4s, v16.8h, v2.8h
umlal2 v5.4s, v17.8h, v3.8h
umull v6.4s, v17.4h, v2.4h
umlal v6.4s, v18.4h, v3.4h
umull2 v7.4s, v17.8h, v2.8h
umlal2 v7.4s, v18.8h, v3.8h
.ifc \type, put
urshl v4.4s, v4.4s, v30.4s
urshl v5.4s, v5.4s, v30.4s
urshl v6.4s, v6.4s, v30.4s
urshl v7.4s, v7.4s, v30.4s
uzp1 v4.8h, v4.8h, v5.8h // Same as xtn, xtn2
uzp1 v5.8h, v6.8h, v7.8h // Ditto
.else
rshrn v4.4h, v4.4s, #4
rshrn2 v4.8h, v5.4s, #4
rshrn v5.4h, v6.4s, #4
rshrn2 v5.8h, v7.4s, #4
sub v4.8h, v4.8h, v29.8h
sub v5.8h, v5.8h, v29.8h
.endif
subs \h, \h, #2
st1 {v4.8h}, [\dst], \d_strd
st1 {v5.8h}, [\ds2], \d_strd
b.le 9f
mov v16.16b, v18.16b
b 2b
9:
subs \w, \w, #8
b.le 0f
asr \s_strd, \s_strd, #1
asr \d_strd, \d_strd, #1
msub \src, \s_strd, \xmy, \src
msub \dst, \d_strd, \xmy, \dst
sub \src, \src, \s_strd, lsl #1
mov \h, \my
add \src, \src, #16
add \dst, \dst, #16
b 1b
0:
ret
endfunc
jumptable \type\()_bilin_hv_tbl
.word 1280b - \type\()_bilin_hv_tbl
.word 640b - \type\()_bilin_hv_tbl
.word 320b - \type\()_bilin_hv_tbl
.word 160b - \type\()_bilin_hv_tbl
.word 80b - \type\()_bilin_hv_tbl
.word 40b - \type\()_bilin_hv_tbl
.word 20b - \type\()_bilin_hv_tbl
endjumptable
.endm
make_8tap_fn put, regular_sharp, REGULAR, SHARP, 8tap
make_8tap_fn put, smooth_sharp, SMOOTH, SHARP, 8tap
make_8tap_fn put, sharp, SHARP, SHARP, 8tap
make_8tap_fn put, sharp_regular, SHARP, REGULAR, 8tap
make_8tap_fn put, sharp_smooth, SHARP, SMOOTH, 8tap
filter_fn put, x0, x1, x2, x3, w4, w5, w6, x6, w7, x7, w8, x9, x10, 8tap
make_8tap_fn put, regular, REGULAR, REGULAR, 6tap
make_8tap_fn put, regular_smooth, REGULAR, SMOOTH, 6tap
make_8tap_fn put, smooth, SMOOTH, SMOOTH, 6tap
make_8tap_fn put, smooth_regular, SMOOTH, REGULAR, 6tap
filter_fn put, x0, x1, x2, x3, w4, w5, w6, x6, w7, x7, w8, x9, x10, 6tap
filter_bilin_fn put, x0, x1, x2, x3, w4, w5, w6, x6, w7, x7, w8, x9, x10
make_8tap_fn prep, regular_sharp, REGULAR, SHARP, 8tap
make_8tap_fn prep, smooth_sharp, SMOOTH, SHARP, 8tap
make_8tap_fn prep, sharp, SHARP, SHARP, 8tap
make_8tap_fn prep, sharp_regular, SHARP, REGULAR, 8tap
make_8tap_fn prep, sharp_smooth, SHARP, SMOOTH, 8tap
filter_fn prep, x0, x8, x1, x2, w3, w4, w5, x5, w6, x6, w7, x9, x10, 8tap
make_8tap_fn prep, regular, REGULAR, REGULAR, 6tap
make_8tap_fn prep, regular_smooth, REGULAR, SMOOTH, 6tap
make_8tap_fn prep, smooth, SMOOTH, SMOOTH, 6tap
make_8tap_fn prep, smooth_regular, SMOOTH, REGULAR, 6tap
filter_fn prep, x0, x8, x1, x2, w3, w4, w5, x5, w6, x6, w7, x9, x10, 6tap
filter_bilin_fn prep, x0, x8, x1, x2, w3, w4, w5, x5, w6, x6, w7, x9, x10
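// load_filter_row: fetches one 8-tap warp filter (8 bytes) from
// mc_warp_filter, indexed by the top bits of the accumulated subpel position
// (src >> 10, relative to the 64*8 byte bias applied to x11 below), and then
// advances the position by `inc`.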
.macro load_filter_row dst, src, inc
asr w13, \src, #10
add \src, \src, \inc
ldr \dst, [x11, w13, sxtw #3]
.endm
function warp_filter_horz_neon
add w12, w5, #512
ld1 {v16.8h, v17.8h}, [x2], x3
load_filter_row d0, w12, w7
load_filter_row d1, w12, w7
load_filter_row d2, w12, w7
sxtl v0.8h, v0.8b
load_filter_row d3, w12, w7
sxtl v1.8h, v1.8b
load_filter_row d4, w12, w7
sxtl v2.8h, v2.8b
load_filter_row d5, w12, w7
sxtl v3.8h, v3.8b
load_filter_row d6, w12, w7
sxtl v4.8h, v4.8b
load_filter_row d7, w12, w7
sxtl v5.8h, v5.8b
ext v18.16b, v16.16b, v17.16b, #2*1
smull v8.4s, v16.4h, v0.4h
smull2 v9.4s, v16.8h, v0.8h
sxtl v6.8h, v6.8b
ext v19.16b, v16.16b, v17.16b, #2*2
smull v10.4s, v18.4h, v1.4h
smull2 v11.4s, v18.8h, v1.8h
sxtl v7.8h, v7.8b
ext v20.16b, v16.16b, v17.16b, #2*3
smull v0.4s, v19.4h, v2.4h
smull2 v1.4s, v19.8h, v2.8h
ext v21.16b, v16.16b, v17.16b, #2*4
addp v8.4s, v8.4s, v9.4s
smull v2.4s, v20.4h, v3.4h
smull2 v3.4s, v20.8h, v3.8h
ext v22.16b, v16.16b, v17.16b, #2*5
addp v9.4s, v10.4s, v11.4s
smull v10.4s, v21.4h, v4.4h
smull2 v11.4s, v21.8h, v4.8h
ext v23.16b, v16.16b, v17.16b, #2*6
addp v0.4s, v0.4s, v1.4s
smull v18.4s, v22.4h, v5.4h
smull2 v19.4s, v22.8h, v5.8h
ext v16.16b, v16.16b, v17.16b, #2*7
addp v1.4s, v2.4s, v3.4s
addp v2.4s, v10.4s, v11.4s
smull v20.4s, v23.4h, v6.4h
smull2 v21.4s, v23.8h, v6.8h
addp v3.4s, v18.4s, v19.4s
smull v22.4s, v16.4h, v7.4h
smull2 v23.4s, v16.8h, v7.8h
addp v4.4s, v20.4s, v21.4s
addp v5.4s, v22.4s, v23.4s
addp v8.4s, v8.4s, v9.4s
addp v0.4s, v0.4s, v1.4s
addp v2.4s, v2.4s, v3.4s
addp v4.4s, v4.4s, v5.4s
addp v16.4s, v8.4s, v0.4s
addp v17.4s, v2.4s, v4.4s
add w5, w5, w8
srshl v16.4s, v16.4s, v14.4s // -(7 - intermediate_bits)
srshl v17.4s, v17.4s, v14.4s // -(7 - intermediate_bits)
ret
endfunc
// void dav1d_warp_affine_8x8_16bpc_neon(
// pixel *dst, const ptrdiff_t dst_stride,
// const pixel *src, const ptrdiff_t src_stride,
// const int16_t *const abcd, int mx, int my,
// const int bitdepth_max)
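// As used below, the horizontal filter position advances by abcd[0] per
// output column and abcd[1] per row, and the vertical position by abcd[2]
// per column and abcd[3] per row; the +512 bias makes the >> 10 in
// load_filter_row round to nearest when selecting from mc_warp_filter.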
.macro warp t
function warp_affine_8x8\t\()_16bpc_neon, export=1
stp d8, d9, [sp, #-0x40]!
stp d10, d11, [sp, #0x10]
stp d12, d13, [sp, #0x20]
stp d14, d15, [sp, #0x30]
.ifb \t
dup v15.8h, w7 // bitdepth_max
.else
movi v15.8h, #(PREP_BIAS >> 8), lsl #8
.endif
clz w7, w7
// intermediate_bits = clz(bitdepth_max) - 18
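// e.g. 10 bpc: bitdepth_max = 0x3ff, clz = 22, intermediate_bits = 4;
// 12 bpc: bitdepth_max = 0xfff, clz = 20, intermediate_bits = 2.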
.ifb \t
sub w8, w7, #11 // 7 + intermediate_bits = clz(bitdepth_max) - 18 + 7
.endif
sub w7, w7, #25 // -(7 - intermediate_bits)
.ifb \t
neg w8, w8 // -(7 + intermediate_bits)
.endif
dup v14.4s, w7 // -(7 - intermediate_bits)
.ifb \t
dup v13.4s, w8 // -(7 + intermediate_bits)
.endif
ldr x4, [x4]
sbfx x7, x4, #0, #16
sbfx x8, x4, #16, #16
sbfx x9, x4, #32, #16
sbfx x4, x4, #48, #16
mov w10, #8
sub x2, x2, x3, lsl #1
sub x2, x2, x3
sub x2, x2, #6
movrel x11, X(mc_warp_filter), 64*8
mov x15, x30
.ifnb \t
lsl x1, x1, #1
.endif
bl warp_filter_horz_neon
uzp1 v24.8h, v16.8h, v17.8h // Same as xtn, xtn2
bl warp_filter_horz_neon
uzp1 v25.8h, v16.8h, v17.8h // Ditto
bl warp_filter_horz_neon
uzp1 v26.8h, v16.8h, v17.8h // Ditto
bl warp_filter_horz_neon
uzp1 v27.8h, v16.8h, v17.8h // Ditto
bl warp_filter_horz_neon
uzp1 v28.8h, v16.8h, v17.8h // Ditto
bl warp_filter_horz_neon
uzp1 v29.8h, v16.8h, v17.8h // Ditto
bl warp_filter_horz_neon
uzp1 v30.8h, v16.8h, v17.8h // Ditto
1:
add w14, w6, #512
bl warp_filter_horz_neon
uzp1 v31.8h, v16.8h, v17.8h // Same as xtn, xtn2
load_filter_row d0, w14, w9
load_filter_row d1, w14, w9
load_filter_row d2, w14, w9
load_filter_row d3, w14, w9
load_filter_row d4, w14, w9
load_filter_row d5, w14, w9
load_filter_row d6, w14, w9
load_filter_row d7, w14, w9
transpose_8x8b_xtl v0, v1, v2, v3, v4, v5, v6, v7, sxtl
// This ordering of smull/smlal/smull2/smlal2 is highly
// beneficial for Cortex A53 here.
smull v16.4s, v24.4h, v0.4h
smlal v16.4s, v25.4h, v1.4h
smlal v16.4s, v26.4h, v2.4h
smlal v16.4s, v27.4h, v3.4h
smlal v16.4s, v28.4h, v4.4h
smlal v16.4s, v29.4h, v5.4h
smlal v16.4s, v30.4h, v6.4h
smlal v16.4s, v31.4h, v7.4h
smull2 v17.4s, v24.8h, v0.8h
smlal2 v17.4s, v25.8h, v1.8h
smlal2 v17.4s, v26.8h, v2.8h
smlal2 v17.4s, v27.8h, v3.8h
smlal2 v17.4s, v28.8h, v4.8h
smlal2 v17.4s, v29.8h, v5.8h
smlal2 v17.4s, v30.8h, v6.8h
smlal2 v17.4s, v31.8h, v7.8h
mov v24.16b, v25.16b
mov v25.16b, v26.16b
.ifb \t
srshl v16.4s, v16.4s, v13.4s // -(7 + intermediate_bits)
srshl v17.4s, v17.4s, v13.4s // -(7 + intermediate_bits)
.else
rshrn v16.4h, v16.4s, #7
rshrn2 v16.8h, v17.4s, #7
.endif
mov v26.16b, v27.16b
.ifb \t
sqxtun v16.4h, v16.4s
sqxtun2 v16.8h, v17.4s
.else
sub v16.8h, v16.8h, v15.8h // PREP_BIAS
.endif
mov v27.16b, v28.16b
mov v28.16b, v29.16b
.ifb \t
umin v16.8h, v16.8h, v15.8h // bitdepth_max
.endif
mov v29.16b, v30.16b
mov v30.16b, v31.16b
subs w10, w10, #1
st1 {v16.8h}, [x0], x1
add w6, w6, w4
b.gt 1b
ldp d14, d15, [sp, #0x30]
ldp d12, d13, [sp, #0x20]
ldp d10, d11, [sp, #0x10]
ldp d8, d9, [sp], 0x40
ret x15
endfunc
.endm
warp
warp t
// void dav1d_emu_edge_16bpc_neon(
// const intptr_t bw, const intptr_t bh,
// const intptr_t iw, const intptr_t ih,
// const intptr_t x, const intptr_t y,
// pixel *dst, const ptrdiff_t dst_stride,
// const pixel *ref, const ptrdiff_t ref_stride)
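// Branch-free clamping idiom used throughout this function:
// `bic x, x, x, asr #63` clears x when it is negative (asr #63 yields an
// all-ones mask for negative values), i.e. it computes max(x, 0); together
// with the preceding cmp/csel pair this implements iclip(v, 0, max).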
function emu_edge_16bpc_neon, export=1
ldp x8, x9, [sp]
// ref += iclip(y, 0, ih - 1) * PXSTRIDE(ref_stride)
// ref += iclip(x, 0, iw - 1)
sub x12, x3, #1 // ih - 1
cmp x5, x3
sub x13, x2, #1 // iw - 1
csel x12, x12, x5, ge // min(y, ih - 1)
cmp x4, x2
bic x12, x12, x12, asr #63 // max(min(y, ih - 1), 0)
csel x13, x13, x4, ge // min(x, iw - 1)
bic x13, x13, x13, asr #63 // max(min(x, iw - 1), 0)
madd x8, x12, x9, x8 // ref += iclip() * stride
add x8, x8, x13, lsl #1 // ref += iclip()
// bottom_ext = iclip(y + bh - ih, 0, bh - 1)
// top_ext = iclip(-y, 0, bh - 1)
add x10, x5, x1 // y + bh
neg x5, x5 // -y
sub x10, x10, x3 // y + bh - ih
sub x12, x1, #1 // bh - 1
cmp x10, x1
bic x5, x5, x5, asr #63 // max(-y, 0)
csel x10, x10, x12, lt // min(y + bh - ih, bh-1)
cmp x5, x1
bic x10, x10, x10, asr #63 // max(min(y + bh - ih, bh-1), 0)
csel x5, x5, x12, lt // min(max(-y, 0), bh-1)
// right_ext = iclip(x + bw - iw, 0, bw - 1)
// left_ext = iclip(-x, 0, bw - 1)
add x11, x4, x0 // x + bw
neg x4, x4 // -x
sub x11, x11, x2 // x + bw - iw
sub x13, x0, #1 // bw - 1
cmp x11, x0
bic x4, x4, x4, asr #63 // max(-x, 0)
csel x11, x11, x13, lt // min(x + bw - iw, bw-1)
cmp x4, x0
bic x11, x11, x11, asr #63 // max(min(x + bw - iw, bw-1), 0)
csel x4, x4, x13, lt // min(max(-x, 0), bw - 1)
// center_h = bh - top_ext - bottom_ext
// dst += top_ext * PXSTRIDE(dst_stride)
// center_w = bw - left_ext - right_ext
sub x1, x1, x5 // bh - top_ext
madd x6, x5, x7, x6
sub x2, x0, x4 // bw - left_ext
sub x1, x1, x10 // center_h = bh - top_ext - bottom_ext
sub x2, x2, x11 // center_w = bw - left_ext - right_ext
mov x14, x6 // backup of dst
.macro v_loop need_left, need_right
0:
.if \need_left
ld1r {v0.8h}, [x8]
mov x12, x6 // out = dst
mov x3, x4
mov v1.16b, v0.16b
1:
subs x3, x3, #16
st1 {v0.8h, v1.8h}, [x12], #32
b.gt 1b
.endif
mov x13, x8
add x12, x6, x4, lsl #1 // out = dst + left_ext
mov x3, x2
1:
ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x13], #64
subs x3, x3, #32
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x12], #64
b.gt 1b
.if \need_right
add x3, x8, x2, lsl #1 // in + center_w
sub x3, x3, #2 // in + center_w - 1
add x12, x6, x4, lsl #1 // dst + left_ext
ld1r {v0.8h}, [x3]
add x12, x12, x2, lsl #1 // out = dst + left_ext + center_w
mov x3, x11
mov v1.16b, v0.16b
1:
subs x3, x3, #16
st1 {v0.8h, v1.8h}, [x12], #32
b.gt 1b
.endif
subs x1, x1, #1 // center_h--
add x6, x6, x7
add x8, x8, x9
b.gt 0b
.endm
cbz x4, 2f
// need_left
cbz x11, 3f
// need_left + need_right
v_loop 1, 1
b 5f
2:
// !need_left
cbz x11, 4f
// !need_left + need_right
v_loop 0, 1
b 5f
3:
// need_left + !need_right
v_loop 1, 0
b 5f
4:
// !need_left + !need_right
v_loop 0, 0
5:
cbz x10, 3f
// need_bottom
sub x8, x6, x7 // ref = dst - stride
mov x4, x0
1:
ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x8], #64
mov x3, x10
2:
subs x3, x3, #1
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x6], x7
b.gt 2b
msub x6, x7, x10, x6 // dst -= bottom_ext * stride
subs x4, x4, #32 // bw -= 32
add x6, x6, #64 // dst += 32
b.gt 1b
3:
cbz x5, 3f
// need_top
msub x6, x7, x5, x14 // dst = stored_dst - top_ext * stride
1:
ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x14], #64
mov x3, x5
2:
subs x3, x3, #1
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x6], x7
b.gt 2b
msub x6, x7, x5, x6 // dst -= top_ext * stride
subs x0, x0, #32 // bw -= 32
add x6, x6, #64 // dst += 32
b.gt 1b
3:
ret
endfunc
Admenri/urge | 64,820 | third_party/dav1d/src/arm/64/mc_dotprod.S
/*
* Copyright © 2024, VideoLAN and dav1d authors
* Copyright © 2024, Janne Grunau
* Copyright © 2024, Martin Storsjo
* Copyright © 2024, Arm Limited
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm.S"
#include "util.S"
#if HAVE_DOTPROD
ENABLE_DOTPROD
// No spaces in these expressions, due to gas-preprocessor. Each index is
// biased by -1 so that the `mx - 1` / `my - 1` offset is already folded in
// when these values are used to address `mc_subpel_filters`.
#define REGULAR1 (((0*15-1)<<7)|(3*15-1))
#define SMOOTH1 (((1*15-1)<<7)|(4*15-1))
#define SHARP1 (((2*15-1)<<7)|(3*15-1))
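// Worked example of how these constants are consumed further down (with
// w9 = REGULAR1 and w11 = 0x4081 = (1 << 14) | (1 << 7) | (1 << 0)):
//   madd mx, mx, w11, w9
// replicates mx into bits [0:6] and [7:13], giving
//   bits [0:6]  = 3*15 + mx - 1   (4-tap variant, chosen for dimensions <= 4)
//   bits [7:13] = 0*15 + mx - 1   (8-tap variant, chosen otherwise)
// i.e. ready-made row indices into mc_subpel_filters, later scaled by 8
// (one 8-byte filter per row) to form the final filter address.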
#define FUNC_ALIGN 2
#define JUMP_ALIGN 2
#define LOOP_ALIGN 2
const h_tbl_neon_dotprod, align=4
// Shuffle indices to permute horizontal samples in preparation for
// input to SDOT instructions. The 8-tap horizontal convolution uses
// sample indices in the interval of [-3, 4] relative to the current
// sample position.
.byte 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6
.byte 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10
.byte 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14
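// In effect, one TBL with these indices expands 16 source bytes into four
// overlapping 4-sample windows (start positions x+0..x+3), so a single
// SDOT/USDOT against four replicated filter taps accumulates taps 0-3 (or
// 4-7) for four adjacent output pixels at once.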
// Shuffle indices to permute horizontal samples in preparation for
// input to USMMLA instructions.
#define OFFSET_USMMLA 48
.byte 0, 1, 2, 3, 4, 5, 6, 7, 2, 3, 4, 5, 6, 7, 8, 9
.byte 4, 5, 6, 7, 8, 9, 10, 11, 6, 7, 8, 9, 10, 11, 12, 13
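// In effect (an informal reading of the 6-tap paths below), one TBL forms a
// 2x8 matrix of samples whose rows start at x and x+2; USMMLA multiplies it
// with the filter register, whose two halves hold the filter and a copy
// shifted by one sample, so the four 32-bit results are the outputs at
// x..x+3 from a single instruction.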
// Lookup table used to help conversion of shifted 32-bit values to 8-bit.
#define OFFSET_CVT_32_8 80
.byte 1, 2, 5, 6, 9, 10, 13, 14, 17, 18, 21, 22, 25, 26, 29, 30
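// Picks bytes 1-2 of every 32-bit lane across a pair of accumulators, i.e.
// a single TBL narrows (x >> 8) to 16 bits; the remaining shift and
// saturation are left to the following sqrshrun.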
endconst
const v_tbl_neon_dotprod, align=4
// Vertical convolutions are also using SDOT instructions, where a
// 128-bit register contains a transposed 4x4 matrix of values.
// Subsequent iterations of the vertical convolution can reuse the
// 3x4 sub-matrix from the previous loop iteration. These shuffle
// indices shift and merge this 4x4 matrix with the values of a new
// line.
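// In other words, each TBL drops the oldest sample of every 4-sample column
// and appends one sample from the newly loaded line, so only one new source
// row needs to be fetched per output row.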
.byte 1, 2, 3, 16, 5, 6, 7, 20, 9, 10, 11, 24, 13, 14, 15, 28
.byte 1, 2, 3, 16, 5, 6, 7, 17, 9, 10, 11, 18, 13, 14, 15, 19
.byte 1, 2, 3, 20, 5, 6, 7, 21, 9, 10, 11, 22, 13, 14, 15, 23
.byte 1, 2, 3, 24, 5, 6, 7, 25, 9, 10, 11, 26, 13, 14, 15, 27
.byte 1, 2, 3, 28, 5, 6, 7, 29, 9, 10, 11, 30, 13, 14, 15, 31
endconst
.macro make_8tap_fn op, type, type_h, type_v, isa, jump=1
function \op\()_8tap_\type\()_8bpc_\isa, export=1, align=FUNC_ALIGN
mov x9, \type_h
mov x10, \type_v
.if \jump
b \op\()_8tap_\isa
.endif
endfunc
.endm
.macro filter_8tap_fn type, dot, isa, dst, d_strd, src, s_strd, w, h, mx, my, xmx, xmy, ldst, lsrc, wd_strd
make_8tap_fn \type, sharp, SHARP1, SHARP1, \isa
make_8tap_fn \type, sharp_smooth, SHARP1, SMOOTH1, \isa
make_8tap_fn \type, sharp_regular, SHARP1, REGULAR1, \isa
make_8tap_fn \type, smooth_sharp, SMOOTH1, SHARP1, \isa
make_8tap_fn \type, smooth, SMOOTH1, SMOOTH1, \isa
make_8tap_fn \type, smooth_regular, SMOOTH1, REGULAR1, \isa
make_8tap_fn \type, regular_sharp, REGULAR1, SHARP1, \isa
make_8tap_fn \type, regular_smooth, REGULAR1, SMOOTH1, \isa
make_8tap_fn \type, regular, REGULAR1, REGULAR1, \isa, jump=0
function \type\()_8tap_\isa, align=FUNC_ALIGN
clz w8, \w
mov w11, #0x4081 // (1 << 14) | (1 << 7) | (1 << 0)
sub w8, w8, #24 // for jump tables
movrel x12, X(mc_subpel_filters)
cbnz \mx, L(\type\()_8tap_h_hv_\isa)
cbnz \my, L(\type\()_8tap_v_\isa)
.ifc \type, prep
add \wd_strd, \w, \w // prep_neon needs w * 2 as stride
.endif
b X(\type\()_neon)
.align JUMP_ALIGN
L(\type\()_8tap_v_\isa):
madd \my, \my, w11, w10
movrel x13, v_tbl_neon_dotprod
sub \src, \src, \s_strd
.ifc \isa, neon_dotprod
.ifc \type, prep
mov w8, #0x2002 // FILTER_WEIGHT * 128 + rounding
dup v4.4s, w8
.else
movi v4.4s, #32, lsl #8 // FILTER_WEIGHT * 128, bias for SDOT
.endif
.endif
ubfx w11, \my, #7, #7
and \my, \my, #0x7F
ldp q6, q28, [x13]
cmp \h, #4
csel \my, \my, w11, le
sub \src, \src, \s_strd, lsl #1 // src - s_strd * 3
add \xmy, x12, \xmy, lsl #3 // subpel V filter address
ldr q29, [x13, #32]
.ifc \isa, neon_dotprod
movi v5.16b, #128
.endif
ldr d7, [\xmy]
cmp \w, #8
b.eq 80f
b.lt 40f
// .align JUMP_ALIGN // fallthrough
160: // V - 16xN+
ldp q30, q31, [x13, #48]
.ifc \type, prep
add \wd_strd, \w, \w
.endif
.align LOOP_ALIGN
161:
mov \lsrc, \src
mov \ldst, \dst
sub w8, \h, #1
ldr q16, [\lsrc]
ldr q17, [\lsrc, \s_strd]
add \lsrc, \lsrc, \s_strd, lsl #1
ldr q18, [\lsrc]
ldr q19, [\lsrc, \s_strd]
add \lsrc, \lsrc, \s_strd, lsl #1
zip1 v0.16b, v16.16b, v17.16b
zip2 v1.16b, v16.16b, v17.16b
zip1 v2.16b, v18.16b, v19.16b
zip2 v3.16b, v18.16b, v19.16b
ldr q20, [\lsrc]
ldr q21, [\lsrc, \s_strd]
add \lsrc, \lsrc, \s_strd, lsl #1
ldr q22, [\lsrc]
ldr q23, [\lsrc, \s_strd]
add \lsrc, \lsrc, \s_strd, lsl #1
zip1 v18.16b, v20.16b, v21.16b
zip2 v21.16b, v20.16b, v21.16b
zip1 v24.16b, v22.16b, v23.16b
zip2 v27.16b, v22.16b, v23.16b
zip1 v16.8h, v0.8h, v2.8h
zip2 v19.8h, v0.8h, v2.8h
zip1 v22.8h, v1.8h, v3.8h
zip2 v25.8h, v1.8h, v3.8h
zip1 v17.8h, v18.8h, v24.8h
zip2 v20.8h, v18.8h, v24.8h
zip1 v23.8h, v21.8h, v27.8h
zip2 v26.8h, v21.8h, v27.8h
.ifc \isa, neon_dotprod
sub v16.16b, v16.16b, v5.16b
sub v19.16b, v19.16b, v5.16b
sub v22.16b, v22.16b, v5.16b
sub v25.16b, v25.16b, v5.16b
sub v17.16b, v17.16b, v5.16b
sub v20.16b, v20.16b, v5.16b
sub v23.16b, v23.16b, v5.16b
sub v26.16b, v26.16b, v5.16b
.endif
.align LOOP_ALIGN
16:
.ifc \isa, neon_i8mm
ld1 {v18.16b}, [\lsrc], \s_strd
movi v0.4s, #0
movi v1.4s, #0
movi v2.4s, #0
movi v3.4s, #0
mov v21.16b, v18.16b
mov v24.16b, v18.16b
mov v27.16b, v18.16b
.else // neon_dotprod
ld1 {v27.16b}, [\lsrc], \s_strd
mov v0.16b, v4.16b
mov v1.16b, v4.16b
mov v2.16b, v4.16b
mov v3.16b, v4.16b
sub v18.16b, v27.16b, v5.16b
sub v21.16b, v27.16b, v5.16b
sub v24.16b, v27.16b, v5.16b
sub v27.16b, v27.16b, v5.16b
.endif
\dot v0.4s, v16.16b, v7.4b[0]
\dot v1.4s, v19.16b, v7.4b[0]
\dot v2.4s, v22.16b, v7.4b[0]
\dot v3.4s, v25.16b, v7.4b[0]
tbl v16.16b, {v16.16b, v17.16b}, v6.16b
tbl v19.16b, {v19.16b, v20.16b}, v6.16b
tbl v22.16b, {v22.16b, v23.16b}, v6.16b
tbl v25.16b, {v25.16b, v26.16b}, v6.16b
\dot v0.4s, v17.16b, v7.4b[1]
\dot v1.4s, v20.16b, v7.4b[1]
\dot v2.4s, v23.16b, v7.4b[1]
\dot v3.4s, v26.16b, v7.4b[1]
tbl v17.16b, {v17.16b, v18.16b}, v28.16b
tbl v20.16b, {v20.16b, v21.16b}, v29.16b
tbl v23.16b, {v23.16b, v24.16b}, v30.16b
tbl v26.16b, {v26.16b, v27.16b}, v31.16b
subs w8, w8, #1
uzp1 v0.8h, v0.8h, v1.8h
uzp1 v2.8h, v2.8h, v3.8h
.ifc \type, prep
.ifc \isa, neon_i8mm
srshr v0.8h, v0.8h, #2
srshr v1.8h, v2.8h, #2
.else
sshr v0.8h, v0.8h, #2
sshr v1.8h, v2.8h, #2
.endif
st1 {v0.8h, v1.8h}, [\ldst], \d_strd
.else // put
sqrshrun v0.8b, v0.8h, #6
sqrshrun2 v0.16b, v2.8h, #6
st1 {v0.16b}, [\ldst], \d_strd
.endif
b.gt 16b
.ifc \isa, neon_i8mm
movi v0.4s, #0
movi v1.4s, #0
movi v2.4s, #0
movi v3.4s, #0
.else // neon_dotprod
mov v0.16b, v4.16b
mov v1.16b, v4.16b
mov v2.16b, v4.16b
mov v3.16b, v4.16b
.endif
\dot v0.4s, v16.16b, v7.4b[0]
\dot v1.4s, v19.16b, v7.4b[0]
\dot v2.4s, v22.16b, v7.4b[0]
\dot v3.4s, v25.16b, v7.4b[0]
\dot v0.4s, v17.16b, v7.4b[1]
\dot v1.4s, v20.16b, v7.4b[1]
\dot v2.4s, v23.16b, v7.4b[1]
\dot v3.4s, v26.16b, v7.4b[1]
subs \w, \w, #16
uzp1 v0.8h, v0.8h, v1.8h
uzp1 v2.8h, v2.8h, v3.8h
.ifc \type, prep
.ifc \isa, neon_i8mm
srshr v0.8h, v0.8h, #2
srshr v1.8h, v2.8h, #2
.else
sshr v0.8h, v0.8h, #2
sshr v1.8h, v2.8h, #2
.endif
stp q0, q1, [\ldst]
add \dst, \dst, #32
.else // put
sqrshrun v0.8b, v0.8h, #6
sqrshrun2 v0.16b, v2.8h, #6
str q0, [\ldst]
add \dst, \dst, #16
.endif
add \src, \src, #16
b.gt 161b
ret
.align JUMP_ALIGN
80: // V - 8xN
ldr d16, [\src]
ldr d17, [\src, \s_strd]
add \src, \src, \s_strd, lsl #1
ldr d18, [\src]
ldr d19, [\src, \s_strd]
add \src, \src, \s_strd, lsl #1
ldr d20, [\src]
ldr d21, [\src, \s_strd]
add \src, \src, \s_strd, lsl #1
ldr d22, [\src]
ldr d23, [\src, \s_strd]
add \src, \src, \s_strd, lsl #1
subs \h, \h, #2 // for prep: sub is enough
zip1 v0.16b, v16.16b, v17.16b
zip1 v2.16b, v18.16b, v19.16b
zip1 v18.16b, v20.16b, v21.16b
zip1 v24.16b, v22.16b, v23.16b
zip1 v16.8h, v0.8h, v2.8h
zip2 v19.8h, v0.8h, v2.8h
zip1 v17.8h, v18.8h, v24.8h
zip2 v20.8h, v18.8h, v24.8h
.ifc \isa, neon_dotprod
sub v16.16b, v16.16b, v5.16b
sub v19.16b, v19.16b, v5.16b
sub v17.16b, v17.16b, v5.16b
sub v20.16b, v20.16b, v5.16b
.endif
.ifc \type, put
b.eq 82f
.endif
.align LOOP_ALIGN
8:
.ifc \isa, neon_i8mm
ldr d18, [\src]
movi v0.4s, #0
movi v1.4s, #0
ldr d24, [\src, \s_strd]
add \src, \src, \s_strd, lsl #1
movi v2.4s, #0
movi v3.4s, #0
mov v21.8b, v18.8b
mov v27.8b, v24.8b
.else // neon_dotprod
ldr d21, [\src]
ldr d27, [\src, \s_strd]
add \src, \src, \s_strd, lsl #1
mov v0.16b, v4.16b
mov v1.16b, v4.16b
mov v2.16b, v4.16b
mov v3.16b, v4.16b
sub v18.16b, v21.16b, v5.16b
sub v21.16b, v21.16b, v5.16b
sub v24.16b, v27.16b, v5.16b
sub v27.16b, v27.16b, v5.16b
.endif
tbl v22.16b, {v16.16b, v17.16b}, v6.16b
tbl v25.16b, {v19.16b, v20.16b}, v6.16b
tbl v23.16b, {v17.16b, v18.16b}, v28.16b
tbl v26.16b, {v20.16b, v21.16b}, v29.16b
\dot v0.4s, v16.16b, v7.4b[0]
\dot v0.4s, v17.16b, v7.4b[1]
\dot v1.4s, v19.16b, v7.4b[0]
\dot v1.4s, v20.16b, v7.4b[1]
tbl v16.16b, {v22.16b, v23.16b}, v6.16b
tbl v19.16b, {v25.16b, v26.16b}, v6.16b
tbl v17.16b, {v23.16b, v24.16b}, v28.16b
tbl v20.16b, {v26.16b, v27.16b}, v29.16b
\dot v2.4s, v22.16b, v7.4b[0]
\dot v2.4s, v23.16b, v7.4b[1]
\dot v3.4s, v25.16b, v7.4b[0]
\dot v3.4s, v26.16b, v7.4b[1]
subs \h, \h, #2
uzp1 v0.8h, v0.8h, v1.8h
uzp1 v2.8h, v2.8h, v3.8h
.ifc \type, prep
.ifc \isa, neon_i8mm
srshr v0.8h, v0.8h, #2
srshr v1.8h, v2.8h, #2
.else
sshr v0.8h, v0.8h, #2
sshr v1.8h, v2.8h, #2
.endif
stp q0, q1, [\dst], #32
.else // put
sqrshrun v0.8b, v0.8h, #6
sqrshrun v1.8b, v2.8h, #6
str d0, [\dst]
str d1, [\dst, \d_strd]
add \dst, \dst, \d_strd, lsl #1
.endif
b.gt 8b
.ifc \type, put
.align JUMP_ALIGN
82:
.endif
.ifc \isa, neon_i8mm
ldr d18, [\src]
movi v0.4s, #0
movi v1.4s, #0
movi v2.4s, #0
movi v3.4s, #0
mov v21.8b, v18.8b
.else // neon_dotprod
ldr d21, [\src]
mov v0.16b, v4.16b
mov v1.16b, v4.16b
mov v2.16b, v4.16b
mov v3.16b, v4.16b
sub v18.16b, v21.16b, v5.16b
sub v21.16b, v21.16b, v5.16b
.endif
tbl v22.16b, {v16.16b, v17.16b}, v6.16b
tbl v25.16b, {v19.16b, v20.16b}, v6.16b
tbl v23.16b, {v17.16b, v18.16b}, v28.16b
tbl v26.16b, {v20.16b, v21.16b}, v29.16b
\dot v0.4s, v16.16b, v7.4b[0]
\dot v0.4s, v17.16b, v7.4b[1]
\dot v1.4s, v19.16b, v7.4b[0]
\dot v1.4s, v20.16b, v7.4b[1]
\dot v2.4s, v22.16b, v7.4b[0]
\dot v2.4s, v23.16b, v7.4b[1]
\dot v3.4s, v25.16b, v7.4b[0]
\dot v3.4s, v26.16b, v7.4b[1]
uzp1 v0.8h, v0.8h, v1.8h
uzp1 v2.8h, v2.8h, v3.8h
.ifc \type, prep
.ifc \isa, neon_i8mm
srshr v0.8h, v0.8h, #2
srshr v1.8h, v2.8h, #2
.else
sshr v0.8h, v0.8h, #2
sshr v1.8h, v2.8h, #2
.endif
stp q0, q1, [\dst]
.else // put
sqrshrun v0.8b, v0.8h, #6
sqrshrun v1.8b, v2.8h, #6
str d0, [\dst]
str d1, [\dst, \d_strd]
.endif
ret
.align JUMP_ALIGN
40: // V - 4xN or 2xN (put only)
.ifc \type, put
cmp \w, #2
b.eq 20f
.endif
ldr s16, [\src]
ldr s17, [\src, \s_strd]
add \src, \src, \s_strd, lsl #1
ldr s18, [\src]
ldr s19, [\src, \s_strd]
add \src, \src, \s_strd, lsl #1
ldr s20, [\src]
ldr s21, [\src, \s_strd]
add \src, \src, \s_strd, lsl #1
ldr s22, [\src]
ldr s23, [\src, \s_strd]
add \src, \src, \s_strd, lsl #1
subs \h, \h, #2 // for prep: sub is enough
zip1 v0.8b, v16.8b, v17.8b
zip1 v2.8b, v18.8b, v19.8b
zip1 v18.8b, v20.8b, v21.8b
zip1 v24.8b, v22.8b, v23.8b
zip1 v16.8h, v0.8h, v2.8h
zip1 v17.8h, v18.8h, v24.8h
.ifc \isa, neon_dotprod
sub v16.16b, v16.16b, v5.16b
sub v17.16b, v17.16b, v5.16b
.endif
.ifc \type, put
b.eq 42f
.endif
.align LOOP_ALIGN
4:
ldr s18, [\src]
ldr s21, [\src, \s_strd]
add \src, \src, \s_strd, lsl #1
.ifc \isa, neon_i8mm
movi v0.4s, #0
movi v1.4s, #0
.else // neon_dotprod
mov v0.16b, v4.16b
mov v1.16b, v4.16b
sub v18.16b, v18.16b, v5.16b
sub v21.16b, v21.16b, v5.16b
.endif
tbl v19.16b, {v16.16b, v17.16b}, v6.16b
tbl v20.16b, {v17.16b, v18.16b}, v28.16b
\dot v0.4s, v16.16b, v7.4b[0]
\dot v0.4s, v17.16b, v7.4b[1]
tbl v16.16b, {v19.16b, v20.16b}, v6.16b
tbl v17.16b, {v20.16b, v21.16b}, v28.16b
\dot v1.4s, v19.16b, v7.4b[0]
\dot v1.4s, v20.16b, v7.4b[1]
.ifc \type, prep
subs \h, \h, #2
.ifc \isa, neon_i8mm
rshrn v0.4h, v0.4s, #2
rshrn2 v0.8h, v1.4s, #2
.else
shrn v0.4h, v0.4s, #2
shrn2 v0.8h, v1.4s, #2
.endif
str q0, [\dst], #16
.else
uzp1 v0.8h, v0.8h, v1.8h
sqrshrun v0.8b, v0.8h, #6
subs \h, \h, #2
fmov x8, d0
lsr x9, x8, #32
str w8, [\dst]
str w9, [\dst, \d_strd]
add \dst, \dst, \d_strd, lsl #1
.endif
b.gt 4b
.ifc \type, put
.align JUMP_ALIGN
42:
.endif
ldr s18, [\src]
.ifc \isa, neon_i8mm
movi v0.4s, #0
movi v1.4s, #0
.else // neon_dotprod
mov v0.16b, v4.16b
mov v1.16b, v4.16b
sub v18.16b, v18.16b, v5.16b
.endif
tbl v19.16b, {v16.16b, v17.16b}, v6.16b
tbl v20.16b, {v17.16b, v18.16b}, v28.16b
\dot v0.4s, v16.16b, v7.4b[0]
\dot v0.4s, v17.16b, v7.4b[1]
\dot v1.4s, v19.16b, v7.4b[0]
\dot v1.4s, v20.16b, v7.4b[1]
.ifc \type, prep
.ifc \isa, neon_i8mm
rshrn v0.4h, v0.4s, #2
rshrn2 v0.8h, v1.4s, #2
.else
shrn v0.4h, v0.4s, #2
shrn2 v0.8h, v1.4s, #2
.endif
str q0, [\dst]
.else
uzp1 v0.8h, v0.8h, v1.8h
sqrshrun v0.8b, v0.8h, #6
fmov x8, d0
lsr x9, x8, #32
str w8, [\dst]
str w9, [\dst, \d_strd]
.endif
ret
.ifc \type, put
.align JUMP_ALIGN
20: // V - 2xN
ldr h16, [\src]
ldr h17, [\src, \s_strd]
add \src, \src, \s_strd, lsl #1
ldr h18, [\src]
ldr h19, [\src, \s_strd]
add \src, \src, \s_strd, lsl #1
ldr h20, [\src]
ldr h21, [\src, \s_strd]
add \src, \src, \s_strd, lsl #1
ldr h22, [\src]
ldr h23, [\src, \s_strd]
add \src, \src, \s_strd, lsl #1
subs \h, \h, #2
zip1 v0.8b, v16.8b, v17.8b
zip1 v2.8b, v18.8b, v19.8b
zip1 v18.8b, v20.8b, v21.8b
zip1 v24.8b, v22.8b, v23.8b
zip1 v16.4h, v0.4h, v2.4h
zip1 v17.4h, v18.4h, v24.4h
.ifc \isa, neon_dotprod
sub v16.8b, v16.8b, v5.8b
sub v17.8b, v17.8b, v5.8b
.endif
b.eq 22f
.align LOOP_ALIGN
2:
ldr h18, [\src]
ldr h21, [\src, \s_strd]
add \src, \src, \s_strd, lsl #1
.ifc \isa, neon_i8mm
movi v0.4s, #0
movi v1.4s, #0
.else // put
mov v0.16b, v4.16b
mov v1.16b, v4.16b
sub v18.8b, v18.8b, v5.8b
sub v21.8b, v21.8b, v5.8b
.endif
tbl v19.16b, {v16.16b, v17.16b}, v6.16b
tbl v20.16b, {v17.16b, v18.16b}, v28.16b
\dot v0.4s, v16.16b, v7.4b[0]
\dot v0.4s, v17.16b, v7.4b[1]
tbl v16.16b, {v19.16b, v20.16b}, v6.16b
tbl v17.16b, {v20.16b, v21.16b}, v28.16b
\dot v1.4s, v19.16b, v7.4b[0]
\dot v1.4s, v20.16b, v7.4b[1]
uzp1 v0.8h, v0.8h, v1.8h
sqrshrun v0.8b, v0.8h, #6
subs \h, \h, #2
fmov x8, d0
lsr x9, x8, #32
strh w8, [\dst]
strh w9, [\dst, \d_strd]
add \dst, \dst, \d_strd, lsl #1
b.gt 2b
.align JUMP_ALIGN
22:
ldr h18, [\src]
.ifc \isa, neon_i8mm
movi v0.4s, #0
movi v1.4s, #0
.else // put
mov v0.16b, v4.16b
mov v1.16b, v4.16b
sub v18.8b, v18.8b, v5.8b
.endif
tbl v19.16b, {v16.16b, v17.16b}, v6.16b
tbl v20.16b, {v17.16b, v18.16b}, v28.16b
\dot v0.4s, v16.16b, v7.4b[0]
\dot v0.4s, v17.16b, v7.4b[1]
\dot v1.4s, v19.16b, v7.4b[0]
\dot v1.4s, v20.16b, v7.4b[1]
uzp1 v0.8h, v0.8h, v1.8h
sqrshrun v0.8b, v0.8h, #6
fmov x8, d0
lsr x9, x8, #32
strh w8, [\dst]
strh w9, [\dst, \d_strd]
ret
.endif
.align JUMP_ALIGN
L(\type\()_8tap_h_hv_\isa):
madd \mx, \mx, w11, w9
madd w14, \my, w11, w10 // for HV
.ifc \isa, neon_dotprod
mov w13, #0x2002 // FILTER_WEIGHT * 128 + rounding
dup v27.4s, w13 // put H overrides this
.endif
movrel x13, h_tbl_neon_dotprod
sub \src, \src, #3 // src - 3
ldr q28, [x13] // for 4-tap & 8-tap H filters
ubfx w15, \mx, #7, #7
and \mx, \mx, #0x7F
ubfx w11, w14, #7, #7 // for HV
and w14, w14, #0x7F // for HV
cmp \w, #4
csel \mx, \mx, w15, le
add \xmx, x12, \xmx, lsl #3 // subpel H filter address
.ifc \isa, neon_dotprod
movi v24.16b, #128
.endif
cbz \my, L(\type\()_8tap_h_\isa)
// HV cases
cmp \h, #4
csel w14, w14, w11, le
sub \src, \src, \s_strd, lsl #1 // src - s_strd * 2 - 3
add \xmy, x12, x14, lsl #3 // subpel V filter address
mov x15, x30
ldr d7, [\xmy]
.ifc \type, put
ldr q25, [x13, #(OFFSET_CVT_32_8)] // LUT to help conversion
.endif // of 32b values to 8b
sxtl v7.8h, v7.8b
cmp w10, #SHARP1
b.ne L(\type\()_6tap_hv_\isa) // vertical != SHARP1
// HV 8-tap cases
sub \src, \src, \s_strd // src - s_strd * 3 - 3
cmp \w, #4
b.eq 40f
.ifc \type, put
b.lt 20f
.endif
// .align JUMP_ALIGN // fallthrough
80: // HV8 - 8xN+
ldp q29, q30, [x13, #16]
ldr d26, [\xmx]
.ifc \type, prep
add \wd_strd, \w, \w
.endif
.align LOOP_ALIGN
81:
mov \lsrc, \src
mov \ldst, \dst
mov w8, \h
.ifc \isa, neon_i8mm
bl L(\type\()_hv_filter8_\isa)
srshr v16.8h, v22.8h, #2
bl L(\type\()_hv_filter8_\isa)
srshr v17.8h, v22.8h, #2
bl L(\type\()_hv_filter8_\isa)
srshr v18.8h, v22.8h, #2
bl L(\type\()_hv_filter8_\isa)
srshr v19.8h, v22.8h, #2
bl L(\type\()_hv_filter8_\isa)
srshr v20.8h, v22.8h, #2
bl L(\type\()_hv_filter8_\isa)
srshr v21.8h, v22.8h, #2
bl L(\type\()_hv_filter8_\isa)
srshr v22.8h, v22.8h, #2
.else
bl L(\type\()_hv_filter8_\isa)
sshr v16.8h, v22.8h, #2
bl L(\type\()_hv_filter8_\isa)
sshr v17.8h, v22.8h, #2
bl L(\type\()_hv_filter8_\isa)
sshr v18.8h, v22.8h, #2
bl L(\type\()_hv_filter8_\isa)
sshr v19.8h, v22.8h, #2
bl L(\type\()_hv_filter8_\isa)
sshr v20.8h, v22.8h, #2
bl L(\type\()_hv_filter8_\isa)
sshr v21.8h, v22.8h, #2
bl L(\type\()_hv_filter8_\isa)
sshr v22.8h, v22.8h, #2
.endif
.align LOOP_ALIGN
8:
ldr q23, [\lsrc]
add \lsrc, \lsrc, \s_strd
smull v0.4s, v16.4h, v7.h[0]
smull2 v1.4s, v16.8h, v7.h[0]
mov v16.16b, v17.16b
.ifc \isa, neon_i8mm
movi v5.4s, #0
movi v6.4s, #0
tbl v2.16b, {v23.16b}, v28.16b
tbl v3.16b, {v23.16b}, v29.16b
.else // neon_dotprod
sub v23.16b, v23.16b, v24.16b
mov v5.16b, v27.16b
mov v6.16b, v27.16b
.endif
smlal v0.4s, v17.4h, v7.h[1]
smlal2 v1.4s, v17.8h, v7.h[1]
.ifc \isa, neon_i8mm
tbl v4.16b, {v23.16b}, v30.16b
mov v17.16b, v18.16b
.else // neon_dotprod
mov v17.16b, v18.16b
tbl v2.16b, {v23.16b}, v28.16b
tbl v3.16b, {v23.16b}, v29.16b
tbl v4.16b, {v23.16b}, v30.16b
.endif
smlal v0.4s, v18.4h, v7.h[2]
smlal2 v1.4s, v18.8h, v7.h[2]
mov v18.16b, v19.16b
\dot v5.4s, v2.16b, v26.4b[0]
\dot v6.4s, v3.16b, v26.4b[0]
smlal v0.4s, v19.4h, v7.h[3]
smlal2 v1.4s, v19.8h, v7.h[3]
mov v19.16b, v20.16b
\dot v5.4s, v3.16b, v26.4b[1]
\dot v6.4s, v4.16b, v26.4b[1]
smlal v0.4s, v20.4h, v7.h[4]
smlal2 v1.4s, v20.8h, v7.h[4]
mov v20.16b, v21.16b
smlal v0.4s, v21.4h, v7.h[5]
smlal2 v1.4s, v21.8h, v7.h[5]
.ifc \type, prep
uzp1 v23.8h, v5.8h, v6.8h
.endif
mov v21.16b, v22.16b
smlal v0.4s, v22.4h, v7.h[6]
smlal2 v1.4s, v22.8h, v7.h[6]
.ifc \isa, neon_i8mm
subs w8, w8, #1
.endif
.ifc \type, prep
.ifc \isa, neon_i8mm
srshr v22.8h, v23.8h, #2
.else
sshr v22.8h, v23.8h, #2
.endif
smlal v0.4s, v22.4h, v7.h[7]
smlal2 v1.4s, v22.8h, v7.h[7]
rshrn v0.4h, v0.4s, #6
rshrn2 v0.8h, v1.4s, #6
.else // put
.ifc \isa, neon_i8mm
rshrn v22.4h, v5.4s, #2
rshrn2 v22.8h, v6.4s, #2
.else
shrn v22.4h, v5.4s, #2
shrn2 v22.8h, v6.4s, #2
.endif
smlal v0.4s, v22.4h, v7.h[7]
smlal2 v1.4s, v22.8h, v7.h[7]
tbl v0.16b, {v0.16b, v1.16b}, v25.16b
sqrshrun v0.8b, v0.8h, #2
.endif
.ifc \isa, neon_dotprod
subs w8, w8, #1
.endif
.ifc \type, prep
st1 {v0.8h}, [\ldst], \d_strd
b.gt 8b
add \dst, \dst, #16
.else
st1 {v0.8b}, [\ldst], \d_strd
b.gt 8b
add \dst, \dst, #8
.endif
add \src, \src, #8
subs \w, \w, #8
b.gt 81b
ret x15
.align JUMP_ALIGN
40: // HV8 - 4xN
ldur s26, [\xmx, #2]
add \src, \src, #2
bl L(\type\()_hv_filter4_\isa)
shrn v16.4h, v22.4s, #2
bl L(\type\()_hv_filter4_\isa)
shrn v17.4h, v22.4s, #2
bl L(\type\()_hv_filter4_\isa)
shrn v18.4h, v22.4s, #2
bl L(\type\()_hv_filter4_\isa)
shrn v19.4h, v22.4s, #2
bl L(\type\()_hv_filter4_\isa)
shrn v20.4h, v22.4s, #2
bl L(\type\()_hv_filter4_\isa)
shrn v21.4h, v22.4s, #2
bl L(\type\()_hv_filter4_\isa)
shrn v22.4h, v22.4s, #2
.align LOOP_ALIGN
4:
ld1 {v4.8b}, [\src], \s_strd
smull v0.4s, v16.4h, v7.h[0]
smlal v0.4s, v17.4h, v7.h[1]
mov v16.16b, v17.16b
mov v17.16b, v18.16b
.ifc \isa, neon_dotprod
sub v4.16b, v4.16b, v24.16b
.endif
smlal v0.4s, v18.4h, v7.h[2]
smlal v0.4s, v19.4h, v7.h[3]
tbl v2.16b, {v4.16b}, v28.16b
.ifc \isa, neon_i8mm
movi v5.4s, #0
.else
mov v5.16b, v27.16b
.endif
mov v18.16b, v19.16b
mov v19.16b, v20.16b
smlal v0.4s, v20.4h, v7.h[4]
smlal v0.4s, v21.4h, v7.h[5]
\dot v5.4s, v2.16b, v26.4b[0]
mov v20.16b, v21.16b
mov v21.16b, v22.16b
smlal v0.4s, v22.4h, v7.h[6]
.ifc \isa, neon_i8mm
rshrn v22.4h, v5.4s, #2
.else
shrn v22.4h, v5.4s, #2
.endif
smlal v0.4s, v22.4h, v7.h[7]
.ifc \type, prep
rshrn v0.4h, v0.4s, #6
str d0, [\dst], #8
subs \h, \h, #1
.else
subs \h, \h, #1
tbl v0.8b, {v0.16b}, v25.8b
sqrshrun v0.8b, v0.8h, #2
str s0, [\dst]
add \dst, \dst, \d_strd
.endif
b.gt 4b
ret x15
.ifc \type, put
.align JUMP_ALIGN
20: // HV8 - 2xN
ldur s26, [\xmx, #2]
add \src, \src, #2
bl L(\type\()_hv_filter4_\isa)
shrn v16.4h, v22.4s, #2
bl L(\type\()_hv_filter4_\isa)
shrn v17.4h, v22.4s, #2
bl L(\type\()_hv_filter4_\isa)
shrn v18.4h, v22.4s, #2
bl L(\type\()_hv_filter4_\isa)
shrn v19.4h, v22.4s, #2
bl L(\type\()_hv_filter4_\isa)
shrn v20.4h, v22.4s, #2
bl L(\type\()_hv_filter4_\isa)
shrn v21.4h, v22.4s, #2
bl L(\type\()_hv_filter4_\isa)
shrn v22.4h, v22.4s, #2
.align LOOP_ALIGN
2:
ld1 {v4.8b}, [\src], \s_strd
smull v0.4s, v16.4h, v7.h[0]
smlal v0.4s, v17.4h, v7.h[1]
mov v16.16b, v17.16b
mov v17.16b, v18.16b
.ifc \isa, neon_dotprod
sub v4.16b, v4.16b, v24.16b
.endif
smlal v0.4s, v18.4h, v7.h[2]
smlal v0.4s, v19.4h, v7.h[3]
tbl v2.16b, {v4.16b}, v28.16b
.ifc \isa, neon_i8mm
movi v5.4s, #0
.else
mov v5.16b, v27.16b
.endif
mov v18.16b, v19.16b
mov v19.16b, v20.16b
smlal v0.4s, v20.4h, v7.h[4]
smlal v0.4s, v21.4h, v7.h[5]
\dot v5.4s, v2.16b, v26.4b[0]
mov v20.16b, v21.16b
mov v21.16b, v22.16b
smlal v0.4s, v22.4h, v7.h[6]
.ifc \isa, neon_i8mm
rshrn v22.4h, v5.4s, #2
.else
shrn v22.4h, v5.4s, #2
.endif
smlal v0.4s, v22.4h, v7.h[7]
subs \h, \h, #1
tbl v0.8b, {v0.16b}, v25.8b
sqrshrun v0.8b, v0.8h, #2
str h0, [\dst]
add \dst, \dst, \d_strd
b.gt 2b
ret x15
.endif
.align JUMP_ALIGN
L(\type\()_6tap_hv_\isa):
cmp \w, #4
b.eq 40f
.ifc \type, put
b.lt 20f
.endif
// .align JUMP_ALIGN // fallthrough
80: // HV6 - 8xN+
ldr d26, [\xmx]
.ifc \type, prep
add \wd_strd, \w, \w
.endif
.ifc \isa, neon_i8mm
cmp w9, #SHARP1
b.eq 88f // horizontal == SHARP1
ldp q29, q30, [x13, #(OFFSET_USMMLA)]
ext v0.8b, v26.8b, v26.8b, #7
ins v26.d[1], v0.d[0]
.align LOOP_ALIGN
81:
mov \lsrc, \src
mov \ldst, \dst
mov w8, \h
bl L(\type\()_hv_filter6_neon_i8mm)
srshr v16.8h, v22.8h, #2
bl L(\type\()_hv_filter6_neon_i8mm)
srshr v17.8h, v22.8h, #2
bl L(\type\()_hv_filter6_neon_i8mm)
srshr v18.8h, v22.8h, #2
bl L(\type\()_hv_filter6_neon_i8mm)
srshr v19.8h, v22.8h, #2
bl L(\type\()_hv_filter6_neon_i8mm)
srshr v20.8h, v22.8h, #2
.align LOOP_ALIGN
8:
ld1 {v23.16b}, [\lsrc], \s_strd
smull v0.4s, v16.4h, v7.h[1]
smull2 v1.4s, v16.8h, v7.h[1]
mov v16.16b, v17.16b
movi v5.4s, #0
movi v6.4s, #0
tbl v2.16b, {v23.16b}, v29.16b
tbl v3.16b, {v23.16b}, v30.16b
smlal v0.4s, v17.4h, v7.h[2]
smlal2 v1.4s, v17.8h, v7.h[2]
mov v17.16b, v18.16b
usmmla v5.4s, v2.16b, v26.16b
usmmla v6.4s, v3.16b, v26.16b
smlal v0.4s, v18.4h, v7.h[3]
smlal2 v1.4s, v18.8h, v7.h[3]
mov v18.16b, v19.16b
subs w8, w8, #1
smlal v0.4s, v19.4h, v7.h[4]
smlal2 v1.4s, v19.8h, v7.h[4]
uzp1 v23.8h, v5.8h, v6.8h
mov v19.16b, v20.16b
smlal v0.4s, v20.4h, v7.h[5]
smlal2 v1.4s, v20.8h, v7.h[5]
srshr v20.8h, v23.8h, #2
smlal v0.4s, v20.4h, v7.h[6]
smlal2 v1.4s, v20.8h, v7.h[6]
.ifc \type, prep
rshrn v0.4h, v0.4s, #6
rshrn2 v0.8h, v1.4s, #6
st1 {v0.8h}, [\ldst], \d_strd
b.gt 8b
add \dst, \dst, #16
.else
tbl v0.16b, {v0.16b, v1.16b}, v25.16b
sqrshrun v0.8b, v0.8h, #2
st1 {v0.8b}, [\ldst], \d_strd
b.gt 8b
add \dst, \dst, #8
.endif
add \src, \src, #8
subs \w, \w, #8
b.gt 81b
ret x15
.align JUMP_ALIGN
88:
.endif // neon_i8mm
ldp q29, q30, [x13, #16]
.align LOOP_ALIGN
81:
mov \lsrc, \src
mov \ldst, \dst
mov w8, \h
.ifc \isa, neon_i8mm
bl L(\type\()_hv_filter8_\isa)
srshr v16.8h, v22.8h, #2
bl L(\type\()_hv_filter8_\isa)
srshr v17.8h, v22.8h, #2
bl L(\type\()_hv_filter8_\isa)
srshr v18.8h, v22.8h, #2
bl L(\type\()_hv_filter8_\isa)
srshr v19.8h, v22.8h, #2
bl L(\type\()_hv_filter8_\isa)
srshr v20.8h, v22.8h, #2
.else
bl L(\type\()_hv_filter8_\isa)
sshr v16.8h, v22.8h, #2
bl L(\type\()_hv_filter8_\isa)
sshr v17.8h, v22.8h, #2
bl L(\type\()_hv_filter8_\isa)
sshr v18.8h, v22.8h, #2
bl L(\type\()_hv_filter8_\isa)
sshr v19.8h, v22.8h, #2
bl L(\type\()_hv_filter8_\isa)
sshr v20.8h, v22.8h, #2
.endif
.align LOOP_ALIGN
8:
ldr q23, [\lsrc]
add \lsrc, \lsrc, \s_strd
smull v0.4s, v16.4h, v7.h[1]
smull2 v1.4s, v16.8h, v7.h[1]
.ifc \isa, neon_dotprod
sub v23.16b, v23.16b, v24.16b
.endif
mov v16.16b, v17.16b
.ifc \isa, neon_i8mm
movi v5.4s, #0
movi v6.4s, #0
.else
mov v5.16b, v27.16b
mov v6.16b, v27.16b
.endif
tbl v2.16b, {v23.16b}, v28.16b
tbl v3.16b, {v23.16b}, v29.16b
smlal v0.4s, v17.4h, v7.h[2]
smlal2 v1.4s, v17.8h, v7.h[2]
tbl v4.16b, {v23.16b}, v30.16b
mov v17.16b, v18.16b
\dot v5.4s, v2.16b, v26.4b[0]
\dot v6.4s, v3.16b, v26.4b[0]
smlal v0.4s, v18.4h, v7.h[3]
smlal2 v1.4s, v18.8h, v7.h[3]
mov v18.16b, v19.16b
\dot v5.4s, v3.16b, v26.4b[1]
\dot v6.4s, v4.16b, v26.4b[1]
smlal v0.4s, v19.4h, v7.h[4]
smlal2 v1.4s, v19.8h, v7.h[4]
mov v19.16b, v20.16b
uzp1 v23.8h, v5.8h, v6.8h
smlal v0.4s, v20.4h, v7.h[5]
smlal2 v1.4s, v20.8h, v7.h[5]
.ifc \isa, neon_i8mm
srshr v20.8h, v23.8h, #2
.else
sshr v20.8h, v23.8h, #2
.endif
subs w8, w8, #1
smlal v0.4s, v20.4h, v7.h[6]
smlal2 v1.4s, v20.8h, v7.h[6]
.ifc \type, prep
rshrn v0.4h, v0.4s, #6
rshrn2 v0.8h, v1.4s, #6
st1 {v0.8h}, [\ldst], \d_strd
b.gt 8b
add \dst, \dst, #16
.else
tbl v0.16b, {v0.16b, v1.16b}, v25.16b
sqrshrun v0.8b, v0.8h, #2
st1 {v0.8b}, [\ldst], \d_strd
b.gt 8b
add \dst, \dst, #8
.endif
add \src, \src, #8
subs \w, \w, #8
b.gt 81b
ret x15
.align FUNC_ALIGN
L(\type\()_hv_filter8_\isa):
ld1 {v4.16b}, [\lsrc], \s_strd
.ifc \isa, neon_i8mm
movi v22.4s, #0
movi v23.4s, #0
.else // neon_dotprod
sub v4.16b, v4.16b, v24.16b
mov v22.16b, v27.16b
mov v23.16b, v27.16b
.endif
tbl v2.16b, {v4.16b}, v28.16b
tbl v3.16b, {v4.16b}, v29.16b
tbl v4.16b, {v4.16b}, v30.16b
\dot v22.4s, v2.16b, v26.4b[0]
\dot v23.4s, v3.16b, v26.4b[0]
\dot v22.4s, v3.16b, v26.4b[1]
\dot v23.4s, v4.16b, v26.4b[1]
uzp1 v22.8h, v22.8h, v23.8h
ret
.ifc \isa, neon_i8mm
.align FUNC_ALIGN
L(\type\()_hv_filter6_neon_i8mm):
ld1 {v4.16b}, [\lsrc], \s_strd
movi v22.4s, #0
movi v23.4s, #0
tbl v2.16b, {v4.16b}, v29.16b
tbl v3.16b, {v4.16b}, v30.16b
usmmla v22.4s, v2.16b, v26.16b
usmmla v23.4s, v3.16b, v26.16b
uzp1 v22.8h, v22.8h, v23.8h
ret
.endif
.align FUNC_ALIGN
L(\type\()_hv_filter4_\isa):
ld1 {v4.8b}, [\src], \s_strd
.ifc \isa, neon_i8mm
movi v22.4s, #2
.else
mov v22.16b, v27.16b
sub v4.16b, v4.16b, v24.16b
.endif
tbl v2.16b, {v4.16b}, v28.16b
\dot v22.4s, v2.16b, v26.4b[0]
ret
.align JUMP_ALIGN
40: // HV6 - 4xN
ldur s26, [\xmx, #2]
add \src, \src, #2
bl L(\type\()_hv_filter4_\isa)
shrn v16.4h, v22.4s, #2
bl L(\type\()_hv_filter4_\isa)
shrn v17.4h, v22.4s, #2
bl L(\type\()_hv_filter4_\isa)
shrn v18.4h, v22.4s, #2
bl L(\type\()_hv_filter4_\isa)
shrn v19.4h, v22.4s, #2
bl L(\type\()_hv_filter4_\isa)
shrn v20.4h, v22.4s, #2
.align LOOP_ALIGN
4:
ld1 {v4.8b}, [\src], \s_strd
smull v0.4s, v16.4h, v7.h[1]
smlal v0.4s, v17.4h, v7.h[2]
.ifc \isa, neon_dotprod
sub v4.16b, v4.16b, v24.16b
.endif
mov v16.16b, v17.16b
mov v17.16b, v18.16b
smlal v0.4s, v18.4h, v7.h[3]
smlal v0.4s, v19.4h, v7.h[4]
tbl v2.16b, {v4.16b}, v28.16b
.ifc \isa, neon_i8mm
movi v5.4s, #0
.else
mov v5.16b, v27.16b
.endif
mov v18.16b, v19.16b
mov v19.16b, v20.16b
\dot v5.4s, v2.16b, v26.4b[0]
smlal v0.4s, v20.4h, v7.h[5]
.ifc \isa, neon_i8mm
rshrn v20.4h, v5.4s, #2
.else
shrn v20.4h, v5.4s, #2
.endif
subs \h, \h, #1
smlal v0.4s, v20.4h, v7.h[6]
.ifc \type, prep
rshrn v0.4h, v0.4s, #6
str d0, [\dst], #8
.else
tbl v0.8b, {v0.16b}, v25.8b
sqrshrun v0.8b, v0.8h, #2
str s0, [\dst]
add \dst, \dst, \d_strd
.endif
b.gt 4b
ret x15
.ifc \type, put
.align JUMP_ALIGN
20: // HV6 - 2xN
ldur s26, [\xmx, #2]
add \src, \src, #2
bl L(\type\()_hv_filter4_\isa)
shrn v16.4h, v22.4s, #2
bl L(\type\()_hv_filter4_\isa)
shrn v17.4h, v22.4s, #2
bl L(\type\()_hv_filter4_\isa)
shrn v18.4h, v22.4s, #2
bl L(\type\()_hv_filter4_\isa)
shrn v19.4h, v22.4s, #2
bl L(\type\()_hv_filter4_\isa)
shrn v20.4h, v22.4s, #2
.align LOOP_ALIGN
2:
ld1 {v4.8b}, [\src], \s_strd
smull v0.4s, v16.4h, v7.h[1]
smlal v0.4s, v17.4h, v7.h[2]
.ifc \isa, neon_dotprod
sub v4.16b, v4.16b, v24.16b
.endif
mov v16.16b, v17.16b
mov v17.16b, v18.16b
smlal v0.4s, v18.4h, v7.h[3]
smlal v0.4s, v19.4h, v7.h[4]
tbl v2.16b, {v4.16b}, v28.16b
.ifc \isa, neon_i8mm
movi v5.4s, #0
.else
mov v5.16b, v27.16b
.endif
mov v18.16b, v19.16b
mov v19.16b, v20.16b
\dot v5.4s, v2.16b, v26.4b[0]
smlal v0.4s, v20.4h, v7.h[5]
.ifc \isa, neon_i8mm
rshrn v20.4h, v5.4s, #2
.else
shrn v20.4h, v5.4s, #2
.endif
subs \h, \h, #1
smlal v0.4s, v20.4h, v7.h[6]
tbl v0.8b, {v0.16b}, v25.8b
sqrshrun v0.8b, v0.8h, #2
str h0, [\dst]
add \dst, \dst, \d_strd
b.gt 2b
ret x15
.endif
.align JUMP_ALIGN
L(\type\()_8tap_h_\isa):
movrel x11, \type\()_8tap_h_\isa\()_tbl
ldrsw x8, [x11, x8, lsl #2]
.ifc \type, put
.ifc \isa, neon_i8mm
movi v27.4s, #34 // special rounding
.else
mov w10, #0x2022 // 64 * 128 + 34, bias and rounding for SDOT
dup v27.4s, w10
.endif
.endif
add x11, x11, x8
br x11
.ifc \type, put
.align JUMP_ALIGN
20: // H - 2xN
AARCH64_VALID_JUMP_TARGET
add \src, \src, #2
ldur s26, [\xmx, #2]
.align LOOP_ALIGN
2:
ldr d0, [\src]
ldr d1, [\src, \s_strd]
add \src, \src, \s_strd, lsl #1
.ifc \isa, neon_dotprod
sub v0.8b, v0.8b, v24.8b
sub v1.8b, v1.8b, v24.8b
.endif
mov v4.16b, v27.16b
mov v5.16b, v27.16b
tbl v2.16b, {v0.16b}, v28.16b
tbl v3.16b, {v1.16b}, v28.16b
\dot v4.4s, v2.16b, v26.4b[0]
\dot v5.4s, v3.16b, v26.4b[0]
uzp1 v4.8h, v4.8h, v5.8h
sqshrun v4.8b, v4.8h, #6
subs \h, \h, #2
fmov x8, d4
lsr x9, x8, #32
strh w8, [\dst]
strh w9, [\dst, \d_strd]
add \dst, \dst, \d_strd, lsl #1
b.gt 2b
ret
.endif
.align JUMP_ALIGN
40: // H - 4xN
AARCH64_VALID_JUMP_TARGET
add \src, \src, #2
ldur s26, [\xmx, #2]
.align LOOP_ALIGN
4:
ldr d0, [\src]
ldr d1, [\src, \s_strd]
add \src, \src, \s_strd, lsl #1
.ifc \type\()_\isa, prep_neon_i8mm
movi v4.4s, #0
movi v5.4s, #0
.else
.ifc \isa, neon_dotprod
sub v0.8b, v0.8b, v24.8b
sub v1.8b, v1.8b, v24.8b
.endif
mov v4.16b, v27.16b
mov v5.16b, v27.16b
.endif
tbl v2.16b, {v0.16b}, v28.16b
tbl v3.16b, {v1.16b}, v28.16b
\dot v4.4s, v2.16b, v26.4b[0]
\dot v5.4s, v3.16b, v26.4b[0]
.ifc \type, prep
subs \h, \h, #2
.ifc \isa, neon_i8mm
uzp1 v4.8h, v4.8h, v5.8h
srshr v4.8h, v4.8h, #2
.else
shrn v4.4h, v4.4s, #2
shrn2 v4.8h, v5.4s, #2
.endif
str q4, [\dst], #16
.else // put
uzp1 v4.8h, v4.8h, v5.8h
sqshrun v4.8b, v4.8h, #6
subs \h, \h, #2
fmov x8, d4
lsr x9, x8, #32
str w8, [\dst]
str w9, [\dst, \d_strd]
add \dst, \dst, \d_strd, lsl #1
.endif
b.gt 4b
ret
.align JUMP_ALIGN
80: // H - 8xN
AARCH64_VALID_JUMP_TARGET
ldr d26, [\xmx]
.ifc \isa, neon_i8mm
cmp w9, #SHARP1
b.eq 88f // horizontal == SHARP1
ldp q29, q30, [x13, #(OFFSET_USMMLA)]
ext v0.8b, v26.8b, v26.8b, #7
ins v26.d[1], v0.d[0]
.align LOOP_ALIGN
8:
ldr q0, [\src]
ldr q16, [\src, \s_strd]
add \src, \src, \s_strd, lsl #1
.ifc \type, prep
movi v4.4s, #0
movi v5.4s, #0
movi v20.4s, #0
movi v21.4s, #0
.else
mov v4.16b, v27.16b
mov v5.16b, v27.16b
mov v20.16b, v27.16b
mov v21.16b, v27.16b
.endif
tbl v1.16b, {v0.16b}, v29.16b
tbl v2.16b, {v0.16b}, v30.16b
tbl v17.16b, {v16.16b}, v29.16b
tbl v18.16b, {v16.16b}, v30.16b
usmmla v4.4s, v1.16b, v26.16b
usmmla v5.4s, v2.16b, v26.16b
usmmla v20.4s, v17.16b, v26.16b
usmmla v21.4s, v18.16b, v26.16b
uzp1 v4.8h, v4.8h, v5.8h
uzp1 v20.8h, v20.8h, v21.8h
.ifc \type, prep
srshr v4.8h, v4.8h, #2
srshr v20.8h, v20.8h, #2
subs \h, \h, #2
stp q4, q20, [\dst], #32
.else // put
sqshrun v4.8b, v4.8h, #6
sqshrun v20.8b, v20.8h, #6
subs \h, \h, #2
str d4, [\dst]
str d20, [\dst, \d_strd]
add \dst, \dst, \d_strd, lsl #1
.endif
b.gt 8b
ret
.align JUMP_ALIGN
88:
.endif // neon_i8mm
ldp q29, q30, [x13, #16]
.align LOOP_ALIGN
8:
ldr q0, [\src]
ldr q16, [\src, \s_strd]
add \src, \src, \s_strd, lsl #1
.ifc \type\()_\isa, prep_neon_i8mm
movi v4.4s, #0
movi v5.4s, #0
movi v20.4s, #0
movi v21.4s, #0
.else
.ifc \isa, neon_dotprod
sub v0.16b, v0.16b, v24.16b
sub v16.16b, v16.16b, v24.16b
.endif
mov v4.16b, v27.16b
mov v5.16b, v27.16b
mov v20.16b, v27.16b
mov v21.16b, v27.16b
.endif
tbl v1.16b, {v0.16b}, v28.16b
tbl v2.16b, {v0.16b}, v29.16b
tbl v3.16b, {v0.16b}, v30.16b
tbl v17.16b, {v16.16b}, v28.16b
tbl v18.16b, {v16.16b}, v29.16b
tbl v19.16b, {v16.16b}, v30.16b
\dot v4.4s, v1.16b, v26.4b[0]
\dot v5.4s, v2.16b, v26.4b[0]
\dot v20.4s, v17.16b, v26.4b[0]
\dot v21.4s, v18.16b, v26.4b[0]
\dot v4.4s, v2.16b, v26.4b[1]
\dot v5.4s, v3.16b, v26.4b[1]
\dot v20.4s, v18.16b, v26.4b[1]
\dot v21.4s, v19.16b, v26.4b[1]
uzp1 v4.8h, v4.8h, v5.8h
uzp1 v20.8h, v20.8h, v21.8h
.ifc \type, prep
.ifc \isa, neon_i8mm
srshr v4.8h, v4.8h, #2
srshr v20.8h, v20.8h, #2
.else
sshr v4.8h, v4.8h, #2
sshr v20.8h, v20.8h, #2
.endif
subs \h, \h, #2
stp q4, q20, [\dst], #32
.else // put
sqshrun v4.8b, v4.8h, #6
sqshrun v20.8b, v20.8h, #6
subs \h, \h, #2
str d4, [\dst]
str d20, [\dst, \d_strd]
add \dst, \dst, \d_strd, lsl #1
.endif
b.gt 8b
ret
.align JUMP_ALIGN
160: // H - 16xN
AARCH64_VALID_JUMP_TARGET
ldr d26, [\xmx]
.ifc \isa, neon_i8mm
cmp w9, #SHARP1
b.eq 168f // horizontal == SHARP1
ldp q29, q30, [x13, #(OFFSET_USMMLA)]
ext v0.8b, v26.8b, v26.8b, #7
ins v26.d[1], v0.d[0]
.align LOOP_ALIGN
16:
ldr q16, [\src]
ldur q17, [\src, #8] // avoid 2 register TBL for small cores
add \src, \src, \s_strd
.ifc \type, prep
movi v6.4s, #0
movi v7.4s, #0
movi v22.4s, #0
movi v23.4s, #0
.else
mov v6.16b, v27.16b
mov v7.16b, v27.16b
mov v22.16b, v27.16b
mov v23.16b, v27.16b
.endif
tbl v0.16b, {v16.16b}, v29.16b
tbl v1.16b, {v16.16b}, v30.16b
tbl v2.16b, {v17.16b}, v29.16b
tbl v3.16b, {v17.16b}, v30.16b
usmmla v6.4s, v0.16b, v26.16b
usmmla v7.4s, v1.16b, v26.16b
usmmla v22.4s, v2.16b, v26.16b
usmmla v23.4s, v3.16b, v26.16b
uzp1 v6.8h, v6.8h, v7.8h
uzp1 v22.8h, v22.8h, v23.8h
.ifc \type, prep
srshr v6.8h, v6.8h, #2
srshr v22.8h, v22.8h, #2
subs \h, \h, #1
stp q6, q22, [\dst], #32
.else // put
sqshrun v6.8b, v6.8h, #6
sqshrun2 v6.16b, v22.8h, #6
subs \h, \h, #1
st1 {v6.16b}, [\dst], \d_strd
.endif
b.gt 16b
ret
.align JUMP_ALIGN
168:
.endif // neon_i8mm
ldp q29, q30, [x13, #16]
.align LOOP_ALIGN
16:
ldr q16, [\src]
ldur q17, [\src, #12] // avoid 2 register TBL for small cores
add \src, \src, \s_strd
.ifc \type\()_\isa, prep_neon_i8mm
movi v6.4s, #0
movi v7.4s, #0
movi v22.4s, #0
movi v23.4s, #0
.else
.ifc \isa, neon_dotprod
sub v16.16b, v16.16b, v24.16b
sub v17.16b, v17.16b, v24.16b
.endif
mov v6.16b, v27.16b
mov v7.16b, v27.16b
mov v22.16b, v27.16b
mov v23.16b, v27.16b
.endif
tbl v0.16b, {v16.16b}, v28.16b
tbl v1.16b, {v16.16b}, v29.16b
tbl v2.16b, {v16.16b}, v30.16b
tbl v3.16b, {v17.16b}, v28.16b
tbl v4.16b, {v17.16b}, v29.16b
\dot v6.4s, v0.16b, v26.4b[0]
\dot v7.4s, v1.16b, v26.4b[0]
\dot v22.4s, v2.16b, v26.4b[0]
\dot v23.4s, v3.16b, v26.4b[0]
\dot v6.4s, v1.16b, v26.4b[1]
\dot v7.4s, v2.16b, v26.4b[1]
\dot v22.4s, v3.16b, v26.4b[1]
\dot v23.4s, v4.16b, v26.4b[1]
uzp1 v6.8h, v6.8h, v7.8h
uzp1 v22.8h, v22.8h, v23.8h
.ifc \type, prep
.ifc \isa, neon_i8mm
srshr v6.8h, v6.8h, #2
srshr v22.8h, v22.8h, #2
.else
sshr v6.8h, v6.8h, #2
sshr v22.8h, v22.8h, #2
.endif
subs \h, \h, #1
stp q6, q22, [\dst], #32
.else // put
sqshrun v6.8b, v6.8h, #6
sqshrun2 v6.16b, v22.8h, #6
subs \h, \h, #1
st1 {v6.16b}, [\dst], \d_strd
.endif
b.gt 16b
ret
.align JUMP_ALIGN
320: // H - 32xN+
640:
1280:
AARCH64_VALID_JUMP_TARGET
ldr d26, [\xmx]
.ifc \type, put
sub \d_strd, \d_strd, \w, uxtw
.endif
sub \s_strd, \s_strd, \w, uxtw
mov w8, \w
.ifc \isa, neon_i8mm
cmp w9, #SHARP1
b.eq 328f // horizontal == SHARP1
ldp q29, q30, [x13, #(OFFSET_USMMLA)]
ext v0.8b, v26.8b, v26.8b, #7
ins v26.d[1], v0.d[0]
.align LOOP_ALIGN
32:
ldr q16, [\src]
ldur q17, [\src, #8] // avoid 2 register TBL for small cores
add \src, \src, #16
.ifc \type, prep
movi v6.4s, #0
movi v7.4s, #0
movi v22.4s, #0
movi v23.4s, #0
.else
mov v6.16b, v27.16b
mov v7.16b, v27.16b
mov v22.16b, v27.16b
mov v23.16b, v27.16b
.endif
tbl v0.16b, {v16.16b}, v29.16b
tbl v1.16b, {v16.16b}, v30.16b
tbl v2.16b, {v17.16b}, v29.16b
tbl v3.16b, {v17.16b}, v30.16b
usmmla v6.4s, v0.16b, v26.16b
usmmla v7.4s, v1.16b, v26.16b
usmmla v22.4s, v2.16b, v26.16b
usmmla v23.4s, v3.16b, v26.16b
uzp1 v6.8h, v6.8h, v7.8h
uzp1 v22.8h, v22.8h, v23.8h
.ifc \type, prep
srshr v6.8h, v6.8h, #2
srshr v22.8h, v22.8h, #2
subs w8, w8, #16
stp q6, q22, [\dst], #32
.else // put
sqshrun v6.8b, v6.8h, #6
sqshrun2 v6.16b, v22.8h, #6
subs w8, w8, #16
str q6, [\dst], #16
.endif
b.gt 32b
add \src, \src, \s_strd
.ifc \type, put
add \dst, \dst, \d_strd
.endif
mov w8, \w
subs \h, \h, #1
b.gt 32b
ret
.align JUMP_ALIGN
328:
.endif // neon_i8mm
ldp q29, q30, [x13, #16]
.align LOOP_ALIGN
32:
ldr q16, [\src]
ldur q17, [\src, #12] // avoid 2 register TBL for small cores
add \src, \src, #16
.ifc \type\()_\isa, prep_neon_i8mm
movi v6.4s, #0
movi v7.4s, #0
movi v22.4s, #0
movi v23.4s, #0
.else
.ifc \isa, neon_dotprod
sub v16.16b, v16.16b, v24.16b
sub v17.16b, v17.16b, v24.16b
.endif
mov v6.16b, v27.16b
mov v7.16b, v27.16b
mov v22.16b, v27.16b
mov v23.16b, v27.16b
.endif
tbl v0.16b, {v16.16b}, v28.16b
tbl v1.16b, {v16.16b}, v29.16b
tbl v2.16b, {v16.16b}, v30.16b
tbl v3.16b, {v17.16b}, v28.16b
tbl v4.16b, {v17.16b}, v29.16b
\dot v6.4s, v0.16b, v26.4b[0]
\dot v7.4s, v1.16b, v26.4b[0]
\dot v22.4s, v2.16b, v26.4b[0]
\dot v23.4s, v3.16b, v26.4b[0]
\dot v6.4s, v1.16b, v26.4b[1]
\dot v7.4s, v2.16b, v26.4b[1]
\dot v22.4s, v3.16b, v26.4b[1]
\dot v23.4s, v4.16b, v26.4b[1]
uzp1 v6.8h, v6.8h, v7.8h
uzp1 v22.8h, v22.8h, v23.8h
.ifc \type, prep
.ifc \isa, neon_i8mm
srshr v6.8h, v6.8h, #2
srshr v22.8h, v22.8h, #2
.else
sshr v6.8h, v6.8h, #2
sshr v22.8h, v22.8h, #2
.endif
subs w8, w8, #16
stp q6, q22, [\dst], #32
.else // put
sqshrun v6.8b, v6.8h, #6
sqshrun2 v6.16b, v22.8h, #6
subs w8, w8, #16
str q6, [\dst], #16
.endif
b.gt 32b
add \src, \src, \s_strd
.ifc \type, put
add \dst, \dst, \d_strd
.endif
mov w8, \w
subs \h, \h, #1
b.gt 32b
ret
endfunc
jumptable \type\()_8tap_h_\isa\()_tbl
.word 1280b - \type\()_8tap_h_\isa\()_tbl
.word 640b - \type\()_8tap_h_\isa\()_tbl
.word 320b - \type\()_8tap_h_\isa\()_tbl
.word 160b - \type\()_8tap_h_\isa\()_tbl
.word 80b - \type\()_8tap_h_\isa\()_tbl
.word 40b - \type\()_8tap_h_\isa\()_tbl
.ifc \type, put
.word 20b - \type\()_8tap_h_\isa\()_tbl
.endif
endjumptable
.endm
// dst(x0), d_strd(x7), src(x1), s_strd(x2), w(w3), h(w4), mx(w5), my(w6)
// xmx(x5), xmy(x6), ldst(x5), lsrc(x6), wd_strd(w7)
filter_8tap_fn prep, sdot, neon_dotprod, x0, x7, x1, x2, w3, w4, w5, w6, x5, x6, x5, x6, w7
// dst(x0) d_strd(x1) src(x2) s_strd(x3) w(w4) h(w5) mx(w6) my(w7)
// xmx(x6), xmy(x7), ldst(x6), lsrc(x7), wd_strd(w1)
filter_8tap_fn put, sdot, neon_dotprod, x0, x1, x2, x3, w4, w5, w6, w7, x6, x7, x6, x7, w1
#if HAVE_I8MM
ENABLE_I8MM
// dst(x0), d_strd(x7), src(x1), s_strd(x2), w(w3), h(w4), mx(w5), my(w6)
// xmx(x5), xmy(x6), ldst(x5), lsrc(x6), wd_strd(w7)
filter_8tap_fn prep, usdot, neon_i8mm, x0, x7, x1, x2, w3, w4, w5, w6, x5, x6, x5, x6, w7
// dst(x0) d_strd(x1) src(x2) s_strd(x3) w(w4) h(w5) mx(w6) my(w7)
// xmx(x6), xmy(x7), ldst(x6), lsrc(x7), wd_strd(w1)
filter_8tap_fn put, usdot, neon_i8mm, x0, x1, x2, x3, w4, w5, w6, w7, x6, x7, x6, x7, w1
DISABLE_I8MM
#endif // HAVE_I8MM
DISABLE_DOTPROD
#endif // HAVE_DOTPROD
Admenri/urge | 47,617 | third_party/dav1d/src/arm/64/loopfilter.S
/*
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2019, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm.S"
#include "util.S"
// depending on how many pixels need to be stored, returns:
// x14 = (1 << 0) : 0 pixels
// x14 = (1 << 4) : inner 4 pixels
// x14 = (1 << 6) : inner 6 pixels
// x14 = 0 : all pixels
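// The caller tests these bits to decide how many of the filtered pixels are
// written back to memory, so narrower filter decisions only store the inner
// pixels they actually modified.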
.macro loop_filter wd
function lpf_16_wd\wd\()_neon
uabd v0.16b, v22.16b, v23.16b // abs(p1 - p0)
uabd v1.16b, v25.16b, v24.16b // abs(q1 - q0)
uabd v2.16b, v23.16b, v24.16b // abs(p0 - q0)
uabd v3.16b, v22.16b, v25.16b // abs(p1 - q1)
.if \wd >= 6
uabd v4.16b, v21.16b, v22.16b // abs(p2 - p1)
uabd v5.16b, v26.16b, v25.16b // abs(q2 - q1)
.endif
.if \wd >= 8
uabd v6.16b, v20.16b, v21.16b // abs(p3 - p2)
uabd v7.16b, v27.16b, v26.16b // abs(q3 - q2)
.endif
.if \wd >= 6
umax v4.16b, v4.16b, v5.16b
.endif
uqadd v2.16b, v2.16b, v2.16b // abs(p0 - q0) * 2
.if \wd >= 8
umax v6.16b, v6.16b, v7.16b
.endif
ushr v3.16b, v3.16b, #1
.if \wd >= 8
umax v4.16b, v4.16b, v6.16b
.endif
.if \wd >= 6
and v4.16b, v4.16b, v14.16b
.endif
umax v0.16b, v0.16b, v1.16b // max(abs(p1 - p0), abs(q1 - q0))
uqadd v2.16b, v2.16b, v3.16b // abs(p0 - q0) * 2 + (abs(p1 - q1) >> 1)
.if \wd >= 6
umax v4.16b, v0.16b, v4.16b
cmhs v1.16b, v11.16b, v4.16b // max(abs(p1 - p0), abs(q1 - q0), abs(), abs(), ...) <= I
.else
cmhs v1.16b, v11.16b, v0.16b // max(abs(p1 - p0), abs(q1 - q0)) <= I
.endif
cmhs v2.16b, v10.16b, v2.16b // abs(p0 - q0) * 2 + (abs(p1 - q1) >> 1) <= E
and v1.16b, v1.16b, v2.16b // fm
and v1.16b, v1.16b, v13.16b // fm && wd >= 4
.if \wd >= 6
and v14.16b, v14.16b, v1.16b // fm && wd > 4
.endif
.if \wd >= 16
and v15.16b, v15.16b, v1.16b // fm && wd == 16
.endif
mov x16, v1.d[0]
mov x17, v1.d[1]
adds x16, x16, x17
b.ne 9f // if (!fm || wd < 4) return;
mov x14, #(1 << 0)
ret
9:
.if \wd >= 6
movi v10.16b, #1
uabd v2.16b, v21.16b, v23.16b // abs(p2 - p0)
uabd v3.16b, v22.16b, v23.16b // abs(p1 - p0)
uabd v4.16b, v25.16b, v24.16b // abs(q1 - q0)
uabd v5.16b, v26.16b, v24.16b // abs(q2 - q0)
.if \wd >= 8
uabd v6.16b, v20.16b, v23.16b // abs(p3 - p0)
uabd v7.16b, v27.16b, v24.16b // abs(q3 - q0)
.endif
umax v2.16b, v2.16b, v3.16b
umax v4.16b, v4.16b, v5.16b
.if \wd >= 8
umax v6.16b, v6.16b, v7.16b
.endif
umax v2.16b, v2.16b, v4.16b
.if \wd >= 8
umax v2.16b, v2.16b, v6.16b
.endif
.if \wd == 16
uabd v3.16b, v17.16b, v23.16b // abs(p6 - p0)
uabd v4.16b, v18.16b, v23.16b // abs(p5 - p0)
uabd v5.16b, v19.16b, v23.16b // abs(p4 - p0)
.endif
cmhs v2.16b, v10.16b, v2.16b // flat8in
.if \wd == 16
uabd v6.16b, v28.16b, v24.16b // abs(q4 - q0)
uabd v7.16b, v29.16b, v24.16b // abs(q5 - q0)
uabd v8.16b, v30.16b, v24.16b // abs(q6 - q0)
.endif
and v14.16b, v2.16b, v14.16b // flat8in && fm && wd > 4
bic v1.16b, v1.16b, v14.16b // fm && wd >= 4 && !flat8in
.if \wd == 16
umax v3.16b, v3.16b, v4.16b
umax v5.16b, v5.16b, v6.16b
.endif
mov x16, v1.d[0]
mov x17, v1.d[1]
.if \wd == 16
umax v7.16b, v7.16b, v8.16b
umax v3.16b, v3.16b, v5.16b
umax v3.16b, v3.16b, v7.16b
cmhs v3.16b, v10.16b, v3.16b // flat8out
.endif
adds x16, x16, x17
.if \wd == 16
and v15.16b, v15.16b, v3.16b // flat8out && fm && wd == 16
and v15.16b, v15.16b, v14.16b // flat8out && flat8in && fm && wd == 16
bic v14.16b, v14.16b, v15.16b // flat8in && fm && wd >= 4 && !flat8out
.endif
b.eq 1f // skip wd == 4 case
.endif
movi v3.16b, #128
eor v2.16b, v22.16b, v3.16b // p1 - 128
eor v3.16b, v25.16b, v3.16b // q1 - 128
cmhi v0.16b, v0.16b, v12.16b // hev
sqsub v2.16b, v2.16b, v3.16b // iclip_diff(p1 - q1)
and v4.16b, v2.16b, v0.16b // if (hev) iclip_diff(p1 - q1)
bic v0.16b, v1.16b, v0.16b // (fm && wd >= 4 && !hev)
usubl v2.8h, v24.8b, v23.8b
movi v5.8h, #3
usubl2 v3.8h, v24.16b, v23.16b
mul v2.8h, v2.8h, v5.8h
mul v3.8h, v3.8h, v5.8h
movi v6.16b, #4
saddw v2.8h, v2.8h, v4.8b
saddw2 v3.8h, v3.8h, v4.16b
movi v7.16b, #3
sqxtn v2.8b, v2.8h // f
sqxtn2 v2.16b, v3.8h
sqadd v4.16b, v6.16b, v2.16b // imin(f + 4, 127)
sqadd v5.16b, v7.16b, v2.16b // imin(f + 3, 127)
sshr v4.16b, v4.16b, #3 // f1
sshr v5.16b, v5.16b, #3 // f2
mov v2.16b, v23.16b // p0
mov v3.16b, v24.16b // q0
neg v6.16b, v4.16b // -f1
srshr v4.16b, v4.16b, #1 // (f1 + 1) >> 1
// p0 + f2, q0 - f1
usqadd v2.16b, v5.16b // out p0
usqadd v3.16b, v6.16b // out q0
neg v6.16b, v4.16b // -((f1 + 1) >> 1)
bit v23.16b, v2.16b, v1.16b // if (fm && wd >= 4)
bit v24.16b, v3.16b, v1.16b // if (fm && wd >= 4)
mov v2.16b, v22.16b // p1
mov v3.16b, v25.16b // q1
// p1 + ((f1 + 1) >> 1), q1 - ((f1 + 1) >> 1)
usqadd v2.16b, v4.16b // out p1
usqadd v3.16b, v6.16b // out q1
bit v22.16b, v2.16b, v0.16b // if (fm && wd >= 4 && !hev)
bit v25.16b, v3.16b, v0.16b // if (fm && wd >= 4 && !hev)
1:
.if \wd == 6
mov x16, v14.d[0]
mov x17, v14.d[1]
adds x16, x16, x17
b.eq 2f // skip if there's no flat8in
uaddl v0.8h, v21.8b, v21.8b // p2 * 2
uaddl2 v1.8h, v21.16b, v21.16b
uaddl v2.8h, v21.8b, v22.8b // p2 + p1
uaddl2 v3.8h, v21.16b, v22.16b
uaddl v4.8h, v22.8b, v23.8b // p1 + p0
uaddl2 v5.8h, v22.16b, v23.16b
uaddl v6.8h, v23.8b, v24.8b // p0 + q0
uaddl2 v7.8h, v23.16b, v24.16b
add v8.8h, v0.8h, v2.8h
add v9.8h, v1.8h, v3.8h
add v10.8h, v4.8h, v6.8h
add v11.8h, v5.8h, v7.8h
uaddl v12.8h, v24.8b, v25.8b // q0 + q1
uaddl2 v13.8h, v24.16b, v25.16b
add v8.8h, v8.8h, v10.8h
add v9.8h, v9.8h, v11.8h
sub v12.8h, v12.8h, v0.8h
sub v13.8h, v13.8h, v1.8h
uaddl v10.8h, v25.8b, v26.8b // q1 + q2
uaddl2 v11.8h, v25.16b, v26.16b
rshrn v0.8b, v8.8h, #3 // out p1
rshrn2 v0.16b, v9.8h, #3
add v8.8h, v8.8h, v12.8h
add v9.8h, v9.8h, v13.8h
sub v10.8h, v10.8h, v2.8h
sub v11.8h, v11.8h, v3.8h
uaddl v12.8h, v26.8b, v26.8b // q2 + q2
uaddl2 v13.8h, v26.16b, v26.16b
rshrn v1.8b, v8.8h, #3 // out p0
rshrn2 v1.16b, v9.8h, #3
add v8.8h, v8.8h, v10.8h
add v9.8h, v9.8h, v11.8h
sub v12.8h, v12.8h, v4.8h
sub v13.8h, v13.8h, v5.8h
rshrn v2.8b, v8.8h, #3 // out q0
rshrn2 v2.16b, v9.8h, #3
bit v22.16b, v0.16b, v14.16b // p1 if (flat8in)
add v8.8h, v8.8h, v12.8h
add v9.8h, v9.8h, v13.8h
bit v23.16b, v1.16b, v14.16b // p0 if (flat8in)
rshrn v3.8b, v8.8h, #3 // out q1
rshrn2 v3.16b, v9.8h, #3
bit v24.16b, v2.16b, v14.16b // q0 if (flat8in)
bit v25.16b, v3.16b, v14.16b // q1 if (flat8in)
.elseif \wd >= 8
mov x16, v14.d[0]
mov x17, v14.d[1]
adds x16, x16, x17
.if \wd == 8
b.eq 8f // skip if there's no flat8in
.else
b.eq 2f // skip if there's no flat8in
.endif
uaddl v0.8h, v20.8b, v21.8b // p3 + p2
uaddl2 v1.8h, v20.16b, v21.16b
uaddl v2.8h, v22.8b, v25.8b // p1 + q1
uaddl2 v3.8h, v22.16b, v25.16b
uaddl v4.8h, v20.8b, v22.8b // p3 + p1
uaddl2 v5.8h, v20.16b, v22.16b
uaddl v6.8h, v23.8b, v26.8b // p0 + q2
uaddl2 v7.8h, v23.16b, v26.16b
add v8.8h, v0.8h, v0.8h // 2 * (p3 + p2)
add v9.8h, v1.8h, v1.8h
uaddw v8.8h, v8.8h, v23.8b // + p0
uaddw2 v9.8h, v9.8h, v23.16b
uaddw v8.8h, v8.8h, v24.8b // + q0
uaddw2 v9.8h, v9.8h, v24.16b
add v8.8h, v8.8h, v4.8h
add v9.8h, v9.8h, v5.8h // + p3 + p1
sub v2.8h, v2.8h, v0.8h // p1 + q1 - p3 - p2
sub v3.8h, v3.8h, v1.8h
sub v6.8h, v6.8h, v4.8h // p0 + q2 - p3 - p1
sub v7.8h, v7.8h, v5.8h
rshrn v10.8b, v8.8h, #3 // out p2
rshrn2 v10.16b, v9.8h, #3
add v8.8h, v8.8h, v2.8h
add v9.8h, v9.8h, v3.8h
uaddl v0.8h, v20.8b, v23.8b // p3 + p0
uaddl2 v1.8h, v20.16b, v23.16b
uaddl v2.8h, v24.8b, v27.8b // q0 + q3
uaddl2 v3.8h, v24.16b, v27.16b
rshrn v11.8b, v8.8h, #3 // out p1
rshrn2 v11.16b, v9.8h, #3
add v8.8h, v8.8h, v6.8h
add v9.8h, v9.8h, v7.8h
sub v2.8h, v2.8h, v0.8h // q0 + q3 - p3 - p0
sub v3.8h, v3.8h, v1.8h
uaddl v4.8h, v21.8b, v24.8b // p2 + q0
uaddl2 v5.8h, v21.16b, v24.16b
uaddl v6.8h, v25.8b, v27.8b // q1 + q3
uaddl2 v7.8h, v25.16b, v27.16b
rshrn v12.8b, v8.8h, #3 // out p0
rshrn2 v12.16b, v9.8h, #3
add v8.8h, v8.8h, v2.8h
add v9.8h, v9.8h, v3.8h
sub v6.8h, v6.8h, v4.8h // q1 + q3 - p2 - q0
sub v7.8h, v7.8h, v5.8h
uaddl v0.8h, v22.8b, v25.8b // p1 + q1
uaddl2 v1.8h, v22.16b, v25.16b
uaddl v2.8h, v26.8b, v27.8b // q2 + q3
uaddl2 v3.8h, v26.16b, v27.16b
rshrn v13.8b, v8.8h, #3 // out q0
rshrn2 v13.16b, v9.8h, #3
add v8.8h, v8.8h, v6.8h
add v9.8h, v9.8h, v7.8h
sub v2.8h, v2.8h, v0.8h // q2 + q3 - p1 - q1
sub v3.8h, v3.8h, v1.8h
rshrn v0.8b, v8.8h, #3 // out q1
rshrn2 v0.16b, v9.8h, #3
add v8.8h, v8.8h, v2.8h
add v9.8h , v9.8h, v3.8h
bit v21.16b, v10.16b, v14.16b
bit v22.16b, v11.16b, v14.16b
bit v23.16b, v12.16b, v14.16b
rshrn v1.8b, v8.8h, #3 // out q2
rshrn2 v1.16b, v9.8h, #3
bit v24.16b, v13.16b, v14.16b
bit v25.16b, v0.16b, v14.16b
bit v26.16b, v1.16b, v14.16b
.endif
2:
.if \wd == 16
mov x16, v15.d[0]
mov x17, v15.d[1]
adds x16, x16, x17
b.ne 1f // check if flat8out is needed
mov x16, v14.d[0]
mov x17, v14.d[1]
adds x16, x16, x17
b.eq 8f // if there was no flat8in, just write the inner 4 pixels
b 7f // if flat8in was used, write the inner 6 pixels
1:
uaddl v2.8h, v17.8b, v17.8b // p6 + p6
uaddl2 v3.8h, v17.16b, v17.16b
uaddl v4.8h, v17.8b, v18.8b // p6 + p5
uaddl2 v5.8h, v17.16b, v18.16b
uaddl v6.8h, v17.8b, v19.8b // p6 + p4
uaddl2 v7.8h, v17.16b, v19.16b
uaddl v8.8h, v17.8b, v20.8b // p6 + p3
uaddl2 v9.8h, v17.16b, v20.16b
add v12.8h, v2.8h, v4.8h
add v13.8h, v3.8h, v5.8h
add v10.8h, v6.8h, v8.8h
add v11.8h, v7.8h, v9.8h
uaddl v6.8h, v17.8b, v21.8b // p6 + p2
uaddl2 v7.8h, v17.16b, v21.16b
add v12.8h, v12.8h, v10.8h
add v13.8h, v13.8h, v11.8h
uaddl v8.8h, v17.8b, v22.8b // p6 + p1
uaddl2 v9.8h, v17.16b, v22.16b
uaddl v10.8h, v18.8b, v23.8b // p5 + p0
uaddl2 v11.8h, v18.16b, v23.16b
add v6.8h, v6.8h, v8.8h
add v7.8h, v7.8h, v9.8h
uaddl v8.8h, v19.8b, v24.8b // p4 + q0
uaddl2 v9.8h, v19.16b, v24.16b
add v12.8h, v12.8h, v6.8h
add v13.8h, v13.8h, v7.8h
add v10.8h, v10.8h, v8.8h
add v11.8h, v11.8h, v9.8h
uaddl v6.8h, v20.8b, v25.8b // p3 + q1
uaddl2 v7.8h, v20.16b, v25.16b
add v12.8h, v12.8h, v10.8h
add v13.8h, v13.8h, v11.8h
sub v6.8h, v6.8h, v2.8h
sub v7.8h, v7.8h, v3.8h
uaddl v2.8h, v21.8b, v26.8b // p2 + q2
uaddl2 v3.8h, v21.16b, v26.16b
rshrn v0.8b, v12.8h, #4 // out p5
rshrn2 v0.16b, v13.8h, #4
add v12.8h, v12.8h, v6.8h // - (p6 + p6) + (p3 + q1)
add v13.8h, v13.8h, v7.8h
sub v2.8h, v2.8h, v4.8h
sub v3.8h, v3.8h, v5.8h
uaddl v4.8h, v22.8b, v27.8b // p1 + q3
uaddl2 v5.8h, v22.16b, v27.16b
uaddl v6.8h, v17.8b, v19.8b // p6 + p4
uaddl2 v7.8h, v17.16b, v19.16b
rshrn v1.8b, v12.8h, #4 // out p4
rshrn2 v1.16b, v13.8h, #4
add v12.8h, v12.8h, v2.8h // - (p6 + p5) + (p2 + q2)
add v13.8h, v13.8h, v3.8h
sub v4.8h, v4.8h, v6.8h
sub v5.8h, v5.8h, v7.8h
uaddl v6.8h, v23.8b, v28.8b // p0 + q4
uaddl2 v7.8h, v23.16b, v28.16b
uaddl v8.8h, v17.8b, v20.8b // p6 + p3
uaddl2 v9.8h, v17.16b, v20.16b
rshrn v2.8b, v12.8h, #4 // out p3
rshrn2 v2.16b, v13.8h, #4
add v12.8h, v12.8h, v4.8h // - (p6 + p4) + (p1 + q3)
add v13.8h, v13.8h, v5.8h
sub v6.8h, v6.8h, v8.8h
sub v7.8h, v7.8h, v9.8h
uaddl v8.8h, v24.8b, v29.8b // q0 + q5
uaddl2 v9.8h, v24.16b, v29.16b
uaddl v4.8h, v17.8b, v21.8b // p6 + p2
uaddl2 v5.8h, v17.16b, v21.16b
rshrn v3.8b, v12.8h, #4 // out p2
rshrn2 v3.16b, v13.8h, #4
add v12.8h, v12.8h, v6.8h // - (p6 + p3) + (p0 + q4)
add v13.8h, v13.8h, v7.8h
sub v8.8h, v8.8h, v4.8h
sub v9.8h, v9.8h, v5.8h
uaddl v6.8h, v25.8b, v30.8b // q1 + q6
uaddl2 v7.8h, v25.16b, v30.16b
uaddl v10.8h, v17.8b, v22.8b // p6 + p1
uaddl2 v11.8h, v17.16b, v22.16b
rshrn v4.8b, v12.8h, #4 // out p1
rshrn2 v4.16b, v13.8h, #4
add v12.8h, v12.8h, v8.8h // - (p6 + p2) + (q0 + q5)
add v13.8h, v13.8h, v9.8h
sub v6.8h, v6.8h, v10.8h
sub v7.8h, v7.8h, v11.8h
uaddl v8.8h, v26.8b, v30.8b // q2 + q6
uaddl2 v9.8h, v26.16b, v30.16b
bif v0.16b, v18.16b, v15.16b // out p5
uaddl v10.8h, v18.8b, v23.8b // p5 + p0
uaddl2 v11.8h, v18.16b, v23.16b
rshrn v5.8b, v12.8h, #4 // out p0
rshrn2 v5.16b, v13.8h, #4
add v12.8h, v12.8h, v6.8h // - (p6 + p1) + (q1 + q6)
add v13.8h, v13.8h, v7.8h
sub v8.8h, v8.8h, v10.8h
sub v9.8h, v9.8h, v11.8h
uaddl v10.8h, v27.8b, v30.8b // q3 + q6
uaddl2 v11.8h, v27.16b, v30.16b
bif v1.16b, v19.16b, v15.16b // out p4
uaddl v18.8h, v19.8b, v24.8b // p4 + q0
uaddl2 v19.8h, v19.16b, v24.16b
rshrn v6.8b, v12.8h, #4 // out q0
rshrn2 v6.16b, v13.8h, #4
add v12.8h, v12.8h, v8.8h // - (p5 + p0) + (q2 + q6)
add v13.8h, v13.8h, v9.8h
sub v10.8h, v10.8h, v18.8h
sub v11.8h, v11.8h, v19.8h
uaddl v8.8h, v28.8b, v30.8b // q4 + q6
uaddl2 v9.8h, v28.16b, v30.16b
bif v2.16b, v20.16b, v15.16b // out p3
uaddl v18.8h, v20.8b, v25.8b // p3 + q1
uaddl2 v19.8h, v20.16b, v25.16b
rshrn v7.8b, v12.8h, #4 // out q1
rshrn2 v7.16b, v13.8h, #4
add v12.8h, v12.8h, v10.8h // - (p4 + q0) + (q3 + q6)
add v13.8h, v13.8h, v11.8h
sub v18.8h, v8.8h, v18.8h
sub v19.8h, v9.8h, v19.8h
uaddl v10.8h, v29.8b, v30.8b // q5 + q6
uaddl2 v11.8h, v29.16b, v30.16b
bif v3.16b, v21.16b, v15.16b // out p2
uaddl v20.8h, v21.8b, v26.8b // p2 + q2
uaddl2 v21.8h, v21.16b, v26.16b
rshrn v8.8b, v12.8h, #4 // out q2
rshrn2 v8.16b, v13.8h, #4
add v12.8h, v12.8h, v18.8h // - (p3 + q1) + (q4 + q6)
add v13.8h, v13.8h, v19.8h
sub v10.8h, v10.8h, v20.8h
sub v11.8h, v11.8h, v21.8h
uaddl v18.8h, v30.8b, v30.8b // q6 + q6
uaddl2 v19.8h, v30.16b, v30.16b
bif v4.16b, v22.16b, v15.16b // out p1
uaddl v20.8h, v22.8b, v27.8b // p1 + q3
uaddl2 v21.8h, v22.16b, v27.16b
rshrn v9.8b, v12.8h, #4 // out q3
rshrn2 v9.16b, v13.8h, #4
add v12.8h, v12.8h, v10.8h // - (p2 + q2) + (q5 + q6)
add v13.8h, v13.8h, v11.8h
sub v18.8h, v18.8h, v20.8h
sub v19.8h, v19.8h, v21.8h
bif v5.16b, v23.16b, v15.16b // out p0
rshrn v10.8b, v12.8h, #4 // out q4
rshrn2 v10.16b, v13.8h, #4
add v12.8h, v12.8h, v18.8h // - (p1 + q3) + (q6 + q6)
add v13.8h, v13.8h, v19.8h
rshrn v11.8b, v12.8h, #4 // out q5
rshrn2 v11.16b, v13.8h, #4
bif v6.16b, v24.16b, v15.16b // out q0
bif v7.16b, v25.16b, v15.16b // out q1
bif v8.16b, v26.16b, v15.16b // out q2
bif v9.16b, v27.16b, v15.16b // out q3
bif v10.16b, v28.16b, v15.16b // out q4
bif v11.16b, v29.16b, v15.16b // out q5
.endif
mov x14, #0
ret
.if \wd == 16
7:
// Return to a shorter epilogue, writing only the inner 6 pixels
mov x14, #(1 << 6)
ret
.endif
.if \wd >= 8
8:
// Return to a shorter epilogue, writing only the inner 4 pixels
mov x14, #(1 << 4)
ret
.endif
endfunc
.endm
loop_filter 16
loop_filter 8
loop_filter 6
loop_filter 4
.macro lpf_16_wd16
bl lpf_16_wd16_neon
cbz x14, 1f
tbnz x14, #6, 7f
tbnz x14, #4, 8f
ret x15
1:
.endm
.macro lpf_16_wd8
bl lpf_16_wd8_neon
cbz x14, 1f
tbnz x14, #4, 8f
ret x15
1:
.endm
.macro lpf_16_wd6
bl lpf_16_wd6_neon
cbz x14, 1f
ret x15
1:
.endm
.macro lpf_16_wd4
bl lpf_16_wd4_neon
cbz x14, 1f
ret x15
1:
.endm
function lpf_v_4_16_neon
mov x15, x30
sub x16, x0, x1, lsl #1
ld1 {v22.16b}, [x16], x1 // p1
ld1 {v24.16b}, [x0], x1 // q0
ld1 {v23.16b}, [x16], x1 // p0
ld1 {v25.16b}, [x0], x1 // q1
sub x0, x0, x1, lsl #1
lpf_16_wd4
sub x16, x0, x1, lsl #1
st1 {v22.16b}, [x16], x1 // p1
st1 {v24.16b}, [x0], x1 // q0
st1 {v23.16b}, [x16], x1 // p0
st1 {v25.16b}, [x0], x1 // q1
sub x0, x0, x1, lsl #1
ret x15
endfunc
function lpf_h_4_16_neon
mov x15, x30
sub x16, x0, #2
add x0, x16, x1, lsl #3
ld1 {v22.s}[0], [x16], x1
ld1 {v22.s}[2], [x0], x1
ld1 {v23.s}[0], [x16], x1
ld1 {v23.s}[2], [x0], x1
ld1 {v24.s}[0], [x16], x1
ld1 {v24.s}[2], [x0], x1
ld1 {v25.s}[0], [x16], x1
ld1 {v25.s}[2], [x0], x1
ld1 {v22.s}[1], [x16], x1
ld1 {v22.s}[3], [x0], x1
ld1 {v23.s}[1], [x16], x1
ld1 {v23.s}[3], [x0], x1
ld1 {v24.s}[1], [x16], x1
ld1 {v24.s}[3], [x0], x1
ld1 {v25.s}[1], [x16], x1
ld1 {v25.s}[3], [x0], x1
add x0, x0, #2
transpose_4x16b v22, v23, v24, v25, v26, v27, v28, v29
lpf_16_wd4
sub x16, x0, x1, lsl #4
sub x16, x16, #2
transpose_4x16b v22, v23, v24, v25, v26, v27, v28, v29
add x0, x16, x1, lsl #3
st1 {v22.s}[0], [x16], x1
st1 {v22.s}[2], [x0], x1
st1 {v23.s}[0], [x16], x1
st1 {v23.s}[2], [x0], x1
st1 {v24.s}[0], [x16], x1
st1 {v24.s}[2], [x0], x1
st1 {v25.s}[0], [x16], x1
st1 {v25.s}[2], [x0], x1
st1 {v22.s}[1], [x16], x1
st1 {v22.s}[3], [x0], x1
st1 {v23.s}[1], [x16], x1
st1 {v23.s}[3], [x0], x1
st1 {v24.s}[1], [x16], x1
st1 {v24.s}[3], [x0], x1
st1 {v25.s}[1], [x16], x1
st1 {v25.s}[3], [x0], x1
add x0, x0, #2
ret x15
endfunc
function lpf_v_6_16_neon
mov x15, x30
sub x16, x0, x1, lsl #1
sub x16, x16, x1
ld1 {v21.16b}, [x16], x1 // p2
ld1 {v24.16b}, [x0], x1 // q0
ld1 {v22.16b}, [x16], x1 // p1
ld1 {v25.16b}, [x0], x1 // q1
ld1 {v23.16b}, [x16], x1 // p0
ld1 {v26.16b}, [x0], x1 // q2
sub x0, x0, x1, lsl #1
sub x0, x0, x1
lpf_16_wd6
sub x16, x0, x1, lsl #1
st1 {v22.16b}, [x16], x1 // p1
st1 {v24.16b}, [x0], x1 // q0
st1 {v23.16b}, [x16], x1 // p0
st1 {v25.16b}, [x0], x1 // q1
sub x0, x0, x1, lsl #1
ret x15
endfunc
function lpf_h_6_16_neon
mov x15, x30
sub x16, x0, #4
add x0, x16, x1, lsl #3
ld1 {v20.d}[0], [x16], x1
ld1 {v20.d}[1], [x0], x1
ld1 {v21.d}[0], [x16], x1
ld1 {v21.d}[1], [x0], x1
ld1 {v22.d}[0], [x16], x1
ld1 {v22.d}[1], [x0], x1
ld1 {v23.d}[0], [x16], x1
ld1 {v23.d}[1], [x0], x1
ld1 {v24.d}[0], [x16], x1
ld1 {v24.d}[1], [x0], x1
ld1 {v25.d}[0], [x16], x1
ld1 {v25.d}[1], [x0], x1
ld1 {v26.d}[0], [x16], x1
ld1 {v26.d}[1], [x0], x1
ld1 {v27.d}[0], [x16], x1
ld1 {v27.d}[1], [x0], x1
add x0, x0, #4
transpose_8x16b v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
lpf_16_wd6
sub x16, x0, x1, lsl #4
sub x16, x16, #2
transpose_4x16b v22, v23, v24, v25, v26, v27, v28, v29
add x0, x16, x1, lsl #3
st1 {v22.s}[0], [x16], x1
st1 {v22.s}[2], [x0], x1
st1 {v23.s}[0], [x16], x1
st1 {v23.s}[2], [x0], x1
st1 {v24.s}[0], [x16], x1
st1 {v24.s}[2], [x0], x1
st1 {v25.s}[0], [x16], x1
st1 {v25.s}[2], [x0], x1
st1 {v22.s}[1], [x16], x1
st1 {v22.s}[3], [x0], x1
st1 {v23.s}[1], [x16], x1
st1 {v23.s}[3], [x0], x1
st1 {v24.s}[1], [x16], x1
st1 {v24.s}[3], [x0], x1
st1 {v25.s}[1], [x16], x1
st1 {v25.s}[3], [x0], x1
add x0, x0, #2
ret x15
endfunc
function lpf_v_8_16_neon
mov x15, x30
sub x16, x0, x1, lsl #2
ld1 {v20.16b}, [x16], x1 // p3
ld1 {v24.16b}, [x0], x1 // q0
ld1 {v21.16b}, [x16], x1 // p2
ld1 {v25.16b}, [x0], x1 // q1
ld1 {v22.16b}, [x16], x1 // p1
ld1 {v26.16b}, [x0], x1 // q2
ld1 {v23.16b}, [x16], x1 // p0
ld1 {v27.16b}, [x0], x1 // q3
sub x0, x0, x1, lsl #2
lpf_16_wd8
sub x16, x0, x1, lsl #1
sub x16, x16, x1
st1 {v21.16b}, [x16], x1 // p2
st1 {v24.16b}, [x0], x1 // q0
st1 {v22.16b}, [x16], x1 // p1
st1 {v25.16b}, [x0], x1 // q1
st1 {v23.16b}, [x16], x1 // p0
st1 {v26.16b}, [x0], x1 // q2
sub x0, x0, x1, lsl #1
sub x0, x0, x1
ret x15
8:
sub x16, x0, x1, lsl #1
st1 {v22.16b}, [x16], x1 // p1
st1 {v24.16b}, [x0], x1 // q0
st1 {v23.16b}, [x16], x1 // p0
st1 {v25.16b}, [x0], x1 // q1
sub x0, x0, x1, lsl #1
ret x15
endfunc
function lpf_h_8_16_neon
mov x15, x30
sub x16, x0, #4
add x0, x16, x1, lsl #3
ld1 {v20.d}[0], [x16], x1
ld1 {v20.d}[1], [x0], x1
ld1 {v21.d}[0], [x16], x1
ld1 {v21.d}[1], [x0], x1
ld1 {v22.d}[0], [x16], x1
ld1 {v22.d}[1], [x0], x1
ld1 {v23.d}[0], [x16], x1
ld1 {v23.d}[1], [x0], x1
ld1 {v24.d}[0], [x16], x1
ld1 {v24.d}[1], [x0], x1
ld1 {v25.d}[0], [x16], x1
ld1 {v25.d}[1], [x0], x1
ld1 {v26.d}[0], [x16], x1
ld1 {v26.d}[1], [x0], x1
ld1 {v27.d}[0], [x16], x1
ld1 {v27.d}[1], [x0], x1
add x0, x0, #4
transpose_8x16b v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
lpf_16_wd8
sub x16, x0, x1, lsl #4
sub x16, x16, #4
transpose_8x16b v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
add x0, x16, x1, lsl #3
st1 {v20.d}[0], [x16], x1
st1 {v20.d}[1], [x0], x1
st1 {v21.d}[0], [x16], x1
st1 {v21.d}[1], [x0], x1
st1 {v22.d}[0], [x16], x1
st1 {v22.d}[1], [x0], x1
st1 {v23.d}[0], [x16], x1
st1 {v23.d}[1], [x0], x1
st1 {v24.d}[0], [x16], x1
st1 {v24.d}[1], [x0], x1
st1 {v25.d}[0], [x16], x1
st1 {v25.d}[1], [x0], x1
st1 {v26.d}[0], [x16], x1
st1 {v26.d}[1], [x0], x1
st1 {v27.d}[0], [x16], x1
st1 {v27.d}[1], [x0], x1
add x0, x0, #4
ret x15
8:
sub x16, x0, x1, lsl #4
sub x16, x16, #2
transpose_4x16b v22, v23, v24, v25, v26, v27, v28, v29
add x0, x16, x1, lsl #3
st1 {v22.s}[0], [x16], x1
st1 {v22.s}[2], [x0], x1
st1 {v23.s}[0], [x16], x1
st1 {v23.s}[2], [x0], x1
st1 {v24.s}[0], [x16], x1
st1 {v24.s}[2], [x0], x1
st1 {v25.s}[0], [x16], x1
st1 {v25.s}[2], [x0], x1
st1 {v22.s}[1], [x16], x1
st1 {v22.s}[3], [x0], x1
st1 {v23.s}[1], [x16], x1
st1 {v23.s}[3], [x0], x1
st1 {v24.s}[1], [x16], x1
st1 {v24.s}[3], [x0], x1
st1 {v25.s}[1], [x16], x1
st1 {v25.s}[3], [x0], x1
add x0, x0, #2
ret x15
endfunc
function lpf_v_16_16_neon
mov x15, x30
sub x16, x0, x1, lsl #3
add x16, x16, x1
ld1 {v17.16b}, [x16], x1 // p6
ld1 {v24.16b}, [x0], x1 // q0
ld1 {v18.16b}, [x16], x1 // p5
ld1 {v25.16b}, [x0], x1 // q1
ld1 {v19.16b}, [x16], x1 // p4
ld1 {v26.16b}, [x0], x1 // q2
ld1 {v20.16b}, [x16], x1 // p3
ld1 {v27.16b}, [x0], x1 // q3
ld1 {v21.16b}, [x16], x1 // p2
ld1 {v28.16b}, [x0], x1 // q4
ld1 {v22.16b}, [x16], x1 // p1
ld1 {v29.16b}, [x0], x1 // q5
ld1 {v23.16b}, [x16], x1 // p0
ld1 {v30.16b}, [x0], x1 // q6
sub x0, x0, x1, lsl #3
add x0, x0, x1
lpf_16_wd16
sub x16, x0, x1, lsl #2
sub x16, x16, x1, lsl #1
st1 {v0.16b}, [x16], x1 // p5
st1 {v6.16b}, [x0], x1 // q0
st1 {v1.16b}, [x16], x1 // p4
st1 {v7.16b}, [x0], x1 // q1
st1 {v2.16b}, [x16], x1 // p3
st1 {v8.16b}, [x0], x1 // q2
st1 {v3.16b}, [x16], x1 // p2
st1 {v9.16b}, [x0], x1 // q3
st1 {v4.16b}, [x16], x1 // p1
st1 {v10.16b}, [x0], x1 // q4
st1 {v5.16b}, [x16], x1 // p0
st1 {v11.16b}, [x0], x1 // q5
sub x0, x0, x1, lsl #2
sub x0, x0, x1, lsl #1
ret x15
7:
sub x16, x0, x1
sub x16, x16, x1, lsl #1
st1 {v21.16b}, [x16], x1 // p2
st1 {v24.16b}, [x0], x1 // q0
st1 {v22.16b}, [x16], x1 // p1
st1 {v25.16b}, [x0], x1 // q1
st1 {v23.16b}, [x16], x1 // p0
st1 {v26.16b}, [x0], x1 // q2
sub x0, x0, x1, lsl #1
sub x0, x0, x1
ret x15
8:
sub x16, x0, x1, lsl #1
st1 {v22.16b}, [x16], x1 // p1
st1 {v24.16b}, [x0], x1 // q0
st1 {v23.16b}, [x16], x1 // p0
st1 {v25.16b}, [x0], x1 // q1
sub x0, x0, x1, lsl #1
ret x15
endfunc
function lpf_h_16_16_neon
mov x15, x30
sub x16, x0, #8
ld1 {v16.d}[0], [x16], x1
ld1 {v24.d}[0], [x0], x1
ld1 {v17.d}[0], [x16], x1
ld1 {v25.d}[0], [x0], x1
ld1 {v18.d}[0], [x16], x1
ld1 {v26.d}[0], [x0], x1
ld1 {v19.d}[0], [x16], x1
ld1 {v27.d}[0], [x0], x1
ld1 {v20.d}[0], [x16], x1
ld1 {v28.d}[0], [x0], x1
ld1 {v21.d}[0], [x16], x1
ld1 {v29.d}[0], [x0], x1
ld1 {v22.d}[0], [x16], x1
ld1 {v30.d}[0], [x0], x1
ld1 {v23.d}[0], [x16], x1
ld1 {v31.d}[0], [x0], x1
ld1 {v16.d}[1], [x16], x1
ld1 {v24.d}[1], [x0], x1
ld1 {v17.d}[1], [x16], x1
ld1 {v25.d}[1], [x0], x1
ld1 {v18.d}[1], [x16], x1
ld1 {v26.d}[1], [x0], x1
ld1 {v19.d}[1], [x16], x1
ld1 {v27.d}[1], [x0], x1
ld1 {v20.d}[1], [x16], x1
ld1 {v28.d}[1], [x0], x1
ld1 {v21.d}[1], [x16], x1
ld1 {v29.d}[1], [x0], x1
ld1 {v22.d}[1], [x16], x1
ld1 {v30.d}[1], [x0], x1
ld1 {v23.d}[1], [x16], x1
ld1 {v31.d}[1], [x0], x1
transpose_8x16b v16, v17, v18, v19, v20, v21, v22, v23, v0, v1
transpose_8x16b v24, v25, v26, v27, v28, v29, v30, v31, v0, v1
lpf_16_wd16
sub x0, x0, x1, lsl #4
sub x16, x0, #8
transpose_8x16b v16, v17, v0, v1, v2, v3, v4, v5, v18, v19
transpose_8x16b v6, v7, v8, v9, v10, v11, v30, v31, v18, v19
st1 {v16.d}[0], [x16], x1
st1 {v6.d}[0], [x0], x1
st1 {v17.d}[0], [x16], x1
st1 {v7.d}[0], [x0], x1
st1 {v0.d}[0], [x16], x1
st1 {v8.d}[0], [x0], x1
st1 {v1.d}[0], [x16], x1
st1 {v9.d}[0], [x0], x1
st1 {v2.d}[0], [x16], x1
st1 {v10.d}[0], [x0], x1
st1 {v3.d}[0], [x16], x1
st1 {v11.d}[0], [x0], x1
st1 {v4.d}[0], [x16], x1
st1 {v30.d}[0], [x0], x1
st1 {v5.d}[0], [x16], x1
st1 {v31.d}[0], [x0], x1
st1 {v16.d}[1], [x16], x1
st1 {v6.d}[1], [x0], x1
st1 {v17.d}[1], [x16], x1
st1 {v7.d}[1], [x0], x1
st1 {v0.d}[1], [x16], x1
st1 {v8.d}[1], [x0], x1
st1 {v1.d}[1], [x16], x1
st1 {v9.d}[1], [x0], x1
st1 {v2.d}[1], [x16], x1
st1 {v10.d}[1], [x0], x1
st1 {v3.d}[1], [x16], x1
st1 {v11.d}[1], [x0], x1
st1 {v4.d}[1], [x16], x1
st1 {v30.d}[1], [x0], x1
st1 {v5.d}[1], [x16], x1
st1 {v31.d}[1], [x0], x1
ret x15
7:
sub x16, x0, x1, lsl #4
sub x16, x16, #4
transpose_8x16b v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
add x0, x16, x1, lsl #3
st1 {v20.d}[0], [x16], x1
st1 {v20.d}[1], [x0], x1
st1 {v21.d}[0], [x16], x1
st1 {v21.d}[1], [x0], x1
st1 {v22.d}[0], [x16], x1
st1 {v22.d}[1], [x0], x1
st1 {v23.d}[0], [x16], x1
st1 {v23.d}[1], [x0], x1
st1 {v24.d}[0], [x16], x1
st1 {v24.d}[1], [x0], x1
st1 {v25.d}[0], [x16], x1
st1 {v25.d}[1], [x0], x1
st1 {v26.d}[0], [x16], x1
st1 {v26.d}[1], [x0], x1
st1 {v27.d}[0], [x16], x1
st1 {v27.d}[1], [x0], x1
add x0, x0, #4
ret x15
8:
sub x16, x0, x1, lsl #4
sub x16, x16, #2
transpose_4x16b v22, v23, v24, v25, v26, v27, v28, v29
add x0, x16, x1, lsl #3
st1 {v22.s}[0], [x16], x1
st1 {v22.s}[2], [x0], x1
st1 {v23.s}[0], [x16], x1
st1 {v23.s}[2], [x0], x1
st1 {v24.s}[0], [x16], x1
st1 {v24.s}[2], [x0], x1
st1 {v25.s}[0], [x16], x1
st1 {v25.s}[2], [x0], x1
st1 {v22.s}[1], [x16], x1
st1 {v22.s}[3], [x0], x1
st1 {v23.s}[1], [x16], x1
st1 {v23.s}[3], [x0], x1
st1 {v24.s}[1], [x16], x1
st1 {v24.s}[3], [x0], x1
st1 {v25.s}[1], [x16], x1
st1 {v25.s}[3], [x0], x1
add x0, x0, #2
ret x15
endfunc
// void dav1d_lpf_v_sb_y_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const uint32_t *const vmask,
// const uint8_t (*l)[4], ptrdiff_t b4_stride,
// const Av1FilterLUT *lut, const int w)
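// Rough C-level sketch of the loop structure generated by the lpf_func macro
// below, for the "y" case (hedged pseudocode with descriptive names only;
// the per-4-pixel-column decisions are applied through the v13-v15 masks
// inside the filter functions):
//
//   uint32_t vm = vm0 | vm1 | vm2;
//   for (; vm; vm >>= 4, vm0 >>= 4, vm1 >>= 4, vm2 >>= 4,
//          dst += 16 /* pixels along the edge */) {
//       if (!(vm & 0xf)) continue;         // nothing to filter in this group
//       /* derive the E, I, H thresholds from l[][0] and lut->sharp */
//       if      (vm2 & 0xf) filter_wd16(dst);
//       else if (vm1 & 0xf) filter_wd8(dst);   // wd6 in the uv variant
//       else                filter_wd4(dst);
//   }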
.macro lpf_func dir, type
function lpf_\dir\()_sb_\type\()_8bpc_neon, export=1
mov x11, x30
stp d8, d9, [sp, #-0x40]!
stp d10, d11, [sp, #0x10]
stp d12, d13, [sp, #0x20]
stp d14, d15, [sp, #0x30]
ldp w6, w7, [x2] // vmask[0], vmask[1]
.ifc \type, y
ldr w2, [x2, #8] // vmask[2]
.endif
add x5, x5, #128 // Move to sharp part of lut
.ifc \type, y
orr w7, w7, w2 // vmask[1] |= vmask[2]
.endif
.ifc \dir, v
sub x4, x3, x4, lsl #2
.else
sub x3, x3, #4
lsl x4, x4, #2
.endif
orr w6, w6, w7 // vmask[0] |= vmask[1]
1:
tst w6, #0x0f
.ifc \dir, v
ld1 {v0.16b}, [x4], #16
ld1 {v1.16b}, [x3], #16
.else
ld2 {v0.s,v1.s}[0], [x3], x4
ld2 {v0.s,v1.s}[1], [x3], x4
ld2 {v0.s,v1.s}[2], [x3], x4
ld2 {v0.s,v1.s}[3], [x3], x4
.endif
b.eq 7f // if (!(vm & bits)) continue;
ld1r {v5.16b}, [x5] // sharp[0]
add x5, x5, #8
movi v2.4s, #0xff
dup v13.4s, w6 // vmask[0]
and v0.16b, v0.16b, v2.16b // Keep only lowest byte in each 32 bit word
and v1.16b, v1.16b, v2.16b
cmtst v3.16b, v1.16b, v2.16b // Check for nonzero values in l[0][0]
movi v4.16b, #1
ld1r {v6.16b}, [x5] // sharp[1]
sub x5, x5, #8
bif v1.16b, v0.16b, v3.16b // if (!l[0][0]) L = l[offset][0]
cmtst v2.4s, v1.4s, v2.4s // L != 0
mul v1.4s, v1.4s, v4.4s // L
.ifc \type, y
dup v15.4s, w2 // vmask[2]
.endif
dup v14.4s, w7 // vmask[1]
mov x16, v2.d[0]
mov x17, v2.d[1]
adds x16, x16, x17
b.eq 7f // if (!L) continue;
neg v5.16b, v5.16b // -sharp[0]
movrel x16, word_1248
ushr v12.16b, v1.16b, #4 // H
ld1 {v16.4s}, [x16]
sshl v3.16b, v1.16b, v5.16b // L >> sharp[0]
.ifc \type, y
cmtst v15.4s, v15.4s, v16.4s // if (vmask[2] & bits)
.endif
movi v7.16b, #2
umin v3.16b, v3.16b, v6.16b // imin(L >> sharp[0], sharp[1])
add v0.16b, v1.16b, v7.16b // L + 2
umax v11.16b, v3.16b, v4.16b // imax(imin(), 1) = limit = I
add v0.16b, v0.16b, v0.16b // 2*(L + 2)
cmtst v14.4s, v14.4s, v16.4s // if (vmask[1] & bits)
add v10.16b, v0.16b, v11.16b // 2*(L + 2) + limit = E
cmtst v13.4s, v13.4s, v16.4s // if (vmask[0] & bits)
and v13.16b, v13.16b, v2.16b // vmask[0] &= L != 0
.ifc \type, y
tst w2, #0x0f
b.eq 2f
// wd16
bl lpf_\dir\()_16_16_neon
b 8f
2:
.endif
tst w7, #0x0f
b.eq 3f
.ifc \type, y
// wd8
bl lpf_\dir\()_8_16_neon
.else
// wd6
bl lpf_\dir\()_6_16_neon
.endif
b 8f
3:
// wd4
bl lpf_\dir\()_4_16_neon
.ifc \dir, h
b 8f
7:
// For dir h, the functions above increment x0.
// If the whole function is skipped, increment it here instead.
add x0, x0, x1, lsl #4
.else
7:
.endif
8:
lsr w6, w6, #4 // vmask[0] >>= 4
lsr w7, w7, #4 // vmask[1] >>= 4
.ifc \type, y
lsr w2, w2, #4 // vmask[2] >>= 4
.endif
.ifc \dir, v
add x0, x0, #16
.else
// For dir h, x0 is returned incremented
.endif
cbnz w6, 1b
ldp d14, d15, [sp, #0x30]
ldp d12, d13, [sp, #0x20]
ldp d10, d11, [sp, #0x10]
ldp d8, d9, [sp], 0x40
ret x11
endfunc
.endm
lpf_func v, y
lpf_func h, y
lpf_func v, uv
lpf_func h, uv
const word_1248
.word 1, 2, 4, 8
endconst
Admenri/urge | 57,922 | third_party/dav1d/src/arm/64/looprestoration16.S
/*
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2020, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm.S"
#include "util.S"
const right_ext_mask_buf
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
right_ext_mask:
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
endconst
// void dav1d_wiener_filter7_16bpc_neon(pixel *p, const ptrdiff_t p_stride,
// const pixel (*left)[4], const pixel *lpf,
// const int w, int h,
// const int16_t filter[2][8],
// const enum LrEdgeFlags edges,
// const int bitdepth_max);
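// Hedged reference sketch of the per-pixel math implemented below
// (descriptive pseudocode; f[] is the symmetric half of filter[0], and
// round_bits_h, the 1 << (bitdepth + 6) bias and the -8192 offset match the
// setup code at the top of this function):
//
//   sum    = src[x]*f[3] + (src[x-1] + src[x+1])*f[2]
//          + (src[x-2] + src[x+2])*f[1] + (src[x-3] + src[x+3])*f[0];
//   hor[x] = iclip((sum + (1 << (bitdepth + 6)) + (1 << (round_bits_h - 1)))
//                  >> round_bits_h, 0, 0x7fff) - 8192;
//
// The vertical pass then combines seven such buffered rows with the
// coefficients in v0.h[4-7] and rounds by round_bits_v before clamping the
// result to [0, bitdepth_max].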
function wiener_filter7_16bpc_neon, export=1
ldr w8, [sp]
AARCH64_SIGN_LINK_REGISTER
stp x29, x30, [sp, #-32]!
stp d8, d9, [sp, #16]
mov x29, sp
ld1 {v0.8h, v1.8h}, [x6]
tst w7, #4 // LR_HAVE_TOP
sub_sp 384*2*6
dup v28.8h, w8 // bitdepth_max
clz w8, w8
movi v30.4s, #1
sub w10, w8, #38 // -(bitdepth + 6)
sub w11, w8, #11 // round_bits_v
sub w8, w8, #25 // -round_bits_h
neg w10, w10 // bitdepth + 6
neg w11, w11 // -round_bits_v
dup v2.4s, w10
dup v29.4s, w8 // -round_bits_h
dup v27.4s, w11 // -round_bits_v
movi v31.8h, #0x20, lsl #8 // 1 << 13 = 8192
ushl v30.4s, v30.4s, v2.4s // 1 << (bitdepth + 6)
zip1 v0.2d, v0.2d, v1.2d // move vertical coeffs to v0.h[4-7], freeing up v1
// x9 - t6
// x10 - t5
// x11 - t4
// x12 - t3
// x13 - t2
// x14 - t1
// x15 - t0
mov x14, sp // t1
b.eq L(no_top_7)
mov x16, x2 // backup left
mov x2, #0
bl wiener_filter7_h_16bpc_neon
add x3, x3, x1 // lpf += stride
mov x9, x14 // t6
mov x10, x14 // t5
add x14, x14, #384*2 // t1 += 384*2
bl wiener_filter7_h_16bpc_neon
add x3, x3, x1, lsl #2
add x3, x3, x1 // lpf += stride*5
mov x11, x14 // t4
add x14, x14, #384*2 // t1 += 384*2
mov x2, x16 // left
mov x16, x3 // backup lpf
mov x3, x0 // lpf = p
bl wiener_filter7_h_16bpc_neon
subs w5, w5, #1 // h--
mov x12, x14 // t3
mov x13, x14 // t2
b.eq L(v1_7)
add x3, x3, x1 // src += stride
add x14, x14, #384*2 // t1 += 384*2
bl wiener_filter7_h_16bpc_neon
mov x13, x14 // t2
subs w5, w5, #1 // h--
b.eq L(v2_7)
add x3, x3, x1 // src += stride
add x14, x14, #384*2 // t1 += 384*2
bl wiener_filter7_h_16bpc_neon
subs w5, w5, #1 // h--
b.eq L(v3_7)
add x3, x3, x1 // src += stride
L(main_7):
add x15, x14, #384*2 // t0 = t1 + 384*2
L(main_loop_7):
bl wiener_filter7_hv_16bpc_neon
subs w5, w5, #1 // h--
b.ne L(main_loop_7)
tst w7, #8 // LR_HAVE_BOTTOM
b.eq L(v3_7)
mov x3, x16 // restore lpf
mov x2, #0 // left = NULL
bl wiener_filter7_hv_16bpc_neon
bl wiener_filter7_hv_16bpc_neon
L(v1_7):
bl wiener_filter7_v_16bpc_neon
mov sp, x29
ldp d8, d9, [sp, #16]
ldp x29, x30, [sp], #32
AARCH64_VALIDATE_LINK_REGISTER
ret
L(no_top_7):
add x3, x3, x1, lsl #2
add x16, x3, x1, lsl #1 // lpf += stride*6, backup
mov x3, x0 // lpf = p
bl wiener_filter7_h_16bpc_neon
subs w5, w5, #1 // h--
mov x9, x14 // t6
mov x10, x14 // t5
mov x11, x14 // t4
mov x12, x14 // t3
mov x13, x14 // t2
b.eq L(v1_7)
add x3, x3, x1 // src += p_stride
add x14, x14, #384*2 // t1 += 384*2
bl wiener_filter7_h_16bpc_neon
subs w5, w5, #1 // h--
mov x13, x14 // t2
b.eq L(v2_7)
add x3, x3, x1 // src += p_stride
add x14, x14, #384*2 // t1 += 384*2
bl wiener_filter7_h_16bpc_neon
subs w5, w5, #1 // h--
b.eq L(v3_7)
add x3, x3, x1 // src += p_stride
add x15, x14, #384*2 // t0 = t1 + 384*2
bl wiener_filter7_hv_16bpc_neon
subs w5, w5, #1 // h--
b.eq L(v3_7)
add x15, x15, #384*2*4 // t0 += 384*2*4
bl wiener_filter7_hv_16bpc_neon
subs w5, w5, #1 // h--
b.ne L(main_7)
L(v3_7):
bl wiener_filter7_v_16bpc_neon
L(v2_7):
bl wiener_filter7_v_16bpc_neon
b L(v1_7)
endfunc
function wiener_filter7_h_16bpc_neon
stp x3, x4, [sp, #-32]!
str x14, [sp, #16]
// Set up the src pointers to include the left edge, for LR_HAVE_LEFT, left == NULL
tst w7, #1 // LR_HAVE_LEFT
b.eq 1f
// LR_HAVE_LEFT
cbnz x2, 0f
// left == NULL
sub x3, x3, #6
ld1 {v2.8h, v3.8h}, [x3], #32
b 2f
0:
// LR_HAVE_LEFT, left != NULL
ld1 {v2.8h, v3.8h}, [x3], #32
ld1 {v4.d}[1], [x2], #8
// Move x3 back to account for the last 3 pixels we loaded earlier,
// which we'll shift out.
sub x3, x3, #6
ext v3.16b, v2.16b, v3.16b, #10
ext v2.16b, v4.16b, v2.16b, #10
b 2f
1:
ld1 {v2.8h, v3.8h}, [x3], #32
// !LR_HAVE_LEFT, fill v4 with the leftmost pixel
// and shift v3 to have 3x the first pixel at the front.
dup v4.8h, v2.h[0]
// Move x3 back to account for the last 3 pixels we loaded before,
// which we shifted out.
sub x3, x3, #6
ext v3.16b, v2.16b, v3.16b, #10
ext v2.16b, v4.16b, v2.16b, #10
2:
ld1 {v4.8h}, [x3], #16
tst w7, #2 // LR_HAVE_RIGHT
b.ne 4f
3: // !LR_HAVE_RIGHT
// Check whether we need to pad the right edge
cmp w4, #19
b.ge 4f // If w >= 19, all used input pixels are valid
// 1 <= w < 19, w+3 pixels valid in v2-v4. For w>=9,
// this ends up called again; it's not strictly needed in those
// cases (we pad enough here), but keeping the code as simple as possible.
// The padding pixel is v2/3/4.h[w+2]. x3 points at the next input, ie
// v2/3/4.h[24]. Thus read from x3[w-22] to find the padding pixel.
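// Worked example (assuming w == 4): w+3 == 7 pixels are valid, the padding
// pixel is v2.h[6], and x3[w-22] == x3[-18] (in halfwords) indeed steps back
// from h[24] to h[6].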
sub w17, w4, #22
// Insert padding in v2/3/4.h[w+3] onwards; fuse the +3 (*2) into the
// buffer pointer.
movrel x6, right_ext_mask, -6
ldr h26, [x3, w17, sxtw #1]
sub x6, x6, w4, uxtw #1
dup v26.8h, v26.h[0]
ld1 {v23.16b, v24.16b, v25.16b}, [x6]
bit v2.16b, v26.16b, v23.16b
bit v3.16b, v26.16b, v24.16b
bit v4.16b, v26.16b, v25.16b
4: // Loop horizontally
// Interleaving the mul/mla chains actually hurts performance
// significantly on Cortex A53, thus keeping mul/mla tightly
// chained like this.
ext v17.16b, v2.16b, v3.16b, #4
ext v19.16b, v2.16b, v3.16b, #8
ext v16.16b, v2.16b, v3.16b, #2
ext v20.16b, v2.16b, v3.16b, #10
ext v21.16b, v2.16b, v3.16b, #12
ext v18.16b, v2.16b, v3.16b, #6
add v19.8h, v19.8h, v17.8h
add v20.8h, v20.8h, v16.8h
add v21.8h, v21.8h, v2.8h
smull v6.4s, v18.4h, v0.h[3]
smlal v6.4s, v19.4h, v0.h[2]
smlal v6.4s, v20.4h, v0.h[1]
smlal v6.4s, v21.4h, v0.h[0]
smull2 v7.4s, v18.8h, v0.h[3]
smlal2 v7.4s, v19.8h, v0.h[2]
smlal2 v7.4s, v20.8h, v0.h[1]
smlal2 v7.4s, v21.8h, v0.h[0]
ext v17.16b, v3.16b, v4.16b, #4
ext v19.16b, v3.16b, v4.16b, #8
ext v16.16b, v3.16b, v4.16b, #2
ext v20.16b, v3.16b, v4.16b, #10
ext v21.16b, v3.16b, v4.16b, #12
ext v18.16b, v3.16b, v4.16b, #6
add v19.8h, v19.8h, v17.8h
add v20.8h, v20.8h, v16.8h
add v21.8h, v21.8h, v3.8h
smull v16.4s, v18.4h, v0.h[3]
smlal v16.4s, v19.4h, v0.h[2]
smlal v16.4s, v20.4h, v0.h[1]
smlal v16.4s, v21.4h, v0.h[0]
smull2 v17.4s, v18.8h, v0.h[3]
smlal2 v17.4s, v19.8h, v0.h[2]
smlal2 v17.4s, v20.8h, v0.h[1]
smlal2 v17.4s, v21.8h, v0.h[0]
mvni v24.8h, #0x80, lsl #8 // 0x7fff = (1 << 15) - 1
add v6.4s, v6.4s, v30.4s
add v7.4s, v7.4s, v30.4s
add v16.4s, v16.4s, v30.4s
add v17.4s, v17.4s, v30.4s
srshl v6.4s, v6.4s, v29.4s
srshl v7.4s, v7.4s, v29.4s
srshl v16.4s, v16.4s, v29.4s
srshl v17.4s, v17.4s, v29.4s
sqxtun v6.4h, v6.4s
sqxtun2 v6.8h, v7.4s
sqxtun v7.4h, v16.4s
sqxtun2 v7.8h, v17.4s
umin v6.8h, v6.8h, v24.8h
umin v7.8h, v7.8h, v24.8h
sub v6.8h, v6.8h, v31.8h
sub v7.8h, v7.8h, v31.8h
subs w4, w4, #16
st1 {v6.8h, v7.8h}, [x14], #32
b.le 0f
mov v2.16b, v4.16b
tst w7, #2 // LR_HAVE_RIGHT
ld1 {v3.8h, v4.8h}, [x3], #32
b.ne 4b // If we don't need to pad, just keep filtering.
b 3b // If we need to pad, check how many pixels we have left.
0:
ldr x14, [sp, #16]
ldp x3, x4, [sp], #32
ret
endfunc
function wiener_filter7_v_16bpc_neon
// Backing up/restoring registers shifted, so that x9 gets the value
// of x10, etc, afterwards.
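// Spelled out (this follows from the ldp sequence at the end of this
// function): on return x9 holds the old x10, x10 the old x11, ..., x13 the
// old x14, and x14 is unchanged, i.e. the t6..t2 row pointers each take over
// the next younger row while t1 keeps pointing at the newest one.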
stp x10, x11, [sp, #-64]!
stp x12, x13, [sp, #16]
stp x14, x14, [sp, #32]
stp x0, x4, [sp, #48]
1:
ld1 {v16.8h, v17.8h}, [x9], #32
ld1 {v18.8h, v19.8h}, [x10], #32
ld1 {v20.8h, v21.8h}, [x11], #32
ld1 {v22.8h, v23.8h}, [x12], #32
ld1 {v24.8h, v25.8h}, [x13], #32
ld1 {v6.8h, v7.8h}, [x14], #32
smull v2.4s, v16.4h, v0.h[4]
smlal v2.4s, v18.4h, v0.h[5]
smlal v2.4s, v20.4h, v0.h[6]
smlal v2.4s, v22.4h, v0.h[7]
smlal v2.4s, v24.4h, v0.h[6]
smlal v2.4s, v6.4h, v0.h[5]
smlal v2.4s, v6.4h, v0.h[4]
smull2 v3.4s, v16.8h, v0.h[4]
smlal2 v3.4s, v18.8h, v0.h[5]
smlal2 v3.4s, v20.8h, v0.h[6]
smlal2 v3.4s, v22.8h, v0.h[7]
smlal2 v3.4s, v24.8h, v0.h[6]
smlal2 v3.4s, v6.8h, v0.h[5]
smlal2 v3.4s, v6.8h, v0.h[4]
smull v4.4s, v17.4h, v0.h[4]
smlal v4.4s, v19.4h, v0.h[5]
smlal v4.4s, v21.4h, v0.h[6]
smlal v4.4s, v23.4h, v0.h[7]
smlal v4.4s, v25.4h, v0.h[6]
smlal v4.4s, v7.4h, v0.h[5]
smlal v4.4s, v7.4h, v0.h[4]
smull2 v5.4s, v17.8h, v0.h[4]
smlal2 v5.4s, v19.8h, v0.h[5]
smlal2 v5.4s, v21.8h, v0.h[6]
smlal2 v5.4s, v23.8h, v0.h[7]
smlal2 v5.4s, v25.8h, v0.h[6]
smlal2 v5.4s, v7.8h, v0.h[5]
smlal2 v5.4s, v7.8h, v0.h[4]
srshl v2.4s, v2.4s, v27.4s // -round_bits_v
srshl v3.4s, v3.4s, v27.4s
srshl v4.4s, v4.4s, v27.4s
srshl v5.4s, v5.4s, v27.4s
sqxtun v2.4h, v2.4s
sqxtun2 v2.8h, v3.4s
sqxtun v3.4h, v4.4s
sqxtun2 v3.8h, v5.4s
umin v2.8h, v2.8h, v28.8h // bitdepth_max
umin v3.8h, v3.8h, v28.8h
subs w4, w4, #16
st1 {v2.8h, v3.8h}, [x0], #32
b.gt 1b
ldp x0, x4, [sp, #48]
ldp x13, x14, [sp, #32]
ldp x11, x12, [sp, #16]
ldp x9, x10, [sp], #64
add x0, x0, x1
ret
endfunc
function wiener_filter7_hv_16bpc_neon
// Backing up/restoring registers shifted, so that x9 gets the value
// of x10, etc, and x15==x9, afterwards.
stp x10, x11, [sp, #-80]!
stp x12, x13, [sp, #16]
stp x14, x15, [sp, #32]
stp x10, x0, [sp, #48]
stp x3, x4, [sp, #64]
// Set up the src pointers to include the left edge, for LR_HAVE_LEFT, left == NULL
tst w7, #1 // LR_HAVE_LEFT
b.eq 1f
// LR_HAVE_LEFT
cbnz x2, 0f
// left == NULL
sub x3, x3, #6
ld1 {v2.8h, v3.8h}, [x3], #32
b 2f
0:
// LR_HAVE_LEFT, left != NULL
ld1 {v2.8h, v3.8h}, [x3], #32
ld1 {v4.d}[1], [x2], #8
// Move x3 back to account for the last 3 pixels we loaded earlier,
// which we'll shift out.
sub x3, x3, #6
ext v3.16b, v2.16b, v3.16b, #10
ext v2.16b, v4.16b, v2.16b, #10
b 2f
1:
ld1 {v2.8h, v3.8h}, [x3], #32
// !LR_HAVE_LEFT, fill v4 with the leftmost pixel
// and shift v3 to have 3x the first pixel at the front.
dup v4.8h, v2.h[0]
// Move x3 back to account for the last 3 pixels we loaded before,
// which we shifted out.
sub x3, x3, #6
ext v3.16b, v2.16b, v3.16b, #10
ext v2.16b, v4.16b, v2.16b, #10
2:
ld1 {v4.8h}, [x3], #16
tst w7, #2 // LR_HAVE_RIGHT
b.ne 4f
3: // !LR_HAVE_RIGHT
// Check whether we need to pad the right edge
cmp w4, #19
b.ge 4f // If w >= 19, all used input pixels are valid
// 1 <= w < 19, w+3 pixels valid in v2-v4. For w>=9,
// this ends up called again; it's not strictly needed in those
// cases (we pad enough here), but keeping the code as simple as possible.
// The padding pixel is v2/3/4.h[w+2]. x3 points at the next input, ie
// v2/3/4.h[24]. Thus read from x3[w-22] to find the padding pixel.
sub w17, w4, #22
// Insert padding in v2/3/4.h[w+3] onwards; fuse the +3 (*2) into the
// buffer pointer.
movrel x6, right_ext_mask, -6
ldr h26, [x3, w17, sxtw #1]
sub x6, x6, w4, uxtw #1
dup v26.8h, v26.h[0]
ld1 {v23.16b, v24.16b, v25.16b}, [x6]
bit v2.16b, v26.16b, v23.16b
bit v3.16b, v26.16b, v24.16b
bit v4.16b, v26.16b, v25.16b
4: // Loop horizontally
ext v17.16b, v2.16b, v3.16b, #4
ext v19.16b, v2.16b, v3.16b, #8
ext v16.16b, v2.16b, v3.16b, #2
ext v20.16b, v2.16b, v3.16b, #10
ext v21.16b, v2.16b, v3.16b, #12
ext v18.16b, v2.16b, v3.16b, #6
add v19.8h, v19.8h, v17.8h
add v20.8h, v20.8h, v16.8h
add v21.8h, v21.8h, v2.8h
smull v6.4s, v18.4h, v0.h[3]
smlal v6.4s, v19.4h, v0.h[2]
smlal v6.4s, v20.4h, v0.h[1]
smlal v6.4s, v21.4h, v0.h[0]
smull2 v7.4s, v18.8h, v0.h[3]
smlal2 v7.4s, v19.8h, v0.h[2]
smlal2 v7.4s, v20.8h, v0.h[1]
smlal2 v7.4s, v21.8h, v0.h[0]
ext v17.16b, v3.16b, v4.16b, #4
ext v19.16b, v3.16b, v4.16b, #8
ext v16.16b, v3.16b, v4.16b, #2
ext v20.16b, v3.16b, v4.16b, #10
ext v21.16b, v3.16b, v4.16b, #12
ext v18.16b, v3.16b, v4.16b, #6
add v19.8h, v19.8h, v17.8h
add v20.8h, v20.8h, v16.8h
add v21.8h, v21.8h, v3.8h
smull v24.4s, v18.4h, v0.h[3]
smlal v24.4s, v19.4h, v0.h[2]
smlal v24.4s, v20.4h, v0.h[1]
smlal v24.4s, v21.4h, v0.h[0]
smull2 v25.4s, v18.8h, v0.h[3]
smlal2 v25.4s, v19.8h, v0.h[2]
smlal2 v25.4s, v20.8h, v0.h[1]
smlal2 v25.4s, v21.8h, v0.h[0]
ld1 {v16.8h, v17.8h}, [x9], #32
mvni v26.8h, #0x80, lsl #8 // 0x7fff = (1 << 15) - 1
add v6.4s, v6.4s, v30.4s
add v7.4s, v7.4s, v30.4s
add v24.4s, v24.4s, v30.4s
add v25.4s, v25.4s, v30.4s
ld1 {v18.8h, v19.8h}, [x10], #32
srshl v6.4s, v6.4s, v29.4s
srshl v7.4s, v7.4s, v29.4s
srshl v24.4s, v24.4s, v29.4s
srshl v25.4s, v25.4s, v29.4s
ld1 {v20.8h, v21.8h}, [x11], #32
sqxtun v6.4h, v6.4s
sqxtun2 v6.8h, v7.4s
sqxtun v7.4h, v24.4s
sqxtun2 v7.8h, v25.4s
ld1 {v22.8h, v23.8h}, [x12], #32
umin v6.8h, v6.8h, v26.8h
umin v7.8h, v7.8h, v26.8h
ld1 {v24.8h, v25.8h}, [x13], #32
sub v6.8h, v6.8h, v31.8h
sub v7.8h, v7.8h, v31.8h
ld1 {v8.8h, v9.8h}, [x14], #32
smull v1.4s, v16.4h, v0.h[4]
smlal v1.4s, v18.4h, v0.h[5]
smlal v1.4s, v20.4h, v0.h[6]
smlal v1.4s, v22.4h, v0.h[7]
smlal v1.4s, v24.4h, v0.h[6]
smlal v1.4s, v8.4h, v0.h[5]
smlal v1.4s, v6.4h, v0.h[4]
smull2 v5.4s, v16.8h, v0.h[4]
smlal2 v5.4s, v18.8h, v0.h[5]
smlal2 v5.4s, v20.8h, v0.h[6]
smlal2 v5.4s, v22.8h, v0.h[7]
smlal2 v5.4s, v24.8h, v0.h[6]
smlal2 v5.4s, v8.8h, v0.h[5]
smlal2 v5.4s, v6.8h, v0.h[4]
smull v26.4s, v17.4h, v0.h[4]
smlal v26.4s, v19.4h, v0.h[5]
smlal v26.4s, v21.4h, v0.h[6]
smlal v26.4s, v23.4h, v0.h[7]
smlal v26.4s, v25.4h, v0.h[6]
smlal v26.4s, v9.4h, v0.h[5]
smlal v26.4s, v7.4h, v0.h[4]
smull2 v16.4s, v17.8h, v0.h[4]
smlal2 v16.4s, v19.8h, v0.h[5]
smlal2 v16.4s, v21.8h, v0.h[6]
smlal2 v16.4s, v23.8h, v0.h[7]
smlal2 v16.4s, v25.8h, v0.h[6]
smlal2 v16.4s, v9.8h, v0.h[5]
smlal2 v16.4s, v7.8h, v0.h[4]
srshl v1.4s, v1.4s, v27.4s // -round_bits_v
srshl v5.4s, v5.4s, v27.4s
srshl v26.4s, v26.4s, v27.4s
srshl v16.4s, v16.4s, v27.4s
sqxtun v18.4h, v1.4s
sqxtun2 v18.8h, v5.4s
sqxtun v19.4h, v26.4s
sqxtun2 v19.8h, v16.4s
st1 {v6.8h, v7.8h}, [x15], #32
umin v18.8h, v18.8h, v28.8h // bitdepth_max
umin v19.8h, v19.8h, v28.8h
subs w4, w4, #16
st1 {v18.8h, v19.8h}, [x0], #32
b.le 0f
mov v2.16b, v4.16b
tst w7, #2 // LR_HAVE_RIGHT
ld1 {v3.8h, v4.8h}, [x3], #32
b.ne 4b // If we don't need to pad, just keep filtering.
b 3b // If we need to pad, check how many pixels we have left.
0:
ldp x3, x4, [sp, #64]
ldp x15, x0, [sp, #48]
ldp x13, x14, [sp, #32]
ldp x11, x12, [sp, #16]
ldp x9, x10, [sp], #80
add x3, x3, x1
add x0, x0, x1
ret
endfunc
// void dav1d_wiener_filter5_16bpc_neon(pixel *p, const ptrdiff_t p_stride,
// const pixel (*left)[4], const pixel *lpf,
// const int w, int h,
// const int16_t filter[2][8],
// const enum LrEdgeFlags edges,
// const int bitdepth_max);
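// Hedged sketch of the 5-tap horizontal pass used by this variant
// (descriptive pseudocode; only v0.h[1-3] are used horizontally and
// v0.h[5-7] vertically, so the outermost tap of each 8-entry filter row is
// effectively unused here):
//
//   sum    = src[x]*f[3] + (src[x-1] + src[x+1])*f[2]
//          + (src[x-2] + src[x+2])*f[1];
//   hor[x] = iclip((sum + (1 << (bitdepth + 6)) + (1 << (round_bits_h - 1)))
//                  >> round_bits_h, 0, 0x7fff) - 8192;
//
// A matching 5-tap vertical pass over the t4..t0 row buffers follows,
// rounding by round_bits_v and clamping to [0, bitdepth_max].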
function wiener_filter5_16bpc_neon, export=1
ldr w8, [sp]
AARCH64_SIGN_LINK_REGISTER
stp x29, x30, [sp, #-32]!
stp d8, d9, [sp, #16]
mov x29, sp
ld1 {v0.8h, v1.8h}, [x6]
tst w7, #4 // LR_HAVE_TOP
sub_sp 384*2*4
dup v28.8h, w8 // bitdepth_max
clz w8, w8
movi v30.4s, #1
sub w10, w8, #38 // -(bitdepth + 6)
sub w11, w8, #11 // round_bits_v
sub w8, w8, #25 // -round_bits_h
neg w10, w10 // bitdepth + 6
neg w11, w11 // -round_bits_v
dup v2.4s, w10
dup v29.4s, w8 // -round_bits_h
dup v27.4s, w11 // -round_bits_v
movi v31.8h, #0x20, lsl #8 // 1 << 13 = 8192
ushl v30.4s, v30.4s, v2.4s // 1 << (bitdepth + 6)
zip1 v0.2d, v0.2d, v1.2d // move vertical coeffs to v0.h[4-7], freeing up v1
// x11 - t4
// x12 - t3
// x13 - t2
// x14 - t1
// x15 - t0
mov x14, sp // t1
b.eq L(no_top_5)
mov x16, x2 // backup left
mov x2, #0
bl wiener_filter5_h_16bpc_neon
add x3, x3, x1 // lpf += stride
mov x11, x14 // t4
add x14, x14, #384*2 // t1 += 384*2
bl wiener_filter5_h_16bpc_neon
add x3, x3, x1, lsl #2
add x3, x3, x1 // lpf += stride*5
mov x12, x14 // t3
add x14, x14, #384*2 // t1 += 384*2
mov x2, x16 // left
mov x16, x3 // backup lpf
mov x3, x0 // lpf = p
bl wiener_filter5_h_16bpc_neon
subs w5, w5, #1 // h--
mov x13, x14 // t2
b.eq L(v1_5)
add x3, x3, x1 // src += stride
add x14, x14, #384*2 // t1 += 384*2
bl wiener_filter5_h_16bpc_neon
subs w5, w5, #1 // h--
b.eq L(v2_5)
add x3, x3, x1 // src += stride
L(main_5):
mov x15, x11 // t0 = t4
L(main_loop_5):
bl wiener_filter5_hv_16bpc_neon
subs w5, w5, #1 // h--
b.ne L(main_loop_5)
tst w7, #8 // LR_HAVE_BOTTOM
b.eq L(v2_5)
mov x3, x16 // restore lpf
mov x2, #0 // left = NULL
bl wiener_filter5_hv_16bpc_neon
bl wiener_filter5_hv_16bpc_neon
L(end_5):
mov sp, x29
ldp d8, d9, [sp, #16]
ldp x29, x30, [sp], #32
AARCH64_VALIDATE_LINK_REGISTER
ret
L(no_top_5):
add x3, x3, x1, lsl #2
add x16, x3, x1, lsl #1 // lpf += stride*6, backup
mov x3, x0 // lpf = p
bl wiener_filter5_h_16bpc_neon
subs w5, w5, #1 // h--
mov x11, x14 // t4
mov x12, x14 // t3
mov x13, x14 // t2
b.eq L(v1_5)
add x3, x3, x1 // src += stride
add x14, x14, #384*2 // t1 += 384*2
bl wiener_filter5_h_16bpc_neon
subs w5, w5, #1 // h--
b.eq L(v2_5)
add x3, x3, x1 // src += stride
add x15, x14, #384*2 // t0 = t1 + 384*2
bl wiener_filter5_hv_16bpc_neon
subs w5, w5, #1 // h--
b.eq L(v2_5)
add x15, x15, #384*2*3 // t0 += 384*2*3
bl wiener_filter5_hv_16bpc_neon
subs w5, w5, #1 // h--
b.ne L(main_5)
L(v2_5):
bl wiener_filter5_v_16bpc_neon
add x0, x0, x1
mov x11, x12
mov x12, x13
mov x13, x14
L(v1_5):
bl wiener_filter5_v_16bpc_neon
b L(end_5)
endfunc
function wiener_filter5_h_16bpc_neon
stp x3, x4, [sp, #-32]!
str x14, [sp, #16]
// Set up the src pointers to include the left edge, for LR_HAVE_LEFT, left == NULL
tst w7, #1 // LR_HAVE_LEFT
b.eq 1f
// LR_HAVE_LEFT
cbnz x2, 0f
// left == NULL
sub x3, x3, #4
ld1 {v2.8h, v3.8h}, [x3], #32
b 2f
0:
// LR_HAVE_LEFT, left != NULL
ld1 {v2.8h, v3.8h}, [x3], #32
ld1 {v4.d}[1], [x2], #8
// Move x3 back to account for the last 2 pixels we loaded earlier,
// which we'll shift out.
sub x3, x3, #4
ext v3.16b, v2.16b, v3.16b, #12
ext v2.16b, v4.16b, v2.16b, #12
b 2f
1:
ld1 {v2.8h, v3.8h}, [x3], #32
// !LR_HAVE_LEFT, fill v4 with the leftmost pixel
// and shift v2/v3 to have 2x the first pixel at the front.
dup v4.8h, v2.h[0]
// Move x3 back to account for the last 2 pixels we loaded before,
// which we shifted out.
sub x3, x3, #4
ext v3.16b, v2.16b, v3.16b, #12
ext v2.16b, v4.16b, v2.16b, #12
2:
ld1 {v4.8h}, [x3], #16
tst w7, #2 // LR_HAVE_RIGHT
b.ne 4f
3: // !LR_HAVE_RIGHT
// Check whether we need to pad the right edge
cmp w4, #18
b.ge 4f // If w >= 18, all used input pixels are valid
// 1 <= w < 18, w+2 pixels valid in v2-v4. For w>=9,
// this ends up called again; it's not strictly needed in those
// cases (we pad enough here), but keeping the code as simple as possible.
// The padding pixel is v2/3/4.h[w+1]. x3 points at the next input, ie
// v2/3/4.h[24]. Thus read from x3[w-23] to find the padding pixel.
sub w17, w4, #23
// Insert padding in v2/3/4.h[w+2] onwards; fuse the +2 (*2) into the
// buffer pointer.
movrel x6, right_ext_mask, -4
ldr h26, [x3, w17, sxtw #1]
sub x6, x6, w4, uxtw #1
dup v26.8h, v26.h[0]
ld1 {v23.16b, v24.16b, v25.16b}, [x6]
bit v2.16b, v26.16b, v23.16b
bit v3.16b, v26.16b, v24.16b
bit v4.16b, v26.16b, v25.16b
4: // Loop horizontally
// Interleaving the mul/mla chains actually hurts performance
// significantly on Cortex A53, thus keeping mul/mla tightly
// chained like this.
ext v16.16b, v2.16b, v3.16b, #2
ext v18.16b, v2.16b, v3.16b, #6
ext v19.16b, v2.16b, v3.16b, #8
ext v17.16b, v2.16b, v3.16b, #4
add v18.8h, v18.8h, v16.8h
add v19.8h, v19.8h, v2.8h
smull v6.4s, v17.4h, v0.h[3]
smlal v6.4s, v18.4h, v0.h[2]
smlal v6.4s, v19.4h, v0.h[1]
smull2 v7.4s, v17.8h, v0.h[3]
smlal2 v7.4s, v18.8h, v0.h[2]
smlal2 v7.4s, v19.8h, v0.h[1]
ext v16.16b, v3.16b, v4.16b, #2
ext v18.16b, v3.16b, v4.16b, #6
ext v19.16b, v3.16b, v4.16b, #8
ext v17.16b, v3.16b, v4.16b, #4
add v18.8h, v18.8h, v16.8h
add v19.8h, v19.8h, v3.8h
smull v16.4s, v17.4h, v0.h[3]
smlal v16.4s, v18.4h, v0.h[2]
smlal v16.4s, v19.4h, v0.h[1]
smull2 v17.4s, v17.8h, v0.h[3]
smlal2 v17.4s, v18.8h, v0.h[2]
smlal2 v17.4s, v19.8h, v0.h[1]
mvni v24.8h, #0x80, lsl #8 // 0x7fff = (1 << 15) - 1
add v6.4s, v6.4s, v30.4s
add v7.4s, v7.4s, v30.4s
add v16.4s, v16.4s, v30.4s
add v17.4s, v17.4s, v30.4s
srshl v6.4s, v6.4s, v29.4s
srshl v7.4s, v7.4s, v29.4s
srshl v16.4s, v16.4s, v29.4s
srshl v17.4s, v17.4s, v29.4s
sqxtun v6.4h, v6.4s
sqxtun2 v6.8h, v7.4s
sqxtun v7.4h, v16.4s
sqxtun2 v7.8h, v17.4s
umin v6.8h, v6.8h, v24.8h
umin v7.8h, v7.8h, v24.8h
sub v6.8h, v6.8h, v31.8h
sub v7.8h, v7.8h, v31.8h
subs w4, w4, #16
st1 {v6.8h, v7.8h}, [x14], #32
b.le 0f
mov v2.16b, v4.16b
tst w7, #2 // LR_HAVE_RIGHT
ld1 {v3.8h, v4.8h}, [x3], #32
b.ne 4b // If we don't need to pad, just keep filtering.
b 3b // If we need to pad, check how many pixels we have left.
0:
ldr x14, [sp, #16]
ldp x3, x4, [sp], #32
ret
endfunc
function wiener_filter5_v_16bpc_neon
stp x11, x12, [sp, #-48]!
stp x13, x14, [sp, #16]
stp x0, x4, [sp, #32]
1:
ld1 {v16.8h, v17.8h}, [x11], #32
ld1 {v18.8h, v19.8h}, [x12], #32
ld1 {v20.8h, v21.8h}, [x13], #32
ld1 {v22.8h, v23.8h}, [x14], #32
smull v2.4s, v16.4h, v0.h[5]
smlal v2.4s, v18.4h, v0.h[6]
smlal v2.4s, v20.4h, v0.h[7]
smlal v2.4s, v22.4h, v0.h[6]
smlal v2.4s, v22.4h, v0.h[5]
smull2 v3.4s, v16.8h, v0.h[5]
smlal2 v3.4s, v18.8h, v0.h[6]
smlal2 v3.4s, v20.8h, v0.h[7]
smlal2 v3.4s, v22.8h, v0.h[6]
smlal2 v3.4s, v22.8h, v0.h[5]
smull v4.4s, v17.4h, v0.h[5]
smlal v4.4s, v19.4h, v0.h[6]
smlal v4.4s, v21.4h, v0.h[7]
smlal v4.4s, v23.4h, v0.h[6]
smlal v4.4s, v23.4h, v0.h[5]
smull2 v5.4s, v17.8h, v0.h[5]
smlal2 v5.4s, v19.8h, v0.h[6]
smlal2 v5.4s, v21.8h, v0.h[7]
smlal2 v5.4s, v23.8h, v0.h[6]
smlal2 v5.4s, v23.8h, v0.h[5]
srshl v2.4s, v2.4s, v27.4s // -round_bits_v
srshl v3.4s, v3.4s, v27.4s
srshl v4.4s, v4.4s, v27.4s
srshl v5.4s, v5.4s, v27.4s
sqxtun v2.4h, v2.4s
sqxtun2 v2.8h, v3.4s
sqxtun v3.4h, v4.4s
sqxtun2 v3.8h, v5.4s
umin v2.8h, v2.8h, v28.8h // bitdepth_max
umin v3.8h, v3.8h, v28.8h
subs w4, w4, #16
st1 {v2.8h, v3.8h}, [x0], #32
b.gt 1b
ldp x0, x4, [sp, #32]
ldp x13, x14, [sp, #16]
ldp x11, x12, [sp], #48
ret
endfunc
function wiener_filter5_hv_16bpc_neon
// Backing up/restoring registers shifted, so that x11 gets the value
// of x12, etc, and x15==x11, afterwards.
stp x12, x13, [sp, #-64]!
stp x14, x15, [sp, #16]
stp x12, x0, [sp, #32]
stp x3, x4, [sp, #48]
// Set up the src pointers to include the left edge, for LR_HAVE_LEFT, left == NULL
tst w7, #1 // LR_HAVE_LEFT
b.eq 1f
// LR_HAVE_LEFT
cbnz x2, 0f
// left == NULL
sub x3, x3, #4
ld1 {v2.8h, v3.8h}, [x3], #32
b 2f
0:
// LR_HAVE_LEFT, left != NULL
ld1 {v2.8h, v3.8h}, [x3], #32
ld1 {v4.d}[1], [x2], #8
// Move x3 back to account for the last 2 pixels we loaded earlier,
// which we'll shift out.
sub x3, x3, #4
ext v3.16b, v2.16b, v3.16b, #12
ext v2.16b, v4.16b, v2.16b, #12
b 2f
1:
ld1 {v2.8h, v3.8h}, [x3], #32
// !LR_HAVE_LEFT, fill v4 with the leftmost pixel
// and shift v2/v3 to have 2x the first pixel at the front.
dup v4.8h, v2.h[0]
// Move x3 back to account for the last 2 pixels we loaded before,
// which we shifted out.
sub x3, x3, #4
ext v3.16b, v2.16b, v3.16b, #12
ext v2.16b, v4.16b, v2.16b, #12
2:
ld1 {v4.8h}, [x3], #16
tst w7, #2 // LR_HAVE_RIGHT
b.ne 4f
3: // !LR_HAVE_RIGHT
// Check whether we need to pad the right edge
cmp w4, #18
b.ge 4f // If w >= 18, all used input pixels are valid
// 1 <= w < 18, w+2 pixels valid in v2-v4. For w>=9,
// this ends up called again; it's not strictly needed in those
// cases (we pad enough here), but keeping the code as simple as possible.
// The padding pixel is v2/3/4.h[w+1]. x3 points at the next input, ie
// v2/3/4.h[24]. Thus read from x3[w-23] to find the padding pixel.
sub w17, w4, #23
// Insert padding in v2/3/4.h[w+2] onwards; fuse the +2 (*2) into the
// buffer pointer.
movrel x6, right_ext_mask, -4
ldr h26, [x3, w17, sxtw #1]
sub x6, x6, w4, uxtw #1
dup v26.8h, v26.h[0]
ld1 {v23.16b, v24.16b, v25.16b}, [x6]
bit v2.16b, v26.16b, v23.16b
bit v3.16b, v26.16b, v24.16b
bit v4.16b, v26.16b, v25.16b
4: // Loop horizontally
ext v16.16b, v2.16b, v3.16b, #2
ext v18.16b, v2.16b, v3.16b, #6
ext v19.16b, v2.16b, v3.16b, #8
ext v17.16b, v2.16b, v3.16b, #4
add v18.8h, v18.8h, v16.8h
add v19.8h, v19.8h, v2.8h
smull v6.4s, v17.4h, v0.h[3]
smlal v6.4s, v18.4h, v0.h[2]
smlal v6.4s, v19.4h, v0.h[1]
smull2 v7.4s, v17.8h, v0.h[3]
smlal2 v7.4s, v18.8h, v0.h[2]
smlal2 v7.4s, v19.8h, v0.h[1]
ext v16.16b, v3.16b, v4.16b, #2
ext v18.16b, v3.16b, v4.16b, #6
ext v19.16b, v3.16b, v4.16b, #8
ext v17.16b, v3.16b, v4.16b, #4
add v18.8h, v18.8h, v16.8h
add v19.8h, v19.8h, v3.8h
smull v24.4s, v17.4h, v0.h[3]
smlal v24.4s, v18.4h, v0.h[2]
smlal v24.4s, v19.4h, v0.h[1]
smull2 v25.4s, v17.8h, v0.h[3]
smlal2 v25.4s, v18.8h, v0.h[2]
smlal2 v25.4s, v19.8h, v0.h[1]
ld1 {v16.8h, v17.8h}, [x11], #32
mvni v26.8h, #0x80, lsl #8 // 0x7fff = (1 << 15) - 1
add v6.4s, v6.4s, v30.4s
add v7.4s, v7.4s, v30.4s
add v24.4s, v24.4s, v30.4s
add v25.4s, v25.4s, v30.4s
ld1 {v18.8h, v19.8h}, [x12], #32
srshl v6.4s, v6.4s, v29.4s
srshl v7.4s, v7.4s, v29.4s
srshl v24.4s, v24.4s, v29.4s
srshl v25.4s, v25.4s, v29.4s
ld1 {v20.8h, v21.8h}, [x13], #32
sqxtun v6.4h, v6.4s
sqxtun2 v6.8h, v7.4s
sqxtun v7.4h, v24.4s
sqxtun2 v7.8h, v25.4s
ld1 {v22.8h, v23.8h}, [x14], #32
umin v6.8h, v6.8h, v26.8h
umin v7.8h, v7.8h, v26.8h
sub v6.8h, v6.8h, v31.8h
sub v7.8h, v7.8h, v31.8h
smull v8.4s, v16.4h, v0.h[5]
smlal v8.4s, v18.4h, v0.h[6]
smlal v8.4s, v20.4h, v0.h[7]
smlal v8.4s, v22.4h, v0.h[6]
smlal v8.4s, v6.4h, v0.h[5]
smull2 v9.4s, v16.8h, v0.h[5]
smlal2 v9.4s, v18.8h, v0.h[6]
smlal2 v9.4s, v20.8h, v0.h[7]
smlal2 v9.4s, v22.8h, v0.h[6]
smlal2 v9.4s, v6.8h, v0.h[5]
smull v1.4s, v17.4h, v0.h[5]
smlal v1.4s, v19.4h, v0.h[6]
smlal v1.4s, v21.4h, v0.h[7]
smlal v1.4s, v23.4h, v0.h[6]
smlal v1.4s, v7.4h, v0.h[5]
smull2 v5.4s, v17.8h, v0.h[5]
smlal2 v5.4s, v19.8h, v0.h[6]
smlal2 v5.4s, v21.8h, v0.h[7]
smlal2 v5.4s, v23.8h, v0.h[6]
smlal2 v5.4s, v7.8h, v0.h[5]
srshl v8.4s, v8.4s, v27.4s // -round_bits_v
srshl v9.4s, v9.4s, v27.4s
srshl v1.4s, v1.4s, v27.4s
srshl v5.4s, v5.4s, v27.4s
sqxtun v8.4h, v8.4s
sqxtun2 v8.8h, v9.4s
sqxtun v9.4h, v1.4s
sqxtun2 v9.8h, v5.4s
st1 {v6.8h, v7.8h}, [x15], #32
umin v8.8h, v8.8h, v28.8h // bitdepth_max
umin v9.8h, v9.8h, v28.8h
subs w4, w4, #16
st1 {v8.8h, v9.8h}, [x0], #32
b.le 0f
mov v2.16b, v4.16b
tst w7, #2 // LR_HAVE_RIGHT
ld1 {v3.8h, v4.8h}, [x3], #32
b.ne 4b // If we don't need to pad, just keep filtering.
b 3b // If we need to pad, check how many pixels we have left.
0:
ldp x3, x4, [sp, #48]
ldp x15, x0, [sp, #32]
ldp x13, x14, [sp, #16]
ldp x11, x12, [sp], #64
add x3, x3, x1
add x0, x0, x1
ret
endfunc
#include "looprestoration_tmpl.S"
// void dav1d_sgr_box3_row_h_16bpc_neon(int32_t *sumsq, int16_t *sum,
// const pixel (*left)[4],
// const pixel *src, const int w,
// const enum LrEdgeFlags edges);
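// Hedged C-style sketch of what one call produces (descriptive names only):
// for roughly w+2 output positions, starting one pixel to the left of src,
//
//   sum[x]   = s[x-1] + s[x] + s[x+1];
//   sumsq[x] = s[x-1]*s[x-1] + s[x]*s[x] + s[x+1]*s[x+1];
//
// where s[] is the source row extended on both sides according to `edges`.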
function sgr_box3_row_h_16bpc_neon, export=1
add w4, w4, #2 // w += 2
tst w5, #1 // LR_HAVE_LEFT
b.eq 1f
cbnz x2, 0f
// LR_HAVE_LEFT && left == NULL
sub x3, x3, #4
ld1 {v0.8h, v1.8h}, [x3], #32
b 2f
0:
// LR_HAVE_LEFT, left != NULL
ld1 {v0.8h, v1.8h}, [x3], #32
ld1 {v2.d}[1], [x2]
// Move x3 back to account for the last 2 pixels we loaded earlier,
// which we'll shift out.
sub x3, x3, #4
ext v1.16b, v0.16b, v1.16b, #12
ext v0.16b, v2.16b, v0.16b, #12
b 2f
1:
ld1 {v0.8h, v1.8h}, [x3], #32
// !LR_HAVE_LEFT, fill v2 with the leftmost pixel
// and shift v0/v1 to have 2x the first pixel at the front.
dup v2.8h, v0.h[0]
// Move x3 back to account for the last 2 pixels we loaded before,
// which we shifted out.
sub x3, x3, #4
ext v1.16b, v0.16b, v1.16b, #12
ext v0.16b, v2.16b, v0.16b, #12
2:
tst w5, #2 // LR_HAVE_RIGHT
b.ne 4f
// If we'll need to pad the right edge, load that pixel to pad with
// here since we can find it pretty easily from here.
sub w13, w4, #(2 + 16 - 2 + 1)
ldr h30, [x3, w13, sxtw #1]
// Fill v30 with the right padding pixel
dup v30.8h, v30.h[0]
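// Worked example of the offset above (assuming the caller passed w == 8, so
// w4 == 10 after the add at the top): w13 == 10 - 17 == -7, and x3, which
// points one halfword past the 16 pixels held in v0/v1, reads back to
// src[7], the last valid pixel of the row.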
3: // !LR_HAVE_RIGHT
// Check whether we need to pad the right edge
cmp w4, #10
b.ge 4f // If w >= 10, all used input pixels are valid
// 1 <= w < 10, w pixels valid in v0. For w=9, this ends up called
// again; it's not strictly needed in those cases (we pad enough here),
// but keeping the code as simple as possible.
// Insert padding in v0.h[w] onwards
movrel x13, right_ext_mask
sub x13, x13, w4, uxtw #1
ld1 {v28.16b, v29.16b}, [x13]
bit v0.16b, v30.16b, v28.16b
bit v1.16b, v30.16b, v29.16b
4: // Loop horizontally
ext v26.16b, v0.16b, v1.16b, #2
ext v27.16b, v0.16b, v1.16b, #4
add v6.8h, v0.8h, v26.8h
umull v22.4s, v0.4h, v0.4h
umlal v22.4s, v26.4h, v26.4h
umlal v22.4s, v27.4h, v27.4h
add v6.8h, v6.8h, v27.8h
umull2 v23.4s, v0.8h, v0.8h
umlal2 v23.4s, v26.8h, v26.8h
umlal2 v23.4s, v27.8h, v27.8h
subs w4, w4, #8
st1 {v6.8h}, [x1], #16
st1 {v22.4s,v23.4s}, [x0], #32
b.le 9f
tst w5, #2 // LR_HAVE_RIGHT
mov v0.16b, v1.16b
ld1 {v1.8h}, [x3], #16
b.ne 4b // If we don't need to pad, just keep summing.
b 3b // If we need to pad, check how many pixels we have left.
9:
ret
endfunc
// void dav1d_sgr_box5_row_h_16bpc_neon(int32_t *sumsq, int16_t *sum,
// const pixel (*left)[4],
// const pixel *src, const int w,
// const enum LrEdgeFlags edges);
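// Hedged sketch, analogous to the box3 variant above but with a 5-wide
// window (descriptive names only):
//
//   sum[x]   = s[x-2] + s[x-1] + s[x] + s[x+1] + s[x+2];
//   sumsq[x] = the same five taps, each squared and accumulated;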
function sgr_box5_row_h_16bpc_neon, export=1
add w4, w4, #2 // w += 2
tst w5, #1 // LR_HAVE_LEFT
b.eq 1f
cbnz x2, 0f
// LR_HAVE_LEFT && left == NULL
sub x3, x3, #6
ld1 {v0.8h, v1.8h}, [x3], #32
b 2f
0:
// LR_HAVE_LEFT, left != NULL
ld1 {v0.8h, v1.8h}, [x3], #32
ld1 {v2.d}[1], [x2], #8
// Move x3 back to account for the last 3 pixels we loaded earlier,
// which we'll shift out.
sub x3, x3, #6
ext v1.16b, v0.16b, v1.16b, #10
ext v0.16b, v2.16b, v0.16b, #10
b 2f
1:
ld1 {v0.8h, v1.8h}, [x3], #32
// !LR_HAVE_LEFT, fill v2 with the leftmost pixel
// and shift v0/v1 to have 3x the first pixel at the front.
dup v2.8h, v0.h[0]
// Move x3 back to account for the last 3 pixels we loaded before,
// which we shifted out.
sub x3, x3, #6
ext v1.16b, v0.16b, v1.16b, #10
ext v0.16b, v2.16b, v0.16b, #10
2:
tst w5, #2 // LR_HAVE_RIGHT
b.ne 4f
// If we'll need to pad the right edge, load that pixel to pad with
// here since we can find it pretty easily from here.
sub w13, w4, #(2 + 16 - 3 + 1)
ldr h30, [x3, w13, sxtw #1]
// Fill v30 with the right padding pixel
dup v30.8h, v30.h[0]
3: // !LR_HAVE_RIGHT
// Check whether we need to pad the right edge
cmp w4, #11
b.ge 4f // If w >= 11, all used input pixels are valid
// 1 <= w < 11, w+1 pixels valid in v0. For w=9 or w=10,
// this ends up called again; it's not strictly needed in those
// cases (we pad enough here), but keeping the code as simple as possible.
// Insert padding in v0.h[w+1] onwards; fuse the +1 into the
// buffer pointer.
movrel x13, right_ext_mask, -1
sub x13, x13, w4, uxtw #1
ld1 {v28.16b, v29.16b}, [x13]
bit v0.16b, v30.16b, v28.16b
bit v1.16b, v30.16b, v29.16b
4: // Loop horizontally
ext v26.16b, v0.16b, v1.16b, #2
ext v27.16b, v0.16b, v1.16b, #4
add v6.8h, v0.8h, v26.8h
umull v22.4s, v0.4h, v0.4h
umlal v22.4s, v26.4h, v26.4h
umlal v22.4s, v27.4h, v27.4h
add v6.8h, v6.8h, v27.8h
umull2 v23.4s, v0.8h, v0.8h
umlal2 v23.4s, v26.8h, v26.8h
umlal2 v23.4s, v27.8h, v27.8h
ext v26.16b, v0.16b, v1.16b, #6
ext v27.16b, v0.16b, v1.16b, #8
add v6.8h, v6.8h, v26.8h
umlal v22.4s, v26.4h, v26.4h
umlal v22.4s, v27.4h, v27.4h
add v6.8h, v6.8h, v27.8h
umlal2 v23.4s, v26.8h, v26.8h
umlal2 v23.4s, v27.8h, v27.8h
subs w4, w4, #8
st1 {v6.8h}, [x1], #16
st1 {v22.4s,v23.4s}, [x0], #32
b.le 9f
tst w5, #2 // LR_HAVE_RIGHT
mov v0.16b, v1.16b
ld1 {v1.8h}, [x3], #16
b.ne 4b // If we don't need to pad, just keep summing.
b 3b // If we need to pad, check how many pixels we have left.
9:
ret
endfunc
// void dav1d_sgr_box35_row_h_16bpc_neon(int32_t *sumsq3, int16_t *sum3,
// int32_t *sumsq5, int16_t *sum5,
// const pixel (*left)[4],
// const pixel *src, const int w,
// const enum LrEdgeFlags edges);
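// Combined variant: a single pass over the row produces both the 3-tap
// sums (written through x0/x1 as sumsq3/sum3) and the 5-tap sums (written
// through x2/x3 as sumsq5/sum5), sharing the loads and the squared terms
// between the two accumulators.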
function sgr_box35_row_h_16bpc_neon, export=1
add w6, w6, #2 // w += 2
tst w7, #1 // LR_HAVE_LEFT
b.eq 1f
cbnz x4, 0f
// LR_HAVE_LEFT && left == NULL
sub x5, x5, #6
ld1 {v0.8h, v1.8h}, [x5], #32
b 2f
0:
// LR_HAVE_LEFT, left != NULL
ld1 {v0.8h, v1.8h}, [x5], #32
ld1 {v2.d}[1], [x4], #8
// Move x5 back to account for the last 3 pixels we loaded earlier,
// which we'll shift out.
sub x5, x5, #6
ext v1.16b, v0.16b, v1.16b, #10
ext v0.16b, v2.16b, v0.16b, #10
b 2f
1:
ld1 {v0.8h, v1.8h}, [x5], #32
// !LR_HAVE_LEFT, fill v2 with the leftmost pixel
// and shift v0/v1 to have 3x the first pixel at the front.
dup v2.8h, v0.h[0]
// Move x5 back to account for the last 3 pixels we loaded before,
// which we shifted out.
sub x5, x5, #6
ext v1.16b, v0.16b, v1.16b, #10
ext v0.16b, v2.16b, v0.16b, #10
2:
tst w7, #2 // LR_HAVE_RIGHT
b.ne 4f
// If we'll need to pad the right edge, load that pixel to pad with
// here since we can find it pretty easily from here.
sub w13, w6, #(2 + 16 - 3 + 1)
ldr h30, [x5, w13, sxtw #1]
// Fill v30 with the right padding pixel
dup v30.8h, v30.h[0]
3: // !LR_HAVE_RIGHT
// Check whether we need to pad the right edge
cmp w6, #11
b.ge 4f // If w >= 11, all used input pixels are valid
// 1 <= w < 11, w+1 pixels valid in v0. For w=9 or w=10,
// this ends up called again; it's not strictly needed in those
// cases (we pad enough here), but keeping the code as simple as possible.
// Insert padding in v0.h[w+1] onwards; fuse the +1 into the
// buffer pointer.
movrel x13, right_ext_mask, -1
sub x13, x13, w6, uxtw #1
ld1 {v28.16b, v29.16b}, [x13]
bit v0.16b, v30.16b, v28.16b
bit v1.16b, v30.16b, v29.16b
4: // Loop horizontally
ext v16.16b, v0.16b, v1.16b, #2
ext v17.16b, v0.16b, v1.16b, #4
ext v19.16b, v0.16b, v1.16b, #8
ext v18.16b, v0.16b, v1.16b, #6
add v20.8h, v16.8h, v17.8h
add v21.8h, v0.8h, v19.8h
add v20.8h, v20.8h, v18.8h
umull v22.4s, v16.4h, v16.4h
umlal v22.4s, v17.4h, v17.4h
umlal v22.4s, v18.4h, v18.4h
umull2 v23.4s, v16.8h, v16.8h
umlal2 v23.4s, v17.8h, v17.8h
umlal2 v23.4s, v18.8h, v18.8h
add v21.8h, v21.8h, v20.8h
st1 {v20.8h}, [x1], #16
st1 {v22.4s,v23.4s}, [x0], #32
umlal v22.4s, v0.4h, v0.4h
umlal v22.4s, v19.4h, v19.4h
umlal2 v23.4s, v0.8h, v0.8h
umlal2 v23.4s, v19.8h, v19.8h
subs w6, w6, #8
st1 {v21.8h}, [x3], #16
st1 {v22.4s,v23.4s}, [x2], #32
b.le 9f
tst w7, #2 // LR_HAVE_RIGHT
mov v0.16b, v1.16b
ld1 {v1.8h}, [x5], #16
b.ne 4b // If we don't need to pad, just keep summing.
b 3b // If we need to pad, check how many pixels we have left.
9:
ret
endfunc
sgr_funcs 16
Admenri/urge | 121,619 | third_party/dav1d/src/arm/64/itx.S
/******************************************************************************
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2019, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#include "src/arm/asm.S"
#include "util.S"
// The exported functions in this file have got the following signature:
// void itxfm_add(pixel *dst, ptrdiff_t dst_stride, coef *coeff, int eob);
// Most of the functions use the following register layout:
// x0-x3 external parameters
// x4 function pointer to first transform
// x5 function pointer to second transform
// x6 output parameter for helper function
// x7 input parameter for helper function
// x8 input stride for helper function
// x9-x12 scratch variables for helper functions
// x13 pointer to list of eob thresholds
// x14 return pointer for helper function
// x15 return pointer for main function
// The SIMD registers most often use the following layout:
// v0-v1 multiplication coefficients
// v2-v7 scratch registers
// v8-v15 unused
// v16-v31 inputs/outputs of transforms
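// Overall flow (a summary, not part of the original notes): each exported
// inv_txfm_add_<tx1>_<tx2>_<w>x<h> function typically points x4 at the
// first 1-D transform and x5 at the second, then tail-calls a shared
// per-size helper that loads and clears the coefficients, runs the first
// pass, transposes, runs the second pass and adds the rounded result to
// the destination.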
// Potential further optimizations that are left unimplemented for now:
// - Trying to keep multiplication coefficients in registers across multiple
// transform functions. (The register layout is designed to potentially
// allow this.)
// - Use a simplified version of the transforms themselves for cases where
// we know a significant number of inputs are zero. E.g. if the eob value
// indicates only a quarter of input values are set, for idct16 and up,
// a significant amount of calculation can be skipped, at the cost of more
// code duplication and special casing.
const idct_coeffs, align=4
// idct4
.short 2896, 2896*8, 1567, 3784
// idct8
.short 799, 4017, 3406, 2276
// idct16
.short 401, 4076, 3166, 2598
.short 1931, 3612, 3920, 1189
// idct32
.short 201, 4091, 3035, 2751
.short 1751, 3703, 3857, 1380
.short 995, 3973, 3513, 2106
.short 2440, 3290, 4052, 601
endconst
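// These constants are (in the usual AV1 formulation) cos/sin values in
// Q12, e.g. 2896 ~= 4096/sqrt(2), 1567 ~= 4096*cos(3*pi/8) and
// 3784 ~= 4096*sin(3*pi/8). The "*8" variants are pre-scaled so that
// sqrdmulh, which returns round(2*a*b/65536), yields the same Q12 product
// without a separate shift.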
const idct64_coeffs, align=4
.short 101*8, 4095*8, 2967*8, -2824*8
.short 1660*8, 3745*8, 3822*8, -1474*8
.short 4076, 401, 4017, 799
.short 0, 0, 0, 0
.short 4036*8, -700*8, 2359*8, 3349*8
.short 3461*8, -2191*8, 897*8, 3996*8
.short -3166, -2598, -799, -4017
.short 0, 0, 0, 0
.short 501*8, 4065*8, 3229*8, -2520*8
.short 2019*8, 3564*8, 3948*8, -1092*8
.short 3612, 1931, 2276, 3406
.short 0, 0, 0, 0
.short 4085*8, -301*8, 2675*8, 3102*8
.short 3659*8, -1842*8, 1285*8, 3889*8
.short -3920, -1189, -3406, -2276
.short 0, 0, 0, 0
endconst
const iadst4_coeffs, align=4
// .h[4-5] can be interpreted as .s[2]
.short 1321, 3803, 2482, 3344, 3344, 0
endconst
const iadst8_coeffs, align=4
.short 4076, 401, 3612, 1931
.short 2598, 3166, 1189, 3920
// idct_coeffs
.short 2896, 0, 1567, 3784, 0, 0, 0, 0
endconst
const iadst16_coeffs, align=4
.short 4091, 201, 3973, 995
.short 3703, 1751, 3290, 2440
.short 2751, 3035, 2106, 3513
.short 1380, 3857, 601, 4052
endconst
.macro smull_smlal d0, d1, s0, s1, c0, c1, sz
smull \d0\().4s, \s0\().4h, \c0
smlal \d0\().4s, \s1\().4h, \c1
.ifc \sz, .8h
smull2 \d1\().4s, \s0\().8h, \c0
smlal2 \d1\().4s, \s1\().8h, \c1
.endif
.endm
.macro smull_smlsl d0, d1, s0, s1, c0, c1, sz
smull \d0\().4s, \s0\().4h, \c0
smlsl \d0\().4s, \s1\().4h, \c1
.ifc \sz, .8h
smull2 \d1\().4s, \s0\().8h, \c0
smlsl2 \d1\().4s, \s1\().8h, \c1
.endif
.endm
.macro sqrshrn_sz d0, s0, s1, shift, sz
sqrshrn \d0\().4h, \s0\().4s, \shift
.ifc \sz, .8h
sqrshrn2 \d0\().8h, \s1\().4s, \shift
.endif
.endm
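// These helpers form the usual butterfly rotation step: smull_smlal
// computes d = s0*c0 + s1*c1 (smull_smlsl the s0*c0 - s1*c1 variant) in
// 32 bits, and sqrshrn_sz narrows back to 16 bits with a rounding right
// shift by 12, matching the Q12 scale of the coefficients above.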
.macro scale_input sz, c, r0, r1, r2, r3, r4, r5, r6, r7
sqrdmulh \r0\sz, \r0\sz, \c
sqrdmulh \r1\sz, \r1\sz, \c
sqrdmulh \r2\sz, \r2\sz, \c
sqrdmulh \r3\sz, \r3\sz, \c
.ifnb \r4
sqrdmulh \r4\sz, \r4\sz, \c
sqrdmulh \r5\sz, \r5\sz, \c
sqrdmulh \r6\sz, \r6\sz, \c
sqrdmulh \r7\sz, \r7\sz, \c
.endif
.endm
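// scale_input is used with c = 2896*8 by the 2:1 rectangular transforms
// below; through sqrdmulh this multiplies every input by roughly
// 1/sqrt(2), the extra normalisation those block shapes need before the
// first pass.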
.macro load_add_store load, shift, addsrc, adddst, narrowsrc, narrowdst, store, dst, src, shiftbits=4
.ifnb \load
ld1 {\load}, [\src], x1
.endif
.ifnb \shift
srshr \shift, \shift, #\shiftbits
.endif
.ifnb \addsrc
uaddw \adddst, \adddst, \addsrc
.endif
.ifnb \narrowsrc
sqxtun \narrowdst, \narrowsrc
.endif
.ifnb \store
st1 {\store}, [\dst], x1
.endif
.endm
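// Each load_add_store invocation advances several rows that sit in
// different stages (load, round shift, widening add, narrow, store); the
// wrappers below interleave those stages across consecutive calls so that
// loads and stores overlap the arithmetic instead of serialising per row.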
.macro load_add_store_8x16 dst, src
mov \src, \dst
load_add_store v2.8b, v16.8h, , , , , , \dst, \src
load_add_store v3.8b, v17.8h, , , , , , \dst, \src
load_add_store v4.8b, v18.8h, v2.8b, v16.8h, , , , \dst, \src
load_add_store v5.8b, v19.8h, v3.8b, v17.8h, v16.8h, v2.8b, , \dst, \src
load_add_store v6.8b, v20.8h, v4.8b, v18.8h, v17.8h, v3.8b, v2.8b, \dst, \src
load_add_store v7.8b, v21.8h, v5.8b, v19.8h, v18.8h, v4.8b, v3.8b, \dst, \src
load_add_store v2.8b, v22.8h, v6.8b, v20.8h, v19.8h, v5.8b, v4.8b, \dst, \src
load_add_store v3.8b, v23.8h, v7.8b, v21.8h, v20.8h, v6.8b, v5.8b, \dst, \src
load_add_store v4.8b, v24.8h, v2.8b, v22.8h, v21.8h, v7.8b, v6.8b, \dst, \src
load_add_store v5.8b, v25.8h, v3.8b, v23.8h, v22.8h, v2.8b, v7.8b, \dst, \src
load_add_store v6.8b, v26.8h, v4.8b, v24.8h, v23.8h, v3.8b, v2.8b, \dst, \src
load_add_store v7.8b, v27.8h, v5.8b, v25.8h, v24.8h, v4.8b, v3.8b, \dst, \src
load_add_store v2.8b, v28.8h, v6.8b, v26.8h, v25.8h, v5.8b, v4.8b, \dst, \src
load_add_store v3.8b, v29.8h, v7.8b, v27.8h, v26.8h, v6.8b, v5.8b, \dst, \src
load_add_store v4.8b, v30.8h, v2.8b, v28.8h, v27.8h, v7.8b, v6.8b, \dst, \src
load_add_store v5.8b, v31.8h, v3.8b, v29.8h, v28.8h, v2.8b, v7.8b, \dst, \src
load_add_store , , v4.8b, v30.8h, v29.8h, v3.8b, v2.8b, \dst, \src
load_add_store , , v5.8b, v31.8h, v30.8h, v4.8b, v3.8b, \dst, \src
load_add_store , , , , v31.8h, v5.8b, v4.8b, \dst, \src
load_add_store , , , , , , v5.8b, \dst, \src
.endm
.macro load_add_store_8x8 dst, src, shiftbits=4
mov \src, \dst
load_add_store v2.8b, v16.8h, , , , , , \dst, \src, \shiftbits
load_add_store v3.8b, v17.8h, , , , , , \dst, \src, \shiftbits
load_add_store v4.8b, v18.8h, v2.8b, v16.8h, , , , \dst, \src, \shiftbits
load_add_store v5.8b, v19.8h, v3.8b, v17.8h, v16.8h, v2.8b, , \dst, \src, \shiftbits
load_add_store v6.8b, v20.8h, v4.8b, v18.8h, v17.8h, v3.8b, v2.8b, \dst, \src, \shiftbits
load_add_store v7.8b, v21.8h, v5.8b, v19.8h, v18.8h, v4.8b, v3.8b, \dst, \src, \shiftbits
load_add_store v2.8b, v22.8h, v6.8b, v20.8h, v19.8h, v5.8b, v4.8b, \dst, \src, \shiftbits
load_add_store v3.8b, v23.8h, v7.8b, v21.8h, v20.8h, v6.8b, v5.8b, \dst, \src, \shiftbits
load_add_store , , v2.8b, v22.8h, v21.8h, v7.8b, v6.8b, \dst, \src, \shiftbits
load_add_store , , v3.8b, v23.8h, v22.8h, v2.8b, v7.8b, \dst, \src, \shiftbits
load_add_store , , , , v23.8h, v3.8b, v2.8b, \dst, \src, \shiftbits
load_add_store , , , , , , v3.8b, \dst, \src, \shiftbits
.endm
.macro load_add_store_8x4 dst, src
mov \src, \dst
load_add_store v2.8b, v16.8h, , , , , , \dst, \src
load_add_store v3.8b, v17.8h, , , , , , \dst, \src
load_add_store v4.8b, v18.8h, v2.8b, v16.8h, , , , \dst, \src
load_add_store v5.8b, v19.8h, v3.8b, v17.8h, v16.8h, v2.8b, , \dst, \src
load_add_store , , v4.8b, v18.8h, v17.8h, v3.8b, v2.8b, \dst, \src
load_add_store , , v5.8b, v19.8h, v18.8h, v4.8b, v3.8b, \dst, \src
load_add_store , , , , v19.8h, v5.8b, v4.8b, \dst, \src
load_add_store , , , , , , v5.8b, \dst, \src
.endm
.macro load_add_store4 load, inssrc, insdst, shift, addsrc, adddst, narrowsrc, narrowdst, store, dst, src
.ifnb \load
ld1 {\load}[0], [\src], x1
.endif
.ifnb \inssrc
ins \insdst\().d[1], \inssrc\().d[0]
.endif
.ifnb \shift
srshr \shift, \shift, #4
.endif
.ifnb \load
ld1 {\load}[1], [\src], x1
.endif
.ifnb \addsrc
uaddw \adddst, \adddst, \addsrc
.endif
.ifnb \store
st1 {\store}[0], [\dst], x1
.endif
.ifnb \narrowsrc
sqxtun \narrowdst, \narrowsrc
.endif
.ifnb \store
st1 {\store}[1], [\dst], x1
.endif
.endm
.macro load_add_store_4x16 dst, src
mov \src, \dst
load_add_store4 v0.s, v17, v16, , , , , , , \dst, \src
load_add_store4 v1.s, v19, v18, , , , , , , \dst, \src
load_add_store4 v2.s, v21, v20, v16.8h, , , , , , \dst, \src
load_add_store4 v3.s, v23, v22, v18.8h, v0.8b, v16.8h, , , , \dst, \src
load_add_store4 v4.s, v25, v24, v20.8h, v1.8b, v18.8h, v16.8h, v0.8b, , \dst, \src
load_add_store4 v5.s, v27, v26, v22.8h, v2.8b, v20.8h, v18.8h, v1.8b, v0.s, \dst, \src
load_add_store4 v6.s, v29, v28, v24.8h, v3.8b, v22.8h, v20.8h, v2.8b, v1.s, \dst, \src
load_add_store4 v7.s, v31, v30, v26.8h, v4.8b, v24.8h, v22.8h, v3.8b, v2.s, \dst, \src
load_add_store4 , , , v28.8h, v5.8b, v26.8h, v24.8h, v4.8b, v3.s, \dst, \src
load_add_store4 , , , v30.8h, v6.8b, v28.8h, v26.8h, v5.8b, v4.s, \dst, \src
load_add_store4 , , , , v7.8b, v30.8h, v28.8h, v6.8b, v5.s, \dst, \src
load_add_store4 , , , , , , v30.8h, v7.8b, v6.s, \dst, \src
load_add_store4 , , , , , , , , v7.s, \dst, \src
.endm
.macro load_add_store_4x8 dst, src
mov \src, \dst
load_add_store4 v0.s, v17, v16, , , , , , , \dst, \src
load_add_store4 v1.s, v19, v18, , , , , , , \dst, \src
load_add_store4 v2.s, v21, v20, v16.8h, , , , , , \dst, \src
load_add_store4 v3.s, v23, v22, v18.8h, v0.8b, v16.8h, , , , \dst, \src
load_add_store4 , , , v20.8h, v1.8b, v18.8h, v16.8h, v0.8b, , \dst, \src
load_add_store4 , , , v22.8h, v2.8b, v20.8h, v18.8h, v1.8b, v0.s, \dst, \src
load_add_store4 , , , , v3.8b, v22.8h, v20.8h, v2.8b, v1.s, \dst, \src
load_add_store4 , , , , , , v22.8h, v3.8b, v2.s, \dst, \src
load_add_store4 , , , , , , , , v3.s, \dst, \src
.endm
.macro idct_dc w, h, shift
cbnz w3, 1f
mov w16, #2896*8
ld1r {v16.8h}, [x2]
dup v0.4h, w16
sqrdmulh v16.8h, v16.8h, v0.h[0]
strh wzr, [x2]
.if (\w == 2*\h) || (2*\w == \h)
sqrdmulh v16.8h, v16.8h, v0.h[0]
.endif
.if \shift > 0
srshr v16.8h, v16.8h, #\shift
.endif
sqrdmulh v16.8h, v16.8h, v0.h[0]
srshr v16.8h, v16.8h, #4
mov w4, #\h
b idct_dc_w\w\()_neon
1:
.endm
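// idct_dc implements the eob == 0 shortcut (w3 holds eob, per the
// signature comment above): only the DC coefficient is present, so it is
// scaled by the required 2896/4096 (~1/sqrt(2)) factors and rounding
// shifts, cleared from the coefficient buffer (strh), broadcast in v16.8h
// and then added to every pixel of the block by the idct_dc_w* helpers
// below.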
function idct_dc_w4_neon
1:
ld1 {v0.s}[0], [x0], x1
ld1 {v0.s}[1], [x0], x1
ld1 {v1.s}[0], [x0], x1
ld1 {v1.s}[1], [x0], x1
subs w4, w4, #4
sub x0, x0, x1, lsl #2
uaddw v0.8h, v16.8h, v0.8b
sqxtun v0.8b, v0.8h
uaddw v1.8h, v16.8h, v1.8b
st1 {v0.s}[0], [x0], x1
sqxtun v1.8b, v1.8h
st1 {v0.s}[1], [x0], x1
st1 {v1.s}[0], [x0], x1
st1 {v1.s}[1], [x0], x1
b.gt 1b
ret
endfunc
function idct_dc_w8_neon
1:
ld1 {v0.8b}, [x0], x1
ld1 {v1.8b}, [x0], x1
ld1 {v2.8b}, [x0], x1
uaddw v20.8h, v16.8h, v0.8b
ld1 {v3.8b}, [x0], x1
sub x0, x0, x1, lsl #2
subs w4, w4, #4
uaddw v21.8h, v16.8h, v1.8b
sqxtun v0.8b, v20.8h
uaddw v22.8h, v16.8h, v2.8b
sqxtun v1.8b, v21.8h
uaddw v23.8h, v16.8h, v3.8b
st1 {v0.8b}, [x0], x1
sqxtun v2.8b, v22.8h
st1 {v1.8b}, [x0], x1
sqxtun v3.8b, v23.8h
st1 {v2.8b}, [x0], x1
st1 {v3.8b}, [x0], x1
b.gt 1b
ret
endfunc
function idct_dc_w16_neon
1:
ld1 {v0.16b}, [x0], x1
ld1 {v1.16b}, [x0], x1
ld1 {v2.16b}, [x0], x1
subs w4, w4, #4
uaddw v20.8h, v16.8h, v0.8b
uaddw2 v21.8h, v16.8h, v0.16b
ld1 {v3.16b}, [x0], x1
uaddw v22.8h, v16.8h, v1.8b
uaddw2 v23.8h, v16.8h, v1.16b
sub x0, x0, x1, lsl #2
uaddw v24.8h, v16.8h, v2.8b
uaddw2 v25.8h, v16.8h, v2.16b
sqxtun v0.8b, v20.8h
sqxtun2 v0.16b, v21.8h
uaddw v26.8h, v16.8h, v3.8b
uaddw2 v27.8h, v16.8h, v3.16b
sqxtun v1.8b, v22.8h
sqxtun2 v1.16b, v23.8h
sqxtun v2.8b, v24.8h
sqxtun2 v2.16b, v25.8h
st1 {v0.16b}, [x0], x1
sqxtun v3.8b, v26.8h
sqxtun2 v3.16b, v27.8h
st1 {v1.16b}, [x0], x1
st1 {v2.16b}, [x0], x1
st1 {v3.16b}, [x0], x1
b.gt 1b
ret
endfunc
function idct_dc_w32_neon
1:
ld1 {v0.16b, v1.16b}, [x0], x1
subs w4, w4, #2
uaddw v20.8h, v16.8h, v0.8b
uaddw2 v21.8h, v16.8h, v0.16b
ld1 {v2.16b, v3.16b}, [x0]
uaddw v22.8h, v16.8h, v1.8b
uaddw2 v23.8h, v16.8h, v1.16b
sub x0, x0, x1
uaddw v24.8h, v16.8h, v2.8b
uaddw2 v25.8h, v16.8h, v2.16b
sqxtun v0.8b, v20.8h
sqxtun2 v0.16b, v21.8h
uaddw v26.8h, v16.8h, v3.8b
uaddw2 v27.8h, v16.8h, v3.16b
sqxtun v1.8b, v22.8h
sqxtun2 v1.16b, v23.8h
sqxtun v2.8b, v24.8h
sqxtun2 v2.16b, v25.8h
st1 {v0.16b, v1.16b}, [x0], x1
sqxtun v3.8b, v26.8h
sqxtun2 v3.16b, v27.8h
st1 {v2.16b, v3.16b}, [x0], x1
b.gt 1b
ret
endfunc
function idct_dc_w64_neon
1:
ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0]
subs w4, w4, #1
uaddw v20.8h, v16.8h, v0.8b
uaddw2 v21.8h, v16.8h, v0.16b
uaddw v22.8h, v16.8h, v1.8b
uaddw2 v23.8h, v16.8h, v1.16b
uaddw v24.8h, v16.8h, v2.8b
uaddw2 v25.8h, v16.8h, v2.16b
sqxtun v0.8b, v20.8h
sqxtun2 v0.16b, v21.8h
uaddw v26.8h, v16.8h, v3.8b
uaddw2 v27.8h, v16.8h, v3.16b
sqxtun v1.8b, v22.8h
sqxtun2 v1.16b, v23.8h
sqxtun v2.8b, v24.8h
sqxtun2 v2.16b, v25.8h
sqxtun v3.8b, v26.8h
sqxtun2 v3.16b, v27.8h
st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], x1
b.gt 1b
ret
endfunc
.macro iwht4
add v16.4h, v16.4h, v17.4h
sub v21.4h, v18.4h, v19.4h
sub v20.4h, v16.4h, v21.4h
sshr v20.4h, v20.4h, #1
sub v18.4h, v20.4h, v17.4h
sub v17.4h, v20.4h, v19.4h
add v19.4h, v21.4h, v18.4h
sub v16.4h, v16.4h, v17.4h
.endm
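// iwht4 is the inverse 4-point Walsh-Hadamard transform used for lossless
// blocks; it is exact (adds, subs and one shift, no multiplies), which is
// why the wht_wht_4x4 function below pre-shifts the input by 2 and jumps
// straight to L(itx_4x4_end), skipping the srshr #4 used by the other
// transforms.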
.macro idct_4 r0, r1, r2, r3, sz
smull_smlal v6, v7, \r1, \r3, v0.h[3], v0.h[2], \sz
smull_smlsl v4, v5, \r1, \r3, v0.h[2], v0.h[3], \sz
smull_smlal v2, v3, \r0, \r2, v0.h[0], v0.h[0], \sz
sqrshrn_sz v6, v6, v7, #12, \sz
sqrshrn_sz v7, v4, v5, #12, \sz
smull_smlsl v4, v5, \r0, \r2, v0.h[0], v0.h[0], \sz
sqrshrn_sz v2, v2, v3, #12, \sz
sqrshrn_sz v3, v4, v5, #12, \sz
sqadd \r0\sz, v2\sz, v6\sz
sqsub \r3\sz, v2\sz, v6\sz
sqadd \r1\sz, v3\sz, v7\sz
sqsub \r2\sz, v3\sz, v7\sz
.endm
function inv_dct_4h_x4_neon, export=1
movrel x16, idct_coeffs
ld1 {v0.4h}, [x16]
idct_4 v16, v17, v18, v19, .4h
ret
endfunc
function inv_dct_8h_x4_neon, export=1
movrel x16, idct_coeffs
ld1 {v0.4h}, [x16]
idct_4 v16, v17, v18, v19, .8h
ret
endfunc
.macro iadst_4x4 o0, o1, o2, o3
movrel x16, iadst4_coeffs
ld1 {v0.8h}, [x16]
ssubl v3.4s, v16.4h, v18.4h
smull v4.4s, v16.4h, v0.h[0]
smlal v4.4s, v18.4h, v0.h[1]
smlal v4.4s, v19.4h, v0.h[2]
smull v7.4s, v17.4h, v0.h[3]
saddw v3.4s, v3.4s, v19.4h
smull v5.4s, v16.4h, v0.h[2]
smlsl v5.4s, v18.4h, v0.h[0]
smlsl v5.4s, v19.4h, v0.h[1]
add \o3\().4s, v4.4s, v5.4s
mul \o2\().4s, v3.4s, v0.s[2]
add \o0\().4s, v4.4s, v7.4s
add \o1\().4s, v5.4s, v7.4s
sub \o3\().4s, \o3\().4s, v7.4s
sqrshrn \o0\().4h, \o0\().4s, #12
sqrshrn \o2\().4h, \o2\().4s, #12
sqrshrn \o1\().4h, \o1\().4s, #12
sqrshrn \o3\().4h, \o3\().4s, #12
.endm
function inv_adst_4h_x4_neon, export=1
iadst_4x4 v16, v17, v18, v19
ret
endfunc
function inv_flipadst_4h_x4_neon, export=1
iadst_4x4 v19, v18, v17, v16
ret
endfunc
.macro iadst_8x4 o0, o1, o2, o3
movrel x16, iadst4_coeffs
ld1 {v0.8h}, [x16]
ssubl v2.4s, v16.4h, v18.4h
ssubl2 v3.4s, v16.8h, v18.8h
smull v4.4s, v16.4h, v0.h[0]
smlal v4.4s, v18.4h, v0.h[1]
smlal v4.4s, v19.4h, v0.h[2]
smull2 v5.4s, v16.8h, v0.h[0]
smlal2 v5.4s, v18.8h, v0.h[1]
smlal2 v5.4s, v19.8h, v0.h[2]
saddw v2.4s, v2.4s, v19.4h
saddw2 v3.4s, v3.4s, v19.8h
smull v6.4s, v16.4h, v0.h[2]
smlsl v6.4s, v18.4h, v0.h[0]
smlsl v6.4s, v19.4h, v0.h[1]
smull2 v7.4s, v16.8h, v0.h[2]
smlsl2 v7.4s, v18.8h, v0.h[0]
smlsl2 v7.4s, v19.8h, v0.h[1]
mul v18.4s, v2.4s, v0.s[2]
mul v19.4s, v3.4s, v0.s[2]
smull v2.4s, v17.4h, v0.h[3]
smull2 v3.4s, v17.8h, v0.h[3]
add v16.4s, v4.4s, v2.4s // out0
add v17.4s, v5.4s, v3.4s
add v4.4s, v4.4s, v6.4s // out3
add v5.4s, v5.4s, v7.4s
add v6.4s, v6.4s, v2.4s // out1
add v7.4s, v7.4s, v3.4s
sub v4.4s, v4.4s, v2.4s // out3
sub v5.4s, v5.4s, v3.4s
sqrshrn v18.4h, v18.4s, #12
sqrshrn2 v18.8h, v19.4s, #12
sqrshrn \o0\().4h, v16.4s, #12
sqrshrn2 \o0\().8h, v17.4s, #12
.ifc \o2, v17
mov v17.16b, v18.16b
.endif
sqrshrn \o1\().4h, v6.4s, #12
sqrshrn2 \o1\().8h, v7.4s, #12
sqrshrn \o3\().4h, v4.4s, #12
sqrshrn2 \o3\().8h, v5.4s, #12
.endm
function inv_adst_8h_x4_neon, export=1
iadst_8x4 v16, v17, v18, v19
ret
endfunc
function inv_flipadst_8h_x4_neon, export=1
iadst_8x4 v19, v18, v17, v16
ret
endfunc
function inv_identity_4h_x4_neon, export=1
mov w16, #(5793-4096)*8
dup v0.4h, w16
sqrdmulh v4.4h, v16.4h, v0.h[0]
sqrdmulh v5.4h, v17.4h, v0.h[0]
sqrdmulh v6.4h, v18.4h, v0.h[0]
sqrdmulh v7.4h, v19.4h, v0.h[0]
sqadd v16.4h, v16.4h, v4.4h
sqadd v17.4h, v17.4h, v5.4h
sqadd v18.4h, v18.4h, v6.4h
sqadd v19.4h, v19.4h, v7.4h
ret
endfunc
function inv_identity_8h_x4_neon, export=1
mov w16, #(5793-4096)*8
dup v0.4h, w16
sqrdmulh v4.8h, v16.8h, v0.h[0]
sqrdmulh v5.8h, v17.8h, v0.h[0]
sqrdmulh v6.8h, v18.8h, v0.h[0]
sqrdmulh v7.8h, v19.8h, v0.h[0]
sqadd v16.8h, v16.8h, v4.8h
sqadd v17.8h, v17.8h, v5.8h
sqadd v18.8h, v18.8h, v6.8h
sqadd v19.8h, v19.8h, v7.8h
ret
endfunc
.macro identity_8x4_shift1 r0, r1, r2, r3, c
.irp i, \r0\().8h, \r1\().8h, \r2\().8h, \r3\().8h
sqrdmulh v2.8h, \i, \c
srhadd \i, \i, v2.8h
.endr
.endm
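// The 4-point identity transform scales by sqrt(2). Since sqrt(2) does
// not fit a signed Q15 multiplier, it is split as 1 + (sqrt(2)-1):
// (5793-4096)*8 is sqrt(2)-1 in sqrdmulh's domain (5793/4096 ~= sqrt(2)),
// and the sqadd in the functions above adds the input back in.
// identity_8x4_shift1 folds a following >>1 into srhadd instead, giving
// in*sqrt(2)/2 directly.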
function inv_txfm_add_wht_wht_4x4_8bpc_neon, export=1
mov x15, x30
movi v31.8h, #0
ld1 {v16.4h,v17.4h,v18.4h,v19.4h}, [x2]
st1 {v31.8h}, [x2], #16
sshr v16.4h, v16.4h, #2
sshr v17.4h, v17.4h, #2
sshr v18.4h, v18.4h, #2
sshr v19.4h, v19.4h, #2
iwht4
st1 {v31.8h}, [x2], #16
transpose_4x4h v16, v17, v18, v19, v20, v21, v22, v23
iwht4
ld1 {v0.s}[0], [x0], x1
ld1 {v0.s}[1], [x0], x1
ins v16.d[1], v17.d[0]
ins v18.d[1], v19.d[0]
ld1 {v1.s}[0], [x0], x1
ld1 {v1.s}[1], [x0], x1
b L(itx_4x4_end)
endfunc
function inv_txfm_add_4x4_neon
movi v31.8h, #0
ld1 {v16.4h,v17.4h,v18.4h,v19.4h}, [x2]
st1 {v31.8h}, [x2], #16
blr x4
st1 {v31.8h}, [x2], #16
transpose_4x4h v16, v17, v18, v19, v20, v21, v22, v23
blr x5
ld1 {v0.s}[0], [x0], x1
ld1 {v0.s}[1], [x0], x1
ins v16.d[1], v17.d[0]
ins v18.d[1], v19.d[0]
ld1 {v1.s}[0], [x0], x1
ld1 {v1.s}[1], [x0], x1
srshr v16.8h, v16.8h, #4
srshr v18.8h, v18.8h, #4
L(itx_4x4_end):
sub x0, x0, x1, lsl #2
uaddw v16.8h, v16.8h, v0.8b
sqxtun v0.8b, v16.8h
uaddw v18.8h, v18.8h, v1.8b
st1 {v0.s}[0], [x0], x1
sqxtun v1.8b, v18.8h
st1 {v0.s}[1], [x0], x1
st1 {v1.s}[0], [x0], x1
st1 {v1.s}[1], [x0], x1
ret x15
endfunc
.macro def_fn_4x4 txfm1, txfm2
function inv_txfm_add_\txfm1\()_\txfm2\()_4x4_8bpc_neon, export=1
mov x15, x30
.ifc \txfm1\()_\txfm2, dct_dct
cbnz w3, 1f
mov w16, #2896*8
ld1r {v16.8h}, [x2]
dup v4.8h, w16
strh wzr, [x2]
sqrdmulh v16.8h, v16.8h, v4.h[0]
ld1 {v0.s}[0], [x0], x1
sqrdmulh v20.8h, v16.8h, v4.h[0]
ld1 {v0.s}[1], [x0], x1
srshr v16.8h, v20.8h, #4
ld1 {v1.s}[0], [x0], x1
srshr v18.8h, v20.8h, #4
ld1 {v1.s}[1], [x0], x1
b L(itx_4x4_end)
1:
.endif
adr x4, inv_\txfm1\()_4h_x4_neon
adr x5, inv_\txfm2\()_4h_x4_neon
b inv_txfm_add_4x4_neon
endfunc
.endm
def_fn_4x4 dct, dct
def_fn_4x4 identity, identity
def_fn_4x4 dct, adst
def_fn_4x4 dct, flipadst
def_fn_4x4 dct, identity
def_fn_4x4 adst, dct
def_fn_4x4 adst, adst
def_fn_4x4 adst, flipadst
def_fn_4x4 flipadst, dct
def_fn_4x4 flipadst, adst
def_fn_4x4 flipadst, flipadst
def_fn_4x4 identity, dct
def_fn_4x4 adst, identity
def_fn_4x4 flipadst, identity
def_fn_4x4 identity, adst
def_fn_4x4 identity, flipadst
.macro idct_8 r0, r1, r2, r3, r4, r5, r6, r7, sz, szb
idct_4 \r0, \r2, \r4, \r6, \sz
smull_smlsl v2, v3, \r1, \r7, v0.h[4], v0.h[5], \sz // -> t4a
smull_smlal v4, v5, \r1, \r7, v0.h[5], v0.h[4], \sz // -> t7a
smull_smlsl v6, v7, \r5, \r3, v0.h[6], v0.h[7], \sz // -> t5a
sqrshrn_sz \r1, v2, v3, #12, \sz // t4a
sqrshrn_sz \r7, v4, v5, #12, \sz // t7a
smull_smlal v2, v3, \r5, \r3, v0.h[7], v0.h[6], \sz // -> t6a
sqrshrn_sz \r3, v6, v7, #12, \sz // t5a
sqrshrn_sz \r5, v2, v3, #12, \sz // t6a
sqadd v2\sz, \r1\sz, \r3\sz // t4
sqsub \r1\sz, \r1\sz, \r3\sz // t5a
sqadd v3\sz, \r7\sz, \r5\sz // t7
sqsub \r3\sz, \r7\sz, \r5\sz // t6a
smull_smlsl v4, v5, \r3, \r1, v0.h[0], v0.h[0], \sz // -> t5
smull_smlal v6, v7, \r3, \r1, v0.h[0], v0.h[0], \sz // -> t6
sqrshrn_sz v4, v4, v5, #12, \sz // t5
sqrshrn_sz v5, v6, v7, #12, \sz // t6
sqsub \r7\sz, \r0\sz, v3\sz // out7
sqadd \r0\sz, \r0\sz, v3\sz // out0
sqadd \r1\sz, \r2\sz, v5\sz // out1
sqsub v6\sz, \r2\sz, v5\sz // out6
sqadd \r2\sz, \r4\sz, v4\sz // out2
sqsub \r5\sz, \r4\sz, v4\sz // out5
sqadd \r3\sz, \r6\sz, v2\sz // out3
sqsub \r4\sz, \r6\sz, v2\sz // out4
mov \r6\szb, v6\szb // out6
.endm
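// idct_8 reuses idct_4 for the even inputs (r0, r2, r4, r6) and builds
// t4..t7 from the odd inputs with two rotation stages (the second using
// the 1/sqrt(2) coefficient in v0.h[0]), before the final add/sub
// butterflies that produce out0..out7.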
function inv_dct_8h_x8_neon, export=1
movrel x16, idct_coeffs
ld1 {v0.8h}, [x16]
idct_8 v16, v17, v18, v19, v20, v21, v22, v23, .8h, .16b
ret
endfunc
function inv_dct_4h_x8_neon, export=1
movrel x16, idct_coeffs
ld1 {v0.8h}, [x16]
idct_8 v16, v17, v18, v19, v20, v21, v22, v23, .4h, .8b
ret
endfunc
.macro iadst_8 o0, o1, o2, o3, o4, o5, o6, o7, sz
movrel x16, iadst8_coeffs
ld1 {v0.8h, v1.8h}, [x16]
smull_smlal v2, v3, v23, v16, v0.h[0], v0.h[1], \sz
smull_smlsl v4, v5, v23, v16, v0.h[1], v0.h[0], \sz
smull_smlal v6, v7, v21, v18, v0.h[2], v0.h[3], \sz
sqrshrn_sz v16, v2, v3, #12, \sz // t0a
sqrshrn_sz v23, v4, v5, #12, \sz // t1a
smull_smlsl v2, v3, v21, v18, v0.h[3], v0.h[2], \sz
smull_smlal v4, v5, v19, v20, v0.h[4], v0.h[5], \sz
sqrshrn_sz v18, v6, v7, #12, \sz // t2a
sqrshrn_sz v21, v2, v3, #12, \sz // t3a
smull_smlsl v6, v7, v19, v20, v0.h[5], v0.h[4], \sz
smull_smlal v2, v3, v17, v22, v0.h[6], v0.h[7], \sz
sqrshrn_sz v20, v4, v5, #12, \sz // t4a
sqrshrn_sz v19, v6, v7, #12, \sz // t5a
smull_smlsl v4, v5, v17, v22, v0.h[7], v0.h[6], \sz
sqrshrn_sz v22, v2, v3, #12, \sz // t6a
sqrshrn_sz v17, v4, v5, #12, \sz // t7a
sqadd v2\sz, v16\sz, v20\sz // t0
sqsub v3\sz, v16\sz, v20\sz // t4
sqadd v4\sz, v23\sz, v19\sz // t1
sqsub v5\sz, v23\sz, v19\sz // t5
sqadd v6\sz, v18\sz, v22\sz // t2
sqsub v7\sz, v18\sz, v22\sz // t6
sqadd v18\sz, v21\sz, v17\sz // t3
sqsub v19\sz, v21\sz, v17\sz // t7
smull_smlal v16, v17, v3, v5, v1.h[3], v1.h[2], \sz
smull_smlsl v20, v21, v3, v5, v1.h[2], v1.h[3], \sz
smull_smlsl v22, v23, v19, v7, v1.h[3], v1.h[2], \sz
sqrshrn_sz v3, v16, v17, #12, \sz // t4a
sqrshrn_sz v5, v20, v21, #12, \sz // t5a
smull_smlal v16, v17, v19, v7, v1.h[2], v1.h[3], \sz
sqrshrn_sz v7, v22, v23, #12, \sz // t6a
sqrshrn_sz v19, v16, v17, #12, \sz // t7a
sqadd \o0\()\sz, v2\sz, v6\sz // out0
sqsub v2\sz, v2\sz, v6\sz // t2
sqadd \o7\()\sz, v4\sz, v18\sz // out7
sqsub v4\sz, v4\sz, v18\sz // t3
sqneg \o7\()\sz, \o7\()\sz // out7
sqadd \o1\()\sz, v3\sz, v7\sz // out1
sqsub v3\sz, v3\sz, v7\sz // t6
sqadd \o6\()\sz, v5\sz, v19\sz // out6
sqsub v5\sz, v5\sz, v19\sz // t7
sqneg \o1\()\sz, \o1\()\sz // out1
smull_smlal v18, v19, v2, v4, v1.h[0], v1.h[0], \sz // -> out3 (v19 or v20)
smull_smlsl v6, v7, v2, v4, v1.h[0], v1.h[0], \sz // -> out4 (v20 or v19)
smull_smlsl v20, v21, v3, v5, v1.h[0], v1.h[0], \sz // -> out5 (v21 or v18)
sqrshrn_sz v2, v18, v19, #12, \sz // out3
smull_smlal v18, v19, v3, v5, v1.h[0], v1.h[0], \sz // -> out2 (v18 or v21)
sqrshrn_sz v3, v20, v21, #12, \sz // out5
sqrshrn_sz \o2, v18, v19, #12, \sz // out2 (v18 or v21)
sqrshrn_sz \o4, v6, v7, #12, \sz // out4 (v20 or v19)
sqneg \o3\()\sz, v2\sz // out3
sqneg \o5\()\sz, v3\sz // out5
.endm
function inv_adst_8h_x8_neon, export=1
iadst_8 v16, v17, v18, v19, v20, v21, v22, v23, .8h
ret
endfunc
function inv_flipadst_8h_x8_neon, export=1
iadst_8 v23, v22, v21, v20, v19, v18, v17, v16, .8h
ret
endfunc
function inv_adst_4h_x8_neon, export=1
iadst_8 v16, v17, v18, v19, v20, v21, v22, v23, .4h
ret
endfunc
function inv_flipadst_4h_x8_neon, export=1
iadst_8 v23, v22, v21, v20, v19, v18, v17, v16, .4h
ret
endfunc
function inv_identity_8h_x8_neon, export=1
sqshl v16.8h, v16.8h, #1
sqshl v17.8h, v17.8h, #1
sqshl v18.8h, v18.8h, #1
sqshl v19.8h, v19.8h, #1
sqshl v20.8h, v20.8h, #1
sqshl v21.8h, v21.8h, #1
sqshl v22.8h, v22.8h, #1
sqshl v23.8h, v23.8h, #1
ret
endfunc
function inv_identity_4h_x8_neon, export=1
sqshl v16.4h, v16.4h, #1
sqshl v17.4h, v17.4h, #1
sqshl v18.4h, v18.4h, #1
sqshl v19.4h, v19.4h, #1
sqshl v20.4h, v20.4h, #1
sqshl v21.4h, v21.4h, #1
sqshl v22.4h, v22.4h, #1
sqshl v23.4h, v23.4h, #1
ret
endfunc
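// The 8-point identity transform scales by exactly 2, hence the
// saturating left shift by 1 in the two functions above; no multiplier
// is needed.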
.macro def_fn_8x8_base variant
function inv_txfm_\variant\()add_8x8_neon
movi v28.8h, #0
movi v29.8h, #0
movi v30.8h, #0
movi v31.8h, #0
ld1 {v16.8h,v17.8h,v18.8h,v19.8h}, [x2]
st1 {v28.8h,v29.8h,v30.8h,v31.8h}, [x2], #64
ld1 {v20.8h,v21.8h,v22.8h,v23.8h}, [x2]
st1 {v28.8h,v29.8h,v30.8h,v31.8h}, [x2]
.ifc \variant, identity_
// The identity shl #1 and downshift srshr #1 cancel out
b L(itx_8x8_epilog)
.else
blr x4
srshr v16.8h, v16.8h, #1
srshr v17.8h, v17.8h, #1
srshr v18.8h, v18.8h, #1
srshr v19.8h, v19.8h, #1
srshr v20.8h, v20.8h, #1
srshr v21.8h, v21.8h, #1
srshr v22.8h, v22.8h, #1
srshr v23.8h, v23.8h, #1
L(itx_8x8_epilog):
transpose_8x8h v16, v17, v18, v19, v20, v21, v22, v23, v24, v25
blr x5
load_add_store_8x8 x0, x7
ret x15
.endif
endfunc
.endm
def_fn_8x8_base identity_
def_fn_8x8_base
.macro def_fn_8x8 txfm1, txfm2
function inv_txfm_add_\txfm1\()_\txfm2\()_8x8_8bpc_neon, export=1
mov x15, x30
.ifc \txfm1\()_\txfm2, dct_dct
idct_dc 8, 8, 1
.endif
adr x5, inv_\txfm2\()_8h_x8_neon
.ifc \txfm1, identity
b inv_txfm_identity_add_8x8_neon
.else
adr x4, inv_\txfm1\()_8h_x8_neon
b inv_txfm_add_8x8_neon
.endif
endfunc
.endm
def_fn_8x8 dct, dct
def_fn_8x8 identity, identity
def_fn_8x8 dct, adst
def_fn_8x8 dct, flipadst
def_fn_8x8 dct, identity
def_fn_8x8 adst, dct
def_fn_8x8 adst, adst
def_fn_8x8 adst, flipadst
def_fn_8x8 flipadst, dct
def_fn_8x8 flipadst, adst
def_fn_8x8 flipadst, flipadst
def_fn_8x8 identity, dct
def_fn_8x8 adst, identity
def_fn_8x8 flipadst, identity
def_fn_8x8 identity, adst
def_fn_8x8 identity, flipadst
function inv_txfm_add_8x4_neon
movi v30.8h, #0
movi v31.8h, #0
mov w16, #2896*8
dup v0.4h, w16
ld1 {v16.4h,v17.4h,v18.4h,v19.4h}, [x2]
st1 {v30.8h,v31.8h}, [x2], #32
ld1 {v20.4h,v21.4h,v22.4h,v23.4h}, [x2]
st1 {v30.8h,v31.8h}, [x2]
scale_input .4h, v0.h[0], v16, v17, v18, v19, v20, v21, v22, v23
blr x4
transpose_4x4h v16, v17, v18, v19, v4, v5, v6, v7
transpose_4x4h v20, v21, v22, v23, v4, v5, v6, v7
ins v16.d[1], v20.d[0]
ins v17.d[1], v21.d[0]
ins v18.d[1], v22.d[0]
ins v19.d[1], v23.d[0]
blr x5
load_add_store_8x4 x0, x7
ret x15
endfunc
function inv_txfm_add_4x8_neon
movi v28.8h, #0
movi v29.8h, #0
movi v30.8h, #0
movi v31.8h, #0
mov w16, #2896*8
dup v0.4h, w16
ld1 {v16.8h,v17.8h,v18.8h,v19.8h}, [x2]
st1 {v28.8h,v29.8h,v30.8h,v31.8h}, [x2]
scale_input .8h, v0.h[0], v16, v17, v18, v19
blr x4
transpose_4x8h v16, v17, v18, v19, v4, v5, v6, v7
ins v20.d[0], v16.d[1]
ins v21.d[0], v17.d[1]
ins v22.d[0], v18.d[1]
ins v23.d[0], v19.d[1]
blr x5
load_add_store_4x8 x0, x7
ret x15
endfunc
.macro def_fn_48 w, h, txfm1, txfm2
function inv_txfm_add_\txfm1\()_\txfm2\()_\w\()x\h\()_8bpc_neon, export=1
mov x15, x30
.ifc \txfm1\()_\txfm2, dct_dct
idct_dc \w, \h, 0
.endif
adr x4, inv_\txfm1\()_\h\()h_x\w\()_neon
adr x5, inv_\txfm2\()_\w\()h_x\h\()_neon
b inv_txfm_add_\w\()x\h\()_neon
endfunc
.endm
.macro def_fns_48 w, h
def_fn_48 \w, \h, dct, dct
def_fn_48 \w, \h, identity, identity
def_fn_48 \w, \h, dct, adst
def_fn_48 \w, \h, dct, flipadst
def_fn_48 \w, \h, dct, identity
def_fn_48 \w, \h, adst, dct
def_fn_48 \w, \h, adst, adst
def_fn_48 \w, \h, adst, flipadst
def_fn_48 \w, \h, flipadst, dct
def_fn_48 \w, \h, flipadst, adst
def_fn_48 \w, \h, flipadst, flipadst
def_fn_48 \w, \h, identity, dct
def_fn_48 \w, \h, adst, identity
def_fn_48 \w, \h, flipadst, identity
def_fn_48 \w, \h, identity, adst
def_fn_48 \w, \h, identity, flipadst
.endm
def_fns_48 4, 8
def_fns_48 8, 4
.macro idct_16 sz, szb
idct_8 v16, v18, v20, v22, v24, v26, v28, v30, \sz, \szb
smull_smlsl v2, v3, v17, v31, v1.h[0], v1.h[1], \sz // -> t8a
smull_smlal v4, v5, v17, v31, v1.h[1], v1.h[0], \sz // -> t15a
smull_smlsl v6, v7, v25, v23, v1.h[2], v1.h[3], \sz // -> t9a
sqrshrn_sz v17, v2, v3, #12, \sz // t8a
sqrshrn_sz v31, v4, v5, #12, \sz // t15a
smull_smlal v2, v3, v25, v23, v1.h[3], v1.h[2], \sz // -> t14a
smull_smlsl v4, v5, v21, v27, v1.h[4], v1.h[5], \sz // -> t10a
sqrshrn_sz v23, v6, v7, #12, \sz // t9a
sqrshrn_sz v25, v2, v3, #12, \sz // t14a
smull_smlal v6, v7, v21, v27, v1.h[5], v1.h[4], \sz // -> t13a
smull_smlsl v2, v3, v29, v19, v1.h[6], v1.h[7], \sz // -> t11a
sqrshrn_sz v21, v4, v5, #12, \sz // t10a
sqrshrn_sz v27, v6, v7, #12, \sz // t13a
smull_smlal v4, v5, v29, v19, v1.h[7], v1.h[6], \sz // -> t12a
sqrshrn_sz v19, v2, v3, #12, \sz // t11a
sqrshrn_sz v29, v4, v5, #12, \sz // t12a
sqsub v2\sz, v17\sz, v23\sz // t9
sqadd v17\sz, v17\sz, v23\sz // t8
sqsub v3\sz, v31\sz, v25\sz // t14
sqadd v31\sz, v31\sz, v25\sz // t15
sqsub v23\sz, v19\sz, v21\sz // t10
sqadd v19\sz, v19\sz, v21\sz // t11
sqadd v25\sz, v29\sz, v27\sz // t12
sqsub v29\sz, v29\sz, v27\sz // t13
smull_smlsl v4, v5, v3, v2, v0.h[2], v0.h[3], \sz // -> t9a
smull_smlal v6, v7, v3, v2, v0.h[3], v0.h[2], \sz // -> t14a
sqrshrn_sz v21, v4, v5, #12, \sz // t9a
sqrshrn_sz v27, v6, v7, #12, \sz // t14a
smull_smlsl v4, v5, v29, v23, v0.h[2], v0.h[3], \sz // -> t13a
smull_smlal v6, v7, v29, v23, v0.h[3], v0.h[2], \sz // -> t10a
sqrshrn_sz v29, v4, v5, #12, \sz // t13a
neg v6.4s, v6.4s
.ifc \sz, .8h
neg v7.4s, v7.4s
.endif
sqrshrn_sz v23, v6, v7, #12, \sz // t10a
sqsub v2\sz, v17\sz, v19\sz // t11a
sqadd v17\sz, v17\sz, v19\sz // t8a
sqsub v3\sz, v31\sz, v25\sz // t12a
sqadd v31\sz, v31\sz, v25\sz // t15a
sqadd v19\sz, v21\sz, v23\sz // t9
sqsub v21\sz, v21\sz, v23\sz // t10
sqsub v25\sz, v27\sz, v29\sz // t13
sqadd v27\sz, v27\sz, v29\sz // t14
smull_smlsl v4, v5, v3, v2, v0.h[0], v0.h[0], \sz // -> t11
smull_smlal v6, v7, v3, v2, v0.h[0], v0.h[0], \sz // -> t12
smull_smlsl v2, v3, v25, v21, v0.h[0], v0.h[0], \sz // -> t10a
sqrshrn_sz v4, v4, v5, #12, \sz // t11
sqrshrn_sz v5, v6, v7, #12, \sz // t12
smull_smlal v6, v7, v25, v21, v0.h[0], v0.h[0], \sz // -> t13a
sqrshrn_sz v2, v2, v3, #12, \sz // t10a
sqrshrn_sz v3, v6, v7, #12, \sz // t13a
sqadd v6\sz, v16\sz, v31\sz // out0
sqsub v31\sz, v16\sz, v31\sz // out15
mov v16\szb, v6\szb
sqadd v23\sz, v30\sz, v17\sz // out7
sqsub v7\sz, v30\sz, v17\sz // out8
sqadd v17\sz, v18\sz, v27\sz // out1
sqsub v30\sz, v18\sz, v27\sz // out14
sqadd v18\sz, v20\sz, v3\sz // out2
sqsub v29\sz, v20\sz, v3\sz // out13
sqadd v3\sz, v28\sz, v19\sz // out6
sqsub v25\sz, v28\sz, v19\sz // out9
sqadd v19\sz, v22\sz, v5\sz // out3
sqsub v28\sz, v22\sz, v5\sz // out12
sqadd v20\sz, v24\sz, v4\sz // out4
sqsub v27\sz, v24\sz, v4\sz // out11
sqadd v21\sz, v26\sz, v2\sz // out5
sqsub v26\sz, v26\sz, v2\sz // out10
mov v24\szb, v7\szb
mov v22\szb, v3\szb
.endm
function inv_dct_8h_x16_neon, export=1
movrel x16, idct_coeffs
ld1 {v0.8h, v1.8h}, [x16]
idct_16 .8h, .16b
ret
endfunc
function inv_dct_4h_x16_neon, export=1
movrel x16, idct_coeffs
ld1 {v0.8h, v1.8h}, [x16]
idct_16 .4h, .8b
ret
endfunc
.macro iadst_16 o0, o1, o2, o3, o4, o5, o6, o7, o8, o9, o10, o11, o12, o13, o14, o15, sz, szb
movrel x16, iadst16_coeffs
ld1 {v0.8h, v1.8h}, [x16]
movrel x16, idct_coeffs
smull_smlal v2, v3, v31, v16, v0.h[0], v0.h[1], \sz // -> t0
smull_smlsl v4, v5, v31, v16, v0.h[1], v0.h[0], \sz // -> t1
smull_smlal v6, v7, v29, v18, v0.h[2], v0.h[3], \sz // -> t2
sqrshrn_sz v16, v2, v3, #12, \sz // t0
sqrshrn_sz v31, v4, v5, #12, \sz // t1
smull_smlsl v2, v3, v29, v18, v0.h[3], v0.h[2], \sz // -> t3
smull_smlal v4, v5, v27, v20, v0.h[4], v0.h[5], \sz // -> t4
sqrshrn_sz v18, v6, v7, #12, \sz // t2
sqrshrn_sz v29, v2, v3, #12, \sz // t3
smull_smlsl v6, v7, v27, v20, v0.h[5], v0.h[4], \sz // -> t5
smull_smlal v2, v3, v25, v22, v0.h[6], v0.h[7], \sz // -> t6
sqrshrn_sz v20, v4, v5, #12, \sz // t4
sqrshrn_sz v27, v6, v7, #12, \sz // t5
smull_smlsl v4, v5, v25, v22, v0.h[7], v0.h[6], \sz // -> t7
smull_smlal v6, v7, v23, v24, v1.h[0], v1.h[1], \sz // -> t8
sqrshrn_sz v22, v2, v3, #12, \sz // t6
sqrshrn_sz v25, v4, v5, #12, \sz // t7
smull_smlsl v2, v3, v23, v24, v1.h[1], v1.h[0], \sz // -> t9
smull_smlal v4, v5, v21, v26, v1.h[2], v1.h[3], \sz // -> t10
sqrshrn_sz v23, v6, v7, #12, \sz // t8
sqrshrn_sz v24, v2, v3, #12, \sz // t9
smull_smlsl v6, v7, v21, v26, v1.h[3], v1.h[2], \sz // -> t11
smull_smlal v2, v3, v19, v28, v1.h[4], v1.h[5], \sz // -> t12
sqrshrn_sz v21, v4, v5, #12, \sz // t10
sqrshrn_sz v26, v6, v7, #12, \sz // t11
smull_smlsl v4, v5, v19, v28, v1.h[5], v1.h[4], \sz // -> t13
smull_smlal v6, v7, v17, v30, v1.h[6], v1.h[7], \sz // -> t14
sqrshrn_sz v19, v2, v3, #12, \sz // t12
sqrshrn_sz v28, v4, v5, #12, \sz // t13
smull_smlsl v2, v3, v17, v30, v1.h[7], v1.h[6], \sz // -> t15
sqrshrn_sz v17, v6, v7, #12, \sz // t14
sqrshrn_sz v30, v2, v3, #12, \sz // t15
ld1 {v0.8h}, [x16]
sqsub v2\sz, v16\sz, v23\sz // t8a
sqadd v16\sz, v16\sz, v23\sz // t0a
sqsub v3\sz, v31\sz, v24\sz // t9a
sqadd v31\sz, v31\sz, v24\sz // t1a
sqadd v23\sz, v18\sz, v21\sz // t2a
sqsub v18\sz, v18\sz, v21\sz // t10a
sqadd v24\sz, v29\sz, v26\sz // t3a
sqsub v29\sz, v29\sz, v26\sz // t11a
sqadd v21\sz, v20\sz, v19\sz // t4a
sqsub v20\sz, v20\sz, v19\sz // t12a
sqadd v26\sz, v27\sz, v28\sz // t5a
sqsub v27\sz, v27\sz, v28\sz // t13a
sqadd v19\sz, v22\sz, v17\sz // t6a
sqsub v22\sz, v22\sz, v17\sz // t14a
sqadd v28\sz, v25\sz, v30\sz // t7a
sqsub v25\sz, v25\sz, v30\sz // t15a
smull_smlal v4, v5, v2, v3, v0.h[5], v0.h[4], \sz // -> t8
smull_smlsl v6, v7, v2, v3, v0.h[4], v0.h[5], \sz // -> t9
smull_smlal v2, v3, v18, v29, v0.h[7], v0.h[6], \sz // -> t10
sqrshrn_sz v17, v4, v5, #12, \sz // t8
sqrshrn_sz v30, v6, v7, #12, \sz // t9
smull_smlsl v4, v5, v18, v29, v0.h[6], v0.h[7], \sz // -> t11
smull_smlsl v6, v7, v27, v20, v0.h[5], v0.h[4], \sz // -> t12
sqrshrn_sz v18, v2, v3, #12, \sz // t10
sqrshrn_sz v29, v4, v5, #12, \sz // t11
smull_smlal v2, v3, v27, v20, v0.h[4], v0.h[5], \sz // -> t13
smull_smlsl v4, v5, v25, v22, v0.h[7], v0.h[6], \sz // -> t14
sqrshrn_sz v27, v6, v7, #12, \sz // t12
sqrshrn_sz v20, v2, v3, #12, \sz // t13
smull_smlal v6, v7, v25, v22, v0.h[6], v0.h[7], \sz // -> t15
sqrshrn_sz v25, v4, v5, #12, \sz // t14
sqrshrn_sz v22, v6, v7, #12, \sz // t15
sqsub v2\sz, v16\sz, v21\sz // t4
sqadd v16\sz, v16\sz, v21\sz // t0
sqsub v3\sz, v31\sz, v26\sz // t5
sqadd v31\sz, v31\sz, v26\sz // t1
sqadd v21\sz, v23\sz, v19\sz // t2
sqsub v23\sz, v23\sz, v19\sz // t6
sqadd v26\sz, v24\sz, v28\sz // t3
sqsub v24\sz, v24\sz, v28\sz // t7
sqadd v19\sz, v17\sz, v27\sz // t8a
sqsub v17\sz, v17\sz, v27\sz // t12a
sqadd v28\sz, v30\sz, v20\sz // t9a
sqsub v30\sz, v30\sz, v20\sz // t13a
sqadd v27\sz, v18\sz, v25\sz // t10a
sqsub v18\sz, v18\sz, v25\sz // t14a
sqadd v20\sz, v29\sz, v22\sz // t11a
sqsub v29\sz, v29\sz, v22\sz // t15a
smull_smlal v4, v5, v2, v3, v0.h[3], v0.h[2], \sz // -> t4a
smull_smlsl v6, v7, v2, v3, v0.h[2], v0.h[3], \sz // -> t5a
smull_smlsl v2, v3, v24, v23, v0.h[3], v0.h[2], \sz // -> t6a
sqrshrn_sz v22, v4, v5, #12, \sz // t4a
sqrshrn_sz v25, v6, v7, #12, \sz // t5a
smull_smlal v4, v5, v24, v23, v0.h[2], v0.h[3], \sz // -> t7a
smull_smlal v6, v7, v17, v30, v0.h[3], v0.h[2], \sz // -> t12
sqrshrn_sz v24, v2, v3, #12, \sz // t6a
sqrshrn_sz v23, v4, v5, #12, \sz // t7a
smull_smlsl v2, v3, v17, v30, v0.h[2], v0.h[3], \sz // -> t13
smull_smlsl v4, v5, v29, v18, v0.h[3], v0.h[2], \sz // -> t14
sqrshrn_sz v17, v6, v7, #12, \sz // t12
smull_smlal v6, v7, v29, v18, v0.h[2], v0.h[3], \sz // -> t15
sqrshrn_sz v29, v2, v3, #12, \sz // t13
sqrshrn_sz v30, v4, v5, #12, \sz // t14
sqrshrn_sz v18, v6, v7, #12, \sz // t15
sqsub v2\sz, v16\sz, v21\sz // t2a
.ifc \o0, v16
sqadd \o0\sz, v16\sz, v21\sz // out0
sqsub v21\sz, v31\sz, v26\sz // t3a
sqadd \o15\sz, v31\sz, v26\sz // out15
.else
sqadd v4\sz, v16\sz, v21\sz // out0
sqsub v21\sz, v31\sz, v26\sz // t3a
sqadd \o15\sz, v31\sz, v26\sz // out15
mov \o0\szb, v4\szb
.endif
sqneg \o15\sz, \o15\sz // out15
sqsub v3\sz, v29\sz, v18\sz // t15a
sqadd \o13\sz, v29\sz, v18\sz // out13
sqadd \o2\sz, v17\sz, v30\sz // out2
sqsub v26\sz, v17\sz, v30\sz // t14a
sqneg \o13\sz, \o13\sz // out13
sqadd \o1\sz, v19\sz, v27\sz // out1
sqsub v27\sz, v19\sz, v27\sz // t10
sqadd \o14\sz, v28\sz, v20\sz // out14
sqsub v20\sz, v28\sz, v20\sz // t11
sqneg \o1\sz, \o1\sz // out1
sqadd \o3\sz, v22\sz, v24\sz // out3
sqsub v22\sz, v22\sz, v24\sz // t6
sqadd \o12\sz, v25\sz, v23\sz // out12
sqsub v23\sz, v25\sz, v23\sz // t7
sqneg \o3\sz, \o3\sz // out3
smull_smlsl v24, v25, v2, v21, v0.h[0], v0.h[0], \sz // -> out8 (v24 or v23)
smull_smlal v4, v5, v2, v21, v0.h[0], v0.h[0], \sz // -> out7 (v23 or v24)
smull_smlal v6, v7, v26, v3, v0.h[0], v0.h[0], \sz // -> out5 (v21 or v26)
sqrshrn_sz v24, v24, v25, #12, \sz // out8
sqrshrn_sz v4, v4, v5, #12, \sz // out7
sqrshrn_sz v5, v6, v7, #12, \sz // out5
smull_smlsl v6, v7, v26, v3, v0.h[0], v0.h[0], \sz // -> out10 (v26 or v21)
smull_smlal v2, v3, v22, v23, v0.h[0], v0.h[0], \sz // -> out4 (v20 or v27)
sqrshrn_sz v26, v6, v7, #12, \sz // out10
smull_smlsl v6, v7, v22, v23, v0.h[0], v0.h[0], \sz // -> out11 (v27 or v20)
smull_smlal v22, v23, v27, v20, v0.h[0], v0.h[0], \sz // -> out6 (v22 or v25)
smull_smlsl v21, v25, v27, v20, v0.h[0], v0.h[0], \sz // -> out9 (v25 or v22)
sqrshrn_sz \o4, v2, v3, #12, \sz // out4
sqrshrn_sz v6, v6, v7, #12, \sz // out11
sqrshrn_sz v7, v21, v25, #12, \sz // out9
sqrshrn_sz \o6, v22, v23, #12, \sz // out6
.ifc \o8, v23
mov \o8\szb, v24\szb
mov \o10\szb, v26\szb
.endif
sqneg \o7\sz, v4\sz // out7
sqneg \o5\sz, v5\sz // out5
sqneg \o11\sz, v6\sz // out11
sqneg \o9\sz, v7\sz // out9
.endm
function inv_adst_8h_x16_neon, export=1
iadst_16 v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, .8h, .16b
ret
endfunc
function inv_flipadst_8h_x16_neon, export=1
iadst_16 v31, v30, v29, v28, v27, v26, v25, v24, v23, v22, v21, v20, v19, v18, v17, v16, .8h, .16b
ret
endfunc
function inv_adst_4h_x16_neon, export=1
iadst_16 v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, .4h, .8b
ret
endfunc
function inv_flipadst_4h_x16_neon, export=1
iadst_16 v31, v30, v29, v28, v27, v26, v25, v24, v23, v22, v21, v20, v19, v18, v17, v16, .4h, .8b
ret
endfunc
function inv_identity_8h_x16_neon, export=1
mov w16, #2*(5793-4096)*8
dup v0.4h, w16
.irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
sqrdmulh v2.8h, v\i\().8h, v0.h[0]
sqadd v\i\().8h, v\i\().8h, v\i\().8h
sqadd v\i\().8h, v\i\().8h, v2.8h
.endr
ret
endfunc
function inv_identity_4h_x16_neon, export=1
mov w16, #2*(5793-4096)*8
dup v0.4h, w16
.irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
sqrdmulh v2.4h, v\i\().4h, v0.h[0]
sqadd v\i\().4h, v\i\().4h, v\i\().4h
sqadd v\i\().4h, v\i\().4h, v2.4h
.endr
ret
endfunc
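// The 16-point identity transform scales by 2*sqrt(2): the constant
// 2*(5793-4096)*8 gives in*(2*sqrt(2)-2) via sqrdmulh, and the two sqadds
// add 2*in on top, for 2*sqrt(2)*in in total.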
.macro identity_8x16_shift2 c
.irp i, v16.8h, v17.8h, v18.8h, v19.8h, v20.8h, v21.8h, v22.8h, v23.8h, v24.8h, v25.8h, v26.8h, v27.8h, v28.8h, v29.8h, v30.8h, v31.8h
sqrdmulh v2.8h, \i, \c
sshr v2.8h, v2.8h, #1
srhadd \i, \i, v2.8h
.endr
.endm
.macro identity_8x16_shift1 c
.irp i, v16.8h, v17.8h, v18.8h, v19.8h, v20.8h, v21.8h, v22.8h, v23.8h, v24.8h, v25.8h, v26.8h, v27.8h, v28.8h, v29.8h, v30.8h, v31.8h
sqrdmulh v2.8h, \i, \c
srshr v2.8h, v2.8h, #1
sqadd \i, \i, v2.8h
.endr
.endm
.macro identity_8x8_shift1 c
.irp i, v16.8h, v17.8h, v18.8h, v19.8h, v20.8h, v21.8h, v22.8h, v23.8h
sqrdmulh v2.8h, \i, \c
srshr v2.8h, v2.8h, #1
sqadd \i, \i, v2.8h
.endr
.endm
.macro identity_8x8 c
.irp i, v16.8h, v17.8h, v18.8h, v19.8h, v20.8h, v21.8h, v22.8h, v23.8h
sqrdmulh v2.8h, \i, \c
sqadd \i, \i, \i
sqadd \i, \i, v2.8h
.endr
.endm
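// The *_shift1/_shift2 variants above fold the inter-pass rounding shift
// into the identity scaling: the sqrdmulh product is pre-shifted (srshr
// or sshr) and combined with sqadd/srhadd, so that e.g.
// identity_8x16_shift1 yields (2*sqrt(2)*in) >> 1 without a separate
// shift on the result.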
.macro def_horz_16 scale=0, identity=0, shift=2, suffix
function inv_txfm_horz\suffix\()_16x8_neon
AARCH64_VALID_CALL_TARGET
mov x14, x30
movi v7.8h, #0
.if \identity
mov w16, #2*(5793-4096)*8
dup v0.4h, w16
.elseif \scale
mov w16, #2896*8
dup v0.4h, w16
.endif
.irp i, v16.8h, v17.8h, v18.8h, v19.8h, v20.8h, v21.8h, v22.8h, v23.8h, v24.8h, v25.8h, v26.8h, v27.8h, v28.8h, v29.8h, v30.8h, v31.8h
ld1 {\i}, [x7]
st1 {v7.8h}, [x7], x8
.endr
.if \scale
scale_input .8h, v0.h[0], v16, v17, v18, v19, v20, v21, v22, v23
scale_input .8h, v0.h[0], v24, v25, v26, v27, v28, v29, v30, v31
.endif
.if \identity
identity_8x16_shift2 v0.h[0]
b L(horz_16x8_epilog)
.else
blr x4
.irp i, v16.8h, v17.8h, v18.8h, v19.8h, v20.8h, v21.8h, v22.8h, v23.8h, v24.8h, v25.8h, v26.8h, v27.8h, v28.8h, v29.8h, v30.8h, v31.8h
srshr \i, \i, #\shift
.endr
.if \shift == 1
b L(horz_16x8_epilog)
.else
L(horz_16x8_epilog):
transpose_8x8h v16, v17, v18, v19, v20, v21, v22, v23, v4, v5
transpose_8x8h v24, v25, v26, v27, v28, v29, v30, v31, v4, v5
.irp i, v16.8h, v24.8h, v17.8h, v25.8h, v18.8h, v26.8h, v19.8h, v27.8h, v20.8h, v28.8h, v21.8h, v29.8h, v22.8h, v30.8h, v23.8h, v31.8h
st1 {\i}, [x6], #16
.endr
ret x14
.endif
.endif
endfunc
.endm
def_horz_16 scale=1, identity=0, shift=1, suffix=_scale
def_horz_16 scale=0, identity=1, shift=0, suffix=_identity
def_horz_16 scale=0, identity=0, shift=2
function inv_txfm_add_vert_8x16_neon
mov x14, x30
.irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
ld1 {v\i\().8h}, [x7], x8
.endr
blr x5
load_add_store_8x16 x6, x7
ret x14
endfunc
function inv_txfm_add_16x16_neon
mov x15, x30
sub sp, sp, #512
mov x8, #16*2
.irp i, 0, 8
add x6, sp, #(\i*16*2)
.if \i == 8
cmp w3, w13
b.lt 1f
.endif
add x7, x2, #(\i*2)
blr x9
.endr
b 2f
1:
movi v4.8h, #0
movi v5.8h, #0
movi v6.8h, #0
movi v7.8h, #0
.rept 4
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x6], #64
.endr
2:
.irp i, 0, 8
add x6, x0, #(\i)
add x7, sp, #(\i*2)
bl inv_txfm_add_vert_8x16_neon
.endr
add sp, sp, #512
ret x15
endfunc
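// inv_txfm_add_16x16_neon runs in two passes through a 512 byte (16x16x2)
// stack buffer: the first-pass helper in x9 transforms half of the
// coefficients at a time into the buffer, and w13 (eob_half, set per
// transform pair below) decides whether the second half contains any
// nonzero coefficients or can simply be zero-filled; the second pass then
// reads the buffer and adds the result to the destination, 8 pixels of
// width at a time.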
.macro def_fn_16x16 txfm1, txfm2, eob_half
function inv_txfm_add_\txfm1\()_\txfm2\()_16x16_8bpc_neon, export=1
.ifc \txfm1\()_\txfm2, dct_dct
idct_dc 16, 16, 2
.endif
.ifc \txfm1, identity
adr x9, inv_txfm_horz_identity_16x8_neon
.else
adr x9, inv_txfm_horz_16x8_neon
adr x4, inv_\txfm1\()_8h_x16_neon
.endif
adr x5, inv_\txfm2\()_8h_x16_neon
mov x13, #\eob_half
b inv_txfm_add_16x16_neon
endfunc
.endm
def_fn_16x16 dct, dct, 36
def_fn_16x16 identity, identity, 36
def_fn_16x16 dct, adst, 36
def_fn_16x16 dct, flipadst, 36
def_fn_16x16 dct, identity, 8
def_fn_16x16 adst, dct, 36
def_fn_16x16 adst, adst, 36
def_fn_16x16 adst, flipadst, 36
def_fn_16x16 flipadst, dct, 36
def_fn_16x16 flipadst, adst, 36
def_fn_16x16 flipadst, flipadst, 36
def_fn_16x16 identity, dct, 8
.macro def_fn_416_base variant
function inv_txfm_\variant\()add_16x4_neon
mov x15, x30
movi v4.8h, #0
.ifc \variant, identity_
.irp i, v16.4h, v17.4h, v18.4h, v19.4h
ld1 {\i}, [x2]
st1 {v4.4h}, [x2], #8
.endr
.irp i, v16.d, v17.d, v18.d, v19.d
ld1 {\i}[1], [x2]
st1 {v4.4h}, [x2], #8
.endr
mov w16, #2*(5793-4096)*8
dup v0.4h, w16
.irp i, v20.4h, v21.4h, v22.4h, v23.4h
ld1 {\i}, [x2]
st1 {v4.4h}, [x2], #8
.endr
.irp i, v20.d, v21.d, v22.d, v23.d
ld1 {\i}[1], [x2]
st1 {v4.4h}, [x2], #8
.endr
identity_8x16_shift1 v0.h[0]
b L(itx_16x4_epilog)
.else
.irp i, v16.4h, v17.4h, v18.4h, v19.4h, v20.4h, v21.4h, v22.4h, v23.4h, v24.4h, v25.4h, v26.4h, v27.4h, v28.4h, v29.4h, v30.4h, v31.4h
ld1 {\i}, [x2]
st1 {v4.4h}, [x2], #8
.endr
blr x4
ins v16.d[1], v20.d[0]
ins v17.d[1], v21.d[0]
ins v18.d[1], v22.d[0]
ins v19.d[1], v23.d[0]
.irp i, v16.8h, v17.8h, v18.8h, v19.8h
srshr \i, \i, #1
.endr
ins v24.d[1], v28.d[0]
ins v25.d[1], v29.d[0]
ins v26.d[1], v30.d[0]
ins v27.d[1], v31.d[0]
srshr v20.8h, v24.8h, #1
srshr v21.8h, v25.8h, #1
srshr v22.8h, v26.8h, #1
srshr v23.8h, v27.8h, #1
L(itx_16x4_epilog):
transpose_4x8h v16, v17, v18, v19, v2, v3, v4, v5
blr x5
mov x6, x0
load_add_store_8x4 x6, x7
transpose_4x8h_mov v20, v21, v22, v23, v2, v3, v4, v5, v16, v17, v18, v19
blr x5
add x6, x0, #8
load_add_store_8x4 x6, x7
ret x15
.endif
endfunc
function inv_txfm_\variant\()add_4x16_neon
mov x15, x30
movi v2.8h, #0
mov x11, #32
cmp w3, w13
b.lt 1f
add x6, x2, #16
.ifc \variant, identity_
.irp i, v24.8h, v25.8h, v26.8h, v27.8h
ld1 {\i}, [x6]
st1 {v2.8h}, [x6], x11
.endr
mov w16, #(5793-4096)*8
dup v0.4h, w16
identity_8x4_shift1 v24, v25, v26, v27, v0.h[0]
.else
.irp i, v16.8h, v17.8h, v18.8h, v19.8h
ld1 {\i}, [x6]
st1 {v2.8h}, [x6], x11
.endr
blr x4
srshr v24.8h, v16.8h, #1
srshr v25.8h, v17.8h, #1
srshr v26.8h, v18.8h, #1
srshr v27.8h, v19.8h, #1
.endif
transpose_4x8h v24, v25, v26, v27, v4, v5, v6, v7
ins v28.d[0], v24.d[1]
ins v29.d[0], v25.d[1]
ins v30.d[0], v26.d[1]
ins v31.d[0], v27.d[1]
b 2f
1:
.irp i, v24.4h, v25.4h, v26.4h, v27.4h, v28.4h, v29.4h, v30.4h, v31.4h
movi \i, #0
.endr
2:
movi v2.8h, #0
.irp i, v16.8h, v17.8h, v18.8h, v19.8h
ld1 {\i}, [x2]
st1 {v2.8h}, [x2], x11
.endr
.ifc \variant, identity_
mov w16, #(5793-4096)*8
dup v0.4h, w16
identity_8x4_shift1 v16, v17, v18, v19, v0.h[0]
b L(itx_4x16_epilog)
.else
blr x4
.irp i, v16.8h, v17.8h, v18.8h, v19.8h
srshr \i, \i, #1
.endr
L(itx_4x16_epilog):
transpose_4x8h v16, v17, v18, v19, v4, v5, v6, v7
ins v20.d[0], v16.d[1]
ins v21.d[0], v17.d[1]
ins v22.d[0], v18.d[1]
ins v23.d[0], v19.d[1]
blr x5
load_add_store_4x16 x0, x6
ret x15
.endif
endfunc
.endm
def_fn_416_base identity_
def_fn_416_base
.macro def_fn_416 w, h, txfm1, txfm2, eob_half
function inv_txfm_add_\txfm1\()_\txfm2\()_\w\()x\h\()_8bpc_neon, export=1
.ifc \txfm1\()_\txfm2, dct_dct
idct_dc \w, \h, 1
.endif
.if \w == 4
.ifnc \txfm1, identity
adr x4, inv_\txfm1\()_8h_x\w\()_neon
.endif
adr x5, inv_\txfm2\()_4h_x\h\()_neon
mov w13, #\eob_half
.else
.ifnc \txfm1, identity
adr x4, inv_\txfm1\()_4h_x\w\()_neon
.endif
adr x5, inv_\txfm2\()_8h_x\h\()_neon
.endif
.ifc \txfm1, identity
b inv_txfm_identity_add_\w\()x\h\()_neon
.else
b inv_txfm_add_\w\()x\h\()_neon
.endif
endfunc
.endm
.macro def_fns_416 w, h
def_fn_416 \w, \h, dct, dct, 29
def_fn_416 \w, \h, identity, identity, 29
def_fn_416 \w, \h, dct, adst, 29
def_fn_416 \w, \h, dct, flipadst, 29
def_fn_416 \w, \h, dct, identity, 8
def_fn_416 \w, \h, adst, dct, 29
def_fn_416 \w, \h, adst, adst, 29
def_fn_416 \w, \h, adst, flipadst, 29
def_fn_416 \w, \h, flipadst, dct, 29
def_fn_416 \w, \h, flipadst, adst, 29
def_fn_416 \w, \h, flipadst, flipadst, 29
def_fn_416 \w, \h, identity, dct, 32
def_fn_416 \w, \h, adst, identity, 8
def_fn_416 \w, \h, flipadst, identity, 8
def_fn_416 \w, \h, identity, adst, 32
def_fn_416 \w, \h, identity, flipadst, 32
.endm
def_fns_416 4, 16
def_fns_416 16, 4
.macro def_fn_816_base variant
function inv_txfm_\variant\()add_16x8_neon
mov x15, x30
movi v4.8h, #0
mov w16, #2896*8
dup v0.4h, w16
.irp i, v16.8h, v17.8h, v18.8h, v19.8h, v20.8h, v21.8h, v22.8h, v23.8h, v24.8h, v25.8h, v26.8h, v27.8h, v28.8h, v29.8h, v30.8h, v31.8h
ld1 {\i}, [x2]
st1 {v4.8h}, [x2], #16
.endr
scale_input .8h, v0.h[0], v16, v17, v18, v19, v20, v21, v22, v23
scale_input .8h, v0.h[0], v24, v25, v26, v27, v28, v29, v30, v31
.ifc \variant, identity_
mov w16, #2*(5793-4096)*8
dup v0.4h, w16
identity_8x16_shift1 v0.h[0]
b L(itx_16x8_epilog)
.else
blr x4
.irp i, v16.8h, v17.8h, v18.8h, v19.8h, v20.8h, v21.8h, v22.8h, v23.8h, v24.8h, v25.8h, v26.8h, v27.8h, v28.8h, v29.8h, v30.8h, v31.8h
srshr \i, \i, #1
.endr
L(itx_16x8_epilog):
transpose_8x8h v16, v17, v18, v19, v20, v21, v22, v23, v2, v3
blr x5
mov x6, x0
load_add_store_8x8 x6, x7
transpose_8x8h_mov v24, v25, v26, v27, v28, v29, v30, v31, v2, v3, v16, v17, v18, v19, v20, v21, v22, v23
blr x5
add x0, x0, #8
load_add_store_8x8 x0, x7
ret x15
.endif
endfunc
function inv_txfm_\variant\()add_8x16_neon
mov x15, x30
movi v4.8h, #0
mov w16, #2896*8
dup v0.4h, w16
mov x11, #32
cmp w3, w13
b.lt 1f
add x6, x2, #16
.ifc \variant, identity_
.irp i, v24.8h, v25.8h, v26.8h, v27.8h, v28.8h, v29.8h, v30.8h, v31.8h
ld1 {\i}, [x6]
st1 {v4.8h}, [x6], x11
.endr
scale_input .8h, v0.h[0], v24, v25, v26, v27, v28, v29, v30, v31
// The identity shl #1 and downshift srshr #1 cancel out
.else
.irp i, v16.8h, v17.8h, v18.8h, v19.8h, v20.8h, v21.8h, v22.8h, v23.8h
ld1 {\i}, [x6]
st1 {v4.8h}, [x6], x11
.endr
scale_input .8h, v0.h[0], v16, v17, v18, v19, v20, v21, v22, v23
blr x4
srshr v24.8h, v16.8h, #1
srshr v25.8h, v17.8h, #1
srshr v26.8h, v18.8h, #1
srshr v27.8h, v19.8h, #1
srshr v28.8h, v20.8h, #1
srshr v29.8h, v21.8h, #1
srshr v30.8h, v22.8h, #1
srshr v31.8h, v23.8h, #1
.endif
transpose_8x8h v24, v25, v26, v27, v28, v29, v30, v31, v2, v3
b 2f
1:
.irp i, v24.8h, v25.8h, v26.8h, v27.8h, v28.8h, v29.8h, v30.8h, v31.8h
movi \i, #0
.endr
2:
movi v4.8h, #0
mov w16, #2896*8
dup v0.4h, w16
.irp i, v16.8h, v17.8h, v18.8h, v19.8h, v20.8h, v21.8h, v22.8h, v23.8h
ld1 {\i}, [x2]
st1 {v4.8h}, [x2], x11
.endr
scale_input .8h, v0.h[0], v16, v17, v18, v19, v20, v21, v22, v23
.ifc \variant, identity_
// The identity shl #1 and downshift srshr #1 cancel out
b L(itx_8x16_epilog)
.else
blr x4
.irp i, v16.8h, v17.8h, v18.8h, v19.8h, v20.8h, v21.8h, v22.8h, v23.8h
srshr \i, \i, #1
.endr
L(itx_8x16_epilog):
transpose_8x8h v16, v17, v18, v19, v20, v21, v22, v23, v2, v3
blr x5
load_add_store_8x16 x0, x6
ret x15
.endif
endfunc
.endm
def_fn_816_base identity_
def_fn_816_base
.macro def_fn_816 w, h, txfm1, txfm2, eob_half
function inv_txfm_add_\txfm1\()_\txfm2\()_\w\()x\h\()_8bpc_neon, export=1
.ifc \txfm1\()_\txfm2, dct_dct
idct_dc \w, \h, 1
.endif
.ifnc \txfm1, identity
adr x4, inv_\txfm1\()_8h_x\w\()_neon
.endif
adr x5, inv_\txfm2\()_8h_x\h\()_neon
.if \w == 8
mov x13, #\eob_half
.endif
.ifc \txfm1, identity
b inv_txfm_identity_add_\w\()x\h\()_neon
.else
b inv_txfm_add_\w\()x\h\()_neon
.endif
endfunc
.endm
.macro def_fns_816 w, h
def_fn_816 \w, \h, dct, dct, 43
def_fn_816 \w, \h, identity, identity, 43
def_fn_816 \w, \h, dct, adst, 43
def_fn_816 \w, \h, dct, flipadst, 43
def_fn_816 \w, \h, dct, identity, 8
def_fn_816 \w, \h, adst, dct, 43
def_fn_816 \w, \h, adst, adst, 43
def_fn_816 \w, \h, adst, flipadst, 43
def_fn_816 \w, \h, flipadst, dct, 43
def_fn_816 \w, \h, flipadst, adst, 43
def_fn_816 \w, \h, flipadst, flipadst, 43
def_fn_816 \w, \h, identity, dct, 64
def_fn_816 \w, \h, adst, identity, 8
def_fn_816 \w, \h, flipadst, identity, 8
def_fn_816 \w, \h, identity, adst, 64
def_fn_816 \w, \h, identity, flipadst, 64
.endm
def_fns_816 8, 16
def_fns_816 16, 8
function inv_dct32_odd_8h_x16_neon, export=1
movrel x16, idct_coeffs, 2*16
ld1 {v0.8h, v1.8h}, [x16]
sub x16, x16, #2*16
smull_smlsl v2, v3, v16, v31, v0.h[0], v0.h[1], .8h // -> t16a
smull_smlal v4, v5, v16, v31, v0.h[1], v0.h[0], .8h // -> t31a
smull_smlsl v6, v7, v24, v23, v0.h[2], v0.h[3], .8h // -> t17a
sqrshrn_sz v16, v2, v3, #12, .8h // t16a
sqrshrn_sz v31, v4, v5, #12, .8h // t31a
smull_smlal v2, v3, v24, v23, v0.h[3], v0.h[2], .8h // -> t30a
smull_smlsl v4, v5, v20, v27, v0.h[4], v0.h[5], .8h // -> t18a
sqrshrn_sz v24, v6, v7, #12, .8h // t17a
sqrshrn_sz v23, v2, v3, #12, .8h // t30a
smull_smlal v6, v7, v20, v27, v0.h[5], v0.h[4], .8h // -> t29a
smull_smlsl v2, v3, v28, v19, v0.h[6], v0.h[7], .8h // -> t19a
sqrshrn_sz v20, v4, v5, #12, .8h // t18a
sqrshrn_sz v27, v6, v7, #12, .8h // t29a
smull_smlal v4, v5, v28, v19, v0.h[7], v0.h[6], .8h // -> t28a
smull_smlsl v6, v7, v18, v29, v1.h[0], v1.h[1], .8h // -> t20a
sqrshrn_sz v28, v2, v3, #12, .8h // t19a
sqrshrn_sz v19, v4, v5, #12, .8h // t28a
smull_smlal v2, v3, v18, v29, v1.h[1], v1.h[0], .8h // -> t27a
smull_smlsl v4, v5, v26, v21, v1.h[2], v1.h[3], .8h // -> t21a
sqrshrn_sz v18, v6, v7, #12, .8h // t20a
sqrshrn_sz v29, v2, v3, #12, .8h // t27a
smull_smlal v6, v7, v26, v21, v1.h[3], v1.h[2], .8h // -> t26a
smull_smlsl v2, v3, v22, v25, v1.h[4], v1.h[5], .8h // -> t22a
sqrshrn_sz v26, v4, v5, #12, .8h // t21a
sqrshrn_sz v21, v6, v7, #12, .8h // t26a
smull_smlal v4, v5, v22, v25, v1.h[5], v1.h[4], .8h // -> t25a
smull_smlsl v6, v7, v30, v17, v1.h[6], v1.h[7], .8h // -> t23a
sqrshrn_sz v22, v2, v3, #12, .8h // t22a
sqrshrn_sz v25, v4, v5, #12, .8h // t25a
smull_smlal v2, v3, v30, v17, v1.h[7], v1.h[6], .8h // -> t24a
sqrshrn_sz v30, v6, v7, #12, .8h // t23a
sqrshrn_sz v17, v2, v3, #12, .8h // t24a
ld1 {v0.8h}, [x16]
sqsub v2.8h, v16.8h, v24.8h // t17
sqadd v16.8h, v16.8h, v24.8h // t16
sqsub v3.8h, v31.8h, v23.8h // t30
sqadd v31.8h, v31.8h, v23.8h // t31
sqsub v24.8h, v28.8h, v20.8h // t18
sqadd v28.8h, v28.8h, v20.8h // t19
sqadd v23.8h, v18.8h, v26.8h // t20
sqsub v18.8h, v18.8h, v26.8h // t21
sqsub v20.8h, v30.8h, v22.8h // t22
sqadd v30.8h, v30.8h, v22.8h // t23
sqadd v26.8h, v17.8h, v25.8h // t24
sqsub v17.8h, v17.8h, v25.8h // t25
sqsub v22.8h, v29.8h, v21.8h // t26
sqadd v29.8h, v29.8h, v21.8h // t27
sqadd v25.8h, v19.8h, v27.8h // t28
sqsub v19.8h, v19.8h, v27.8h // t29
smull_smlsl v4, v5, v3, v2, v0.h[4], v0.h[5], .8h // -> t17a
smull_smlal v6, v7, v3, v2, v0.h[5], v0.h[4], .8h // -> t30a
smull_smlal v2, v3, v19, v24, v0.h[5], v0.h[4], .8h // -> t18a
sqrshrn_sz v21, v4, v5, #12, .8h // t17a
sqrshrn_sz v27, v6, v7, #12, .8h // t30a
neg v2.4s, v2.4s // -> t18a
neg v3.4s, v3.4s // -> t18a
smull_smlsl v4, v5, v19, v24, v0.h[4], v0.h[5], .8h // -> t29a
smull_smlsl v6, v7, v22, v18, v0.h[6], v0.h[7], .8h // -> t21a
sqrshrn_sz v19, v2, v3, #12, .8h // t18a
sqrshrn_sz v24, v4, v5, #12, .8h // t29a
smull_smlal v2, v3, v22, v18, v0.h[7], v0.h[6], .8h // -> t26a
smull_smlal v4, v5, v17, v20, v0.h[7], v0.h[6], .8h // -> t22a
sqrshrn_sz v22, v6, v7, #12, .8h // t21a
sqrshrn_sz v18, v2, v3, #12, .8h // t26a
neg v4.4s, v4.4s // -> t22a
neg v5.4s, v5.4s // -> t22a
smull_smlsl v6, v7, v17, v20, v0.h[6], v0.h[7], .8h // -> t25a
sqrshrn_sz v17, v4, v5, #12, .8h // t22a
sqrshrn_sz v20, v6, v7, #12, .8h // t25a
sqsub v2.8h, v27.8h, v24.8h // t29
sqadd v27.8h, v27.8h, v24.8h // t30
sqsub v3.8h, v21.8h, v19.8h // t18
sqadd v21.8h, v21.8h, v19.8h // t17
sqsub v24.8h, v16.8h, v28.8h // t19a
sqadd v16.8h, v16.8h, v28.8h // t16a
sqsub v19.8h, v30.8h, v23.8h // t20a
sqadd v30.8h, v30.8h, v23.8h // t23a
sqsub v28.8h, v17.8h, v22.8h // t21
sqadd v17.8h, v17.8h, v22.8h // t22
sqadd v23.8h, v26.8h, v29.8h // t24a
sqsub v26.8h, v26.8h, v29.8h // t27a
sqadd v22.8h, v20.8h, v18.8h // t25
sqsub v20.8h, v20.8h, v18.8h // t26
sqsub v29.8h, v31.8h, v25.8h // t28a
sqadd v31.8h, v31.8h, v25.8h // t31a
smull_smlsl v4, v5, v2, v3, v0.h[2], v0.h[3], .8h // -> t18a
smull_smlal v6, v7, v2, v3, v0.h[3], v0.h[2], .8h // -> t29a
smull_smlsl v2, v3, v29, v24, v0.h[2], v0.h[3], .8h // -> t19
sqrshrn_sz v18, v4, v5, #12, .8h // t18a
sqrshrn_sz v25, v6, v7, #12, .8h // t29a
smull_smlal v4, v5, v29, v24, v0.h[3], v0.h[2], .8h // -> t28
smull_smlal v6, v7, v26, v19, v0.h[3], v0.h[2], .8h // -> t20
sqrshrn_sz v29, v2, v3, #12, .8h // t19
sqrshrn_sz v24, v4, v5, #12, .8h // t28
neg v6.4s, v6.4s // -> t20
neg v7.4s, v7.4s // -> t20
smull_smlsl v2, v3, v26, v19, v0.h[2], v0.h[3], .8h // -> t27
smull_smlal v4, v5, v20, v28, v0.h[3], v0.h[2], .8h // -> t21a
sqrshrn_sz v26, v6, v7, #12, .8h // t20
sqrshrn_sz v19, v2, v3, #12, .8h // t27
neg v4.4s, v4.4s // -> t21a
neg v5.4s, v5.4s // -> t21a
smull_smlsl v6, v7, v20, v28, v0.h[2], v0.h[3], .8h // -> t26a
sqrshrn_sz v20, v4, v5, #12, .8h // t21a
sqrshrn_sz v28, v6, v7, #12, .8h // t26a
sqsub v2.8h, v16.8h, v30.8h // t23
sqadd v16.8h, v16.8h, v30.8h // t16 = out16
sqsub v3.8h, v31.8h, v23.8h // t24
sqadd v31.8h, v31.8h, v23.8h // t31 = out31
sqsub v23.8h, v21.8h, v17.8h // t22a
sqadd v17.8h, v21.8h, v17.8h // t17a = out17
sqadd v30.8h, v27.8h, v22.8h // t30a = out30
sqsub v21.8h, v27.8h, v22.8h // t25a
sqsub v27.8h, v18.8h, v20.8h // t21
sqadd v18.8h, v18.8h, v20.8h // t18 = out18
sqadd v4.8h, v29.8h, v26.8h // t19a = out19
sqsub v26.8h, v29.8h, v26.8h // t20a
sqadd v29.8h, v25.8h, v28.8h // t29 = out29
sqsub v25.8h, v25.8h, v28.8h // t26
sqadd v28.8h, v24.8h, v19.8h // t28a = out28
sqsub v24.8h, v24.8h, v19.8h // t27a
mov v19.16b, v4.16b // out19
smull_smlsl v4, v5, v24, v26, v0.h[0], v0.h[0], .8h // -> t20
smull_smlal v6, v7, v24, v26, v0.h[0], v0.h[0], .8h // -> t27
sqrshrn_sz v20, v4, v5, #12, .8h // t20
sqrshrn_sz v22, v6, v7, #12, .8h // t27
smull_smlal v4, v5, v25, v27, v0.h[0], v0.h[0], .8h // -> t26a
smull_smlsl v6, v7, v25, v27, v0.h[0], v0.h[0], .8h // -> t21a
mov v27.16b, v22.16b // t27
sqrshrn_sz v26, v4, v5, #12, .8h // t26a
smull_smlsl v24, v25, v21, v23, v0.h[0], v0.h[0], .8h // -> t22
smull_smlal v4, v5, v21, v23, v0.h[0], v0.h[0], .8h // -> t25
sqrshrn_sz v21, v6, v7, #12, .8h // t21a
sqrshrn_sz v22, v24, v25, #12, .8h // t22
sqrshrn_sz v25, v4, v5, #12, .8h // t25
smull_smlsl v4, v5, v3, v2, v0.h[0], v0.h[0], .8h // -> t23a
smull_smlal v6, v7, v3, v2, v0.h[0], v0.h[0], .8h // -> t24a
sqrshrn_sz v23, v4, v5, #12, .8h // t23a
sqrshrn_sz v24, v6, v7, #12, .8h // t24a
ret
endfunc
.macro def_horz_32 scale=0, shift=2, suffix
function inv_txfm_horz\suffix\()_dct_32x8_neon
mov x14, x30
movi v7.8h, #0
lsl x8, x8, #1
.if \scale
mov w16, #2896*8
dup v0.4h, w16
.endif
.irp i, v16.8h, v17.8h, v18.8h, v19.8h, v20.8h, v21.8h, v22.8h, v23.8h, v24.8h, v25.8h, v26.8h, v27.8h, v28.8h, v29.8h, v30.8h, v31.8h
ld1 {\i}, [x7]
st1 {v7.8h}, [x7], x8
.endr
sub x7, x7, x8, lsl #4
add x7, x7, x8, lsr #1
.if \scale
scale_input .8h, v0.h[0], v16, v17, v18, v19, v20, v21, v22, v23
scale_input .8h, v0.h[0], v24, v25, v26, v27, v28, v29, v30, v31
.endif
bl inv_dct_8h_x16_neon
transpose_8x8h v16, v17, v18, v19, v20, v21, v22, v23, v4, v5
transpose_8x8h v24, v25, v26, v27, v28, v29, v30, v31, v4, v5
.macro store1 r0, r1
st1 {\r0}, [x6], #16
st1 {\r1}, [x6], #16
add x6, x6, #32
.endm
store1 v16.8h, v24.8h
store1 v17.8h, v25.8h
store1 v18.8h, v26.8h
store1 v19.8h, v27.8h
store1 v20.8h, v28.8h
store1 v21.8h, v29.8h
store1 v22.8h, v30.8h
store1 v23.8h, v31.8h
.purgem store1
sub x6, x6, #64*8
movi v7.8h, #0
.irp i, v16.8h, v17.8h, v18.8h, v19.8h, v20.8h, v21.8h, v22.8h, v23.8h, v24.8h, v25.8h, v26.8h, v27.8h, v28.8h, v29.8h, v30.8h, v31.8h
ld1 {\i}, [x7]
st1 {v7.8h}, [x7], x8
.endr
.if \scale
// This relies on the fact that the idct also leaves the right coeff in v0.h[1]
scale_input .8h, v0.h[1], v16, v17, v18, v19, v20, v21, v22, v23
scale_input .8h, v0.h[1], v24, v25, v26, v27, v28, v29, v30, v31
.endif
bl inv_dct32_odd_8h_x16_neon
transpose_8x8h v31, v30, v29, v28, v27, v26, v25, v24, v4, v5
transpose_8x8h v23, v22, v21, v20, v19, v18, v17, v16, v4, v5
.macro store2 r0, r1, shift
ld1 {v4.8h, v5.8h}, [x6]
sqsub v7.8h, v4.8h, \r0
sqsub v6.8h, v5.8h, \r1
sqadd v4.8h, v4.8h, \r0
sqadd v5.8h, v5.8h, \r1
rev64 v6.8h, v6.8h
rev64 v7.8h, v7.8h
srshr v4.8h, v4.8h, #\shift
srshr v5.8h, v5.8h, #\shift
srshr v6.8h, v6.8h, #\shift
srshr v7.8h, v7.8h, #\shift
ext v6.16b, v6.16b, v6.16b, #8
st1 {v4.8h, v5.8h}, [x6], #32
ext v7.16b, v7.16b, v7.16b, #8
st1 {v6.8h, v7.8h}, [x6], #32
.endm
store2 v31.8h, v23.8h, \shift
store2 v30.8h, v22.8h, \shift
store2 v29.8h, v21.8h, \shift
store2 v28.8h, v20.8h, \shift
store2 v27.8h, v19.8h, \shift
store2 v26.8h, v18.8h, \shift
store2 v25.8h, v17.8h, \shift
store2 v24.8h, v16.8h, \shift
.purgem store2
ret x14
endfunc
.endm
def_horz_32 scale=0, shift=2
def_horz_32 scale=1, shift=1, suffix=_scale
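// Final (vertical) pass of the 32-point DCT for an 8-column strip: the even
// half comes from inv_dct_8h_x16_neon (stored back into the coefficient
// buffer at x7), the odd half from inv_dct32_odd_8h_x16_neon (left in
// v16-v31). The two halves are combined with sqadd for output rows 0-15 and
// with sqsub, walking the even rows backwards via the negative stride in x9,
// for rows 16-31, then rounded by 4 bits and added to the destination.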
function inv_txfm_add_vert_dct_8x32_neon
mov x14, x30
lsl x8, x8, #1
.irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
ld1 {v\i\().8h}, [x7], x8
.endr
sub x7, x7, x8, lsl #4
bl inv_dct_8h_x16_neon
.irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
st1 {v\i\().8h}, [x7], x8
.endr
sub x7, x7, x8, lsl #4
add x7, x7, x8, lsr #1
.irp i, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
ld1 {v\i\().8h}, [x7], x8
.endr
sub x7, x7, x8, lsl #4
sub x7, x7, x8, lsr #1
bl inv_dct32_odd_8h_x16_neon
neg x9, x8
mov x10, x6
.macro combine r0, r1, r2, r3, op, stride
ld1 {v5.8h}, [x7], \stride
ld1 {v2.8b}, [x10], x1
ld1 {v6.8h}, [x7], \stride
ld1 {v3.8b}, [x10], x1
\op v5.8h, v5.8h, \r0
ld1 {v7.8h}, [x7], \stride
ld1 {v4.8b}, [x10], x1
srshr v5.8h, v5.8h, #4
\op v6.8h, v6.8h, \r1
uaddw v5.8h, v5.8h, v2.8b
srshr v6.8h, v6.8h, #4
\op v7.8h, v7.8h, \r2
sqxtun v2.8b, v5.8h
ld1 {v5.8h}, [x7], \stride
uaddw v6.8h, v6.8h, v3.8b
srshr v7.8h, v7.8h, #4
\op v5.8h, v5.8h, \r3
st1 {v2.8b}, [x6], x1
ld1 {v2.8b}, [x10], x1
sqxtun v3.8b, v6.8h
uaddw v7.8h, v7.8h, v4.8b
srshr v5.8h, v5.8h, #4
st1 {v3.8b}, [x6], x1
sqxtun v4.8b, v7.8h
uaddw v5.8h, v5.8h, v2.8b
st1 {v4.8b}, [x6], x1
sqxtun v2.8b, v5.8h
st1 {v2.8b}, [x6], x1
.endm
combine v31.8h, v30.8h, v29.8h, v28.8h, sqadd, x8
combine v27.8h, v26.8h, v25.8h, v24.8h, sqadd, x8
combine v23.8h, v22.8h, v21.8h, v20.8h, sqadd, x8
combine v19.8h, v18.8h, v17.8h, v16.8h, sqadd, x8
sub x7, x7, x8
combine v16.8h, v17.8h, v18.8h, v19.8h, sqsub, x9
combine v20.8h, v21.8h, v22.8h, v23.8h, sqsub, x9
combine v24.8h, v25.8h, v26.8h, v27.8h, sqsub, x9
combine v28.8h, v29.8h, v30.8h, v31.8h, sqsub, x9
.purgem combine
ret x14
endfunc
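// End-of-block thresholds, one halfword per entry: the callers compare the
// eob argument (w3) against successive entries to decide whether the next
// 8-coefficient slice of the input still holds nonzero coefficients or can
// be skipped and zero-filled instead.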
const eob_32x32
.short 36, 136, 300, 1024
endconst
const eob_16x32
.short 36, 151, 279, 512
endconst
const eob_16x32_shortside
.short 36, 512
endconst
const eob_8x32
.short 43, 107, 171, 256
endconst
function inv_txfm_add_identity_identity_32x32_8bpc_neon, export=1
movi v0.8h, #0
movrel x13, eob_32x32
mov x8, #2*32
1:
mov w9, #0
movrel x12, eob_32x32
2:
add w9, w9, #8
.irp i, 16, 17, 18, 19, 20, 21, 22, 23
ld1 {v\i\().8h}, [x2]
st1 {v0.8h}, [x2], x8
.endr
transpose_8x8h v16, v17, v18, v19, v20, v21, v22, v23, v4, v5
load_add_store_8x8 x0, x7, shiftbits=2
ldrh w11, [x12], #2
sub x0, x0, x1, lsl #3
add x0, x0, #8
cmp w3, w11
b.ge 2b
ldrh w11, [x13], #2
cmp w3, w11
b.lt 9f
sub x0, x0, w9, uxtw
add x0, x0, x1, lsl #3
msub x2, x8, x9, x2
add x2, x2, #2*8
b 1b
9:
ret
endfunc
.macro shift_8_regs op, shift
.irp i, v16.8h, v17.8h, v18.8h, v19.8h, v20.8h, v21.8h, v22.8h, v23.8h
\op \i, \i, #\shift
.endr
.endm
.macro def_identity_1632 w, h, wshort, hshort
function inv_txfm_add_identity_identity_\w\()x\h\()_8bpc_neon, export=1
mov w16, #2896*8
mov w17, #2*(5793-4096)*8
dup v1.4h, w16
movi v0.8h, #0
mov v1.h[1], w17
movrel x13, eob_16x32\hshort
mov x8, #2*\h
1:
mov w9, #0
movrel x12, eob_16x32\wshort
2:
add w9, w9, #8
.irp i, v16.8h, v17.8h, v18.8h, v19.8h, v20.8h, v21.8h, v22.8h, v23.8h
ld1 {\i}, [x2]
st1 {v0.8h}, [x2], x8
.endr
scale_input .8h, v1.h[0], v16, v17, v18, v19, v20, v21, v22, v23
.if \w == 16
// 16x32
identity_8x8_shift1 v1.h[1]
.else
// 32x16
shift_8_regs sqshl, 1
identity_8x8 v1.h[1]
.endif
transpose_8x8h v16, v17, v18, v19, v20, v21, v22, v23, v4, v5
.if \w == 16
load_add_store_8x8 x0, x7, shiftbits=2
.else
load_add_store_8x8 x0, x7, shiftbits=4
.endif
ldrh w11, [x12], #2
sub x0, x0, x1, lsl #3
add x0, x0, #8
cmp w3, w11
b.ge 2b
ldrh w11, [x13], #2
cmp w3, w11
b.lt 9f
sub x0, x0, w9, uxtw
add x0, x0, x1, lsl #3
msub x2, x8, x9, x2
add x2, x2, #2*8
b 1b
9:
ret
endfunc
.endm
def_identity_1632 16, 32, _shortside,
def_identity_1632 32, 16, , _shortside
.macro def_identity_832 w, h
function inv_txfm_add_identity_identity_\w\()x\h\()_8bpc_neon, export=1
movi v0.8h, #0
movrel x13, eob_8x32
mov w8, #2*\h
1:
ldrh w12, [x13], #2
.irp i, v16.8h, v17.8h, v18.8h, v19.8h, v20.8h, v21.8h, v22.8h, v23.8h
ld1 {\i}, [x2]
st1 {v0.8h}, [x2], x8
.endr
.if \w == 8
// 8x32
shift_8_regs srshr, 1
.endif
transpose_8x8h v16, v17, v18, v19, v20, v21, v22, v23, v4, v5
cmp w3, w12
.if \w == 8
load_add_store_8x8 x0, x7, shiftbits=2
.else
load_add_store_8x8 x0, x7, shiftbits=3
.endif
b.lt 9f
.if \w == 8
sub x2, x2, x8, lsl #3
add x2, x2, #2*8
.else
sub x0, x0, x1, lsl #3
add x0, x0, #8
.endif
b 1b
9:
ret
endfunc
.endm
def_identity_832 8, 32
def_identity_832 32, 8
function inv_txfm_add_dct_dct_32x32_8bpc_neon, export=1
idct_dc 32, 32, 2
mov x15, x30
sub sp, sp, #2048
movrel x13, eob_32x32
ldrh w12, [x13], #2
.irp i, 0, 8, 16, 24
add x6, sp, #(\i*32*2)
.if \i > 0
mov w8, #(32 - \i)
cmp w3, w12
b.lt 1f
.if \i < 24
ldrh w12, [x13], #2
.endif
.endif
add x7, x2, #(\i*2)
mov x8, #32*2
bl inv_txfm_horz_dct_32x8_neon
.endr
b 3f
1:
movi v4.8h, #0
movi v5.8h, #0
movi v6.8h, #0
movi v7.8h, #0
2:
subs w8, w8, #4
.rept 4
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x6], #64
.endr
b.gt 2b
3:
.irp i, 0, 8, 16, 24
add x6, x0, #(\i)
add x7, sp, #(\i*2)
mov x8, #32*2
bl inv_txfm_add_vert_dct_8x32_neon
.endr
add sp, sp, #2048
ret x15
endfunc
function inv_txfm_add_dct_dct_16x32_8bpc_neon, export=1
idct_dc 16, 32, 1
mov x15, x30
sub sp, sp, #1024
movrel x13, eob_16x32
ldrh w12, [x13], #2
adr x4, inv_dct_8h_x16_neon
.irp i, 0, 8, 16, 24
add x6, sp, #(\i*16*2)
add x7, x2, #(\i*2)
.if \i > 0
mov w8, #(32 - \i)
cmp w3, w12
b.lt 1f
.if \i < 24
ldrh w12, [x13], #2
.endif
.endif
mov x8, #2*32
bl inv_txfm_horz_scale_16x8_neon
.endr
b 3f
1:
movi v4.8h, #0
movi v5.8h, #0
movi v6.8h, #0
movi v7.8h, #0
2:
subs w8, w8, #8
.rept 4
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x6], #64
.endr
b.gt 2b
3:
.irp i, 0, 8
add x6, x0, #(\i)
add x7, sp, #(\i*2)
mov x8, #16*2
bl inv_txfm_add_vert_dct_8x32_neon
.endr
add sp, sp, #1024
ret x15
endfunc
function inv_txfm_add_dct_dct_32x16_8bpc_neon, export=1
idct_dc 32, 16, 1
mov x15, x30
sub sp, sp, #1024
adr x5, inv_dct_8h_x16_neon
.irp i, 0, 8
add x6, sp, #(\i*32*2)
add x7, x2, #(\i*2)
.if \i > 0
mov w8, #(16 - \i)
cmp w3, #36
b.lt 1f
.endif
mov x8, #2*16
bl inv_txfm_horz_scale_dct_32x8_neon
.endr
b 3f
1:
movi v4.8h, #0
movi v5.8h, #0
movi v6.8h, #0
movi v7.8h, #0
2:
subs w8, w8, #4
.rept 4
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x6], #64
.endr
b.gt 2b
3:
mov x8, #32*2
.irp i, 0, 8, 16, 24
add x6, x0, #(\i)
add x7, sp, #(\i*2)
bl inv_txfm_add_vert_8x16_neon
.endr
add sp, sp, #1024
ret x15
endfunc
function inv_txfm_add_dct_dct_8x32_8bpc_neon, export=1
idct_dc 8, 32, 2
mov x15, x30
sub sp, sp, #512
movrel x13, eob_8x32
movi v28.8h, #0
mov x8, #2*32
mov w9, #32
mov x6, sp
1:
.irp i, 16, 17, 18, 19, 20, 21, 22, 23
ld1 {v\i\().8h}, [x2]
st1 {v28.8h}, [x2], x8
.endr
ldrh w12, [x13], #2
sub x2, x2, x8, lsl #3
sub w9, w9, #8
add x2, x2, #2*8
bl inv_dct_8h_x8_neon
.irp i, 16, 17, 18, 19, 20, 21, 22, 23
srshr v\i\().8h, v\i\().8h, #2
.endr
transpose_8x8h v16, v17, v18, v19, v20, v21, v22, v23, v24, v25
st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [x6], #64
cmp w3, w12
st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [x6], #64
b.ge 1b
cbz w9, 3f
movi v29.8h, #0
movi v30.8h, #0
movi v31.8h, #0
2:
subs w9, w9, #8
.rept 2
st1 {v28.8h,v29.8h,v30.8h,v31.8h}, [x6], #64
.endr
b.gt 2b
3:
mov x6, x0
mov x7, sp
mov x8, #8*2
bl inv_txfm_add_vert_dct_8x32_neon
add sp, sp, #512
ret x15
endfunc
function inv_txfm_add_dct_dct_32x8_8bpc_neon, export=1
idct_dc 32, 8, 2
mov x15, x30
sub sp, sp, #512
mov x6, sp
mov x7, x2
mov x8, #8*2
bl inv_txfm_horz_dct_32x8_neon
mov x8, #2*32
mov w9, #0
1:
add x6, x0, x9
add x7, sp, x9, lsl #1 // #(\i*2)
.irp i, 16, 17, 18, 19, 20, 21, 22, 23
ld1 {v\i\().8h}, [x7], x8
.endr
add w9, w9, #8
bl inv_dct_8h_x8_neon
cmp w9, #32
load_add_store_8x8 x6, x7
b.lt 1b
add sp, sp, #512
ret x15
endfunc
function inv_dct64_step1_neon
// in1/31/17/15 -> t32a/33/34a/35/60/61a/62/63a
// in7/25/23/ 9 -> t56a/57/58a/59/36/37a/38/39a
// in5/27/21/11 -> t40a/41/42a/43/52/53a/54/55a
// in3/29/19/13 -> t48a/49/50a/51/44/45a/46/47a
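// Each call consumes one group of four input vectors (v16-v19, loaded by the
// caller) together with 32 bytes of idct64_coeffs from x17, and appends eight
// vectors of intermediate t values (128 bytes) at x6.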
ld1 {v0.8h, v1.8h}, [x17], #32
sqrdmulh v23.8h, v16.8h, v0.h[1] // t63a
sqrdmulh v16.8h, v16.8h, v0.h[0] // t32a
sqrdmulh v22.8h, v17.8h, v0.h[2] // t62a
sqrdmulh v17.8h, v17.8h, v0.h[3] // t33a
sqrdmulh v21.8h, v18.8h, v0.h[5] // t61a
sqrdmulh v18.8h, v18.8h, v0.h[4] // t34a
sqrdmulh v20.8h, v19.8h, v0.h[6] // t60a
sqrdmulh v19.8h, v19.8h, v0.h[7] // t35a
sqadd v24.8h, v16.8h, v17.8h // t32
sqsub v25.8h, v16.8h, v17.8h // t33
sqsub v26.8h, v19.8h, v18.8h // t34
sqadd v27.8h, v19.8h, v18.8h // t35
sqadd v28.8h, v20.8h, v21.8h // t60
sqsub v29.8h, v20.8h, v21.8h // t61
sqsub v30.8h, v23.8h, v22.8h // t62
sqadd v31.8h, v23.8h, v22.8h // t63
smull_smlal v2, v3, v29, v26, v1.h[0], v1.h[1], .8h // -> t34a
smull_smlsl v4, v5, v29, v26, v1.h[1], v1.h[0], .8h // -> t61a
neg v2.4s, v2.4s // t34a
neg v3.4s, v3.4s // t34a
smull_smlsl v6, v7, v30, v25, v1.h[1], v1.h[0], .8h // -> t33a
sqrshrn_sz v26, v2, v3, #12, .8h // t34a
smull_smlal v2, v3, v30, v25, v1.h[0], v1.h[1], .8h // -> t62a
sqrshrn_sz v29, v4, v5, #12, .8h // t61a
sqrshrn_sz v25, v6, v7, #12, .8h // t33a
sqrshrn_sz v30, v2, v3, #12, .8h // t62a
sqadd v16.8h, v24.8h, v27.8h // t32a
sqsub v19.8h, v24.8h, v27.8h // t35a
sqadd v17.8h, v25.8h, v26.8h // t33
sqsub v18.8h, v25.8h, v26.8h // t34
sqsub v20.8h, v31.8h, v28.8h // t60a
sqadd v23.8h, v31.8h, v28.8h // t63a
sqsub v21.8h, v30.8h, v29.8h // t61
sqadd v22.8h, v30.8h, v29.8h // t62
smull_smlal v2, v3, v21, v18, v1.h[2], v1.h[3], .8h // -> t61a
smull_smlsl v4, v5, v21, v18, v1.h[3], v1.h[2], .8h // -> t34a
smull_smlal v6, v7, v20, v19, v1.h[2], v1.h[3], .8h // -> t60
sqrshrn_sz v21, v2, v3, #12, .8h // t61a
sqrshrn_sz v18, v4, v5, #12, .8h // t34a
smull_smlsl v2, v3, v20, v19, v1.h[3], v1.h[2], .8h // -> t35
sqrshrn_sz v20, v6, v7, #12, .8h // t60
sqrshrn_sz v19, v2, v3, #12, .8h // t35
st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [x6], #64
st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [x6], #64
ret
endfunc
function inv_dct64_step2_neon
movrel x16, idct_coeffs
ld1 {v0.4h}, [x16]
1:
// t32a/33/34a/35/60/61a/62/63a
// t56a/57/58a/59/36/37a/38/39a
// t40a/41/42a/43/52/53a/54/55a
// t48a/49/50a/51/44/45a/46/47a
ldr q16, [x6, #2*8*0] // t32a
ldr q17, [x9, #2*8*8] // t39a
ldr q18, [x9, #2*8*0] // t63a
ldr q19, [x6, #2*8*8] // t56a
ldr q20, [x6, #2*8*16] // t40a
ldr q21, [x9, #2*8*24] // t47a
ldr q22, [x9, #2*8*16] // t55a
ldr q23, [x6, #2*8*24] // t48a
sqadd v24.8h, v16.8h, v17.8h // t32
sqsub v25.8h, v16.8h, v17.8h // t39
sqadd v26.8h, v18.8h, v19.8h // t63
sqsub v27.8h, v18.8h, v19.8h // t56
sqsub v28.8h, v21.8h, v20.8h // t40
sqadd v29.8h, v21.8h, v20.8h // t47
sqadd v30.8h, v23.8h, v22.8h // t48
sqsub v31.8h, v23.8h, v22.8h // t55
smull_smlal v2, v3, v27, v25, v0.h[3], v0.h[2], .8h // -> t56a
smull_smlsl v4, v5, v27, v25, v0.h[2], v0.h[3], .8h // -> t39a
smull_smlal v6, v7, v31, v28, v0.h[3], v0.h[2], .8h // -> t40a
sqrshrn_sz v25, v2, v3, #12, .8h // t56a
sqrshrn_sz v27, v4, v5, #12, .8h // t39a
neg v6.4s, v6.4s // t40a
neg v7.4s, v7.4s // t40a
smull_smlsl v2, v3, v31, v28, v0.h[2], v0.h[3], .8h // -> t55a
sqrshrn_sz v31, v6, v7, #12, .8h // t40a
sqrshrn_sz v28, v2, v3, #12, .8h // t55a
sqadd v16.8h, v24.8h, v29.8h // t32a
sqsub v19.8h, v24.8h, v29.8h // t47a
sqadd v17.8h, v27.8h, v31.8h // t39
sqsub v18.8h, v27.8h, v31.8h // t40
sqsub v20.8h, v26.8h, v30.8h // t48a
sqadd v23.8h, v26.8h, v30.8h // t63a
sqsub v21.8h, v25.8h, v28.8h // t55
sqadd v22.8h, v25.8h, v28.8h // t56
smull_smlsl v2, v3, v21, v18, v0.h[0], v0.h[0], .8h // -> t40a
smull_smlal v4, v5, v21, v18, v0.h[0], v0.h[0], .8h // -> t55a
smull_smlsl v6, v7, v20, v19, v0.h[0], v0.h[0], .8h // -> t47
sqrshrn_sz v18, v2, v3, #12, .8h // t40a
sqrshrn_sz v21, v4, v5, #12, .8h // t55a
smull_smlal v2, v3, v20, v19, v0.h[0], v0.h[0], .8h // -> t48
sqrshrn_sz v19, v6, v7, #12, .8h // t47
sqrshrn_sz v20, v2, v3, #12, .8h // t48
str q16, [x6, #2*8*0] // t32a
str q17, [x9, #2*8*0] // t39
str q18, [x6, #2*8*8] // t40a
str q19, [x9, #2*8*8] // t47
str q20, [x6, #2*8*16] // t48
str q21, [x9, #2*8*16] // t55a
str q22, [x6, #2*8*24] // t56
str q23, [x9, #2*8*24] // t63a
add x6, x6, #2*8
sub x9, x9, #2*8
cmp x6, x9
b.lt 1b
ret
endfunc
.macro load8 src, strd, zero, clear
.irp i, v16.8h, v17.8h, v18.8h, v19.8h, v20.8h, v21.8h, v22.8h, v23.8h
.if \clear
ld1 {\i}, [\src]
st1 {\zero}, [\src], \strd
.else
ld1 {\i}, [\src], \strd
.endif
.endr
.endm
.macro store16 dst
.irp i, v16.8h, v17.8h, v18.8h, v19.8h, v20.8h, v21.8h, v22.8h, v23.8h, v24.8h, v25.8h, v26.8h, v27.8h, v28.8h, v29.8h, v30.8h, v31.8h
st1 {\i}, [\dst], #16
.endr
.endm
.macro clear_upper8
.irp i, v24.8h, v25.8h, v26.8h, v27.8h, v28.8h, v29.8h, v30.8h, v31.8h
movi \i, #0
.endr
.endm
.macro movi_if reg, val, cond
.if \cond
movi \reg, \val
.endif
.endm
.macro movdup_if reg, gpr, val, cond
.if \cond
mov \gpr, \val
dup \reg, \gpr
.endif
.endm
.macro st1_if regs, dst, cond
.if \cond
st1 \regs, \dst
.endif
.endm
.macro str_if reg, dst, cond
.if \cond
str \reg, \dst
.endif
.endm
.macro stroff_if reg, dst, dstoff, cond
.if \cond
str \reg, \dst, \dstoff
.endif
.endm
.macro scale_if cond, c, r0, r1, r2, r3, r4, r5, r6, r7
.if \cond
scale_input .8h, \c, \r0, \r1, \r2, \r3, \r4, \r5, \r6, \r7
.endif
.endm
.macro def_dct64_func suffix, clear=0, scale=0
function inv_txfm_dct\suffix\()_8h_x64_neon, export=1
mov x14, x30
mov x6, sp
lsl x8, x8, #2
movdup_if v0.4h, w16, #2896*8, \scale
movi_if v7.8h, #0, \clear
load8 x7, x8, v7.8h, \clear
clear_upper8
sub x7, x7, x8, lsl #3
add x7, x7, x8, lsr #1
scale_if \scale, v0.h[0], v16, v17, v18, v19, v20, v21, v22, v23
bl inv_dct_8h_x16_neon
store16 x6
movdup_if v0.4h, w16, #2896*8, \scale
movi_if v7.8h, #0, \clear
load8 x7, x8, v7.8h, \clear
clear_upper8
sub x7, x7, x8, lsl #3
lsr x8, x8, #1
sub x7, x7, x8, lsr #1
scale_if \scale, v0.h[0], v16, v17, v18, v19, v20, v21, v22, v23
bl inv_dct32_odd_8h_x16_neon
add x10, x6, #16*15
sub x6, x6, #16*16
mov x9, #-16
.macro store_addsub r0, r1, r2, r3
ld1 {v2.8h}, [x6], #16
ld1 {v3.8h}, [x6], #16
sqadd v6.8h, v2.8h, \r0
sqsub \r0, v2.8h, \r0
ld1 {v4.8h}, [x6], #16
sqadd v7.8h, v3.8h, \r1
sqsub \r1, v3.8h, \r1
ld1 {v5.8h}, [x6], #16
sqadd v2.8h, v4.8h, \r2
sub x6, x6, #16*4
sqsub \r2, v4.8h, \r2
st1 {v6.8h}, [x6], #16
st1 {\r0}, [x10], x9
sqadd v3.8h, v5.8h, \r3
sqsub \r3, v5.8h, \r3
st1 {v7.8h}, [x6], #16
st1 {\r1}, [x10], x9
st1 {v2.8h}, [x6], #16
st1 {\r2}, [x10], x9
st1 {v3.8h}, [x6], #16
st1 {\r3}, [x10], x9
.endm
store_addsub v31.8h, v30.8h, v29.8h, v28.8h
store_addsub v27.8h, v26.8h, v25.8h, v24.8h
store_addsub v23.8h, v22.8h, v21.8h, v20.8h
store_addsub v19.8h, v18.8h, v17.8h, v16.8h
.purgem store_addsub
add x6, x6, #2*8*16
movrel x17, idct64_coeffs
movdup_if v0.4h, w16, #2896*8, \scale
movi_if v7.8h, #0, \clear
add x9, x7, x8, lsl #4 // offset 16
add x10, x7, x8, lsl #3 // offset 8
sub x9, x9, x8 // offset 15
sub x11, x10, x8 // offset 7
ld1 {v16.8h}, [x7] // in1 (offset 0)
ld1 {v17.8h}, [x9] // in31 (offset 15)
ld1 {v18.8h}, [x10] // in17 (offset 8)
ld1 {v19.8h}, [x11] // in15 (offset 7)
st1_if {v7.8h}, [x7], \clear
st1_if {v7.8h}, [x9], \clear
st1_if {v7.8h}, [x10], \clear
st1_if {v7.8h}, [x11], \clear
scale_if \scale, v0.h[0], v16, v17, v18, v19
bl inv_dct64_step1_neon
movdup_if v0.4h, w16, #2896*8, \scale
movi_if v7.8h, #0, \clear
add x7, x7, x8, lsl #2 // offset 4
sub x9, x9, x8, lsl #2 // offset 11
sub x10, x7, x8 // offset 3
add x11, x9, x8 // offset 12
ld1 {v16.8h}, [x10] // in7 (offset 3)
ld1 {v17.8h}, [x11] // in25 (offset 12)
ld1 {v18.8h}, [x9] // in23 (offset 11)
ld1 {v19.8h}, [x7] // in9 (offset 4)
st1_if {v7.8h}, [x7], \clear
st1_if {v7.8h}, [x9], \clear
st1_if {v7.8h}, [x10], \clear
st1_if {v7.8h}, [x11], \clear
scale_if \scale, v0.h[0], v16, v17, v18, v19
bl inv_dct64_step1_neon
movdup_if v0.4h, w16, #2896*8, \scale
movi_if v7.8h, #0, \clear
sub x10, x10, x8, lsl #1 // offset 1
sub x9, x9, x8, lsl #1 // offset 9
add x7, x7, x8 // offset 5
add x11, x11, x8 // offset 13
ldr q16, [x10, x8] // in5 (offset 2)
ldr q17, [x11] // in27 (offset 13)
ldr q18, [x9, x8] // in21 (offset 10)
ldr q19, [x7] // in11 (offset 5)
stroff_if q7, [x10, x8], \clear
str_if q7, [x11], \clear
stroff_if q7, [x9, x8], \clear
str_if q7, [x7], \clear
scale_if \scale, v0.h[0], v16, v17, v18, v19
bl inv_dct64_step1_neon
movdup_if v0.4h, w16, #2896*8, \scale
movi_if v7.8h, #0, \clear
ldr q16, [x10] // in3 (offset 1)
ldr q17, [x11, x8] // in29 (offset 14)
ldr q18, [x9] // in19 (offset 9)
ldr q19, [x7, x8] // in13 (offset 6)
str_if q7, [x10], \clear
stroff_if q7, [x11, x8], \clear
str_if q7, [x9], \clear
stroff_if q7, [x7, x8], \clear
scale_if \scale, v0.h[0], v16, v17, v18, v19
bl inv_dct64_step1_neon
sub x6, x6, #2*8*32
add x9, x6, #2*8*7
bl inv_dct64_step2_neon
ret x14
endfunc
.endm
def_dct64_func
def_dct64_func _clear, clear=1
def_dct64_func _clear_scale, clear=1, scale=1
function inv_txfm_horz_dct_64x8_neon
mov x14, x30
mov x7, sp
add x8, sp, #2*8*(64 - 4)
add x9, x6, #2*56
mov x10, #2*64
mov x11, #-2*8*4
dup v7.8h, w12
1:
ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [x7], #64
ld1 {v28.8h, v29.8h, v30.8h, v31.8h}, [x8], x11
ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [x7], #64
ld1 {v24.8h, v25.8h, v26.8h, v27.8h}, [x8], x11
transpose_8x8h v16, v17, v18, v19, v20, v21, v22, v23, v4, v5
transpose_8x8h v31, v30, v29, v28, v27, v26, v25, v24, v4, v5
.macro store_addsub src0, src1, src2, src3
sqsub v1.8h, \src0, \src1
sqadd v0.8h, \src0, \src1
sqsub v3.8h, \src2, \src3
srshl v1.8h, v1.8h, v7.8h
sqadd v2.8h, \src2, \src3
srshl v0.8h, v0.8h, v7.8h
srshl v3.8h, v3.8h, v7.8h
rev64 v1.8h, v1.8h
srshl v2.8h, v2.8h, v7.8h
rev64 v3.8h, v3.8h
ext v1.16b, v1.16b, v1.16b, #8
st1 {v0.8h}, [x6], x10
ext v3.16b, v3.16b, v3.16b, #8
st1 {v1.8h}, [x9], x10
st1 {v2.8h}, [x6], x10
st1 {v3.8h}, [x9], x10
.endm
store_addsub v16.8h, v31.8h, v17.8h, v30.8h
store_addsub v18.8h, v29.8h, v19.8h, v28.8h
store_addsub v20.8h, v27.8h, v21.8h, v26.8h
store_addsub v22.8h, v25.8h, v23.8h, v24.8h
.purgem store_addsub
sub x6, x6, x10, lsl #3
sub x9, x9, x10, lsl #3
add x6, x6, #16
sub x9, x9, #16
cmp x7, x8
b.lt 1b
ret x14
endfunc
function inv_txfm_add_vert_dct_8x64_neon
mov x14, x30
lsl x8, x8, #1
mov x7, sp
add x8, sp, #2*8*(64 - 4)
add x9, x6, x1, lsl #6
sub x9, x9, x1
neg x10, x1
mov x11, #-2*8*4
1:
ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [x7], #64
ld1 {v28.8h, v29.8h, v30.8h, v31.8h}, [x8], x11
ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [x7], #64
ld1 {v24.8h, v25.8h, v26.8h, v27.8h}, [x8], x11
.macro add_dest_addsub src0, src1, src2, src3
ld1 {v0.8b}, [x6], x1
ld1 {v1.8b}, [x9], x10
sqadd v4.8h, \src0, \src1
ld1 {v2.8b}, [x6]
sqsub v5.8h, \src0, \src1
ld1 {v3.8b}, [x9]
sqadd v6.8h, \src2, \src3
sqsub v7.8h, \src2, \src3
sub x6, x6, x1
sub x9, x9, x10
srshr v4.8h, v4.8h, #4
srshr v5.8h, v5.8h, #4
srshr v6.8h, v6.8h, #4
uaddw v4.8h, v4.8h, v0.8b
srshr v7.8h, v7.8h, #4
uaddw v5.8h, v5.8h, v1.8b
uaddw v6.8h, v6.8h, v2.8b
sqxtun v0.8b, v4.8h
uaddw v7.8h, v7.8h, v3.8b
sqxtun v1.8b, v5.8h
st1 {v0.8b}, [x6], x1
sqxtun v2.8b, v6.8h
st1 {v1.8b}, [x9], x10
sqxtun v3.8b, v7.8h
st1 {v2.8b}, [x6], x1
st1 {v3.8b}, [x9], x10
.endm
add_dest_addsub v16.8h, v31.8h, v17.8h, v30.8h
add_dest_addsub v18.8h, v29.8h, v19.8h, v28.8h
add_dest_addsub v20.8h, v27.8h, v21.8h, v26.8h
add_dest_addsub v22.8h, v25.8h, v23.8h, v24.8h
.purgem add_dest_addsub
cmp x7, x8
b.lt 1b
ret x14
endfunc
function inv_txfm_add_dct_dct_64x64_8bpc_neon, export=1
idct_dc 64, 64, 2
mov x15, x30
sub_sp 64*32*2+64*8*2
add x5, sp, #64*8*2
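// Stack layout: 64*8*2 bytes of scratch at sp used as the work area of
// inv_txfm_dct_8h_x64_neon, followed at x5 by the 64*32*2-byte buffer that
// holds the transposed output of the horizontal passes.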
movrel x13, eob_32x32
.irp i, 0, 8, 16, 24
add x6, x5, #(\i*64*2)
.if \i > 0
mov w8, #(32 - \i)
cmp w3, w12
b.lt 1f
.endif
add x7, x2, #(\i*2)
mov x8, #32*2
mov x12, #-2 // shift
bl inv_txfm_dct_clear_8h_x64_neon
add x6, x5, #(\i*64*2)
bl inv_txfm_horz_dct_64x8_neon
.if \i < 24
ldrh w12, [x13], #2
.endif
.endr
b 3f
1:
movi v4.8h, #0
movi v5.8h, #0
movi v6.8h, #0
movi v7.8h, #0
2:
subs w8, w8, #2
.rept 4
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x6], #64
.endr
b.gt 2b
3:
.irp i, 0, 8, 16, 24, 32, 40, 48, 56
add x7, x5, #(\i*2)
mov x8, #64*2
bl inv_txfm_dct_8h_x64_neon
add x6, x0, #(\i)
bl inv_txfm_add_vert_dct_8x64_neon
.endr
add sp, x5, #64*32*2
ret x15
endfunc
function inv_txfm_add_dct_dct_64x32_8bpc_neon, export=1
idct_dc 64, 32, 1
mov x15, x30
sub_sp 64*32*2+64*8*2
add x5, sp, #64*8*2
movrel x13, eob_32x32
.irp i, 0, 8, 16, 24
add x6, x5, #(\i*64*2)
.if \i > 0
mov w8, #(32 - \i)
cmp w3, w12
b.lt 1f
.endif
add x7, x2, #(\i*2)
mov x8, #32*2
mov x12, #-1 // shift
bl inv_txfm_dct_clear_scale_8h_x64_neon
add x6, x5, #(\i*64*2)
bl inv_txfm_horz_dct_64x8_neon
.if \i < 24
ldrh w12, [x13], #2
.endif
.endr
b 3f
1:
movi v4.8h, #0
movi v5.8h, #0
movi v6.8h, #0
movi v7.8h, #0
2:
subs w8, w8, #2
.rept 4
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x6], #64
.endr
b.gt 2b
3:
.irp i, 0, 8, 16, 24, 32, 40, 48, 56
add x6, x0, #(\i)
add x7, x5, #(\i*2)
mov x8, #64*2
bl inv_txfm_add_vert_dct_8x32_neon
.endr
add sp, x5, #64*32*2
ret x15
endfunc
function inv_txfm_add_dct_dct_32x64_8bpc_neon, export=1
idct_dc 32, 64, 1
mov x15, x30
sub_sp 32*32*2+64*8*2
add x5, sp, #64*8*2
movrel x13, eob_32x32
ldrh w12, [x13], #2
.irp i, 0, 8, 16, 24
add x6, x5, #(\i*32*2)
.if \i > 0
mov w8, #(32 - \i)
cmp w3, w12
b.lt 1f
.if \i < 24
ldrh w12, [x13], #2
.endif
.endif
add x7, x2, #(\i*2)
mov x8, #32*2
bl inv_txfm_horz_scale_dct_32x8_neon
.endr
b 3f
1:
movi v4.8h, #0
movi v5.8h, #0
movi v6.8h, #0
movi v7.8h, #0
2:
subs w8, w8, #4
.rept 4
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x6], #64
.endr
b.gt 2b
3:
.irp i, 0, 8, 16, 24
add x7, x5, #(\i*2)
mov x8, #32*2
bl inv_txfm_dct_8h_x64_neon
add x6, x0, #(\i)
bl inv_txfm_add_vert_dct_8x64_neon
.endr
add sp, x5, #32*32*2
ret x15
endfunc
function inv_txfm_add_dct_dct_64x16_8bpc_neon, export=1
idct_dc 64, 16, 2
mov x15, x30
sub_sp 64*16*2+64*8*2
add x4, sp, #64*8*2
movrel x13, eob_16x32
.irp i, 0, 8
add x6, x4, #(\i*64*2)
.if \i > 0
mov w8, #(16 - \i)
cmp w3, w12
b.lt 1f
.endif
add x7, x2, #(\i*2)
mov x8, #16*2
mov x12, #-2 // shift
bl inv_txfm_dct_clear_8h_x64_neon
add x6, x4, #(\i*64*2)
bl inv_txfm_horz_dct_64x8_neon
.if \i < 8
ldrh w12, [x13], #2
.endif
.endr
b 3f
1:
movi v4.8h, #0
movi v5.8h, #0
movi v6.8h, #0
movi v7.8h, #0
2:
subs w8, w8, #2
.rept 4
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x6], #64
.endr
b.gt 2b
3:
adr x5, inv_dct_8h_x16_neon
mov x8, #64*2
.irp i, 0, 8, 16, 24, 32, 40, 48, 56
add x6, x0, #(\i)
add x7, x4, #(\i*2)
bl inv_txfm_add_vert_8x16_neon
.endr
add sp, x4, #64*16*2
ret x15
endfunc
function inv_txfm_add_dct_dct_16x64_8bpc_neon, export=1
idct_dc 16, 64, 2
mov x15, x30
sub_sp 16*32*2+64*8*2
add x5, sp, #64*8*2
movrel x13, eob_16x32
ldrh w12, [x13], #2
adr x4, inv_dct_8h_x16_neon
.irp i, 0, 8, 16, 24
add x6, x5, #(\i*16*2)
.if \i > 0
mov w8, #(32 - \i)
cmp w3, w12
b.lt 1f
.if \i < 24
ldrh w12, [x13], #2
.endif
.endif
add x7, x2, #(\i*2)
mov x8, #32*2
bl inv_txfm_horz_16x8_neon
.endr
b 3f
1:
movi v4.8h, #0
movi v5.8h, #0
movi v6.8h, #0
movi v7.8h, #0
2:
subs w8, w8, #8
.rept 4
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x6], #64
.endr
b.gt 2b
3:
.irp i, 0, 8
add x7, x5, #(\i*2)
mov x8, #16*2
bl inv_txfm_dct_8h_x64_neon
add x6, x0, #(\i)
bl inv_txfm_add_vert_dct_8x64_neon
.endr
add sp, x5, #16*32*2
ret x15
endfunc
third_party/dav1d/src/arm/64/mc16_sve.S
/*
* Copyright © 2024, Arm Limited
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm.S"
#include "util.S"
#define PREP_BIAS 32, lsl #8 // 8192
#define PREP_BIAS_NEG 224, lsl #8 // -8192
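// The "imm, lsl #8" form matches the SVE add/sub immediate encoding (an 8-bit
// immediate, optionally shifted left by 8): 32 << 8 == 8192, and 224 << 8 ==
// 57344, which is -8192 as a 16-bit element value.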
#if HAVE_SVE2
ENABLE_SVE
ENABLE_SVE2
// No spaces in these expressions, due to gas-preprocessor. The packed values
// are offset by -1 so that the (mx-1)/(my-1) indexing of `mc_subpel_filters`
// (mx/my run from 1 to 15 on these paths) is already folded into the constant.
#define REGULAR1 (((0*15-1)<<7)|(3*15-1))
#define SMOOTH1 (((1*15-1)<<7)|(4*15-1))
#define SHARP1 (((2*15-1)<<7)|(3*15-1))
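// Each constant packs two filter-set bases (15 filters per set): bits 0-6
// hold the base of the 4-tap set used for small block dimensions, bits 7-13
// the base of the corresponding 8-tap set. A single madd with 0x4081
// ((1<<14)|(1<<7)|1) adds mx/my into both 7-bit fields at once; ubfx/and then
// extract the two candidates and csel picks one based on the block size.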
#define FUNC_ALIGN 2
#define JUMP_ALIGN 2
#define LOOP_ALIGN 2
// Shuffle indices to permute horizontal samples in preparation for input to
// 16-bit SDOT instructions. The 8-tap horizontal convolution uses sample
// indices in the interval [-3, 4] relative to the current sample position.
const h_tbl_sve, align=4
.byte 0, 1, 2, 3, 4, 5, 6, 7, 2, 3, 4, 5, 6, 7, 8, 9
.byte 4, 5, 6, 7, 8, 9, 10, 11, 6, 7, 8, 9, 10, 11, 12, 13
endconst
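// For example, with v30/v31 loaded from the two rows above, a pair of `tbl`
// instructions turns 8 consecutive 16-bit samples into the four overlapping
// 4-sample windows (starting at offsets 0, 1, 2 and 3) consumed by the
// indexed 16-bit SDOT, one window per 64-bit accumulator lane.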
// Vertical convolutions also use 16-bit SDOT instructions, where two 128-bit
// registers contain a transposed 4x4 matrix of values. Subsequent iterations
// of the vertical convolution can reuse the 3x4 sub-matrix from the previous
// loop iteration. These shuffle indices shift and merge this 4x4 matrix with
// the values of a new line.
const v_tbl_sve, align=4
.byte 2, 3, 4, 5, 6, 7, 16, 17, 10, 11, 12, 13, 14, 15, 24, 25
.byte 2, 3, 4, 5, 6, 7, 16, 17, 10, 11, 12, 13, 14, 15, 18, 19
.byte 2, 3, 4, 5, 6, 7, 20, 21, 10, 11, 12, 13, 14, 15, 22, 23
.byte 2, 3, 4, 5, 6, 7, 24, 25, 10, 11, 12, 13, 14, 15, 26, 27
.byte 2, 3, 4, 5, 6, 7, 28, 29, 10, 11, 12, 13, 14, 15, 30, 31
endconst
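// Indices 16 and above select from the second `tbl` source register, i.e. the
// newly loaded line, so each row of this table shifts a transposed column one
// step up and appends one fresh sample.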
.macro make_8tap_fn op, type, type_h, type_v, isa, jump=1
function \op\()_8tap_\type\()_16bpc_\isa, export=1, align=FUNC_ALIGN
mov x9, \type_h
mov x10, \type_v
.if \jump
b \op\()_8tap_\isa
.endif
endfunc
.endm
.macro filter_8tap_fn type, isa, dst, d_strd, src, s_strd, w, h, mx, my, bdmax, xmx, xmy, ldst, lsrc, wd_strd, ws_strd
make_8tap_fn \type, sharp, SHARP1, SHARP1, \isa
make_8tap_fn \type, sharp_smooth, SHARP1, SMOOTH1, \isa
make_8tap_fn \type, sharp_regular, SHARP1, REGULAR1, \isa
make_8tap_fn \type, smooth_sharp, SMOOTH1, SHARP1, \isa
make_8tap_fn \type, smooth, SMOOTH1, SMOOTH1, \isa
make_8tap_fn \type, smooth_regular, SMOOTH1, REGULAR1, \isa
make_8tap_fn \type, regular_sharp, REGULAR1, SHARP1, \isa
make_8tap_fn \type, regular_smooth, REGULAR1, SMOOTH1, \isa
make_8tap_fn \type, regular, REGULAR1, REGULAR1, \isa, jump=0
function \type\()_8tap_\isa, align=FUNC_ALIGN
clz w8, \w
mov w11, #0x4081 // (1<<14) | (1<<7) | 1
ptrue p0.b, vl16
sub w8, w8, #24 // for jump tables
movrel x12, X(mc_subpel_filters)
cbnz \mx, L(\type\()_8tap_h_hv_\isa)
.ifc \type, prep
cbz \my, prep_sve
.else // put
cbnz \my, L(\type\()_8tap_v_\isa)
mov w9, w8
b X(put_16bpc_neon)
.align JUMP_ALIGN
.endif
L(\type\()_8tap_v_\isa):
madd \my, \my, w11, w10
movrel x13, v_tbl_sve
.ifc \bdmax, w8 // put case: bitdepth_max was passed on the stack,
ld1r {v5.8h}, [sp] // so load it straight into v5 (the scalar isn't needed)
.endif
sub \src, \src, \s_strd // src - s_strd
ubfx w11, \my, #7, #7
and \my, \my, #0x7F
ldr q6, [x13]
cmp \h, #4
csel \my, \my, w11, le
sub \src, \src, \s_strd, lsl #1 // src - 3 * s_strd
add \xmy, x12, \xmy, lsl #3 // subpel V filter address
ldp q28, q29, [x13, #16]
ld1sb {z7.h}, p0/z, [\xmy]
.ifc \type, prep
clz \bdmax, \bdmax
sub \bdmax, \bdmax, #24
dup v5.4s, \bdmax
.endif
cmp \w, #8
b.lt 40f
// .align JUMP_ALIGN // fallthrough
80: // V - 8xN+
ldp q30, q31, [x13, #48]
.ifc \type, prep
add \wd_strd, \w, \w // d_strd = 2 * w
.endif
.align LOOP_ALIGN
81:
add \lsrc, \src, \s_strd, lsl #1
ldr q16, [\src]
ldr q17, [\src, \s_strd]
ldr q18, [\lsrc]
ldr q19, [\lsrc, \s_strd]
add \lsrc, \lsrc, \s_strd, lsl #1
mov \ldst, \dst
ldr q20, [\lsrc]
ldr q21, [\lsrc, \s_strd]
add \lsrc, \lsrc, \s_strd, lsl #1
ldr q22, [\lsrc]
ldr q23, [\lsrc, \s_strd]
add \lsrc, \lsrc, \s_strd, lsl #1
sub w8, \h, #1
zip1 v0.8h, v16.8h, v17.8h
zip2 v1.8h, v16.8h, v17.8h
zip1 v2.8h, v18.8h, v19.8h
zip2 v3.8h, v18.8h, v19.8h
zip1 v18.8h, v20.8h, v21.8h
zip2 v21.8h, v20.8h, v21.8h
zip1 v24.8h, v22.8h, v23.8h
zip2 v27.8h, v22.8h, v23.8h
zip1 v16.4s, v0.4s, v2.4s
zip2 v19.4s, v0.4s, v2.4s
zip1 v22.4s, v1.4s, v3.4s
zip2 v25.4s, v1.4s, v3.4s
zip1 v17.4s, v18.4s, v24.4s
zip2 v20.4s, v18.4s, v24.4s
zip1 v23.4s, v21.4s, v27.4s
zip2 v26.4s, v21.4s, v27.4s
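// After the zips, each 64-bit lane holds four vertically adjacent samples of
// one column: rows 0-3 in v16/v19/v22/v25, rows 4-7 in v17/v20/v23/v26. The
// indexed sdot below therefore produces 4-tap column sums directly, split
// between taps 0-3 (z7.h[0]) and taps 4-7 (z7.h[1]).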
.align LOOP_ALIGN
8:
ld1 {v18.16b}, [\lsrc], \s_strd
movi v0.2d, #0
movi v1.2d, #0
movi v2.2d, #0
movi v3.2d, #0
mov v21.16b, v18.16b
mov v24.16b, v18.16b
mov v27.16b, v18.16b
sdot z0.d, z16.h, z7.h[0]
tbl v16.16b, {v16.16b, v17.16b}, v6.16b
sdot z1.d, z19.h, z7.h[0]
tbl v19.16b, {v19.16b, v20.16b}, v6.16b
sdot z2.d, z22.h, z7.h[0]
tbl v22.16b, {v22.16b, v23.16b}, v6.16b
subs w8, w8, #1
sdot z3.d, z25.h, z7.h[0]
tbl v25.16b, {v25.16b, v26.16b}, v6.16b
sdot z0.d, z17.h, z7.h[1]
tbl v17.16b, {v17.16b, v18.16b}, v28.16b
sdot z1.d, z20.h, z7.h[1]
tbl v20.16b, {v20.16b, v21.16b}, v29.16b
sdot z2.d, z23.h, z7.h[1]
tbl v23.16b, {v23.16b, v24.16b}, v30.16b
sdot z3.d, z26.h, z7.h[1]
tbl v26.16b, {v26.16b, v27.16b}, v31.16b
uzp1 v0.4s, v0.4s, v1.4s
uzp1 v1.4s, v2.4s, v3.4s
.ifc \type, prep
srshl v0.4s, v0.4s, v5.4s
srshl v1.4s, v1.4s, v5.4s
uzp1 v0.8h, v0.8h, v1.8h
sub z0.h, z0.h, #PREP_BIAS
.else // put
sqrshrun v0.4h, v0.4s, #6
sqrshrun2 v0.8h, v1.4s, #6
umin v0.8h, v0.8h, v5.8h
.endif
st1 {v0.16b}, [\ldst], \d_strd
b.gt 8b
movi v0.2d, #0
movi v1.2d, #0
movi v2.2d, #0
movi v3.2d, #0
sdot z0.d, z16.h, z7.h[0]
sdot z1.d, z19.h, z7.h[0]
sdot z2.d, z22.h, z7.h[0]
sdot z3.d, z25.h, z7.h[0]
sdot z0.d, z17.h, z7.h[1]
sdot z1.d, z20.h, z7.h[1]
sdot z2.d, z23.h, z7.h[1]
sdot z3.d, z26.h, z7.h[1]
subs \w, \w, #8
uzp1 v0.4s, v0.4s, v1.4s
uzp1 v1.4s, v2.4s, v3.4s
.ifc \type, prep
srshl v0.4s, v0.4s, v5.4s
srshl v1.4s, v1.4s, v5.4s
uzp1 v0.8h, v0.8h, v1.8h
sub z0.h, z0.h, #PREP_BIAS
.else // put
sqrshrun v0.4h, v0.4s, #6
sqrshrun2 v0.8h, v1.4s, #6
umin v0.8h, v0.8h, v5.8h
.endif
str q0, [\ldst]
add \dst, \dst, #16
add \src, \src, #16
b.gt 81b
ret
.align JUMP_ALIGN
40: // V - 4xN, put only: 2xN
.ifc \type, put
lsr \d_strd, \d_strd, #1 // hword index for `st1h`
whilelt p1.h, wzr, \w // masking for writes
.endif
cmp \h, #4
b.le 44f
ldr d16, [\src]
ldr d17, [\src, \s_strd]
add \src, \src, \s_strd, lsl #1
ldr d18, [\src]
ldr d19, [\src, \s_strd]
add \src, \src, \s_strd, lsl #1
ldr d20, [\src]
ldr d21, [\src, \s_strd]
add \src, \src, \s_strd, lsl #1
ldr d22, [\src]
ldr d23, [\src, \s_strd]
add \src, \src, \s_strd, lsl #1
sub \h, \h, #2
zip1 v0.8h, v16.8h, v17.8h
zip1 v2.8h, v18.8h, v19.8h
zip1 v18.8h, v20.8h, v21.8h
zip1 v24.8h, v22.8h, v23.8h
zip1 v16.4s, v0.4s, v2.4s
zip2 v19.4s, v0.4s, v2.4s
zip1 v17.4s, v18.4s, v24.4s
zip2 v20.4s, v18.4s, v24.4s
.align LOOP_ALIGN
4:
ldr d18, [\src]
ldr d24, [\src, \s_strd]
add \src, \src, \s_strd, lsl #1
movi v0.2d, #0
movi v1.2d, #0
movi v2.2d, #0
movi v3.2d, #0
mov v21.16b, v18.16b
mov v27.16b, v24.16b
sdot z0.d, z16.h, z7.h[0]
tbl v22.16b, {v16.16b, v17.16b}, v6.16b
sdot z1.d, z19.h, z7.h[0]
tbl v25.16b, {v19.16b, v20.16b}, v6.16b
sdot z0.d, z17.h, z7.h[1]
tbl v23.16b, {v17.16b, v18.16b}, v28.16b
sdot z1.d, z20.h, z7.h[1]
tbl v26.16b, {v20.16b, v21.16b}, v29.16b
subs \h, \h, #2
sdot z2.d, z22.h, z7.h[0]
tbl v16.16b, {v22.16b, v23.16b}, v6.16b
sdot z3.d, z25.h, z7.h[0]
tbl v19.16b, {v25.16b, v26.16b}, v6.16b
sdot z2.d, z23.h, z7.h[1]
tbl v17.16b, {v23.16b, v24.16b}, v28.16b
sdot z3.d, z26.h, z7.h[1]
tbl v20.16b, {v26.16b, v27.16b}, v29.16b
uzp1 v0.4s, v0.4s, v1.4s
uzp1 v1.4s, v2.4s, v3.4s
.ifc \type, prep
srshl v0.4s, v0.4s, v5.4s
srshl v1.4s, v1.4s, v5.4s
uzp1 v0.8h, v0.8h, v1.8h
sub z0.h, z0.h, #PREP_BIAS
str q0, [\dst], #16
.else // put
sqrshrun v0.4h, v0.4s, #6
sqrshrun v1.4h, v1.4s, #6
umin v0.4h, v0.4h, v5.4h
umin v1.4h, v1.4h, v5.4h
st1h {z0.h}, p1, [\dst]
st1h {z1.h}, p1, [\dst, \d_strd, lsl #1]
add \dst, \dst, \d_strd, lsl #2
.endif
b.gt 4b
ldr d18, [\src]
movi v0.2d, #0
movi v1.2d, #0
movi v2.2d, #0
movi v3.2d, #0
mov v21.16b, v18.16b
sdot z0.d, z16.h, z7.h[0]
tbl v22.16b, {v16.16b, v17.16b}, v6.16b
sdot z1.d, z19.h, z7.h[0]
tbl v25.16b, {v19.16b, v20.16b}, v6.16b
sdot z0.d, z17.h, z7.h[1]
tbl v23.16b, {v17.16b, v18.16b}, v28.16b
sdot z1.d, z20.h, z7.h[1]
tbl v26.16b, {v20.16b, v21.16b}, v29.16b
sdot z2.d, z22.h, z7.h[0]
sdot z3.d, z25.h, z7.h[0]
sdot z2.d, z23.h, z7.h[1]
sdot z3.d, z26.h, z7.h[1]
uzp1 v0.4s, v0.4s, v1.4s
uzp1 v1.4s, v2.4s, v3.4s
.ifc \type, prep
srshl v0.4s, v0.4s, v5.4s
srshl v1.4s, v1.4s, v5.4s
uzp1 v0.8h, v0.8h, v1.8h
sub z0.h, z0.h, #PREP_BIAS
str q0, [\dst]
.else // put
sqrshrun v0.4h, v0.4s, #6
sqrshrun v1.4h, v1.4s, #6
umin v0.4h, v0.4h, v5.4h
umin v1.4h, v1.4h, v5.4h
st1h {z0.h}, p1, [\dst]
st1h {z1.h}, p1, [\dst, \d_strd, lsl #1]
.endif
ret
.align JUMP_ALIGN
44: // V - 4x4, put only: 4x2, 2x4, 2x2
add \src, \src, \s_strd, lsl #1 // src - s_strd
subs \h, \h, #2
ldr d16, [\src]
ldr d17, [\src, \s_strd]
add \src, \src, \s_strd, lsl #1
ldr d18, [\src]
ldr d19, [\src, \s_strd]
add \src, \src, \s_strd, lsl #1
ext v7.16b, v7.16b, v7.16b, #4 // [\xmy + 2 * 2]
zip1 v0.8h, v16.8h, v17.8h
zip1 v2.8h, v18.8h, v19.8h
zip1 v16.4s, v0.4s, v2.4s
zip2 v19.4s, v0.4s, v2.4s
.ifc \type, put
b.eq 42f
.endif
ldr d17, [\src]
ldr d23, [\src, \s_strd]
add \src, \src, \s_strd, lsl #1
movi v0.2d, #0
movi v1.2d, #0
movi v2.2d, #0
movi v3.2d, #0
mov v20.16b, v17.16b
mov v26.16b, v23.16b
sdot z0.d, z16.h, z7.h[0]
tbl v22.16b, {v16.16b, v17.16b}, v28.16b
sdot z1.d, z19.h, z7.h[0]
tbl v25.16b, {v19.16b, v20.16b}, v29.16b
sdot z2.d, z22.h, z7.h[0]
tbl v16.16b, {v22.16b, v23.16b}, v28.16b
sdot z3.d, z25.h, z7.h[0]
tbl v19.16b, {v25.16b, v26.16b}, v29.16b
uzp1 v0.4s, v0.4s, v1.4s
uzp1 v1.4s, v2.4s, v3.4s
.ifc \type, prep
srshl v0.4s, v0.4s, v5.4s
srshl v1.4s, v1.4s, v5.4s
uzp1 v0.8h, v0.8h, v1.8h
sub z0.h, z0.h, #PREP_BIAS
str q0, [\dst], #16
.else // put
sqrshrun v0.4h, v0.4s, #6
sqrshrun v1.4h, v1.4s, #6
umin v0.4h, v0.4h, v5.4h
umin v1.4h, v1.4h, v5.4h
st1h {z0.h}, p1, [\dst]
st1h {z1.h}, p1, [\dst, \d_strd, lsl #1]
add \dst, \dst, \d_strd, lsl #2
.endif
.ifc \type, put
.align JUMP_ALIGN
42:
.endif
ldr d17, [\src]
movi v0.2d, #0
movi v1.2d, #0
movi v2.2d, #0
movi v3.2d, #0
mov v20.16b, v17.16b
sdot z0.d, z16.h, z7.h[0]
tbl v22.16b, {v16.16b, v17.16b}, v28.16b
sdot z1.d, z19.h, z7.h[0]
tbl v25.16b, {v19.16b, v20.16b}, v29.16b
sdot z2.d, z22.h, z7.h[0]
sdot z3.d, z25.h, z7.h[0]
uzp1 v0.4s, v0.4s, v1.4s
uzp1 v1.4s, v2.4s, v3.4s
.ifc \type, prep
srshl v0.4s, v0.4s, v5.4s
srshl v1.4s, v1.4s, v5.4s
uzp1 v0.8h, v0.8h, v1.8h
sub z0.h, z0.h, #PREP_BIAS
str q0, [\dst]
.else // put
sqrshrun v0.4h, v0.4s, #6
sqrshrun v1.4h, v1.4s, #6
umin v0.4h, v0.4h, v5.4h
umin v1.4h, v1.4h, v5.4h
st1h {z0.h}, p1, [\dst]
st1h {z1.h}, p1, [\dst, \d_strd, lsl #1]
.endif
ret
.align JUMP_ALIGN
L(\type\()_8tap_h_hv_\isa):
madd \mx, \mx, w11, w9
movrel x13, h_tbl_sve
sub \src, \src, #6 // src - 3 * 2
ubfx w9, \mx, #7, #7
and \mx, \mx, #0x7F
cmp \w, #4
csel \mx, \mx, w9, le
ldp q30, q31, [x13]
add \xmx, x12, \xmx, lsl #3 // subpel H filter address
cbz \my, L(\type\()_8tap_h_\isa)
// HV cases
madd w14, \my, w11, w10
.ifc \bdmax, w8
ldr \bdmax, [sp]
.endif
ubfx w11, w14, #7, #7
and w14, w14, #0x7F
ld1sb {z4.h}, p0/z, [\xmx]
cmp \h, #4
csel w14, w14, w11, le
.ifc \type, put
dup v29.8h, \bdmax
.endif
clz \bdmax, \bdmax
add \xmy, x12, x14, lsl #3 // subpel V filter address
ld1sb {z7.h}, p0/z, [\xmy]
.ifc \type, put
mov w9, #12
sub w9, w9, \bdmax
dup v6.4s, w9
.endif
sub \bdmax, \bdmax, #24
mov x15, x30
sub \src, \src, \s_strd // src - s_strd - 3 * 2
dup v5.4s, \bdmax
cmp w10, SHARP1
b.ne L(\type\()_6tap_hv_\isa) // vertical != SHARP1
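// Only the sharp filter set has nonzero outer taps; the regular and smooth
// vertical filters effectively use 6 taps, so everything but SHARP1 takes the
// cheaper 6-tap HV path (which only references v7.h[1]..v7.h[6]).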
// HV 8-tap cases
cmp \w, #4
b.le 40f
// .align JUMP_ALIGN // fallthrough
80: // HV8 - 8xN+
.ifc \type, prep
add \wd_strd, \w, \w // d_strd = 2 * w
.endif
cmp \h, #4
b.le 84f
sub \src, \src, \s_strd, lsl #1 // src - 3 * s_strd - 3 * 2
.align LOOP_ALIGN
81:
mov \lsrc, \src
mov \ldst, \dst
mov w8, \h
bl L(\type\()_hv_filter8_\isa)
uzp1 v16.8h, v23.8h, v24.8h
bl L(\type\()_hv_filter8_\isa)
uzp1 v17.8h, v23.8h, v24.8h
bl L(\type\()_hv_filter8_\isa)
uzp1 v18.8h, v23.8h, v24.8h
bl L(\type\()_hv_filter8_\isa)
uzp1 v19.8h, v23.8h, v24.8h
bl L(\type\()_hv_filter8_\isa)
uzp1 v20.8h, v23.8h, v24.8h
bl L(\type\()_hv_filter8_\isa)
uzp1 v21.8h, v23.8h, v24.8h
bl L(\type\()_hv_filter8_\isa)
uzp1 v22.8h, v23.8h, v24.8h
.align LOOP_ALIGN
8:
ldp q24, q28, [\lsrc]
smull v0.4s, v16.4h, v7.h[0]
smull2 v1.4s, v16.8h, v7.h[0]
mov v16.16b, v17.16b
movi v2.2d, #0
movi v3.2d, #0
tbl v23.16b, {v24.16b}, v30.16b
tbl v24.16b, {v24.16b}, v31.16b
ldur q26, [\lsrc, #8]
smlal v0.4s, v17.4h, v7.h[1]
smlal2 v1.4s, v17.8h, v7.h[1]
mov v17.16b, v18.16b
add \lsrc, \lsrc, \s_strd
sdot z2.d, z23.h, z4.h[0]
sdot z3.d, z24.h, z4.h[0]
movi v23.2d, #0
movi v24.2d, #0
tbl v25.16b, {v26.16b}, v30.16b
tbl v26.16b, {v26.16b}, v31.16b
smlal v0.4s, v18.4h, v7.h[2]
smlal2 v1.4s, v18.8h, v7.h[2]
mov v18.16b, v19.16b
sdot z23.d, z25.h, z4.h[0]
sdot z24.d, z26.h, z4.h[0]
tbl v27.16b, {v28.16b}, v30.16b
tbl v28.16b, {v28.16b}, v31.16b
smlal v0.4s, v19.4h, v7.h[3]
smlal2 v1.4s, v19.8h, v7.h[3]
mov v19.16b, v20.16b
subs w8, w8, #1
sdot z2.d, z25.h, z4.h[1]
sdot z3.d, z26.h, z4.h[1]
sdot z23.d, z27.h, z4.h[1]
sdot z24.d, z28.h, z4.h[1]
smlal v0.4s, v20.4h, v7.h[4]
smlal2 v1.4s, v20.8h, v7.h[4]
mov v20.16b, v21.16b
uzp1 v3.4s, v2.4s, v3.4s
uzp1 v24.4s, v23.4s, v24.4s
smlal v0.4s, v21.4h, v7.h[5]
smlal2 v1.4s, v21.8h, v7.h[5]
mov v21.16b, v22.16b
srshl v23.4s, v3.4s, v5.4s
srshl v24.4s, v24.4s, v5.4s
smlal v0.4s, v22.4h, v7.h[6]
smlal2 v1.4s, v22.8h, v7.h[6]
uzp1 v22.8h, v23.8h, v24.8h
smlal v0.4s, v22.4h, v7.h[7]
smlal2 v1.4s, v22.8h, v7.h[7]
.ifc \type, prep
rshrn v0.4h, v0.4s, #6
rshrn2 v0.8h, v1.4s, #6
sub z0.h, z0.h, #PREP_BIAS
.else // put
srshl v0.4s, v0.4s, v6.4s
srshl v1.4s, v1.4s, v6.4s
sqxtun v0.4h, v0.4s
sqxtun2 v0.8h, v1.4s
umin v0.8h, v0.8h, v29.8h
.endif
st1 {v0.8h}, [\ldst], \d_strd
b.gt 8b
subs \w, \w, #8
add \src, \src, #16
add \dst, \dst, #16
b.gt 81b
ret x15
.align JUMP_ALIGN
40: // HV8 - 4xN, put only: 2xN
.ifc \type, put
lsr \d_strd, \d_strd, #1 // hword index for `st1h`
whilelt p1.h, wzr, \w // masking for writes
.endif
ext v4.16b, v4.16b, v4.16b, #4 // [\xmx + 2 * 2]
add \src, \src, #4
cmp \h, #4
b.le 44f
sub \src, \src, \s_strd, lsl #1 // src - 3 * s_strd - 3 * 2
bl L(\type\()_hv_filter4_\isa)
xtn v16.4h, v0.4s
bl L(\type\()_hv_filter4_\isa)
xtn v17.4h, v0.4s
bl L(\type\()_hv_filter4_\isa)
xtn v18.4h, v0.4s
bl L(\type\()_hv_filter4_\isa)
xtn v19.4h, v0.4s
bl L(\type\()_hv_filter4_\isa)
xtn v20.4h, v0.4s
bl L(\type\()_hv_filter4_\isa)
xtn v21.4h, v0.4s
bl L(\type\()_hv_filter4_\isa)
xtn v22.4h, v0.4s
.align LOOP_ALIGN
4:
ld1 {v3.16b}, [\src], \s_strd
smull v24.4s, v16.4h, v7.h[0]
smlal v24.4s, v17.4h, v7.h[1]
tbl v2.16b, {v3.16b}, v30.16b
tbl v3.16b, {v3.16b}, v31.16b
movi v0.2d, #0
movi v1.2d, #0
mov v16.16b, v17.16b
mov v17.16b, v18.16b
smlal v24.4s, v18.4h, v7.h[2]
smlal v24.4s, v19.4h, v7.h[3]
sdot z0.d, z2.h, z4.h[0]
sdot z1.d, z3.h, z4.h[0]
mov v18.16b, v19.16b
mov v19.16b, v20.16b
uzp1 v0.4s, v0.4s, v1.4s
smlal v24.4s, v20.4h, v7.h[4]
smlal v24.4s, v21.4h, v7.h[5]
srshl v0.4s, v0.4s, v5.4s
mov v20.16b, v21.16b
mov v21.16b, v22.16b
subs \h, \h, #1
smlal v24.4s, v22.4h, v7.h[6]
xtn v22.4h, v0.4s
smlal v24.4s, v22.4h, v7.h[7]
.ifc \type, prep
rshrn v0.4h, v24.4s, #6
sub z0.h, z0.h, #PREP_BIAS
str d0, [\dst], #8
.else // put
srshl v0.4s, v24.4s, v6.4s
sqxtun v0.4h, v0.4s
umin v0.4h, v0.4h, v29.4h
st1h {z0.h}, p1, [\dst]
add \dst, \dst, \d_strd, lsl #1
.endif
b.gt 4b
ret x15
.align JUMP_ALIGN
L(\type\()_6tap_hv_\isa):
cmp \w, #4
b.le 46f
// .align JUMP_ALIGN // fallthrough
80: // HV6 - 8xN+
.ifc \type, prep
add \wd_strd, \w, \w // d_strd = 2 * w
.endif
cmp \h, #4
b.le 84f
sub \src, \src, \s_strd // src - 2 * s_strd - 3 * 2
.align LOOP_ALIGN
81:
mov \lsrc, \src
mov \ldst, \dst
mov w8, \h
bl L(\type\()_hv_filter8_\isa)
uzp1 v16.8h, v23.8h, v24.8h
bl L(\type\()_hv_filter8_\isa)
uzp1 v17.8h, v23.8h, v24.8h
bl L(\type\()_hv_filter8_\isa)
uzp1 v18.8h, v23.8h, v24.8h
bl L(\type\()_hv_filter8_\isa)
uzp1 v19.8h, v23.8h, v24.8h
bl L(\type\()_hv_filter8_\isa)
uzp1 v20.8h, v23.8h, v24.8h
.align LOOP_ALIGN
8:
ldp q24, q28, [\lsrc]
smull v0.4s, v16.4h, v7.h[1]
smull2 v1.4s, v16.8h, v7.h[1]
mov v16.16b, v17.16b
tbl v23.16b, {v24.16b}, v30.16b
tbl v24.16b, {v24.16b}, v31.16b
movi v2.2d, #0
movi v3.2d, #0
ldur q26, [\lsrc, #8]
add \lsrc, \lsrc, \s_strd
sdot z2.d, z23.h, z4.h[0]
sdot z3.d, z24.h, z4.h[0]
tbl v25.16b, {v26.16b}, v30.16b
tbl v26.16b, {v26.16b}, v31.16b
movi v23.2d, #0
movi v24.2d, #0
sdot z23.d, z25.h, z4.h[0]
sdot z24.d, z26.h, z4.h[0]
tbl v27.16b, {v28.16b}, v30.16b
tbl v28.16b, {v28.16b}, v31.16b
smlal v0.4s, v17.4h, v7.h[2]
smlal2 v1.4s, v17.8h, v7.h[2]
mov v17.16b, v18.16b
sdot z2.d, z25.h, z4.h[1]
sdot z3.d, z26.h, z4.h[1]
sdot z23.d, z27.h, z4.h[1]
sdot z24.d, z28.h, z4.h[1]
smlal v0.4s, v18.4h, v7.h[3]
smlal2 v1.4s, v18.8h, v7.h[3]
mov v18.16b, v19.16b
uzp1 v3.4s, v2.4s, v3.4s
uzp1 v24.4s, v23.4s, v24.4s
smlal v0.4s, v19.4h, v7.h[4]
smlal2 v1.4s, v19.8h, v7.h[4]
mov v19.16b, v20.16b
srshl v23.4s, v3.4s, v5.4s
srshl v24.4s, v24.4s, v5.4s
smlal v0.4s, v20.4h, v7.h[5]
smlal2 v1.4s, v20.8h, v7.h[5]
subs w8, w8, #1
uzp1 v20.8h, v23.8h, v24.8h
smlal v0.4s, v20.4h, v7.h[6]
smlal2 v1.4s, v20.8h, v7.h[6]
.ifc \type, prep
rshrn v0.4h, v0.4s, #6
rshrn2 v0.8h, v1.4s, #6
sub z0.h, z0.h, #PREP_BIAS
.else // put
srshl v0.4s, v0.4s, v6.4s
srshl v1.4s, v1.4s, v6.4s
sqxtun v0.4h, v0.4s
sqxtun2 v0.8h, v1.4s
umin v0.8h, v0.8h, v29.8h
.endif
st1 {v0.8h}, [\ldst], \d_strd
b.gt 8b
add \dst, \dst, #16
subs \w, \w, #8
add \src, \src, #16
b.gt 81b
ret x15
.align LOOP_ALIGN
84: // HV4 - 8x4, 8x2
mov \lsrc, \src
mov \ldst, \dst
mov w8, \h
bl L(\type\()_hv_filter8_\isa)
uzp1 v17.8h, v23.8h, v24.8h
bl L(\type\()_hv_filter8_\isa)
uzp1 v18.8h, v23.8h, v24.8h
bl L(\type\()_hv_filter8_\isa)
uzp1 v19.8h, v23.8h, v24.8h
.align LOOP_ALIGN
81:
ldp q24, q28, [\lsrc]
ldur q26, [\lsrc, #8]
add \lsrc, \lsrc, \s_strd
tbl v23.16b, {v24.16b}, v30.16b
tbl v24.16b, {v24.16b}, v31.16b
movi v2.2d, #0
movi v3.2d, #0
sdot z2.d, z23.h, z4.h[0]
sdot z3.d, z24.h, z4.h[0]
tbl v25.16b, {v26.16b}, v30.16b
tbl v26.16b, {v26.16b}, v31.16b
movi v23.2d, #0
movi v24.2d, #0
sdot z23.d, z25.h, z4.h[0]
sdot z24.d, z26.h, z4.h[0]
tbl v27.16b, {v28.16b}, v30.16b
tbl v28.16b, {v28.16b}, v31.16b
sdot z2.d, z25.h, z4.h[1]
sdot z3.d, z26.h, z4.h[1]
sdot z23.d, z27.h, z4.h[1]
sdot z24.d, z28.h, z4.h[1]
smull v0.4s, v17.4h, v7.h[2]
smull2 v1.4s, v17.8h, v7.h[2]
mov v17.16b, v18.16b
subs w8, w8, #1
uzp1 v3.4s, v2.4s, v3.4s
uzp1 v24.4s, v23.4s, v24.4s
smlal v0.4s, v18.4h, v7.h[3]
smlal2 v1.4s, v18.8h, v7.h[3]
mov v18.16b, v19.16b
srshl v23.4s, v3.4s, v5.4s
srshl v24.4s, v24.4s, v5.4s
smlal v0.4s, v19.4h, v7.h[4]
smlal2 v1.4s, v19.8h, v7.h[4]
uzp1 v19.8h, v23.8h, v24.8h
smlal v0.4s, v19.4h, v7.h[5]
smlal2 v1.4s, v19.8h, v7.h[5]
.ifc \type, prep
rshrn v0.4h, v0.4s, #6
rshrn2 v0.8h, v1.4s, #6
sub z0.h, z0.h, #PREP_BIAS
.else // put
srshl v0.4s, v0.4s, v6.4s
srshl v1.4s, v1.4s, v6.4s
sqxtun v0.4h, v0.4s
sqxtun2 v0.8h, v1.4s
umin v0.8h, v0.8h, v29.8h
.endif
st1 {v0.8h}, [\ldst], \d_strd
b.gt 81b
subs \w, \w, #8
add \dst, \dst, #16
add \src, \src, #16
b.gt 84b
ret x15
.align FUNC_ALIGN
L(\type\()_hv_filter8_\isa):
ldp q24, q28, [\lsrc]
ldur q26, [\lsrc, #8]
add \lsrc, \lsrc, \s_strd
tbl v23.16b, {v24.16b}, v30.16b
tbl v24.16b, {v24.16b}, v31.16b
movi v2.2d, #0
movi v3.2d, #0
sdot z2.d, z23.h, z4.h[0]
sdot z3.d, z24.h, z4.h[0]
tbl v25.16b, {v26.16b}, v30.16b
tbl v26.16b, {v26.16b}, v31.16b
movi v23.2d, #0
movi v24.2d, #0
sdot z23.d, z25.h, z4.h[0]
sdot z24.d, z26.h, z4.h[0]
tbl v27.16b, {v28.16b}, v30.16b
tbl v28.16b, {v28.16b}, v31.16b
sdot z2.d, z25.h, z4.h[1]
sdot z3.d, z26.h, z4.h[1]
sdot z23.d, z27.h, z4.h[1]
sdot z24.d, z28.h, z4.h[1]
uzp1 v3.4s, v2.4s, v3.4s
uzp1 v24.4s, v23.4s, v24.4s
srshl v23.4s, v3.4s, v5.4s
srshl v24.4s, v24.4s, v5.4s
ret
.align FUNC_ALIGN
L(\type\()_hv_filter4_\isa):
ld1 {v3.16b}, [\src], \s_strd
tbl v2.16b, {v3.16b}, v30.16b
tbl v3.16b, {v3.16b}, v31.16b
movi v0.2d, #0
movi v1.2d, #0
sdot z0.d, z2.h, z4.h[0]
sdot z1.d, z3.h, z4.h[0]
uzp1 v0.4s, v0.4s, v1.4s
srshl v0.4s, v0.4s, v5.4s
ret
.align JUMP_ALIGN
46: // H4V6 - 4xN, put only: 2xN
.ifc \type, put
lsr \d_strd, \d_strd, #1 // hword index for `st1h`
whilelt p1.h, wzr, \w // masking for writes
.endif
ext v4.16b, v4.16b, v4.16b, #4 // [\xmx + 2 * 2]
add \src, \src, #4
cmp \h, #4
b.le 44f
sub \src, \src, \s_strd // src - 2 * s_strd - 3 * 2
bl L(\type\()_hv_filter4_\isa)
xtn v16.4h, v0.4s
bl L(\type\()_hv_filter4_\isa)
xtn v17.4h, v0.4s
bl L(\type\()_hv_filter4_\isa)
xtn v18.4h, v0.4s
bl L(\type\()_hv_filter4_\isa)
xtn v19.4h, v0.4s
bl L(\type\()_hv_filter4_\isa)
xtn v20.4h, v0.4s
.align LOOP_ALIGN
4:
ld1 {v3.16b}, [\src], \s_strd
smull v24.4s, v16.4h, v7.h[1]
smlal v24.4s, v17.4h, v7.h[2]
tbl v2.16b, {v3.16b}, v30.16b
tbl v3.16b, {v3.16b}, v31.16b
movi v0.2d, #0
movi v1.2d, #0
sdot z0.d, z2.h, z4.h[0]
sdot z1.d, z3.h, z4.h[0]
mov v16.16b, v17.16b
mov v17.16b, v18.16b
smlal v24.4s, v18.4h, v7.h[3]
smlal v24.4s, v19.4h, v7.h[4]
uzp1 v0.4s, v0.4s, v1.4s
mov v18.16b, v19.16b
mov v19.16b, v20.16b
subs \h, \h, #1
srshl v0.4s, v0.4s, v5.4s
smlal v24.4s, v20.4h, v7.h[5]
xtn v20.4h, v0.4s
smlal v24.4s, v20.4h, v7.h[6]
.ifc \type, prep
rshrn v0.4h, v24.4s, #6
sub z0.h, z0.h, #PREP_BIAS
str d0, [\dst], #8
.else // put
srshl v0.4s, v24.4s, v6.4s
sqxtun v0.4h, v0.4s
umin v0.4h, v0.4h, v29.4h
st1h {z0.h}, p1, [\dst]
add \dst, \dst, \d_strd, lsl #1
.endif
b.gt 4b
ret x15
.align JUMP_ALIGN
44: // H4V4 - 4x4, put only: 4x2, 2x4, 2x2
bl L(\type\()_hv_filter4_\isa)
xtn v17.4h, v0.4s
bl L(\type\()_hv_filter4_\isa)
xtn v18.4h, v0.4s
bl L(\type\()_hv_filter4_\isa)
xtn v19.4h, v0.4s
.align LOOP_ALIGN
4:
ld1 {v3.16b}, [\src], \s_strd
smull v24.4s, v17.4h, v7.h[2]
smlal v24.4s, v18.4h, v7.h[3]
tbl v2.16b, {v3.16b}, v30.16b
tbl v3.16b, {v3.16b}, v31.16b
movi v0.2d, #0
movi v1.2d, #0
sdot z0.d, z2.h, z4.h[0]
sdot z1.d, z3.h, z4.h[0]
uzp1 v0.4s, v0.4s, v1.4s
mov v17.16b, v18.16b
mov v18.16b, v19.16b
subs \h, \h, #1
srshl v0.4s, v0.4s, v5.4s
smlal v24.4s, v19.4h, v7.h[4]
xtn v19.4h, v0.4s
smlal v24.4s, v19.4h, v7.h[5]
.ifc \type, prep
rshrn v0.4h, v24.4s, #6
sub z0.h, z0.h, #PREP_BIAS
str d0, [\dst], #8
.else // put
srshl v0.4s, v24.4s, v6.4s
sqxtun v0.4h, v0.4s
umin v0.4h, v0.4h, v29.4h
st1h {z0.h}, p1, [\dst]
add \dst, \dst, \d_strd, lsl #1
.endif
b.gt 4b
ret x15
.align JUMP_ALIGN
L(\type\()_8tap_h_\isa):
movrel x11, \type\()_8tap_h_\isa\()_tbl
ldrsw x12, [x11, x8, lsl #2]
.ifc \bdmax, w8
ldr \bdmax, [sp]
.endif
.ifc \type, prep
clz \bdmax, \bdmax
sub \bdmax, \bdmax, #24
dup v5.4s, \bdmax
.else // put
mov w9, #34 // rounding for 10-bit case
mov w10, #40 // rounding for 12-bit case
cmp \bdmax, #0xFFF
csel w9, w9, w10, ne // select rounding based on \bdmax
dup v5.8h, \bdmax
dup v6.2d, x9
.endif
add x11, x11, x12
ld1sb {z4.h}, p0/z, [\xmx]
br x11
.align JUMP_ALIGN
20: // H - 4xN, put only: 2xN
40:
AARCH64_VALID_JUMP_TARGET
add \src, \src, #4 // src - 1 * 2
ext v4.16b, v4.16b, v4.16b, #4 // [\xmx + 2 * 2]
.ifc \type, put
lsr \d_strd, \d_strd, #1 // hword index for `st1h`
whilelt p1.h, wzr, \w // masking for writes
.endif
.align LOOP_ALIGN
4:
ldr q17, [\src]
ldr q19, [\src, \s_strd]
add \src, \src, \s_strd, lsl #1
.ifc \type, prep
movi v0.2d, #0
movi v1.2d, #0
movi v2.2d, #0
movi v3.2d, #0
.else
mov v0.16b, v6.16b
mov v1.16b, v6.16b
mov v2.16b, v6.16b
mov v3.16b, v6.16b
.endif
tbl v16.16b, {v17.16b}, v30.16b
tbl v17.16b, {v17.16b}, v31.16b
sdot z0.d, z16.h, z4.h[0]
sdot z1.d, z17.h, z4.h[0]
subs \h, \h, #2
tbl v18.16b, {v19.16b}, v30.16b
tbl v19.16b, {v19.16b}, v31.16b
sdot z2.d, z18.h, z4.h[0]
sdot z3.d, z19.h, z4.h[0]
uzp1 v0.4s, v0.4s, v1.4s
uzp1 v1.4s, v2.4s, v3.4s
.ifc \type, prep
srshl v0.4s, v0.4s, v5.4s
srshl v1.4s, v1.4s, v5.4s
uzp1 v0.8h, v0.8h, v1.8h
sub z0.h, z0.h, #PREP_BIAS
str q0, [\dst], #16
.else // put
sqshrun v0.4h, v0.4s, #6
sqshrun v1.4h, v1.4s, #6
umin v0.4h, v0.4h, v5.4h
umin v1.4h, v1.4h, v5.4h
st1h {z0.h}, p1, [\dst]
st1h {z1.h}, p1, [\dst, \d_strd, lsl #1]
add \dst, \dst, \d_strd, lsl #2
.endif
b.gt 4b
ret
.align JUMP_ALIGN
80: // H - 8xN
AARCH64_VALID_JUMP_TARGET
.align LOOP_ALIGN
8:
ldp q17, q21, [\src]
ldur q19, [\src, #8]
.ifc \type, prep
movi v0.2d, #0
movi v2.2d, #0
.else
mov v0.16b, v6.16b
mov v2.16b, v6.16b
.endif
tbl v16.16b, {v17.16b}, v30.16b
tbl v17.16b, {v17.16b}, v31.16b
add \src, \src, \s_strd
sdot z0.d, z16.h, z4.h[0]
sdot z2.d, z17.h, z4.h[0]
tbl v18.16b, {v19.16b}, v30.16b
tbl v19.16b, {v19.16b}, v31.16b
.ifc \type, prep
movi v16.2d, #0
movi v17.2d, #0
.else
mov v16.16b, v6.16b
mov v17.16b, v6.16b
.endif
ldp q23, q27, [\src]
ldur q25, [\src, #8]
sdot z16.d, z18.h, z4.h[0]
sdot z17.d, z19.h, z4.h[0]
tbl v22.16b, {v23.16b}, v30.16b
tbl v23.16b, {v23.16b}, v31.16b
.ifc \type, prep
movi v1.2d, #0
movi v3.2d, #0
.else
mov v1.16b, v6.16b
mov v3.16b, v6.16b
.endif
add \src, \src, \s_strd
sdot z1.d, z22.h, z4.h[0]
sdot z3.d, z23.h, z4.h[0]
tbl v24.16b, {v25.16b}, v30.16b
tbl v25.16b, {v25.16b}, v31.16b
.ifc \type, prep
movi v22.2d, #0
movi v23.2d, #0
.else
mov v22.16b, v6.16b
mov v23.16b, v6.16b
.endif
sdot z22.d, z24.h, z4.h[0]
sdot z23.d, z25.h, z4.h[0]
tbl v20.16b, {v21.16b}, v30.16b
tbl v21.16b, {v21.16b}, v31.16b
sdot z0.d, z18.h, z4.h[1]
sdot z2.d, z19.h, z4.h[1]
tbl v26.16b, {v27.16b}, v30.16b
tbl v27.16b, {v27.16b}, v31.16b
sdot z16.d, z20.h, z4.h[1]
sdot z17.d, z21.h, z4.h[1]
sdot z1.d, z24.h, z4.h[1]
sdot z3.d, z25.h, z4.h[1]
sdot z22.d, z26.h, z4.h[1]
sdot z23.d, z27.h, z4.h[1]
subs \h, \h, #2
uzp1 v0.4s, v0.4s, v2.4s
uzp1 v2.4s, v16.4s, v17.4s
uzp1 v1.4s, v1.4s, v3.4s
uzp1 v3.4s, v22.4s, v23.4s
.ifc \type, prep
srshl v0.4s, v0.4s, v5.4s
srshl v2.4s, v2.4s, v5.4s
srshl v1.4s, v1.4s, v5.4s
srshl v3.4s, v3.4s, v5.4s
uzp1 v0.8h, v0.8h, v2.8h
uzp1 v1.8h, v1.8h, v3.8h
sub z0.h, z0.h, #PREP_BIAS
sub z1.h, z1.h, #PREP_BIAS
stp q0, q1, [\dst], #32
.else // put
sqshrun v0.4h, v0.4s, #6
sqshrun2 v0.8h, v2.4s, #6
sqshrun v1.4h, v1.4s, #6
sqshrun2 v1.8h, v3.4s, #6
umin v0.8h, v0.8h, v5.8h
umin v1.8h, v1.8h, v5.8h
st1 {v0.16b}, [\dst], \d_strd
st1 {v1.16b}, [\dst], \d_strd
.endif
b.gt 8b
ret
.align JUMP_ALIGN
160: // H - 16xN
AARCH64_VALID_JUMP_TARGET
.align LOOP_ALIGN
16:
ldp q17, q21, [\src]
ldur q19, [\src, #8]
.ifc \type, prep
movi v0.2d, #0
movi v2.2d, #0
.else
mov v0.16b, v6.16b
mov v2.16b, v6.16b
.endif
tbl v16.16b, {v17.16b}, v30.16b
tbl v17.16b, {v17.16b}, v31.16b
sdot z0.d, z16.h, z4.h[0]
sdot z2.d, z17.h, z4.h[0]
tbl v18.16b, {v19.16b}, v30.16b
tbl v19.16b, {v19.16b}, v31.16b
.ifc \type, prep
movi v16.2d, #0
movi v17.2d, #0
.else
mov v16.16b, v6.16b
mov v17.16b, v6.16b
.endif
ldur q25, [\src, #24]
ldr q27, [\src, #32]
sdot z16.d, z18.h, z4.h[0]
sdot z17.d, z19.h, z4.h[0]
tbl v22.16b, {v21.16b}, v30.16b
tbl v23.16b, {v21.16b}, v31.16b
.ifc \type, prep
movi v1.2d, #0
movi v3.2d, #0
.else
mov v1.16b, v6.16b
mov v3.16b, v6.16b
.endif
add \src, \src, \s_strd
sdot z1.d, z22.h, z4.h[0]
sdot z3.d, z23.h, z4.h[0]
tbl v24.16b, {v25.16b}, v30.16b
tbl v25.16b, {v25.16b}, v31.16b
.ifc \type, prep
movi v22.2d, #0
movi v23.2d, #0
.else
mov v22.16b, v6.16b
mov v23.16b, v6.16b
.endif
sdot z22.d, z24.h, z4.h[0]
sdot z23.d, z25.h, z4.h[0]
tbl v20.16b, {v21.16b}, v30.16b
tbl v21.16b, {v21.16b}, v31.16b
sdot z0.d, z18.h, z4.h[1]
sdot z2.d, z19.h, z4.h[1]
tbl v26.16b, {v27.16b}, v30.16b
tbl v27.16b, {v27.16b}, v31.16b
sdot z16.d, z20.h, z4.h[1]
sdot z17.d, z21.h, z4.h[1]
sdot z1.d, z24.h, z4.h[1]
sdot z3.d, z25.h, z4.h[1]
sdot z22.d, z26.h, z4.h[1]
sdot z23.d, z27.h, z4.h[1]
subs \h, \h, #1
uzp1 v0.4s, v0.4s, v2.4s
uzp1 v2.4s, v16.4s, v17.4s
uzp1 v1.4s, v1.4s, v3.4s
uzp1 v3.4s, v22.4s, v23.4s
.ifc \type, prep
srshl v0.4s, v0.4s, v5.4s
srshl v2.4s, v2.4s, v5.4s
srshl v1.4s, v1.4s, v5.4s
srshl v3.4s, v3.4s, v5.4s
uzp1 v0.8h, v0.8h, v2.8h
uzp1 v1.8h, v1.8h, v3.8h
sub z0.h, z0.h, #PREP_BIAS
sub z1.h, z1.h, #PREP_BIAS
stp q0, q1, [\dst], #32
.else // put
sqshrun v0.4h, v0.4s, #6
sqshrun2 v0.8h, v2.4s, #6
sqshrun v1.4h, v1.4s, #6
sqshrun2 v1.8h, v3.4s, #6
umin v0.8h, v0.8h, v5.8h
umin v1.8h, v1.8h, v5.8h
st1 {v0.16b, v1.16b}, [\dst], \d_strd
.endif
b.gt 16b
ret
.align JUMP_ALIGN
320: // H - 32xN+
640:
1280:
AARCH64_VALID_JUMP_TARGET
.ifc \type, put
sub \d_strd, \d_strd, \w, uxtw #1
.endif
sub \s_strd, \s_strd, \w, uxtw #1
mov w8, \w
.align LOOP_ALIGN
32:
ldp q17, q21, [\src]
ldur q19, [\src, #8]
.ifc \type, prep
movi v0.2d, #0
movi v2.2d, #0
.else
mov v0.16b, v6.16b
mov v2.16b, v6.16b
.endif
tbl v16.16b, {v17.16b}, v30.16b
tbl v17.16b, {v17.16b}, v31.16b
sdot z0.d, z16.h, z4.h[0]
sdot z2.d, z17.h, z4.h[0]
tbl v18.16b, {v19.16b}, v30.16b
tbl v19.16b, {v19.16b}, v31.16b
.ifc \type, prep
movi v16.2d, #0
movi v17.2d, #0
.else
mov v16.16b, v6.16b
mov v17.16b, v6.16b
.endif
ldur q25, [\src, #24]
sdot z16.d, z18.h, z4.h[0]
sdot z17.d, z19.h, z4.h[0]
ldr q27, [\src, #32]!
tbl v22.16b, {v21.16b}, v30.16b
tbl v23.16b, {v21.16b}, v31.16b
.ifc \type, prep
movi v1.2d, #0
movi v3.2d, #0
.else
mov v1.16b, v6.16b
mov v3.16b, v6.16b
.endif
sdot z1.d, z22.h, z4.h[0]
sdot z3.d, z23.h, z4.h[0]
tbl v24.16b, {v25.16b}, v30.16b
tbl v25.16b, {v25.16b}, v31.16b
.ifc \type, prep
movi v22.2d, #0
movi v23.2d, #0
.else
mov v22.16b, v6.16b
mov v23.16b, v6.16b
.endif
sdot z22.d, z24.h, z4.h[0]
sdot z23.d, z25.h, z4.h[0]
tbl v20.16b, {v21.16b}, v30.16b
tbl v21.16b, {v21.16b}, v31.16b
sdot z0.d, z18.h, z4.h[1]
sdot z2.d, z19.h, z4.h[1]
tbl v26.16b, {v27.16b}, v30.16b
tbl v27.16b, {v27.16b}, v31.16b
sdot z16.d, z20.h, z4.h[1]
sdot z17.d, z21.h, z4.h[1]
sdot z1.d, z24.h, z4.h[1]
sdot z3.d, z25.h, z4.h[1]
sdot z22.d, z26.h, z4.h[1]
sdot z23.d, z27.h, z4.h[1]
subs w8, w8, #16
uzp1 v0.4s, v0.4s, v2.4s
uzp1 v2.4s, v16.4s, v17.4s
uzp1 v1.4s, v1.4s, v3.4s
uzp1 v3.4s, v22.4s, v23.4s
.ifc \type, prep
srshl v0.4s, v0.4s, v5.4s
srshl v2.4s, v2.4s, v5.4s
srshl v1.4s, v1.4s, v5.4s
srshl v3.4s, v3.4s, v5.4s
uzp1 v0.8h, v0.8h, v2.8h
uzp1 v1.8h, v1.8h, v3.8h
sub z0.h, z0.h, #PREP_BIAS
sub z1.h, z1.h, #PREP_BIAS
.else // put
sqshrun v0.4h, v0.4s, #6
sqshrun2 v0.8h, v2.4s, #6
sqshrun v1.4h, v1.4s, #6
sqshrun2 v1.8h, v3.4s, #6
umin v0.8h, v0.8h, v5.8h
umin v1.8h, v1.8h, v5.8h
.endif
stp q0, q1, [\dst], #32
b.gt 32b
add \src, \src, \s_strd
.ifc \type, put
add \dst, \dst, \d_strd
.endif
subs \h, \h, #1
mov w8, \w
b.gt 32b
ret
endfunc
jumptable \type\()_8tap_h_\isa\()_tbl
.word 1280b - \type\()_8tap_h_\isa\()_tbl
.word 640b - \type\()_8tap_h_\isa\()_tbl
.word 320b - \type\()_8tap_h_\isa\()_tbl
.word 160b - \type\()_8tap_h_\isa\()_tbl
.word 80b - \type\()_8tap_h_\isa\()_tbl
.word 40b - \type\()_8tap_h_\isa\()_tbl
.ifc \type, put
.word 20b - \type\()_8tap_h_\isa\()_tbl
.endif
endjumptable
.endm
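// prep_sve below converts source pixels into dav1d's intermediate "prep"
// layout with one predicated multiply-add per vector. A rough C sketch (an
// illustration only; it assumes PREP_BIAS_NEG is the negated PREP_BIAS
// constant used elsewhere in this file, and that the multiplier derived from
// bdmax is 1 << intermediate_bits, i.e. 16 for 10-bit and 4 for 12-bit):
//   tmp[x] = (src[x] << intermediate_bits) - PREP_BIAS;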
function prep_sve
movrel x9, prep_tbl
mov w6, #19
ldrsw x8, [x9, x8, lsl #2]
sub w6, w6, w7, lsr #8 // 19 - bdmax / 256
add x9, x9, x8
movi v30.8h, #PREP_BIAS_NEG
dup v29.8h, w6 // 10b: 1 << 4, 12b: 1 << 2
br x9
.align JUMP_ALIGN
40: // prep - 4xN
AARCH64_VALID_JUMP_TARGET
.align LOOP_ALIGN
4:
ldr d0, [x1]
ldr d1, [x1, x2]
add x1, x1, x2, lsl #1
subs w4, w4, #2
mad z0.h, p0/m, z29.h, z30.h
mad z1.h, p0/m, z29.h, z30.h
stp d0, d1, [x0], #16
b.gt 4b
ret
.align JUMP_ALIGN
80: // prep - 8xN
AARCH64_VALID_JUMP_TARGET
.align LOOP_ALIGN
8:
ld1 {v0.8h}, [x1], x2
ld1 {v1.8h}, [x1], x2
subs w4, w4, #2
mad z0.h, p0/m, z29.h, z30.h
mad z1.h, p0/m, z29.h, z30.h
stp q0, q1, [x0], #32
b.gt 8b
ret
.align JUMP_ALIGN
160: // prep - 16xN
AARCH64_VALID_JUMP_TARGET
.align LOOP_ALIGN
16:
ld1 {v0.8h, v1.8h}, [x1], x2
mad z0.h, p0/m, z29.h, z30.h
mad z1.h, p0/m, z29.h, z30.h
subs w4, w4, #2
ld1 {v2.8h, v3.8h}, [x1], x2
mad z2.h, p0/m, z29.h, z30.h
mad z3.h, p0/m, z29.h, z30.h
stp q0, q1, [x0]
stp q2, q3, [x0, #32]
add x0, x0, #64
b.gt 16b
ret
.align JUMP_ALIGN
320: // prep - 32xN
AARCH64_VALID_JUMP_TARGET
.align LOOP_ALIGN
32:
ldp q0, q1, [x1]
mad z0.h, p0/m, z29.h, z30.h
mad z1.h, p0/m, z29.h, z30.h
ldp q2, q3, [x1, #32]
subs w4, w4, #1
mad z2.h, p0/m, z29.h, z30.h
mad z3.h, p0/m, z29.h, z30.h
add x1, x1, x2
stp q0, q1, [x0]
stp q2, q3, [x0, #32]
add x0, x0, #64
b.gt 32b
ret
.align JUMP_ALIGN
640: // prep - 64xN
AARCH64_VALID_JUMP_TARGET
.align LOOP_ALIGN
64:
ldp q0, q1, [x1]
mad z0.h, p0/m, z29.h, z30.h
mad z1.h, p0/m, z29.h, z30.h
ldp q2, q3, [x1, #32]
mad z2.h, p0/m, z29.h, z30.h
mad z3.h, p0/m, z29.h, z30.h
ldp q4, q5, [x1, #64]
mad z4.h, p0/m, z29.h, z30.h
mad z5.h, p0/m, z29.h, z30.h
ldp q6, q7, [x1, #96]
add x1, x1, x2
subs w4, w4, #1
mad z6.h, p0/m, z29.h, z30.h
mad z7.h, p0/m, z29.h, z30.h
stp q0, q1, [x0]
stp q2, q3, [x0, #32]
stp q4, q5, [x0, #64]
stp q6, q7, [x0, #96]
add x0, x0, #128
b.gt 64b
ret
.align JUMP_ALIGN
1280: // prep - 128xN
AARCH64_VALID_JUMP_TARGET
.align LOOP_ALIGN
128:
ldp q0, q1, [x1]
mad z0.h, p0/m, z29.h, z30.h
mad z1.h, p0/m, z29.h, z30.h
ldp q2, q3, [x1, #32]
mad z2.h, p0/m, z29.h, z30.h
mad z3.h, p0/m, z29.h, z30.h
ldp q4, q5, [x1, #64]
mad z4.h, p0/m, z29.h, z30.h
mad z5.h, p0/m, z29.h, z30.h
ldp q6, q7, [x1, #96]
mad z6.h, p0/m, z29.h, z30.h
mad z7.h, p0/m, z29.h, z30.h
ldp q16, q17, [x1, #128]
mad z16.h, p0/m, z29.h, z30.h
mad z17.h, p0/m, z29.h, z30.h
ldp q18, q19, [x1, #160]
mad z18.h, p0/m, z29.h, z30.h
mad z19.h, p0/m, z29.h, z30.h
ldp q20, q21, [x1, #192]
mad z20.h, p0/m, z29.h, z30.h
mad z21.h, p0/m, z29.h, z30.h
ldp q22, q23, [x1, #224]
add x1, x1, x2
mad z22.h, p0/m, z29.h, z30.h
mad z23.h, p0/m, z29.h, z30.h
subs w4, w4, #1
stp q0, q1, [x0]
stp q2, q3, [x0, #32]
stp q4, q5, [x0, #64]
stp q6, q7, [x0, #96]
stp q16, q17, [x0, #128]
stp q18, q19, [x0, #160]
stp q20, q21, [x0, #192]
stp q22, q23, [x0, #224]
add x0, x0, #256
b.gt 128b
ret
endfunc
jumptable prep_tbl
.word 1280b - prep_tbl
.word 640b - prep_tbl
.word 320b - prep_tbl
.word 160b - prep_tbl
.word 80b - prep_tbl
.word 40b - prep_tbl
endjumptable
// dst(x0), d_strd(x9), src(x1), s_strd(x2), w(w3), h(w4), mx(w5), my(w6), bdmax(w7)
// xmx(x5), xmy(x6), ldst(x5), lsrc(x6), wd_strd(w9), ws_strd(w2)
filter_8tap_fn prep, sve2, x0, x9, x1, x2, w3, w4, w5, w6, w7, x5, x6, x5, x6, w9, w2
// dst(x0) d_strd(x1) src(x2) s_strd(x3) w(w4) h(w5) mx(w6) my(w7), bdmax(w8)
// xmx(x6), xmy(x7), ldst(x6), lsrc(x7), wd_strd(w1), ws_strd(w3)
filter_8tap_fn put, sve2, x0, x1, x2, x3, w4, w5, w6, w7, w8, x6, x7, x6, x7, w1, w3
DISABLE_SVE2
DISABLE_SVE
#endif // HAVE_SVE2
// File: third_party/dav1d/src/arm/64/filmgrain.S
/*
* Copyright © 2021, VideoLAN and dav1d authors
* Copyright © 2021, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm.S"
#include "util.S"
#include "src/arm/asm-offsets.h"
#define GRAIN_WIDTH 82
#define GRAIN_HEIGHT 73
#define SUB_GRAIN_WIDTH 44
#define SUB_GRAIN_HEIGHT 38
.macro increment_seed steps, shift=1
lsr w11, w2, #3
lsr w12, w2, #12
lsr w13, w2, #1
eor w11, w2, w11 // (r >> 0) ^ (r >> 3)
eor w12, w12, w13 // (r >> 12) ^ (r >> 1)
eor w11, w11, w12 // (r >> 0) ^ (r >> 3) ^ (r >> 12) ^ (r >> 1)
.if \shift
lsr w2, w2, #\steps
.endif
and w11, w11, #((1 << \steps) - 1) // bit
.if \shift
orr w2, w2, w11, lsl #(16 - \steps) // *state
.else
orr w2, w2, w11, lsl #16 // *state
.endif
.endm
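// increment_seed/read_rand implement the film grain LFSR from the AV1 spec.
// For reference, a single-step update looks roughly like this (a sketch;
// increment_seed batches \steps iterations by computing several feedback
// bits at once):
//   bit   = ((r >> 0) ^ (r >> 1) ^ (r >> 3) ^ (r >> 12)) & 1;
//   r     = (r >> 1) | (bit << 15);
//   value = (r >> (16 - bits)) & ((1 << bits) - 1);  // what read_rand extracts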
.macro read_rand dest, bits, age
ubfx \dest, x2, #16 - \bits - \age, #\bits
.endm
.macro read_shift_rand dest, bits
ubfx \dest, x2, #17 - \bits, #\bits
lsr w2, w2, #1
.endm
// special calling convention:
// w2 holds seed
// x3 holds dav1d_gaussian_sequence
// clobbers x11-x15
// returns in v0.8h
function get_gaussian_neon
increment_seed 4
read_rand x14, 11, 3
read_rand x15, 11, 2
add x14, x3, x14, lsl #1
add x15, x3, x15, lsl #1
ld1 {v0.h}[0], [x14]
read_rand x14, 11, 1
ld1 {v0.h}[1], [x15]
add x14, x3, x14, lsl #1
read_rand x15, 11, 0
increment_seed 4
add x15, x3, x15, lsl #1
ld1 {v0.h}[2], [x14]
read_rand x14, 11, 3
ld1 {v0.h}[3], [x15]
add x14, x3, x14, lsl #1
read_rand x15, 11, 2
ld1 {v0.h}[4], [x14]
add x15, x3, x15, lsl #1
read_rand x14, 11, 1
ld1 {v0.h}[5], [x15]
read_rand x15, 11, 0
add x14, x3, x14, lsl #1
add x15, x3, x15, lsl #1
ld1 {v0.h}[6], [x14]
ld1 {v0.h}[7], [x15]
ret
endfunc
.macro get_grain_row r0, r1, r2, r3, r4, r5
bl get_gaussian_neon
srshl \r5\().8h, v0.8h, v31.8h
xtn \r0\().8b, \r5\().8h
bl get_gaussian_neon
srshl \r5\().8h, v0.8h, v31.8h
xtn2 \r0\().16b, \r5\().8h
bl get_gaussian_neon
srshl \r5\().8h, v0.8h, v31.8h
xtn \r1\().8b, \r5\().8h
bl get_gaussian_neon
srshl \r5\().8h, v0.8h, v31.8h
xtn2 \r1\().16b, \r5\().8h
bl get_gaussian_neon
srshl \r5\().8h, v0.8h, v31.8h
xtn \r2\().8b, \r5\().8h
bl get_gaussian_neon
srshl \r5\().8h, v0.8h, v31.8h
xtn2 \r2\().16b, \r5\().8h
bl get_gaussian_neon
srshl \r5\().8h, v0.8h, v31.8h
xtn \r3\().8b, \r5\().8h
bl get_gaussian_neon
srshl \r5\().8h, v0.8h, v31.8h
xtn2 \r3\().16b, \r5\().8h
bl get_gaussian_neon
srshl \r5\().8h, v0.8h, v31.8h
xtn \r4\().8b, \r5\().8h
bl get_gaussian_neon
srshl \r5\().8h, v0.8h, v31.8h
xtn2 \r4\().16b, \r5\().8h
increment_seed 2
read_rand x14, 11, 1
read_rand x15, 11, 0
add x14, x3, x14, lsl #1
add x15, x3, x15, lsl #1
ld1 {\r5\().h}[0], [x14]
ld1 {\r5\().h}[1], [x15]
srshl v0.4h, \r5\().4h, v31.4h
xtn \r5\().8b, v0.8h
.endm
.macro store_grain_row r0, r1, r2, r3, r4, r5
st1 {\r0\().16b,\r1\().16b}, [x0], #32
st1 {\r2\().16b,\r3\().16b}, [x0], #32
st1 {\r4\().16b}, [x0], #16
st1 {\r5\().h}[0], [x0], #2
.endm
.macro get_grain_row_44 r0, r1, r2
bl get_gaussian_neon
srshl \r2\().8h, v0.8h, v31.8h
xtn \r0\().8b, \r2\().8h
bl get_gaussian_neon
srshl \r2\().8h, v0.8h, v31.8h
xtn2 \r0\().16b, \r2\().8h
bl get_gaussian_neon
srshl \r2\().8h, v0.8h, v31.8h
xtn \r1\().8b, \r2\().8h
bl get_gaussian_neon
srshl \r2\().8h, v0.8h, v31.8h
xtn2 \r1\().16b, \r2\().8h
bl get_gaussian_neon
srshl \r2\().8h, v0.8h, v31.8h
xtn \r2\().8b, \r2\().8h
increment_seed 4
read_rand x14, 11, 3
read_rand x15, 11, 2
add x14, x3, x14, lsl #1
add x15, x3, x15, lsl #1
ld1 {v0.h}[0], [x14]
read_rand x14, 11, 1
ld1 {v0.h}[1], [x15]
read_rand x15, 11, 0
add x14, x3, x14, lsl #1
add x15, x3, x15, lsl #1
ld1 {v0.h}[2], [x14]
ld1 {v0.h}[3], [x15]
srshl v0.4h, v0.4h, v31.4h
xtn2 \r2\().16b, v0.8h
.endm
.macro store_grain_row_44 r0, r1, r2
st1 {\r0\().16b,\r1\().16b}, [x0], #32
st1 {\r2\().16b}, [x0]
add x0, x0, #GRAIN_WIDTH-32
.endm
function get_grain_2_neon
increment_seed 2
read_rand x14, 11, 1
read_rand x15, 11, 0
add x14, x3, x14, lsl #1
add x15, x3, x15, lsl #1
ld1 {v0.h}[0], [x14]
ld1 {v0.h}[1], [x15]
srshl v0.4h, v0.4h, v31.4h
xtn v0.8b, v0.8h
ret
endfunc
.macro get_grain_2 dst
bl get_grain_2_neon
.ifnc \dst, v0
mov \dst\().8b, v0.8b
.endif
.endm
// w15 holds the number of entries to produce
// w14, w16 and w17 hold the previous output entries
// v0 holds the vector of produced entries
// v1 holds the input vector of sums from above
.macro output_lag n
function output_lag\n\()_neon
1:
read_shift_rand x13, 11
mov w11, v1.s[0]
ldrsh w12, [x3, x13, lsl #1]
ext v0.16b, v0.16b, v0.16b, #1
.if \n == 1
madd w11, w14, w4, w11 // sum (above) + *coeff * prev output
.elseif \n == 2
madd w11, w16, w4, w11 // sum (above) + *coeff * prev output 1
madd w11, w14, w17, w11 // += *coeff * prev output 2
mov w16, w14
.else
madd w11, w17, w4, w11 // sum (above) + *coeff * prev output 1
madd w11, w16, w20, w11 // sum (above) + *coeff * prev output 2
madd w11, w14, w21, w11 // += *coeff * prev output 3
mov w17, w16
mov w16, w14
.endif
add w14, w11, w8 // 1 << (ar_coeff_shift - 1)
add w12, w12, w10 // 1 << (4 + grain_scale_shift - 1)
asr w14, w14, w7 // >> ar_coeff_shift
asr w12, w12, w9 // >> (4 + grain_scale_shift)
add w14, w14, w12
cmp w14, w5
csel w14, w14, w5, le
cmp w14, w6
csel w14, w14, w6, ge
subs w15, w15, #1
ext v1.16b, v1.16b, v1.16b, #4
ins v0.b[15], w14
b.gt 1b
ret
endfunc
.endm
output_lag 1
output_lag 2
output_lag 3
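// Each output_lagN iteration produces one auto-regressive grain sample.
// Roughly (a sketch; grain_min/grain_max are -128/127 for 8 bpc here):
//   noise = round2(gaussian_sequence[rand11], 4 + grain_scale_shift);
//   sum   = sum_from_rows_above + ar_coeff * previous_output(s);
//   out   = clamp(noise + round2(sum, ar_coeff_shift), grain_min, grain_max);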
function sum_lag1_above_neon
smull v2.8h, v3.8b, v28.8b
smull2 v3.8h, v3.16b, v28.16b
smull v4.8h, v0.8b, v27.8b
smull2 v5.8h, v0.16b, v27.16b
smull v6.8h, v1.8b, v29.8b
smull2 v7.8h, v1.16b, v29.16b
saddl v0.4s, v2.4h, v4.4h
saddl2 v1.4s, v2.8h, v4.8h
saddl v2.4s, v3.4h, v5.4h
saddl2 v3.4s, v3.8h, v5.8h
saddw v4.4s, v0.4s, v6.4h
saddw2 v5.4s, v1.4s, v6.8h
saddw v6.4s, v2.4s, v7.4h
saddw2 v7.4s, v3.4s, v7.8h
ret
endfunc
.macro sum_lag_n_body lag, type, uv_layout, edge, elems, store, uv_coeff
bl sum_\lag\()_above_neon
.ifc \type, uv_420
add x12, x19, #GRAIN_WIDTH
ld1 {v22.16b, v23.16b}, [x19], #32
ld1 {v24.16b, v25.16b}, [x12]
saddlp v22.8h, v22.16b
saddlp v23.8h, v23.16b
saddlp v24.8h, v24.16b
saddlp v25.8h, v25.16b
add v22.8h, v22.8h, v24.8h
add v23.8h, v23.8h, v25.8h
rshrn v0.8b, v22.8h, #2
rshrn2 v0.16b, v23.8h, #2
.endif
.ifc \type, uv_422
ld1 {v22.16b, v23.16b}, [x19], #32
saddlp v22.8h, v22.16b
saddlp v23.8h, v23.16b
rshrn v0.8b, v22.8h, #1
rshrn2 v0.16b, v23.8h, #1
.endif
.ifc \type, uv_444
ld1 {v0.16b}, [x19], #16
.endif
.if \uv_layout
.ifnb \uv_coeff
dup v1.16b, \uv_coeff
smull v2.8h, v0.8b, v1.8b
smull2 v3.8h, v0.16b, v1.16b
.else
smull v2.8h, v0.8b, v30.8b
smull2 v3.8h, v0.16b, v30.16b
.endif
saddw v4.4s, v4.4s, v2.4h
saddw2 v5.4s, v5.4s, v2.8h
saddw v6.4s, v6.4s, v3.4h
saddw2 v7.4s, v7.4s, v3.8h
.endif
.if \uv_layout && \elems == 16
b sum_\lag\()_y_\edge\()_start
.elseif \uv_layout == 444 && \elems == 15
b sum_\lag\()_y_\edge\()_start
.elseif \uv_layout == 422 && \elems == 9
b sum_\lag\()_uv_420_\edge\()_start
.else
sum_\lag\()_\type\()_\edge\()_start:
.ifc \edge, left
increment_seed 4
read_rand x12, 11, 3
read_rand x13, 11, 2
read_rand x14, 11, 1
add x12, x3, x12, lsl #1
add x13, x3, x13, lsl #1
add x14, x3, x14, lsl #1
ld1 {v0.h}[5], [x12]
ld1 {v0.h}[6], [x13]
ld1 {v0.h}[7], [x14]
lsl x2, x2, #1 // shift back the state as if we'd done increment_seed with shift=0
srshl v0.8h, v0.8h, v31.8h
xtn2 v0.16b, v0.8h
ext v4.16b, v4.16b, v4.16b, #12
.ifc \lag, lag3
smov w17, v0.b[13]
.endif
.ifnc \lag, lag1
smov w16, v0.b[14]
.endif
smov w14, v0.b[15]
mov v1.16b, v4.16b
mov w15, #1
bl output_\lag\()_neon
.else
increment_seed 4, shift=0
mov v1.16b, v4.16b
mov w15, #4
bl output_\lag\()_neon
.endif
increment_seed 4, shift=0
mov v1.16b, v5.16b
mov w15, #4
bl output_\lag\()_neon
increment_seed 4, shift=0
mov v1.16b, v6.16b
.if \elems == 9
mov w15, #1
bl output_\lag\()_neon
lsr w2, w2, #3
read_rand x12, 11, 2
read_rand x13, 11, 1
read_rand x14, 11, 0
add x12, x3, x12, lsl #1
add x13, x3, x13, lsl #1
add x14, x3, x14, lsl #1
ld1 {v1.h}[0], [x12]
ld1 {v1.h}[1], [x13]
ld1 {v1.h}[2], [x14]
srshl v1.4h, v1.4h, v31.4h
xtn v1.8b, v1.8h
ext v0.16b, v0.16b, v1.16b, #7
.else
mov w15, #4
bl output_\lag\()_neon
increment_seed 4, shift=0
mov v1.16b, v7.16b
.ifc \edge, right
mov w15, #3
bl output_\lag\()_neon
read_shift_rand x15, 11
add x15, x3, x15, lsl #1
ld1 {v1.h}[0], [x15]
srshl v1.4h, v1.4h, v31.4h
ext v0.16b, v0.16b, v1.16b, #1
.else
mov w15, #4
bl output_\lag\()_neon
.endif
.endif
.if \store
st1 {v0.16b}, [x0], #16
.endif
ldr x30, [sp], #16
AARCH64_VALIDATE_LINK_REGISTER
ret
.endif
.endm
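// For the chroma variants, sum_lag_n_body first derives a luma value that is
// added into the AR sum: uv_444 reads luma directly, uv_422 averages
// horizontal pairs (round2(.., 1)) and uv_420 averages a 2x2 block
// (round2(.., 2)), each weighted by the trailing luma coefficient of
// ar_coeffs_uv. A sketch of the 420 case:
//   luma = round2(l[2*x] + l[2*x + 1] + l[2*x + stride] + l[2*x + 1 + stride], 2);
//   sum += ar_coeff_uv_luma * luma;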
.macro sum_lag1_func type, uv_layout, edge, elems=16
function sum_\type\()_lag1_\edge\()_neon
AARCH64_SIGN_LINK_REGISTER
str x30, [sp, #-16]!
sum_lag_n_body lag1, \type, \uv_layout, \edge, \elems, store=0
endfunc
.endm
sum_lag1_func y, 0, left
sum_lag1_func y, 0, mid
sum_lag1_func y, 0, right, 15
sum_lag1_func uv_444, 444, left
sum_lag1_func uv_444, 444, mid
sum_lag1_func uv_444, 444, right, 15
sum_lag1_func uv_422, 422, left
sum_lag1_func uv_422, 422, mid
sum_lag1_func uv_422, 422, right, 9
sum_lag1_func uv_420, 420, left
sum_lag1_func uv_420, 420, mid
sum_lag1_func uv_420, 420, right, 9
.macro sum_lag1 type, dst, left, mid, right, edge=mid
mov v3.16b, \mid\().16b
ext v0.16b, \left\().16b, \mid\().16b, #15
ext v1.16b, \mid\().16b, \right\().16b, #1
bl sum_\type\()_lag1_\edge\()_neon
mov \dst\().16b, v0.16b
.endm
.macro sum_y_lag1 dst, left, mid, right, edge=mid
sum_lag1 y, \dst, \left, \mid, \right, \edge
.endm
.macro sum_uv_444_lag1 dst, left, mid, right, edge=mid
sum_lag1 uv_444, \dst, \left, \mid, \right, \edge
.endm
.macro sum_uv_422_lag1 dst, left, mid, right, edge=mid
sum_lag1 uv_422, \dst, \left, \mid, \right, \edge
.endm
.macro sum_uv_420_lag1 dst, left, mid, right, edge=mid
sum_lag1 uv_420, \dst, \left, \mid, \right, \edge
.endm
function sum_lag2_above_neon
sub x12, x0, #2*GRAIN_WIDTH - 16
sub x13, x0, #1*GRAIN_WIDTH - 16
ld1 {v18.16b}, [x12] // load top right
ld1 {v21.16b}, [x13]
ext v22.16b, v16.16b, v17.16b, #14 // top left, top mid
dup v26.16b, v30.b[0]
ext v23.16b, v16.16b, v17.16b, #15
dup v27.16b, v30.b[1]
ext v0.16b, v17.16b, v18.16b, #1 // top mid, top right
dup v28.16b, v30.b[3]
ext v1.16b, v17.16b, v18.16b, #2
dup v29.16b, v30.b[4]
smull v2.8h, v22.8b, v26.8b
smull2 v3.8h, v22.16b, v26.16b
smull v4.8h, v23.8b, v27.8b
smull2 v5.8h, v23.16b, v27.16b
smull v6.8h, v0.8b, v28.8b
smull2 v7.8h, v0.16b, v28.16b
smull v0.8h, v1.8b, v29.8b
smull2 v1.8h, v1.16b, v29.16b
saddl v22.4s, v2.4h, v4.4h
saddl2 v23.4s, v2.8h, v4.8h
saddl v26.4s, v3.4h, v5.4h
saddl2 v27.4s, v3.8h, v5.8h
saddl v2.4s, v0.4h, v6.4h
saddl2 v3.4s, v0.8h, v6.8h
saddl v6.4s, v1.4h, v7.4h
saddl2 v7.4s, v1.8h, v7.8h
add v4.4s, v22.4s, v2.4s
add v5.4s, v23.4s, v3.4s
add v6.4s, v26.4s, v6.4s
add v7.4s, v27.4s, v7.4s
ext v22.16b, v19.16b, v20.16b, #14 // top left, top mid
dup v26.16b, v30.b[5]
ext v23.16b, v19.16b, v20.16b, #15
dup v27.16b, v30.b[6]
ext v0.16b, v20.16b, v21.16b, #1 // top mid, top right
dup v28.16b, v30.b[8]
ext v1.16b, v20.16b, v21.16b, #2
dup v29.16b, v30.b[9]
smull v2.8h, v22.8b, v26.8b
smull2 v3.8h, v22.16b, v26.16b
smull v22.8h, v23.8b, v27.8b
smull2 v23.8h, v23.16b, v27.16b
smull v26.8h, v0.8b, v28.8b
smull2 v27.8h, v0.16b, v28.16b
smull v28.8h, v1.8b, v29.8b
smull2 v29.8h, v1.16b, v29.16b
saddl v0.4s, v2.4h, v22.4h
saddl2 v1.4s, v2.8h, v22.8h
saddl v2.4s, v3.4h, v23.4h
saddl2 v3.4s, v3.8h, v23.8h
saddl v22.4s, v26.4h, v28.4h
saddl2 v23.4s, v26.8h, v28.8h
saddl v26.4s, v27.4h, v29.4h
saddl2 v27.4s, v27.8h, v29.8h
add v0.4s, v0.4s, v22.4s
add v1.4s, v1.4s, v23.4s
add v2.4s, v2.4s, v26.4s
add v3.4s, v3.4s, v27.4s
dup v26.16b, v30.b[2]
dup v27.16b, v30.b[7]
smull v22.8h, v17.8b, v26.8b
smull2 v23.8h, v17.16b, v26.16b
smull v24.8h, v20.8b, v27.8b
smull2 v25.8h, v20.16b, v27.16b
add v4.4s, v4.4s, v0.4s
add v5.4s, v5.4s, v1.4s
add v6.4s, v6.4s, v2.4s
add v7.4s, v7.4s, v3.4s
mov v16.16b, v17.16b
mov v17.16b, v18.16b
saddl v0.4s, v22.4h, v24.4h
saddl2 v1.4s, v22.8h, v24.8h
saddl v2.4s, v23.4h, v25.4h
saddl2 v3.4s, v23.8h, v25.8h
mov v19.16b, v20.16b
mov v20.16b, v21.16b
add v4.4s, v4.4s, v0.4s
add v5.4s, v5.4s, v1.4s
add v6.4s, v6.4s, v2.4s
add v7.4s, v7.4s, v3.4s
ret
endfunc
.macro sum_lag2_func type, uv_layout, edge, elems=16
function sum_\type\()_lag2_\edge\()_neon
AARCH64_SIGN_LINK_REGISTER
str x30, [sp, #-16]!
.ifc \edge, left
sub x12, x0, #2*GRAIN_WIDTH
sub x13, x0, #1*GRAIN_WIDTH
ld1 {v17.16b}, [x12] // load the previous block right above
ld1 {v20.16b}, [x13]
.endif
sum_lag_n_body lag2, \type, \uv_layout, \edge, \elems, store=1, uv_coeff=v30.b[12]
endfunc
.endm
sum_lag2_func y, 0, left
sum_lag2_func y, 0, mid
sum_lag2_func y, 0, right, 15
sum_lag2_func uv_444, 444, left
sum_lag2_func uv_444, 444, mid
sum_lag2_func uv_444, 444, right, 15
sum_lag2_func uv_422, 422, left
sum_lag2_func uv_422, 422, mid
sum_lag2_func uv_422, 422, right, 9
sum_lag2_func uv_420, 420, left
sum_lag2_func uv_420, 420, mid
sum_lag2_func uv_420, 420, right, 9
function sum_lag3_above_neon
sub x11, x0, #3*GRAIN_WIDTH - 16
sub x12, x0, #2*GRAIN_WIDTH - 16
sub x13, x0, #1*GRAIN_WIDTH - 16
ld1 {v15.16b}, [x11] // load top right
ld1 {v18.16b}, [x12]
ld1 {v21.16b}, [x13]
ext v8.16b, v13.16b, v14.16b, #13 // top left, top mid
dup v22.16b, v29.b[0]
ext v9.16b, v13.16b, v14.16b, #14
dup v23.16b, v29.b[1]
ext v10.16b, v13.16b, v14.16b, #15
dup v24.16b, v29.b[2]
dup v25.16b, v29.b[3]
ext v11.16b, v14.16b, v15.16b, #1 // top mid, top right
dup v26.16b, v29.b[4]
ext v12.16b, v14.16b, v15.16b, #2
dup v27.16b, v29.b[5]
ext v13.16b, v14.16b, v15.16b, #3
dup v28.16b, v29.b[6]
smull v0.8h, v8.8b, v22.8b
smull2 v1.8h, v8.16b, v22.16b
smull v2.8h, v9.8b, v23.8b
smull2 v3.8h, v9.16b, v23.16b
smull v8.8h, v10.8b, v24.8b
smull2 v9.8h, v10.16b, v24.16b
smull v10.8h, v11.8b, v26.8b
smull2 v11.8h, v11.16b, v26.16b
saddl v22.4s, v0.4h, v2.4h
saddl2 v23.4s, v0.8h, v2.8h
saddl v24.4s, v1.4h, v3.4h
saddl2 v26.4s, v1.8h, v3.8h
saddl v0.4s, v8.4h, v10.4h
saddl2 v1.4s, v8.8h, v10.8h
saddl v2.4s, v9.4h, v11.4h
saddl2 v3.4s, v9.8h, v11.8h
smull v8.8h, v12.8b, v27.8b
smull2 v9.8h, v12.16b, v27.16b
smull v10.8h, v13.8b, v28.8b
smull2 v11.8h, v13.16b, v28.16b
smull v12.8h, v14.8b, v25.8b
smull2 v13.8h, v14.16b, v25.16b
add v4.4s, v22.4s, v0.4s
add v5.4s, v23.4s, v1.4s
add v6.4s, v24.4s, v2.4s
add v7.4s, v26.4s, v3.4s
saddl v0.4s, v8.4h, v10.4h
saddl2 v1.4s, v8.8h, v10.8h
saddl v2.4s, v9.4h, v11.4h
saddl2 v3.4s, v9.8h, v11.8h
add v4.4s, v4.4s, v0.4s
add v5.4s, v5.4s, v1.4s
add v6.4s, v6.4s, v2.4s
add v7.4s, v7.4s, v3.4s
saddw v4.4s, v4.4s, v12.4h
saddw2 v5.4s, v5.4s, v12.8h
saddw v6.4s, v6.4s, v13.4h
saddw2 v7.4s, v7.4s, v13.8h
ext v8.16b, v16.16b, v17.16b, #13 // top left, top mid
dup v22.16b, v29.b[7]
ext v9.16b, v16.16b, v17.16b, #14
dup v23.16b, v29.b[8]
ext v10.16b, v16.16b, v17.16b, #15
dup v24.16b, v29.b[9]
dup v25.16b, v29.b[10]
ext v11.16b, v17.16b, v18.16b, #1 // top mid, top right
dup v26.16b, v29.b[11]
ext v12.16b, v17.16b, v18.16b, #2
dup v27.16b, v29.b[12]
ext v13.16b, v17.16b, v18.16b, #3
dup v28.16b, v29.b[13]
smull v0.8h, v8.8b, v22.8b
smull2 v1.8h, v8.16b, v22.16b
smull v2.8h, v9.8b, v23.8b
smull2 v3.8h, v9.16b, v23.16b
smull v8.8h, v10.8b, v24.8b
smull2 v9.8h, v10.16b, v24.16b
smull v10.8h, v11.8b, v26.8b
smull2 v11.8h, v11.16b, v26.16b
saddl v22.4s, v0.4h, v2.4h
saddl2 v23.4s, v0.8h, v2.8h
saddl v24.4s, v1.4h, v3.4h
saddl2 v26.4s, v1.8h, v3.8h
saddl v0.4s, v8.4h, v10.4h
saddl2 v1.4s, v8.8h, v10.8h
saddl v2.4s, v9.4h, v11.4h
saddl2 v3.4s, v9.8h, v11.8h
smull v8.8h, v12.8b, v27.8b
smull2 v9.8h, v12.16b, v27.16b
smull v10.8h, v13.8b, v28.8b
smull2 v11.8h, v13.16b, v28.16b
smull v12.8h, v17.8b, v25.8b
smull2 v13.8h, v17.16b, v25.16b
add v22.4s, v22.4s, v0.4s
add v23.4s, v23.4s, v1.4s
add v24.4s, v24.4s, v2.4s
add v26.4s, v26.4s, v3.4s
saddl v0.4s, v8.4h, v10.4h
saddl2 v1.4s, v8.8h, v10.8h
saddl v2.4s, v9.4h, v11.4h
saddl2 v3.4s, v9.8h, v11.8h
add v4.4s, v4.4s, v22.4s
add v5.4s, v5.4s, v23.4s
add v6.4s, v6.4s, v24.4s
add v7.4s, v7.4s, v26.4s
add v4.4s, v4.4s, v0.4s
add v5.4s, v5.4s, v1.4s
add v6.4s, v6.4s, v2.4s
add v7.4s, v7.4s, v3.4s
saddw v4.4s, v4.4s, v12.4h
saddw2 v5.4s, v5.4s, v12.8h
saddw v6.4s, v6.4s, v13.4h
saddw2 v7.4s, v7.4s, v13.8h
ext v8.16b, v19.16b, v20.16b, #13 // top left, top mid
dup v22.16b, v29.b[14]
ext v9.16b, v19.16b, v20.16b, #14
dup v23.16b, v29.b[15]
ext v10.16b, v19.16b, v20.16b, #15
dup v24.16b, v30.b[0]
dup v25.16b, v30.b[1]
ext v11.16b, v20.16b, v21.16b, #1 // top mid, top right
dup v26.16b, v30.b[2]
ext v12.16b, v20.16b, v21.16b, #2
dup v27.16b, v30.b[3]
ext v13.16b, v20.16b, v21.16b, #3
dup v28.16b, v30.b[4]
smull v0.8h, v8.8b, v22.8b
smull2 v1.8h, v8.16b, v22.16b
smull v2.8h, v9.8b, v23.8b
smull2 v3.8h, v9.16b, v23.16b
smull v8.8h, v10.8b, v24.8b
smull2 v9.8h, v10.16b, v24.16b
smull v10.8h, v11.8b, v26.8b
smull2 v11.8h, v11.16b, v26.16b
saddl v22.4s, v0.4h, v2.4h
saddl2 v23.4s, v0.8h, v2.8h
saddl v24.4s, v1.4h, v3.4h
saddl2 v26.4s, v1.8h, v3.8h
saddl v0.4s, v8.4h, v10.4h
saddl2 v1.4s, v8.8h, v10.8h
saddl v2.4s, v9.4h, v11.4h
saddl2 v3.4s, v9.8h, v11.8h
smull v8.8h, v12.8b, v27.8b
smull2 v9.8h, v12.16b, v27.16b
smull v10.8h, v13.8b, v28.8b
smull2 v11.8h, v13.16b, v28.16b
smull v12.8h, v20.8b, v25.8b
smull2 v19.8h, v20.16b, v25.16b
add v22.4s, v22.4s, v0.4s
add v23.4s, v23.4s, v1.4s
add v24.4s, v24.4s, v2.4s
add v26.4s, v26.4s, v3.4s
saddl v0.4s, v8.4h, v10.4h
saddl2 v1.4s, v8.8h, v10.8h
saddl v2.4s, v9.4h, v11.4h
saddl2 v3.4s, v9.8h, v11.8h
add v4.4s, v4.4s, v22.4s
add v5.4s, v5.4s, v23.4s
add v6.4s, v6.4s, v24.4s
add v7.4s, v7.4s, v26.4s
mov v13.16b, v14.16b
mov v14.16b, v15.16b
add v4.4s, v4.4s, v0.4s
add v5.4s, v5.4s, v1.4s
add v6.4s, v6.4s, v2.4s
add v7.4s, v7.4s, v3.4s
mov v16.16b, v17.16b
mov v17.16b, v18.16b
saddw v4.4s, v4.4s, v12.4h
saddw2 v5.4s, v5.4s, v12.8h
saddw v6.4s, v6.4s, v19.4h
saddw2 v7.4s, v7.4s, v19.8h
mov v19.16b, v20.16b
mov v20.16b, v21.16b
ret
endfunc
.macro sum_lag3_func type, uv_layout, edge, elems=16
function sum_\type\()_lag3_\edge\()_neon
AARCH64_SIGN_LINK_REGISTER
str x30, [sp, #-16]!
.ifc \edge, left
sub x11, x0, #3*GRAIN_WIDTH
sub x12, x0, #2*GRAIN_WIDTH
sub x13, x0, #1*GRAIN_WIDTH
ld1 {v14.16b}, [x11] // load the previous block right above
ld1 {v17.16b}, [x12]
ld1 {v20.16b}, [x13]
.endif
sum_lag_n_body lag3, \type, \uv_layout, \edge, \elems, store=1, uv_coeff=v30.b[8]
endfunc
.endm
sum_lag3_func y, 0, left
sum_lag3_func y, 0, mid
sum_lag3_func y, 0, right, 15
sum_lag3_func uv_444, 444, left
sum_lag3_func uv_444, 444, mid
sum_lag3_func uv_444, 444, right, 15
sum_lag3_func uv_422, 422, left
sum_lag3_func uv_422, 422, mid
sum_lag3_func uv_422, 422, right, 9
sum_lag3_func uv_420, 420, left
sum_lag3_func uv_420, 420, mid
sum_lag3_func uv_420, 420, right, 9
function generate_grain_rows_neon
AARCH64_SIGN_LINK_REGISTER
str x30, [sp, #-16]!
1:
get_grain_row v16, v17, v18, v19, v20, v21
subs w1, w1, #1
store_grain_row v16, v17, v18, v19, v20, v21
b.gt 1b
ldr x30, [sp], #16
AARCH64_VALIDATE_LINK_REGISTER
ret
endfunc
function generate_grain_rows_44_neon
AARCH64_SIGN_LINK_REGISTER
str x30, [sp, #-16]!
1:
get_grain_row_44 v16, v17, v18
subs w1, w1, #1
store_grain_row_44 v16, v17, v18
b.gt 1b
ldr x30, [sp], #16
AARCH64_VALIDATE_LINK_REGISTER
ret
endfunc
function get_grain_row_neon
AARCH64_SIGN_LINK_REGISTER
str x30, [sp, #-16]!
get_grain_row v16, v17, v18, v19, v20, v21
ldr x30, [sp], #16
AARCH64_VALIDATE_LINK_REGISTER
ret
endfunc
function get_grain_row_44_neon
AARCH64_SIGN_LINK_REGISTER
str x30, [sp, #-16]!
get_grain_row_44 v16, v17, v18
ldr x30, [sp], #16
AARCH64_VALIDATE_LINK_REGISTER
ret
endfunc
function add_uv_444_coeff_lag0_neon
add_coeff_lag0_start:
smull v2.8h, v0.8b, v27.8b
smull2 v3.8h, v0.16b, v27.16b
srshl v2.8h, v2.8h, v28.8h
srshl v3.8h, v3.8h, v28.8h
saddw v2.8h, v2.8h, v1.8b
saddw2 v3.8h, v3.8h, v1.16b
sqxtn v2.8b, v2.8h
sqxtn2 v2.16b, v3.8h
ret
endfunc
function add_uv_420_coeff_lag0_neon
ld1 {v4.16b, v5.16b}, [x19], #32
ld1 {v6.16b, v7.16b}, [x12], #32
saddlp v4.8h, v4.16b
saddlp v5.8h, v5.16b
saddlp v6.8h, v6.16b
saddlp v7.8h, v7.16b
add v4.8h, v4.8h, v6.8h
add v5.8h, v5.8h, v7.8h
rshrn v4.8b, v4.8h, #2
rshrn2 v4.16b, v5.8h, #2
and v0.16b, v4.16b, v0.16b
b add_coeff_lag0_start
endfunc
function add_uv_422_coeff_lag0_neon
ld1 {v4.16b, v5.16b}, [x19], #32
saddlp v4.8h, v4.16b
saddlp v5.8h, v5.16b
rshrn v4.8b, v4.8h, #1
rshrn2 v4.16b, v5.8h, #1
and v0.16b, v4.16b, v0.16b
b add_coeff_lag0_start
endfunc
.macro gen_grain_82 type
function generate_grain_\type\()_8bpc_neon, export=1
AARCH64_SIGN_LINK_REGISTER
stp x30, x19, [sp, #-96]!
.ifc \type, uv_444
mov w13, w3
mov w14, #28
add x19, x1, #3*GRAIN_WIDTH
mov x1, x2
mul w13, w13, w14
.endif
movrel x3, X(gaussian_sequence)
ldr w2, [x1, #FGD_SEED]
ldr w9, [x1, #FGD_GRAIN_SCALE_SHIFT]
.ifc \type, y
add x4, x1, #FGD_AR_COEFFS_Y
.else
add x4, x1, #FGD_AR_COEFFS_UV
.endif
movrel x16, gen_grain_\type\()_tbl
ldr w17, [x1, #FGD_AR_COEFF_LAG]
add w9, w9, #4
ldrsw x17, [x16, w17, uxtw #2]
dup v31.8h, w9 // 4 + data->grain_scale_shift
add x16, x16, x17
neg v31.8h, v31.8h
.ifc \type, uv_444
cmp w13, #0
mov w11, #0x49d8
mov w14, #0xb524
add x4, x4, w13, uxtw // Add offset to ar_coeffs_uv[1]
csel w11, w11, w14, ne
.endif
ldr w7, [x1, #FGD_AR_COEFF_SHIFT]
mov w8, #1
mov w10, #1
lsl w8, w8, w7 // 1 << ar_coeff_shift
lsl w10, w10, w9 // 1 << (4 + data->grain_scale_shift)
lsr w8, w8, #1 // 1 << (ar_coeff_shift - 1)
lsr w10, w10, #1 // 1 << (4 + data->grain_scale_shift - 1)
mov w5, #127
mov w6, #-128
.ifc \type, uv_444
eor w2, w2, w11
.endif
br x16
L(generate_grain_\type\()_lag0):
AARCH64_VALID_JUMP_TARGET
.ifc \type, y
mov w1, #GRAIN_HEIGHT
bl generate_grain_rows_neon
.else
dup v28.8h, w7
ld1r {v27.16b}, [x4] // ar_coeffs_uv[0]
movi v0.16b, #0
movi v1.16b, #255
ext v29.16b, v0.16b, v1.16b, #13
ext v30.16b, v1.16b, v0.16b, #1
neg v28.8h, v28.8h
mov w1, #3
bl generate_grain_rows_neon
mov w1, #GRAIN_HEIGHT-3
1:
ld1 {v22.16b, v23.16b, v24.16b, v25.16b}, [x19], #64
bl get_grain_row_neon
and v0.16b, v22.16b, v29.16b
mov v1.16b, v16.16b
bl add_uv_444_coeff_lag0_neon
mov v0.16b, v23.16b
mov v1.16b, v17.16b
mov v16.16b, v2.16b
bl add_uv_444_coeff_lag0_neon
ld1 {v26.16b}, [x19], #16
mov v0.16b, v24.16b
mov v1.16b, v18.16b
mov v17.16b, v2.16b
bl add_uv_444_coeff_lag0_neon
add x19, x19, #2
mov v0.16b, v25.16b
mov v1.16b, v19.16b
mov v18.16b, v2.16b
bl add_uv_444_coeff_lag0_neon
and v0.16b, v26.16b, v30.16b
mov v1.16b, v20.16b
mov v19.16b, v2.16b
bl add_uv_444_coeff_lag0_neon
mov v20.16b, v2.16b
subs w1, w1, #1
store_grain_row v16, v17, v18, v19, v20, v21
b.gt 1b
.endif
ldp x30, x19, [sp], #96
AARCH64_VALIDATE_LINK_REGISTER
ret
L(generate_grain_\type\()_lag1):
AARCH64_VALID_JUMP_TARGET
ld1r {v27.16b}, [x4], #1 // ar_coeffs_y[0]
ld1r {v28.16b}, [x4], #1 // ar_coeffs_y[1]
ld1r {v29.16b}, [x4] // ar_coeffs_y[2]
.ifc \type, y
ldrsb w4, [x4, #1] // ar_coeffs_y[3]
.else
add x4, x4, #2
.endif
mov w1, #3
.ifc \type, uv_444
ld1r {v30.16b}, [x4] // ar_coeffs_uv[4]
ldursb w4, [x4, #-1] // ar_coeffs_uv[3]
.endif
bl generate_grain_rows_neon
mov w1, #GRAIN_HEIGHT - 3
1:
sum_\type\()_lag1 v22, v16, v16, v17, left
sum_\type\()_lag1 v23, v16, v17, v18
sum_\type\()_lag1 v24, v17, v18, v19
sum_\type\()_lag1 v25, v18, v19, v20
sum_\type\()_lag1 v20, v19, v20, v21, right
get_grain_2 v21
subs w1, w1, #1
.ifc \type, uv_444
add x19, x19, #2
.endif
store_grain_row v22, v23, v24, v25, v20, v21
mov v16.16b, v22.16b
mov v17.16b, v23.16b
mov v18.16b, v24.16b
mov v19.16b, v25.16b
b.gt 1b
ldp x30, x19, [sp], #96
AARCH64_VALIDATE_LINK_REGISTER
ret
L(generate_grain_\type\()_lag2):
AARCH64_VALID_JUMP_TARGET
ld1 {v30.16b}, [x4] // ar_coeffs_y[0-11], ar_coeffs_uv[0-12]
smov w4, v30.b[10]
smov w17, v30.b[11]
mov w1, #3
bl generate_grain_rows_neon
mov w1, #GRAIN_HEIGHT - 3
1:
bl sum_\type\()_lag2_left_neon
bl sum_\type\()_lag2_mid_neon
bl sum_\type\()_lag2_mid_neon
bl sum_\type\()_lag2_mid_neon
bl sum_\type\()_lag2_right_neon
get_grain_2 v16
subs w1, w1, #1
.ifc \type, uv_444
add x19, x19, #2
.endif
st1 {v16.h}[0], [x0], #2
b.gt 1b
ldp x30, x19, [sp], #96
AARCH64_VALIDATE_LINK_REGISTER
ret
L(generate_grain_\type\()_lag3):
AARCH64_VALID_JUMP_TARGET
ld1 {v29.16b, v30.16b}, [x4] // ar_coeffs_y[0-23], ar_coeffs_uv[0-24]
stp d8, d9, [sp, #16]
stp d10, d11, [sp, #32]
stp d12, d13, [sp, #48]
stp d14, d15, [sp, #64]
stp x20, x21, [sp, #80]
smov w4, v30.b[5]
smov w20, v30.b[6]
smov w21, v30.b[7]
mov w1, #3
bl generate_grain_rows_neon
mov w1, #GRAIN_HEIGHT - 3
1:
bl sum_\type\()_lag3_left_neon
bl sum_\type\()_lag3_mid_neon
bl sum_\type\()_lag3_mid_neon
bl sum_\type\()_lag3_mid_neon
bl sum_\type\()_lag3_right_neon
get_grain_2 v16
subs w1, w1, #1
.ifc \type, uv_444
add x19, x19, #2
.endif
st1 {v16.h}[0], [x0], #2
b.gt 1b
ldp x20, x21, [sp, #80]
ldp d14, d15, [sp, #64]
ldp d12, d13, [sp, #48]
ldp d10, d11, [sp, #32]
ldp d8, d9, [sp, #16]
ldp x30, x19, [sp], #96
AARCH64_VALIDATE_LINK_REGISTER
ret
endfunc
jumptable gen_grain_\type\()_tbl
.word L(generate_grain_\type\()_lag0) - gen_grain_\type\()_tbl
.word L(generate_grain_\type\()_lag1) - gen_grain_\type\()_tbl
.word L(generate_grain_\type\()_lag2) - gen_grain_\type\()_tbl
.word L(generate_grain_\type\()_lag3) - gen_grain_\type\()_tbl
endjumptable
.endm
gen_grain_82 y
gen_grain_82 uv_444
.macro set_height dst, type
.ifc \type, uv_420
mov \dst, #SUB_GRAIN_HEIGHT-3
.else
mov \dst, #GRAIN_HEIGHT-3
.endif
.endm
.macro increment_y_ptr reg, type
.ifc \type, uv_420
add \reg, \reg, #2*GRAIN_WIDTH-(3*32)
.else
sub \reg, \reg, #3*32-GRAIN_WIDTH
.endif
.endm
.macro gen_grain_44 type
function generate_grain_\type\()_8bpc_neon, export=1
AARCH64_SIGN_LINK_REGISTER
stp x30, x19, [sp, #-96]!
mov w13, w3
mov w14, #28
add x19, x1, #3*GRAIN_WIDTH-3
mov x1, x2
mul w13, w13, w14
movrel x3, X(gaussian_sequence)
ldr w2, [x1, #FGD_SEED]
ldr w9, [x1, #FGD_GRAIN_SCALE_SHIFT]
add x4, x1, #FGD_AR_COEFFS_UV
movrel x16, gen_grain_\type\()_tbl
ldr w17, [x1, #FGD_AR_COEFF_LAG]
add w9, w9, #4
ldrsw x17, [x16, w17, uxtw #2]
dup v31.8h, w9 // 4 + data->grain_scale_shift
add x16, x16, x17
neg v31.8h, v31.8h
cmp w13, #0
mov w11, #0x49d8
mov w14, #0xb524
add x4, x4, w13, uxtw // Add offset to ar_coeffs_uv[1]
csel w11, w11, w14, ne
ldr w7, [x1, #FGD_AR_COEFF_SHIFT]
mov w8, #1
mov w10, #1
lsl w8, w8, w7 // 1 << ar_coeff_shift
lsl w10, w10, w9 // 1 << (4 + data->grain_scale_shift)
lsr w8, w8, #1 // 1 << (ar_coeff_shift - 1)
lsr w10, w10, #1 // 1 << (4 + data->grain_scale_shift - 1)
mov w5, #127
mov w6, #-128
eor w2, w2, w11
br x16
L(generate_grain_\type\()_lag0):
AARCH64_VALID_JUMP_TARGET
dup v28.8h, w7
ld1r {v27.16b}, [x4] // ar_coeffs_uv[0]
movi v0.16b, #0
movi v1.16b, #255
ext v29.16b, v0.16b, v1.16b, #13
ext v30.16b, v1.16b, v0.16b, #7
neg v28.8h, v28.8h
mov w1, #3
bl generate_grain_rows_44_neon
set_height w1, \type
1:
bl get_grain_row_44_neon
.ifc \type, uv_420
add x12, x19, #GRAIN_WIDTH
.endif
mov v0.16b, v29.16b
mov v1.16b, v16.16b
bl add_\type\()_coeff_lag0_neon
movi v0.16b, #255
mov v1.16b, v17.16b
mov v16.16b, v2.16b
bl add_\type\()_coeff_lag0_neon
mov v0.16b, v30.16b
mov v1.16b, v18.16b
mov v17.16b, v2.16b
bl add_\type\()_coeff_lag0_neon
mov v18.16b, v2.16b
subs w1, w1, #1
increment_y_ptr x19, \type
store_grain_row_44 v16, v17, v18
b.gt 1b
ldp x30, x19, [sp], #96
AARCH64_VALIDATE_LINK_REGISTER
ret
L(generate_grain_\type\()_lag1):
AARCH64_VALID_JUMP_TARGET
ld1r {v27.16b}, [x4], #1 // ar_coeffs_uv[0]
ld1r {v28.16b}, [x4], #1 // ar_coeffs_uv[1]
ld1r {v29.16b}, [x4] // ar_coeffs_uv[2]
add x4, x4, #2
mov w1, #3
ld1r {v30.16b}, [x4] // ar_coeffs_uv[4]
ldursb w4, [x4, #-1] // ar_coeffs_uv[3]
bl generate_grain_rows_44_neon
set_height w1, \type
1:
sum_\type\()_lag1 v20, v16, v16, v17, left
sum_\type\()_lag1 v21, v16, v17, v18
sum_\type\()_lag1 v18, v17, v18, v18, right
subs w1, w1, #1
increment_y_ptr x19, \type
store_grain_row_44 v20, v21, v18
mov v16.16b, v20.16b
mov v17.16b, v21.16b
b.gt 1b
ldp x30, x19, [sp], #96
AARCH64_VALIDATE_LINK_REGISTER
ret
L(generate_grain_\type\()_lag2):
AARCH64_VALID_JUMP_TARGET
ld1 {v30.16b}, [x4] // ar_coeffs_uv[0-12]
smov w4, v30.b[10]
smov w17, v30.b[11]
mov w1, #3
bl generate_grain_rows_44_neon
set_height w1, \type
1:
bl sum_\type\()_lag2_left_neon
bl sum_\type\()_lag2_mid_neon
bl sum_\type\()_lag2_right_neon
subs w1, w1, #1
increment_y_ptr x19, \type
add x0, x0, #GRAIN_WIDTH-48
b.gt 1b
ldp x30, x19, [sp], #96
AARCH64_VALIDATE_LINK_REGISTER
ret
L(generate_grain_\type\()_lag3):
AARCH64_VALID_JUMP_TARGET
ldr q29, [x4] // ar_coeffs_uv[0-15]
ldr q30, [x4, #16] // ar_coeffs_uv[16-24]
stp d8, d9, [sp, #16]
stp d10, d11, [sp, #32]
stp d12, d13, [sp, #48]
stp d14, d15, [sp, #64]
stp x20, x21, [sp, #80]
smov w4, v30.b[5]
smov w20, v30.b[6]
smov w21, v30.b[7]
mov w1, #3
bl generate_grain_rows_44_neon
set_height w1, \type
1:
bl sum_\type\()_lag3_left_neon
bl sum_\type\()_lag3_mid_neon
bl sum_\type\()_lag3_right_neon
subs w1, w1, #1
increment_y_ptr x19, \type
add x0, x0, #GRAIN_WIDTH-48
b.gt 1b
ldp x20, x21, [sp, #80]
ldp d14, d15, [sp, #64]
ldp d12, d13, [sp, #48]
ldp d10, d11, [sp, #32]
ldp d8, d9, [sp, #16]
ldp x30, x19, [sp], #96
AARCH64_VALIDATE_LINK_REGISTER
ret
endfunc
jumptable gen_grain_\type\()_tbl
.word L(generate_grain_\type\()_lag0) - gen_grain_\type\()_tbl
.word L(generate_grain_\type\()_lag1) - gen_grain_\type\()_tbl
.word L(generate_grain_\type\()_lag2) - gen_grain_\type\()_tbl
.word L(generate_grain_\type\()_lag3) - gen_grain_\type\()_tbl
endjumptable
.endm
gen_grain_44 uv_420
gen_grain_44 uv_422
.macro gather_interleaved dst1, dst2, src1, src2, off
umov w14, \src1[0+\off]
umov w15, \src2[8+\off]
umov w16, \src1[2+\off]
add x14, x14, x3
umov w17, \src2[10+\off]
add x15, x15, x3
ld1 {\dst1}[0+\off], [x14]
umov w14, \src1[4+\off]
add x16, x16, x3
ld1 {\dst2}[8+\off], [x15]
umov w15, \src2[12+\off]
add x17, x17, x3
ld1 {\dst1}[2+\off], [x16]
umov w16, \src1[6+\off]
add x14, x14, x3
ld1 {\dst2}[10+\off], [x17]
umov w17, \src2[14+\off]
add x15, x15, x3
ld1 {\dst1}[4+\off], [x14]
add x16, x16, x3
ld1 {\dst2}[12+\off], [x15]
add x17, x17, x3
ld1 {\dst1}[6+\off], [x16]
ld1 {\dst2}[14+\off], [x17]
.endm
.macro gather dst1, dst2, src1, src2
gather_interleaved \dst1, \dst2, \src1, \src2, 0
gather_interleaved \dst2, \dst1, \src2, \src1, 0
gather_interleaved \dst1, \dst2, \src1, \src2, 1
gather_interleaved \dst2, \dst1, \src2, \src1, 1
.endm
function gather32_neon
gather v4.b, v5.b, v0.b, v1.b
ret
endfunc
function gather16_neon
gather_interleaved v4.b, v5.b, v0.b, v0.b, 0
gather_interleaved v4.b, v5.b, v0.b, v0.b, 1
ins v4.d[1], v5.d[1]
ret
endfunc
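// gather16/gather32 perform the per-pixel scaling-LUT lookup: each byte lane
// of v0/v1 (the source, or csfl-adjusted luma, value) is used as an index
// into the scaling[] table at x3, roughly:
//   scale[x] = scaling[pixel[x]];
// The interleaved umov/ld1 scheduling only serves to hide the load latency.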
const overlap_coeffs_0, align=4
.byte 27, 17, 0, 0, 0, 0, 0, 0
.byte 17, 27, 32, 32, 32, 32, 32, 32
endconst
const overlap_coeffs_1, align=4
.byte 23, 0, 0, 0, 0, 0, 0, 0
.byte 22, 32, 32, 32, 32, 32, 32, 32
endconst
.macro calc_offset offx, offy, src, sx, sy
and \offy, \src, #0xF // randval & 0xF
lsr \offx, \src, #4 // randval >> 4
.if \sy == 0
add \offy, \offy, \offy // 2 * (randval & 0xF)
.endif
.if \sx == 0
add \offx, \offx, \offx // 2 * (randval >> 4)
.endif
.endm
.macro add_offset dst, offx, offy, src, stride
madd \dst, \stride, \offy, \src // grain_lut += grain_stride * offy
add \dst, \dst, \offx, uxtw // grain_lut += offx
.endm
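// calc_offset/add_offset turn one 16-bit random value into a starting
// position inside the grain LUT, roughly (a sketch; sx/sy are the chroma
// subsampling flags of the plane):
//   offy = rand & 0xF;      offx = rand >> 4;
//   if (!sy) offy *= 2;     if (!sx) offx *= 2;
//   grain_lut += offy * grain_stride + offx;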
// void dav1d_fgy_32x32_8bpc_neon(pixel *const dst, const pixel *const src,
// const ptrdiff_t stride,
// const uint8_t scaling[SCALING_SIZE],
// const int scaling_shift,
// const entry grain_lut[][GRAIN_WIDTH],
// const int offsets[][2],
// const int h, const ptrdiff_t clip,
// const ptrdiff_t type);
function fgy_32x32_8bpc_neon, export=1
AARCH64_SIGN_LINK_REGISTER
str x30, [sp, #-16]!
ldr w11, [x6, #8] // offsets[1][0]
ldr w13, [x6, #4] // offsets[0][1]
ldr w15, [x6, #12] // offsets[1][1]
ldr w6, [x6] // offsets[0][0]
ldr w8, [sp, #16] // clip
mov x9, #GRAIN_WIDTH // grain_lut stride
neg w4, w4
dup v29.8h, w4 // -scaling_shift
movrel x16, overlap_coeffs_0
cbz w8, 1f
// clip
movi v30.16b, #16
movi v31.16b, #235
b 2f
1:
// no clip
movi v30.16b, #0
movi v31.16b, #255
2:
ld1 {v27.8b, v28.8b}, [x16] // overlap_coeffs
add x5, x5, #9 // grain_lut += 9
add x5, x5, x9, lsl #3 // grain_lut += 8 * grain_stride
add x5, x5, x9 // grain_lut += grain_stride
calc_offset w11, w12, w11, 0, 0
calc_offset w13, w14, w13, 0, 0
calc_offset w15, w16, w15, 0, 0
calc_offset w6, w10, w6, 0, 0
add_offset x12, w11, x12, x5, x9
add_offset x14, w13, x14, x5, x9
add_offset x16, w15, x16, x5, x9
add_offset x5, w6, x10, x5, x9
ldr w11, [sp, #24] // type
movrel x13, fgy_loop_tbl
add x4, x12, #32 // grain_lut += FG_BLOCK_SIZE * bx
add x6, x14, x9, lsl #5 // grain_lut += grain_stride * FG_BLOCK_SIZE * by
tst w11, #1
ldrsw x11, [x13, w11, uxtw #2]
add x8, x16, x9, lsl #5 // grain_lut += grain_stride * FG_BLOCK_SIZE * by
add x8, x8, #32 // grain_lut += FG_BLOCK_SIZE * bx
add x11, x13, x11
b.eq 1f
// y overlap
dup v6.16b, v27.b[0]
dup v7.16b, v27.b[1]
mov w10, w7 // backup actual h
mov w7, #2
1:
br x11
endfunc
function fgy_loop_neon
.macro fgy ox, oy
L(loop_\ox\oy):
AARCH64_VALID_JUMP_TARGET
1:
ld1 {v0.16b, v1.16b}, [x1], x2 // src
.if \ox
ld1 {v20.8b}, [x4], x9 // grain_lut old
.endif
.if \oy
ld1 {v22.16b, v23.16b}, [x6], x9 // grain_lut top
.endif
.if \ox && \oy
ld1 {v21.8b}, [x8], x9 // grain_lut top old
.endif
ld1 {v18.16b, v19.16b}, [x5], x9 // grain_lut
bl gather32_neon
.if \ox
smull v20.8h, v20.8b, v27.8b
smlal v20.8h, v18.8b, v28.8b
.endif
.if \oy
.if \ox
smull v21.8h, v21.8b, v27.8b
smlal v21.8h, v22.8b, v28.8b
sqrshrn v20.8b, v20.8h, #5
sqrshrn v21.8b, v21.8h, #5
.endif
.if \ox
smull v16.8h, v20.8b, v7.8b
.else
smull v16.8h, v18.8b, v7.8b
.endif
smull2 v17.8h, v18.16b, v7.16b
smull v18.8h, v19.8b, v7.8b
smull2 v19.8h, v19.16b, v7.16b
.if \ox
smlal v16.8h, v21.8b, v6.8b
.else
smlal v16.8h, v22.8b, v6.8b
.endif
smlal2 v17.8h, v22.16b, v6.16b
smlal v18.8h, v23.8b, v6.8b
smlal2 v19.8h, v23.16b, v6.16b
sqrshrn v22.8b, v16.8h, #5
sqrshrn2 v22.16b, v17.8h, #5
sqrshrn v23.8b, v18.8h, #5
sqrshrn2 v23.16b, v19.8h, #5
.endif
// sxtl of grain
.if \oy
sxtl v16.8h, v22.8b
sxtl2 v17.8h, v22.16b
sxtl v18.8h, v23.8b
sxtl2 v19.8h, v23.16b
.elseif \ox
sqrshrn v20.8b, v20.8h, #5
sxtl2 v17.8h, v18.16b
sxtl v18.8h, v19.8b
sxtl2 v19.8h, v19.16b
sxtl v16.8h, v20.8b
.else
sxtl v16.8h, v18.8b
sxtl2 v17.8h, v18.16b
sxtl v18.8h, v19.8b
sxtl2 v19.8h, v19.16b
.endif
uxtl v2.8h, v4.8b // scaling
uxtl2 v3.8h, v4.16b
uxtl v4.8h, v5.8b
uxtl2 v5.8h, v5.16b
mul v16.8h, v16.8h, v2.8h // scaling * grain
mul v17.8h, v17.8h, v3.8h
mul v18.8h, v18.8h, v4.8h
mul v19.8h, v19.8h, v5.8h
srshl v16.8h, v16.8h, v29.8h // round2(scaling * grain, scaling_shift)
srshl v17.8h, v17.8h, v29.8h
srshl v18.8h, v18.8h, v29.8h
srshl v19.8h, v19.8h, v29.8h
uaddw v16.8h, v16.8h, v0.8b // *src + noise
uaddw2 v17.8h, v17.8h, v0.16b
uaddw v18.8h, v18.8h, v1.8b
uaddw2 v19.8h, v19.8h, v1.16b
sqxtun v0.8b, v16.8h
sqxtun2 v0.16b, v17.8h
sqxtun v1.8b, v18.8h
sqxtun2 v1.16b, v19.8h
umax v0.16b, v0.16b, v30.16b
umax v1.16b, v1.16b, v30.16b
umin v0.16b, v0.16b, v31.16b
umin v1.16b, v1.16b, v31.16b
subs w7, w7, #1
.if \oy
dup v6.16b, v28.b[0]
dup v7.16b, v28.b[1]
.endif
st1 {v0.16b, v1.16b}, [x0], x2 // dst
b.gt 1b
.if \oy
cmp w10, #2
sub w7, w10, #2 // restore actual remaining h
b.gt L(loop_\ox\()0)
.endif
ldr x30, [sp], #16
AARCH64_VALIDATE_LINK_REGISTER
ret
.endm
fgy 0, 0
fgy 0, 1
fgy 1, 0
fgy 1, 1
endfunc
jumptable fgy_loop_tbl
.word L(loop_00) - fgy_loop_tbl
.word L(loop_01) - fgy_loop_tbl
.word L(loop_10) - fgy_loop_tbl
.word L(loop_11) - fgy_loop_tbl
endjumptable
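// The fgy loop above applies luma grain per pixel, essentially (a sketch,
// 8 bpc):
//   noise  = round2(scaling[src[x]] * grain[x], scaling_shift);
//   dst[x] = clip(src[x] + noise, min_value, max_value);
// For block overlap, the current and neighbouring grain values are first
// blended with the (27,17)/(17,27) coefficients and round2(.., 5) before the
// scaling step; min/max are 16/235 when clipping is requested, 0/255 otherwise.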
// void dav1d_fguv_32x32_420_8bpc_neon(pixel *const dst,
// const pixel *const src,
// const ptrdiff_t stride,
// const uint8_t scaling[SCALING_SIZE],
// const Dav1dFilmGrainData *const data,
// const entry grain_lut[][GRAIN_WIDTH],
// const pixel *const luma_row,
// const ptrdiff_t luma_stride,
// const int offsets[][2],
// const ptrdiff_t h, const ptrdiff_t uv,
// const ptrdiff_t is_id,
// const ptrdiff_t type);
.macro fguv layout, sx, sy
function fguv_32x32_\layout\()_8bpc_neon, export=1
AARCH64_SIGN_LINK_REGISTER
str x30, [sp, #-32]!
str d8, [sp, #16]
ldp x8, x9, [sp, #32] // offsets, h
ldp x10, x11, [sp, #48] // uv, is_id
ldr w13, [x4, #FGD_SCALING_SHIFT]
ldr w12, [x4, #FGD_CLIP_TO_RESTRICTED_RANGE]
neg w13, w13 // -scaling_shift
// !csfl
add x10, x4, x10, lsl #2 // + 4*uv
add x14, x10, #FGD_UV_LUMA_MULT
add x15, x10, #FGD_UV_MULT
add x10, x10, #FGD_UV_OFFSET
ld1 {v8.h}[0], [x14] // uv_luma_mult
ld1r {v24.8h}, [x10] // uv_offset
ld1 {v8.h}[1], [x15] // uv_mult
dup v29.8h, w13 // -scaling_shift
cbz w12, 1f
// clip
movi v30.16b, #16
movi v31.16b, #240
cbz w11, 2f
// is_id
movi v31.16b, #235
b 2f
1:
// no clip
movi v30.16b, #0
movi v31.16b, #255
2:
ldr w12, [x8, #8] // offsets[1][0]
ldr w14, [x8, #4] // offsets[0][1]
ldr w16, [x8, #12] // offsets[1][1]
ldr w8, [x8] // offsets[0][0]
mov x10, #GRAIN_WIDTH // grain_lut stride
add x5, x5, #(3 + (2 >> \sx)*3) // grain_lut += 9 or 6
.if \sy
add x5, x5, x10, lsl #2 // grain_lut += 4 * grain_stride
add x5, x5, x10, lsl #1 // grain_lut += 2 * grain_stride
.else
add x5, x5, x10, lsl #3 // grain_lut += 8 * grain_stride
add x5, x5, x10 // grain_lut += grain_stride
.endif
calc_offset w12, w13, w12, \sx, \sy
calc_offset w14, w15, w14, \sx, \sy
calc_offset w16, w17, w16, \sx, \sy
calc_offset w8, w11, w8, \sx, \sy
add_offset x13, w12, x13, x5, x10
add_offset x15, w14, x15, x5, x10
add_offset x17, w16, x17, x5, x10
add_offset x5, w8, x11, x5, x10
add x4, x13, #(32 >> \sx) // grain_lut += FG_BLOCK_SIZE * bx
add x8, x15, x10, lsl #(5 - \sy) // grain_lut += grain_stride * FG_BLOCK_SIZE * by
add x11, x17, x10, lsl #(5 - \sy) // grain_lut += grain_stride * FG_BLOCK_SIZE * by
add x11, x11, #(32 >> \sx) // grain_lut += FG_BLOCK_SIZE * bx
ldr w13, [sp, #64] // type
movrel x16, overlap_coeffs_\sx
movrel x14, fguv_loop_sx\sx\()_tbl
ld1 {v27.8b, v28.8b}, [x16] // overlap_coeffs
tst w13, #1
ldrsw x13, [x14, w13, uxtw #2]
b.eq 1f
// y overlap
sub w12, w9, #(2 >> \sy) // backup remaining h
mov w9, #(2 >> \sy)
1:
add x13, x14, x13
.if \sy
movi v25.16b, #23
movi v26.16b, #22
.else
movi v25.16b, #27
movi v26.16b, #17
.endif
.if \sy
add x7, x7, x7 // luma_stride *= 2
.endif
br x13
endfunc
.endm
fguv 420, 1, 1
fguv 422, 1, 0
fguv 444, 0, 0
function fguv_loop_sx0_neon
.macro fguv_loop_sx0 csfl, ox, oy
L(fguv_loop_sx0_csfl\csfl\()_\ox\oy):
AARCH64_VALID_JUMP_TARGET
1:
ld1 {v0.16b, v1.16b}, [x6], x7 // luma
ld1 {v6.16b, v7.16b}, [x1], x2 // src
.if \ox
ld1 {v20.8b}, [x4], x10 // grain_lut old
.endif
.if \oy
ld1 {v22.16b, v23.16b}, [x8], x10 // grain_lut top
.endif
.if \ox && \oy
ld1 {v21.8b}, [x11], x10 // grain_lut top old
.endif
ld1 {v18.16b, v19.16b}, [x5], x10 // grain_lut
.if !\csfl
uxtl v2.8h, v0.8b
uxtl2 v3.8h, v0.16b
uxtl v4.8h, v1.8b
uxtl2 v5.8h, v1.16b
uxtl v0.8h, v6.8b
uxtl2 v1.8h, v6.16b
uxtl v16.8h, v7.8b
uxtl2 v17.8h, v7.16b
mul v2.8h, v2.8h, v8.h[0]
mul v3.8h, v3.8h, v8.h[0]
mul v4.8h, v4.8h, v8.h[0]
mul v5.8h, v5.8h, v8.h[0]
mul v0.8h, v0.8h, v8.h[1]
mul v1.8h, v1.8h, v8.h[1]
mul v16.8h, v16.8h, v8.h[1]
mul v17.8h, v17.8h, v8.h[1]
sqadd v2.8h, v2.8h, v0.8h
sqadd v3.8h, v3.8h, v1.8h
sqadd v4.8h, v4.8h, v16.8h
sqadd v5.8h, v5.8h, v17.8h
sshr v2.8h, v2.8h, #6
sshr v3.8h, v3.8h, #6
sshr v4.8h, v4.8h, #6
sshr v5.8h, v5.8h, #6
add v2.8h, v2.8h, v24.8h
add v3.8h, v3.8h, v24.8h
add v4.8h, v4.8h, v24.8h
add v5.8h, v5.8h, v24.8h
sqxtun v0.8b, v2.8h
sqxtun2 v0.16b, v3.8h
sqxtun v1.8b, v4.8h
sqxtun2 v1.16b, v5.8h
.endif
bl gather32_neon
.if \ox
smull v20.8h, v20.8b, v27.8b
smlal v20.8h, v18.8b, v28.8b
.endif
.if \oy
.if \ox
smull v21.8h, v21.8b, v27.8b
smlal v21.8h, v22.8b, v28.8b
sqrshrn v20.8b, v20.8h, #5
sqrshrn v21.8b, v21.8h, #5
.endif
.if \ox
smull v16.8h, v20.8b, v26.8b
.else
smull v16.8h, v18.8b, v26.8b
.endif
smull2 v17.8h, v18.16b, v26.16b
smull v18.8h, v19.8b, v26.8b
smull2 v19.8h, v19.16b, v26.16b
.if \ox
smlal v16.8h, v21.8b, v25.8b
.else
smlal v16.8h, v22.8b, v25.8b
.endif
smlal2 v17.8h, v22.16b, v25.16b
smlal v18.8h, v23.8b, v25.8b
smlal2 v19.8h, v23.16b, v25.16b
sqrshrn v22.8b, v16.8h, #5
sqrshrn2 v22.16b, v17.8h, #5
sqrshrn v23.8b, v18.8h, #5
sqrshrn2 v23.16b, v19.8h, #5
.endif
// sxtl of grain
.if \oy
sxtl v16.8h, v22.8b
sxtl2 v17.8h, v22.16b
sxtl v18.8h, v23.8b
sxtl2 v19.8h, v23.16b
.elseif \ox
sqrshrn v20.8b, v20.8h, #5
sxtl2 v17.8h, v18.16b
sxtl v18.8h, v19.8b
sxtl2 v19.8h, v19.16b
sxtl v16.8h, v20.8b
.else
sxtl v16.8h, v18.8b
sxtl2 v17.8h, v18.16b
sxtl v18.8h, v19.8b
sxtl2 v19.8h, v19.16b
.endif
uxtl v2.8h, v4.8b // scaling
uxtl2 v3.8h, v4.16b
uxtl v4.8h, v5.8b
uxtl2 v5.8h, v5.16b
mul v16.8h, v16.8h, v2.8h // scaling * grain
mul v17.8h, v17.8h, v3.8h
mul v18.8h, v18.8h, v4.8h
mul v19.8h, v19.8h, v5.8h
srshl v16.8h, v16.8h, v29.8h // round2(scaling * grain, scaling_shift)
srshl v17.8h, v17.8h, v29.8h
srshl v18.8h, v18.8h, v29.8h
srshl v19.8h, v19.8h, v29.8h
uaddw v16.8h, v16.8h, v6.8b // *src + noise
uaddw2 v17.8h, v17.8h, v6.16b
uaddw v18.8h, v18.8h, v7.8b
uaddw2 v19.8h, v19.8h, v7.16b
sqxtun v0.8b, v16.8h
sqxtun2 v0.16b, v17.8h
sqxtun v1.8b, v18.8h
sqxtun2 v1.16b, v19.8h
umax v0.16b, v0.16b, v30.16b
umax v1.16b, v1.16b, v30.16b
umin v0.16b, v0.16b, v31.16b
umin v1.16b, v1.16b, v31.16b
subs w9, w9, #1
.if \oy
dup v25.16b, v28.b[0]
dup v26.16b, v28.b[1]
.endif
st1 {v0.16b, v1.16b}, [x0], x2 // dst
b.gt 1b
.if \oy
cmp w12, #0
mov w9, w12 // restore actual remaining h
b.gt L(fguv_loop_sx0_csfl\csfl\()_\ox\()0)
.endif
b 9f
.endm
fguv_loop_sx0 0, 0, 0
fguv_loop_sx0 0, 0, 1
fguv_loop_sx0 0, 1, 0
fguv_loop_sx0 0, 1, 1
fguv_loop_sx0 1, 0, 0
fguv_loop_sx0 1, 0, 1
fguv_loop_sx0 1, 1, 0
fguv_loop_sx0 1, 1, 1
9:
ldr d8, [sp, #16]
ldr x30, [sp], #32
AARCH64_VALIDATE_LINK_REGISTER
ret
endfunc
jumptable fguv_loop_sx0_tbl
.word L(fguv_loop_sx0_csfl0_00) - fguv_loop_sx0_tbl
.word L(fguv_loop_sx0_csfl0_01) - fguv_loop_sx0_tbl
.word L(fguv_loop_sx0_csfl0_10) - fguv_loop_sx0_tbl
.word L(fguv_loop_sx0_csfl0_11) - fguv_loop_sx0_tbl
.word L(fguv_loop_sx0_csfl1_00) - fguv_loop_sx0_tbl
.word L(fguv_loop_sx0_csfl1_01) - fguv_loop_sx0_tbl
.word L(fguv_loop_sx0_csfl1_10) - fguv_loop_sx0_tbl
.word L(fguv_loop_sx0_csfl1_11) - fguv_loop_sx0_tbl
endjumptable
function fguv_loop_sx1_neon
.macro fguv_loop_sx1 csfl, ox, oy
L(fguv_loop_sx1_csfl\csfl\()_\ox\oy):
AARCH64_VALID_JUMP_TARGET
1:
ld1 {v0.16b, v1.16b}, [x6], x7 // luma
ld1 {v6.16b}, [x1], x2 // src
.if \ox
ld1 {v20.8b}, [x4], x10 // grain_lut old
.endif
.if \oy
ld1 {v22.16b}, [x8], x10 // grain_lut top
.endif
.if \ox && \oy
ld1 {v21.8b}, [x11], x10 // grain_lut top old
.endif
ld1 {v18.16b}, [x5], x10 // grain_lut
uaddlp v2.8h, v0.16b
uaddlp v3.8h, v1.16b
.if \csfl
rshrn v0.8b, v2.8h, #1
rshrn2 v0.16b, v3.8h, #1
.else
urshr v2.8h, v2.8h, #1
urshr v3.8h, v3.8h, #1
uxtl v0.8h, v6.8b
uxtl2 v1.8h, v6.16b
mul v2.8h, v2.8h, v8.h[0]
mul v3.8h, v3.8h, v8.h[0]
mul v0.8h, v0.8h, v8.h[1]
mul v1.8h, v1.8h, v8.h[1]
sqadd v2.8h, v2.8h, v0.8h
sqadd v3.8h, v3.8h, v1.8h
sshr v2.8h, v2.8h, #6
sshr v3.8h, v3.8h, #6
add v2.8h, v2.8h, v24.8h
add v3.8h, v3.8h, v24.8h
sqxtun v0.8b, v2.8h
sqxtun2 v0.16b, v3.8h
.endif
bl gather16_neon
.if \ox
smull v20.8h, v20.8b, v27.8b
smlal v20.8h, v18.8b, v28.8b
.endif
.if \oy
.if \ox
smull v21.8h, v21.8b, v27.8b
smlal v21.8h, v22.8b, v28.8b
sqrshrn v20.8b, v20.8h, #5
sqrshrn v21.8b, v21.8h, #5
.endif
.if \ox
smull v16.8h, v20.8b, v26.8b
.else
smull v16.8h, v18.8b, v26.8b
.endif
smull2 v17.8h, v18.16b, v26.16b
.if \ox
smlal v16.8h, v21.8b, v25.8b
.else
smlal v16.8h, v22.8b, v25.8b
.endif
smlal2 v17.8h, v22.16b, v25.16b
sqrshrn v22.8b, v16.8h, #5
sqrshrn2 v22.16b, v17.8h, #5
.endif
// sxtl of grain
.if \oy
sxtl v16.8h, v22.8b
sxtl2 v17.8h, v22.16b
.elseif \ox
sqrshrn v20.8b, v20.8h, #5
sxtl2 v17.8h, v18.16b
sxtl v16.8h, v20.8b
.else
sxtl v16.8h, v18.8b
sxtl2 v17.8h, v18.16b
.endif
uxtl v2.8h, v4.8b // scaling
uxtl2 v3.8h, v4.16b
mul v16.8h, v16.8h, v2.8h // scaling * grain
mul v17.8h, v17.8h, v3.8h
srshl v16.8h, v16.8h, v29.8h // round2(scaling * grain, scaling_shift)
srshl v17.8h, v17.8h, v29.8h
uaddw v16.8h, v16.8h, v6.8b // *src + noise
uaddw2 v17.8h, v17.8h, v6.16b
sqxtun v0.8b, v16.8h
sqxtun2 v0.16b, v17.8h
umax v0.16b, v0.16b, v30.16b
umin v0.16b, v0.16b, v31.16b
.if \oy
mov v16.16b, v25.16b
.endif
subs w9, w9, #1
.if \oy
mov v25.16b, v26.16b
mov v26.16b, v16.16b
.endif
st1 {v0.16b}, [x0], x2 // dst
b.gt 1b
.if \oy
cmp w12, #0
mov w9, w12 // restore actual remaining h
b.gt L(fguv_loop_sx1_csfl\csfl\()_\ox\()0)
.endif
b 9f
.endm
fguv_loop_sx1 0, 0, 0
fguv_loop_sx1 0, 0, 1
fguv_loop_sx1 0, 1, 0
fguv_loop_sx1 0, 1, 1
fguv_loop_sx1 1, 0, 0
fguv_loop_sx1 1, 0, 1
fguv_loop_sx1 1, 1, 0
fguv_loop_sx1 1, 1, 1
9:
ldr d8, [sp, #16]
ldr x30, [sp], #32
AARCH64_VALIDATE_LINK_REGISTER
ret
endfunc
jumptable fguv_loop_sx1_tbl
.word L(fguv_loop_sx1_csfl0_00) - fguv_loop_sx1_tbl
.word L(fguv_loop_sx1_csfl0_01) - fguv_loop_sx1_tbl
.word L(fguv_loop_sx1_csfl0_10) - fguv_loop_sx1_tbl
.word L(fguv_loop_sx1_csfl0_11) - fguv_loop_sx1_tbl
.word L(fguv_loop_sx1_csfl1_00) - fguv_loop_sx1_tbl
.word L(fguv_loop_sx1_csfl1_01) - fguv_loop_sx1_tbl
.word L(fguv_loop_sx1_csfl1_10) - fguv_loop_sx1_tbl
.word L(fguv_loop_sx1_csfl1_11) - fguv_loop_sx1_tbl
endjumptable
// File: third_party/dav1d/src/arm/64/ipred16.S
/*
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2019, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm.S"
#include "util.S"
// void ipred_dc_128_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height,
// const int bitdepth_max);
function ipred_dc_128_16bpc_neon, export=1
ldr w8, [sp]
clz w3, w3
movrel x5, ipred_dc_128_tbl
sub w3, w3, #25
ldrsw x3, [x5, w3, uxtw #2]
dup v0.8h, w8
add x5, x5, x3
add x6, x0, x1
lsl x1, x1, #1
urshr v0.8h, v0.8h, #1
br x5
40:
AARCH64_VALID_JUMP_TARGET
4:
st1 {v0.4h}, [x0], x1
st1 {v0.4h}, [x6], x1
subs w4, w4, #4
st1 {v0.4h}, [x0], x1
st1 {v0.4h}, [x6], x1
b.gt 4b
ret
80:
AARCH64_VALID_JUMP_TARGET
8:
st1 {v0.8h}, [x0], x1
st1 {v0.8h}, [x6], x1
subs w4, w4, #4
st1 {v0.8h}, [x0], x1
st1 {v0.8h}, [x6], x1
b.gt 8b
ret
160:
AARCH64_VALID_JUMP_TARGET
mov v1.16b, v0.16b
16:
st1 {v0.8h, v1.8h}, [x0], x1
st1 {v0.8h, v1.8h}, [x6], x1
subs w4, w4, #4
st1 {v0.8h, v1.8h}, [x0], x1
st1 {v0.8h, v1.8h}, [x6], x1
b.gt 16b
ret
320:
AARCH64_VALID_JUMP_TARGET
mov v1.16b, v0.16b
mov v2.16b, v0.16b
mov v3.16b, v0.16b
32:
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], x1
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x6], x1
subs w4, w4, #4
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], x1
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x6], x1
b.gt 32b
ret
640:
AARCH64_VALID_JUMP_TARGET
mov v1.16b, v0.16b
mov v2.16b, v0.16b
mov v3.16b, v0.16b
sub x1, x1, #64
64:
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x6], #64
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], x1
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x6], x1
subs w4, w4, #4
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x6], #64
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], x1
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x6], x1
b.gt 64b
ret
endfunc
jumptable ipred_dc_128_tbl
.word 640b - ipred_dc_128_tbl
.word 320b - ipred_dc_128_tbl
.word 160b - ipred_dc_128_tbl
.word 80b - ipred_dc_128_tbl
.word 40b - ipred_dc_128_tbl
endjumptable
// void ipred_v_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
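// Illustrative sketch (assumption; stride in pixels): vertical prediction
// copies the row above the block into every output row:
//   for (y = 0; y < height; y++)
//     for (x = 0; x < width; x++)
//       dst[y*stride + x] = topleft[1 + x];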
function ipred_v_16bpc_neon, export=1
clz w3, w3
movrel x5, ipred_v_tbl
sub w3, w3, #25
ldrsw x3, [x5, w3, uxtw #2]
add x2, x2, #2
add x5, x5, x3
add x6, x0, x1
lsl x1, x1, #1
br x5
40:
AARCH64_VALID_JUMP_TARGET
ld1 {v0.4h}, [x2]
4:
st1 {v0.4h}, [x0], x1
st1 {v0.4h}, [x6], x1
subs w4, w4, #4
st1 {v0.4h}, [x0], x1
st1 {v0.4h}, [x6], x1
b.gt 4b
ret
80:
AARCH64_VALID_JUMP_TARGET
ld1 {v0.8h}, [x2]
8:
st1 {v0.8h}, [x0], x1
st1 {v0.8h}, [x6], x1
subs w4, w4, #4
st1 {v0.8h}, [x0], x1
st1 {v0.8h}, [x6], x1
b.gt 8b
ret
160:
AARCH64_VALID_JUMP_TARGET
ld1 {v0.8h, v1.8h}, [x2]
16:
st1 {v0.8h, v1.8h}, [x0], x1
st1 {v0.8h, v1.8h}, [x6], x1
subs w4, w4, #4
st1 {v0.8h, v1.8h}, [x0], x1
st1 {v0.8h, v1.8h}, [x6], x1
b.gt 16b
ret
320:
AARCH64_VALID_JUMP_TARGET
ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x2]
32:
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], x1
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x6], x1
subs w4, w4, #4
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], x1
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x6], x1
b.gt 32b
ret
640:
AARCH64_VALID_JUMP_TARGET
ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x2], #64
sub x1, x1, #64
ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x2]
64:
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x6], #64
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x0], x1
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x6], x1
subs w4, w4, #4
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x6], #64
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x0], x1
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x6], x1
b.gt 64b
ret
endfunc
jumptable ipred_v_tbl
.word 640b - ipred_v_tbl
.word 320b - ipred_v_tbl
.word 160b - ipred_v_tbl
.word 80b - ipred_v_tbl
.word 40b - ipred_v_tbl
endjumptable
// void ipred_h_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
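// Illustrative sketch (assumption; stride in pixels): horizontal prediction
// replicates each row's left neighbour across the row; the loops below walk
// the left edge backwards four rows at a time (x7 = -8 bytes):
//   for (y = 0; y < height; y++)
//     for (x = 0; x < width; x++)
//       dst[y*stride + x] = topleft[-(1 + y)];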
function ipred_h_16bpc_neon, export=1
clz w3, w3
movrel x5, ipred_h_tbl
sub w3, w3, #25
ldrsw x3, [x5, w3, uxtw #2]
sub x2, x2, #8
add x5, x5, x3
mov x7, #-8
add x6, x0, x1
lsl x1, x1, #1
br x5
40:
AARCH64_VALID_JUMP_TARGET
4:
ld4r {v0.8h, v1.8h, v2.8h, v3.8h}, [x2], x7
st1 {v3.4h}, [x0], x1
st1 {v2.4h}, [x6], x1
subs w4, w4, #4
st1 {v1.4h}, [x0], x1
st1 {v0.4h}, [x6], x1
b.gt 4b
ret
80:
AARCH64_VALID_JUMP_TARGET
8:
ld4r {v0.8h, v1.8h, v2.8h, v3.8h}, [x2], x7
st1 {v3.8h}, [x0], x1
st1 {v2.8h}, [x6], x1
subs w4, w4, #4
st1 {v1.8h}, [x0], x1
st1 {v0.8h}, [x6], x1
b.gt 8b
ret
160:
AARCH64_VALID_JUMP_TARGET
16:
ld4r {v0.8h, v1.8h, v2.8h, v3.8h}, [x2], x7
str q3, [x0, #16]
str q2, [x6, #16]
st1 {v3.8h}, [x0], x1
st1 {v2.8h}, [x6], x1
subs w4, w4, #4
str q1, [x0, #16]
str q0, [x6, #16]
st1 {v1.8h}, [x0], x1
st1 {v0.8h}, [x6], x1
b.gt 16b
ret
320:
AARCH64_VALID_JUMP_TARGET
32:
ld4r {v0.8h, v1.8h, v2.8h, v3.8h}, [x2], x7
str q3, [x0, #16]
str q2, [x6, #16]
stp q3, q3, [x0, #32]
stp q2, q2, [x6, #32]
st1 {v3.8h}, [x0], x1
st1 {v2.8h}, [x6], x1
subs w4, w4, #4
str q1, [x0, #16]
str q0, [x6, #16]
stp q1, q1, [x0, #32]
stp q0, q0, [x6, #32]
st1 {v1.8h}, [x0], x1
st1 {v0.8h}, [x6], x1
b.gt 32b
ret
640:
AARCH64_VALID_JUMP_TARGET
64:
ld4r {v0.8h, v1.8h, v2.8h, v3.8h}, [x2], x7
str q3, [x0, #16]
str q2, [x6, #16]
stp q3, q3, [x0, #32]
stp q2, q2, [x6, #32]
stp q3, q3, [x0, #64]
stp q2, q2, [x6, #64]
stp q3, q3, [x0, #96]
stp q2, q2, [x6, #96]
st1 {v3.8h}, [x0], x1
st1 {v2.8h}, [x6], x1
subs w4, w4, #4
str q1, [x0, #16]
str q0, [x6, #16]
stp q1, q1, [x0, #32]
stp q0, q0, [x6, #32]
stp q1, q1, [x0, #64]
stp q0, q0, [x6, #64]
stp q1, q1, [x0, #96]
stp q0, q0, [x6, #96]
st1 {v1.8h}, [x0], x1
st1 {v0.8h}, [x6], x1
b.gt 64b
ret
endfunc
jumptable ipred_h_tbl
.word 640b - ipred_h_tbl
.word 320b - ipred_h_tbl
.word 160b - ipred_h_tbl
.word 80b - ipred_h_tbl
.word 40b - ipred_h_tbl
endjumptable
// void ipred_dc_top_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
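// Illustrative sketch (assumption; stride in pixels): the DC value is the
// rounded average of the top edge only; width is a power of two, so the
// division becomes the rounding shifts (urshr/rshrn) used below:
//   sum = 0;
//   for (x = 0; x < width; x++)
//     sum += topleft[1 + x];
//   dc = (sum + (width >> 1)) >> log2(width);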
function ipred_dc_top_16bpc_neon, export=1
clz w3, w3
movrel x5, ipred_dc_top_tbl
sub w3, w3, #25
ldrsw x3, [x5, w3, uxtw #2]
add x2, x2, #2
add x5, x5, x3
add x6, x0, x1
lsl x1, x1, #1
br x5
40:
AARCH64_VALID_JUMP_TARGET
ld1 {v0.4h}, [x2]
addv h0, v0.4h
urshr v0.4h, v0.4h, #2
dup v0.4h, v0.h[0]
4:
st1 {v0.4h}, [x0], x1
st1 {v0.4h}, [x6], x1
subs w4, w4, #4
st1 {v0.4h}, [x0], x1
st1 {v0.4h}, [x6], x1
b.gt 4b
ret
80:
AARCH64_VALID_JUMP_TARGET
ld1 {v0.8h}, [x2]
addv h0, v0.8h
urshr v0.4h, v0.4h, #3
dup v0.8h, v0.h[0]
8:
st1 {v0.8h}, [x0], x1
st1 {v0.8h}, [x6], x1
subs w4, w4, #4
st1 {v0.8h}, [x0], x1
st1 {v0.8h}, [x6], x1
b.gt 8b
ret
160:
AARCH64_VALID_JUMP_TARGET
ld1 {v0.8h, v1.8h}, [x2]
addp v0.8h, v0.8h, v1.8h
addv h0, v0.8h
urshr v2.4h, v0.4h, #4
dup v0.8h, v2.h[0]
dup v1.8h, v2.h[0]
16:
st1 {v0.8h, v1.8h}, [x0], x1
st1 {v0.8h, v1.8h}, [x6], x1
subs w4, w4, #4
st1 {v0.8h, v1.8h}, [x0], x1
st1 {v0.8h, v1.8h}, [x6], x1
b.gt 16b
ret
320:
AARCH64_VALID_JUMP_TARGET
ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x2]
addp v0.8h, v0.8h, v1.8h
addp v2.8h, v2.8h, v3.8h
addp v0.8h, v0.8h, v2.8h
uaddlv s0, v0.8h
rshrn v4.4h, v0.4s, #5
dup v0.8h, v4.h[0]
dup v1.8h, v4.h[0]
dup v2.8h, v4.h[0]
dup v3.8h, v4.h[0]
32:
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], x1
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x6], x1
subs w4, w4, #4
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], x1
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x6], x1
b.gt 32b
ret
640:
AARCH64_VALID_JUMP_TARGET
ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x2], #64
addp v0.8h, v0.8h, v1.8h
ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x2]
addp v2.8h, v2.8h, v3.8h
addp v4.8h, v4.8h, v5.8h
addp v6.8h, v6.8h, v7.8h
addp v0.8h, v0.8h, v2.8h
addp v4.8h, v4.8h, v6.8h
addp v0.8h, v0.8h, v4.8h
uaddlv s0, v0.8h
rshrn v4.4h, v0.4s, #6
sub x1, x1, #64
dup v0.8h, v4.h[0]
dup v1.8h, v4.h[0]
dup v2.8h, v4.h[0]
dup v3.8h, v4.h[0]
64:
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x6], #64
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], x1
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x6], x1
subs w4, w4, #4
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x6], #64
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], x1
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x6], x1
b.gt 64b
ret
endfunc
jumptable ipred_dc_top_tbl
.word 640b - ipred_dc_top_tbl
.word 320b - ipred_dc_top_tbl
.word 160b - ipred_dc_top_tbl
.word 80b - ipred_dc_top_tbl
.word 40b - ipred_dc_top_tbl
endjumptable
// void ipred_dc_left_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
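// Note (inferred from the code below): ipred_dc_left uses a two-level
// dispatch through ipred_dc_left_tbl. The first br x5 jumps to the
// height-specific block that averages the left column, topleft[-height..-1],
// with the same rounded shift as in dc_top; that block then jumps via br x3
// to the width-specific store loop.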
function ipred_dc_left_16bpc_neon, export=1
sub x2, x2, w4, uxtw #1
clz w3, w3
clz w7, w4
movrel x5, ipred_dc_left_tbl
sub w3, w3, #20 // 25 leading bits, minus table offset 5
sub w7, w7, #25
ldrsw x3, [x5, w3, uxtw #2]
ldrsw x7, [x5, w7, uxtw #2]
add x3, x5, x3
add x5, x5, x7
add x6, x0, x1
lsl x1, x1, #1
br x5
L(ipred_dc_left_h4):
AARCH64_VALID_JUMP_TARGET
ld1 {v0.4h}, [x2]
addv h0, v0.4h
urshr v0.4h, v0.4h, #2
dup v0.8h, v0.h[0]
br x3
L(ipred_dc_left_w4):
AARCH64_VALID_JUMP_TARGET
1:
st1 {v0.4h}, [x0], x1
st1 {v0.4h}, [x6], x1
subs w4, w4, #4
st1 {v0.4h}, [x0], x1
st1 {v0.4h}, [x6], x1
b.gt 1b
ret
L(ipred_dc_left_h8):
AARCH64_VALID_JUMP_TARGET
ld1 {v0.8h}, [x2]
addv h0, v0.8h
urshr v0.4h, v0.4h, #3
dup v0.8h, v0.h[0]
br x3
L(ipred_dc_left_w8):
AARCH64_VALID_JUMP_TARGET
1:
st1 {v0.8h}, [x0], x1
st1 {v0.8h}, [x6], x1
subs w4, w4, #4
st1 {v0.8h}, [x0], x1
st1 {v0.8h}, [x6], x1
b.gt 1b
ret
L(ipred_dc_left_h16):
AARCH64_VALID_JUMP_TARGET
ld1 {v0.8h, v1.8h}, [x2]
addp v0.8h, v0.8h, v1.8h
addv h0, v0.8h
urshr v2.4h, v0.4h, #4
dup v0.8h, v2.h[0]
dup v1.8h, v2.h[0]
br x3
L(ipred_dc_left_w16):
AARCH64_VALID_JUMP_TARGET
mov v1.16b, v0.16b
1:
st1 {v0.8h, v1.8h}, [x0], x1
st1 {v0.8h, v1.8h}, [x6], x1
subs w4, w4, #4
st1 {v0.8h, v1.8h}, [x0], x1
st1 {v0.8h, v1.8h}, [x6], x1
b.gt 1b
ret
L(ipred_dc_left_h32):
AARCH64_VALID_JUMP_TARGET
ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x2]
addp v0.8h, v0.8h, v1.8h
addp v2.8h, v2.8h, v3.8h
addp v0.8h, v0.8h, v2.8h
uaddlp v0.4s, v0.8h
addv s0, v0.4s
rshrn v4.4h, v0.4s, #5
dup v0.8h, v4.h[0]
br x3
L(ipred_dc_left_w32):
AARCH64_VALID_JUMP_TARGET
mov v1.16b, v0.16b
mov v2.16b, v0.16b
mov v3.16b, v0.16b
1:
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], x1
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x6], x1
subs w4, w4, #4
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], x1
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x6], x1
b.gt 1b
ret
L(ipred_dc_left_h64):
AARCH64_VALID_JUMP_TARGET
ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x2], #64
addp v0.8h, v0.8h, v1.8h
ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x2]
addp v2.8h, v2.8h, v3.8h
addp v4.8h, v4.8h, v5.8h
addp v6.8h, v6.8h, v7.8h
addp v0.8h, v0.8h, v2.8h
addp v4.8h, v4.8h, v6.8h
addp v0.8h, v0.8h, v4.8h
uaddlv s0, v0.8h
rshrn v4.4h, v0.4s, #6
dup v0.8h, v4.h[0]
br x3
L(ipred_dc_left_w64):
AARCH64_VALID_JUMP_TARGET
mov v1.16b, v0.16b
mov v2.16b, v0.16b
mov v3.16b, v0.16b
sub x1, x1, #64
1:
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x6], #64
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], x1
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x6], x1
subs w4, w4, #4
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x6], #64
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], x1
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x6], x1
b.gt 1b
ret
endfunc
jumptable ipred_dc_left_tbl
.word L(ipred_dc_left_h64) - ipred_dc_left_tbl
.word L(ipred_dc_left_h32) - ipred_dc_left_tbl
.word L(ipred_dc_left_h16) - ipred_dc_left_tbl
.word L(ipred_dc_left_h8) - ipred_dc_left_tbl
.word L(ipred_dc_left_h4) - ipred_dc_left_tbl
.word L(ipred_dc_left_w64) - ipred_dc_left_tbl
.word L(ipred_dc_left_w32) - ipred_dc_left_tbl
.word L(ipred_dc_left_w16) - ipred_dc_left_tbl
.word L(ipred_dc_left_w8) - ipred_dc_left_tbl
.word L(ipred_dc_left_w4) - ipred_dc_left_tbl
endjumptable
// void ipred_dc_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
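// Illustrative sketch (assumption): the DC value averages both edges over
// width + height samples:
//   dc = (sum(top) + sum(left) + ((width + height) >> 1)) / (width + height);
// width + height is only a power of two when width == height; otherwise the
// code below first shifts right by ctz(width + height) and removes the
// remaining factor of 3 or 5 with a fixed-point reciprocal multiply:
//   0xAAAB ~= 2^17/3 and 0x6667 ~= 2^17/5, each followed by >> 17.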
function ipred_dc_16bpc_neon, export=1
sub x2, x2, w4, uxtw #1
add w7, w3, w4 // width + height
clz w3, w3
clz w6, w4
dup v16.4s, w7 // width + height
movrel x5, ipred_dc_tbl
rbit w7, w7 // rbit(width + height)
sub w3, w3, #20 // 25 leading bits, minus table offset 5
sub w6, w6, #25
clz w7, w7 // ctz(width + height)
ldrsw x3, [x5, w3, uxtw #2]
ldrsw x6, [x5, w6, uxtw #2]
neg w7, w7 // -ctz(width + height)
add x3, x5, x3
add x5, x5, x6
ushr v16.4s, v16.4s, #1 // (width + height) >> 1
dup v17.4s, w7 // -ctz(width + height)
add x6, x0, x1
lsl x1, x1, #1
br x5
L(ipred_dc_h4):
AARCH64_VALID_JUMP_TARGET
ld1 {v0.4h}, [x2], #8
uaddlv s0, v0.4h
add x2, x2, #2
br x3
L(ipred_dc_w4):
AARCH64_VALID_JUMP_TARGET
ld1 {v1.4h}, [x2]
add v0.2s, v0.2s, v16.2s
uaddlv s1, v1.4h
cmp w4, #4
add v0.2s, v0.2s, v1.2s
ushl v0.2s, v0.2s, v17.2s
b.eq 1f
// h = 8/16
cmp w4, #16
mov w16, #0x6667
mov w17, #0xAAAB
csel w16, w16, w17, eq
dup v16.2s, w16
mul v0.2s, v0.2s, v16.2s
ushr v0.2s, v0.2s, #17
1:
dup v0.4h, v0.h[0]
2:
st1 {v0.4h}, [x0], x1
st1 {v0.4h}, [x6], x1
subs w4, w4, #4
st1 {v0.4h}, [x0], x1
st1 {v0.4h}, [x6], x1
b.gt 2b
ret
L(ipred_dc_h8):
AARCH64_VALID_JUMP_TARGET
ld1 {v0.8h}, [x2], #16
uaddlv s0, v0.8h
add x2, x2, #2
br x3
L(ipred_dc_w8):
AARCH64_VALID_JUMP_TARGET
ld1 {v1.8h}, [x2]
add v0.2s, v0.2s, v16.2s
uaddlv s1, v1.8h
cmp w4, #8
add v0.2s, v0.2s, v1.2s
ushl v0.2s, v0.2s, v17.2s
b.eq 1f
// h = 4/16/32
cmp w4, #32
mov w16, #0x6667
mov w17, #0xAAAB
csel w16, w16, w17, eq
dup v16.2s, w16
mul v0.2s, v0.2s, v16.2s
ushr v0.2s, v0.2s, #17
1:
dup v0.8h, v0.h[0]
2:
st1 {v0.8h}, [x0], x1
st1 {v0.8h}, [x6], x1
subs w4, w4, #4
st1 {v0.8h}, [x0], x1
st1 {v0.8h}, [x6], x1
b.gt 2b
ret
L(ipred_dc_h16):
AARCH64_VALID_JUMP_TARGET
ld1 {v0.8h, v1.8h}, [x2], #32
addp v0.8h, v0.8h, v1.8h
add x2, x2, #2
uaddlv s0, v0.8h
br x3
L(ipred_dc_w16):
AARCH64_VALID_JUMP_TARGET
ld1 {v1.8h, v2.8h}, [x2]
add v0.2s, v0.2s, v16.2s
addp v1.8h, v1.8h, v2.8h
uaddlv s1, v1.8h
cmp w4, #16
add v0.2s, v0.2s, v1.2s
ushl v4.2s, v0.2s, v17.2s
b.eq 1f
// h = 4/8/32/64
tst w4, #(32+16+8) // 16 added only to form a valid consecutive-bitmask immediate; h==16 is handled by the b.eq above
mov w16, #0x6667
mov w17, #0xAAAB
csel w16, w16, w17, eq
dup v16.2s, w16
mul v4.2s, v4.2s, v16.2s
ushr v4.2s, v4.2s, #17
1:
dup v0.8h, v4.h[0]
dup v1.8h, v4.h[0]
2:
st1 {v0.8h, v1.8h}, [x0], x1
st1 {v0.8h, v1.8h}, [x6], x1
subs w4, w4, #4
st1 {v0.8h, v1.8h}, [x0], x1
st1 {v0.8h, v1.8h}, [x6], x1
b.gt 2b
ret
L(ipred_dc_h32):
AARCH64_VALID_JUMP_TARGET
ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x2], #64
addp v0.8h, v0.8h, v1.8h
addp v2.8h, v2.8h, v3.8h
addp v0.8h, v0.8h, v2.8h
add x2, x2, #2
uaddlv s0, v0.8h
br x3
L(ipred_dc_w32):
AARCH64_VALID_JUMP_TARGET
ld1 {v1.8h, v2.8h, v3.8h, v4.8h}, [x2]
add v0.2s, v0.2s, v16.2s
addp v1.8h, v1.8h, v2.8h
addp v3.8h, v3.8h, v4.8h
addp v1.8h, v1.8h, v3.8h
uaddlv s1, v1.8h
cmp w4, #32
add v0.2s, v0.2s, v1.2s
ushl v4.2s, v0.2s, v17.2s
b.eq 1f
// h = 8/16/64
cmp w4, #8
mov w16, #0x6667
mov w17, #0xAAAB
csel w16, w16, w17, eq
dup v16.2s, w16
mul v4.2s, v4.2s, v16.2s
ushr v4.2s, v4.2s, #17
1:
dup v0.8h, v4.h[0]
dup v1.8h, v4.h[0]
dup v2.8h, v4.h[0]
dup v3.8h, v4.h[0]
2:
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], x1
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x6], x1
subs w4, w4, #4
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], x1
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x6], x1
b.gt 2b
ret
L(ipred_dc_h64):
AARCH64_VALID_JUMP_TARGET
ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x2], #64
addp v0.8h, v0.8h, v1.8h
ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x2], #64
addp v2.8h, v2.8h, v3.8h
addp v4.8h, v4.8h, v5.8h
addp v6.8h, v6.8h, v7.8h
addp v0.8h, v0.8h, v2.8h
addp v4.8h, v4.8h, v6.8h
addp v0.8h, v0.8h, v4.8h
add x2, x2, #2
uaddlv s0, v0.8h
br x3
L(ipred_dc_w64):
AARCH64_VALID_JUMP_TARGET
ld1 {v1.8h, v2.8h, v3.8h, v4.8h}, [x2], #64
add v0.2s, v0.2s, v16.2s
addp v1.8h, v1.8h, v2.8h
ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [x2]
addp v3.8h, v3.8h, v4.8h
addp v20.8h, v20.8h, v21.8h
addp v22.8h, v22.8h, v23.8h
addp v1.8h, v1.8h, v3.8h
addp v20.8h, v20.8h, v22.8h
addp v1.8h, v1.8h, v20.8h
uaddlv s1, v1.8h
cmp w4, #64
add v0.2s, v0.2s, v1.2s
ushl v4.2s, v0.2s, v17.2s
b.eq 1f
// h = 16/32
cmp w4, #16
mov w16, #0x6667
mov w17, #0xAAAB
csel w16, w16, w17, eq
dup v16.2s, w16
mul v4.2s, v4.2s, v16.2s
ushr v4.2s, v4.2s, #17
1:
sub x1, x1, #64
dup v0.8h, v4.h[0]
dup v1.8h, v4.h[0]
dup v2.8h, v4.h[0]
dup v3.8h, v4.h[0]
2:
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x6], #64
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], x1
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x6], x1
subs w4, w4, #4
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x6], #64
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], x1
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x6], x1
b.gt 2b
ret
endfunc
jumptable ipred_dc_tbl
.word L(ipred_dc_h64) - ipred_dc_tbl
.word L(ipred_dc_h32) - ipred_dc_tbl
.word L(ipred_dc_h16) - ipred_dc_tbl
.word L(ipred_dc_h8) - ipred_dc_tbl
.word L(ipred_dc_h4) - ipred_dc_tbl
.word L(ipred_dc_w64) - ipred_dc_tbl
.word L(ipred_dc_w32) - ipred_dc_tbl
.word L(ipred_dc_w16) - ipred_dc_tbl
.word L(ipred_dc_w8) - ipred_dc_tbl
.word L(ipred_dc_w4) - ipred_dc_tbl
endjumptable
// void ipred_paeth_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
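// Illustrative sketch of the Paeth predictor (assumption, not the dav1d C
// code verbatim); the vector code below evaluates this for whole rows:
//   base   = left + top - topleft;
//   ldiff  = abs(left - base);
//   tdiff  = abs(top - base);
//   tldiff = abs(topleft - base);
//   pred   = (ldiff <= tdiff && ldiff <= tldiff) ? left
//          : (tdiff <= tldiff)                   ? top : topleft;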
function ipred_paeth_16bpc_neon, export=1
clz w9, w3
movrel x5, ipred_paeth_tbl
sub w9, w9, #25
ldrsw x9, [x5, w9, uxtw #2]
ld1r {v4.8h}, [x2]
add x8, x2, #2
sub x2, x2, #8
add x5, x5, x9
mov x7, #-8
add x6, x0, x1
lsl x1, x1, #1
br x5
40:
AARCH64_VALID_JUMP_TARGET
ld1r {v5.2d}, [x8]
sub v6.8h, v5.8h, v4.8h // top - topleft
4:
ld4r {v0.4h, v1.4h, v2.4h, v3.4h}, [x2], x7
zip1 v0.2d, v0.2d, v1.2d
zip1 v2.2d, v2.2d, v3.2d
add v16.8h, v6.8h, v0.8h // base
add v17.8h, v6.8h, v2.8h
sabd v20.8h, v5.8h, v16.8h // tdiff
sabd v21.8h, v5.8h, v17.8h
sabd v22.8h, v4.8h, v16.8h // tldiff
sabd v23.8h, v4.8h, v17.8h
sabd v16.8h, v0.8h, v16.8h // ldiff
sabd v17.8h, v2.8h, v17.8h
umin v18.8h, v20.8h, v22.8h // min(tdiff, tldiff)
umin v19.8h, v21.8h, v23.8h
cmge v20.8h, v22.8h, v20.8h // tldiff >= tdiff
cmge v21.8h, v23.8h, v21.8h
cmge v16.8h, v18.8h, v16.8h // min(tdiff, tldiff) >= ldiff
cmge v17.8h, v19.8h, v17.8h
bsl v21.16b, v5.16b, v4.16b // tdiff <= tldiff ? top : topleft
bsl v20.16b, v5.16b, v4.16b
bit v21.16b, v2.16b, v17.16b // ldiff <= min ? left : ...
bit v20.16b, v0.16b, v16.16b
st1 {v21.d}[1], [x0], x1
st1 {v21.d}[0], [x6], x1
subs w4, w4, #4
st1 {v20.d}[1], [x0], x1
st1 {v20.d}[0], [x6], x1
b.gt 4b
ret
80:
160:
320:
640:
AARCH64_VALID_JUMP_TARGET
ld1 {v5.8h}, [x8], #16
mov w9, w3
// Set up pointers for four rows in parallel; x0, x6, x5, x10
add x5, x0, x1
add x10, x6, x1
lsl x1, x1, #1
sub x1, x1, w3, uxtw #1
1:
ld4r {v0.8h, v1.8h, v2.8h, v3.8h}, [x2], x7
2:
sub v6.8h, v5.8h, v4.8h // top - topleft
add v16.8h, v6.8h, v0.8h // base
add v17.8h, v6.8h, v1.8h
add v18.8h, v6.8h, v2.8h
add v19.8h, v6.8h, v3.8h
sabd v20.8h, v5.8h, v16.8h // tdiff
sabd v21.8h, v5.8h, v17.8h
sabd v22.8h, v5.8h, v18.8h
sabd v23.8h, v5.8h, v19.8h
sabd v24.8h, v4.8h, v16.8h // tldiff
sabd v25.8h, v4.8h, v17.8h
sabd v26.8h, v4.8h, v18.8h
sabd v27.8h, v4.8h, v19.8h
sabd v16.8h, v0.8h, v16.8h // ldiff
sabd v17.8h, v1.8h, v17.8h
sabd v18.8h, v2.8h, v18.8h
sabd v19.8h, v3.8h, v19.8h
umin v28.8h, v20.8h, v24.8h // min(tdiff, tldiff)
umin v29.8h, v21.8h, v25.8h
umin v30.8h, v22.8h, v26.8h
umin v31.8h, v23.8h, v27.8h
cmge v20.8h, v24.8h, v20.8h // tldiff >= tdiff
cmge v21.8h, v25.8h, v21.8h
cmge v22.8h, v26.8h, v22.8h
cmge v23.8h, v27.8h, v23.8h
cmge v16.8h, v28.8h, v16.8h // min(tdiff, tldiff) >= ldiff
cmge v17.8h, v29.8h, v17.8h
cmge v18.8h, v30.8h, v18.8h
cmge v19.8h, v31.8h, v19.8h
bsl v23.16b, v5.16b, v4.16b // tdiff <= tldiff ? top : topleft
bsl v22.16b, v5.16b, v4.16b
bsl v21.16b, v5.16b, v4.16b
bsl v20.16b, v5.16b, v4.16b
bit v23.16b, v3.16b, v19.16b // ldiff <= min ? left : ...
bit v22.16b, v2.16b, v18.16b
bit v21.16b, v1.16b, v17.16b
bit v20.16b, v0.16b, v16.16b
st1 {v23.8h}, [x0], #16
st1 {v22.8h}, [x6], #16
subs w3, w3, #8
st1 {v21.8h}, [x5], #16
st1 {v20.8h}, [x10], #16
b.le 8f
ld1 {v5.8h}, [x8], #16
b 2b
8:
subs w4, w4, #4
b.le 9f
// End of horizontal loop, move pointers to next four rows
sub x8, x8, w9, uxtw #1
add x0, x0, x1
add x6, x6, x1
// Load the top row as early as possible
ld1 {v5.8h}, [x8], #16
add x5, x5, x1
add x10, x10, x1
mov w3, w9
b 1b
9:
ret
endfunc
jumptable ipred_paeth_tbl
.word 640b - ipred_paeth_tbl
.word 320b - ipred_paeth_tbl
.word 160b - ipred_paeth_tbl
.word 80b - ipred_paeth_tbl
.word 40b - ipred_paeth_tbl
endjumptable
// void ipred_smooth_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
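// Illustrative sketch (assumption): SMOOTH blends the top and left edges
// towards the bottom-left and top-right corner pixels using sm_weights[];
// the code below uses the rearranged form
//   pred = ((left[y] - right) * weights_hor[x]
//         + (top[x] - bottom) * weights_ver[y]
//         + 256 * (right + bottom) + 256) >> 9;
// which matches the usual four-product weighted sum but needs only two
// multiplies per pixel.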
function ipred_smooth_16bpc_neon, export=1
movrel x10, X(sm_weights)
add x11, x10, w4, uxtw
add x10, x10, w3, uxtw
clz w9, w3
movrel x5, ipred_smooth_tbl
sub x12, x2, w4, uxtw #1
sub w9, w9, #25
ldrsw x9, [x5, w9, uxtw #2]
ld1r {v4.8h}, [x12] // bottom
add x8, x2, #2
add x5, x5, x9
add x6, x0, x1
lsl x1, x1, #1
br x5
40:
AARCH64_VALID_JUMP_TARGET
ld1r {v6.2d}, [x8] // top
ld1r {v7.2s}, [x10] // weights_hor
sub x2, x2, #8
mov x7, #-8
dup v5.8h, v6.h[3] // right
sub v6.8h, v6.8h, v4.8h // top-bottom
uxtl v7.8h, v7.8b // weights_hor
add v31.4h, v4.4h, v5.4h // bottom+right
4:
ld4r {v0.4h, v1.4h, v2.4h, v3.4h}, [x2], x7 // left
ld4r {v16.8b, v17.8b, v18.8b, v19.8b}, [x11], #4 // weights_ver
ushll v20.4s, v31.4h, #8 // (bottom+right)*256
ushll v21.4s, v31.4h, #8
ushll v22.4s, v31.4h, #8
ushll v23.4s, v31.4h, #8
zip1 v1.2d, v1.2d, v0.2d // left, flipped
zip1 v0.2d, v3.2d, v2.2d
zip1 v16.2s, v16.2s, v17.2s // weights_ver
zip1 v18.2s, v18.2s, v19.2s
sub v0.8h, v0.8h, v5.8h // left-right
sub v1.8h, v1.8h, v5.8h
uxtl v16.8h, v16.8b // weights_ver
uxtl v18.8h, v18.8b
smlal v20.4s, v0.4h, v7.4h // += (left-right)*weights_hor
smlal2 v21.4s, v0.8h, v7.8h
smlal v22.4s, v1.4h, v7.4h
smlal2 v23.4s, v1.8h, v7.8h
smlal v20.4s, v6.4h, v16.4h // += (top-bottom)*weights_ver
smlal2 v21.4s, v6.8h, v16.8h
smlal v22.4s, v6.4h, v18.4h
smlal2 v23.4s, v6.8h, v18.8h
rshrn v20.4h, v20.4s, #9
rshrn v21.4h, v21.4s, #9
rshrn v22.4h, v22.4s, #9
rshrn v23.4h, v23.4s, #9
st1 {v20.4h}, [x0], x1
st1 {v21.4h}, [x6], x1
subs w4, w4, #4
st1 {v22.4h}, [x0], x1
st1 {v23.4h}, [x6], x1
b.gt 4b
ret
80:
AARCH64_VALID_JUMP_TARGET
ld1 {v6.8h}, [x8] // top
ld1 {v7.8b}, [x10] // weights_hor
sub x2, x2, #8
mov x7, #-8
dup v5.8h, v6.h[7] // right
sub v6.8h, v6.8h, v4.8h // top-bottom
uxtl v7.8h, v7.8b // weights_hor
add v31.4h, v4.4h, v5.4h // bottom+right
8:
ld4r {v0.8h, v1.8h, v2.8h, v3.8h}, [x2], x7 // left
ld4r {v16.8b, v17.8b, v18.8b, v19.8b}, [x11], #4 // weights_ver
ushll v20.4s, v31.4h, #8 // (bottom+right)*256
ushll v21.4s, v31.4h, #8
ushll v22.4s, v31.4h, #8
ushll v23.4s, v31.4h, #8
ushll v24.4s, v31.4h, #8
ushll v25.4s, v31.4h, #8
ushll v26.4s, v31.4h, #8
ushll v27.4s, v31.4h, #8
sub v0.8h, v0.8h, v5.8h // left-right
sub v1.8h, v1.8h, v5.8h
sub v2.8h, v2.8h, v5.8h
sub v3.8h, v3.8h, v5.8h
uxtl v16.8h, v16.8b // weights_ver
uxtl v17.8h, v17.8b
uxtl v18.8h, v18.8b
uxtl v19.8h, v19.8b
smlal v20.4s, v3.4h, v7.4h // += (left-right)*weights_hor
smlal2 v21.4s, v3.8h, v7.8h // (left flipped)
smlal v22.4s, v2.4h, v7.4h
smlal2 v23.4s, v2.8h, v7.8h
smlal v24.4s, v1.4h, v7.4h
smlal2 v25.4s, v1.8h, v7.8h
smlal v26.4s, v0.4h, v7.4h
smlal2 v27.4s, v0.8h, v7.8h
smlal v20.4s, v6.4h, v16.4h // += (top-bottom)*weights_ver
smlal2 v21.4s, v6.8h, v16.8h
smlal v22.4s, v6.4h, v17.4h
smlal2 v23.4s, v6.8h, v17.8h
smlal v24.4s, v6.4h, v18.4h
smlal2 v25.4s, v6.8h, v18.8h
smlal v26.4s, v6.4h, v19.4h
smlal2 v27.4s, v6.8h, v19.8h
rshrn v20.4h, v20.4s, #9
rshrn2 v20.8h, v21.4s, #9
rshrn v21.4h, v22.4s, #9
rshrn2 v21.8h, v23.4s, #9
rshrn v22.4h, v24.4s, #9
rshrn2 v22.8h, v25.4s, #9
rshrn v23.4h, v26.4s, #9
rshrn2 v23.8h, v27.4s, #9
st1 {v20.8h}, [x0], x1
st1 {v21.8h}, [x6], x1
subs w4, w4, #4
st1 {v22.8h}, [x0], x1
st1 {v23.8h}, [x6], x1
b.gt 8b
ret
160:
320:
640:
AARCH64_VALID_JUMP_TARGET
add x12, x2, w3, uxtw #1
sub x1, x1, w3, uxtw #1
ld1r {v5.8h}, [x12] // right
sub x2, x2, #4
mov x7, #-4
mov w9, w3
add v31.4h, v4.4h, v5.4h // bottom+right
1:
ld2r {v0.8h, v1.8h}, [x2], x7 // left
ld2r {v16.8b, v17.8b}, [x11], #2 // weights_ver
sub v0.8h, v0.8h, v5.8h // left-right
sub v1.8h, v1.8h, v5.8h
uxtl v16.8h, v16.8b // weights_ver
uxtl v17.8h, v17.8b
2:
ld1 {v7.16b}, [x10], #16 // weights_hor
ld1 {v2.8h, v3.8h}, [x8], #32 // top
ushll v20.4s, v31.4h, #8 // (bottom+right)*256
ushll v21.4s, v31.4h, #8
ushll v22.4s, v31.4h, #8
ushll v23.4s, v31.4h, #8
ushll v24.4s, v31.4h, #8
ushll v25.4s, v31.4h, #8
ushll v26.4s, v31.4h, #8
ushll v27.4s, v31.4h, #8
uxtl v6.8h, v7.8b // weights_hor
uxtl2 v7.8h, v7.16b
sub v2.8h, v2.8h, v4.8h // top-bottom
sub v3.8h, v3.8h, v4.8h
smlal v20.4s, v1.4h, v6.4h // += (left-right)*weights_hor
smlal2 v21.4s, v1.8h, v6.8h // (left flipped)
smlal v22.4s, v1.4h, v7.4h
smlal2 v23.4s, v1.8h, v7.8h
smlal v24.4s, v0.4h, v6.4h
smlal2 v25.4s, v0.8h, v6.8h
smlal v26.4s, v0.4h, v7.4h
smlal2 v27.4s, v0.8h, v7.8h
smlal v20.4s, v2.4h, v16.4h // += (top-bottom)*weights_ver
smlal2 v21.4s, v2.8h, v16.8h
smlal v22.4s, v3.4h, v16.4h
smlal2 v23.4s, v3.8h, v16.8h
smlal v24.4s, v2.4h, v17.4h
smlal2 v25.4s, v2.8h, v17.8h
smlal v26.4s, v3.4h, v17.4h
smlal2 v27.4s, v3.8h, v17.8h
rshrn v20.4h, v20.4s, #9
rshrn2 v20.8h, v21.4s, #9
rshrn v21.4h, v22.4s, #9
rshrn2 v21.8h, v23.4s, #9
rshrn v22.4h, v24.4s, #9
rshrn2 v22.8h, v25.4s, #9
rshrn v23.4h, v26.4s, #9
rshrn2 v23.8h, v27.4s, #9
subs w3, w3, #16
st1 {v20.8h, v21.8h}, [x0], #32
st1 {v22.8h, v23.8h}, [x6], #32
b.gt 2b
subs w4, w4, #2
b.le 9f
sub x8, x8, w9, uxtw #1
sub x10, x10, w9, uxtw
add x0, x0, x1
add x6, x6, x1
mov w3, w9
b 1b
9:
ret
endfunc
jumptable ipred_smooth_tbl
.word 640b - ipred_smooth_tbl
.word 320b - ipred_smooth_tbl
.word 160b - ipred_smooth_tbl
.word 80b - ipred_smooth_tbl
.word 40b - ipred_smooth_tbl
endjumptable
// void ipred_smooth_v_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
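// Illustrative sketch (assumption): SMOOTH_V interpolates each column
// between the top edge and the bottom-left pixel:
//   pred = bottom + (((top[x] - bottom) * weights_ver[y] + 128) >> 8);
// The sqrdmulh below computes exactly this: with the weight pre-shifted left
// by 7, sqrdmulh(diff, w << 7) == (diff * w + 128) >> 8.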
function ipred_smooth_v_16bpc_neon, export=1
movrel x7, X(sm_weights)
add x7, x7, w4, uxtw
clz w9, w3
movrel x5, ipred_smooth_v_tbl
sub x8, x2, w4, uxtw #1
sub w9, w9, #25
ldrsw x9, [x5, w9, uxtw #2]
ld1r {v4.8h}, [x8] // bottom
add x2, x2, #2
add x5, x5, x9
add x6, x0, x1
lsl x1, x1, #1
br x5
40:
AARCH64_VALID_JUMP_TARGET
ld1r {v6.2d}, [x2] // top
sub v6.8h, v6.8h, v4.8h // top-bottom
4:
ld4r {v16.8b, v17.8b, v18.8b, v19.8b}, [x7], #4 // weights_ver
zip1 v16.2s, v16.2s, v17.2s // weights_ver
zip1 v18.2s, v18.2s, v19.2s
ushll v16.8h, v16.8b, #7 // weights_ver << 7
ushll v18.8h, v18.8b, #7
sqrdmulh v20.8h, v6.8h, v16.8h // ((top-bottom)*weights_ver + 128) >> 8
sqrdmulh v21.8h, v6.8h, v18.8h
add v20.8h, v20.8h, v4.8h
add v21.8h, v21.8h, v4.8h
st1 {v20.d}[0], [x0], x1
st1 {v20.d}[1], [x6], x1
subs w4, w4, #4
st1 {v21.d}[0], [x0], x1
st1 {v21.d}[1], [x6], x1
b.gt 4b
ret
80:
AARCH64_VALID_JUMP_TARGET
ld1 {v6.8h}, [x2] // top
sub v6.8h, v6.8h, v4.8h // top-bottom
8:
ld4r {v16.8b, v17.8b, v18.8b, v19.8b}, [x7], #4 // weights_ver
ushll v16.8h, v16.8b, #7 // weights_ver << 7
ushll v17.8h, v17.8b, #7
ushll v18.8h, v18.8b, #7
ushll v19.8h, v19.8b, #7
sqrdmulh v20.8h, v6.8h, v16.8h // ((top-bottom)*weights_ver + 128) >> 8
sqrdmulh v21.8h, v6.8h, v17.8h
sqrdmulh v22.8h, v6.8h, v18.8h
sqrdmulh v23.8h, v6.8h, v19.8h
add v20.8h, v20.8h, v4.8h
add v21.8h, v21.8h, v4.8h
add v22.8h, v22.8h, v4.8h
add v23.8h, v23.8h, v4.8h
st1 {v20.8h}, [x0], x1
st1 {v21.8h}, [x6], x1
subs w4, w4, #4
st1 {v22.8h}, [x0], x1
st1 {v23.8h}, [x6], x1
b.gt 8b
ret
160:
320:
640:
AARCH64_VALID_JUMP_TARGET
// Set up pointers for four rows in parallel; x0, x6, x5, x8
add x5, x0, x1
add x8, x6, x1
lsl x1, x1, #1
sub x1, x1, w3, uxtw #1
mov w9, w3
1:
ld4r {v16.8b, v17.8b, v18.8b, v19.8b}, [x7], #4 // weights_ver
ushll v16.8h, v16.8b, #7 // weights_ver << 7
ushll v17.8h, v17.8b, #7
ushll v18.8h, v18.8b, #7
ushll v19.8h, v19.8b, #7
2:
ld1 {v2.8h, v3.8h}, [x2], #32 // top
sub v2.8h, v2.8h, v4.8h // top-bottom
sub v3.8h, v3.8h, v4.8h
sqrdmulh v20.8h, v2.8h, v16.8h // ((top-bottom)*weights_ver + 128) >> 8
sqrdmulh v21.8h, v3.8h, v16.8h
sqrdmulh v22.8h, v2.8h, v17.8h
sqrdmulh v23.8h, v3.8h, v17.8h
sqrdmulh v24.8h, v2.8h, v18.8h
sqrdmulh v25.8h, v3.8h, v18.8h
sqrdmulh v26.8h, v2.8h, v19.8h
sqrdmulh v27.8h, v3.8h, v19.8h
add v20.8h, v20.8h, v4.8h
add v21.8h, v21.8h, v4.8h
add v22.8h, v22.8h, v4.8h
add v23.8h, v23.8h, v4.8h
add v24.8h, v24.8h, v4.8h
add v25.8h, v25.8h, v4.8h
add v26.8h, v26.8h, v4.8h
add v27.8h, v27.8h, v4.8h
subs w3, w3, #16
st1 {v20.8h, v21.8h}, [x0], #32
st1 {v22.8h, v23.8h}, [x6], #32
st1 {v24.8h, v25.8h}, [x5], #32
st1 {v26.8h, v27.8h}, [x8], #32
b.gt 2b
subs w4, w4, #4
b.le 9f
sub x2, x2, w9, uxtw #1
add x0, x0, x1
add x6, x6, x1
add x5, x5, x1
add x8, x8, x1
mov w3, w9
b 1b
9:
ret
endfunc
jumptable ipred_smooth_v_tbl
.word 640b - ipred_smooth_v_tbl
.word 320b - ipred_smooth_v_tbl
.word 160b - ipred_smooth_v_tbl
.word 80b - ipred_smooth_v_tbl
.word 40b - ipred_smooth_v_tbl
endjumptable
// void ipred_smooth_h_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
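// Illustrative sketch (assumption): SMOOTH_H is the transposed counterpart
// of SMOOTH_V, interpolating each row between the left edge and the
// top-right pixel, with the same sqrdmulh trick:
//   pred = right + (((left[y] - right) * weights_hor[x] + 128) >> 8);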
function ipred_smooth_h_16bpc_neon, export=1
movrel x8, X(sm_weights)
add x8, x8, w3, uxtw
clz w9, w3
movrel x5, ipred_smooth_h_tbl
add x12, x2, w3, uxtw #1
sub w9, w9, #25
ldrsw x9, [x5, w9, uxtw #2]
ld1r {v5.8h}, [x12] // right
add x5, x5, x9
add x6, x0, x1
lsl x1, x1, #1
br x5
40:
AARCH64_VALID_JUMP_TARGET
ld1r {v7.2s}, [x8] // weights_hor
sub x2, x2, #8
mov x7, #-8
ushll v7.8h, v7.8b, #7 // weights_hor << 7
4:
ld4r {v0.4h, v1.4h, v2.4h, v3.4h}, [x2], x7 // left
zip1 v1.2d, v1.2d, v0.2d // left, flipped
zip1 v0.2d, v3.2d, v2.2d
sub v0.8h, v0.8h, v5.8h // left-right
sub v1.8h, v1.8h, v5.8h
sqrdmulh v20.8h, v0.8h, v7.8h // ((left-right)*weights_hor + 128) >> 8
sqrdmulh v21.8h, v1.8h, v7.8h
add v20.8h, v20.8h, v5.8h
add v21.8h, v21.8h, v5.8h
st1 {v20.d}[0], [x0], x1
st1 {v20.d}[1], [x6], x1
subs w4, w4, #4
st1 {v21.d}[0], [x0], x1
st1 {v21.d}[1], [x6], x1
b.gt 4b
ret
80:
AARCH64_VALID_JUMP_TARGET
ld1 {v7.8b}, [x8] // weights_hor
sub x2, x2, #8
mov x7, #-8
ushll v7.8h, v7.8b, #7 // weights_hor << 7
8:
ld4r {v0.8h, v1.8h, v2.8h, v3.8h}, [x2], x7 // left
sub v3.8h, v3.8h, v5.8h // left-right
sub v2.8h, v2.8h, v5.8h
sub v1.8h, v1.8h, v5.8h
sub v0.8h, v0.8h, v5.8h
sqrdmulh v20.8h, v3.8h, v7.8h // ((left-right)*weights_hor + 128) >> 8
sqrdmulh v21.8h, v2.8h, v7.8h // (left flipped)
sqrdmulh v22.8h, v1.8h, v7.8h
sqrdmulh v23.8h, v0.8h, v7.8h
add v20.8h, v20.8h, v5.8h
add v21.8h, v21.8h, v5.8h
add v22.8h, v22.8h, v5.8h
add v23.8h, v23.8h, v5.8h
st1 {v20.8h}, [x0], x1
st1 {v21.8h}, [x6], x1
subs w4, w4, #4
st1 {v22.8h}, [x0], x1
st1 {v23.8h}, [x6], x1
b.gt 8b
ret
160:
320:
640:
AARCH64_VALID_JUMP_TARGET
sub x2, x2, #8
mov x7, #-8
// Set up pointers for four rows in parallel; x0, x6, x5, x10
add x5, x0, x1
add x10, x6, x1
lsl x1, x1, #1
sub x1, x1, w3, uxtw #1
mov w9, w3
1:
ld4r {v0.8h, v1.8h, v2.8h, v3.8h}, [x2], x7 // left
sub v0.8h, v0.8h, v5.8h // left-right
sub v1.8h, v1.8h, v5.8h
sub v2.8h, v2.8h, v5.8h
sub v3.8h, v3.8h, v5.8h
2:
ld1 {v7.16b}, [x8], #16 // weights_hor
ushll v6.8h, v7.8b, #7 // weights_hor << 7
ushll2 v7.8h, v7.16b, #7
sqrdmulh v20.8h, v3.8h, v6.8h // ((left-right)*weights_hor + 128) >> 8
sqrdmulh v21.8h, v3.8h, v7.8h // (left flipped)
sqrdmulh v22.8h, v2.8h, v6.8h
sqrdmulh v23.8h, v2.8h, v7.8h
sqrdmulh v24.8h, v1.8h, v6.8h
sqrdmulh v25.8h, v1.8h, v7.8h
sqrdmulh v26.8h, v0.8h, v6.8h
sqrdmulh v27.8h, v0.8h, v7.8h
add v20.8h, v20.8h, v5.8h
add v21.8h, v21.8h, v5.8h
add v22.8h, v22.8h, v5.8h
add v23.8h, v23.8h, v5.8h
add v24.8h, v24.8h, v5.8h
add v25.8h, v25.8h, v5.8h
add v26.8h, v26.8h, v5.8h
add v27.8h, v27.8h, v5.8h
subs w3, w3, #16
st1 {v20.8h, v21.8h}, [x0], #32
st1 {v22.8h, v23.8h}, [x6], #32
st1 {v24.8h, v25.8h}, [x5], #32
st1 {v26.8h, v27.8h}, [x10], #32
b.gt 2b
subs w4, w4, #4
b.le 9f
sub x8, x8, w9, uxtw
add x0, x0, x1
add x6, x6, x1
add x5, x5, x1
add x10, x10, x1
mov w3, w9
b 1b
9:
ret
endfunc
jumptable ipred_smooth_h_tbl
.word 640b - ipred_smooth_h_tbl
.word 320b - ipred_smooth_h_tbl
.word 160b - ipred_smooth_h_tbl
.word 80b - ipred_smooth_h_tbl
.word 40b - ipred_smooth_h_tbl
endjumptable
const padding_mask_buf
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
padding_mask:
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
endconst
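// Explanatory note (inferred from the callers below): loading 16-bit lanes
// from padding_mask - 2*n yields a mask whose first n lanes are 0x0000 and
// whose remaining lanes are 0xffff, since the address lands n elements
// before the all-ones block. A following bit/bsl with the replicated padding
// pixel then keeps edge elements [0, n) and replaces everything from index n
// onwards with the padding value.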
// void ipred_z1_upsample_edge_16bpc_neon(pixel *out, const int hsz,
// const pixel *const in, const int end,
// const int bitdepth_max);
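// Illustrative sketch (assumption): edge upsampling doubles the number of
// edge samples; even outputs copy the input and odd outputs come from a
// -1,9,9,-1 interpolation, clipped to the valid pixel range:
//   out[2*i]     = in[i];
//   out[2*i + 1] = clip((9*(in[i] + in[i+1]) - in[i-1] - in[i+2] + 8) >> 4,
//                       0, bitdepth_max);
// with reads past in[end] clamped to in[end] via padding_mask above.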
function ipred_z1_upsample_edge_16bpc_neon, export=1
dup v30.8h, w4 // bitdepth_max
movrel x4, padding_mask
ld1 {v0.8h, v1.8h}, [x2] // in[]
add x5, x2, w3, uxtw #1 // in[end]
sub x4, x4, w3, uxtw #1
ld1r {v2.8h}, [x5] // padding
ld1 {v3.8h, v4.8h}, [x4] // padding_mask
movi v31.8h, #9
bit v0.16b, v2.16b, v3.16b // padded in[]
bit v1.16b, v2.16b, v4.16b
ext v4.16b, v0.16b, v1.16b, #2
ext v5.16b, v1.16b, v2.16b, #2
ext v6.16b, v0.16b, v1.16b, #4
ext v7.16b, v1.16b, v2.16b, #4
ext v16.16b, v0.16b, v1.16b, #6
ext v17.16b, v1.16b, v2.16b, #6
add v18.8h, v4.8h, v6.8h // in[i+1] + in[i+2]
add v19.8h, v5.8h, v7.8h
add v20.8h, v0.8h, v16.8h
add v21.8h, v1.8h, v17.8h
umull v22.4s, v18.4h, v31.4h // 9*(in[i+1] + in[i+2])
umull2 v23.4s, v18.8h, v31.8h
umull v24.4s, v19.4h, v31.4h
umull2 v25.4s, v19.8h, v31.8h
usubw v22.4s, v22.4s, v20.4h
usubw2 v23.4s, v23.4s, v20.8h
usubw v24.4s, v24.4s, v21.4h
usubw2 v25.4s, v25.4s, v21.8h
sqrshrun v16.4h, v22.4s, #4
sqrshrun2 v16.8h, v23.4s, #4
sqrshrun v17.4h, v24.4s, #4
sqrshrun2 v17.8h, v25.4s, #4
smin v16.8h, v16.8h, v30.8h
smin v17.8h, v17.8h, v30.8h
zip1 v0.8h, v4.8h, v16.8h
zip2 v1.8h, v4.8h, v16.8h
zip1 v2.8h, v5.8h, v17.8h
zip2 v3.8h, v5.8h, v17.8h
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0]
ret
endfunc
// void ipred_z2_upsample_edge_16bpc_neon(pixel *out, const int sz,
// const pixel *const in,
// const int bitdepth_max);
function ipred_z2_upsample_edge_16bpc_neon, export=1
dup v30.8h, w3 // bitdepth_max
// Here, sz is 4 or 8, and we produce 2*sz+1 output elements.
movrel x4, padding_mask
ld1 {v0.8h, v1.8h}, [x2] // in[]
add x5, x2, w1, uxtw #1 // in[sz]
sub x4, x4, w1, uxtw #1
ld1r {v3.8h}, [x2] // in[0] for padding
ld1r {v2.8h}, [x5] // padding
ld1 {v4.8h, v5.8h}, [x4] // padding_mask
movi v31.8h, #9
bit v0.16b, v2.16b, v4.16b // padded in[]
bit v1.16b, v2.16b, v5.16b
ext v4.16b, v3.16b, v0.16b, #14
ext v5.16b, v0.16b, v1.16b, #2
ext v6.16b, v0.16b, v1.16b, #4
add v16.8h, v0.8h, v5.8h // in[i+0] + in[i+1]
add v17.8h, v4.8h, v6.8h // in[i-1] + in[i+2]
umull v18.4s, v16.4h, v31.4h // 9*(in[i+0] + in[i+1])
umull2 v19.4s, v16.8h, v31.8h
usubw v18.4s, v18.4s, v17.4h
usubw2 v19.4s, v19.4s, v17.8h
sqrshrun v16.4h, v18.4s, #4
sqrshrun2 v16.8h, v19.4s, #4
add x5, x0, #2*16
smin v16.8h, v16.8h, v30.8h
zip1 v4.8h, v0.8h, v16.8h
zip2 v5.8h, v0.8h, v16.8h
// In case sz=8, output one single pixel in out[16].
st1 {v2.h}[0], [x5]
st1 {v4.8h, v5.8h}, [x0]
ret
endfunc
const edge_filter
.short 0, 4, 8, 0
.short 0, 5, 6, 0
// Leaving out the coeffs for strength=3
// .byte 2, 4, 4, 0
endconst
// void ipred_z1_filter_edge_16bpc_neon(pixel *out, const int sz,
// const pixel *const in, const int end,
// const int strength);
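// Illustrative sketch (assumption): the edge filter smooths the prediction
// edge with a symmetric kernel selected by strength, rounded and normalized
// by >> 4:
//   strength 1: (4, 8, 4) / 16       (three taps, from edge_filter above)
//   strength 2: (5, 6, 5) / 16
//   strength 3: (2, 4, 4, 4, 2) / 16 (the L(fivetap) path)
// Reads past in[end] are clamped to in[end] via padding_mask.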
function ipred_z1_filter_edge_16bpc_neon, export=1
cmp w4, #3
b.eq L(fivetap) // if (strength == 3) goto fivetap
movrel x5, edge_filter, -6
add x5, x5, w4, uxtw #3 // edge_filter + 2*((strength - 1)*4 + 1)
ld1 {v31.s}[0], [x5] // kernel[1-2]
ld1 {v0.8h}, [x2], #16
dup v30.8h, v31.h[0]
dup v31.8h, v31.h[1]
1:
// in[end] is the last valid pixel. We produce 16 pixels out by
// using 18 pixels in - the last pixel used is [17] of the ones
// read/buffered.
cmp w3, #17
ld1 {v1.8h, v2.8h}, [x2], #32
b.lt 2f
ext v3.16b, v0.16b, v1.16b, #2
ext v4.16b, v1.16b, v2.16b, #2
ext v5.16b, v0.16b, v1.16b, #4
ext v6.16b, v1.16b, v2.16b, #4
mul v16.8h, v0.8h, v30.8h
mla v16.8h, v3.8h, v31.8h
mla v16.8h, v5.8h, v30.8h
mul v17.8h, v1.8h, v30.8h
mla v17.8h, v4.8h, v31.8h
mla v17.8h, v6.8h, v30.8h
subs w1, w1, #16
mov v0.16b, v2.16b
urshr v16.8h, v16.8h, #4
urshr v17.8h, v17.8h, #4
sub w3, w3, #16
st1 {v16.8h, v17.8h}, [x0], #32
b.gt 1b
ret
2:
// Right padding
// x2[w3-24] is the padding pixel (x2 points 24 pixels ahead)
movrel x5, padding_mask
sub w6, w3, #24
sub x5, x5, w3, uxtw #1
add x6, x2, w6, sxtw #1
ld1 {v3.8h, v4.8h}, [x5] // padding_mask
ld1r {v2.8h}, [x6]
bit v0.16b, v2.16b, v3.16b // Pad v0-v1
bit v1.16b, v2.16b, v4.16b
// Filter one block
ext v3.16b, v0.16b, v1.16b, #2
ext v4.16b, v1.16b, v2.16b, #2
ext v5.16b, v0.16b, v1.16b, #4
ext v6.16b, v1.16b, v2.16b, #4
mul v16.8h, v0.8h, v30.8h
mla v16.8h, v3.8h, v31.8h
mla v16.8h, v5.8h, v30.8h
mul v17.8h, v1.8h, v30.8h
mla v17.8h, v4.8h, v31.8h
mla v17.8h, v6.8h, v30.8h
subs w1, w1, #16
urshr v16.8h, v16.8h, #4
urshr v17.8h, v17.8h, #4
st1 {v16.8h, v17.8h}, [x0], #32
b.le 9f
5:
// After one block, any remaining output would only be filtering
// padding - thus just store the padding.
subs w1, w1, #16
st1 {v2.16b}, [x0], #16
b.gt 5b
9:
ret
L(fivetap):
sub x2, x2, #2 // topleft -= 1 pixel
movi v29.8h, #2
ld1 {v0.8h}, [x2], #16
movi v30.8h, #4
movi v31.8h, #4
ins v0.h[0], v0.h[1]
1:
// in[end+1] is the last valid pixel. We produce 16 pixels out by
// using 20 pixels in - the last pixel used is [19] of the ones
// read/buffered.
cmp w3, #18
ld1 {v1.8h, v2.8h}, [x2], #32
b.lt 2f // if (end + 1 < 19)
ext v3.16b, v0.16b, v1.16b, #2
ext v4.16b, v1.16b, v2.16b, #2
ext v5.16b, v0.16b, v1.16b, #4
ext v6.16b, v1.16b, v2.16b, #4
ext v16.16b, v0.16b, v1.16b, #6
ext v17.16b, v1.16b, v2.16b, #6
ext v18.16b, v0.16b, v1.16b, #8
ext v19.16b, v1.16b, v2.16b, #8
mul v20.8h, v0.8h, v29.8h
mla v20.8h, v3.8h, v30.8h
mla v20.8h, v5.8h, v31.8h
mla v20.8h, v16.8h, v30.8h
mla v20.8h, v18.8h, v29.8h
mul v21.8h, v1.8h, v29.8h
mla v21.8h, v4.8h, v30.8h
mla v21.8h, v6.8h, v31.8h
mla v21.8h, v17.8h, v30.8h
mla v21.8h, v19.8h, v29.8h
subs w1, w1, #16
mov v0.16b, v2.16b
urshr v20.8h, v20.8h, #4
urshr v21.8h, v21.8h, #4
sub w3, w3, #16
st1 {v20.8h, v21.8h}, [x0], #32
b.gt 1b
ret
2:
// Right padding
// x2[w3+1-24] is the padding pixel (x2 points 24 pixels ahead)
movrel x5, padding_mask, -2
sub w6, w3, #23
sub x5, x5, w3, uxtw #1
add x6, x2, w6, sxtw #1
ld1 {v3.8h, v4.8h, v5.8h}, [x5] // padding_mask
ld1r {v28.8h}, [x6]
bit v0.16b, v28.16b, v3.16b // Pad v0-v2
bit v1.16b, v28.16b, v4.16b
bit v2.16b, v28.16b, v5.16b
4:
// Filter one block
ext v3.16b, v0.16b, v1.16b, #2
ext v4.16b, v1.16b, v2.16b, #2
ext v5.16b, v0.16b, v1.16b, #4
ext v6.16b, v1.16b, v2.16b, #4
ext v16.16b, v0.16b, v1.16b, #6
ext v17.16b, v1.16b, v2.16b, #6
ext v18.16b, v0.16b, v1.16b, #8
ext v19.16b, v1.16b, v2.16b, #8
mul v20.8h, v0.8h, v29.8h
mla v20.8h, v3.8h, v30.8h
mla v20.8h, v5.8h, v31.8h
mla v20.8h, v16.8h, v30.8h
mla v20.8h, v18.8h, v29.8h
mul v21.8h, v1.8h, v29.8h
mla v21.8h, v4.8h, v30.8h
mla v21.8h, v6.8h, v31.8h
mla v21.8h, v17.8h, v30.8h
mla v21.8h, v19.8h, v29.8h
subs w1, w1, #16
mov v0.16b, v2.16b
mov v1.16b, v28.16b
mov v2.16b, v28.16b
urshr v20.8h, v20.8h, #4
urshr v21.8h, v21.8h, #4
sub w3, w3, #16
st1 {v20.8h, v21.8h}, [x0], #32
b.le 9f
// v0-v1[w3+1] is the last valid pixel; if (w3 + 1 > 0) we need to
// filter properly once more - aka (w3 >= 0).
cmp w3, #0
b.ge 4b
5:
// When w3 <= 0, all remaining pixels in v0-v1 are equal to the
// last valid pixel - thus just output that without filtering.
subs w1, w1, #8
st1 {v28.8h}, [x0], #16
b.gt 5b
9:
ret
endfunc
// void ipred_pixel_set_16bpc_neon(pixel *out, const pixel px,
// const int n);
function ipred_pixel_set_16bpc_neon, export=1
dup v0.8h, w1
1:
subs w2, w2, #8
st1 {v0.8h}, [x0], #16
b.gt 1b
ret
endfunc
// void ipred_z1_fill1_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const top,
// const int width, const int height,
// const int dx, const int max_base_x);
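// Illustrative sketch (assumption; stride in pixels): Z1 (top-edge-only
// directional) prediction walks the prepared top edge with a 6.6 fixed-point
// position xpos that advances by dx per row:
//   base = xpos >> 6;  frac = xpos & 0x3e;
//   dst[y*stride + x] = (top[base + x] * (64 - frac)
//                      + top[base + x + 1] * frac + 32) >> 6;
// Once base reaches max_base_x, the remaining rows are filled with
// top[max_base_x] (the 49/89/169 paths below).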
function ipred_z1_fill1_16bpc_neon, export=1
clz w9, w3
movrel x8, ipred_z1_fill1_tbl
sub w9, w9, #25
ldrsw x9, [x8, w9, uxtw #2]
add x10, x2, w6, uxtw #1 // top[max_base_x]
add x8, x8, x9
ld1r {v31.8h}, [x10] // padding
mov w7, w5
mov w15, #64
br x8
40:
AARCH64_VALID_JUMP_TARGET
4:
lsr w8, w7, #6 // base
and w9, w7, #0x3e // frac
add w7, w7, w5 // xpos += dx
cmp w8, w6 // base >= max_base_x
lsr w10, w7, #6 // base
and w11, w7, #0x3e // frac
b.ge 49f
lsl w8, w8, #1
lsl w10, w10, #1
ldr q0, [x2, w8, uxtw] // top[base]
ldr q2, [x2, w10, uxtw]
dup v4.4h, w9 // frac
dup v5.4h, w11
ext v1.16b, v0.16b, v0.16b, #2 // top[base+1]
ext v3.16b, v2.16b, v2.16b, #2
sub v6.4h, v1.4h, v0.4h // top[base+1]-top[base]
sub v7.4h, v3.4h, v2.4h
ushll v16.4s, v0.4h, #6 // top[base]*64
ushll v17.4s, v2.4h, #6
smlal v16.4s, v6.4h, v4.4h // + top[base+1]*frac
smlal v17.4s, v7.4h, v5.4h
rshrn v16.4h, v16.4s, #6
rshrn v17.4h, v17.4s, #6
st1 {v16.4h}, [x0], x1
add w7, w7, w5 // xpos += dx
subs w4, w4, #2
st1 {v17.4h}, [x0], x1
b.gt 4b
ret
49:
st1 {v31.4h}, [x0], x1
subs w4, w4, #2
st1 {v31.4h}, [x0], x1
b.gt 49b
ret
80:
AARCH64_VALID_JUMP_TARGET
8:
lsr w8, w7, #6 // base
and w9, w7, #0x3e // frac
add w7, w7, w5 // xpos += dx
cmp w8, w6 // base >= max_base_x
lsr w10, w7, #6 // base
and w11, w7, #0x3e // frac
b.ge 89f
add x8, x2, w8, uxtw #1
add x10, x2, w10, uxtw #1
dup v4.8h, w9 // frac
dup v5.8h, w11
ld1 {v0.8h}, [x8] // top[base]
ld1 {v2.8h}, [x10]
sub w9, w15, w9 // 64 - frac
sub w11, w15, w11
ldr h1, [x8, #16]
ldr h3, [x10, #16]
dup v6.8h, w9 // 64 - frac
dup v7.8h, w11
ext v1.16b, v0.16b, v1.16b, #2 // top[base+1]
ext v3.16b, v2.16b, v3.16b, #2
umull v16.4s, v0.4h, v6.4h // top[base]*(64-frac)
umlal v16.4s, v1.4h, v4.4h // + top[base+1]*frac
umull2 v17.4s, v0.8h, v6.8h
umlal2 v17.4s, v1.8h, v4.8h
umull v18.4s, v2.4h, v7.4h
umlal v18.4s, v3.4h, v5.4h
umull2 v19.4s, v2.8h, v7.8h
umlal2 v19.4s, v3.8h, v5.8h
rshrn v16.4h, v16.4s, #6
rshrn2 v16.8h, v17.4s, #6
rshrn v17.4h, v18.4s, #6
rshrn2 v17.8h, v19.4s, #6
st1 {v16.8h}, [x0], x1
add w7, w7, w5 // xpos += dx
subs w4, w4, #2
st1 {v17.8h}, [x0], x1
b.gt 8b
ret
89:
st1 {v31.8h}, [x0], x1
subs w4, w4, #2
st1 {v31.8h}, [x0], x1
b.gt 89b
ret
160:
320:
640:
AARCH64_VALID_JUMP_TARGET
mov w12, w3
add x13, x0, x1
lsl x1, x1, #1
sub x1, x1, w3, uxtw #1
1:
lsr w8, w7, #6 // base
and w9, w7, #0x3e // frac
add w7, w7, w5 // xpos += dx
cmp w8, w6 // base >= max_base_x
lsr w10, w7, #6 // base
and w11, w7, #0x3e // frac
b.ge 169f
add x8, x2, w8, uxtw #1
add x10, x2, w10, uxtw #1
dup v6.8h, w9 // frac
dup v7.8h, w11
ld1 {v0.8h, v1.8h, v2.8h}, [x8], #48 // top[base]
ld1 {v3.8h, v4.8h, v5.8h}, [x10], #48
sub w9, w15, w9 // 64 - frac
sub w11, w15, w11
dup v16.8h, w9 // 64 - frac
dup v17.8h, w11
add w7, w7, w5 // xpos += dx
2:
ext v18.16b, v0.16b, v1.16b, #2 // top[base+1]
ext v19.16b, v1.16b, v2.16b, #2
ext v20.16b, v3.16b, v4.16b, #2
ext v21.16b, v4.16b, v5.16b, #2
subs w3, w3, #16
umull v22.4s, v0.4h, v16.4h // top[base]*(64-frac)
umlal v22.4s, v18.4h, v6.4h // + top[base+1]*frac
umull2 v23.4s, v0.8h, v16.8h
umlal2 v23.4s, v18.8h, v6.8h
umull v24.4s, v1.4h, v16.4h
umlal v24.4s, v19.4h, v6.4h
umull2 v25.4s, v1.8h, v16.8h
umlal2 v25.4s, v19.8h, v6.8h
umull v26.4s, v3.4h, v17.4h
umlal v26.4s, v20.4h, v7.4h
umull2 v27.4s, v3.8h, v17.8h
umlal2 v27.4s, v20.8h, v7.8h
umull v28.4s, v4.4h, v17.4h
umlal v28.4s, v21.4h, v7.4h
umull2 v29.4s, v4.8h, v17.8h
umlal2 v29.4s, v21.8h, v7.8h
rshrn v22.4h, v22.4s, #6
rshrn2 v22.8h, v23.4s, #6
rshrn v23.4h, v24.4s, #6
rshrn2 v23.8h, v25.4s, #6
rshrn v24.4h, v26.4s, #6
rshrn2 v24.8h, v27.4s, #6
rshrn v25.4h, v28.4s, #6
rshrn2 v25.8h, v29.4s, #6
st1 {v22.8h, v23.8h}, [x0], #32
st1 {v24.8h, v25.8h}, [x13], #32
b.le 3f
mov v0.16b, v2.16b
ld1 {v1.8h, v2.8h}, [x8], #32 // top[base]
mov v3.16b, v5.16b
ld1 {v4.8h, v5.8h}, [x10], #32
b 2b
3:
subs w4, w4, #2
b.le 9f
add x0, x0, x1
add x13, x13, x1
mov w3, w12
b 1b
9:
ret
169:
st1 {v31.8h}, [x0], #16
subs w3, w3, #8
st1 {v31.8h}, [x13], #16
b.gt 169b
subs w4, w4, #2
b.le 9b
add x0, x0, x1
add x13, x13, x1
mov w3, w12
b 169b
endfunc
jumptable ipred_z1_fill1_tbl
.word 640b - ipred_z1_fill1_tbl
.word 320b - ipred_z1_fill1_tbl
.word 160b - ipred_z1_fill1_tbl
.word 80b - ipred_z1_fill1_tbl
.word 40b - ipred_z1_fill1_tbl
endjumptable
function ipred_z1_fill2_16bpc_neon, export=1
cmp w3, #8
add x10, x2, w6, uxtw // top[max_base_x]
ld1r {v31.16b}, [x10] // padding
mov w7, w5
mov w15, #64
b.eq 8f
4: // w == 4
lsr w8, w7, #6 // base
and w9, w7, #0x3e // frac
add w7, w7, w5 // xpos += dx
cmp w8, w6 // base >= max_base_x
lsr w10, w7, #6 // base
and w11, w7, #0x3e // frac
b.ge 49f
lsl w8, w8, #1
lsl w10, w10, #1
ldr q0, [x2, w8, uxtw] // top[base]
ldr q2, [x2, w10, uxtw]
dup v4.4h, w9 // frac
dup v5.4h, w11
uzp2 v1.8h, v0.8h, v0.8h // top[base+1]
uzp1 v0.8h, v0.8h, v0.8h // top[base]
uzp2 v3.8h, v2.8h, v2.8h
uzp1 v2.8h, v2.8h, v2.8h
sub v6.4h, v1.4h, v0.4h // top[base+1]-top[base]
sub v7.4h, v3.4h, v2.4h
ushll v16.4s, v0.4h, #6 // top[base]*64
ushll v17.4s, v2.4h, #6
smlal v16.4s, v6.4h, v4.4h // + top[base+1]*frac
smlal v17.4s, v7.4h, v5.4h
rshrn v16.4h, v16.4s, #6
rshrn v17.4h, v17.4s, #6
st1 {v16.4h}, [x0], x1
add w7, w7, w5 // xpos += dx
subs w4, w4, #2
st1 {v17.4h}, [x0], x1
b.gt 4b
ret
49:
st1 {v31.4h}, [x0], x1
subs w4, w4, #2
st1 {v31.4h}, [x0], x1
b.gt 49b
ret
8: // w == 8
lsr w8, w7, #6 // base
and w9, w7, #0x3e // frac
add w7, w7, w5 // xpos += dx
cmp w8, w6 // base >= max_base_x
lsr w10, w7, #6 // base
and w11, w7, #0x3e // frac
b.ge 89f
add x8, x2, w8, uxtw #1
add x10, x2, w10, uxtw #1
dup v4.8h, w9 // frac
dup v5.8h, w11
ld1 {v0.8h, v1.8h}, [x8] // top[base]
ld1 {v2.8h, v3.8h}, [x10]
sub w9, w15, w9 // 64 - frac
sub w11, w15, w11
dup v6.8h, w9 // 64 - frac
dup v7.8h, w11
uzp2 v20.8h, v0.8h, v1.8h // top[base+1]
uzp1 v0.8h, v0.8h, v1.8h // top[base]
uzp2 v21.8h, v2.8h, v3.8h
uzp1 v2.8h, v2.8h, v3.8h
umull v16.4s, v0.4h, v6.4h // top[base]*(64-frac)
umlal v16.4s, v20.4h, v4.4h // + top[base+1]*frac
umull2 v17.4s, v0.8h, v6.8h
umlal2 v17.4s, v20.8h, v4.8h
umull v18.4s, v2.4h, v7.4h
umlal v18.4s, v21.4h, v5.4h
umull2 v19.4s, v2.8h, v7.8h
umlal2 v19.4s, v21.8h, v5.8h
rshrn v16.4h, v16.4s, #6
rshrn2 v16.8h, v17.4s, #6
rshrn v17.4h, v18.4s, #6
rshrn2 v17.8h, v19.4s, #6
st1 {v16.8h}, [x0], x1
add w7, w7, w5 // xpos += dx
subs w4, w4, #2
st1 {v17.8h}, [x0], x1
b.gt 8b
ret
89:
st1 {v31.8h}, [x0], x1
subs w4, w4, #2
st1 {v31.8h}, [x0], x1
b.gt 89b
ret
endfunc
// void ipred_reverse_16bpc_neon(pixel *dst, const pixel *const src,
// const int n);
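// Illustrative sketch (assumption): this reverses the n pixels immediately
// below src so the left edge can be indexed upwards:
//   for (i = 0; i < n; i++)
//     dst[i] = src[-(1 + i)];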
function ipred_reverse_16bpc_neon, export=1
sub x1, x1, #16
add x3, x0, #8
mov x4, #16
1:
ld1 {v0.8h}, [x1]
subs w2, w2, #8
rev64 v0.8h, v0.8h
sub x1, x1, #16
st1 {v0.d}[1], [x0], x4
st1 {v0.d}[0], [x3], x4
b.gt 1b
ret
endfunc
const increments
.short 0, 1, 2, 3, 4, 5, 6, 7
endconst
// void ipred_z2_fill1_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const top,
// const pixel *const left,
// const int width, const int height,
// const int dx, const int dy);
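// High-level note (inferred from the code below): Z2 predicts from both
// edges. Per output pixel, a horizontal position xpos (stepping by -dx) and
// a vertical position ypos (stepping by -dy) are tracked in 6.6 fixed point;
// pixels whose per-pixel base_x is still >= 0 are interpolated from top[]
// with frac_x, the rest from left[] with frac_y, roughly:
//   from_top  = (top[base_x]*(64 - frac_x)  + top[base_x + 1]*frac_x  + 32) >> 6;
//   from_left = (left[base_y]*(64 - frac_y) + left[base_y + 1]*frac_y + 32) >> 6;
//   dst[..]   = (base_x >= 0) ? from_top : from_left;
// left[] is indexed with tbx while base_y < 32 and with scalar loads beyond.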
function ipred_z2_fill1_16bpc_neon, export=1
clz w10, w4
movrel x9, ipred_z2_fill1_tbl
sub w10, w10, #25
ldrsw x10, [x9, w10, uxtw #2]
mov w8, #(1 << 6) // xpos = 1 << 6
add x9, x9, x10
sub w8, w8, w6 // xpos -= dx
movrel x11, increments
ld1 {v31.8h}, [x11] // increments
neg w7, w7 // -dy
br x9
40:
AARCH64_VALID_JUMP_TARGET
dup v30.4h, w7 // -dy
movi v17.8b, #1
mul v16.4h, v31.4h, v30.4h // {0,1,2,3}* -dy
movi v25.8h, #0x3e
add v30.4h, v16.4h, v30.4h // -= dy
// Worst case height for w=4 is 16, but we need at least h+1 elements
ld1 {v0.8h, v1.8h, v2.8h}, [x3] // left[]
movi v26.8h, #64
movi v19.16b, #4
shrn v29.8b, v30.8h, #6 // ypos >> 6
and v27.8b, v30.8b, v25.8b // frac_y
add v29.8b, v29.8b, v17.8b // base_y = (ypos >> 6) + 1
movi v23.4h, #1, lsl #8
shl v29.8b, v29.8b, #1 // 2*base_y
zip1 v29.8b, v29.8b, v29.8b // duplicate elements
movi v17.8b, #2
add v29.8b, v29.8b, v23.8b // 2*base, 2*base+1, ...
add v30.8b, v29.8b, v17.8b // base_y + 1 (*2)
add v28.8b, v29.8b, v19.8b // base_y + 2 (*2)
tbl v18.8b, {v0.16b}, v29.8b // left[base_y]
trn1 v30.2d, v30.2d, v28.2d // base_y + 1, base_y + 2
sub v28.4h, v26.4h, v27.4h // 64 - frac_y
trn1 v31.2d, v31.2d, v31.2d // {0,1,2,3,0,1,2,3}
trn1 v27.2d, v27.2d, v27.2d // frac_y
trn1 v28.2d, v28.2d, v28.2d // 64 - frac_y
movi v29.16b, #4
4:
asr w9, w8, #6 // base_x
dup v16.4h, w8 // xpos
sub w8, w8, w6 // xpos -= dx
cmp w9, #-4 // base_x <= -4
asr w11, w8, #6 // base_x
b.le 49f
lsl w9, w9, #1
lsl w11, w11, #1
dup v17.4h, w8 // xpos
ldr q4, [x2, w9, sxtw] // top[base_x]
ldr q6, [x2, w11, sxtw]
trn1 v16.2d, v16.2d, v17.2d // xpos
// Cut corners here; only do the tbl over v0-v1, since the last
// pixel (from v2) only seems to be needed after skipping to the
// left-only codepath below.
tbl v19.16b, {v0.16b, v1.16b}, v30.16b // left[base_y+1], left[base_y+2]
sshr v20.8h, v16.8h, #6 // first base_x for each row
ext v5.16b, v4.16b, v4.16b, #2 // top[base_x+1]
ext v7.16b, v6.16b, v6.16b, #2
and v16.16b, v16.16b, v25.16b // frac_x
trn1 v18.2d, v18.2d, v19.2d // left[base_y], left[base_y+1]
trn1 v4.2d, v4.2d, v6.2d // top[base_x]
trn1 v5.2d, v5.2d, v7.2d // top[base_x+1]
sub v17.8h, v26.8h, v16.8h // 64 - frac_x
add v20.8h, v20.8h, v31.8h // actual base_x
umull v21.4s, v18.4h, v28.4h // left[base_y]*(64-frac_y)
umlal v21.4s, v19.4h, v27.4h // + left[base_y+1]*frac_y
umull2 v22.4s, v18.8h, v28.8h
umlal2 v22.4s, v19.8h, v27.8h
umull v23.4s, v4.4h, v17.4h // top[base_x]*(64-frac_x)
umlal v23.4s, v5.4h, v16.4h // + top[base_x+1]*frac_x
umull2 v24.4s, v4.8h, v17.8h
umlal2 v24.4s, v5.8h, v16.8h
cmge v20.8h, v20.8h, #0
rshrn v21.4h, v21.4s, #6
rshrn2 v21.8h, v22.4s, #6
rshrn v22.4h, v23.4s, #6
rshrn2 v22.8h, v24.4s, #6
bit v21.16b, v22.16b, v20.16b
st1 {v21.d}[0], [x0], x1
sub w8, w8, w6 // xpos -= dx
subs w5, w5, #2
st1 {v21.d}[1], [x0], x1
b.le 9f
ext v18.16b, v19.16b, v19.16b, #8
add v30.16b, v30.16b, v29.16b // base_y += 2 (*2)
b 4b
49:
tbl v19.16b, {v0.16b, v1.16b, v2.16b}, v30.16b // left[base_y+1], left[base_y+2]
trn1 v18.2d, v18.2d, v19.2d // left[base_y], left[base_y+1]
umull v20.4s, v18.4h, v28.4h // left[base_y]*(64-frac_y)
umlal v20.4s, v19.4h, v27.4h // + left[base_y+1]*frac_y
umull2 v21.4s, v18.8h, v28.8h
umlal2 v21.4s, v19.8h, v27.8h
rshrn v20.4h, v20.4s, #6
rshrn2 v20.8h, v21.4s, #6
st1 {v20.d}[0], [x0], x1
subs w5, w5, #2
st1 {v20.d}[1], [x0], x1
b.le 9f
ext v18.16b, v19.16b, v19.16b, #8
add v30.16b, v30.16b, v29.16b // base_y += 2 (*2)
b 49b
9:
ret
80:
AARCH64_VALID_JUMP_TARGET
stp d8, d9, [sp, #-0x40]!
stp d10, d11, [sp, #0x10]
stp d12, d13, [sp, #0x20]
stp d14, d15, [sp, #0x30]
dup v18.8h, w7 // -dy
add x3, x3, #2 // Skip past left[0]
mul v16.8h, v31.8h, v18.8h // {0,1,2,3,4,5,6,7}* -dy
movi v25.8h, #0x3e
add v16.8h, v16.8h, v18.8h // -= dy
// Worst case height for w=8 is 32.
ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x3] // left[]
ld1r {v15.8h}, [x2] // left[0] == top[0]
movi v26.8h, #64
movi v19.16b, #4
shrn v29.8b, v16.8h, #6 // ypos >> 6
and v27.16b, v16.16b, v25.16b // frac_y
movi v23.8h, #1, lsl #8
shl v29.8b, v29.8b, #1 // 2*base_y
mov v18.16b, v15.16b // left[0]
zip1 v29.16b, v29.16b, v29.16b // duplicate elements
movi v17.16b, #2
add v29.16b, v29.16b, v23.16b // 2*base, 2*base+1, ...
// Cut corners here; for the first row we don't expect to need to
// read outside of v0.
tbx v18.16b, {v0.16b}, v29.16b // left[base_y]
add v30.16b, v29.16b, v19.16b // base_y + 2 (*2)
add v29.16b, v29.16b, v17.16b // base_y + 1 (*2)
sub v28.8h, v26.8h, v27.8h // 64 - frac_y
movi v24.16b, #4
8:
asr w9, w8, #6 // base_x
dup v16.8h, w8 // xpos
sub w8, w8, w6 // xpos -= dx
cmp w9, #-16 // base_x <= -16
asr w11, w8, #6 // base_x
b.le 89f
dup v17.8h, w8 // xpos
add x9, x2, w9, sxtw #1
add x11, x2, w11, sxtw #1
ld1 {v4.8h, v5.8h}, [x9] // top[base_x]
mov v19.16b, v15.16b // left[0]
ld1 {v6.8h, v7.8h}, [x11]
tbx v19.16b, {v0.16b, v1.16b, v2.16b, v3.16b}, v29.16b // left[base_y+1]
mov v20.16b, v15.16b // left[0]
sshr v21.8h, v16.8h, #6 // first base_x
sshr v22.8h, v17.8h, #6
tbx v20.16b, {v0.16b, v1.16b, v2.16b, v3.16b}, v30.16b // left[base_y+2]
ext v5.16b, v4.16b, v5.16b, #2 // top[base_x+1]
ext v7.16b, v6.16b, v7.16b, #2
and v16.16b, v16.16b, v25.16b // frac_x
and v17.16b, v17.16b, v25.16b
umull v10.4s, v18.4h, v28.4h // left[base_y]*(64-frac_y)
umlal v10.4s, v19.4h, v27.4h // + left[base_y+1]*frac_y
sub v8.8h, v26.8h, v16.8h // 64 - frac_x
sub v9.8h, v26.8h, v17.8h
umull2 v11.4s, v18.8h, v28.8h
umlal2 v11.4s, v19.8h, v27.8h
add v21.8h, v21.8h, v31.8h // actual base_x
add v22.8h, v22.8h, v31.8h
umull v12.4s, v19.4h, v28.4h
umlal v12.4s, v20.4h, v27.4h
umull2 v13.4s, v19.8h, v28.8h
umlal2 v13.4s, v20.8h, v27.8h
rshrn v10.4h, v10.4s, #6
rshrn2 v10.8h, v11.4s, #6
rshrn v11.4h, v12.4s, #6
rshrn2 v11.8h, v13.4s, #6
umull v12.4s, v4.4h, v8.4h // top[base_x]*(64-frac_x)
umlal v12.4s, v5.4h, v16.4h // + top[base_x+1]*frac_x
umull2 v13.4s, v4.8h, v8.8h
umlal2 v13.4s, v5.8h, v16.8h
umull v14.4s, v6.4h, v9.4h
umlal v14.4s, v7.4h, v17.4h
umull2 v18.4s, v6.8h, v9.8h
umlal2 v18.4s, v7.8h, v17.8h
cmge v21.8h, v21.8h, #0
cmge v22.8h, v22.8h, #0
rshrn v12.4h, v12.4s, #6
rshrn2 v12.8h, v13.4s, #6
rshrn v13.4h, v14.4s, #6
rshrn2 v13.8h, v18.4s, #6
bit v10.16b, v12.16b, v21.16b
bit v11.16b, v13.16b, v22.16b
st1 {v10.8h}, [x0], x1
subs w5, w5, #2
sub w8, w8, w6 // xpos -= dx
st1 {v11.8h}, [x0], x1
b.le 9f
mov v18.16b, v20.16b
add v29.16b, v29.16b, v24.16b // base_y += 2 (*2)
add v30.16b, v30.16b, v24.16b // base_y += 2 (*2)
b 8b
89:
mov v19.16b, v15.16b
mov v20.16b, v15.16b
tbx v19.16b, {v0.16b, v1.16b, v2.16b, v3.16b}, v29.16b // left[base_y+1]
tbx v20.16b, {v0.16b, v1.16b, v2.16b, v3.16b}, v30.16b // left[base_y+2]
umull v4.4s, v18.4h, v28.4h // left[base_y]*(64-frac_y)
umlal v4.4s, v19.4h, v27.4h // + left[base_y+1]*frac_y
umull2 v5.4s, v18.8h, v28.8h
umlal2 v5.4s, v19.8h, v27.8h
umull v6.4s, v19.4h, v28.4h
umlal v6.4s, v20.4h, v27.4h
umull2 v7.4s, v19.8h, v28.8h
umlal2 v7.4s, v20.8h, v27.8h
rshrn v4.4h, v4.4s, #6
rshrn2 v4.8h, v5.4s, #6
rshrn v5.4h, v6.4s, #6
rshrn2 v5.8h, v7.4s, #6
st1 {v4.8h}, [x0], x1
subs w5, w5, #2
st1 {v5.8h}, [x0], x1
b.le 9f
mov v18.16b, v20.16b
add v29.16b, v29.16b, v24.16b // base_y += 2 (*2)
add v30.16b, v30.16b, v24.16b // base_y += 2 (*2)
b 89b
9:
ldp d14, d15, [sp, #0x30]
ldp d12, d13, [sp, #0x20]
ldp d10, d11, [sp, #0x10]
ldp d8, d9, [sp], 0x40
ret
160:
320:
640:
AARCH64_VALID_JUMP_TARGET
stp d8, d9, [sp, #-0x40]!
stp d10, d11, [sp, #0x10]
stp d12, d13, [sp, #0x20]
stp d14, d15, [sp, #0x30]
dup v25.8h, w7 // -dy
add x3, x3, #2 // Skip past left[0]
add x13, x0, x1 // alternating row
lsl x1, x1, #1 // stride *= 2
sub x1, x1, w4, uxtw #1 // stride -= width
movi v11.8h, #8
mul v26.8h, v31.8h, v25.8h // {0,1,2,3,4,5,6,7}* -dy
add v26.8h, v26.8h, v25.8h // -= dy
mul v25.8h, v25.8h, v11.8h // -8*dy
// Worst case height is 64, but only 32 pixels fit into v0-v3,
// which is as much as one tbx instruction can index. As long as
// base_y stays within those 32 entries we use tbx, otherwise
// separate loads (the 22 path below).
ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x3] // left[]
ld1r {v15.8h}, [x2] // left[0] == top[0]
mov w12, w4 // orig w
neg w14, w4 // -w
1:
mov v23.16b, v26.16b // reset ypos
asr w9, w8, #6 // base_x
dup v16.8h, w8 // xpos
sub w8, w8, w6 // xpos -= dx
cmp w9, w14 // base_x <= -2*w
asr w11, w8, #6 // base_x
b.le 169f
dup v17.8h, w8 // xpos
sub w8, w8, w6 // xpos -= dx
add x9, x2, w9, sxtw #1
add x11, x2, w11, sxtw #1
sshr v21.8h, v16.8h, #6 // first base_x
sshr v22.8h, v17.8h, #6
ld1 {v4.8h}, [x9], #16 // top[base_x]
ld1 {v6.8h}, [x11], #16
movi v10.8h, #0x3e
movi v11.8h, #64
and v16.16b, v16.16b, v10.16b // frac_x
and v17.16b, v17.16b, v10.16b
sub v8.8h, v11.8h, v16.8h // 64 - frac_x
sub v9.8h, v11.8h, v17.8h
add v21.8h, v21.8h, v31.8h // actual base_x
add v22.8h, v22.8h, v31.8h
2:
smov w10, v22.h[0]
shrn v29.8b, v23.8h, #6 // ypos >> 6
movi v12.8h, #64
cmp w10, #0 // base_x (bottom left) >= 0
smov w10, v29.b[0] // base_y[0]
movi v10.8h, #0x3e
b.ge 4f
and v27.16b, v23.16b, v10.16b // frac_y
cmp w10, #(32-3)
mov v18.16b, v15.16b // left[0]
sub v28.8h, v12.8h, v27.8h // 64 - frac_y
b.gt 22f
21:
// base_y < 32, using tbx
shl v29.8b, v29.8b, #1 // 2*base_y
movi v11.8h, #1, lsl #8
zip1 v29.16b, v29.16b, v29.16b // duplicate elements
add v29.16b, v29.16b, v11.16b // 2*base, 2*base+1, ...
movi v13.16b, #2
tbx v18.16b, {v0.16b, v1.16b, v2.16b, v3.16b}, v29.16b // left[base_y]
add v29.16b, v29.16b, v13.16b // base_y + 1 (*2)
mov v19.16b, v15.16b // left[0]
tbx v19.16b, {v0.16b, v1.16b, v2.16b, v3.16b}, v29.16b // left[base_y+1]
add v29.16b, v29.16b, v13.16b // base_y + 2 (*2)
mov v20.16b, v15.16b // left[0]
tbx v20.16b, {v0.16b, v1.16b, v2.16b, v3.16b}, v29.16b // left[base_y+2]
b 23f
22:
// base_y >= 32, using separate loads.
smov w15, v29.b[1]
smov w16, v29.b[2]
add x10, x3, w10, sxtw #1
smov w17, v29.b[3]
add x15, x3, w15, sxtw #1
ld3 {v18.h, v19.h, v20.h}[0], [x10]
smov w10, v29.b[4]
add x16, x3, w16, sxtw #1
ld3 {v18.h, v19.h, v20.h}[1], [x15]
smov w15, v29.b[5]
add x17, x3, w17, sxtw #1
ld3 {v18.h, v19.h, v20.h}[2], [x16]
smov w16, v29.b[6]
add x10, x3, w10, sxtw #1
ld3 {v18.h, v19.h, v20.h}[3], [x17]
smov w17, v29.b[7]
add x15, x3, w15, sxtw #1
add x16, x3, w16, sxtw #1
ld3 {v18.h, v19.h, v20.h}[4], [x10]
add x17, x3, w17, sxtw #1
ld3 {v18.h, v19.h, v20.h}[5], [x15]
ld3 {v18.h, v19.h, v20.h}[6], [x16]
ld3 {v18.h, v19.h, v20.h}[7], [x17]
23:
ld1 {v5.8h}, [x9], #16 // top[base_x]
ld1 {v7.8h}, [x11], #16
add v23.8h, v23.8h, v25.8h // ypos -= 8*dy
umull v10.4s, v18.4h, v28.4h // left[base_y]*(64-frac_y)
umlal v10.4s, v19.4h, v27.4h // + left[base_y+1]*frac_y
umull2 v11.4s, v18.8h, v28.8h
umlal2 v11.4s, v19.8h, v27.8h
umull v12.4s, v19.4h, v28.4h
umlal v12.4s, v20.4h, v27.4h
umull2 v13.4s, v19.8h, v28.8h
umlal2 v13.4s, v20.8h, v27.8h
ext v18.16b, v4.16b, v5.16b, #2 // top[base_x+1]
ext v19.16b, v6.16b, v7.16b, #2
rshrn v10.4h, v10.4s, #6
rshrn2 v10.8h, v11.4s, #6
rshrn v11.4h, v12.4s, #6
rshrn2 v11.8h, v13.4s, #6
umull v12.4s, v4.4h, v8.4h // top[base_x]*(64-frac_x)
umlal v12.4s, v18.4h, v16.4h // + top[base_x+1]*frac_x
umull2 v13.4s, v4.8h, v8.8h
umlal2 v13.4s, v18.8h, v16.8h
umull v14.4s, v6.4h, v9.4h
umlal v14.4s, v19.4h, v17.4h
umull2 v20.4s, v6.8h, v9.8h
umlal2 v20.4s, v19.8h, v17.8h
cmge v18.8h, v21.8h, #0
cmge v19.8h, v22.8h, #0
rshrn v12.4h, v12.4s, #6
rshrn2 v12.8h, v13.4s, #6
rshrn v13.4h, v14.4s, #6
rshrn2 v13.8h, v20.4s, #6
bit v10.16b, v12.16b, v18.16b
bit v11.16b, v13.16b, v19.16b
st1 {v10.8h}, [x0], #16
subs w4, w4, #8
st1 {v11.8h}, [x13], #16
b.le 3f
movi v10.8h, #8
mov v4.16b, v5.16b
mov v6.16b, v7.16b
add v21.8h, v21.8h, v10.8h // base_x += 8
add v22.8h, v22.8h, v10.8h
b 2b
3:
subs w5, w5, #2
b.le 9f
movi v10.8h, #128
add x0, x0, x1
add x13, x13, x1
mov w4, w12 // reset w
add v26.8h, v26.8h, v10.8h // ypos += 2*(1<<6)
b 1b
4: // The rest of the row only predicted from top[]
ld1 {v5.8h}, [x9], #16 // top[base_x]
ld1 {v7.8h}, [x11], #16
ext v18.16b, v4.16b, v5.16b, #2 // top[base_x+1]
ext v19.16b, v6.16b, v7.16b, #2
umull v12.4s, v4.4h, v8.4h // top[base_x]*(64-frac_x)
umlal v12.4s, v18.4h, v16.4h // + top[base_x+1]*frac_x
umull2 v13.4s, v4.8h, v8.8h
umlal2 v13.4s, v18.8h, v16.8h
umull v14.4s, v6.4h, v9.4h
umlal v14.4s, v19.4h, v17.4h
umull2 v20.4s, v6.8h, v9.8h
umlal2 v20.4s, v19.8h, v17.8h
rshrn v12.4h, v12.4s, #6
rshrn2 v12.8h, v13.4s, #6
rshrn v13.4h, v14.4s, #6
rshrn2 v13.8h, v20.4s, #6
st1 {v12.8h}, [x0], #16
subs w4, w4, #8
st1 {v13.8h}, [x13], #16
b.le 3b
mov v4.16b, v5.16b
mov v6.16b, v7.16b
b 4b
169: // The rest of the block only predicted from left[]
add x1, x1, w4, uxtw #1 // restore stride
mov w12, w5 // orig remaining h
1:
movi v12.8h, #64
movi v10.8h, #0x3e
shrn v29.8b, v23.8h, #6 // ypos >> 6
and v27.16b, v23.16b, v10.16b // frac_y
smov w10, v29.b[0] // base_y[0]
shl v29.8b, v29.8b, #1 // 2*base_y
movi v11.8h, #1, lsl #8
zip1 v29.16b, v29.16b, v29.16b // duplicate elements
add v23.8h, v23.8h, v25.8h // ypos -= 8*dy
add v29.16b, v29.16b, v11.16b // 2*base, 2*base+1, ...
cmp w10, #(32-1)
mov v18.16b, v15.16b // left[0]
movi v21.16b, #2
sub v28.8h, v12.8h, v27.8h // 64 - frac_y
b.gt 31f
tbx v18.16b, {v0.16b, v1.16b, v2.16b, v3.16b}, v29.16b // left[base_y]
add v29.16b, v29.16b, v21.16b // base_y + 1 (*2)
2:
// base_y < 32, using tbx.
smov w10, v29.b[0] // base_y[0]
mov v19.16b, v15.16b // left[0]
cmp w10, #(64-4)
b.gt 32f
tbx v19.16b, {v0.16b, v1.16b, v2.16b, v3.16b}, v29.16b // left[base_y+1]
add v29.16b, v29.16b, v21.16b // base_y + 2 (*2)
mov v20.16b, v15.16b // left[0]
tbx v20.16b, {v0.16b, v1.16b, v2.16b, v3.16b}, v29.16b // left[base_y+2]
add v29.16b, v29.16b, v21.16b // next base_y
umull v10.4s, v18.4h, v28.4h // left[base_y]*(64-frac_y)
umlal v10.4s, v19.4h, v27.4h // + left[base_y+1]*frac_y
umull2 v11.4s, v18.8h, v28.8h
umlal2 v11.4s, v19.8h, v27.8h
umull v12.4s, v19.4h, v28.4h
umlal v12.4s, v20.4h, v27.4h
umull2 v13.4s, v19.8h, v28.8h
umlal2 v13.4s, v20.8h, v27.8h
rshrn v10.4h, v10.4s, #6
rshrn2 v10.8h, v11.4s, #6
rshrn v11.4h, v12.4s, #6
rshrn2 v11.8h, v13.4s, #6
st1 {v10.8h}, [x0], x1
subs w5, w5, #2
st1 {v11.8h}, [x13], x1
b.le 4f
mov v18.16b, v20.16b
b 2b
31: // base_y >= 32, using separate loads, loading v18 if we had to bail
// in the prologue.
smov w10, v29.b[0]
smov w15, v29.b[2]
movi v21.16b, #2
smov w16, v29.b[4]
add x10, x3, w10, sxtw
smov w17, v29.b[6]
add x15, x3, w15, sxtw
ld1 {v18.h}[0], [x10]
smov w10, v29.b[8]
add x16, x3, w16, sxtw
ld1 {v18.h}[1], [x15]
smov w15, v29.b[10]
add x17, x3, w17, sxtw
ld1 {v18.h}[2], [x16]
smov w16, v29.b[12]
add x10, x3, w10, sxtw
ld1 {v18.h}[3], [x17]
smov w17, v29.b[14]
add x15, x3, w15, sxtw
add x16, x3, w16, sxtw
ld1 {v18.h}[4], [x10]
add x17, x3, w17, sxtw
ld1 {v18.h}[5], [x15]
add v29.16b, v29.16b, v21.16b // next base_y
ld1 {v18.h}[6], [x16]
ld1 {v18.h}[7], [x17]
32: // base_y >= 32, using separate loads.
cmp w5, #4
b.lt 34f
33: // h >= 4, preserving v18 from the previous round, loading v19-v22.
smov w10, v29.b[0]
subs w5, w5, #4
smov w15, v29.b[2]
movi v10.16b, #8
smov w16, v29.b[4]
add x10, x3, w10, sxtw
smov w17, v29.b[6]
add x15, x3, w15, sxtw
ld4 {v19.h, v20.h, v21.h, v22.h}[0], [x10]
smov w10, v29.b[8]
add x16, x3, w16, sxtw
ld4 {v19.h, v20.h, v21.h, v22.h}[1], [x15]
smov w15, v29.b[10]
add x17, x3, w17, sxtw
ld4 {v19.h, v20.h, v21.h, v22.h}[2], [x16]
smov w16, v29.b[12]
add x10, x3, w10, sxtw
ld4 {v19.h, v20.h, v21.h, v22.h}[3], [x17]
smov w17, v29.b[14]
add x15, x3, w15, sxtw
add x16, x3, w16, sxtw
ld4 {v19.h, v20.h, v21.h, v22.h}[4], [x10]
add x17, x3, w17, sxtw
ld4 {v19.h, v20.h, v21.h, v22.h}[5], [x15]
ld4 {v19.h, v20.h, v21.h, v22.h}[6], [x16]
add v29.16b, v29.16b, v10.16b // next base_y
ld4 {v19.h, v20.h, v21.h, v22.h}[7], [x17]
umull v10.4s, v18.4h, v28.4h // left[base_y]*(64-frac_y)
umlal v10.4s, v19.4h, v27.4h // + left[base_y+1]*frac_y
umull2 v11.4s, v18.8h, v28.8h
umlal2 v11.4s, v19.8h, v27.8h
umull v12.4s, v19.4h, v28.4h
umlal v12.4s, v20.4h, v27.4h
umull2 v13.4s, v19.8h, v28.8h
umlal2 v13.4s, v20.8h, v27.8h
rshrn v10.4h, v10.4s, #6
rshrn2 v10.8h, v11.4s, #6
rshrn v11.4h, v12.4s, #6
rshrn2 v11.8h, v13.4s, #6
umull v12.4s, v20.4h, v28.4h // left[base_y]*(64-frac_y)
umlal v12.4s, v21.4h, v27.4h // + left[base_y+1]*frac_y
umull2 v13.4s, v20.8h, v28.8h
umlal2 v13.4s, v21.8h, v27.8h
umull v14.4s, v21.4h, v28.4h
umlal v14.4s, v22.4h, v27.4h
umull2 v18.4s, v21.8h, v28.8h
umlal2 v18.4s, v22.8h, v27.8h
rshrn v12.4h, v12.4s, #6
rshrn2 v12.8h, v13.4s, #6
rshrn v13.4h, v14.4s, #6
rshrn2 v13.8h, v18.4s, #6
st1 {v10.8h}, [x0], x1
cmp w5, #2
st1 {v11.8h}, [x13], x1
st1 {v12.8h}, [x0], x1
st1 {v13.8h}, [x13], x1
b.lt 4f
mov v18.16b, v22.16b
b.gt 33b
34: // h == 2, preserving v18 from the previous round, loading v19-v20.
smov w10, v29.b[0]
smov w15, v29.b[2]
movi v21.16b, #4
smov w16, v29.b[4]
add x10, x3, w10, sxtw
smov w17, v29.b[6]
add x15, x3, w15, sxtw
ld2 {v19.h, v20.h}[0], [x10]
smov w10, v29.b[8]
add x16, x3, w16, sxtw
ld2 {v19.h, v20.h}[1], [x15]
smov w15, v29.b[10]
add x17, x3, w17, sxtw
ld2 {v19.h, v20.h}[2], [x16]
smov w16, v29.b[12]
add x10, x3, w10, sxtw
ld2 {v19.h, v20.h}[3], [x17]
smov w17, v29.b[14]
add x15, x3, w15, sxtw
add x16, x3, w16, sxtw
ld2 {v19.h, v20.h}[4], [x10]
add x17, x3, w17, sxtw
ld2 {v19.h, v20.h}[5], [x15]
ld2 {v19.h, v20.h}[6], [x16]
add v29.16b, v29.16b, v21.16b // next base_y
ld2 {v19.h, v20.h}[7], [x17]
umull v10.4s, v18.4h, v28.4h // left[base_y]*(64-frac_y)
umlal v10.4s, v19.4h, v27.4h // + left[base_y+1]*frac_y
umull2 v11.4s, v18.8h, v28.8h
umlal2 v11.4s, v19.8h, v27.8h
umull v12.4s, v19.4h, v28.4h
umlal v12.4s, v20.4h, v27.4h
umull2 v13.4s, v19.8h, v28.8h
umlal2 v13.4s, v20.8h, v27.8h
rshrn v10.4h, v10.4s, #6
rshrn2 v10.8h, v11.4s, #6
rshrn v11.4h, v12.4s, #6
rshrn2 v11.8h, v13.4s, #6
st1 {v10.8h}, [x0], x1
st1 {v11.8h}, [x13], x1
// The h==2 case only happens once at the end, if at all.
4:
subs w4, w4, #8
b.le 9f
lsr x1, x1, #1
msub x0, x1, x12, x0 // ptr -= h * stride
msub x13, x1, x12, x13
lsl x1, x1, #1
add x0, x0, #16
add x13, x13, #16
mov w5, w12 // reset h
b 1b
9:
ldp d14, d15, [sp, #0x30]
ldp d12, d13, [sp, #0x20]
ldp d10, d11, [sp, #0x10]
ldp d8, d9, [sp], 0x40
ret
endfunc
jumptable ipred_z2_fill1_tbl
.word 640b - ipred_z2_fill1_tbl
.word 320b - ipred_z2_fill1_tbl
.word 160b - ipred_z2_fill1_tbl
.word 80b - ipred_z2_fill1_tbl
.word 40b - ipred_z2_fill1_tbl
endjumptable
function ipred_z2_fill2_16bpc_neon, export=1
cmp w4, #8
mov w8, #(2 << 6) // xpos = 2 << 6
sub w8, w8, w6 // xpos -= dx
movrel x11, increments
ld1 {v31.8h}, [x11] // increments
neg w7, w7 // -dy
b.eq 80f
40:
dup v30.4h, w7 // -dy
movi v17.8b, #1
mul v16.4h, v31.4h, v30.4h // {0,1,2,3}* -dy
movi v25.8h, #0x3e
add v30.4h, v16.4h, v30.4h // -= dy
// For upsample_top, w <= 8 and h <= 8; we may need up to h+1 elements
// from left.
ld1 {v0.8h, v1.8h}, [x3] // left[]
movi v26.8h, #64
movi v19.16b, #4
shrn v29.8b, v30.8h, #6 // ypos >> 6
and v27.8b, v30.8b, v25.8b // frac_y
add v29.8b, v29.8b, v17.8b // base_y = (ypos >> 6) + 1
movi v23.4h, #1, lsl #8
shl v29.8b, v29.8b, #1 // 2*base_y
zip1 v29.8b, v29.8b, v29.8b // duplicate elements
movi v17.8b, #2
add v29.8b, v29.8b, v23.8b // 2*base, 2*base+1, ...
add v30.8b, v29.8b, v17.8b // base_y + 1 (*2)
add v28.8b, v29.8b, v19.8b // base_y + 2 (*2)
tbl v18.8b, {v0.16b}, v29.8b // left[base_y]
trn1 v30.2d, v30.2d, v28.2d // base_y + 1, base_y + 2
sub v28.4h, v26.4h, v27.4h // 64 - frac_y
trn1 v31.2d, v31.2d, v31.2d // {0,1,2,3,0,1,2,3}
trn1 v27.2d, v27.2d, v27.2d // frac_y
trn1 v28.2d, v28.2d, v28.2d // 64 - frac_y
movi v29.16b, #4
add v31.8h, v31.8h, v31.8h // {0,2,4,6,0,2,4,6}
4:
asr w9, w8, #6 // base_x
dup v16.4h, w8 // xpos
sub w8, w8, w6 // xpos -= dx
cmp w9, #-8 // base_x <= -8
asr w11, w8, #6 // base_x
b.le 49f
lsl w9, w9, #1
lsl w11, w11, #1
dup v17.4h, w8 // xpos
ldr q4, [x2, w9, sxtw] // top[base_x]
ldr q6, [x2, w11, sxtw]
trn1 v16.2d, v16.2d, v17.2d // xpos
tbl v19.16b, {v0.16b, v1.16b}, v30.16b // left[base_y+1], left[base_y+2]
sshr v20.8h, v16.8h, #6 // first base_x for each row
uzp2 v5.8h, v4.8h, v6.8h // top[base_x+1]
uzp1 v4.8h, v4.8h, v6.8h // top[base_x]
and v16.16b, v16.16b, v25.16b // frac_x
trn1 v18.2d, v18.2d, v19.2d // left[base_y], left[base_y+1]
sub v17.8h, v26.8h, v16.8h // 64 - frac_x
add v20.8h, v20.8h, v31.8h // actual base_x
umull v21.4s, v18.4h, v28.4h // left[base_y]*(64-frac_y)
umlal v21.4s, v19.4h, v27.4h // + left[base_y+1]*frac_y
umull2 v22.4s, v18.8h, v28.8h
umlal2 v22.4s, v19.8h, v27.8h
umull v23.4s, v4.4h, v17.4h // top[base_x]*(64-frac_x)
umlal v23.4s, v5.4h, v16.4h // + top[base_x+1]*frac_x
umull2 v24.4s, v4.8h, v17.8h
umlal2 v24.4s, v5.8h, v16.8h
cmge v20.8h, v20.8h, #0
rshrn v21.4h, v21.4s, #6
rshrn2 v21.8h, v22.4s, #6
rshrn v22.4h, v23.4s, #6
rshrn2 v22.8h, v24.4s, #6
bit v21.16b, v22.16b, v20.16b
st1 {v21.d}[0], [x0], x1
sub w8, w8, w6 // xpos -= dx
subs w5, w5, #2
st1 {v21.d}[1], [x0], x1
b.le 9f
ext v18.16b, v19.16b, v19.16b, #8
add v30.16b, v30.16b, v29.16b // base_y += 2 (*2)
b 4b
49:
tbl v19.16b, {v0.16b, v1.16b}, v30.16b // left[base_y+1], left[base_y+2]
trn1 v18.2d, v18.2d, v19.2d // left[base_y], left[base_y+1]
umull v20.4s, v18.4h, v28.4h // left[base_y]*(64-frac_y)
umlal v20.4s, v19.4h, v27.4h // + left[base_y+1]*frac_y
umull2 v21.4s, v18.8h, v28.8h
umlal2 v21.4s, v19.8h, v27.8h
rshrn v20.4h, v20.4s, #6
rshrn2 v20.8h, v21.4s, #6
st1 {v20.d}[0], [x0], x1
subs w5, w5, #2
st1 {v20.d}[1], [x0], x1
b.le 9f
ext v18.16b, v19.16b, v19.16b, #8
add v30.16b, v30.16b, v29.16b // base_y += 2 (*2)
b 49b
9:
ret
80:
stp d8, d9, [sp, #-0x40]!
stp d10, d11, [sp, #0x10]
stp d12, d13, [sp, #0x20]
stp d14, d15, [sp, #0x30]
dup v18.8h, w7 // -dy
movi v17.8b, #1
mul v16.8h, v31.8h, v18.8h // {0,1,2,3,4,5,6,7}* -dy
movi v25.8h, #0x3e
add v16.8h, v16.8h, v18.8h // -= dy
// For upsample_top, w <= 8 and h <= 8; we may need up to h+1 elements
// from left.
ld1 {v0.8h, v1.8h}, [x3] // left[]
movi v26.8h, #64
movi v19.16b, #4
shrn v29.8b, v16.8h, #6 // ypos >> 6
and v27.16b, v16.16b, v25.16b // frac_y
add v29.8b, v29.8b, v17.8b // base_y = (ypos >> 6) + 1
movi v23.8h, #1, lsl #8
shl v29.8b, v29.8b, #1 // 2*base_y
zip1 v29.16b, v29.16b, v29.16b // duplicate elements
movi v17.16b, #2
add v29.16b, v29.16b, v23.16b // 2*base, 2*base+1, ...
// Cut corners here; for the first row we don't expect to need to
// read outside of v0.
tbl v18.16b, {v0.16b}, v29.16b // left[base_y]
add v30.16b, v29.16b, v19.16b // base_y + 2 (*2)
add v29.16b, v29.16b, v17.16b // base_y + 1 (*2)
sub v28.8h, v26.8h, v27.8h // 64 - frac_y
movi v24.16b, #4
add v31.16b, v31.16b, v31.16b // {0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14}
8:
asr w9, w8, #6 // base_x
dup v16.8h, w8 // xpos
sub w8, w8, w6 // xpos -= dx
cmp w9, #-16 // base_x <= -16
asr w11, w8, #6 // base_x
b.le 89f
dup v17.8h, w8 // xpos
add x9, x2, w9, sxtw #1
add x11, x2, w11, sxtw #1
ld1 {v4.8h, v5.8h}, [x9] // top[base_x]
ld1 {v6.8h, v7.8h}, [x11]
tbl v19.16b, {v0.16b, v1.16b}, v29.16b // left[base_y+1]
sshr v21.8h, v16.8h, #6 // first base_x
sshr v22.8h, v17.8h, #6
tbl v20.16b, {v0.16b, v1.16b}, v30.16b // left[base_y+2]
uzp2 v2.8h, v4.8h, v5.8h // top[base_x+1]
uzp1 v4.8h, v4.8h, v5.8h // top[base_x]
uzp2 v3.8h, v6.8h, v7.8h
uzp1 v6.8h, v6.8h, v7.8h
mov v5.16b, v2.16b
mov v7.16b, v3.16b
and v16.16b, v16.16b, v25.16b // frac_x
and v17.16b, v17.16b, v25.16b
umull v10.4s, v18.4h, v28.4h // left[base_y]*(64-frac_y)
umlal v10.4s, v19.4h, v27.4h // + left[base_y+1]*frac_y
sub v8.8h, v26.8h, v16.8h // 64 - frac_x
sub v9.8h, v26.8h, v17.8h
umull2 v11.4s, v18.8h, v28.8h
umlal2 v11.4s, v19.8h, v27.8h
add v21.8h, v21.8h, v31.8h // actual base_x
add v22.8h, v22.8h, v31.8h
umull v12.4s, v19.4h, v28.4h
umlal v12.4s, v20.4h, v27.4h
umull2 v13.4s, v19.8h, v28.8h
umlal2 v13.4s, v20.8h, v27.8h
rshrn v10.4h, v10.4s, #6
rshrn2 v10.8h, v11.4s, #6
rshrn v11.4h, v12.4s, #6
rshrn2 v11.8h, v13.4s, #6
umull v12.4s, v4.4h, v8.4h // top[base_x]*(64-frac_x)
umlal v12.4s, v5.4h, v16.4h // + top[base_x+1]*frac_x
umull2 v13.4s, v4.8h, v8.8h
umlal2 v13.4s, v5.8h, v16.8h
umull v14.4s, v6.4h, v9.4h
umlal v14.4s, v7.4h, v17.4h
umull2 v18.4s, v6.8h, v9.8h
umlal2 v18.4s, v7.8h, v17.8h
cmge v21.8h, v21.8h, #0
cmge v22.8h, v22.8h, #0
rshrn v12.4h, v12.4s, #6
rshrn2 v12.8h, v13.4s, #6
rshrn v13.4h, v14.4s, #6
rshrn2 v13.8h, v18.4s, #6
bit v10.16b, v12.16b, v21.16b
bit v11.16b, v13.16b, v22.16b
st1 {v10.8h}, [x0], x1
subs w5, w5, #2
sub w8, w8, w6 // xpos -= dx
st1 {v11.8h}, [x0], x1
b.le 9f
mov v18.16b, v20.16b
add v29.16b, v29.16b, v24.16b // base_y += 2 (*2)
add v30.16b, v30.16b, v24.16b // base_y += 2 (*2)
b 8b
89:
tbl v19.16b, {v0.16b, v1.16b}, v29.16b // left[base_y+1]
tbl v20.16b, {v0.16b, v1.16b}, v30.16b // left[base_y+2]
umull v4.4s, v18.4h, v28.4h // left[base_y]*(64-frac_y)
umlal v4.4s, v19.4h, v27.4h // + left[base_y+1]*frac_y
umull2 v5.4s, v18.8h, v28.8h
umlal2 v5.4s, v19.8h, v27.8h
umull v6.4s, v19.4h, v28.4h
umlal v6.4s, v20.4h, v27.4h
umull2 v7.4s, v19.8h, v28.8h
umlal2 v7.4s, v20.8h, v27.8h
rshrn v4.4h, v4.4s, #6
rshrn2 v4.8h, v5.4s, #6
rshrn v5.4h, v6.4s, #6
rshrn2 v5.8h, v7.4s, #6
st1 {v4.8h}, [x0], x1
subs w5, w5, #2
st1 {v5.8h}, [x0], x1
b.le 9f
mov v18.16b, v20.16b
add v29.16b, v29.16b, v24.16b // base_y += 2 (*2)
add v30.16b, v30.16b, v24.16b // base_y += 2 (*2)
b 89b
9:
ldp d14, d15, [sp, #0x30]
ldp d12, d13, [sp, #0x20]
ldp d10, d11, [sp, #0x10]
ldp d8, d9, [sp], 0x40
ret
endfunc
function ipred_z2_fill3_16bpc_neon, export=1
cmp w4, #8
mov w8, #(1 << 6) // xpos = 1 << 6
sub w8, w8, w6 // xpos -= dx
movrel x11, increments
ld1 {v31.8h}, [x11] // increments
neg w7, w7 // -dy
b.eq 80f
40:
dup v30.4h, w7 // -dy
movi v17.8b, #1
mul v16.4h, v31.4h, v30.4h // {0,1,2,3}* -dy
movi v25.8h, #0x3e
add v30.4h, v16.4h, v30.4h // -= dy
// For upsample_left, w <= 8 and h <= 8; we may need up to 2*h+1 elements.
ld1 {v0.8h, v1.8h, v2.8h}, [x3] // left[]
movi v26.8h, #64
movi v19.16b, #2
shrn v29.8b, v30.8h, #6 // ypos >> 6
and v27.8b, v30.8b, v25.8b // frac_y
add v29.8b, v29.8b, v19.8b // base_y = (ypos >> 6) + 2
movi v23.4h, #1, lsl #8
shl v29.8b, v29.8b, #1 // 2*base_y
movi v19.16b, #4
zip1 v29.8b, v29.8b, v29.8b // duplicate elements
movi v17.8b, #2
add v29.8b, v29.8b, v23.8b // 2*base, 2*base+1, ...
add v30.8b, v29.8b, v17.8b // base_y + 1 (*2)
add v28.8b, v29.8b, v19.8b // base_y + 2 (*2)
trn1 v31.2d, v31.2d, v31.2d // {0,1,2,3,0,1,2,3}
add v24.8b, v30.8b, v19.8b // base_y + 3 (*2)
trn1 v29.2d, v29.2d, v28.2d // base_y + 0, base_y + 2
trn1 v30.2d, v30.2d, v24.2d // base_y + 1, base_y + 3
sub v28.4h, v26.4h, v27.4h // 64 - frac_y
trn1 v27.2d, v27.2d, v27.2d // frac_y
trn1 v28.2d, v28.2d, v28.2d // 64 - frac_y
movi v24.16b, #8
4:
asr w9, w8, #6 // base_x
dup v16.4h, w8 // xpos
sub w8, w8, w6 // xpos -= dx
cmp w9, #-4 // base_x <= -4
asr w11, w8, #6 // base_x
b.le 49f
lsl w9, w9, #1
lsl w11, w11, #1
dup v17.4h, w8 // xpos
ldr q4, [x2, w9, sxtw] // top[base_x]
ldr q6, [x2, w11, sxtw]
trn1 v16.2d, v16.2d, v17.2d // xpos
tbl v18.16b, {v0.16b, v1.16b, v2.16b}, v29.16b // left[base_y+0], left[base_y+2]
tbl v19.16b, {v0.16b, v1.16b, v2.16b}, v30.16b // left[base_y+1], left[base_y+3]
sshr v20.8h, v16.8h, #6 // first base_x for each row
ext v5.16b, v4.16b, v4.16b, #2 // top[base_x+1]
ext v7.16b, v6.16b, v6.16b, #2
and v16.16b, v16.16b, v25.16b // frac_x
trn1 v4.2d, v4.2d, v6.2d // top[base_x]
trn1 v5.2d, v5.2d, v7.2d // top[base_x+1]
sub v17.8h, v26.8h, v16.8h // 64 - frac_x
add v20.8h, v20.8h, v31.8h // actual base_x
umull v21.4s, v18.4h, v28.4h // left[base_y]*(64-frac_y)
umlal v21.4s, v19.4h, v27.4h // + left[base_y+1]*frac_y
umull2 v22.4s, v18.8h, v28.8h
umlal2 v22.4s, v19.8h, v27.8h
umull v23.4s, v4.4h, v17.4h // top[base_x]*(64-frac_x)
umlal v23.4s, v5.4h, v16.4h // + top[base_x+1]*frac_x
umull2 v24.4s, v4.8h, v17.8h
umlal2 v24.4s, v5.8h, v16.8h
cmge v20.8h, v20.8h, #0
rshrn v21.4h, v21.4s, #6
rshrn2 v21.8h, v22.4s, #6
rshrn v22.4h, v23.4s, #6
rshrn2 v22.8h, v24.4s, #6
movi v24.16b, #8
bit v21.16b, v22.16b, v20.16b
st1 {v21.d}[0], [x0], x1
sub w8, w8, w6 // xpos -= dx
subs w5, w5, #2
st1 {v21.d}[1], [x0], x1
b.le 9f
add v29.16b, v29.16b, v24.16b // base_y += 4 (*2)
add v30.16b, v30.16b, v24.16b // base_y += 4 (*2)
b 4b
49:
tbl v18.16b, {v0.16b, v1.16b, v2.16b}, v29.16b // left[base_y+0], left[base_y+2]
tbl v19.16b, {v0.16b, v1.16b, v2.16b}, v30.16b // left[base_y+1], left[base_y+3]
umull v20.4s, v18.4h, v28.4h // left[base_y]*(64-frac_y)
umlal v20.4s, v19.4h, v27.4h // + left[base_y+1]*frac_y
umull2 v21.4s, v18.8h, v28.8h
umlal2 v21.4s, v19.8h, v27.8h
rshrn v20.4h, v20.4s, #6
rshrn2 v20.8h, v21.4s, #6
st1 {v20.d}[0], [x0], x1
subs w5, w5, #2
st1 {v20.d}[1], [x0], x1
b.le 9f
add v29.16b, v29.16b, v24.16b // base_y += 4 (*2)
add v30.16b, v30.16b, v24.16b // base_y += 4 (*2)
b 49b
9:
ret
80:
stp d8, d9, [sp, #-0x40]!
stp d10, d11, [sp, #0x10]
stp d12, d13, [sp, #0x20]
stp d14, d15, [sp, #0x30]
dup v18.8h, w7 // -dy
movi v17.16b, #2
mul v16.8h, v31.8h, v18.8h // {0,1,2,3,4,5,6,7}* -dy
movi v25.8h, #0x3e
add v16.8h, v16.8h, v18.8h // -= dy
// For upsample_left, w <= 8 and h <= 8; we may need up to 2*h+1 elements.
ld1 {v0.8h, v1.8h, v2.8h}, [x3] // left[]
movi v26.8h, #64
movi v19.16b, #4
shrn v29.8b, v16.8h, #6 // ypos >> 6
and v27.16b, v16.16b, v25.16b // frac_y
add v29.8b, v29.8b, v17.8b // base_y = (ypos >> 6) + 2
movi v23.8h, #1, lsl #8
shl v29.8b, v29.8b, #1 // 2*base_y
zip1 v29.16b, v29.16b, v29.16b // duplicate elements
add v29.16b, v29.16b, v23.16b // 2*base, 2*base+1, ...
add v30.16b, v29.16b, v17.16b // base_y + 1 (*2)
sub v28.8h, v26.8h, v27.8h // 64 - frac_y
movi v24.16b, #4
8:
asr w9, w8, #6 // base_x
dup v16.8h, w8 // xpos
sub w8, w8, w6 // xpos -= dx
cmp w9, #-16 // base_x <= -16
asr w11, w8, #6 // base_x
b.le 89f
dup v17.8h, w8 // xpos
add x9, x2, w9, sxtw #1
add x11, x2, w11, sxtw #1
ld1 {v4.8h, v5.8h}, [x9] // top[base_x]
ld1 {v6.8h, v7.8h}, [x11]
tbl v18.16b, {v0.16b, v1.16b, v2.16b}, v29.16b // left[base_y+0]
add v29.16b, v29.16b, v24.16b // base_y += 2 (*2)
tbl v19.16b, {v0.16b, v1.16b, v2.16b}, v30.16b // left[base_y+1]
add v30.16b, v30.16b, v24.16b
sshr v22.8h, v16.8h, #6 // first base_x
tbl v20.16b, {v0.16b, v1.16b, v2.16b}, v29.16b // left[base_y+2]
sshr v23.8h, v17.8h, #6
tbl v21.16b, {v0.16b, v1.16b, v2.16b}, v30.16b // left[base_y+3]
ext v5.16b, v4.16b, v5.16b, #2 // top[base_x+1]
ext v7.16b, v6.16b, v7.16b, #2
and v16.16b, v16.16b, v25.16b // frac_x
and v17.16b, v17.16b, v25.16b
umull v10.4s, v18.4h, v28.4h // left[base_y]*(64-frac_y)
umlal v10.4s, v19.4h, v27.4h // + left[base_y+1]*frac_y
sub v8.8h, v26.8h, v16.8h // 64 - frac_x
sub v9.8h, v26.8h, v17.8h
umull2 v11.4s, v18.8h, v28.8h
umlal2 v11.4s, v19.8h, v27.8h
add v22.8h, v22.8h, v31.8h // actual base_x
add v23.8h, v23.8h, v31.8h
umull v12.4s, v20.4h, v28.4h
umlal v12.4s, v21.4h, v27.4h
umull2 v13.4s, v20.8h, v28.8h
umlal2 v13.4s, v21.8h, v27.8h
rshrn v10.4h, v10.4s, #6
rshrn2 v10.8h, v11.4s, #6
rshrn v11.4h, v12.4s, #6
rshrn2 v11.8h, v13.4s, #6
umull v12.4s, v4.4h, v8.4h // top[base_x]*(64-frac_x)
umlal v12.4s, v5.4h, v16.4h // + top[base_x+1]*frac_x
umull2 v13.4s, v4.8h, v8.8h
umlal2 v13.4s, v5.8h, v16.8h
umull v14.4s, v6.4h, v9.4h
umlal v14.4s, v7.4h, v17.4h
umull2 v18.4s, v6.8h, v9.8h
umlal2 v18.4s, v7.8h, v17.8h
cmge v22.8h, v22.8h, #0
cmge v23.8h, v23.8h, #0
rshrn v12.4h, v12.4s, #6
rshrn2 v12.8h, v13.4s, #6
rshrn v13.4h, v14.4s, #6
rshrn2 v13.8h, v18.4s, #6
bit v10.16b, v12.16b, v22.16b
bit v11.16b, v13.16b, v23.16b
st1 {v10.8h}, [x0], x1
subs w5, w5, #2
sub w8, w8, w6 // xpos -= dx
st1 {v11.8h}, [x0], x1
b.le 9f
add v29.16b, v29.16b, v24.16b // base_y += 2 (*2)
add v30.16b, v30.16b, v24.16b
b 8b
89:
tbl v18.16b, {v0.16b, v1.16b, v2.16b}, v29.16b // left[base_y+0]
add v29.16b, v29.16b, v24.16b // base_y += 2 (*2)
tbl v19.16b, {v0.16b, v1.16b, v2.16b}, v30.16b // left[base_y+1]
add v30.16b, v30.16b, v24.16b
tbl v20.16b, {v0.16b, v1.16b, v2.16b}, v29.16b // left[base_y+2]
tbl v21.16b, {v0.16b, v1.16b, v2.16b}, v30.16b // left[base_y+3]
umull v4.4s, v18.4h, v28.4h // left[base_y]*(64-frac_y)
umlal v4.4s, v19.4h, v27.4h // + left[base_y+1]*frac_y
umull2 v5.4s, v18.8h, v28.8h
umlal2 v5.4s, v19.8h, v27.8h
umull v6.4s, v20.4h, v28.4h
umlal v6.4s, v21.4h, v27.4h
umull2 v7.4s, v20.8h, v28.8h
umlal2 v7.4s, v21.8h, v27.8h
rshrn v4.4h, v4.4s, #6
rshrn2 v4.8h, v5.4s, #6
rshrn v5.4h, v6.4s, #6
rshrn2 v5.8h, v7.4s, #6
st1 {v4.8h}, [x0], x1
subs w5, w5, #2
st1 {v5.8h}, [x0], x1
b.le 9f
add v29.16b, v29.16b, v24.16b // base_y += 2 (*2)
add v30.16b, v30.16b, v24.16b
b 89b
9:
ldp d14, d15, [sp, #0x30]
ldp d12, d13, [sp, #0x20]
ldp d10, d11, [sp, #0x10]
ldp d8, d9, [sp], 0x40
ret
endfunc
// void ipred_z3_fill1_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const left,
// const int width, const int height,
// const int dy, const int max_base_y);
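// A rough scalar model of this function (illustrative only; the left[]
// buffer is assumed to be laid out with increasing index along the
// prediction direction and padded at left[max_base_y]; stride is in
// pixels here for brevity):
//
//   for (int x = 0, ypos = dy; x < width; x++, ypos += dy) {
//       const int frac = ypos & 0x3e;
//       for (int y = 0, base = ypos >> 6; y < height; y++, base++) {
//           if (base >= max_base_y) {           // fill the rest with padding
//               dst[y * stride + x] = left[max_base_y];
//               continue;
//           }
//           dst[y * stride + x] =
//               (left[base] * (64 - frac) + left[base + 1] * frac + 32) >> 6;
//       }
//   }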
function ipred_z3_fill1_16bpc_neon, export=1
clz w9, w4
movrel x8, ipred_z3_fill1_tbl
sub w9, w9, #25
ldrsw x9, [x8, w9, uxtw #2]
add x10, x2, w6, uxtw #1 // left[max_base_y]
add x8, x8, x9
ld1r {v31.8h}, [x10] // padding
mov w7, w5
mov w15, #64
add x13, x0, x1
lsl x1, x1, #1
br x8
40:
AARCH64_VALID_JUMP_TARGET
4:
lsr w8, w7, #6 // base
and w9, w7, #0x3e // frac
add w7, w7, w5 // xpos += dx
cmp w8, w6 // base >= max_base_x
lsr w10, w7, #6 // base
and w11, w7, #0x3e // frac
b.ge ipred_z3_fill_padding_neon
lsl w8, w8, #1
lsl w10, w10, #1
ldr q0, [x2, w8, uxtw] // left[base]
ldr q2, [x2, w10, uxtw]
dup v4.8h, w9 // frac
dup v5.8h, w11
ext v1.16b, v0.16b, v0.16b, #2 // left[base+1]
ext v3.16b, v2.16b, v2.16b, #2
sub v6.4h, v1.4h, v0.4h // left[base+1]-left[base]
sub v7.4h, v3.4h, v2.4h
ushll v16.4s, v0.4h, #6 // left[base]*64
ushll v17.4s, v2.4h, #6
smlal v16.4s, v6.4h, v4.4h // + left[base+1]*frac
smlal v17.4s, v7.4h, v5.4h
rshrn v16.4h, v16.4s, #6
rshrn v17.4h, v17.4s, #6
subs w3, w3, #2
zip1 v18.8h, v16.8h, v17.8h
st1 {v18.s}[0], [x0], x1
st1 {v18.s}[1], [x13], x1
add w7, w7, w5 // xpos += dx
st1 {v18.s}[2], [x0]
st1 {v18.s}[3], [x13]
b.le 9f
sub x0, x0, x1 // ptr -= 2*stride (back to the first row)
sub x13, x13, x1
add x0, x0, #4
add x13, x13, #4
b 4b
9:
ret
80:
AARCH64_VALID_JUMP_TARGET
8:
lsr w8, w7, #6 // base
and w9, w7, #0x3e // frac
add w7, w7, w5 // xpos += dx
cmp w8, w6 // base >= max_base_x
lsr w10, w7, #6 // base
and w11, w7, #0x3e // frac
b.ge ipred_z3_fill_padding_neon
add x8, x2, w8, uxtw #1
add x10, x2, w10, uxtw #1
dup v4.8h, w9 // frac
dup v5.8h, w11
ld1 {v0.8h}, [x8] // left[base]
ld1 {v2.8h}, [x10]
sub w9, w15, w9 // 64 - frac
sub w11, w15, w11
ldr h1, [x8, #16]
ldr h3, [x10, #16]
dup v6.8h, w9 // 64 - frac
dup v7.8h, w11
ext v1.16b, v0.16b, v1.16b, #2 // left[base+1]
ext v3.16b, v2.16b, v3.16b, #2
umull v16.4s, v0.4h, v6.4h // left[base]*(64-frac)
umlal v16.4s, v1.4h, v4.4h // + left[base+1]*frac
umull2 v17.4s, v0.8h, v6.8h
umlal2 v17.4s, v1.8h, v4.8h
umull v18.4s, v2.4h, v7.4h
umlal v18.4s, v3.4h, v5.4h
umull2 v19.4s, v2.8h, v7.8h
umlal2 v19.4s, v3.8h, v5.8h
rshrn v16.4h, v16.4s, #6
rshrn2 v16.8h, v17.4s, #6
rshrn v17.4h, v18.4s, #6
rshrn2 v17.8h, v19.4s, #6
subs w3, w3, #2
zip1 v18.8h, v16.8h, v17.8h
zip2 v19.8h, v16.8h, v17.8h
add w7, w7, w5 // xpos += dx
st1 {v18.s}[0], [x0], x1
st1 {v18.s}[1], [x13], x1
st1 {v18.s}[2], [x0], x1
st1 {v18.s}[3], [x13], x1
st1 {v19.s}[0], [x0], x1
st1 {v19.s}[1], [x13], x1
st1 {v19.s}[2], [x0], x1
st1 {v19.s}[3], [x13], x1
b.le 9f
sub x0, x0, x1, lsl #2 // ptr -= 4 * (2*stride)
sub x13, x13, x1, lsl #2
add x0, x0, #4
add x13, x13, #4
b 8b
9:
ret
160:
320:
640:
AARCH64_VALID_JUMP_TARGET
mov w12, w4
1:
lsr w8, w7, #6 // base
and w9, w7, #0x3e // frac
add w7, w7, w5 // ypos += dy
cmp w8, w6 // base >= max_base_y
lsr w10, w7, #6 // base
and w11, w7, #0x3e // frac
b.ge ipred_z3_fill_padding_neon
add x8, x2, w8, uxtw #1
add x10, x2, w10, uxtw #1
dup v6.8h, w9 // frac
dup v7.8h, w11
ld1 {v0.8h, v1.8h, v2.8h}, [x8], #48 // left[base]
ld1 {v3.8h, v4.8h, v5.8h}, [x10], #48
sub w9, w15, w9 // 64 - frac
sub w11, w15, w11
dup v16.8h, w9 // 64 - frac
dup v17.8h, w11
add w7, w7, w5 // ypos += dy
2:
ext v18.16b, v0.16b, v1.16b, #2 // left[base+1]
ext v19.16b, v1.16b, v2.16b, #2
ext v20.16b, v3.16b, v4.16b, #2
ext v21.16b, v4.16b, v5.16b, #2
subs w4, w4, #16
umull v22.4s, v0.4h, v16.4h // left[base]*(64-frac)
umlal v22.4s, v18.4h, v6.4h // + left[base+1]*frac
umull2 v23.4s, v0.8h, v16.8h
umlal2 v23.4s, v18.8h, v6.8h
umull v24.4s, v1.4h, v16.4h
umlal v24.4s, v19.4h, v6.4h
umull2 v25.4s, v1.8h, v16.8h
umlal2 v25.4s, v19.8h, v6.8h
umull v26.4s, v3.4h, v17.4h
umlal v26.4s, v20.4h, v7.4h
umull2 v27.4s, v3.8h, v17.8h
umlal2 v27.4s, v20.8h, v7.8h
umull v28.4s, v4.4h, v17.4h
umlal v28.4s, v21.4h, v7.4h
umull2 v29.4s, v4.8h, v17.8h
umlal2 v29.4s, v21.8h, v7.8h
rshrn v22.4h, v22.4s, #6
rshrn2 v22.8h, v23.4s, #6
rshrn v23.4h, v24.4s, #6
rshrn2 v23.8h, v25.4s, #6
rshrn v24.4h, v26.4s, #6
rshrn2 v24.8h, v27.4s, #6
rshrn v25.4h, v28.4s, #6
rshrn2 v25.8h, v29.4s, #6
zip1 v18.8h, v22.8h, v24.8h
zip2 v19.8h, v22.8h, v24.8h
zip1 v20.8h, v23.8h, v25.8h
zip2 v21.8h, v23.8h, v25.8h
st1 {v18.s}[0], [x0], x1
st1 {v18.s}[1], [x13], x1
st1 {v18.s}[2], [x0], x1
st1 {v18.s}[3], [x13], x1
st1 {v19.s}[0], [x0], x1
st1 {v19.s}[1], [x13], x1
st1 {v19.s}[2], [x0], x1
st1 {v19.s}[3], [x13], x1
st1 {v20.s}[0], [x0], x1
st1 {v20.s}[1], [x13], x1
st1 {v20.s}[2], [x0], x1
st1 {v20.s}[3], [x13], x1
st1 {v21.s}[0], [x0], x1
st1 {v21.s}[1], [x13], x1
st1 {v21.s}[2], [x0], x1
st1 {v21.s}[3], [x13], x1
b.le 3f
mov v0.16b, v2.16b
ld1 {v1.8h, v2.8h}, [x8], #32 // left[base]
mov v3.16b, v5.16b
ld1 {v4.8h, v5.8h}, [x10], #32
b 2b
3:
subs w3, w3, #2
b.le 9f
lsr x1, x1, #1
msub x0, x1, x12, x0 // ptr -= h * stride
msub x13, x1, x12, x13
lsl x1, x1, #1
add x0, x0, #4
add x13, x13, #4
mov w4, w12
b 1b
9:
ret
endfunc
jumptable ipred_z3_fill1_tbl
.word 640b - ipred_z3_fill1_tbl
.word 320b - ipred_z3_fill1_tbl
.word 160b - ipred_z3_fill1_tbl
.word 80b - ipred_z3_fill1_tbl
.word 40b - ipred_z3_fill1_tbl
endjumptable
function ipred_z3_fill_padding_neon, export=0
cmp w3, #8
movrel x8, ipred_z3_fill_padding_tbl
b.gt ipred_z3_fill_padding_wide
// w3 = remaining width, w4 = constant height
mov w12, w4
1:
// Fill a WxH rectangle with padding. W can be any number;
// this fills the exact width by filling in the largest
// power of two in the remaining width, and repeating.
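// A rough C model of the dispatch below (illustrative only; fill_columns
// stands for one of the fixed-width cases that follow):
//   while (w > 0) {
//       int chunk = 1 << (31 - clz(w));  // largest power of two <= w
//       fill_columns(dst, stride, chunk, h, pad);
//       dst += chunk;
//       w -= chunk;
//   }
// e.g. a remaining width of 6 is handled as a 4-wide fill followed by a
// 2-wide fill.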
clz w9, w3
sub w9, w9, #25
ldrsw x9, [x8, w9, uxtw #2]
add x9, x8, x9
br x9
20:
AARCH64_VALID_JUMP_TARGET
2:
st1 {v31.s}[0], [x0], x1
subs w4, w4, #4
st1 {v31.s}[0], [x13], x1
st1 {v31.s}[0], [x0], x1
st1 {v31.s}[0], [x13], x1
b.gt 2b
subs w3, w3, #2
lsr x1, x1, #1
msub x0, x1, x12, x0 // ptr -= h * stride
msub x13, x1, x12, x13
b.le 9f
lsl x1, x1, #1
add x0, x0, #4
add x13, x13, #4
mov w4, w12
b 1b
40:
AARCH64_VALID_JUMP_TARGET
4:
st1 {v31.4h}, [x0], x1
subs w4, w4, #4
st1 {v31.4h}, [x13], x1
st1 {v31.4h}, [x0], x1
st1 {v31.4h}, [x13], x1
b.gt 4b
subs w3, w3, #4
lsr x1, x1, #1
msub x0, x1, x12, x0 // ptr -= h * stride
msub x13, x1, x12, x13
b.le 9f
lsl x1, x1, #1
add x0, x0, #8
add x13, x13, #8
mov w4, w12
b 1b
80:
160:
320:
640:
AARCH64_VALID_JUMP_TARGET
8:
st1 {v31.8h}, [x0], x1
subs w4, w4, #4
st1 {v31.8h}, [x13], x1
st1 {v31.8h}, [x0], x1
st1 {v31.8h}, [x13], x1
b.gt 8b
subs w3, w3, #8
lsr x1, x1, #1
msub x0, x1, x12, x0 // ptr -= h * stride
msub x13, x1, x12, x13
b.le 9f
lsl x1, x1, #1
add x0, x0, #16
add x13, x13, #16
mov w4, w12
b 1b
9:
ret
endfunc
jumptable ipred_z3_fill_padding_tbl
.word 640b - ipred_z3_fill_padding_tbl
.word 320b - ipred_z3_fill_padding_tbl
.word 160b - ipred_z3_fill_padding_tbl
.word 80b - ipred_z3_fill_padding_tbl
.word 40b - ipred_z3_fill_padding_tbl
.word 20b - ipred_z3_fill_padding_tbl
endjumptable
function ipred_z3_fill_padding_wide
// Fill a WxH rectangle with padding, with W > 8.
lsr x1, x1, #1
mov w12, w3
sub x1, x1, w3, uxtw #1
1:
ands w5, w3, #7
b.eq 2f
// If the width isn't aligned to 8, first do one 8 pixel write
// and align the start pointer.
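// e.g. w == 13: w5 == 5, so pixels 0..7 are written here, the pointer
// advances by 5 pixels, and the aligned loop below writes pixels 5..12;
// the 3-pixel overlap is harmless since every store writes the same
// padding value.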
sub w3, w3, w5
st1 {v31.8h}, [x0]
add x0, x0, w5, uxtw #1
2:
// Fill the rest of the line with aligned 8 pixel writes.
subs w3, w3, #8
st1 {v31.8h}, [x0], #16
b.gt 2b
subs w4, w4, #1
add x0, x0, x1
b.le 9f
mov w3, w12
b 1b
9:
ret
endfunc
function ipred_z3_fill2_16bpc_neon, export=1
cmp w4, #8
add x10, x2, w6, uxtw #1 // left[max_base_y]
ld1r {v31.8h}, [x10] // padding
mov w7, w5
mov w15, #64
add x13, x0, x1
lsl x1, x1, #1
b.eq 8f
4: // h == 4
lsr w8, w7, #6 // base
and w9, w7, #0x3e // frac
add w7, w7, w5 // xpos += dx
cmp w8, w6 // base >= max_base_x
lsr w10, w7, #6 // base
and w11, w7, #0x3e // frac
b.ge ipred_z3_fill_padding_neon
lsl w8, w8, #1
lsl w10, w10, #1
ldr q0, [x2, w8, uxtw] // top[base]
ldr q2, [x2, w10, uxtw]
dup v4.4h, w9 // frac
dup v5.4h, w11
uzp2 v1.8h, v0.8h, v0.8h // top[base+1]
uzp1 v0.8h, v0.8h, v0.8h // top[base]
uzp2 v3.8h, v2.8h, v2.8h
uzp1 v2.8h, v2.8h, v2.8h
sub v6.4h, v1.4h, v0.4h // top[base+1]-top[base]
sub v7.4h, v3.4h, v2.4h
ushll v16.4s, v0.4h, #6 // top[base]*64
ushll v17.4s, v2.4h, #6
smlal v16.4s, v6.4h, v4.4h // + top[base+1]*frac
smlal v17.4s, v7.4h, v5.4h
rshrn v16.4h, v16.4s, #6
rshrn v17.4h, v17.4s, #6
subs w3, w3, #2
zip1 v18.8h, v16.8h, v17.8h
st1 {v18.s}[0], [x0], x1
st1 {v18.s}[1], [x13], x1
add w7, w7, w5 // xpos += dx
st1 {v18.s}[2], [x0]
st1 {v18.s}[3], [x13]
b.le 9f
sub x0, x0, x1 // ptr -= 2*stride (back to the first row)
sub x13, x13, x1
add x0, x0, #4
add x13, x13, #4
b 4b
9:
ret
8: // h == 8
lsr w8, w7, #6 // base
and w9, w7, #0x3e // frac
add w7, w7, w5 // xpos += dx
cmp w8, w6 // base >= max_base_x
lsr w10, w7, #6 // base
and w11, w7, #0x3e // frac
b.ge ipred_z3_fill_padding_neon
add x8, x2, w8, uxtw #1
add x10, x2, w10, uxtw #1
dup v4.8h, w9 // frac
dup v5.8h, w11
ld1 {v0.8h, v1.8h}, [x8] // top[base]
ld1 {v2.8h, v3.8h}, [x10]
sub w9, w15, w9 // 64 - frac
sub w11, w15, w11
dup v6.8h, w9 // 64 - frac
dup v7.8h, w11
uzp2 v20.8h, v0.8h, v1.8h // top[base+1]
uzp1 v0.8h, v0.8h, v1.8h // top[base]
uzp2 v21.8h, v2.8h, v3.8h
uzp1 v2.8h, v2.8h, v3.8h
umull v16.4s, v0.4h, v6.4h // top[base]*(64-frac)
umlal v16.4s, v20.4h, v4.4h // + top[base+1]*frac
umull2 v17.4s, v0.8h, v6.8h
umlal2 v17.4s, v20.8h, v4.8h
umull v18.4s, v2.4h, v7.4h
umlal v18.4s, v21.4h, v5.4h
umull2 v19.4s, v2.8h, v7.8h
umlal2 v19.4s, v21.8h, v5.8h
rshrn v16.4h, v16.4s, #6
rshrn2 v16.8h, v17.4s, #6
rshrn v17.4h, v18.4s, #6
rshrn2 v17.8h, v19.4s, #6
subs w3, w3, #2
zip1 v18.8h, v16.8h, v17.8h
zip2 v19.8h, v16.8h, v17.8h
add w7, w7, w5 // xpos += dx
st1 {v18.s}[0], [x0], x1
st1 {v18.s}[1], [x13], x1
st1 {v18.s}[2], [x0], x1
st1 {v18.s}[3], [x13], x1
st1 {v19.s}[0], [x0], x1
st1 {v19.s}[1], [x13], x1
st1 {v19.s}[2], [x0], x1
st1 {v19.s}[3], [x13], x1
b.le 9f
sub x0, x0, x1, lsl #2 // ptr -= 4 * (2*stride)
sub x13, x13, x1, lsl #2
add x0, x0, #4
add x13, x13, #4
b 8b
9:
ret
endfunc
// void ipred_filter_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int filt_idx,
// const int max_width, const int max_height,
// const int bitdepth_max);
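// A rough scalar model of the 4x2 kernel below (illustrative only; flt[]
// stands for the seven 8-entry tap sets loaded from filter_intra_taps,
// and p0..p6 follow the comments in the code: p0 = topleft,
// p1..p4 = top[0..3], p5 = left[0], p6 = left[1]):
//
//   for (int i = 0; i < 8; i++) {          // the 8 pixels of a 4x2 block
//       int acc = flt[0][i] * p0 + flt[1][i] * p1 + flt[2][i] * p2 +
//                 flt[3][i] * p3 + flt[4][i] * p4 + flt[5][i] * p5 +
//                 flt[6][i] * p6;
//       out[i] = clip((acc + 8) >> 4, 0, bitdepth_max);
//   }
//
// Each finished 4x2 block then provides the top/left neighbours for the
// blocks to its right and below.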
.macro filter_fn bpc
function ipred_filter_\bpc\()bpc_neon
and w5, w5, #511
movrel x6, X(filter_intra_taps)
lsl w5, w5, #6
add x6, x6, w5, uxtw
ld1 {v16.8b, v17.8b, v18.8b, v19.8b}, [x6], #32
clz w9, w3
movrel x5, ipred_filter\bpc\()_tbl
ld1 {v20.8b, v21.8b, v22.8b}, [x6]
sub w9, w9, #26
ldrsw x9, [x5, w9, uxtw #2]
sxtl v16.8h, v16.8b
sxtl v17.8h, v17.8b
add x5, x5, x9
sxtl v18.8h, v18.8b
sxtl v19.8h, v19.8b
add x6, x0, x1
lsl x1, x1, #1
sxtl v20.8h, v20.8b
sxtl v21.8h, v21.8b
sxtl v22.8h, v22.8b
dup v31.8h, w8
.if \bpc == 10
movi v30.8h, #0
.endif
br x5
40:
AARCH64_VALID_JUMP_TARGET
ldur d0, [x2, #2] // top (0-3)
sub x2, x2, #4
mov x7, #-4
4:
ld1 {v1.4h}, [x2], x7 // left (0-1) + topleft (2)
.if \bpc == 10
mul v2.8h, v17.8h, v0.h[0] // p1(top[0]) * filter(1)
mla v2.8h, v18.8h, v0.h[1] // p2(top[1]) * filter(2)
mla v2.8h, v19.8h, v0.h[2] // p3(top[2]) * filter(3)
mla v2.8h, v20.8h, v0.h[3] // p4(top[3]) * filter(4)
mla v2.8h, v16.8h, v1.h[2] // p0(topleft) * filter(0)
mla v2.8h, v21.8h, v1.h[1] // p5(left[0]) * filter(5)
mla v2.8h, v22.8h, v1.h[0] // p6(left[1]) * filter(6)
srshr v2.8h, v2.8h, #4
smax v2.8h, v2.8h, v30.8h
.else
smull v2.4s, v17.4h, v0.h[0] // p1(top[0]) * filter(1)
smlal v2.4s, v18.4h, v0.h[1] // p2(top[1]) * filter(2)
smlal v2.4s, v19.4h, v0.h[2] // p3(top[2]) * filter(3)
smlal v2.4s, v20.4h, v0.h[3] // p4(top[3]) * filter(4)
smlal v2.4s, v16.4h, v1.h[2] // p0(topleft) * filter(0)
smlal v2.4s, v21.4h, v1.h[1] // p5(left[0]) * filter(5)
smlal v2.4s, v22.4h, v1.h[0] // p6(left[1]) * filter(6)
smull2 v3.4s, v17.8h, v0.h[0] // p1(top[0]) * filter(1)
smlal2 v3.4s, v18.8h, v0.h[1] // p2(top[1]) * filter(2)
smlal2 v3.4s, v19.8h, v0.h[2] // p3(top[2]) * filter(3)
smlal2 v3.4s, v20.8h, v0.h[3] // p4(top[3]) * filter(4)
smlal2 v3.4s, v16.8h, v1.h[2] // p0(topleft) * filter(0)
smlal2 v3.4s, v21.8h, v1.h[1] // p5(left[0]) * filter(5)
smlal2 v3.4s, v22.8h, v1.h[0] // p6(left[1]) * filter(6)
sqrshrun v2.4h, v2.4s, #4
sqrshrun2 v2.8h, v3.4s, #4
.endif
smin v2.8h, v2.8h, v31.8h
subs w4, w4, #2
st1 {v2.d}[0], [x0], x1
ext v0.16b, v2.16b, v2.16b, #8 // move top from [4-7] to [0-3]
st1 {v2.d}[1], [x6], x1
b.gt 4b
ret
80:
AARCH64_VALID_JUMP_TARGET
ldur q0, [x2, #2] // top (0-7)
sub x2, x2, #4
mov x7, #-4
8:
ld1 {v1.4h}, [x2], x7 // left (0-1) + topleft (2)
.if \bpc == 10
mul v2.8h, v17.8h, v0.h[0] // p1(top[0]) * filter(1)
mla v2.8h, v18.8h, v0.h[1] // p2(top[1]) * filter(2)
mla v2.8h, v19.8h, v0.h[2] // p3(top[2]) * filter(3)
mla v2.8h, v20.8h, v0.h[3] // p4(top[3]) * filter(4)
mla v2.8h, v16.8h, v1.h[2] // p0(topleft) * filter(0)
mla v2.8h, v21.8h, v1.h[1] // p5(left[0]) * filter(5)
mla v2.8h, v22.8h, v1.h[0] // p6(left[1]) * filter(6)
mul v3.8h, v17.8h, v0.h[4] // p1(top[0]) * filter(1)
mla v3.8h, v18.8h, v0.h[5] // p2(top[1]) * filter(2)
mla v3.8h, v19.8h, v0.h[6] // p3(top[2]) * filter(3)
srshr v2.8h, v2.8h, #4
smax v2.8h, v2.8h, v30.8h
smin v2.8h, v2.8h, v31.8h
mla v3.8h, v20.8h, v0.h[7] // p4(top[3]) * filter(4)
mla v3.8h, v16.8h, v0.h[3] // p0(topleft) * filter(0)
mla v3.8h, v21.8h, v2.h[3] // p5(left[0]) * filter(5)
mla v3.8h, v22.8h, v2.h[7] // p6(left[1]) * filter(6)
srshr v3.8h, v3.8h, #4
smax v3.8h, v3.8h, v30.8h
.else
smull v2.4s, v17.4h, v0.h[0] // p1(top[0]) * filter(1)
smlal v2.4s, v18.4h, v0.h[1] // p2(top[1]) * filter(2)
smlal v2.4s, v19.4h, v0.h[2] // p3(top[2]) * filter(3)
smlal v2.4s, v20.4h, v0.h[3] // p4(top[3]) * filter(4)
smlal v2.4s, v16.4h, v1.h[2] // p0(topleft) * filter(0)
smlal v2.4s, v21.4h, v1.h[1] // p5(left[0]) * filter(5)
smlal v2.4s, v22.4h, v1.h[0] // p6(left[1]) * filter(6)
smull2 v3.4s, v17.8h, v0.h[0] // p1(top[0]) * filter(1)
smlal2 v3.4s, v18.8h, v0.h[1] // p2(top[1]) * filter(2)
smlal2 v3.4s, v19.8h, v0.h[2] // p3(top[2]) * filter(3)
smlal2 v3.4s, v20.8h, v0.h[3] // p4(top[3]) * filter(4)
smlal2 v3.4s, v16.8h, v1.h[2] // p0(topleft) * filter(0)
smlal2 v3.4s, v21.8h, v1.h[1] // p5(left[0]) * filter(5)
smlal2 v3.4s, v22.8h, v1.h[0] // p6(left[1]) * filter(6)
smull v4.4s, v17.4h, v0.h[4] // p1(top[0]) * filter(1)
smlal v4.4s, v18.4h, v0.h[5] // p2(top[1]) * filter(2)
smlal v4.4s, v19.4h, v0.h[6] // p3(top[2]) * filter(3)
sqrshrun v2.4h, v2.4s, #4
sqrshrun2 v2.8h, v3.4s, #4
smin v2.8h, v2.8h, v31.8h
smlal v4.4s, v20.4h, v0.h[7] // p4(top[3]) * filter(4)
smlal v4.4s, v16.4h, v0.h[3] // p0(topleft) * filter(0)
smlal v4.4s, v21.4h, v2.h[3] // p5(left[0]) * filter(5)
smlal v4.4s, v22.4h, v2.h[7] // p6(left[1]) * filter(6)
smull2 v5.4s, v17.8h, v0.h[4] // p1(top[0]) * filter(1)
smlal2 v5.4s, v18.8h, v0.h[5] // p2(top[1]) * filter(2)
smlal2 v5.4s, v19.8h, v0.h[6] // p3(top[2]) * filter(3)
smlal2 v5.4s, v20.8h, v0.h[7] // p4(top[3]) * filter(4)
smlal2 v5.4s, v16.8h, v0.h[3] // p0(topleft) * filter(0)
smlal2 v5.4s, v21.8h, v2.h[3] // p5(left[0]) * filter(5)
smlal2 v5.4s, v22.8h, v2.h[7] // p6(left[1]) * filter(6)
sqrshrun v3.4h, v4.4s, #4
sqrshrun2 v3.8h, v5.4s, #4
.endif
smin v3.8h, v3.8h, v31.8h
subs w4, w4, #2
st2 {v2.d, v3.d}[0], [x0], x1
zip2 v0.2d, v2.2d, v3.2d
st2 {v2.d, v3.d}[1], [x6], x1
b.gt 8b
ret
160:
320:
AARCH64_VALID_JUMP_TARGET
add x8, x2, #2
sub x2, x2, #4
mov x7, #-4
sub x1, x1, w3, uxtw #1
mov w9, w3
1:
ld1 {v0.4h}, [x2], x7 // left (0-1) + topleft (2)
2:
ld1 {v1.8h, v2.8h}, [x8], #32 // top(0-15)
.if \bpc == 10
mul v3.8h, v16.8h, v0.h[2] // p0(topleft) * filter(0)
mla v3.8h, v21.8h, v0.h[1] // p5(left[0]) * filter(5)
mla v3.8h, v22.8h, v0.h[0] // p6(left[1]) * filter(6)
mla v3.8h, v17.8h, v1.h[0] // p1(top[0]) * filter(1)
mla v3.8h, v18.8h, v1.h[1] // p2(top[1]) * filter(2)
mla v3.8h, v19.8h, v1.h[2] // p3(top[2]) * filter(3)
mla v3.8h, v20.8h, v1.h[3] // p4(top[3]) * filter(4)
mul v4.8h, v17.8h, v1.h[4] // p1(top[0]) * filter(1)
mla v4.8h, v18.8h, v1.h[5] // p2(top[1]) * filter(2)
mla v4.8h, v19.8h, v1.h[6] // p3(top[2]) * filter(3)
srshr v3.8h, v3.8h, #4
smax v3.8h, v3.8h, v30.8h
smin v3.8h, v3.8h, v31.8h
mla v4.8h, v20.8h, v1.h[7] // p4(top[3]) * filter(4)
mla v4.8h, v16.8h, v1.h[3] // p0(topleft) * filter(0)
mla v4.8h, v21.8h, v3.h[3] // p5(left[0]) * filter(5)
mla v4.8h, v22.8h, v3.h[7] // p6(left[1]) * filter(6)
mul v5.8h, v17.8h, v2.h[0] // p1(top[0]) * filter(1)
mla v5.8h, v18.8h, v2.h[1] // p2(top[1]) * filter(2)
mla v5.8h, v19.8h, v2.h[2] // p3(top[2]) * filter(3)
srshr v4.8h, v4.8h, #4
smax v4.8h, v4.8h, v30.8h
smin v4.8h, v4.8h, v31.8h
mla v5.8h, v20.8h, v2.h[3] // p4(top[3]) * filter(4)
mla v5.8h, v16.8h, v1.h[7] // p0(topleft) * filter(0)
mla v5.8h, v21.8h, v4.h[3] // p5(left[0]) * filter(5)
mla v5.8h, v22.8h, v4.h[7] // p6(left[1]) * filter(6)
mul v6.8h, v17.8h, v2.h[4] // p1(top[0]) * filter(1)
mla v6.8h, v18.8h, v2.h[5] // p2(top[1]) * filter(2)
mla v6.8h, v19.8h, v2.h[6] // p3(top[2]) * filter(3)
srshr v5.8h, v5.8h, #4
smax v5.8h, v5.8h, v30.8h
smin v5.8h, v5.8h, v31.8h
mla v6.8h, v20.8h, v2.h[7] // p4(top[3]) * filter(4)
mla v6.8h, v16.8h, v2.h[3] // p0(topleft) * filter(0)
mla v6.8h, v21.8h, v5.h[3] // p5(left[0]) * filter(5)
mla v6.8h, v22.8h, v5.h[7] // p6(left[1]) * filter(6)
subs w3, w3, #16
srshr v6.8h, v6.8h, #4
smax v6.8h, v6.8h, v30.8h
.else
smull v3.4s, v16.4h, v0.h[2] // p0(topleft) * filter(0)
smlal v3.4s, v21.4h, v0.h[1] // p5(left[0]) * filter(5)
smlal v3.4s, v22.4h, v0.h[0] // p6(left[1]) * filter(6)
smlal v3.4s, v17.4h, v1.h[0] // p1(top[0]) * filter(1)
smlal v3.4s, v18.4h, v1.h[1] // p2(top[1]) * filter(2)
smlal v3.4s, v19.4h, v1.h[2] // p3(top[2]) * filter(3)
smlal v3.4s, v20.4h, v1.h[3] // p4(top[3]) * filter(4)
smull2 v4.4s, v16.8h, v0.h[2] // p0(topleft) * filter(0)
smlal2 v4.4s, v21.8h, v0.h[1] // p5(left[0]) * filter(5)
smlal2 v4.4s, v22.8h, v0.h[0] // p6(left[1]) * filter(6)
smlal2 v4.4s, v17.8h, v1.h[0] // p1(top[0]) * filter(1)
smlal2 v4.4s, v18.8h, v1.h[1] // p2(top[1]) * filter(2)
smlal2 v4.4s, v19.8h, v1.h[2] // p3(top[2]) * filter(3)
smlal2 v4.4s, v20.8h, v1.h[3] // p4(top[3]) * filter(4)
smull v5.4s, v17.4h, v1.h[4] // p1(top[0]) * filter(1)
smlal v5.4s, v18.4h, v1.h[5] // p2(top[1]) * filter(2)
smlal v5.4s, v19.4h, v1.h[6] // p3(top[2]) * filter(3)
sqrshrun v3.4h, v3.4s, #4
sqrshrun2 v3.8h, v4.4s, #4
smin v3.8h, v3.8h, v31.8h
smlal v5.4s, v20.4h, v1.h[7] // p4(top[3]) * filter(4)
smlal v5.4s, v16.4h, v1.h[3] // p0(topleft) * filter(0)
smlal v5.4s, v21.4h, v3.h[3] // p5(left[0]) * filter(5)
smlal v5.4s, v22.4h, v3.h[7] // p6(left[1]) * filter(6)
smull2 v6.4s, v17.8h, v1.h[4] // p1(top[0]) * filter(1)
smlal2 v6.4s, v18.8h, v1.h[5] // p2(top[1]) * filter(2)
smlal2 v6.4s, v19.8h, v1.h[6] // p3(top[2]) * filter(3)
smlal2 v6.4s, v20.8h, v1.h[7] // p4(top[3]) * filter(4)
smlal2 v6.4s, v16.8h, v1.h[3] // p0(topleft) * filter(0)
smlal2 v6.4s, v21.8h, v3.h[3] // p5(left[0]) * filter(5)
smlal2 v6.4s, v22.8h, v3.h[7] // p6(left[1]) * filter(6)
smull v24.4s, v17.4h, v2.h[0] // p1(top[0]) * filter(1)
smlal v24.4s, v18.4h, v2.h[1] // p2(top[1]) * filter(2)
smlal v24.4s, v19.4h, v2.h[2] // p3(top[2]) * filter(3)
sqrshrun v4.4h, v5.4s, #4
sqrshrun2 v4.8h, v6.4s, #4
smin v4.8h, v4.8h, v31.8h
smlal v24.4s, v20.4h, v2.h[3] // p4(top[3]) * filter(4)
smlal v24.4s, v16.4h, v1.h[7] // p0(topleft) * filter(0)
smlal v24.4s, v21.4h, v4.h[3] // p5(left[0]) * filter(5)
smlal v24.4s, v22.4h, v4.h[7] // p6(left[1]) * filter(6)
smull2 v25.4s, v17.8h, v2.h[0] // p1(top[0]) * filter(1)
smlal2 v25.4s, v18.8h, v2.h[1] // p2(top[1]) * filter(2)
smlal2 v25.4s, v19.8h, v2.h[2] // p3(top[2]) * filter(3)
smlal2 v25.4s, v20.8h, v2.h[3] // p4(top[3]) * filter(4)
smlal2 v25.4s, v16.8h, v1.h[7] // p0(topleft) * filter(0)
smlal2 v25.4s, v21.8h, v4.h[3] // p5(left[0]) * filter(5)
smlal2 v25.4s, v22.8h, v4.h[7] // p6(left[1]) * filter(6)
smull v26.4s, v17.4h, v2.h[4] // p1(top[0]) * filter(1)
smlal v26.4s, v18.4h, v2.h[5] // p2(top[1]) * filter(2)
smlal v26.4s, v19.4h, v2.h[6] // p3(top[2]) * filter(3)
sqrshrun v5.4h, v24.4s, #4
sqrshrun2 v5.8h, v25.4s, #4
smin v5.8h, v5.8h, v31.8h
smlal v26.4s, v20.4h, v2.h[7] // p4(top[3]) * filter(4)
smlal v26.4s, v16.4h, v2.h[3] // p0(topleft) * filter(0)
smlal v26.4s, v21.4h, v5.h[3] // p5(left[0]) * filter(5)
smlal v26.4s, v22.4h, v5.h[7] // p6(left[1]) * filter(6)
smull2 v27.4s, v17.8h, v2.h[4] // p1(top[0]) * filter(1)
smlal2 v27.4s, v18.8h, v2.h[5] // p2(top[1]) * filter(2)
smlal2 v27.4s, v19.8h, v2.h[6] // p3(top[2]) * filter(3)
smlal2 v27.4s, v20.8h, v2.h[7] // p4(top[3]) * filter(4)
smlal2 v27.4s, v16.8h, v2.h[3] // p0(topleft) * filter(0)
smlal2 v27.4s, v21.8h, v5.h[3] // p5(left[0]) * filter(5)
smlal2 v27.4s, v22.8h, v5.h[7] // p6(left[1]) * filter(6)
subs w3, w3, #16
sqrshrun v6.4h, v26.4s, #4
sqrshrun2 v6.8h, v27.4s, #4
.endif
smin v6.8h, v6.8h, v31.8h
ins v0.h[2], v2.h[7]
st4 {v3.d, v4.d, v5.d, v6.d}[0], [x0], #32
ins v0.h[0], v6.h[7]
st4 {v3.d, v4.d, v5.d, v6.d}[1], [x6], #32
ins v0.h[1], v6.h[3]
b.gt 2b
subs w4, w4, #2
b.le 9f
sub x8, x6, w9, uxtw #1
add x0, x0, x1
add x6, x6, x1
mov w3, w9
b 1b
9:
ret
endfunc
jumptable ipred_filter\bpc\()_tbl
.word 320b - ipred_filter\bpc\()_tbl
.word 160b - ipred_filter\bpc\()_tbl
.word 80b - ipred_filter\bpc\()_tbl
.word 40b - ipred_filter\bpc\()_tbl
endjumptable
.endm
filter_fn 10
filter_fn 12
function ipred_filter_16bpc_neon, export=1
ldr w8, [sp]
cmp w8, 0x3ff
b.le ipred_filter_10bpc_neon
b ipred_filter_12bpc_neon
endfunc
// void pal_pred_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const pal, const uint8_t *idx,
// const int w, const int h);
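// Roughly equivalent scalar code (illustrative only; idx[] packs two
// palette indices per byte, low nibble first, and stride is in pixels
// here for brevity):
//
//   for (int y = 0; y < h; y++, dst += stride)
//       for (int x = 0; x < w; x += 2) {
//           const int b = *idx++;
//           dst[x + 0] = pal[b & 7];
//           dst[x + 1] = pal[b >> 4];
//       }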
function pal_pred_16bpc_neon, export=1
ld1 {v30.8h}, [x2]
clz w9, w4
movrel x6, pal_pred_tbl
sub w9, w9, #25
movi v29.16b, #7
ldrsw x9, [x6, w9, uxtw #2]
movi v31.8h, #1, lsl #8
add x6, x6, x9
br x6
40:
AARCH64_VALID_JUMP_TARGET
add x2, x0, x1
lsl x1, x1, #1
4:
ld1 {v1.8b}, [x3], #8
subs w5, w5, #4
ushr v3.8b, v1.8b, #4
and v2.8b, v1.8b, v29.8b
zip1 v1.16b, v2.16b, v3.16b
// Restructure v1 from a, b, c, ... into 2*a, 2*a+1, 2*b, 2*b+1, 2*c, 2*c+1, ...
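// This lets the byte-wise tbl below fetch 16-bit palette entries: the
// byte index pair {2*a, 2*a+1} picks up the low and high byte of pal[a]
// (e.g. index 5 becomes bytes {10, 11}, i.e. pal[5]).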
add v1.16b, v1.16b, v1.16b
zip1 v0.16b, v1.16b, v1.16b
zip2 v1.16b, v1.16b, v1.16b
add v0.8h, v0.8h, v31.8h
add v1.8h, v1.8h, v31.8h
tbl v0.16b, {v30.16b}, v0.16b
st1 {v0.d}[0], [x0], x1
tbl v1.16b, {v30.16b}, v1.16b
st1 {v0.d}[1], [x2], x1
st1 {v1.d}[0], [x0], x1
st1 {v1.d}[1], [x2], x1
b.gt 4b
ret
80:
AARCH64_VALID_JUMP_TARGET
add x2, x0, x1
lsl x1, x1, #1
8:
ld1 {v2.16b}, [x3], #16
subs w5, w5, #4
ushr v4.16b, v2.16b, #4
and v3.16b, v2.16b, v29.16b
zip1 v2.16b, v3.16b, v4.16b
zip2 v3.16b, v3.16b, v4.16b
add v2.16b, v2.16b, v2.16b
add v3.16b, v3.16b, v3.16b
zip1 v0.16b, v2.16b, v2.16b
zip2 v1.16b, v2.16b, v2.16b
zip1 v2.16b, v3.16b, v3.16b
zip2 v3.16b, v3.16b, v3.16b
add v0.8h, v0.8h, v31.8h
add v1.8h, v1.8h, v31.8h
add v2.8h, v2.8h, v31.8h
add v3.8h, v3.8h, v31.8h
tbl v0.16b, {v30.16b}, v0.16b
tbl v1.16b, {v30.16b}, v1.16b
st1 {v0.8h}, [x0], x1
tbl v2.16b, {v30.16b}, v2.16b
st1 {v1.8h}, [x2], x1
tbl v3.16b, {v30.16b}, v3.16b
st1 {v2.8h}, [x0], x1
st1 {v3.8h}, [x2], x1
b.gt 8b
ret
160:
AARCH64_VALID_JUMP_TARGET
add x2, x0, x1
lsl x1, x1, #1
16:
ld1 {v4.16b, v5.16b}, [x3], #32
subs w5, w5, #4
ushr v7.16b, v4.16b, #4
and v6.16b, v4.16b, v29.16b
ushr v3.16b, v5.16b, #4
and v2.16b, v5.16b, v29.16b
zip1 v4.16b, v6.16b, v7.16b
zip2 v5.16b, v6.16b, v7.16b
zip1 v6.16b, v2.16b, v3.16b
zip2 v7.16b, v2.16b, v3.16b
add v4.16b, v4.16b, v4.16b
add v5.16b, v5.16b, v5.16b
add v6.16b, v6.16b, v6.16b
add v7.16b, v7.16b, v7.16b
zip1 v0.16b, v4.16b, v4.16b
zip2 v1.16b, v4.16b, v4.16b
zip1 v2.16b, v5.16b, v5.16b
zip2 v3.16b, v5.16b, v5.16b
zip1 v4.16b, v6.16b, v6.16b
zip2 v5.16b, v6.16b, v6.16b
zip1 v6.16b, v7.16b, v7.16b
zip2 v7.16b, v7.16b, v7.16b
add v0.8h, v0.8h, v31.8h
add v1.8h, v1.8h, v31.8h
add v2.8h, v2.8h, v31.8h
add v3.8h, v3.8h, v31.8h
add v4.8h, v4.8h, v31.8h
tbl v0.16b, {v30.16b}, v0.16b
add v5.8h, v5.8h, v31.8h
tbl v1.16b, {v30.16b}, v1.16b
add v6.8h, v6.8h, v31.8h
tbl v2.16b, {v30.16b}, v2.16b
add v7.8h, v7.8h, v31.8h
tbl v3.16b, {v30.16b}, v3.16b
tbl v4.16b, {v30.16b}, v4.16b
tbl v5.16b, {v30.16b}, v5.16b
st1 {v0.8h, v1.8h}, [x0], x1
tbl v6.16b, {v30.16b}, v6.16b
st1 {v2.8h, v3.8h}, [x2], x1
tbl v7.16b, {v30.16b}, v7.16b
st1 {v4.8h, v5.8h}, [x0], x1
st1 {v6.8h, v7.8h}, [x2], x1
b.gt 16b
ret
320:
AARCH64_VALID_JUMP_TARGET
add x2, x0, x1
lsl x1, x1, #1
32:
ld1 {v4.16b, v5.16b}, [x3], #32
subs w5, w5, #2
ushr v7.16b, v4.16b, #4
and v6.16b, v4.16b, v29.16b
ushr v3.16b, v5.16b, #4
and v2.16b, v5.16b, v29.16b
zip1 v4.16b, v6.16b, v7.16b
zip2 v5.16b, v6.16b, v7.16b
zip1 v6.16b, v2.16b, v3.16b
zip2 v7.16b, v2.16b, v3.16b
add v4.16b, v4.16b, v4.16b
add v5.16b, v5.16b, v5.16b
add v6.16b, v6.16b, v6.16b
add v7.16b, v7.16b, v7.16b
zip1 v0.16b, v4.16b, v4.16b
zip2 v1.16b, v4.16b, v4.16b
zip1 v2.16b, v5.16b, v5.16b
zip2 v3.16b, v5.16b, v5.16b
zip1 v4.16b, v6.16b, v6.16b
zip2 v5.16b, v6.16b, v6.16b
zip1 v6.16b, v7.16b, v7.16b
zip2 v7.16b, v7.16b, v7.16b
add v0.8h, v0.8h, v31.8h
add v1.8h, v1.8h, v31.8h
add v2.8h, v2.8h, v31.8h
add v3.8h, v3.8h, v31.8h
add v4.8h, v4.8h, v31.8h
tbl v0.16b, {v30.16b}, v0.16b
add v5.8h, v5.8h, v31.8h
tbl v1.16b, {v30.16b}, v1.16b
add v6.8h, v6.8h, v31.8h
tbl v2.16b, {v30.16b}, v2.16b
add v7.8h, v7.8h, v31.8h
tbl v3.16b, {v30.16b}, v3.16b
tbl v4.16b, {v30.16b}, v4.16b
tbl v5.16b, {v30.16b}, v5.16b
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], x1
tbl v6.16b, {v30.16b}, v6.16b
tbl v7.16b, {v30.16b}, v7.16b
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x2], x1
b.gt 32b
ret
640:
AARCH64_VALID_JUMP_TARGET
add x2, x0, #64
64:
ld1 {v4.16b, v5.16b}, [x3], #32
subs w5, w5, #1
ushr v7.16b, v4.16b, #4
and v6.16b, v4.16b, v29.16b
ushr v3.16b, v5.16b, #4
and v2.16b, v5.16b, v29.16b
zip1 v4.16b, v6.16b, v7.16b
zip2 v5.16b, v6.16b, v7.16b
zip1 v6.16b, v2.16b, v3.16b
zip2 v7.16b, v2.16b, v3.16b
add v4.16b, v4.16b, v4.16b
add v5.16b, v5.16b, v5.16b
add v6.16b, v6.16b, v6.16b
add v7.16b, v7.16b, v7.16b
zip1 v0.16b, v4.16b, v4.16b
zip2 v1.16b, v4.16b, v4.16b
zip1 v2.16b, v5.16b, v5.16b
zip2 v3.16b, v5.16b, v5.16b
zip1 v4.16b, v6.16b, v6.16b
zip2 v5.16b, v6.16b, v6.16b
zip1 v6.16b, v7.16b, v7.16b
zip2 v7.16b, v7.16b, v7.16b
add v0.8h, v0.8h, v31.8h
add v1.8h, v1.8h, v31.8h
add v2.8h, v2.8h, v31.8h
add v3.8h, v3.8h, v31.8h
add v4.8h, v4.8h, v31.8h
tbl v0.16b, {v30.16b}, v0.16b
add v5.8h, v5.8h, v31.8h
tbl v1.16b, {v30.16b}, v1.16b
add v6.8h, v6.8h, v31.8h
tbl v2.16b, {v30.16b}, v2.16b
add v7.8h, v7.8h, v31.8h
tbl v3.16b, {v30.16b}, v3.16b
tbl v4.16b, {v30.16b}, v4.16b
tbl v5.16b, {v30.16b}, v5.16b
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], x1
tbl v6.16b, {v30.16b}, v6.16b
tbl v7.16b, {v30.16b}, v7.16b
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x2], x1
b.gt 64b
ret
endfunc
jumptable pal_pred_tbl
.word 640b - pal_pred_tbl
.word 320b - pal_pred_tbl
.word 160b - pal_pred_tbl
.word 80b - pal_pred_tbl
.word 40b - pal_pred_tbl
endjumptable
// void ipred_cfl_128_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height,
// const int16_t *ac, const int alpha,
// const int bitdepth_max);
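// The shared cfl_splat loops below apply the CfL mapping. Roughly
// (illustrative only; dc is the DC value computed by each entry point):
//
//   for (each pixel i) {
//       int diff = alpha * ac[i];                       // ac is signed
//       int v    = dc + ((diff - (diff < 0) + 32) >> 6);
//       dst[i]   = clip(v, 0, bitdepth_max);
//   }
//
// Subtracting 1 from negative products before the rounding shift rounds
// the magnitude and reapplies the sign, matching apply_sign() in the C
// reference.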
function ipred_cfl_128_16bpc_neon, export=1
dup v31.8h, w7 // bitdepth_max
clz w9, w3
movrel x7, ipred_cfl_128_tbl
sub w9, w9, #26
ldrsw x9, [x7, w9, uxtw #2]
urshr v0.8h, v31.8h, #1
dup v1.8h, w6 // alpha
add x7, x7, x9
add x6, x0, x1
lsl x1, x1, #1
movi v30.8h, #0
br x7
L(ipred_cfl_splat_w4):
AARCH64_VALID_JUMP_TARGET
1:
ld1 {v4.8h, v5.8h}, [x5], #32
subs w4, w4, #4
smull v2.4s, v4.4h, v1.4h // diff = ac * alpha
smull2 v3.4s, v4.8h, v1.8h
smull v4.4s, v5.4h, v1.4h
smull2 v5.4s, v5.8h, v1.8h
cmlt v16.4s, v2.4s, #0 // sign
cmlt v17.4s, v3.4s, #0
cmlt v18.4s, v4.4s, #0
cmlt v19.4s, v5.4s, #0
add v2.4s, v2.4s, v16.4s // diff + sign
add v3.4s, v3.4s, v17.4s
add v4.4s, v4.4s, v18.4s
add v5.4s, v5.4s, v19.4s
rshrn v2.4h, v2.4s, #6 // (diff + sign + 32) >> 6 = apply_sign()
rshrn2 v2.8h, v3.4s, #6
rshrn v3.4h, v4.4s, #6
rshrn2 v3.8h, v5.4s, #6
add v2.8h, v2.8h, v0.8h // dc + apply_sign()
add v3.8h, v3.8h, v0.8h
smax v2.8h, v2.8h, v30.8h
smax v3.8h, v3.8h, v30.8h
smin v2.8h, v2.8h, v31.8h
smin v3.8h, v3.8h, v31.8h
st1 {v2.d}[0], [x0], x1
st1 {v2.d}[1], [x6], x1
st1 {v3.d}[0], [x0], x1
st1 {v3.d}[1], [x6], x1
b.gt 1b
ret
L(ipred_cfl_splat_w8):
AARCH64_VALID_JUMP_TARGET
1:
ld1 {v4.8h, v5.8h}, [x5], #32
subs w4, w4, #2
smull v2.4s, v4.4h, v1.4h // diff = ac * alpha
smull2 v3.4s, v4.8h, v1.8h
smull v4.4s, v5.4h, v1.4h
smull2 v5.4s, v5.8h, v1.8h
cmlt v16.4s, v2.4s, #0 // sign
cmlt v17.4s, v3.4s, #0
cmlt v18.4s, v4.4s, #0
cmlt v19.4s, v5.4s, #0
add v2.4s, v2.4s, v16.4s // diff + sign
add v3.4s, v3.4s, v17.4s
add v4.4s, v4.4s, v18.4s
add v5.4s, v5.4s, v19.4s
rshrn v2.4h, v2.4s, #6 // (diff + sign + 32) >> 6 = apply_sign()
rshrn2 v2.8h, v3.4s, #6
rshrn v3.4h, v4.4s, #6
rshrn2 v3.8h, v5.4s, #6
add v2.8h, v2.8h, v0.8h // dc + apply_sign()
add v3.8h, v3.8h, v0.8h
smax v2.8h, v2.8h, v30.8h
smax v3.8h, v3.8h, v30.8h
smin v2.8h, v2.8h, v31.8h
smin v3.8h, v3.8h, v31.8h
st1 {v2.8h}, [x0], x1
st1 {v3.8h}, [x6], x1
b.gt 1b
ret
L(ipred_cfl_splat_w16):
AARCH64_VALID_JUMP_TARGET
add x7, x5, w3, uxtw #1
sub x1, x1, w3, uxtw #1
mov w9, w3
1:
ld1 {v2.8h, v3.8h}, [x5], #32
ld1 {v4.8h, v5.8h}, [x7], #32
subs w3, w3, #16
smull v16.4s, v2.4h, v1.4h // diff = ac * alpha
smull2 v17.4s, v2.8h, v1.8h
smull v18.4s, v3.4h, v1.4h
smull2 v19.4s, v3.8h, v1.8h
smull v2.4s, v4.4h, v1.4h
smull2 v3.4s, v4.8h, v1.8h
smull v4.4s, v5.4h, v1.4h
smull2 v5.4s, v5.8h, v1.8h
cmlt v20.4s, v16.4s, #0 // sign
cmlt v21.4s, v17.4s, #0
cmlt v22.4s, v18.4s, #0
cmlt v23.4s, v19.4s, #0
cmlt v24.4s, v2.4s, #0
cmlt v25.4s, v3.4s, #0
cmlt v26.4s, v4.4s, #0
cmlt v27.4s, v5.4s, #0
add v16.4s, v16.4s, v20.4s // diff + sign
add v17.4s, v17.4s, v21.4s
add v18.4s, v18.4s, v22.4s
add v19.4s, v19.4s, v23.4s
add v2.4s, v2.4s, v24.4s
add v3.4s, v3.4s, v25.4s
add v4.4s, v4.4s, v26.4s
add v5.4s, v5.4s, v27.4s
rshrn v16.4h, v16.4s, #6 // (diff + sign + 32) >> 6 = apply_sign()
rshrn2 v16.8h, v17.4s, #6
rshrn v17.4h, v18.4s, #6
rshrn2 v17.8h, v19.4s, #6
rshrn v6.4h, v2.4s, #6
rshrn2 v6.8h, v3.4s, #6
rshrn v7.4h, v4.4s, #6
rshrn2 v7.8h, v5.4s, #6
add v2.8h, v16.8h, v0.8h // dc + apply_sign()
add v3.8h, v17.8h, v0.8h
add v4.8h, v6.8h, v0.8h
add v5.8h, v7.8h, v0.8h
smax v2.8h, v2.8h, v30.8h
smax v3.8h, v3.8h, v30.8h
smax v4.8h, v4.8h, v30.8h
smax v5.8h, v5.8h, v30.8h
smin v2.8h, v2.8h, v31.8h
smin v3.8h, v3.8h, v31.8h
smin v4.8h, v4.8h, v31.8h
smin v5.8h, v5.8h, v31.8h
st1 {v2.8h, v3.8h}, [x0], #32
st1 {v4.8h, v5.8h}, [x6], #32
b.gt 1b
subs w4, w4, #2
add x5, x5, w9, uxtw #1
add x7, x7, w9, uxtw #1
add x0, x0, x1
add x6, x6, x1
mov w3, w9
b.gt 1b
ret
endfunc
jumptable ipred_cfl_128_tbl
ipred_cfl_splat_tbl:
.word L(ipred_cfl_splat_w16) - ipred_cfl_128_tbl
.word L(ipred_cfl_splat_w16) - ipred_cfl_128_tbl
.word L(ipred_cfl_splat_w8) - ipred_cfl_128_tbl
.word L(ipred_cfl_splat_w4) - ipred_cfl_128_tbl
endjumptable
// void ipred_cfl_top_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height,
// const int16_t *ac, const int alpha,
// const int bitdepth_max);
function ipred_cfl_top_16bpc_neon, export=1
dup v31.8h, w7 // bitdepth_max
clz w9, w3
movrel x7, ipred_cfl_top_tbl
sub w9, w9, #26
ldrsw x9, [x7, w9, uxtw #2]
dup v1.8h, w6 // alpha
add x2, x2, #2
add x7, x7, x9
add x6, x0, x1
lsl x1, x1, #1
movi v30.8h, #0
br x7
4:
AARCH64_VALID_JUMP_TARGET
ld1 {v0.4h}, [x2]
addv h0, v0.4h
urshr v0.4h, v0.4h, #2
dup v0.8h, v0.h[0]
b L(ipred_cfl_splat_w4)
8:
AARCH64_VALID_JUMP_TARGET
ld1 {v0.8h}, [x2]
addv h0, v0.8h
urshr v0.4h, v0.4h, #3
dup v0.8h, v0.h[0]
b L(ipred_cfl_splat_w8)
16:
AARCH64_VALID_JUMP_TARGET
ld1 {v2.8h, v3.8h}, [x2]
addp v0.8h, v2.8h, v3.8h
addv h0, v0.8h
urshr v0.4h, v0.4h, #4
dup v0.8h, v0.h[0]
b L(ipred_cfl_splat_w16)
32:
AARCH64_VALID_JUMP_TARGET
ld1 {v2.8h, v3.8h, v4.8h, v5.8h}, [x2]
addp v2.8h, v2.8h, v3.8h
addp v4.8h, v4.8h, v5.8h
addp v0.8h, v2.8h, v4.8h
uaddlv s0, v0.8h
rshrn v0.4h, v0.4s, #5
dup v0.8h, v0.h[0]
b L(ipred_cfl_splat_w16)
endfunc
jumptable ipred_cfl_top_tbl
.word 32b - ipred_cfl_top_tbl
.word 16b - ipred_cfl_top_tbl
.word 8b - ipred_cfl_top_tbl
.word 4b - ipred_cfl_top_tbl
endjumptable
// void ipred_cfl_left_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height,
// const int16_t *ac, const int alpha,
// const int bitdepth_max);
function ipred_cfl_left_16bpc_neon, export=1
dup v31.8h, w7 // bitdepth_max
sub x2, x2, w4, uxtw #1
clz w9, w3
clz w8, w4
movrel x10, ipred_cfl_splat_tbl
movrel x7, ipred_cfl_left_tbl
sub w9, w9, #26
sub w8, w8, #26
ldrsw x9, [x10, w9, uxtw #2]
ldrsw x8, [x7, w8, uxtw #2]
dup v1.8h, w6 // alpha
add x9, x10, x9
add x7, x7, x8
add x6, x0, x1
lsl x1, x1, #1
movi v30.8h, #0
br x7
L(ipred_cfl_left_h4):
AARCH64_VALID_JUMP_TARGET
ld1 {v0.4h}, [x2]
addv h0, v0.4h
urshr v0.4h, v0.4h, #2
dup v0.8h, v0.h[0]
br x9
L(ipred_cfl_left_h8):
AARCH64_VALID_JUMP_TARGET
ld1 {v0.8h}, [x2]
addv h0, v0.8h
urshr v0.4h, v0.4h, #3
dup v0.8h, v0.h[0]
br x9
L(ipred_cfl_left_h16):
AARCH64_VALID_JUMP_TARGET
ld1 {v2.8h, v3.8h}, [x2]
addp v0.8h, v2.8h, v3.8h
addv h0, v0.8h
urshr v0.4h, v0.4h, #4
dup v0.8h, v0.h[0]
br x9
L(ipred_cfl_left_h32):
AARCH64_VALID_JUMP_TARGET
ld1 {v2.8h, v3.8h, v4.8h, v5.8h}, [x2]
addp v2.8h, v2.8h, v3.8h
addp v4.8h, v4.8h, v5.8h
addp v0.8h, v2.8h, v4.8h
uaddlv s0, v0.8h
rshrn v0.4h, v0.4s, #5
dup v0.8h, v0.h[0]
br x9
endfunc
jumptable ipred_cfl_left_tbl
.word L(ipred_cfl_left_h32) - ipred_cfl_left_tbl
.word L(ipred_cfl_left_h16) - ipred_cfl_left_tbl
.word L(ipred_cfl_left_h8) - ipred_cfl_left_tbl
.word L(ipred_cfl_left_h4) - ipred_cfl_left_tbl
endjumptable
// void ipred_cfl_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height,
// const int16_t *ac, const int alpha,
// const int bitdepth_max);
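// The DC value fed into the splat loops is, roughly (illustrative only):
//
//   int sum = 0;
//   for (int i = 0; i < height; i++) sum += left[i];
//   for (int i = 0; i < width;  i++) sum += top[i];
//   int dc = (sum + ((width + height) >> 1)) / (width + height);
//
// width + height is always 2^n, 3*2^n or 5*2^n here, so the division is
// done as a shift by ctz(width + height), followed for rectangular
// blocks by a fixed-point multiply by 1/3 or 1/5 (the 0xAAAB/0x6667
// constants below).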
function ipred_cfl_16bpc_neon, export=1
dup v31.8h, w7 // bitdepth_max
sub x2, x2, w4, uxtw #1
add w8, w3, w4 // width + height
dup v1.8h, w6 // alpha
clz w9, w3
clz w6, w4
dup v16.4s, w8 // width + height
movrel x7, ipred_cfl_tbl
rbit w8, w8 // rbit(width + height)
sub w9, w9, #22 // 26 leading bits, minus table offset 4
sub w6, w6, #26
clz w8, w8 // ctz(width + height)
ldrsw x9, [x7, w9, uxtw #2]
ldrsw x6, [x7, w6, uxtw #2]
neg w8, w8 // -ctz(width + height)
add x9, x7, x9
add x7, x7, x6
ushr v16.4s, v16.4s, #1 // (width + height) >> 1
dup v17.4s, w8 // -ctz(width + height)
add x6, x0, x1
lsl x1, x1, #1
movi v30.8h, #0
br x7
L(ipred_cfl_h4):
AARCH64_VALID_JUMP_TARGET
ld1 {v0.4h}, [x2], #8
uaddlv s0, v0.4h
add x2, x2, #2
br x9
L(ipred_cfl_w4):
AARCH64_VALID_JUMP_TARGET
ld1 {v2.4h}, [x2]
add v0.2s, v0.2s, v16.2s
uaddlv s2, v2.4h
cmp w4, #4
add v0.2s, v0.2s, v2.2s
ushl v0.2s, v0.2s, v17.2s
b.eq 1f
// h = 8/16
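// 0xAAAB and 0x6667 are fixed-point reciprocals: for the sums used
// here, (x * 0xAAAB) >> 17 == x / 3 and (x * 0x6667) >> 17 == x / 5,
// finishing the division by width + height == 12 or 20 after the
// shift above.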
cmp w4, #16
mov w16, #0x6667
mov w17, #0xAAAB
csel w16, w16, w17, eq
dup v16.2s, w16
mul v0.2s, v0.2s, v16.2s
ushr v0.2s, v0.2s, #17
1:
dup v0.8h, v0.h[0]
b L(ipred_cfl_splat_w4)
L(ipred_cfl_h8):
AARCH64_VALID_JUMP_TARGET
ld1 {v0.8h}, [x2], #16
uaddlv s0, v0.8h
add x2, x2, #2
br x9
L(ipred_cfl_w8):
AARCH64_VALID_JUMP_TARGET
ld1 {v2.8h}, [x2]
add v0.2s, v0.2s, v16.2s
uaddlv s2, v2.8h
cmp w4, #8
add v0.2s, v0.2s, v2.2s
ushl v0.2s, v0.2s, v17.2s
b.eq 1f
// h = 4/16/32
cmp w4, #32
mov w16, #0x6667
mov w17, #0xAAAB
csel w16, w16, w17, eq
dup v16.2s, w16
mul v0.2s, v0.2s, v16.2s
ushr v0.2s, v0.2s, #17
1:
dup v0.8h, v0.h[0]
b L(ipred_cfl_splat_w8)
L(ipred_cfl_h16):
AARCH64_VALID_JUMP_TARGET
ld1 {v2.8h, v3.8h}, [x2], #32
addp v0.8h, v2.8h, v3.8h
add x2, x2, #2
uaddlv s0, v0.8h
br x9
L(ipred_cfl_w16):
AARCH64_VALID_JUMP_TARGET
ld1 {v2.8h, v3.8h}, [x2]
add v0.2s, v0.2s, v16.2s
addp v2.8h, v2.8h, v3.8h
uaddlv s2, v2.8h
cmp w4, #16
add v0.2s, v0.2s, v2.2s
ushl v0.2s, v0.2s, v17.2s
b.eq 1f
// h = 4/8/32
tst w4, #(32+16+8) // 16 added to make a consecutive bitmask
mov w16, #0x6667
mov w17, #0xAAAB
csel w16, w16, w17, eq
dup v16.2s, w16
mul v0.2s, v0.2s, v16.2s
ushr v0.2s, v0.2s, #17
1:
dup v0.8h, v0.h[0]
b L(ipred_cfl_splat_w16)
L(ipred_cfl_h32):
AARCH64_VALID_JUMP_TARGET
ld1 {v2.8h, v3.8h, v4.8h, v5.8h}, [x2], #64
addp v2.8h, v2.8h, v3.8h
addp v4.8h, v4.8h, v5.8h
addp v0.8h, v2.8h, v4.8h
add x2, x2, #2
uaddlv s0, v0.8h
br x9
L(ipred_cfl_w32):
AARCH64_VALID_JUMP_TARGET
ld1 {v2.8h, v3.8h, v4.8h, v5.8h}, [x2]
add v0.4s, v0.4s, v16.4s
addp v2.8h, v2.8h, v3.8h
addp v4.8h, v4.8h, v5.8h
addp v2.8h, v2.8h, v4.8h
cmp w4, #32
uaddlv s2, v2.8h
add v0.2s, v0.2s, v2.2s
ushl v0.2s, v0.2s, v17.2s
b.eq 1f
// h = 8/16
cmp w4, #8
mov w16, #0x6667
mov w17, #0xAAAB
csel w16, w16, w17, eq
dup v16.2s, w16
mul v0.2s, v0.2s, v16.2s
ushr v0.2s, v0.2s, #17
1:
dup v0.8h, v0.h[0]
b L(ipred_cfl_splat_w16)
endfunc
jumptable ipred_cfl_tbl
.word L(ipred_cfl_h32) - ipred_cfl_tbl
.word L(ipred_cfl_h16) - ipred_cfl_tbl
.word L(ipred_cfl_h8) - ipred_cfl_tbl
.word L(ipred_cfl_h4) - ipred_cfl_tbl
.word L(ipred_cfl_w32) - ipred_cfl_tbl
.word L(ipred_cfl_w16) - ipred_cfl_tbl
.word L(ipred_cfl_w8) - ipred_cfl_tbl
.word L(ipred_cfl_w4) - ipred_cfl_tbl
endjumptable
// void cfl_ac_420_16bpc_neon(int16_t *const ac, const pixel *const ypx,
// const ptrdiff_t stride, const int w_pad,
// const int h_pad, const int cw, const int ch);
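// This subsamples the luma plane 2x2 into ac[], scaled so that every AC sample
// is 8x the subsampled luma value, pads the right/bottom of the block by
// repeating the last valid column/row, and finally subtracts the rounded block
// average so the AC block is zero-mean. A scalar model (an illustrative
// sketch, not dav1d's reference C; stride is in pixels, w/h are the cw/ch
// arguments):
//
//   void cfl_ac_420_model(int16_t *ac, const uint16_t *ypx, ptrdiff_t stride,
//                         int w_pad, int h_pad, int w, int h) {
//       for (int y = 0; y < h - 4 * h_pad; y++, ypx += 2 * stride)
//           for (int x = 0; x < w - 4 * w_pad; x++)
//               ac[y*w + x] = (ypx[2*x] + ypx[2*x + 1] +
//                              ypx[2*x + stride] + ypx[2*x + 1 + stride]) << 1;
//       for (int y = 0; y < h - 4 * h_pad; y++)          // right padding
//           for (int x = w - 4 * w_pad; x < w; x++)
//               ac[y*w + x] = ac[y*w + w - 4*w_pad - 1];
//       for (int y = h - 4 * h_pad; y < h; y++)          // bottom padding
//           for (int x = 0; x < w; x++)
//               ac[y*w + x] = ac[(h - 4*h_pad - 1)*w + x];
//       int32_t sum = 0;
//       for (int i = 0; i < w * h; i++) sum += ac[i];
//       const int log2sz = __builtin_ctz(w) + __builtin_ctz(h);
//       const int16_t avg = (sum + (1 << (log2sz - 1))) >> log2sz;
//       for (int i = 0; i < w * h; i++) ac[i] -= avg;    // zero-mean AC
//   }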
function ipred_cfl_ac_420_16bpc_neon, export=1
clz w8, w5
lsl w4, w4, #2
movrel x7, ipred_cfl_ac_420_tbl
sub w8, w8, #27
ldrsw x8, [x7, w8, uxtw #2]
movi v24.4s, #0
movi v25.4s, #0
movi v26.4s, #0
movi v27.4s, #0
add x7, x7, x8
sub w8, w6, w4 // height - h_pad
rbit w9, w5 // rbit(width)
rbit w10, w6 // rbit(height)
clz w9, w9 // ctz(width)
clz w10, w10 // ctz(height)
add w9, w9, w10 // log2sz
add x10, x1, x2
dup v31.4s, w9
lsl x2, x2, #1
neg v31.4s, v31.4s // -log2sz
br x7
L(ipred_cfl_ac_420_w4):
AARCH64_VALID_JUMP_TARGET
1: // Copy and subsample input
ld1 {v0.8h}, [x1], x2
ld1 {v1.8h}, [x10], x2
ld1 {v2.8h}, [x1], x2
ld1 {v3.8h}, [x10], x2
addp v0.8h, v0.8h, v2.8h
addp v1.8h, v1.8h, v3.8h
add v0.8h, v0.8h, v1.8h
shl v0.8h, v0.8h, #1
subs w8, w8, #2
st1 {v0.8h}, [x0], #16
uaddw v24.4s, v24.4s, v0.4h
uaddw2 v25.4s, v25.4s, v0.8h
b.gt 1b
trn2 v1.2d, v0.2d, v0.2d
trn2 v0.2d, v0.2d, v0.2d
L(ipred_cfl_ac_420_w4_hpad):
cbz w4, 3f
2: // Vertical padding (h_pad > 0)
subs w4, w4, #4
st1 {v0.8h, v1.8h}, [x0], #32
uaddw v24.4s, v24.4s, v0.4h
uaddw2 v25.4s, v25.4s, v0.8h
uaddw v26.4s, v26.4s, v1.4h
uaddw2 v27.4s, v27.4s, v1.8h
b.gt 2b
3:
L(ipred_cfl_ac_420_w4_calc_subtract_dc):
// Aggregate the sums
add v24.4s, v24.4s, v25.4s
add v26.4s, v26.4s, v27.4s
add v0.4s, v24.4s, v26.4s
addv s0, v0.4s // sum
sub x0, x0, w6, uxtw #3
urshl v4.2s, v0.2s, v31.2s // (sum + (1 << (log2sz - 1))) >>= log2sz
dup v4.8h, v4.h[0]
6: // Subtract dc from ac
ld1 {v0.8h, v1.8h}, [x0]
subs w6, w6, #4
sub v0.8h, v0.8h, v4.8h
sub v1.8h, v1.8h, v4.8h
st1 {v0.8h, v1.8h}, [x0], #32
b.gt 6b
ret
L(ipred_cfl_ac_420_w8):
AARCH64_VALID_JUMP_TARGET
cbnz w3, L(ipred_cfl_ac_420_w8_wpad)
1: // Copy and subsample input, without padding
ld1 {v0.8h, v1.8h}, [x1], x2
ld1 {v2.8h, v3.8h}, [x10], x2
ld1 {v4.8h, v5.8h}, [x1], x2
addp v0.8h, v0.8h, v1.8h
ld1 {v6.8h, v7.8h}, [x10], x2
addp v2.8h, v2.8h, v3.8h
addp v4.8h, v4.8h, v5.8h
addp v6.8h, v6.8h, v7.8h
add v0.8h, v0.8h, v2.8h
add v4.8h, v4.8h, v6.8h
shl v0.8h, v0.8h, #1
shl v1.8h, v4.8h, #1
subs w8, w8, #2
st1 {v0.8h, v1.8h}, [x0], #32
uaddw v24.4s, v24.4s, v0.4h
uaddw2 v25.4s, v25.4s, v0.8h
uaddw v26.4s, v26.4s, v1.4h
uaddw2 v27.4s, v27.4s, v1.8h
b.gt 1b
mov v0.16b, v1.16b
b L(ipred_cfl_ac_420_w8_hpad)
L(ipred_cfl_ac_420_w8_wpad):
1: // Copy and subsample input, padding 4
ld1 {v0.8h}, [x1], x2
ld1 {v1.8h}, [x10], x2
ld1 {v2.8h}, [x1], x2
ld1 {v3.8h}, [x10], x2
addp v0.8h, v0.8h, v2.8h
addp v1.8h, v1.8h, v3.8h
add v0.8h, v0.8h, v1.8h
shl v0.8h, v0.8h, #1
dup v1.4h, v0.h[3]
dup v3.4h, v0.h[7]
trn2 v2.2d, v0.2d, v0.2d
subs w8, w8, #2
st1 {v0.4h, v1.4h, v2.4h, v3.4h}, [x0], #32
uaddw v24.4s, v24.4s, v0.4h
uaddw v25.4s, v25.4s, v1.4h
uaddw v26.4s, v26.4s, v2.4h
uaddw v27.4s, v27.4s, v3.4h
b.gt 1b
trn1 v0.2d, v2.2d, v3.2d
trn1 v1.2d, v2.2d, v3.2d
L(ipred_cfl_ac_420_w8_hpad):
cbz w4, 3f
2: // Vertical padding (h_pad > 0)
subs w4, w4, #4
st1 {v0.8h, v1.8h}, [x0], #32
uaddw v24.4s, v24.4s, v0.4h
uaddw2 v25.4s, v25.4s, v0.8h
uaddw v26.4s, v26.4s, v1.4h
uaddw2 v27.4s, v27.4s, v1.8h
st1 {v0.8h, v1.8h}, [x0], #32
uaddw v24.4s, v24.4s, v0.4h
uaddw2 v25.4s, v25.4s, v0.8h
uaddw v26.4s, v26.4s, v1.4h
uaddw2 v27.4s, v27.4s, v1.8h
b.gt 2b
3:
// Double the height and reuse the w4 summing/subtracting
lsl w6, w6, #1
b L(ipred_cfl_ac_420_w4_calc_subtract_dc)
L(ipred_cfl_ac_420_w16):
AARCH64_VALID_JUMP_TARGET
movrel x7, ipred_cfl_ac_420_w16_tbl
ldrsw x3, [x7, w3, uxtw #2]
add x7, x7, x3
br x7
L(ipred_cfl_ac_420_w16_wpad0):
AARCH64_VALID_JUMP_TARGET
1: // Copy and subsample input, without padding
ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x1], x2
ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x10], x2
addp v0.8h, v0.8h, v1.8h
addp v2.8h, v2.8h, v3.8h
addp v4.8h, v4.8h, v5.8h
addp v6.8h, v6.8h, v7.8h
ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [x1], x2
add v0.8h, v0.8h, v4.8h
ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [x10], x2
add v2.8h, v2.8h, v6.8h
addp v16.8h, v16.8h, v17.8h
addp v18.8h, v18.8h, v19.8h
addp v20.8h, v20.8h, v21.8h
addp v22.8h, v22.8h, v23.8h
add v16.8h, v16.8h, v20.8h
add v18.8h, v18.8h, v22.8h
shl v0.8h, v0.8h, #1
shl v1.8h, v2.8h, #1
shl v2.8h, v16.8h, #1
shl v3.8h, v18.8h, #1
subs w8, w8, #2
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
uaddw v24.4s, v24.4s, v0.4h
uaddw2 v25.4s, v25.4s, v0.8h
uaddw v26.4s, v26.4s, v1.4h
uaddw2 v27.4s, v27.4s, v1.8h
uaddw v24.4s, v24.4s, v2.4h
uaddw2 v25.4s, v25.4s, v2.8h
uaddw v26.4s, v26.4s, v3.4h
uaddw2 v27.4s, v27.4s, v3.8h
b.gt 1b
mov v0.16b, v2.16b
mov v1.16b, v3.16b
b L(ipred_cfl_ac_420_w16_hpad)
L(ipred_cfl_ac_420_w16_wpad1):
AARCH64_VALID_JUMP_TARGET
1: // Copy and subsample input, padding 4
ldr q2, [x1, #32]
ld1 {v0.8h, v1.8h}, [x1], x2
ldr q5, [x10, #32]
ld1 {v3.8h, v4.8h}, [x10], x2
addp v2.8h, v2.8h, v2.8h
addp v0.8h, v0.8h, v1.8h
addp v5.8h, v5.8h, v5.8h
addp v3.8h, v3.8h, v4.8h
ldr q18, [x1, #32]
add v2.4h, v2.4h, v5.4h
ld1 {v16.8h, v17.8h}, [x1], x2
add v0.8h, v0.8h, v3.8h
ldr q21, [x10, #32]
ld1 {v19.8h, v20.8h}, [x10], x2
addp v18.8h, v18.8h, v18.8h
addp v16.8h, v16.8h, v17.8h
addp v21.8h, v21.8h, v21.8h
addp v19.8h, v19.8h, v20.8h
add v18.4h, v18.4h, v21.4h
add v16.8h, v16.8h, v19.8h
shl v1.4h, v2.4h, #1
shl v0.8h, v0.8h, #1
shl v3.4h, v18.4h, #1
shl v2.8h, v16.8h, #1
dup v4.4h, v1.h[3]
dup v5.4h, v3.h[3]
trn1 v1.2d, v1.2d, v4.2d
trn1 v3.2d, v3.2d, v5.2d
subs w8, w8, #2
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
uaddw v24.4s, v24.4s, v0.4h
uaddw2 v25.4s, v25.4s, v0.8h
uaddw v26.4s, v26.4s, v1.4h
uaddw2 v27.4s, v27.4s, v1.8h
uaddw v24.4s, v24.4s, v2.4h
uaddw2 v25.4s, v25.4s, v2.8h
uaddw v26.4s, v26.4s, v3.4h
uaddw2 v27.4s, v27.4s, v3.8h
b.gt 1b
mov v0.16b, v2.16b
mov v1.16b, v3.16b
b L(ipred_cfl_ac_420_w16_hpad)
L(ipred_cfl_ac_420_w16_wpad2):
AARCH64_VALID_JUMP_TARGET
1: // Copy and subsample input, padding 8
ld1 {v0.8h, v1.8h}, [x1], x2
ld1 {v2.8h, v3.8h}, [x10], x2
ld1 {v4.8h, v5.8h}, [x1], x2
addp v0.8h, v0.8h, v1.8h
ld1 {v6.8h, v7.8h}, [x10], x2
addp v2.8h, v2.8h, v3.8h
addp v4.8h, v4.8h, v5.8h
addp v6.8h, v6.8h, v7.8h
add v0.8h, v0.8h, v2.8h
add v4.8h, v4.8h, v6.8h
shl v0.8h, v0.8h, #1
shl v2.8h, v4.8h, #1
dup v1.8h, v0.h[7]
dup v3.8h, v2.h[7]
subs w8, w8, #2
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
uaddw v24.4s, v24.4s, v0.4h
uaddw2 v25.4s, v25.4s, v0.8h
uaddw v26.4s, v26.4s, v1.4h
uaddw2 v27.4s, v27.4s, v1.8h
uaddw v24.4s, v24.4s, v2.4h
uaddw2 v25.4s, v25.4s, v2.8h
uaddw v26.4s, v26.4s, v3.4h
uaddw2 v27.4s, v27.4s, v3.8h
b.gt 1b
mov v0.16b, v2.16b
mov v1.16b, v3.16b
b L(ipred_cfl_ac_420_w16_hpad)
L(ipred_cfl_ac_420_w16_wpad3):
AARCH64_VALID_JUMP_TARGET
1: // Copy and subsample input, padding 12
ld1 {v0.8h}, [x1], x2
ld1 {v2.8h}, [x10], x2
ld1 {v4.8h}, [x1], x2
ld1 {v6.8h}, [x10], x2
addp v0.8h, v0.8h, v4.8h
addp v2.8h, v2.8h, v6.8h
add v0.8h, v0.8h, v2.8h
shl v0.8h, v0.8h, #1
dup v1.8h, v0.h[3]
dup v3.8h, v0.h[7]
trn2 v2.2d, v0.2d, v3.2d
trn1 v0.2d, v0.2d, v1.2d
subs w8, w8, #2
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
uaddw v24.4s, v24.4s, v0.4h
uaddw2 v25.4s, v25.4s, v0.8h
uaddw v26.4s, v26.4s, v1.4h
uaddw2 v27.4s, v27.4s, v1.8h
uaddw v24.4s, v24.4s, v2.4h
uaddw2 v25.4s, v25.4s, v2.8h
uaddw v26.4s, v26.4s, v3.4h
uaddw2 v27.4s, v27.4s, v3.8h
b.gt 1b
mov v0.16b, v2.16b
mov v1.16b, v3.16b
L(ipred_cfl_ac_420_w16_hpad):
cbz w4, 3f
2: // Vertical padding (h_pad > 0)
subs w4, w4, #4
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
uaddw v24.4s, v24.4s, v0.4h
uaddw2 v25.4s, v25.4s, v0.8h
uaddw v26.4s, v26.4s, v1.4h
uaddw2 v27.4s, v27.4s, v1.8h
uaddw v24.4s, v24.4s, v2.4h
uaddw2 v25.4s, v25.4s, v2.8h
uaddw v26.4s, v26.4s, v3.4h
uaddw2 v27.4s, v27.4s, v3.8h
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
uaddw v24.4s, v24.4s, v0.4h
uaddw2 v25.4s, v25.4s, v0.8h
uaddw v26.4s, v26.4s, v1.4h
uaddw2 v27.4s, v27.4s, v1.8h
uaddw v24.4s, v24.4s, v2.4h
uaddw2 v25.4s, v25.4s, v2.8h
uaddw v26.4s, v26.4s, v3.4h
uaddw2 v27.4s, v27.4s, v3.8h
b.gt 2b
3:
// Quadruple the height and reuse the w4 summing/subtracting
lsl w6, w6, #2
b L(ipred_cfl_ac_420_w4_calc_subtract_dc)
endfunc
jumptable ipred_cfl_ac_420_tbl
.word L(ipred_cfl_ac_420_w16) - ipred_cfl_ac_420_tbl
.word L(ipred_cfl_ac_420_w8) - ipred_cfl_ac_420_tbl
.word L(ipred_cfl_ac_420_w4) - ipred_cfl_ac_420_tbl
endjumptable
jumptable ipred_cfl_ac_420_w16_tbl
.word L(ipred_cfl_ac_420_w16_wpad0) - ipred_cfl_ac_420_w16_tbl
.word L(ipred_cfl_ac_420_w16_wpad1) - ipred_cfl_ac_420_w16_tbl
.word L(ipred_cfl_ac_420_w16_wpad2) - ipred_cfl_ac_420_w16_tbl
.word L(ipred_cfl_ac_420_w16_wpad3) - ipred_cfl_ac_420_w16_tbl
endjumptable
// void cfl_ac_422_16bpc_neon(int16_t *const ac, const pixel *const ypx,
// const ptrdiff_t stride, const int w_pad,
// const int h_pad, const int cw, const int ch);
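// Same structure as the 4:2:0 version above, but the luma is only subsampled
// horizontally: each AC sample is (ypx[y][2*x] + ypx[y][2*x + 1]) << 2 (still
// 8x the subsampled value), before the shared padding and mean-subtraction
// code is reused.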
function ipred_cfl_ac_422_16bpc_neon, export=1
clz w8, w5
lsl w4, w4, #2
movrel x7, ipred_cfl_ac_422_tbl
sub w8, w8, #27
ldrsw x8, [x7, w8, uxtw #2]
movi v24.4s, #0
movi v25.4s, #0
movi v26.4s, #0
movi v27.4s, #0
add x7, x7, x8
sub w8, w6, w4 // height - h_pad
rbit w9, w5 // rbit(width)
rbit w10, w6 // rbit(height)
clz w9, w9 // ctz(width)
clz w10, w10 // ctz(height)
add w9, w9, w10 // log2sz
add x10, x1, x2
dup v31.4s, w9
lsl x2, x2, #1
neg v31.4s, v31.4s // -log2sz
br x7
L(ipred_cfl_ac_422_w4):
AARCH64_VALID_JUMP_TARGET
1: // Copy and subsample input
ld1 {v0.8h}, [x1], x2
ld1 {v1.8h}, [x10], x2
ld1 {v2.8h}, [x1], x2
ld1 {v3.8h}, [x10], x2
addp v0.8h, v0.8h, v1.8h
addp v2.8h, v2.8h, v3.8h
shl v0.8h, v0.8h, #2
shl v1.8h, v2.8h, #2
subs w8, w8, #4
st1 {v0.8h, v1.8h}, [x0], #32
uaddw v24.4s, v24.4s, v0.4h
uaddw2 v25.4s, v25.4s, v0.8h
uaddw v26.4s, v26.4s, v1.4h
uaddw2 v27.4s, v27.4s, v1.8h
b.gt 1b
trn2 v0.2d, v1.2d, v1.2d
trn2 v1.2d, v1.2d, v1.2d
b L(ipred_cfl_ac_420_w4_hpad)
L(ipred_cfl_ac_422_w8):
AARCH64_VALID_JUMP_TARGET
cbnz w3, L(ipred_cfl_ac_422_w8_wpad)
1: // Copy and subsample input, without padding
ld1 {v0.8h, v1.8h}, [x1], x2
ld1 {v2.8h, v3.8h}, [x10], x2
ld1 {v4.8h, v5.8h}, [x1], x2
addp v0.8h, v0.8h, v1.8h
ld1 {v6.8h, v7.8h}, [x10], x2
addp v2.8h, v2.8h, v3.8h
addp v4.8h, v4.8h, v5.8h
addp v6.8h, v6.8h, v7.8h
shl v0.8h, v0.8h, #2
shl v1.8h, v2.8h, #2
shl v2.8h, v4.8h, #2
shl v3.8h, v6.8h, #2
subs w8, w8, #4
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
uaddw v24.4s, v24.4s, v0.4h
uaddw2 v25.4s, v25.4s, v0.8h
uaddw v26.4s, v26.4s, v1.4h
uaddw2 v27.4s, v27.4s, v1.8h
uaddw v24.4s, v24.4s, v2.4h
uaddw2 v25.4s, v25.4s, v2.8h
uaddw v26.4s, v26.4s, v3.4h
uaddw2 v27.4s, v27.4s, v3.8h
b.gt 1b
mov v0.16b, v3.16b
mov v1.16b, v3.16b
b L(ipred_cfl_ac_420_w8_hpad)
L(ipred_cfl_ac_422_w8_wpad):
1: // Copy and subsample input, padding 4
ld1 {v0.8h}, [x1], x2
ld1 {v1.8h}, [x10], x2
ld1 {v2.8h}, [x1], x2
ld1 {v3.8h}, [x10], x2
addp v0.8h, v0.8h, v1.8h
addp v2.8h, v2.8h, v3.8h
shl v0.8h, v0.8h, #2
shl v2.8h, v2.8h, #2
dup v4.4h, v0.h[3]
dup v5.8h, v0.h[7]
dup v6.4h, v2.h[3]
dup v7.8h, v2.h[7]
trn2 v1.2d, v0.2d, v5.2d
trn1 v0.2d, v0.2d, v4.2d
trn2 v3.2d, v2.2d, v7.2d
trn1 v2.2d, v2.2d, v6.2d
subs w8, w8, #4
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
uaddw v24.4s, v24.4s, v0.4h
uaddw2 v25.4s, v25.4s, v0.8h
uaddw v26.4s, v26.4s, v1.4h
uaddw2 v27.4s, v27.4s, v1.8h
uaddw v24.4s, v24.4s, v2.4h
uaddw2 v25.4s, v25.4s, v2.8h
uaddw v26.4s, v26.4s, v3.4h
uaddw2 v27.4s, v27.4s, v3.8h
b.gt 1b
mov v0.16b, v3.16b
mov v1.16b, v3.16b
b L(ipred_cfl_ac_420_w8_hpad)
L(ipred_cfl_ac_422_w16):
AARCH64_VALID_JUMP_TARGET
movrel x7, ipred_cfl_ac_422_w16_tbl
ldrsw x3, [x7, w3, uxtw #2]
add x7, x7, x3
br x7
L(ipred_cfl_ac_422_w16_wpad0):
AARCH64_VALID_JUMP_TARGET
1: // Copy and subsample input, without padding
ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x1], x2
ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x10], x2
addp v0.8h, v0.8h, v1.8h
addp v2.8h, v2.8h, v3.8h
addp v4.8h, v4.8h, v5.8h
addp v6.8h, v6.8h, v7.8h
shl v0.8h, v0.8h, #2
shl v1.8h, v2.8h, #2
shl v2.8h, v4.8h, #2
shl v3.8h, v6.8h, #2
subs w8, w8, #2
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
uaddw v24.4s, v24.4s, v0.4h
uaddw2 v25.4s, v25.4s, v0.8h
uaddw v26.4s, v26.4s, v1.4h
uaddw2 v27.4s, v27.4s, v1.8h
uaddw v24.4s, v24.4s, v2.4h
uaddw2 v25.4s, v25.4s, v2.8h
uaddw v26.4s, v26.4s, v3.4h
uaddw2 v27.4s, v27.4s, v3.8h
b.gt 1b
mov v0.16b, v2.16b
mov v1.16b, v3.16b
b L(ipred_cfl_ac_420_w16_hpad)
L(ipred_cfl_ac_422_w16_wpad1):
AARCH64_VALID_JUMP_TARGET
1: // Copy and subsample input, padding 4
ldr q2, [x1, #32]
ld1 {v0.8h, v1.8h}, [x1], x2
ldr q6, [x10, #32]
ld1 {v4.8h, v5.8h}, [x10], x2
addp v2.8h, v2.8h, v2.8h
addp v0.8h, v0.8h, v1.8h
addp v6.8h, v6.8h, v6.8h
addp v4.8h, v4.8h, v5.8h
shl v1.4h, v2.4h, #2
shl v0.8h, v0.8h, #2
shl v3.4h, v6.4h, #2
shl v2.8h, v4.8h, #2
dup v4.4h, v1.h[3]
dup v5.4h, v3.h[3]
trn1 v1.2d, v1.2d, v4.2d
trn1 v3.2d, v3.2d, v5.2d
subs w8, w8, #2
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
uaddw v24.4s, v24.4s, v0.4h
uaddw2 v25.4s, v25.4s, v0.8h
uaddw v26.4s, v26.4s, v1.4h
uaddw2 v27.4s, v27.4s, v1.8h
uaddw v24.4s, v24.4s, v2.4h
uaddw2 v25.4s, v25.4s, v2.8h
uaddw v26.4s, v26.4s, v3.4h
uaddw2 v27.4s, v27.4s, v3.8h
b.gt 1b
mov v0.16b, v2.16b
mov v1.16b, v3.16b
b L(ipred_cfl_ac_420_w16_hpad)
L(ipred_cfl_ac_422_w16_wpad2):
AARCH64_VALID_JUMP_TARGET
1: // Copy and subsample input, padding 8
ld1 {v0.8h, v1.8h}, [x1], x2
ld1 {v2.8h, v3.8h}, [x10], x2
addp v0.8h, v0.8h, v1.8h
addp v2.8h, v2.8h, v3.8h
shl v0.8h, v0.8h, #2
shl v2.8h, v2.8h, #2
dup v1.8h, v0.h[7]
dup v3.8h, v2.h[7]
subs w8, w8, #2
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
uaddw v24.4s, v24.4s, v0.4h
uaddw2 v25.4s, v25.4s, v0.8h
uaddw v26.4s, v26.4s, v1.4h
uaddw2 v27.4s, v27.4s, v1.8h
uaddw v24.4s, v24.4s, v2.4h
uaddw2 v25.4s, v25.4s, v2.8h
uaddw v26.4s, v26.4s, v3.4h
uaddw2 v27.4s, v27.4s, v3.8h
b.gt 1b
mov v0.16b, v2.16b
mov v1.16b, v3.16b
b L(ipred_cfl_ac_420_w16_hpad)
L(ipred_cfl_ac_422_w16_wpad3):
AARCH64_VALID_JUMP_TARGET
1: // Copy and subsample input, padding 12
ld1 {v0.8h}, [x1], x2
ld1 {v2.8h}, [x10], x2
addp v0.8h, v0.8h, v0.8h
addp v2.8h, v2.8h, v2.8h
shl v0.4h, v0.4h, #2
shl v2.4h, v2.4h, #2
dup v1.8h, v0.h[3]
dup v3.8h, v2.h[3]
trn1 v0.2d, v0.2d, v1.2d
trn1 v2.2d, v2.2d, v3.2d
subs w8, w8, #2
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
uaddw v24.4s, v24.4s, v0.4h
uaddw2 v25.4s, v25.4s, v0.8h
uaddw v26.4s, v26.4s, v1.4h
uaddw2 v27.4s, v27.4s, v1.8h
uaddw v24.4s, v24.4s, v2.4h
uaddw2 v25.4s, v25.4s, v2.8h
uaddw v26.4s, v26.4s, v3.4h
uaddw2 v27.4s, v27.4s, v3.8h
b.gt 1b
mov v0.16b, v2.16b
mov v1.16b, v3.16b
b L(ipred_cfl_ac_420_w16_hpad)
endfunc
jumptable ipred_cfl_ac_422_tbl
.word L(ipred_cfl_ac_422_w16) - ipred_cfl_ac_422_tbl
.word L(ipred_cfl_ac_422_w8) - ipred_cfl_ac_422_tbl
.word L(ipred_cfl_ac_422_w4) - ipred_cfl_ac_422_tbl
endjumptable
jumptable ipred_cfl_ac_422_w16_tbl
.word L(ipred_cfl_ac_422_w16_wpad0) - ipred_cfl_ac_422_w16_tbl
.word L(ipred_cfl_ac_422_w16_wpad1) - ipred_cfl_ac_422_w16_tbl
.word L(ipred_cfl_ac_422_w16_wpad2) - ipred_cfl_ac_422_w16_tbl
.word L(ipred_cfl_ac_422_w16_wpad3) - ipred_cfl_ac_422_w16_tbl
endjumptable
// void cfl_ac_444_16bpc_neon(int16_t *const ac, const pixel *const ypx,
// const ptrdiff_t stride, const int w_pad,
// const int h_pad, const int cw, const int ch);
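// 4:4:4 has no subsampling at all: each AC sample is simply ypx[y][x] << 3
// (again 8x the luma value), before the shared padding and mean-subtraction
// code is reused.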
function ipred_cfl_ac_444_16bpc_neon, export=1
clz w8, w5
lsl w4, w4, #2
movrel x7, ipred_cfl_ac_444_tbl
sub w8, w8, #26
ldrsw x8, [x7, w8, uxtw #2]
movi v24.4s, #0
movi v25.4s, #0
movi v26.4s, #0
movi v27.4s, #0
add x7, x7, x8
sub w8, w6, w4 // height - h_pad
rbit w9, w5 // rbit(width)
rbit w10, w6 // rbit(height)
clz w9, w9 // ctz(width)
clz w10, w10 // ctz(height)
add w9, w9, w10 // log2sz
add x10, x1, x2
dup v31.4s, w9
lsl x2, x2, #1
neg v31.4s, v31.4s // -log2sz
br x7
L(ipred_cfl_ac_444_w4):
AARCH64_VALID_JUMP_TARGET
1: // Copy and expand input
ld1 {v0.4h}, [x1], x2
ld1 {v0.d}[1], [x10], x2
ld1 {v1.4h}, [x1], x2
ld1 {v1.d}[1], [x10], x2
shl v0.8h, v0.8h, #3
shl v1.8h, v1.8h, #3
subs w8, w8, #4
st1 {v0.8h, v1.8h}, [x0], #32
uaddw v24.4s, v24.4s, v0.4h
uaddw2 v25.4s, v25.4s, v0.8h
uaddw v26.4s, v26.4s, v1.4h
uaddw2 v27.4s, v27.4s, v1.8h
b.gt 1b
trn2 v0.2d, v1.2d, v1.2d
trn2 v1.2d, v1.2d, v1.2d
b L(ipred_cfl_ac_420_w4_hpad)
L(ipred_cfl_ac_444_w8):
AARCH64_VALID_JUMP_TARGET
1: // Copy and expand input
ld1 {v0.8h}, [x1], x2
ld1 {v1.8h}, [x10], x2
ld1 {v2.8h}, [x1], x2
shl v0.8h, v0.8h, #3
ld1 {v3.8h}, [x10], x2
shl v1.8h, v1.8h, #3
shl v2.8h, v2.8h, #3
shl v3.8h, v3.8h, #3
subs w8, w8, #4
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
uaddw v24.4s, v24.4s, v0.4h
uaddw2 v25.4s, v25.4s, v0.8h
uaddw v26.4s, v26.4s, v1.4h
uaddw2 v27.4s, v27.4s, v1.8h
uaddw v24.4s, v24.4s, v2.4h
uaddw2 v25.4s, v25.4s, v2.8h
uaddw v26.4s, v26.4s, v3.4h
uaddw2 v27.4s, v27.4s, v3.8h
b.gt 1b
mov v0.16b, v3.16b
mov v1.16b, v3.16b
b L(ipred_cfl_ac_420_w8_hpad)
L(ipred_cfl_ac_444_w16):
AARCH64_VALID_JUMP_TARGET
cbnz w3, L(ipred_cfl_ac_444_w16_wpad)
1: // Copy and expand input, without padding
ld1 {v0.8h, v1.8h}, [x1], x2
ld1 {v2.8h, v3.8h}, [x10], x2
shl v0.8h, v0.8h, #3
shl v1.8h, v1.8h, #3
shl v2.8h, v2.8h, #3
shl v3.8h, v3.8h, #3
subs w8, w8, #2
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
uaddw v24.4s, v24.4s, v0.4h
uaddw2 v25.4s, v25.4s, v0.8h
uaddw v26.4s, v26.4s, v1.4h
uaddw2 v27.4s, v27.4s, v1.8h
uaddw v24.4s, v24.4s, v2.4h
uaddw2 v25.4s, v25.4s, v2.8h
uaddw v26.4s, v26.4s, v3.4h
uaddw2 v27.4s, v27.4s, v3.8h
b.gt 1b
mov v0.16b, v2.16b
mov v1.16b, v3.16b
b L(ipred_cfl_ac_420_w16_hpad)
L(ipred_cfl_ac_444_w16_wpad):
1: // Copy and expand input, padding 8
ld1 {v0.8h}, [x1], x2
ld1 {v2.8h}, [x10], x2
shl v0.8h, v0.8h, #3
shl v2.8h, v2.8h, #3
dup v1.8h, v0.h[7]
dup v3.8h, v2.h[7]
subs w8, w8, #2
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
uaddw v24.4s, v24.4s, v0.4h
uaddw2 v25.4s, v25.4s, v0.8h
uaddw v26.4s, v26.4s, v1.4h
uaddw2 v27.4s, v27.4s, v1.8h
uaddw v24.4s, v24.4s, v2.4h
uaddw2 v25.4s, v25.4s, v2.8h
uaddw v26.4s, v26.4s, v3.4h
uaddw2 v27.4s, v27.4s, v3.8h
b.gt 1b
mov v0.16b, v2.16b
mov v1.16b, v3.16b
b L(ipred_cfl_ac_420_w16_hpad)
L(ipred_cfl_ac_444_w32):
AARCH64_VALID_JUMP_TARGET
movrel x7, ipred_cfl_ac_444_w32_tbl
lsr w3, w3, #1
ldrsw x3, [x7, w3, uxtw #2]
lsr x2, x2, #1 // Restore the stride to one line increments
add x7, x7, x3
br x7
L(ipred_cfl_ac_444_w32_wpad0):
AARCH64_VALID_JUMP_TARGET
1: // Copy and expand input, without padding
ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x1], x2
shl v0.8h, v0.8h, #3
shl v1.8h, v1.8h, #3
shl v2.8h, v2.8h, #3
shl v3.8h, v3.8h, #3
subs w8, w8, #1
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
uaddw v24.4s, v24.4s, v0.4h
uaddw2 v25.4s, v25.4s, v0.8h
uaddw v26.4s, v26.4s, v1.4h
uaddw2 v27.4s, v27.4s, v1.8h
uaddw v24.4s, v24.4s, v2.4h
uaddw2 v25.4s, v25.4s, v2.8h
uaddw v26.4s, v26.4s, v3.4h
uaddw2 v27.4s, v27.4s, v3.8h
b.gt 1b
b L(ipred_cfl_ac_444_w32_hpad)
L(ipred_cfl_ac_444_w32_wpad2):
AARCH64_VALID_JUMP_TARGET
1: // Copy and expand input, padding 8
ld1 {v0.8h, v1.8h, v2.8h}, [x1], x2
shl v2.8h, v2.8h, #3
shl v0.8h, v0.8h, #3
shl v1.8h, v1.8h, #3
dup v3.8h, v2.h[7]
subs w8, w8, #1
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
uaddw v24.4s, v24.4s, v0.4h
uaddw2 v25.4s, v25.4s, v0.8h
uaddw v26.4s, v26.4s, v1.4h
uaddw2 v27.4s, v27.4s, v1.8h
uaddw v24.4s, v24.4s, v2.4h
uaddw2 v25.4s, v25.4s, v2.8h
uaddw v26.4s, v26.4s, v3.4h
uaddw2 v27.4s, v27.4s, v3.8h
b.gt 1b
b L(ipred_cfl_ac_444_w32_hpad)
L(ipred_cfl_ac_444_w32_wpad4):
AARCH64_VALID_JUMP_TARGET
1: // Copy and expand input, padding 16
ld1 {v0.8h, v1.8h}, [x1], x2
shl v1.8h, v1.8h, #3
shl v0.8h, v0.8h, #3
dup v2.8h, v1.h[7]
dup v3.8h, v1.h[7]
subs w8, w8, #1
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
uaddw v24.4s, v24.4s, v0.4h
uaddw2 v25.4s, v25.4s, v0.8h
uaddw v26.4s, v26.4s, v1.4h
uaddw2 v27.4s, v27.4s, v1.8h
uaddw v24.4s, v24.4s, v2.4h
uaddw2 v25.4s, v25.4s, v2.8h
uaddw v26.4s, v26.4s, v3.4h
uaddw2 v27.4s, v27.4s, v3.8h
b.gt 1b
b L(ipred_cfl_ac_444_w32_hpad)
L(ipred_cfl_ac_444_w32_wpad6):
AARCH64_VALID_JUMP_TARGET
1: // Copy and expand input, padding 24
ld1 {v0.8h}, [x1], x2
shl v0.8h, v0.8h, #3
dup v1.8h, v0.h[7]
dup v2.8h, v0.h[7]
dup v3.8h, v0.h[7]
subs w8, w8, #1
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
uaddw v24.4s, v24.4s, v0.4h
uaddw2 v25.4s, v25.4s, v0.8h
uaddw v26.4s, v26.4s, v1.4h
uaddw2 v27.4s, v27.4s, v1.8h
uaddw v24.4s, v24.4s, v2.4h
uaddw2 v25.4s, v25.4s, v2.8h
uaddw v26.4s, v26.4s, v3.4h
uaddw2 v27.4s, v27.4s, v3.8h
b.gt 1b
L(ipred_cfl_ac_444_w32_hpad):
cbz w4, 3f
2: // Vertical padding (h_pad > 0)
subs w4, w4, #2
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
uaddw v24.4s, v24.4s, v0.4h
uaddw2 v25.4s, v25.4s, v0.8h
uaddw v26.4s, v26.4s, v1.4h
uaddw2 v27.4s, v27.4s, v1.8h
uaddw v24.4s, v24.4s, v2.4h
uaddw2 v25.4s, v25.4s, v2.8h
uaddw v26.4s, v26.4s, v3.4h
uaddw2 v27.4s, v27.4s, v3.8h
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
uaddw v24.4s, v24.4s, v0.4h
uaddw2 v25.4s, v25.4s, v0.8h
uaddw v26.4s, v26.4s, v1.4h
uaddw2 v27.4s, v27.4s, v1.8h
uaddw v24.4s, v24.4s, v2.4h
uaddw2 v25.4s, v25.4s, v2.8h
uaddw v26.4s, v26.4s, v3.4h
uaddw2 v27.4s, v27.4s, v3.8h
b.gt 2b
3:
// Multiply the height by eight and reuse the w4 subtracting
lsl w6, w6, #3
b L(ipred_cfl_ac_420_w4_calc_subtract_dc)
endfunc
jumptable ipred_cfl_ac_444_tbl
.word L(ipred_cfl_ac_444_w32) - ipred_cfl_ac_444_tbl
.word L(ipred_cfl_ac_444_w16) - ipred_cfl_ac_444_tbl
.word L(ipred_cfl_ac_444_w8) - ipred_cfl_ac_444_tbl
.word L(ipred_cfl_ac_444_w4) - ipred_cfl_ac_444_tbl
endjumptable
jumptable ipred_cfl_ac_444_w32_tbl
.word L(ipred_cfl_ac_444_w32_wpad0) - ipred_cfl_ac_444_w32_tbl
.word L(ipred_cfl_ac_444_w32_wpad2) - ipred_cfl_ac_444_w32_tbl
.word L(ipred_cfl_ac_444_w32_wpad4) - ipred_cfl_ac_444_w32_tbl
.word L(ipred_cfl_ac_444_w32_wpad6) - ipred_cfl_ac_444_w32_tbl
endjumptable
third_party/dav1d/src/arm/64/looprestoration_common.S
/*
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2018, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm.S"
#include "util.S"
// Series of LUTs for efficiently computing sgr's 1 - x/(x+1) table.
// In the comments, let RefTable denote the original, reference table.
const x_by_x_tables
// RangeMins
//
// Min(RefTable[i*8:i*8+8])
// First two values are zeroed.
//
// Lookup using RangeMins[(x >> 3)]
.byte 0, 0, 11, 8, 6, 5, 5, 4, 4, 3, 3, 3, 2, 2, 2, 2
.byte 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0
// DiffMasks
//
// This contains a bit pattern, indicating at which index positions the value of RefTable changes. For each range
// in the RangeMins table (covering 8 RefTable entries), we have one byte; each bit indicates whether the value of
// RefTable changes at that particular index.
// Using popcount, we can integrate the diff bit field. By shifting away bits in a byte, we can refine the range of
// the integral. Finally, adding the integral to RangeMins[(x>>3)] reconstructs RefTable (for x > 15).
//
// Lookup using DiffMasks[(x >> 3)]
.byte 0x00, 0x00, 0xD4, 0x44
.byte 0x42, 0x04, 0x00, 0x00
.byte 0x00, 0x80, 0x00, 0x00
.byte 0x04, 0x00, 0x00, 0x00
.byte 0x00, 0x00, 0x00, 0x00
.byte 0x00, 0x40, 0x00, 0x00
.byte 0x00, 0x00, 0x00, 0x00
.byte 0x00, 0x00, 0x00, 0x02
// Binary form:
// 0b00000000, 0b00000000, 0b11010100, 0b01000100
// 0b01000010, 0b00000100, 0b00000000, 0b00000000
// 0b00000000, 0b10000000, 0b00000000, 0b00000000
// 0b00000100, 0b00000000, 0b00000000, 0b00000000
// 0b00000000, 0b00000000, 0b00000000, 0b00000000
// 0b00000000, 0b01000000, 0b00000000, 0b00000000
// 0b00000000, 0b00000000, 0b00000000, 0b00000000
// 0b00000000, 0b00000000, 0b00000000, 0b00000010
// RefLo
//
// RefTable[0:16]
// i.e. First 16 elements of the original table.
// Add to the sum obtained in the rest of the other lut logic to include the first 16 bytes of RefTable.
//
// Lookup using RangeMins[x] (tbl will replace x > 15 with 0)
.byte 255, 128, 85, 64, 51, 43, 37, 32, 28, 26, 23, 21, 20, 18, 17, 16
// Pseudo assembly
//
// hi_bits = x >> 3
// tbl ref, {RefLo}, x
// tbl diffs, {DiffMasks[0:16], DiffMasks[16:32]}, hi_bits
// tbl min, {RangeMins[0:16], RangeMins[16:32]}, hi_bits
// lo_bits = x & 0x7
// diffs = diffs << lo_bits
// ref = ref + min
// integral = popcnt(diffs)
// ref = ref + integral
// return ref
endconst
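// A scalar C sketch of the lookup that the pseudo assembly above describes
// (illustrative only, not part of the build; it assumes the three byte arrays
// above are available as uint8_t RangeMins[32], DiffMasks[32] and RefLo[16]):
//
//   unsigned ref_table(unsigned x) {                 // x in [0, 255]
//       unsigned ref   = x < 16 ? RefLo[x] : 0;      // tbl yields 0 for indices >= 16
//       uint8_t  diffs = (uint8_t)(DiffMasks[x >> 3] << (x & 7));
//       return ref + RangeMins[x >> 3] + (unsigned)__builtin_popcount(diffs);
//   }
//
// For example, ref_table(18) = 11 + popcount((0xD4 << 2) & 0xff) = 11 + 2 = 13,
// matching RefTable[18].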
// void dav1d_sgr_box3_vert_neon(int32_t **sumsq, int16_t **sum,
// int32_t *AA, int16_t *BB,
// const int w, const int s,
// const int bitdepth_max);
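// This sums three rows of box sums (sumsq[] and sum[]) and converts every
// column's (a, b) pair into the selfguided coefficients. A scalar model of the
// per-element math, as read from the code below (an illustrative sketch; the
// NEON code rounds and saturates z in two narrowing steps rather than exactly
// like this):
//
//   void calc3(uint32_t a, uint32_t b, unsigned s, int bdm8,
//              int32_t *AA, int16_t *BB) {
//       // a = sumsq[0][i]+sumsq[1][i]+sumsq[2][i], b = sum[0][i]+sum[1][i]+sum[2][i]
//       const unsigned an = (a + ((1 << (2*bdm8)) >> 1)) >> (2*bdm8); // to 8-bit depth
//       const unsigned bn = (b + ((1 << bdm8) >> 1)) >> bdm8;
//       const unsigned p  = an * 9 > bn * bn ? an * 9 - bn * bn : 0;  // imax(a*n - b*b, 0)
//       unsigned z = (p * s + (1 << 19)) >> 20;
//       if (z > 255) z = 255;
//       const unsigned x = ref_table(z);   // ~256/(z+1), via the x_by_x_tables above
//       *AA = (int32_t)((x * b * 455 + (1 << 11)) >> 12); // 455 == one_by_x for n == 9
//       *BB = (int16_t)x;
//   }
//
// sgr_box5_vert_neon below follows the same pattern with five input rows,
// n == 25 and one_by_x == 164.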
function sgr_box3_vert_neon, export=1
stp d8, d9, [sp, #-0x40]!
stp d10, d11, [sp, #0x10]
stp d12, d13, [sp, #0x20]
stp d14, d15, [sp, #0x30]
add w4, w4, #2
clz w9, w6 // bitdepth_max
dup v28.4s, w5 // strength
ldp x5, x6, [x0]
ldr x0, [x0, #16]
ldp x7, x8, [x1]
ldr x1, [x1, #16]
movi v31.4s, #9 // n
sub w9, w9, #24 // -bitdepth_min_8
movrel x12, x_by_x_tables
mov w13, #455 // one_by_x
ld1 {v24.16b, v25.16b, v26.16b, v27.16b}, [x12] // RangeMins, DiffMasks
movi v22.16b, #0x7
ldr q23, [x12, #64] // RefLo
dup v6.8h, w9 // -bitdepth_min_8
saddl v7.4s, v6.4h, v6.4h // -2*bitdepth_min_8
dup v30.4s, w13 // one_by_x
ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [x5], #64
ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [x6], #64
ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [x0], #64
ld1 {v20.8h, v21.8h}, [x8], #32
ld1 {v0.8h, v1.8h}, [x7], #32
1:
ld1 {v2.8h, v3.8h}, [x1], #32
add v8.4s, v8.4s, v12.4s
add v9.4s, v9.4s, v13.4s
add v10.4s, v10.4s, v14.4s
add v11.4s, v11.4s, v15.4s
add v0.8h, v0.8h, v20.8h
add v1.8h, v1.8h, v21.8h
add v16.4s, v16.4s, v8.4s
add v17.4s, v17.4s, v9.4s
add v18.4s, v18.4s, v10.4s
add v19.4s, v19.4s, v11.4s
add v4.8h, v2.8h, v0.8h
add v5.8h, v3.8h, v1.8h
srshl v16.4s, v16.4s, v7.4s
srshl v17.4s, v17.4s, v7.4s
srshl v18.4s, v18.4s, v7.4s
srshl v19.4s, v19.4s, v7.4s
srshl v9.8h, v4.8h, v6.8h
srshl v13.8h, v5.8h, v6.8h
mul v16.4s, v16.4s, v31.4s // a * n
mul v17.4s, v17.4s, v31.4s // a * n
mul v18.4s, v18.4s, v31.4s // a * n
mul v19.4s, v19.4s, v31.4s // a * n
umull v8.4s, v9.4h, v9.4h // b * b
umull2 v9.4s, v9.8h, v9.8h // b * b
umull v12.4s, v13.4h, v13.4h // b * b
umull2 v13.4s, v13.8h, v13.8h // b * b
uqsub v16.4s, v16.4s, v8.4s // imax(a * n - b * b, 0)
uqsub v17.4s, v17.4s, v9.4s // imax(a * n - b * b, 0)
uqsub v18.4s, v18.4s, v12.4s // imax(a * n - b * b, 0)
uqsub v19.4s, v19.4s, v13.4s // imax(a * n - b * b, 0)
mul v16.4s, v16.4s, v28.4s // p * s
mul v17.4s, v17.4s, v28.4s // p * s
mul v18.4s, v18.4s, v28.4s // p * s
mul v19.4s, v19.4s, v28.4s // p * s
uqshrn v16.4h, v16.4s, #16
uqshrn2 v16.8h, v17.4s, #16
uqshrn v18.4h, v18.4s, #16
uqshrn2 v18.8h, v19.4s, #16
uqrshrn v1.8b, v16.8h, #4 // imin(z, 255)
uqrshrn2 v1.16b, v18.8h, #4 // imin(z, 255)
ld1 {v16.4s, v17.4s}, [x0], #32
subs w4, w4, #16
ushr v0.16b, v1.16b, #3
ld1 {v8.4s, v9.4s}, [x5], #32
tbl v2.16b, {v26.16b, v27.16b}, v0.16b // DiffMasks
tbl v0.16b, {v24.16b, v25.16b}, v0.16b // RangeMins
tbl v3.16b, {v23.16b}, v1.16b // RefLo
and v1.16b, v1.16b, v22.16b
ld1 {v12.4s, v13.4s}, [x6], #32
ushl v1.16b, v2.16b, v1.16b
ld1 {v20.8h, v21.8h}, [x8], #32
add v3.16b, v3.16b, v0.16b
cnt v1.16b, v1.16b
ld1 {v18.4s, v19.4s}, [x0], #32
add v3.16b, v3.16b, v1.16b
ld1 {v10.4s, v11.4s}, [x5], #32
uxtl v0.8h, v3.8b // x
uxtl2 v1.8h, v3.16b // x
ld1 {v14.4s, v15.4s}, [x6], #32
umull v2.4s, v0.4h, v4.4h // x * BB[i]
umull2 v3.4s, v0.8h, v4.8h // x * BB[i]
umull v4.4s, v1.4h, v5.4h // x * BB[i]
umull2 v5.4s, v1.8h, v5.8h // x * BB[i]
mul v2.4s, v2.4s, v30.4s // x * BB[i] * sgr_one_by_x
mul v3.4s, v3.4s, v30.4s // x * BB[i] * sgr_one_by_x
mul v4.4s, v4.4s, v30.4s // x * BB[i] * sgr_one_by_x
mul v5.4s, v5.4s, v30.4s // x * BB[i] * sgr_one_by_x
st1 {v0.8h, v1.8h}, [x3], #32
ld1 {v0.8h, v1.8h}, [x7], #32
srshr v2.4s, v2.4s, #12 // AA[i]
srshr v3.4s, v3.4s, #12 // AA[i]
srshr v4.4s, v4.4s, #12 // AA[i]
srshr v5.4s, v5.4s, #12 // AA[i]
st1 {v2.4s, v3.4s, v4.4s, v5.4s}, [x2], #64
b.gt 1b
ldp d14, d15, [sp, #0x30]
ldp d12, d13, [sp, #0x20]
ldp d10, d11, [sp, #0x10]
ldp d8, d9, [sp], 0x40
ret
endfunc
// void dav1d_sgr_box5_vert_neon(int32_t **sumsq, int16_t **sum,
// int32_t *AA, int16_t *BB,
// const int w, const int s,
// const int bitdepth_max);
function sgr_box5_vert_neon, export=1
stp d8, d9, [sp, #-0x30]!
stp d10, d11, [sp, #0x10]
stp d12, d13, [sp, #0x20]
add w4, w4, #2
clz w15, w6 // bitdepth_max
dup v28.4s, w5 // strength
ldp x5, x6, [x0]
ldp x7, x8, [x0, #16]
ldr x0, [x0, #32]
ldp x9, x10, [x1]
ldp x11, x12, [x1, #16]
ldr x1, [x1, #32]
movi v31.4s, #25 // n
sub w15, w15, #24 // -bitdepth_min_8
movrel x13, x_by_x_tables
movi v30.4s, #164
ld1 {v24.16b, v25.16b, v26.16b, v27.16b}, [x13] // RangeMins, DiffMasks
dup v6.8h, w15 // -bitdepth_min_8
movi v19.8b, #0x7
ldr q18, [x13, #64] // RefLo
saddl v7.4s, v6.4h, v6.4h // -2*bitdepth_min_8
ld1 {v8.4s, v9.4s}, [x5], #32
ld1 {v10.4s, v11.4s}, [x6], #32
ld1 {v12.4s, v13.4s}, [x7], #32
ld1 {v16.4s, v17.4s}, [x8], #32
ld1 {v20.8h}, [x9], #16
ld1 {v21.8h}, [x10], #16
ld1 {v22.8h}, [x11], #16
ld1 {v23.8h}, [x12], #16
ld1 {v0.4s, v1.4s}, [x0], #32
ld1 {v2.8h}, [x1], #16
1:
add v8.4s, v8.4s, v10.4s
add v9.4s, v9.4s, v11.4s
add v12.4s, v12.4s, v16.4s
add v13.4s, v13.4s, v17.4s
add v20.8h, v20.8h, v21.8h
add v22.8h, v22.8h, v23.8h
add v0.4s, v0.4s, v8.4s
add v1.4s, v1.4s, v9.4s
add v2.8h, v2.8h, v20.8h
add v0.4s, v0.4s, v12.4s
add v1.4s, v1.4s, v13.4s
add v2.8h, v2.8h, v22.8h
subs w4, w4, #8
srshl v0.4s, v0.4s, v7.4s
srshl v1.4s, v1.4s, v7.4s
srshl v4.8h, v2.8h, v6.8h
mul v0.4s, v0.4s, v31.4s // a * n
mul v1.4s, v1.4s, v31.4s // a * n
umull v3.4s, v4.4h, v4.4h // b * b
umull2 v4.4s, v4.8h, v4.8h // b * b
uqsub v0.4s, v0.4s, v3.4s // imax(a * n - b * b, 0)
uqsub v1.4s, v1.4s, v4.4s // imax(a * n - b * b, 0)
mul v0.4s, v0.4s, v28.4s // p * s
mul v1.4s, v1.4s, v28.4s // p * s
ld1 {v8.4s, v9.4s}, [x5], #32
uqshrn v0.4h, v0.4s, #16
uqshrn2 v0.8h, v1.4s, #16
ld1 {v10.4s, v11.4s}, [x6], #32
uqrshrn v0.8b, v0.8h, #4 // imin(z, 255)
ld1 {v12.4s, v13.4s}, [x7], #32
ushr v1.8b, v0.8b, #3
ld1 {v16.4s, v17.4s}, [x8], #32
tbl v5.8b, {v26.16b, v27.16b}, v1.8b // DiffMasks
tbl v1.8b, {v24.16b, v25.16b}, v1.8b // RangeMins
tbl v4.8b, {v18.16b}, v0.8b // RefLo
and v0.8b, v0.8b, v19.8b
ld1 {v20.8h}, [x9], #16
ushl v5.8b, v5.8b, v0.8b
add v4.8b, v4.8b, v1.8b
ld1 {v21.8h}, [x10], #16
cnt v5.8b, v5.8b
ld1 {v22.8h}, [x11], #16
add v5.8b, v4.8b, v5.8b
ld1 {v23.8h}, [x12], #16
uxtl v5.8h, v5.8b // x
ld1 {v0.4s, v1.4s}, [x0], #32
umull v3.4s, v5.4h, v2.4h // x * BB[i]
umull2 v4.4s, v5.8h, v2.8h // x * BB[i]
mul v3.4s, v3.4s, v30.4s // x * BB[i] * sgr_one_by_x
mul v4.4s, v4.4s, v30.4s // x * BB[i] * sgr_one_by_x
srshr v3.4s, v3.4s, #12 // AA[i]
srshr v4.4s, v4.4s, #12 // AA[i]
ld1 {v2.8h}, [x1], #16
st1 {v3.4s, v4.4s}, [x2], #32
st1 {v5.8h}, [x3], #16
b.gt 1b
ldp d12, d13, [sp, #0x20]
ldp d10, d11, [sp, #0x10]
ldp d8, d9, [sp], 0x30
ret
endfunc
third_party/dav1d/src/arm/64/looprestoration.S
/*
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2018, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm.S"
#include "util.S"
const right_ext_mask_buf
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
right_ext_mask:
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
endconst
// void dav1d_wiener_filter7_8bpc_neon(pixel *p, const ptrdiff_t stride,
// const pixel (*left)[4], const pixel *lpf,
// const int w, int h,
// const int16_t filter[2][8],
// const enum LrEdgeFlags edges);
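// Per output pixel the two passes below compute, with f = filter[0] (horizontal),
// g = filter[1] (vertical), p = source pixels and m[k] = the 16-bit intermediate
// row k lines away (a sketch of the arithmetic as read from the code, not a
// statement of dav1d's reference C):
//
//   mid[x] = ((f[3]*p[x] + f[4]*(p[x-1] + p[x+1]) + f[5]*(p[x-2] + p[x+2]) +
//              f[6]*(p[x-3] + p[x+3]) + (p[x] << 7) - 16380) >> 3) + 2048
//   out[x] = clip_u8((g[3]*m[0][x] + g[4]*(m[-1][x] + m[+1][x]) +
//              g[5]*(m[-2][x] + m[+2][x]) + g[6]*(m[-3][x] + m[+3][x]) + 1024) >> 11)
//
// The horizontal sums use saturating 16-bit arithmetic, the vertical ones 32 bits.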
function wiener_filter7_8bpc_neon, export=1
AARCH64_SIGN_LINK_REGISTER
stp x29, x30, [sp, #-16]!
mov x29, sp
ld1 {v0.8h, v1.8h}, [x6]
tst w7, #4 // LR_HAVE_TOP
sub_sp 384*2*6
mov w17, #(1 << 14) - (1 << 2)
dup v30.8h, w17
movi v31.8h, #8, lsl #8
// x9 - t6
// x10 - t5
// x11 - t4
// x12 - t3
// x13 - t2
// x14 - t1
// x15 - t0
mov x14, sp // t1
b.eq L(no_top_7)
mov x16, x2 // backup left
mov x2, #0
bl wiener_filter7_h_8bpc_neon
add x3, x3, x1 // lpf += stride
mov x9, x14 // t6
mov x10, x14 // t5
add x14, x14, #384*2 // t1 += 384*2
bl wiener_filter7_h_8bpc_neon
add x3, x3, x1, lsl #2
add x3, x3, x1 // lpf += stride*5
mov x11, x14 // t4
add x14, x14, #384*2 // t1 += 384*2
mov x2, x16 // left
mov x16, x3 // backup lpf
mov x3, x0 // lpf = p
bl wiener_filter7_h_8bpc_neon
subs w5, w5, #1 // h--
mov x12, x14 // t3
mov x13, x14 // t2
b.eq L(v1_7)
add x3, x3, x1 // src += stride
add x14, x14, #384*2 // t1 += 384*2
bl wiener_filter7_h_8bpc_neon
mov x13, x14 // t2
subs w5, w5, #1 // h--
b.eq L(v2_7)
add x3, x3, x1 // src += stride
add x14, x14, #384*2 // t1 += 384*2
bl wiener_filter7_h_8bpc_neon
subs w5, w5, #1 // h--
b.eq L(v3_7)
add x3, x3, x1 // src += stride
L(main_7):
add x15, x14, #384*2 // t0 = t1 + 384*2
L(main_loop_7):
bl wiener_filter7_hv_8bpc_neon
subs w5, w5, #1 // h--
b.ne L(main_loop_7)
tst w7, #8 // LR_HAVE_BOTTOM
b.eq L(v3_7)
mov x3, x16 // restore lpf
mov x2, #0 // left = NULL
bl wiener_filter7_hv_8bpc_neon
bl wiener_filter7_hv_8bpc_neon
L(v1_7):
bl wiener_filter7_v_8bpc_neon
mov sp, x29
ldp x29, x30, [sp], #16
AARCH64_VALIDATE_LINK_REGISTER
ret
L(no_top_7):
add x3, x3, x1, lsl #2
add x16, x3, x1, lsl #1 // lpf += stride*6, backup
mov x3, x0 // lpf = p
bl wiener_filter7_h_8bpc_neon
subs w5, w5, #1 // h--
mov x9, x14 // t6
mov x10, x14 // t5
mov x11, x14 // t4
mov x12, x14 // t3
mov x13, x14 // t2
b.eq L(v1_7)
add x3, x3, x1 // src += stride
add x14, x14, #384*2 // t1 += 384*2
bl wiener_filter7_h_8bpc_neon
subs w5, w5, #1 // h--
mov x13, x14 // t2
b.eq L(v2_7)
add x3, x3, x1 // src += stride
add x14, x14, #384*2 // t1 += 384*2
bl wiener_filter7_h_8bpc_neon
subs w5, w5, #1 // h--
b.eq L(v3_7)
add x3, x3, x1 // src += stride
add x15, x14, #384*2 // t0 = t1 + 384*2
bl wiener_filter7_hv_8bpc_neon
subs w5, w5, #1 // h--
b.eq L(v3_7)
add x15, x15, #384*2*4 // t0 += 384*2*4
bl wiener_filter7_hv_8bpc_neon
subs w5, w5, #1 // h--
b.ne L(main_7)
L(v3_7):
bl wiener_filter7_v_8bpc_neon
L(v2_7):
bl wiener_filter7_v_8bpc_neon
b L(v1_7)
endfunc
function wiener_filter7_h_8bpc_neon
stp x3, x4, [sp, #-32]!
str x14, [sp, #16]
// Set up the src pointers to include the left edge, for LR_HAVE_LEFT, left == NULL
tst w7, #1 // LR_HAVE_LEFT
b.eq 1f
// LR_HAVE_LEFT
cbnz x2, 0f
// left == NULL
sub x3, x3, #3
ld1 {v3.16b}, [x3], #16
b 2f
0:
// LR_HAVE_LEFT, left != NULL
ld1 {v3.16b}, [x3], #16
ld1 {v2.s}[3], [x2], #4
// Move x3 back to account for the last 3 bytes we loaded earlier,
// which we'll shift out.
sub x3, x3, #3
ext v3.16b, v2.16b, v3.16b, #13
b 2f
1:
ld1 {v3.16b}, [x3], #16
// !LR_HAVE_LEFT, fill v2 with the leftmost byte
// and shift v3 to have 3x the first byte at the front.
dup v2.16b, v3.b[0]
// Move x3 back to account for the last 3 bytes we loaded before,
// which we shifted out.
sub x3, x3, #3
ext v3.16b, v2.16b, v3.16b, #13
2:
ld1 {v4.8b}, [x3], #8
uxtl v2.8h, v3.8b
uxtl2 v3.8h, v3.16b
uxtl v4.8h, v4.8b
tst w7, #2 // LR_HAVE_RIGHT
b.ne 4f
3: // !LR_HAVE_RIGHT
// Check whether we need to pad the right edge
cmp w4, #19
b.ge 4f // If w >= 19, all used input pixels are valid
// 1 <= w < 19, w+3 pixels valid in v2-v4. For w>=9,
// this ends up called again; it's not strictly needed in those
// cases (we pad enough here), but keeping the code as simple as possible.
// The padding pixel is v2/3/4.h[w+2]. x3 points at the next input, ie
// v2/3/4.h[24]. Thus read from x3[w-22] to find the padding pixel.
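// For example, with w == 5 the last valid pixel is v2/3/4.h[7], i.e. the
// source byte at x3[5 - 22] == x3[-17].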
sub w17, w4, #22
// Insert padding in v2/3/4.h[w+3] onwards; fuse the +3 (*2) into the
// buffer pointer.
movrel x6, right_ext_mask, -6
ldr b28, [x3, w17, sxtw]
sub x6, x6, w4, uxtw #1
dup v28.8h, v28.h[0]
ld1 {v25.16b, v26.16b, v27.16b}, [x6]
bit v2.16b, v28.16b, v25.16b
bit v3.16b, v28.16b, v26.16b
bit v4.16b, v28.16b, v27.16b
4: // Loop horizontally
// Interleaving the mul/mla chains actually hurts performance
// significantly on Cortex A53, thus keeping mul/mla tightly
// chained like this.
ext v17.16b, v2.16b, v3.16b, #4
ext v19.16b, v2.16b, v3.16b, #8
ext v16.16b, v2.16b, v3.16b, #2
ext v20.16b, v2.16b, v3.16b, #10
ext v21.16b, v2.16b, v3.16b, #12
ext v18.16b, v2.16b, v3.16b, #6
add v19.8h, v19.8h, v17.8h
add v20.8h, v20.8h, v16.8h
add v21.8h, v21.8h, v2.8h
shl v22.8h, v18.8h, #7
mul v6.8h, v18.8h, v0.h[3]
mla v6.8h, v19.8h, v0.h[4]
mla v6.8h, v20.8h, v0.h[5]
mla v6.8h, v21.8h, v0.h[6]
ext v17.16b, v3.16b, v4.16b, #4
ext v19.16b, v3.16b, v4.16b, #8
ext v16.16b, v3.16b, v4.16b, #2
ext v20.16b, v3.16b, v4.16b, #10
ext v21.16b, v3.16b, v4.16b, #12
ext v18.16b, v3.16b, v4.16b, #6
add v19.8h, v19.8h, v17.8h
add v20.8h, v20.8h, v16.8h
add v21.8h, v21.8h, v3.8h
shl v23.8h, v18.8h, #7
mul v7.8h, v18.8h, v0.h[3]
mla v7.8h, v19.8h, v0.h[4]
mla v7.8h, v20.8h, v0.h[5]
mla v7.8h, v21.8h, v0.h[6]
sub v22.8h, v22.8h, v30.8h
sub v23.8h, v23.8h, v30.8h
sqadd v6.8h, v6.8h, v22.8h
sqadd v7.8h, v7.8h, v23.8h
sshr v6.8h, v6.8h, #3
sshr v7.8h, v7.8h, #3
add v6.8h, v6.8h, v31.8h
add v7.8h, v7.8h, v31.8h
subs w4, w4, #16
st1 {v6.8h, v7.8h}, [x14], #32
b.le 0f
mov v2.16b, v4.16b
ld1 {v4.16b}, [x3], #16
tst w7, #2 // LR_HAVE_RIGHT
uxtl v3.8h, v4.8b
uxtl2 v4.8h, v4.16b
b.ne 4b // If we don't need to pad, just keep filtering.
b 3b // If we need to pad, check how many pixels we have left.
0:
ldr x14, [sp, #16]
ldp x3, x4, [sp], #32
ret
endfunc
function wiener_filter7_v_8bpc_neon
// Backing up/restoring registers shifted, so that x9 gets the value
// of x10, etc, afterwards.
stp x10, x11, [sp, #-64]!
stp x12, x13, [sp, #16]
stp x14, x14, [sp, #32]
stp x0, x4, [sp, #48]
1:
ld1 {v20.8h, v21.8h}, [x11], #32
ld1 {v24.8h, v25.8h}, [x13], #32
ld1 {v18.8h, v19.8h}, [x10], #32
add v24.8h, v24.8h, v20.8h
ld1 {v26.8h, v27.8h}, [x14], #32
ld1 {v16.8h, v17.8h}, [x9], #32
add v28.8h, v26.8h, v18.8h
ld1 {v22.8h, v23.8h}, [x12], #32
add v16.8h, v26.8h, v16.8h
add v25.8h, v25.8h, v21.8h
smull v2.4s, v22.4h, v1.h[3]
smlal v2.4s, v24.4h, v1.h[4]
smlal v2.4s, v28.4h, v1.h[5]
smlal v2.4s, v16.4h, v1.h[6]
add v29.8h, v27.8h, v19.8h
smull2 v3.4s, v22.8h, v1.h[3]
smlal2 v3.4s, v24.8h, v1.h[4]
smlal2 v3.4s, v28.8h, v1.h[5]
smlal2 v3.4s, v16.8h, v1.h[6]
add v17.8h, v27.8h, v17.8h
smull v4.4s, v23.4h, v1.h[3]
smlal v4.4s, v25.4h, v1.h[4]
smlal v4.4s, v29.4h, v1.h[5]
smlal v4.4s, v17.4h, v1.h[6]
smull2 v5.4s, v23.8h, v1.h[3]
smlal2 v5.4s, v25.8h, v1.h[4]
smlal2 v5.4s, v29.8h, v1.h[5]
smlal2 v5.4s, v17.8h, v1.h[6]
sqrshrun v2.4h, v2.4s, #11
sqrshrun2 v2.8h, v3.4s, #11
sqrshrun v3.4h, v4.4s, #11
sqrshrun2 v3.8h, v5.4s, #11
sqxtun v2.8b, v2.8h
sqxtun2 v2.16b, v3.8h
subs w4, w4, #16
st1 {v2.16b}, [x0], #16
b.gt 1b
ldp x0, x4, [sp, #48]
ldp x13, x14, [sp, #32]
ldp x11, x12, [sp, #16]
ldp x9, x10, [sp], #64
add x0, x0, x1
ret
endfunc
function wiener_filter7_hv_8bpc_neon
// Backing up/restoring registers shifted, so that x9 gets the value
// of x10, etc, and x15==x9, afterwards.
stp x10, x11, [sp, #-80]!
stp x12, x13, [sp, #16]
stp x14, x15, [sp, #32]
stp x10, x0, [sp, #48]
stp x3, x4, [sp, #64]
// Set up the src pointers to include the left edge, for LR_HAVE_LEFT, left == NULL
tst w7, #1 // LR_HAVE_LEFT
b.eq 1f
// LR_HAVE_LEFT
cbnz x2, 0f
// left == NULL
sub x3, x3, #3
ld1 {v3.16b}, [x3], #16
b 2f
0:
// LR_HAVE_LEFT, left != NULL
ld1 {v3.16b}, [x3], #16
ld1 {v2.s}[3], [x2], #4
// Move x3 back to account for the last 3 bytes we loaded earlier,
// which we'll shift out.
sub x3, x3, #3
ext v3.16b, v2.16b, v3.16b, #13
b 2f
1:
ld1 {v3.16b}, [x3], #16
// !LR_HAVE_LEFT, fill v2 with the leftmost byte
// and shift v3 to have 3x the first byte at the front.
dup v2.16b, v3.b[0]
// Move x3 back to account for the last 3 bytes we loaded before,
// which we shifted out.
sub x3, x3, #3
ext v3.16b, v2.16b, v3.16b, #13
2:
ld1 {v4.8b}, [x3], #8
uxtl v2.8h, v3.8b
uxtl2 v3.8h, v3.16b
uxtl v4.8h, v4.8b
tst w7, #2 // LR_HAVE_RIGHT
b.ne 4f
3: // !LR_HAVE_RIGHT
// Check whether we need to pad the right edge
cmp w4, #19
b.ge 4f // If w >= 19, all used input pixels are valid
// 1 <= w < 19, w+3 pixels valid in v2-v4. For w>=9,
// this ends up called again; it's not strictly needed in those
// cases (we pad enough here), but keeping the code as simple as possible.
// The padding pixel is v2/3/4.h[w+2]. x3 points at the next input, ie
// v2/3/4.h[24]. Thus read from x3[w-22] to find the padding pixel.
sub w17, w4, #22
// Insert padding in v2/3/4.h[w+3] onwards; fuse the +3 (*2) into the
// buffer pointer.
movrel x6, right_ext_mask, -6
ldr b28, [x3, w17, sxtw]
sub x6, x6, w4, uxtw #1
dup v28.8h, v28.h[0]
ld1 {v25.16b, v26.16b, v27.16b}, [x6]
bit v2.16b, v28.16b, v25.16b
bit v3.16b, v28.16b, v26.16b
bit v4.16b, v28.16b, v27.16b
4: // Loop horizontally
ext v17.16b, v2.16b, v3.16b, #4
ext v19.16b, v2.16b, v3.16b, #8
ext v16.16b, v2.16b, v3.16b, #2
ext v20.16b, v2.16b, v3.16b, #10
ext v21.16b, v2.16b, v3.16b, #12
ext v18.16b, v2.16b, v3.16b, #6
add v19.8h, v19.8h, v17.8h
add v20.8h, v20.8h, v16.8h
add v21.8h, v21.8h, v2.8h
shl v22.8h, v18.8h, #7
mul v6.8h, v18.8h, v0.h[3]
mla v6.8h, v19.8h, v0.h[4]
mla v6.8h, v20.8h, v0.h[5]
mla v6.8h, v21.8h, v0.h[6]
ext v17.16b, v3.16b, v4.16b, #4
ext v19.16b, v3.16b, v4.16b, #8
ext v16.16b, v3.16b, v4.16b, #2
ext v20.16b, v3.16b, v4.16b, #10
ext v21.16b, v3.16b, v4.16b, #12
ext v18.16b, v3.16b, v4.16b, #6
add v19.8h, v19.8h, v17.8h
add v20.8h, v20.8h, v16.8h
add v21.8h, v21.8h, v3.8h
shl v23.8h, v18.8h, #7
mul v7.8h, v18.8h, v0.h[3]
mla v7.8h, v19.8h, v0.h[4]
mla v7.8h, v20.8h, v0.h[5]
mla v7.8h, v21.8h, v0.h[6]
ld1 {v20.8h, v21.8h}, [x11], #32
sub v22.8h, v22.8h, v30.8h
sub v23.8h, v23.8h, v30.8h
ld1 {v26.8h, v27.8h}, [x13], #32
sqadd v6.8h, v6.8h, v22.8h
sqadd v7.8h, v7.8h, v23.8h
ld1 {v18.8h, v19.8h}, [x10], #32
sshr v6.8h, v6.8h, #3
sshr v7.8h, v7.8h, #3
ld1 {v28.8h, v29.8h}, [x14], #32
add v6.8h, v6.8h, v31.8h
add v7.8h, v7.8h, v31.8h
ld1 {v16.8h, v17.8h}, [x9], #32
add v26.8h, v20.8h, v26.8h
ld1 {v24.8h, v25.8h}, [x12], #32
add v28.8h, v18.8h, v28.8h
add v16.8h, v16.8h, v6.8h
add v27.8h, v21.8h, v27.8h
smull v18.4s, v24.4h, v1.h[3]
smlal v18.4s, v26.4h, v1.h[4]
smlal v18.4s, v28.4h, v1.h[5]
smlal v18.4s, v16.4h, v1.h[6]
add v29.8h, v19.8h, v29.8h
smull2 v19.4s, v24.8h, v1.h[3]
smlal2 v19.4s, v26.8h, v1.h[4]
smlal2 v19.4s, v28.8h, v1.h[5]
smlal2 v19.4s, v16.8h, v1.h[6]
add v17.8h, v17.8h, v7.8h
smull v20.4s, v25.4h, v1.h[3]
smlal v20.4s, v27.4h, v1.h[4]
smlal v20.4s, v29.4h, v1.h[5]
smlal v20.4s, v17.4h, v1.h[6]
smull2 v21.4s, v25.8h, v1.h[3]
smlal2 v21.4s, v27.8h, v1.h[4]
smlal2 v21.4s, v29.8h, v1.h[5]
smlal2 v21.4s, v17.8h, v1.h[6]
sqrshrun v18.4h, v18.4s, #11
sqrshrun2 v18.8h, v19.4s, #11
sqrshrun v19.4h, v20.4s, #11
sqrshrun2 v19.8h, v21.4s, #11
st1 {v6.8h, v7.8h}, [x15], #32
sqxtun v18.8b, v18.8h
sqxtun2 v18.16b, v19.8h
subs w4, w4, #16
st1 {v18.16b}, [x0], #16
b.le 0f
mov v2.16b, v4.16b
ld1 {v4.16b}, [x3], #16
tst w7, #2 // LR_HAVE_RIGHT
uxtl v3.8h, v4.8b
uxtl2 v4.8h, v4.16b
b.ne 4b // If we don't need to pad, just keep filtering.
b 3b // If we need to pad, check how many pixels we have left.
0:
ldp x3, x4, [sp, #64]
ldp x15, x0, [sp, #48]
ldp x13, x14, [sp, #32]
ldp x11, x12, [sp, #16]
ldp x9, x10, [sp], #80
add x3, x3, x1
add x0, x0, x1
ret
endfunc
// void dav1d_wiener_filter5_8bpc_neon(pixel *p, const ptrdiff_t stride,
// const pixel (*left)[4], const pixel *lpf,
// const int w, int h,
// const int16_t filter[2][8],
// const enum LrEdgeFlags edges);
function wiener_filter5_8bpc_neon, export=1
AARCH64_SIGN_LINK_REGISTER
stp x29, x30, [sp, #-16]!
mov x29, sp
ld1 {v0.8h, v1.8h}, [x6]
tst w7, #4 // LR_HAVE_TOP
sub_sp 384*2*4
mov w17, #(1 << 14) - (1 << 2)
dup v30.8h, w17
movi v31.8h, #8, lsl #8
// x11 - t4
// x12 - t3
// x13 - t2
// x14 - t1
// x15 - t0
mov x14, sp // t1
b.eq L(no_top_5)
mov x16, x2 // backup left
mov x2, #0
bl wiener_filter5_h_8bpc_neon
add x3, x3, x1 // lpf += stride
mov x11, x14 // t4
add x14, x14, #384*2 // t1 += 384*2
bl wiener_filter5_h_8bpc_neon
add x3, x3, x1, lsl #2
add x3, x3, x1 // lpf += stride*5
mov x12, x14 // t3
add x14, x14, #384*2 // t1 += 384*2
mov x2, x16 // left
mov x16, x3 // backup lpf
mov x3, x0 // lpf = p
bl wiener_filter5_h_8bpc_neon
subs w5, w5, #1 // h--
mov x13, x14 // t2
b.eq L(v1_5)
add x3, x3, x1 // src += stride
add x14, x14, #384*2 // t1 += 384*2
bl wiener_filter5_h_8bpc_neon
subs w5, w5, #1 // h--
b.eq L(v2_5)
add x3, x3, x1 // src += stride
L(main_5):
mov x15, x11 // t0 = t4
L(main_loop_5):
bl wiener_filter5_hv_8bpc_neon
subs w5, w5, #1 // h--
b.ne L(main_loop_5)
tst w7, #8 // LR_HAVE_BOTTOM
b.eq L(v2_5)
mov x3, x16 // restore lpf
mov x2, #0 // left = NULL
bl wiener_filter5_hv_8bpc_neon
bl wiener_filter5_hv_8bpc_neon
L(end_5):
mov sp, x29
ldp x29, x30, [sp], #16
AARCH64_VALIDATE_LINK_REGISTER
ret
L(no_top_5):
add x3, x3, x1, lsl #2
add x16, x3, x1, lsl #1 // lpf += stride*6, backup
mov x3, x0 // lpf = p
bl wiener_filter5_h_8bpc_neon
subs w5, w5, #1 // h--
mov x11, x14 // t4
mov x12, x14 // t3
mov x13, x14 // t2
b.eq L(v1_5)
add x3, x3, x1 // src += stride
add x14, x14, #384*2 // t1 += 384*2
bl wiener_filter5_h_8bpc_neon
subs w5, w5, #1 // h--
b.eq L(v2_5)
add x3, x3, x1 // src += stride
add x15, x14, #384*2 // t0 = t1 + 384*2
bl wiener_filter5_hv_8bpc_neon
subs w5, w5, #1 // h--
b.eq L(v2_5)
add x15, x15, #384*2*3 // t0 += 384*2*3
bl wiener_filter5_hv_8bpc_neon
subs w5, w5, #1 // h--
b.ne L(main_5)
L(v2_5):
bl wiener_filter5_v_8bpc_neon
add x0, x0, x1
mov x11, x12
mov x12, x13
mov x13, x14
L(v1_5):
bl wiener_filter5_v_8bpc_neon
b L(end_5)
endfunc
function wiener_filter5_h_8bpc_neon
stp x3, x4, [sp, #-32]!
str x14, [sp, #16]
// Set up the src pointers to include the left edge, for LR_HAVE_LEFT, left == NULL
tst w7, #1 // LR_HAVE_LEFT
b.eq 1f
// LR_HAVE_LEFT
cbnz x2, 0f
// left == NULL
sub x3, x3, #2
ld1 {v3.16b}, [x3], #16
b 2f
0:
// LR_HAVE_LEFT, left != NULL
ld1 {v3.16b}, [x3], #16
ld1 {v2.s}[3], [x2], #4
// Move x3 back to account for the last 2 bytes we loaded earlier,
// which we'll shift out.
sub x3, x3, #2
ext v3.16b, v2.16b, v3.16b, #14
b 2f
1:
ld1 {v3.16b}, [x3], #16
// !LR_HAVE_LEFT, fill v2 with the leftmost byte
// and shift v3 to have 3x the first byte at the front.
dup v2.16b, v3.b[0]
// Move x3 back to account for the last 2 bytes we loaded before,
// which we shifted out.
sub x3, x3, #2
ext v3.16b, v2.16b, v3.16b, #14
2:
ld1 {v4.8b}, [x3], #8
uxtl v2.8h, v3.8b
uxtl2 v3.8h, v3.16b
uxtl v4.8h, v4.8b
tst w7, #2 // LR_HAVE_RIGHT
b.ne 4f
3: // !LR_HAVE_RIGHT
// Check whether we need to pad the right edge
cmp w4, #18
b.ge 4f // If w >= 18, all used input pixels are valid
// 1 <= w < 18, w+2 pixels valid in v2-v4. For w>=9,
// this ends up called again; it's not strictly needed in those
// cases (we pad enough here), but keeping the code as simple as possible.
// The padding pixel is v2/3/4.h[w+1]. x3 points at the next input, ie
// v2/3/4.h[24]. Thus read from x3[w-23] to find the padding pixel.
sub w17, w4, #23
// Insert padding in v2/3/4.h[w+2] onwards; fuse the +2 (*2) into the
// buffer pointer.
movrel x6, right_ext_mask, -4
ldr b28, [x3, w17, sxtw]
sub x6, x6, w4, uxtw #1
dup v28.8h, v28.h[0]
ld1 {v25.16b, v26.16b, v27.16b}, [x6]
bit v2.16b, v28.16b, v25.16b
bit v3.16b, v28.16b, v26.16b
bit v4.16b, v28.16b, v27.16b
4: // Loop horizontally
// Interleaving the mul/mla chains actually hurts performance
// significantly on Cortex A53, thus keeping mul/mla tightly
// chained like this.
ext v16.16b, v2.16b, v3.16b, #2
ext v18.16b, v2.16b, v3.16b, #6
ext v19.16b, v2.16b, v3.16b, #8
ext v17.16b, v2.16b, v3.16b, #4
add v18.8h, v18.8h, v16.8h
add v19.8h, v19.8h, v2.8h
shl v22.8h, v17.8h, #7
mul v6.8h, v17.8h, v0.h[3]
mla v6.8h, v18.8h, v0.h[4]
mla v6.8h, v19.8h, v0.h[5]
ext v16.16b, v3.16b, v4.16b, #2
ext v18.16b, v3.16b, v4.16b, #6
ext v19.16b, v3.16b, v4.16b, #8
ext v17.16b, v3.16b, v4.16b, #4
add v18.8h, v18.8h, v16.8h
add v19.8h, v19.8h, v3.8h
shl v23.8h, v17.8h, #7
mul v7.8h, v17.8h, v0.h[3]
mla v7.8h, v18.8h, v0.h[4]
mla v7.8h, v19.8h, v0.h[5]
sub v22.8h, v22.8h, v30.8h
sub v23.8h, v23.8h, v30.8h
sqadd v6.8h, v6.8h, v22.8h
sqadd v7.8h, v7.8h, v23.8h
sshr v6.8h, v6.8h, #3
sshr v7.8h, v7.8h, #3
add v6.8h, v6.8h, v31.8h
add v7.8h, v7.8h, v31.8h
subs w4, w4, #16
st1 {v6.8h, v7.8h}, [x14], #32
b.le 0f
mov v2.16b, v4.16b
ld1 {v4.16b}, [x3], #16
tst w7, #2 // LR_HAVE_RIGHT
uxtl v3.8h, v4.8b
uxtl2 v4.8h, v4.16b
b.ne 4b // If we don't need to pad, just keep filtering.
b 3b // If we need to pad, check how many pixels we have left.
0:
ldr x14, [sp, #16]
ldp x3, x4, [sp], #32
ret
endfunc
function wiener_filter5_v_8bpc_neon
stp x11, x12, [sp, #-48]!
stp x13, x14, [sp, #16]
stp x0, x4, [sp, #32]
1:
ld1 {v18.8h, v19.8h}, [x12], #32
ld1 {v22.8h, v23.8h}, [x14], #32
ld1 {v16.8h, v17.8h}, [x11], #32
add v24.8h, v22.8h, v18.8h
ld1 {v20.8h, v21.8h}, [x13], #32
add v16.8h, v22.8h, v16.8h
add v25.8h, v23.8h, v19.8h
smull v2.4s, v20.4h, v1.h[3]
smlal v2.4s, v24.4h, v1.h[4]
smlal v2.4s, v16.4h, v1.h[5]
add v17.8h, v23.8h, v17.8h
smull2 v3.4s, v20.8h, v1.h[3]
smlal2 v3.4s, v24.8h, v1.h[4]
smlal2 v3.4s, v16.8h, v1.h[5]
smull v4.4s, v21.4h, v1.h[3]
smlal v4.4s, v25.4h, v1.h[4]
smlal v4.4s, v17.4h, v1.h[5]
smull2 v5.4s, v21.8h, v1.h[3]
smlal2 v5.4s, v25.8h, v1.h[4]
smlal2 v5.4s, v17.8h, v1.h[5]
sqrshrun v2.4h, v2.4s, #11
sqrshrun2 v2.8h, v3.4s, #11
sqrshrun v3.4h, v4.4s, #11
sqrshrun2 v3.8h, v5.4s, #11
sqxtun v2.8b, v2.8h
sqxtun2 v2.16b, v3.8h
subs w4, w4, #16
st1 {v2.16b}, [x0], #16
b.gt 1b
ldp x0, x4, [sp, #32]
ldp x13, x14, [sp, #16]
ldp x11, x12, [sp], #48
ret
endfunc
function wiener_filter5_hv_8bpc_neon
// Backing up/restoring registers shifted, so that x11 gets the value
// of x12, etc, and x15==x11, afterwards.
stp x12, x13, [sp, #-64]!
stp x14, x15, [sp, #16]
stp x12, x0, [sp, #32]
stp x3, x4, [sp, #48]
// Set up the src pointers to include the left edge, for LR_HAVE_LEFT, left == NULL
tst w7, #1 // LR_HAVE_LEFT
b.eq 1f
// LR_HAVE_LEFT
cbnz x2, 0f
// left == NULL
sub x3, x3, #2
ld1 {v3.16b}, [x3], #16
b 2f
0:
// LR_HAVE_LEFT, left != NULL
ld1 {v3.16b}, [x3], #16
ld1 {v2.s}[3], [x2], #4
// Move x3 back to account for the last 2 bytes we loaded earlier,
// which we'll shift out.
sub x3, x3, #2
ext v3.16b, v2.16b, v3.16b, #14
b 2f
1:
ld1 {v3.16b}, [x3], #16
// !LR_HAVE_LEFT, fill v2 with the leftmost byte
// and shift v3 to have 2x the first byte at the front.
dup v2.16b, v3.b[0]
// Move x3 back to account for the last 2 bytes we loaded before,
// which we shifted out.
sub x3, x3, #2
ext v3.16b, v2.16b, v3.16b, #14
2:
ld1 {v4.8b}, [x3], #8
uxtl v2.8h, v3.8b
uxtl2 v3.8h, v3.16b
uxtl v4.8h, v4.8b
tst w7, #2 // LR_HAVE_RIGHT
b.ne 4f
3: // !LR_HAVE_RIGHT
// Check whether we need to pad the right edge
cmp w4, #18
b.ge 4f // If w >= 18, all used input pixels are valid
// 1 <= w < 18, w+2 pixels valid in v2-v4. For w>=9,
// this ends up called again; it's not strictly needed in those
// cases (we pad enough here), but keeping the code as simple as possible.
// The padding pixel is v2/3/4.h[w+1]. x3 points at the next input, ie
// v2/3/4.h[24]. Thus read from x3[w-23] to find the padding pixel.
sub w17, w4, #23
// Insert padding in v2/3/4.h[w+2] onwards; fuse the +2 (*2) into the
// buffer pointer.
movrel x6, right_ext_mask, -4
ldr b28, [x3, w17, sxtw]
sub x6, x6, w4, uxtw #1
dup v28.8h, v28.h[0]
ld1 {v25.16b, v26.16b, v27.16b}, [x6]
bit v2.16b, v28.16b, v25.16b
bit v3.16b, v28.16b, v26.16b
bit v4.16b, v28.16b, v27.16b
4: // Loop horizontally
ext v16.16b, v2.16b, v3.16b, #2
ext v18.16b, v2.16b, v3.16b, #6
ext v19.16b, v2.16b, v3.16b, #8
ext v17.16b, v2.16b, v3.16b, #4
add v18.8h, v18.8h, v16.8h
add v19.8h, v19.8h, v2.8h
shl v22.8h, v17.8h, #7
mul v6.8h, v17.8h, v0.h[3]
mla v6.8h, v18.8h, v0.h[4]
mla v6.8h, v19.8h, v0.h[5]
ext v16.16b, v3.16b, v4.16b, #2
ext v18.16b, v3.16b, v4.16b, #6
ext v19.16b, v3.16b, v4.16b, #8
ext v17.16b, v3.16b, v4.16b, #4
add v18.8h, v18.8h, v16.8h
add v19.8h, v19.8h, v3.8h
shl v23.8h, v17.8h, #7
mul v7.8h, v17.8h, v0.h[3]
mla v7.8h, v18.8h, v0.h[4]
mla v7.8h, v19.8h, v0.h[5]
ld1 {v18.8h, v19.8h}, [x12], #32
sub v22.8h, v22.8h, v30.8h
sub v23.8h, v23.8h, v30.8h
ld1 {v24.8h, v25.8h}, [x14], #32
sqadd v6.8h, v6.8h, v22.8h
sqadd v7.8h, v7.8h, v23.8h
ld1 {v16.8h, v17.8h}, [x11], #32
sshr v6.8h, v6.8h, #3
sshr v7.8h, v7.8h, #3
ld1 {v20.8h, v21.8h}, [x13], #32
add v6.8h, v6.8h, v31.8h
add v7.8h, v7.8h, v31.8h
add v24.8h, v24.8h, v18.8h
add v16.8h, v16.8h, v6.8h
smull v18.4s, v20.4h, v1.h[3]
smlal v18.4s, v24.4h, v1.h[4]
smlal v18.4s, v16.4h, v1.h[5]
add v25.8h, v25.8h, v19.8h
smull2 v19.4s, v20.8h, v1.h[3]
smlal2 v19.4s, v24.8h, v1.h[4]
smlal2 v19.4s, v16.8h, v1.h[5]
add v17.8h, v17.8h, v7.8h
smull v20.4s, v21.4h, v1.h[3]
smlal v20.4s, v25.4h, v1.h[4]
smlal v20.4s, v17.4h, v1.h[5]
smull2 v21.4s, v21.8h, v1.h[3]
smlal2 v21.4s, v25.8h, v1.h[4]
smlal2 v21.4s, v17.8h, v1.h[5]
sqrshrun v18.4h, v18.4s, #11
sqrshrun2 v18.8h, v19.4s, #11
sqrshrun v19.4h, v20.4s, #11
sqrshrun2 v19.8h, v21.4s, #11
st1 {v6.8h, v7.8h}, [x15], #32
sqxtun v18.8b, v18.8h
sqxtun2 v18.16b, v19.8h
subs w4, w4, #16
st1 {v18.16b}, [x0], #16
b.le 0f
mov v2.16b, v4.16b
ld1 {v4.16b}, [x3], #16
tst w7, #2 // LR_HAVE_RIGHT
uxtl v3.8h, v4.8b
uxtl2 v4.8h, v4.16b
b.ne 4b // If we don't need to pad, just keep filtering.
b 3b // If we need to pad, check how many pixels we have left.
0:
ldp x3, x4, [sp, #48]
ldp x15, x0, [sp, #32]
ldp x13, x14, [sp, #16]
ldp x11, x12, [sp], #64
add x3, x3, x1
add x0, x0, x1
ret
endfunc
#include "looprestoration_tmpl.S"
// void dav1d_sgr_box3_row_h_8bpc_neon(int32_t *sumsq, int16_t *sum,
// const pixel (*left)[4],
// const pixel *src, const int w,
// const enum LrEdgeFlags edges);
function sgr_box3_row_h_8bpc_neon, export=1
add w4, w4, #2 // w += 2
tst w5, #1 // LR_HAVE_LEFT
b.eq 1f
cbnz x2, 0f
// LR_HAVE_LEFT && left == NULL
sub x3, x3, #2
ld1 {v0.16b}, [x3], #16
b 2f
0:
// LR_HAVE_LEFT, left != NULL
ld1 {v0.16b}, [x3], #16
ld1 {v1.s}[3], [x2]
// Move x3 back to account for the last 2 bytes we loaded earlier,
// which we'll shift out.
sub x3, x3, #2
ext v0.16b, v1.16b, v0.16b, #14
b 2f
1:
ld1 {v0.16b}, [x3], #16
// !LR_HAVE_LEFT, fill v1 with the leftmost byte
// and shift v0 to have 2x the first byte at the front.
dup v1.16b, v0.b[0]
// Move x3 back to account for the last 2 bytes we loaded before,
// which we shifted out.
sub x3, x3, #2
ext v0.16b, v1.16b, v0.16b, #14
2:
umull v1.8h, v0.8b, v0.8b
umull2 v2.8h, v0.16b, v0.16b
tst w5, #2 // LR_HAVE_RIGHT
b.ne 4f
// If we'll need to pad the right edge, load that byte to pad with
// here since we can find it pretty easily from here.
sub w13, w4, #(2 + 16 - 2 + 1)
ldr b30, [x3, w13, sxtw]
// Fill v30 with the right padding pixel
dup v30.16b, v30.b[0]
3: // !LR_HAVE_RIGHT
// Check whether we need to pad the right edge
cmp w4, #10
b.ge 4f // If w >= 10, all used input pixels are valid
// 1 <= w < 10, w pixels valid in v0. For w=9, this ends up being called
// again; that's not strictly needed (we already pad enough here), but it
// keeps the code as simple as possible.
// Insert padding in v0.b[w] onwards
movrel x13, right_ext_mask
sub x13, x13, w4, uxtw
ld1 {v29.16b}, [x13]
bit v0.16b, v30.16b, v29.16b
// Update the precalculated squares
umull v1.8h, v0.8b, v0.8b
umull2 v2.8h, v0.16b, v0.16b
4: // Loop horizontally
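// Scalar sketch of what one iteration computes, per output element i
// (s[] are the padded source pixels; v1/v2 hold their precomputed squares):
//   sum[i]   = s[i] + s[i+1] + s[i+2];
//   sumsq[i] = s[i]*s[i] + s[i+1]*s[i+1] + s[i+2]*s[i+2];
// Eight 16-bit sums go to x1 and eight 32-bit sums of squares to x0 per
// iteration.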
ext v16.16b, v0.16b, v0.16b, #1
ext v17.16b, v0.16b, v0.16b, #2
uaddl v3.8h, v0.8b, v16.8b
ext v20.16b, v1.16b, v2.16b, #2
uaddw v3.8h, v3.8h, v17.8b
ext v21.16b, v1.16b, v2.16b, #4
uaddl v26.4s, v1.4h, v20.4h
uaddl2 v27.4s, v1.8h, v20.8h
uaddw v26.4s, v26.4s, v21.4h
uaddw2 v27.4s, v27.4s, v21.8h
subs w4, w4, #8
st1 {v3.8h}, [x1], #16
st1 {v26.4s,v27.4s}, [x0], #32
b.le 9f
tst w5, #2 // LR_HAVE_RIGHT
ld1 {v3.8b}, [x3], #8
mov v1.16b, v2.16b
ext v0.16b, v0.16b, v3.16b, #8
umull v2.8h, v3.8b, v3.8b
b.ne 4b // If we don't need to pad, just keep summing.
b 3b // If we need to pad, check how many pixels we have left.
9:
ret
endfunc
// void dav1d_sgr_box5_row_h_8bpc_neon(int32_t *sumsq, int16_t *sum,
// const pixel (*left)[4],
// const pixel *src, const int w,
// const enum LrEdgeFlags edges);
function sgr_box5_row_h_8bpc_neon, export=1
add w4, w4, #2 // w += 2
tst w5, #1 // LR_HAVE_LEFT
b.eq 1f
cbnz x2, 0f
// LR_HAVE_LEFT && left == NULL
sub x3, x3, #3
ld1 {v0.16b}, [x3], #16
b 2f
0:
// LR_HAVE_LEFT, left != NULL
ld1 {v0.16b}, [x3], #16
ld1 {v1.s}[3], [x2], #4
// Move x3 back to account for the last 3 bytes we loaded earlier,
// which we'll shift out.
sub x3, x3, #3
ext v0.16b, v1.16b, v0.16b, #13
b 2f
1:
ld1 {v0.16b}, [x3], #16
// !LR_HAVE_LEFT, fill v1 with the leftmost byte
// and shift v0 to have 3x the first byte at the front.
dup v1.16b, v0.b[0]
// Move x3 back to account for the last 3 bytes we loaded before,
// which we shifted out.
sub x3, x3, #3
ext v0.16b, v1.16b, v0.16b, #13
2:
umull v1.8h, v0.8b, v0.8b
umull2 v2.8h, v0.16b, v0.16b
tst w5, #2 // LR_HAVE_RIGHT
b.ne 4f
// If we'll need to pad the right edge, load that byte to pad with
// here since we can find it pretty easily from here.
sub w13, w4, #(2 + 16 - 3 + 1)
ldr b30, [x3, w13, sxtw]
// Fill v30 with the right padding pixel
dup v30.16b, v30.b[0]
3: // !LR_HAVE_RIGHT
// Check whether we need to pad the right edge
cmp w4, #11
b.ge 4f // If w >= 11, all used input pixels are valid
// 1 <= w < 11, w+1 pixels valid in v0. For w=9 or w=10, this ends up
// being called again; that's not strictly needed in those cases (we
// already pad enough here), but it keeps the code as simple as possible.
// Insert padding in v0.b[w+1] onwards; fuse the +1 into the
// buffer pointer.
movrel x13, right_ext_mask, -1
sub x13, x13, w4, uxtw
ld1 {v29.16b}, [x13]
bit v0.16b, v30.16b, v29.16b
// Update the precalculated squares
umull v1.8h, v0.8b, v0.8b
umull2 v2.8h, v0.16b, v0.16b
4: // Loop horizontally
ext v16.16b, v0.16b, v0.16b, #1
ext v17.16b, v0.16b, v0.16b, #2
ext v18.16b, v0.16b, v0.16b, #3
ext v19.16b, v0.16b, v0.16b, #4
uaddl v3.8h, v0.8b, v16.8b
uaddl v24.8h, v17.8b, v18.8b
uaddw v3.8h, v3.8h, v19.8b
add v3.8h, v3.8h, v24.8h
ext v16.16b, v1.16b, v2.16b, #2
ext v17.16b, v1.16b, v2.16b, #4
ext v18.16b, v1.16b, v2.16b, #6
ext v19.16b, v1.16b, v2.16b, #8
uaddl v26.4s, v1.4h, v16.4h
uaddl2 v27.4s, v1.8h, v16.8h
uaddl v16.4s, v17.4h, v18.4h
uaddl2 v17.4s, v17.8h, v18.8h
uaddw v26.4s, v26.4s, v19.4h
uaddw2 v27.4s, v27.4s, v19.8h
add v26.4s, v26.4s, v16.4s
add v27.4s, v27.4s, v17.4s
subs w4, w4, #8
st1 {v3.8h}, [x1], #16
st1 {v26.4s,v27.4s}, [x0], #32
b.le 9f
tst w5, #2 // LR_HAVE_RIGHT
ld1 {v3.8b}, [x3], #8
mov v1.16b, v2.16b
ext v0.16b, v0.16b, v3.16b, #8
umull v2.8h, v3.8b, v3.8b
b.ne 4b // If we don't need to pad, just keep summing.
b 3b // If we need to pad, check how many pixels we have left.
9:
ret
endfunc
// void dav1d_sgr_box35_row_h_8bpc_neon(int32_t *sumsq3, int16_t *sum3,
// int32_t *sumsq5, int16_t *sum5,
// const pixel (*left)[4],
// const pixel *src, const int w,
// const enum LrEdgeFlags edges);
function sgr_box35_row_h_8bpc_neon, export=1
add w6, w6, #2 // w += 2
tst w7, #1 // LR_HAVE_LEFT
b.eq 1f
cbnz x4, 0f
// LR_HAVE_LEFT && left == NULL
sub x5, x5, #3
ld1 {v0.16b}, [x5], #16
b 2f
0:
// LR_HAVE_LEFT, left != NULL
ld1 {v0.16b}, [x5], #16
ld1 {v1.s}[3], [x4], #4
// Move x5 back to account for the last 3 bytes we loaded earlier,
// which we'll shift out.
sub x5, x5, #3
ext v0.16b, v1.16b, v0.16b, #13
b 2f
1:
ld1 {v0.16b}, [x5], #16
// !LR_HAVE_LEFT, fill v1 with the leftmost byte
// and shift v0 to have 3x the first byte at the front.
dup v1.16b, v0.b[0]
// Move x5 back to account for the last 3 bytes we loaded before,
// which we shifted out.
sub x5, x5, #3
ext v0.16b, v1.16b, v0.16b, #13
2:
umull v1.8h, v0.8b, v0.8b
umull2 v2.8h, v0.16b, v0.16b
tst w7, #2 // LR_HAVE_RIGHT
b.ne 4f
// If we'll need to pad the right edge, load that byte to pad with
// here since we can find it pretty easily from here.
sub w13, w6, #(2 + 16 - 3 + 1)
ldr b30, [x5, w13, sxtw]
// Fill v30 with the right padding pixel
dup v30.16b, v30.b[0]
3: // !LR_HAVE_RIGHT
// Check whether we need to pad the right edge
cmp w6, #11
b.ge 4f // If w >= 11, all used input pixels are valid
// 1 <= w < 11, w+1 pixels valid in v0. For w=9 or w=10, this ends up
// being called again; that's not strictly needed in those cases (we
// already pad enough here), but it keeps the code as simple as possible.
// Insert padding in v0.b[w+1] onwards; fuse the +1 into the
// buffer pointer.
movrel x13, right_ext_mask, -1
sub x13, x13, w6, uxtw
ld1 {v29.16b}, [x13]
bit v0.16b, v30.16b, v29.16b
// Update the precalculated squares
umull v1.8h, v0.8b, v0.8b
umull2 v2.8h, v0.16b, v0.16b
4: // Loop horizontally
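// One pass produces both box sizes: the 3-pixel sums (s[i+1]+s[i+2]+s[i+3],
// and likewise for the squares) are stored to x1/x0, then adding the outer
// taps s[i] and s[i+4] turns them into the 5-pixel sums stored to x3/x2.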
ext v16.16b, v0.16b, v0.16b, #1
ext v17.16b, v0.16b, v0.16b, #2
ext v19.16b, v0.16b, v0.16b, #4
ext v18.16b, v0.16b, v0.16b, #3
uaddl v3.8h, v16.8b, v17.8b
uaddl v24.8h, v0.8b, v19.8b
uaddw v3.8h, v3.8h, v18.8b
ext v16.16b, v1.16b, v2.16b, #2
ext v17.16b, v1.16b, v2.16b, #4
ext v19.16b, v1.16b, v2.16b, #8
ext v18.16b, v1.16b, v2.16b, #6
st1 {v3.8h}, [x1], #16
add v3.8h, v3.8h, v24.8h
uaddl v26.4s, v16.4h, v17.4h
uaddl2 v27.4s, v16.8h, v17.8h
uaddl v16.4s, v1.4h, v19.4h
uaddl2 v17.4s, v1.8h, v19.8h
uaddw v26.4s, v26.4s, v18.4h
uaddw2 v27.4s, v27.4s, v18.8h
st1 {v26.4s,v27.4s}, [x0], #32
add v26.4s, v26.4s, v16.4s
add v27.4s, v27.4s, v17.4s
subs w6, w6, #8
st1 {v3.8h}, [x3], #16
st1 {v26.4s,v27.4s}, [x2], #32
b.le 9f
tst w7, #2 // LR_HAVE_RIGHT
ld1 {v3.8b}, [x5], #8
mov v1.16b, v2.16b
ext v0.16b, v0.16b, v3.16b, #8
umull v2.8h, v3.8b, v3.8b
b.ne 4b // If we don't need to pad, just keep summing.
b 3b // If we need to pad, check how many pixels we have left.
9:
ret
endfunc
sgr_funcs 8
| Admenri/urge | 12,389 | third_party/dav1d/src/arm/64/util.S |
/******************************************************************************
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2015 Martin Storsjo
* Copyright © 2015 Janne Grunau
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#ifndef DAV1D_SRC_ARM_64_UTIL_S
#define DAV1D_SRC_ARM_64_UTIL_S
#include "config.h"
#include "src/arm/asm.S"
#ifndef __has_feature
#define __has_feature(x) 0
#endif
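// movrel: load the address of \val plus a small constant \offset into \rd
// in a PIC-friendly way (adrp + lo12 add, or a literal pool without PIC).
// Negative offsets are applied with a separate sub on targets where the
// page-offset relocation doesn't accept a negative addend. Typical use
// (as in the loop restoration code above):
//     movrel x13, right_ext_mask, -1   // x13 = &right_ext_mask - 1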
.macro movrel rd, val, offset=0
#if defined(__APPLE__)
.if \offset < 0
adrp \rd, \val@PAGE
add \rd, \rd, \val@PAGEOFF
sub \rd, \rd, -(\offset)
.else
adrp \rd, \val+(\offset)@PAGE
add \rd, \rd, \val+(\offset)@PAGEOFF
.endif
#elif defined(PIC) && defined(_WIN32)
.if \offset < 0
adrp \rd, \val
add \rd, \rd, :lo12:\val
sub \rd, \rd, -(\offset)
.else
adrp \rd, \val+(\offset)
add \rd, \rd, :lo12:\val+(\offset)
.endif
#elif __has_feature(hwaddress_sanitizer)
adrp \rd, :pg_hi21_nc:\val+(\offset)
movk \rd, #:prel_g3:\val+0x100000000
add \rd, \rd, :lo12:\val+(\offset)
#elif defined(PIC)
adrp \rd, \val+(\offset)
add \rd, \rd, :lo12:\val+(\offset)
#else
ldr \rd, =\val+\offset
#endif
.endm
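// sub_sp: subtract \space from the stack pointer. On Windows the stack has
// to be touched at least once per 4 KB page while being grown (stack
// probing), hence the dummy ldr xzr from the intermediate page; on other
// targets the subtraction is merely split so that each part fits the
// 12-bit (optionally shifted) sub immediate encoding.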
.macro sub_sp space
#ifdef _WIN32
.if \space > 8192
// Here, we'd need to touch two (or more) pages while decrementing
// the stack pointer.
.error "sub_sp doesn't support values over 8K at the moment"
.elseif \space > 4096
sub x16, sp, #4096
ldr xzr, [x16]
sub sp, x16, #(\space - 4096)
.else
sub sp, sp, #\space
.endif
#else
.if \space >= 4096
sub sp, sp, #(\space)/4096*4096
.endif
.if (\space % 4096) != 0
sub sp, sp, #(\space)%4096
.endif
#endif
.endm
.macro transpose_8x8b_xtl r0, r1, r2, r3, r4, r5, r6, r7, xtl
// a0 b0 a1 b1 a2 b2 a3 b3 a4 b4 a5 b5 a6 b6 a7 b7
zip1 \r0\().16b, \r0\().16b, \r1\().16b
// c0 d0 c1 d1 c2 d2 c3 d3 c4 d4 c5 d5 c6 d6 c7 d7
zip1 \r2\().16b, \r2\().16b, \r3\().16b
// e0 f0 e1 f1 e2 f2 e3 f3 e4 f4 e5 f5 e6 f6 e7 f7
zip1 \r4\().16b, \r4\().16b, \r5\().16b
// g0 h0 g1 h1 g2 h2 g3 h3 g4 h4 g5 h5 g6 h6 g7 h7
zip1 \r6\().16b, \r6\().16b, \r7\().16b
// a0 b0 c0 d0 a2 b2 c2 d2 a4 b4 c4 d4 a6 b6 c6 d6
trn1 \r1\().8h, \r0\().8h, \r2\().8h
// a1 b1 c1 d1 a3 b3 c3 d3 a5 b5 c5 d5 a7 b7 c7 d7
trn2 \r3\().8h, \r0\().8h, \r2\().8h
// e0 f0 g0 h0 e2 f2 g2 h2 e4 f4 g4 h4 e6 f6 g6 h6
trn1 \r5\().8h, \r4\().8h, \r6\().8h
// e1 f1 g1 h1 e3 f3 g3 h3 e5 f5 g5 h5 e7 f7 g7 h7
trn2 \r7\().8h, \r4\().8h, \r6\().8h
// a0 b0 c0 d0 e0 f0 g0 h0 a4 b4 c4 d4 e4 f4 g4 h4
trn1 \r0\().4s, \r1\().4s, \r5\().4s
// a2 b2 c2 d2 e2 f2 g2 h2 a6 b6 c6 d6 e6 f6 g6 h6
trn2 \r2\().4s, \r1\().4s, \r5\().4s
// a1 b1 c1 d1 e1 f1 g1 h1 a5 b5 c5 d5 e5 f5 g5 h5
trn1 \r1\().4s, \r3\().4s, \r7\().4s
// a3 b3 c3 d3 e3 f3 g3 h3 a7 b7 c7 d7 e7 f7 g7 h7
trn2 \r3\().4s, \r3\().4s, \r7\().4s
\xtl\()2 \r4\().8h, \r0\().16b
\xtl \r0\().8h, \r0\().8b
\xtl\()2 \r6\().8h, \r2\().16b
\xtl \r2\().8h, \r2\().8b
\xtl\()2 \r5\().8h, \r1\().16b
\xtl \r1\().8h, \r1\().8b
\xtl\()2 \r7\().8h, \r3\().16b
\xtl \r3\().8h, \r3\().8b
.endm
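// transpose_8x8h: in-register transpose of an 8x8 matrix of 16-bit
// elements, done as three rounds of trn1/trn2 at .8h, .4s and .2d
// granularity (the same shuffle as above, without the widening step).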
.macro transpose_8x8h r0, r1, r2, r3, r4, r5, r6, r7, t8, t9
trn1 \t8\().8h, \r0\().8h, \r1\().8h
trn2 \t9\().8h, \r0\().8h, \r1\().8h
trn1 \r1\().8h, \r2\().8h, \r3\().8h
trn2 \r3\().8h, \r2\().8h, \r3\().8h
trn1 \r0\().8h, \r4\().8h, \r5\().8h
trn2 \r5\().8h, \r4\().8h, \r5\().8h
trn1 \r2\().8h, \r6\().8h, \r7\().8h
trn2 \r7\().8h, \r6\().8h, \r7\().8h
trn1 \r4\().4s, \r0\().4s, \r2\().4s
trn2 \r2\().4s, \r0\().4s, \r2\().4s
trn1 \r6\().4s, \r5\().4s, \r7\().4s
trn2 \r7\().4s, \r5\().4s, \r7\().4s
trn1 \r5\().4s, \t9\().4s, \r3\().4s
trn2 \t9\().4s, \t9\().4s, \r3\().4s
trn1 \r3\().4s, \t8\().4s, \r1\().4s
trn2 \t8\().4s, \t8\().4s, \r1\().4s
trn1 \r0\().2d, \r3\().2d, \r4\().2d
trn2 \r4\().2d, \r3\().2d, \r4\().2d
trn1 \r1\().2d, \r5\().2d, \r6\().2d
trn2 \r5\().2d, \r5\().2d, \r6\().2d
trn2 \r6\().2d, \t8\().2d, \r2\().2d
trn1 \r2\().2d, \t8\().2d, \r2\().2d
trn1 \r3\().2d, \t9\().2d, \r7\().2d
trn2 \r7\().2d, \t9\().2d, \r7\().2d
.endm
.macro transpose_8x8h_mov r0, r1, r2, r3, r4, r5, r6, r7, t8, t9, o0, o1, o2, o3, o4, o5, o6, o7
trn1 \t8\().8h, \r0\().8h, \r1\().8h
trn2 \t9\().8h, \r0\().8h, \r1\().8h
trn1 \r1\().8h, \r2\().8h, \r3\().8h
trn2 \r3\().8h, \r2\().8h, \r3\().8h
trn1 \r0\().8h, \r4\().8h, \r5\().8h
trn2 \r5\().8h, \r4\().8h, \r5\().8h
trn1 \r2\().8h, \r6\().8h, \r7\().8h
trn2 \r7\().8h, \r6\().8h, \r7\().8h
trn1 \r4\().4s, \r0\().4s, \r2\().4s
trn2 \r2\().4s, \r0\().4s, \r2\().4s
trn1 \r6\().4s, \r5\().4s, \r7\().4s
trn2 \r7\().4s, \r5\().4s, \r7\().4s
trn1 \r5\().4s, \t9\().4s, \r3\().4s
trn2 \t9\().4s, \t9\().4s, \r3\().4s
trn1 \r3\().4s, \t8\().4s, \r1\().4s
trn2 \t8\().4s, \t8\().4s, \r1\().4s
trn1 \o0\().2d, \r3\().2d, \r4\().2d
trn2 \o4\().2d, \r3\().2d, \r4\().2d
trn1 \o1\().2d, \r5\().2d, \r6\().2d
trn2 \o5\().2d, \r5\().2d, \r6\().2d
trn2 \o6\().2d, \t8\().2d, \r2\().2d
trn1 \o2\().2d, \t8\().2d, \r2\().2d
trn1 \o3\().2d, \t9\().2d, \r7\().2d
trn2 \o7\().2d, \t9\().2d, \r7\().2d
.endm
.macro transpose_8x16b r0, r1, r2, r3, r4, r5, r6, r7, t8, t9
trn1 \t8\().16b, \r0\().16b, \r1\().16b
trn2 \t9\().16b, \r0\().16b, \r1\().16b
trn1 \r1\().16b, \r2\().16b, \r3\().16b
trn2 \r3\().16b, \r2\().16b, \r3\().16b
trn1 \r0\().16b, \r4\().16b, \r5\().16b
trn2 \r5\().16b, \r4\().16b, \r5\().16b
trn1 \r2\().16b, \r6\().16b, \r7\().16b
trn2 \r7\().16b, \r6\().16b, \r7\().16b
trn1 \r4\().8h, \r0\().8h, \r2\().8h
trn2 \r2\().8h, \r0\().8h, \r2\().8h
trn1 \r6\().8h, \r5\().8h, \r7\().8h
trn2 \r7\().8h, \r5\().8h, \r7\().8h
trn1 \r5\().8h, \t9\().8h, \r3\().8h
trn2 \t9\().8h, \t9\().8h, \r3\().8h
trn1 \r3\().8h, \t8\().8h, \r1\().8h
trn2 \t8\().8h, \t8\().8h, \r1\().8h
trn1 \r0\().4s, \r3\().4s, \r4\().4s
trn2 \r4\().4s, \r3\().4s, \r4\().4s
trn1 \r1\().4s, \r5\().4s, \r6\().4s
trn2 \r5\().4s, \r5\().4s, \r6\().4s
trn2 \r6\().4s, \t8\().4s, \r2\().4s
trn1 \r2\().4s, \t8\().4s, \r2\().4s
trn1 \r3\().4s, \t9\().4s, \r7\().4s
trn2 \r7\().4s, \t9\().4s, \r7\().4s
.endm
.macro transpose_4x16b r0, r1, r2, r3, t4, t5, t6, t7
trn1 \t4\().16b, \r0\().16b, \r1\().16b
trn2 \t5\().16b, \r0\().16b, \r1\().16b
trn1 \t6\().16b, \r2\().16b, \r3\().16b
trn2 \t7\().16b, \r2\().16b, \r3\().16b
trn1 \r0\().8h, \t4\().8h, \t6\().8h
trn2 \r2\().8h, \t4\().8h, \t6\().8h
trn1 \r1\().8h, \t5\().8h, \t7\().8h
trn2 \r3\().8h, \t5\().8h, \t7\().8h
.endm
.macro transpose_4x4h r0, r1, r2, r3, t4, t5, t6, t7
trn1 \t4\().4h, \r0\().4h, \r1\().4h
trn2 \t5\().4h, \r0\().4h, \r1\().4h
trn1 \t6\().4h, \r2\().4h, \r3\().4h
trn2 \t7\().4h, \r2\().4h, \r3\().4h
trn1 \r0\().2s, \t4\().2s, \t6\().2s
trn2 \r2\().2s, \t4\().2s, \t6\().2s
trn1 \r1\().2s, \t5\().2s, \t7\().2s
trn2 \r3\().2s, \t5\().2s, \t7\().2s
.endm
.macro transpose_4x4s r0, r1, r2, r3, t4, t5, t6, t7
trn1 \t4\().4s, \r0\().4s, \r1\().4s
trn2 \t5\().4s, \r0\().4s, \r1\().4s
trn1 \t6\().4s, \r2\().4s, \r3\().4s
trn2 \t7\().4s, \r2\().4s, \r3\().4s
trn1 \r0\().2d, \t4\().2d, \t6\().2d
trn2 \r2\().2d, \t4\().2d, \t6\().2d
trn1 \r1\().2d, \t5\().2d, \t7\().2d
trn2 \r3\().2d, \t5\().2d, \t7\().2d
.endm
.macro transpose_4x8h r0, r1, r2, r3, t4, t5, t6, t7
trn1 \t4\().8h, \r0\().8h, \r1\().8h
trn2 \t5\().8h, \r0\().8h, \r1\().8h
trn1 \t6\().8h, \r2\().8h, \r3\().8h
trn2 \t7\().8h, \r2\().8h, \r3\().8h
trn1 \r0\().4s, \t4\().4s, \t6\().4s
trn2 \r2\().4s, \t4\().4s, \t6\().4s
trn1 \r1\().4s, \t5\().4s, \t7\().4s
trn2 \r3\().4s, \t5\().4s, \t7\().4s
.endm
.macro transpose_4x8h_mov r0, r1, r2, r3, t4, t5, t6, t7, o0, o1, o2, o3
trn1 \t4\().8h, \r0\().8h, \r1\().8h
trn2 \t5\().8h, \r0\().8h, \r1\().8h
trn1 \t6\().8h, \r2\().8h, \r3\().8h
trn2 \t7\().8h, \r2\().8h, \r3\().8h
trn1 \o0\().4s, \t4\().4s, \t6\().4s
trn2 \o2\().4s, \t4\().4s, \t6\().4s
trn1 \o1\().4s, \t5\().4s, \t7\().4s
trn2 \o3\().4s, \t5\().4s, \t7\().4s
.endm
#endif /* DAV1D_SRC_ARM_64_UTIL_S */
| Admenri/urge | 131,848 | third_party/dav1d/src/arm/64/mc.S |
/*
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2018, Janne Grunau
* Copyright © 2018, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm.S"
#include "util.S"
.macro avg dst, t0, t1, t2, t3
ld1 {\t0\().8h,\t1\().8h}, [x2], 32
ld1 {\t2\().8h,\t3\().8h}, [x3], 32
add \t0\().8h, \t0\().8h, \t2\().8h
add \t1\().8h, \t1\().8h, \t3\().8h
sqrshrun \dst\().8b, \t0\().8h, #5
sqrshrun2 \dst\().16b, \t1\().8h, #5
.endm
.macro w_avg dst, t0, t1, t2, t3
ld1 {\t0\().8h,\t1\().8h}, [x2], 32
ld1 {\t2\().8h,\t3\().8h}, [x3], 32
sub \t0\().8h, \t2\().8h, \t0\().8h
sub \t1\().8h, \t3\().8h, \t1\().8h
sqdmulh \t0\().8h, \t0\().8h, v30.8h
sqdmulh \t1\().8h, \t1\().8h, v30.8h
add \t0\().8h, \t2\().8h, \t0\().8h
add \t1\().8h, \t3\().8h, \t1\().8h
sqrshrun \dst\().8b, \t0\().8h, #4
sqrshrun2 \dst\().16b, \t1\().8h, #4
.endm
.macro mask dst, t0, t1, t2, t3
ld1 {v30.16b}, [x6], 16
ld1 {\t0\().8h,\t1\().8h}, [x2], 32
mul v30.16b, v30.16b, v31.16b
ld1 {\t2\().8h,\t3\().8h}, [x3], 32
shll v28.8h, v30.8b, #8
shll2 v29.8h, v30.16b, #8
sub \t0\().8h, \t2\().8h, \t0\().8h
sub \t1\().8h, \t3\().8h, \t1\().8h
sqdmulh \t0\().8h, \t0\().8h, v28.8h
sqdmulh \t1\().8h, \t1\().8h, v29.8h
add \t0\().8h, \t2\().8h, \t0\().8h
add \t1\().8h, \t3\().8h, \t1\().8h
sqrshrun \dst\().8b, \t0\().8h, #4
sqrshrun2 \dst\().16b, \t1\().8h, #4
.endm
.macro bidir_fn type
function \type\()_8bpc_neon, export=1
clz w4, w4
.ifc \type, w_avg
dup v30.8h, w6
neg v30.8h, v30.8h
shl v30.8h, v30.8h, #11
.endif
.ifc \type, mask
movi v31.16b, #256-2
.endif
movrel x7, \type\()_tbl
sub w4, w4, #24
ldrsw x4, [x7, x4, lsl #2]
\type v4, v0, v1, v2, v3
add x7, x7, x4
br x7
40:
AARCH64_VALID_JUMP_TARGET
add x7, x0, x1
lsl x1, x1, #1
4:
cmp w5, #4
st1 {v4.s}[0], [x0], x1
st1 {v4.s}[1], [x7], x1
st1 {v4.s}[2], [x0], x1
st1 {v4.s}[3], [x7], x1
b.eq 0f
\type v5, v0, v1, v2, v3
cmp w5, #8
st1 {v5.s}[0], [x0], x1
st1 {v5.s}[1], [x7], x1
st1 {v5.s}[2], [x0], x1
st1 {v5.s}[3], [x7], x1
b.eq 0f
\type v4, v0, v1, v2, v3
st1 {v4.s}[0], [x0], x1
st1 {v4.s}[1], [x7], x1
\type v5, v0, v1, v2, v3
st1 {v4.s}[2], [x0], x1
st1 {v4.s}[3], [x7], x1
st1 {v5.s}[0], [x0], x1
st1 {v5.s}[1], [x7], x1
st1 {v5.s}[2], [x0], x1
st1 {v5.s}[3], [x7], x1
ret
80:
AARCH64_VALID_JUMP_TARGET
add x7, x0, x1
lsl x1, x1, #1
8:
st1 {v4.8b}, [x0], x1
\type v5, v0, v1, v2, v3
st1 {v4.d}[1], [x7], x1
st1 {v5.8b}, [x0], x1
subs w5, w5, #4
st1 {v5.d}[1], [x7], x1
b.le 0f
\type v4, v0, v1, v2, v3
b 8b
160:
AARCH64_VALID_JUMP_TARGET
16:
\type v5, v0, v1, v2, v3
st1 {v4.16b}, [x0], x1
\type v6, v0, v1, v2, v3
st1 {v5.16b}, [x0], x1
\type v7, v0, v1, v2, v3
st1 {v6.16b}, [x0], x1
subs w5, w5, #4
st1 {v7.16b}, [x0], x1
b.le 0f
\type v4, v0, v1, v2, v3
b 16b
320:
AARCH64_VALID_JUMP_TARGET
add x7, x0, x1
lsl x1, x1, #1
32:
\type v5, v0, v1, v2, v3
\type v6, v0, v1, v2, v3
st1 {v4.16b,v5.16b}, [x0], x1
\type v7, v0, v1, v2, v3
subs w5, w5, #2
st1 {v6.16b,v7.16b}, [x7], x1
b.le 0f
\type v4, v0, v1, v2, v3
b 32b
640:
AARCH64_VALID_JUMP_TARGET
add x7, x0, x1
lsl x1, x1, #1
64:
\type v5, v0, v1, v2, v3
\type v6, v0, v1, v2, v3
\type v7, v0, v1, v2, v3
\type v16, v0, v1, v2, v3
\type v17, v0, v1, v2, v3
st1 {v4.16b,v5.16b,v6.16b,v7.16b}, [x0], x1
\type v18, v0, v1, v2, v3
\type v19, v0, v1, v2, v3
subs w5, w5, #2
st1 {v16.16b,v17.16b,v18.16b,v19.16b}, [x7], x1
b.le 0f
\type v4, v0, v1, v2, v3
b 64b
1280:
AARCH64_VALID_JUMP_TARGET
add x7, x0, #64
128:
\type v5, v0, v1, v2, v3
\type v6, v0, v1, v2, v3
\type v7, v0, v1, v2, v3
\type v16, v0, v1, v2, v3
\type v17, v0, v1, v2, v3
st1 {v4.16b,v5.16b,v6.16b,v7.16b}, [x0], x1
\type v18, v0, v1, v2, v3
\type v19, v0, v1, v2, v3
subs w5, w5, #1
st1 {v16.16b,v17.16b,v18.16b,v19.16b}, [x7], x1
b.le 0f
\type v4, v0, v1, v2, v3
b 128b
0:
ret
endfunc
jumptable \type\()_tbl
.word 1280b - \type\()_tbl
.word 640b - \type\()_tbl
.word 320b - \type\()_tbl
.word 160b - \type\()_tbl
.word 80b - \type\()_tbl
.word 40b - \type\()_tbl
endjumptable
.endm
bidir_fn avg
bidir_fn w_avg
bidir_fn mask
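// w_mask_444/422/420: like the mask blend above, except that the per-pixel
// 0..64 weight is derived from |tmp1 - tmp2| instead of being passed in,
// and the derived mask is also written out to x6: at full resolution for
// 444, folded together per 2 pixels for 422 and per 2x2 block for 420
// (with the w7 sign argument worked into the rounding).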
.macro w_mask_fn type
function w_mask_\type\()_8bpc_neon, export=1
clz w8, w4
movrel x9, w_mask_\type\()_tbl
sub w8, w8, #24
ldrsw x8, [x9, x8, lsl #2]
add x9, x9, x8
mov w10, #6903
dup v0.8h, w10
.if \type == 444
movi v1.16b, #64
.elseif \type == 422
dup v2.8b, w7
movi v3.8b, #129
sub v3.8b, v3.8b, v2.8b
.elseif \type == 420
dup v2.8h, w7
movi v3.8h, #1, lsl #8
sub v3.8h, v3.8h, v2.8h
.endif
add x12, x0, x1
lsl x1, x1, #1
br x9
40:
AARCH64_VALID_JUMP_TARGET
4:
ld1 {v4.8h, v5.8h}, [x2], #32 // tmp1 (four rows at once)
ld1 {v6.8h, v7.8h}, [x3], #32 // tmp2 (four rows at once)
subs w5, w5, #4
sub v16.8h, v6.8h, v4.8h
sub v17.8h, v7.8h, v5.8h
sabd v18.8h, v4.8h, v6.8h
sabd v19.8h, v5.8h, v7.8h
uqsub v18.8h, v0.8h, v18.8h
uqsub v19.8h, v0.8h, v19.8h
ushr v18.8h, v18.8h, #8
ushr v19.8h, v19.8h, #8
shl v20.8h, v18.8h, #9
shl v21.8h, v19.8h, #9
sqdmulh v20.8h, v20.8h, v16.8h
sqdmulh v21.8h, v21.8h, v17.8h
add v20.8h, v20.8h, v4.8h
add v21.8h, v21.8h, v5.8h
sqrshrun v22.8b, v20.8h, #4
sqrshrun v23.8b, v21.8h, #4
.if \type == 444
uzp1 v18.16b, v18.16b, v19.16b // Same as xtn, xtn2
sub v18.16b, v1.16b, v18.16b
st1 {v18.16b}, [x6], #16
.elseif \type == 422
addp v18.8h, v18.8h, v19.8h
xtn v18.8b, v18.8h
uhsub v18.8b, v3.8b, v18.8b
st1 {v18.8b}, [x6], #8
.elseif \type == 420
trn1 v24.2d, v18.2d, v19.2d
trn2 v25.2d, v18.2d, v19.2d
add v24.8h, v24.8h, v25.8h
addp v18.8h, v24.8h, v24.8h
sub v18.4h, v3.4h, v18.4h
rshrn v18.8b, v18.8h, #2
str s18, [x6], #4
.endif
st1 {v22.s}[0], [x0], x1
st1 {v22.s}[1], [x12], x1
st1 {v23.s}[0], [x0], x1
st1 {v23.s}[1], [x12], x1
b.gt 4b
ret
80:
AARCH64_VALID_JUMP_TARGET
8:
ld1 {v4.8h, v5.8h}, [x2], #32
ld1 {v6.8h, v7.8h}, [x3], #32
subs w5, w5, #2
sub v16.8h, v6.8h, v4.8h
sub v17.8h, v7.8h, v5.8h
sabd v18.8h, v4.8h, v6.8h
sabd v19.8h, v5.8h, v7.8h
uqsub v18.8h, v0.8h, v18.8h
uqsub v19.8h, v0.8h, v19.8h
ushr v18.8h, v18.8h, #8
ushr v19.8h, v19.8h, #8
shl v20.8h, v18.8h, #9
shl v21.8h, v19.8h, #9
sqdmulh v20.8h, v20.8h, v16.8h
sqdmulh v21.8h, v21.8h, v17.8h
add v20.8h, v20.8h, v4.8h
add v21.8h, v21.8h, v5.8h
sqrshrun v22.8b, v20.8h, #4
sqrshrun v23.8b, v21.8h, #4
.if \type == 444
uzp1 v18.16b, v18.16b, v19.16b // Same as xtn, xtn2
sub v18.16b, v1.16b, v18.16b
st1 {v18.16b}, [x6], #16
.elseif \type == 422
addp v18.8h, v18.8h, v19.8h
xtn v18.8b, v18.8h
uhsub v18.8b, v3.8b, v18.8b
st1 {v18.8b}, [x6], #8
.elseif \type == 420
add v18.8h, v18.8h, v19.8h
addp v18.8h, v18.8h, v18.8h
sub v18.4h, v3.4h, v18.4h
rshrn v18.8b, v18.8h, #2
str s18, [x6], #4
.endif
st1 {v22.8b}, [x0], x1
st1 {v23.8b}, [x12], x1
b.gt 8b
ret
1280:
640:
320:
160:
AARCH64_VALID_JUMP_TARGET
mov w11, w4
sub x1, x1, w4, uxtw
.if \type == 444
add x10, x6, w4, uxtw
.elseif \type == 422
add x10, x6, x11, lsr #1
.endif
add x9, x3, w4, uxtw #1
add x7, x2, w4, uxtw #1
161:
mov w8, w4
16:
ld1 {v4.8h, v5.8h}, [x2], #32
ld1 {v6.8h, v7.8h}, [x3], #32
ld1 {v16.8h, v17.8h}, [x7], #32
ld1 {v18.8h, v19.8h}, [x9], #32
subs w8, w8, #16
sub v6.8h, v6.8h, v4.8h
sub v7.8h, v7.8h, v5.8h
sub v18.8h, v18.8h, v16.8h
sub v19.8h, v19.8h, v17.8h
abs v20.8h, v6.8h
abs v21.8h, v7.8h
abs v22.8h, v18.8h
abs v23.8h, v19.8h
uqsub v20.8h, v0.8h, v20.8h
uqsub v21.8h, v0.8h, v21.8h
uqsub v22.8h, v0.8h, v22.8h
uqsub v23.8h, v0.8h, v23.8h
ushr v20.8h, v20.8h, #8
ushr v21.8h, v21.8h, #8
ushr v22.8h, v22.8h, #8
ushr v23.8h, v23.8h, #8
shl v24.8h, v20.8h, #9
shl v25.8h, v21.8h, #9
shl v26.8h, v22.8h, #9
shl v27.8h, v23.8h, #9
sqdmulh v24.8h, v24.8h, v6.8h
sqdmulh v25.8h, v25.8h, v7.8h
sqdmulh v26.8h, v26.8h, v18.8h
sqdmulh v27.8h, v27.8h, v19.8h
add v24.8h, v24.8h, v4.8h
add v25.8h, v25.8h, v5.8h
add v26.8h, v26.8h, v16.8h
add v27.8h, v27.8h, v17.8h
sqrshrun v24.8b, v24.8h, #4
sqrshrun v25.8b, v25.8h, #4
sqrshrun v26.8b, v26.8h, #4
sqrshrun v27.8b, v27.8h, #4
.if \type == 444
uzp1 v20.16b, v20.16b, v21.16b // Same as xtn, xtn2
uzp1 v21.16b, v22.16b, v23.16b // Ditto
sub v20.16b, v1.16b, v20.16b
sub v21.16b, v1.16b, v21.16b
st1 {v20.16b}, [x6], #16
st1 {v21.16b}, [x10], #16
.elseif \type == 422
addp v20.8h, v20.8h, v21.8h
addp v21.8h, v22.8h, v23.8h
xtn v20.8b, v20.8h
xtn v21.8b, v21.8h
uhsub v20.8b, v3.8b, v20.8b
uhsub v21.8b, v3.8b, v21.8b
st1 {v20.8b}, [x6], #8
st1 {v21.8b}, [x10], #8
.elseif \type == 420
add v20.8h, v20.8h, v22.8h
add v21.8h, v21.8h, v23.8h
addp v20.8h, v20.8h, v21.8h
sub v20.8h, v3.8h, v20.8h
rshrn v20.8b, v20.8h, #2
st1 {v20.8b}, [x6], #8
.endif
st1 {v24.8b, v25.8b}, [x0], #16
st1 {v26.8b, v27.8b}, [x12], #16
b.gt 16b
subs w5, w5, #2
add x2, x2, w4, uxtw #1
add x3, x3, w4, uxtw #1
add x7, x7, w4, uxtw #1
add x9, x9, w4, uxtw #1
.if \type == 444
add x6, x6, w4, uxtw
add x10, x10, w4, uxtw
.elseif \type == 422
add x6, x6, x11, lsr #1
add x10, x10, x11, lsr #1
.endif
add x0, x0, x1
add x12, x12, x1
b.gt 161b
ret
endfunc
jumptable w_mask_\type\()_tbl
.word 1280b - w_mask_\type\()_tbl
.word 640b - w_mask_\type\()_tbl
.word 320b - w_mask_\type\()_tbl
.word 160b - w_mask_\type\()_tbl
.word 80b - w_mask_\type\()_tbl
.word 40b - w_mask_\type\()_tbl
endjumptable
.endm
w_mask_fn 444
w_mask_fn 422
w_mask_fn 420
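// blend: plain masked blend with the 0..64 mask supplied in x5.
// Scalar sketch:
//   dst[x] = (tmp[x]*m[x] + dst[x]*(64 - m[x]) + 32) >> 6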
function blend_8bpc_neon, export=1
movrel x6, blend_tbl
clz w3, w3
sub w3, w3, #26
ldrsw x3, [x6, x3, lsl #2]
add x6, x6, x3
movi v4.16b, #64
add x8, x0, x1
lsl x1, x1, #1
br x6
40:
AARCH64_VALID_JUMP_TARGET
4:
ld1 {v2.8b}, [x5], #8
ldr d1, [x2], #8
ldr s0, [x0]
subs w4, w4, #2
ld1 {v0.s}[1], [x8]
sub v3.8b, v4.8b, v2.8b
umull v5.8h, v1.8b, v2.8b
umlal v5.8h, v0.8b, v3.8b
rshrn v6.8b, v5.8h, #6
st1 {v6.s}[0], [x0], x1
st1 {v6.s}[1], [x8], x1
b.gt 4b
ret
80:
AARCH64_VALID_JUMP_TARGET
8:
ld1 {v2.16b}, [x5], #16
ld1 {v1.16b}, [x2], #16
ldr d0, [x0]
ld1 {v0.d}[1], [x8]
sub v3.16b, v4.16b, v2.16b
subs w4, w4, #2
umull v5.8h, v1.8b, v2.8b
umlal v5.8h, v0.8b, v3.8b
umull2 v6.8h, v1.16b, v2.16b
umlal2 v6.8h, v0.16b, v3.16b
rshrn v7.8b, v5.8h, #6
rshrn v16.8b, v6.8h, #6
st1 {v7.8b}, [x0], x1
st1 {v16.8b}, [x8], x1
b.gt 8b
ret
160:
AARCH64_VALID_JUMP_TARGET
16:
ld1 {v1.16b, v2.16b}, [x5], #32
ld1 {v5.16b, v6.16b}, [x2], #32
ld1 {v0.16b}, [x0]
subs w4, w4, #2
sub v7.16b, v4.16b, v1.16b
sub v20.16b, v4.16b, v2.16b
ld1 {v3.16b}, [x8]
umull v16.8h, v5.8b, v1.8b
umlal v16.8h, v0.8b, v7.8b
umull2 v17.8h, v5.16b, v1.16b
umlal2 v17.8h, v0.16b, v7.16b
umull v21.8h, v6.8b, v2.8b
umlal v21.8h, v3.8b, v20.8b
umull2 v22.8h, v6.16b, v2.16b
umlal2 v22.8h, v3.16b, v20.16b
rshrn v18.8b, v16.8h, #6
rshrn2 v18.16b, v17.8h, #6
rshrn v19.8b, v21.8h, #6
rshrn2 v19.16b, v22.8h, #6
st1 {v18.16b}, [x0], x1
st1 {v19.16b}, [x8], x1
b.gt 16b
ret
320:
AARCH64_VALID_JUMP_TARGET
32:
ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x5], #64
ld1 {v16.16b, v17.16b, v18.16b, v19.16b}, [x2], #64
ld1 {v20.16b, v21.16b}, [x0]
subs w4, w4, #2
ld1 {v22.16b, v23.16b}, [x8]
sub v5.16b, v4.16b, v0.16b
sub v6.16b, v4.16b, v1.16b
sub v30.16b, v4.16b, v2.16b
sub v31.16b, v4.16b, v3.16b
umull v24.8h, v16.8b, v0.8b
umlal v24.8h, v20.8b, v5.8b
umull2 v26.8h, v16.16b, v0.16b
umlal2 v26.8h, v20.16b, v5.16b
umull v28.8h, v17.8b, v1.8b
umlal v28.8h, v21.8b, v6.8b
umull2 v7.8h, v17.16b, v1.16b
umlal2 v7.8h, v21.16b, v6.16b
umull v27.8h, v18.8b, v2.8b
umlal v27.8h, v22.8b, v30.8b
umull2 v1.8h, v18.16b, v2.16b
umlal2 v1.8h, v22.16b, v30.16b
umull v29.8h, v19.8b, v3.8b
umlal v29.8h, v23.8b, v31.8b
umull2 v21.8h, v19.16b, v3.16b
umlal2 v21.8h, v23.16b, v31.16b
rshrn v24.8b, v24.8h, #6
rshrn2 v24.16b, v26.8h, #6
rshrn v25.8b, v28.8h, #6
rshrn2 v25.16b, v7.8h, #6
rshrn v27.8b, v27.8h, #6
rshrn2 v27.16b, v1.8h, #6
rshrn v28.8b, v29.8h, #6
rshrn2 v28.16b, v21.8h, #6
st1 {v24.16b, v25.16b}, [x0], x1
st1 {v27.16b, v28.16b}, [x8], x1
b.gt 32b
ret
endfunc
jumptable blend_tbl
.word 320b - blend_tbl
.word 160b - blend_tbl
.word 80b - blend_tbl
.word 40b - blend_tbl
endjumptable
function blend_h_8bpc_neon, export=1
movrel x6, blend_h_tbl
movrel x5, X(obmc_masks)
add x5, x5, w4, uxtw
sub w4, w4, w4, lsr #2
clz w7, w3
movi v4.16b, #64
add x8, x0, x1
lsl x1, x1, #1
sub w7, w7, #24
ldrsw x7, [x6, x7, lsl #2]
add x6, x6, x7
br x6
20:
AARCH64_VALID_JUMP_TARGET
2:
ldr h0, [x5], #2
ldr s1, [x2], #4
subs w4, w4, #2
ldr h2, [x0]
zip1 v0.8b, v0.8b, v0.8b
sub v3.8b, v4.8b, v0.8b
ld1 {v2.h}[1], [x8]
umull v5.8h, v1.8b, v0.8b
umlal v5.8h, v2.8b, v3.8b
rshrn v5.8b, v5.8h, #6
st1 {v5.h}[0], [x0], x1
st1 {v5.h}[1], [x8], x1
b.gt 2b
ret
40:
AARCH64_VALID_JUMP_TARGET
4:
ld2r {v0.8b, v1.8b}, [x5], #2
ld1 {v2.8b}, [x2], #8
subs w4, w4, #2
ext v0.8b, v0.8b, v1.8b, #4
ldr s3, [x0]
sub v5.8b, v4.8b, v0.8b
ld1 {v3.s}[1], [x8]
umull v6.8h, v2.8b, v0.8b
umlal v6.8h, v3.8b, v5.8b
rshrn v6.8b, v6.8h, #6
st1 {v6.s}[0], [x0], x1
st1 {v6.s}[1], [x8], x1
b.gt 4b
ret
80:
AARCH64_VALID_JUMP_TARGET
8:
ld2r {v0.16b, v1.16b}, [x5], #2
ld1 {v2.16b}, [x2], #16
ldr d3, [x0]
ext v0.16b, v0.16b, v1.16b, #8
sub v5.16b, v4.16b, v0.16b
ld1 {v3.d}[1], [x8]
subs w4, w4, #2
umull v6.8h, v0.8b, v2.8b
umlal v6.8h, v3.8b, v5.8b
umull2 v7.8h, v0.16b, v2.16b
umlal2 v7.8h, v3.16b, v5.16b
rshrn v16.8b, v6.8h, #6
rshrn v17.8b, v7.8h, #6
st1 {v16.8b}, [x0], x1
st1 {v17.8b}, [x8], x1
b.gt 8b
ret
160:
AARCH64_VALID_JUMP_TARGET
16:
ld2r {v0.16b, v1.16b}, [x5], #2
ld1 {v2.16b, v3.16b}, [x2], #32
ld1 {v5.16b}, [x0]
sub v7.16b, v4.16b, v0.16b
sub v16.16b, v4.16b, v1.16b
ld1 {v6.16b}, [x8]
subs w4, w4, #2
umull v17.8h, v0.8b, v2.8b
umlal v17.8h, v5.8b, v7.8b
umull2 v18.8h, v0.16b, v2.16b
umlal2 v18.8h, v5.16b, v7.16b
umull v19.8h, v1.8b, v3.8b
umlal v19.8h, v6.8b, v16.8b
umull2 v20.8h, v1.16b, v3.16b
umlal2 v20.8h, v6.16b, v16.16b
rshrn v21.8b, v17.8h, #6
rshrn2 v21.16b, v18.8h, #6
rshrn v22.8b, v19.8h, #6
rshrn2 v22.16b, v20.8h, #6
st1 {v21.16b}, [x0], x1
st1 {v22.16b}, [x8], x1
b.gt 16b
ret
1280:
640:
320:
AARCH64_VALID_JUMP_TARGET
sub x1, x1, w3, uxtw
add x7, x2, w3, uxtw
321:
ld2r {v0.16b, v1.16b}, [x5], #2
mov w6, w3
sub v20.16b, v4.16b, v0.16b
sub v21.16b, v4.16b, v1.16b
32:
ld1 {v16.16b, v17.16b}, [x2], #32
ld1 {v2.16b, v3.16b}, [x0]
subs w6, w6, #32
umull v23.8h, v0.8b, v16.8b
umlal v23.8h, v2.8b, v20.8b
ld1 {v18.16b, v19.16b}, [x7], #32
umull2 v27.8h, v0.16b, v16.16b
umlal2 v27.8h, v2.16b, v20.16b
ld1 {v6.16b, v7.16b}, [x8]
umull v24.8h, v0.8b, v17.8b
umlal v24.8h, v3.8b, v20.8b
umull2 v28.8h, v0.16b, v17.16b
umlal2 v28.8h, v3.16b, v20.16b
umull v25.8h, v1.8b, v18.8b
umlal v25.8h, v6.8b, v21.8b
umull2 v5.8h, v1.16b, v18.16b
umlal2 v5.8h, v6.16b, v21.16b
rshrn v29.8b, v23.8h, #6
rshrn2 v29.16b, v27.8h, #6
umull v26.8h, v1.8b, v19.8b
umlal v26.8h, v7.8b, v21.8b
umull2 v31.8h, v1.16b, v19.16b
umlal2 v31.8h, v7.16b, v21.16b
rshrn v30.8b, v24.8h, #6
rshrn2 v30.16b, v28.8h, #6
rshrn v23.8b, v25.8h, #6
rshrn2 v23.16b, v5.8h, #6
rshrn v24.8b, v26.8h, #6
st1 {v29.16b, v30.16b}, [x0], #32
rshrn2 v24.16b, v31.8h, #6
st1 {v23.16b, v24.16b}, [x8], #32
b.gt 32b
subs w4, w4, #2
add x0, x0, x1
add x8, x8, x1
add x2, x2, w3, uxtw
add x7, x7, w3, uxtw
b.gt 321b
ret
endfunc
jumptable blend_h_tbl
.word 1280b - blend_h_tbl
.word 640b - blend_h_tbl
.word 320b - blend_h_tbl
.word 160b - blend_h_tbl
.word 80b - blend_h_tbl
.word 40b - blend_h_tbl
.word 20b - blend_h_tbl
endjumptable
function blend_v_8bpc_neon, export=1
movrel x6, blend_v_tbl
movrel x5, X(obmc_masks)
add x5, x5, w3, uxtw
clz w3, w3
movi v4.16b, #64
add x8, x0, x1
lsl x1, x1, #1
sub w3, w3, #26
ldrsw x3, [x6, x3, lsl #2]
add x6, x6, x3
br x6
20:
AARCH64_VALID_JUMP_TARGET
ld1r {v0.8b}, [x5]
sub v1.8b, v4.8b, v0.8b
2:
ldr h2, [x2], #2
ldr b3, [x0]
subs w4, w4, #2
ld1 {v2.b}[1], [x2]
ld1 {v3.b}[1], [x8]
umull v5.8h, v2.8b, v0.8b
umlal v5.8h, v3.8b, v1.8b
rshrn v5.8b, v5.8h, #6
add x2, x2, #2
st1 {v5.b}[0], [x0], x1
st1 {v5.b}[1], [x8], x1
b.gt 2b
ret
40:
AARCH64_VALID_JUMP_TARGET
ld1r {v0.2s}, [x5]
sub x1, x1, #2
sub v1.8b, v4.8b, v0.8b
4:
ld1 {v2.8b}, [x2], #8
ldr s3, [x0]
ld1 {v3.s}[1], [x8]
subs w4, w4, #2
umull v5.8h, v2.8b, v0.8b
umlal v5.8h, v3.8b, v1.8b
rshrn v5.8b, v5.8h, #6
str h5, [x0], #2
st1 {v5.h}[2], [x8], #2
st1 {v5.b}[2], [x0], x1
st1 {v5.b}[6], [x8], x1
b.gt 4b
ret
80:
AARCH64_VALID_JUMP_TARGET
ld1r {v0.2d}, [x5]
sub x1, x1, #4
sub v1.16b, v4.16b, v0.16b
zip2 v16.2d, v1.2d, v1.2d
8:
ld1 {v2.16b}, [x2], #16
ldr d3, [x0]
ldr d4, [x8]
subs w4, w4, #2
umull v5.8h, v0.8b, v2.8b
umlal v5.8h, v3.8b, v1.8b
umull2 v6.8h, v0.16b, v2.16b
umlal v6.8h, v4.8b, v16.8b
rshrn v7.8b, v5.8h, #6
rshrn v17.8b, v6.8h, #6
str s7, [x0], #4
str s17, [x8], #4
st1 {v7.h}[2], [x0], x1
st1 {v17.h}[2], [x8], x1
b.gt 8b
ret
160:
AARCH64_VALID_JUMP_TARGET
ld1 {v0.16b}, [x5]
sub x1, x1, #8
sub v2.16b, v4.16b, v0.16b
16:
ld1 {v5.16b, v6.16b}, [x2], #32
ld1 {v7.16b}, [x0]
subs w4, w4, #2
ld1 {v16.16b}, [x8]
umull v17.8h, v5.8b, v0.8b
umlal v17.8h, v7.8b, v2.8b
umull2 v18.8h, v5.16b, v0.16b
umlal2 v18.8h, v7.16b, v2.16b
umull v20.8h, v6.8b, v0.8b
umlal v20.8h, v16.8b, v2.8b
umull2 v21.8h, v6.16b, v0.16b
umlal2 v21.8h, v16.16b, v2.16b
rshrn v19.8b, v17.8h, #6
rshrn2 v19.16b, v18.8h, #6
rshrn v22.8b, v20.8h, #6
rshrn2 v22.16b, v21.8h, #6
st1 {v19.8b}, [x0], #8
st1 {v22.8b}, [x8], #8
st1 {v19.s}[2], [x0], x1
st1 {v22.s}[2], [x8], x1
b.gt 16b
ret
320:
AARCH64_VALID_JUMP_TARGET
ld1 {v0.16b, v1.16b}, [x5]
sub x1, x1, #16
sub v2.16b, v4.16b, v0.16b
sub v3.8b, v4.8b, v1.8b
32:
ld1 {v16.16b, v17.16b, v18.16b, v19.16b}, [x2], #64
ld1 {v5.16b, v6.16b}, [x0]
subs w4, w4, #2
ld1 {v20.16b, v21.16b}, [x8]
umull v22.8h, v16.8b, v0.8b
umlal v22.8h, v5.8b, v2.8b
umull2 v23.8h, v16.16b, v0.16b
umlal2 v23.8h, v5.16b, v2.16b
umull v28.8h, v17.8b, v1.8b
umlal v28.8h, v6.8b, v3.8b
umull v30.8h, v18.8b, v0.8b
umlal v30.8h, v20.8b, v2.8b
umull2 v31.8h, v18.16b, v0.16b
umlal2 v31.8h, v20.16b, v2.16b
umull v25.8h, v19.8b, v1.8b
umlal v25.8h, v21.8b, v3.8b
rshrn v24.8b, v22.8h, #6
rshrn2 v24.16b, v23.8h, #6
rshrn v28.8b, v28.8h, #6
rshrn v30.8b, v30.8h, #6
rshrn2 v30.16b, v31.8h, #6
rshrn v27.8b, v25.8h, #6
st1 {v24.16b}, [x0], #16
st1 {v30.16b}, [x8], #16
st1 {v28.8b}, [x0], x1
st1 {v27.8b}, [x8], x1
b.gt 32b
ret
endfunc
jumptable blend_v_tbl
.word 320b - blend_v_tbl
.word 160b - blend_v_tbl
.word 80b - blend_v_tbl
.word 40b - blend_v_tbl
.word 20b - blend_v_tbl
endjumptable
// This has the same signature as the put_8tap functions,
// and assumes that x8 is set to (clz(w)-24).
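// For the power-of-two widths handled here this gives:
//   w:          128  64  32  16   8   4   2
//   clz(w)-24:    0   1   2   3   4   5   6
// i.e. the value indexes put_tbl below in the order its .word entries are
// listed (1280b first, 20b last).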
function put_neon, export=1
movrel x9, put_tbl
ldrsw x8, [x9, x8, lsl #2]
add x9, x9, x8
br x9
20:
AARCH64_VALID_JUMP_TARGET
2:
ldrh w9, [x2]
ldrh w10, [x2, x3]
add x2, x2, x3, lsl #1
subs w5, w5, #2
strh w9, [x0]
strh w10, [x0, x1]
add x0, x0, x1, lsl #1
b.gt 2b
ret
40:
AARCH64_VALID_JUMP_TARGET
4:
ldr w9, [x2]
ldr w10, [x2, x3]
add x2, x2, x3, lsl #1
subs w5, w5, #2
str w9, [x0]
str w10, [x0, x1]
add x0, x0, x1, lsl #1
b.gt 4b
ret
80:
AARCH64_VALID_JUMP_TARGET
8:
ldr x9, [x2]
ldr x10, [x2, x3]
add x2, x2, x3, lsl #1
subs w5, w5, #2
str x9, [x0]
str x10, [x0, x1]
add x0, x0, x1, lsl #1
b.gt 8b
ret
160:
AARCH64_VALID_JUMP_TARGET
16:
ldr q0, [x2]
ldr q1, [x2, x3]
add x2, x2, x3, lsl #1
subs w5, w5, #2
str q0, [x0]
str q1, [x0, x1]
add x0, x0, x1, lsl #1
b.gt 16b
ret
320:
AARCH64_VALID_JUMP_TARGET
32:
ldp q0, q1, [x2]
add x2, x2, x3
stp q0, q1, [x0]
add x0, x0, x1
ldp q2, q3, [x2]
add x2, x2, x3
stp q2, q3, [x0]
subs w5, w5, #2
add x0, x0, x1
b.gt 32b
ret
640:
AARCH64_VALID_JUMP_TARGET
64:
ldp q0, q1, [x2]
stp q0, q1, [x0]
ldp q2, q3, [x2, #32]
add x2, x2, x3
stp q2, q3, [x0, #32]
subs w5, w5, #1
add x0, x0, x1
b.gt 64b
ret
1280:
AARCH64_VALID_JUMP_TARGET
128:
ldp q0, q1, [x2]
stp q0, q1, [x0]
ldp q2, q3, [x2, #32]
stp q2, q3, [x0, #32]
ldp q4, q5, [x2, #64]
stp q4, q5, [x0, #64]
ldp q6, q7, [x2, #96]
add x2, x2, x3
stp q6, q7, [x0, #96]
subs w5, w5, #1
add x0, x0, x1
b.gt 128b
ret
endfunc
jumptable put_tbl
.word 1280b - put_tbl
.word 640b - put_tbl
.word 320b - put_tbl
.word 160b - put_tbl
.word 80b - put_tbl
.word 40b - put_tbl
.word 20b - put_tbl
endjumptable
// This has the same signature as the prep_8tap functions,
// and assumes that x8 is set to (clz(w)-24), and x7 to w*2.
function prep_neon, export=1
movrel x9, prep_tbl
ldrsw x8, [x9, x8, lsl #2]
movi v24.16b, #16
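// v24 = 16: umull by it widens an 8-bit pixel to the same 16-bit value as
// ushll #4; the larger block sizes below mix both forms, presumably to
// spread the work across different execution units.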
add x9, x9, x8
br x9
40:
AARCH64_VALID_JUMP_TARGET
4:
ldr s0, [x1]
ldr s2, [x1, x2]
add x1, x1, x2, lsl #1
ldr s1, [x1]
ldr s3, [x1, x2]
add x1, x1, x2, lsl #1
mov v0.s[1], v2.s[0]
mov v1.s[1], v3.s[0]
ushll v0.8h, v0.8b, #4
ushll v1.8h, v1.8b, #4
subs w4, w4, #4
stp q0, q1, [x0], #32
b.gt 4b
ret
80:
AARCH64_VALID_JUMP_TARGET
8:
ldr d0, [x1]
ldr d1, [x1, x2]
add x1, x1, x2, lsl #1
ldr d2, [x1]
ldr d3, [x1, x2]
add x1, x1, x2, lsl #1
ushll v0.8h, v0.8b, #4
ushll v1.8h, v1.8b, #4
umull v2.8h, v2.8b, v24.8b
umull v3.8h, v3.8b, v24.8b
subs w4, w4, #4
stp q0, q1, [x0]
stp q2, q3, [x0, #32]
add x0, x0, #64
b.gt 8b
ret
160:
AARCH64_VALID_JUMP_TARGET
16:
ldr q1, [x1]
ldr q3, [x1, x2]
add x1, x1, x2, lsl #1
ushll v0.8h, v1.8b, #4
ushll2 v1.8h, v1.16b, #4
ldr q5, [x1]
ldr q7, [x1, x2]
add x1, x1, x2, lsl #1
umull v2.8h, v3.8b, v24.8b
umull2 v3.8h, v3.16b, v24.16b
ushll v4.8h, v5.8b, #4
ushll2 v5.8h, v5.16b, #4
umull v6.8h, v7.8b, v24.8b
umull2 v7.8h, v7.16b, v24.16b
subs w4, w4, #4
stp q0, q1, [x0]
stp q2, q3, [x0, #32]
stp q4, q5, [x0, #64]
stp q6, q7, [x0, #96]
add x0, x0, #128
b.gt 16b
ret
320:
AARCH64_VALID_JUMP_TARGET
32:
ldp q4, q5, [x1]
add x1, x1, x2
ldp q6, q7, [x1]
add x1, x1, x2
ushll v0.8h, v4.8b, #4
ushll2 v1.8h, v4.16b, #4
umull v2.8h, v5.8b, v24.8b
umull2 v3.8h, v5.16b, v24.16b
ushll v4.8h, v6.8b, #4
ushll2 v5.8h, v6.16b, #4
umull v6.8h, v7.8b, v24.8b
umull2 v7.8h, v7.16b, v24.16b
subs w4, w4, #2
stp q0, q1, [x0]
stp q2, q3, [x0, #32]
stp q4, q5, [x0, #64]
stp q6, q7, [x0, #96]
add x0, x0, #128
b.gt 32b
ret
640:
AARCH64_VALID_JUMP_TARGET
64:
ldp q4, q5, [x1]
ldp q6, q7, [x1, #32]
add x1, x1, x2
ushll v0.8h, v4.8b, #4
ushll2 v1.8h, v4.16b, #4
umull v2.8h, v5.8b, v24.8b
umull2 v3.8h, v5.16b, v24.16b
ushll v4.8h, v6.8b, #4
ushll2 v5.8h, v6.16b, #4
umull v6.8h, v7.8b, v24.8b
umull2 v7.8h, v7.16b, v24.16b
subs w4, w4, #1
stp q0, q1, [x0]
stp q2, q3, [x0, #32]
stp q4, q5, [x0, #64]
stp q6, q7, [x0, #96]
add x0, x0, #128
b.gt 64b
ret
1280:
AARCH64_VALID_JUMP_TARGET
128:
ldp q28, q29, [x1]
ldp q30, q31, [x1, #32]
ushll v16.8h, v28.8b, #4
ushll2 v17.8h, v28.16b, #4
umull v18.8h, v29.8b, v24.8b
umull2 v19.8h, v29.16b, v24.16b
ushll v20.8h, v30.8b, #4
ushll2 v21.8h, v30.16b, #4
umull v22.8h, v31.8b, v24.8b
umull2 v23.8h, v31.16b, v24.16b
ldp q28, q29, [x1, #64]
ldp q30, q31, [x1, #96]
add x1, x1, x2
stp q16, q17, [x0]
stp q18, q19, [x0, #32]
stp q20, q21, [x0, #64]
stp q22, q23, [x0, #96]
ushll v16.8h, v28.8b, #4
ushll2 v17.8h, v28.16b, #4
umull v18.8h, v29.8b, v24.8b
umull2 v19.8h, v29.16b, v24.16b
ushll v20.8h, v30.8b, #4
ushll2 v21.8h, v30.16b, #4
umull v22.8h, v31.8b, v24.8b
umull2 v23.8h, v31.16b, v24.16b
subs w4, w4, #1
stp q16, q17, [x0, #128]
stp q18, q19, [x0, #160]
stp q20, q21, [x0, #192]
stp q22, q23, [x0, #224]
add x0, x0, #256
b.gt 128b
ret
endfunc
jumptable prep_tbl
.word 1280b - prep_tbl
.word 640b - prep_tbl
.word 320b - prep_tbl
.word 160b - prep_tbl
.word 80b - prep_tbl
.word 40b - prep_tbl
endjumptable
.macro load_slice s0, s1, strd, wd, d0, d1, d2, d3, d4, d5, d6
ld1 {\d0\wd}[0], [\s0], \strd
ld1 {\d1\wd}[0], [\s1], \strd
.ifnb \d2
ld1 {\d2\wd}[0], [\s0], \strd
ld1 {\d3\wd}[0], [\s1], \strd
.endif
.ifnb \d4
ld1 {\d4\wd}[0], [\s0], \strd
.endif
.ifnb \d5
ld1 {\d5\wd}[0], [\s1], \strd
.endif
.ifnb \d6
ld1 {\d6\wd}[0], [\s0], \strd
.endif
.endm
.macro load_reg s0, s1, strd, wd, d0, d1, d2, d3, d4, d5, d6
ld1 {\d0\wd}, [\s0], \strd
ld1 {\d1\wd}, [\s1], \strd
.ifnb \d2
ld1 {\d2\wd}, [\s0], \strd
ld1 {\d3\wd}, [\s1], \strd
.endif
.ifnb \d4
ld1 {\d4\wd}, [\s0], \strd
.endif
.ifnb \d5
ld1 {\d5\wd}, [\s1], \strd
.endif
.ifnb \d6
ld1 {\d6\wd}, [\s0], \strd
.endif
.endm
.macro load_h s0, s1, strd, d0, d1, d2, d3, d4, d5, d6
load_slice \s0, \s1, \strd, .h, \d0, \d1, \d2, \d3, \d4, \d5, \d6
.endm
.macro load_s s0, s1, strd, d0, d1, d2, d3, d4, d5, d6
load_slice \s0, \s1, \strd, .s, \d0, \d1, \d2, \d3, \d4, \d5, \d6
.endm
.macro load_8b s0, s1, strd, d0, d1, d2, d3, d4, d5, d6
load_reg \s0, \s1, \strd, .8b, \d0, \d1, \d2, \d3, \d4, \d5, \d6
.endm
.macro load_16b s0, s1, strd, d0, d1, d2, d3, d4, d5, d6
load_reg \s0, \s1, \strd, .16b, \d0, \d1, \d2, \d3, \d4, \d5, \d6
.endm
.macro interleave_1 wd, r0, r1, r2, r3, r4
trn1 \r0\wd, \r0\wd, \r1\wd
trn1 \r1\wd, \r1\wd, \r2\wd
.ifnb \r3
trn1 \r2\wd, \r2\wd, \r3\wd
trn1 \r3\wd, \r3\wd, \r4\wd
.endif
.endm
.macro interleave_1_h r0, r1, r2, r3, r4
interleave_1 .4h, \r0, \r1, \r2, \r3, \r4
.endm
.macro interleave_1_s r0, r1, r2, r3, r4
interleave_1 .2s, \r0, \r1, \r2, \r3, \r4
.endm
.macro interleave_2 wd, r0, r1, r2, r3, r4, r5
trn1 \r0\wd, \r0\wd, \r2\wd
trn1 \r1\wd, \r1\wd, \r3\wd
trn1 \r2\wd, \r2\wd, \r4\wd
trn1 \r3\wd, \r3\wd, \r5\wd
.endm
.macro interleave_2_s r0, r1, r2, r3, r4, r5
interleave_2 .2s, \r0, \r1, \r2, \r3, \r4, \r5
.endm
.macro uxtl_b r0, r1, r2, r3, r4, r5, r6
uxtl \r0\().8h, \r0\().8b
uxtl \r1\().8h, \r1\().8b
.ifnb \r2
uxtl \r2\().8h, \r2\().8b
uxtl \r3\().8h, \r3\().8b
.endif
.ifnb \r4
uxtl \r4\().8h, \r4\().8b
.endif
.ifnb \r5
uxtl \r5\().8h, \r5\().8b
.endif
.ifnb \r6
uxtl \r6\().8h, \r6\().8b
.endif
.endm
.macro mul_mla_4tap d, s0, s1, s2, s3, wd
mul \d\wd, \s0\wd, v0.h[0]
mla \d\wd, \s1\wd, v0.h[1]
mla \d\wd, \s2\wd, v0.h[2]
mla \d\wd, \s3\wd, v0.h[3]
.endm
// Interleaving the mul/mla chains actually hurts performance
// significantly on Cortex-A53, so the mul/mla sequences are kept
// tightly chained like this.
.macro mul_mla_6tap_0_4h d0, s0, s1, s2, s3, s4, s5, s6, s7
mul \d0\().4h, \s1\().4h, v0.h[1]
mla \d0\().4h, \s2\().4h, v0.h[2]
mla \d0\().4h, \s3\().4h, v0.h[3]
mla \d0\().4h, \s4\().4h, v0.h[4]
mla \d0\().4h, \s5\().4h, v0.h[5]
mla \d0\().4h, \s6\().4h, v0.h[6]
.endm
.macro mul_mla_6tap_0 d0, s0, s1, s2, s3, s4, s5, s6, s7
mul \d0\().8h, \s1\().8h, v0.h[1]
mla \d0\().8h, \s2\().8h, v0.h[2]
mla \d0\().8h, \s3\().8h, v0.h[3]
mla \d0\().8h, \s4\().8h, v0.h[4]
mla \d0\().8h, \s5\().8h, v0.h[5]
mla \d0\().8h, \s6\().8h, v0.h[6]
.endm
.macro mul_mla_6tap_1 d0, d1, s0, s1, s2, s3, s4, s5, s6, s7, s8
mul \d0\().8h, \s1\().8h, v0.h[1]
mla \d0\().8h, \s2\().8h, v0.h[2]
mla \d0\().8h, \s3\().8h, v0.h[3]
mla \d0\().8h, \s4\().8h, v0.h[4]
mla \d0\().8h, \s5\().8h, v0.h[5]
mla \d0\().8h, \s6\().8h, v0.h[6]
mul \d1\().8h, \s2\().8h, v0.h[1]
mla \d1\().8h, \s3\().8h, v0.h[2]
mla \d1\().8h, \s4\().8h, v0.h[3]
mla \d1\().8h, \s5\().8h, v0.h[4]
mla \d1\().8h, \s6\().8h, v0.h[5]
mla \d1\().8h, \s7\().8h, v0.h[6]
.endm
.macro mul_mla_6tap_2 d0, d1, s0, s1, s2, s3, s4, s5, s6, s7, s8, s9
mul \d0\().8h, \s1\().8h, v0.h[1]
mla \d0\().8h, \s2\().8h, v0.h[2]
mla \d0\().8h, \s3\().8h, v0.h[3]
mla \d0\().8h, \s4\().8h, v0.h[4]
mla \d0\().8h, \s5\().8h, v0.h[5]
mla \d0\().8h, \s6\().8h, v0.h[6]
mul \d1\().8h, \s3\().8h, v0.h[1]
mla \d1\().8h, \s4\().8h, v0.h[2]
mla \d1\().8h, \s5\().8h, v0.h[3]
mla \d1\().8h, \s6\().8h, v0.h[4]
mla \d1\().8h, \s7\().8h, v0.h[5]
mla \d1\().8h, \s8\().8h, v0.h[6]
.endm
.macro mul_mla_8tap_0_4h d0, s0, s1, s2, s3, s4, s5, s6, s7
mul \d0\().4h, \s0\().4h, v0.h[0]
mla \d0\().4h, \s1\().4h, v0.h[1]
mla \d0\().4h, \s2\().4h, v0.h[2]
mla \d0\().4h, \s3\().4h, v0.h[3]
mla \d0\().4h, \s4\().4h, v0.h[4]
mla \d0\().4h, \s5\().4h, v0.h[5]
mla \d0\().4h, \s6\().4h, v0.h[6]
mla \d0\().4h, \s7\().4h, v0.h[7]
.endm
.macro mul_mla_8tap_0 d0, s0, s1, s2, s3, s4, s5, s6, s7
mul \d0\().8h, \s0\().8h, v0.h[0]
mla \d0\().8h, \s1\().8h, v0.h[1]
mla \d0\().8h, \s2\().8h, v0.h[2]
mla \d0\().8h, \s3\().8h, v0.h[3]
mla \d0\().8h, \s4\().8h, v0.h[4]
mla \d0\().8h, \s5\().8h, v0.h[5]
mla \d0\().8h, \s6\().8h, v0.h[6]
mla \d0\().8h, \s7\().8h, v0.h[7]
.endm
.macro mul_mla_8tap_1 d0, d1, s0, s1, s2, s3, s4, s5, s6, s7, s8
mul \d0\().8h, \s0\().8h, v0.h[0]
mla \d0\().8h, \s1\().8h, v0.h[1]
mla \d0\().8h, \s2\().8h, v0.h[2]
mla \d0\().8h, \s3\().8h, v0.h[3]
mla \d0\().8h, \s4\().8h, v0.h[4]
mla \d0\().8h, \s5\().8h, v0.h[5]
mla \d0\().8h, \s6\().8h, v0.h[6]
mla \d0\().8h, \s7\().8h, v0.h[7]
mul \d1\().8h, \s1\().8h, v0.h[0]
mla \d1\().8h, \s2\().8h, v0.h[1]
mla \d1\().8h, \s3\().8h, v0.h[2]
mla \d1\().8h, \s4\().8h, v0.h[3]
mla \d1\().8h, \s5\().8h, v0.h[4]
mla \d1\().8h, \s6\().8h, v0.h[5]
mla \d1\().8h, \s7\().8h, v0.h[6]
mla \d1\().8h, \s8\().8h, v0.h[7]
.endm
.macro mul_mla_8tap_2 d0, d1, s0, s1, s2, s3, s4, s5, s6, s7, s8, s9
mul \d0\().8h, \s0\().8h, v0.h[0]
mla \d0\().8h, \s1\().8h, v0.h[1]
mla \d0\().8h, \s2\().8h, v0.h[2]
mla \d0\().8h, \s3\().8h, v0.h[3]
mla \d0\().8h, \s4\().8h, v0.h[4]
mla \d0\().8h, \s5\().8h, v0.h[5]
mla \d0\().8h, \s6\().8h, v0.h[6]
mla \d0\().8h, \s7\().8h, v0.h[7]
mul \d1\().8h, \s2\().8h, v0.h[0]
mla \d1\().8h, \s3\().8h, v0.h[1]
mla \d1\().8h, \s4\().8h, v0.h[2]
mla \d1\().8h, \s5\().8h, v0.h[3]
mla \d1\().8h, \s6\().8h, v0.h[4]
mla \d1\().8h, \s7\().8h, v0.h[5]
mla \d1\().8h, \s8\().8h, v0.h[6]
mla \d1\().8h, \s9\().8h, v0.h[7]
.endm
.macro sqrshrun_b shift, r0, r1, r2, r3
sqrshrun \r0\().8b, \r0\().8h, #\shift
.ifnb \r1
sqrshrun \r1\().8b, \r1\().8h, #\shift
.endif
.ifnb \r2
sqrshrun \r2\().8b, \r2\().8h, #\shift
sqrshrun \r3\().8b, \r3\().8h, #\shift
.endif
.endm
.macro srshr_h shift, r0, r1, r2, r3
srshr \r0\().8h, \r0\().8h, #\shift
.ifnb \r1
srshr \r1\().8h, \r1\().8h, #\shift
.endif
.ifnb \r2
srshr \r2\().8h, \r2\().8h, #\shift
srshr \r3\().8h, \r3\().8h, #\shift
.endif
.endm
.macro st_h strd, reg, lanes
st1 {\reg\().h}[0], [x0], \strd
st1 {\reg\().h}[1], [x8], \strd
.if \lanes > 2
st1 {\reg\().h}[2], [x0], \strd
st1 {\reg\().h}[3], [x8], \strd
.endif
.endm
.macro st_s strd, r0, r1
st1 {\r0\().s}[0], [x0], \strd
st1 {\r0\().s}[1], [x8], \strd
.ifnb \r1
st1 {\r1\().s}[0], [x0], \strd
st1 {\r1\().s}[1], [x8], \strd
.endif
.endm
.macro st_d strd, r0, r1
st1 {\r0\().8b}, [x0], \strd
st1 {\r0\().d}[1], [x8], \strd
.ifnb \r1
st1 {\r1\().8b}, [x0], \strd
st1 {\r1\().d}[1], [x8], \strd
.endif
.endm
.macro shift_store_4 type, strd, r0, r1
.ifc \type, put
sqrshrun_b 6, \r0, \r1
st_s \strd, \r0, \r1
.else
srshr_h 2, \r0, \r1
st_d \strd, \r0, \r1
.endif
.endm
.macro st_reg strd, wd, r0, r1, r2, r3, r4, r5, r6, r7
st1 {\r0\wd}, [x0], \strd
st1 {\r1\wd}, [x8], \strd
.ifnb \r2
st1 {\r2\wd}, [x0], \strd
st1 {\r3\wd}, [x8], \strd
.endif
.ifnb \r4
st1 {\r4\wd}, [x0], \strd
st1 {\r5\wd}, [x8], \strd
st1 {\r6\wd}, [x0], \strd
st1 {\r7\wd}, [x8], \strd
.endif
.endm
.macro st_8b strd, r0, r1, r2, r3, r4, r5, r6, r7
st_reg \strd, .8b, \r0, \r1, \r2, \r3, \r4, \r5, \r6, \r7
.endm
.macro st_16b strd, r0, r1, r2, r3, r4, r5, r6, r7
st_reg \strd, .16b, \r0, \r1, \r2, \r3, \r4, \r5, \r6, \r7
.endm
.macro shift_store_8 type, strd, r0, r1, r2, r3
.ifc \type, put
sqrshrun_b 6, \r0, \r1, \r2, \r3
st_8b \strd, \r0, \r1, \r2, \r3
.else
srshr_h 2, \r0, \r1, \r2, \r3
st_16b \strd, \r0, \r1, \r2, \r3
.endif
.endm
.macro shift_store_16 type, strd, r0, r1, r2, r3
.ifc \type, put
sqrshrun \r0\().8b, \r0\().8h, #6
sqrshrun2 \r0\().16b, \r1\().8h, #6
sqrshrun \r2\().8b, \r2\().8h, #6
sqrshrun2 \r2\().16b, \r3\().8h, #6
st_16b \strd, \r0, \r2
.else
srshr_h 2, \r0, \r1, \r2, \r3
st1 {\r0\().8h, \r1\().8h}, [x0], \strd
st1 {\r2\().8h, \r3\().8h}, [x8], \strd
.endif
.endm
.macro make_8tap_fn op, type, type_h, type_v, taps
function \op\()_8tap_\type\()_8bpc_neon, export=1
mov x8, \type_h
mov x9, \type_v
b \op\()_\taps\()_neon
endfunc
.endm
// No spaces in these expressions, due to gas-preprocessor.
#define REGULAR ((0*15<<7)|3*15)
#define SMOOTH ((1*15<<7)|4*15)
#define SHARP ((2*15<<7)|3*15)
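// The subpel position and filter type are packed into one register below:
// mx*0x4081 = mx | (mx << 7) | (mx << 14) broadcasts the 7-bit position
// into three fields, and adding REGULAR/SMOOTH/SHARP biases bits 0..6 and
// bits 7..13 by 15*filter_set each, turning them into ready-made row
// indices into X(mc_subpel_filters) (one field for the > 4 wide/tall
// filter class, one for the <= 4 class), while bits 14..20 keep the raw
// position for the "is any filtering needed?" tests. (This reading is
// based on how the fields are consumed below; the mc_subpel_filters
// layout itself is defined elsewhere.)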
.macro filter_fn type, dst, d_strd, src, s_strd, w, h, mx, xmx, my, xmy, ds2, sr2, shift_hv, taps
function \type\()_\taps\()_neon
mov w10, #0x4081 // (1 << 14) | (1 << 7) | (1 << 0)
mul \mx, \mx, w10
mul \my, \my, w10
add \mx, \mx, w8 // mx, 8tap_h, 4tap_h
add \my, \my, w9 // my, 8tap_v, 4tap_v
.ifc \type, prep
uxtw \d_strd, \w
lsl \d_strd, \d_strd, #1
.endif
clz w8, \w
tst \mx, #(0x7f << 14)
sub w8, w8, #24
movrel x10, X(mc_subpel_filters), -8
b.ne L(\type\()_\taps\()_h)
tst \my, #(0x7f << 14)
b.ne L(\type\()_\taps\()_v)
b \type\()_neon
L(\type\()_\taps\()_h):
cmp \w, #4
ubfx w9, \mx, #7, #7
and \mx, \mx, #0x7f
b.le 4f
mov \mx, w9
4:
tst \my, #(0x7f << 14)
add \xmx, x10, \mx, uxtw #3
b.ne L(\type\()_\taps\()_hv)
movrel x9, \type\()_\taps\()_h_tbl
ldrsw x8, [x9, x8, lsl #2]
add x9, x9, x8
br x9
20: // 2xN h
AARCH64_VALID_JUMP_TARGET
.ifc \type, put
ldur s0, [\xmx, #2]
sub \src, \src, #1
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \d_strd, \d_strd, #1
lsl \s_strd, \s_strd, #1
sxtl v0.8h, v0.8b
2:
ld1 {v4.8b}, [\src], \s_strd
ld1 {v6.8b}, [\sr2], \s_strd
uxtl v4.8h, v4.8b
uxtl v6.8h, v6.8b
ext v5.16b, v4.16b, v4.16b, #2
ext v7.16b, v6.16b, v6.16b, #2
subs \h, \h, #2
trn1 v3.2s, v4.2s, v6.2s
trn2 v6.2s, v4.2s, v6.2s
trn1 v4.2s, v5.2s, v7.2s
trn2 v7.2s, v5.2s, v7.2s
mul v3.4h, v3.4h, v0.h[0]
mla v3.4h, v4.4h, v0.h[1]
mla v3.4h, v6.4h, v0.h[2]
mla v3.4h, v7.4h, v0.h[3]
srshr v3.4h, v3.4h, #2
sqrshrun v3.8b, v3.8h, #4
st1 {v3.h}[0], [\dst], \d_strd
st1 {v3.h}[1], [\ds2], \d_strd
b.gt 2b
ret
.endif
40: // 4xN h
AARCH64_VALID_JUMP_TARGET
ldur s0, [\xmx, #2]
sub \src, \src, #1
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \d_strd, \d_strd, #1
lsl \s_strd, \s_strd, #1
sxtl v0.8h, v0.8b
4:
ld1 {v16.8b}, [\src], \s_strd
ld1 {v20.8b}, [\sr2], \s_strd
uxtl v16.8h, v16.8b
uxtl v20.8h, v20.8b
ext v17.16b, v16.16b, v16.16b, #2
ext v18.16b, v16.16b, v16.16b, #4
ext v19.16b, v16.16b, v16.16b, #6
ext v21.16b, v20.16b, v20.16b, #2
ext v22.16b, v20.16b, v20.16b, #4
ext v23.16b, v20.16b, v20.16b, #6
subs \h, \h, #2
mul v16.4h, v16.4h, v0.h[0]
mla v16.4h, v17.4h, v0.h[1]
mla v16.4h, v18.4h, v0.h[2]
mla v16.4h, v19.4h, v0.h[3]
mul v20.4h, v20.4h, v0.h[0]
mla v20.4h, v21.4h, v0.h[1]
mla v20.4h, v22.4h, v0.h[2]
mla v20.4h, v23.4h, v0.h[3]
srshr v16.4h, v16.4h, #2
srshr v20.4h, v20.4h, #2
.ifc \type, put
sqrshrun v16.8b, v16.8h, #4
sqrshrun v20.8b, v20.8h, #4
str s16, [\dst]
str s20, [\ds2]
add \dst, \dst, \d_strd
add \ds2, \ds2, \d_strd
.else
st1 {v16.4h}, [\dst], \d_strd
st1 {v20.4h}, [\ds2], \d_strd
.endif
b.gt 4b
ret
80: // 8xN h
AARCH64_VALID_JUMP_TARGET
ld1 {v0.8b}, [\xmx]
.ifc \taps, 6tap
sub \src, \src, #2
.else
sub \src, \src, #3
.endif
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \d_strd, \d_strd, #1
lsl \s_strd, \s_strd, #1
sxtl v0.8h, v0.8b
8:
ld1 {v16.8b, v17.8b}, [\src], \s_strd
ld1 {v20.8b, v21.8b}, [\sr2], \s_strd
uxtl v16.8h, v16.8b
uxtl v17.8h, v17.8b
uxtl v20.8h, v20.8b
uxtl v21.8h, v21.8b
.ifc \taps, 6tap
mul v18.8h, v16.8h, v0.h[1]
mul v22.8h, v20.8h, v0.h[1]
.irpc i, 23456
ext v19.16b, v16.16b, v17.16b, #(2*\i-2)
ext v23.16b, v20.16b, v21.16b, #(2*\i-2)
mla v18.8h, v19.8h, v0.h[\i]
mla v22.8h, v23.8h, v0.h[\i]
.endr
.else // 8tap
mul v18.8h, v16.8h, v0.h[0]
mul v22.8h, v20.8h, v0.h[0]
.irpc i, 1234567
ext v19.16b, v16.16b, v17.16b, #(2*\i)
ext v23.16b, v20.16b, v21.16b, #(2*\i)
mla v18.8h, v19.8h, v0.h[\i]
mla v22.8h, v23.8h, v0.h[\i]
.endr
.endif
subs \h, \h, #2
srshr v18.8h, v18.8h, #2
srshr v22.8h, v22.8h, #2
.ifc \type, put
sqrshrun v18.8b, v18.8h, #4
sqrshrun v22.8b, v22.8h, #4
st1 {v18.8b}, [\dst], \d_strd
st1 {v22.8b}, [\ds2], \d_strd
.else
st1 {v18.8h}, [\dst], \d_strd
st1 {v22.8h}, [\ds2], \d_strd
.endif
b.gt 8b
ret
160:
320:
640:
1280: // 16xN, 32xN, ... h
AARCH64_VALID_JUMP_TARGET
ld1 {v0.8b}, [\xmx]
.ifc \taps, 6tap
sub \src, \src, #2
.else
sub \src, \src, #3
.endif
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
sxtl v0.8h, v0.8b
sub \s_strd, \s_strd, \w, uxtw
sub \s_strd, \s_strd, #8
.ifc \type, put
lsl \d_strd, \d_strd, #1
sub \d_strd, \d_strd, \w, uxtw
.endif
161:
ld1 {v16.8b, v17.8b, v18.8b}, [\src], #24
ld1 {v20.8b, v21.8b, v22.8b}, [\sr2], #24
mov \mx, \w
uxtl v16.8h, v16.8b
uxtl v17.8h, v17.8b
uxtl v18.8h, v18.8b
uxtl v20.8h, v20.8b
uxtl v21.8h, v21.8b
uxtl v22.8h, v22.8b
16:
.ifc \taps, 6tap
mul v24.8h, v16.8h, v0.h[1]
mul v25.8h, v17.8h, v0.h[1]
mul v26.8h, v20.8h, v0.h[1]
mul v27.8h, v21.8h, v0.h[1]
.irpc i, 23456
ext v28.16b, v16.16b, v17.16b, #(2*\i-2)
ext v29.16b, v17.16b, v18.16b, #(2*\i-2)
ext v30.16b, v20.16b, v21.16b, #(2*\i-2)
ext v31.16b, v21.16b, v22.16b, #(2*\i-2)
mla v24.8h, v28.8h, v0.h[\i]
mla v25.8h, v29.8h, v0.h[\i]
mla v26.8h, v30.8h, v0.h[\i]
mla v27.8h, v31.8h, v0.h[\i]
.endr
.else // 8tap
mul v24.8h, v16.8h, v0.h[0]
mul v25.8h, v17.8h, v0.h[0]
mul v26.8h, v20.8h, v0.h[0]
mul v27.8h, v21.8h, v0.h[0]
.irpc i, 1234567
ext v28.16b, v16.16b, v17.16b, #(2*\i)
ext v29.16b, v17.16b, v18.16b, #(2*\i)
ext v30.16b, v20.16b, v21.16b, #(2*\i)
ext v31.16b, v21.16b, v22.16b, #(2*\i)
mla v24.8h, v28.8h, v0.h[\i]
mla v25.8h, v29.8h, v0.h[\i]
mla v26.8h, v30.8h, v0.h[\i]
mla v27.8h, v31.8h, v0.h[\i]
.endr
.endif
srshr v24.8h, v24.8h, #2
srshr v25.8h, v25.8h, #2
srshr v26.8h, v26.8h, #2
srshr v27.8h, v27.8h, #2
subs \mx, \mx, #16
.ifc \type, put
sqrshrun v24.8b, v24.8h, #4
sqrshrun2 v24.16b, v25.8h, #4
sqrshrun v26.8b, v26.8h, #4
sqrshrun2 v26.16b, v27.8h, #4
st1 {v24.16b}, [\dst], #16
st1 {v26.16b}, [\ds2], #16
.else
st1 {v24.8h, v25.8h}, [\dst], #32
st1 {v26.8h, v27.8h}, [\ds2], #32
.endif
b.le 9f
mov v16.16b, v18.16b
mov v20.16b, v22.16b
ld1 {v17.8b, v18.8b}, [\src], #16
ld1 {v21.8b, v22.8b}, [\sr2], #16
uxtl v17.8h, v17.8b
uxtl v18.8h, v18.8b
uxtl v21.8h, v21.8b
uxtl v22.8h, v22.8b
b 16b
9:
add \dst, \dst, \d_strd
add \ds2, \ds2, \d_strd
add \src, \src, \s_strd
add \sr2, \sr2, \s_strd
subs \h, \h, #2
b.gt 161b
ret
endfunc
jumptable \type\()_\taps\()_h_tbl
.word 1280b - \type\()_\taps\()_h_tbl
.word 640b - \type\()_\taps\()_h_tbl
.word 320b - \type\()_\taps\()_h_tbl
.word 160b - \type\()_\taps\()_h_tbl
.word 80b - \type\()_\taps\()_h_tbl
.word 40b - \type\()_\taps\()_h_tbl
.word 20b - \type\()_\taps\()_h_tbl
endjumptable
function L(\type\()_\taps\()_v)
cmp \h, #4
ubfx w9, \my, #7, #7
and \my, \my, #0x7f
b.le 4f
mov \my, w9
4:
add \xmy, x10, \my, uxtw #3
movrel x9, \type\()_\taps\()_v_tbl
ldrsw x8, [x9, x8, lsl #2]
add x9, x9, x8
br x9
20: // 2xN v
AARCH64_VALID_JUMP_TARGET
.ifc \type, put
b.gt 28f
cmp \h, #2
ldur s0, [\xmy, #2]
sub \src, \src, \s_strd
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
sxtl v0.8h, v0.8b
// 2x2 v
load_h \src, \sr2, \s_strd, v1, v2, v3, v4, v5
interleave_1_h v1, v2, v3, v4, v5
b.gt 24f
uxtl_b v1, v2, v3, v4
mul_mla_4tap v6, v1, v2, v3, v4, .4h
sqrshrun_b 6, v6
st_h \d_strd, v6, 2
ret
24: // 2x4 v
load_h \sr2, \src, \s_strd, v6, v7
interleave_1_h v5, v6, v7
interleave_2_s v1, v2, v3, v4, v5, v6
uxtl_b v1, v2, v3, v4
mul_mla_4tap v6, v1, v2, v3, v4, .8h
sqrshrun_b 6, v6
st_h \d_strd, v6, 4
ret
28: // 2x6, 2x8, 2x12, 2x16 v
ld1 {v0.8b}, [\xmy]
sub \sr2, \src, \s_strd, lsl #1
add \ds2, \dst, \d_strd
sub \src, \sr2, \s_strd
lsl \d_strd, \d_strd, #1
lsl \s_strd, \s_strd, #1
sxtl v0.8h, v0.8b
load_h \src, \sr2, \s_strd, v1, v2, v3, v4, v5, v6, v7
interleave_1_h v1, v2, v3, v4, v5
interleave_1_h v5, v6, v7
interleave_2_s v1, v2, v3, v4, v5, v6
uxtl_b v1, v2, v3, v4
216:
subs \h, \h, #4
load_h \sr2, \src, \s_strd, v16, v17, v18, v19
interleave_1_h v7, v16, v17, v18, v19
interleave_2_s v5, v6, v7, v16, v17, v18
uxtl_b v5, v6, v7, v16
mul_mla_\taps\()_0 v30, v1, v2, v3, v4, v5, v6, v7, v16
sqrshrun_b 6, v30
st_h \d_strd, v30, 4
b.le 0f
cmp \h, #2
mov v1.16b, v5.16b
mov v2.16b, v6.16b
mov v3.16b, v7.16b
mov v4.16b, v16.16b
mov v5.16b, v17.16b
mov v6.16b, v18.16b
mov v7.16b, v19.16b
b.eq 26f
b 216b
26:
load_h \sr2, \src, \s_strd, v16, v17
interleave_1_h v7, v16, v17
uxtl_b v5, v6, v7, v16
mul_mla_\taps\()_0_4h v30, v1, v2, v3, v4, v5, v6, v7, v16
sqrshrun_b 6, v30
st_h \d_strd, v30, 2
0:
ret
.endif
40:
AARCH64_VALID_JUMP_TARGET
b.gt 480f
// 4x2, 4x4 v
cmp \h, #2
ldur s0, [\xmy, #2]
sub \src, \src, \s_strd
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
sxtl v0.8h, v0.8b
load_s \src, \sr2, \s_strd, v1, v2, v3, v4, v5
interleave_1_s v1, v2, v3, v4, v5
uxtl_b v1, v2, v3, v4
mul_mla_4tap v6, v1, v2, v3, v4, .8h
shift_store_4 \type, \d_strd, v6
b.le 0f
load_s \sr2, \src, \s_strd, v6, v7
interleave_1_s v5, v6, v7
uxtl_b v5, v6
mul_mla_4tap v7, v3, v4, v5, v6, .8h
shift_store_4 \type, \d_strd, v7
0:
ret
480: // 4x6, 4x8, 4x12, 4x16 v
ld1 {v0.8b}, [\xmy]
sub \sr2, \src, \s_strd, lsl #1
add \ds2, \dst, \d_strd
sub \src, \sr2, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
sxtl v0.8h, v0.8b
load_s \src, \sr2, \s_strd, v16, v17, v18, v19, v20, v21, v22
interleave_1_s v16, v17, v18
interleave_1_s v18, v19, v20, v21, v22
uxtl_b v16, v17
uxtl_b v18, v19, v20, v21
48:
subs \h, \h, #4
load_s \sr2, \src, \s_strd, v23, v24, v25, v26
interleave_1_s v22, v23, v24, v25, v26
uxtl_b v22, v23, v24, v25
mul_mla_\taps\()_2 v1, v2, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25
shift_store_4 \type, \d_strd, v1, v2
b.le 0f
load_s \sr2, \src, \s_strd, v27, v16
subs \h, \h, #2
interleave_1_s v26, v27, v16
uxtl_b v26, v27
mul_mla_\taps\()_0 v1, v20, v21, v22, v23, v24, v25, v26, v27
shift_store_4 \type, \d_strd, v1
b.le 0f
load_s \sr2, \src, \s_strd, v17, v18
subs \h, \h, #2
interleave_1_s v16, v17, v18
uxtl_b v16, v17
mul_mla_\taps\()_0 v2, v22, v23, v24, v25, v26, v27, v16, v17
shift_store_4 \type, \d_strd, v2
b.le 0f
subs \h, \h, #4
load_s \sr2, \src, \s_strd, v19, v20, v21, v22
interleave_1_s v18, v19, v20, v21, v22
uxtl_b v18, v19, v20, v21
mul_mla_\taps\()_2 v1, v2, v24, v25, v26, v27, v16, v17, v18, v19, v20, v21
shift_store_4 \type, \d_strd, v1, v2
b.gt 48b
0:
ret
80:
AARCH64_VALID_JUMP_TARGET
b.gt 880f
// 8x2, 8x4 v
cmp \h, #2
ldur s0, [\xmy, #2]
sub \src, \src, \s_strd
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
sxtl v0.8h, v0.8b
load_8b \src, \sr2, \s_strd, v1, v2, v3, v4, v5
uxtl_b v1, v2, v3, v4, v5
mul_mla_4tap v6, v1, v2, v3, v4, .8h
mul_mla_4tap v7, v2, v3, v4, v5, .8h
shift_store_8 \type, \d_strd, v6, v7
b.le 0f
load_8b \sr2, \src, \s_strd, v6, v7
uxtl_b v6, v7
mul_mla_4tap v1, v3, v4, v5, v6, .8h
mul_mla_4tap v2, v4, v5, v6, v7, .8h
shift_store_8 \type, \d_strd, v1, v2
0:
ret
880: // 8x6, 8x8, 8x16, 8x32 v
1680: // 16x8, 16x16, ...
320: // 32x8, 32x16, ...
640:
1280:
AARCH64_VALID_JUMP_TARGET
ld1 {v0.8b}, [\xmy]
sub \src, \src, \s_strd
sub \src, \src, \s_strd, lsl #1
sxtl v0.8h, v0.8b
mov \my, \h
168:
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
load_8b \src, \sr2, \s_strd, v16, v17, v18, v19, v20, v21, v22
uxtl_b v16, v17, v18, v19, v20, v21, v22
88:
subs \h, \h, #2
load_8b \sr2, \src, \s_strd, v23, v24
uxtl_b v23, v24
mul_mla_\taps\()_1 v1, v2, v16, v17, v18, v19, v20, v21, v22, v23, v24
shift_store_8 \type, \d_strd, v1, v2
b.le 9f
subs \h, \h, #2
load_8b \sr2, \src, \s_strd, v25, v26
uxtl_b v25, v26
mul_mla_\taps\()_1 v3, v4, v18, v19, v20, v21, v22, v23, v24, v25, v26
shift_store_8 \type, \d_strd, v3, v4
b.le 9f
subs \h, \h, #2
load_8b \sr2, \src, \s_strd, v27, v16
uxtl_b v27, v16
mul_mla_\taps\()_1 v1, v2, v20, v21, v22, v23, v24, v25, v26, v27, v16
shift_store_8 \type, \d_strd, v1, v2
b.le 9f
subs \h, \h, #2
load_8b \sr2, \src, \s_strd, v17, v18
uxtl_b v17, v18
mul_mla_\taps\()_1 v3, v4, v22, v23, v24, v25, v26, v27, v16, v17, v18
shift_store_8 \type, \d_strd, v3, v4
b.le 9f
subs \h, \h, #4
load_8b \sr2, \src, \s_strd, v19, v20, v21, v22
uxtl_b v19, v20, v21, v22
mul_mla_\taps\()_1 v1, v2, v24, v25, v26, v27, v16, v17, v18, v19, v20
mul_mla_\taps\()_1 v3, v4, v26, v27, v16, v17, v18, v19, v20, v21, v22
shift_store_8 \type, \d_strd, v1, v2, v3, v4
b.gt 88b
9:
subs \w, \w, #8
b.le 0f
asr \s_strd, \s_strd, #1
asr \d_strd, \d_strd, #1
msub \src, \s_strd, \xmy, \src
msub \dst, \d_strd, \xmy, \dst
sub \src, \src, \s_strd, lsl #3
mov \h, \my
add \src, \src, #8
.ifc \type, put
add \dst, \dst, #8
.else
add \dst, \dst, #16
.endif
b 168b
0:
ret
160:
AARCH64_VALID_JUMP_TARGET
b.gt 1680b
// 16x2, 16x4 v
ldur s0, [\xmy, #2]
sub \src, \src, \s_strd
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
sxtl v0.8h, v0.8b
cmp \h, #2
load_16b \src, \sr2, \s_strd, v1, v2, v3, v4, v5
uxtl v16.8h, v1.8b
uxtl v17.8h, v2.8b
uxtl v18.8h, v3.8b
uxtl v19.8h, v4.8b
uxtl v20.8h, v5.8b
uxtl2 v23.8h, v1.16b
uxtl2 v24.8h, v2.16b
uxtl2 v25.8h, v3.16b
uxtl2 v26.8h, v4.16b
uxtl2 v27.8h, v5.16b
mul_mla_4tap v1, v16, v17, v18, v19, .8h
mul_mla_4tap v16, v17, v18, v19, v20, .8h
mul_mla_4tap v2, v23, v24, v25, v26, .8h
mul_mla_4tap v17, v24, v25, v26, v27, .8h
shift_store_16 \type, \d_strd, v1, v2, v16, v17
b.le 0f
load_16b \sr2, \src, \s_strd, v6, v7
uxtl v21.8h, v6.8b
uxtl v22.8h, v7.8b
uxtl2 v28.8h, v6.16b
uxtl2 v29.8h, v7.16b
mul_mla_4tap v1, v18, v19, v20, v21, .8h
mul_mla_4tap v3, v19, v20, v21, v22, .8h
mul_mla_4tap v2, v25, v26, v27, v28, .8h
mul_mla_4tap v4, v26, v27, v28, v29, .8h
shift_store_16 \type, \d_strd, v1, v2, v3, v4
0:
ret
endfunc
jumptable \type\()_\taps\()_v_tbl
.word 1280b - \type\()_\taps\()_v_tbl
.word 640b - \type\()_\taps\()_v_tbl
.word 320b - \type\()_\taps\()_v_tbl
.word 160b - \type\()_\taps\()_v_tbl
.word 80b - \type\()_\taps\()_v_tbl
.word 40b - \type\()_\taps\()_v_tbl
.word 20b - \type\()_\taps\()_v_tbl
endjumptable
function L(\type\()_\taps\()_hv)
cmp \h, #4
ubfx w9, \my, #7, #7
and \my, \my, #0x7f
b.le 4f
mov \my, w9
4:
add \xmy, x10, \my, uxtw #3
movrel x9, \type\()_\taps\()_hv_tbl
ldrsw x8, [x9, x8, lsl #2]
add x9, x9, x8
br x9
20:
AARCH64_VALID_JUMP_TARGET
.ifc \type, put
ldur s0, [\xmx, #2]
b.gt 280f
ldur s1, [\xmy, #2]
// 2x2, 2x4 hv
sub \sr2, \src, #1
sub \src, \sr2, \s_strd
add \ds2, \dst, \d_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
sxtl v0.8h, v0.8b
sxtl v1.8h, v1.8b
mov x15, x30
ld1 {v28.8b}, [\src], \s_strd
uxtl v28.8h, v28.8b
ext v29.16b, v28.16b, v28.16b, #2
mul v28.4h, v28.4h, v0.4h
mul v29.4h, v29.4h, v0.4h
addp v28.4h, v28.4h, v29.4h
addp v16.4h, v28.4h, v28.4h
srshr v16.4h, v16.4h, #2
bl L(\type\()_\taps\()_filter_2)
trn1 v16.2s, v16.2s, v28.2s
mov v17.8b, v28.8b
2:
bl L(\type\()_\taps\()_filter_2)
ext v18.8b, v17.8b, v28.8b, #4
smull v2.4s, v16.4h, v1.h[0]
smlal v2.4s, v17.4h, v1.h[1]
smlal v2.4s, v18.4h, v1.h[2]
smlal v2.4s, v28.4h, v1.h[3]
sqrshrn v2.4h, v2.4s, #\shift_hv
sqxtun v2.8b, v2.8h
subs \h, \h, #2
st1 {v2.h}[0], [\dst], \d_strd
st1 {v2.h}[1], [\ds2], \d_strd
b.le 0f
mov v16.8b, v18.8b
mov v17.8b, v28.8b
b 2b
280: // 2x8, 2x16, 2x32 hv
ld1 {v1.8b}, [\xmy]
sub \src, \src, #1
sub \sr2, \src, \s_strd, lsl #1
sub \src, \sr2, \s_strd
add \ds2, \dst, \d_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
sxtl v0.8h, v0.8b
sxtl v1.8h, v1.8b
mov x15, x30
ld1 {v28.8b}, [\src], \s_strd
uxtl v28.8h, v28.8b
ext v29.16b, v28.16b, v28.16b, #2
mul v28.4h, v28.4h, v0.4h
mul v29.4h, v29.4h, v0.4h
addp v28.4h, v28.4h, v29.4h
addp v16.4h, v28.4h, v28.4h
srshr v16.4h, v16.4h, #2
bl L(\type\()_\taps\()_filter_2)
trn1 v16.2s, v16.2s, v28.2s
mov v17.8b, v28.8b
bl L(\type\()_\taps\()_filter_2)
ext v18.8b, v17.8b, v28.8b, #4
mov v19.8b, v28.8b
bl L(\type\()_\taps\()_filter_2)
ext v20.8b, v19.8b, v28.8b, #4
mov v21.8b, v28.8b
28:
bl L(\type\()_\taps\()_filter_2)
ext v22.8b, v21.8b, v28.8b, #4
.ifc \taps, 6tap
smull v2.4s, v17.4h, v1.h[1]
smlal v2.4s, v18.4h, v1.h[2]
smlal v2.4s, v19.4h, v1.h[3]
smlal v2.4s, v20.4h, v1.h[4]
smlal v2.4s, v21.4h, v1.h[5]
smlal v2.4s, v22.4h, v1.h[6]
.else // 8tap
smull v2.4s, v16.4h, v1.h[0]
smlal v2.4s, v17.4h, v1.h[1]
smlal v2.4s, v18.4h, v1.h[2]
smlal v2.4s, v19.4h, v1.h[3]
smlal v2.4s, v20.4h, v1.h[4]
smlal v2.4s, v21.4h, v1.h[5]
smlal v2.4s, v22.4h, v1.h[6]
smlal v2.4s, v28.4h, v1.h[7]
.endif
sqrshrn v2.4h, v2.4s, #\shift_hv
sqxtun v2.8b, v2.8h
subs \h, \h, #2
st1 {v2.h}[0], [\dst], \d_strd
st1 {v2.h}[1], [\ds2], \d_strd
b.le 0f
mov v16.8b, v18.8b
mov v17.8b, v19.8b
mov v18.8b, v20.8b
mov v19.8b, v21.8b
mov v20.8b, v22.8b
mov v21.8b, v28.8b
b 28b
0:
ret x15
L(\type\()_\taps\()_filter_2):
ld1 {v28.8b}, [\sr2], \s_strd
ld1 {v30.8b}, [\src], \s_strd
uxtl v28.8h, v28.8b
uxtl v30.8h, v30.8b
ext v29.16b, v28.16b, v28.16b, #2
ext v31.16b, v30.16b, v30.16b, #2
trn1 v27.2s, v28.2s, v30.2s
trn2 v30.2s, v28.2s, v30.2s
trn1 v28.2s, v29.2s, v31.2s
trn2 v31.2s, v29.2s, v31.2s
mul v27.4h, v27.4h, v0.h[0]
mla v27.4h, v28.4h, v0.h[1]
mla v27.4h, v30.4h, v0.h[2]
mla v27.4h, v31.4h, v0.h[3]
srshr v28.4h, v27.4h, #2
ret
.endif
40:
AARCH64_VALID_JUMP_TARGET
ldur s0, [\xmx, #2]
b.gt 480f
ldur s1, [\xmy, #2]
sub \sr2, \src, #1
sub \src, \sr2, \s_strd
add \ds2, \dst, \d_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
sxtl v0.8h, v0.8b
sxtl v1.8h, v1.8b
mov x15, x30
// 4x2, 4x4 hv
ld1 {v26.8b}, [\src], \s_strd
uxtl v26.8h, v26.8b
ext v28.16b, v26.16b, v26.16b, #2
ext v29.16b, v26.16b, v26.16b, #4
ext v30.16b, v26.16b, v26.16b, #6
mul v31.4h, v26.4h, v0.h[0]
mla v31.4h, v28.4h, v0.h[1]
mla v31.4h, v29.4h, v0.h[2]
mla v31.4h, v30.4h, v0.h[3]
srshr v16.4h, v31.4h, #2
bl L(\type\()_\taps\()_filter_4)
mov v17.8b, v28.8b
mov v18.8b, v29.8b
4:
bl L(\type\()_\taps\()_filter_4)
// Interleaving the mul/mla chains actually hurts performance
// significantly on Cortex A53, so the mul/mla are kept tightly
// chained like this.
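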
smull v2.4s, v16.4h, v1.h[0]
smlal v2.4s, v17.4h, v1.h[1]
smlal v2.4s, v18.4h, v1.h[2]
smlal v2.4s, v28.4h, v1.h[3]
smull v3.4s, v17.4h, v1.h[0]
smlal v3.4s, v18.4h, v1.h[1]
smlal v3.4s, v28.4h, v1.h[2]
smlal v3.4s, v29.4h, v1.h[3]
sqrshrn v2.4h, v2.4s, #\shift_hv
sqrshrn v3.4h, v3.4s, #\shift_hv
subs \h, \h, #2
.ifc \type, put
sqxtun v2.8b, v2.8h
sqxtun v3.8b, v3.8h
str s2, [\dst]
str s3, [\ds2]
add \dst, \dst, \d_strd
add \ds2, \ds2, \d_strd
.else
st1 {v2.4h}, [\dst], \d_strd
st1 {v3.4h}, [\ds2], \d_strd
.endif
b.le 0f
mov v16.8b, v18.8b
mov v17.8b, v28.8b
mov v18.8b, v29.8b
b 4b
480: // 4x8, 4x16, 4x32 hv
ld1 {v1.8b}, [\xmy]
sub \src, \src, #1
.ifc \taps, 6tap
sub \sr2, \src, \s_strd
sub \src, \src, \s_strd, lsl #1
.else
sub \sr2, \src, \s_strd, lsl #1
sub \src, \sr2, \s_strd
.endif
add \ds2, \dst, \d_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
sxtl v0.8h, v0.8b
sxtl v1.8h, v1.8b
mov x15, x30
ld1 {v26.8b}, [\src], \s_strd
uxtl v26.8h, v26.8b
ext v28.16b, v26.16b, v26.16b, #2
ext v29.16b, v26.16b, v26.16b, #4
ext v30.16b, v26.16b, v26.16b, #6
mul v31.4h, v26.4h, v0.h[0]
mla v31.4h, v28.4h, v0.h[1]
mla v31.4h, v29.4h, v0.h[2]
mla v31.4h, v30.4h, v0.h[3]
.ifc \taps, 6tap
srshr v18.4h, v31.4h, #2
.else
srshr v16.4h, v31.4h, #2
bl L(\type\()_\taps\()_filter_4)
mov v17.8b, v28.8b
mov v18.8b, v29.8b
.endif
bl L(\type\()_\taps\()_filter_4)
mov v19.8b, v28.8b
mov v20.8b, v29.8b
bl L(\type\()_\taps\()_filter_4)
mov v21.8b, v28.8b
mov v22.8b, v29.8b
48:
bl L(\type\()_\taps\()_filter_4)
.ifc \taps, 6tap
smull v2.4s, v18.4h, v1.h[1]
smlal v2.4s, v19.4h, v1.h[2]
smlal v2.4s, v20.4h, v1.h[3]
smlal v2.4s, v21.4h, v1.h[4]
smlal v2.4s, v22.4h, v1.h[5]
smlal v2.4s, v28.4h, v1.h[6]
smull v3.4s, v19.4h, v1.h[1]
smlal v3.4s, v20.4h, v1.h[2]
smlal v3.4s, v21.4h, v1.h[3]
smlal v3.4s, v22.4h, v1.h[4]
smlal v3.4s, v28.4h, v1.h[5]
smlal v3.4s, v29.4h, v1.h[6]
.else // 8tap
smull v2.4s, v16.4h, v1.h[0]
smlal v2.4s, v17.4h, v1.h[1]
smlal v2.4s, v18.4h, v1.h[2]
smlal v2.4s, v19.4h, v1.h[3]
smlal v2.4s, v20.4h, v1.h[4]
smlal v2.4s, v21.4h, v1.h[5]
smlal v2.4s, v22.4h, v1.h[6]
smlal v2.4s, v28.4h, v1.h[7]
smull v3.4s, v17.4h, v1.h[0]
smlal v3.4s, v18.4h, v1.h[1]
smlal v3.4s, v19.4h, v1.h[2]
smlal v3.4s, v20.4h, v1.h[3]
smlal v3.4s, v21.4h, v1.h[4]
smlal v3.4s, v22.4h, v1.h[5]
smlal v3.4s, v28.4h, v1.h[6]
smlal v3.4s, v29.4h, v1.h[7]
.endif
sqrshrn v2.4h, v2.4s, #\shift_hv
sqrshrn v3.4h, v3.4s, #\shift_hv
subs \h, \h, #2
.ifc \type, put
sqxtun v2.8b, v2.8h
sqxtun v3.8b, v3.8h
str s2, [\dst]
str s3, [\ds2]
add \dst, \dst, \d_strd
add \ds2, \ds2, \d_strd
.else
st1 {v2.4h}, [\dst], \d_strd
st1 {v3.4h}, [\ds2], \d_strd
.endif
b.le 0f
.ifc \taps, 8tap
mov v16.8b, v18.8b
mov v17.8b, v19.8b
.endif
mov v18.8b, v20.8b
mov v19.8b, v21.8b
mov v20.8b, v22.8b
mov v21.8b, v28.8b
mov v22.8b, v29.8b
b 48b
0:
ret x15
L(\type\()_\taps\()_filter_4):
ld1 {v26.8b}, [\sr2], \s_strd
ld1 {v27.8b}, [\src], \s_strd
uxtl v26.8h, v26.8b
uxtl v27.8h, v27.8b
ext v28.16b, v26.16b, v26.16b, #2
ext v29.16b, v26.16b, v26.16b, #4
ext v30.16b, v26.16b, v26.16b, #6
mul v31.4h, v26.4h, v0.h[0]
mla v31.4h, v28.4h, v0.h[1]
mla v31.4h, v29.4h, v0.h[2]
mla v31.4h, v30.4h, v0.h[3]
ext v28.16b, v27.16b, v27.16b, #2
ext v29.16b, v27.16b, v27.16b, #4
ext v30.16b, v27.16b, v27.16b, #6
mul v27.4h, v27.4h, v0.h[0]
mla v27.4h, v28.4h, v0.h[1]
mla v27.4h, v29.4h, v0.h[2]
mla v27.4h, v30.4h, v0.h[3]
srshr v28.4h, v31.4h, #2
srshr v29.4h, v27.4h, #2
ret
80:
160:
320:
AARCH64_VALID_JUMP_TARGET
b.gt 880f
ld1 {v0.8b}, [\xmx]
ldur s1, [\xmy, #2]
.ifc \taps, 6tap
sub \src, \src, #2
.else
sub \src, \src, #3
.endif
sub \src, \src, \s_strd
sxtl v0.8h, v0.8b
sxtl v1.8h, v1.8b
mov x15, x30
mov \my, \h
164: // 8x2, 8x4, 16x2, 16x4, 32x2, 32x4 hv
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \d_strd, \d_strd, #1
lsl \s_strd, \s_strd, #1
bl L(\type\()_\taps\()_filter_8_first)
bl L(\type\()_\taps\()_filter_8)
mov v17.16b, v24.16b
mov v18.16b, v25.16b
8:
smull v2.4s, v16.4h, v1.h[0]
smull2 v3.4s, v16.8h, v1.h[0]
bl L(\type\()_\taps\()_filter_8)
smull v4.4s, v17.4h, v1.h[0]
smull2 v5.4s, v17.8h, v1.h[0]
smlal v2.4s, v17.4h, v1.h[1]
smlal2 v3.4s, v17.8h, v1.h[1]
smlal v4.4s, v18.4h, v1.h[1]
smlal2 v5.4s, v18.8h, v1.h[1]
smlal v2.4s, v18.4h, v1.h[2]
smlal2 v3.4s, v18.8h, v1.h[2]
smlal v4.4s, v24.4h, v1.h[2]
smlal2 v5.4s, v24.8h, v1.h[2]
smlal v2.4s, v24.4h, v1.h[3]
smlal2 v3.4s, v24.8h, v1.h[3]
smlal v4.4s, v25.4h, v1.h[3]
smlal2 v5.4s, v25.8h, v1.h[3]
sqrshrn v2.4h, v2.4s, #\shift_hv
sqrshrn2 v2.8h, v3.4s, #\shift_hv
sqrshrn v4.4h, v4.4s, #\shift_hv
sqrshrn2 v4.8h, v5.4s, #\shift_hv
subs \h, \h, #2
.ifc \type, put
sqxtun v2.8b, v2.8h
sqxtun v4.8b, v4.8h
st1 {v2.8b}, [\dst], \d_strd
st1 {v4.8b}, [\ds2], \d_strd
.else
st1 {v2.8h}, [\dst], \d_strd
st1 {v4.8h}, [\ds2], \d_strd
.endif
b.le 9f
mov v16.16b, v18.16b
mov v17.16b, v24.16b
mov v18.16b, v25.16b
b 8b
9:
subs \w, \w, #8
b.le 0f
asr \s_strd, \s_strd, #1
asr \d_strd, \d_strd, #1
msub \src, \s_strd, \xmy, \src
msub \dst, \d_strd, \xmy, \dst
sub \src, \src, \s_strd, lsl #2
mov \h, \my
add \src, \src, #8
.ifc \type, put
add \dst, \dst, #8
.else
add \dst, \dst, #16
.endif
b 164b
880: // 8x8, 8x16, ..., 16x8, ..., 32x8, ... hv
640:
1280:
AARCH64_VALID_JUMP_TARGET
ld1 {v0.8b}, [\xmx]
ld1 {v1.8b}, [\xmy]
.ifc \taps, 6tap
sub \src, \src, #2
.else
sub \src, \src, #3
sub \src, \src, \s_strd
.endif
sub \src, \src, \s_strd, lsl #1
sxtl v0.8h, v0.8b
sxtl v1.8h, v1.8b
mov x15, x30
mov \my, \h
168:
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \d_strd, \d_strd, #1
lsl \s_strd, \s_strd, #1
bl L(\type\()_\taps\()_filter_8_first)
.ifc \taps, 6tap
mov v18.16b, v16.16b
.else
bl L(\type\()_\taps\()_filter_8)
mov v17.16b, v24.16b
mov v18.16b, v25.16b
.endif
bl L(\type\()_\taps\()_filter_8)
mov v19.16b, v24.16b
mov v20.16b, v25.16b
bl L(\type\()_\taps\()_filter_8)
mov v21.16b, v24.16b
mov v22.16b, v25.16b
88:
.ifc \taps, 6tap
smull v2.4s, v18.4h, v1.h[1]
smull2 v3.4s, v18.8h, v1.h[1]
bl L(\type\()_\taps\()_filter_8)
smull v4.4s, v19.4h, v1.h[1]
smull2 v5.4s, v19.8h, v1.h[1]
smlal v2.4s, v19.4h, v1.h[2]
smlal2 v3.4s, v19.8h, v1.h[2]
smlal v4.4s, v20.4h, v1.h[2]
smlal2 v5.4s, v20.8h, v1.h[2]
smlal v2.4s, v20.4h, v1.h[3]
smlal2 v3.4s, v20.8h, v1.h[3]
smlal v4.4s, v21.4h, v1.h[3]
smlal2 v5.4s, v21.8h, v1.h[3]
smlal v2.4s, v21.4h, v1.h[4]
smlal2 v3.4s, v21.8h, v1.h[4]
smlal v4.4s, v22.4h, v1.h[4]
smlal2 v5.4s, v22.8h, v1.h[4]
smlal v2.4s, v22.4h, v1.h[5]
smlal2 v3.4s, v22.8h, v1.h[5]
smlal v4.4s, v24.4h, v1.h[5]
smlal2 v5.4s, v24.8h, v1.h[5]
smlal v2.4s, v24.4h, v1.h[6]
smlal2 v3.4s, v24.8h, v1.h[6]
smlal v4.4s, v25.4h, v1.h[6]
smlal2 v5.4s, v25.8h, v1.h[6]
.else // 8tap
smull v2.4s, v16.4h, v1.h[0]
smull2 v3.4s, v16.8h, v1.h[0]
bl L(\type\()_\taps\()_filter_8)
smull v4.4s, v17.4h, v1.h[0]
smull2 v5.4s, v17.8h, v1.h[0]
smlal v2.4s, v17.4h, v1.h[1]
smlal2 v3.4s, v17.8h, v1.h[1]
smlal v4.4s, v18.4h, v1.h[1]
smlal2 v5.4s, v18.8h, v1.h[1]
smlal v2.4s, v18.4h, v1.h[2]
smlal2 v3.4s, v18.8h, v1.h[2]
smlal v4.4s, v19.4h, v1.h[2]
smlal2 v5.4s, v19.8h, v1.h[2]
smlal v2.4s, v19.4h, v1.h[3]
smlal2 v3.4s, v19.8h, v1.h[3]
smlal v4.4s, v20.4h, v1.h[3]
smlal2 v5.4s, v20.8h, v1.h[3]
smlal v2.4s, v20.4h, v1.h[4]
smlal2 v3.4s, v20.8h, v1.h[4]
smlal v4.4s, v21.4h, v1.h[4]
smlal2 v5.4s, v21.8h, v1.h[4]
smlal v2.4s, v21.4h, v1.h[5]
smlal2 v3.4s, v21.8h, v1.h[5]
smlal v4.4s, v22.4h, v1.h[5]
smlal2 v5.4s, v22.8h, v1.h[5]
smlal v2.4s, v22.4h, v1.h[6]
smlal2 v3.4s, v22.8h, v1.h[6]
smlal v4.4s, v24.4h, v1.h[6]
smlal2 v5.4s, v24.8h, v1.h[6]
smlal v2.4s, v24.4h, v1.h[7]
smlal2 v3.4s, v24.8h, v1.h[7]
smlal v4.4s, v25.4h, v1.h[7]
smlal2 v5.4s, v25.8h, v1.h[7]
.endif
sqrshrn v2.4h, v2.4s, #\shift_hv
sqrshrn2 v2.8h, v3.4s, #\shift_hv
sqrshrn v4.4h, v4.4s, #\shift_hv
sqrshrn2 v4.8h, v5.4s, #\shift_hv
subs \h, \h, #2
.ifc \type, put
sqxtun v2.8b, v2.8h
sqxtun v4.8b, v4.8h
st1 {v2.8b}, [\dst], \d_strd
st1 {v4.8b}, [\ds2], \d_strd
.else
st1 {v2.8h}, [\dst], \d_strd
st1 {v4.8h}, [\ds2], \d_strd
.endif
b.le 9f
.ifc \taps, 8tap
mov v16.16b, v18.16b
mov v17.16b, v19.16b
.endif
mov v18.16b, v20.16b
mov v19.16b, v21.16b
mov v20.16b, v22.16b
mov v21.16b, v24.16b
mov v22.16b, v25.16b
b 88b
9:
subs \w, \w, #8
b.le 0f
asr \s_strd, \s_strd, #1
asr \d_strd, \d_strd, #1
msub \src, \s_strd, \xmy, \src
msub \dst, \d_strd, \xmy, \dst
sub \src, \src, \s_strd, lsl #3
mov \h, \my
add \src, \src, #8
.ifc \type, put
add \dst, \dst, #8
.else
add \dst, \dst, #16
.endif
.ifc \taps, 6tap
add \src, \src, \s_strd, lsl #1
.endif
b 168b
0:
ret x15
L(\type\()_\taps\()_filter_8_first):
ld1 {v28.8b, v29.8b}, [\src], \s_strd
uxtl v28.8h, v28.8b
uxtl v29.8h, v29.8b
.ifc \taps, 6tap
mul v16.8h, v28.8h, v0.h[1]
ext v25.16b, v28.16b, v29.16b, #(2*1)
ext v26.16b, v28.16b, v29.16b, #(2*2)
ext v27.16b, v28.16b, v29.16b, #(2*3)
mla v16.8h, v25.8h, v0.h[2]
mla v16.8h, v26.8h, v0.h[3]
mla v16.8h, v27.8h, v0.h[4]
ext v24.16b, v28.16b, v29.16b, #(2*4)
ext v25.16b, v28.16b, v29.16b, #(2*5)
mla v16.8h, v24.8h, v0.h[5]
mla v16.8h, v25.8h, v0.h[6]
.else // 8tap
mul v16.8h, v28.8h, v0.h[0]
ext v24.16b, v28.16b, v29.16b, #(2*1)
ext v25.16b, v28.16b, v29.16b, #(2*2)
ext v26.16b, v28.16b, v29.16b, #(2*3)
ext v27.16b, v28.16b, v29.16b, #(2*4)
mla v16.8h, v24.8h, v0.h[1]
mla v16.8h, v25.8h, v0.h[2]
mla v16.8h, v26.8h, v0.h[3]
mla v16.8h, v27.8h, v0.h[4]
ext v24.16b, v28.16b, v29.16b, #(2*5)
ext v25.16b, v28.16b, v29.16b, #(2*6)
ext v26.16b, v28.16b, v29.16b, #(2*7)
mla v16.8h, v24.8h, v0.h[5]
mla v16.8h, v25.8h, v0.h[6]
mla v16.8h, v26.8h, v0.h[7]
.endif
srshr v16.8h, v16.8h, #2
ret
L(\type\()_\taps\()_filter_8):
ld1 {v28.8b, v29.8b}, [\sr2], \s_strd
ld1 {v30.8b, v31.8b}, [\src], \s_strd
uxtl v28.8h, v28.8b
uxtl v29.8h, v29.8b
uxtl v30.8h, v30.8b
uxtl v31.8h, v31.8b
.ifc \taps, 6tap
mul v24.8h, v28.8h, v0.h[1]
mul v25.8h, v30.8h, v0.h[1]
.irpc i, 23456
ext v26.16b, v28.16b, v29.16b, #(2*\i-2)
ext v27.16b, v30.16b, v31.16b, #(2*\i-2)
mla v24.8h, v26.8h, v0.h[\i]
mla v25.8h, v27.8h, v0.h[\i]
.endr
.else // 8tap
mul v24.8h, v28.8h, v0.h[0]
mul v25.8h, v30.8h, v0.h[0]
.irpc i, 1234567
ext v26.16b, v28.16b, v29.16b, #(2*\i)
ext v27.16b, v30.16b, v31.16b, #(2*\i)
mla v24.8h, v26.8h, v0.h[\i]
mla v25.8h, v27.8h, v0.h[\i]
.endr
.endif
srshr v24.8h, v24.8h, #2
srshr v25.8h, v25.8h, #2
ret
endfunc
jumptable \type\()_\taps\()_hv_tbl
.word 1280b - \type\()_\taps\()_hv_tbl
.word 640b - \type\()_\taps\()_hv_tbl
.word 320b - \type\()_\taps\()_hv_tbl
.word 160b - \type\()_\taps\()_hv_tbl
.word 80b - \type\()_\taps\()_hv_tbl
.word 40b - \type\()_\taps\()_hv_tbl
.word 20b - \type\()_\taps\()_hv_tbl
endjumptable
.endm
.macro filter_bilin_fn type, dst, d_strd, src, s_strd, w, h, mx, xmx, my, xmy, ds2, sr2, shift_hv
function \type\()_bilin_8bpc_neon, export=1
dup v1.16b, \mx
dup v3.16b, \my
mov w9, #16
sub w8, w9, \mx
sub w9, w9, \my
dup v0.16b, w8
dup v2.16b, w9
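// Bilinear weights: v0/v1 hold (16-mx, mx) for the horizontal pass and
// v2/v3 hold (16-my, my) for the vertical pass, so each output is
// (16-frac)*p0 + frac*p1, rounded and shifted as needed per pass.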
.ifc \type, prep
uxtw \d_strd, \w
lsl \d_strd, \d_strd, #1
.endif
clz w8, \w
sub w8, w8, #24
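// w8 = clz(w) - 24 indexes the jump tables below (w = 128..2 -> 0..6).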
cbnz \mx, L(\type\()_bilin_h)
cbnz \my, L(\type\()_bilin_v)
b \type\()_neon
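// mx == 0 && my == 0 falls through to the unfiltered \type\()_neon path;
// otherwise dispatch to the h, v or hv bilinear path.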
L(\type\()_bilin_h):
cbnz \my, L(\type\()_bilin_hv)
movrel x9, \type\()_bilin_h_tbl
ldrsw x8, [x9, x8, lsl #2]
add x9, x9, x8
br x9
20: // 2xN h
AARCH64_VALID_JUMP_TARGET
.ifc \type, put
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \d_strd, \d_strd, #1
lsl \s_strd, \s_strd, #1
2:
ld1r {v4.4s}, [\src], \s_strd
ld1r {v6.4s}, [\sr2], \s_strd
ext v5.8b, v4.8b, v4.8b, #1
ext v7.8b, v6.8b, v6.8b, #1
trn1 v4.4h, v4.4h, v6.4h
trn1 v5.4h, v5.4h, v7.4h
subs \h, \h, #2
umull v4.8h, v4.8b, v0.8b
umlal v4.8h, v5.8b, v1.8b
uqrshrn v4.8b, v4.8h, #4
st1 {v4.h}[0], [\dst], \d_strd
st1 {v4.h}[1], [\ds2], \d_strd
b.gt 2b
ret
.endif
40: // 4xN h
AARCH64_VALID_JUMP_TARGET
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \d_strd, \d_strd, #1
lsl \s_strd, \s_strd, #1
4:
ld1 {v4.8b}, [\src], \s_strd
ld1 {v6.8b}, [\sr2], \s_strd
ext v5.8b, v4.8b, v4.8b, #1
ext v7.8b, v6.8b, v6.8b, #1
trn1 v4.2s, v4.2s, v6.2s
trn1 v5.2s, v5.2s, v7.2s
subs \h, \h, #2
umull v4.8h, v4.8b, v0.8b
umlal v4.8h, v5.8b, v1.8b
.ifc \type, put
uqrshrn v4.8b, v4.8h, #4
st1 {v4.s}[0], [\dst], \d_strd
st1 {v4.s}[1], [\ds2], \d_strd
.else
st1 {v4.8b}, [\dst], \d_strd
st1 {v4.d}[1], [\ds2], \d_strd
.endif
b.gt 4b
ret
80: // 8xN h
AARCH64_VALID_JUMP_TARGET
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \d_strd, \d_strd, #1
lsl \s_strd, \s_strd, #1
8:
ld1 {v4.16b}, [\src], \s_strd
ld1 {v6.16b}, [\sr2], \s_strd
ext v5.16b, v4.16b, v4.16b, #1
ext v7.16b, v6.16b, v6.16b, #1
subs \h, \h, #2
umull v4.8h, v4.8b, v0.8b
umull v6.8h, v6.8b, v0.8b
umlal v4.8h, v5.8b, v1.8b
umlal v6.8h, v7.8b, v1.8b
.ifc \type, put
uqrshrn v4.8b, v4.8h, #4
uqrshrn v6.8b, v6.8h, #4
st1 {v4.8b}, [\dst], \d_strd
st1 {v6.8b}, [\ds2], \d_strd
.else
st1 {v4.8h}, [\dst], \d_strd
st1 {v6.8h}, [\ds2], \d_strd
.endif
b.gt 8b
ret
160:
320:
640:
1280: // 16xN, 32xN, ... h
AARCH64_VALID_JUMP_TARGET
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
sub \s_strd, \s_strd, \w, uxtw
sub \s_strd, \s_strd, #8
.ifc \type, put
lsl \d_strd, \d_strd, #1
sub \d_strd, \d_strd, \w, uxtw
.endif
161:
ld1 {v16.d}[1], [\src], #8
ld1 {v20.d}[1], [\sr2], #8
mov \mx, \w
16:
ld1 {v18.16b}, [\src], #16
ld1 {v22.16b}, [\sr2], #16
ext v17.16b, v16.16b, v18.16b, #8
ext v19.16b, v16.16b, v18.16b, #9
ext v21.16b, v20.16b, v22.16b, #8
ext v23.16b, v20.16b, v22.16b, #9
umull v16.8h, v17.8b, v0.8b
umull2 v17.8h, v17.16b, v0.16b
umull v20.8h, v21.8b, v0.8b
umull2 v21.8h, v21.16b, v0.16b
umlal v16.8h, v19.8b, v1.8b
umlal2 v17.8h, v19.16b, v1.16b
umlal v20.8h, v23.8b, v1.8b
umlal2 v21.8h, v23.16b, v1.16b
subs \mx, \mx, #16
.ifc \type, put
uqrshrn v16.8b, v16.8h, #4
uqrshrn2 v16.16b, v17.8h, #4
uqrshrn v20.8b, v20.8h, #4
uqrshrn2 v20.16b, v21.8h, #4
st1 {v16.16b}, [\dst], #16
st1 {v20.16b}, [\ds2], #16
.else
st1 {v16.8h, v17.8h}, [\dst], #32
st1 {v20.8h, v21.8h}, [\ds2], #32
.endif
b.le 9f
mov v16.16b, v18.16b
mov v20.16b, v22.16b
b 16b
9:
add \dst, \dst, \d_strd
add \ds2, \ds2, \d_strd
add \src, \src, \s_strd
add \sr2, \sr2, \s_strd
subs \h, \h, #2
b.gt 161b
ret
endfunc
jumptable \type\()_bilin_h_tbl
.word 1280b - \type\()_bilin_h_tbl
.word 640b - \type\()_bilin_h_tbl
.word 320b - \type\()_bilin_h_tbl
.word 160b - \type\()_bilin_h_tbl
.word 80b - \type\()_bilin_h_tbl
.word 40b - \type\()_bilin_h_tbl
.word 20b - \type\()_bilin_h_tbl
endjumptable
function L(\type\()_bilin_v)
cmp \h, #4
movrel x9, \type\()_bilin_v_tbl
ldrsw x8, [x9, x8, lsl #2]
add x9, x9, x8
br x9
20: // 2xN v
AARCH64_VALID_JUMP_TARGET
.ifc \type, put
cmp \h, #2
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
// 2x2 v
ld1r {v16.8h}, [\src], \s_strd
b.gt 24f
22:
ld1r {v17.8h}, [\sr2], \s_strd
ld1r {v18.8h}, [\src], \s_strd
trn1 v16.4h, v16.4h, v17.4h
trn1 v17.4h, v17.4h, v18.4h
umull v4.8h, v16.8b, v2.8b
umlal v4.8h, v17.8b, v3.8b
uqrshrn v4.8b, v4.8h, #4
str h4, [\dst]
st1 {v4.h}[1], [\ds2]
ret
24: // 2x4, 2x6, 2x8, ... v
ld1r {v17.8h}, [\sr2], \s_strd
ld1r {v18.8h}, [\src], \s_strd
ld1r {v19.8h}, [\sr2], \s_strd
ld1r {v20.8h}, [\src], \s_strd
sub \h, \h, #4
trn1 v16.4h, v16.4h, v17.4h
trn1 v17.4h, v17.4h, v18.4h
trn1 v18.4h, v18.4h, v19.4h
trn1 v19.4h, v19.4h, v20.4h
trn1 v16.2s, v16.2s, v18.2s
trn1 v17.2s, v17.2s, v19.2s
umull v4.8h, v16.8b, v2.8b
umlal v4.8h, v17.8b, v3.8b
cmp \h, #2
uqrshrn v4.8b, v4.8h, #4
st1 {v4.h}[0], [\dst], \d_strd
st1 {v4.h}[1], [\ds2], \d_strd
st1 {v4.h}[2], [\dst], \d_strd
st1 {v4.h}[3], [\ds2], \d_strd
b.lt 0f
mov v16.8b, v20.8b
b.eq 22b
b 24b
0:
ret
.endif
40: // 4xN v
AARCH64_VALID_JUMP_TARGET
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
ld1r {v16.4s}, [\src], \s_strd
4:
ld1r {v17.4s}, [\sr2], \s_strd
ld1r {v18.4s}, [\src], \s_strd
trn1 v16.2s, v16.2s, v17.2s
trn1 v17.2s, v17.2s, v18.2s
umull v4.8h, v16.8b, v2.8b
umlal v4.8h, v17.8b, v3.8b
subs \h, \h, #2
.ifc \type, put
uqrshrn v4.8b, v4.8h, #4
st1 {v4.s}[0], [\dst], \d_strd
st1 {v4.s}[1], [\ds2], \d_strd
.else
st1 {v4.8b}, [\dst], \d_strd
st1 {v4.d}[1], [\ds2], \d_strd
.endif
b.le 0f
mov v16.8b, v18.8b
b 4b
0:
ret
80: // 8xN v
AARCH64_VALID_JUMP_TARGET
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
ld1 {v16.8b}, [\src], \s_strd
8:
ld1 {v17.8b}, [\sr2], \s_strd
ld1 {v18.8b}, [\src], \s_strd
umull v4.8h, v16.8b, v2.8b
umull v5.8h, v17.8b, v2.8b
umlal v4.8h, v17.8b, v3.8b
umlal v5.8h, v18.8b, v3.8b
subs \h, \h, #2
.ifc \type, put
uqrshrn v4.8b, v4.8h, #4
uqrshrn v5.8b, v5.8h, #4
st1 {v4.8b}, [\dst], \d_strd
st1 {v5.8b}, [\ds2], \d_strd
.else
st1 {v4.8h}, [\dst], \d_strd
st1 {v5.8h}, [\ds2], \d_strd
.endif
b.le 0f
mov v16.8b, v18.8b
b 8b
0:
ret
160: // 16xN, 32xN, ...
320:
640:
1280:
AARCH64_VALID_JUMP_TARGET
mov \my, \h
1:
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
ld1 {v16.16b}, [\src], \s_strd
2:
ld1 {v17.16b}, [\sr2], \s_strd
ld1 {v18.16b}, [\src], \s_strd
umull v4.8h, v16.8b, v2.8b
umull2 v5.8h, v16.16b, v2.16b
umull v6.8h, v17.8b, v2.8b
umull2 v7.8h, v17.16b, v2.16b
umlal v4.8h, v17.8b, v3.8b
umlal2 v5.8h, v17.16b, v3.16b
umlal v6.8h, v18.8b, v3.8b
umlal2 v7.8h, v18.16b, v3.16b
subs \h, \h, #2
.ifc \type, put
uqrshrn v4.8b, v4.8h, #4
uqrshrn2 v4.16b, v5.8h, #4
uqrshrn v6.8b, v6.8h, #4
uqrshrn2 v6.16b, v7.8h, #4
st1 {v4.16b}, [\dst], \d_strd
st1 {v6.16b}, [\ds2], \d_strd
.else
st1 {v4.8h, v5.8h}, [\dst], \d_strd
st1 {v6.8h, v7.8h}, [\ds2], \d_strd
.endif
b.le 9f
mov v16.16b, v18.16b
b 2b
9:
subs \w, \w, #16
b.le 0f
asr \s_strd, \s_strd, #1
asr \d_strd, \d_strd, #1
msub \src, \s_strd, \xmy, \src
msub \dst, \d_strd, \xmy, \dst
sub \src, \src, \s_strd, lsl #1
mov \h, \my
add \src, \src, #16
.ifc \type, put
add \dst, \dst, #16
.else
add \dst, \dst, #32
.endif
b 1b
0:
ret
endfunc
jumptable \type\()_bilin_v_tbl
.word 1280b - \type\()_bilin_v_tbl
.word 640b - \type\()_bilin_v_tbl
.word 320b - \type\()_bilin_v_tbl
.word 160b - \type\()_bilin_v_tbl
.word 80b - \type\()_bilin_v_tbl
.word 40b - \type\()_bilin_v_tbl
.word 20b - \type\()_bilin_v_tbl
endjumptable
function L(\type\()_bilin_hv)
uxtl v2.8h, v2.8b
uxtl v3.8h, v3.8b
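// The vertical weights are widened to 16 bits since the second pass
// multiplies the 16-bit horizontal intermediates, which are kept
// unshifted with 4 fractional bits; put narrows with a combined >> 8,
// prep keeps 16-bit output with >> 4.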
movrel x9, \type\()_bilin_hv_tbl
ldrsw x8, [x9, x8, lsl #2]
add x9, x9, x8
br x9
20: // 2xN hv
AARCH64_VALID_JUMP_TARGET
.ifc \type, put
add \sr2, \src, \s_strd
add \ds2, \dst, \d_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
ld1r {v28.4s}, [\src], \s_strd
ext v29.8b, v28.8b, v28.8b, #1
umull v16.8h, v28.8b, v0.8b
umlal v16.8h, v29.8b, v1.8b
2:
ld1r {v28.4s}, [\sr2], \s_strd
ld1r {v30.4s}, [\src], \s_strd
ext v29.8b, v28.8b, v28.8b, #1
ext v31.8b, v30.8b, v30.8b, #1
trn1 v28.4h, v28.4h, v30.4h
trn1 v29.4h, v29.4h, v31.4h
umull v17.8h, v28.8b, v0.8b
umlal v17.8h, v29.8b, v1.8b
trn1 v16.2s, v16.2s, v17.2s
mul v4.4h, v16.4h, v2.4h
mla v4.4h, v17.4h, v3.4h
uqrshrn v4.8b, v4.8h, #8
subs \h, \h, #2
st1 {v4.h}[0], [\dst], \d_strd
st1 {v4.h}[1], [\ds2], \d_strd
b.le 0f
trn2 v16.2s, v17.2s, v17.2s
b 2b
0:
ret
.endif
40: // 4xN hv
AARCH64_VALID_JUMP_TARGET
add \sr2, \src, \s_strd
add \ds2, \dst, \d_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
ld1 {v28.8b}, [\src], \s_strd
ext v29.8b, v28.8b, v28.8b, #1
umull v16.8h, v28.8b, v0.8b
umlal v16.8h, v29.8b, v1.8b
4:
ld1 {v28.8b}, [\sr2], \s_strd
ld1 {v30.8b}, [\src], \s_strd
ext v29.8b, v28.8b, v28.8b, #1
ext v31.8b, v30.8b, v30.8b, #1
trn1 v28.2s, v28.2s, v30.2s
trn1 v29.2s, v29.2s, v31.2s
umull v17.8h, v28.8b, v0.8b
umlal v17.8h, v29.8b, v1.8b
trn1 v16.2d, v16.2d, v17.2d
mul v4.8h, v16.8h, v2.8h
mla v4.8h, v17.8h, v3.8h
subs \h, \h, #2
.ifc \type, put
uqrshrn v4.8b, v4.8h, #8
st1 {v4.s}[0], [\dst], \d_strd
st1 {v4.s}[1], [\ds2], \d_strd
.else
urshr v4.8h, v4.8h, #4
st1 {v4.8b}, [\dst], \d_strd
st1 {v4.d}[1], [\ds2], \d_strd
.endif
b.le 0f
trn2 v16.2d, v17.2d, v17.2d
b 4b
0:
ret
80: // 8xN, 16xN, ... hv
160:
320:
640:
1280:
AARCH64_VALID_JUMP_TARGET
mov \my, \h
1:
add \sr2, \src, \s_strd
add \ds2, \dst, \d_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
ld1 {v28.16b}, [\src], \s_strd
ext v29.16b, v28.16b, v28.16b, #1
umull v16.8h, v28.8b, v0.8b
umlal v16.8h, v29.8b, v1.8b
2:
ld1 {v28.16b}, [\sr2], \s_strd
ld1 {v30.16b}, [\src], \s_strd
ext v29.16b, v28.16b, v28.16b, #1
ext v31.16b, v30.16b, v30.16b, #1
umull v17.8h, v28.8b, v0.8b
umlal v17.8h, v29.8b, v1.8b
umull v18.8h, v30.8b, v0.8b
umlal v18.8h, v31.8b, v1.8b
mul v4.8h, v16.8h, v2.8h
mla v4.8h, v17.8h, v3.8h
mul v5.8h, v17.8h, v2.8h
mla v5.8h, v18.8h, v3.8h
subs \h, \h, #2
.ifc \type, put
uqrshrn v4.8b, v4.8h, #8
uqrshrn v5.8b, v5.8h, #8
st1 {v4.8b}, [\dst], \d_strd
st1 {v5.8b}, [\ds2], \d_strd
.else
urshr v4.8h, v4.8h, #4
urshr v5.8h, v5.8h, #4
st1 {v4.8h}, [\dst], \d_strd
st1 {v5.8h}, [\ds2], \d_strd
.endif
b.le 9f
mov v16.16b, v18.16b
b 2b
9:
subs \w, \w, #8
b.le 0f
asr \s_strd, \s_strd, #1
asr \d_strd, \d_strd, #1
msub \src, \s_strd, \xmy, \src
msub \dst, \d_strd, \xmy, \dst
sub \src, \src, \s_strd, lsl #1
mov \h, \my
add \src, \src, #8
.ifc \type, put
add \dst, \dst, #8
.else
add \dst, \dst, #16
.endif
b 1b
0:
ret
endfunc
jumptable \type\()_bilin_hv_tbl
.word 1280b - \type\()_bilin_hv_tbl
.word 640b - \type\()_bilin_hv_tbl
.word 320b - \type\()_bilin_hv_tbl
.word 160b - \type\()_bilin_hv_tbl
.word 80b - \type\()_bilin_hv_tbl
.word 40b - \type\()_bilin_hv_tbl
.word 20b - \type\()_bilin_hv_tbl
endjumptable
.endm
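// Combinations that include a SHARP filter need the full 8-tap path; the
// remaining REGULAR/SMOOTH combinations have zero outer taps and can use
// the cheaper 6-tap path.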
make_8tap_fn put, regular_sharp, REGULAR, SHARP, 8tap
make_8tap_fn put, smooth_sharp, SMOOTH, SHARP, 8tap
make_8tap_fn put, sharp, SHARP, SHARP, 8tap
make_8tap_fn put, sharp_regular, SHARP, REGULAR, 8tap
make_8tap_fn put, sharp_smooth, SHARP, SMOOTH, 8tap
filter_fn put, x0, x1, x2, x3, w4, w5, w6, x6, w7, x7, x8, x9, 10, 8tap
make_8tap_fn put, regular, REGULAR, REGULAR, 6tap
make_8tap_fn put, regular_smooth, REGULAR, SMOOTH, 6tap
make_8tap_fn put, smooth, SMOOTH, SMOOTH, 6tap
make_8tap_fn put, smooth_regular, SMOOTH, REGULAR, 6tap
filter_fn put, x0, x1, x2, x3, w4, w5, w6, x6, w7, x7, x8, x9, 10, 6tap
filter_bilin_fn put, x0, x1, x2, x3, w4, w5, w6, x6, w7, x7, x8, x9, 10
make_8tap_fn prep, regular_sharp, REGULAR, SHARP, 8tap
make_8tap_fn prep, smooth_sharp, SMOOTH, SHARP, 8tap
make_8tap_fn prep, sharp, SHARP, SHARP, 8tap
make_8tap_fn prep, sharp_regular, SHARP, REGULAR, 8tap
make_8tap_fn prep, sharp_smooth, SHARP, SMOOTH, 8tap
filter_fn prep, x0, x7, x1, x2, w3, w4, w5, x5, w6, x6, x8, x9, 6, 8tap
make_8tap_fn prep, regular, REGULAR, REGULAR, 6tap
make_8tap_fn prep, regular_smooth, REGULAR, SMOOTH, 6tap
make_8tap_fn prep, smooth, SMOOTH, SMOOTH, 6tap
make_8tap_fn prep, smooth_regular, SMOOTH, REGULAR, 6tap
filter_fn prep, x0, x7, x1, x2, w3, w4, w5, x5, w6, x6, x8, x9, 6, 6tap
filter_bilin_fn prep, x0, x7, x1, x2, w3, w4, w5, x5, w6, x6, x8, x9, 6
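// Loads one 8-tap warp filter row: the filter index is taken from bits 10
// and up of the accumulated position \src, then \src is stepped by \inc.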
.macro load_filter_row dst, src, inc
asr w13, \src, #10
add \src, \src, \inc
ldr \dst, [x11, w13, sxtw #3]
.endm
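// Rough C sketch of what warp_filter_horz_neon computes for one row
// (a simplification of the dav1d reference, ignoring the -128 bias that is
// applied so smull can be used; x11 already points at mc_warp_filter + 64*8):
//
//   for (int x = 0; x < 8; x++) {
//       const int8_t *f = &mc_warp_filter[64 + ((mx + 512) >> 10)][0];
//       int sum = 0;
//       for (int k = 0; k < 8; k++)
//           sum += f[k] * src[x + k];
//       out[x] = sum;          // the caller rounds with srshr #3
//       mx += alpha;           // w12 += w7
//   }
//   pos += beta;               // w5 += w8, advance to the next row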
function warp_filter_horz_neon
add w12, w5, #512
ld1 {v16.8b, v17.8b}, [x2], x3
load_filter_row d0, w12, w7
load_filter_row d1, w12, w7
load_filter_row d2, w12, w7
load_filter_row d3, w12, w7
load_filter_row d4, w12, w7
load_filter_row d5, w12, w7
load_filter_row d6, w12, w7
// subtract 128 to allow using smull
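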
eor v16.8b, v16.8b, v22.8b
eor v17.8b, v17.8b, v22.8b
load_filter_row d7, w12, w7
ext v18.8b, v16.8b, v17.8b, #1
ext v19.8b, v16.8b, v17.8b, #2
smull v0.8h, v0.8b, v16.8b
smull v1.8h, v1.8b, v18.8b
ext v18.8b, v16.8b, v17.8b, #3
ext v20.8b, v16.8b, v17.8b, #4
smull v2.8h, v2.8b, v19.8b
smull v3.8h, v3.8b, v18.8b
ext v18.8b, v16.8b, v17.8b, #5
ext v19.8b, v16.8b, v17.8b, #6
smull v4.8h, v4.8b, v20.8b
smull v5.8h, v5.8b, v18.8b
ext v18.8b, v16.8b, v17.8b, #7
smull v6.8h, v6.8b, v19.8b
smull v7.8h, v7.8b, v18.8b
addp v0.8h, v0.8h, v1.8h
addp v2.8h, v2.8h, v3.8h
addp v4.8h, v4.8h, v5.8h
addp v6.8h, v6.8h, v7.8h
addp v0.8h, v0.8h, v2.8h
addp v4.8h, v4.8h, v6.8h
addp v0.8h, v0.8h, v4.8h
add w5, w5, w8
ret
endfunc
// void dav1d_warp_affine_8x8_8bpc_neon(
// pixel *dst, const ptrdiff_t dst_stride,
// const pixel *src, const ptrdiff_t src_stride,
// const int16_t *const abcd, int mx, int my)
.macro warp t, shift
function warp_affine_8x8\t\()_8bpc_neon, export=1
ldr x4, [x4]
sbfx x7, x4, #0, #16
sbfx x8, x4, #16, #16
sbfx x9, x4, #32, #16
sbfx x4, x4, #48, #16
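// Unpack the four signed 16-bit parameters from abcd[]: x7/x8 are the
// per-pixel and per-row steps for the horizontal filter position,
// x9/x4 the same for the vertical one.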
mov w10, #8
sub x2, x2, x3, lsl #1
sub x2, x2, x3
sub x2, x2, #3
movrel x11, X(mc_warp_filter), 64*8
mov x15, x30
.ifnb \t
lsl x1, x1, #1
.endif
movi v22.8b, #128
.ifb \t
movi v23.8h, #128
.else
movi v23.8h, #8, lsl #8
.endif
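// v22 toggles the source pixels to signed (-128) so smull can be used;
// v23 adds the compensating offset back after the vertical pass:
// 128 for the pixel-output variant, 8 << 8 for the 16-bit "t" variant.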
bl warp_filter_horz_neon
srshr v24.8h, v0.8h, #3
bl warp_filter_horz_neon
srshr v25.8h, v0.8h, #3
bl warp_filter_horz_neon
srshr v26.8h, v0.8h, #3
bl warp_filter_horz_neon
srshr v27.8h, v0.8h, #3
bl warp_filter_horz_neon
srshr v28.8h, v0.8h, #3
bl warp_filter_horz_neon
srshr v29.8h, v0.8h, #3
bl warp_filter_horz_neon
srshr v30.8h, v0.8h, #3
1:
add w14, w6, #512
bl warp_filter_horz_neon
srshr v31.8h, v0.8h, #3
load_filter_row d0, w14, w9
load_filter_row d1, w14, w9
load_filter_row d2, w14, w9
load_filter_row d3, w14, w9
load_filter_row d4, w14, w9
load_filter_row d5, w14, w9
load_filter_row d6, w14, w9
load_filter_row d7, w14, w9
transpose_8x8b_xtl v0, v1, v2, v3, v4, v5, v6, v7, sxtl
// This ordering of smull/smlal/smull2/smlal2 is highly
// beneficial for Cortex A53 here.
smull v16.4s, v24.4h, v0.4h
smlal v16.4s, v25.4h, v1.4h
smlal v16.4s, v26.4h, v2.4h
smlal v16.4s, v27.4h, v3.4h
smlal v16.4s, v28.4h, v4.4h
smlal v16.4s, v29.4h, v5.4h
smlal v16.4s, v30.4h, v6.4h
smlal v16.4s, v31.4h, v7.4h
smull2 v17.4s, v24.8h, v0.8h
smlal2 v17.4s, v25.8h, v1.8h
smlal2 v17.4s, v26.8h, v2.8h
smlal2 v17.4s, v27.8h, v3.8h
smlal2 v17.4s, v28.8h, v4.8h
smlal2 v17.4s, v29.8h, v5.8h
smlal2 v17.4s, v30.8h, v6.8h
smlal2 v17.4s, v31.8h, v7.8h
mov v24.16b, v25.16b
mov v25.16b, v26.16b
sqrshrn v16.4h, v16.4s, #\shift
mov v26.16b, v27.16b
sqrshrn2 v16.8h, v17.4s, #\shift
mov v27.16b, v28.16b
mov v28.16b, v29.16b
add v16.8h, v16.8h, v23.8h
.ifb \t
sqxtun v16.8b, v16.8h
.endif
mov v29.16b, v30.16b
mov v30.16b, v31.16b
subs w10, w10, #1
.ifnb \t
st1 {v16.8h}, [x0], x1
.else
st1 {v16.8b}, [x0], x1
.endif
add w6, w6, w4
b.gt 1b
ret x15
endfunc
.endm
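// Instantiate the pixel-output variant (final shift 11) and the 16-bit
// "t" variant (final shift 7).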
warp , 11
warp t, 7
// void dav1d_emu_edge_8bpc_neon(
// const intptr_t bw, const intptr_t bh,
// const intptr_t iw, const intptr_t ih,
// const intptr_t x, const intptr_t y,
// pixel *dst, const ptrdiff_t dst_stride,
// const pixel *ref, const ptrdiff_t ref_stride)
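// Clamps the source block into the valid image area and replicates the
// edge pixels into the left/right/top/bottom extensions of dst.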
function emu_edge_8bpc_neon, export=1
ldp x8, x9, [sp]
// ref += iclip(y, 0, ih - 1) * PXSTRIDE(ref_stride)
// ref += iclip(x, 0, iw - 1)
sub x12, x3, #1 // ih - 1
cmp x5, x3
sub x13, x2, #1 // iw - 1
csel x12, x12, x5, ge // min(y, ih - 1)
cmp x4, x2
bic x12, x12, x12, asr #63 // max(min(y, ih - 1), 0)
csel x13, x13, x4, ge // min(x, iw - 1)
bic x13, x13, x13, asr #63 // max(min(x, iw - 1), 0)
madd x8, x12, x9, x8 // ref += iclip() * stride
add x8, x8, x13 // ref += iclip()
// bottom_ext = iclip(y + bh - ih, 0, bh - 1)
// top_ext = iclip(-y, 0, bh - 1)
add x10, x5, x1 // y + bh
neg x5, x5 // -y
sub x10, x10, x3 // y + bh - ih
sub x12, x1, #1 // bh - 1
cmp x10, x1
bic x5, x5, x5, asr #63 // max(-y, 0)
csel x10, x10, x12, lt // min(y + bh - ih, bh-1)
cmp x5, x1
bic x10, x10, x10, asr #63 // max(min(y + bh - ih, bh-1), 0)
csel x5, x5, x12, lt // min(max(-y, 0), bh-1)
// right_ext = iclip(x + bw - iw, 0, bw - 1)
// left_ext = iclip(-x, 0, bw - 1)
add x11, x4, x0 // x + bw
neg x4, x4 // -x
sub x11, x11, x2 // x + bw - iw
sub x13, x0, #1 // bw - 1
cmp x11, x0
bic x4, x4, x4, asr #63 // max(-x, 0)
csel x11, x11, x13, lt // min(x + bw - iw, bw-1)
cmp x4, x0
bic x11, x11, x11, asr #63 // max(min(x + bw - iw, bw-1), 0)
csel x4, x4, x13, lt // min(max(-x, 0), bw - 1)
// center_h = bh - top_ext - bottom_ext
// dst += top_ext * PXSTRIDE(dst_stride)
// center_w = bw - left_ext - right_ext
sub x1, x1, x5 // bh - top_ext
madd x6, x5, x7, x6
sub x2, x0, x4 // bw - left_ext
sub x1, x1, x10 // center_h = bh - top_ext - bottom_ext
sub x2, x2, x11 // center_w = bw - left_ext - right_ext
mov x14, x6 // backup of dst
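// v_loop emits one horizontal band: optionally replicate the leftmost
// source pixel into the left extension, copy center_w pixels, optionally
// replicate the rightmost pixel into the right extension, for center_h rows.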
.macro v_loop need_left, need_right
0:
.if \need_left
ld1r {v0.16b}, [x8]
mov x12, x6 // out = dst
mov x3, x4
1:
subs x3, x3, #16
st1 {v0.16b}, [x12], #16
b.gt 1b
.endif
mov x13, x8
add x12, x6, x4 // out = dst + left_ext
mov x3, x2
1:
ld1 {v0.16b, v1.16b}, [x13], #32
subs x3, x3, #32
st1 {v0.16b, v1.16b}, [x12], #32
b.gt 1b
.if \need_right
add x3, x8, x2 // in + center_w
sub x3, x3, #1 // in + center_w - 1
add x12, x6, x4 // dst + left_ext
ld1r {v0.16b}, [x3]
add x12, x12, x2 // out = dst + left_ext + center_w
mov x3, x11
1:
subs x3, x3, #16
st1 {v0.16b}, [x12], #16
b.gt 1b
.endif
subs x1, x1, #1 // center_h--
add x6, x6, x7
add x8, x8, x9
b.gt 0b
.endm
cbz x4, 2f
// need_left
cbz x11, 3f
// need_left + need_right
v_loop 1, 1
b 5f
2:
// !need_left
cbz x11, 4f
// !need_left + need_right
v_loop 0, 1
b 5f
3:
// need_left + !need_right
v_loop 1, 0
b 5f
4:
// !need_left + !need_right
v_loop 0, 0
5:
cbz x10, 3f
// need_bottom
sub x8, x6, x7 // ref = dst - stride
mov x4, x0
1:
ld1 {v0.16b, v1.16b}, [x8], #32
mov x3, x10
2:
subs x3, x3, #1
st1 {v0.16b, v1.16b}, [x6], x7
b.gt 2b
msub x6, x7, x10, x6 // dst -= bottom_ext * stride
subs x4, x4, #32 // bw -= 32
add x6, x6, #32 // dst += 32
b.gt 1b
3:
cbz x5, 3f
// need_top
msub x6, x7, x5, x14 // dst = stored_dst - top_ext * stride
1:
ld1 {v0.16b, v1.16b}, [x14], #32
mov x3, x5
2:
subs x3, x3, #1
st1 {v0.16b, v1.16b}, [x6], x7
b.gt 2b
msub x6, x7, x5, x6 // dst -= top_ext * stride
subs x0, x0, #32 // bw -= 32
add x6, x6, #32 // dst += 32
b.gt 1b
3:
ret
endfunc
Admenri/urge | 30,148 | third_party/dav1d/src/arm/64/looprestoration_tmpl.S
/*
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2018, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm.S"
#define FILTER_OUT_STRIDE 384
.macro sgr_funcs bpc
// void dav1d_sgr_finish_filter1_2rows_Xbpc_neon(int16_t *tmp,
// const pixel *src,
// const ptrdiff_t src_stride,
// const int32_t **a,
// const int16_t **b,
// const int w, const int h);
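// Roughly: each output sample is (B - A*src), rounded and shifted down
// by 9, where A is a 3x3 weighted sum of the int16_t *b rows and B the
// matching sum of the int32_t *a rows; the centre and edge-adjacent
// samples get weight 4 and the corners weight 3. Two output rows are
// produced per call.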
function sgr_finish_filter1_2rows_\bpc\()bpc_neon, export=1
stp d8, d9, [sp, #-0x40]!
stp d10, d11, [sp, #0x10]
stp d12, d13, [sp, #0x20]
stp d14, d15, [sp, #0x30]
ldp x7, x8, [x3]
ldp x9, x3, [x3, #16]
ldp x10, x11, [x4]
ldp x12, x4, [x4, #16]
mov x13, #FILTER_OUT_STRIDE
cmp w6, #1
add x2, x1, x2 // src + stride
csel x2, x1, x2, le // if (h <= 1) x2 = x1
add x13, x0, x13, lsl #1
movi v30.8h, #3
movi v31.4s, #3
1:
ld1 {v0.8h, v1.8h}, [x10], #32
ld1 {v2.8h, v3.8h}, [x11], #32
ld1 {v4.8h, v5.8h}, [x12], #32
ld1 {v6.8h, v7.8h}, [x4], #32
ld1 {v16.4s, v17.4s, v18.4s}, [x7], #48
ld1 {v19.4s, v20.4s, v21.4s}, [x8], #48
ld1 {v22.4s, v23.4s, v24.4s}, [x9], #48
ld1 {v25.4s, v26.4s, v27.4s}, [x3], #48
2:
ext v8.16b, v0.16b, v1.16b, #2 // [0][1]
ext v9.16b, v2.16b, v3.16b, #2 // [1][1]
ext v10.16b, v4.16b, v5.16b, #2 // [2][1]
ext v11.16b, v0.16b, v1.16b, #4 // [0][2]
ext v12.16b, v2.16b, v3.16b, #4 // [1][2]
ext v13.16b, v4.16b, v5.16b, #4 // [2][2]
add v14.8h, v2.8h, v8.8h // [1][0] + [0][1]
add v15.8h, v9.8h, v10.8h // [1][1] + [2][1]
add v28.8h, v0.8h, v11.8h // [0][0] + [0][2]
add v14.8h, v14.8h, v12.8h // () + [1][2]
add v29.8h, v4.8h, v13.8h // [2][0] + [2][2]
ext v8.16b, v6.16b, v7.16b, #2 // [3][1]
ext v11.16b, v6.16b, v7.16b, #4 // [3][2]
add v14.8h, v14.8h, v15.8h // mid
add v15.8h, v28.8h, v29.8h // corners
add v28.8h, v4.8h, v9.8h // [2][0] + [1][1]
add v29.8h, v10.8h, v8.8h // [2][1] + [3][1]
add v2.8h, v2.8h, v12.8h // [1][0] + [1][2]
add v28.8h, v28.8h, v13.8h // () + [2][2]
add v4.8h, v6.8h, v11.8h // [3][0] + [3][2]
add v0.8h, v28.8h, v29.8h // mid
add v2.8h, v2.8h, v4.8h // corners
shl v4.8h, v14.8h, #2
mla v4.8h, v15.8h, v30.8h // * 3 -> a
shl v0.8h, v0.8h, #2
mla v0.8h, v2.8h, v30.8h // * 3 -> a
ext v8.16b, v16.16b, v17.16b, #4 // [0][1]
ext v9.16b, v17.16b, v18.16b, #4
ext v10.16b, v16.16b, v17.16b, #8 // [0][2]
ext v11.16b, v17.16b, v18.16b, #8
ext v12.16b, v19.16b, v20.16b, #4 // [1][1]
ext v13.16b, v20.16b, v21.16b, #4
add v8.4s, v8.4s, v19.4s // [0][1] + [1][0]
add v9.4s, v9.4s, v20.4s
add v16.4s, v16.4s, v10.4s // [0][0] + [0][2]
add v17.4s, v17.4s, v11.4s
ext v14.16b, v19.16b, v20.16b, #8 // [1][2]
ext v15.16b, v20.16b, v21.16b, #8
add v16.4s, v16.4s, v22.4s // () + [2][0]
add v17.4s, v17.4s, v23.4s
add v28.4s, v12.4s, v14.4s // [1][1] + [1][2]
add v29.4s, v13.4s, v15.4s
ext v10.16b, v22.16b, v23.16b, #4 // [2][1]
ext v11.16b, v23.16b, v24.16b, #4
add v8.4s, v8.4s, v28.4s // mid (incomplete)
add v9.4s, v9.4s, v29.4s
add v19.4s, v19.4s, v14.4s // [1][0] + [1][2]
add v20.4s, v20.4s, v15.4s
add v14.4s, v22.4s, v12.4s // [2][0] + [1][1]
add v15.4s, v23.4s, v13.4s
ext v12.16b, v22.16b, v23.16b, #8 // [2][2]
ext v13.16b, v23.16b, v24.16b, #8
ext v28.16b, v25.16b, v26.16b, #4 // [3][1]
ext v29.16b, v26.16b, v27.16b, #4
add v8.4s, v8.4s, v10.4s // () + [2][1] = mid
add v9.4s, v9.4s, v11.4s
add v14.4s, v14.4s, v10.4s // () + [2][1]
add v15.4s, v15.4s, v11.4s
ext v10.16b, v25.16b, v26.16b, #8 // [3][2]
ext v11.16b, v26.16b, v27.16b, #8
add v16.4s, v16.4s, v12.4s // () + [2][2] = corner
add v17.4s, v17.4s, v13.4s
add v12.4s, v12.4s, v28.4s // [2][2] + [3][1]
add v13.4s, v13.4s, v29.4s
add v25.4s, v25.4s, v10.4s // [3][0] + [3][2]
add v26.4s, v26.4s, v11.4s
add v14.4s, v14.4s, v12.4s // mid
add v15.4s, v15.4s, v13.4s
add v19.4s, v19.4s, v25.4s // corner
add v20.4s, v20.4s, v26.4s
.if \bpc == 8
ld1 {v25.8b}, [x1], #8 // src
ld1 {v26.8b}, [x2], #8
.else
ld1 {v25.8h}, [x1], #16 // src
ld1 {v26.8h}, [x2], #16
.endif
shl v8.4s, v8.4s, #2
shl v9.4s, v9.4s, #2
mla v8.4s, v16.4s, v31.4s // * 3 -> b
mla v9.4s, v17.4s, v31.4s
.if \bpc == 8
uxtl v25.8h, v25.8b // src
uxtl v26.8h, v26.8b
.endif
shl v14.4s, v14.4s, #2
shl v15.4s, v15.4s, #2
mla v14.4s, v19.4s, v31.4s // * 3 -> b
mla v15.4s, v20.4s, v31.4s
umlsl v8.4s, v4.4h, v25.4h // b - a * src
umlsl2 v9.4s, v4.8h, v25.8h
umlsl v14.4s, v0.4h, v26.4h // b - a * src
umlsl2 v15.4s, v0.8h, v26.8h
mov v0.16b, v1.16b
rshrn v8.4h, v8.4s, #9
rshrn2 v8.8h, v9.4s, #9
mov v2.16b, v3.16b
rshrn v14.4h, v14.4s, #9
rshrn2 v14.8h, v15.4s, #9
subs w5, w5, #8
mov v4.16b, v5.16b
st1 {v8.8h}, [x0], #16
mov v6.16b, v7.16b
st1 {v14.8h}, [x13], #16
b.le 3f
mov v16.16b, v18.16b
mov v19.16b, v21.16b
mov v22.16b, v24.16b
mov v25.16b, v27.16b
ld1 {v1.8h}, [x10], #16
ld1 {v3.8h}, [x11], #16
ld1 {v5.8h}, [x12], #16
ld1 {v7.8h}, [x4], #16
ld1 {v17.4s, v18.4s}, [x7], #32
ld1 {v20.4s, v21.4s}, [x8], #32
ld1 {v23.4s, v24.4s}, [x9], #32
ld1 {v26.4s, v27.4s}, [x3], #32
b 2b
3:
ldp d14, d15, [sp, #0x30]
ldp d12, d13, [sp, #0x20]
ldp d10, d11, [sp, #0x10]
ldp d8, d9, [sp], 0x40
ret
endfunc
// void dav1d_sgr_finish_weighted1_Xbpc_neon(pixel *dst,
// const int32_t **a, const int16_t **b,
// const int w, const int w1,
// const int bitdepth_max);
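// Same 3x3 weighting as finish_filter1, but for a single row, and the
// result is immediately weighted by w1 (>> 11), added to the source pixel
// with a saturating add and clamped to the bitdepth.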
function sgr_finish_weighted1_\bpc\()bpc_neon, export=1
ldp x7, x8, [x1]
ldr x1, [x1, #16]
ldp x9, x10, [x2]
ldr x2, [x2, #16]
dup v31.8h, w4
dup v30.8h, w5
movi v6.8h, #3
movi v7.4s, #3
1:
ld1 {v0.8h, v1.8h}, [x9], #32
ld1 {v2.8h, v3.8h}, [x10], #32
ld1 {v4.8h, v5.8h}, [x2], #32
ld1 {v16.4s, v17.4s, v18.4s}, [x7], #48
ld1 {v19.4s, v20.4s, v21.4s}, [x8], #48
ld1 {v22.4s, v23.4s, v24.4s}, [x1], #48
2:
ext v25.16b, v0.16b, v1.16b, #2 // -stride
ext v26.16b, v2.16b, v3.16b, #2 // 0
ext v27.16b, v4.16b, v5.16b, #2 // +stride
ext v28.16b, v0.16b, v1.16b, #4 // +1-stride
ext v29.16b, v2.16b, v3.16b, #4 // +1
add v2.8h, v2.8h, v25.8h // -1, -stride
ext v25.16b, v4.16b, v5.16b, #4 // +1+stride
add v26.8h, v26.8h, v27.8h // 0, +stride
add v0.8h, v0.8h, v28.8h // -1-stride, +1-stride
add v2.8h, v2.8h, v26.8h
add v4.8h, v4.8h, v25.8h // -1+stride, +1+stride
add v2.8h, v2.8h, v29.8h // +1
add v0.8h, v0.8h, v4.8h
ext v25.16b, v16.16b, v17.16b, #4 // -stride
ext v26.16b, v17.16b, v18.16b, #4
shl v2.8h, v2.8h, #2
ext v27.16b, v16.16b, v17.16b, #8 // +1-stride
ext v28.16b, v17.16b, v18.16b, #8
ext v29.16b, v19.16b, v20.16b, #4 // 0
ext v4.16b, v20.16b, v21.16b, #4
mla v2.8h, v0.8h, v6.8h // * 3 -> a
add v25.4s, v25.4s, v19.4s // -stride, -1
add v26.4s, v26.4s, v20.4s
add v16.4s, v16.4s, v27.4s // -1-stride, +1-stride
add v17.4s, v17.4s, v28.4s
ext v27.16b, v19.16b, v20.16b, #8 // +1
ext v28.16b, v20.16b, v21.16b, #8
add v16.4s, v16.4s, v22.4s // -1+stride
add v17.4s, v17.4s, v23.4s
add v29.4s, v29.4s, v27.4s // 0, +1
add v4.4s, v4.4s, v28.4s
add v25.4s, v25.4s, v29.4s
add v26.4s, v26.4s, v4.4s
ext v27.16b, v22.16b, v23.16b, #4 // +stride
ext v28.16b, v23.16b, v24.16b, #4
ext v29.16b, v22.16b, v23.16b, #8 // +1+stride
ext v4.16b, v23.16b, v24.16b, #8
.if \bpc == 8
ld1 {v19.8b}, [x0] // src
.else
ld1 {v19.8h}, [x0] // src
.endif
add v25.4s, v25.4s, v27.4s // +stride
add v26.4s, v26.4s, v28.4s
add v16.4s, v16.4s, v29.4s // +1+stride
add v17.4s, v17.4s, v4.4s
shl v25.4s, v25.4s, #2
shl v26.4s, v26.4s, #2
mla v25.4s, v16.4s, v7.4s // * 3 -> b
mla v26.4s, v17.4s, v7.4s
.if \bpc == 8
uxtl v19.8h, v19.8b // src
.endif
mov v0.16b, v1.16b
umlsl v25.4s, v2.4h, v19.4h // b - a * src
umlsl2 v26.4s, v2.8h, v19.8h
mov v2.16b, v3.16b
rshrn v25.4h, v25.4s, #9
rshrn2 v25.8h, v26.4s, #9
subs w3, w3, #8
// weighted1
mov v4.16b, v5.16b
ld1 {v1.8h}, [x9], #16
ld1 {v3.8h}, [x10], #16
smull v26.4s, v25.4h, v31.4h // v = t1 * w1
smull2 v27.4s, v25.8h, v31.8h
ld1 {v5.8h}, [x2], #16
rshrn v26.4h, v26.4s, #11
rshrn2 v26.8h, v27.4s, #11
usqadd v19.8h, v26.8h
.if \bpc == 8
mov v16.16b, v18.16b
sqxtun v26.8b, v19.8h
mov v19.16b, v21.16b
mov v22.16b, v24.16b
st1 {v26.8b}, [x0], #8
.else
mov v16.16b, v18.16b
umin v26.8h, v19.8h, v30.8h
mov v19.16b, v21.16b
mov v22.16b, v24.16b
st1 {v26.8h}, [x0], #16
.endif
b.le 3f
ld1 {v17.4s, v18.4s}, [x7], #32
ld1 {v20.4s, v21.4s}, [x8], #32
ld1 {v23.4s, v24.4s}, [x1], #32
b 2b
3:
ret
endfunc
// void dav1d_sgr_finish_filter2_2rows_Xbpc_neon(int16_t *tmp,
// const pixel *src,
// const ptrdiff_t stride,
// const int32_t **a,
// const int16_t **b,
// const int w, const int h);
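// 5/6-weighted box filter: the first output row combines the rows above
// and below (diagonals * 5 + vertical neighbours * 6, >> 9); the second
// output row is formed from the lower row alone (same 5/6 weights, >> 8).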
function sgr_finish_filter2_2rows_\bpc\()bpc_neon, export=1
stp d8, d9, [sp, #-0x40]!
stp d10, d11, [sp, #0x10]
stp d12, d13, [sp, #0x20]
stp d14, d15, [sp, #0x30]
ldp x3, x7, [x3]
ldp x4, x8, [x4]
mov x10, #FILTER_OUT_STRIDE
cmp w6, #1
add x2, x1, x2 // src + stride
csel x2, x1, x2, le // if (h <= 1) x2 = x1
add x10, x0, x10, lsl #1
movi v4.8h, #5
movi v5.4s, #5
movi v6.8h, #6
movi v7.4s, #6
1:
ld1 {v0.8h, v1.8h}, [x4], #32
ld1 {v2.8h, v3.8h}, [x8], #32
ld1 {v16.4s, v17.4s, v18.4s}, [x3], #48
ld1 {v19.4s, v20.4s, v21.4s}, [x7], #48
2:
ext v24.16b, v0.16b, v1.16b, #4 // +1-stride
ext v25.16b, v2.16b, v3.16b, #4 // +1+stride
ext v22.16b, v0.16b, v1.16b, #2 // -stride
ext v23.16b, v2.16b, v3.16b, #2 // +stride
add v0.8h, v0.8h, v24.8h // -1-stride, +1-stride
add v25.8h, v2.8h, v25.8h // -1+stride, +1+stride
add v2.8h, v22.8h, v23.8h // -stride, +stride
add v0.8h, v0.8h, v25.8h
mul v8.8h, v25.8h, v4.8h // * 5
mla v8.8h, v23.8h, v6.8h // * 6
ext v22.16b, v16.16b, v17.16b, #4 // -stride
ext v23.16b, v17.16b, v18.16b, #4
ext v24.16b, v19.16b, v20.16b, #4 // +stride
ext v25.16b, v20.16b, v21.16b, #4
ext v26.16b, v16.16b, v17.16b, #8 // +1-stride
ext v27.16b, v17.16b, v18.16b, #8
ext v28.16b, v19.16b, v20.16b, #8 // +1+stride
ext v29.16b, v20.16b, v21.16b, #8
mul v0.8h, v0.8h, v4.8h // * 5
mla v0.8h, v2.8h, v6.8h // * 6
.if \bpc == 8
ld1 {v31.8b}, [x1], #8
ld1 {v30.8b}, [x2], #8
.else
ld1 {v31.8h}, [x1], #16
ld1 {v30.8h}, [x2], #16
.endif
add v16.4s, v16.4s, v26.4s // -1-stride, +1-stride
add v17.4s, v17.4s, v27.4s
add v19.4s, v19.4s, v28.4s // -1+stride, +1+stride
add v20.4s, v20.4s, v29.4s
add v16.4s, v16.4s, v19.4s
add v17.4s, v17.4s, v20.4s
mul v9.4s, v19.4s, v5.4s // * 5
mla v9.4s, v24.4s, v7.4s // * 6
mul v10.4s, v20.4s, v5.4s // * 5
mla v10.4s, v25.4s, v7.4s // * 6
add v22.4s, v22.4s, v24.4s // -stride, +stride
add v23.4s, v23.4s, v25.4s
// This is, surprisingly, faster than other variants where the
// mul+mla pairs are further apart, on Cortex A53.
mul v16.4s, v16.4s, v5.4s // * 5
mla v16.4s, v22.4s, v7.4s // * 6
mul v17.4s, v17.4s, v5.4s // * 5
mla v17.4s, v23.4s, v7.4s // * 6
.if \bpc == 8
uxtl v31.8h, v31.8b
uxtl v30.8h, v30.8b
.endif
umlsl v16.4s, v0.4h, v31.4h // b - a * src
umlsl2 v17.4s, v0.8h, v31.8h
umlsl v9.4s, v8.4h, v30.4h // b - a * src
umlsl2 v10.4s, v8.8h, v30.8h
mov v0.16b, v1.16b
rshrn v16.4h, v16.4s, #9
rshrn2 v16.8h, v17.4s, #9
rshrn v9.4h, v9.4s, #8
rshrn2 v9.8h, v10.4s, #8
subs w5, w5, #8
mov v2.16b, v3.16b
st1 {v16.8h}, [x0], #16
st1 {v9.8h}, [x10], #16
b.le 9f
mov v16.16b, v18.16b
mov v19.16b, v21.16b
ld1 {v1.8h}, [x4], #16
ld1 {v3.8h}, [x8], #16
ld1 {v17.4s, v18.4s}, [x3], #32
ld1 {v20.4s, v21.4s}, [x7], #32
b 2b
9:
ldp d14, d15, [sp, #0x30]
ldp d12, d13, [sp, #0x20]
ldp d10, d11, [sp, #0x10]
ldp d8, d9, [sp], 0x40
ret
endfunc
// void dav1d_sgr_finish_weighted2_Xbpc_neon(pixel *dst, const ptrdiff_t stride,
// const int32_t **a,
// const int16_t **b,
// const int w, const int h,
// const int w1,
// const int bitdepth_max);
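// As finish_filter2_2rows, but the two filtered rows are immediately
// weighted by w1 (>> 11), added to the source pixels with a saturating
// add and clamped to the bitdepth.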
function sgr_finish_weighted2_\bpc\()bpc_neon, export=1
stp d8, d9, [sp, #-0x30]!
str d10, [sp, #0x10]
stp d14, d15, [sp, #0x20]
dup v14.8h, w6
dup v15.8h, w7
ldp x2, x7, [x2]
ldp x3, x8, [x3]
cmp w5, #1
add x1, x0, x1 // src + stride
// if (h <= 1), set the pointer to the second row to any dummy buffer
// we can clobber (x2 in this case)
csel x1, x2, x1, le
movi v4.8h, #5
movi v5.4s, #5
movi v6.8h, #6
movi v7.4s, #6
1:
ld1 {v0.8h, v1.8h}, [x3], #32
ld1 {v2.8h, v3.8h}, [x8], #32
ld1 {v16.4s, v17.4s, v18.4s}, [x2], #48
ld1 {v19.4s, v20.4s, v21.4s}, [x7], #48
2:
ext v24.16b, v0.16b, v1.16b, #4 // +1-stride
ext v25.16b, v2.16b, v3.16b, #4 // +1+stride
ext v22.16b, v0.16b, v1.16b, #2 // -stride
ext v23.16b, v2.16b, v3.16b, #2 // +stride
add v0.8h, v0.8h, v24.8h // -1-stride, +1-stride
add v25.8h, v2.8h, v25.8h // -1+stride, +1+stride
add v2.8h, v22.8h, v23.8h // -stride, +stride
add v0.8h, v0.8h, v25.8h
mul v8.8h, v25.8h, v4.8h // * 5
mla v8.8h, v23.8h, v6.8h // * 6
ext v22.16b, v16.16b, v17.16b, #4 // -stride
ext v23.16b, v17.16b, v18.16b, #4
ext v24.16b, v19.16b, v20.16b, #4 // +stride
ext v25.16b, v20.16b, v21.16b, #4
ext v26.16b, v16.16b, v17.16b, #8 // +1-stride
ext v27.16b, v17.16b, v18.16b, #8
ext v28.16b, v19.16b, v20.16b, #8 // +1+stride
ext v29.16b, v20.16b, v21.16b, #8
mul v0.8h, v0.8h, v4.8h // * 5
mla v0.8h, v2.8h, v6.8h // * 6
.if \bpc == 8
ld1 {v31.8b}, [x0]
ld1 {v30.8b}, [x1]
.else
ld1 {v31.8h}, [x0]
ld1 {v30.8h}, [x1]
.endif
add v16.4s, v16.4s, v26.4s // -1-stride, +1-stride
add v17.4s, v17.4s, v27.4s
add v19.4s, v19.4s, v28.4s // -1+stride, +1+stride
add v20.4s, v20.4s, v29.4s
add v16.4s, v16.4s, v19.4s
add v17.4s, v17.4s, v20.4s
mul v9.4s, v19.4s, v5.4s // * 5
mla v9.4s, v24.4s, v7.4s // * 6
mul v10.4s, v20.4s, v5.4s // * 5
mla v10.4s, v25.4s, v7.4s // * 6
add v22.4s, v22.4s, v24.4s // -stride, +stride
add v23.4s, v23.4s, v25.4s
// This is, surprisingly, faster than other variants where the
// mul+mla pairs are further apart, on Cortex A53.
mul v16.4s, v16.4s, v5.4s // * 5
mla v16.4s, v22.4s, v7.4s // * 6
mul v17.4s, v17.4s, v5.4s // * 5
mla v17.4s, v23.4s, v7.4s // * 6
.if \bpc == 8
uxtl v31.8h, v31.8b
uxtl v30.8h, v30.8b
.endif
umlsl v16.4s, v0.4h, v31.4h // b - a * src
umlsl2 v17.4s, v0.8h, v31.8h
umlsl v9.4s, v8.4h, v30.4h // b - a * src
umlsl2 v10.4s, v8.8h, v30.8h
mov v0.16b, v1.16b
rshrn v16.4h, v16.4s, #9
rshrn2 v16.8h, v17.4s, #9
rshrn v9.4h, v9.4s, #8
rshrn2 v9.8h, v10.4s, #8
subs w4, w4, #8
// weighted1
mov v2.16b, v3.16b
ld1 {v1.8h}, [x3], #16
ld1 {v3.8h}, [x8], #16
smull v22.4s, v16.4h, v14.4h // v
smull2 v23.4s, v16.8h, v14.8h
mov v16.16b, v18.16b
smull v24.4s, v9.4h, v14.4h
smull2 v25.4s, v9.8h, v14.8h
mov v19.16b, v21.16b
rshrn v22.4h, v22.4s, #11
rshrn2 v22.8h, v23.4s, #11
rshrn v23.4h, v24.4s, #11
rshrn2 v23.8h, v25.4s, #11
usqadd v31.8h, v22.8h
usqadd v30.8h, v23.8h
.if \bpc == 8
sqxtun v22.8b, v31.8h
sqxtun v23.8b, v30.8h
st1 {v22.8b}, [x0], #8
st1 {v23.8b}, [x1], #8
.else
umin v22.8h, v31.8h, v15.8h
umin v23.8h, v30.8h, v15.8h
st1 {v22.8h}, [x0], #16
st1 {v23.8h}, [x1], #16
.endif
b.le 3f
ld1 {v17.4s, v18.4s}, [x2], #32
ld1 {v20.4s, v21.4s}, [x7], #32
b 2b
3:
ldp d14, d15, [sp, #0x20]
ldr d10, [sp, #0x10]
ldp d8, d9, [sp], 0x30
ret
endfunc
// void dav1d_sgr_weighted2_Xbpc_neon(pixel *dst, const ptrdiff_t stride,
// const int16_t *t1, const int16_t *t2,
// const int w, const int h,
// const int16_t wt[2], const int bitdepth_max);
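// dst = clip(src + ((t1*wt[0] + t2*wt[1]) rounded >> 11)); two rows are
// processed per iteration when h >= 2, with a single-row tail for odd
// heights.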
function sgr_weighted2_\bpc\()bpc_neon, export=1
cmp w5, #2
add x10, x0, x1
add x12, x2, #2*FILTER_OUT_STRIDE
add x13, x3, #2*FILTER_OUT_STRIDE
ld2r {v30.8h, v31.8h}, [x6] // wt[0], wt[1]
.if \bpc == 16
dup v29.8h, w7
.endif
mov x8, #4*FILTER_OUT_STRIDE
lsl x1, x1, #1
add w9, w4, #7
bic x9, x9, #7 // Aligned width
.if \bpc == 8
sub x1, x1, x9
.else
sub x1, x1, x9, lsl #1
.endif
sub x8, x8, x9, lsl #1
mov w9, w4
b.lt 2f
1:
.if \bpc == 8
ld1 {v0.8b}, [x0]
ld1 {v16.8b}, [x10]
.else
ld1 {v0.8h}, [x0]
ld1 {v16.8h}, [x10]
.endif
ld1 {v1.8h}, [x2], #16
ld1 {v17.8h}, [x12], #16
ld1 {v2.8h}, [x3], #16
ld1 {v18.8h}, [x13], #16
subs w4, w4, #8
.if \bpc == 8
uxtl v0.8h, v0.8b
uxtl v16.8h, v16.8b
.endif
smull v3.4s, v1.4h, v30.4h // wt[0] * t1
smlal v3.4s, v2.4h, v31.4h // wt[1] * t2
smull2 v4.4s, v1.8h, v30.8h // wt[0] * t1
smlal2 v4.4s, v2.8h, v31.8h // wt[1] * t2
smull v19.4s, v17.4h, v30.4h // wt[0] * t1
smlal v19.4s, v18.4h, v31.4h // wt[1] * t2
smull2 v20.4s, v17.8h, v30.8h // wt[0] * t1
smlal2 v20.4s, v18.8h, v31.8h // wt[1] * t2
rshrn v3.4h, v3.4s, #11
rshrn2 v3.8h, v4.4s, #11
rshrn v19.4h, v19.4s, #11
rshrn2 v19.8h, v20.4s, #11
usqadd v0.8h, v3.8h
usqadd v16.8h, v19.8h
.if \bpc == 8
sqxtun v3.8b, v0.8h
sqxtun v19.8b, v16.8h
st1 {v3.8b}, [x0], #8
st1 {v19.8b}, [x10], #8
.else
umin v3.8h, v0.8h, v29.8h
umin v19.8h, v16.8h, v29.8h
st1 {v3.8h}, [x0], #16
st1 {v19.8h}, [x10], #16
.endif
b.gt 1b
subs w5, w5, #2
cmp w5, #1
b.lt 0f
mov w4, w9
add x0, x0, x1
add x10, x10, x1
add x2, x2, x8
add x12, x12, x8
add x3, x3, x8
add x13, x13, x8
b.eq 2f
b 1b
2:
.if \bpc == 8
ld1 {v0.8b}, [x0]
.else
ld1 {v0.8h}, [x0]
.endif
ld1 {v1.8h}, [x2], #16
ld1 {v2.8h}, [x3], #16
subs w4, w4, #8
.if \bpc == 8
uxtl v0.8h, v0.8b
.endif
smull v3.4s, v1.4h, v30.4h // wt[0] * t1
smlal v3.4s, v2.4h, v31.4h // wt[1] * t2
smull2 v4.4s, v1.8h, v30.8h // wt[0] * t1
smlal2 v4.4s, v2.8h, v31.8h // wt[1] * t2
rshrn v3.4h, v3.4s, #11
rshrn2 v3.8h, v4.4s, #11
usqadd v0.8h, v3.8h
.if \bpc == 8
sqxtun v3.8b, v0.8h
st1 {v3.8b}, [x0], #8
.else
umin v3.8h, v0.8h, v29.8h
st1 {v3.8h}, [x0], #16
.endif
b.gt 2b
0:
ret
endfunc
.endm
| Admenri/urge | 20,654 | third_party/dav1d/src/arm/64/cdef_tmpl.S |
/*
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2020, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm.S"
#include "util.S"
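// Per-direction tap offset table (in elements of the padded tmp buffer with
// the given stride); each entry holds the offsets of the two taps.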
.macro dir_table w, stride
const directions\w
.byte -1 * \stride + 1, -2 * \stride + 2
.byte 0 * \stride + 1, -1 * \stride + 2
.byte 0 * \stride + 1, 0 * \stride + 2
.byte 0 * \stride + 1, 1 * \stride + 2
.byte 1 * \stride + 1, 2 * \stride + 2
.byte 1 * \stride + 0, 2 * \stride + 1
.byte 1 * \stride + 0, 2 * \stride + 0
.byte 1 * \stride + 0, 2 * \stride - 1
// Repeated, to avoid having to mask the index with & 7
.byte -1 * \stride + 1, -2 * \stride + 2
.byte 0 * \stride + 1, -1 * \stride + 2
.byte 0 * \stride + 1, 0 * \stride + 2
.byte 0 * \stride + 1, 1 * \stride + 2
.byte 1 * \stride + 1, 2 * \stride + 2
.byte 1 * \stride + 0, 2 * \stride + 1
endconst
.endm
.macro tables
dir_table 8, 16
dir_table 4, 8
const pri_taps
.byte 4, 2, 3, 3
endconst
.endm
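// Load the pair of tap pixels at +off and -off from the current position into
// \d1/\d2; for \w == 4, two 4-pixel rows are packed into each register.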
.macro load_px d1, d2, w
.if \w == 8
add x6, x2, w9, sxtb #1 // x + off
sub x9, x2, w9, sxtb #1 // x - off
ld1 {\d1\().8h}, [x6] // p0
ld1 {\d2\().8h}, [x9] // p1
.else
add x6, x2, w9, sxtb #1 // x + off
sub x9, x2, w9, sxtb #1 // x - off
ld1 {\d1\().4h}, [x6] // p0
add x6, x6, #2*8 // += stride
ld1 {\d2\().4h}, [x9] // p1
add x9, x9, #2*8 // += stride
ld1 {\d1\().d}[1], [x6] // p0
ld1 {\d2\().d}[1], [x9] // p1
.endif
.endm
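// Accumulate one pair of taps into the running sum in v1: constrain() is
// applied to (p - px) and the result is weighted by taps[k]; with \min set,
// the min/max of the tap pixels are also tracked in v2/v3.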
.macro handle_pixel s1, s2, thresh_vec, shift, tap, min
.if \min
umin v2.8h, v2.8h, \s1\().8h
smax v3.8h, v3.8h, \s1\().8h
umin v2.8h, v2.8h, \s2\().8h
smax v3.8h, v3.8h, \s2\().8h
.endif
uabd v16.8h, v0.8h, \s1\().8h // abs(diff)
uabd v20.8h, v0.8h, \s2\().8h // abs(diff)
ushl v17.8h, v16.8h, \shift // abs(diff) >> shift
ushl v21.8h, v20.8h, \shift // abs(diff) >> shift
uqsub v17.8h, \thresh_vec, v17.8h // clip = imax(0, threshold - (abs(diff) >> shift))
uqsub v21.8h, \thresh_vec, v21.8h // clip = imax(0, threshold - (abs(diff) >> shift))
sub v18.8h, \s1\().8h, v0.8h // diff = p0 - px
sub v22.8h, \s2\().8h, v0.8h // diff = p1 - px
neg v16.8h, v17.8h // -clip
neg v20.8h, v21.8h // -clip
smin v18.8h, v18.8h, v17.8h // imin(diff, clip)
smin v22.8h, v22.8h, v21.8h // imin(diff, clip)
dup v19.8h, \tap // taps[k]
smax v18.8h, v18.8h, v16.8h // constrain() = imax(imin(diff, clip), -clip)
smax v22.8h, v22.8h, v20.8h // constrain() = imax(imin(diff, clip), -clip)
mla v1.8h, v18.8h, v19.8h // sum += taps[k] * constrain()
mla v1.8h, v22.8h, v19.8h // sum += taps[k] * constrain()
.endm
// void dav1d_cdef_filterX_Ybpc_neon(pixel *dst, ptrdiff_t dst_stride,
// const uint16_t *tmp, int pri_strength,
// int sec_strength, int dir, int damping,
// int h, size_t edges);
.macro filter_func w, bpc, pri, sec, min, suffix
function cdef_filter\w\suffix\()_\bpc\()bpc_neon
.if \bpc == 8
ldr w8, [sp] // edges
cmp w8, #0xf
b.eq cdef_filter\w\suffix\()_edged_8bpc_neon
.endif
.if \pri
.if \bpc == 16
ldr w9, [sp, #8] // bitdepth_max
clz w9, w9
sub w9, w9, #24 // -bitdepth_min_8
neg w9, w9 // bitdepth_min_8
.endif
movrel x8, pri_taps
.if \bpc == 16
lsr w9, w3, w9 // pri_strength >> bitdepth_min_8
and w9, w9, #1 // (pri_strength >> bitdepth_min_8) & 1
.else
and w9, w3, #1
.endif
add x8, x8, w9, uxtw #1
.endif
movrel x9, directions\w
add x5, x9, w5, uxtw #1
movi v30.4h, #15
dup v28.4h, w6 // damping
.if \pri
dup v25.8h, w3 // threshold
.endif
.if \sec
dup v27.8h, w4 // threshold
.endif
trn1 v24.4h, v25.4h, v27.4h
clz v24.4h, v24.4h // clz(threshold)
sub v24.4h, v30.4h, v24.4h // ulog2(threshold)
uqsub v24.4h, v28.4h, v24.4h // shift = imax(0, damping - ulog2(threshold))
neg v24.4h, v24.4h // -shift
.if \sec
dup v26.8h, v24.h[1]
.endif
.if \pri
dup v24.8h, v24.h[0]
.endif
1:
.if \w == 8
ld1 {v0.8h}, [x2] // px
.else
add x12, x2, #2*8
ld1 {v0.4h}, [x2] // px
ld1 {v0.d}[1], [x12] // px
.endif
movi v1.8h, #0 // sum
.if \min
mov v2.16b, v0.16b // min
mov v3.16b, v0.16b // max
.endif
// Instead of loading sec_taps 2, 1 from memory, just set it
// to 2 initially and decrease for the second round.
// It is also used as the loop counter.
mov w11, #2 // sec_taps[0]
2:
.if \pri
ldrb w9, [x5] // off1
load_px v4, v5, \w
.endif
.if \sec
add x5, x5, #4 // +2*2
ldrb w9, [x5] // off2
load_px v6, v7, \w
.endif
.if \pri
ldrb w10, [x8] // *pri_taps
handle_pixel v4, v5, v25.8h, v24.8h, w10, \min
.endif
.if \sec
add x5, x5, #8 // +2*4
ldrb w9, [x5] // off3
load_px v4, v5, \w
handle_pixel v6, v7, v27.8h, v26.8h, w11, \min
handle_pixel v4, v5, v27.8h, v26.8h, w11, \min
sub x5, x5, #11 // x5 -= 2*(2+4); x5 += 1;
.else
add x5, x5, #1 // x5 += 1
.endif
subs w11, w11, #1 // sec_tap-- (value)
.if \pri
add x8, x8, #1 // pri_taps++ (pointer)
.endif
b.ne 2b
cmlt v4.8h, v1.8h, #0 // -(sum < 0)
add v1.8h, v1.8h, v4.8h // sum - (sum < 0)
srshr v1.8h, v1.8h, #4 // (8 + sum - (sum < 0)) >> 4
add v0.8h, v0.8h, v1.8h // px + (8 + sum ...) >> 4
.if \min
smin v0.8h, v0.8h, v3.8h
smax v0.8h, v0.8h, v2.8h // iclip(px + .., min, max)
.endif
.if \bpc == 8
xtn v0.8b, v0.8h
.endif
.if \w == 8
add x2, x2, #2*16 // tmp += tmp_stride
subs w7, w7, #1 // h--
.if \bpc == 8
st1 {v0.8b}, [x0], x1
.else
st1 {v0.8h}, [x0], x1
.endif
.else
.if \bpc == 8
st1 {v0.s}[0], [x0], x1
.else
st1 {v0.d}[0], [x0], x1
.endif
add x2, x2, #2*16 // tmp += 2*tmp_stride
subs w7, w7, #2 // h -= 2
.if \bpc == 8
st1 {v0.s}[1], [x0], x1
.else
st1 {v0.d}[1], [x0], x1
.endif
.endif
// Reset pri_taps and directions back to the original point
sub x5, x5, #2
.if \pri
sub x8, x8, #2
.endif
b.gt 1b
ret
endfunc
.endm
.macro filter w, bpc
filter_func \w, \bpc, pri=1, sec=0, min=0, suffix=_pri
filter_func \w, \bpc, pri=0, sec=1, min=0, suffix=_sec
filter_func \w, \bpc, pri=1, sec=1, min=1, suffix=_pri_sec
function cdef_filter\w\()_\bpc\()bpc_neon, export=1
cbnz w3, 1f // pri_strength
b cdef_filter\w\()_sec_\bpc\()bpc_neon // only sec
1:
cbnz w4, 1f // sec_strength
b cdef_filter\w\()_pri_\bpc\()bpc_neon // only pri
1:
b cdef_filter\w\()_pri_sec_\bpc\()bpc_neon // both pri and sec
endfunc
.endm
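// Weighting factors for the direction costs in cdef_find_dir:
// div_table[i] = 840 / (i + 1); alt_fact holds the matching factors for the
// slanted ("alt") partial sums.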
const div_table
.short 840, 420, 280, 210, 168, 140, 120, 105
endconst
const alt_fact
.short 420, 210, 140, 105, 105, 105, 105, 105, 140, 210, 420, 0
endconst
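// Compute the weighted sum-of-squares cost for two of the slanted ("alt")
// directions at once, reduced to scalars in \d1/\d2.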
.macro cost_alt d1, d2, s1, s2, s3, s4
smull v22.4s, \s1\().4h, \s1\().4h // sum_alt[n]*sum_alt[n]
smull2 v23.4s, \s1\().8h, \s1\().8h
smull v24.4s, \s2\().4h, \s2\().4h
smull v25.4s, \s3\().4h, \s3\().4h // sum_alt[n]*sum_alt[n]
smull2 v26.4s, \s3\().8h, \s3\().8h
smull v27.4s, \s4\().4h, \s4\().4h
mul v22.4s, v22.4s, v29.4s // sum_alt[n]^2*fact
mla v22.4s, v23.4s, v30.4s
mla v22.4s, v24.4s, v31.4s
mul v25.4s, v25.4s, v29.4s // sum_alt[n]^2*fact
mla v25.4s, v26.4s, v30.4s
mla v25.4s, v27.4s, v31.4s
addv \d1, v22.4s // *cost_ptr
addv \d2, v25.4s // *cost_ptr
.endm
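// Compare cost[n] (in w4) against the running best, updating best_dir (w0)
// and best_cost (w1); when given, \s2 supplies cost[n+1] and \s3 preloads the
// cost for the next invocation.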
.macro find_best s1, s2, s3
.ifnb \s2
mov w5, \s2\().s[0]
.endif
cmp w4, w1 // cost[n] > best_cost
csel w0, w3, w0, gt // best_dir = n
csel w1, w4, w1, gt // best_cost = cost[n]
.ifnb \s2
add w3, w3, #1 // n++
cmp w5, w1 // cost[n] > best_cost
mov w4, \s3\().s[0]
csel w0, w3, w0, gt // best_dir = n
csel w1, w5, w1, gt // best_cost = cost[n]
add w3, w3, #1 // n++
.endif
.endm
// Steps for loading and preparing each row
.macro dir_load_step1 s1, bpc
.if \bpc == 8
ld1 {\s1\().8b}, [x0], x1
.else
ld1 {\s1\().8h}, [x0], x1
.endif
.endm
.macro dir_load_step2 s1, bpc
.if \bpc == 8
usubl \s1\().8h, \s1\().8b, v31.8b
.else
ushl \s1\().8h, \s1\().8h, v8.8h
.endif
.endm
.macro dir_load_step3 s1, bpc
// Nothing for \bpc == 8
.if \bpc != 8
sub \s1\().8h, \s1\().8h, v31.8h
.endif
.endm
// int dav1d_cdef_find_dir_Xbpc_neon(const pixel *img, const ptrdiff_t stride,
// unsigned *const var)
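// Estimate the dominant edge direction of an 8x8 block: accumulate the pixel
// sums along 8 directions, pick the one with the largest weighted sum of
// squares, and store the contrast against the orthogonal direction,
// (best_cost - cost[best_dir ^ 4]) >> 10, in *var.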
.macro find_dir bpc
function cdef_find_dir_\bpc\()bpc_neon, export=1
.if \bpc == 16
str d8, [sp, #-0x10]!
clz w3, w3 // clz(bitdepth_max)
sub w3, w3, #24 // -bitdepth_min_8
dup v8.8h, w3
.endif
sub sp, sp, #32 // cost
mov w3, #8
.if \bpc == 8
movi v31.16b, #128
.else
movi v31.8h, #128
.endif
movi v30.16b, #0
movi v1.8h, #0 // v0-v1 sum_diag[0]
movi v3.8h, #0 // v2-v3 sum_diag[1]
movi v5.8h, #0 // v4-v5 sum_hv[0-1]
movi v7.8h, #0 // v6-v7 sum_alt[0]
dir_load_step1 v26, \bpc // Setup first row early
movi v17.8h, #0 // v16-v17 sum_alt[1]
movi v18.8h, #0 // v18-v19 sum_alt[2]
dir_load_step2 v26, \bpc
movi v19.8h, #0
dir_load_step3 v26, \bpc
movi v21.8h, #0 // v20-v21 sum_alt[3]
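// Accumulate the directional projections over the 8 rows; \i is the row index.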
.irpc i, 01234567
addv h25, v26.8h // [y]
rev64 v27.8h, v26.8h
addp v28.8h, v26.8h, v30.8h // [(x >> 1)]
add v5.8h, v5.8h, v26.8h // sum_hv[1]
ext v27.16b, v27.16b, v27.16b, #8 // [-x]
rev64 v29.4h, v28.4h // [-(x >> 1)]
ins v4.h[\i], v25.h[0] // sum_hv[0]
.if \i < 6
ext v22.16b, v30.16b, v26.16b, #(16-2*(3-(\i/2)))
ext v23.16b, v26.16b, v30.16b, #(16-2*(3-(\i/2)))
add v18.8h, v18.8h, v22.8h // sum_alt[2]
add v19.4h, v19.4h, v23.4h // sum_alt[2]
.else
add v18.8h, v18.8h, v26.8h // sum_alt[2]
.endif
.if \i == 0
mov v20.16b, v26.16b // sum_alt[3]
.elseif \i == 1
add v20.8h, v20.8h, v26.8h // sum_alt[3]
.else
ext v24.16b, v30.16b, v26.16b, #(16-2*(\i/2))
ext v25.16b, v26.16b, v30.16b, #(16-2*(\i/2))
add v20.8h, v20.8h, v24.8h // sum_alt[3]
add v21.4h, v21.4h, v25.4h // sum_alt[3]
.endif
.if \i == 0
mov v0.16b, v26.16b // sum_diag[0]
dir_load_step1 v26, \bpc
mov v2.16b, v27.16b // sum_diag[1]
dir_load_step2 v26, \bpc
mov v6.16b, v28.16b // sum_alt[0]
dir_load_step3 v26, \bpc
mov v16.16b, v29.16b // sum_alt[1]
.else
ext v22.16b, v30.16b, v26.16b, #(16-2*\i)
ext v23.16b, v26.16b, v30.16b, #(16-2*\i)
ext v24.16b, v30.16b, v27.16b, #(16-2*\i)
ext v25.16b, v27.16b, v30.16b, #(16-2*\i)
.if \i != 7 // Nothing to load for the final row
dir_load_step1 v26, \bpc // Start setting up the next row early.
.endif
add v0.8h, v0.8h, v22.8h // sum_diag[0]
add v1.8h, v1.8h, v23.8h // sum_diag[0]
add v2.8h, v2.8h, v24.8h // sum_diag[1]
add v3.8h, v3.8h, v25.8h // sum_diag[1]
.if \i != 7
dir_load_step2 v26, \bpc
.endif
ext v22.16b, v30.16b, v28.16b, #(16-2*\i)
ext v23.16b, v28.16b, v30.16b, #(16-2*\i)
ext v24.16b, v30.16b, v29.16b, #(16-2*\i)
ext v25.16b, v29.16b, v30.16b, #(16-2*\i)
.if \i != 7
dir_load_step3 v26, \bpc
.endif
add v6.8h, v6.8h, v22.8h // sum_alt[0]
add v7.4h, v7.4h, v23.4h // sum_alt[0]
add v16.8h, v16.8h, v24.8h // sum_alt[1]
add v17.4h, v17.4h, v25.4h // sum_alt[1]
.endif
.endr
movi v31.4s, #105
smull v26.4s, v4.4h, v4.4h // sum_hv[0]*sum_hv[0]
smlal2 v26.4s, v4.8h, v4.8h
smull v27.4s, v5.4h, v5.4h // sum_hv[1]*sum_hv[1]
smlal2 v27.4s, v5.8h, v5.8h
mul v26.4s, v26.4s, v31.4s // cost[2] *= 105
mul v27.4s, v27.4s, v31.4s // cost[6] *= 105
addv s4, v26.4s // cost[2]
addv s5, v27.4s // cost[6]
rev64 v1.8h, v1.8h
rev64 v3.8h, v3.8h
ext v1.16b, v1.16b, v1.16b, #10 // sum_diag[0][14-n]
ext v3.16b, v3.16b, v3.16b, #10 // sum_diag[1][14-n]
str s4, [sp, #2*4] // cost[2]
str s5, [sp, #6*4] // cost[6]
movrel x4, div_table
ld1 {v31.8h}, [x4]
smull v22.4s, v0.4h, v0.4h // sum_diag[0]*sum_diag[0]
smull2 v23.4s, v0.8h, v0.8h
smlal v22.4s, v1.4h, v1.4h
smlal2 v23.4s, v1.8h, v1.8h
smull v24.4s, v2.4h, v2.4h // sum_diag[1]*sum_diag[1]
smull2 v25.4s, v2.8h, v2.8h
smlal v24.4s, v3.4h, v3.4h
smlal2 v25.4s, v3.8h, v3.8h
uxtl v30.4s, v31.4h // div_table
uxtl2 v31.4s, v31.8h
mul v22.4s, v22.4s, v30.4s // cost[0]
mla v22.4s, v23.4s, v31.4s // cost[0]
mul v24.4s, v24.4s, v30.4s // cost[4]
mla v24.4s, v25.4s, v31.4s // cost[4]
addv s0, v22.4s // cost[0]
addv s2, v24.4s // cost[4]
movrel x5, alt_fact
ld1 {v29.4h, v30.4h, v31.4h}, [x5]// div_table[2*m+1] + 105
str s0, [sp, #0*4] // cost[0]
str s2, [sp, #4*4] // cost[4]
uxtl v29.4s, v29.4h // div_table[2*m+1] + 105
uxtl v30.4s, v30.4h
uxtl v31.4s, v31.4h
cost_alt s6, s16, v6, v7, v16, v17 // cost[1], cost[3]
cost_alt s18, s20, v18, v19, v20, v21 // cost[5], cost[7]
str s6, [sp, #1*4] // cost[1]
str s16, [sp, #3*4] // cost[3]
mov w0, #0 // best_dir
mov w1, v0.s[0] // best_cost
mov w3, #1 // n
str s18, [sp, #5*4] // cost[5]
str s20, [sp, #7*4] // cost[7]
mov w4, v6.s[0]
find_best v6, v4, v16
find_best v16, v2, v18
find_best v18, v5, v20
find_best v20
eor w3, w0, #4 // best_dir ^4
ldr w4, [sp, w3, uxtw #2]
sub w1, w1, w4 // best_cost - cost[best_dir ^ 4]
lsr w1, w1, #10
str w1, [x2] // *var
add sp, sp, #32
.if \bpc == 16
ldr d8, [sp], 0x10
.endif
ret
endfunc
.endm
| Admenri/urge | 214,833 | third_party/dav1d/src/arm/64/ipred.S |
/*
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2019, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm.S"
#include "util.S"
// void ipred_dc_128_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
function ipred_dc_128_8bpc_neon, export=1
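// Dispatch on width: clz(width) - 25 gives 0 (w == 64) .. 4 (w == 4), used to
// index the relative-offset jump table.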
clz w3, w3
movrel x5, ipred_dc_128_tbl
sub w3, w3, #25
ldrsw x3, [x5, w3, uxtw #2]
movi v0.16b, #128
add x5, x5, x3
add x6, x0, x1
lsl x1, x1, #1
br x5
40:
AARCH64_VALID_JUMP_TARGET
4:
st1 {v0.s}[0], [x0], x1
st1 {v0.s}[0], [x6], x1
subs w4, w4, #4
st1 {v0.s}[0], [x0], x1
st1 {v0.s}[0], [x6], x1
b.gt 4b
ret
80:
AARCH64_VALID_JUMP_TARGET
8:
st1 {v0.8b}, [x0], x1
st1 {v0.8b}, [x6], x1
subs w4, w4, #4
st1 {v0.8b}, [x0], x1
st1 {v0.8b}, [x6], x1
b.gt 8b
ret
160:
AARCH64_VALID_JUMP_TARGET
16:
st1 {v0.16b}, [x0], x1
st1 {v0.16b}, [x6], x1
subs w4, w4, #4
st1 {v0.16b}, [x0], x1
st1 {v0.16b}, [x6], x1
b.gt 16b
ret
320:
AARCH64_VALID_JUMP_TARGET
movi v1.16b, #128
32:
st1 {v0.16b, v1.16b}, [x0], x1
st1 {v0.16b, v1.16b}, [x6], x1
subs w4, w4, #4
st1 {v0.16b, v1.16b}, [x0], x1
st1 {v0.16b, v1.16b}, [x6], x1
b.gt 32b
ret
640:
AARCH64_VALID_JUMP_TARGET
movi v1.16b, #128
movi v2.16b, #128
movi v3.16b, #128
64:
st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], x1
st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x6], x1
subs w4, w4, #4
st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], x1
st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x6], x1
b.gt 64b
ret
endfunc
jumptable ipred_dc_128_tbl
.word 640b - ipred_dc_128_tbl
.word 320b - ipred_dc_128_tbl
.word 160b - ipred_dc_128_tbl
.word 80b - ipred_dc_128_tbl
.word 40b - ipred_dc_128_tbl
endjumptable
// void ipred_v_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
function ipred_v_8bpc_neon, export=1
clz w3, w3
movrel x5, ipred_v_tbl
sub w3, w3, #25
ldrsw x3, [x5, w3, uxtw #2]
add x2, x2, #1
add x5, x5, x3
add x6, x0, x1
lsl x1, x1, #1
br x5
40:
AARCH64_VALID_JUMP_TARGET
ld1 {v0.s}[0], [x2]
4:
st1 {v0.s}[0], [x0], x1
st1 {v0.s}[0], [x6], x1
subs w4, w4, #4
st1 {v0.s}[0], [x0], x1
st1 {v0.s}[0], [x6], x1
b.gt 4b
ret
80:
AARCH64_VALID_JUMP_TARGET
ld1 {v0.8b}, [x2]
8:
st1 {v0.8b}, [x0], x1
st1 {v0.8b}, [x6], x1
subs w4, w4, #4
st1 {v0.8b}, [x0], x1
st1 {v0.8b}, [x6], x1
b.gt 8b
ret
160:
AARCH64_VALID_JUMP_TARGET
ld1 {v0.16b}, [x2]
16:
st1 {v0.16b}, [x0], x1
st1 {v0.16b}, [x6], x1
subs w4, w4, #4
st1 {v0.16b}, [x0], x1
st1 {v0.16b}, [x6], x1
b.gt 16b
ret
320:
AARCH64_VALID_JUMP_TARGET
ld1 {v0.16b, v1.16b}, [x2]
32:
st1 {v0.16b, v1.16b}, [x0], x1
st1 {v0.16b, v1.16b}, [x6], x1
subs w4, w4, #4
st1 {v0.16b, v1.16b}, [x0], x1
st1 {v0.16b, v1.16b}, [x6], x1
b.gt 32b
ret
640:
AARCH64_VALID_JUMP_TARGET
ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x2]
64:
st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], x1
st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x6], x1
subs w4, w4, #4
st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], x1
st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x6], x1
b.gt 64b
ret
endfunc
jumptable ipred_v_tbl
.word 640b - ipred_v_tbl
.word 320b - ipred_v_tbl
.word 160b - ipred_v_tbl
.word 80b - ipred_v_tbl
.word 40b - ipred_v_tbl
endjumptable
// void ipred_h_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
function ipred_h_8bpc_neon, export=1
clz w3, w3
movrel x5, ipred_h_tbl
sub w3, w3, #25
ldrsw x3, [x5, w3, uxtw #2]
sub x2, x2, #4
add x5, x5, x3
mov x7, #-4
add x6, x0, x1
lsl x1, x1, #1
br x5
40:
AARCH64_VALID_JUMP_TARGET
4:
ld4r {v0.8b, v1.8b, v2.8b, v3.8b}, [x2], x7
st1 {v3.s}[0], [x0], x1
st1 {v2.s}[0], [x6], x1
subs w4, w4, #4
st1 {v1.s}[0], [x0], x1
st1 {v0.s}[0], [x6], x1
b.gt 4b
ret
80:
AARCH64_VALID_JUMP_TARGET
8:
ld4r {v0.8b, v1.8b, v2.8b, v3.8b}, [x2], x7
st1 {v3.8b}, [x0], x1
st1 {v2.8b}, [x6], x1
subs w4, w4, #4
st1 {v1.8b}, [x0], x1
st1 {v0.8b}, [x6], x1
b.gt 8b
ret
160:
AARCH64_VALID_JUMP_TARGET
16:
ld4r {v0.16b, v1.16b, v2.16b, v3.16b}, [x2], x7
st1 {v3.16b}, [x0], x1
st1 {v2.16b}, [x6], x1
subs w4, w4, #4
st1 {v1.16b}, [x0], x1
st1 {v0.16b}, [x6], x1
b.gt 16b
ret
320:
AARCH64_VALID_JUMP_TARGET
32:
ld4r {v0.16b, v1.16b, v2.16b, v3.16b}, [x2], x7
str q3, [x0, #16]
str q2, [x6, #16]
st1 {v3.16b}, [x0], x1
st1 {v2.16b}, [x6], x1
subs w4, w4, #4
str q1, [x0, #16]
str q0, [x6, #16]
st1 {v1.16b}, [x0], x1
st1 {v0.16b}, [x6], x1
b.gt 32b
ret
640:
AARCH64_VALID_JUMP_TARGET
64:
ld4r {v0.16b, v1.16b, v2.16b, v3.16b}, [x2], x7
str q3, [x0, #16]
str q2, [x6, #16]
stp q3, q3, [x0, #32]
stp q2, q2, [x6, #32]
st1 {v3.16b}, [x0], x1
st1 {v2.16b}, [x6], x1
subs w4, w4, #4
str q1, [x0, #16]
str q0, [x6, #16]
stp q1, q1, [x0, #32]
stp q0, q0, [x6, #32]
st1 {v1.16b}, [x0], x1
st1 {v0.16b}, [x6], x1
b.gt 64b
ret
endfunc
jumptable ipred_h_tbl
.word 640b - ipred_h_tbl
.word 320b - ipred_h_tbl
.word 160b - ipred_h_tbl
.word 80b - ipred_h_tbl
.word 40b - ipred_h_tbl
endjumptable
// void ipred_dc_top_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
function ipred_dc_top_8bpc_neon, export=1
clz w3, w3
movrel x5, ipred_dc_top_tbl
sub w3, w3, #25
ldrsw x3, [x5, w3, uxtw #2]
add x2, x2, #1
add x5, x5, x3
add x6, x0, x1
lsl x1, x1, #1
br x5
40:
AARCH64_VALID_JUMP_TARGET
ld1r {v0.2s}, [x2]
uaddlv h0, v0.8b
rshrn v0.8b, v0.8h, #3
dup v0.8b, v0.b[0]
4:
st1 {v0.s}[0], [x0], x1
st1 {v0.s}[0], [x6], x1
subs w4, w4, #4
st1 {v0.s}[0], [x0], x1
st1 {v0.s}[0], [x6], x1
b.gt 4b
ret
80:
AARCH64_VALID_JUMP_TARGET
ld1 {v0.8b}, [x2]
uaddlv h0, v0.8b
rshrn v0.8b, v0.8h, #3
dup v0.8b, v0.b[0]
8:
st1 {v0.8b}, [x0], x1
st1 {v0.8b}, [x6], x1
subs w4, w4, #4
st1 {v0.8b}, [x0], x1
st1 {v0.8b}, [x6], x1
b.gt 8b
ret
160:
AARCH64_VALID_JUMP_TARGET
ld1 {v0.16b}, [x2]
uaddlv h0, v0.16b
rshrn v0.8b, v0.8h, #4
dup v0.16b, v0.b[0]
16:
st1 {v0.16b}, [x0], x1
st1 {v0.16b}, [x6], x1
subs w4, w4, #4
st1 {v0.16b}, [x0], x1
st1 {v0.16b}, [x6], x1
b.gt 16b
ret
320:
AARCH64_VALID_JUMP_TARGET
ld1 {v0.16b, v1.16b}, [x2]
uaddlv h0, v0.16b
uaddlv h1, v1.16b
add v2.4h, v0.4h, v1.4h
rshrn v2.8b, v2.8h, #5
dup v0.16b, v2.b[0]
dup v1.16b, v2.b[0]
32:
st1 {v0.16b, v1.16b}, [x0], x1
st1 {v0.16b, v1.16b}, [x6], x1
subs w4, w4, #4
st1 {v0.16b, v1.16b}, [x0], x1
st1 {v0.16b, v1.16b}, [x6], x1
b.gt 32b
ret
640:
AARCH64_VALID_JUMP_TARGET
ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x2]
uaddlv h0, v0.16b
uaddlv h1, v1.16b
uaddlv h2, v2.16b
uaddlv h3, v3.16b
add v4.4h, v0.4h, v1.4h
add v5.4h, v2.4h, v3.4h
add v4.4h, v4.4h, v5.4h
rshrn v4.8b, v4.8h, #6
dup v0.16b, v4.b[0]
dup v1.16b, v4.b[0]
dup v2.16b, v4.b[0]
dup v3.16b, v4.b[0]
64:
st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], x1
st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x6], x1
subs w4, w4, #4
st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], x1
st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x6], x1
b.gt 64b
ret
endfunc
jumptable ipred_dc_top_tbl
.word 640b - ipred_dc_top_tbl
.word 320b - ipred_dc_top_tbl
.word 160b - ipred_dc_top_tbl
.word 80b - ipred_dc_top_tbl
.word 40b - ipred_dc_top_tbl
endjumptable
// void ipred_dc_left_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
function ipred_dc_left_8bpc_neon, export=1
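// Two-level dispatch: first branch (br x5) to the height-specific sum of the
// left column, which then branches via x3 to the width-specific store loop.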
sub x2, x2, w4, uxtw
clz w3, w3
clz w7, w4
movrel x5, ipred_dc_left_tbl
sub w3, w3, #20 // 25 leading bits, minus table offset 5
sub w7, w7, #25
ldrsw x3, [x5, w3, uxtw #2]
ldrsw x7, [x5, w7, uxtw #2]
add x3, x5, x3
add x5, x5, x7
add x6, x0, x1
lsl x1, x1, #1
br x5
L(ipred_dc_left_h4):
AARCH64_VALID_JUMP_TARGET
ld1r {v0.2s}, [x2]
uaddlv h0, v0.8b
rshrn v0.8b, v0.8h, #3
dup v0.16b, v0.b[0]
br x3
L(ipred_dc_left_w4):
AARCH64_VALID_JUMP_TARGET
1:
st1 {v0.s}[0], [x0], x1
st1 {v0.s}[0], [x6], x1
subs w4, w4, #4
st1 {v0.s}[0], [x0], x1
st1 {v0.s}[0], [x6], x1
b.gt 1b
ret
L(ipred_dc_left_h8):
AARCH64_VALID_JUMP_TARGET
ld1 {v0.8b}, [x2]
uaddlv h0, v0.8b
rshrn v0.8b, v0.8h, #3
dup v0.16b, v0.b[0]
br x3
L(ipred_dc_left_w8):
AARCH64_VALID_JUMP_TARGET
1:
st1 {v0.8b}, [x0], x1
st1 {v0.8b}, [x6], x1
subs w4, w4, #4
st1 {v0.8b}, [x0], x1
st1 {v0.8b}, [x6], x1
b.gt 1b
ret
L(ipred_dc_left_h16):
AARCH64_VALID_JUMP_TARGET
ld1 {v0.16b}, [x2]
uaddlv h0, v0.16b
rshrn v0.8b, v0.8h, #4
dup v0.16b, v0.b[0]
br x3
L(ipred_dc_left_w16):
AARCH64_VALID_JUMP_TARGET
1:
st1 {v0.16b}, [x0], x1
st1 {v0.16b}, [x6], x1
subs w4, w4, #4
st1 {v0.16b}, [x0], x1
st1 {v0.16b}, [x6], x1
b.gt 1b
ret
L(ipred_dc_left_h32):
AARCH64_VALID_JUMP_TARGET
ld1 {v0.16b, v1.16b}, [x2]
uaddlv h0, v0.16b
uaddlv h1, v1.16b
add v0.4h, v0.4h, v1.4h
rshrn v0.8b, v0.8h, #5
dup v0.16b, v0.b[0]
br x3
L(ipred_dc_left_w32):
AARCH64_VALID_JUMP_TARGET
mov v1.16b, v0.16b
1:
st1 {v0.16b, v1.16b}, [x0], x1
st1 {v0.16b, v1.16b}, [x6], x1
subs w4, w4, #4
st1 {v0.16b, v1.16b}, [x0], x1
st1 {v0.16b, v1.16b}, [x6], x1
b.gt 1b
ret
L(ipred_dc_left_h64):
AARCH64_VALID_JUMP_TARGET
ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x2]
uaddlv h0, v0.16b
uaddlv h1, v1.16b
uaddlv h2, v2.16b
uaddlv h3, v3.16b
add v0.4h, v0.4h, v1.4h
add v2.4h, v2.4h, v3.4h
add v0.4h, v0.4h, v2.4h
rshrn v0.8b, v0.8h, #6
dup v0.16b, v0.b[0]
br x3
L(ipred_dc_left_w64):
AARCH64_VALID_JUMP_TARGET
mov v1.16b, v0.16b
mov v2.16b, v0.16b
mov v3.16b, v0.16b
1:
st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], x1
st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x6], x1
subs w4, w4, #4
st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], x1
st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x6], x1
b.gt 1b
ret
endfunc
jumptable ipred_dc_left_tbl
.word L(ipred_dc_left_h64) - ipred_dc_left_tbl
.word L(ipred_dc_left_h32) - ipred_dc_left_tbl
.word L(ipred_dc_left_h16) - ipred_dc_left_tbl
.word L(ipred_dc_left_h8) - ipred_dc_left_tbl
.word L(ipred_dc_left_h4) - ipred_dc_left_tbl
.word L(ipred_dc_left_w64) - ipred_dc_left_tbl
.word L(ipred_dc_left_w32) - ipred_dc_left_tbl
.word L(ipred_dc_left_w16) - ipred_dc_left_tbl
.word L(ipred_dc_left_w8) - ipred_dc_left_tbl
.word L(ipred_dc_left_w4) - ipred_dc_left_tbl
endjumptable
// void ipred_dc_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
function ipred_dc_8bpc_neon, export=1
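// DC = (sum(top) + sum(left) + ((w + h) >> 1)) >> ctz(w + h); for non-square
// blocks the remaining non-power-of-two division is applied with a fixed-point
// sqdmulh in the width handlers below.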
sub x2, x2, w4, uxtw
add w7, w3, w4 // width + height
clz w3, w3
clz w6, w4
dup v16.8h, w7 // width + height
movrel x5, ipred_dc_tbl
rbit w7, w7 // rbit(width + height)
sub w3, w3, #20 // 25 leading bits, minus table offset 5
sub w6, w6, #25
clz w7, w7 // ctz(width + height)
ldrsw x3, [x5, w3, uxtw #2]
ldrsw x6, [x5, w6, uxtw #2]
neg w7, w7 // -ctz(width + height)
add x3, x5, x3
add x5, x5, x6
ushr v16.8h, v16.8h, #1 // (width + height) >> 1
dup v17.8h, w7 // -ctz(width + height)
add x6, x0, x1
lsl x1, x1, #1
br x5
L(ipred_dc_h4):
AARCH64_VALID_JUMP_TARGET
ld1 {v0.s}[0], [x2], #4
ins v0.s[1], wzr
uaddlv h0, v0.8b
add x2, x2, #1
br x3
L(ipred_dc_w4):
AARCH64_VALID_JUMP_TARGET
ld1 {v1.s}[0], [x2]
ins v1.s[1], wzr
add v0.4h, v0.4h, v16.4h
uaddlv h1, v1.8b
cmp w4, #4
add v0.4h, v0.4h, v1.4h
ushl v0.4h, v0.4h, v17.4h
b.eq 1f
// h = 8/16
mov w16, #(0x3334/2)
movk w16, #(0x5556/2), lsl #16
add w17, w4, w4 // w17 = 2*h = 16 or 32
lsr w16, w16, w17
dup v16.4h, w16
sqdmulh v0.4h, v0.4h, v16.4h
1:
dup v0.8b, v0.b[0]
2:
st1 {v0.s}[0], [x0], x1
st1 {v0.s}[0], [x6], x1
subs w4, w4, #4
st1 {v0.s}[0], [x0], x1
st1 {v0.s}[0], [x6], x1
b.gt 2b
ret
L(ipred_dc_h8):
AARCH64_VALID_JUMP_TARGET
ld1 {v0.8b}, [x2], #8
uaddlv h0, v0.8b
add x2, x2, #1
br x3
L(ipred_dc_w8):
AARCH64_VALID_JUMP_TARGET
ld1 {v1.8b}, [x2]
add v0.4h, v0.4h, v16.4h
uaddlv h1, v1.8b
cmp w4, #8
add v0.4h, v0.4h, v1.4h
ushl v0.4h, v0.4h, v17.4h
b.eq 1f
// h = 4/16/32
cmp w4, #32
mov w16, #(0x3334/2)
mov w17, #(0x5556/2)
csel w16, w16, w17, eq
dup v16.4h, w16
sqdmulh v0.4h, v0.4h, v16.4h
1:
dup v0.8b, v0.b[0]
2:
st1 {v0.8b}, [x0], x1
st1 {v0.8b}, [x6], x1
subs w4, w4, #4
st1 {v0.8b}, [x0], x1
st1 {v0.8b}, [x6], x1
b.gt 2b
ret
L(ipred_dc_h16):
AARCH64_VALID_JUMP_TARGET
ld1 {v0.16b}, [x2], #16
uaddlv h0, v0.16b
add x2, x2, #1
br x3
L(ipred_dc_w16):
AARCH64_VALID_JUMP_TARGET
ld1 {v1.16b}, [x2]
add v0.4h, v0.4h, v16.4h
uaddlv h1, v1.16b
cmp w4, #16
add v0.4h, v0.4h, v1.4h
ushl v0.4h, v0.4h, v17.4h
b.eq 1f
// h = 4/8/32/64
tst w4, #(32+16+8) // 16 added to make a consecutive bitmask
mov w16, #(0x3334/2)
mov w17, #(0x5556/2)
csel w16, w16, w17, eq
dup v16.4h, w16
sqdmulh v0.4h, v0.4h, v16.4h
1:
dup v0.16b, v0.b[0]
2:
st1 {v0.16b}, [x0], x1
st1 {v0.16b}, [x6], x1
subs w4, w4, #4
st1 {v0.16b}, [x0], x1
st1 {v0.16b}, [x6], x1
b.gt 2b
ret
L(ipred_dc_h32):
AARCH64_VALID_JUMP_TARGET
ld1 {v0.16b, v1.16b}, [x2], #32
uaddlv h0, v0.16b
uaddlv h1, v1.16b
add x2, x2, #1
add v0.4h, v0.4h, v1.4h
br x3
L(ipred_dc_w32):
AARCH64_VALID_JUMP_TARGET
ld1 {v1.16b, v2.16b}, [x2]
add v0.4h, v0.4h, v16.4h
uaddlv h1, v1.16b
uaddlv h2, v2.16b
cmp w4, #32
add v0.4h, v0.4h, v1.4h
add v0.4h, v0.4h, v2.4h
ushl v4.4h, v0.4h, v17.4h
b.eq 1f
// h = 8/16/64
cmp w4, #8
mov w16, #(0x3334/2)
mov w17, #(0x5556/2)
csel w16, w16, w17, eq
dup v16.4h, w16
sqdmulh v4.4h, v4.4h, v16.4h
1:
dup v0.16b, v4.b[0]
dup v1.16b, v4.b[0]
2:
st1 {v0.16b, v1.16b}, [x0], x1
st1 {v0.16b, v1.16b}, [x6], x1
subs w4, w4, #4
st1 {v0.16b, v1.16b}, [x0], x1
st1 {v0.16b, v1.16b}, [x6], x1
b.gt 2b
ret
L(ipred_dc_h64):
AARCH64_VALID_JUMP_TARGET
ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x2], #64
uaddlv h0, v0.16b
uaddlv h1, v1.16b
uaddlv h2, v2.16b
uaddlv h3, v3.16b
add v0.4h, v0.4h, v1.4h
add v2.4h, v2.4h, v3.4h
add x2, x2, #1
add v0.4h, v0.4h, v2.4h
br x3
L(ipred_dc_w64):
AARCH64_VALID_JUMP_TARGET
ld1 {v1.16b, v2.16b, v3.16b, v4.16b}, [x2]
add v0.4h, v0.4h, v16.4h
uaddlv h1, v1.16b
uaddlv h2, v2.16b
uaddlv h3, v3.16b
uaddlv h4, v4.16b
add v1.4h, v1.4h, v2.4h
add v3.4h, v3.4h, v4.4h
cmp w4, #64
add v0.4h, v0.4h, v1.4h
add v0.4h, v0.4h, v3.4h
ushl v4.4h, v0.4h, v17.4h
b.eq 1f
// h = 16/32
mov w16, #(0x5556/2)
movk w16, #(0x3334/2), lsl #16
lsr w16, w16, w4
dup v16.4h, w16
sqdmulh v4.4h, v4.4h, v16.4h
1:
dup v0.16b, v4.b[0]
dup v1.16b, v4.b[0]
dup v2.16b, v4.b[0]
dup v3.16b, v4.b[0]
2:
st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], x1
st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x6], x1
subs w4, w4, #4
st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], x1
st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x6], x1
b.gt 2b
ret
endfunc
jumptable ipred_dc_tbl
.word L(ipred_dc_h64) - ipred_dc_tbl
.word L(ipred_dc_h32) - ipred_dc_tbl
.word L(ipred_dc_h16) - ipred_dc_tbl
.word L(ipred_dc_h8) - ipred_dc_tbl
.word L(ipred_dc_h4) - ipred_dc_tbl
.word L(ipred_dc_w64) - ipred_dc_tbl
.word L(ipred_dc_w32) - ipred_dc_tbl
.word L(ipred_dc_w16) - ipred_dc_tbl
.word L(ipred_dc_w8) - ipred_dc_tbl
.word L(ipred_dc_w4) - ipred_dc_tbl
endjumptable
// void ipred_paeth_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
function ipred_paeth_8bpc_neon, export=1
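// Paeth prediction: base = left + top - topleft; for each pixel pick whichever
// of left, top and topleft is closest to base.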
clz w9, w3
movrel x5, ipred_paeth_tbl
sub w9, w9, #25
ldrsw x9, [x5, w9, uxtw #2]
ld1r {v4.16b}, [x2]
add x8, x2, #1
sub x2, x2, #4
add x5, x5, x9
mov x7, #-4
add x6, x0, x1
lsl x1, x1, #1
br x5
40:
AARCH64_VALID_JUMP_TARGET
ld1r {v5.4s}, [x8]
usubl v6.8h, v5.8b, v4.8b // top - topleft
4:
ld4r {v0.8b, v1.8b, v2.8b, v3.8b}, [x2], x7
zip1 v0.2s, v0.2s, v1.2s
zip1 v2.2s, v2.2s, v3.2s
uaddw v16.8h, v6.8h, v0.8b
uaddw v17.8h, v6.8h, v2.8b
sqxtun v16.8b, v16.8h // base
sqxtun2 v16.16b, v17.8h
zip1 v0.2d, v0.2d, v2.2d
uabd v20.16b, v5.16b, v16.16b // tdiff
uabd v22.16b, v4.16b, v16.16b // tldiff
uabd v16.16b, v0.16b, v16.16b // ldiff
umin v18.16b, v20.16b, v22.16b // min(tdiff, tldiff)
cmhs v20.16b, v22.16b, v20.16b // tldiff >= tdiff
cmhs v16.16b, v18.16b, v16.16b // min(tdiff, tldiff) >= ldiff
bsl v20.16b, v5.16b, v4.16b // tdiff <= tldiff ? top : topleft
bit v20.16b, v0.16b, v16.16b // ldiff <= min ? left : ...
st1 {v20.s}[3], [x0], x1
st1 {v20.s}[2], [x6], x1
subs w4, w4, #4
st1 {v20.s}[1], [x0], x1
st1 {v20.s}[0], [x6], x1
b.gt 4b
ret
80:
AARCH64_VALID_JUMP_TARGET
ld1r {v5.2d}, [x8]
usubl v6.8h, v5.8b, v4.8b // top - topleft
8:
ld4r {v0.8b, v1.8b, v2.8b, v3.8b}, [x2], x7
uaddw v16.8h, v6.8h, v0.8b
uaddw v17.8h, v6.8h, v1.8b
uaddw v18.8h, v6.8h, v2.8b
uaddw v19.8h, v6.8h, v3.8b
sqxtun v16.8b, v16.8h // base
sqxtun2 v16.16b, v17.8h
sqxtun v18.8b, v18.8h
sqxtun2 v18.16b, v19.8h
zip1 v2.2d, v2.2d, v3.2d
zip1 v0.2d, v0.2d, v1.2d
uabd v21.16b, v5.16b, v18.16b // tdiff
uabd v20.16b, v5.16b, v16.16b
uabd v23.16b, v4.16b, v18.16b // tldiff
uabd v22.16b, v4.16b, v16.16b
uabd v17.16b, v2.16b, v18.16b // ldiff
uabd v16.16b, v0.16b, v16.16b
umin v19.16b, v21.16b, v23.16b // min(tdiff, tldiff)
umin v18.16b, v20.16b, v22.16b
cmhs v21.16b, v23.16b, v21.16b // tldiff >= tdiff
cmhs v20.16b, v22.16b, v20.16b
cmhs v17.16b, v19.16b, v17.16b // min(tdiff, tldiff) >= ldiff
cmhs v16.16b, v18.16b, v16.16b
bsl v21.16b, v5.16b, v4.16b // tdiff <= tldiff ? top : topleft
bsl v20.16b, v5.16b, v4.16b
bit v21.16b, v2.16b, v17.16b // ldiff <= min ? left : ...
bit v20.16b, v0.16b, v16.16b
st1 {v21.d}[1], [x0], x1
st1 {v21.d}[0], [x6], x1
subs w4, w4, #4
st1 {v20.d}[1], [x0], x1
st1 {v20.d}[0], [x6], x1
b.gt 8b
ret
160:
320:
640:
AARCH64_VALID_JUMP_TARGET
ld1 {v5.16b}, [x8], #16
mov w9, w3
// Set up pointers for four rows in parallel; x0, x6, x5, x10
add x5, x0, x1
add x10, x6, x1
lsl x1, x1, #1
sub x1, x1, w3, uxtw
1:
ld4r {v0.16b, v1.16b, v2.16b, v3.16b}, [x2], x7
2:
usubl v6.8h, v5.8b, v4.8b // top - topleft
usubl2 v7.8h, v5.16b, v4.16b
uaddw v24.8h, v6.8h, v0.8b
uaddw v25.8h, v7.8h, v0.8b
uaddw v26.8h, v6.8h, v1.8b
uaddw v27.8h, v7.8h, v1.8b
uaddw v28.8h, v6.8h, v2.8b
uaddw v29.8h, v7.8h, v2.8b
uaddw v30.8h, v6.8h, v3.8b
uaddw v31.8h, v7.8h, v3.8b
sqxtun v17.8b, v26.8h // base
sqxtun2 v17.16b, v27.8h
sqxtun v16.8b, v24.8h
sqxtun2 v16.16b, v25.8h
sqxtun v19.8b, v30.8h
sqxtun2 v19.16b, v31.8h
sqxtun v18.8b, v28.8h
sqxtun2 v18.16b, v29.8h
uabd v23.16b, v5.16b, v19.16b // tdiff
uabd v22.16b, v5.16b, v18.16b
uabd v21.16b, v5.16b, v17.16b
uabd v20.16b, v5.16b, v16.16b
uabd v27.16b, v4.16b, v19.16b // tldiff
uabd v26.16b, v4.16b, v18.16b
uabd v25.16b, v4.16b, v17.16b
uabd v24.16b, v4.16b, v16.16b
uabd v19.16b, v3.16b, v19.16b // ldiff
uabd v18.16b, v2.16b, v18.16b
uabd v17.16b, v1.16b, v17.16b
uabd v16.16b, v0.16b, v16.16b
umin v31.16b, v23.16b, v27.16b // min(tdiff, tldiff)
umin v30.16b, v22.16b, v26.16b
umin v29.16b, v21.16b, v25.16b
umin v28.16b, v20.16b, v24.16b
cmhs v23.16b, v27.16b, v23.16b // tldiff >= tdiff
cmhs v22.16b, v26.16b, v22.16b
cmhs v21.16b, v25.16b, v21.16b
cmhs v20.16b, v24.16b, v20.16b
cmhs v19.16b, v31.16b, v19.16b // min(tdiff, tldiff) >= ldiff
cmhs v18.16b, v30.16b, v18.16b
cmhs v17.16b, v29.16b, v17.16b
cmhs v16.16b, v28.16b, v16.16b
bsl v23.16b, v5.16b, v4.16b // tdiff <= tldiff ? top : topleft
bsl v22.16b, v5.16b, v4.16b
bsl v21.16b, v5.16b, v4.16b
bsl v20.16b, v5.16b, v4.16b
bit v23.16b, v3.16b, v19.16b // ldiff <= min ? left : ...
bit v22.16b, v2.16b, v18.16b
bit v21.16b, v1.16b, v17.16b
bit v20.16b, v0.16b, v16.16b
subs w3, w3, #16
st1 {v23.16b}, [x0], #16
st1 {v22.16b}, [x6], #16
st1 {v21.16b}, [x5], #16
st1 {v20.16b}, [x10], #16
b.le 8f
ld1 {v5.16b}, [x8], #16
b 2b
8:
subs w4, w4, #4
b.le 9f
// End of horizontal loop, move pointers to next four rows
sub x8, x8, w9, uxtw
add x0, x0, x1
add x6, x6, x1
// Load the top row as early as possible
ld1 {v5.16b}, [x8], #16
add x5, x5, x1
add x10, x10, x1
mov w3, w9
b 1b
9:
ret
endfunc
jumptable ipred_paeth_tbl
.word 640b - ipred_paeth_tbl
.word 320b - ipred_paeth_tbl
.word 160b - ipred_paeth_tbl
.word 80b - ipred_paeth_tbl
.word 40b - ipred_paeth_tbl
endjumptable
// void ipred_smooth_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
function ipred_smooth_8bpc_neon, export=1
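// SMOOTH prediction: blend left..right horizontally and top..bottom vertically
// with the sm_weights table, then average the two 8.8 fixed-point results
// (uhadd) and round to pixels (rshrn #8).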
movrel x10, X(sm_weights)
add x11, x10, w4, uxtw
add x10, x10, w3, uxtw
clz w9, w3
movrel x5, ipred_smooth_tbl
sub x12, x2, w4, uxtw
sub w9, w9, #25
ldrsw x9, [x5, w9, uxtw #2]
ld1r {v4.16b}, [x12] // bottom
add x8, x2, #1
add x5, x5, x9
add x6, x0, x1
lsl x1, x1, #1
br x5
40:
AARCH64_VALID_JUMP_TARGET
ld1r {v6.2s}, [x8] // top
ld1r {v7.2s}, [x10] // weights_hor
sub x2, x2, #4
mov x7, #-4
dup v5.16b, v6.b[3] // right
usubl v6.8h, v6.8b, v4.8b // top-bottom
uxtl v7.8h, v7.8b // weights_hor
4:
ld4r {v0.8b, v1.8b, v2.8b, v3.8b}, [x2], x7 // left
ld4r {v16.8b, v17.8b, v18.8b, v19.8b}, [x11], #4 // weights_ver
shll v20.8h, v5.8b, #8 // right*256
shll v21.8h, v5.8b, #8
zip1 v1.2s, v1.2s, v0.2s // left, flipped
zip1 v0.2s, v3.2s, v2.2s
zip1 v16.2s, v16.2s, v17.2s // weights_ver
zip1 v18.2s, v18.2s, v19.2s
shll v22.8h, v4.8b, #8 // bottom*256
shll v23.8h, v4.8b, #8
usubl v0.8h, v0.8b, v5.8b // left-right
usubl v1.8h, v1.8b, v5.8b
uxtl v16.8h, v16.8b // weights_ver
uxtl v18.8h, v18.8b
mla v20.8h, v0.8h, v7.8h // right*256 + (left-right)*weights_hor
mla v21.8h, v1.8h, v7.8h
mla v22.8h, v6.8h, v16.8h // bottom*256 + (top-bottom)*weights_ver
mla v23.8h, v6.8h, v18.8h
uhadd v20.8h, v20.8h, v22.8h
uhadd v21.8h, v21.8h, v23.8h
rshrn v20.8b, v20.8h, #8
rshrn v21.8b, v21.8h, #8
st1 {v20.s}[0], [x0], x1
st1 {v20.s}[1], [x6], x1
subs w4, w4, #4
st1 {v21.s}[0], [x0], x1
st1 {v21.s}[1], [x6], x1
b.gt 4b
ret
80:
AARCH64_VALID_JUMP_TARGET
ld1 {v6.8b}, [x8] // top
ld1 {v7.8b}, [x10] // weights_hor
sub x2, x2, #4
mov x7, #-4
dup v5.16b, v6.b[7] // right
usubl v6.8h, v6.8b, v4.8b // top-bottom
uxtl v7.8h, v7.8b // weights_hor
8:
ld4r {v0.8b, v1.8b, v2.8b, v3.8b}, [x2], x7 // left
ld4r {v16.8b, v17.8b, v18.8b, v19.8b}, [x11], #4 // weights_ver
shll v20.8h, v5.8b, #8 // right*256
shll v21.8h, v5.8b, #8
shll v22.8h, v5.8b, #8
shll v23.8h, v5.8b, #8
usubl v0.8h, v0.8b, v5.8b // left-right
usubl v1.8h, v1.8b, v5.8b
usubl v2.8h, v2.8b, v5.8b
usubl v3.8h, v3.8b, v5.8b
shll v24.8h, v4.8b, #8 // bottom*256
shll v25.8h, v4.8b, #8
shll v26.8h, v4.8b, #8
shll v27.8h, v4.8b, #8
uxtl v16.8h, v16.8b // weights_ver
uxtl v17.8h, v17.8b
uxtl v18.8h, v18.8b
uxtl v19.8h, v19.8b
mla v20.8h, v3.8h, v7.8h // right*256 + (left-right)*weights_hor
mla v21.8h, v2.8h, v7.8h // (left flipped)
mla v22.8h, v1.8h, v7.8h
mla v23.8h, v0.8h, v7.8h
mla v24.8h, v6.8h, v16.8h // bottom*256 + (top-bottom)*weights_ver
mla v25.8h, v6.8h, v17.8h
mla v26.8h, v6.8h, v18.8h
mla v27.8h, v6.8h, v19.8h
uhadd v20.8h, v20.8h, v24.8h
uhadd v21.8h, v21.8h, v25.8h
uhadd v22.8h, v22.8h, v26.8h
uhadd v23.8h, v23.8h, v27.8h
rshrn v20.8b, v20.8h, #8
rshrn v21.8b, v21.8h, #8
rshrn v22.8b, v22.8h, #8
rshrn v23.8b, v23.8h, #8
st1 {v20.8b}, [x0], x1
st1 {v21.8b}, [x6], x1
subs w4, w4, #4
st1 {v22.8b}, [x0], x1
st1 {v23.8b}, [x6], x1
b.gt 8b
ret
160:
320:
640:
AARCH64_VALID_JUMP_TARGET
add x12, x2, w3, uxtw
sub x2, x2, #2
mov x7, #-2
ld1r {v5.16b}, [x12] // right
sub x1, x1, w3, uxtw
mov w9, w3
1:
ld2r {v0.8b, v1.8b}, [x2], x7 // left
ld2r {v16.8b, v17.8b}, [x11], #2 // weights_ver
usubl v0.8h, v0.8b, v5.8b // left-right
usubl v1.8h, v1.8b, v5.8b
uxtl v16.8h, v16.8b // weights_ver
uxtl v17.8h, v17.8b
2:
ld1 {v7.16b}, [x10], #16 // weights_hor
ld1 {v3.16b}, [x8], #16 // top
shll v20.8h, v5.8b, #8 // right*256
shll v21.8h, v5.8b, #8
shll v22.8h, v5.8b, #8
shll v23.8h, v5.8b, #8
uxtl v6.8h, v7.8b // weights_hor
uxtl2 v7.8h, v7.16b
usubl v2.8h, v3.8b, v4.8b // top-bottom
usubl2 v3.8h, v3.16b, v4.16b
mla v20.8h, v1.8h, v6.8h // right*256 + (left-right)*weights_hor
mla v21.8h, v1.8h, v7.8h // (left flipped)
mla v22.8h, v0.8h, v6.8h
mla v23.8h, v0.8h, v7.8h
shll v24.8h, v4.8b, #8 // bottom*256
shll v25.8h, v4.8b, #8
shll v26.8h, v4.8b, #8
shll v27.8h, v4.8b, #8
mla v24.8h, v2.8h, v16.8h // bottom*256 + (top-bottom)*weights_ver
mla v25.8h, v3.8h, v16.8h
mla v26.8h, v2.8h, v17.8h
mla v27.8h, v3.8h, v17.8h
uhadd v20.8h, v20.8h, v24.8h
uhadd v21.8h, v21.8h, v25.8h
uhadd v22.8h, v22.8h, v26.8h
uhadd v23.8h, v23.8h, v27.8h
rshrn v20.8b, v20.8h, #8
rshrn2 v20.16b, v21.8h, #8
rshrn v22.8b, v22.8h, #8
rshrn2 v22.16b, v23.8h, #8
subs w3, w3, #16
st1 {v20.16b}, [x0], #16
st1 {v22.16b}, [x6], #16
b.gt 2b
subs w4, w4, #2
b.le 9f
sub x8, x8, w9, uxtw
sub x10, x10, w9, uxtw
add x0, x0, x1
add x6, x6, x1
mov w3, w9
b 1b
9:
ret
endfunc
jumptable ipred_smooth_tbl
.word 640b - ipred_smooth_tbl
.word 320b - ipred_smooth_tbl
.word 160b - ipred_smooth_tbl
.word 80b - ipred_smooth_tbl
.word 40b - ipred_smooth_tbl
endjumptable
// void ipred_smooth_v_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
function ipred_smooth_v_8bpc_neon, export=1
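// SMOOTH_V prediction: dst = (bottom*256 + (top - bottom)*weights_ver + 128) >> 8.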
movrel x7, X(sm_weights)
add x7, x7, w4, uxtw
clz w9, w3
movrel x5, ipred_smooth_v_tbl
sub x8, x2, w4, uxtw
sub w9, w9, #25
ldrsw x9, [x5, w9, uxtw #2]
ld1r {v4.16b}, [x8] // bottom
add x2, x2, #1
add x5, x5, x9
add x6, x0, x1
lsl x1, x1, #1
br x5
40:
AARCH64_VALID_JUMP_TARGET
ld1r {v6.2s}, [x2] // top
usubl v6.8h, v6.8b, v4.8b // top-bottom
4:
ld4r {v16.8b, v17.8b, v18.8b, v19.8b}, [x7], #4 // weights_ver
shll v22.8h, v4.8b, #8 // bottom*256
shll v23.8h, v4.8b, #8
zip1 v16.2s, v16.2s, v17.2s // weights_ver
zip1 v18.2s, v18.2s, v19.2s
uxtl v16.8h, v16.8b // weights_ver
uxtl v18.8h, v18.8b
mla v22.8h, v6.8h, v16.8h // bottom*256 + (top-bottom)*weights_ver
mla v23.8h, v6.8h, v18.8h
rshrn v22.8b, v22.8h, #8
rshrn v23.8b, v23.8h, #8
st1 {v22.s}[0], [x0], x1
st1 {v22.s}[1], [x6], x1
subs w4, w4, #4
st1 {v23.s}[0], [x0], x1
st1 {v23.s}[1], [x6], x1
b.gt 4b
ret
80:
AARCH64_VALID_JUMP_TARGET
ld1 {v6.8b}, [x2] // top
usubl v6.8h, v6.8b, v4.8b // top-bottom
8:
ld4r {v16.8b, v17.8b, v18.8b, v19.8b}, [x7], #4 // weights_ver
shll v24.8h, v4.8b, #8 // bottom*256
shll v25.8h, v4.8b, #8
shll v26.8h, v4.8b, #8
shll v27.8h, v4.8b, #8
uxtl v16.8h, v16.8b // weights_ver
uxtl v17.8h, v17.8b
uxtl v18.8h, v18.8b
uxtl v19.8h, v19.8b
mla v24.8h, v6.8h, v16.8h // bottom*256 + (top-bottom)*weights_ver
mla v25.8h, v6.8h, v17.8h
mla v26.8h, v6.8h, v18.8h
mla v27.8h, v6.8h, v19.8h
rshrn v24.8b, v24.8h, #8
rshrn v25.8b, v25.8h, #8
rshrn v26.8b, v26.8h, #8
rshrn v27.8b, v27.8h, #8
st1 {v24.8b}, [x0], x1
st1 {v25.8b}, [x6], x1
subs w4, w4, #4
st1 {v26.8b}, [x0], x1
st1 {v27.8b}, [x6], x1
b.gt 8b
ret
160:
320:
640:
AARCH64_VALID_JUMP_TARGET
// Set up pointers for four rows in parallel; x0, x6, x5, x8
add x5, x0, x1
add x8, x6, x1
lsl x1, x1, #1
sub x1, x1, w3, uxtw
mov w9, w3
1:
ld4r {v16.8b, v17.8b, v18.8b, v19.8b}, [x7], #4 // weights_ver
uxtl v16.8h, v16.8b // weights_ver
uxtl v17.8h, v17.8b
uxtl v18.8h, v18.8b
uxtl v19.8h, v19.8b
2:
ld1 {v3.16b}, [x2], #16 // top
shll v20.8h, v4.8b, #8 // bottom*256
shll v21.8h, v4.8b, #8
shll v22.8h, v4.8b, #8
shll v23.8h, v4.8b, #8
shll v24.8h, v4.8b, #8
shll v25.8h, v4.8b, #8
shll v26.8h, v4.8b, #8
shll v27.8h, v4.8b, #8
usubl v2.8h, v3.8b, v4.8b // top-bottom
usubl2 v3.8h, v3.16b, v4.16b
mla v20.8h, v2.8h, v16.8h // bottom*256 + (top-bottom)*weights_ver
mla v21.8h, v3.8h, v16.8h
mla v22.8h, v2.8h, v17.8h
mla v23.8h, v3.8h, v17.8h
mla v24.8h, v2.8h, v18.8h
mla v25.8h, v3.8h, v18.8h
mla v26.8h, v2.8h, v19.8h
mla v27.8h, v3.8h, v19.8h
rshrn v20.8b, v20.8h, #8
rshrn2 v20.16b, v21.8h, #8
rshrn v22.8b, v22.8h, #8
rshrn2 v22.16b, v23.8h, #8
rshrn v24.8b, v24.8h, #8
rshrn2 v24.16b, v25.8h, #8
rshrn v26.8b, v26.8h, #8
rshrn2 v26.16b, v27.8h, #8
subs w3, w3, #16
st1 {v20.16b}, [x0], #16
st1 {v22.16b}, [x6], #16
st1 {v24.16b}, [x5], #16
st1 {v26.16b}, [x8], #16
b.gt 2b
subs w4, w4, #4
b.le 9f
sub x2, x2, w9, uxtw
add x0, x0, x1
add x6, x6, x1
add x5, x5, x1
add x8, x8, x1
mov w3, w9
b 1b
9:
ret
endfunc
jumptable ipred_smooth_v_tbl
.word 640b - ipred_smooth_v_tbl
.word 320b - ipred_smooth_v_tbl
.word 160b - ipred_smooth_v_tbl
.word 80b - ipred_smooth_v_tbl
.word 40b - ipred_smooth_v_tbl
endjumptable
// void ipred_smooth_h_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
function ipred_smooth_h_8bpc_neon, export=1
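// SMOOTH_H prediction: dst = (right*256 + (left - right)*weights_hor + 128) >> 8.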
movrel x8, X(sm_weights)
add x8, x8, w3, uxtw
clz w9, w3
movrel x5, ipred_smooth_h_tbl
add x12, x2, w3, uxtw
sub w9, w9, #25
ldrsw x9, [x5, w9, uxtw #2]
ld1r {v5.16b}, [x12] // right
add x5, x5, x9
add x6, x0, x1
lsl x1, x1, #1
br x5
40:
AARCH64_VALID_JUMP_TARGET
ld1r {v7.2s}, [x8] // weights_hor
sub x2, x2, #4
mov x7, #-4
uxtl v7.8h, v7.8b // weights_hor
4:
ld4r {v0.8b, v1.8b, v2.8b, v3.8b}, [x2], x7 // left
shll v20.8h, v5.8b, #8 // right*256
shll v21.8h, v5.8b, #8
zip1 v1.2s, v1.2s, v0.2s // left, flipped
zip1 v0.2s, v3.2s, v2.2s
usubl v0.8h, v0.8b, v5.8b // left-right
usubl v1.8h, v1.8b, v5.8b
mla v20.8h, v0.8h, v7.8h // right*256 + (left-right)*weights_hor
mla v21.8h, v1.8h, v7.8h
rshrn v20.8b, v20.8h, #8
rshrn v21.8b, v21.8h, #8
st1 {v20.s}[0], [x0], x1
st1 {v20.s}[1], [x6], x1
subs w4, w4, #4
st1 {v21.s}[0], [x0], x1
st1 {v21.s}[1], [x6], x1
b.gt 4b
ret
80:
AARCH64_VALID_JUMP_TARGET
ld1 {v7.8b}, [x8] // weights_hor
sub x2, x2, #4
mov x7, #-4
uxtl v7.8h, v7.8b // weights_hor
8:
ld4r {v0.8b, v1.8b, v2.8b, v3.8b}, [x2], x7 // left
shll v20.8h, v5.8b, #8 // right*256
shll v21.8h, v5.8b, #8
shll v22.8h, v5.8b, #8
shll v23.8h, v5.8b, #8
usubl v3.8h, v3.8b, v5.8b // left-right
usubl v2.8h, v2.8b, v5.8b
usubl v1.8h, v1.8b, v5.8b
usubl v0.8h, v0.8b, v5.8b
mla v20.8h, v3.8h, v7.8h // right*256 + (left-right)*weights_hor
mla v21.8h, v2.8h, v7.8h // (left flipped)
mla v22.8h, v1.8h, v7.8h
mla v23.8h, v0.8h, v7.8h
rshrn v20.8b, v20.8h, #8
rshrn v21.8b, v21.8h, #8
rshrn v22.8b, v22.8h, #8
rshrn v23.8b, v23.8h, #8
st1 {v20.8b}, [x0], x1
st1 {v21.8b}, [x6], x1
subs w4, w4, #4
st1 {v22.8b}, [x0], x1
st1 {v23.8b}, [x6], x1
b.gt 8b
ret
160:
320:
640:
AARCH64_VALID_JUMP_TARGET
sub x2, x2, #4
mov x7, #-4
// Set up pointers for four rows in parallel; x0, x6, x5, x10
add x5, x0, x1
add x10, x6, x1
lsl x1, x1, #1
sub x1, x1, w3, uxtw
mov w9, w3
1:
ld4r {v0.8b, v1.8b, v2.8b, v3.8b}, [x2], x7 // left
usubl v0.8h, v0.8b, v5.8b // left-right
usubl v1.8h, v1.8b, v5.8b
usubl v2.8h, v2.8b, v5.8b
usubl v3.8h, v3.8b, v5.8b
2:
ld1 {v7.16b}, [x8], #16 // weights_hor
shll v20.8h, v5.8b, #8 // right*256
shll v21.8h, v5.8b, #8
shll v22.8h, v5.8b, #8
shll v23.8h, v5.8b, #8
shll v24.8h, v5.8b, #8
shll v25.8h, v5.8b, #8
shll v26.8h, v5.8b, #8
shll v27.8h, v5.8b, #8
uxtl v6.8h, v7.8b // weights_hor
uxtl2 v7.8h, v7.16b
mla v20.8h, v3.8h, v6.8h // right*256 + (left-right)*weights_hor
mla v21.8h, v3.8h, v7.8h // (left flipped)
mla v22.8h, v2.8h, v6.8h
mla v23.8h, v2.8h, v7.8h
mla v24.8h, v1.8h, v6.8h
mla v25.8h, v1.8h, v7.8h
mla v26.8h, v0.8h, v6.8h
mla v27.8h, v0.8h, v7.8h
rshrn v20.8b, v20.8h, #8
rshrn2 v20.16b, v21.8h, #8
rshrn v22.8b, v22.8h, #8
rshrn2 v22.16b, v23.8h, #8
rshrn v24.8b, v24.8h, #8
rshrn2 v24.16b, v25.8h, #8
rshrn v26.8b, v26.8h, #8
rshrn2 v26.16b, v27.8h, #8
subs w3, w3, #16
st1 {v20.16b}, [x0], #16
st1 {v22.16b}, [x6], #16
st1 {v24.16b}, [x5], #16
st1 {v26.16b}, [x10], #16
b.gt 2b
subs w4, w4, #4
b.le 9f
sub x8, x8, w9, uxtw
add x0, x0, x1
add x6, x6, x1
add x5, x5, x1
add x10, x10, x1
mov w3, w9
b 1b
9:
ret
endfunc
jumptable ipred_smooth_h_tbl
.word 640b - ipred_smooth_h_tbl
.word 320b - ipred_smooth_h_tbl
.word 160b - ipred_smooth_h_tbl
.word 80b - ipred_smooth_h_tbl
.word 40b - ipred_smooth_h_tbl
endjumptable
const padding_mask_buf
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
padding_mask:
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
endconst
// void ipred_z1_upsample_edge_8bpc_neon(pixel *out, const int hsz,
// const pixel *const in, const int end);
function ipred_z1_upsample_edge_8bpc_neon, export=1
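// Upsample the edge to 2x with the [-1 9 9 -1]/16 filter, interleaving the
// original samples with the filtered intermediate ones.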
movrel x4, padding_mask
ld1 {v0.16b}, [x2] // in[]
add x5, x2, w3, uxtw // in[end]
sub x4, x4, w3, uxtw
ld1r {v1.16b}, [x5] // padding
ld1 {v3.16b}, [x4] // padding_mask
movi v31.8h, #9
bit v0.16b, v1.16b, v3.16b // padded in[]
ext v4.16b, v0.16b, v1.16b, #1
ext v5.16b, v0.16b, v1.16b, #2
ext v6.16b, v0.16b, v1.16b, #3
uaddl v16.8h, v4.8b, v5.8b // in[i+1] + in[i+2]
uaddl2 v17.8h, v4.16b, v5.16b
uaddl v18.8h, v0.8b, v6.8b // in[i+0] + in[i+3]
uaddl2 v19.8h, v0.16b, v6.16b
mul v16.8h, v16.8h, v31.8h // 9*(in[i+1] + in[i+2])
mul v17.8h, v17.8h, v31.8h
sub v16.8h, v16.8h, v18.8h
sub v17.8h, v17.8h, v19.8h
sqrshrun v16.8b, v16.8h, #4
sqrshrun2 v16.16b, v17.8h, #4
zip1 v0.16b, v4.16b, v16.16b
zip2 v1.16b, v4.16b, v16.16b
st1 {v0.16b, v1.16b}, [x0]
ret
endfunc
// void ipred_z2_upsample_edge_8bpc_neon(pixel *out, const int sz,
// const pixel *const in);
function ipred_z2_upsample_edge_8bpc_neon, export=1
// Here, sz is 4 or 8, and we produce 2*sz+1 output elements.
movrel x4, padding_mask
ld1 {v0.16b}, [x2] // in[]
add x5, x2, w1, uxtw // in[sz]
sub x4, x4, w1, uxtw
ld1r {v2.16b}, [x2] // in[0] for padding
ld1r {v1.16b}, [x5] // padding
ld1 {v3.16b}, [x4] // padding_mask
movi v31.8h, #9
bit v0.16b, v1.16b, v3.16b // padded in[]
ext v4.16b, v2.16b, v0.16b, #15
ext v5.16b, v0.16b, v1.16b, #1
ext v6.16b, v0.16b, v1.16b, #2
uaddl v16.8h, v0.8b, v5.8b // in[i+0] + in[i+1]
uaddl v18.8h, v4.8b, v6.8b // in[i-1] + in[i+2]
mul v16.8h, v16.8h, v31.8h // 9*(in[i+0] + in[i+1])
sub v16.8h, v16.8h, v18.8h
sqrshrun v16.8b, v16.8h, #4
add x5, x0, #16
zip1 v2.16b, v0.16b, v16.16b
st1 {v1.b}[0], [x5]
// In case sz=8, output a single pixel in out[16].
st1 {v2.16b}, [x0]
ret
endfunc
const edge_filter
.byte 0, 4, 8, 0
.byte 0, 5, 6, 0
// Leaving out the coeffs for strength=3
// .byte 2, 4, 4, 0
endconst
// void ipred_z1_filter_edge_8bpc_neon(pixel *out, const int sz,
// const pixel *const in, const int end,
// const int strength);
function ipred_z1_filter_edge_8bpc_neon, export=1
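// Smooth the edge with a [4 8 4]/16 (strength 1), [5 6 5]/16 (strength 2) or,
// via L(fivetap), [2 4 4 4 2]/16 (strength 3) kernel, padding past in[end].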
cmp w4, #3
b.eq L(fivetap) // if (strength == 3) goto fivetap
movrel x5, edge_filter, -3
add x5, x5, w4, uxtw #2 // edge_filter + (strength - 1)*4 + 1
ld1 {v31.h}[0], [x5] // kernel[1-2]
ld1 {v0.16b}, [x2], #16
dup v30.16b, v31.b[0]
dup v31.16b, v31.b[1]
1:
// in[end] is the last valid pixel. We produce 16 pixels out by
// using 18 pixels in - the last pixel used is [17] of the ones
// read/buffered.
cmp w3, #17
ld1 {v1.16b}, [x2], #16
b.lt 2f
ext v2.16b, v0.16b, v1.16b, #1
ext v3.16b, v0.16b, v1.16b, #2
umull v4.8h, v0.8b, v30.8b
umlal v4.8h, v2.8b, v31.8b
umlal v4.8h, v3.8b, v30.8b
umull2 v5.8h, v0.16b, v30.16b
umlal2 v5.8h, v2.16b, v31.16b
umlal2 v5.8h, v3.16b, v30.16b
subs w1, w1, #16
mov v0.16b, v1.16b
rshrn v4.8b, v4.8h, #4
rshrn2 v4.16b, v5.8h, #4
sub w3, w3, #16
st1 {v4.16b}, [x0], #16
b.gt 1b
ret
2:
// Right padding
// x2[w3-32] is the padding pixel (x2 points 32 bytes ahead)
movrel x5, padding_mask
sub w6, w3, #32
sub x5, x5, w3, uxtw
add x6, x2, w6, sxtw
ld1 {v2.16b}, [x5] // padding_mask
ld1r {v1.16b}, [x6]
bit v0.16b, v1.16b, v2.16b // Pad v0-v1
// Filter one block
ext v2.16b, v0.16b, v1.16b, #1
ext v3.16b, v0.16b, v1.16b, #2
umull v4.8h, v0.8b, v30.8b
umlal v4.8h, v2.8b, v31.8b
umlal v4.8h, v3.8b, v30.8b
umull2 v5.8h, v0.16b, v30.16b
umlal2 v5.8h, v2.16b, v31.16b
umlal2 v5.8h, v3.16b, v30.16b
subs w1, w1, #16
rshrn v4.8b, v4.8h, #4
rshrn2 v4.16b, v5.8h, #4
st1 {v4.16b}, [x0], #16
b.le 9f
5:
// After one block, any remaining output would only be filtering
// padding - thus just store the padding.
subs w1, w1, #16
st1 {v1.16b}, [x0], #16
b.gt 5b
9:
ret
L(fivetap):
sub x2, x2, #1 // topleft -= 1
movi v29.16b, #2
ld1 {v0.16b}, [x2], #16
movi v30.16b, #4
movi v31.16b, #4
ins v0.b[0], v0.b[1]
1:
// in[end+1] is the last valid pixel. We produce 16 pixels out by
// using 20 pixels in - the last pixel used is [19] of the ones
// read/buffered.
cmp w3, #18
ld1 {v1.16b}, [x2], #16
b.lt 2f // if (end + 1 < 19)
ext v2.16b, v0.16b, v1.16b, #1
ext v3.16b, v0.16b, v1.16b, #2
ext v4.16b, v0.16b, v1.16b, #3
ext v5.16b, v0.16b, v1.16b, #4
umull v6.8h, v0.8b, v29.8b
umlal v6.8h, v2.8b, v30.8b
umlal v6.8h, v3.8b, v31.8b
umlal v6.8h, v4.8b, v30.8b
umlal v6.8h, v5.8b, v29.8b
umull2 v7.8h, v0.16b, v29.16b
umlal2 v7.8h, v2.16b, v30.16b
umlal2 v7.8h, v3.16b, v31.16b
umlal2 v7.8h, v4.16b, v30.16b
umlal2 v7.8h, v5.16b, v29.16b
subs w1, w1, #16
mov v0.16b, v1.16b
rshrn v6.8b, v6.8h, #4
rshrn2 v6.16b, v7.8h, #4
sub w3, w3, #16
st1 {v6.16b}, [x0], #16
b.gt 1b
ret
2:
// Right padding
// x2[w3+1-32] is the padding pixel (x2 points 32 bytes ahead)
movrel x5, padding_mask, -1
sub w6, w3, #31
sub x5, x5, w3, uxtw
add x6, x2, w6, sxtw
ld1 {v2.16b, v3.16b}, [x5] // padding_mask
ld1r {v28.16b}, [x6]
bit v0.16b, v28.16b, v2.16b // Pad v0-v1
bit v1.16b, v28.16b, v3.16b
4:
// Filter one block
ext v2.16b, v0.16b, v1.16b, #1
ext v3.16b, v0.16b, v1.16b, #2
ext v4.16b, v0.16b, v1.16b, #3
ext v5.16b, v0.16b, v1.16b, #4
umull v6.8h, v0.8b, v29.8b
umlal v6.8h, v2.8b, v30.8b
umlal v6.8h, v3.8b, v31.8b
umlal v6.8h, v4.8b, v30.8b
umlal v6.8h, v5.8b, v29.8b
umull2 v7.8h, v0.16b, v29.16b
umlal2 v7.8h, v2.16b, v30.16b
umlal2 v7.8h, v3.16b, v31.16b
umlal2 v7.8h, v4.16b, v30.16b
umlal2 v7.8h, v5.16b, v29.16b
subs w1, w1, #16
mov v0.16b, v1.16b
mov v1.16b, v28.16b
rshrn v6.8b, v6.8h, #4
rshrn2 v6.16b, v7.8h, #4
sub w3, w3, #16
st1 {v6.16b}, [x0], #16
b.le 9f
// v0-v1[w3+1] is the last valid pixel; if (w3 + 1 > 0) we need to
// filter properly once more - aka (w3 >= 0).
cmp w3, #0
b.ge 4b
5:
// When w3 <= 0, all remaining pixels in v0-v1 are equal to the
// last valid pixel - thus just output that without filtering.
subs w1, w1, #16
st1 {v1.16b}, [x0], #16
b.gt 5b
9:
ret
endfunc
// void ipred_pixel_set_8bpc_neon(pixel *out, const pixel px,
// const int n);
function ipred_pixel_set_8bpc_neon, export=1
dup v0.16b, w1
1:
subs w2, w2, #16
st1 {v0.16b}, [x0], #16
b.gt 1b
ret
endfunc
// void ipred_z1_fill1_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const top,
// const int width, const int height,
// const int dx, const int max_base_x);
function ipred_z1_fill1_8bpc_neon, export=1
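// Directional (Z1) fill: per output row, base = xpos >> 6 and frac = xpos & 0x3e
// select and blend top[base] and top[base+1]; rows with base >= max_base_x are
// filled with the padding pixel top[max_base_x].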
clz w9, w3
movrel x8, ipred_z1_fill1_tbl
sub w9, w9, #25
ldrsw x9, [x8, w9, uxtw #2]
add x10, x2, w6, uxtw // top[max_base_x]
add x8, x8, x9
ld1r {v31.16b}, [x10] // padding
mov w7, w5
mov w15, #64
br x8
40:
AARCH64_VALID_JUMP_TARGET
4:
lsr w8, w7, #6 // base
and w9, w7, #0x3e // frac
add w7, w7, w5 // xpos += dx
cmp w8, w6 // base >= max_base_x
lsr w10, w7, #6 // base
and w11, w7, #0x3e // frac
b.ge 49f
ldr d0, [x2, w8, uxtw] // top[base]
ldr d2, [x2, w10, uxtw]
dup v4.4h, w9 // frac
dup v5.4h, w11
ext v1.8b, v0.8b, v0.8b, #1 // top[base+1]
ext v3.8b, v2.8b, v2.8b, #1
usubl v6.8h, v1.8b, v0.8b // top[base+1]-top[base]
usubl v7.8h, v3.8b, v2.8b
ushll v16.8h, v0.8b, #6 // top[base]*64
ushll v17.8h, v2.8b, #6
mla v16.4h, v6.4h, v4.4h // + top[base+1]*frac
mla v17.4h, v7.4h, v5.4h
rshrn v16.8b, v16.8h, #6
rshrn v17.8b, v17.8h, #6
st1 {v16.s}[0], [x0], x1
add w7, w7, w5 // xpos += dx
subs w4, w4, #2
st1 {v17.s}[0], [x0], x1
b.gt 4b
ret
49:
st1 {v31.s}[0], [x0], x1
subs w4, w4, #2
st1 {v31.s}[0], [x0], x1
b.gt 49b
ret
80:
AARCH64_VALID_JUMP_TARGET
8:
lsr w8, w7, #6 // base
and w9, w7, #0x3e // frac
add w7, w7, w5 // xpos += dx
cmp w8, w6 // base >= max_base_x
lsr w10, w7, #6 // base
and w11, w7, #0x3e // frac
b.ge 89f
ldr q0, [x2, w8, uxtw] // top[base]
ldr q2, [x2, w10, uxtw]
dup v4.8b, w9 // frac
dup v5.8b, w11
sub w9, w15, w9 // 64 - frac
sub w11, w15, w11
dup v6.8b, w9 // 64 - frac
dup v7.8b, w11
ext v1.16b, v0.16b, v0.16b, #1 // top[base+1]
ext v3.16b, v2.16b, v2.16b, #1
umull v16.8h, v0.8b, v6.8b // top[base]*(64-frac)
umlal v16.8h, v1.8b, v4.8b // + top[base+1]*frac
umull v17.8h, v2.8b, v7.8b
umlal v17.8h, v3.8b, v5.8b
rshrn v16.8b, v16.8h, #6
rshrn v17.8b, v17.8h, #6
st1 {v16.8b}, [x0], x1
add w7, w7, w5 // xpos += dx
subs w4, w4, #2
st1 {v17.8b}, [x0], x1
b.gt 8b
ret
89:
st1 {v31.8b}, [x0], x1
subs w4, w4, #2
st1 {v31.8b}, [x0], x1
b.gt 89b
ret
160:
320:
640:
AARCH64_VALID_JUMP_TARGET
mov w12, w3
add x13, x0, x1
lsl x1, x1, #1
sub x1, x1, w3, uxtw
1:
lsr w8, w7, #6 // base
and w9, w7, #0x3e // frac
add w7, w7, w5 // xpos += dx
cmp w8, w6 // base >= max_base_x
lsr w10, w7, #6 // base
and w11, w7, #0x3e // frac
b.ge 169f
add x8, x2, w8, uxtw
add x10, x2, w10, uxtw
dup v4.16b, w9 // frac
dup v5.16b, w11
ld1 {v0.16b, v1.16b}, [x8], #32 // top[base]
ld1 {v2.16b, v3.16b}, [x10], #32
sub w9, w15, w9 // 64 - frac
sub w11, w15, w11
dup v6.16b, w9 // 64 - frac
dup v7.16b, w11
add w7, w7, w5 // xpos += dx
2:
ext v16.16b, v0.16b, v1.16b, #1 // top[base+1]
ext v17.16b, v2.16b, v3.16b, #1
subs w3, w3, #16
umull v18.8h, v0.8b, v6.8b // top[base]*(64-frac)
umlal v18.8h, v16.8b, v4.8b // + top[base+1]*frac
umull2 v19.8h, v0.16b, v6.16b
umlal2 v19.8h, v16.16b, v4.16b
umull v20.8h, v2.8b, v7.8b
umlal v20.8h, v17.8b, v5.8b
umull2 v21.8h, v2.16b, v7.16b
umlal2 v21.8h, v17.16b, v5.16b
rshrn v16.8b, v18.8h, #6
rshrn2 v16.16b, v19.8h, #6
rshrn v17.8b, v20.8h, #6
rshrn2 v17.16b, v21.8h, #6
st1 {v16.16b}, [x0], #16
st1 {v17.16b}, [x13], #16
b.le 3f
mov v0.16b, v1.16b
ld1 {v1.16b}, [x8], #16 // top[base]
mov v2.16b, v3.16b
ld1 {v3.16b}, [x10], #16
b 2b
3:
subs w4, w4, #2
b.le 9f
add x0, x0, x1
add x13, x13, x1
mov w3, w12
b 1b
9:
ret
169:
st1 {v31.16b}, [x0], #16
subs w3, w3, #16
st1 {v31.16b}, [x13], #16
b.gt 169b
subs w4, w4, #2
b.le 9b
add x0, x0, x1
add x13, x13, x1
mov w3, w12
b 169b
endfunc
jumptable ipred_z1_fill1_tbl
.word 640b - ipred_z1_fill1_tbl
.word 320b - ipred_z1_fill1_tbl
.word 160b - ipred_z1_fill1_tbl
.word 80b - ipred_z1_fill1_tbl
.word 40b - ipred_z1_fill1_tbl
endjumptable
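// z1 variant for an upsampled top edge; only w == 4 and w == 8 occur here.
// Roughly (illustrative sketch), each output pixel interpolates between a
// pair of adjacent upsampled top samples:
//   dst[y*stride + x] =
//       (top[base + 2*x]*(64 - frac) + top[base + 2*x + 1]*frac + 32) >> 6
// with base/frac derived from xpos exactly as in ipred_z1_fill1; the
// even/odd samples are separated with uzp1/uzp2.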
function ipred_z1_fill2_8bpc_neon, export=1
cmp w3, #8
add x10, x2, w6, uxtw // top[max_base_x]
ld1r {v31.16b}, [x10] // padding
mov w7, w5
mov w15, #64
b.eq 8f
4: // w == 4
lsr w8, w7, #6 // base
and w9, w7, #0x3e // frac
add w7, w7, w5 // xpos += dx
cmp w8, w6 // base >= max_base_x
lsr w10, w7, #6 // base
and w11, w7, #0x3e // frac
b.ge 49f
ldr d0, [x2, w8, uxtw] // top[base]
ldr d2, [x2, w10, uxtw]
dup v4.4h, w9 // frac
dup v5.4h, w11
uzp2 v1.8b, v0.8b, v0.8b // top[base+1]
uzp1 v0.8b, v0.8b, v0.8b // top[base]
uzp2 v3.8b, v2.8b, v2.8b
uzp1 v2.8b, v2.8b, v2.8b
usubl v6.8h, v1.8b, v0.8b // top[base+1]-top[base]
usubl v7.8h, v3.8b, v2.8b
ushll v16.8h, v0.8b, #6 // top[base]*64
ushll v17.8h, v2.8b, #6
mla v16.4h, v6.4h, v4.4h // + top[base+1]*frac
mla v17.4h, v7.4h, v5.4h
rshrn v16.8b, v16.8h, #6
rshrn v17.8b, v17.8h, #6
st1 {v16.s}[0], [x0], x1
add w7, w7, w5 // xpos += dx
subs w4, w4, #2
st1 {v17.s}[0], [x0], x1
b.gt 4b
ret
49:
st1 {v31.s}[0], [x0], x1
subs w4, w4, #2
st1 {v31.s}[0], [x0], x1
b.gt 49b
ret
8: // w == 8
lsr w8, w7, #6 // base
and w9, w7, #0x3e // frac
add w7, w7, w5 // xpos += dx
cmp w8, w6 // base >= max_base_x
lsr w10, w7, #6 // base
and w11, w7, #0x3e // frac
b.ge 89f
ldr q0, [x2, w8, uxtw] // top[base]
ldr q2, [x2, w10, uxtw]
dup v4.8b, w9 // frac
dup v5.8b, w11
sub w9, w15, w9 // 64 - frac
sub w11, w15, w11
dup v6.8b, w9 // 64 - frac
dup v7.8b, w11
uzp2 v1.16b, v0.16b, v0.16b // top[base+1]
uzp1 v0.16b, v0.16b, v0.16b // top[base]
uzp2 v3.16b, v2.16b, v2.16b
uzp1 v2.16b, v2.16b, v2.16b
umull v16.8h, v1.8b, v4.8b // top[base+1]*frac
umlal v16.8h, v0.8b, v6.8b // + top[base]*(64-frac)
umull v17.8h, v3.8b, v5.8b
umlal v17.8h, v2.8b, v7.8b
rshrn v16.8b, v16.8h, #6
rshrn v17.8b, v17.8h, #6
st1 {v16.8b}, [x0], x1
add w7, w7, w5 // xpos += dx
subs w4, w4, #2
st1 {v17.8b}, [x0], x1
b.gt 8b
ret
89:
st1 {v31.8b}, [x0], x1
subs w4, w4, #2
st1 {v31.8b}, [x0], x1
b.gt 89b
ret
endfunc
// void ipred_reverse_8bpc_neon(pixel *dst, const pixel *const src,
// const int n);
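// Reverses the n pixels immediately preceding src, 16 at a time; roughly
// (illustrative sketch) dst[i] = src[-1 - i], implemented with rev64 and
// storing the two d-register halves in swapped order.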
function ipred_reverse_8bpc_neon, export=1
sub x1, x1, #16
add x3, x0, #8
mov x4, #16
1:
ld1 {v0.16b}, [x1]
subs w2, w2, #16
rev64 v0.16b, v0.16b
sub x1, x1, #16
st1 {v0.d}[1], [x0], x4
st1 {v0.d}[0], [x3], x4
b.gt 1b
ret
endfunc
const increments
.short 0, 1, 2, 3, 4, 5, 6, 7
.short 8, 9, 10, 11, 12, 13, 14, 15
endconst
// void ipred_z2_fill1_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const top,
// const pixel *const left,
// const int width, const int height,
// const int dx, const int dy);
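// Per-pixel reference model (an illustrative sketch; the dav1d C code
// differs in details): each output pixel is predicted from either the top
// or the left edge, and the two candidates are merged with cmge + bit:
//   xpos(y) = (1 << 6) - (y + 1)*dx;  base_x = xpos >> 6;  frac_x = xpos & 0x3e;
//   ypos(x) = -(x + 1)*dy;  base_y = (ypos >> 6) + 1;      frac_y = ypos & 0x3e;
//   from_top  = (top[base_x + x]*(64 - frac_x) +
//                top[base_x + x + 1]*frac_x + 32) >> 6;
//   from_left = (left[base_y + y]*(64 - frac_y) +
//                left[base_y + y + 1]*frac_y + 32) >> 6;
//   dst[y*stride + x] = (base_x + x >= 0) ? from_top : from_left;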
function ipred_z2_fill1_8bpc_neon, export=1
clz w10, w4
movrel x9, ipred_z2_fill1_tbl
sub w10, w10, #25
ldrsw x10, [x9, w10, uxtw #2]
mov w8, #(1 << 6) // xpos = 1 << 6
add x9, x9, x10
sub w8, w8, w6 // xpos -= dx
movrel x11, increments
ld1 {v31.8h}, [x11] // increments
neg w7, w7 // -dy
br x9
40:
AARCH64_VALID_JUMP_TARGET
dup v30.4h, w7 // -dy
movi v17.8b, #1
mul v16.4h, v31.4h, v30.4h // {0,1,2,3}* -dy
movi v25.16b, #0x3e
add v30.4h, v16.4h, v30.4h // -= dy
xtn v31.8b, v31.8h // {0,1,2,3}
// Worst case height for w=4 is 16, but we need at least h+1 elements
ld1 {v0.16b, v1.16b}, [x3] // left[]
movi v26.16b, #64
movi v19.16b, #2
xtn v27.8b, v30.8h // (uint8_t)ypos
shrn v29.8b, v30.8h, #6 // ypos >> 6
and v27.8b, v27.8b, v25.8b // frac_y
add v29.8b, v29.8b, v17.8b // base_y = (ypos >> 6) + 1
add v30.8b, v29.8b, v17.8b // base_y + 1
add v28.8b, v29.8b, v19.8b // base_y + 2
tbl v16.8b, {v0.16b}, v29.8b // left[base_y]
trn1 v30.2s, v30.2s, v28.2s // base_y + 1, base_y + 2
sub v28.8b, v26.8b, v27.8b // 64 - frac_y
trn1 v31.2s, v31.2s, v31.2s // {0,1,2,3,0,1,2,3}
trn1 v27.2s, v27.2s, v27.2s // frac_y
trn1 v28.2s, v28.2s, v28.2s // 64 - frac_y
movi v29.8b, #2
4:
asr w9, w8, #6 // base_x
dup v6.4h, w8 // xpos
sub w8, w8, w6 // xpos -= dx
cmp w9, #-4 // base_x <= -4
asr w11, w8, #6 // base_x
b.le 49f
dup v7.4h, w8 // xpos
ldr d2, [x2, w9, sxtw] // top[base_x]
ldr d4, [x2, w11, sxtw]
trn1 v6.2d, v6.2d, v7.2d // xpos
// Cut corners here and only do tbl over v0; the last pixel, from v1,
// only seems to be needed after skipping to the left-only codepath
// below.
tbl v17.8b, {v0.16b}, v30.8b // left[base_y+1], left[base_y+2]
shrn v20.8b, v6.8h, #6 // first base_x for each row
xtn v6.8b, v6.8h // (uint8_t)xpos
ext v3.8b, v2.8b, v2.8b, #1 // top[base_x+1]
ext v5.8b, v4.8b, v4.8b, #1
and v6.8b, v6.8b, v25.8b // frac_x
trn1 v16.2s, v16.2s, v17.2s // left[base_y], left[base_y+1]
trn1 v2.2s, v2.2s, v4.2s // top[base_x]
trn1 v3.2s, v3.2s, v5.2s // top[base_x+1]
sub v7.8b, v26.8b, v6.8b // 64 - frac_x
add v20.8b, v20.8b, v31.8b // actual base_x
umull v16.8h, v16.8b, v28.8b // left[base_y]*(64-frac_y)
umlal v16.8h, v17.8b, v27.8b // + left[base_y+1]*frac_y
umull v22.8h, v2.8b, v7.8b // top[base_x]*(64-frac_x)
umlal v22.8h, v3.8b, v6.8b // + top[base_x+1]*frac_x
cmge v20.8b, v20.8b, #0
rshrn v16.8b, v16.8h, #6
rshrn v22.8b, v22.8h, #6
bit v16.8b, v22.8b, v20.8b
st1 {v16.s}[0], [x0], x1
sub w8, w8, w6 // xpos -= dx
subs w5, w5, #2
st1 {v16.s}[1], [x0], x1
b.le 9f
ext v16.8b, v17.8b, v17.8b, #4
add v30.8b, v30.8b, v29.8b // base_y += 2
b 4b
49:
tbl v17.8b, {v0.16b, v1.16b}, v30.8b // left[base_y+1], left[base_y+2]
trn1 v16.2s, v16.2s, v17.2s // left[base_y], left[base_y+1]
umull v18.8h, v16.8b, v28.8b // left[base_y]*(64-frac_y)
umlal v18.8h, v17.8b, v27.8b // + left[base_y+1]*frac_y
rshrn v18.8b, v18.8h, #6
st1 {v18.s}[0], [x0], x1
subs w5, w5, #2
st1 {v18.s}[1], [x0], x1
b.le 9f
ext v16.8b, v17.8b, v17.8b, #4
add v30.8b, v30.8b, v29.8b // base_y += 2
b 49b
9:
ret
80:
AARCH64_VALID_JUMP_TARGET
dup v30.8h, w7 // -dy
movi v17.8b, #1
mul v16.8h, v31.8h, v30.8h // {0,1,2,3,4,5,6,7}* -dy
movi v25.16b, #0x3e
add v30.8h, v16.8h, v30.8h // -= dy
xtn v31.8b, v31.8h // {0,1,2,3,4,5,6,7}
// Worst case height for w=8 is 32, but we need at least h+1 elements
ld1 {v0.16b, v1.16b, v2.16b}, [x3] // left[]
movi v26.16b, #64
movi v19.16b, #2
xtn v27.8b, v30.8h // (uint8_t)ypos
shrn v29.8b, v30.8h, #6 // ypos >> 6
and v27.8b, v27.8b, v25.8b // frac_y
add v29.8b, v29.8b, v17.8b // base_y = (ypos >> 6) + 1
// Cut corners here; for the first row we don't expect to need to
// read outside of v0.
tbl v18.8b, {v0.16b}, v29.8b // left[base_y]
add v30.8b, v29.8b, v19.8b // base_y + 2
add v29.8b, v29.8b, v17.8b // base_y + 1
sub v28.8b, v26.8b, v27.8b // 64 - frac_y
trn1 v31.2d, v31.2d, v31.2d // {0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7}
movi v24.8b, #2 // 2
8:
asr w9, w8, #6 // base_x
dup v16.8h, w8 // xpos
sub w8, w8, w6 // xpos -= dx
cmp w9, #-8 // base_x <= -8
asr w11, w8, #6 // base_x
b.le 89f
dup v17.8h, w8 // xpos
ldr q4, [x2, w9, sxtw] // top[base_x]
ldr q6, [x2, w11, sxtw]
// Cut corners here and only do tbl over v0-v1; the last pixel, from v2,
// only seems to be needed after skipping to the left-only codepath
// below.
tbl v19.8b, {v0.16b, v1.16b}, v29.8b // left[base_y+1]
shrn v21.8b, v16.8h, #6 // first base_x
shrn2 v21.16b, v17.8h, #6
xtn v16.8b, v16.8h // (uint8_t)xpos
xtn2 v16.16b, v17.8h
tbl v20.8b, {v0.16b, v1.16b}, v30.8b // left[base_y+2]
ext v5.16b, v4.16b, v4.16b, #1 // top[base_x+1]
ext v7.16b, v6.16b, v6.16b, #1
and v16.16b, v16.16b, v25.16b // frac_x
trn1 v4.2d, v4.2d, v6.2d // top[base_x]
trn1 v5.2d, v5.2d, v7.2d // top[base_x+1]
sub v7.16b, v26.16b, v16.16b // 64 - frac_x
add v21.16b, v21.16b, v31.16b // actual base_x
umull v6.8h, v18.8b, v28.8b // left[base_y]*(64-frac_y)
umlal v6.8h, v19.8b, v27.8b // + left[base_y+1]*frac_y
umull v17.8h, v19.8b, v28.8b
umlal v17.8h, v20.8b, v27.8b
umull v22.8h, v4.8b, v7.8b // top[base_x]*(64-frac_x)
umlal v22.8h, v5.8b, v16.8b // + top[base_x+1]*frac_x
umull2 v23.8h, v4.16b, v7.16b
umlal2 v23.8h, v5.16b, v16.16b
cmge v21.16b, v21.16b, #0
rshrn v6.8b, v6.8h, #6
rshrn2 v6.16b, v17.8h, #6
rshrn v22.8b, v22.8h, #6
rshrn2 v22.16b, v23.8h, #6
bit v6.16b, v22.16b, v21.16b
st1 {v6.d}[0], [x0], x1
sub w8, w8, w6 // xpos -= dx
subs w5, w5, #2
st1 {v6.d}[1], [x0], x1
b.le 9f
mov v18.8b, v20.8b
add v29.8b, v29.8b, v24.8b // base_y += 2
add v30.8b, v30.8b, v24.8b // base_y += 2
b 8b
89:
tbl v19.8b, {v0.16b, v1.16b, v2.16b}, v29.8b // left[base_y+1]
tbl v20.8b, {v0.16b, v1.16b, v2.16b}, v30.8b // left[base_y+2]
umull v6.8h, v18.8b, v28.8b // left[base_y]*(64-frac_y)
umlal v6.8h, v19.8b, v27.8b // + left[base_y+1]*frac_y
umull v17.8h, v19.8b, v28.8b
umlal v17.8h, v20.8b, v27.8b
rshrn v6.8b, v6.8h, #6
rshrn2 v6.16b, v17.8h, #6
st1 {v6.d}[0], [x0], x1
subs w5, w5, #2
st1 {v6.d}[1], [x0], x1
b.le 9f
mov v18.8b, v20.8b
add v29.8b, v29.8b, v24.8b // base_y += 2
add v30.8b, v30.8b, v24.8b // base_y += 2
b 89b
9:
ret
160:
AARCH64_VALID_JUMP_TARGET
stp d8, d9, [sp, #-0x40]!
stp d10, d11, [sp, #0x10]
stp d12, d13, [sp, #0x20]
stp d14, d15, [sp, #0x30]
add x11, x11, #16 // increments
dup v18.8h, w7 // -dy
movi v17.16b, #1
add x3, x3, #1 // Skip past left[0]
ld1 {v14.8h}, [x11] // {8,9,10,11,12,13,14,15}
mul v16.8h, v31.8h, v18.8h // {0,1,2,3,4,5,6,7}* -dy
mul v19.8h, v14.8h, v18.8h // {8,9,10,11,12,13,14,15}* -dy
movi v25.16b, #0x3e
add v16.8h, v16.8h, v18.8h // -= dy
add v18.8h, v19.8h, v18.8h
xtn v31.8b, v31.8h // {0,1,2,3,4,5,6,7}
xtn2 v31.16b, v14.8h // {8,9,10,11,12,13,14,15}
// Worst case height is 64.
ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x3] // left[]
ld1r {v15.16b}, [x2] // left[0] == top[0]
movi v26.16b, #64
movi v19.16b, #2
xtn v27.8b, v16.8h // (uint8_t)ypos
xtn2 v27.16b, v18.8h
shrn v29.8b, v16.8h, #6 // ypos >> 6
shrn2 v29.16b, v18.8h, #6
mov v18.16b, v15.16b // left[0]
and v27.16b, v27.16b, v25.16b // frac_y
// Cut corners here; for the first row we don't expect to need to
// read outside of v0.
tbx v18.16b, {v0.16b}, v29.16b // left[base_y]
add v30.16b, v29.16b, v19.16b // base_y + 2
add v29.16b, v29.16b, v17.16b // base_y + 1
sub v28.16b, v26.16b, v27.16b // 64 - frac_y
movi v24.16b, #2 // 2
16:
asr w9, w8, #6 // base_x
dup v16.8h, w8 // xpos
sub w8, w8, w6 // xpos -= dx
cmp w9, #-16 // base_x <= -16
asr w11, w8, #6 // base_x
b.le 169f
dup v17.8h, w8 // xpos
add x9, x2, w9, sxtw
add x11, x2, w11, sxtw
ld1 {v4.16b, v5.16b}, [x9] // top[base_x]
mov v19.16b, v15.16b // left[0]
ld1 {v6.16b, v7.16b}, [x11]
tbx v19.16b, {v0.16b, v1.16b, v2.16b, v3.16b}, v29.16b // left[base_y+1]
mov v20.16b, v15.16b // left[0]
shrn v21.8b, v16.8h, #6 // first base_x
shrn v22.8b, v17.8h, #6
xtn v16.8b, v16.8h // (uint8_t)xpos
xtn v17.8b, v17.8h
tbx v20.16b, {v0.16b, v1.16b, v2.16b, v3.16b}, v30.16b // left[base_y+2]
trn1 v21.2d, v21.2d, v21.2d // first base_x
trn1 v22.2d, v22.2d, v22.2d
trn1 v16.2d, v16.2d, v16.2d // (uint8_t)xpos
trn1 v17.2d, v17.2d, v17.2d
ext v5.16b, v4.16b, v5.16b, #1 // top[base_x+1]
ext v7.16b, v6.16b, v7.16b, #1
and v16.16b, v16.16b, v25.16b // frac_x
and v17.16b, v17.16b, v25.16b
umull v10.8h, v18.8b, v28.8b // left[base_y]*(64-frac_y)
umlal v10.8h, v19.8b, v27.8b // + left[base_y+1]*frac_y
sub v8.16b, v26.16b, v16.16b // 64 - frac_x
sub v9.16b, v26.16b, v17.16b
umull2 v11.8h, v18.16b, v28.16b
umlal2 v11.8h, v19.16b, v27.16b
add v21.16b, v21.16b, v31.16b // actual base_x
add v22.16b, v22.16b, v31.16b
umull v12.8h, v19.8b, v28.8b
umlal v12.8h, v20.8b, v27.8b
umull2 v13.8h, v19.16b, v28.16b
umlal2 v13.8h, v20.16b, v27.16b
rshrn v10.8b, v10.8h, #6
rshrn2 v10.16b, v11.8h, #6
rshrn v11.8b, v12.8h, #6
rshrn2 v11.16b, v13.8h, #6
umull v12.8h, v4.8b, v8.8b // top[base_x]*(64-frac_x)
umlal v12.8h, v5.8b, v16.8b // + top[base_x+1]*frac_x
umull2 v13.8h, v4.16b, v8.16b
umlal2 v13.8h, v5.16b, v16.16b
umull v14.8h, v6.8b, v9.8b
umlal v14.8h, v7.8b, v17.8b
umull2 v18.8h, v6.16b, v9.16b
umlal2 v18.8h, v7.16b, v17.16b
cmge v21.16b, v21.16b, #0
cmge v22.16b, v22.16b, #0
rshrn v12.8b, v12.8h, #6
rshrn2 v12.16b, v13.8h, #6
rshrn v13.8b, v14.8h, #6
rshrn2 v13.16b, v18.8h, #6
bit v10.16b, v12.16b, v21.16b
bit v11.16b, v13.16b, v22.16b
st1 {v10.16b}, [x0], x1
subs w5, w5, #2
sub w8, w8, w6 // xpos -= dx
st1 {v11.16b}, [x0], x1
b.le 9f
mov v18.16b, v20.16b
add v29.16b, v29.16b, v24.16b // base_y += 2
add v30.16b, v30.16b, v24.16b // base_y += 2
b 16b
169:
mov v19.16b, v15.16b
mov v20.16b, v15.16b
tbx v19.16b, {v0.16b, v1.16b, v2.16b, v3.16b}, v29.16b // left[base_y+1]
tbx v20.16b, {v0.16b, v1.16b, v2.16b, v3.16b}, v30.16b // left[base_y+2]
umull v4.8h, v18.8b, v28.8b // left[base_y]*(64-frac_y)
umlal v4.8h, v19.8b, v27.8b // + left[base_y+1]*frac_y
umull2 v5.8h, v18.16b, v28.16b
umlal2 v5.8h, v19.16b, v27.16b
umull v6.8h, v19.8b, v28.8b
umlal v6.8h, v20.8b, v27.8b
umull2 v7.8h, v19.16b, v28.16b
umlal2 v7.8h, v20.16b, v27.16b
rshrn v4.8b, v4.8h, #6
rshrn2 v4.16b, v5.8h, #6
rshrn v5.8b, v6.8h, #6
rshrn2 v5.16b, v7.8h, #6
st1 {v4.16b}, [x0], x1
subs w5, w5, #2
st1 {v5.16b}, [x0], x1
b.le 9f
mov v18.16b, v20.16b
add v29.16b, v29.16b, v24.16b // base_y += 2
add v30.16b, v30.16b, v24.16b // base_y += 2
b 169b
9:
ldp d14, d15, [sp, #0x30]
ldp d12, d13, [sp, #0x20]
ldp d10, d11, [sp, #0x10]
ldp d8, d9, [sp], 0x40
ret
320:
640:
AARCH64_VALID_JUMP_TARGET
stp d8, d9, [sp, #-0x40]!
stp d10, d11, [sp, #0x10]
stp d12, d13, [sp, #0x20]
stp d14, d15, [sp, #0x30]
add x11, x11, #16 // increments
dup v25.8h, w7 // -dy
add x3, x3, #1 // Skip past left[0]
ld1 {v14.8h}, [x11] // {8,9,10,11,12,13,14,15}
add x13, x0, x1 // alternating row
lsl x1, x1, #1 // stride *= 2
sub x1, x1, w4, uxtw // stride -= width
movi v11.8h, #8
mul v26.8h, v31.8h, v25.8h // {0,1,2,3,4,5,6,7}* -dy
add v26.8h, v26.8h, v25.8h // -= dy
mul v25.8h, v25.8h, v11.8h // -8*dy
xtn v31.8b, v31.8h // {0,1,2,3,4,5,6,7}
xtn2 v31.16b, v14.8h // {8,9,10,11,12,13,14,15}
// Worst case height is 64.
ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x3] // left[]
ld1r {v15.16b}, [x2] // left[0] == top[0]
mov w12, w4 // orig w
neg w14, w4 // -w
1:
mov v23.16b, v26.16b // reset ypos
asr w9, w8, #6 // base_x
dup v16.8h, w8 // xpos
sub w8, w8, w6 // xpos -= dx
cmp w9, w14 // base_x <= -w
asr w11, w8, #6 // base_x
b.le 329f
dup v17.8h, w8 // xpos
sub w8, w8, w6 // xpos -= dx
add x9, x2, w9, sxtw
add x11, x2, w11, sxtw
sqshrn v21.8b, v16.8h, #6 // first base_x
sqshrn v22.8b, v17.8h, #6
xtn v16.8b, v16.8h // (uint8_t)xpos
xtn v17.8b, v17.8h
ld1 {v4.16b}, [x9], #16 // top[base_x]
ld1 {v6.16b}, [x11], #16
trn1 v21.2d, v21.2d, v21.2d // first base_x
trn1 v22.2d, v22.2d, v22.2d
trn1 v16.2d, v16.2d, v16.2d // (uint8_t)xpos
trn1 v17.2d, v17.2d, v17.2d
movi v10.16b, #0x3e
movi v11.16b, #64
and v16.16b, v16.16b, v10.16b // frac_x
and v17.16b, v17.16b, v10.16b
sub v8.16b, v11.16b, v16.16b // 64 - frac_x
sub v9.16b, v11.16b, v17.16b
add v21.16b, v21.16b, v31.16b // actual base_x
add v22.16b, v22.16b, v31.16b
2:
add v13.8h, v23.8h, v25.8h // ypos -= 8*dy
movi v12.16b, #64
movi v20.16b, #2
movi v10.16b, #0x3e
smov w10, v22.b[0]
xtn v27.8b, v23.8h // (uint8_t)ypos
xtn2 v27.16b, v13.8h
shrn v29.8b, v23.8h, #6 // ypos >> 6
shrn2 v29.16b, v13.8h, #6
cmp w10, #0 // base_x (bottom left) >= 0
and v27.16b, v27.16b, v10.16b // frac_y
mov v18.16b, v15.16b // left[0]
b.ge 4f
add v23.8h, v13.8h, v25.8h // ypos -= 8*dy
movi v13.16b, #1
tbx v18.16b, {v0.16b, v1.16b, v2.16b, v3.16b}, v29.16b // left[base_y]
add v29.16b, v29.16b, v13.16b // base_y + 1
mov v19.16b, v15.16b // left[0]
sub v28.16b, v12.16b, v27.16b // 64 - frac_y
ld1 {v5.16b}, [x9], #16 // top[base_x]
ld1 {v7.16b}, [x11], #16
tbx v19.16b, {v0.16b, v1.16b, v2.16b, v3.16b}, v29.16b // left[base_y+1]
add v29.16b, v29.16b, v13.16b // base_y + 2
mov v20.16b, v15.16b // left[0]
tbx v20.16b, {v0.16b, v1.16b, v2.16b, v3.16b}, v29.16b // left[base_y+2]
umull v10.8h, v18.8b, v28.8b // left[base_y]*(64-frac_y)
umlal v10.8h, v19.8b, v27.8b // + left[base_y+1]*frac_y
umull2 v11.8h, v18.16b, v28.16b
umlal2 v11.8h, v19.16b, v27.16b
umull v12.8h, v19.8b, v28.8b
umlal v12.8h, v20.8b, v27.8b
umull2 v13.8h, v19.16b, v28.16b
umlal2 v13.8h, v20.16b, v27.16b
ext v18.16b, v4.16b, v5.16b, #1 // top[base_x+1]
ext v19.16b, v6.16b, v7.16b, #1
rshrn v10.8b, v10.8h, #6
rshrn2 v10.16b, v11.8h, #6
rshrn v11.8b, v12.8h, #6
rshrn2 v11.16b, v13.8h, #6
umull v12.8h, v4.8b, v8.8b // top[base_x]*(64-frac_x)
umlal v12.8h, v18.8b, v16.8b // + top[base_x+1]*frac_x
umull2 v13.8h, v4.16b, v8.16b
umlal2 v13.8h, v18.16b, v16.16b
umull v14.8h, v6.8b, v9.8b
umlal v14.8h, v19.8b, v17.8b
umull2 v20.8h, v6.16b, v9.16b
umlal2 v20.8h, v19.16b, v17.16b
cmge v18.16b, v21.16b, #0
cmge v19.16b, v22.16b, #0
rshrn v12.8b, v12.8h, #6
rshrn2 v12.16b, v13.8h, #6
rshrn v13.8b, v14.8h, #6
rshrn2 v13.16b, v20.8h, #6
bit v10.16b, v12.16b, v18.16b
bit v11.16b, v13.16b, v19.16b
st1 {v10.16b}, [x0], #16
subs w4, w4, #16
st1 {v11.16b}, [x13], #16
b.le 3f
movi v10.16b, #16
mov v4.16b, v5.16b
mov v6.16b, v7.16b
add v21.16b, v21.16b, v10.16b // base_x += 16
add v22.16b, v22.16b, v10.16b
b 2b
3:
subs w5, w5, #2
b.le 9f
movi v10.8h, #128
add x0, x0, x1
add x13, x13, x1
mov w4, w12 // reset w
add v26.8h, v26.8h, v10.8h // ypos += 2*(1<<6)
b 1b
4: // The rest of the row only predicted from top[]
ld1 {v5.16b}, [x9], #16 // top[base_x]
ld1 {v7.16b}, [x11], #16
ext v18.16b, v4.16b, v5.16b, #1 // top[base_x+1]
ext v19.16b, v6.16b, v7.16b, #1
umull v12.8h, v4.8b, v8.8b // top[base_x]*(64-frac_x)
umlal v12.8h, v18.8b, v16.8b // + top[base_x+1]*frac_x
umull2 v13.8h, v4.16b, v8.16b
umlal2 v13.8h, v18.16b, v16.16b
umull v14.8h, v6.8b, v9.8b
umlal v14.8h, v19.8b, v17.8b
umull2 v20.8h, v6.16b, v9.16b
umlal2 v20.8h, v19.16b, v17.16b
rshrn v12.8b, v12.8h, #6
rshrn2 v12.16b, v13.8h, #6
rshrn v13.8b, v14.8h, #6
rshrn2 v13.16b, v20.8h, #6
st1 {v12.16b}, [x0], #16
subs w4, w4, #16
st1 {v13.16b}, [x13], #16
b.le 3b
mov v4.16b, v5.16b
mov v6.16b, v7.16b
b 4b
329: // The rest of the block only predicted from left[]
add x1, x1, w4, uxtw // restore stride
mov w12, w5 // orig remaining h
1:
add v13.8h, v23.8h, v25.8h // ypos -= 8*dy
movi v12.16b, #64
movi v10.16b, #0x3e
xtn v27.8b, v23.8h // (uint8_t)ypos
xtn2 v27.16b, v13.8h
shrn v29.8b, v23.8h, #6 // ypos >> 6
shrn2 v29.16b, v13.8h, #6
and v27.16b, v27.16b, v10.16b // frac_y
mov v18.16b, v15.16b // left[0]
add v23.8h, v13.8h, v25.8h // ypos -= 8*dy
movi v21.16b, #1
tbx v18.16b, {v0.16b, v1.16b, v2.16b, v3.16b}, v29.16b // left[base_y]
add v29.16b, v29.16b, v21.16b // base_y + 1
sub v28.16b, v12.16b, v27.16b // 64 - frac_y
2:
mov v19.16b, v15.16b // left[0]
tbx v19.16b, {v0.16b, v1.16b, v2.16b, v3.16b}, v29.16b // left[base_y+1]
add v29.16b, v29.16b, v21.16b // base_y + 2
mov v20.16b, v15.16b // left[0]
tbx v20.16b, {v0.16b, v1.16b, v2.16b, v3.16b}, v29.16b // left[base_y+2]
add v29.16b, v29.16b, v21.16b // next base_y
umull v10.8h, v18.8b, v28.8b // left[base_y]*(64-frac_y)
umlal v10.8h, v19.8b, v27.8b // + left[base_y+1]*frac_y
umull2 v11.8h, v18.16b, v28.16b
umlal2 v11.8h, v19.16b, v27.16b
umull v12.8h, v19.8b, v28.8b
umlal v12.8h, v20.8b, v27.8b
umull2 v13.8h, v19.16b, v28.16b
umlal2 v13.8h, v20.16b, v27.16b
rshrn v10.8b, v10.8h, #6
rshrn2 v10.16b, v11.8h, #6
rshrn v11.8b, v12.8h, #6
rshrn2 v11.16b, v13.8h, #6
st1 {v10.16b}, [x0], x1
subs w5, w5, #2
st1 {v11.16b}, [x13], x1
b.le 3f
mov v18.16b, v20.16b
b 2b
3:
subs w4, w4, #16
b.le 9f
lsr x1, x1, #1
msub x0, x1, x12, x0 // ptr -= h * stride
msub x13, x1, x12, x13
lsl x1, x1, #1
add x0, x0, #16
add x13, x13, #16
mov w5, w12 // reset h
b 1b
9:
ldp d14, d15, [sp, #0x30]
ldp d12, d13, [sp, #0x20]
ldp d10, d11, [sp, #0x10]
ldp d8, d9, [sp], 0x40
ret
endfunc
jumptable ipred_z2_fill1_tbl
.word 640b - ipred_z2_fill1_tbl
.word 320b - ipred_z2_fill1_tbl
.word 160b - ipred_z2_fill1_tbl
.word 80b - ipred_z2_fill1_tbl
.word 40b - ipred_z2_fill1_tbl
endjumptable
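// z2 variant for an upsampled top edge (see the "For upsample_top" notes
// below): xpos starts at 2 << 6 and the top index advances by 2 per output
// column, so the even/odd top samples are split with uzp1/uzp2 before the
// same top/left selection as in ipred_z2_fill1.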
function ipred_z2_fill2_8bpc_neon, export=1
cmp w4, #8
mov w8, #(2 << 6) // xpos = 2 << 6
sub w8, w8, w6 // xpos -= dx
movrel x11, increments
ld1 {v31.8h}, [x11] // increments
neg w7, w7 // -dy
b.eq 80f
40:
dup v30.4h, w7 // -dy
movi v17.8b, #1
mul v16.4h, v31.4h, v30.4h // {0,1,2,3}* -dy
movi v25.16b, #0x3e
add v30.4h, v16.4h, v30.4h // -= dy
xtn v31.8b, v31.8h // {0,1,2,3}
// For upsample_top, w <= 8 and h <= 8; we may need up to h+1 elements
// from left.
ld1 {v0.16b}, [x3] // left[]
movi v26.16b, #64
movi v19.16b, #2
xtn v27.8b, v30.8h // (uint8_t)ypos
shrn v29.8b, v30.8h, #6 // ypos >> 6
and v27.8b, v27.8b, v25.8b // frac_y
add v29.8b, v29.8b, v17.8b // base_y = (ypos >> 6) + 1
add v30.8b, v29.8b, v17.8b // base_y + 1
add v28.8b, v29.8b, v19.8b // base_y + 2
tbl v16.8b, {v0.16b}, v29.8b // left[base_y]
trn1 v30.2s, v30.2s, v28.2s // base_y + 1, base_y + 2
sub v28.8b, v26.8b, v27.8b // 64 - frac_y
trn1 v31.2s, v31.2s, v31.2s // {0,1,2,3,0,1,2,3}
trn1 v27.2s, v27.2s, v27.2s // frac_y
trn1 v28.2s, v28.2s, v28.2s // 64 - frac_y
movi v29.8b, #2
add v31.8b, v31.8b, v31.8b // {0,2,4,6,0,2,4,6}
4:
asr w9, w8, #6 // base_x
dup v6.4h, w8 // xpos
sub w8, w8, w6 // xpos -= dx
cmp w9, #-8 // base_x <= -8
asr w11, w8, #6 // base_x
b.le 49f
dup v7.4h, w8 // xpos
ldr d2, [x2, w9, sxtw] // top[base_x]
ldr d4, [x2, w11, sxtw]
trn1 v6.2d, v6.2d, v7.2d // xpos
tbl v17.8b, {v0.16b}, v30.8b // left[base_y+1], left[base_y+2]
shrn v20.8b, v6.8h, #6 // first base_x for each row
xtn v6.8b, v6.8h // (uint8_t)xpos
uzp2 v3.8b, v2.8b, v4.8b // top[base_x+1]
uzp1 v2.8b, v2.8b, v4.8b // top[base_x]
and v6.8b, v6.8b, v25.8b // frac_x
trn1 v16.2s, v16.2s, v17.2s // left[base_y], left[base_y+1]
sub v7.8b, v26.8b, v6.8b // 64 - frac_x
add v20.8b, v20.8b, v31.8b // actual base_x
umull v16.8h, v16.8b, v28.8b // left[base_y]*(64-frac_y)
umlal v16.8h, v17.8b, v27.8b // + left[base_y+1]*frac_y
umull v22.8h, v2.8b, v7.8b // top[base_x]*(64-frac_x)
umlal v22.8h, v3.8b, v6.8b // + top[base_x+1]*frac_x
cmge v20.8b, v20.8b, #0
rshrn v16.8b, v16.8h, #6
rshrn v22.8b, v22.8h, #6
bit v16.8b, v22.8b, v20.8b
st1 {v16.s}[0], [x0], x1
sub w8, w8, w6 // xpos -= dx
subs w5, w5, #2
st1 {v16.s}[1], [x0], x1
b.le 9f
ext v16.8b, v17.8b, v17.8b, #4
add v30.8b, v30.8b, v29.8b // base_y += 2
b 4b
49:
tbl v17.8b, {v0.16b}, v30.8b // left[base_y+1], left[base_y+2]
trn1 v16.2s, v16.2s, v17.2s // left[base_y], left[base_y+1]
umull v18.8h, v16.8b, v28.8b // left[base_y]*(64-frac_y)
umlal v18.8h, v17.8b, v27.8b // + left[base_y+1]*frac_y
rshrn v18.8b, v18.8h, #6
st1 {v18.s}[0], [x0], x1
subs w5, w5, #2
st1 {v18.s}[1], [x0], x1
b.le 9f
ext v16.8b, v17.8b, v17.8b, #4
add v30.8b, v30.8b, v29.8b // base_y += 2
b 49b
9:
ret
80:
dup v30.8h, w7 // -dy
movi v17.8b, #1
mul v16.8h, v31.8h, v30.8h // {0,1,2,3,4,5,6,7}* -dy
movi v25.16b, #0x3e
add v30.8h, v16.8h, v30.8h // -= dy
xtn v31.8b, v31.8h // {0,1,2,3,4,5,6,7}
// For upsample_top, w <= 8 and h <= 8; we may need up to h+1 elements
// from left.
ld1 {v0.16b}, [x3] // left[]
movi v26.16b, #64
movi v19.16b, #2
xtn v27.8b, v30.8h // (uint8_t)ypos
shrn v29.8b, v30.8h, #6 // ypos >> 6
and v27.8b, v27.8b, v25.8b // frac_y
add v29.8b, v29.8b, v17.8b // base_y = (ypos >> 6) + 1
tbl v18.8b, {v0.16b}, v29.8b // left[base_y]
add v30.8b, v29.8b, v19.8b // base_y + 2
add v29.8b, v29.8b, v17.8b // base_y + 1
sub v28.8b, v26.8b, v27.8b // 64 - frac_y
trn1 v31.2d, v31.2d, v31.2d // {0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7}
movi v24.8b, #2 // 2
add v31.16b, v31.16b, v31.16b // {0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14}
8:
asr w9, w8, #6 // base_x
dup v16.8h, w8 // xpos
sub w8, w8, w6 // xpos -= dx
cmp w9, #-16 // base_x <= -16
asr w11, w8, #6 // base_x
b.le 89f
dup v17.8h, w8 // xpos
ldr q4, [x2, w9, sxtw] // top[base_x]
ldr q6, [x2, w11, sxtw]
tbl v19.8b, {v0.16b}, v29.8b // left[base_y+1]
shrn v21.8b, v16.8h, #6 // first base_x
shrn2 v21.16b, v17.8h, #6
xtn v16.8b, v16.8h // (uint8_t)xpos
xtn2 v16.16b, v17.8h
tbl v20.8b, {v0.16b}, v30.8b // left[base_y+2]
uzp2 v5.16b, v4.16b, v6.16b // top[base_x+1]
uzp1 v4.16b, v4.16b, v6.16b // top[base_x]
and v16.16b, v16.16b, v25.16b // frac_x
sub v7.16b, v26.16b, v16.16b // 64 - frac_x
add v21.16b, v21.16b, v31.16b // actual base_x
umull v6.8h, v18.8b, v28.8b // left[base_y]*(64-frac_y)
umlal v6.8h, v19.8b, v27.8b // + left[base_y+1]*frac_y
umull v17.8h, v19.8b, v28.8b
umlal v17.8h, v20.8b, v27.8b
umull v22.8h, v4.8b, v7.8b // top[base_x]*(64-frac_x)
umlal v22.8h, v5.8b, v16.8b // + top[base_x+1]*frac_x
umull2 v23.8h, v4.16b, v7.16b
umlal2 v23.8h, v5.16b, v16.16b
cmge v21.16b, v21.16b, #0
rshrn v6.8b, v6.8h, #6
rshrn2 v6.16b, v17.8h, #6
rshrn v22.8b, v22.8h, #6
rshrn2 v22.16b, v23.8h, #6
bit v6.16b, v22.16b, v21.16b
st1 {v6.d}[0], [x0], x1
sub w8, w8, w6 // xpos -= dx
subs w5, w5, #2
st1 {v6.d}[1], [x0], x1
b.le 9f
mov v18.8b, v20.8b
add v29.8b, v29.8b, v24.8b // base_y += 2
add v30.8b, v30.8b, v24.8b // base_y += 2
b 8b
89:
tbl v19.8b, {v0.16b}, v29.8b // left[base_y+1]
tbl v20.8b, {v0.16b}, v30.8b // left[base_y+2]
umull v6.8h, v18.8b, v28.8b // left[base_y]*(64-frac_y)
umlal v6.8h, v19.8b, v27.8b // + left[base_y+1]*frac_y
umull v17.8h, v19.8b, v28.8b
umlal v17.8h, v20.8b, v27.8b
rshrn v6.8b, v6.8h, #6
rshrn2 v6.16b, v17.8h, #6
st1 {v6.d}[0], [x0], x1
subs w5, w5, #2
st1 {v6.d}[1], [x0], x1
b.le 9f
mov v18.8b, v20.8b
add v29.8b, v29.8b, v24.8b // base_y += 2
add v30.8b, v30.8b, v24.8b // base_y += 2
b 89b
9:
ret
endfunc
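// z2 variant for an upsampled left edge (see the "For upsample_left" notes
// below): base_y starts at (ypos >> 6) + 2 and advances by 2 per row, so
// left[] is sampled at every second (upsampled) position, while the top
// interpolation matches ipred_z2_fill1.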
function ipred_z2_fill3_8bpc_neon, export=1
cmp w4, #8
mov w8, #(1 << 6) // xpos = 1 << 6
sub w8, w8, w6 // xpos -= dx
movrel x11, increments
ld1 {v31.8h}, [x11] // increments
neg w7, w7 // -dy
b.eq 80f
40:
dup v30.4h, w7 // -dy
movi v17.8b, #1
mul v16.4h, v31.4h, v30.4h // {0,1,2,3}* -dy
movi v25.16b, #0x3e
add v30.4h, v16.4h, v30.4h // -= dy
xtn v31.8b, v31.8h // {0,1,2,3}
// For upsample_left, w <= 8 and h <= 8; we may need up to 2*h+1 elements.
ld1 {v0.16b, v1.16b}, [x3] // left[]
movi v26.16b, #64
movi v19.16b, #2
xtn v27.8b, v30.8h // (uint8_t)ypos
shrn v29.8b, v30.8h, #6 // ypos >> 6
and v27.8b, v27.8b, v25.8b // frac_y
add v29.8b, v29.8b, v19.8b // base_y = (ypos >> 6) + 2
add v30.8b, v29.8b, v17.8b // base_y + 1
add v28.8b, v29.8b, v19.8b // base_y + 2
trn1 v31.2s, v31.2s, v31.2s // {0,1,2,3,0,1,2,3}
add v24.8b, v30.8b, v19.8b // base_y + 3
trn1 v29.2s, v29.2s, v28.2s // base_y + 0, base_y + 2
trn1 v30.2s, v30.2s, v24.2s // base_y + 1, base_y + 3
sub v28.8b, v26.8b, v27.8b // 64 - frac_y
trn1 v27.2s, v27.2s, v27.2s // frac_y
trn1 v28.2s, v28.2s, v28.2s // 64 - frac_y
movi v24.8b, #4
4:
asr w9, w8, #6 // base_x
dup v6.4h, w8 // xpos
sub w8, w8, w6 // xpos -= dx
cmp w9, #-4 // base_x <= -4
asr w11, w8, #6 // base_x
b.le 49f
dup v7.4h, w8 // xpos
ldr d2, [x2, w9, sxtw] // top[base_x]
ldr d4, [x2, w11, sxtw]
trn1 v6.2d, v6.2d, v7.2d // xpos
tbl v16.8b, {v0.16b, v1.16b}, v29.8b // left[base_y+0], left[base_y+2]
tbl v17.8b, {v0.16b, v1.16b}, v30.8b // left[base_y+1], left[base_y+3]
shrn v20.8b, v6.8h, #6 // first base_x for each row
xtn v6.8b, v6.8h // (uint8_t)xpos
ext v3.8b, v2.8b, v2.8b, #1 // top[base_x+1]
ext v5.8b, v4.8b, v4.8b, #1
and v6.8b, v6.8b, v25.8b // frac_x
trn1 v2.2s, v2.2s, v4.2s // top[base_x]
trn1 v3.2s, v3.2s, v5.2s // top[base_x+1]
sub v7.8b, v26.8b, v6.8b // 64 - frac_x
add v20.8b, v20.8b, v31.8b // actual base_x
umull v16.8h, v16.8b, v28.8b // left[base_y]*(64-frac_y)
umlal v16.8h, v17.8b, v27.8b // + left[base_y+1]*frac_y
umull v22.8h, v2.8b, v7.8b // top[base_x]*(64-frac_x)
umlal v22.8h, v3.8b, v6.8b // + top[base_x+1]*frac_x
cmge v20.8b, v20.8b, #0
rshrn v16.8b, v16.8h, #6
rshrn v22.8b, v22.8h, #6
bit v16.8b, v22.8b, v20.8b
st1 {v16.s}[0], [x0], x1
sub w8, w8, w6 // xpos -= dx
subs w5, w5, #2
st1 {v16.s}[1], [x0], x1
b.le 9f
add v29.8b, v29.8b, v24.8b // base_y += 4
add v30.8b, v30.8b, v24.8b // base_y += 4
b 4b
49:
tbl v16.8b, {v0.16b, v1.16b}, v29.8b // left[base_y+0], left[base_y+2]
tbl v17.8b, {v0.16b, v1.16b}, v30.8b // left[base_y+1], left[base_y+3]
umull v18.8h, v16.8b, v28.8b // left[base_y]*(64-frac_y)
umlal v18.8h, v17.8b, v27.8b // + left[base_y+1]*frac_y
rshrn v18.8b, v18.8h, #6
st1 {v18.s}[0], [x0], x1
subs w5, w5, #2
st1 {v18.s}[1], [x0], x1
b.le 9f
add v29.8b, v29.8b, v24.8b // base_y += 4
add v30.8b, v30.8b, v24.8b // base_y += 4
b 49b
9:
ret
80:
dup v30.8h, w7 // -dy
movi v17.8b, #1
mul v16.8h, v31.8h, v30.8h // {0,1,2,3,4,5,6,7}* -dy
movi v25.16b, #0x3e
add v30.8h, v16.8h, v30.8h // -= dy
xtn v31.8b, v31.8h // {0,1,2,3,4,5,6,7}
// For upsample_left, w <= 8 and h <= 8; we may need up to 2*h+1 elements.
ld1 {v0.16b, v1.16b, v2.16b}, [x3] // left[]
movi v26.16b, #64
movi v19.16b, #2
xtn v27.8b, v30.8h // (uint8_t)ypos
shrn v29.8b, v30.8h, #6 // ypos >> 6
and v27.8b, v27.8b, v25.8b // frac_y
add v29.8b, v29.8b, v19.8b // base_y = (ypos >> 6) + 2
add v28.8b, v29.8b, v17.8b // base_y + 1
add v30.8b, v29.8b, v19.8b // base_y + 2
trn1 v31.2d, v31.2d, v31.2d // {0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7}
add v24.8b, v28.8b, v19.8b // base_y + 3
trn1 v29.2d, v29.2d, v30.2d // base_y + 0, base_y + 2
trn1 v30.2d, v28.2d, v24.2d // base_y + 1, base_y + 3
sub v28.8b, v26.8b, v27.8b // 64 - frac_y
movi v24.16b, #4
trn1 v27.2d, v27.2d, v27.2d // frac_y
trn1 v28.2d, v28.2d, v28.2d // 64 - frac_y
8:
asr w9, w8, #6 // base_x
dup v16.8h, w8 // xpos
sub w8, w8, w6 // xpos -= dx
cmp w9, #-8 // base_x <= -8
asr w11, w8, #6 // base_x
b.le 89f
dup v17.8h, w8 // xpos
ldr q4, [x2, w9, sxtw] // top[base_x]
ldr q6, [x2, w11, sxtw]
tbl v18.16b, {v0.16b, v1.16b, v2.16b}, v29.16b // left[base_y+0], left[base_y+2]
tbl v19.16b, {v0.16b, v1.16b, v2.16b}, v30.16b // left[base_y+1], left[base_y+3]
shrn v21.8b, v16.8h, #6 // first base_x
shrn2 v21.16b, v17.8h, #6
xtn v16.8b, v16.8h // (uint8_t)xpos
xtn2 v16.16b, v17.8h
ext v5.16b, v4.16b, v4.16b, #1 // top[base_x+1]
ext v7.16b, v6.16b, v6.16b, #1
and v16.16b, v16.16b, v25.16b // frac_x
trn1 v4.2d, v4.2d, v6.2d // top[base_x]
trn1 v5.2d, v5.2d, v7.2d // top[base_x+1]
sub v7.16b, v26.16b, v16.16b // 64 - frac_x
add v21.16b, v21.16b, v31.16b // actual base_x
umull v6.8h, v18.8b, v28.8b // left[base_y]*(64-frac_y)
umlal v6.8h, v19.8b, v27.8b // + left[base_y+1]*frac_y
umull2 v17.8h, v18.16b, v28.16b
umlal2 v17.8h, v19.16b, v27.16b
umull v22.8h, v4.8b, v7.8b // top[base_x]*(64-frac_x)
umlal v22.8h, v5.8b, v16.8b // + top[base_x+1]*frac_x
umull2 v23.8h, v4.16b, v7.16b
umlal2 v23.8h, v5.16b, v16.16b
cmge v21.16b, v21.16b, #0
rshrn v6.8b, v6.8h, #6
rshrn2 v6.16b, v17.8h, #6
rshrn v22.8b, v22.8h, #6
rshrn2 v22.16b, v23.8h, #6
bit v6.16b, v22.16b, v21.16b
st1 {v6.d}[0], [x0], x1
sub w8, w8, w6 // xpos -= dx
subs w5, w5, #2
st1 {v6.d}[1], [x0], x1
b.le 9f
add v29.16b, v29.16b, v24.16b // base_y += 4
add v30.16b, v30.16b, v24.16b // base_y += 4
b 8b
89:
tbl v18.16b, {v0.16b, v1.16b, v2.16b}, v29.16b // left[base_y+0], left[base_y+2]
tbl v19.16b, {v0.16b, v1.16b, v2.16b}, v30.16b // left[base_y+1], left[base_y+3]
umull v6.8h, v18.8b, v28.8b // left[base_y]*(64-frac_y)
umlal v6.8h, v19.8b, v27.8b // + left[base_y+1]*frac_y
umull2 v17.8h, v18.16b, v28.16b
umlal2 v17.8h, v19.16b, v27.16b
rshrn v6.8b, v6.8h, #6
rshrn2 v6.16b, v17.8h, #6
st1 {v6.d}[0], [x0], x1
subs w5, w5, #2
st1 {v6.d}[1], [x0], x1
b.le 9f
add v29.16b, v29.16b, v24.16b // base_y += 4
add v30.16b, v30.16b, v24.16b // base_y += 4
b 89b
9:
ret
endfunc
// void ipred_z3_fill1_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const left,
// const int width, const int height,
// const int dy, const int max_base_y);
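// Column-wise reference model (an illustrative sketch): z3 predicts from
// the left edge only, walking down each column:
//   ypos(x) = (x + 1)*dy;  base = ypos >> 6;  frac = ypos & 0x3e;
//   dst[y*stride + x] = base + y >= max_base_y
//       ? left[max_base_y]
//       : (left[base + y]*(64 - frac) + left[base + y + 1]*frac + 32) >> 6;
// For max_base_y <= 64 this is done with tbx lookups into the preloaded
// left[] registers; larger max_base_y takes the transposed z1-style path
// at L(ipred_z3_fill1_large_h16).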
function ipred_z3_fill1_8bpc_neon, export=1
cmp w6, #64
clz w9, w3
movrel x8, ipred_z3_fill1_tbl
sub w9, w9, #25
ldrsw x9, [x8, w9, uxtw #2]
add x10, x2, w6, uxtw // left[max_base_y]
add x8, x8, x9
movrel x11, increments
ld1r {v31.16b}, [x10] // padding
ld1 {v30.8h}, [x11] // increments
mov w7, w5
b.gt L(ipred_z3_fill1_large_h16)
br x8
40:
AARCH64_VALID_JUMP_TARGET
dup v29.4h, w5 // dy
mul v30.4h, v30.4h, v29.4h // {0,1,2,3}*dy
movi v23.16b, #0x3e
// Worst case max_base_y is width+height-1, for w=4, h=16, <= 32
ld1 {v0.16b, v1.16b}, [x2] // left[]
add v30.4h, v29.4h, v30.4h // ypos
movi v22.16b, #64
movi v20.16b, #1
movi v21.16b, #2
xtn v24.8b, v30.8h // (uint8_t)ypos
uqshrn v26.8b, v30.8h, #6 // base
and v24.8b, v24.8b, v23.8b // frac
mov v4.8b, v31.8b
uqadd v27.8b, v26.8b, v20.8b // base + 1
uqadd v28.8b, v26.8b, v21.8b // base + 2
sub v25.8b, v22.8b, v24.8b // 64 - frac
tbx v4.8b, {v0.16b, v1.16b}, v26.8b // left[base]
trn1 v27.2s, v27.2s, v28.2s // base + 1, base + 2
trn1 v24.2s, v24.2s, v24.2s // frac
trn1 v25.2s, v25.2s, v25.2s // 64 - frac
1:
mov v5.8b, v31.8b
tbx v5.8b, {v0.16b, v1.16b}, v27.8b // left[base+1], left[base+2]
trn1 v4.2s, v4.2s, v5.2s // left[base], left[base+1]
umull v16.8h, v4.8b, v25.8b // left[base]*(64-frac)
umlal v16.8h, v5.8b, v24.8b // + left[base+1]*frac
rshrn v16.8b, v16.8h, #6
st1 {v16.s}[0], [x0], x1
subs w4, w4, #2
st1 {v16.s}[1], [x0], x1
b.le 9f
ext v4.8b, v5.8b, v5.8b, #4
uqadd v27.8b, v27.8b, v21.8b // base += 2
b 1b
9:
ret
80:
AARCH64_VALID_JUMP_TARGET
dup v29.8h, w5 // dy
mul v30.8h, v30.8h, v29.8h // {0,1,2,3,4,5,6,7}*dy
movi v23.16b, #0x3e
// Worst case max_base_y is width+height-1, for w=8, h=32, <= 48
ld1 {v0.16b, v1.16b, v2.16b}, [x2] // left[]
add v30.8h, v29.8h, v30.8h // ypos
movi v22.16b, #64
movi v20.16b, #1
movi v21.16b, #2
xtn v24.8b, v30.8h // (uint8_t)ypos
uqshrn v26.8b, v30.8h, #6 // base
and v24.8b, v24.8b, v23.8b // frac
mov v4.8b, v31.8b
uqadd v27.8b, v26.8b, v20.8b // base + 1
uqadd v28.8b, v26.8b, v21.8b // base + 2
sub v25.8b, v22.8b, v24.8b // 64 - frac
tbx v4.8b, {v0.16b, v1.16b, v2.16b}, v26.8b // left[base]
1:
mov v5.8b, v31.8b
mov v6.8b, v31.8b
tbx v5.8b, {v0.16b, v1.16b, v2.16b}, v27.8b // left[base+1]
tbx v6.8b, {v0.16b, v1.16b, v2.16b}, v28.8b // left[base+2]
umull v16.8h, v4.8b, v25.8b // left[base]*(64-frac)
umlal v16.8h, v5.8b, v24.8b // + left[base+1]*frac
umull v17.8h, v5.8b, v25.8b
umlal v17.8h, v6.8b, v24.8b
rshrn v16.8b, v16.8h, #6
rshrn v17.8b, v17.8h, #6
st1 {v16.8b}, [x0], x1
subs w4, w4, #2
st1 {v17.8b}, [x0], x1
b.le 9f
mov v4.8b, v6.8b
uqadd v27.8b, v27.8b, v21.8b // base += 2
uqadd v28.8b, v28.8b, v21.8b // base += 2
b 1b
9:
ret
160:
AARCH64_VALID_JUMP_TARGET
dup v28.8h, w5 // dy
shl v29.8h, v28.8h, #3 // 8*dy
mul v30.8h, v30.8h, v28.8h // {0,1,2,3,4,5,6,7}*dy
movi v23.16b, #0x3e
// This is only executed if we've checked that max_base_y <= 64.
ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x2] // left[]
add v28.8h, v28.8h, v30.8h // ypos
movi v22.16b, #64
movi v20.16b, #1
movi v21.16b, #2
add v29.8h, v28.8h, v29.8h // ypos + 8*dy
xtn v24.8b, v28.8h // (uint8_t)ypos
xtn2 v24.16b, v29.8h
uqshrn v26.8b, v28.8h, #6 // base
uqshrn2 v26.16b, v29.8h, #6
and v24.16b, v24.16b, v23.16b // frac
mov v4.16b, v31.16b
uqadd v27.16b, v26.16b, v20.16b // base + 1
uqadd v28.16b, v26.16b, v21.16b // base + 2
sub v25.16b, v22.16b, v24.16b // 64 - frac
tbx v4.16b, {v0.16b, v1.16b, v2.16b, v3.16b}, v26.16b // left[base]
1:
mov v5.16b, v31.16b
mov v6.16b, v31.16b
tbx v5.16b, {v0.16b, v1.16b, v2.16b, v3.16b}, v27.16b // left[base+1]
tbx v6.16b, {v0.16b, v1.16b, v2.16b, v3.16b}, v28.16b // left[base+2]
umull v16.8h, v4.8b, v25.8b // left[base]*(64-frac)
umlal v16.8h, v5.8b, v24.8b // + left[base+1]*frac
umull2 v17.8h, v4.16b, v25.16b
umlal2 v17.8h, v5.16b, v24.16b
umull v18.8h, v5.8b, v25.8b
umlal v18.8h, v6.8b, v24.8b
umull2 v19.8h, v5.16b, v25.16b
umlal2 v19.8h, v6.16b, v24.16b
rshrn v16.8b, v16.8h, #6
rshrn2 v16.16b, v17.8h, #6
rshrn v17.8b, v18.8h, #6
rshrn2 v17.16b, v19.8h, #6
st1 {v16.16b}, [x0], x1
subs w4, w4, #2
st1 {v17.16b}, [x0], x1
b.le 9f
mov v4.16b, v6.16b
uqadd v27.16b, v27.16b, v21.16b // base += 2
uqadd v28.16b, v28.16b, v21.16b // base += 2
b 1b
9:
ret
320:
640:
AARCH64_VALID_JUMP_TARGET
dup v28.8h, w5 // dy
mov w12, w3
add x13, x0, x1
shl v29.8h, v28.8h, #3 // 8*dy
mul v30.8h, v30.8h, v28.8h // {0,1,2,3,4,5,6,7}*dy
movi v23.16b, #0x3e
lsl x1, x1, #1
sub x1, x1, w3, uxtw
add v30.8h, v28.8h, v30.8h // ypos
// This is only executed if we've checked that max_base_y <= 64.
ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x2] // left[]
movi v22.16b, #64
movi v20.16b, #1
movi v21.16b, #2
1:
mov v26.16b, v30.16b // reset ypos
2:
add v27.8h, v26.8h, v29.8h // ypos + 8*dy
uqshrn v16.8b, v26.8h, #6 // base
uqshrn2 v16.16b, v27.8h, #6
xtn v24.8b, v26.8h // (uint8_t)ypos
xtn2 v24.16b, v27.8h
umov w14, v16.b[0]
and v24.16b, v24.16b, v23.16b // frac
uqadd v17.16b, v16.16b, v20.16b // base + 1
cmp w14, w6 // base >= max_base_y
uqadd v18.16b, v16.16b, v21.16b // base + 2
sub v25.16b, v22.16b, v24.16b // 64 - frac
b.ge 4f
mov v4.16b, v31.16b
mov v5.16b, v31.16b
mov v6.16b, v31.16b
tbx v4.16b, {v0.16b, v1.16b, v2.16b, v3.16b}, v16.16b // left[base]
tbx v5.16b, {v0.16b, v1.16b, v2.16b, v3.16b}, v17.16b // left[base+1]
tbx v6.16b, {v0.16b, v1.16b, v2.16b, v3.16b}, v18.16b // left[base+2]
subs w3, w3, #16
umull v16.8h, v4.8b, v25.8b // left[base]*(64-frac)
umlal v16.8h, v5.8b, v24.8b // + left[base+1]*frac
umull2 v17.8h, v4.16b, v25.16b
umlal2 v17.8h, v5.16b, v24.16b
umull v18.8h, v5.8b, v25.8b
umlal v18.8h, v6.8b, v24.8b
umull2 v19.8h, v5.16b, v25.16b
umlal2 v19.8h, v6.16b, v24.16b
rshrn v16.8b, v16.8h, #6
rshrn2 v16.16b, v17.8h, #6
rshrn v17.8b, v18.8h, #6
rshrn2 v17.16b, v19.8h, #6
st1 {v16.16b}, [x0], #16
st1 {v17.16b}, [x13], #16
b.le 3f
add v26.8h, v27.8h, v29.8h // ypos += 16*dy
b 2b
3:
subs w4, w4, #2
b.le 9f
movi v16.8h, #128
add x0, x0, x1
add x13, x13, x1
add v30.8h, v30.8h, v16.8h // ypos = dy + y*(1<<6)*2
mov w3, w12
b 1b
4:
subs w3, w3, #16
st1 {v31.16b}, [x0], #16
st1 {v31.16b}, [x13], #16
b.gt 4b
b 3b
9:
ret
L(ipred_z3_fill1_large_h16):
// Fallback case for max_base_y > 64; similar to the z1
// implementation. This does the filtering vertically, filling out
// a 2 pixel wide column at a time.
mov w15, #64
add x13, x0, x1
lsl x1, x1, #1
mov w12, w4
1:
lsr w8, w7, #6 // base
and w9, w7, #0x3e // frac
add w7, w7, w5 // ypos += dy
cmp w8, w6 // base >= max_base_y
lsr w10, w7, #6 // base
and w11, w7, #0x3e // frac
b.ge ipred_z3_fill_padding_neon
add x8, x2, w8, uxtw
add x10, x2, w10, uxtw
dup v4.16b, w9 // frac
dup v5.16b, w11
ld1 {v0.16b, v1.16b}, [x8], #32 // left[base]
ld1 {v2.16b, v3.16b}, [x10], #32
sub w9, w15, w9 // 64 - frac
sub w11, w15, w11
dup v6.16b, w9 // 64 - frac
dup v7.16b, w11
add w7, w7, w5 // ypos += dy
2:
ext v16.16b, v0.16b, v1.16b, #1 // left[base+1]
ext v17.16b, v2.16b, v3.16b, #1
subs w4, w4, #16
umull v18.8h, v16.8b, v4.8b // left[base+1]*frac
umlal v18.8h, v0.8b, v6.8b // + left[base]*(64-frac)
umull2 v19.8h, v16.16b, v4.16b
umlal2 v19.8h, v0.16b, v6.16b
umull v20.8h, v17.8b, v5.8b
umlal v20.8h, v2.8b, v7.8b
umull2 v21.8h, v17.16b, v5.16b
umlal2 v21.8h, v2.16b, v7.16b
rshrn v16.8b, v18.8h, #6
rshrn2 v16.16b, v19.8h, #6
rshrn v17.8b, v20.8h, #6
rshrn2 v17.16b, v21.8h, #6
zip1 v18.16b, v16.16b, v17.16b
zip2 v19.16b, v16.16b, v17.16b
st1 {v18.h}[0], [x0], x1
st1 {v18.h}[1], [x13], x1
st1 {v18.h}[2], [x0], x1
st1 {v18.h}[3], [x13], x1
st1 {v18.h}[4], [x0], x1
st1 {v18.h}[5], [x13], x1
st1 {v18.h}[6], [x0], x1
st1 {v18.h}[7], [x13], x1
st1 {v19.h}[0], [x0], x1
st1 {v19.h}[1], [x13], x1
st1 {v19.h}[2], [x0], x1
st1 {v19.h}[3], [x13], x1
st1 {v19.h}[4], [x0], x1
st1 {v19.h}[5], [x13], x1
st1 {v19.h}[6], [x0], x1
st1 {v19.h}[7], [x13], x1
b.le 3f
mov v0.16b, v1.16b
ld1 {v1.16b}, [x8], #16 // left[base]
mov v2.16b, v3.16b
ld1 {v3.16b}, [x10], #16
b 2b
3:
subs w3, w3, #2
b.le 9f
lsr x1, x1, #1
msub x0, x1, x12, x0 // ptr -= h * stride
msub x13, x1, x12, x13
lsl x1, x1, #1
add x0, x0, #2
add x13, x13, #2
mov w4, w12
b 1b
9:
ret
endfunc
jumptable ipred_z3_fill1_tbl
.word 640b - ipred_z3_fill1_tbl
.word 320b - ipred_z3_fill1_tbl
.word 160b - ipred_z3_fill1_tbl
.word 80b - ipred_z3_fill1_tbl
.word 40b - ipred_z3_fill1_tbl
endjumptable
function ipred_z3_fill_padding_neon, export=0
cmp w3, #16
movrel x8, ipred_z3_fill_padding_tbl
b.gt ipred_z3_fill_padding_wide
// w3 = remaining width, w4 = constant height
mov w12, w4
1:
// Fill a WxH rectangle with padding. W can be any number;
// this fills the exact width by filling in the largest
// power of two in the remaining width, and repeating.
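// For example (an illustrative case), w3 = 12 is handled as an 8 pixel
// wide column over the full height, followed by a 4 pixel wide column,
// moving the destination pointers right after each column.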
clz w9, w3
sub w9, w9, #25
ldrsw x9, [x8, w9, uxtw #2]
add x9, x8, x9
br x9
20:
AARCH64_VALID_JUMP_TARGET
2:
st1 {v31.h}[0], [x0], x1
subs w4, w4, #4
st1 {v31.h}[0], [x13], x1
st1 {v31.h}[0], [x0], x1
st1 {v31.h}[0], [x13], x1
b.gt 2b
subs w3, w3, #2
lsr x1, x1, #1
msub x0, x1, x12, x0 // ptr -= h * stride
msub x13, x1, x12, x13
b.le 9f
lsl x1, x1, #1
add x0, x0, #2
add x13, x13, #2
mov w4, w12
b 1b
40:
AARCH64_VALID_JUMP_TARGET
4:
st1 {v31.s}[0], [x0], x1
subs w4, w4, #4
st1 {v31.s}[0], [x13], x1
st1 {v31.s}[0], [x0], x1
st1 {v31.s}[0], [x13], x1
b.gt 4b
subs w3, w3, #4
lsr x1, x1, #1
msub x0, x1, x12, x0 // ptr -= h * stride
msub x13, x1, x12, x13
b.le 9f
lsl x1, x1, #1
add x0, x0, #4
add x13, x13, #4
mov w4, w12
b 1b
80:
AARCH64_VALID_JUMP_TARGET
8:
st1 {v31.8b}, [x0], x1
subs w4, w4, #4
st1 {v31.8b}, [x13], x1
st1 {v31.8b}, [x0], x1
st1 {v31.8b}, [x13], x1
b.gt 8b
subs w3, w3, #8
lsr x1, x1, #1
msub x0, x1, x12, x0 // ptr -= h * stride
msub x13, x1, x12, x13
b.le 9f
lsl x1, x1, #1
add x0, x0, #8
add x13, x13, #8
mov w4, w12
b 1b
160:
320:
640:
AARCH64_VALID_JUMP_TARGET
16:
st1 {v31.16b}, [x0], x1
subs w4, w4, #4
st1 {v31.16b}, [x13], x1
st1 {v31.16b}, [x0], x1
st1 {v31.16b}, [x13], x1
b.gt 16b
subs w3, w3, #16
lsr x1, x1, #1
msub x0, x1, x12, x0 // ptr -= h * stride
msub x13, x1, x12, x13
b.le 9f
lsl x1, x1, #1
add x0, x0, #16
add x13, x13, #16
mov w4, w12
b 1b
9:
ret
endfunc
jumptable ipred_z3_fill_padding_tbl
.word 640b - ipred_z3_fill_padding_tbl
.word 320b - ipred_z3_fill_padding_tbl
.word 160b - ipred_z3_fill_padding_tbl
.word 80b - ipred_z3_fill_padding_tbl
.word 40b - ipred_z3_fill_padding_tbl
.word 20b - ipred_z3_fill_padding_tbl
endjumptable
function ipred_z3_fill_padding_wide
// Fill a WxH rectangle with padding, with W > 16.
lsr x1, x1, #1
mov w12, w3
sub x1, x1, w3, uxtw
1:
ands w5, w3, #15
b.eq 2f
// If the width isn't aligned to 16, first do one 16 byte write
// and align the start pointer.
sub w3, w3, w5
st1 {v31.16b}, [x0]
add x0, x0, w5, uxtw
2:
// Fill the rest of the line with aligned 16 byte writes.
subs w3, w3, #16
st1 {v31.16b}, [x0], #16
b.gt 2b
subs w4, w4, #1
add x0, x0, x1
b.le 9f
mov w3, w12
b 1b
9:
ret
endfunc
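// z3 variant for an upsampled left edge: each output row interpolates
// between a pair of adjacent upsampled left samples (base+0 and base+1),
// with base advancing by 2 per row (4 per two-row iteration), matching
// the 2*(width+height)-2 worst case noted below.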
function ipred_z3_fill2_8bpc_neon, export=1
cmp w3, #8
add x10, x2, w6, uxtw // left[max_base_y]
movrel x11, increments
ld1r {v31.16b}, [x10] // padding
ld1 {v30.8h}, [x11] // increments
b.eq 80f
40: // w == 4
dup v29.4h, w5 // dy
mul v30.4h, v30.4h, v29.4h // {0,1,2,3}*dy
movi v23.16b, #0x3e
// Worst case max_base_y is 2*(width+height)-2, but width+height <= 16,
// so max_base_y <= 32.
ld1 {v0.16b, v1.16b}, [x2] // left[]
add v30.4h, v29.4h, v30.4h // ypos
movi v22.16b, #64
movi v20.16b, #1
movi v21.16b, #2
xtn v24.8b, v30.8h // (uint8_t)ypos
uqshrn v26.8b, v30.8h, #6 // base
and v24.8b, v24.8b, v23.8b // frac
uqadd v27.8b, v26.8b, v20.8b // base + 1
uqadd v28.8b, v26.8b, v21.8b // base + 2
sub v25.8b, v22.8b, v24.8b // 64 - frac
uqadd v29.8b, v27.8b, v21.8b // base + 3
trn1 v24.2s, v24.2s, v24.2s // frac
trn1 v26.2s, v26.2s, v28.2s // base + 0, base + 2
trn1 v27.2s, v27.2s, v29.2s // base + 1, base + 3
trn1 v25.2s, v25.2s, v25.2s // 64 - frac
movi v21.16b, #4
1:
mov v4.8b, v31.8b
mov v5.8b, v31.8b
tbx v4.8b, {v0.16b, v1.16b}, v26.8b // left[base], left[base+2]
tbx v5.8b, {v0.16b, v1.16b}, v27.8b // left[base+1], left[base+3]
umull v16.8h, v4.8b, v25.8b // left[base]*(64-frac)
umlal v16.8h, v5.8b, v24.8b // + left[base+1]*frac
rshrn v16.8b, v16.8h, #6
st1 {v16.s}[0], [x0], x1
subs w4, w4, #2
st1 {v16.s}[1], [x0], x1
b.le 9f
uqadd v26.8b, v26.8b, v21.8b // base += 4
uqadd v27.8b, v27.8b, v21.8b // base += 4
b 1b
9:
ret
80: // w == 8
dup v29.8h, w5 // dy
mul v30.8h, v30.8h, v29.8h // {0,1,2,3,4,5,6,7}*dy
movi v23.16b, #0x3e
// Worst case max_base_y is 2*(width+height)-2, but width+height <= 16,
// so max_base_y <= 32.
ld1 {v0.16b, v1.16b}, [x2] // left[]
add v30.8h, v29.8h, v30.8h // ypos
movi v22.16b, #64
movi v20.16b, #1
movi v21.16b, #2
xtn v24.8b, v30.8h // (uint8_t)ypos
uqshrn v26.8b, v30.8h, #6 // base
and v24.8b, v24.8b, v23.8b // frac
uqadd v27.8b, v26.8b, v20.8b // base + 1
uqadd v28.8b, v26.8b, v21.8b // base + 2
sub v25.8b, v22.8b, v24.8b // 64 - frac
uqadd v29.8b, v27.8b, v21.8b // base + 3
trn1 v24.2d, v24.2d, v24.2d // frac
trn1 v26.2d, v26.2d, v28.2d // base + 0, base + 2
trn1 v27.2d, v27.2d, v29.2d // base + 1, base + 3
trn1 v25.2d, v25.2d, v25.2d // 64 - frac
movi v21.16b, #4
1:
mov v4.16b, v31.16b
mov v5.16b, v31.16b
tbx v4.16b, {v0.16b, v1.16b}, v26.16b // left[base], left[base+2]
tbx v5.16b, {v0.16b, v1.16b}, v27.16b // left[base+1], left[base+3]
umull v16.8h, v4.8b, v25.8b // left[base]*(64-frac)
umlal v16.8h, v5.8b, v24.8b // + left[base+1]*frac
umull2 v17.8h, v4.16b, v25.16b
umlal2 v17.8h, v5.16b, v24.16b
rshrn v16.8b, v16.8h, #6
rshrn v17.8b, v17.8h, #6
st1 {v16.8b}, [x0], x1
subs w4, w4, #2
st1 {v17.8b}, [x0], x1
b.le 9f
uqadd v26.16b, v26.16b, v21.16b // base += 4
uqadd v27.16b, v27.16b, v21.16b // base += 4
b 1b
9:
ret
endfunc
// void ipred_filter_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int filt_idx,
// const int max_width, const int max_height);
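// Each 4x2 output block is computed from 7 neighbours: p0 = topleft,
// p1-p4 = the 4 pixels above, p5-p6 = the 2 pixels to the left, using the
// filter_intra_taps entry selected by filt_idx (one signed 8-byte tap
// vector per input, covering the 8 output positions). Roughly
// (illustrative sketch of one output pixel):
//   out = iclip_pixel((p0*f0 + p1*f1 + p2*f2 + p3*f3 + p4*f4 +
//                      p5*f5 + p6*f6 + 8) >> 4)
// which the code evaluates with mul/mla over all 8 positions at once and
// sqrshrun #4 for the rounding shift plus unsigned saturation.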
function ipred_filter_8bpc_neon, export=1
and w5, w5, #511
movrel x6, X(filter_intra_taps)
lsl w5, w5, #6
add x6, x6, w5, uxtw
ld1 {v16.8b, v17.8b, v18.8b, v19.8b}, [x6], #32
clz w9, w3
movrel x5, ipred_filter_tbl
ld1 {v20.8b, v21.8b, v22.8b}, [x6]
sub w9, w9, #26
ldrsw x9, [x5, w9, uxtw #2]
sxtl v16.8h, v16.8b
sxtl v17.8h, v17.8b
add x5, x5, x9
sxtl v18.8h, v18.8b
sxtl v19.8h, v19.8b
add x6, x0, x1
lsl x1, x1, #1
sxtl v20.8h, v20.8b
sxtl v21.8h, v21.8b
sxtl v22.8h, v22.8b
br x5
40:
AARCH64_VALID_JUMP_TARGET
ldur s0, [x2, #1] // top (0-3)
sub x2, x2, #2
mov x7, #-2
uxtl v0.8h, v0.8b // top (0-3)
4:
ld1 {v1.s}[0], [x2], x7 // left (0-1) + topleft (2)
mul v2.8h, v17.8h, v0.h[0] // p1(top[0]) * filter(1)
mla v2.8h, v18.8h, v0.h[1] // p2(top[1]) * filter(2)
mla v2.8h, v19.8h, v0.h[2] // p3(top[2]) * filter(3)
uxtl v1.8h, v1.8b // left (0-1) + topleft (2)
mla v2.8h, v20.8h, v0.h[3] // p4(top[3]) * filter(4)
mla v2.8h, v16.8h, v1.h[2] // p0(topleft) * filter(0)
mla v2.8h, v21.8h, v1.h[1] // p5(left[0]) * filter(5)
mla v2.8h, v22.8h, v1.h[0] // p6(left[1]) * filter(6)
sqrshrun v2.8b, v2.8h, #4
subs w4, w4, #2
st1 {v2.s}[0], [x0], x1
uxtl v0.8h, v2.8b
st1 {v2.s}[1], [x6], x1
ext v0.16b, v0.16b, v0.16b, #8 // move top from [4-7] to [0-3]
b.gt 4b
ret
80:
AARCH64_VALID_JUMP_TARGET
ldur d0, [x2, #1] // top (0-7)
sub x2, x2, #2
mov x7, #-2
uxtl v0.8h, v0.8b // top (0-7)
8:
ld1 {v1.s}[0], [x2], x7 // left (0-1) + topleft (2)
mul v2.8h, v17.8h, v0.h[0] // p1(top[0]) * filter(1)
mla v2.8h, v18.8h, v0.h[1] // p2(top[1]) * filter(2)
mla v2.8h, v19.8h, v0.h[2] // p3(top[2]) * filter(3)
uxtl v1.8h, v1.8b // left (0-1) + topleft (2)
mla v2.8h, v20.8h, v0.h[3] // p4(top[3]) * filter(4)
mla v2.8h, v16.8h, v1.h[2] // p0(topleft) * filter(0)
mla v2.8h, v21.8h, v1.h[1] // p5(left[0]) * filter(5)
mla v2.8h, v22.8h, v1.h[0] // p6(left[1]) * filter(6)
mul v3.8h, v17.8h, v0.h[4] // p1(top[0]) * filter(1)
mla v3.8h, v18.8h, v0.h[5] // p2(top[1]) * filter(2)
mla v3.8h, v19.8h, v0.h[6] // p3(top[2]) * filter(3)
sqrshrun v2.8b, v2.8h, #4
uxtl v1.8h, v2.8b // first block, in 16 bit
mla v3.8h, v20.8h, v0.h[7] // p4(top[3]) * filter(4)
mla v3.8h, v16.8h, v0.h[3] // p0(topleft) * filter(0)
mla v3.8h, v21.8h, v1.h[3] // p5(left[0]) * filter(5)
mla v3.8h, v22.8h, v1.h[7] // p6(left[1]) * filter(6)
sqrshrun v3.8b, v3.8h, #4
subs w4, w4, #2
st2 {v2.s, v3.s}[0], [x0], x1
zip2 v0.2s, v2.2s, v3.2s
st2 {v2.s, v3.s}[1], [x6], x1
uxtl v0.8h, v0.8b
b.gt 8b
ret
160:
320:
AARCH64_VALID_JUMP_TARGET
add x8, x2, #1
sub x2, x2, #2
mov x7, #-2
sub x1, x1, w3, uxtw
mov w9, w3
1:
ld1 {v0.s}[0], [x2], x7 // left (0-1) + topleft (2)
uxtl v0.8h, v0.8b // left (0-1) + topleft (2)
2:
ld1 {v2.16b}, [x8], #16 // top(0-15)
mul v3.8h, v16.8h, v0.h[2] // p0(topleft) * filter(0)
mla v3.8h, v21.8h, v0.h[1] // p5(left[0]) * filter(5)
uxtl v1.8h, v2.8b // top(0-7)
uxtl2 v2.8h, v2.16b // top(8-15)
mla v3.8h, v22.8h, v0.h[0] // p6(left[1]) * filter(6)
mla v3.8h, v17.8h, v1.h[0] // p1(top[0]) * filter(1)
mla v3.8h, v18.8h, v1.h[1] // p2(top[1]) * filter(2)
mla v3.8h, v19.8h, v1.h[2] // p3(top[2]) * filter(3)
mla v3.8h, v20.8h, v1.h[3] // p4(top[3]) * filter(4)
mul v4.8h, v17.8h, v1.h[4] // p1(top[0]) * filter(1)
mla v4.8h, v18.8h, v1.h[5] // p2(top[1]) * filter(2)
mla v4.8h, v19.8h, v1.h[6] // p3(top[2]) * filter(3)
sqrshrun v3.8b, v3.8h, #4
uxtl v0.8h, v3.8b // first block, in 16 bit
mla v4.8h, v20.8h, v1.h[7] // p4(top[3]) * filter(4)
mla v4.8h, v16.8h, v1.h[3] // p0(topleft) * filter(0)
mla v4.8h, v21.8h, v0.h[3] // p5(left[0]) * filter(5)
mla v4.8h, v22.8h, v0.h[7] // p6(left[1]) * filter(6)
mul v5.8h, v17.8h, v2.h[0] // p1(top[0]) * filter(1)
mla v5.8h, v18.8h, v2.h[1] // p2(top[1]) * filter(2)
mla v5.8h, v19.8h, v2.h[2] // p3(top[2]) * filter(3)
sqrshrun v4.8b, v4.8h, #4
uxtl v0.8h, v4.8b // second block, in 16 bit
mla v5.8h, v20.8h, v2.h[3] // p4(top[3]) * filter(4)
mla v5.8h, v16.8h, v1.h[7] // p0(topleft) * filter(0)
mla v5.8h, v21.8h, v0.h[3] // p5(left[0]) * filter(5)
mla v5.8h, v22.8h, v0.h[7] // p6(left[1]) * filter(6)
mul v6.8h, v17.8h, v2.h[4] // p1(top[0]) * filter(1)
mla v6.8h, v18.8h, v2.h[5] // p2(top[1]) * filter(2)
mla v6.8h, v19.8h, v2.h[6] // p3(top[2]) * filter(3)
sqrshrun v5.8b, v5.8h, #4
uxtl v0.8h, v5.8b // third block, in 16 bit
mla v6.8h, v20.8h, v2.h[7] // p4(top[3]) * filter(4)
mla v6.8h, v16.8h, v2.h[3] // p0(topleft) * filter(0)
mla v6.8h, v21.8h, v0.h[3] // p5(left[0]) * filter(5)
mla v6.8h, v22.8h, v0.h[7] // p6(left[1]) * filter(6)
subs w3, w3, #16
sqrshrun v6.8b, v6.8h, #4
st4 {v3.s, v4.s, v5.s, v6.s}[0], [x0], #16
st4 {v3.s, v4.s, v5.s, v6.s}[1], [x6], #16
b.le 8f
ins v0.h[2], v2.h[7]
ins v0.b[0], v6.b[7]
ins v0.b[2], v6.b[3]
b 2b
8:
subs w4, w4, #2
b.le 9f
sub x8, x6, w9, uxtw
add x0, x0, x1
add x6, x6, x1
mov w3, w9
b 1b
9:
ret
endfunc
jumptable ipred_filter_tbl
.word 320b - ipred_filter_tbl
.word 160b - ipred_filter_tbl
.word 80b - ipred_filter_tbl
.word 40b - ipred_filter_tbl
endjumptable
// void pal_pred_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const pal, const uint8_t *idx,
// const int w, const int h);
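// The packed idx buffer holds two palette indices per byte: the low nibble
// selects the first (even) pixel and the high nibble the second (odd) one.
// Roughly (illustrative sketch), per index byte:
//   dst[2*i]     = pal[idx[i] & 7];
//   dst[2*i + 1] = pal[idx[i] >> 4];
// implemented with and/ushr + zip1/zip2 + a tbl lookup into the 8-entry
// palette.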
function pal_pred_8bpc_neon, export=1
ld1 {v0.8b}, [x2]
clz w9, w4
movrel x6, pal_pred_tbl
sub w9, w9, #25
movi v31.16b, #7
ldrsw x9, [x6, w9, uxtw #2]
add x6, x6, x9
add x2, x0, x1
lsl x1, x1, #1
br x6
40:
AARCH64_VALID_JUMP_TARGET
4:
ld1 {v1.8b}, [x3], #8
subs w5, w5, #4
ushr v3.8b, v1.8b, #4
and v2.8b, v1.8b, v31.8b
zip1 v1.16b, v2.16b, v3.16b
tbl v1.16b, {v0.16b}, v1.16b
st1 {v1.s}[0], [x0], x1
st1 {v1.s}[1], [x2], x1
st1 {v1.s}[2], [x0], x1
st1 {v1.s}[3], [x2], x1
b.gt 4b
ret
80:
AARCH64_VALID_JUMP_TARGET
8:
ld1 {v1.16b}, [x3], #16
subs w5, w5, #4
ushr v4.16b, v1.16b, #4
and v3.16b, v1.16b, v31.16b
zip1 v1.16b, v3.16b, v4.16b
zip2 v2.16b, v3.16b, v4.16b
tbl v1.16b, {v0.16b}, v1.16b
st1 {v1.d}[0], [x0], x1
tbl v2.16b, {v0.16b}, v2.16b
st1 {v1.d}[1], [x2], x1
st1 {v2.d}[0], [x0], x1
st1 {v2.d}[1], [x2], x1
b.gt 8b
ret
160:
AARCH64_VALID_JUMP_TARGET
16:
ld1 {v1.16b, v2.16b}, [x3], #32
subs w5, w5, #4
ushr v5.16b, v1.16b, #4
and v4.16b, v1.16b, v31.16b
ushr v7.16b, v2.16b, #4
and v6.16b, v2.16b, v31.16b
zip1 v1.16b, v4.16b, v5.16b
zip2 v2.16b, v4.16b, v5.16b
zip1 v3.16b, v6.16b, v7.16b
tbl v1.16b, {v0.16b}, v1.16b
zip2 v4.16b, v6.16b, v7.16b
tbl v2.16b, {v0.16b}, v2.16b
st1 {v1.16b}, [x0], x1
tbl v3.16b, {v0.16b}, v3.16b
st1 {v2.16b}, [x2], x1
tbl v4.16b, {v0.16b}, v4.16b
st1 {v3.16b}, [x0], x1
st1 {v4.16b}, [x2], x1
b.gt 16b
ret
320:
AARCH64_VALID_JUMP_TARGET
32:
ld1 {v16.16b, v17.16b, v18.16b, v19.16b}, [x3], #64
subs w5, w5, #4
ushr v21.16b, v16.16b, #4
and v20.16b, v16.16b, v31.16b
ushr v23.16b, v17.16b, #4
and v22.16b, v17.16b, v31.16b
ushr v25.16b, v18.16b, #4
and v24.16b, v18.16b, v31.16b
ushr v27.16b, v19.16b, #4
and v26.16b, v19.16b, v31.16b
zip1 v16.16b, v20.16b, v21.16b
zip2 v17.16b, v20.16b, v21.16b
zip1 v18.16b, v22.16b, v23.16b
zip2 v19.16b, v22.16b, v23.16b
zip1 v20.16b, v24.16b, v25.16b
zip2 v21.16b, v24.16b, v25.16b
tbl v16.16b, {v0.16b}, v16.16b
zip1 v22.16b, v26.16b, v27.16b
tbl v17.16b, {v0.16b}, v17.16b
zip2 v23.16b, v26.16b, v27.16b
tbl v18.16b, {v0.16b}, v18.16b
tbl v19.16b, {v0.16b}, v19.16b
tbl v20.16b, {v0.16b}, v20.16b
st1 {v16.16b, v17.16b}, [x0], x1
tbl v21.16b, {v0.16b}, v21.16b
st1 {v18.16b, v19.16b}, [x2], x1
tbl v22.16b, {v0.16b}, v22.16b
st1 {v20.16b, v21.16b}, [x0], x1
tbl v23.16b, {v0.16b}, v23.16b
st1 {v22.16b, v23.16b}, [x2], x1
b.gt 32b
ret
640:
AARCH64_VALID_JUMP_TARGET
64:
ld1 {v16.16b, v17.16b, v18.16b, v19.16b}, [x3], #64
subs w5, w5, #2
ushr v21.16b, v16.16b, #4
and v20.16b, v16.16b, v31.16b
ushr v23.16b, v17.16b, #4
and v22.16b, v17.16b, v31.16b
ushr v25.16b, v18.16b, #4
and v24.16b, v18.16b, v31.16b
ushr v27.16b, v19.16b, #4
and v26.16b, v19.16b, v31.16b
zip1 v16.16b, v20.16b, v21.16b
zip2 v17.16b, v20.16b, v21.16b
zip1 v18.16b, v22.16b, v23.16b
zip2 v19.16b, v22.16b, v23.16b
zip1 v20.16b, v24.16b, v25.16b
zip2 v21.16b, v24.16b, v25.16b
tbl v16.16b, {v0.16b}, v16.16b
zip1 v22.16b, v26.16b, v27.16b
tbl v17.16b, {v0.16b}, v17.16b
zip2 v23.16b, v26.16b, v27.16b
tbl v18.16b, {v0.16b}, v18.16b
tbl v19.16b, {v0.16b}, v19.16b
st1 {v16.16b, v17.16b, v18.16b, v19.16b}, [x0], x1
tbl v20.16b, {v0.16b}, v20.16b
tbl v21.16b, {v0.16b}, v21.16b
tbl v22.16b, {v0.16b}, v22.16b
tbl v23.16b, {v0.16b}, v23.16b
st1 {v20.16b, v21.16b, v22.16b, v23.16b}, [x2], x1
b.gt 64b
ret
endfunc
jumptable pal_pred_tbl
.word 640b - pal_pred_tbl
.word 320b - pal_pred_tbl
.word 160b - pal_pred_tbl
.word 80b - pal_pred_tbl
.word 40b - pal_pred_tbl
endjumptable
// void ipred_cfl_128_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height,
// const int16_t *ac, const int alpha);
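// CFL reference model (an illustrative sketch): for each ac coefficient,
//   diff = ac * alpha;
//   dst  = iclip_pixel(dc + apply_sign((abs(diff) + 32) >> 6, diff));
// The code computes the signed rounding as (diff + (diff < 0 ? -1 : 0))
// with cmlt + add, then srshr #6 for the +32 rounding shift, adds dc and
// clips with sqxtun. For the _128 variant, dc is the fixed value 128.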
function ipred_cfl_128_8bpc_neon, export=1
clz w9, w3
movrel x7, ipred_cfl_128_tbl
sub w9, w9, #26
ldrsw x9, [x7, w9, uxtw #2]
movi v0.8h, #128 // dc
dup v1.8h, w6 // alpha
add x7, x7, x9
add x6, x0, x1
lsl x1, x1, #1
br x7
L(ipred_cfl_splat_w4):
AARCH64_VALID_JUMP_TARGET
1:
ld1 {v2.8h, v3.8h}, [x5], #32
mul v2.8h, v2.8h, v1.8h // diff = ac * alpha
mul v3.8h, v3.8h, v1.8h
cmlt v4.8h, v2.8h, #0 // sign
cmlt v5.8h, v3.8h, #0
add v2.8h, v2.8h, v4.8h // diff + sign
add v3.8h, v3.8h, v5.8h
srshr v2.8h, v2.8h, #6 // (diff + sign + 32) >> 6 = apply_sign()
srshr v3.8h, v3.8h, #6
add v2.8h, v2.8h, v0.8h // dc + apply_sign()
add v3.8h, v3.8h, v0.8h
sqxtun v2.8b, v2.8h // iclip_pixel(dc + apply_sign())
sqxtun v3.8b, v3.8h
st1 {v2.s}[0], [x0], x1
st1 {v2.s}[1], [x6], x1
subs w4, w4, #4
st1 {v3.s}[0], [x0], x1
st1 {v3.s}[1], [x6], x1
b.gt 1b
ret
L(ipred_cfl_splat_w8):
AARCH64_VALID_JUMP_TARGET
1:
ld1 {v2.8h, v3.8h, v4.8h, v5.8h}, [x5], #64
mul v2.8h, v2.8h, v1.8h // diff = ac * alpha
mul v3.8h, v3.8h, v1.8h
mul v4.8h, v4.8h, v1.8h
mul v5.8h, v5.8h, v1.8h
cmlt v16.8h, v2.8h, #0 // sign
cmlt v17.8h, v3.8h, #0
cmlt v18.8h, v4.8h, #0
cmlt v19.8h, v5.8h, #0
add v2.8h, v2.8h, v16.8h // diff + sign
add v3.8h, v3.8h, v17.8h
add v4.8h, v4.8h, v18.8h
add v5.8h, v5.8h, v19.8h
srshr v2.8h, v2.8h, #6 // (diff + sign + 32) >> 6 = apply_sign()
srshr v3.8h, v3.8h, #6
srshr v4.8h, v4.8h, #6
srshr v5.8h, v5.8h, #6
add v2.8h, v2.8h, v0.8h // dc + apply_sign()
add v3.8h, v3.8h, v0.8h
add v4.8h, v4.8h, v0.8h
add v5.8h, v5.8h, v0.8h
sqxtun v2.8b, v2.8h // iclip_pixel(dc + apply_sign())
sqxtun v3.8b, v3.8h
sqxtun v4.8b, v4.8h
sqxtun v5.8b, v5.8h
st1 {v2.8b}, [x0], x1
st1 {v3.8b}, [x6], x1
subs w4, w4, #4
st1 {v4.8b}, [x0], x1
st1 {v5.8b}, [x6], x1
b.gt 1b
ret
L(ipred_cfl_splat_w16):
AARCH64_VALID_JUMP_TARGET
add x7, x5, w3, uxtw #1
sub x1, x1, w3, uxtw
mov w9, w3
1:
ld1 {v2.8h, v3.8h}, [x5], #32
ld1 {v4.8h, v5.8h}, [x7], #32
mul v2.8h, v2.8h, v1.8h // diff = ac * alpha
mul v3.8h, v3.8h, v1.8h
mul v4.8h, v4.8h, v1.8h
mul v5.8h, v5.8h, v1.8h
cmlt v16.8h, v2.8h, #0 // sign
cmlt v17.8h, v3.8h, #0
cmlt v18.8h, v4.8h, #0
cmlt v19.8h, v5.8h, #0
add v2.8h, v2.8h, v16.8h // diff + sign
add v3.8h, v3.8h, v17.8h
add v4.8h, v4.8h, v18.8h
add v5.8h, v5.8h, v19.8h
srshr v2.8h, v2.8h, #6 // (diff + sign + 32) >> 6 = apply_sign()
srshr v3.8h, v3.8h, #6
srshr v4.8h, v4.8h, #6
srshr v5.8h, v5.8h, #6
add v2.8h, v2.8h, v0.8h // dc + apply_sign()
add v3.8h, v3.8h, v0.8h
add v4.8h, v4.8h, v0.8h
add v5.8h, v5.8h, v0.8h
sqxtun v2.8b, v2.8h // iclip_pixel(dc + apply_sign())
sqxtun v3.8b, v3.8h
sqxtun v4.8b, v4.8h
sqxtun v5.8b, v5.8h
subs w3, w3, #16
st1 {v2.8b, v3.8b}, [x0], #16
st1 {v4.8b, v5.8b}, [x6], #16
b.gt 1b
subs w4, w4, #2
add x5, x5, w9, uxtw #1
add x7, x7, w9, uxtw #1
add x0, x0, x1
add x6, x6, x1
mov w3, w9
b.gt 1b
ret
endfunc
jumptable ipred_cfl_128_tbl
ipred_cfl_splat_tbl:
.word L(ipred_cfl_splat_w16) - ipred_cfl_128_tbl
.word L(ipred_cfl_splat_w16) - ipred_cfl_128_tbl
.word L(ipred_cfl_splat_w8) - ipred_cfl_128_tbl
.word L(ipred_cfl_splat_w4) - ipred_cfl_128_tbl
endjumptable
// void ipred_cfl_top_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height,
// const int16_t *ac, const int alpha);
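// Rough scalar sketch (illustrative only) of the DC that this function feeds
// into the splat code above: the rounded average of the `width` pixels above
// the block; uaddlv does the sum and urshr the rounded divide.
/*
static int cfl_top_dc_sketch(const unsigned char *topleft, int width)
{
    int sum = 0;
    for (int x = 0; x < width; x++)
        sum += topleft[1 + x];            // pixels above the block
    return (sum + (width >> 1)) / width;  // width is a power of two in [4, 32]
}
*/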
function ipred_cfl_top_8bpc_neon, export=1
clz w9, w3
movrel x7, ipred_cfl_top_tbl
sub w9, w9, #26
ldrsw x9, [x7, w9, uxtw #2]
dup v1.8h, w6 // alpha
add x2, x2, #1
add x7, x7, x9
add x6, x0, x1
lsl x1, x1, #1
br x7
4:
AARCH64_VALID_JUMP_TARGET
ld1r {v0.2s}, [x2]
uaddlv h0, v0.8b
urshr v0.4h, v0.4h, #3
dup v0.8h, v0.h[0]
b L(ipred_cfl_splat_w4)
8:
AARCH64_VALID_JUMP_TARGET
ld1 {v0.8b}, [x2]
uaddlv h0, v0.8b
urshr v0.4h, v0.4h, #3
dup v0.8h, v0.h[0]
b L(ipred_cfl_splat_w8)
16:
AARCH64_VALID_JUMP_TARGET
ld1 {v0.16b}, [x2]
uaddlv h0, v0.16b
urshr v0.4h, v0.4h, #4
dup v0.8h, v0.h[0]
b L(ipred_cfl_splat_w16)
32:
AARCH64_VALID_JUMP_TARGET
ld1 {v2.16b, v3.16b}, [x2]
uaddlv h2, v2.16b
uaddlv h3, v3.16b
add v2.4h, v2.4h, v3.4h
urshr v2.4h, v2.4h, #5
dup v0.8h, v2.h[0]
b L(ipred_cfl_splat_w16)
endfunc
jumptable ipred_cfl_top_tbl
.word 32b - ipred_cfl_top_tbl
.word 16b - ipred_cfl_top_tbl
.word 8b - ipred_cfl_top_tbl
.word 4b - ipred_cfl_top_tbl
endjumptable
// void ipred_cfl_left_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height,
// const int16_t *ac, const int alpha);
function ipred_cfl_left_8bpc_neon, export=1
sub x2, x2, w4, uxtw
clz w9, w3
clz w8, w4
movrel x10, ipred_cfl_splat_tbl
movrel x7, ipred_cfl_left_tbl
sub w9, w9, #26
sub w8, w8, #26
ldrsw x9, [x10, w9, uxtw #2]
ldrsw x8, [x7, w8, uxtw #2]
dup v1.8h, w6 // alpha
add x9, x10, x9
add x7, x7, x8
add x6, x0, x1
lsl x1, x1, #1
br x7
L(ipred_cfl_left_h4):
AARCH64_VALID_JUMP_TARGET
ld1r {v0.2s}, [x2]
uaddlv h0, v0.8b
urshr v0.4h, v0.4h, #3
dup v0.8h, v0.h[0]
br x9
L(ipred_cfl_left_h8):
AARCH64_VALID_JUMP_TARGET
ld1 {v0.8b}, [x2]
uaddlv h0, v0.8b
urshr v0.4h, v0.4h, #3
dup v0.8h, v0.h[0]
br x9
L(ipred_cfl_left_h16):
AARCH64_VALID_JUMP_TARGET
ld1 {v0.16b}, [x2]
uaddlv h0, v0.16b
urshr v0.4h, v0.4h, #4
dup v0.8h, v0.h[0]
br x9
L(ipred_cfl_left_h32):
AARCH64_VALID_JUMP_TARGET
ld1 {v2.16b, v3.16b}, [x2]
uaddlv h2, v2.16b
uaddlv h3, v3.16b
add v2.4h, v2.4h, v3.4h
urshr v2.4h, v2.4h, #5
dup v0.8h, v2.h[0]
br x9
endfunc
jumptable ipred_cfl_left_tbl
.word L(ipred_cfl_left_h32) - ipred_cfl_left_tbl
.word L(ipred_cfl_left_h16) - ipred_cfl_left_tbl
.word L(ipred_cfl_left_h8) - ipred_cfl_left_tbl
.word L(ipred_cfl_left_h4) - ipred_cfl_left_tbl
endjumptable
// void ipred_cfl_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height,
// const int16_t *ac, const int alpha);
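// Rough sketch (illustrative) of the DC computed here from both edges. The
// interesting part of the NEON code is the division by width + height for
// rectangular blocks: after the right shift by ctz(width + height), the
// remaining factor is 3 or 5, handled with a sqdmulh by 0x5556/2 (~1/3) or
// 0x3334/2 (~1/5).
/*
static int cfl_dc_sketch(const unsigned char *topleft, int width, int height)
{
    int sum = 0;
    for (int x = 0; x < width; x++)
        sum += topleft[1 + x];            // top edge
    for (int y = 0; y < height; y++)
        sum += topleft[-(1 + y)];         // left edge
    return (sum + ((width + height) >> 1)) / (width + height);
}
*/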
function ipred_cfl_8bpc_neon, export=1
sub x2, x2, w4, uxtw
add w8, w3, w4 // width + height
dup v1.8h, w6 // alpha
clz w9, w3
clz w6, w4
dup v16.8h, w8 // width + height
movrel x7, ipred_cfl_tbl
rbit w8, w8 // rbit(width + height)
sub w9, w9, #22 // 26 leading bits, minus table offset 4
sub w6, w6, #26
clz w8, w8 // ctz(width + height)
ldrsw x9, [x7, w9, uxtw #2]
ldrsw x6, [x7, w6, uxtw #2]
neg w8, w8 // -ctz(width + height)
add x9, x7, x9
add x7, x7, x6
ushr v16.8h, v16.8h, #1 // (width + height) >> 1
dup v17.8h, w8 // -ctz(width + height)
add x6, x0, x1
lsl x1, x1, #1
br x7
L(ipred_cfl_h4):
AARCH64_VALID_JUMP_TARGET
ld1 {v0.s}[0], [x2], #4
ins v0.s[1], wzr
add x2, x2, #1
uaddlv h0, v0.8b
br x9
L(ipred_cfl_w4):
AARCH64_VALID_JUMP_TARGET
ld1 {v2.s}[0], [x2]
ins v2.s[1], wzr
add v0.4h, v0.4h, v16.4h
uaddlv h2, v2.8b
cmp w4, #4
add v0.4h, v0.4h, v2.4h
ushl v0.4h, v0.4h, v17.4h
b.eq 1f
// h = 8/16
mov w16, #(0x3334/2)
movk w16, #(0x5556/2), lsl #16
add w17, w4, w4 // w17 = 2*h = 16 or 32
lsr w16, w16, w17
dup v16.4h, w16
sqdmulh v0.4h, v0.4h, v16.4h
1:
dup v0.8h, v0.h[0]
b L(ipred_cfl_splat_w4)
L(ipred_cfl_h8):
AARCH64_VALID_JUMP_TARGET
ld1 {v0.8b}, [x2], #8
uaddlv h0, v0.8b
add x2, x2, #1
br x9
L(ipred_cfl_w8):
AARCH64_VALID_JUMP_TARGET
ld1 {v2.8b}, [x2]
add v0.4h, v0.4h, v16.4h
uaddlv h2, v2.8b
cmp w4, #8
add v0.4h, v0.4h, v2.4h
ushl v0.4h, v0.4h, v17.4h
b.eq 1f
// h = 4/16/32
cmp w4, #32
mov w16, #(0x3334/2)
mov w17, #(0x5556/2)
csel w16, w16, w17, eq
dup v16.4h, w16
sqdmulh v0.4h, v0.4h, v16.4h
1:
dup v0.8h, v0.h[0]
b L(ipred_cfl_splat_w8)
L(ipred_cfl_h16):
AARCH64_VALID_JUMP_TARGET
ld1 {v0.16b}, [x2], #16
uaddlv h0, v0.16b
add x2, x2, #1
br x9
L(ipred_cfl_w16):
AARCH64_VALID_JUMP_TARGET
ld1 {v2.16b}, [x2]
add v0.4h, v0.4h, v16.4h
uaddlv h2, v2.16b
cmp w4, #16
add v0.4h, v0.4h, v2.4h
ushl v0.4h, v0.4h, v17.4h
b.eq 1f
// h = 4/8/32
cmp w4, #4
mov w16, #(0x3334/2)
mov w17, #(0x5556/2)
csel w16, w16, w17, eq
dup v16.4h, w16
sqdmulh v0.4h, v0.4h, v16.4h
1:
dup v0.8h, v0.h[0]
b L(ipred_cfl_splat_w16)
L(ipred_cfl_h32):
AARCH64_VALID_JUMP_TARGET
ld1 {v2.16b, v3.16b}, [x2], #32
uaddlv h2, v2.16b
uaddlv h3, v3.16b
add x2, x2, #1
add v0.4h, v2.4h, v3.4h
br x9
L(ipred_cfl_w32):
AARCH64_VALID_JUMP_TARGET
ld1 {v2.16b, v3.16b}, [x2]
add v0.4h, v0.4h, v16.4h
uaddlv h2, v2.16b
uaddlv h3, v3.16b
cmp w4, #32
add v0.4h, v0.4h, v2.4h
add v0.4h, v0.4h, v3.4h
ushl v0.4h, v0.4h, v17.4h
b.eq 1f
// h = 8/16
mov w16, #(0x5556/2)
movk w16, #(0x3334/2), lsl #16
add w17, w4, w4 // w17 = 2*h = 16 or 32
lsr w16, w16, w17
dup v16.4h, w16
sqdmulh v0.4h, v0.4h, v16.4h
1:
dup v0.8h, v0.h[0]
b L(ipred_cfl_splat_w16)
endfunc
jumptable ipred_cfl_tbl
.word L(ipred_cfl_h32) - ipred_cfl_tbl
.word L(ipred_cfl_h16) - ipred_cfl_tbl
.word L(ipred_cfl_h8) - ipred_cfl_tbl
.word L(ipred_cfl_h4) - ipred_cfl_tbl
.word L(ipred_cfl_w32) - ipred_cfl_tbl
.word L(ipred_cfl_w16) - ipred_cfl_tbl
.word L(ipred_cfl_w8) - ipred_cfl_tbl
.word L(ipred_cfl_w4) - ipred_cfl_tbl
endjumptable
// void cfl_ac_420_8bpc_neon(int16_t *const ac, const pixel *const ypx,
// const ptrdiff_t stride, const int w_pad,
// const int h_pad, const int cw, const int ch);
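// Rough scalar sketch (illustrative, padding omitted) of the 4:2:0 AC
// generation below: each output is the sum of a 2x2 luma block, scaled so that
// every chroma position carries 8x one luma pixel (<<1 here, <<2 for 4:2:2,
// <<3 for 4:4:4), and the rounded average of the whole plane is subtracted at
// the end. __builtin_ctz stands in for the rbit+clz sequence below.
/*
#include <stdint.h>
#include <stddef.h>

static void cfl_ac_420_sketch(int16_t *ac, const uint8_t *ypx, ptrdiff_t stride,
                              int cw, int ch)
{
    int sum = 0;
    for (int y = 0; y < ch; y++, ypx += 2 * stride)
        for (int x = 0; x < cw; x++) {
            const int v = ypx[2 * x] + ypx[2 * x + 1] +
                          ypx[2 * x + stride] + ypx[2 * x + 1 + stride];
            ac[y * cw + x] = v << 1;
            sum += v << 1;
        }
    const int log2sz = __builtin_ctz(cw) + __builtin_ctz(ch);
    const int avg = (sum + (1 << (log2sz - 1))) >> log2sz; // urshl by -log2sz
    for (int n = 0; n < cw * ch; n++)
        ac[n] -= avg;                                      // store ac - average
}
*/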
function ipred_cfl_ac_420_8bpc_neon, export=1
clz w8, w5
lsl w4, w4, #2
movrel x7, ipred_cfl_ac_420_tbl
sub w8, w8, #27
ldrsw x8, [x7, w8, uxtw #2]
movi v16.8h, #0
movi v17.8h, #0
movi v18.8h, #0
movi v19.8h, #0
add x7, x7, x8
sub w8, w6, w4 // height - h_pad
rbit w9, w5 // rbit(width)
rbit w10, w6 // rbit(height)
clz w9, w9 // ctz(width)
clz w10, w10 // ctz(height)
add w9, w9, w10 // log2sz
add x10, x1, x2
dup v31.4s, w9
lsl x2, x2, #1
neg v31.4s, v31.4s // -log2sz
br x7
L(ipred_cfl_ac_420_w4):
AARCH64_VALID_JUMP_TARGET
1: // Copy and subsample input
ld1 {v0.8b}, [x1], x2
ld1 {v1.8b}, [x10], x2
ld1 {v0.d}[1], [x1], x2
ld1 {v1.d}[1], [x10], x2
uaddlp v0.8h, v0.16b
uaddlp v1.8h, v1.16b
add v0.8h, v0.8h, v1.8h
shl v0.8h, v0.8h, #1
subs w8, w8, #2
st1 {v0.8h}, [x0], #16
add v16.8h, v16.8h, v0.8h
b.gt 1b
trn2 v1.2d, v0.2d, v0.2d
trn2 v0.2d, v0.2d, v0.2d
L(ipred_cfl_ac_420_w4_hpad):
cbz w4, 3f
2: // Vertical padding (h_pad > 0)
subs w4, w4, #4
st1 {v0.8h, v1.8h}, [x0], #32
add v16.8h, v16.8h, v0.8h
add v17.8h, v17.8h, v1.8h
b.gt 2b
3:
// Aggregate the sums
add v0.8h, v16.8h, v17.8h
uaddlv s0, v0.8h // sum
sub x0, x0, w6, uxtw #3
urshl v4.2s, v0.2s, v31.2s // (sum + (1 << (log2sz - 1))) >>= log2sz
dup v4.8h, v4.h[0]
6: // Subtract dc from ac
ld1 {v0.8h, v1.8h}, [x0]
subs w6, w6, #4
sub v0.8h, v0.8h, v4.8h
sub v1.8h, v1.8h, v4.8h
st1 {v0.8h, v1.8h}, [x0], #32
b.gt 6b
ret
L(ipred_cfl_ac_420_w8):
AARCH64_VALID_JUMP_TARGET
cbnz w3, L(ipred_cfl_ac_420_w8_wpad)
1: // Copy and subsample input, without padding
ld1 {v0.16b}, [x1], x2
ld1 {v1.16b}, [x10], x2
ld1 {v2.16b}, [x1], x2
uaddlp v0.8h, v0.16b
ld1 {v3.16b}, [x10], x2
uaddlp v1.8h, v1.16b
uaddlp v2.8h, v2.16b
uaddlp v3.8h, v3.16b
add v0.8h, v0.8h, v1.8h
add v2.8h, v2.8h, v3.8h
shl v0.8h, v0.8h, #1
shl v1.8h, v2.8h, #1
subs w8, w8, #2
st1 {v0.8h, v1.8h}, [x0], #32
add v16.8h, v16.8h, v0.8h
add v17.8h, v17.8h, v1.8h
b.gt 1b
mov v0.16b, v1.16b
b L(ipred_cfl_ac_420_w8_hpad)
L(ipred_cfl_ac_420_w8_wpad):
1: // Copy and subsample input, padding 4
ld1 {v0.8b}, [x1], x2
ld1 {v1.8b}, [x10], x2
ld1 {v0.d}[1], [x1], x2
ld1 {v1.d}[1], [x10], x2
uaddlp v0.8h, v0.16b
uaddlp v1.8h, v1.16b
add v0.8h, v0.8h, v1.8h
shl v0.8h, v0.8h, #1
dup v1.4h, v0.h[3]
dup v3.4h, v0.h[7]
trn2 v2.2d, v0.2d, v0.2d
subs w8, w8, #2
st1 {v0.4h, v1.4h, v2.4h, v3.4h}, [x0], #32
add v16.4h, v16.4h, v0.4h
add v17.4h, v17.4h, v1.4h
add v18.4h, v18.4h, v2.4h
add v19.4h, v19.4h, v3.4h
b.gt 1b
trn1 v0.2d, v2.2d, v3.2d
trn1 v1.2d, v2.2d, v3.2d
L(ipred_cfl_ac_420_w8_hpad):
cbz w4, 3f
2: // Vertical padding (h_pad > 0)
subs w4, w4, #4
st1 {v0.8h, v1.8h}, [x0], #32
add v16.8h, v16.8h, v0.8h
add v17.8h, v17.8h, v1.8h
st1 {v0.8h, v1.8h}, [x0], #32
add v18.8h, v18.8h, v0.8h
add v19.8h, v19.8h, v1.8h
b.gt 2b
3:
L(ipred_cfl_ac_420_w8_calc_subtract_dc):
// Aggregate the sums
add v0.8h, v16.8h, v17.8h
add v2.8h, v18.8h, v19.8h
uaddlp v0.4s, v0.8h
uaddlp v2.4s, v2.8h
add v0.4s, v0.4s, v2.4s
addv s0, v0.4s // sum
sub x0, x0, w6, uxtw #4
urshl v4.2s, v0.2s, v31.2s // (sum + (1 << (log2sz - 1))) >>= log2sz
dup v4.8h, v4.h[0]
L(ipred_cfl_ac_420_w8_subtract_dc):
6: // Subtract dc from ac
ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0]
subs w6, w6, #4
sub v0.8h, v0.8h, v4.8h
sub v1.8h, v1.8h, v4.8h
sub v2.8h, v2.8h, v4.8h
sub v3.8h, v3.8h, v4.8h
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
b.gt 6b
ret
L(ipred_cfl_ac_420_w16):
AARCH64_VALID_JUMP_TARGET
movrel x7, ipred_cfl_ac_420_w16_tbl
ldrsw x3, [x7, w3, uxtw #2]
add x7, x7, x3
br x7
L(ipred_cfl_ac_420_w16_wpad0):
AARCH64_VALID_JUMP_TARGET
1: // Copy and subsample input, without padding
ld1 {v0.16b, v1.16b}, [x1], x2
ld1 {v2.16b, v3.16b}, [x10], x2
uaddlp v0.8h, v0.16b
ld1 {v4.16b, v5.16b}, [x1], x2
uaddlp v1.8h, v1.16b
ld1 {v6.16b, v7.16b}, [x10], x2
uaddlp v2.8h, v2.16b
uaddlp v3.8h, v3.16b
uaddlp v4.8h, v4.16b
uaddlp v5.8h, v5.16b
uaddlp v6.8h, v6.16b
uaddlp v7.8h, v7.16b
add v0.8h, v0.8h, v2.8h
add v1.8h, v1.8h, v3.8h
add v4.8h, v4.8h, v6.8h
add v5.8h, v5.8h, v7.8h
shl v0.8h, v0.8h, #1
shl v1.8h, v1.8h, #1
shl v2.8h, v4.8h, #1
shl v3.8h, v5.8h, #1
subs w8, w8, #2
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
add v16.8h, v16.8h, v0.8h
add v17.8h, v17.8h, v1.8h
add v18.8h, v18.8h, v2.8h
add v19.8h, v19.8h, v3.8h
b.gt 1b
mov v0.16b, v2.16b
mov v1.16b, v3.16b
b L(ipred_cfl_ac_420_w16_hpad)
L(ipred_cfl_ac_420_w16_wpad1):
AARCH64_VALID_JUMP_TARGET
1: // Copy and subsample input, padding 4
ldr d1, [x1, #16]
ld1 {v0.16b}, [x1], x2
ldr d3, [x10, #16]
ld1 {v2.16b}, [x10], x2
uaddlp v1.4h, v1.8b
ldr d5, [x1, #16]
uaddlp v0.8h, v0.16b
ld1 {v4.16b}, [x1], x2
uaddlp v3.4h, v3.8b
ldr d7, [x10, #16]
uaddlp v2.8h, v2.16b
ld1 {v6.16b}, [x10], x2
uaddlp v5.4h, v5.8b
uaddlp v4.8h, v4.16b
uaddlp v7.4h, v7.8b
uaddlp v6.8h, v6.16b
add v1.4h, v1.4h, v3.4h
add v0.8h, v0.8h, v2.8h
add v5.4h, v5.4h, v7.4h
add v4.8h, v4.8h, v6.8h
shl v1.4h, v1.4h, #1
shl v0.8h, v0.8h, #1
shl v3.4h, v5.4h, #1
shl v2.8h, v4.8h, #1
dup v4.4h, v1.h[3]
dup v5.4h, v3.h[3]
trn1 v1.2d, v1.2d, v4.2d
trn1 v3.2d, v3.2d, v5.2d
subs w8, w8, #2
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
add v16.8h, v16.8h, v0.8h
add v17.8h, v17.8h, v1.8h
add v18.8h, v18.8h, v2.8h
add v19.8h, v19.8h, v3.8h
b.gt 1b
mov v0.16b, v2.16b
mov v1.16b, v3.16b
b L(ipred_cfl_ac_420_w16_hpad)
L(ipred_cfl_ac_420_w16_wpad2):
AARCH64_VALID_JUMP_TARGET
1: // Copy and subsample input, padding 8
ld1 {v0.16b}, [x1], x2
ld1 {v2.16b}, [x10], x2
ld1 {v4.16b}, [x1], x2
uaddlp v0.8h, v0.16b
ld1 {v6.16b}, [x10], x2
uaddlp v2.8h, v2.16b
uaddlp v4.8h, v4.16b
uaddlp v6.8h, v6.16b
add v0.8h, v0.8h, v2.8h
add v4.8h, v4.8h, v6.8h
shl v0.8h, v0.8h, #1
shl v2.8h, v4.8h, #1
dup v1.8h, v0.h[7]
dup v3.8h, v2.h[7]
subs w8, w8, #2
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
add v16.8h, v16.8h, v0.8h
add v17.8h, v17.8h, v1.8h
add v18.8h, v18.8h, v2.8h
add v19.8h, v19.8h, v3.8h
b.gt 1b
mov v0.16b, v2.16b
mov v1.16b, v3.16b
b L(ipred_cfl_ac_420_w16_hpad)
L(ipred_cfl_ac_420_w16_wpad3):
AARCH64_VALID_JUMP_TARGET
1: // Copy and subsample input, padding 12
ld1 {v0.8b}, [x1], x2
ld1 {v2.8b}, [x10], x2
ld1 {v4.8b}, [x1], x2
uaddlp v0.4h, v0.8b
ld1 {v6.8b}, [x10], x2
uaddlp v2.4h, v2.8b
uaddlp v4.4h, v4.8b
uaddlp v6.4h, v6.8b
add v0.4h, v0.4h, v2.4h
add v4.4h, v4.4h, v6.4h
shl v0.4h, v0.4h, #1
shl v2.4h, v4.4h, #1
dup v1.8h, v0.h[3]
dup v3.8h, v2.h[3]
trn1 v0.2d, v0.2d, v1.2d
trn1 v2.2d, v2.2d, v3.2d
subs w8, w8, #2
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
add v16.8h, v16.8h, v0.8h
add v17.8h, v17.8h, v1.8h
add v18.8h, v18.8h, v2.8h
add v19.8h, v19.8h, v3.8h
b.gt 1b
mov v0.16b, v2.16b
mov v1.16b, v3.16b
L(ipred_cfl_ac_420_w16_hpad):
cbz w4, 3f
2: // Vertical padding (h_pad > 0)
subs w4, w4, #4
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
add v16.8h, v16.8h, v0.8h
add v17.8h, v17.8h, v1.8h
add v18.8h, v18.8h, v2.8h
add v19.8h, v19.8h, v3.8h
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
add v16.8h, v16.8h, v0.8h
add v17.8h, v17.8h, v1.8h
add v18.8h, v18.8h, v2.8h
add v19.8h, v19.8h, v3.8h
b.gt 2b
3:
// Double the height and reuse the w8 summing/subtracting
lsl w6, w6, #1
b L(ipred_cfl_ac_420_w8_calc_subtract_dc)
endfunc
jumptable ipred_cfl_ac_420_tbl
.word L(ipred_cfl_ac_420_w16) - ipred_cfl_ac_420_tbl
.word L(ipred_cfl_ac_420_w8) - ipred_cfl_ac_420_tbl
.word L(ipred_cfl_ac_420_w4) - ipred_cfl_ac_420_tbl
endjumptable
jumptable ipred_cfl_ac_420_w16_tbl
.word L(ipred_cfl_ac_420_w16_wpad0) - ipred_cfl_ac_420_w16_tbl
.word L(ipred_cfl_ac_420_w16_wpad1) - ipred_cfl_ac_420_w16_tbl
.word L(ipred_cfl_ac_420_w16_wpad2) - ipred_cfl_ac_420_w16_tbl
.word L(ipred_cfl_ac_420_w16_wpad3) - ipred_cfl_ac_420_w16_tbl
endjumptable
// void cfl_ac_422_8bpc_neon(int16_t *const ac, const pixel *const ypx,
// const ptrdiff_t stride, const int w_pad,
// const int h_pad, const int cw, const int ch);
function ipred_cfl_ac_422_8bpc_neon, export=1
clz w8, w5
lsl w4, w4, #2
movrel x7, ipred_cfl_ac_422_tbl
sub w8, w8, #27
ldrsw x8, [x7, w8, uxtw #2]
movi v16.8h, #0
movi v17.8h, #0
movi v18.8h, #0
movi v19.8h, #0
add x7, x7, x8
sub w8, w6, w4 // height - h_pad
rbit w9, w5 // rbit(width)
rbit w10, w6 // rbit(height)
clz w9, w9 // ctz(width)
clz w10, w10 // ctz(height)
add w9, w9, w10 // log2sz
add x10, x1, x2
dup v31.4s, w9
lsl x2, x2, #1
neg v31.4s, v31.4s // -log2sz
br x7
L(ipred_cfl_ac_422_w4):
AARCH64_VALID_JUMP_TARGET
1: // Copy and subsample input
ld1 {v0.8b}, [x1], x2
ld1 {v0.d}[1], [x10], x2
ld1 {v1.8b}, [x1], x2
ld1 {v1.d}[1], [x10], x2
uaddlp v0.8h, v0.16b
uaddlp v1.8h, v1.16b
shl v0.8h, v0.8h, #2
shl v1.8h, v1.8h, #2
subs w8, w8, #4
add v16.8h, v16.8h, v0.8h
add v17.8h, v17.8h, v1.8h
st1 {v0.8h, v1.8h}, [x0], #32
b.gt 1b
trn2 v0.2d, v1.2d, v1.2d
trn2 v1.2d, v1.2d, v1.2d
b L(ipred_cfl_ac_420_w4_hpad)
L(ipred_cfl_ac_422_w8):
AARCH64_VALID_JUMP_TARGET
cbnz w3, L(ipred_cfl_ac_422_w8_wpad)
1: // Copy and subsample input, without padding
ld1 {v0.16b}, [x1], x2
ld1 {v1.16b}, [x10], x2
ld1 {v2.16b}, [x1], x2
uaddlp v0.8h, v0.16b
ld1 {v3.16b}, [x10], x2
uaddlp v1.8h, v1.16b
uaddlp v2.8h, v2.16b
uaddlp v3.8h, v3.16b
shl v0.8h, v0.8h, #2
shl v1.8h, v1.8h, #2
shl v2.8h, v2.8h, #2
shl v3.8h, v3.8h, #2
subs w8, w8, #4
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
add v16.8h, v16.8h, v0.8h
add v17.8h, v17.8h, v1.8h
add v18.8h, v18.8h, v2.8h
add v19.8h, v19.8h, v3.8h
b.gt 1b
mov v0.16b, v3.16b
mov v1.16b, v3.16b
b L(ipred_cfl_ac_420_w8_hpad)
L(ipred_cfl_ac_422_w8_wpad):
1: // Copy and subsample input, padding 4
ld1 {v0.8b}, [x1], x2
ld1 {v0.d}[1], [x10], x2
ld1 {v2.8b}, [x1], x2
ld1 {v2.d}[1], [x10], x2
uaddlp v0.8h, v0.16b
uaddlp v2.8h, v2.16b
shl v0.8h, v0.8h, #2
shl v2.8h, v2.8h, #2
dup v4.4h, v0.h[3]
dup v5.8h, v0.h[7]
dup v6.4h, v2.h[3]
dup v7.8h, v2.h[7]
trn2 v1.2d, v0.2d, v5.2d
trn1 v0.2d, v0.2d, v4.2d
trn2 v3.2d, v2.2d, v7.2d
trn1 v2.2d, v2.2d, v6.2d
subs w8, w8, #4
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
add v16.8h, v16.8h, v0.8h
add v17.8h, v17.8h, v1.8h
add v18.8h, v18.8h, v2.8h
add v19.8h, v19.8h, v3.8h
b.gt 1b
mov v0.16b, v3.16b
mov v1.16b, v3.16b
b L(ipred_cfl_ac_420_w8_hpad)
L(ipred_cfl_ac_422_w16):
AARCH64_VALID_JUMP_TARGET
movrel x7, ipred_cfl_ac_422_w16_tbl
ldrsw x3, [x7, w3, uxtw #2]
add x7, x7, x3
br x7
L(ipred_cfl_ac_422_w16_wpad0):
AARCH64_VALID_JUMP_TARGET
1: // Copy and subsample input, without padding
ld1 {v0.16b, v1.16b}, [x1], x2
ld1 {v2.16b, v3.16b}, [x10], x2
uaddlp v0.8h, v0.16b
uaddlp v1.8h, v1.16b
uaddlp v2.8h, v2.16b
uaddlp v3.8h, v3.16b
shl v0.8h, v0.8h, #2
shl v1.8h, v1.8h, #2
shl v2.8h, v2.8h, #2
shl v3.8h, v3.8h, #2
subs w8, w8, #2
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
add v16.8h, v16.8h, v0.8h
add v17.8h, v17.8h, v1.8h
add v18.8h, v18.8h, v2.8h
add v19.8h, v19.8h, v3.8h
b.gt 1b
mov v0.16b, v2.16b
mov v1.16b, v3.16b
b L(ipred_cfl_ac_420_w16_hpad)
L(ipred_cfl_ac_422_w16_wpad1):
AARCH64_VALID_JUMP_TARGET
1: // Copy and subsample input, padding 4
ldr d1, [x1, #16]
ld1 {v0.16b}, [x1], x2
ldr d3, [x10, #16]
ld1 {v2.16b}, [x10], x2
uaddlp v1.4h, v1.8b
uaddlp v0.8h, v0.16b
uaddlp v3.4h, v3.8b
uaddlp v2.8h, v2.16b
shl v1.4h, v1.4h, #2
shl v0.8h, v0.8h, #2
shl v3.4h, v3.4h, #2
shl v2.8h, v2.8h, #2
dup v4.4h, v1.h[3]
dup v5.4h, v3.h[3]
trn1 v1.2d, v1.2d, v4.2d
trn1 v3.2d, v3.2d, v5.2d
subs w8, w8, #2
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
add v16.8h, v16.8h, v0.8h
add v17.8h, v17.8h, v1.8h
add v18.8h, v18.8h, v2.8h
add v19.8h, v19.8h, v3.8h
b.gt 1b
mov v0.16b, v2.16b
mov v1.16b, v3.16b
b L(ipred_cfl_ac_420_w16_hpad)
L(ipred_cfl_ac_422_w16_wpad2):
AARCH64_VALID_JUMP_TARGET
1: // Copy and subsample input, padding 8
ld1 {v0.16b}, [x1], x2
ld1 {v2.16b}, [x10], x2
uaddlp v0.8h, v0.16b
uaddlp v2.8h, v2.16b
shl v0.8h, v0.8h, #2
shl v2.8h, v2.8h, #2
dup v1.8h, v0.h[7]
dup v3.8h, v2.h[7]
subs w8, w8, #2
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
add v16.8h, v16.8h, v0.8h
add v17.8h, v17.8h, v1.8h
add v18.8h, v18.8h, v2.8h
add v19.8h, v19.8h, v3.8h
b.gt 1b
mov v0.16b, v2.16b
mov v1.16b, v3.16b
b L(ipred_cfl_ac_420_w16_hpad)
L(ipred_cfl_ac_422_w16_wpad3):
AARCH64_VALID_JUMP_TARGET
1: // Copy and subsample input, padding 12
ld1 {v0.8b}, [x1], x2
ld1 {v2.8b}, [x10], x2
uaddlp v0.4h, v0.8b
uaddlp v2.4h, v2.8b
shl v0.4h, v0.4h, #2
shl v2.4h, v2.4h, #2
dup v1.8h, v0.h[3]
dup v3.8h, v2.h[3]
trn1 v0.2d, v0.2d, v1.2d
trn1 v2.2d, v2.2d, v3.2d
subs w8, w8, #2
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
add v16.8h, v16.8h, v0.8h
add v17.8h, v17.8h, v1.8h
add v18.8h, v18.8h, v2.8h
add v19.8h, v19.8h, v3.8h
b.gt 1b
mov v0.16b, v2.16b
mov v1.16b, v3.16b
b L(ipred_cfl_ac_420_w16_hpad)
endfunc
jumptable ipred_cfl_ac_422_tbl
.word L(ipred_cfl_ac_422_w16) - ipred_cfl_ac_422_tbl
.word L(ipred_cfl_ac_422_w8) - ipred_cfl_ac_422_tbl
.word L(ipred_cfl_ac_422_w4) - ipred_cfl_ac_422_tbl
endjumptable
jumptable ipred_cfl_ac_422_w16_tbl
.word L(ipred_cfl_ac_422_w16_wpad0) - ipred_cfl_ac_422_w16_tbl
.word L(ipred_cfl_ac_422_w16_wpad1) - ipred_cfl_ac_422_w16_tbl
.word L(ipred_cfl_ac_422_w16_wpad2) - ipred_cfl_ac_422_w16_tbl
.word L(ipred_cfl_ac_422_w16_wpad3) - ipred_cfl_ac_422_w16_tbl
endjumptable
// void cfl_ac_444_8bpc_neon(int16_t *const ac, const pixel *const ypx,
// const ptrdiff_t stride, const int w_pad,
// const int h_pad, const int cw, const int ch);
function ipred_cfl_ac_444_8bpc_neon, export=1
clz w8, w5
lsl w4, w4, #2
movrel x7, ipred_cfl_ac_444_tbl
sub w8, w8, #26
ldrsw x8, [x7, w8, uxtw #2]
movi v16.8h, #0
movi v17.8h, #0
movi v18.8h, #0
movi v19.8h, #0
add x7, x7, x8
sub w8, w6, w4 // height - h_pad
rbit w9, w5 // rbit(width)
rbit w10, w6 // rbit(height)
clz w9, w9 // ctz(width)
clz w10, w10 // ctz(height)
add w9, w9, w10 // log2sz
add x10, x1, x2
dup v31.4s, w9
lsl x2, x2, #1
neg v31.4s, v31.4s // -log2sz
br x7
L(ipred_cfl_ac_444_w4):
AARCH64_VALID_JUMP_TARGET
1: // Copy and expand input
ld1 {v0.s}[0], [x1], x2
ld1 {v0.s}[1], [x10], x2
ld1 {v1.s}[0], [x1], x2
ld1 {v1.s}[1], [x10], x2
ushll v0.8h, v0.8b, #3
ushll v1.8h, v1.8b, #3
subs w8, w8, #4
add v16.8h, v16.8h, v0.8h
add v17.8h, v17.8h, v1.8h
st1 {v0.8h, v1.8h}, [x0], #32
b.gt 1b
trn2 v0.2d, v1.2d, v1.2d
trn2 v1.2d, v1.2d, v1.2d
b L(ipred_cfl_ac_420_w4_hpad)
L(ipred_cfl_ac_444_w8):
AARCH64_VALID_JUMP_TARGET
1: // Copy and expand input
ld1 {v0.8b}, [x1], x2
ld1 {v1.8b}, [x10], x2
ld1 {v2.8b}, [x1], x2
ushll v0.8h, v0.8b, #3
ld1 {v3.8b}, [x10], x2
ushll v1.8h, v1.8b, #3
ushll v2.8h, v2.8b, #3
ushll v3.8h, v3.8b, #3
subs w8, w8, #4
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
add v16.8h, v16.8h, v0.8h
add v17.8h, v17.8h, v1.8h
add v18.8h, v18.8h, v2.8h
add v19.8h, v19.8h, v3.8h
b.gt 1b
mov v0.16b, v3.16b
mov v1.16b, v3.16b
b L(ipred_cfl_ac_420_w8_hpad)
L(ipred_cfl_ac_444_w16):
AARCH64_VALID_JUMP_TARGET
cbnz w3, L(ipred_cfl_ac_444_w16_wpad)
1: // Copy and expand input, without padding
ld1 {v0.16b}, [x1], x2
ld1 {v2.16b}, [x10], x2
ld1 {v4.16b}, [x1], x2
ushll2 v1.8h, v0.16b, #3
ushll v0.8h, v0.8b, #3
ld1 {v6.16b}, [x10], x2
ushll2 v3.8h, v2.16b, #3
ushll v2.8h, v2.8b, #3
ushll2 v5.8h, v4.16b, #3
ushll v4.8h, v4.8b, #3
ushll2 v7.8h, v6.16b, #3
ushll v6.8h, v6.8b, #3
subs w8, w8, #4
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
add v16.8h, v16.8h, v0.8h
add v17.8h, v17.8h, v1.8h
add v18.8h, v18.8h, v2.8h
add v19.8h, v19.8h, v3.8h
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x0], #64
add v16.8h, v16.8h, v4.8h
add v17.8h, v17.8h, v5.8h
add v18.8h, v18.8h, v6.8h
add v19.8h, v19.8h, v7.8h
b.gt 1b
mov v0.16b, v6.16b
mov v1.16b, v7.16b
mov v2.16b, v6.16b
mov v3.16b, v7.16b
b L(ipred_cfl_ac_420_w16_hpad)
L(ipred_cfl_ac_444_w16_wpad):
1: // Copy and expand input, padding 8
ld1 {v0.8b}, [x1], x2
ld1 {v2.8b}, [x10], x2
ld1 {v4.8b}, [x1], x2
ld1 {v6.8b}, [x10], x2
ushll v0.8h, v0.8b, #3
ushll v2.8h, v2.8b, #3
ushll v4.8h, v4.8b, #3
ushll v6.8h, v6.8b, #3
dup v1.8h, v0.h[7]
dup v3.8h, v2.h[7]
dup v5.8h, v4.h[7]
dup v7.8h, v6.h[7]
subs w8, w8, #4
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
add v16.8h, v16.8h, v0.8h
add v17.8h, v17.8h, v1.8h
add v18.8h, v18.8h, v2.8h
add v19.8h, v19.8h, v3.8h
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x0], #64
add v16.8h, v16.8h, v4.8h
add v17.8h, v17.8h, v5.8h
add v18.8h, v18.8h, v6.8h
add v19.8h, v19.8h, v7.8h
b.gt 1b
mov v0.16b, v6.16b
mov v1.16b, v7.16b
mov v2.16b, v6.16b
mov v3.16b, v7.16b
b L(ipred_cfl_ac_420_w16_hpad)
L(ipred_cfl_ac_444_w32):
AARCH64_VALID_JUMP_TARGET
movrel x7, ipred_cfl_ac_444_w32_tbl
lsr w3, w3, #1
ldrsw x3, [x7, w3, uxtw #2]
add x7, x7, x3
br x7
L(ipred_cfl_ac_444_w32_wpad0):
AARCH64_VALID_JUMP_TARGET
1: // Copy and expand input, without padding
ld1 {v2.16b, v3.16b}, [x1], x2
ld1 {v6.16b, v7.16b}, [x10], x2
ushll v0.8h, v2.8b, #3
ushll2 v1.8h, v2.16b, #3
ushll v2.8h, v3.8b, #3
ushll2 v3.8h, v3.16b, #3
ushll v4.8h, v6.8b, #3
ushll2 v5.8h, v6.16b, #3
ushll v6.8h, v7.8b, #3
ushll2 v7.8h, v7.16b, #3
subs w8, w8, #2
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
add v16.8h, v16.8h, v0.8h
add v17.8h, v17.8h, v1.8h
add v18.8h, v18.8h, v2.8h
add v19.8h, v19.8h, v3.8h
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x0], #64
add v16.8h, v16.8h, v4.8h
add v17.8h, v17.8h, v5.8h
add v18.8h, v18.8h, v6.8h
add v19.8h, v19.8h, v7.8h
b.gt 1b
b L(ipred_cfl_ac_444_w32_hpad)
L(ipred_cfl_ac_444_w32_wpad2):
AARCH64_VALID_JUMP_TARGET
1: // Copy and expand input, padding 8
ldr d2, [x1, #16]
ld1 {v1.16b}, [x1], x2
ldr d6, [x10, #16]
ld1 {v5.16b}, [x10], x2
ushll v2.8h, v2.8b, #3
ushll v0.8h, v1.8b, #3
ushll2 v1.8h, v1.16b, #3
ushll v6.8h, v6.8b, #3
ushll v4.8h, v5.8b, #3
ushll2 v5.8h, v5.16b, #3
dup v3.8h, v2.h[7]
dup v7.8h, v6.h[7]
subs w8, w8, #2
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
add v16.8h, v16.8h, v0.8h
add v17.8h, v17.8h, v1.8h
add v18.8h, v18.8h, v2.8h
add v19.8h, v19.8h, v3.8h
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x0], #64
add v16.8h, v16.8h, v4.8h
add v17.8h, v17.8h, v5.8h
add v18.8h, v18.8h, v6.8h
add v19.8h, v19.8h, v7.8h
b.gt 1b
b L(ipred_cfl_ac_444_w32_hpad)
L(ipred_cfl_ac_444_w32_wpad4):
AARCH64_VALID_JUMP_TARGET
1: // Copy and expand input, padding 16
ld1 {v1.16b}, [x1], x2
ld1 {v5.16b}, [x10], x2
ushll v0.8h, v1.8b, #3
ushll2 v1.8h, v1.16b, #3
ushll v4.8h, v5.8b, #3
ushll2 v5.8h, v5.16b, #3
dup v2.8h, v1.h[7]
dup v3.8h, v1.h[7]
dup v6.8h, v5.h[7]
dup v7.8h, v5.h[7]
subs w8, w8, #2
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
add v16.8h, v16.8h, v0.8h
add v17.8h, v17.8h, v1.8h
add v18.8h, v18.8h, v2.8h
add v19.8h, v19.8h, v3.8h
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x0], #64
add v16.8h, v16.8h, v4.8h
add v17.8h, v17.8h, v5.8h
add v18.8h, v18.8h, v6.8h
add v19.8h, v19.8h, v7.8h
b.gt 1b
b L(ipred_cfl_ac_444_w32_hpad)
L(ipred_cfl_ac_444_w32_wpad6):
AARCH64_VALID_JUMP_TARGET
1: // Copy and expand input, padding 24
ld1 {v0.8b}, [x1], x2
ld1 {v4.8b}, [x10], x2
ushll v0.8h, v0.8b, #3
ushll v4.8h, v4.8b, #3
dup v1.8h, v0.h[7]
dup v2.8h, v0.h[7]
dup v3.8h, v0.h[7]
dup v5.8h, v4.h[7]
dup v6.8h, v4.h[7]
dup v7.8h, v4.h[7]
subs w8, w8, #2
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], #64
add v16.8h, v16.8h, v0.8h
add v17.8h, v17.8h, v1.8h
add v18.8h, v18.8h, v2.8h
add v19.8h, v19.8h, v3.8h
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x0], #64
add v16.8h, v16.8h, v4.8h
add v17.8h, v17.8h, v5.8h
add v18.8h, v18.8h, v6.8h
add v19.8h, v19.8h, v7.8h
b.gt 1b
L(ipred_cfl_ac_444_w32_hpad):
cbz w4, 3f
2: // Vertical padding (h_pad > 0)
subs w4, w4, #2
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x0], #64
add v16.8h, v16.8h, v4.8h
add v17.8h, v17.8h, v5.8h
add v18.8h, v18.8h, v6.8h
add v19.8h, v19.8h, v7.8h
st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x0], #64
add v16.8h, v16.8h, v4.8h
add v17.8h, v17.8h, v5.8h
add v18.8h, v18.8h, v6.8h
add v19.8h, v19.8h, v7.8h
b.gt 2b
3:
// Quadruple the height and reuse the w8 subtracting
lsl w6, w6, #2
// Aggregate the sums, with wider intermediates earlier than in
// ipred_cfl_ac_420_w8_calc_subtract_dc.
uaddlp v0.4s, v16.8h
uaddlp v1.4s, v17.8h
uaddlp v2.4s, v18.8h
uaddlp v3.4s, v19.8h
add v0.4s, v0.4s, v1.4s
add v2.4s, v2.4s, v3.4s
add v0.4s, v0.4s, v2.4s
addv s0, v0.4s // sum
sub x0, x0, w6, uxtw #4
urshl v4.2s, v0.2s, v31.2s // (sum + (1 << (log2sz - 1))) >>= log2sz
dup v4.8h, v4.h[0]
b L(ipred_cfl_ac_420_w8_subtract_dc)
endfunc
jumptable ipred_cfl_ac_444_tbl
.word L(ipred_cfl_ac_444_w32) - ipred_cfl_ac_444_tbl
.word L(ipred_cfl_ac_444_w16) - ipred_cfl_ac_444_tbl
.word L(ipred_cfl_ac_444_w8) - ipred_cfl_ac_444_tbl
.word L(ipred_cfl_ac_444_w4) - ipred_cfl_ac_444_tbl
endjumptable
jumptable ipred_cfl_ac_444_w32_tbl
.word L(ipred_cfl_ac_444_w32_wpad0) - ipred_cfl_ac_444_w32_tbl
.word L(ipred_cfl_ac_444_w32_wpad2) - ipred_cfl_ac_444_w32_tbl
.word L(ipred_cfl_ac_444_w32_wpad4) - ipred_cfl_ac_444_w32_tbl
.word L(ipred_cfl_ac_444_w32_wpad6) - ipred_cfl_ac_444_w32_tbl
endjumptable
// third_party/dav1d/src/arm/64/cdef16.S
/*
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2020, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm.S"
#include "util.S"
#include "cdef_tmpl.S"
.macro pad_top_bot_16 s1, s2, w, stride, reg, ret
tst w7, #1 // CDEF_HAVE_LEFT
b.eq 2f
// CDEF_HAVE_LEFT
sub \s1, \s1, #4
sub \s2, \s2, #4
tst w7, #2 // CDEF_HAVE_RIGHT
b.eq 1f
// CDEF_HAVE_LEFT+CDEF_HAVE_RIGHT
ldr \reg\()0, [\s1]
ldr d1, [\s1, #2*\w]
ldr \reg\()2, [\s2]
ldr d3, [\s2, #2*\w]
str \reg\()0, [x0]
str d1, [x0, #2*\w]
add x0, x0, #2*\stride
str \reg\()2, [x0]
str d3, [x0, #2*\w]
.if \ret
ret
.else
add x0, x0, #2*\stride
b 3f
.endif
1:
// CDEF_HAVE_LEFT+!CDEF_HAVE_RIGHT
ldr \reg\()0, [\s1]
ldr s1, [\s1, #2*\w]
ldr \reg\()2, [\s2]
ldr s3, [\s2, #2*\w]
str \reg\()0, [x0]
str s1, [x0, #2*\w]
str s31, [x0, #2*\w+4]
add x0, x0, #2*\stride
str \reg\()2, [x0]
str s3, [x0, #2*\w]
str s31, [x0, #2*\w+4]
.if \ret
ret
.else
add x0, x0, #2*\stride
b 3f
.endif
2:
// !CDEF_HAVE_LEFT
tst w7, #2 // CDEF_HAVE_RIGHT
b.eq 1f
// !CDEF_HAVE_LEFT+CDEF_HAVE_RIGHT
ldr \reg\()0, [\s1]
ldr s1, [\s1, #2*\w]
ldr \reg\()2, [\s2]
ldr s3, [\s2, #2*\w]
str s31, [x0]
stur \reg\()0, [x0, #4]
str s1, [x0, #4+2*\w]
add x0, x0, #2*\stride
str s31, [x0]
stur \reg\()2, [x0, #4]
str s3, [x0, #4+2*\w]
.if \ret
ret
.else
add x0, x0, #2*\stride
b 3f
.endif
1:
// !CDEF_HAVE_LEFT+!CDEF_HAVE_RIGHT
ldr \reg\()0, [\s1]
ldr \reg\()1, [\s2]
str s31, [x0]
stur \reg\()0, [x0, #4]
str s31, [x0, #4+2*\w]
add x0, x0, #2*\stride
str s31, [x0]
stur \reg\()1, [x0, #4]
str s31, [x0, #4+2*\w]
.if \ret
ret
.else
add x0, x0, #2*\stride
.endif
3:
.endm
.macro load_n_incr_16 dst, src, incr, w
.if \w == 4
ld1 {\dst\().4h}, [\src], \incr
.else
ld1 {\dst\().8h}, [\src], \incr
.endif
.endm
// void dav1d_cdef_paddingX_16bpc_neon(uint16_t *tmp, const pixel *src,
// ptrdiff_t src_stride, const pixel (*left)[2],
// const pixel *const top,
// const pixel *const bottom, int h,
// enum CdefEdgeFlags edges);
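// Rough idea (illustrative sketch, not the full routine): the padding functions
// build a (w+4) x (h+4) uint16_t block around the w x h source, taking border
// rows/columns from top/bottom/left/src where the corresponding CDEF_HAVE_*
// bit is set in `edges` (LEFT=1, RIGHT=2, TOP=4, BOTTOM=8, matching the tst
// instructions below), and writing the sentinel 0x8000 where it is not, so the
// filter can detect missing pixels. Only the missing-top case is sketched, and
// the tmp origin/indexing convention here is an assumption for clarity.
/*
#include <stdint.h>
#include <stddef.h>

static void cdef_pad_top_missing_sketch(uint16_t *tmp, ptrdiff_t tmp_stride,
                                        int w, unsigned edges)
{
    if (!(edges & 4))                     // !CDEF_HAVE_TOP
        for (int y = -2; y < 0; y++)
            for (int x = -2; x < w + 2; x++)
                tmp[y * tmp_stride + x] = 0x8000; // movi v30.8h, #0x80, lsl #8
    // The real code handles the other three edges the same way and copies
    // available pixels from src/left/top/bottom otherwise.
}
*/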
.macro padding_func_16 w, stride, reg
function cdef_padding\w\()_16bpc_neon, export=1
movi v30.8h, #0x80, lsl #8
mov v31.16b, v30.16b
sub x0, x0, #2*(2*\stride+2)
tst w7, #4 // CDEF_HAVE_TOP
b.ne 1f
// !CDEF_HAVE_TOP
st1 {v30.8h, v31.8h}, [x0], #32
.if \w == 8
st1 {v30.8h, v31.8h}, [x0], #32
.endif
b 3f
1:
// CDEF_HAVE_TOP
add x9, x4, x2
pad_top_bot_16 x4, x9, \w, \stride, \reg, 0
// Middle section
3:
tst w7, #1 // CDEF_HAVE_LEFT
b.eq 2f
// CDEF_HAVE_LEFT
tst w7, #2 // CDEF_HAVE_RIGHT
b.eq 1f
// CDEF_HAVE_LEFT+CDEF_HAVE_RIGHT
0:
ld1 {v0.s}[0], [x3], #4
ldr s2, [x1, #2*\w]
load_n_incr_16 v1, x1, x2, \w
subs w6, w6, #1
str s0, [x0]
stur \reg\()1, [x0, #4]
str s2, [x0, #4+2*\w]
add x0, x0, #2*\stride
b.gt 0b
b 3f
1:
// CDEF_HAVE_LEFT+!CDEF_HAVE_RIGHT
ld1 {v0.s}[0], [x3], #4
load_n_incr_16 v1, x1, x2, \w
subs w6, w6, #1
str s0, [x0]
stur \reg\()1, [x0, #4]
str s31, [x0, #4+2*\w]
add x0, x0, #2*\stride
b.gt 1b
b 3f
2:
tst w7, #2 // CDEF_HAVE_RIGHT
b.eq 1f
// !CDEF_HAVE_LEFT+CDEF_HAVE_RIGHT
0:
ldr s1, [x1, #2*\w]
load_n_incr_16 v0, x1, x2, \w
subs w6, w6, #1
str s31, [x0]
stur \reg\()0, [x0, #4]
str s1, [x0, #4+2*\w]
add x0, x0, #2*\stride
b.gt 0b
b 3f
1:
// !CDEF_HAVE_LEFT+!CDEF_HAVE_RIGHT
load_n_incr_16 v0, x1, x2, \w
subs w6, w6, #1
str s31, [x0]
stur \reg\()0, [x0, #4]
str s31, [x0, #4+2*\w]
add x0, x0, #2*\stride
b.gt 1b
3:
tst w7, #8 // CDEF_HAVE_BOTTOM
b.ne 1f
// !CDEF_HAVE_BOTTOM
st1 {v30.8h, v31.8h}, [x0], #32
.if \w == 8
st1 {v30.8h, v31.8h}, [x0], #32
.endif
ret
1:
// CDEF_HAVE_BOTTOM
add x9, x5, x2
pad_top_bot_16 x5, x9, \w, \stride, \reg, 1
endfunc
.endm
padding_func_16 8, 16, q
padding_func_16 4, 8, d
tables
filter 8, 16
filter 4, 16
find_dir 16
// third_party/dav1d/src/arm/64/refmvs.S
/*
* Copyright © 2021, VideoLAN and dav1d authors
* Copyright © 2021, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm-offsets.h"
#include "src/arm/asm.S"
#include "util.S"
#define INVALID_MV 0x80008000
// void dav1d_splat_mv_neon(refmvs_block **rr, const refmvs_block *rmv,
// int bx4, int bw4, int bh4)
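// Scalar sketch mirroring the dav1d C reference (refmvs_block comes from
// dav1d's refmvs.h; shown here only for orientation): write *rmv into bw4
// consecutive 12-byte refmvs_block entries on each of bh4 rows. The NEON code
// builds a 48-byte repeating pattern in v0..v2 so it can store whole rows in
// large chunks.
/*
static void splat_mv_sketch(refmvs_block **rr, const refmvs_block *rmv,
                            int bx4, int bw4, int bh4)
{
    do {
        refmvs_block *r = *rr++ + bx4;
        for (int x = 0; x < bw4; x++)
            r[x] = *rmv;
    } while (--bh4);
}
*/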
function splat_mv_neon, export=1
ld1 {v3.16b}, [x1]
clz w3, w3
movrel x5, splat_tbl
sub w3, w3, #26
ext v2.16b, v3.16b, v3.16b, #12
ldrsw x3, [x5, w3, uxtw #2]
add w2, w2, w2, lsl #1
ext v0.16b, v2.16b, v3.16b, #4
add x3, x5, x3
ext v1.16b, v2.16b, v3.16b, #8
lsl w2, w2, #2
ext v2.16b, v2.16b, v3.16b, #12
1:
ldr x1, [x0], #8
subs w4, w4, #1
add x1, x1, x2
br x3
10:
AARCH64_VALID_JUMP_TARGET
st1 {v0.8b}, [x1]
str s2, [x1, #8]
b.gt 1b
ret
20:
AARCH64_VALID_JUMP_TARGET
st1 {v0.16b}, [x1]
str d1, [x1, #16]
b.gt 1b
ret
320:
AARCH64_VALID_JUMP_TARGET
st1 {v0.16b, v1.16b, v2.16b}, [x1], #48
st1 {v0.16b, v1.16b, v2.16b}, [x1], #48
st1 {v0.16b, v1.16b, v2.16b}, [x1], #48
st1 {v0.16b, v1.16b, v2.16b}, [x1], #48
160:
AARCH64_VALID_JUMP_TARGET
st1 {v0.16b, v1.16b, v2.16b}, [x1], #48
st1 {v0.16b, v1.16b, v2.16b}, [x1], #48
80:
AARCH64_VALID_JUMP_TARGET
st1 {v0.16b, v1.16b, v2.16b}, [x1], #48
40:
AARCH64_VALID_JUMP_TARGET
st1 {v0.16b, v1.16b, v2.16b}, [x1]
b.gt 1b
ret
endfunc
jumptable splat_tbl
.word 320b - splat_tbl
.word 160b - splat_tbl
.word 80b - splat_tbl
.word 40b - splat_tbl
.word 20b - splat_tbl
.word 10b - splat_tbl
endjumptable
const mv_tbls, align=4
.byte 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255
.byte 0, 1, 2, 3, 8, 0, 1, 2, 3, 8, 0, 1, 2, 3, 8, 0
.byte 4, 5, 6, 7, 9, 4, 5, 6, 7, 9, 4, 5, 6, 7, 9, 4
.byte 4, 5, 6, 7, 9, 4, 5, 6, 7, 9, 4, 5, 6, 7, 9, 4
endconst
const mask_mult, align=4
.byte 1, 2, 1, 2, 0, 0, 0, 0
endconst
// void dav1d_save_tmvs_neon(refmvs_temporal_block *rp, ptrdiff_t stride,
// refmvs_block **rr, const uint8_t *ref_sign,
// int col_end8, int row_end8,
// int col_start8, int row_start8)
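// Rough scalar outline (simplified; standard headers omitted, the refmvs_*
// types and field names follow dav1d's refmvs.h, and the real bw8 comes from
// the block-size table indexed by cand->bs rather than the placeholder used
// here): for each 8x8 unit, keep the second mv if its reference has ref_sign
// set and both components are below 4096, else the first, else store a zeroed
// (invalid) entry, repeated bw8 times.
/*
static void save_tmvs_sketch(refmvs_temporal_block *rp, ptrdiff_t stride,
                             refmvs_block *const *rr, const uint8_t *ref_sign,
                             int col_end8, int row_end8,
                             int col_start8, int row_start8)
{
    for (int y = row_start8; y < row_end8; y++, rp += stride) {
        const refmvs_block *b = rr[(y & 15) * 2];
        for (int x = col_start8; x < col_end8;) {
            const refmvs_block *cand = &b[x * 2 + 1];
            const int bw8 = 1;                   // placeholder, see note above
            refmvs_temporal_block out = {0};     // ref == 0 marks it invalid
            for (int n = 1; n >= 0; n--)         // prefer mv[1] over mv[0]
                if (cand->ref.ref[n] > 0 && ref_sign[cand->ref.ref[n] - 1] &&
                    abs(cand->mv.mv[n].y) < 4096 &&
                    abs(cand->mv.mv[n].x) < 4096) {
                    out.mv = cand->mv.mv[n];
                    out.ref = cand->ref.ref[n];
                    break;
                }
            for (int n = 0; n < bw8; n++, x++)
                rp[x] = out;
        }
    }
}
*/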
function save_tmvs_neon, export=1
AARCH64_SIGN_LINK_REGISTER
stp x29, x30, [sp, #-16]!
mov x29, sp
movi v30.8b, #0
ld1 {v31.8b}, [x3]
movrel x8, save_tmvs_tbl
movrel x16, mask_mult
movrel x13, mv_tbls
ld1 {v29.8b}, [x16]
ext v31.8b, v30.8b, v31.8b, #7 // [0, ref_sign]
mov w15, #5
mov w14, #12*2
sxtw x4, w4
sxtw x6, w6
mul w1, w1, w15 // stride *= 5
sub w5, w5, w7 // h = row_end8 - row_start8
lsl w7, w7, #1 // row_start8 <<= 1
1:
mov w15, #5
and w9, w7, #30 // (y & 15) * 2
ldr x9, [x2, w9, uxtw #3] // b = rr[(y & 15) * 2]
add x9, x9, #12 // &b[... + 1]
madd x10, x4, x14, x9 // end_cand_b = &b[col_end8*2 + 1]
madd x9, x6, x14, x9 // cand_b = &b[x*2 + 1]
madd x3, x6, x15, x0 // &rp[x]
2:
ldrb w11, [x9, #10] // cand_b->bs
ld1 {v0.16b}, [x9] // cand_b->mv
add x11, x8, w11, uxtw #3
ldr h1, [x9, #8] // cand_b->ref
ldr w12, [x11] // bw8
mov x15, x8
add x9, x9, w12, uxtw #1 // cand_b += bw8*2
cmp x9, x10
mov v2.8b, v0.8b
b.ge 3f
ldrb w15, [x9, #10] // cand_b->bs
add x16, x9, #8
ld1 {v4.16b}, [x9] // cand_b->mv
add x15, x8, w15, uxtw #3
ld1 {v1.h}[1], [x16] // cand_b->ref
ldr w12, [x15] // bw8
add x9, x9, w12, uxtw #1 // cand_b += bw8*2
trn1 v2.2d, v0.2d, v4.2d
3:
abs v2.8h, v2.8h // abs(mv[].xy)
tbl v1.8b, {v31.16b}, v1.8b // ref_sign[ref]
ushr v2.8h, v2.8h, #12 // abs(mv[].xy) >> 12
umull v1.8h, v1.8b, v29.8b // ref_sign[ref] * {1, 2}
cmeq v2.4s, v2.4s, #0 // abs(mv[].xy) <= 4096
xtn v2.4h, v2.4s // abs() condition to 16 bit
and v1.8b, v1.8b, v2.8b // h[0-3] contains conditions for mv[0-1]
addp v1.4h, v1.4h, v1.4h // Combine condition for [1] and [0]
umov w16, v1.h[0] // Extract case for first block
umov w17, v1.h[1]
ldrsw x11, [x11, #4] // Fetch jump table entry
ldrsw x15, [x15, #4]
ldr q1, [x13, w16, uxtw #4] // Load permutation table based on the case
ldr q5, [x13, w17, uxtw #4]
add x11, x8, x11 // Find jump table target
add x15, x8, x15
tbl v0.16b, {v0.16b}, v1.16b // Permute cand_b to output refmvs_temporal_block
tbl v4.16b, {v4.16b}, v5.16b
// v1 follows on v0, with another 3 full repetitions of the pattern.
ext v1.16b, v0.16b, v0.16b, #1
ext v5.16b, v4.16b, v4.16b, #1
// v2 ends with 3 complete repetitions of the pattern.
ext v2.16b, v0.16b, v1.16b, #4
ext v6.16b, v4.16b, v5.16b, #4
blr x11
b.ge 4f // if (cand_b >= end)
mov v0.16b, v4.16b
mov v1.16b, v5.16b
mov v2.16b, v6.16b
cmp x9, x10
blr x15
b.lt 2b // if (cand_b < end)
4:
subs w5, w5, #1 // h--
add w7, w7, #2 // y += 2
add x0, x0, x1 // rp += stride
b.gt 1b
ldp x29, x30, [sp], #16
AARCH64_VALIDATE_LINK_REGISTER
ret
10:
AARCH64_VALID_CALL_TARGET
add x16, x3, #4
st1 {v0.s}[0], [x3]
st1 {v0.b}[4], [x16]
add x3, x3, #5
ret
20:
AARCH64_VALID_CALL_TARGET
add x16, x3, #8
st1 {v0.d}[0], [x3]
st1 {v0.h}[4], [x16]
add x3, x3, #2*5
ret
40:
AARCH64_VALID_CALL_TARGET
st1 {v0.16b}, [x3]
str s1, [x3, #16]
add x3, x3, #4*5
ret
80:
AARCH64_VALID_CALL_TARGET
// This writes 6 full entries plus 2 extra bytes
st1 {v0.16b, v1.16b}, [x3]
// Write the last few, overlapping with the first write.
stur q2, [x3, #(8*5-16)]
add x3, x3, #8*5
ret
160:
AARCH64_VALID_CALL_TARGET
add x16, x3, #6*5
add x17, x3, #12*5
// This writes 6 full entries plus 2 extra bytes
st1 {v0.16b, v1.16b}, [x3]
// Write another 6 full entries, slightly overlapping with the first set
st1 {v0.16b, v1.16b}, [x16]
// Write 8 bytes (one full entry) after the first 12
st1 {v0.8b}, [x17]
// Write the last 3 entries
str q2, [x3, #(16*5-16)]
add x3, x3, #16*5
ret
endfunc
jumptable save_tmvs_tbl
.word 16 * 12
.word 160b - save_tmvs_tbl
.word 16 * 12
.word 160b - save_tmvs_tbl
.word 8 * 12
.word 80b - save_tmvs_tbl
.word 8 * 12
.word 80b - save_tmvs_tbl
.word 8 * 12
.word 80b - save_tmvs_tbl
.word 8 * 12
.word 80b - save_tmvs_tbl
.word 4 * 12
.word 40b - save_tmvs_tbl
.word 4 * 12
.word 40b - save_tmvs_tbl
.word 4 * 12
.word 40b - save_tmvs_tbl
.word 4 * 12
.word 40b - save_tmvs_tbl
.word 2 * 12
.word 20b - save_tmvs_tbl
.word 2 * 12
.word 20b - save_tmvs_tbl
.word 2 * 12
.word 20b - save_tmvs_tbl
.word 2 * 12
.word 20b - save_tmvs_tbl
.word 2 * 12
.word 20b - save_tmvs_tbl
.word 1 * 12
.word 10b - save_tmvs_tbl
.word 1 * 12
.word 10b - save_tmvs_tbl
.word 1 * 12
.word 10b - save_tmvs_tbl
.word 1 * 12
.word 10b - save_tmvs_tbl
.word 1 * 12
.word 10b - save_tmvs_tbl
.word 1 * 12
.word 10b - save_tmvs_tbl
.word 1 * 12
.word 10b - save_tmvs_tbl
endjumptable
// void dav1d_load_tmvs_neon(const refmvs_frame *const rf, int tile_row_idx,
// const int col_start8, const int col_end8,
// const int row_start8, int row_end8)
function load_tmvs_neon, export=1
rf .req x0
tile_row_idx .req w1
col_start8 .req w2
col_end8 .req w3
row_start8 .req w4
row_end8 .req w5
col_start8i .req w6
col_end8i .req w7
rp_proj .req x8
stride5 .req x9
wstride5 .req w9
stp x28, x27, [sp, #-96]!
stp x26, x25, [sp, #16]
stp x24, x23, [sp, #32]
stp x22, x21, [sp, #48]
stp x20, x19, [sp, #64]
stp x29, x30, [sp, #80]
ldr w15, [rf, #RMVSF_N_TILE_THREADS]
ldp w16, w17, [rf, #RMVSF_IW8] // also loads rf->ih8
sub col_start8i, col_start8, #8 // col_start8 - 8
add col_end8i, col_end8, #8 // col_end8 + 8
ldr wstride5, [rf, #RMVSF_RP_STRIDE]
ldr rp_proj, [rf, #RMVSF_RP_PROJ]
cmp w15, #1
csel tile_row_idx, wzr, tile_row_idx, eq // if (rf->n_tile_threads == 1) tile_row_idx = 0
bic col_start8i, col_start8i, col_start8i, asr #31 // imax(col_start8 - 8, 0)
cmp col_end8i, w16
csel col_end8i, col_end8i, w16, lt // imin(col_end8 + 8, rf->iw8)
lsl tile_row_idx, tile_row_idx, #4 // 16 * tile_row_idx
cmp row_end8, w17
csel row_end8, row_end8, w17, lt // imin(row_end8, rf->ih8)
add wstride5, wstride5, wstride5, lsl #2 // stride * sizeof(refmvs_temporal_block)
and w15, row_start8, #15 // row_start8 & 15
add w10, col_start8, col_start8, lsl #2 // col_start8 * sizeof(refmvs_temporal_block)
smaddl rp_proj, tile_row_idx, wstride5, rp_proj // &rf->rp_proj[16 * stride * tile_row_idx]
smaddl x10, w15, wstride5, x10 // ((row_start8 & 15) * stride + col_start8) * sizeof(refmvs_temporal_block)
mov w15, #INVALID_MV
sub w11, col_end8, col_start8 // xfill loop count
add x10, x10, rp_proj // &rf->rp_proj[16 * stride * tile_row_idx + (row_start8 & 15) * stride + col_start8]
add x15, x15, x15, lsl #40 // first 64b of 4 [INVALID_MV, 0]... patterns
mov w17, #(INVALID_MV >> 8) // last 32b of 4 patterns
sub w12, row_end8, row_start8 // yfill loop count
ror x16, x15, #48 // second 64b of 4 patterns
ldr w19, [rf, #RMVSF_N_MFMVS]
5: // yfill loop
and w13, w11, #-4 // xfill 4x count by patterns
mov x14, x10 // fill_ptr = row_ptr
add x10, x10, stride5 // row_ptr += stride
sub w12, w12, #1 // y--
cbz w13, 3f
4: // xfill loop 4x
sub w13, w13, #4 // xfill 4x count -= 4
stp x15, x16, [x14]
str w17, [x14, #16]
add x14, x14, #20 // fill_ptr += 4 * sizeof(refmvs_temporal_block)
cbnz w13, 4b
3: // up to 3 residuals
tbz w11, #1, 1f
str x15, [x14]
strh w16, [x14, #8]
add x14, x14, #10 // fill_ptr += 2 * sizeof(refmvs_temporal_block)
1: // up to 1 residual
tbz w11, #0, 2f
str w15, [x14]
2:
cbnz w12, 5b // yfill loop
cbz w19, 11f // if (!rf->n_mfmvs) skip nloop
add x29, rf, #RMVSF_MFMV_REF2CUR
mov w10, #0 // n = 0
movi v3.2s, #255 // 0x3FFF >> 6, for MV clamp
movrel x1, div_mult_tbl
10: // nloop
ldr w16, [x29, x10, lsl #2] // ref2cur = rf->mfmv_ref2cur[n]
cmp w16, #-32 // instead of INT_MIN, we can use smaller constants
b.lt 9f // if (ref2cur == INT_MIN) continue
add x17, x10, #(RMVSF_MFMV_REF - RMVSF_MFMV_REF2CUR) // n - (&rf->mfmv_ref - &rf->mfmv_ref2cur)
mov x20, #4
ldrb w17, [x29, x17] // ref = rf->mfmv_ref[n]
ldr x13, [x29, #(RMVSF_RP_REF - RMVSF_MFMV_REF2CUR)]
mov w28, #28 // 7 * sizeof(int)
smaddl x20, row_start8, wstride5, x20 // row_start8 * stride * sizeof(refmvs_temporal_block) + 4
mov w12, row_start8 // y = row_start8
add x21, x29, #(RMVSF_MFMV_REF2REF - RMVSF_MFMV_REF2CUR - 4) // &rf->mfmv_ref2ref - 1
ldr x13, [x13, x17, lsl #3] // rf->rp_ref[ref]
smaddl x28, w28, w10, x21 // rf->mfmv_ref2ref[n] - 1
sub w17, w17, #4 // ref_sign = ref - 4
add x13, x13, x20 // r = &rf->rp_ref[ref][row_start8 * stride].ref
dup v0.2s, w17 // ref_sign
5: // yloop
and w14, w12, #-8 // y_sb_align = y & ~7
mov w11, col_start8i // x = col_start8i
add w15, w14, #8 // y_sb_align + 8
cmp w14, row_start8
csel w14, w14, row_start8, gt // imax(y_sb_align, row_start8)
cmp w15, row_end8
csel w15, w15, row_end8, lt // imin(y_sb_align + 8, row_end8)
4: // xloop
add x23, x13, x11, lsl #2 // partial &r[x] address
ldrb w22, [x23, x11] // b_ref = rb->ref
cbz w22, 6f // if (!b_ref) continue
ldr w24, [x28, x22, lsl #2] // ref2ref = rf->mfmv_ref2ref[n][b_ref - 1]
cbz w24, 6f // if (!ref2ref) continue
ldrh w20, [x1, x24, lsl #1] // div_mult[ref2ref]
add x23, x23, x11 // &r[x]
mul w20, w20, w16 // frac = ref2cur * div_mult[ref2ref]
ldur s1, [x23, #-4] // mv{y, x} = rb->mv
fmov s2, w20 // frac
sxtl v1.4s, v1.4h
mul v1.2s, v1.2s, v2.s[0] // offset{y, x} = frac * mv{y, x}
ssra v1.2s, v1.2s, #31 // offset{y, x} + (offset{y, x} >> 31)
ldur w25, [x23, #-4] // b_mv = rb->mv
srshr v1.2s, v1.2s, #14 // (offset{y, x} + (offset{y, x} >> 31) + 8192) >> 14
abs v2.2s, v1.2s // abs(offset{y, x})
eor v1.8b, v1.8b, v0.8b // offset{y, x} ^ ref_sign
sshr v2.2s, v2.2s, #6 // abs(offset{y, x}) >> 6
cmlt v1.2s, v1.2s, #0 // sign(offset{y, x} ^ ref_sign): -1 or 0
umin v2.2s, v2.2s, v3.2s // iclip(abs(offset{y, x}) >> 6, 0, 0x3FFF >> 6)
neg v4.2s, v2.2s
bsl v1.8b, v4.8b, v2.8b // apply_sign(iclip(abs(offset{y, x}) >> 6, 0, 0x3FFF >> 6))
fmov x20, d1 // offset{y, x}
add w21, w12, w20 // pos_y = y + offset.y
cmp w21, w14 // pos_y >= y_proj_start
b.lt 1f
cmp w21, w15 // pos_y < y_proj_end
b.ge 1f
add x26, x11, x20, asr #32 // pos_x = x + offset.x
and w27, w21, #15 // pos_y & 15
add x21, x26, x26, lsl #2 // pos_x * sizeof(refmvs_temporal_block)
umaddl x27, w27, wstride5, rp_proj // &rp_proj[(pos_y & 15) * stride]
add x27, x27, x21 // &rp_proj[(pos_y & 15) * stride + pos_x]
3: // copy loop
and w20, w11, #-8 // x_sb_align = x & ~7
sub w21, w20, #8 // x_sb_align - 8
cmp w21, col_start8
csel w21, w21, col_start8, gt // imax(x_sb_align - 8, col_start8)
cmp w26, w21 // pos_x >= imax(x_sb_align - 8, col_start8)
b.lt 2f
add w20, w20, #16 // x_sb_align + 16
cmp w20, col_end8
csel w20, w20, col_end8, lt // imin(x_sb_align + 16, col_end8)
cmp w26, w20 // pos_x < imin(x_sb_align + 16, col_end8)
b.ge 2f
str w25, [x27] // rp_proj[pos + pos_x].mv = rb->mv (b_mv)
strb w24, [x27, #4] // rp_proj[pos + pos_x].ref = ref2ref
2: // search part of copy loop
add w11, w11, #1 // x++
cmp w11, col_end8i // if (++x >= col_end8i) break xloop
b.ge 8f
ldrb w20, [x23, #5]! // rb++; rb->ref
cmp w20, w22 // if (rb->ref != b_ref) break
b.ne 7f
ldur w21, [x23, #-4] // rb->mv.n
cmp w21, w25 // if (rb->mv.n != b_mv.n) break
b.ne 7f
add w26, w26, #1 // pos_x++
add x27, x27, #5 // advance &rp_proj[(pos_y & 15) * stride + pos_x]
b 3b // copy loop
1: // search loop
add w11, w11, #1 // x++
cmp w11, col_end8i // if (++x >= col_end8i) break xloop
b.ge 8f
ldrb w20, [x23, #5]! // rb++; rb->ref
cmp w20, w22 // if (rb->ref != b_ref) break
b.ne 7f
ldur w21, [x23, #-4] // rb->mv.n
cmp w21, w25 // if (rb->mv.n == b_mv.n) continue
b.eq 1b // search loop
7:
cmp w11, col_end8i // x < col_end8i
b.lt 4b // xloop
6: // continue case of xloop
add w11, w11, #1 // x++
cmp w11, col_end8i // x < col_end8i
b.lt 4b // xloop
8:
add w12, w12, #1 // y++
add x13, x13, stride5 // r += stride
cmp w12, row_end8 // y < row_end8
b.lt 5b // yloop
9:
add w10, w10, #1
cmp w10, w19 // n < rf->n_mfmvs
b.lt 10b // nloop
11:
ldp x29, x30, [sp, #80]
ldp x20, x19, [sp, #64]
ldp x22, x21, [sp, #48]
ldp x24, x23, [sp, #32]
ldp x26, x25, [sp, #16]
ldp x28, x27, [sp], #96
ret
.unreq rf
.unreq tile_row_idx
.unreq col_start8
.unreq col_end8
.unreq row_start8
.unreq row_end8
.unreq col_start8i
.unreq col_end8i
.unreq rp_proj
.unreq stride5
.unreq wstride5
endfunc
const div_mult_tbl
.hword 0, 16384, 8192, 5461, 4096, 3276, 2730, 2340
.hword 2048, 1820, 1638, 1489, 1365, 1260, 1170, 1092
.hword 1024, 963, 910, 862, 819, 780, 744, 712
.hword 682, 655, 630, 606, 585, 564, 546, 528
endconst
// third_party/dav1d/src/arm/64/msac.S
/*
* Copyright © 2019, VideoLAN and dav1d authors
* Copyright © 2019, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm.S"
#include "util.S"
#define BUF_POS 0
#define BUF_END 8
#define DIF 16
#define RNG 24
#define CNT 28
#define ALLOW_UPDATE_CDF 32
#define COEFFS_BASE_OFFSET 30
#define MASKS8_OFFSET (64-COEFFS_BASE_OFFSET)
const coeffs
.short 60, 56, 52, 48, 44, 40, 36, 32, 28, 24, 20, 16, 12, 8, 4, 0
.short 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
// masks8
.short -0x202, -0x202, -0x202, -0x202, -0x202, -0x202, -0x202, 0xF0E
endconst
.macro ld1_n d0, d1, src, sz, n
.if \n <= 8
ld1 {\d0\sz}, [\src]
.else
ld1 {\d0\sz, \d1\sz}, [\src]
.endif
.endm
.macro st1_n s0, s1, dst, sz, n
.if \n <= 8
st1 {\s0\sz}, [\dst]
.else
st1 {\s0\sz, \s1\sz}, [\dst]
.endif
.endm
.macro ushr_n d0, d1, s0, s1, shift, sz, n
ushr \d0\sz, \s0\sz, \shift
.if \n == 16
ushr \d1\sz, \s1\sz, \shift
.endif
.endm
.macro add_n d0, d1, s0, s1, s2, s3, sz, n
add \d0\sz, \s0\sz, \s2\sz
.if \n == 16
add \d1\sz, \s1\sz, \s3\sz
.endif
.endm
.macro sub_n d0, d1, s0, s1, s2, s3, sz, n
sub \d0\sz, \s0\sz, \s2\sz
.if \n == 16
sub \d1\sz, \s1\sz, \s3\sz
.endif
.endm
.macro and_n d0, d1, s0, s1, s2, s3, sz, n
and \d0\sz, \s0\sz, \s2\sz
.if \n == 16
and \d1\sz, \s1\sz, \s3\sz
.endif
.endm
.macro cmhs_n d0, d1, s0, s1, s2, s3, sz, n
cmhs \d0\sz, \s0\sz, \s2\sz
.if \n == 16
cmhs \d1\sz, \s1\sz, \s3\sz
.endif
.endm
.macro sshl_n d0, d1, s0, s1, s2, s3, sz, n
sshl \d0\sz, \s0\sz, \s2\sz
.if \n == 16
sshl \d1\sz, \s1\sz, \s3\sz
.endif
.endm
.macro sqdmulh_n d0, d1, s0, s1, s2, s3, sz, n
sqdmulh \d0\sz, \s0\sz, \s2\sz
.if \n == 16
sqdmulh \d1\sz, \s1\sz, \s3\sz
.endif
.endm
.macro str_n idx0, idx1, dstreg, dstoff, n
str \idx0, [\dstreg, \dstoff]
.if \n == 16
str \idx1, [\dstreg, \dstoff + 16]
.endif
.endm
// unsigned dav1d_msac_decode_symbol_adapt4_neon(MsacContext *s, uint16_t *cdf,
// size_t n_symbols);
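// Simplified scalar sketch (loosely after dav1d's C reference; EC_PROB_SHIFT = 6,
// EC_MIN_PROB = 4, 64-bit window; renormalisation and the refill path are
// omitted, and the MsacSketch struct is a stand-in for MsacContext): find the
// first cdf interval not above the top 16 bits of dif, then adapt the cdf.
// The vectorised code below replaces the search loop with one compare plus
// clz/addv.
/*
#include <stdint.h>
#include <stddef.h>

typedef struct { uint64_t dif; unsigned rng; int allow_update_cdf; } MsacSketch;

static unsigned msac_decode_symbol_adapt_sketch(MsacSketch *s, uint16_t *cdf,
                                                size_t n_symbols)
{
    const unsigned c = (unsigned)(s->dif >> 48), r = s->rng >> 8;
    unsigned u, v = s->rng, val = (unsigned)-1;
    do {                                      // interval search
        val++;
        u = v;
        v = (r * (cdf[val] >> 6) >> 1) + 4 * ((unsigned)n_symbols - val);
    } while (c < v);
    (void)u;  // u - v becomes the new rng, dif -= (uint64_t)v << 48 (renorm)
    if (s->allow_update_cdf) {                // update_cdf()
        const unsigned count = cdf[n_symbols];
        const unsigned rate = 4 + (count >> 4) + (n_symbols > 2);
        for (unsigned i = 0; i < n_symbols; i++)
            if (i < val) cdf[i] += (32768 - cdf[i]) >> rate;
            else         cdf[i] -= cdf[i] >> rate;
        cdf[n_symbols] = count + (count < 32);
    }
    return val;
}
*/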
function msac_decode_symbol_adapt4_neon, export=1
.macro decode_update sz, szb, n
.if \n == 16
sub sp, sp, #48
.endif
add x8, x0, #RNG
ld1_n v0, v1, x1, \sz, \n // cdf
ld1r {v29\sz}, [x8] // rng
movrel x9, coeffs, COEFFS_BASE_OFFSET
movi v31\sz, #0x7f, lsl #8 // 0x7f00
sub x10, x9, x2, lsl #1
mvni v30\sz, #0x3f // 0xffc0
and v7\szb, v29\szb, v31\szb // rng & 0x7f00
.if \n == 16
str h29, [sp, #14] // store original u = s->rng
.endif
and_n v2, v3, v0, v1, v30, v30, \szb, \n // cdf & 0xffc0
ld1_n v4, v5, x10, \sz, \n // EC_MIN_PROB * (n_symbols - ret)
sqdmulh_n v6, v7, v2, v3, v7, v7, \sz, \n // ((cdf >> EC_PROB_SHIFT) * (r - 128)) >> 1
ldr d28, [x0, #DIF]
add_n v4, v5, v2, v3, v4, v5, \sz, \n // v = cdf + EC_MIN_PROB * (n_symbols - ret)
add_n v4, v5, v6, v7, v4, v5, \sz, \n // v = ((cdf >> EC_PROB_SHIFT) * r) >> 1 + EC_MIN_PROB * (n_symbols - ret)
dup v30\sz, v28.h[3] // dif >> (EC_WIN_SIZE - 16)
.if \n == 8
ldur q31, [x9, #MASKS8_OFFSET]
.elseif \n == 16
str_n q4, q5, sp, #16, \n // store v values to allow indexed access
.endif
// Once the condition becomes true it stays true for the rest of the vector,
// so the compare result looks like: 0, 0, 0 ... -1, -1
cmhs_n v2, v3, v30, v30, v4, v5, \sz, \n // c >= v
.if \n == 4
ext v29\szb, v29\szb, v4\szb, #6 // u
umov x15, v2.d[0]
ldr w4, [x0, #ALLOW_UPDATE_CDF]
rev x15, x15
sub v29\sz, v29\sz, v4\sz // rng = u-v
// rev + clz = count trailing zeros
clz x15, x15 // 16*ret
.elseif \n == 8
// The final short of the compare is always set.
// Using addv, subtract -0x202*ret from this value to create a lookup table for a short.
// For n == 8:
// -0x202 + -0x202 + ... + 0xF0E
// (0x202*7) | (1 << 8)
// ^-------offset for second byte of the short
and v31\szb, v31\szb, v2\szb
ext v29\szb, v29\szb, v4\szb, #14 // u
addv h31, v31\sz // ((2*ret + 1) << 8) | (2*ret)
ldr w4, [x0, #ALLOW_UPDATE_CDF]
sub v30\sz, v30\sz, v4\sz // (dif >> 48) - v
smov w15, v31.b[0] // 2*ret
sub v29\sz, v29\sz, v4\sz // rng = u-v
.elseif \n == 16
add v6\sz, v2\sz, v3\sz
addv h31, v6\sz // -n + ret
ldr w4, [x0, #ALLOW_UPDATE_CDF]
smov w15, v31.h[0]
.endif
cbz w4, 0f
// update_cdf
ldrh w3, [x1, x2, lsl #1] // count = cdf[n_symbols]
.if \n == 16
// 16 case has a lower bound that guarantees n_symbols > 2
mov w4, #-5
.elseif \n == 8
mvn w14, w2
mov w4, #-4
cmn w14, #3 // set C if n_symbols <= 2
.else
// if n_symbols < 4 (or < 6 even) then
// (1 + n_symbols) >> 2 == n_symbols > 2
add w14, w2, #17 // (1 + n_symbols) + (4 << 2)
.endif
sub_n v16, v17, v0, v1, v2, v3, \sz, \n // cdf + (i >= val ? 1 : 0)
orr v2\sz, #0x80, lsl #8
.if \n == 16
orr v3\sz, #0x80, lsl #8
.endif
.if \n == 16
sub w4, w4, w3, lsr #4 // -((count >> 4) + 5)
.elseif \n == 8
lsr w14, w3, #4 // count >> 4
sbc w4, w4, w14 // -((count >> 4) + (n_symbols > 2) + 4)
.else
neg w4, w14, lsr #2 // -((n_symbols > 2) + 4)
sub w4, w4, w3, lsr #4 // -((count >> 4) + (n_symbols > 2) + 4)
.endif
sub_n v2, v3, v2, v3, v0, v1, \sz, \n // (32768 - cdf[i]) or (-1 - cdf[i])
dup v6\sz, w4 // -rate
sub w3, w3, w3, lsr #5 // count - (count == 32)
sshl_n v2, v3, v2, v3, v6, v6, \sz, \n // ({32768,-1} - cdf[i]) >> rate
add w3, w3, #1 // count + (count < 32)
add_n v0, v1, v16, v17, v2, v3, \sz, \n // cdf + (32768 - cdf[i]) >> rate
st1_n v0, v1, x1, \sz, \n
strh w3, [x1, x2, lsl #1]
0:
// renorm
.if \n == 4
ldr w6, [x0, #CNT]
ldr x7, [x0, #DIF]
mov x4, v29.d[0] // rng (packed)
mov x3, v4.d[0] // v (packed)
// Shift 'v'/'rng' for ret into the 16 least sig bits. There is
// garbage in the remaining bits, but we can work around this.
lsr x4, x4, x15 // rng
lsr x3, x3, x15 // v
lsl w5, w4, #16 // rng << 16
sub x7, x7, x3, lsl #48 // dif - (v << 48)
clz w5, w5 // d = clz(rng << 16)
lsl w4, w4, w5 // rng << d
subs w6, w6, w5 // cnt -= d
lsl x7, x7, x5 // (dif - (v << 48)) << d
strh w4, [x0, #RNG]
b.lo 1f
str w6, [x0, #CNT]
str x7, [x0, #DIF]
lsr w0, w15, #4
ret
1:
lsr w15, w15, #4
b L(refill)
.elseif \n == 8
ldr w6, [x0, #CNT]
tbl v30.8b, {v30.16b}, v31.8b
tbl v29.8b, {v29.16b}, v31.8b
ins v28.h[3], v30.h[0] // dif - (v << 48)
clz v0.4h, v29.4h // d = clz(rng)
umov w5, v0.h[0]
ushl v29.4h, v29.4h, v0.4h // rng << d
// The vec for clz(rng) is filled with garbage after the first short,
// but ushl/sshl conveniently uses only the first byte for the shift
// amount.
ushl d28, d28, d0 // (dif - (v << 48)) << d
subs w6, w6, w5 // cnt -= d
str h29, [x0, #RNG]
b.lo 1f
str w6, [x0, #CNT]
str d28, [x0, #DIF]
lsr w0, w15, #1 // ret
ret
1:
lsr w15, w15, #1 // ret
mov x7, v28.d[0]
b L(refill)
.elseif \n == 16
add x8, sp, w15, sxtw #1
ldrh w3, [x8, #48] // v
ldurh w4, [x8, #46] // u
ldr w6, [x0, #CNT]
ldr x7, [x0, #DIF]
sub w4, w4, w3 // rng = u - v
clz w5, w4 // clz(rng)
eor w5, w5, #16 // d = clz(rng) ^ 16
sub x7, x7, x3, lsl #48 // dif - (v << 48)
lsl w4, w4, w5 // rng << d
subs w6, w6, w5 // cnt -= d
lsl x7, x7, x5 // (dif - (v << 48)) << d
str w4, [x0, #RNG]
add sp, sp, #48
b.lo 1f
str w6, [x0, #CNT]
str x7, [x0, #DIF]
add w0, w15, #\n // ret
ret
1:
add w15, w15, #\n // ret
b L(refill)
.endif
.endm
decode_update .4h, .8b, 4
L(refill):
// refill
ldp x3, x4, [x0] // BUF_POS, BUF_END
add x5, x3, #8
subs x5, x5, x4
b.hi 6f
ldr x8, [x3] // next_bits
add w4, w6, #-48 // shift_bits = cnt + 16 (- 64)
mvn x8, x8
neg w5, w4
rev x8, x8 // next_bits = bswap(next_bits)
lsr w5, w5, #3 // num_bytes_read
lsr x8, x8, x4 // next_bits >>= (shift_bits & 63)
2: // refill_end
add x3, x3, x5
add w6, w6, w5, lsl #3 // cnt += num_bits_read
str x3, [x0, #BUF_POS]
3: // refill_end2
orr x7, x7, x8 // dif |= next_bits
4: // end
str w6, [x0, #CNT]
str x7, [x0, #DIF]
mov w0, w15
ret
5: // pad_with_ones
add w8, w6, #-16
ror x8, x8, x8
b 3b
6: // refill_eob
cmp x3, x4
b.hs 5b
ldr x8, [x4, #-8]
lsl w5, w5, #3
lsr x8, x8, x5
add w5, w6, #-48
mvn x8, x8
sub w4, w4, w3 // num_bytes_left
rev x8, x8
lsr x8, x8, x5
neg w5, w5
lsr w5, w5, #3
cmp w5, w4
csel w5, w5, w4, lo // num_bytes_read
b 2b
endfunc
function msac_decode_symbol_adapt8_neon, export=1
decode_update .8h, .16b, 8
endfunc
function msac_decode_symbol_adapt16_neon, export=1
decode_update .8h, .16b, 16
endfunc
function msac_decode_hi_tok_neon, export=1
ld1 {v0.4h}, [x1] // cdf
add x16, x0, #RNG
movi v31.4h, #0x7f, lsl #8 // 0x7f00
movrel x17, coeffs, COEFFS_BASE_OFFSET-2*3
mvni v30.4h, #0x3f // 0xffc0
ldrh w9, [x1, #6] // count = cdf[n_symbols]
ld1r {v3.4h}, [x16] // rng
ld1 {v29.4h}, [x17] // EC_MIN_PROB * (n_symbols - ret)
add x17, x0, #DIF + 6
mov w13, #-24*8
and v17.8b, v0.8b, v30.8b // cdf & 0xffc0
ldr w10, [x0, #ALLOW_UPDATE_CDF]
ld1r {v1.8h}, [x17] // dif >> (EC_WIN_SIZE - 16)
ldr w6, [x0, #CNT]
ldr x7, [x0, #DIF]
1:
and v7.8b, v3.8b, v31.8b // rng & 0x7f00
sqdmulh v6.4h, v17.4h, v7.4h // ((cdf >> EC_PROB_SHIFT) * (r - 128)) >> 1
add v4.4h, v17.4h, v29.4h // v = cdf + EC_MIN_PROB * (n_symbols - ret)
add v4.4h, v6.4h, v4.4h // v = ((cdf >> EC_PROB_SHIFT) * r) >> 1 + EC_MIN_PROB * (n_symbols - ret)
cmhs v2.4h, v1.4h, v4.4h // c >= v
add w13, w13, #5*8
ext v18.8b, v3.8b, v4.8b, #6 // u
umov x15, v2.d[0]
rev x15, x15
sub v18.4h, v18.4h, v4.4h // rng = u-v
// rev + clz = count trailing zeros
clz x15, x15 // 16*ret
cbz w10, 2f
// update_cdf
sub v5.4h, v0.4h, v2.4h // cdf[i] + (i >= val ? 1 : 0)
mov w4, #-5
orr v2.4h, #0x80, lsl #8 // i >= val ? -1 : 32768
sub w4, w4, w9, lsr #4 // -((count >> 4) + 5)
sub v2.4h, v2.4h, v0.4h // (32768 - cdf[i]) or (-1 - cdf[i])
dup v6.4h, w4 // -rate
sub w9, w9, w9, lsr #5 // count - (count == 32)
sshl v2.4h, v2.4h, v6.4h // ({32768,-1} - cdf[i]) >> rate
add w9, w9, #1 // count + (count < 32)
add v0.4h, v5.4h, v2.4h // cdf[i] + (32768 - cdf[i]) >> rate
st1 {v0.4h}, [x1]
and v17.8b, v0.8b, v30.8b // cdf & 0xffc0
strh w9, [x1, #6]
2:
mov x4, v18.d[0] // rng (packed)
mov x3, v4.d[0] // v (packed)
// Shift 'v'/'rng' for ret into the 16 least sig bits. There is
// garbage in the remaining bits, but we can work around this.
lsr x4, x4, x15 // rng
lsr x3, x3, x15 // v
lsl w5, w4, #16 // rng << 16
sub x7, x7, x3, lsl #48 // dif - (v << 48)
clz w5, w5 // d = clz(rng << 16)
lsl w4, w4, w5 // rng << d
subs w6, w6, w5 // cnt -= d
lsl x7, x7, x5 // (dif - (v << 48)) << d
strh w4, [x0, #RNG]
dup v3.4h, w4
b.hs 5f
// refill
ldp x3, x4, [x0] // BUF_POS, BUF_END
add x5, x3, #8
subs x5, x5, x4
b.hi 7f
ldr x8, [x3] // next_bits
add w4, w6, #-48 // shift_bits = cnt + 16 (- 64)
mvn x8, x8
neg w5, w4
rev x8, x8 // next_bits = bswap(next_bits)
lsr w5, w5, #3 // num_bytes_read
lsr x8, x8, x4 // next_bits >>= (shift_bits & 63)
3: // refill_end
add x3, x3, x5
add w6, w6, w5, lsl #3 // cnt += num_bits_read
str x3, [x0, #BUF_POS]
4: // refill_end2
orr x7, x7, x8 // dif |= next_bits
5: // end
sub w15, w15, #5*8
lsr x12, x7, #48
adds w13, w13, w15 // carry = tok_br < 3 || tok == 15
dup v1.8h, w12
b.cc 1b // loop if !carry
add w13, w13, #30*8
str w6, [x0, #CNT]
str x7, [x0, #DIF]
lsr w0, w13, #4
ret
6: // pad_with_ones
add w8, w6, #-16
ror x8, x8, x8
b 4b
7: // refill_eob
cmp x3, x4
b.hs 6b
ldr x8, [x4, #-8]
lsl w5, w5, #3
lsr x8, x8, x5
add w5, w6, #-48
mvn x8, x8
sub w4, w4, w3 // num_bytes_left
rev x8, x8
lsr x8, x8, x5
neg w5, w5
lsr w5, w5, #3
cmp w5, w4
csel w5, w5, w4, lo // num_bytes_read
b 3b
endfunc
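// The scalar bool decoders below share one pattern: derive the split value v
// from rng (scaled by the probability for the non-equi variants), compare
// dif against v shifted to the top of the 64-bit window to get the bit,
// conditionally rebase rng/dif, then renormalize and fall back to L(refill)
// when cnt underflows; msac_decode_bool_adapt additionally updates its
// 2-entry cdf.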
function msac_decode_bool_equi_neon, export=1
ldp w5, w6, [x0, #RNG] // + CNT
ldr x7, [x0, #DIF]
bic w4, w5, #0xff // r &= 0xff00
add w4, w4, #8
subs x8, x7, x4, lsl #47 // dif - vw
lsr w4, w4, #1 // v
sub w5, w5, w4 // r - v
cset w15, lo
csel w4, w5, w4, hs // if (ret) v = r - v;
csel x7, x8, x7, hs // if (ret) dif = dif - vw;
clz w5, w4 // clz(rng)
eor w5, w5, #16 // d = clz(rng) ^ 16
lsl w4, w4, w5 // rng << d
subs w6, w6, w5 // cnt -= d
lsl x7, x7, x5 // (dif - (v << 48)) << d
str w4, [x0, #RNG]
b.lo L(refill)
str w6, [x0, #CNT]
str x7, [x0, #DIF]
mov w0, w15
ret
endfunc
function msac_decode_bool_neon, export=1
ldp w5, w6, [x0, #RNG] // + CNT
ldr x7, [x0, #DIF]
lsr w4, w5, #8 // r >> 8
bic w1, w1, #0x3f // f &= ~63
mul w4, w4, w1
lsr w4, w4, #7
add w4, w4, #4 // v
subs x8, x7, x4, lsl #48 // dif - vw
sub w5, w5, w4 // r - v
cset w15, lo
csel w4, w5, w4, hs // if (ret) v = r - v;
csel x7, x8, x7, hs // if (ret) dif = dif - vw;
clz w5, w4 // clz(rng)
eor w5, w5, #16 // d = clz(rng) ^ 16
lsl w4, w4, w5 // rng << d
subs w6, w6, w5 // cnt -= d
lsl x7, x7, x5 // (dif - (v << 48)) << d
str w4, [x0, #RNG]
b.lo L(refill)
str w6, [x0, #CNT]
str x7, [x0, #DIF]
mov w0, w15
ret
endfunc
function msac_decode_bool_adapt_neon, export=1
ldr w9, [x1] // cdf[0-1]
ldp w5, w6, [x0, #RNG] // + CNT
ldr x7, [x0, #DIF]
lsr w4, w5, #8 // r >> 8
and w2, w9, #0xffc0 // f &= ~63
mul w4, w4, w2
lsr w4, w4, #7
add w4, w4, #4 // v
subs x8, x7, x4, lsl #48 // dif - vw
sub w5, w5, w4 // r - v
cset w15, lo
csel w4, w5, w4, hs // if (ret) v = r - v;
csel x7, x8, x7, hs // if (ret) dif = dif - vw;
ldr w10, [x0, #ALLOW_UPDATE_CDF]
clz w5, w4 // clz(rng)
eor w5, w5, #16 // d = clz(rng) ^ 16
cbz w10, 1f
lsr w2, w9, #16 // count = cdf[1]
and w9, w9, #0xffff // cdf[0]
sub w3, w2, w2, lsr #5 // count - (count >= 32)
lsr w2, w2, #4 // count >> 4
add w10, w3, #1 // count + (count < 32)
add w2, w2, #4 // rate = (count >> 4) | 4
sub w9, w9, w15 // cdf[0] -= bit
sub w11, w9, w15, lsl #15 // {cdf[0], cdf[0] - 32769}
asr w11, w11, w2 // {cdf[0], cdf[0] - 32769} >> rate
sub w9, w9, w11 // cdf[0]
strh w9, [x1]
strh w10, [x1, #2]
1:
lsl w4, w4, w5 // rng << d
subs w6, w6, w5 // cnt -= d
lsl x7, x7, x5 // (dif - (v << 48)) << d
str w4, [x0, #RNG]
b.lo L(refill)
str w6, [x0, #CNT]
str x7, [x0, #DIF]
mov w0, w15
ret
endfunc
third_party/dav1d/src/arm/64/filmgrain16.S
/*
* Copyright © 2021, VideoLAN and dav1d authors
* Copyright © 2021, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm.S"
#include "util.S"
#include "src/arm/asm-offsets.h"
#define GRAIN_WIDTH 82
#define GRAIN_HEIGHT 73
#define SUB_GRAIN_WIDTH 44
#define SUB_GRAIN_HEIGHT 38
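// PRNG helpers for grain generation. increment_seed advances the 16-bit
// film grain LFSR by \steps bits at once; an informal C-like sketch of the
// default shift=1 form:
//   bit = (r ^ (r >> 1) ^ (r >> 3) ^ (r >> 12)) & ((1 << steps) - 1);
//   r   = (r >> steps) | (bit << (16 - steps));
// read_rand/read_shift_rand then extract an 11-bit value of a given age from
// the state, used as an index into dav1d_gaussian_sequence.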
.macro increment_seed steps, shift=1
lsr w11, w2, #3
lsr w12, w2, #12
lsr w13, w2, #1
eor w11, w2, w11 // (r >> 0) ^ (r >> 3)
eor w12, w12, w13 // (r >> 12) ^ (r >> 1)
eor w11, w11, w12 // (r >> 0) ^ (r >> 3) ^ (r >> 12) ^ (r >> 1)
.if \shift
lsr w2, w2, #\steps
.endif
and w11, w11, #((1 << \steps) - 1) // bit
.if \shift
orr w2, w2, w11, lsl #(16 - \steps) // *state
.else
orr w2, w2, w11, lsl #16 // *state
.endif
.endm
.macro read_rand dest, bits, age
ubfx \dest, x2, #16 - \bits - \age, #\bits
.endm
.macro read_shift_rand dest, bits
ubfx \dest, x2, #17 - \bits, #\bits
lsr w2, w2, #1
.endm
// special calling convention:
// w2 holds seed
// x3 holds dav1d_gaussian_sequence
// clobbers x11-x15
// returns in v0.8h
function get_gaussian_neon
increment_seed 4
read_rand x14, 11, 3
read_rand x15, 11, 2
add x14, x3, x14, lsl #1
add x15, x3, x15, lsl #1
ld1 {v0.h}[0], [x14]
read_rand x14, 11, 1
ld1 {v0.h}[1], [x15]
add x14, x3, x14, lsl #1
read_rand x15, 11, 0
increment_seed 4
add x15, x3, x15, lsl #1
ld1 {v0.h}[2], [x14]
read_rand x14, 11, 3
ld1 {v0.h}[3], [x15]
add x14, x3, x14, lsl #1
read_rand x15, 11, 2
ld1 {v0.h}[4], [x14]
add x15, x3, x15, lsl #1
read_rand x14, 11, 1
ld1 {v0.h}[5], [x15]
read_rand x15, 11, 0
add x14, x3, x14, lsl #1
add x15, x3, x15, lsl #1
ld1 {v0.h}[6], [x14]
ld1 {v0.h}[7], [x15]
ret
endfunc
.macro store_grain_row r0, r1, r2, r3, r4, r5
st1 {\r0\().16b,\r1\().16b}, [x0], #32
st1 {\r2\().16b,\r3\().16b}, [x0], #32
st1 {\r4\().16b}, [x0], #16
st1 {\r5\().h}[0], [x0], #2
.endm
function get_grain_2_neon
increment_seed 2
read_rand x14, 11, 1
read_rand x15, 11, 0
add x14, x3, x14, lsl #1
add x15, x3, x15, lsl #1
ld1 {v0.h}[0], [x14]
ld1 {v0.h}[1], [x15]
srshl v0.4h, v0.4h, v31.4h
ret
endfunc
.macro get_grain_2 dst
bl get_grain_2_neon
.ifnc \dst, v0
mov \dst\().8b, v0.8b
.endif
.endm
function get_grain_4_neon
increment_seed 4
read_rand x14, 11, 3
read_rand x15, 11, 2
add x14, x3, x14, lsl #1
add x15, x3, x15, lsl #1
ld1 {v0.h}[0], [x14]
read_rand x14, 11, 1
ld1 {v0.h}[1], [x15]
add x14, x3, x14, lsl #1
read_rand x15, 11, 0
add x15, x3, x15, lsl #1
ld1 {v0.h}[2], [x14]
ld1 {v0.h}[3], [x15]
srshl v0.4h, v0.4h, v31.4h
ret
endfunc
.macro get_grain_4 dst
bl get_grain_4_neon
.ifnc \dst, v0
mov \dst\().8b, v0.8b
.endif
.endm
// w15 holds the number of entries to produce
// w14, w16 and w17 hold the previous output entries
// v0 holds the vector of produced entries
// v1 holds the input vector of sums from above
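// Each iteration emits one grain value: the lag-n AR contribution of the
// previous outputs is added to the precomputed column sum from above (v1)
// and rounded down by ar_coeff_shift, the next Gaussian sample (rounded
// down by 4 - bitdepth_min_8 + grain_scale_shift) is added on top, and the
// result is clamped to the +/-(128 << bitdepth_min_8) grain range.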
.macro output_lag n
function output_lag\n\()_neon
1:
read_shift_rand x13, 11
mov w11, v1.s[0]
ldrsh w12, [x3, x13, lsl #1]
ext v0.16b, v0.16b, v0.16b, #2
.if \n == 1
madd w11, w14, w4, w11 // sum (above) + *coeff * prev output
.elseif \n == 2
madd w11, w16, w4, w11 // sum (above) + *coeff * prev output 1
madd w11, w14, w17, w11 // += *coeff * prev output 2
mov w16, w14
.else
madd w11, w17, w4, w11 // sum (above) + *coeff * prev output 1
madd w11, w16, w20, w11 // sum (above) + *coeff * prev output 2
madd w11, w14, w21, w11 // += *coeff * prev output 3
mov w17, w16
mov w16, w14
.endif
add w14, w11, w8 // 1 << (ar_coeff_shift - 1)
add w12, w12, w10 // 1 << (4 - bitdepth_min_8 + grain_scale_shift - 1)
asr w14, w14, w7 // >> ar_coeff_shift
asr w12, w12, w9 // >> (4 - bitdepth_min_8 + grain_scale_shift)
add w14, w14, w12
cmp w14, w5
csel w14, w14, w5, le
cmp w14, w6
csel w14, w14, w6, ge
subs w15, w15, #1
ext v1.16b, v1.16b, v1.16b, #4
ins v0.h[7], w14
b.gt 1b
ret
endfunc
.endm
output_lag 1
output_lag 2
output_lag 3
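// The sum_lagN_above helpers compute, for 8 consecutive columns, the part of
// the AR sum that only depends on the rows above the current one,
// accumulating into v4/v5 as 32 bit sums and sliding the cached row windows
// one 8-column block to the right; the serial per-sample part is then done
// by the output_lagN functions above.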
function sum_lag1_above_neon
sub x12, x0, #1*GRAIN_WIDTH*2 - 16
ld1 {v18.8h}, [x12] // load top right
ext v0.16b, v16.16b, v17.16b, #14 // top left, top mid
ext v1.16b, v17.16b, v18.16b, #2 // top mid, top right
smull v4.4s, v17.4h, v28.4h
smlal v4.4s, v0.4h, v27.4h
smlal v4.4s, v1.4h, v29.4h
smull2 v5.4s, v17.8h, v28.8h
smlal2 v5.4s, v0.8h, v27.8h
smlal2 v5.4s, v1.8h, v29.8h
mov v16.16b, v17.16b
mov v17.16b, v18.16b
ret
endfunc
.macro sum_lag_n_body lag, type, uv_layout, edge, elems, uv_coeff
bl sum_\lag\()_above_neon
.ifc \type, uv_420
add x12, x19, #GRAIN_WIDTH*2
ld1 {v22.8h, v23.8h}, [x19], #32
ld1 {v24.8h, v25.8h}, [x12]
addp v22.8h, v22.8h, v23.8h
addp v23.8h, v24.8h, v25.8h
add v22.8h, v22.8h, v23.8h
srshr v0.8h, v22.8h, #2
.endif
.ifc \type, uv_422
ld1 {v22.8h, v23.8h}, [x19], #32
addp v22.8h, v22.8h, v23.8h
srshr v0.8h, v22.8h, #1
.endif
.ifc \type, uv_444
ld1 {v0.8h}, [x19], #16
.endif
.if \uv_layout
.ifnb \uv_coeff
dup v1.8b, \uv_coeff
sxtl v1.8h, v1.8b
smlal v4.4s, v0.4h, v1.4h
smlal2 v5.4s, v0.8h, v1.8h
.else
smlal v4.4s, v0.4h, v30.4h
smlal2 v5.4s, v0.8h, v30.8h
.endif
.endif
.if \uv_layout && \elems == 8
b sum_\lag\()_y_\edge\()_start
.elseif \uv_layout == 444 && \elems == 7
b sum_\lag\()_y_\edge\()_start
.elseif \uv_layout == 422 && \elems == 1
b sum_\lag\()_uv_420_\edge\()_start
.else
sum_\lag\()_\type\()_\edge\()_start:
.if \elems > 4
.ifc \edge, left
increment_seed 4
read_rand x12, 11, 3
read_rand x13, 11, 2
read_rand x14, 11, 1
add x12, x3, x12, lsl #1
add x13, x3, x13, lsl #1
add x14, x3, x14, lsl #1
ld1 {v0.h}[5], [x12]
ld1 {v0.h}[6], [x13]
ld1 {v0.h}[7], [x14]
lsl x2, x2, #1 // shift back the state as if we'd done increment_seed with shift=0
srshl v0.8h, v0.8h, v31.8h
ext v4.16b, v4.16b, v4.16b, #12
.ifc \lag, lag3
smov w17, v0.h[5]
.endif
.ifnc \lag, lag1
smov w16, v0.h[6]
.endif
smov w14, v0.h[7]
mov v1.16b, v4.16b
mov w15, #1
bl output_\lag\()_neon
.else
increment_seed 4, shift=0
mov v1.16b, v4.16b
mov w15, #4
bl output_\lag\()_neon
.endif
increment_seed 4, shift=0
mov v1.16b, v5.16b
.ifc \edge, right
mov w15, #3
bl output_\lag\()_neon
read_shift_rand x15, 11
add x15, x3, x15, lsl #1
ld1 {v1.h}[0], [x15]
srshl v1.4h, v1.4h, v31.4h
ext v0.16b, v0.16b, v1.16b, #2
.else
mov w15, #4
bl output_\lag\()_neon
.endif
.else
// elems == 1
increment_seed 4, shift=0
mov v1.16b, v4.16b
mov w15, #1
bl output_\lag\()_neon
lsr w2, w2, #3
read_rand x12, 11, 2
read_rand x13, 11, 1
read_rand x14, 11, 0
add x12, x3, x12, lsl #1
add x13, x3, x13, lsl #1
add x14, x3, x14, lsl #1
ld1 {v1.h}[0], [x12]
ld1 {v1.h}[1], [x13]
ld1 {v1.h}[2], [x14]
srshl v1.4h, v1.4h, v31.4h
ext v0.16b, v0.16b, v1.16b, #14
.endif
st1 {v0.8h}, [x0], #16
ldr x30, [sp], #16
AARCH64_VALIDATE_LINK_REGISTER
ret
.endif
.endm
.macro sum_lag1_func type, uv_layout, edge, elems=8
function sum_\type\()_lag1_\edge\()_neon
AARCH64_SIGN_LINK_REGISTER
str x30, [sp, #-16]!
.ifc \edge, left
sub x12, x0, #1*GRAIN_WIDTH*2
ld1 {v17.8h}, [x12] // load the previous block right above
.endif
sum_lag_n_body lag1, \type, \uv_layout, \edge, \elems
endfunc
.endm
sum_lag1_func y, 0, left
sum_lag1_func y, 0, mid
sum_lag1_func y, 0, right, 7
sum_lag1_func uv_444, 444, left
sum_lag1_func uv_444, 444, mid
sum_lag1_func uv_444, 444, right, 7
sum_lag1_func uv_422, 422, left
sum_lag1_func uv_422, 422, mid
sum_lag1_func uv_422, 422, right, 1
sum_lag1_func uv_420, 420, left
sum_lag1_func uv_420, 420, mid
sum_lag1_func uv_420, 420, right, 1
function sum_lag2_above_neon
sub x12, x0, #2*GRAIN_WIDTH*2 - 16
sub x13, x0, #1*GRAIN_WIDTH*2 - 16
ld1 {v18.8h}, [x12] // load top right
ld1 {v21.8h}, [x13]
dup v26.8b, v30.b[0]
ext v22.16b, v16.16b, v17.16b, #12 // top left, top mid
dup v27.8b, v30.b[1]
ext v23.16b, v16.16b, v17.16b, #14
sxtl v26.8h, v26.8b
dup v28.8b, v30.b[3]
ext v0.16b, v17.16b, v18.16b, #2 // top mid, top right
sxtl v27.8h, v27.8b
dup v29.8b, v30.b[4]
ext v1.16b, v17.16b, v18.16b, #4
sxtl v28.8h, v28.8b
sxtl v29.8h, v29.8b
smull v4.4s, v22.4h, v26.4h
smlal v4.4s, v23.4h, v27.4h
smlal v4.4s, v0.4h, v28.4h
smlal v4.4s, v1.4h, v29.4h
smull2 v5.4s, v22.8h, v26.8h
smlal2 v5.4s, v23.8h, v27.8h
smlal2 v5.4s, v0.8h, v28.8h
smlal2 v5.4s, v1.8h, v29.8h
dup v26.16b, v30.b[5]
ext v22.16b, v19.16b, v20.16b, #12 // top left, top mid
dup v27.16b, v30.b[6]
ext v23.16b, v19.16b, v20.16b, #14
sxtl v26.8h, v26.8b
dup v28.16b, v30.b[8]
ext v0.16b, v20.16b, v21.16b, #2 // top mid, top right
sxtl v27.8h, v27.8b
dup v29.16b, v30.b[9]
ext v1.16b, v20.16b, v21.16b, #4
sxtl v28.8h, v28.8b
sxtl v29.8h, v29.8b
smlal v4.4s, v22.4h, v26.4h
smlal v4.4s, v23.4h, v27.4h
smlal v4.4s, v0.4h, v28.4h
smlal v4.4s, v1.4h, v29.4h
smlal2 v5.4s, v22.8h, v26.8h
smlal2 v5.4s, v23.8h, v27.8h
smlal2 v5.4s, v0.8h, v28.8h
smlal2 v5.4s, v1.8h, v29.8h
dup v26.16b, v30.b[2]
dup v27.16b, v30.b[7]
sxtl v26.8h, v26.8b
sxtl v27.8h, v27.8b
smlal v4.4s, v17.4h, v26.4h
smlal v4.4s, v20.4h, v27.4h
smlal2 v5.4s, v17.8h, v26.8h
smlal2 v5.4s, v20.8h, v27.8h
mov v16.16b, v17.16b
mov v17.16b, v18.16b
mov v19.16b, v20.16b
mov v20.16b, v21.16b
ret
endfunc
.macro sum_lag2_func type, uv_layout, edge, elems=8
function sum_\type\()_lag2_\edge\()_neon
AARCH64_SIGN_LINK_REGISTER
str x30, [sp, #-16]!
.ifc \edge, left
sub x12, x0, #2*GRAIN_WIDTH*2
sub x13, x0, #1*GRAIN_WIDTH*2
ld1 {v17.8h}, [x12] // load the previous block right above
ld1 {v20.8h}, [x13]
.endif
sum_lag_n_body lag2, \type, \uv_layout, \edge, \elems, v30.b[12]
endfunc
.endm
sum_lag2_func y, 0, left
sum_lag2_func y, 0, mid
sum_lag2_func y, 0, right, 7
sum_lag2_func uv_444, 444, left
sum_lag2_func uv_444, 444, mid
sum_lag2_func uv_444, 444, right, 7
sum_lag2_func uv_422, 422, left
sum_lag2_func uv_422, 422, mid
sum_lag2_func uv_422, 422, right, 1
sum_lag2_func uv_420, 420, left
sum_lag2_func uv_420, 420, mid
sum_lag2_func uv_420, 420, right, 1
function sum_lag3_above_neon
sub x11, x0, #3*GRAIN_WIDTH*2 - 16
sub x12, x0, #2*GRAIN_WIDTH*2 - 16
sub x13, x0, #1*GRAIN_WIDTH*2 - 16
ld1 {v15.8h}, [x11] // load top right
ld1 {v18.8h}, [x12]
ld1 {v21.8h}, [x13]
dup v22.8b, v29.b[0]
ext v8.16b, v13.16b, v14.16b, #10 // top left, top mid
dup v23.8b, v29.b[1]
ext v9.16b, v13.16b, v14.16b, #12
sxtl v22.8h, v22.8b
dup v24.8b, v29.b[2]
sxtl v23.8h, v23.8b
dup v25.8b, v29.b[3]
ext v10.16b, v13.16b, v14.16b, #14
sxtl v24.8h, v24.8b
dup v26.8b, v29.b[4]
ext v11.16b, v14.16b, v15.16b, #2 // top mid, top right
sxtl v25.8h, v25.8b
dup v27.8b, v29.b[5]
ext v12.16b, v14.16b, v15.16b, #4
sxtl v26.8h, v26.8b
dup v28.8b, v29.b[6]
ext v13.16b, v14.16b, v15.16b, #6
sxtl v27.8h, v27.8b
sxtl v28.8h, v28.8b
smull v4.4s, v8.4h, v22.4h
smlal v4.4s, v9.4h, v23.4h
smlal v4.4s, v10.4h, v24.4h
smlal v4.4s, v11.4h, v26.4h
smlal v4.4s, v12.4h, v27.4h
smlal v4.4s, v13.4h, v28.4h
smlal v4.4s, v14.4h, v25.4h
smull2 v5.4s, v8.8h, v22.8h
smlal2 v5.4s, v9.8h, v23.8h
smlal2 v5.4s, v10.8h, v24.8h
smlal2 v5.4s, v11.8h, v26.8h
smlal2 v5.4s, v12.8h, v27.8h
smlal2 v5.4s, v13.8h, v28.8h
smlal2 v5.4s, v14.8h, v25.8h
dup v22.8b, v29.b[7]
ext v8.16b, v16.16b, v17.16b, #10 // top left, top mid
dup v23.8b, v29.b[8]
ext v9.16b, v16.16b, v17.16b, #12
sxtl v22.8h, v22.8b
dup v24.8b, v29.b[9]
sxtl v23.8h, v23.8b
dup v25.8b, v29.b[10]
ext v10.16b, v16.16b, v17.16b, #14
sxtl v24.8h, v24.8b
dup v26.8b, v29.b[11]
ext v11.16b, v17.16b, v18.16b, #2 // top mid, top right
sxtl v25.8h, v25.8b
dup v27.8b, v29.b[12]
ext v12.16b, v17.16b, v18.16b, #4
sxtl v26.8h, v26.8b
dup v28.8b, v29.b[13]
ext v13.16b, v17.16b, v18.16b, #6
sxtl v27.8h, v27.8b
sxtl v28.8h, v28.8b
smlal v4.4s, v8.4h, v22.4h
smlal v4.4s, v9.4h, v23.4h
smlal v4.4s, v10.4h, v24.4h
smlal v4.4s, v11.4h, v26.4h
smlal v4.4s, v12.4h, v27.4h
smlal v4.4s, v13.4h, v28.4h
smlal v4.4s, v17.4h, v25.4h
smlal2 v5.4s, v8.8h, v22.8h
smlal2 v5.4s, v9.8h, v23.8h
smlal2 v5.4s, v10.8h, v24.8h
smlal2 v5.4s, v11.8h, v26.8h
smlal2 v5.4s, v12.8h, v27.8h
smlal2 v5.4s, v13.8h, v28.8h
smlal2 v5.4s, v17.8h, v25.8h
dup v22.8b, v29.b[14]
ext v8.16b, v19.16b, v20.16b, #10 // top left, top mid
dup v23.8b, v29.b[15]
ext v9.16b, v19.16b, v20.16b, #12
sxtl v22.8h, v22.8b
dup v24.8b, v30.b[0]
sxtl v23.8h, v23.8b
dup v25.8b, v30.b[1]
ext v10.16b, v19.16b, v20.16b, #14
sxtl v24.8h, v24.8b
dup v26.8b, v30.b[2]
ext v11.16b, v20.16b, v21.16b, #2 // top mid, top right
sxtl v25.8h, v25.8b
dup v27.8b, v30.b[3]
ext v12.16b, v20.16b, v21.16b, #4
sxtl v26.8h, v26.8b
dup v28.8b, v30.b[4]
ext v13.16b, v20.16b, v21.16b, #6
sxtl v27.8h, v27.8b
sxtl v28.8h, v28.8b
smlal v4.4s, v8.4h, v22.4h
smlal v4.4s, v9.4h, v23.4h
smlal v4.4s, v10.4h, v24.4h
smlal v4.4s, v11.4h, v26.4h
smlal v4.4s, v12.4h, v27.4h
smlal v4.4s, v13.4h, v28.4h
smlal v4.4s, v20.4h, v25.4h
mov v16.16b, v17.16b
mov v17.16b, v18.16b
smlal2 v5.4s, v8.8h, v22.8h
smlal2 v5.4s, v9.8h, v23.8h
smlal2 v5.4s, v10.8h, v24.8h
smlal2 v5.4s, v11.8h, v26.8h
smlal2 v5.4s, v12.8h, v27.8h
smlal2 v5.4s, v13.8h, v28.8h
smlal2 v5.4s, v20.8h, v25.8h
mov v13.16b, v14.16b
mov v14.16b, v15.16b
mov v19.16b, v20.16b
mov v20.16b, v21.16b
ret
endfunc
.macro sum_lag3_func type, uv_layout, edge, elems=8
function sum_\type\()_lag3_\edge\()_neon
AARCH64_SIGN_LINK_REGISTER
str x30, [sp, #-16]!
.ifc \edge, left
sub x11, x0, #3*GRAIN_WIDTH*2
sub x12, x0, #2*GRAIN_WIDTH*2
sub x13, x0, #1*GRAIN_WIDTH*2
ld1 {v14.8h}, [x11] // load the previous block right above
ld1 {v17.8h}, [x12]
ld1 {v20.8h}, [x13]
.endif
sum_lag_n_body lag3, \type, \uv_layout, \edge, \elems, v30.b[8]
endfunc
.endm
sum_lag3_func y, 0, left
sum_lag3_func y, 0, mid
sum_lag3_func y, 0, right, 7
sum_lag3_func uv_444, 444, left
sum_lag3_func uv_444, 444, mid
sum_lag3_func uv_444, 444, right, 7
sum_lag3_func uv_422, 422, left
sum_lag3_func uv_422, 422, mid
sum_lag3_func uv_422, 422, right, 1
sum_lag3_func uv_420, 420, left
sum_lag3_func uv_420, 420, mid
sum_lag3_func uv_420, 420, right, 1
function generate_grain_rows_neon
AARCH64_SIGN_LINK_REGISTER
str x30, [sp, #-16]!
1:
mov w16, #80
2:
bl get_gaussian_neon
srshl v0.8h, v0.8h, v31.8h
subs w16, w16, #8
st1 {v0.8h}, [x0], #16
b.gt 2b
get_grain_2 v0
subs w1, w1, #1
st1 {v0.s}[0], [x0], #4
b.gt 1b
ldr x30, [sp], #16
AARCH64_VALIDATE_LINK_REGISTER
ret
endfunc
function generate_grain_rows_44_neon
AARCH64_SIGN_LINK_REGISTER
str x30, [sp, #-16]!
1:
mov w16, #40
2:
bl get_gaussian_neon
srshl v0.8h, v0.8h, v31.8h
subs w16, w16, #8
st1 {v0.8h}, [x0], #16
b.gt 2b
get_grain_4 v0
subs w1, w1, #1
st1 {v0.4h}, [x0]
add x0, x0, #GRAIN_WIDTH*2-80
b.gt 1b
ldr x30, [sp], #16
AARCH64_VALIDATE_LINK_REGISTER
ret
endfunc
function gen_grain_uv_444_lag0_neon
AARCH64_SIGN_LINK_REGISTER
str x30, [sp, #-16]!
ld1 {v4.8h}, [x19], #16
gen_grain_uv_lag0_8_start:
bl get_gaussian_neon
srshl v0.8h, v0.8h, v31.8h
gen_grain_uv_lag0_8_add:
and v4.16b, v4.16b, v1.16b
smull v2.4s, v4.4h, v27.4h
smull2 v3.4s, v4.8h, v27.8h
srshl v2.4s, v2.4s, v28.4s
srshl v3.4s, v3.4s, v28.4s
sqxtn v2.4h, v2.4s
sqxtn2 v2.8h, v3.4s
sqadd v2.8h, v2.8h, v0.8h
smin v2.8h, v2.8h, v25.8h
smax v2.8h, v2.8h, v26.8h
st1 {v2.8h}, [x0], #16
ldr x30, [sp], #16
AARCH64_VALIDATE_LINK_REGISTER
ret
endfunc
function gen_grain_uv_420_lag0_8_neon
AARCH64_SIGN_LINK_REGISTER
add x12, x19, #GRAIN_WIDTH*2
str x30, [sp, #-16]!
ld1 {v16.8h, v17.8h}, [x19], #32
ld1 {v18.8h, v19.8h}, [x12]
addp v16.8h, v16.8h, v17.8h
addp v17.8h, v18.8h, v19.8h
add v16.8h, v16.8h, v17.8h
srshr v4.8h, v16.8h, #2
b gen_grain_uv_lag0_8_start
endfunc
function gen_grain_uv_422_lag0_8_neon
AARCH64_SIGN_LINK_REGISTER
str x30, [sp, #-16]!
ld1 {v16.8h, v17.8h}, [x19], #32
addp v16.8h, v16.8h, v17.8h
srshr v4.8h, v16.8h, #1
b gen_grain_uv_lag0_8_start
endfunc
function gen_grain_uv_420_lag0_4_neon
add x12, x19, #GRAIN_WIDTH*2
AARCH64_SIGN_LINK_REGISTER
str x30, [sp, #-16]!
ld1 {v16.4h, v17.4h}, [x19]
ld1 {v18.4h, v19.4h}, [x12]
add x19, x19, #32
addp v16.4h, v16.4h, v17.4h
addp v17.4h, v18.4h, v19.4h
add v16.4h, v16.4h, v17.4h
srshr v4.4h, v16.4h, #2
get_grain_4 v0
b gen_grain_uv_lag0_8_add
endfunc
function gen_grain_uv_422_lag0_4_neon
AARCH64_SIGN_LINK_REGISTER
str x30, [sp, #-16]!
ld1 {v16.4h, v17.4h}, [x19]
add x19, x19, #32
addp v16.4h, v16.4h, v17.4h
srshr v4.4h, v16.4h, #1
get_grain_4 v0
b gen_grain_uv_lag0_8_add
endfunc
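// Full-width (82 column) grain generation. The entry point loads the film
// grain parameters (seed, grain scale shift, AR coefficients and lag),
// derives the rounding constants and the grain min/max, and dispatches to
// one of the lag0-lag3 row loops via the jump table; the uv_444 variant
// additionally xors the seed with a per-plane constant and advances the
// ar_coeffs_uv pointer for the second plane.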
.macro gen_grain_82 type
function generate_grain_\type\()_16bpc_neon, export=1
AARCH64_SIGN_LINK_REGISTER
stp x30, x19, [sp, #-96]!
.ifc \type, uv_444
mov w13, w3
mov w14, #28
add x19, x1, #3*GRAIN_WIDTH*2
mov x1, x2
mul w13, w13, w14
clz w15, w4
.else
clz w15, w2
.endif
movrel x3, X(gaussian_sequence)
sub w15, w15, #24 // -bitdepth_min_8
ldr w2, [x1, #FGD_SEED]
ldr w9, [x1, #FGD_GRAIN_SCALE_SHIFT]
.ifc \type, y
add x4, x1, #FGD_AR_COEFFS_Y
.else
add x4, x1, #FGD_AR_COEFFS_UV
.endif
add w9, w9, w15 // grain_scale_shift - bitdepth_min_8
movrel x16, gen_grain_\type\()_tbl
ldr w17, [x1, #FGD_AR_COEFF_LAG]
add w9, w9, #4
ldrsw x17, [x16, w17, uxtw #2]
dup v31.8h, w9 // 4 - bitdepth_min_8 + data->grain_scale_shift
add x16, x16, x17
neg v31.8h, v31.8h
.ifc \type, uv_444
cmp w13, #0
mov w11, #0x49d8
mov w14, #0xb524
add x4, x4, w13, uxtw // Add offset to ar_coeffs_uv[1]
csel w11, w11, w14, ne
.endif
ldr w7, [x1, #FGD_AR_COEFF_SHIFT]
neg w15, w15 // bitdepth_min_8
mov w8, #1
mov w10, #1
lsl w8, w8, w7 // 1 << ar_coeff_shift
lsl w10, w10, w9 // 1 << (4 + data->grain_scale_shift)
lsr w8, w8, #1 // 1 << (ar_coeff_shift - 1)
lsr w10, w10, #1 // 1 << (4 + data->grain_scale_shift - 1)
mov w5, #128
lsl w5, w5, w15 // 128 << bitdepth_min_8
neg w6, w5 // -(128 << bitdepth_min_8)
sub w5, w5, #1 // (128 << bitdepth_min_8) - 1
.ifc \type, uv_444
eor w2, w2, w11
.endif
br x16
L(generate_grain_\type\()_lag0):
AARCH64_VALID_JUMP_TARGET
.ifc \type, y
mov w1, #GRAIN_HEIGHT
bl generate_grain_rows_neon
.else
dup v28.4s, w7
ld1r {v27.8b}, [x4] // ar_coeffs_uv[0]
movi v0.16b, #0
movi v1.16b, #255
dup v25.8h, w5
dup v26.8h, w6
ext v29.16b, v0.16b, v1.16b, #10
ext v30.16b, v1.16b, v0.16b, #2
neg v28.4s, v28.4s
sxtl v27.8h, v27.8b
mov w1, #3
bl generate_grain_rows_neon
mov w1, #GRAIN_HEIGHT-3
1:
mov v1.16b, v29.16b
bl gen_grain_uv_444_lag0_neon // 8
movi v1.16b, #255
bl gen_grain_uv_444_lag0_neon // 16
bl gen_grain_uv_444_lag0_neon // 24
bl gen_grain_uv_444_lag0_neon // 32
bl gen_grain_uv_444_lag0_neon // 40
bl gen_grain_uv_444_lag0_neon // 48
bl gen_grain_uv_444_lag0_neon // 56
bl gen_grain_uv_444_lag0_neon // 64
bl gen_grain_uv_444_lag0_neon // 72
mov v1.16b, v30.16b
bl gen_grain_uv_444_lag0_neon // 80
get_grain_2 v16
subs w1, w1, #1
add x19, x19, #4
st1 {v16.s}[0], [x0], #4
b.gt 1b
.endif
ldp x30, x19, [sp], #96
AARCH64_VALIDATE_LINK_REGISTER
ret
L(generate_grain_\type\()_lag1):
AARCH64_VALID_JUMP_TARGET
ld1r {v27.8b}, [x4], #1 // ar_coeffs_y[0]
ld1r {v28.8b}, [x4], #1 // ar_coeffs_y[1]
ld1r {v29.8b}, [x4] // ar_coeffs_y[2]
.ifc \type, y
ldrsb w4, [x4, #1] // ar_coeffs_y[3]
.else
add x4, x4, #2
.endif
mov w1, #3
.ifc \type, uv_444
ld1r {v30.8b}, [x4] // ar_coeffs_uv[4]
ldursb w4, [x4, #-1] // ar_coeffs_uv[3]
.endif
bl generate_grain_rows_neon
sxtl v27.8h, v27.8b
sxtl v28.8h, v28.8b
sxtl v29.8h, v29.8b
.ifc \type, uv_444
sxtl v30.8h, v30.8b
.endif
mov w1, #GRAIN_HEIGHT - 3
1:
bl sum_\type\()_lag1_left_neon // 8
bl sum_\type\()_lag1_mid_neon // 16
bl sum_\type\()_lag1_mid_neon // 24
bl sum_\type\()_lag1_mid_neon // 32
bl sum_\type\()_lag1_mid_neon // 40
bl sum_\type\()_lag1_mid_neon // 48
bl sum_\type\()_lag1_mid_neon // 56
bl sum_\type\()_lag1_mid_neon // 64
bl sum_\type\()_lag1_mid_neon // 72
bl sum_\type\()_lag1_right_neon // 80
get_grain_2 v16
subs w1, w1, #1
.ifc \type, uv_444
add x19, x19, #4
.endif
st1 {v16.s}[0], [x0], #4
b.gt 1b
ldp x30, x19, [sp], #96
AARCH64_VALIDATE_LINK_REGISTER
ret
L(generate_grain_\type\()_lag2):
AARCH64_VALID_JUMP_TARGET
ld1 {v30.16b}, [x4] // ar_coeffs_y[0-11], ar_coeffs_uv[0-12]
smov w4, v30.b[10]
smov w17, v30.b[11]
mov w1, #3
bl generate_grain_rows_neon
mov w1, #GRAIN_HEIGHT - 3
1:
bl sum_\type\()_lag2_left_neon // 8
bl sum_\type\()_lag2_mid_neon // 16
bl sum_\type\()_lag2_mid_neon // 24
bl sum_\type\()_lag2_mid_neon // 32
bl sum_\type\()_lag2_mid_neon // 40
bl sum_\type\()_lag2_mid_neon // 48
bl sum_\type\()_lag2_mid_neon // 56
bl sum_\type\()_lag2_mid_neon // 64
bl sum_\type\()_lag2_mid_neon // 72
bl sum_\type\()_lag2_right_neon // 80
get_grain_2 v16
subs w1, w1, #1
.ifc \type, uv_444
add x19, x19, #4
.endif
st1 {v16.s}[0], [x0], #4
b.gt 1b
ldp x30, x19, [sp], #96
AARCH64_VALIDATE_LINK_REGISTER
ret
L(generate_grain_\type\()_lag3):
AARCH64_VALID_JUMP_TARGET
ld1 {v29.16b, v30.16b}, [x4] // ar_coeffs_y[0-23], ar_coeffs_uv[0-24]
stp d8, d9, [sp, #16]
stp d10, d11, [sp, #32]
stp d12, d13, [sp, #48]
stp d14, d15, [sp, #64]
stp x20, x21, [sp, #80]
smov w4, v30.b[5]
smov w20, v30.b[6]
smov w21, v30.b[7]
mov w1, #3
bl generate_grain_rows_neon
mov w1, #GRAIN_HEIGHT - 3
1:
bl sum_\type\()_lag3_left_neon // 8
bl sum_\type\()_lag3_mid_neon // 16
bl sum_\type\()_lag3_mid_neon // 24
bl sum_\type\()_lag3_mid_neon // 32
bl sum_\type\()_lag3_mid_neon // 40
bl sum_\type\()_lag3_mid_neon // 48
bl sum_\type\()_lag3_mid_neon // 56
bl sum_\type\()_lag3_mid_neon // 64
bl sum_\type\()_lag3_mid_neon // 72
bl sum_\type\()_lag3_right_neon // 80
get_grain_2 v16
subs w1, w1, #1
.ifc \type, uv_444
add x19, x19, #4
.endif
st1 {v16.s}[0], [x0], #4
b.gt 1b
ldp x20, x21, [sp, #80]
ldp d14, d15, [sp, #64]
ldp d12, d13, [sp, #48]
ldp d10, d11, [sp, #32]
ldp d8, d9, [sp, #16]
ldp x30, x19, [sp], #96
AARCH64_VALIDATE_LINK_REGISTER
ret
endfunc
jumptable gen_grain_\type\()_tbl
.word L(generate_grain_\type\()_lag0) - gen_grain_\type\()_tbl
.word L(generate_grain_\type\()_lag1) - gen_grain_\type\()_tbl
.word L(generate_grain_\type\()_lag2) - gen_grain_\type\()_tbl
.word L(generate_grain_\type\()_lag3) - gen_grain_\type\()_tbl
endjumptable
.endm
gen_grain_82 y
gen_grain_82 uv_444
.macro set_height dst, type
.ifc \type, uv_420
mov \dst, #SUB_GRAIN_HEIGHT-3
.else
mov \dst, #GRAIN_HEIGHT-3
.endif
.endm
.macro increment_y_ptr reg, type
.ifc \type, uv_420
add \reg, \reg, #2*GRAIN_WIDTH*2-(6*32)
.else
sub \reg, \reg, #6*32-GRAIN_WIDTH*2
.endif
.endm
.macro gen_grain_44 type
function generate_grain_\type\()_16bpc_neon, export=1
AARCH64_SIGN_LINK_REGISTER
stp x30, x19, [sp, #-96]!
mov w13, w3
mov w14, #28
add x19, x1, #(3*GRAIN_WIDTH-3)*2
mov x1, x2
mul w13, w13, w14
clz w15, w4
movrel x3, X(gaussian_sequence)
sub w15, w15, #24 // -bitdepth_min_8
ldr w2, [x1, #FGD_SEED]
ldr w9, [x1, #FGD_GRAIN_SCALE_SHIFT]
add x4, x1, #FGD_AR_COEFFS_UV
add w9, w9, w15 // grain_scale_shift - bitdepth_min_8
movrel x16, gen_grain_\type\()_tbl
ldr w17, [x1, #FGD_AR_COEFF_LAG]
add w9, w9, #4
ldrsw x17, [x16, w17, uxtw #2]
dup v31.8h, w9 // 4 - bitdepth_min_8 + data->grain_scale_shift
add x16, x16, x17
neg v31.8h, v31.8h
cmp w13, #0
mov w11, #0x49d8
mov w14, #0xb524
add x4, x4, w13, uxtw // Add offset to ar_coeffs_uv[1]
csel w11, w11, w14, ne
ldr w7, [x1, #FGD_AR_COEFF_SHIFT]
neg w15, w15 // bitdepth_min_8
mov w8, #1
mov w10, #1
lsl w8, w8, w7 // 1 << ar_coeff_shift
lsl w10, w10, w9 // 1 << (4 + data->grain_scale_shift)
lsr w8, w8, #1 // 1 << (ar_coeff_shift - 1)
lsr w10, w10, #1 // 1 << (4 + data->grain_scale_shift - 1)
mov w5, #128
lsl w5, w5, w15 // 128 << bitdepth_min_8
neg w6, w5 // -(128 << bitdepth_min_8)
sub w5, w5, #1 // (128 << bitdepth_min_8) - 1
eor w2, w2, w11
br x16
L(generate_grain_\type\()_lag0):
AARCH64_VALID_JUMP_TARGET
dup v28.4s, w7
ld1r {v27.8b}, [x4] // ar_coeffs_uv[0]
movi v0.16b, #0
movi v1.16b, #255
dup v25.8h, w5
dup v26.8h, w6
ext v29.16b, v0.16b, v1.16b, #10
ext v30.16b, v1.16b, v0.16b, #14
neg v28.4s, v28.4s
sxtl v27.8h, v27.8b
mov w1, #3
bl generate_grain_rows_44_neon
set_height w1, \type
1:
mov v1.16b, v29.16b
bl gen_grain_\type\()_lag0_8_neon // 8
movi v1.16b, #255
bl gen_grain_\type\()_lag0_8_neon // 16
bl gen_grain_\type\()_lag0_8_neon // 24
bl gen_grain_\type\()_lag0_8_neon // 32
bl gen_grain_\type\()_lag0_8_neon // 40
mov v1.16b, v30.16b
bl gen_grain_\type\()_lag0_4_neon // 44
subs w1, w1, #1
increment_y_ptr x19, \type
add x0, x0, #GRAIN_WIDTH*2-6*16
b.gt 1b
ldp x30, x19, [sp], #96
AARCH64_VALIDATE_LINK_REGISTER
ret
L(generate_grain_\type\()_lag1):
AARCH64_VALID_JUMP_TARGET
ld1r {v27.8b}, [x4], #1 // ar_coeffs_uv[0]
ld1r {v28.8b}, [x4], #1 // ar_coeffs_uv[1]
ld1r {v29.8b}, [x4] // ar_coeffs_uv[2]
add x4, x4, #2
mov w1, #3
ld1r {v30.8b}, [x4] // ar_coeffs_uv[4]
ldursb w4, [x4, #-1] // ar_coeffs_uv[3]
bl generate_grain_rows_44_neon
sxtl v27.8h, v27.8b
sxtl v28.8h, v28.8b
sxtl v29.8h, v29.8b
sxtl v30.8h, v30.8b
set_height w1, \type
1:
bl sum_\type\()_lag1_left_neon // 8
bl sum_\type\()_lag1_mid_neon // 16
bl sum_\type\()_lag1_mid_neon // 24
bl sum_\type\()_lag1_mid_neon // 32
bl sum_\type\()_lag1_mid_neon // 40
bl sum_\type\()_lag1_right_neon // 44
subs w1, w1, #1
increment_y_ptr x19, \type
add x0, x0, #GRAIN_WIDTH*2-6*16
b.gt 1b
ldp x30, x19, [sp], #96
AARCH64_VALIDATE_LINK_REGISTER
ret
L(generate_grain_\type\()_lag2):
AARCH64_VALID_JUMP_TARGET
ld1 {v30.16b}, [x4] // ar_coeffs_uv[0-12]
smov w4, v30.b[10]
smov w17, v30.b[11]
mov w1, #3
bl generate_grain_rows_44_neon
set_height w1, \type
1:
bl sum_\type\()_lag2_left_neon // 8
bl sum_\type\()_lag2_mid_neon // 16
bl sum_\type\()_lag2_mid_neon // 24
bl sum_\type\()_lag2_mid_neon // 32
bl sum_\type\()_lag2_mid_neon // 40
bl sum_\type\()_lag2_right_neon // 44
subs w1, w1, #1
increment_y_ptr x19, \type
add x0, x0, #GRAIN_WIDTH*2-6*16
b.gt 1b
ldp x30, x19, [sp], #96
AARCH64_VALIDATE_LINK_REGISTER
ret
L(generate_grain_\type\()_lag3):
AARCH64_VALID_JUMP_TARGET
ldr q29, [x4] // ar_coeffs_uv[0-15]
ldr q30, [x4, #16] // ar_coeffs_uv[16-24]
stp d8, d9, [sp, #16]
stp d10, d11, [sp, #32]
stp d12, d13, [sp, #48]
stp d14, d15, [sp, #64]
stp x20, x21, [sp, #80]
smov w4, v30.b[5]
smov w20, v30.b[6]
smov w21, v30.b[7]
mov w1, #3
bl generate_grain_rows_44_neon
set_height w1, \type
1:
bl sum_\type\()_lag3_left_neon // 8
bl sum_\type\()_lag3_mid_neon // 16
bl sum_\type\()_lag3_mid_neon // 24
bl sum_\type\()_lag3_mid_neon // 32
bl sum_\type\()_lag3_mid_neon // 40
bl sum_\type\()_lag3_right_neon // 44
subs w1, w1, #1
increment_y_ptr x19, \type
add x0, x0, #GRAIN_WIDTH*2-6*16
b.gt 1b
ldp x20, x21, [sp, #80]
ldp d14, d15, [sp, #64]
ldp d12, d13, [sp, #48]
ldp d10, d11, [sp, #32]
ldp d8, d9, [sp, #16]
ldp x30, x19, [sp], #96
AARCH64_VALIDATE_LINK_REGISTER
ret
endfunc
jumptable gen_grain_\type\()_tbl
.word L(generate_grain_\type\()_lag0) - gen_grain_\type\()_tbl
.word L(generate_grain_\type\()_lag1) - gen_grain_\type\()_tbl
.word L(generate_grain_\type\()_lag2) - gen_grain_\type\()_tbl
.word L(generate_grain_\type\()_lag3) - gen_grain_\type\()_tbl
endjumptable
.endm
gen_grain_44 uv_420
gen_grain_44 uv_422
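// The gather helpers implement the scaling[] lookup: each pixel value is
// moved to a general purpose register, added to the table base in x3 and
// used as a byte load address, with the two destination vectors filled in
// alternating order.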
.macro gather_interleaved dst1, dst2, src1, src2, off
umov w14, \src1[0]
umov w15, \src2[1]
umov w16, \src1[2]
add x14, x14, x3
umov w17, \src2[3]
add x15, x15, x3
ld1 {\dst1}[0+\off], [x14]
umov w14, \src1[4]
add x16, x16, x3
ld1 {\dst2}[1+\off], [x15]
umov w15, \src2[5]
add x17, x17, x3
ld1 {\dst1}[2+\off], [x16]
umov w16, \src1[6]
add x14, x14, x3
ld1 {\dst2}[3+\off], [x17]
umov w17, \src2[7]
add x15, x15, x3
ld1 {\dst1}[4+\off], [x14]
add x16, x16, x3
ld1 {\dst2}[5+\off], [x15]
add x17, x17, x3
ld1 {\dst1}[6+\off], [x16]
ld1 {\dst2}[7+\off], [x17]
.endm
.macro gather dst1, dst2, src1, src2, src3, src4
gather_interleaved \dst1, \dst2, \src1, \src3, 0
gather_interleaved \dst2, \dst1, \src3, \src1, 0
gather_interleaved \dst1, \dst2, \src2, \src4, 8
gather_interleaved \dst2, \dst1, \src4, \src2, 8
.endm
function gather32_neon
gather v6.b, v7.b, v0.h, v1.h, v2.h, v3.h
ret
endfunc
function gather16_neon
gather_interleaved v6.b, v7.b, v0.h, v1.h, 0
gather_interleaved v7.b, v6.b, v1.h, v0.h, 0
ins v6.d[1], v7.d[0]
ret
endfunc
const overlap_coeffs_0, align=4
.short 27, 17, 0, 0
.short 17, 27, 32, 32
endconst
const overlap_coeffs_1, align=4
.short 23, 0, 0, 0
.short 22, 32, 32, 32
endconst
.macro calc_offset offx, offy, src, sx, sy
and \offy, \src, #0xF // randval & 0xF
lsr \offx, \src, #4 // randval >> 4
.if \sy == 0
add \offy, \offy, \offy // 2 * (randval & 0xF)
.endif
.if \sx == 0
add \offx, \offx, \offx // 2 * (randval >> 4)
.endif
.endm
.macro add_offset dst, offx, offy, src, stride
madd \dst, \stride, \offy, \src // grain_lut += grain_stride * offy
add \dst, \dst, \offx, uxtw #1 // grain_lut += offx
.endm
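// calc_offset splits an 8-bit offset value into its x (high nibble) and
// y (low nibble) components, doubling whichever dimension is not
// subsampled; add_offset then turns them into a grain_lut pointer
// (offy rows of `stride` bytes plus 2*offx bytes for the 16 bit entries).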
// void dav1d_fgy_32x32_16bpc_neon(pixel *const dst, const pixel *const src,
// const ptrdiff_t stride,
// const uint8_t scaling[SCALING_SIZE],
// const int scaling_shift,
// const entry grain_lut[][GRAIN_WIDTH],
// const int offsets[][2],
// const int h, const ptrdiff_t clip,
// const ptrdiff_t type,
// const int bitdepth_max);
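// Noise is applied as dst = clip(src + round2(scaling[src] * grain,
// scaling_shift)). The rounding shift is folded into sqrdmulh by
// pre-shifting the scaling value left by 15 - scaling_shift (hence the
// eor w4, w4, #15 below), so one saturating doubling multiply-high yields
// round2(scaling * grain, scaling_shift) directly.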
function fgy_32x32_16bpc_neon, export=1
AARCH64_SIGN_LINK_REGISTER
str x30, [sp, #-80]!
stp d8, d9, [sp, #16]
stp d10, d11, [sp, #32]
stp d12, d13, [sp, #48]
str d14, [sp, #64]
eor w4, w4, #15 // 15 - scaling_shift
ldr w11, [x6, #8] // offsets[1][0]
ldr w13, [x6, #4] // offsets[0][1]
ldr w15, [x6, #12] // offsets[1][1]
ldr w10, [sp, #96] // bitdepth_max
ldr w6, [x6] // offsets[0][0]
dup v26.8h, w10 // bitdepth_max
clz w10, w10
ldr w8, [sp, #80] // clip
sub w10, w10, #24 // -bitdepth_min_8
mov x9, #GRAIN_WIDTH*2 // grain_lut stride
neg w10, w10 // bitdepth_min_8
dup v29.8h, w4 // 15 - scaling_shift
dup v27.8h, w10 // bitdepth_min_8
movrel x16, overlap_coeffs_0
cbz w8, 1f
// clip
movi v30.8h, #16
movi v31.8h, #235
sshl v30.8h, v30.8h, v27.8h
sshl v31.8h, v31.8h, v27.8h
b 2f
1:
// no clip
movi v30.8h, #0
mov v31.16b, v26.16b // bitdepth_max
2:
ushr v26.8h, v26.8h, #1 // grain_max
not v25.16b, v26.16b // grain_min
ld1 {v27.4h, v28.4h}, [x16] // overlap_coeffs
add x5, x5, #18 // grain_lut += 9
add x5, x5, x9, lsl #3 // grain_lut += 8 * grain_stride
add x5, x5, x9 // grain_lut += grain_stride
calc_offset w11, w12, w11, 0, 0
calc_offset w13, w14, w13, 0, 0
calc_offset w15, w16, w15, 0, 0
calc_offset w6, w10, w6, 0, 0
add_offset x12, w11, x12, x5, x9
add_offset x14, w13, x14, x5, x9
add_offset x16, w15, x16, x5, x9
add_offset x5, w6, x10, x5, x9
ldr w11, [sp, #88] // type
movrel x13, fgy_loop_tbl
add x4, x12, #32*2 // grain_lut += FG_BLOCK_SIZE * bx
add x6, x14, x9, lsl #5 // grain_lut += grain_stride * FG_BLOCK_SIZE * by
tst w11, #1
ldrsw x11, [x13, w11, uxtw #2]
add x8, x16, x9, lsl #5 // grain_lut += grain_stride * FG_BLOCK_SIZE * by
add x8, x8, #32*2 // grain_lut += FG_BLOCK_SIZE * bx
add x11, x13, x11
b.eq 1f
// y overlap
dup v8.8h, v27.h[0]
dup v9.8h, v27.h[1]
mov w10, w7 // backup actual h
mov w7, #2
1:
br x11
endfunc
function fgy_loop_neon
.macro fgy ox, oy
L(loop_\ox\oy):
AARCH64_VALID_JUMP_TARGET
1:
ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x1], x2 // src
.if \ox
ld1 {v20.4h}, [x4], x9 // grain_lut old
.endif
.if \oy
ld1 {v21.8h, v22.8h, v23.8h, v24.8h}, [x6], x9 // grain_lut top
.endif
.if \ox && \oy
ld1 {v14.4h}, [x8], x9 // grain_lut top old
.endif
mvni v4.8h, #0xf0, lsl #8 // 0x0fff
ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [x5], x9 // grain_lut
// Make sure that uninitialized pixels out of range past the right
// edge are in range; their actual values shouldn't matter.
and v0.16b, v0.16b, v4.16b
and v1.16b, v1.16b, v4.16b
and v2.16b, v2.16b, v4.16b
and v3.16b, v3.16b, v4.16b
bl gather32_neon
.if \ox
smull v20.4s, v20.4h, v27.4h
smlal v20.4s, v16.4h, v28.4h
.endif
.if \oy
.if \ox
smull v14.4s, v14.4h, v27.4h
smlal v14.4s, v21.4h, v28.4h
sqrshrn v20.4h, v20.4s, #5
sqrshrn v14.4h, v14.4s, #5
smin v20.4h, v20.4h, v26.4h
smin v14.4h, v14.4h, v26.4h
smax v20.4h, v20.4h, v25.4h
smax v14.4h, v14.4h, v25.4h
.endif
.if \ox
smull v10.4s, v20.4h, v9.4h
.else
smull v10.4s, v16.4h, v9.4h
.endif
smull2 v11.4s, v16.8h, v9.8h
smull v12.4s, v17.4h, v9.4h
smull2 v13.4s, v17.8h, v9.8h
smull v16.4s, v18.4h, v9.4h
smull2 v17.4s, v18.8h, v9.8h
smull v18.4s, v19.4h, v9.4h
smull2 v19.4s, v19.8h, v9.8h
.if \ox
smlal v10.4s, v14.4h, v8.4h
.else
smlal v10.4s, v21.4h, v8.4h
.endif
smlal2 v11.4s, v21.8h, v8.8h
smlal v12.4s, v22.4h, v8.4h
smlal2 v13.4s, v22.8h, v8.8h
smlal v16.4s, v23.4h, v8.4h
smlal2 v17.4s, v23.8h, v8.8h
smlal v18.4s, v24.4h, v8.4h
smlal2 v19.4s, v24.8h, v8.8h
sqrshrn v10.4h, v10.4s, #5
sqrshrn2 v10.8h, v11.4s, #5
sqrshrn v11.4h, v12.4s, #5
sqrshrn2 v11.8h, v13.4s, #5
sqrshrn v12.4h, v16.4s, #5
sqrshrn2 v12.8h, v17.4s, #5
sqrshrn v13.4h, v18.4s, #5
sqrshrn2 v13.8h, v19.4s, #5
smin v16.8h, v10.8h, v26.8h
smin v17.8h, v11.8h, v26.8h
smin v18.8h, v12.8h, v26.8h
smin v19.8h, v13.8h, v26.8h
smax v16.8h, v16.8h, v25.8h
smax v17.8h, v17.8h, v25.8h
smax v18.8h, v18.8h, v25.8h
smax v19.8h, v19.8h, v25.8h
.endif
uxtl v4.8h, v6.8b // scaling
.if \ox && !\oy
sqrshrn v20.4h, v20.4s, #5
.endif
uxtl2 v5.8h, v6.16b
.if \ox && !\oy
smin v20.4h, v20.4h, v26.4h
.endif
uxtl v6.8h, v7.8b
.if \ox && !\oy
smax v20.4h, v20.4h, v25.4h
.endif
uxtl2 v7.8h, v7.16b
.if \ox && !\oy
ins v16.d[0], v20.d[0]
.endif
ushl v4.8h, v4.8h, v29.8h // scaling << (15 - scaling_shift)
ushl v5.8h, v5.8h, v29.8h
ushl v6.8h, v6.8h, v29.8h
ushl v7.8h, v7.8h, v29.8h
sqrdmulh v20.8h, v16.8h, v4.8h // round2((scaling << (15 - scaling_shift)) * grain, 15)
sqrdmulh v21.8h, v17.8h, v5.8h
sqrdmulh v22.8h, v18.8h, v6.8h
sqrdmulh v23.8h, v19.8h, v7.8h
usqadd v0.8h, v20.8h // *src + noise
usqadd v1.8h, v21.8h
usqadd v2.8h, v22.8h
usqadd v3.8h, v23.8h
umax v0.8h, v0.8h, v30.8h
umax v1.8h, v1.8h, v30.8h
umax v2.8h, v2.8h, v30.8h
umax v3.8h, v3.8h, v30.8h
umin v0.8h, v0.8h, v31.8h
umin v1.8h, v1.8h, v31.8h
umin v2.8h, v2.8h, v31.8h
umin v3.8h, v3.8h, v31.8h
subs w7, w7, #1
.if \oy
dup v8.8h, v28.h[0]
dup v9.8h, v28.h[1]
.endif
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], x2 // dst
b.gt 1b
.if \oy
cmp w10, #2
sub w7, w10, #2 // restore actual remaining h
b.gt L(loop_\ox\()0)
.endif
ldr d14, [sp, #64]
ldp d12, d13, [sp, #48]
ldp d10, d11, [sp, #32]
ldp d8, d9, [sp, #16]
ldr x30, [sp], #80
AARCH64_VALIDATE_LINK_REGISTER
ret
.endm
fgy 0, 0
fgy 0, 1
fgy 1, 0
fgy 1, 1
endfunc
jumptable fgy_loop_tbl
.word L(loop_00) - fgy_loop_tbl
.word L(loop_01) - fgy_loop_tbl
.word L(loop_10) - fgy_loop_tbl
.word L(loop_11) - fgy_loop_tbl
endjumptable
// void dav1d_fguv_32x32_420_16bpc_neon(pixel *const dst,
// const pixel *const src,
// const ptrdiff_t stride,
// const uint8_t scaling[SCALING_SIZE],
// const Dav1dFilmGrainData *const data,
// const entry grain_lut[][GRAIN_WIDTH],
// const pixel *const luma_row,
// const ptrdiff_t luma_stride,
// const int offsets[][2],
// const ptrdiff_t h, const ptrdiff_t uv,
// const ptrdiff_t is_id,
// const ptrdiff_t type,
// const int bitdepth_max);
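// The chroma variants follow the same structure as fgy above, but take the
// film grain data pointer and the luma plane as extra arguments: the !csfl
// paths recombine luma and chroma with uv_luma_mult/uv_mult/uv_offset before
// the scaling lookup, and the sx/sy subsampling factors adjust the grain_lut
// addressing and the y-overlap handling.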
.macro fguv layout, sx, sy
function fguv_32x32_\layout\()_16bpc_neon, export=1
AARCH64_SIGN_LINK_REGISTER
str x30, [sp, #-80]!
stp d8, d9, [sp, #16]
stp d10, d11, [sp, #32]
stp d12, d13, [sp, #48]
stp d14, d15, [sp, #64]
ldp x8, x9, [sp, #80] // offsets, h
ldp x10, x11, [sp, #96] // uv, is_id
ldr w16, [sp, #120] // bitdepth_max
ldr w13, [x4, #FGD_SCALING_SHIFT]
ldr w12, [x4, #FGD_CLIP_TO_RESTRICTED_RANGE]
dup v23.8h, w16 // bitdepth_max
clz w16, w16
eor w13, w13, #15 // 15 - scaling_shift
sub w16, w16, #24 // -bitdepth_min_8
// !csfl
add x10, x4, x10, lsl #2 // + 4*uv
add x14, x10, #FGD_UV_LUMA_MULT
add x15, x10, #FGD_UV_MULT
add x10, x10, #FGD_UV_OFFSET
neg w16, w16 // bitdepth_min_8
ld1r {v8.8h}, [x14] // uv_luma_mult
ld1r {v24.8h}, [x10] // uv_offset
ld1r {v9.8h}, [x15] // uv_mult
dup v29.8h, w13 // 15 - scaling_shift
dup v27.8h, w16 // bitdepth_min_8
cbz w12, 1f
// clip
movi v30.8h, #16
movi v31.8h, #240
sshl v30.8h, v30.8h, v27.8h
sshl v31.8h, v31.8h, v27.8h
cbz w11, 2f
// is_id
movi v31.8h, #235
sshl v31.8h, v31.8h, v27.8h
b 2f
1:
// no clip
movi v30.8h, #0
mov v31.16b, v23.16b // bitdepth_max
2:
ushr v15.8h, v23.8h, #1 // grain_max
sshl v24.8h, v24.8h, v27.8h // uv_offset << bitdepth_min_8
not v14.16b, v15.16b // grain_min
ldr w12, [x8, #8] // offsets[1][0]
ldr w14, [x8, #4] // offsets[0][1]
ldr w16, [x8, #12] // offsets[1][1]
ldr w8, [x8] // offsets[0][0]
mov x10, #GRAIN_WIDTH*2 // grain_lut stride
add x5, x5, #(2*(3 + (2 >> \sx)*3)) // grain_lut += 9 or 6
.if \sy
add x5, x5, x10, lsl #2 // grain_lut += 4 * grain_stride
add x5, x5, x10, lsl #1 // grain_lut += 2 * grain_stride
.else
add x5, x5, x10, lsl #3 // grain_lut += 8 * grain_stride
add x5, x5, x10 // grain_lut += grain_stride
.endif
calc_offset w12, w13, w12, \sx, \sy
calc_offset w14, w15, w14, \sx, \sy
calc_offset w16, w17, w16, \sx, \sy
calc_offset w8, w11, w8, \sx, \sy
add_offset x13, w12, x13, x5, x10
add_offset x15, w14, x15, x5, x10
add_offset x17, w16, x17, x5, x10
add_offset x5, w8, x11, x5, x10
add x4, x13, #2*(32 >> \sx) // grain_lut += FG_BLOCK_SIZE * bx
add x8, x15, x10, lsl #(5 - \sy) // grain_lut += grain_stride * FG_BLOCK_SIZE * by
add x11, x17, x10, lsl #(5 - \sy) // grain_lut += grain_stride * FG_BLOCK_SIZE * by
add x11, x11, #2*(32 >> \sx) // grain_lut += FG_BLOCK_SIZE * bx
ldr w13, [sp, #112] // type
movrel x16, overlap_coeffs_\sx
movrel x14, fguv_loop_sx\sx\()_tbl
ld1 {v27.4h, v28.4h}, [x16] // overlap_coeffs
tst w13, #1
ldrsw x13, [x14, w13, uxtw #2]
b.eq 1f
// y overlap
sub w12, w9, #(2 >> \sy) // backup remaining h
mov w9, #(2 >> \sy)
1:
add x13, x14, x13
.if \sy
movi v25.8h, #23
movi v26.8h, #22
.else
movi v25.8h, #27
movi v26.8h, #17
.endif
.if \sy
add x7, x7, x7 // luma_stride *= 2
.endif
br x13
endfunc
.endm
fguv 420, 1, 1
fguv 422, 1, 0
fguv 444, 0, 0
function fguv_loop_sx0_neon
.macro fguv_loop_sx0 csfl, ox, oy
L(fguv_loop_sx0_csfl\csfl\()_\ox\oy):
AARCH64_VALID_JUMP_TARGET
1:
.if \ox
ld1 {v4.4h}, [x4], x10 // grain_lut old
.endif
.if \oy
ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x8], x10 // grain_lut top
.endif
.if \ox && \oy
ld1 {v5.4h}, [x11], x10 // grain_lut top old
.endif
ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [x5], x10 // grain_lut
.if \ox
smull v4.4s, v4.4h, v27.4h
smlal v4.4s, v16.4h, v28.4h
.endif
.if \oy
.if \ox
smull v5.4s, v5.4h, v27.4h
smlal v5.4s, v0.4h, v28.4h
sqrshrn v4.4h, v4.4s, #5
sqrshrn v5.4h, v5.4s, #5
smin v4.4h, v4.4h, v15.4h
smin v5.4h, v5.4h, v15.4h
smax v4.4h, v4.4h, v14.4h
smax v5.4h, v5.4h, v14.4h
ins v16.d[0], v4.d[0]
ins v0.d[0], v5.d[0]
.endif
smull v6.4s, v16.4h, v26.4h
smull2 v7.4s, v16.8h, v26.8h
smull v10.4s, v17.4h, v26.4h
smull2 v11.4s, v17.8h, v26.8h
smull v16.4s, v18.4h, v26.4h
smull2 v17.4s, v18.8h, v26.8h
smull v18.4s, v19.4h, v26.4h
smull2 v19.4s, v19.8h, v26.8h
smlal v6.4s, v0.4h, v25.4h
smlal2 v7.4s, v0.8h, v25.8h
smlal v10.4s, v1.4h, v25.4h
smlal2 v11.4s, v1.8h, v25.8h
smlal v16.4s, v2.4h, v25.4h
smlal2 v17.4s, v2.8h, v25.8h
smlal v18.4s, v3.4h, v25.4h
smlal2 v19.4s, v3.8h, v25.8h
sqrshrn v6.4h, v6.4s, #5
sqrshrn2 v6.8h, v7.4s, #5
sqrshrn v7.4h, v10.4s, #5
sqrshrn2 v7.8h, v11.4s, #5
sqrshrn v10.4h, v16.4s, #5
sqrshrn2 v10.8h, v17.4s, #5
sqrshrn v11.4h, v18.4s, #5
sqrshrn2 v11.8h, v19.4s, #5
.endif
.if \ox && !\oy
sqrshrn v4.4h, v4.4s, #5
smin v4.4h, v4.4h, v15.4h
.endif
ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x6], x7 // luma
.if \oy
smin v16.8h, v6.8h, v15.8h
smin v17.8h, v7.8h, v15.8h
smin v18.8h, v10.8h, v15.8h
smin v19.8h, v11.8h, v15.8h
smax v16.8h, v16.8h, v14.8h
smax v17.8h, v17.8h, v14.8h
smax v18.8h, v18.8h, v14.8h
smax v19.8h, v19.8h, v14.8h
.endif
.if \ox && !\oy
smax v4.4h, v4.4h, v14.4h
.endif
ld1 {v10.8h, v11.8h, v12.8h, v13.8h}, [x1], x2 // src
.if \ox && !\oy
ins v16.d[0], v4.d[0]
.endif
.if !\csfl
smull v4.4s, v0.4h, v8.4h
smull2 v5.4s, v0.8h, v8.8h
smull v6.4s, v1.4h, v8.4h
smull2 v7.4s, v1.8h, v8.8h
smull v0.4s, v2.4h, v8.4h
smull2 v1.4s, v2.8h, v8.8h
smull v2.4s, v3.4h, v8.4h
smull2 v3.4s, v3.8h, v8.8h
smlal v4.4s, v10.4h, v9.4h
smlal2 v5.4s, v10.8h, v9.8h
smlal v6.4s, v11.4h, v9.4h
smlal2 v7.4s, v11.8h, v9.8h
smlal v0.4s, v12.4h, v9.4h
smlal2 v1.4s, v12.8h, v9.8h
smlal v2.4s, v13.4h, v9.4h
smlal2 v3.4s, v13.8h, v9.8h
shrn v4.4h, v4.4s, #6
shrn2 v4.8h, v5.4s, #6
shrn v5.4h, v6.4s, #6
shrn2 v5.8h, v7.4s, #6
shrn v6.4h, v0.4s, #6
shrn2 v6.8h, v1.4s, #6
shrn v7.4h, v2.4s, #6
shrn2 v7.8h, v3.4s, #6
add v0.8h, v4.8h, v24.8h
add v1.8h, v5.8h, v24.8h
add v2.8h, v6.8h, v24.8h
add v3.8h, v7.8h, v24.8h
movi v20.8h, #0
smin v0.8h, v0.8h, v23.8h
smin v1.8h, v1.8h, v23.8h
smin v2.8h, v2.8h, v23.8h
smin v3.8h, v3.8h, v23.8h
smax v0.8h, v0.8h, v20.8h
smax v1.8h, v1.8h, v20.8h
smax v2.8h, v2.8h, v20.8h
smax v3.8h, v3.8h, v20.8h
.else
// Make sure that uninitialized pixels out of range past the right
// edge are in range; their actual values shouldn't matter.
and v0.16b, v0.16b, v23.16b
and v1.16b, v1.16b, v23.16b
and v2.16b, v2.16b, v23.16b
and v3.16b, v3.16b, v23.16b
.endif
bl gather32_neon
uxtl v4.8h, v6.8b // scaling
uxtl2 v5.8h, v6.16b
uxtl v6.8h, v7.8b
uxtl2 v7.8h, v7.16b
ushl v4.8h, v4.8h, v29.8h // scaling << (15 - scaling_shift)
ushl v5.8h, v5.8h, v29.8h
ushl v6.8h, v6.8h, v29.8h
ushl v7.8h, v7.8h, v29.8h
sqrdmulh v16.8h, v16.8h, v4.8h // round2((scaling << (15 - scaling_shift)) * grain, 15)
sqrdmulh v17.8h, v17.8h, v5.8h
sqrdmulh v18.8h, v18.8h, v6.8h
sqrdmulh v19.8h, v19.8h, v7.8h
usqadd v10.8h, v16.8h // *src + noise
usqadd v11.8h, v17.8h
usqadd v12.8h, v18.8h
usqadd v13.8h, v19.8h
umax v0.8h, v10.8h, v30.8h
umax v1.8h, v11.8h, v30.8h
umax v2.8h, v12.8h, v30.8h
umax v3.8h, v13.8h, v30.8h
umin v0.8h, v0.8h, v31.8h
umin v1.8h, v1.8h, v31.8h
umin v2.8h, v2.8h, v31.8h
umin v3.8h, v3.8h, v31.8h
subs w9, w9, #1
.if \oy
dup v25.8h, v28.h[0]
dup v26.8h, v28.h[1]
.endif
st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], x2 // dst
b.gt 1b
.if \oy
cmp w12, #0
mov w9, w12 // restore actual remaining h
b.gt L(fguv_loop_sx0_csfl\csfl\()_\ox\()0)
.endif
b 9f
.endm
fguv_loop_sx0 0, 0, 0
fguv_loop_sx0 0, 0, 1
fguv_loop_sx0 0, 1, 0
fguv_loop_sx0 0, 1, 1
fguv_loop_sx0 1, 0, 0
fguv_loop_sx0 1, 0, 1
fguv_loop_sx0 1, 1, 0
fguv_loop_sx0 1, 1, 1
9:
ldp d14, d15, [sp, #64]
ldp d12, d13, [sp, #48]
ldp d10, d11, [sp, #32]
ldp d8, d9, [sp, #16]
ldr x30, [sp], #80
AARCH64_VALIDATE_LINK_REGISTER
ret
endfunc
jumptable fguv_loop_sx0_tbl
.word L(fguv_loop_sx0_csfl0_00) - fguv_loop_sx0_tbl
.word L(fguv_loop_sx0_csfl0_01) - fguv_loop_sx0_tbl
.word L(fguv_loop_sx0_csfl0_10) - fguv_loop_sx0_tbl
.word L(fguv_loop_sx0_csfl0_11) - fguv_loop_sx0_tbl
.word L(fguv_loop_sx0_csfl1_00) - fguv_loop_sx0_tbl
.word L(fguv_loop_sx0_csfl1_01) - fguv_loop_sx0_tbl
.word L(fguv_loop_sx0_csfl1_10) - fguv_loop_sx0_tbl
.word L(fguv_loop_sx0_csfl1_11) - fguv_loop_sx0_tbl
endjumptable
function fguv_loop_sx1_neon
.macro fguv_loop_sx1 csfl, ox, oy
L(fguv_loop_sx1_csfl\csfl\()_\ox\oy):
AARCH64_VALID_JUMP_TARGET
1:
.if \ox
ld1 {v18.4h}, [x4], x10 // grain_lut old
.endif
.if \oy
ld1 {v20.8h, v21.8h}, [x8], x10 // grain_lut top
.endif
.if \ox && \oy
ld1 {v19.4h}, [x11], x10 // grain_lut top old
.endif
ld1 {v16.8h, v17.8h}, [x5], x10 // grain_lut
.if \ox
smull v18.4s, v18.4h, v27.4h
smlal v18.4s, v16.4h, v28.4h
.endif
.if \oy
.if \ox
smull v19.4s, v19.4h, v27.4h
smlal v19.4s, v20.4h, v28.4h
sqrshrn v18.4h, v18.4s, #5
sqrshrn v19.4h, v19.4s, #5
smin v18.4h, v18.4h, v15.4h
smin v19.4h, v19.4h, v15.4h
smax v18.4h, v18.4h, v14.4h
smax v19.4h, v19.4h, v14.4h
ins v16.d[0], v18.d[0]
ins v20.d[0], v19.d[0]
.endif
smull v0.4s, v16.4h, v26.4h
smull2 v1.4s, v16.8h, v26.8h
smull v2.4s, v17.4h, v26.4h
smull2 v3.4s, v17.8h, v26.8h
smlal v0.4s, v20.4h, v25.4h
smlal2 v1.4s, v20.8h, v25.8h
smlal v2.4s, v21.4h, v25.4h
smlal2 v3.4s, v21.8h, v25.8h
sqrshrn v16.4h, v0.4s, #5
sqrshrn2 v16.8h, v1.4s, #5
sqrshrn v17.4h, v2.4s, #5
sqrshrn2 v17.8h, v3.4s, #5
.endif
.if \ox && !\oy
sqrshrn v18.4h, v18.4s, #5
smin v18.4h, v18.4h, v15.4h
.endif
ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x6], x7 // luma
.if \oy
smin v16.8h, v16.8h, v15.8h
smin v17.8h, v17.8h, v15.8h
smax v16.8h, v16.8h, v14.8h
smax v17.8h, v17.8h, v14.8h
.endif
.if \ox && !\oy
smax v18.4h, v18.4h, v14.4h
.endif
ld1 {v10.8h, v11.8h}, [x1], x2 // src
.if \ox && !\oy
ins v16.d[0], v18.d[0]
.endif
addp v0.8h, v0.8h, v1.8h
addp v1.8h, v2.8h, v3.8h
urshr v0.8h, v0.8h, #1
urshr v1.8h, v1.8h, #1
.if !\csfl
smull v2.4s, v0.4h, v8.4h
smull2 v3.4s, v0.8h, v8.8h
smull v0.4s, v1.4h, v8.4h
smull2 v1.4s, v1.8h, v8.8h
smlal v2.4s, v10.4h, v9.4h
smlal2 v3.4s, v10.8h, v9.8h
smlal v0.4s, v11.4h, v9.4h
smlal2 v1.4s, v11.8h, v9.8h
shrn v2.4h, v2.4s, #6
shrn2 v2.8h, v3.4s, #6
shrn v3.4h, v0.4s, #6
shrn2 v3.8h, v1.4s, #6
add v0.8h, v2.8h, v24.8h
add v1.8h, v3.8h, v24.8h
movi v2.8h, #0
smin v0.8h, v0.8h, v23.8h
smin v1.8h, v1.8h, v23.8h
smax v0.8h, v0.8h, v2.8h
smax v1.8h, v1.8h, v2.8h
.else
// Make sure that uninitialized pixels out of range past the right
// edge are in range; their actual values shouldn't matter.
and v0.16b, v0.16b, v23.16b
and v1.16b, v1.16b, v23.16b
.endif
bl gather16_neon
uxtl v4.8h, v6.8b // scaling
uxtl2 v5.8h, v6.16b
ushl v4.8h, v4.8h, v29.8h // scaling << (15 - scaling_shift)
ushl v5.8h, v5.8h, v29.8h
sqrdmulh v16.8h, v16.8h, v4.8h // round2((scaling << (15 - scaling_shift)) * grain, 15)
sqrdmulh v17.8h, v17.8h, v5.8h
usqadd v10.8h, v16.8h // *src + noise
usqadd v11.8h, v17.8h
umax v0.8h, v10.8h, v30.8h
umax v1.8h, v11.8h, v30.8h
umin v0.8h, v0.8h, v31.8h
umin v1.8h, v1.8h, v31.8h
.if \oy
mov v16.16b, v25.16b
.endif
subs w9, w9, #1
.if \oy
mov v25.16b, v26.16b
mov v26.16b, v16.16b
.endif
st1 {v0.8h, v1.8h}, [x0], x2 // dst
b.gt 1b
.if \oy
cmp w12, #0
mov w9, w12 // restore actual remaining h
b.gt L(fguv_loop_sx1_csfl\csfl\()_\ox\()0)
.endif
b 9f
.endm
fguv_loop_sx1 0, 0, 0
fguv_loop_sx1 0, 0, 1
fguv_loop_sx1 0, 1, 0
fguv_loop_sx1 0, 1, 1
fguv_loop_sx1 1, 0, 0
fguv_loop_sx1 1, 0, 1
fguv_loop_sx1 1, 1, 0
fguv_loop_sx1 1, 1, 1
9:
ldp d14, d15, [sp, #64]
ldp d12, d13, [sp, #48]
ldp d10, d11, [sp, #32]
ldp d8, d9, [sp, #16]
ldr x30, [sp], #80
AARCH64_VALIDATE_LINK_REGISTER
ret
endfunc
jumptable fguv_loop_sx1_tbl
.word L(fguv_loop_sx1_csfl0_00) - fguv_loop_sx1_tbl
.word L(fguv_loop_sx1_csfl0_01) - fguv_loop_sx1_tbl
.word L(fguv_loop_sx1_csfl0_10) - fguv_loop_sx1_tbl
.word L(fguv_loop_sx1_csfl0_11) - fguv_loop_sx1_tbl
.word L(fguv_loop_sx1_csfl1_00) - fguv_loop_sx1_tbl
.word L(fguv_loop_sx1_csfl1_01) - fguv_loop_sx1_tbl
.word L(fguv_loop_sx1_csfl1_10) - fguv_loop_sx1_tbl
.word L(fguv_loop_sx1_csfl1_11) - fguv_loop_sx1_tbl
endjumptable
third_party/dav1d/src/arm/64/loopfilter16.S
/*
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2020, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm.S"
#include "util.S"
// depending on how many pixels need to be stored, returns:
// x14 = (1 << 0) : 0 pixels
// x14 = (1 << 4) : inner 4 pixels
// x14 = (1 << 6) : inner 6 pixels
// x14 = 0 : all pixels
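// On entry to lpf_8_wd*_neon: p3..q3 live in v20-v27 (with p6..p4 in
// v17-v19 and q4..q6 in v28-v30 for wd == 16), the E/I/H thresholds in
// v10/v11/v12, and v13/v14/v15 hold the wd >= 4, wd > 4 and wd == 16 masks.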
.macro loop_filter wd
function lpf_8_wd\wd\()_neon
uabd v0.8h, v22.8h, v23.8h // abs(p1 - p0)
uabd v1.8h, v25.8h, v24.8h // abs(q1 - q0)
uabd v2.8h, v23.8h, v24.8h // abs(p0 - q0)
uabd v3.8h, v22.8h, v25.8h // abs(p1 - q1)
.if \wd >= 6
uabd v4.8h, v21.8h, v22.8h // abs(p2 - p1)
uabd v5.8h, v26.8h, v25.8h // abs(q2 - q1)
.endif
.if \wd >= 8
uabd v6.8h, v20.8h, v21.8h // abs(p3 - p2)
uabd v7.8h, v27.8h, v26.8h // abs(q3 - q2)
.endif
.if \wd >= 6
umax v4.8h, v4.8h, v5.8h
.endif
uqadd v2.8h, v2.8h, v2.8h // abs(p0 - q0) * 2
.if \wd >= 8
umax v6.8h, v6.8h, v7.8h
.endif
ushr v3.8h, v3.8h, #1
.if \wd >= 8
umax v4.8h, v4.8h, v6.8h
.endif
.if \wd >= 6
and v4.16b, v4.16b, v14.16b
.endif
umax v0.8h, v0.8h, v1.8h // max(abs(p1 - p0), abs(q1 - q0))
uqadd v2.8h, v2.8h, v3.8h // abs(p0 - q0) * 2 + abs(p1 - q1) >> 1
.if \wd >= 6
umax v4.8h, v0.8h, v4.8h
cmhs v1.8h, v11.8h, v4.8h // max(abs(p1 - p0), abs(q1 - q0), abs(), abs(), ...) <= I
.else
cmhs v1.8h, v11.8h, v0.8h // max(abs(p1 - p0), abs(q1 - q0)) <= I
.endif
cmhs v2.8h, v10.8h, v2.8h // abs(p0 - q0) * 2 + abs(p1 - q1) >> 1 <= E
and v1.16b, v1.16b, v2.16b // fm
and v1.16b, v1.16b, v13.16b // fm && wd >= 4
.if \wd >= 6
and v14.16b, v14.16b, v1.16b // fm && wd > 4
.endif
.if \wd >= 16
and v15.16b, v15.16b, v1.16b // fm && wd == 16
.endif
mov x16, v1.d[0]
mov x17, v1.d[1]
adds x16, x16, x17
b.ne 9f // if (!fm || wd < 4) return;
mov x14, #(1 << 0)
ret
9:
.if \wd >= 6
movi v10.8h, #1
uabd v2.8h, v21.8h, v23.8h // abs(p2 - p0)
uabd v3.8h, v22.8h, v23.8h // abs(p1 - p0)
uabd v4.8h, v25.8h, v24.8h // abs(q1 - q0)
uabd v5.8h, v26.8h, v24.8h // abs(q2 - q0)
dup v9.8h, w9 // bitdepth_min_8
.if \wd >= 8
uabd v6.8h, v20.8h, v23.8h // abs(p3 - p0)
uabd v7.8h, v27.8h, v24.8h // abs(q3 - q0)
.endif
umax v2.8h, v2.8h, v3.8h
umax v4.8h, v4.8h, v5.8h
.if \wd >= 8
umax v6.8h, v6.8h, v7.8h
.endif
umax v2.8h, v2.8h, v4.8h
ushl v10.8h, v10.8h, v9.8h // F = 1 << bitdepth_min_8
.if \wd >= 8
umax v2.8h, v2.8h, v6.8h
.endif
.if \wd == 16
uabd v3.8h, v17.8h, v23.8h // abs(p6 - p0)
uabd v4.8h, v18.8h, v23.8h // abs(p5 - p0)
uabd v5.8h, v19.8h, v23.8h // abs(p4 - p0)
.endif
cmhs v2.8h, v10.8h, v2.8h // flat8in
.if \wd == 16
uabd v6.8h, v28.8h, v24.8h // abs(q4 - q0)
uabd v7.8h, v29.8h, v24.8h // abs(q5 - q0)
uabd v8.8h, v30.8h, v24.8h // abs(q6 - q0)
.endif
and v14.16b, v2.16b, v14.16b // flat8in && fm && wd > 4
bic v1.16b, v1.16b, v14.16b // fm && wd >= 4 && !flat8in
.if \wd == 16
umax v3.8h, v3.8h, v4.8h
umax v5.8h, v5.8h, v6.8h
.endif
mov x16, v1.d[0]
mov x17, v1.d[1]
.if \wd == 16
umax v7.8h, v7.8h, v8.8h
umax v3.8h, v3.8h, v5.8h
umax v3.8h, v3.8h, v7.8h
cmhs v3.8h, v10.8h, v3.8h // flat8out
.endif
adds x16, x16, x17
.if \wd == 16
and v15.16b, v15.16b, v3.16b // flat8out && fm && wd == 16
and v15.16b, v15.16b, v14.16b // flat8out && flat8in && fm && wd == 16
bic v14.16b, v14.16b, v15.16b // flat8in && fm && wd >= 4 && !flat8out
.endif
b.eq 1f // skip wd == 4 case
.endif
dup v3.8h, w8 // bitdepth_max
sub v2.8h, v22.8h, v25.8h // p1 - q1
ushr v3.8h, v3.8h, #1 // 128 << bitdepth_min_8 - 1
cmhi v0.8h, v0.8h, v12.8h // hev
not v9.16b, v3.16b // - 128 * (1 << bitdepth_min_8)
smin v2.8h, v2.8h, v3.8h // iclip_diff(p1 - q1)
smax v2.8h, v2.8h, v9.8h // iclip_diff(p1 - q1)
and v4.16b, v2.16b, v0.16b // if (hev) iclip_diff(p1 - q1)
sub v2.8h, v24.8h, v23.8h
movi v5.8h, #3
bic v0.16b, v1.16b, v0.16b // (fm && wd >= 4 && !hev)
mul v2.8h, v2.8h, v5.8h
movi v6.8h, #4
add v2.8h, v2.8h, v4.8h
smin v2.8h, v2.8h, v3.8h // f = iclip_diff()
smax v2.8h, v2.8h, v9.8h // f = iclip_diff()
sqadd v4.8h, v6.8h, v2.8h // f + 4
sqadd v5.8h, v5.8h, v2.8h // f + 3
smin v4.8h, v4.8h, v3.8h // imin(f + 4, (128 << bitdepth_min_8) - 1)
smin v5.8h, v5.8h, v3.8h // imin(f + 3, (128 << bitdepth_min_8) - 1)
sshr v4.8h, v4.8h, #3 // f1
sshr v5.8h, v5.8h, #3 // f2
movi v9.8h, #0
dup v3.8h, w8 // bitdepth_max
sqadd v2.8h, v23.8h, v5.8h // p0 + f2
sqsub v6.8h, v24.8h, v4.8h // q0 - f1
srshr v4.8h, v4.8h, #1 // (f1 + 1) >> 1
smin v2.8h, v2.8h, v3.8h // out p0 = iclip_pixel()
smin v6.8h, v6.8h, v3.8h // out q0 = iclip_pixel()
smax v2.8h, v2.8h, v9.8h // out p0 = iclip_pixel()
smax v6.8h, v6.8h, v9.8h // out q0 = iclip_pixel()
bit v23.16b, v2.16b, v1.16b // if (fm && wd >= 4)
bit v24.16b, v6.16b, v1.16b // if (fm && wd >= 4)
sqadd v2.8h, v22.8h, v4.8h // p1 + f
sqsub v6.8h, v25.8h, v4.8h // q1 - f
smin v2.8h, v2.8h, v3.8h // out p1 = iclip_pixel()
smin v6.8h, v6.8h, v3.8h // out q1 = iclip_pixel()
smax v2.8h, v2.8h, v9.8h // out p1 = iclip_pixel()
smax v6.8h, v6.8h, v9.8h // out q1 = iclip_pixel()
bit v22.16b, v2.16b, v0.16b // if (fm && wd >= 4 && !hev)
bit v25.16b, v6.16b, v0.16b // if (fm && wd >= 4 && !hev)
1:
.if \wd == 6
mov x16, v14.d[0]
mov x17, v14.d[1]
adds x16, x16, x17
b.eq 2f // skip if there's no flat8in
add v0.8h, v21.8h, v21.8h // p2 * 2
add v2.8h, v21.8h, v22.8h // p2 + p1
add v4.8h, v22.8h, v23.8h // p1 + p0
add v6.8h, v23.8h, v24.8h // p0 + q0
add v8.8h, v0.8h, v2.8h
add v10.8h, v4.8h, v6.8h
add v12.8h, v24.8h, v25.8h // q0 + q1
add v8.8h, v8.8h, v10.8h
sub v12.8h, v12.8h, v0.8h
add v10.8h, v25.8h, v26.8h // q1 + q2
urshr v0.8h, v8.8h, #3 // out p1
add v8.8h, v8.8h, v12.8h
sub v10.8h, v10.8h, v2.8h
add v12.8h, v26.8h, v26.8h // q2 + q2
urshr v1.8h, v8.8h, #3 // out p0
add v8.8h, v8.8h, v10.8h
sub v12.8h, v12.8h, v4.8h
urshr v2.8h, v8.8h, #3 // out q0
bit v22.16b, v0.16b, v14.16b // p1 if (flat8in)
add v8.8h, v8.8h, v12.8h
bit v23.16b, v1.16b, v14.16b // p0 if (flat8in)
urshr v3.8h, v8.8h, #3 // out q1
bit v24.16b, v2.16b, v14.16b // q0 if (flat8in)
bit v25.16b, v3.16b, v14.16b // q1 if (flat8in)
.elseif \wd >= 8
mov x16, v14.d[0]
mov x17, v14.d[1]
adds x16, x16, x17
.if \wd == 8
b.eq 8f // skip if there's no flat8in
.else
b.eq 2f // skip if there's no flat8in
.endif
add v0.8h, v20.8h, v21.8h // p3 + p2
add v2.8h, v22.8h, v25.8h // p1 + q1
add v4.8h, v20.8h, v22.8h // p3 + p1
add v6.8h, v23.8h, v26.8h // p0 + q2
add v8.8h, v0.8h, v0.8h // 2 * (p3 + p2)
add v9.8h, v23.8h, v24.8h // p0 + q0
add v8.8h, v8.8h, v4.8h // + p3 + p1
sub v2.8h, v2.8h, v0.8h // p1 + q1 - p3 - p2
add v8.8h, v8.8h, v9.8h // + p0 + q0
sub v6.8h, v6.8h, v4.8h // p0 + q2 - p3 - p1
urshr v10.8h, v8.8h, #3 // out p2
add v8.8h, v8.8h, v2.8h
add v0.8h, v20.8h, v23.8h // p3 + p0
add v2.8h, v24.8h, v27.8h // q0 + q3
urshr v11.8h, v8.8h, #3 // out p1
add v8.8h, v8.8h, v6.8h
sub v2.8h, v2.8h, v0.8h // q0 + q3 - p3 - p0
add v4.8h, v21.8h, v24.8h // p2 + q0
add v6.8h, v25.8h, v27.8h // q1 + q3
urshr v12.8h, v8.8h, #3 // out p0
add v8.8h, v8.8h, v2.8h
sub v6.8h, v6.8h, v4.8h // q1 + q3 - p2 - q0
add v0.8h, v22.8h, v25.8h // p1 + q1
add v2.8h, v26.8h, v27.8h // q2 + q3
urshr v13.8h, v8.8h, #3 // out q0
add v8.8h, v8.8h, v6.8h
sub v2.8h, v2.8h, v0.8h // q2 + q3 - p1 - q1
urshr v0.8h, v8.8h, #3 // out q1
add v8.8h, v8.8h, v2.8h
bit v21.16b, v10.16b, v14.16b
bit v22.16b, v11.16b, v14.16b
bit v23.16b, v12.16b, v14.16b
urshr v1.8h, v8.8h, #3 // out q2
bit v24.16b, v13.16b, v14.16b
bit v25.16b, v0.16b, v14.16b
bit v26.16b, v1.16b, v14.16b
.endif
2:
.if \wd == 16
mov x16, v15.d[0]
mov x17, v15.d[1]
adds x16, x16, x17
b.ne 1f // check if flat8out is needed
mov x16, v14.d[0]
mov x17, v14.d[1]
adds x16, x16, x17
b.eq 8f // if there was no flat8in, just write the inner 4 pixels
b 7f // if flat8in was used, write the inner 6 pixels
1:
add v2.8h, v17.8h, v17.8h // p6 + p6
add v4.8h, v17.8h, v18.8h // p6 + p5
add v6.8h, v17.8h, v19.8h // p6 + p4
add v8.8h, v17.8h, v20.8h // p6 + p3
add v12.8h, v2.8h, v4.8h
add v10.8h, v6.8h, v8.8h
add v6.8h, v17.8h, v21.8h // p6 + p2
add v12.8h, v12.8h, v10.8h
add v8.8h, v17.8h, v22.8h // p6 + p1
add v10.8h, v18.8h, v23.8h // p5 + p0
add v6.8h, v6.8h, v8.8h
add v8.8h, v19.8h, v24.8h // p4 + q0
add v12.8h, v12.8h, v6.8h
add v10.8h, v10.8h, v8.8h
add v6.8h, v20.8h, v25.8h // p3 + q1
add v12.8h, v12.8h, v10.8h
sub v6.8h, v6.8h, v2.8h
add v2.8h, v21.8h, v26.8h // p2 + q2
urshr v0.8h, v12.8h, #4 // out p5
add v12.8h, v12.8h, v6.8h // - (p6 + p6) + (p3 + q1)
sub v2.8h, v2.8h, v4.8h
add v4.8h, v22.8h, v27.8h // p1 + q3
add v6.8h, v17.8h, v19.8h // p6 + p4
urshr v1.8h, v12.8h, #4 // out p4
add v12.8h, v12.8h, v2.8h // - (p6 + p5) + (p2 + q2)
sub v4.8h, v4.8h, v6.8h
add v6.8h, v23.8h, v28.8h // p0 + q4
add v8.8h, v17.8h, v20.8h // p6 + p3
urshr v2.8h, v12.8h, #4 // out p3
add v12.8h, v12.8h, v4.8h // - (p6 + p4) + (p1 + q3)
sub v6.8h, v6.8h, v8.8h
add v8.8h, v24.8h, v29.8h // q0 + q5
add v4.8h, v17.8h, v21.8h // p6 + p2
urshr v3.8h, v12.8h, #4 // out p2
add v12.8h, v12.8h, v6.8h // - (p6 + p3) + (p0 + q4)
sub v8.8h, v8.8h, v4.8h
add v6.8h, v25.8h, v30.8h // q1 + q6
add v10.8h, v17.8h, v22.8h // p6 + p1
urshr v4.8h, v12.8h, #4 // out p1
add v12.8h, v12.8h, v8.8h // - (p6 + p2) + (q0 + q5)
sub v6.8h, v6.8h, v10.8h
add v8.8h, v26.8h, v30.8h // q2 + q6
bif v0.16b, v18.16b, v15.16b // out p5
add v10.8h, v18.8h, v23.8h // p5 + p0
urshr v5.8h, v12.8h, #4 // out p0
add v12.8h, v12.8h, v6.8h // - (p6 + p1) + (q1 + q6)
sub v8.8h, v8.8h, v10.8h
add v10.8h, v27.8h, v30.8h // q3 + q6
bif v1.16b, v19.16b, v15.16b // out p4
add v18.8h, v19.8h, v24.8h // p4 + q0
urshr v6.8h, v12.8h, #4 // out q0
add v12.8h, v12.8h, v8.8h // - (p5 + p0) + (q2 + q6)
sub v10.8h, v10.8h, v18.8h
add v8.8h, v28.8h, v30.8h // q4 + q6
bif v2.16b, v20.16b, v15.16b // out p3
add v18.8h, v20.8h, v25.8h // p3 + q1
urshr v7.8h, v12.8h, #4 // out q1
add v12.8h, v12.8h, v10.8h // - (p4 + q0) + (q3 + q6)
sub v18.8h, v8.8h, v18.8h
add v10.8h, v29.8h, v30.8h // q5 + q6
bif v3.16b, v21.16b, v15.16b // out p2
add v20.8h, v21.8h, v26.8h // p2 + q2
urshr v8.8h, v12.8h, #4 // out q2
add v12.8h, v12.8h, v18.8h // - (p3 + q1) + (q4 + q6)
sub v10.8h, v10.8h, v20.8h
add v18.8h, v30.8h, v30.8h // q6 + q6
bif v4.16b, v22.16b, v15.16b // out p1
add v20.8h, v22.8h, v27.8h // p1 + q3
urshr v9.8h, v12.8h, #4 // out q3
add v12.8h, v12.8h, v10.8h // - (p2 + q2) + (q5 + q6)
sub v18.8h, v18.8h, v20.8h
bif v5.16b, v23.16b, v15.16b // out p0
urshr v10.8h, v12.8h, #4 // out q4
add v12.8h, v12.8h, v18.8h // - (p1 + q3) + (q6 + q6)
urshr v11.8h, v12.8h, #4 // out q5
bif v6.16b, v24.16b, v15.16b // out q0
bif v7.16b, v25.16b, v15.16b // out q1
bif v8.16b, v26.16b, v15.16b // out q2
bif v9.16b, v27.16b, v15.16b // out q3
bif v10.16b, v28.16b, v15.16b // out q4
bif v11.16b, v29.16b, v15.16b // out q5
.endif
mov x14, #0
ret
.if \wd == 16
7:
// Return to a shorter epilogue, writing only the inner 6 pixels
mov x14, #(1 << 6)
ret
.endif
.if \wd >= 8
8:
// Return to a shorter epilogue, writing only the inner 4 pixels
mov x14, #(1 << 4)
ret
.endif
endfunc
.endm
loop_filter 16
loop_filter 8
loop_filter 6
loop_filter 4
.macro lpf_8_wd16
bl lpf_8_wd16_neon
cbz x14, 1f
tbnz x14, #6, 7f
tbnz x14, #4, 8f
ret x15
1:
.endm
.macro lpf_8_wd8
bl lpf_8_wd8_neon
cbz x14, 1f
tbnz x14, #4, 8f
ret x15
1:
.endm
.macro lpf_8_wd6
bl lpf_8_wd6_neon
cbz x14, 1f
ret x15
1:
.endm
.macro lpf_8_wd4
bl lpf_8_wd4_neon
cbz x14, 1f
ret x15
1:
.endm
function lpf_v_4_8_neon
mov x15, x30
sub x16, x0, x1, lsl #1
ld1 {v22.8h}, [x16], x1 // p1
ld1 {v24.8h}, [x0], x1 // q0
ld1 {v23.8h}, [x16], x1 // p0
ld1 {v25.8h}, [x0], x1 // q1
sub x0, x0, x1, lsl #1
lpf_8_wd4
sub x16, x0, x1, lsl #1
st1 {v22.8h}, [x16], x1 // p1
st1 {v24.8h}, [x0], x1 // q0
st1 {v23.8h}, [x16], x1 // p0
st1 {v25.8h}, [x0], x1 // q1
sub x0, x0, x1, lsl #1
ret x15
endfunc
function lpf_h_4_8_neon
mov x15, x30
sub x16, x0, #4
add x0, x16, x1, lsl #2
ld1 {v22.d}[0], [x16], x1
ld1 {v22.d}[1], [x0], x1
ld1 {v23.d}[0], [x16], x1
ld1 {v23.d}[1], [x0], x1
ld1 {v24.d}[0], [x16], x1
ld1 {v24.d}[1], [x0], x1
ld1 {v25.d}[0], [x16], x1
ld1 {v25.d}[1], [x0], x1
add x0, x0, #4
transpose_4x8h v22, v23, v24, v25, v26, v27, v28, v29
lpf_8_wd4
sub x16, x0, x1, lsl #3
sub x16, x16, #4
transpose_4x8h v22, v23, v24, v25, v26, v27, v28, v29
add x0, x16, x1, lsl #2
st1 {v22.d}[0], [x16], x1
st1 {v22.d}[1], [x0], x1
st1 {v23.d}[0], [x16], x1
st1 {v23.d}[1], [x0], x1
st1 {v24.d}[0], [x16], x1
st1 {v24.d}[1], [x0], x1
st1 {v25.d}[0], [x16], x1
st1 {v25.d}[1], [x0], x1
add x0, x0, #4
ret x15
endfunc
function lpf_v_6_8_neon
mov x15, x30
sub x16, x0, x1, lsl #1
sub x16, x16, x1
ld1 {v21.8h}, [x16], x1 // p2
ld1 {v24.8h}, [x0], x1 // q0
ld1 {v22.8h}, [x16], x1 // p1
ld1 {v25.8h}, [x0], x1 // q1
ld1 {v23.8h}, [x16], x1 // p0
ld1 {v26.8h}, [x0], x1 // q2
sub x0, x0, x1, lsl #1
sub x0, x0, x1
lpf_8_wd6
sub x16, x0, x1, lsl #1
st1 {v22.8h}, [x16], x1 // p1
st1 {v24.8h}, [x0], x1 // q0
st1 {v23.8h}, [x16], x1 // p0
st1 {v25.8h}, [x0], x1 // q1
sub x0, x0, x1, lsl #1
ret x15
endfunc
function lpf_h_6_8_neon
mov x15, x30
sub x16, x0, #8
add x0, x16, x1, lsl #2
ld1 {v20.8h}, [x16], x1
ld1 {v24.8h}, [x0], x1
ld1 {v21.8h}, [x16], x1
ld1 {v25.8h}, [x0], x1
ld1 {v22.8h}, [x16], x1
ld1 {v26.8h}, [x0], x1
ld1 {v23.8h}, [x16], x1
ld1 {v27.8h}, [x0], x1
add x0, x0, #8
transpose_8x8h v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
lpf_8_wd6
sub x16, x0, x1, lsl #3
sub x16, x16, #4
transpose_4x8h v22, v23, v24, v25, v26, v27, v28, v29
add x0, x16, x1, lsl #2
st1 {v22.d}[0], [x16], x1
st1 {v22.d}[1], [x0], x1
st1 {v23.d}[0], [x16], x1
st1 {v23.d}[1], [x0], x1
st1 {v24.d}[0], [x16], x1
st1 {v24.d}[1], [x0], x1
st1 {v25.d}[0], [x16], x1
st1 {v25.d}[1], [x0], x1
add x0, x0, #4
ret x15
endfunc
function lpf_v_8_8_neon
mov x15, x30
sub x16, x0, x1, lsl #2
ld1 {v20.8h}, [x16], x1 // p3
ld1 {v24.8h}, [x0], x1 // q0
ld1 {v21.8h}, [x16], x1 // p2
ld1 {v25.8h}, [x0], x1 // q1
ld1 {v22.8h}, [x16], x1 // p1
ld1 {v26.8h}, [x0], x1 // q2
ld1 {v23.8h}, [x16], x1 // p0
ld1 {v27.8h}, [x0], x1 // q3
sub x0, x0, x1, lsl #2
lpf_8_wd8
sub x16, x0, x1, lsl #1
sub x16, x16, x1
st1 {v21.8h}, [x16], x1 // p2
st1 {v24.8h}, [x0], x1 // q0
st1 {v22.8h}, [x16], x1 // p1
st1 {v25.8h}, [x0], x1 // q1
st1 {v23.8h}, [x16], x1 // p0
st1 {v26.8h}, [x0], x1 // q2
sub x0, x0, x1, lsl #1
sub x0, x0, x1
ret x15
8:
sub x16, x0, x1, lsl #1
st1 {v22.8h}, [x16], x1 // p1
st1 {v24.8h}, [x0], x1 // q0
st1 {v23.8h}, [x16], x1 // p0
st1 {v25.8h}, [x0], x1 // q1
sub x0, x0, x1, lsl #1
ret x15
endfunc
function lpf_h_8_8_neon
mov x15, x30
sub x16, x0, #8
add x0, x16, x1, lsl #2
ld1 {v20.8h}, [x16], x1
ld1 {v24.8h}, [x0], x1
ld1 {v21.8h}, [x16], x1
ld1 {v25.8h}, [x0], x1
ld1 {v22.8h}, [x16], x1
ld1 {v26.8h}, [x0], x1
ld1 {v23.8h}, [x16], x1
ld1 {v27.8h}, [x0], x1
add x0, x0, #8
transpose_8x8h v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
lpf_8_wd8
sub x16, x0, x1, lsl #3
sub x16, x16, #8
transpose_8x8h v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
add x0, x16, x1, lsl #2
st1 {v20.8h}, [x16], x1
st1 {v24.8h}, [x0], x1
st1 {v21.8h}, [x16], x1
st1 {v25.8h}, [x0], x1
st1 {v22.8h}, [x16], x1
st1 {v26.8h}, [x0], x1
st1 {v23.8h}, [x16], x1
st1 {v27.8h}, [x0], x1
add x0, x0, #8
ret x15
8:
sub x16, x0, x1, lsl #3
sub x16, x16, #4
transpose_4x8h v22, v23, v24, v25, v26, v27, v28, v29
add x0, x16, x1, lsl #2
st1 {v22.d}[0], [x16], x1
st1 {v22.d}[1], [x0], x1
st1 {v23.d}[0], [x16], x1
st1 {v23.d}[1], [x0], x1
st1 {v24.d}[0], [x16], x1
st1 {v24.d}[1], [x0], x1
st1 {v25.d}[0], [x16], x1
st1 {v25.d}[1], [x0], x1
add x0, x0, #4
ret x15
endfunc
function lpf_v_16_8_neon
mov x15, x30
sub x16, x0, x1, lsl #3
add x16, x16, x1
ld1 {v17.8h}, [x16], x1 // p6
ld1 {v24.8h}, [x0], x1 // q0
ld1 {v18.8h}, [x16], x1 // p5
ld1 {v25.8h}, [x0], x1 // q1
ld1 {v19.8h}, [x16], x1 // p4
ld1 {v26.8h}, [x0], x1 // q2
ld1 {v20.8h}, [x16], x1 // p3
ld1 {v27.8h}, [x0], x1 // q3
ld1 {v21.8h}, [x16], x1 // p2
ld1 {v28.8h}, [x0], x1 // q4
ld1 {v22.8h}, [x16], x1 // p1
ld1 {v29.8h}, [x0], x1 // q5
ld1 {v23.8h}, [x16], x1 // p0
ld1 {v30.8h}, [x0], x1 // q6
sub x0, x0, x1, lsl #3
add x0, x0, x1
lpf_8_wd16
sub x16, x0, x1, lsl #2
sub x16, x16, x1, lsl #1
st1 {v0.8h}, [x16], x1 // p5
st1 {v6.8h}, [x0], x1 // q0
st1 {v1.8h}, [x16], x1 // p4
st1 {v7.8h}, [x0], x1 // q1
st1 {v2.8h}, [x16], x1 // p3
st1 {v8.8h}, [x0], x1 // q2
st1 {v3.8h}, [x16], x1 // p2
st1 {v9.8h}, [x0], x1 // q3
st1 {v4.8h}, [x16], x1 // p1
st1 {v10.8h}, [x0], x1 // q4
st1 {v5.8h}, [x16], x1 // p0
st1 {v11.8h}, [x0], x1 // q5
sub x0, x0, x1, lsl #2
sub x0, x0, x1, lsl #1
ret x15
7:
sub x16, x0, x1
sub x16, x16, x1, lsl #1
st1 {v21.8h}, [x16], x1 // p2
st1 {v24.8h}, [x0], x1 // q0
st1 {v22.8h}, [x16], x1 // p1
st1 {v25.8h}, [x0], x1 // q1
st1 {v23.8h}, [x16], x1 // p0
st1 {v26.8h}, [x0], x1 // q2
sub x0, x0, x1, lsl #1
sub x0, x0, x1
ret x15
8:
sub x16, x0, x1, lsl #1
st1 {v22.8h}, [x16], x1 // p1
st1 {v24.8h}, [x0], x1 // q0
st1 {v23.8h}, [x16], x1 // p0
st1 {v25.8h}, [x0], x1 // q1
sub x0, x0, x1, lsl #1
ret x15
endfunc
function lpf_h_16_8_neon
mov x15, x30
sub x16, x0, #16
ld1 {v16.8h}, [x16], x1
ld1 {v24.8h}, [x0], x1
ld1 {v17.8h}, [x16], x1
ld1 {v25.8h}, [x0], x1
ld1 {v18.8h}, [x16], x1
ld1 {v26.8h}, [x0], x1
ld1 {v19.8h}, [x16], x1
ld1 {v27.8h}, [x0], x1
ld1 {v20.8h}, [x16], x1
ld1 {v28.8h}, [x0], x1
ld1 {v21.8h}, [x16], x1
ld1 {v29.8h}, [x0], x1
ld1 {v22.8h}, [x16], x1
ld1 {v30.8h}, [x0], x1
ld1 {v23.8h}, [x16], x1
ld1 {v31.8h}, [x0], x1
transpose_8x8h v16, v17, v18, v19, v20, v21, v22, v23, v0, v1
transpose_8x8h v24, v25, v26, v27, v28, v29, v30, v31, v0, v1
lpf_8_wd16
sub x0, x0, x1, lsl #3
sub x16, x0, #16
transpose_8x8h v16, v17, v0, v1, v2, v3, v4, v5, v18, v19
transpose_8x8h v6, v7, v8, v9, v10, v11, v30, v31, v18, v19
st1 {v16.8h}, [x16], x1
st1 {v6.8h}, [x0], x1
st1 {v17.8h}, [x16], x1
st1 {v7.8h}, [x0], x1
st1 {v0.8h}, [x16], x1
st1 {v8.8h}, [x0], x1
st1 {v1.8h}, [x16], x1
st1 {v9.8h}, [x0], x1
st1 {v2.8h}, [x16], x1
st1 {v10.8h}, [x0], x1
st1 {v3.8h}, [x16], x1
st1 {v11.8h}, [x0], x1
st1 {v4.8h}, [x16], x1
st1 {v30.8h}, [x0], x1
st1 {v5.8h}, [x16], x1
st1 {v31.8h}, [x0], x1
ret x15
7:
sub x16, x0, x1, lsl #3
sub x16, x16, #8
transpose_8x8h v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
add x0, x16, x1, lsl #2
st1 {v20.8h}, [x16], x1
st1 {v24.8h}, [x0], x1
st1 {v21.8h}, [x16], x1
st1 {v25.8h}, [x0], x1
st1 {v22.8h}, [x16], x1
st1 {v26.8h}, [x0], x1
st1 {v23.8h}, [x16], x1
st1 {v27.8h}, [x0], x1
add x0, x0, #8
ret x15
8:
sub x16, x0, x1, lsl #3
sub x16, x16, #4
transpose_4x8h v22, v23, v24, v25, v26, v27, v28, v29
add x0, x16, x1, lsl #2
st1 {v22.d}[0], [x16], x1
st1 {v22.d}[1], [x0], x1
st1 {v23.d}[0], [x16], x1
st1 {v23.d}[1], [x0], x1
st1 {v24.d}[0], [x16], x1
st1 {v24.d}[1], [x0], x1
st1 {v25.d}[0], [x16], x1
st1 {v25.d}[1], [x0], x1
add x0, x0, #4
ret x15
endfunc
// void dav1d_lpf_v_sb_y_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const uint32_t *const vmask,
// const uint8_t (*l)[4], ptrdiff_t b4_stride,
// const Av1FilterLUT *lut, const int w,
// const int bitdepth_max)
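//
// Rough C outline of the loop the macro below implements (sketch only; names
// follow the prototype above, locals are illustrative). Each iteration
// handles one 8-pixel group, i.e. two bits of each vmask word:
//   while (vm) {
//       if (vm & 3) {
//           L = l[0][0] ? l[0][0] : l[offset][0];
//           if (L) {
//               I = imax(imin(L >> lut->sharp[0], lut->sharp[1]), 1);
//               E = 2 * (L + 2) + I;  H = L >> 4;  // all scaled by 1 << bitdepth_min_8
//               filter with wd = 16, 8/6 or 4 depending on vmask[2]/vmask[1];
//           }
//       }
//       vm >>= 2;  advance dst and l;
//   }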
.macro lpf_func dir, type
function lpf_\dir\()_sb_\type\()_16bpc_neon, export=1
mov x11, x30
mov w8, w7 // bitdepth_max
clz w9, w8
mov w10, #24
sub w9, w10, w9 // bitdepth_min_8
stp d8, d9, [sp, #-0x40]!
stp d10, d11, [sp, #0x10]
stp d12, d13, [sp, #0x20]
stp d14, d15, [sp, #0x30]
ldp w6, w7, [x2] // vmask[0], vmask[1]
.ifc \type, y
ldr w2, [x2, #8] // vmask[2]
.endif
add x5, x5, #128 // Move to sharp part of lut
.ifc \type, y
orr w7, w7, w2 // vmask[1] |= vmask[2]
.endif
.ifc \dir, v
sub x4, x3, x4, lsl #2
.else
sub x3, x3, #4
lsl x4, x4, #2
.endif
orr w6, w6, w7 // vmask[0] |= vmask[1]
1:
tst w6, #0x03
.ifc \dir, v
ld1 {v0.8b}, [x4], #8
ld1 {v1.8b}, [x3], #8
.else
ld2 {v0.s,v1.s}[0], [x3], x4
ld2 {v0.s,v1.s}[1], [x3], x4
.endif
b.eq 7f // if (!(vm & bits)) continue;
ld1r {v5.8b}, [x5] // sharp[0]
add x5, x5, #8
movi v2.2s, #0xff
dup v13.2s, w6 // vmask[0]
dup v31.8h, w9 // bitdepth_min_8
and v0.8b, v0.8b, v2.8b // Keep only lowest byte in each 32 bit word
and v1.8b, v1.8b, v2.8b
cmtst v3.8b, v1.8b, v2.8b // Check for nonzero values in l[0][0]
movi v4.8b, #1
ld1r {v6.8b}, [x5] // sharp[1]
sub x5, x5, #8
bif v1.8b, v0.8b, v3.8b // if (!l[0][0]) L = l[offset][0]
cmtst v2.2s, v1.2s, v2.2s // L != 0
mul v1.2s, v1.2s, v4.2s // L
.ifc \type, y
dup v15.2s, w2 // vmask[2]
.endif
dup v14.2s, w7 // vmask[1]
mov x16, v2.d[0]
cmp x16, #0
b.eq 7f // if (!L) continue;
neg v5.8b, v5.8b // -sharp[0]
movrel x16, word_12
ushr v12.8b, v1.8b, #4 // H
ld1 {v16.2s}, [x16]
sshl v3.8b, v1.8b, v5.8b // L >> sharp[0]
.ifc \type, y
cmtst v15.2s, v15.2s, v16.2s // if (vmask[2] & bits)
.endif
movi v7.8b, #2
umin v3.8b, v3.8b, v6.8b // imin(L >> sharp[0], sharp[1])
add v0.8b, v1.8b, v7.8b // L + 2
umax v11.8b, v3.8b, v4.8b // imax(imin(), 1) = limit = I
add v0.8b, v0.8b, v0.8b // 2*(L + 2)
cmtst v14.2s, v14.2s, v16.2s // if (vmask[1] & bits)
uxtl v12.8h, v12.8b
add v10.8b, v0.8b, v11.8b // 2*(L + 2) + limit = E
cmtst v13.2s, v13.2s, v16.2s // if (vmask[0] & bits)
uxtl v11.8h, v11.8b
uxtl v10.8h, v10.8b
and v13.8b, v13.8b, v2.8b // vmask[0] &= L != 0
sxtl v14.8h, v14.8b
sxtl v13.8h, v13.8b
.ifc \type, y
sxtl v15.8h, v15.8b
.endif
ushl v12.8h, v12.8h, v31.8h
ushl v11.8h, v11.8h, v31.8h
ushl v10.8h, v10.8h, v31.8h
.ifc \type, y
tst w2, #0x03
b.eq 2f
// wd16
bl lpf_\dir\()_16_8_neon
b 8f
2:
.endif
tst w7, #0x03
b.eq 3f
.ifc \type, y
// wd8
bl lpf_\dir\()_8_8_neon
.else
// wd6
bl lpf_\dir\()_6_8_neon
.endif
b 8f
3:
// wd4
bl lpf_\dir\()_4_8_neon
.ifc \dir, h
b 8f
7:
// For dir h, the functions above increment x0.
// If the whole function is skipped, increment it here instead.
add x0, x0, x1, lsl #3
.else
7:
.endif
8:
lsr w6, w6, #2 // vmask[0] >>= 2
lsr w7, w7, #2 // vmask[1] >>= 2
.ifc \type, y
lsr w2, w2, #2 // vmask[2] >>= 2
.endif
.ifc \dir, v
add x0, x0, #16
.else
// For dir h, x0 is returned incremented
.endif
cbnz w6, 1b
ldp d14, d15, [sp, #0x30]
ldp d12, d13, [sp, #0x20]
ldp d10, d11, [sp, #0x10]
ldp d8, d9, [sp], 0x40
ret x11
endfunc
.endm
lpf_func v, y
lpf_func h, y
lpf_func v, uv
lpf_func h, uv
const word_12
.word 1, 2
endconst
|
Admenri/urge
| 19,138
|
third_party/dav1d/src/arm/32/cdef.S
|
/*
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2019, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm.S"
#include "util.S"
#include "cdef_tmpl.S"
// n1 = s0/d0
// w1 = d0/q0
// n2 = s4/d2
// w2 = d2/q1
.macro pad_top_bottom s1, s2, w, stride, n1, w1, n2, w2, align, ret
tst r7, #1 // CDEF_HAVE_LEFT
beq 2f
// CDEF_HAVE_LEFT
tst r7, #2 // CDEF_HAVE_RIGHT
beq 1f
// CDEF_HAVE_LEFT+CDEF_HAVE_RIGHT
ldrh r12, [\s1, #-2]
vldr \n1, [\s1]
vdup.16 d4, r12
ldrh r12, [\s1, #\w]
vmov.16 d4[1], r12
ldrh r12, [\s2, #-2]
vldr \n2, [\s2]
vmov.16 d4[2], r12
ldrh r12, [\s2, #\w]
vmovl.u8 q0, d0
vmov.16 d4[3], r12
vmovl.u8 q1, d2
vmovl.u8 q2, d4
vstr s8, [r0, #-4]
vst1.16 {\w1}, [r0, :\align]
vstr s9, [r0, #2*\w]
add r0, r0, #2*\stride
vstr s10, [r0, #-4]
vst1.16 {\w2}, [r0, :\align]
vstr s11, [r0, #2*\w]
.if \ret
pop {r4-r8,pc}
.else
add r0, r0, #2*\stride
b 3f
.endif
1:
// CDEF_HAVE_LEFT+!CDEF_HAVE_RIGHT
ldrh r12, [\s1, #-2]
vldr \n1, [\s1]
vdup.16 d4, r12
ldrh r12, [\s2, #-2]
vldr \n2, [\s2]
vmovl.u8 q0, d0
vmov.16 d4[1], r12
vmovl.u8 q1, d2
vmovl.u8 q2, d4
vstr s8, [r0, #-4]
vst1.16 {\w1}, [r0, :\align]
vstr s12, [r0, #2*\w]
add r0, r0, #2*\stride
vstr s9, [r0, #-4]
vst1.16 {\w2}, [r0, :\align]
vstr s12, [r0, #2*\w]
.if \ret
pop {r4-r8,pc}
.else
add r0, r0, #2*\stride
b 3f
.endif
2:
// !CDEF_HAVE_LEFT
tst r7, #2 // CDEF_HAVE_RIGHT
beq 1f
// !CDEF_HAVE_LEFT+CDEF_HAVE_RIGHT
vldr \n1, [\s1]
ldrh r12, [\s1, #\w]
vldr \n2, [\s2]
vdup.16 d4, r12
ldrh r12, [\s2, #\w]
vmovl.u8 q0, d0
vmov.16 d4[1], r12
vmovl.u8 q1, d2
vmovl.u8 q2, d4
vstr s12, [r0, #-4]
vst1.16 {\w1}, [r0, :\align]
vstr s8, [r0, #2*\w]
add r0, r0, #2*\stride
vstr s12, [r0, #-4]
vst1.16 {\w2}, [r0, :\align]
vstr s9, [r0, #2*\w]
.if \ret
pop {r4-r8,pc}
.else
add r0, r0, #2*\stride
b 3f
.endif
1:
// !CDEF_HAVE_LEFT+!CDEF_HAVE_RIGHT
vldr \n1, [\s1]
vldr \n2, [\s2]
vmovl.u8 q0, d0
vmovl.u8 q1, d2
vstr s12, [r0, #-4]
vst1.16 {\w1}, [r0, :\align]
vstr s12, [r0, #2*\w]
add r0, r0, #2*\stride
vstr s12, [r0, #-4]
vst1.16 {\w2}, [r0, :\align]
vstr s12, [r0, #2*\w]
.if \ret
pop {r4-r8,pc}
.else
add r0, r0, #2*\stride
.endif
3:
.endm
.macro load_n_incr dst, src, incr, w
.if \w == 4
vld1.32 {\dst\()[0]}, [\src, :32], \incr
.else
vld1.8 {\dst\()}, [\src, :64], \incr
.endif
.endm
// void dav1d_cdef_paddingX_8bpc_neon(uint16_t *tmp, const pixel *src,
// ptrdiff_t src_stride, const pixel (*left)[2],
// const pixel *const top,
// const pixel *const bottom, int h,
// enum CdefEdgeFlags edges);
// n1 = s0/d0
// w1 = d0/q0
// n2 = s4/d2
// w2 = d2/q1
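// Roughly, the padding below widens the 8-bit source into a 16-bit tmp
// buffer with a 2-pixel border on each side, using 0x8000 as a sentinel for
// pixels that are not available (sketch only):
//   for (y = -2; y < h + 2; y++)
//       for (x = -2; x < w + 2; x++)
//           tmp[y][x] = have_pixel(x, y, edges) ? fetch(x, y) : 0x8000;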
.macro padding_func w, stride, n1, w1, n2, w2, align
function cdef_padding\w\()_8bpc_neon, export=1
push {r4-r8,lr}
ldrd r4, r5, [sp, #24]
ldrd r6, r7, [sp, #32]
cmp r7, #0xf // fully edged
beq cdef_padding\w\()_edged_8bpc_neon
vmov.i16 q3, #0x8000
tst r7, #4 // CDEF_HAVE_TOP
bne 1f
// !CDEF_HAVE_TOP
sub r12, r0, #2*(2*\stride+2)
vmov.i16 q2, #0x8000
vst1.16 {q2,q3}, [r12]!
.if \w == 8
vst1.16 {q2,q3}, [r12]!
.endif
b 3f
1:
// CDEF_HAVE_TOP
add r8, r4, r2
sub r0, r0, #2*(2*\stride)
pad_top_bottom r4, r8, \w, \stride, \n1, \w1, \n2, \w2, \align, 0
// Middle section
3:
tst r7, #1 // CDEF_HAVE_LEFT
beq 2f
// CDEF_HAVE_LEFT
tst r7, #2 // CDEF_HAVE_RIGHT
beq 1f
// CDEF_HAVE_LEFT+CDEF_HAVE_RIGHT
0:
vld1.16 {d2[]}, [r3, :16]!
ldrh r12, [r1, #\w]
load_n_incr d0, r1, r2, \w
subs r6, r6, #1
vmov.16 d2[1], r12
vmovl.u8 q0, d0
vmovl.u8 q1, d2
vstr s4, [r0, #-4]
vst1.16 {\w1}, [r0, :\align]
vstr s5, [r0, #2*\w]
add r0, r0, #2*\stride
bgt 0b
b 3f
1:
// CDEF_HAVE_LEFT+!CDEF_HAVE_RIGHT
vld1.16 {d2[]}, [r3, :16]!
load_n_incr d0, r1, r2, \w
subs r6, r6, #1
vmovl.u8 q0, d0
vmovl.u8 q1, d2
vstr s4, [r0, #-4]
vst1.16 {\w1}, [r0, :\align]
vstr s12, [r0, #2*\w]
add r0, r0, #2*\stride
bgt 1b
b 3f
2:
tst r7, #2 // CDEF_HAVE_RIGHT
beq 1f
// !CDEF_HAVE_LEFT+CDEF_HAVE_RIGHT
0:
ldrh r12, [r1, #\w]
load_n_incr d0, r1, r2, \w
vdup.16 d2, r12
subs r6, r6, #1
vmovl.u8 q0, d0
vmovl.u8 q1, d2
vstr s12, [r0, #-4]
vst1.16 {\w1}, [r0, :\align]
vstr s4, [r0, #2*\w]
add r0, r0, #2*\stride
bgt 0b
b 3f
1:
// !CDEF_HAVE_LEFT+!CDEF_HAVE_RIGHT
load_n_incr d0, r1, r2, \w
subs r6, r6, #1
vmovl.u8 q0, d0
vstr s12, [r0, #-4]
vst1.16 {\w1}, [r0, :\align]
vstr s12, [r0, #2*\w]
add r0, r0, #2*\stride
bgt 1b
3:
tst r7, #8 // CDEF_HAVE_BOTTOM
bne 1f
// !CDEF_HAVE_BOTTOM
sub r12, r0, #4
vmov.i16 q2, #0x8000
vst1.16 {q2,q3}, [r12]!
.if \w == 8
vst1.16 {q2,q3}, [r12]!
.endif
pop {r4-r8,pc}
1:
// CDEF_HAVE_BOTTOM
add r8, r5, r2
pad_top_bottom r5, r8, \w, \stride, \n1, \w1, \n2, \w2, \align, 1
endfunc
.endm
padding_func 8, 16, d0, q0, d2, q1, 128
padding_func 4, 8, s0, d0, s4, d2, 64
// void cdef_paddingX_edged_8bpc_neon(uint8_t *tmp, const pixel *src,
// ptrdiff_t src_stride, const pixel (*left)[2],
// const pixel *const top,
// const pixel *const bottom, int h,
// enum CdefEdgeFlags edges);
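// With all edges present, the padding is a plain 8-bit copy with a 2-pixel
// border; roughly (sketch only):
//   for (y = -2; y < h + 2; y++)
//       copy 2 left pixels, the w row pixels and 2 right pixels into tmp[y];
// where rows -2..-1 come from top, rows h..h+1 from bottom, and the left
// pixels of the middle rows from left[].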
.macro padding_func_edged w, stride, reg, align
function cdef_padding\w\()_edged_8bpc_neon
sub r0, r0, #(2*\stride)
ldrh r12, [r4, #-2]
vldr \reg, [r4]
add r8, r4, r2
strh r12, [r0, #-2]
ldrh r12, [r4, #\w]
vstr \reg, [r0]
strh r12, [r0, #\w]
ldrh r12, [r8, #-2]
vldr \reg, [r8]
strh r12, [r0, #\stride-2]
ldrh r12, [r8, #\w]
vstr \reg, [r0, #\stride]
strh r12, [r0, #\stride+\w]
add r0, r0, #2*\stride
0:
ldrh r12, [r3], #2
vldr \reg, [r1]
str r12, [r0, #-2]
ldrh r12, [r1, #\w]
add r1, r1, r2
subs r6, r6, #1
vstr \reg, [r0]
str r12, [r0, #\w]
add r0, r0, #\stride
bgt 0b
ldrh r12, [r5, #-2]
vldr \reg, [r5]
add r8, r5, r2
strh r12, [r0, #-2]
ldrh r12, [r5, #\w]
vstr \reg, [r0]
strh r12, [r0, #\w]
ldrh r12, [r8, #-2]
vldr \reg, [r8]
strh r12, [r0, #\stride-2]
ldrh r12, [r8, #\w]
vstr \reg, [r0, #\stride]
strh r12, [r0, #\stride+\w]
pop {r4-r8,pc}
endfunc
.endm
padding_func_edged 8, 16, d0, 64
padding_func_edged 4, 8, s0, 32
tables
filter 8, 8
filter 4, 8
find_dir 8
.macro load_px_8 d11, d12, d21, d22, w
.if \w == 8
add r6, r2, r9 // x + off
sub r9, r2, r9 // x - off
vld1.8 {\d11}, [r6] // p0
add r6, r6, #16 // += stride
vld1.8 {\d21}, [r9] // p1
add r9, r9, #16 // += stride
vld1.8 {\d12}, [r6] // p0
vld1.8 {\d22}, [r9] // p1
.else
add r6, r2, r9 // x + off
sub r9, r2, r9 // x - off
vld1.32 {\d11[0]}, [r6] // p0
add r6, r6, #8 // += stride
vld1.32 {\d21[0]}, [r9] // p1
add r9, r9, #8 // += stride
vld1.32 {\d11[1]}, [r6] // p0
add r6, r6, #8 // += stride
vld1.32 {\d21[1]}, [r9] // p1
add r9, r9, #8 // += stride
vld1.32 {\d12[0]}, [r6] // p0
add r6, r6, #8 // += stride
vld1.32 {\d22[0]}, [r9] // p1
add r9, r9, #8 // += stride
vld1.32 {\d12[1]}, [r6] // p0
vld1.32 {\d22[1]}, [r9] // p1
.endif
.endm
.macro handle_pixel_8 s1, s2, thresh_vec, shift, tap, min
.if \min
vmin.u8 q3, q3, \s1
vmax.u8 q4, q4, \s1
vmin.u8 q3, q3, \s2
vmax.u8 q4, q4, \s2
.endif
vabd.u8 q8, q0, \s1 // abs(diff)
vabd.u8 q11, q0, \s2 // abs(diff)
vshl.u8 q9, q8, \shift // abs(diff) >> shift
vshl.u8 q12, q11, \shift // abs(diff) >> shift
vqsub.u8 q9, \thresh_vec, q9 // clip = imax(0, threshold - (abs(diff) >> shift))
vqsub.u8 q12, \thresh_vec, q12// clip = imax(0, threshold - (abs(diff) >> shift))
vcgt.u8 q10, q0, \s1 // px > p0
vcgt.u8 q13, q0, \s2 // px > p1
vmin.u8 q9, q9, q8 // imin(abs(diff), clip)
vmin.u8 q12, q12, q11 // imin(abs(diff), clip)
vneg.s8 q8, q9 // -imin()
vneg.s8 q11, q12 // -imin()
vbsl q10, q8, q9 // constrain() = imax(imin(diff, clip), -clip)
vdup.8 d18, \tap // taps[k]
vbsl q13, q11, q12 // constrain() = imax(imin(diff, clip), -clip)
vmlal.s8 q1, d20, d18 // sum += taps[k] * constrain()
vmlal.s8 q1, d26, d18 // sum += taps[k] * constrain()
vmlal.s8 q2, d21, d18 // sum += taps[k] * constrain()
vmlal.s8 q2, d27, d18 // sum += taps[k] * constrain()
.endm
// void cdef_filterX_edged_neon(pixel *dst, ptrdiff_t dst_stride,
// const uint8_t *tmp, int pri_strength,
// int sec_strength, int dir, int damping,
// int h, size_t edges);
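// Rough C model of the per-pixel filtering done below (sketch only; helper
// names are illustrative):
//   shift = imax(0, damping - ulog2(threshold));
//   clip  = imax(0, threshold - (abs(diff) >> shift));
//   c     = apply_sign(imin(abs(diff), clip), diff);   // constrain()
//   sum  += taps[k] * c;                                // over pri and sec taps
//   out   = px + ((8 + sum - (sum < 0)) >> 4);
// and, for the pri+sec variant, out is clipped to the min/max of the
// sampled neighbourhood.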
.macro filter_func_8 w, pri, sec, min, suffix
function cdef_filter\w\suffix\()_edged_neon
.if \pri
movrel_local r8, pri_taps
and r9, r3, #1
add r8, r8, r9, lsl #1
.endif
movrel_local r9, directions\w
add r5, r9, r5, lsl #1
vmov.u8 d17, #7
vdup.8 d16, r6 // damping
vmov.8 d8[0], r3
vmov.8 d8[1], r4
vclz.i8 d8, d8 // clz(threshold)
vsub.i8 d8, d17, d8 // ulog2(threshold)
vqsub.u8 d8, d16, d8 // shift = imax(0, damping - ulog2(threshold))
vneg.s8 d8, d8 // -shift
.if \sec
vdup.8 q6, d8[1]
.endif
.if \pri
vdup.8 q5, d8[0]
.endif
1:
.if \w == 8
add r12, r2, #16
vld1.8 {d0}, [r2, :64] // px
vld1.8 {d1}, [r12, :64] // px
.else
add r12, r2, #8
vld1.32 {d0[0]}, [r2, :32] // px
add r9, r2, #2*8
vld1.32 {d0[1]}, [r12, :32] // px
add r12, r12, #2*8
vld1.32 {d1[0]}, [r9, :32] // px
vld1.32 {d1[1]}, [r12, :32] // px
.endif
vmov.u8 q1, #0 // sum
vmov.u8 q2, #0 // sum
.if \min
vmov.u16 q3, q0 // min
vmov.u16 q4, q0 // max
.endif
// Instead of loading sec_taps 2, 1 from memory, just set it
// to 2 initially and decrease for the second round.
// This is also used as the loop counter.
mov lr, #2 // sec_taps[0]
2:
.if \pri
ldrsb r9, [r5] // off1
load_px_8 d28, d29, d30, d31, \w
.endif
.if \sec
add r5, r5, #4 // +2*2
ldrsb r9, [r5] // off2
.endif
.if \pri
ldrb r12, [r8] // *pri_taps
vdup.8 q7, r3 // threshold
handle_pixel_8 q14, q15, q7, q5, r12, \min
.endif
.if \sec
load_px_8 d28, d29, d30, d31, \w
add r5, r5, #8 // +2*4
ldrsb r9, [r5] // off3
vdup.8 q7, r4 // threshold
handle_pixel_8 q14, q15, q7, q6, lr, \min
load_px_8 d28, d29, d30, d31, \w
handle_pixel_8 q14, q15, q7, q6, lr, \min
sub r5, r5, #11 // r5 -= 2*(2+4); r5 += 1;
.else
add r5, r5, #1 // r5 += 1
.endif
subs lr, lr, #1 // sec_tap-- (value)
.if \pri
add r8, r8, #1 // pri_taps++ (pointer)
.endif
bne 2b
vshr.s16 q14, q1, #15 // -(sum < 0)
vshr.s16 q15, q2, #15 // -(sum < 0)
vadd.i16 q1, q1, q14 // sum - (sum < 0)
vadd.i16 q2, q2, q15 // sum - (sum < 0)
vrshr.s16 q1, q1, #4 // (8 + sum - (sum < 0)) >> 4
vrshr.s16 q2, q2, #4 // (8 + sum - (sum < 0)) >> 4
vaddw.u8 q1, q1, d0 // px + (8 + sum ...) >> 4
vaddw.u8 q2, q2, d1 // px + (8 + sum ...) >> 4
vqmovun.s16 d0, q1
vqmovun.s16 d1, q2
.if \min
vmin.u8 q0, q0, q4
vmax.u8 q0, q0, q3 // iclip(px + .., min, max)
.endif
.if \w == 8
vst1.8 {d0}, [r0, :64], r1
add r2, r2, #2*16 // tmp += 2*tmp_stride
subs r7, r7, #2 // h -= 2
vst1.8 {d1}, [r0, :64], r1
.else
vst1.32 {d0[0]}, [r0, :32], r1
add r2, r2, #4*8 // tmp += 4*tmp_stride
vst1.32 {d0[1]}, [r0, :32], r1
subs r7, r7, #4 // h -= 4
vst1.32 {d1[0]}, [r0, :32], r1
vst1.32 {d1[1]}, [r0, :32], r1
.endif
// Reset pri_taps and directions back to the original point
sub r5, r5, #2
.if \pri
sub r8, r8, #2
.endif
bgt 1b
vpop {q4-q7}
pop {r4-r9,pc}
endfunc
.endm
.macro filter_8 w
filter_func_8 \w, pri=1, sec=0, min=0, suffix=_pri
filter_func_8 \w, pri=0, sec=1, min=0, suffix=_sec
filter_func_8 \w, pri=1, sec=1, min=1, suffix=_pri_sec
.endm
filter_8 8
filter_8 4
|
Admenri/urge
| 123,791
|
third_party/dav1d/src/arm/32/itx16.S
|
/******************************************************************************
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2020, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#include "src/arm/asm.S"
#include "util.S"
// The exported functions in this file have the following signature:
// void itxfm_add(pixel *dst, ptrdiff_t dst_stride, coef *coeff, int eob);
// Most of the functions use the following register layout:
// r0-r3 external parameters
// r4 function pointer to first transform
// r5 function pointer to second transform
// r6 output parameter for helper function
// r7 input parameter for helper function
// r8 input stride for helper function
// r9 scratch variable for helper functions
// r10-r11 pointer to list of eob thresholds, eob threshold value,
// scratch variables within helper functions (backed up)
// The SIMD registers most often use the following layout:
// d0-d3 multiplication coefficients
// d4-d7 scratch registers
// d8-d15 unused in some transforms, used for scratch registers in others
// d16-d31 inputs/outputs of transforms
// Potential further optimizations, that are left unimplemented for now:
// - Trying to keep multiplication coefficients in registers across multiple
// transform functions. (The register layout is designed to potentially
// allow this.)
// - Use a simplified version of the transforms themselves for cases where
// we know a significant number of inputs are zero. E.g. if the eob value
// indicates only a quarter of input values are set, for idct16 and up,
// a significant amount of calculation can be skipped, at the cost of more
// code duplication and special casing.
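//
// Rough outline of what a typical itxfm_add function below does (sketch
// only, not dav1d's C code):
//   1. load the coefficients as 32 bit and run the first (column) transform,
//      then rounding-shift and narrow the results to 16 bit;
//   2. transpose and run the second (row) transform on the 16-bit data;
//   3. dst[x] = iclip(dst[x] + ((out[x] + 8) >> 4), 0, bitdepth_max).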
// A macro for cases where a thumb mov can express the constant in one
// instruction, while arm mode requires two separate movw+movt pairs.
.macro mov_const reg, val
#if CONFIG_THUMB
mov.w \reg, #\val
#else
movw \reg, #((\val) & 0xffff)
movt \reg, #(((\val) >> 16) & 0xffff)
#endif
.endm
const idct_coeffs, align=4
// idct4
.int 2896, 2896*8*(1<<16), 1567, 3784
// idct8
.int 799, 4017, 3406, 2276
// idct16
.int 401, 4076, 3166, 2598
.int 1931, 3612, 3920, 1189
// idct32
.int 201, 4091, 3035, 2751
.int 1751, 3703, 3857, 1380
.int 995, 3973, 3513, 2106
.int 2440, 3290, 4052, 601
endconst
const idct64_coeffs, align=4
.int 101*8*(1<<16), 4095*8*(1<<16), 2967*8*(1<<16), -2824*8*(1<<16)
.int 1660*8*(1<<16), 3745*8*(1<<16), 3822*8*(1<<16), -1474*8*(1<<16)
.int 4076, 401, 4017, 799
.int 4036*8*(1<<16), -700*8*(1<<16), 2359*8*(1<<16), 3349*8*(1<<16)
.int 3461*8*(1<<16), -2191*8*(1<<16), 897*8*(1<<16), 3996*8*(1<<16)
.int -3166, -2598, -799, -4017
.int 501*8*(1<<16), 4065*8*(1<<16), 3229*8*(1<<16), -2520*8*(1<<16)
.int 2019*8*(1<<16), 3564*8*(1<<16), 3948*8*(1<<16), -1092*8*(1<<16)
.int 3612, 1931, 2276, 3406
.int 4085*8*(1<<16), -301*8*(1<<16), 2675*8*(1<<16), 3102*8*(1<<16)
.int 3659*8*(1<<16), -1842*8*(1<<16), 1285*8*(1<<16), 3889*8*(1<<16)
.int -3920, -1189, -3406, -2276
endconst
const iadst4_coeffs, align=4
.int 1321, 3803, 2482, 3344
endconst
const iadst8_coeffs, align=4
.int 4076, 401, 3612, 1931
.int 2598, 3166, 1189, 3920
// idct_coeffs
.int 2896, 0, 1567, 3784
endconst
const iadst16_coeffs, align=4
.int 4091, 201, 3973, 995
.int 3703, 1751, 3290, 2440
.int 2751, 3035, 2106, 3513
.int 1380, 3857, 601, 4052
endconst
.macro vmul_vmla d0, s0, s1, c0, c1
vmul.i32 \d0, \s0, \c0
vmla.i32 \d0, \s1, \c1
.endm
.macro vmul_vmls d0, s0, s1, c0, c1
vmul.i32 \d0, \s0, \c0
vmls.i32 \d0, \s1, \c1
.endm
.macro scale_input c, r0, r1, r2, r3, r4, r5, r6, r7
vqrdmulh.s32 \r0, \r0, \c
vqrdmulh.s32 \r1, \r1, \c
.ifnb \r2
vqrdmulh.s32 \r2, \r2, \c
vqrdmulh.s32 \r3, \r3, \c
.endif
.ifnb \r4
vqrdmulh.s32 \r4, \r4, \c
vqrdmulh.s32 \r5, \r5, \c
vqrdmulh.s32 \r6, \r6, \c
vqrdmulh.s32 \r7, \r7, \c
.endif
.endm
.macro load_add_store load, shift, addsrc, adddst, max, min, store, dst, src, shiftbits=4
.ifnb \load
vld1.16 {\load}, [\src, :128], r1
.endif
.ifnb \shift
vrshr.s16 \shift, \shift, #\shiftbits
.endif
.ifnb \addsrc
vqadd.s16 \adddst, \adddst, \addsrc
.endif
.ifnb \max
vmax.s16 \max, \max, q6
.endif
.ifnb \min
vmin.s16 \min, \min, q7
.endif
.ifnb \store
vst1.16 {\store}, [\dst, :128], r1
.endif
.endm
.macro load_add_store_8x8 dst, src, shiftbits=4
mov \src, \dst
vmov.i16 q6, #0
vmvn.i16 q7, #0xfc00 // 0x3ff
load_add_store q0, q8, , , , , , \dst, \src, \shiftbits
load_add_store q1, q9, , , , , , \dst, \src, \shiftbits
load_add_store q2, q10, q0, q8, , , , \dst, \src, \shiftbits
load_add_store q3, q11, q1, q9, q8, , , \dst, \src, \shiftbits
load_add_store q4, q12, q2, q10, q9, q8, , \dst, \src, \shiftbits
load_add_store q5, q13, q3, q11, q10, q9, q8, \dst, \src, \shiftbits
load_add_store q0, q14, q4, q12, q11, q10, q9, \dst, \src, \shiftbits
load_add_store q1, q15, q5, q13, q12, q11, q10, \dst, \src, \shiftbits
load_add_store , , q0, q14, q13, q12, q11, \dst, \src, \shiftbits
load_add_store , , q1, q15, q14, q13, q12, \dst, \src, \shiftbits
load_add_store , , , , q15, q14, q13, \dst, \src, \shiftbits
load_add_store , , , , , q15, q14, \dst, \src, \shiftbits
load_add_store , , , , , , q15, \dst, \src, \shiftbits
.endm
.macro load_add_store_8x4 dst, src, shiftbits=4
mov \src, \dst
vmov.i16 q6, #0
vmvn.i16 q7, #0xfc00 // 0x3ff
load_add_store q0, q8, , , , , , \dst, \src, \shiftbits
load_add_store q1, q9, , , , , , \dst, \src, \shiftbits
load_add_store q2, q10, q0, q8, , , , \dst, \src, \shiftbits
load_add_store q3, q11, q1, q9, q8, , , \dst, \src, \shiftbits
load_add_store , , q2, q10, q9, q8, , \dst, \src, \shiftbits
load_add_store , , q3, q11, q10, q9, q8, \dst, \src, \shiftbits
load_add_store , , , , q11, q10, q9, \dst, \src, \shiftbits
load_add_store , , , , , q11, q10, \dst, \src, \shiftbits
load_add_store , , , , , , q11, \dst, \src, \shiftbits
.endm
.macro load_add_store4 load1, load2, shift, addsrc, adddst, max, min, store1, store2, dst, src, shiftbits=4
.ifnb \load1
vld1.16 {\load1}, [\src, :64], r1
.endif
.ifnb \shift
vrshr.s16 \shift, \shift, #\shiftbits
.endif
.ifnb \load2
vld1.16 {\load2}, [\src, :64], r1
.endif
.ifnb \addsrc
vqadd.s16 \adddst, \adddst, \addsrc
.endif
.ifnb \max
vmax.s16 \max, \max, q6
.endif
.ifnb \store1
vst1.16 {\store1}, [\dst, :64], r1
.endif
.ifnb \min
vmin.s16 \min, \min, q7
.endif
.ifnb \store2
vst1.16 {\store2}, [\dst, :64], r1
.endif
.endm
.macro load_add_store_4x16 dst, src
mov \src, \dst
vmov.i16 q6, #0
vmvn.i16 q7, #0xfc00 // 0x3ff
mov \src, \dst
load_add_store4 d0, d1, q8, , , , , , , \dst, \src
load_add_store4 d2, d3, q9, , , , , , , \dst, \src
load_add_store4 d4, d5, q10, q0, q8, , , , , \dst, \src
load_add_store4 d6, d7, q11, q1, q9, q8, , , , \dst, \src
load_add_store4 d8, d9, q12, q2, q10, q9, q8, , , \dst, \src
load_add_store4 d10, d11, q13, q3, q11, q10, q9, d16, d17, \dst, \src
load_add_store4 d0, d1, q14, q4, q12, q11, q10, d18, d19, \dst, \src
load_add_store4 d2, d3, q15, q5, q13, q12, q11, d20, d21, \dst, \src
load_add_store4 , , , q0, q14, q13, q12, d22, d23, \dst, \src
load_add_store4 , , , q1, q15, q14, q13, d24, d25, \dst, \src
load_add_store4 , , , , , q15, q14, d26, d27, \dst, \src
load_add_store4 , , , , , , q15, d28, d29, \dst, \src
load_add_store4 , , , , , , , d30, d31, \dst, \src
.endm
.macro load_add_store_4x8 dst, src, shiftbits=4
mov \src, \dst
vmov.i16 q6, #0
vmvn.i16 q7, #0xfc00 // 0x3ff
mov \src, \dst
load_add_store4 d0, d1, q8, , , , , , , \dst, \src, \shiftbits
load_add_store4 d2, d3, q9, , , , , , , \dst, \src, \shiftbits
load_add_store4 d4, d5, q10, q0, q8, , , , , \dst, \src, \shiftbits
load_add_store4 d6, d7, q11, q1, q9, q8, , , , \dst, \src, \shiftbits
load_add_store4 , , , q2, q10, q9, q8, , , \dst, \src, \shiftbits
load_add_store4 , , , q3, q11, q10, q9, d16, d17, \dst, \src, \shiftbits
load_add_store4 , , , , , q11, q10, d18, d19, \dst, \src, \shiftbits
load_add_store4 , , , , , , q11, d20, d21, \dst, \src, \shiftbits
load_add_store4 , , , , , , , d22, d23, \dst, \src, \shiftbits
.endm
.macro load_add_store_4x4 dst, src, shiftbits=4
mov \src, \dst
vmov.i16 q6, #0
vmvn.i16 q7, #0xfc00 // 0x3ff
mov \src, \dst
load_add_store4 d0, d1, q8, , , , , , , \dst, \src, \shiftbits
load_add_store4 d2, d3, q9, q0, q8, , , , , \dst, \src, \shiftbits
load_add_store4 , , , q1, q9, q8, , , , \dst, \src, \shiftbits
load_add_store4 , , , , , q9, q8, , , \dst, \src, \shiftbits
load_add_store4 , , , , , , q9, d16, d17, \dst, \src, \shiftbits
load_add_store4 , , , , , , , d18, d19, \dst, \src, \shiftbits
.endm
.macro idct_dc w, h, shift
cmp r3, #0
bne 1f
vmov.i16 q14, #0
mov_const r12, 2896*8*(1<<16)
vld1.32 {d24[], d25[]}, [r2, :32]
vdup.32 d0, r12
vqrdmulh.s32 q13, q12, d0[0]
vst1.32 {d28[0]}, [r2, :32]
.if (\w == 2*\h) || (2*\w == \h)
vqrdmulh.s32 q13, q13, d0[0]
.endif
.if \shift > 0
vqrshrn.s32 d24, q13, #\shift
vqrshrn.s32 d25, q13, #\shift
.else
vqmovn.s32 d24, q13
vqmovn.s32 d25, q13
.endif
vqrdmulh.s16 q12, q12, d0[1]
mov r3, #\h
vrshr.s16 q12, q12, #4
b idct_dc_w\w\()_neon
1:
.endm
function idct_dc_w4_neon
vmvn.i16 q15, #0xfc00 // 0x3ff
1:
vld1.16 {d0}, [r0, :64], r1
vld1.16 {d1}, [r0, :64], r1
vld1.16 {d2}, [r0, :64], r1
vld1.16 {d3}, [r0, :64], r1
subs r3, r3, #4
vqadd.s16 q0, q0, q12
sub r0, r0, r1, lsl #2
vqadd.s16 q1, q1, q12
vmax.s16 q0, q0, q14
vmax.s16 q1, q1, q14
vmin.s16 q0, q0, q15
vst1.16 {d0}, [r0, :64], r1
vmin.s16 q1, q1, q15
vst1.16 {d1}, [r0, :64], r1
vst1.16 {d2}, [r0, :64], r1
vst1.16 {d3}, [r0, :64], r1
bgt 1b
bx lr
endfunc
function idct_dc_w8_neon
vmvn.i16 q15, #0xfc00 // 0x3ff
1:
vld1.16 {q0}, [r0, :128], r1
subs r3, r3, #4
vld1.16 {q1}, [r0, :128], r1
vqadd.s16 q0, q0, q12
vld1.16 {q2}, [r0, :128], r1
vqadd.s16 q1, q1, q12
vld1.16 {q3}, [r0, :128], r1
vqadd.s16 q2, q2, q12
vqadd.s16 q3, q3, q12
sub r0, r0, r1, lsl #2
vmax.s16 q0, q0, q14
vmax.s16 q1, q1, q14
vmax.s16 q2, q2, q14
vmax.s16 q3, q3, q14
vmin.s16 q0, q0, q15
vmin.s16 q1, q1, q15
vst1.16 {q0}, [r0, :128], r1
vmin.s16 q2, q2, q15
vst1.16 {q1}, [r0, :128], r1
vmin.s16 q3, q3, q15
vst1.16 {q2}, [r0, :128], r1
vst1.16 {q3}, [r0, :128], r1
bgt 1b
bx lr
endfunc
function idct_dc_w16_neon
vmvn.i16 q15, #0xfc00 // 0x3ff
1:
vld1.16 {q0, q1}, [r0, :128], r1
subs r3, r3, #2
vld1.16 {q2, q3}, [r0, :128], r1
vqadd.s16 q0, q0, q12
vqadd.s16 q1, q1, q12
vqadd.s16 q2, q2, q12
vqadd.s16 q3, q3, q12
sub r0, r0, r1, lsl #1
vmax.s16 q0, q0, q14
vmax.s16 q1, q1, q14
vmax.s16 q2, q2, q14
vmax.s16 q3, q3, q14
vmin.s16 q0, q0, q15
vmin.s16 q1, q1, q15
vmin.s16 q2, q2, q15
vst1.16 {q0, q1}, [r0, :128], r1
vmin.s16 q3, q3, q15
vst1.16 {q2, q3}, [r0, :128], r1
bgt 1b
bx lr
endfunc
function idct_dc_w32_neon
sub r1, r1, #32
vmvn.i16 q15, #0xfc00 // 0x3ff
1:
vld1.16 {q0, q1}, [r0, :128]!
subs r3, r3, #1
vld1.16 {q2, q3}, [r0, :128]
vqadd.s16 q0, q0, q12
vqadd.s16 q1, q1, q12
vqadd.s16 q2, q2, q12
vqadd.s16 q3, q3, q12
sub r0, r0, #32
vmax.s16 q0, q0, q14
vmax.s16 q1, q1, q14
vmax.s16 q2, q2, q14
vmax.s16 q3, q3, q14
vmin.s16 q0, q0, q15
vmin.s16 q1, q1, q15
vmin.s16 q2, q2, q15
vst1.16 {q0, q1}, [r0, :128]!
vmin.s16 q3, q3, q15
vst1.16 {q2, q3}, [r0, :128], r1
bgt 1b
bx lr
endfunc
function idct_dc_w64_neon
sub r1, r1, #96
vmvn.i16 q15, #0xfc00 // 0x3ff
1:
vld1.16 {q0, q1}, [r0, :128]!
subs r3, r3, #1
vld1.16 {q2, q3}, [r0, :128]!
vqadd.s16 q0, q0, q12
vld1.16 {q8, q9}, [r0, :128]!
vqadd.s16 q1, q1, q12
vld1.16 {q10, q11}, [r0, :128]
vqadd.s16 q2, q2, q12
vqadd.s16 q3, q3, q12
vqadd.s16 q8, q8, q12
vqadd.s16 q9, q9, q12
vqadd.s16 q10, q10, q12
vqadd.s16 q11, q11, q12
sub r0, r0, #96
vmax.s16 q0, q0, q14
vmax.s16 q1, q1, q14
vmax.s16 q2, q2, q14
vmax.s16 q3, q3, q14
vmax.s16 q8, q8, q14
vmax.s16 q9, q9, q14
vmax.s16 q10, q10, q14
vmax.s16 q11, q11, q14
vmin.s16 q0, q0, q15
vmin.s16 q1, q1, q15
vmin.s16 q2, q2, q15
vmin.s16 q3, q3, q15
vmin.s16 q8, q8, q15
vst1.16 {q0, q1}, [r0, :128]!
vmin.s16 q9, q9, q15
vst1.16 {q2, q3}, [r0, :128]!
vmin.s16 q10, q10, q15
vst1.16 {q8, q9}, [r0, :128]!
vmin.s16 q11, q11, q15
vst1.16 {q10, q11}, [r0, :128], r1
bgt 1b
bx lr
endfunc
.macro iwht4
vadd.i32 q8, q8, q9
vsub.i32 q13, q10, q11
vsub.i32 q12, q8, q13
vshr.s32 q12, q12, #1
vsub.i32 q10, q12, q9
vsub.i32 q9, q12, q11
vadd.i32 q11, q13, q10
vsub.i32 q8, q8, q9
.endm
.macro idct_4s_x4 r0, r1, r2, r3
vmul_vmla q4, \r1, \r3, d1[1], d1[0]
vmul_vmla q2, \r0, \r2, d0[0], d0[0]
vmul_vmls q3, \r1, \r3, d1[0], d1[1]
vmul_vmls q5, \r0, \r2, d0[0], d0[0]
vrshr.s32 q4, q4, #12
vrshr.s32 q2, q2, #12
vrshr.s32 q3, q3, #12
vrshr.s32 q5, q5, #12
vqadd.s32 \r0, q2, q4
vqsub.s32 \r3, q2, q4
vqadd.s32 \r1, q5, q3
vqsub.s32 \r2, q5, q3
.endm
.macro idct_2s_x4 r0, r1, r2, r3
vmul_vmla d6, \r1, \r3, d1[1], d1[0]
vmul_vmla d4, \r0, \r2, d0[0], d0[0]
vmul_vmls d5, \r1, \r3, d1[0], d1[1]
vmul_vmls d7, \r0, \r2, d0[0], d0[0]
vrshr.s32 d6, d6, #12
vrshr.s32 d4, d4, #12
vrshr.s32 d5, d5, #12
vrshr.s32 d7, d7, #12
vqadd.s32 \r0, d4, d6
vqsub.s32 \r3, d4, d6
vqadd.s32 \r1, d7, d5
vqsub.s32 \r2, d7, d5
.endm
function inv_dct_4s_x4_neon
movrel_local r12, idct_coeffs
vld1.32 {d0, d1}, [r12, :128]
idct_4s_x4 q8, q9, q10, q11
bx lr
endfunc
.macro iadst_4x4 o0, o1, o2, o3
movrel_local r12, iadst4_coeffs
vld1.32 {d0, d1}, [r12, :128]
vsub.i32 q1, q8, q10
vmul.i32 q2, q8, d0[0]
vmla.i32 q2, q10, d0[1]
vmla.i32 q2, q11, d1[0]
vmul.i32 q4, q9, d1[1]
vadd.i32 q1, q1, q11
vmul.i32 q3, q8, d1[0]
vmls.i32 q3, q10, d0[0]
vmls.i32 q3, q11, d0[1]
vadd.i32 \o3, q2, q3
vmul.i32 \o2, q1, d1[1]
vadd.i32 \o0, q2, q4
vadd.i32 \o1, q3, q4
vsub.i32 \o3, \o3, q4
vrshr.s32 \o0, \o0, #12
vrshr.s32 \o2, \o2, #12
vrshr.s32 \o1, \o1, #12
vrshr.s32 \o3, \o3, #12
.endm
function inv_adst_4s_x4_neon
iadst_4x4 q8, q9, q10, q11
bx lr
endfunc
function inv_flipadst_4s_x4_neon
iadst_4x4 q11, q10, q9, q8
bx lr
endfunc
function inv_identity_4s_x4_neon
mov r12, #0
movt r12, #(5793-4096)*8
vdup.32 d0, r12
vqrdmulh.s32 q1, q8, d0[0]
vqrdmulh.s32 q2, q9, d0[0]
vqrdmulh.s32 q3, q10, d0[0]
vqrdmulh.s32 q4, q11, d0[0]
vqadd.s32 q8, q8, q1
vqadd.s32 q9, q9, q2
vqadd.s32 q10, q10, q3
vqadd.s32 q11, q11, q4
bx lr
endfunc
function inv_txfm_add_wht_wht_4x4_16bpc_neon, export=1
push {r4-r5,lr}
vpush {q4-q5}
vmov.i16 q14, #0
vmov.i16 q15, #0
vld1.32 {q8, q9}, [r2, :128]
vst1.32 {q14, q15}, [r2, :128]!
vshr.s32 q8, q8, #2
vld1.32 {q10, q11}, [r2, :128]
vshr.s32 q9, q9, #2
vshr.s32 q10, q10, #2
vshr.s32 q11, q11, #2
iwht4
vst1.32 {q14, q15}, [r2, :128]
transpose_4x4s q8, q9, q10, q11, d16, d17, d18, d19, d20, d21, d22, d23
iwht4
vld1.16 {d0}, [r0, :64], r1
vqmovn.s32 d16, q8
vld1.16 {d1}, [r0, :64], r1
vqmovn.s32 d17, q9
vld1.16 {d2}, [r0, :64], r1
vqmovn.s32 d18, q10
vld1.16 {d3}, [r0, :64], r1
vqmovn.s32 d19, q11
b L(itx_4x4_end)
endfunc
function inv_txfm_add_4x4_neon
vmov.i16 q14, #0
vmov.i16 q15, #0
vld1.32 {q8, q9}, [r2, :128]
vst1.16 {q14, q15}, [r2, :128]!
vld1.32 {q10, q11}, [r2, :128]
vst1.16 {q14, q15}, [r2, :128]
blx r4
vqmovn.s32 d16, q8
vqmovn.s32 d17, q9
vqmovn.s32 d18, q10
vqmovn.s32 d19, q11
transpose_4x4h q8, q9, d16, d17, d18, d19
blx r5
vld1.16 {d0}, [r0, :64], r1
vld1.16 {d1}, [r0, :64], r1
vrshr.s16 q8, q8, #4
vld1.16 {d2}, [r0, :64], r1
vrshr.s16 q9, q9, #4
vld1.16 {d3}, [r0, :64], r1
L(itx_4x4_end):
// read bitdepth_max from the caller's stack
ldr r4, [sp, #44]
vdup.i16 q15, r4
sub r0, r0, r1, lsl #2
vqadd.s16 q8, q8, q0
vqadd.s16 q9, q9, q1
vmax.s16 q8, q8, q14
vmax.s16 q9, q9, q14
vmin.s16 q8, q8, q15
vmin.s16 q9, q9, q15
vst1.16 {d16}, [r0, :64], r1
vst1.16 {d17}, [r0, :64], r1
vst1.16 {d18}, [r0, :64], r1
vst1.16 {d19}, [r0, :64], r1
vpop {q4-q5}
pop {r4-r5,pc}
endfunc
.macro def_fn_4x4 txfm1, txfm2
function inv_txfm_add_\txfm1\()_\txfm2\()_4x4_16bpc_neon, export=1
push {r4-r5,lr}
vpush {q4-q5}
.ifc \txfm1\()_\txfm2, dct_dct
cmp r3, #0
bne 1f
vmov.i16 q14, #0
mov_const r12, 2896*8*(1<<16)
vld1.32 {d16[], d17[]}, [r2, :32]
vdup.32 d4, r12
vst1.32 {d28[0]}, [r2, :32]
vqrdmulh.s32 q8, q8, d4[0]
vld1.16 {d0}, [r0, :64], r1
vqmovn.s32 d20, q8
vqmovn.s32 d21, q8
vld1.16 {d1}, [r0, :64], r1
vqrdmulh.s16 q10, q10, d4[1]
vld1.16 {d2}, [r0, :64], r1
vrshr.s16 q8, q10, #4
vld1.16 {d3}, [r0, :64], r1
vrshr.s16 q9, q10, #4
b L(itx_4x4_end)
1:
.endif
movrel_local r4, inv_\txfm1\()_4s_x4_neon
movrel r5, X(inv_\txfm2\()_4h_x4_neon)
b inv_txfm_add_4x4_neon
endfunc
.endm
def_fn_4x4 dct, dct
def_fn_4x4 identity, identity
def_fn_4x4 dct, adst
def_fn_4x4 dct, flipadst
def_fn_4x4 dct, identity
def_fn_4x4 adst, dct
def_fn_4x4 adst, adst
def_fn_4x4 adst, flipadst
def_fn_4x4 flipadst, dct
def_fn_4x4 flipadst, adst
def_fn_4x4 flipadst, flipadst
def_fn_4x4 identity, dct
def_fn_4x4 adst, identity
def_fn_4x4 flipadst, identity
def_fn_4x4 identity, adst
def_fn_4x4 identity, flipadst
.macro idct_4s_x8 r0, r1, r2, r3, r4, r5, r6, r7
idct_4s_x4 \r0, \r2, \r4, \r6
vmov.i32 q5, #0x1ffff // row_clip_max = ~(~bdmax << 7), 0x1ffff
vmvn.i32 q4, #0x1ffff // row_clip_min = (~bdmax << 7), 0xfffe0000
.irp r, \r0, \r2, \r4, \r6
vmin.s32 \r, \r, q5
.endr
.irp r, \r0, \r2, \r4, \r6
vmax.s32 \r, \r, q4
.endr
vmul_vmls q2, \r1, \r7, d2[0], d2[1] // -> t4a
vmul_vmla q3, \r1, \r7, d2[1], d2[0] // -> t7a
vmul_vmls q6, \r5, \r3, d3[0], d3[1] // -> t5a
vmul_vmla q7, \r5, \r3, d3[1], d3[0] // -> t6a
vrshr.s32 \r1, q2, #12 // t4a
vrshr.s32 \r7, q3, #12 // t7a
vrshr.s32 \r3, q6, #12 // t5a
vrshr.s32 \r5, q7, #12 // t6a
vqadd.s32 q2, \r1, \r3 // t4
vqsub.s32 \r1, \r1, \r3 // t5a
vqadd.s32 q3, \r7, \r5 // t7
vqsub.s32 \r3, \r7, \r5 // t6a
.irp r, q2, \r1, q3, \r3
vmin.s32 \r, \r, q5
.endr
.irp r, q2, \r1, q3, \r3
vmax.s32 \r, \r, q4
.endr
vmul_vmls q7, \r3, \r1, d0[0], d0[0] // -> t5
vmul_vmla q6, \r3, \r1, d0[0], d0[0] // -> t6
vrshr.s32 q7, q7, #12 // t5
vrshr.s32 q5, q6, #12 // t6
vqsub.s32 \r7, \r0, q3 // out7
vqadd.s32 \r0, \r0, q3 // out0
vqadd.s32 \r1, \r2, q5 // out1
vqsub.s32 q6, \r2, q5 // out6
vqadd.s32 \r2, \r4, q7 // out2
vqsub.s32 \r5, \r4, q7 // out5
vqadd.s32 \r3, \r6, q2 // out3
vqsub.s32 \r4, \r6, q2 // out4
vmov \r6, q6 // out6
.endm
.macro idct_2s_x8 r0, r1, r2, r3, r4, r5, r6, r7
idct_2s_x4 \r0, \r2, \r4, \r6
vmov.i32 d9, #0x1ffff // row_clip_max = ~(~bdmax << 7), 0x1ffff
vmvn.i32 d8, #0x1ffff // row_clip_min = (~bdmax << 7), 0xfffe0000
.irp r, \r0, \r2, \r4, \r6
vmin.s32 \r, \r, d9
.endr
.irp r, \r0, \r2, \r4, \r6
vmax.s32 \r, \r, d8
.endr
vmul_vmls d4, \r1, \r7, d2[0], d2[1] // -> t4a
vmul_vmla d5, \r1, \r7, d2[1], d2[0] // -> t7a
vmul_vmls d6, \r5, \r3, d3[0], d3[1] // -> t5a
vmul_vmla d7, \r5, \r3, d3[1], d3[0] // -> t6a
vrshr.s32 \r1, d4, #12 // t4a
vrshr.s32 \r7, d5, #12 // t7a
vrshr.s32 \r3, d6, #12 // t5a
vrshr.s32 \r5, d7, #12 // t6a
vqadd.s32 d4, \r1, \r3 // t4
vqsub.s32 \r1, \r1, \r3 // t5a
vqadd.s32 d5, \r7, \r5 // t7
vqsub.s32 \r3, \r7, \r5 // t6a
.irp r, d4, \r1, d5, \r3
vmin.s32 \r, \r, d9
.endr
.irp r, d4, \r1, d5, \r3
vmax.s32 \r, \r, d8
.endr
vmul_vmls d6, \r3, \r1, d0[0], d0[0] // -> t5
vmul_vmla d7, \r3, \r1, d0[0], d0[0] // -> t6
vrshr.s32 d6, d6, #12 // t5
vrshr.s32 d7, d7, #12 // t6
vqsub.s32 \r7, \r0, d5 // out7
vqadd.s32 \r0, \r0, d5 // out0
vqadd.s32 \r1, \r2, d7 // out1
vqsub.s32 d7, \r2, d7 // out6
vqadd.s32 \r2, \r4, d6 // out2
vqsub.s32 \r5, \r4, d6 // out5
vqadd.s32 \r3, \r6, d4 // out3
vqsub.s32 \r4, \r6, d4 // out4
vmov \r6, d7 // out6
.endm
function inv_dct_4s_x8_neon
movrel_local r12, idct_coeffs
vld1.32 {q0, q1}, [r12, :128]
idct_4s_x8 q8, q9, q10, q11, q12, q13, q14, q15
bx lr
endfunc
.macro iadst_4s_x8 r0, r1, r2, r3, r4, r5, r6, r7
movrel_local r12, iadst8_coeffs
vld1.32 {q0, q1}, [r12, :128]!
vmul_vmla q2, q15, q8, d0[0], d0[1]
vmul_vmls q3, q15, q8, d0[1], d0[0]
vmul_vmla q4, q13, q10, d1[0], d1[1]
vrshr.s32 q8, q2, #12 // t0a
vrshr.s32 q15, q3, #12 // t1a
vmul_vmls q5, q13, q10, d1[1], d1[0]
vmul_vmla q6, q11, q12, d2[0], d2[1]
vrshr.s32 q10, q4, #12 // t2a
vrshr.s32 q13, q5, #12 // t3a
vmul_vmls q7, q11, q12, d2[1], d2[0]
vmul_vmla q2, q9, q14, d3[0], d3[1]
vrshr.s32 q12, q6, #12 // t4a
vrshr.s32 q11, q7, #12 // t5a
vmul_vmls q3, q9, q14, d3[1], d3[0]
vrshr.s32 q14, q2, #12 // t6a
vrshr.s32 q9, q3, #12 // t7a
vld1.32 {q0}, [r12]
vqadd.s32 q2, q8, q12 // t0
vqsub.s32 q3, q8, q12 // t4
vmov.i32 q12, #0x1ffff // row_clip_max = ~(~bdmax << 7), 0x1ffff
vqadd.s32 q4, q15, q11 // t1
vqsub.s32 q5, q15, q11 // t5
vqadd.s32 q6, q10, q14 // t2
vqsub.s32 q7, q10, q14 // t6
vmvn.i32 q14, #0x1ffff // row_clip_min = (~bdmax << 7), 0xfffe0000
vqadd.s32 q10, q13, q9 // t3
vqsub.s32 q11, q13, q9 // t7
.irp r, q2, q3, q4, q5, q6, q7, q10, q11
vmin.s32 \r, \r, q12
.endr
.irp r, q2, q3, q4, q5, q6, q7, q10, q11
vmax.s32 \r, \r, q14
.endr
vmul_vmla q8, q3, q5, d1[1], d1[0]
vmul_vmls q13, q3, q5, d1[0], d1[1]
vmul_vmls q14, q11, q7, d1[1], d1[0]
vrshr.s32 q3, q8, #12 // t4a
vrshr.s32 q5, q13, #12 // t5a
vmul_vmla q8, q11, q7, d1[0], d1[1]
vrshr.s32 q7, q14, #12 // t6a
vrshr.s32 q11, q8, #12 // t7a
vqadd.s32 \r0, q2, q6 // out0
vqsub.s32 q2, q2, q6 // t2
vqadd.s32 \r7, q4, q10 // out7
vqsub.s32 q4, q4, q10 // t3
vmvn.i32 q10, #0x1ffff // row_clip_min = (~bdmax << 7), 0xfffe0000
vqadd.s32 \r1, q3, q7 // out1
vqsub.s32 q3, q3, q7 // t6
vqadd.s32 \r6, q5, q11 // out6
vqsub.s32 q5, q5, q11 // t7
// Not clipping the output registers, as they will be downshifted and
// narrowed afterwards anyway.
.irp r, q2, q4, q3, q5
vmin.s32 \r, \r, q12
.endr
.irp r, q2, q4, q3, q5
vmax.s32 \r, \r, q10
.endr
vqneg.s32 \r7, \r7 // out7
vqneg.s32 \r1, \r1 // out1
vmul_vmla q10, q2, q4, d0[0], d0[0] // -> out3 (q11 or q12)
vmul_vmls q6, q2, q4, d0[0], d0[0] // -> out4 (q12 or q11)
vmul_vmls q12, q3, q5, d0[0], d0[0] // -> out5 (q13 or q10)
vrshr.s32 q2, q10, #12 // out3
vmul_vmla q10, q3, q5, d0[0], d0[0] // -> out2 (q10 or q13)
vrshr.s32 q3, q12, #12 // out5
vrshr.s32 \r2, q10, #12 // out2 (q10 or q13)
vrshr.s32 \r4, q6, #12 // out4 (q12 or q11)
vqneg.s32 \r3, q2 // out3
vqneg.s32 \r5, q3 // out5
.endm
function inv_adst_4s_x8_neon
iadst_4s_x8 q8, q9, q10, q11, q12, q13, q14, q15
bx lr
endfunc
function inv_flipadst_4s_x8_neon
iadst_4s_x8 q15, q14, q13, q12, q11, q10, q9, q8
bx lr
endfunc
function inv_identity_4s_x8_neon
vqshl.s32 q8, q8, #1
vqshl.s32 q9, q9, #1
vqshl.s32 q10, q10, #1
vqshl.s32 q11, q11, #1
vqshl.s32 q12, q12, #1
vqshl.s32 q13, q13, #1
vqshl.s32 q14, q14, #1
vqshl.s32 q15, q15, #1
bx lr
endfunc
function inv_txfm_add_8x8_neon
vmov.i32 q0, #0
mov r7, #8*4
.irp i, q8, q9, q10, q11, q12, q13, q14, q15
vld1.32 {\i}, [r2, :128]
vst1.32 {q0}, [r2, :128], r7
.endr
blx r4
vqrshrn.s32 d16, q8, #1
vqrshrn.s32 d17, q12, #1
vqrshrn.s32 d18, q9, #1
vqrshrn.s32 d19, q13, #1
vqrshrn.s32 d20, q10, #1
vqrshrn.s32 d21, q14, #1
vqrshrn.s32 d22, q11, #1
vqrshrn.s32 d23, q15, #1
cmp r3, r10
transpose_4x8h q8, q9, q10, q11
blt 1f
sub r2, r2, r7, lsl #3
vpush {q8-q11}
add r2, r2, #16
vmov.i32 q0, #0
.irp i, q8, q9, q10, q11, q12, q13, q14, q15
vld1.32 {\i}, [r2, :128]
vst1.32 {q0}, [r2, :128], r7
.endr
blx r4
vqrshrn.s32 d31, q15, #1
vqrshrn.s32 d30, q11, #1
vqrshrn.s32 d29, q14, #1
vqrshrn.s32 d28, q10, #1
vqrshrn.s32 d27, q13, #1
vqrshrn.s32 d26, q9, #1
vqrshrn.s32 d25, q12, #1
vqrshrn.s32 d24, q8, #1
vpop {q8-q11}
transpose_4x8h q12, q13, q14, q15
b 2f
1:
vmov.i16 q12, #0
vmov.i16 q13, #0
vmov.i16 q14, #0
vmov.i16 q15, #0
2:
blx r5
load_add_store_8x8 r0, r7
vpop {q4-q7}
pop {r4-r5,r7,r10,pc}
endfunc
.macro def_fn_8x8 txfm1, txfm2, eob_half
function inv_txfm_add_\txfm1\()_\txfm2\()_8x8_16bpc_neon, export=1
.ifc \txfm1\()_\txfm2, dct_dct
idct_dc 8, 8, 1
.endif
push {r4-r5,r7,r10,lr}
vpush {q4-q7}
mov r10, #\eob_half
movrel_local r4, inv_\txfm1\()_4s_x8_neon
movrel r5, X(inv_\txfm2\()_8h_x8_neon)
b inv_txfm_add_8x8_neon
endfunc
.endm
def_fn_8x8 dct, dct, 10
def_fn_8x8 identity, identity, 10
def_fn_8x8 dct, adst, 10
def_fn_8x8 dct, flipadst, 10
def_fn_8x8 dct, identity, 4
def_fn_8x8 adst, dct, 10
def_fn_8x8 adst, adst, 10
def_fn_8x8 adst, flipadst, 10
def_fn_8x8 flipadst, dct, 10
def_fn_8x8 flipadst, adst, 10
def_fn_8x8 flipadst, flipadst, 10
def_fn_8x8 identity, dct, 4
def_fn_8x8 adst, identity, 4
def_fn_8x8 flipadst, identity, 4
def_fn_8x8 identity, adst, 4
def_fn_8x8 identity, flipadst, 4
function inv_txfm_add_8x4_neon
mov_const r12, 2896*8*(1<<16)
vmov.i32 q0, #0
vmov.i32 q1, #0
vld1.16 {q8, q9}, [r2, :128]
vst1.16 {q0, q1}, [r2, :128]!
vdup.32 d4, r12
vld1.16 {q10, q11}, [r2, :128]
vst1.16 {q0, q1}, [r2, :128]!
vld1.16 {q12, q13}, [r2, :128]
vst1.16 {q0, q1}, [r2, :128]!
vld1.16 {q14, q15}, [r2, :128]
vst1.16 {q0, q1}, [r2, :128]!
scale_input d4[0], q8, q9, q10, q11, q12, q13, q14, q15
blx r4
vqmovn.s32 d16, q8
vqmovn.s32 d17, q9
vqmovn.s32 d18, q10
vqmovn.s32 d19, q11
vqmovn.s32 d20, q12
vqmovn.s32 d21, q13
vqmovn.s32 d22, q14
vqmovn.s32 d23, q15
transpose_4x4h q8, q9, d16, d17, d18, d19
transpose_4x4h q10, q11, d20, d21, d22, d23
vswp d17, d20
vswp d19, d21
vswp d18, d20
vswp d21, d22
blx r5
load_add_store_8x4 r0, r7
vpop {q4-q7}
pop {r4-r5,r7,r10,pc}
endfunc
function inv_txfm_add_4x8_neon
mov_const r12, 2896*8*(1<<16)
vmov.i32 q0, #0
cmp r3, r10
mov r7, #32
blt 1f
add r2, r2, #16
vdup.32 d2, r12
.irp i, q8, q9, q10, q11
vld1.32 {\i}, [r2, :128]
vst1.32 {q0}, [r2, :128], r7
.endr
scale_input d2[0], q8, q9, q10, q11
sub r2, r2, r7, lsl #2
blx r4
sub r2, r2, #16
vqmovn.s32 d24, q8
vqmovn.s32 d25, q9
vqmovn.s32 d26, q10
vqmovn.s32 d27, q11
transpose_4x4h q12, q13, d24, d25, d26, d27
b 2f
1:
vmov.i16 q12, #0
vmov.i16 q13, #0
2:
mov_const r12, 2896*8*(1<<16)
vmov.i32 q0, #0
vdup.32 d2, r12
.irp i, q8, q9, q10, q11
vld1.32 {\i}, [r2, :128]
vst1.32 {q0}, [r2, :128], r7
.endr
scale_input d2[0], q8, q9, q10, q11
blx r4
vqmovn.s32 d16, q8
vqmovn.s32 d17, q9
vqmovn.s32 d18, q10
vqmovn.s32 d19, q11
transpose_4x4h q8, q9, d16, d17, d18, d19
vmov q10, q12
vmov q11, q13
blx r5
load_add_store_4x8 r0, r7
vpop {q4-q7}
pop {r4-r5,r7,r10,pc}
endfunc
.macro def_fn_48 w, h, txfm1, txfm2, eob_half
function inv_txfm_add_\txfm1\()_\txfm2\()_\w\()x\h\()_16bpc_neon, export=1
.ifc \txfm1\()_\txfm2, dct_dct
idct_dc \w, \h, 0
.endif
push {r4-r5,r7,r10,lr}
vpush {q4-q7}
movrel_local r4, inv_\txfm1\()_4s_x\w\()_neon
.if \w == 4
mov r10, #\eob_half
.endif
movrel r5, X(inv_\txfm2\()_\w\()h_x\h\()_neon)
b inv_txfm_add_\w\()x\h\()_neon
endfunc
.endm
.macro def_fns_48 w, h
def_fn_48 \w, \h, dct, dct, 13
def_fn_48 \w, \h, identity, identity, 13
def_fn_48 \w, \h, dct, adst, 13
def_fn_48 \w, \h, dct, flipadst, 13
def_fn_48 \w, \h, dct, identity, 4
def_fn_48 \w, \h, adst, dct, 13
def_fn_48 \w, \h, adst, adst, 13
def_fn_48 \w, \h, adst, flipadst, 13
def_fn_48 \w, \h, flipadst, dct, 13
def_fn_48 \w, \h, flipadst, adst, 13
def_fn_48 \w, \h, flipadst, flipadst, 13
def_fn_48 \w, \h, identity, dct, 16
def_fn_48 \w, \h, adst, identity, 4
def_fn_48 \w, \h, flipadst, identity, 4
def_fn_48 \w, \h, identity, adst, 16
def_fn_48 \w, \h, identity, flipadst, 16
.endm
def_fns_48 4, 8
def_fns_48 8, 4
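// First pass 16 point DCT, processing two columns of 32 bit coefficients
// held in d16-d31.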
function inv_dct_2s_x16_neon
movrel_local r12, idct_coeffs
vld1.32 {q0, q1}, [r12, :128]!
idct_2s_x8 d16, d18, d20, d22, d24, d26, d28, d30
// idct_8 leaves the row_clip_max/min constants in d9 and d8
.irp r, d16, d18, d20, d22, d24, d26, d28, d30
vmin.s32 \r, \r, d9
.endr
.irp r, d16, d18, d20, d22, d24, d26, d28, d30
vmax.s32 \r, \r, d8
.endr
vld1.32 {q0, q1}, [r12, :128]
sub r12, r12, #32
vmul_vmls d4, d17, d31, d0[0], d0[1] // -> t8a
vmul_vmla d5, d17, d31, d0[1], d0[0] // -> t15a
vmul_vmls d6, d25, d23, d1[0], d1[1] // -> t9a
vrshr.s32 d17, d4, #12 // t8a
vrshr.s32 d31, d5, #12 // t15a
vmul_vmla d4, d25, d23, d1[1], d1[0] // -> t14a
vmul_vmls d5, d21, d27, d2[0], d2[1] // -> t10a
vrshr.s32 d23, d6, #12 // t9a
vrshr.s32 d25, d4, #12 // t14a
vmul_vmla d6, d21, d27, d2[1], d2[0] // -> t13a
vmul_vmls d4, d29, d19, d3[0], d3[1] // -> t11a
vrshr.s32 d21, d5, #12 // t10a
vrshr.s32 d27, d6, #12 // t13a
vmul_vmla d5, d29, d19, d3[1], d3[0] // -> t12a
vrshr.s32 d19, d4, #12 // t11a
vrshr.s32 d29, d5, #12 // t12a
vld1.32 {q0}, [r12, :128]
vqsub.s32 d4, d17, d23 // t9
vqadd.s32 d17, d17, d23 // t8
vqsub.s32 d5, d31, d25 // t14
vqadd.s32 d31, d31, d25 // t15
vqsub.s32 d23, d19, d21 // t10
vqadd.s32 d19, d19, d21 // t11
vqadd.s32 d25, d29, d27 // t12
vqsub.s32 d29, d29, d27 // t13
.irp r, d4, d17, d5, d31, d23, d19, d25, d29
vmin.s32 \r, \r, d9
.endr
.irp r, d4, d17, d5, d31, d23, d19, d25, d29
vmax.s32 \r, \r, d8
.endr
vmul_vmls d6, d5, d4, d1[0], d1[1] // -> t9a
vmul_vmla d7, d5, d4, d1[1], d1[0] // -> t14a
vrshr.s32 d21, d6, #12 // t9a
vrshr.s32 d27, d7, #12 // t14a
vmul_vmls d6, d29, d23, d1[0], d1[1] // -> t13a
vmul_vmla d7, d29, d23, d1[1], d1[0] // -> t10a
vrshr.s32 d29, d6, #12 // t13a
vneg.s32 d7, d7
vrshr.s32 d23, d7, #12 // t10a
vqsub.s32 d4, d17, d19 // t11a
vqadd.s32 d17, d17, d19 // t8a
vqsub.s32 d5, d31, d25 // t12a
vqadd.s32 d31, d31, d25 // t15a
vqadd.s32 d19, d21, d23 // t9
vqsub.s32 d21, d21, d23 // t10
vqsub.s32 d25, d27, d29 // t13
vqadd.s32 d27, d27, d29 // t14
.irp r, d4, d17, d5, d31, d19, d21, d25, d27
vmin.s32 \r, \r, d9
.endr
.irp r, d4, d17, d5, d31, d19, d21, d25, d27
vmax.s32 \r, \r, d8
.endr
vmul_vmls d6, d5, d4, d0[0], d0[0] // -> t11
vmul_vmla d7, d5, d4, d0[0], d0[0] // -> t12
vmul_vmls d4, d25, d21, d0[0], d0[0] // -> t10a
vrshr.s32 d6, d6, #12 // t11
vrshr.s32 d7, d7, #12 // t12
vmul_vmla d5, d25, d21, d0[0], d0[0] // -> t13a
vrshr.s32 d4, d4, #12 // t10a
vrshr.s32 d5, d5, #12 // t13a
vqadd.s32 d8, d16, d31 // out0
vqsub.s32 d31, d16, d31 // out15
vmov d16, d8
vqadd.s32 d23, d30, d17 // out7
vqsub.s32 d9, d30, d17 // out8
vqadd.s32 d17, d18, d27 // out1
vqsub.s32 d30, d18, d27 // out14
vqadd.s32 d18, d20, d5 // out2
vqsub.s32 d29, d20, d5 // out13
vqadd.s32 d5, d28, d19 // out6
vqsub.s32 d25, d28, d19 // out9
vqadd.s32 d19, d22, d7 // out3
vqsub.s32 d28, d22, d7 // out12
vqadd.s32 d20, d24, d6 // out4
vqsub.s32 d27, d24, d6 // out11
vqadd.s32 d21, d26, d4 // out5
vqsub.s32 d26, d26, d4 // out10
vmov d24, d9
vmov d22, d5
bx lr
endfunc
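// 16 point ADST; the o0-o15 arguments give the output register order, so the
// same body serves both inv_adst and inv_flipadst below.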
.macro iadst_16 o0, o1, o2, o3, o4, o5, o6, o7, o8, o9, o10, o11, o12, o13, o14, o15
movrel_local r12, iadst16_coeffs
vld1.32 {q0, q1}, [r12, :128]!
vmul_vmla d4, d31, d16, d0[0], d0[1] // -> t0
vmul_vmls d6, d31, d16, d0[1], d0[0] // -> t1
vmul_vmla d8, d29, d18, d1[0], d1[1] // -> t2
vrshr.s32 d16, d4, #12 // t0
vrshr.s32 d31, d6, #12 // t1
vmul_vmls d4, d29, d18, d1[1], d1[0] // -> t3
vmul_vmla d6, d27, d20, d2[0], d2[1] // -> t4
vrshr.s32 d18, d8, #12 // t2
vrshr.s32 d29, d4, #12 // t3
vmul_vmls d8, d27, d20, d2[1], d2[0] // -> t5
vmul_vmla d4, d25, d22, d3[0], d3[1] // -> t6
vrshr.s32 d20, d6, #12 // t4
vrshr.s32 d27, d8, #12 // t5
vmul_vmls d6, d25, d22, d3[1], d3[0] // -> t7
vld1.32 {q0, q1}, [r12, :128]
movrel_local r12, idct_coeffs
vmul_vmla d8, d23, d24, d0[0], d0[1] // -> t8
vrshr.s32 d22, d4, #12 // t6
vrshr.s32 d25, d6, #12 // t7
vmul_vmls d4, d23, d24, d0[1], d0[0] // -> t9
vmul_vmla d6, d21, d26, d1[0], d1[1] // -> t10
vrshr.s32 d23, d8, #12 // t8
vrshr.s32 d24, d4, #12 // t9
vmul_vmls d8, d21, d26, d1[1], d1[0] // -> t11
vmul_vmla d4, d19, d28, d2[0], d2[1] // -> t12
vrshr.s32 d21, d6, #12 // t10
vrshr.s32 d26, d8, #12 // t11
vmul_vmls d6, d19, d28, d2[1], d2[0] // -> t13
vmul_vmla d8, d17, d30, d3[0], d3[1] // -> t14
vrshr.s32 d19, d4, #12 // t12
vrshr.s32 d28, d6, #12 // t13
vmul_vmls d4, d17, d30, d3[1], d3[0] // -> t15
vrshr.s32 d17, d8, #12 // t14
vrshr.s32 d30, d4, #12 // t15
vld1.32 {q0, q1}, [r12, :128]
vmov.i32 d11, #0x1ffff // row_clip_max = ~(~bdmax << 7), 0x1ffff
vmvn.i32 d10, #0x1ffff // row_clip_min = (~bdmax << 7), 0xfffe0000
vqsub.s32 d5, d16, d23 // t8a
vqadd.s32 d16, d16, d23 // t0a
vqsub.s32 d7, d31, d24 // t9a
vqadd.s32 d31, d31, d24 // t1a
vqadd.s32 d23, d18, d21 // t2a
vqsub.s32 d18, d18, d21 // t10a
vqadd.s32 d24, d29, d26 // t3a
vqsub.s32 d29, d29, d26 // t11a
vqadd.s32 d21, d20, d19 // t4a
vqsub.s32 d20, d20, d19 // t12a
vqadd.s32 d26, d27, d28 // t5a
vqsub.s32 d27, d27, d28 // t13a
vqadd.s32 d19, d22, d17 // t6a
vqsub.s32 d22, d22, d17 // t14a
vqadd.s32 d28, d25, d30 // t7a
vqsub.s32 d25, d25, d30 // t15a
.irp r, d5, d16, d7, d31, d23, d18, d24, d29, d21, d20, d26, d27, d19, d22, d28, d25
vmin.s32 \r, \r, d11
.endr
.irp r, d5, d16, d7, d31, d23, d18, d24, d29, d21, d20, d26, d27, d19, d22, d28, d25
vmax.s32 \r, \r, d10
.endr
vmul_vmla d4, d5, d7, d2[1], d2[0] // -> t8
vmul_vmls d6, d5, d7, d2[0], d2[1] // -> t9
vmul_vmla d8, d18, d29, d3[1], d3[0] // -> t10
vrshr.s32 d17, d4, #12 // t8
vrshr.s32 d30, d6, #12 // t9
vmul_vmls d4, d18, d29, d3[0], d3[1] // -> t11
vmul_vmls d6, d27, d20, d2[1], d2[0] // -> t12
vrshr.s32 d18, d8, #12 // t10
vrshr.s32 d29, d4, #12 // t11
vmul_vmla d8, d27, d20, d2[0], d2[1] // -> t13
vmul_vmls d4, d25, d22, d3[1], d3[0] // -> t14
vrshr.s32 d27, d6, #12 // t12
vrshr.s32 d20, d8, #12 // t13
vmul_vmla d6, d25, d22, d3[0], d3[1] // -> t15
vrshr.s32 d25, d4, #12 // t14
vrshr.s32 d22, d6, #12 // t15
vqsub.s32 d2, d16, d21 // t4
vqadd.s32 d16, d16, d21 // t0
vqsub.s32 d3, d31, d26 // t5
vqadd.s32 d31, d31, d26 // t1
vqadd.s32 d21, d23, d19 // t2
vqsub.s32 d23, d23, d19 // t6
vqadd.s32 d26, d24, d28 // t3
vqsub.s32 d24, d24, d28 // t7
vqadd.s32 d19, d17, d27 // t8a
vqsub.s32 d17, d17, d27 // t12a
vqadd.s32 d28, d30, d20 // t9a
vqsub.s32 d30, d30, d20 // t13a
vqadd.s32 d27, d18, d25 // t10a
vqsub.s32 d18, d18, d25 // t14a
vqadd.s32 d20, d29, d22 // t11a
vqsub.s32 d29, d29, d22 // t15a
.irp r, d2, d16, d3, d31, d21, d23, d26, d24, d19, d17, d28, d30, d27, d18, d20, d29
vmin.s32 \r, \r, d11
.endr
.irp r, d2, d16, d3, d31, d21, d23, d26, d24, d19, d17, d28, d30, d27, d18, d20, d29
vmax.s32 \r, \r, d10
.endr
vmul_vmla d4, d2, d3, d1[1], d1[0] // -> t4a
vmul_vmls d6, d2, d3, d1[0], d1[1] // -> t5a
vmul_vmls d8, d24, d23, d1[1], d1[0] // -> t6a
vrshr.s32 d22, d4, #12 // t4a
vrshr.s32 d25, d6, #12 // t5a
vmul_vmla d4, d24, d23, d1[0], d1[1] // -> t7a
vmul_vmla d6, d17, d30, d1[1], d1[0] // -> t12
vrshr.s32 d24, d8, #12 // t6a
vrshr.s32 d23, d4, #12 // t7a
vmul_vmls d8, d17, d30, d1[0], d1[1] // -> t13
vmul_vmls d4, d29, d18, d1[1], d1[0] // -> t14
vrshr.s32 d17, d6, #12 // t12
vmul_vmla d6, d29, d18, d1[0], d1[1] // -> t15
vrshr.s32 d29, d8, #12 // t13
vrshr.s32 d30, d4, #12 // t14
vrshr.s32 d18, d6, #12 // t15
vqsub.s32 d2, d16, d21 // t2a
.ifc \o0, d16
vqadd.s32 \o0, d16, d21 // out0
vqsub.s32 d21, d31, d26 // t3a
vqadd.s32 \o15,d31, d26 // out15
.else
vqadd.s32 d4, d16, d21 // out0
vqsub.s32 d21, d31, d26 // t3a
vqadd.s32 \o15,d31, d26 // out15
vmov \o0, d4
.endif
vqsub.s32 d3, d29, d18 // t15a
vqadd.s32 \o13,d29, d18 // out13
vqadd.s32 \o2, d17, d30 // out2
vqsub.s32 d26, d17, d30 // t14a
vqadd.s32 \o1, d19, d27 // out1
vqsub.s32 d27, d19, d27 // t10
vqadd.s32 \o14,d28, d20 // out14
vqsub.s32 d20, d28, d20 // t11
vqadd.s32 \o3, d22, d24 // out3
vqsub.s32 d22, d22, d24 // t6
vqadd.s32 \o12,d25, d23 // out12
vqsub.s32 d23, d25, d23 // t7
// Not clipping the output registers, as they will be downshifted and
// narrowed afterwards anyway.
.irp r, d2, d21, d3, d26, d27, d20, d22, d23
vmin.s32 \r, \r, d11
.endr
.irp r, d2, d21, d3, d26, d27, d20, d22, d23
vmax.s32 \r, \r, d10
.endr
vqneg.s32 \o15, \o15 // out15
vqneg.s32 \o13,\o13 // out13
vqneg.s32 \o1, \o1 // out1
vqneg.s32 \o3, \o3 // out3
vmul_vmls d24, d2, d21, d0[0], d0[0] // -> out8 (d24 or d23)
vmul_vmla d4, d2, d21, d0[0], d0[0] // -> out7 (d23 or d24)
vmul_vmla d6, d26, d3, d0[0], d0[0] // -> out5 (d21 or d26)
vrshr.s32 d24, d24, #12 // out8
vrshr.s32 d4, d4, #12 // out7
vrshr.s32 d5, d6, #12 // out5
vmul_vmls d8, d26, d3, d0[0], d0[0] // -> out10 (d26 or d21)
vmul_vmla d2, d22, d23, d0[0], d0[0] // -> out4 (d20 or d27)
vrshr.s32 d26, d8, #12 // out10
vmul_vmls d8, d22, d23, d0[0], d0[0] // -> out11 (d27 or d20)
vmul_vmla d22, d27, d20, d0[0], d0[0] // -> out6 (d22 or d25)
vmul_vmls d6, d27, d20, d0[0], d0[0] // -> out9 (d25 or d22)
vrshr.s32 \o4, d2, #12 // out4
vrshr.s32 d7, d6, #12 // out9
vrshr.s32 d6, d8, #12 // out11
vrshr.s32 \o6, d22, #12 // out6
.ifc \o8, d23
vmov \o8, d24
vmov \o10,d26
.endif
vqneg.s32 \o7, d4 // out7
vqneg.s32 \o5, d5 // out5
vqneg.s32 \o11,d6 // out11
vqneg.s32 \o9, d7 // out9
.endm
function inv_adst_2s_x16_neon
iadst_16 d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
bx lr
endfunc
function inv_flipadst_2s_x16_neon
iadst_16 d31, d30, d29, d28, d27, d26, d25, d24, d23, d22, d21, d20, d19, d18, d17, d16
bx lr
endfunc
function inv_identity_2s_x16_neon
mov r12, #0
movt r12, #2*(5793-4096)*8
vdup.32 d0, r12
.irp i, q8, q9, q10, q11, q12, q13, q14, q15
vqrdmulh.s32 q1, \i, d0[0]
vqadd.s32 \i, \i, \i
vqadd.s32 \i, \i, q1
.endr
bx lr
endfunc
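// Identity transform scaling helpers: identity_8x4_shift1 scales the eight
// q registers by 5793/4096, identity_8x4 by 2*5793/4096.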
.macro identity_8x4_shift1 c
.irp i, q8, q9, q10, q11, q12, q13, q14, q15
vqrdmulh.s32 q2, \i, \c
vrshr.s32 q2, q2, #1
vqadd.s32 \i, \i, q2
.endr
.endm
.macro identity_8x4 c
.irp i, q8, q9, q10, q11, q12, q13, q14, q15
vqrdmulh.s32 q2, \i, \c
vqadd.s32 \i, \i, \i
vqadd.s32 \i, \i, q2
.endr
.endm
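// Horizontal 16x2 pass helper: loads (and clears) two rows of 16
// coefficients from r7 with stride r8, runs the transform in r4, narrows to
// 16 bit with the given rounding shift and stores the result to r6. The
// _scale variant additionally pre-multiplies the input by 2896/4096 for the
// rectangular sizes.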
.macro def_horz_16 scale=0, shift=2, suffix
function inv_txfm_horz\suffix\()_16x2_neon
push {lr}
vmov.i32 d7, #0
.if \scale
mov_const r12, 2896*8*(1<<16)
vdup.32 d1, r12
.endif
.irp i, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
vld1.32 {\i}, [r7, :64]
vst1.32 {d7}, [r7, :64], r8
.endr
.if \scale
scale_input d1[0], q8, q9, q10, q11, q12, q13, q14, q15
.endif
blx r4
vqrshrn.s32 d16, q8, #\shift
vqrshrn.s32 d17, q9, #\shift
vqrshrn.s32 d18, q10, #\shift
vqrshrn.s32 d19, q11, #\shift
vqrshrn.s32 d20, q12, #\shift
vqrshrn.s32 d21, q13, #\shift
vqrshrn.s32 d22, q14, #\shift
vqrshrn.s32 d23, q15, #\shift
.if \scale
b L(horz_16x2_epilog)
.else
L(horz_16x2_epilog):
vuzp.16 q8, q9
vuzp.16 q10, q11
.irp i, q8, q10, q9, q11
vst1.16 {\i}, [r6, :128]!
.endr
pop {pc}
.endif
endfunc
.endm
def_horz_16 scale=1, shift=1, suffix=_scale
def_horz_16 scale=0, shift=2
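// Vertical pass for a 4 pixel wide strip of 16 rows: loads the 16 bit
// intermediates from r7 with stride r8, runs the transform in r5 and adds
// the result to the destination at r6.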
function inv_txfm_add_vert_4x16_neon
push {lr}
.irp i, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
vld1.16 {\i}, [r7, :64], r8
.endr
blx r5
load_add_store_4x16 r6, r7
pop {pc}
endfunc
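// 16x16 two-pass driver: the first pass writes two rows at a time into a
// 512 byte stack buffer, stopping early (and zero filling the rest) once the
// eob thresholds at r10 show the remaining rows are zero; the second pass
// then handles four 4 pixel wide strips.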
function inv_txfm_add_16x16_neon
sub_sp_align 512
ldrh r11, [r10], #2
.irp i, 0, 2, 4, 6, 8, 10, 12, 14
add r6, sp, #(\i*16*2)
.if \i > 0
mov r8, #(16 - \i)
cmp r3, r11
blt 1f
.if \i < 14
ldrh r11, [r10], #2
.endif
.endif
add r7, r2, #(\i*4)
mov r8, #16*4
bl inv_txfm_horz_16x2_neon
.endr
b 3f
1:
vmov.i16 q2, #0
vmov.i16 q3, #0
2:
subs r8, r8, #2
.rept 2
vst1.16 {q2, q3}, [r6, :128]!
.endr
bgt 2b
3:
.irp i, 0, 4, 8, 12
add r6, r0, #(\i*2)
add r7, sp, #(\i*2)
mov r8, #32
bl inv_txfm_add_vert_4x16_neon
.endr
add_sp_align 512
vpop {q4-q7}
pop {r4-r11,pc}
endfunc
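// eob threshold tables: once the block's eob is below an entry, all later
// row chunks are known to contain only zero coefficients.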
const eob_16x16
.short 3, 10, 21, 36, 55, 78, 105, 256
endconst
const eob_16x16_identity
.short 2, 4, 6, 8, 10, 12, 14, 256
endconst
.macro def_fn_16x16 txfm1, txfm2
function inv_txfm_add_\txfm1\()_\txfm2\()_16x16_16bpc_neon, export=1
.ifc \txfm1\()_\txfm2, dct_dct
idct_dc 16, 16, 2
.endif
push {r4-r11,lr}
vpush {q4-q7}
movrel_local r4, inv_\txfm1\()_2s_x16_neon
movrel r5, X(inv_\txfm2\()_4h_x16_neon)
.ifc \txfm1, identity
.ifc \txfm2, identity
movrel_local r10, eob_16x16
.else
movrel_local r10, eob_16x16_identity
.endif
.else
.ifc \txfm2, identity
movrel_local r10, eob_16x16_identity
.else
movrel_local r10, eob_16x16
.endif
.endif
b inv_txfm_add_16x16_neon
endfunc
.endm
def_fn_16x16 dct, dct
def_fn_16x16 identity, identity
def_fn_16x16 dct, adst
def_fn_16x16 dct, flipadst
def_fn_16x16 dct, identity
def_fn_16x16 adst, dct
def_fn_16x16 adst, adst
def_fn_16x16 adst, flipadst
def_fn_16x16 flipadst, dct
def_fn_16x16 flipadst, adst
def_fn_16x16 flipadst, flipadst
def_fn_16x16 identity, dct
function inv_txfm_add_16x4_neon
cmp r3, r10
mov r11, #16
blt 1f
add r6, r2, #8
vmov.i32 d4, #0
.irp i, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
vld1.32 {\i}, [r6, :64]
vst1.32 {d4}, [r6, :64], r11
.endr
blx r4
vqrshrn.s32 d16, q8, #1
vqrshrn.s32 d17, q9, #1
vqrshrn.s32 d18, q10, #1
vqrshrn.s32 d19, q11, #1
vqrshrn.s32 d20, q12, #1
vqrshrn.s32 d21, q13, #1
vqrshrn.s32 d22, q14, #1
vqrshrn.s32 d23, q15, #1
vuzp.16 q8, q9
mov r6, sp
vuzp.16 q10, q11
vpush {q8-q11}
b 2f
1:
vmov.i16 q8, #0
vmov.i16 q9, #0
mov r6, sp
vpush {q8-q9}
vpush {q8-q9}
2:
vmov.i32 d4, #0
.irp i, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
vld1.32 {\i}, [r2, :64]
vst1.32 {d4}, [r2, :64], r11
.endr
blx r4
vqrshrn.s32 d16, q8, #1
vqrshrn.s32 d17, q9, #1
vqrshrn.s32 d18, q10, #1
vqrshrn.s32 d19, q11, #1
vqrshrn.s32 d20, q12, #1
vqrshrn.s32 d21, q13, #1
vqrshrn.s32 d22, q14, #1
vqrshrn.s32 d23, q15, #1
vuzp.16 q8, q9
mov r6, sp
vuzp.16 q10, q11
vmov q12, q10
vmov q13, q11
vpop {q10-q11}
blx r5
mov r6, r0
load_add_store_8x4 r6, r7
vpop {q10-q11}
vmov q8, q12
vmov q9, q13
blx r5
add r6, r0, #16
load_add_store_8x4 r6, r7
vpop {q4-q7}
pop {r4-r11,pc}
endfunc
function inv_txfm_add_4x16_neon
ldrh r9, [r10, #4]
mov r11, #64
cmp r3, r9
ldrh r9, [r10, #2]
blt 1f
add r6, r2, #48
vmov.i32 q2, #0
.irp i, q8, q9, q10, q11
vld1.32 {\i}, [r6, :128]
vst1.32 {q2}, [r6, :128], r11
.endr
blx r4
vqrshrn.s32 d28, q8, #1
vqrshrn.s32 d29, q9, #1
vqrshrn.s32 d30, q10, #1
vqrshrn.s32 d31, q11, #1
transpose_4x4h q14, q15, d28, d29, d30, d31
b 2f
1:
vmov.i16 q14, #0
vmov.i16 q15, #0
2:
cmp r3, r9
ldrh r9, [r10]
blt 1f
add r6, r2, #32
vmov.i32 q2, #0
.irp i, q8, q9, q10, q11
vld1.32 {\i}, [r6, :128]
vst1.32 {q2}, [r6, :128], r11
.endr
blx r4
vqrshrn.s32 d24, q8, #1
vqrshrn.s32 d25, q9, #1
vqrshrn.s32 d26, q10, #1
vqrshrn.s32 d27, q11, #1
transpose_4x4h q12, q13, d24, d25, d26, d27
b 2f
1:
vmov.i16 q12, #0
vmov.i16 q13, #0
2:
cmp r3, r9
blt 1f
add r6, r2, #16
vmov.i32 q2, #0
.irp i, q8, q9, q10, q11
vld1.32 {\i}, [r6, :128]
vst1.32 {q2}, [r6, :128], r11
.endr
blx r4
vqrshrn.s32 d16, q8, #1
vqrshrn.s32 d17, q9, #1
vqrshrn.s32 d18, q10, #1
vqrshrn.s32 d19, q11, #1
transpose_4x4h q8, q9, d16, d17, d18, d19
b 2f
1:
vmov.i16 q8, #0
vmov.i16 q9, #0
2:
vmov.i16 q2, #0
vpush {q8-q9}
.irp i, q8, q9, q10, q11
vld1.16 {\i}, [r2, :128]
vst1.16 {q2}, [r2, :128], r11
.endr
blx r4
vqrshrn.s32 d16, q8, #1
vqrshrn.s32 d17, q9, #1
vqrshrn.s32 d18, q10, #1
vqrshrn.s32 d19, q11, #1
transpose_4x4h q8, q9, d16, d17, d18, d19
vpop {q10-q11}
blx r5
load_add_store_4x16 r0, r6
vpop {q4-q7}
pop {r4-r11,pc}
endfunc
const eob_4x16
.short 13, 29, 45, 64
endconst
const eob_4x16_identity1
.short 16, 32, 48, 64
endconst
const eob_4x16_identity2
.short 4, 8, 12, 64
endconst
.macro def_fn_416 w, h, txfm1, txfm2, eob_16x4
function inv_txfm_add_\txfm1\()_\txfm2\()_\w\()x\h\()_16bpc_neon, export=1
.ifc \txfm1\()_\txfm2, dct_dct
idct_dc \w, \h, 1
.endif
push {r4-r11,lr}
vpush {q4-q7}
.if \w == 4
movrel_local r4, inv_\txfm1\()_4s_x\w\()_neon
movrel r5, X(inv_\txfm2\()_4h_x\h\()_neon)
.ifc \txfm1, identity
.ifc \txfm2, identity
movrel_local r10, eob_4x16
.else
movrel_local r10, eob_4x16_identity1
.endif
.else
.ifc \txfm2, identity
movrel_local r10, eob_4x16_identity2
.else
movrel_local r10, eob_4x16
.endif
.endif
.else
mov r10, #\eob_16x4
movrel_local r4, inv_\txfm1\()_2s_x\w\()_neon
movrel r5, X(inv_\txfm2\()_8h_x\h\()_neon)
.endif
b inv_txfm_add_\w\()x\h\()_neon
endfunc
.endm
.macro def_fns_416 w, h
def_fn_416 \w, \h, dct, dct, 3
def_fn_416 \w, \h, identity, identity, 3
def_fn_416 \w, \h, dct, adst, 3
def_fn_416 \w, \h, dct, flipadst, 3
def_fn_416 \w, \h, dct, identity, 2
def_fn_416 \w, \h, adst, dct, 3
def_fn_416 \w, \h, adst, adst, 3
def_fn_416 \w, \h, adst, flipadst, 3
def_fn_416 \w, \h, flipadst, dct, 3
def_fn_416 \w, \h, flipadst, adst, 3
def_fn_416 \w, \h, flipadst, flipadst, 3
def_fn_416 \w, \h, identity, dct, 2
def_fn_416 \w, \h, adst, identity, 2
def_fn_416 \w, \h, flipadst, identity, 2
def_fn_416 \w, \h, identity, adst, 2
def_fn_416 \w, \h, identity, flipadst, 2
.endm
def_fns_416 4, 16
def_fns_416 16, 4
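// 16x8: horizontal pass two rows at a time into a 256 byte stack buffer via
// the scaling 16x2 helper, then an 8 point vertical pass over each 8 pixel
// wide half.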
function inv_txfm_add_16x8_neon
sub_sp_align 256
ldrh r11, [r10], #2
.irp i, 0, 2, 4, 6
add r6, sp, #(\i*16*2)
.if \i > 0
mov r8, #(8 - \i)
cmp r3, r11
blt 1f
.if \i < 6
ldrh r11, [r10], #2
.endif
.endif
add r7, r2, #(\i*4)
mov r8, #8*4
bl inv_txfm_horz_scale_16x2_neon
.endr
b 3f
1:
vmov.i16 q2, #0
vmov.i16 q3, #0
2:
subs r8, r8, #2
.rept 2
vst1.16 {q2, q3}, [r6, :128]!
.endr
bgt 2b
3:
.irp i, 0, 8
add r7, sp, #(\i*2)
mov r8, #32
.irp j, q8, q9, q10, q11, q12, q13, q14, q15
vld1.16 {\j}, [r7, :128], r8
.endr
blx r5
add r6, r0, #(\i*2)
load_add_store_8x8 r6, r7
.endr
add_sp_align 256
vpop {q4-q7}
pop {r4-r11,pc}
endfunc
function inv_txfm_add_8x16_neon
add r10, r10, #2
sub_sp_align 256
ldrh r11, [r10], #4
.irp i, 0, 4, 8, 12
add r6, sp, #(\i*8*2)
.if \i > 0
mov r8, #(16 - \i)
cmp r3, r11
blt 1f
.if \i < 12
ldrh r11, [r10], #4
.endif
.endif
add r7, r2, #(\i*4)
mov r8, #16*4
mov_const r12, 2896*8*(1<<16)
vmov.i32 q2, #0
vdup.32 d0, r12
.irp j, q8, q9, q10, q11, q12, q13, q14, q15
vld1.32 {\j}, [r7, :128]
vst1.32 {q2}, [r7, :128], r8
.endr
scale_input d0[0], q8, q9, q10, q11, q12, q13, q14, q15
blx r4
vqrshrn.s32 d16, q8, #1
vqrshrn.s32 d17, q9, #1
vqrshrn.s32 d18, q10, #1
vqrshrn.s32 d19, q11, #1
vqrshrn.s32 d20, q12, #1
vqrshrn.s32 d21, q13, #1
vqrshrn.s32 d22, q14, #1
vqrshrn.s32 d23, q15, #1
transpose_4x4h q8, q9, d16, d17, d18, d19
transpose_4x4h q10, q11, d20, d21, d22, d23
.irp j, d16, d20, d17, d21, d18, d22, d19, d23
vst1.16 {\j}, [r6, :64]!
.endr
.endr
b 3f
1:
vmov.i16 q2, #0
vmov.i16 q3, #0
2:
subs r8, r8, #4
.rept 2
vst1.16 {q2, q3}, [r6, :128]!
.endr
bgt 2b
3:
.irp i, 0, 4
add r6, r0, #(\i*2)
add r7, sp, #(\i*2)
mov r8, #16
bl inv_txfm_add_vert_4x16_neon
.endr
add_sp_align 256
vpop {q4-q7}
pop {r4-r11,pc}
endfunc
const eob_8x16
.short 3, 10, 21, 43, 59, 75, 91, 128
endconst
const eob_8x16_identity1
.short 2, 4, 6, 64, 80, 96, 112, 128
endconst
const eob_8x16_identity2
.short 2, 4, 6, 8, 10, 12, 14, 128
endconst
.macro def_fn_816 w, h, txfm1, txfm2
function inv_txfm_add_\txfm1\()_\txfm2\()_\w\()x\h\()_16bpc_neon, export=1
.ifc \txfm1\()_\txfm2, dct_dct
idct_dc \w, \h, 1
.endif
push {r4-r11,lr}
vpush {q4-q7}
.if \w == 8
movrel_local r4, inv_\txfm1\()_4s_x8_neon
movrel r5, X(inv_\txfm2\()_4h_x16_neon)
.else
movrel_local r4, inv_\txfm1\()_2s_x16_neon
movrel r5, X(inv_\txfm2\()_8h_x8_neon)
.endif
.ifc \txfm1, identity
.ifc \txfm2, identity
movrel_local r10, eob_8x16
.else
movrel_local r10, eob_8x16_identity1
.endif
.else
.ifc \txfm2, identity
movrel_local r10, eob_8x16_identity2
.else
movrel_local r10, eob_8x16
.endif
.endif
b inv_txfm_add_\w\()x\h\()_neon
endfunc
.endm
.macro def_fns_816 w, h
def_fn_816 \w, \h, dct, dct
def_fn_816 \w, \h, identity, identity
def_fn_816 \w, \h, dct, adst
def_fn_816 \w, \h, dct, flipadst
def_fn_816 \w, \h, dct, identity
def_fn_816 \w, \h, adst, dct
def_fn_816 \w, \h, adst, adst
def_fn_816 \w, \h, adst, flipadst
def_fn_816 \w, \h, flipadst, dct
def_fn_816 \w, \h, flipadst, adst
def_fn_816 \w, \h, flipadst, flipadst
def_fn_816 \w, \h, identity, dct
def_fn_816 \w, \h, adst, identity
def_fn_816 \w, \h, flipadst, identity
def_fn_816 \w, \h, identity, adst
def_fn_816 \w, \h, identity, flipadst
.endm
def_fns_816 8, 16
def_fns_816 16, 8
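// Odd half of the 32 point DCT (inputs 1, 3, 5, ..., 31 in d16-d31), two
// columns at a time; paired with inv_dct_2s_x16_neon for the even half by
// the 32x2 horizontal helper below.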
function inv_dct32_odd_2s_x16_neon
movrel_local r12, idct_coeffs, 4*16
vld1.32 {q0, q1}, [r12, :128]!
vmul_vmls d4, d16, d31, d0[0], d0[1] // -> t16a
vmul_vmla d6, d16, d31, d0[1], d0[0] // -> t31a
vmul_vmls d8, d24, d23, d1[0], d1[1] // -> t17a
vrshr.s32 d16, d4, #12 // t16a
vrshr.s32 d31, d6, #12 // t31a
vmul_vmla d4, d24, d23, d1[1], d1[0] // -> t30a
vmul_vmls d6, d20, d27, d2[0], d2[1] // -> t18a
vrshr.s32 d24, d8, #12 // t17a
vrshr.s32 d23, d4, #12 // t30a
vmul_vmla d8, d20, d27, d2[1], d2[0] // -> t29a
vmul_vmls d4, d28, d19, d3[0], d3[1] // -> t19a
vrshr.s32 d20, d6, #12 // t18a
vrshr.s32 d27, d8, #12 // t29a
vmul_vmla d6, d28, d19, d3[1], d3[0] // -> t28a
vld1.32 {q0, q1}, [r12, :128]
sub r12, r12, #4*24
vmul_vmls d8, d18, d29, d0[0], d0[1] // -> t20a
vrshr.s32 d28, d4, #12 // t19a
vrshr.s32 d19, d6, #12 // t28a
vmul_vmla d4, d18, d29, d0[1], d0[0] // -> t27a
vmul_vmls d6, d26, d21, d1[0], d1[1] // -> t21a
vrshr.s32 d18, d8, #12 // t20a
vrshr.s32 d29, d4, #12 // t27a
vmul_vmla d8, d26, d21, d1[1], d1[0] // -> t26a
vmul_vmls d4, d22, d25, d2[0], d2[1] // -> t22a
vrshr.s32 d26, d6, #12 // t21a
vrshr.s32 d21, d8, #12 // t26a
vmul_vmla d6, d22, d25, d2[1], d2[0] // -> t25a
vmul_vmls d8, d30, d17, d3[0], d3[1] // -> t23a
vrshr.s32 d22, d4, #12 // t22a
vrshr.s32 d25, d6, #12 // t25a
vmul_vmla d4, d30, d17, d3[1], d3[0] // -> t24a
vrshr.s32 d30, d8, #12 // t23a
vrshr.s32 d17, d4, #12 // t24a
vld1.32 {q0, q1}, [r12, :128]
vmov.i32 d11, #0x1ffff // row_clip_max = ~(~bdmax << 7), 0x1ffff
vmvn.i32 d10, #0x1ffff // row_clip_min = (~bdmax << 7), 0xfffe0000
vqsub.s32 d5, d16, d24 // t17
vqadd.s32 d16, d16, d24 // t16
vqsub.s32 d7, d31, d23 // t30
vqadd.s32 d31, d31, d23 // t31
vqsub.s32 d24, d28, d20 // t18
vqadd.s32 d28, d28, d20 // t19
vqadd.s32 d23, d18, d26 // t20
vqsub.s32 d18, d18, d26 // t21
vqsub.s32 d20, d30, d22 // t22
vqadd.s32 d30, d30, d22 // t23
vqadd.s32 d26, d17, d25 // t24
vqsub.s32 d17, d17, d25 // t25
vqsub.s32 d22, d29, d21 // t26
vqadd.s32 d29, d29, d21 // t27
vqadd.s32 d25, d19, d27 // t28
vqsub.s32 d19, d19, d27 // t29
.irp r, d5, d16, d7, d31, d24, d28, d23, d18, d20, d30, d26, d17, d22, d29, d25, d19
vmin.s32 \r, \r, d11
.endr
.irp r, d5, d16, d7, d31, d24, d28, d23, d18, d20, d30, d26, d17, d22, d29, d25, d19
vmax.s32 \r, \r, d10
.endr
vmul_vmls d4, d7, d5, d2[0], d2[1] // -> t17a
vmul_vmla d6, d7, d5, d2[1], d2[0] // -> t30a
vmul_vmla d8, d19, d24, d2[1], d2[0] // -> t18a
vrshr.s32 d21, d4, #12 // t17a
vrshr.s32 d27, d6, #12 // t30a
vneg.s32 d8, d8 // -> t18a
vmul_vmls d5, d19, d24, d2[0], d2[1] // -> t29a
vmul_vmls d4, d22, d18, d3[0], d3[1] // -> t21a
vrshr.s32 d19, d8, #12 // t18a
vrshr.s32 d24, d5, #12 // t29a
vmul_vmla d6, d22, d18, d3[1], d3[0] // -> t26a
vmul_vmla d8, d17, d20, d3[1], d3[0] // -> t22a
vrshr.s32 d22, d4, #12 // t21a
vrshr.s32 d18, d6, #12 // t26a
vneg.s32 d8, d8 // -> t22a
vmul_vmls d5, d17, d20, d3[0], d3[1] // -> t25a
vrshr.s32 d17, d8, #12 // t22a
vrshr.s32 d20, d5, #12 // t25a
vqsub.s32 d2, d27, d24 // t29
vqadd.s32 d27, d27, d24 // t30
vqsub.s32 d3, d21, d19 // t18
vqadd.s32 d21, d21, d19 // t17
vqsub.s32 d24, d16, d28 // t19a
vqadd.s32 d16, d16, d28 // t16a
vqsub.s32 d19, d30, d23 // t20a
vqadd.s32 d30, d30, d23 // t23a
vqsub.s32 d28, d17, d22 // t21
vqadd.s32 d17, d17, d22 // t22
vqadd.s32 d23, d26, d29 // t24a
vqsub.s32 d26, d26, d29 // t27a
vqadd.s32 d22, d20, d18 // t25
vqsub.s32 d20, d20, d18 // t26
vqsub.s32 d29, d31, d25 // t28a
vqadd.s32 d31, d31, d25 // t31a
.irp r, d2, d27, d3, d21, d24, d16, d19, d30, d28, d17, d23, d26, d22, d20, d29, d31
vmin.s32 \r, \r, d11
.endr
.irp r, d2, d27, d3, d21, d24, d16, d19, d30, d28, d17, d23, d26, d22, d20, d29, d31
vmax.s32 \r, \r, d10
.endr
vmul_vmls d4, d2, d3, d1[0], d1[1] // -> t18a
vmul_vmla d6, d2, d3, d1[1], d1[0] // -> t29a
vmul_vmls d8, d29, d24, d1[0], d1[1] // -> t19
vrshr.s32 d18, d4, #12 // t18a
vrshr.s32 d25, d6, #12 // t29a
vmul_vmla d5, d29, d24, d1[1], d1[0] // -> t28
vmul_vmla d4, d26, d19, d1[1], d1[0] // -> t20
vrshr.s32 d29, d8, #12 // t19
vrshr.s32 d24, d5, #12 // t28
vneg.s32 d4, d4 // -> t20
vmul_vmls d6, d26, d19, d1[0], d1[1] // -> t27
vmul_vmla d8, d20, d28, d1[1], d1[0] // -> t21a
vrshr.s32 d26, d4, #12 // t20
vrshr.s32 d19, d6, #12 // t27
vneg.s32 d8, d8 // -> t21a
vmul_vmls d5, d20, d28, d1[0], d1[1] // -> t26a
vrshr.s32 d20, d8, #12 // t21a
vrshr.s32 d28, d5, #12 // t26a
vqsub.s32 d2, d16, d30 // t23
vqadd.s32 d16, d16, d30 // t16 = out16
vqsub.s32 d3, d31, d23 // t24
vqadd.s32 d31, d31, d23 // t31 = out31
vqsub.s32 d23, d21, d17 // t22a
vqadd.s32 d17, d21, d17 // t17a = out17
vqadd.s32 d30, d27, d22 // t30a = out30
vqsub.s32 d21, d27, d22 // t25a
vqsub.s32 d27, d18, d20 // t21
vqadd.s32 d18, d18, d20 // t18 = out18
vqadd.s32 d4, d29, d26 // t19a = out19
vqsub.s32 d26, d29, d26 // t20a
vqadd.s32 d29, d25, d28 // t29 = out29
vqsub.s32 d25, d25, d28 // t26
vqadd.s32 d28, d24, d19 // t28a = out28
vqsub.s32 d24, d24, d19 // t27a
vmov d19, d4 // out19
.irp r, d2, d16, d3, d31, d23, d17, d30, d21, d27, d18, d19, d26, d29, d25, d28, d24
vmin.s32 \r, \r, d11
.endr
.irp r, d2, d16, d3, d31, d23, d17, d30, d21, d27, d18, d19, d26, d29, d25, d28, d24
vmax.s32 \r, \r, d10
.endr
vmul_vmls d4, d24, d26, d0[0], d0[0] // -> t20
vmul_vmla d6, d24, d26, d0[0], d0[0] // -> t27
vrshr.s32 d20, d4, #12 // t20
vrshr.s32 d22, d6, #12 // t27
vmul_vmla d4, d25, d27, d0[0], d0[0] // -> t26a
vmul_vmls d6, d25, d27, d0[0], d0[0] // -> t21a
vmov d27, d22 // t27
vrshr.s32 d26, d4, #12 // t26a
vmul_vmls d24, d21, d23, d0[0], d0[0] // -> t22
vmul_vmla d4, d21, d23, d0[0], d0[0] // -> t25
vrshr.s32 d21, d6, #12 // t21a
vrshr.s32 d22, d24, #12 // t22
vrshr.s32 d25, d4, #12 // t25
vmul_vmls d4, d3, d2, d0[0], d0[0] // -> t23a
vmul_vmla d6, d3, d2, d0[0], d0[0] // -> t24a
vrshr.s32 d23, d4, #12 // t23a
vrshr.s32 d24, d6, #12 // t24a
bx lr
endfunc
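// Horizontal 32x2 DCT pass: the even coefficients go through
// inv_dct_2s_x16_neon and the odd ones through inv_dct32_odd_2s_x16_neon,
// then the two halves are added/subtracted, rounded to 16 bit and stored
// to r6.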
.macro def_horz_32 scale=0, shift=2, suffix
function inv_txfm_horz\suffix\()_dct_32x2_neon
push {lr}
vmov.i32 d7, #0
lsl r8, r8, #1
.if \scale
mov_const r12, 2896*8*(1<<16)
vdup.32 d0, r12
.endif
.irp i, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
vld1.32 {\i}, [r7, :64]
vst1.32 {d7}, [r7, :64], r8
.endr
sub r7, r7, r8, lsl #4
add r7, r7, r8, lsr #1
.if \scale
scale_input d0[0], q8, q9, q10, q11, q12, q13, q14, q15
.endif
bl inv_dct_2s_x16_neon
// idct_16 leaves the row_clip_max/min constants in d9 and d8,
// but here we want to use full q registers for clipping.
vmov.i32 q3, #0x1ffff // row_clip_max = ~(~bdmax << 7), 0x1ffff
vmvn.i32 q2, #0x1ffff // row_clip_min = (~bdmax << 7), 0xfffe0000
.irp r, q8, q9, q10, q11, q12, q13, q14, q15
vmin.s32 \r, \r, q3
.endr
.irp r, q8, q9, q10, q11, q12, q13, q14, q15
vmax.s32 \r, \r, q2
.endr
vtrn.32 d16, d17
vtrn.32 d18, d19
vtrn.32 d20, d21
vtrn.32 d22, d23
vtrn.32 d24, d25
vtrn.32 d26, d27
vtrn.32 d28, d29
vtrn.32 d30, d31
.macro store1 r0, r1, r2, r3
vst1.16 {\r0}, [r6, :64]!
vst1.16 {\r1}, [r6, :64]!
vst1.16 {\r2}, [r6, :64]!
vst1.16 {\r3}, [r6, :64]!
.endm
store1 d16, d18, d20, d22
store1 d24, d26, d28, d30
store1 d17, d19, d21, d23
store1 d25, d27, d29, d31
.purgem store1
sub r6, r6, #64*2
vmov.i32 d7, #0
.irp i, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
vld1.32 {\i}, [r7, :64]
vst1.32 {d7}, [r7, :64], r8
.endr
.if \scale
// This relies on the fact that the idct also leaves the right coeff in d0[1]
scale_input d0[1], q8, q9, q10, q11, q12, q13, q14, q15
.endif
bl inv_dct32_odd_2s_x16_neon
vtrn.32 d31, d30
vtrn.32 d29, d28
vtrn.32 d27, d26
vtrn.32 d25, d24
vtrn.32 d23, d22
vtrn.32 d21, d20
vtrn.32 d19, d18
vtrn.32 d17, d16
.macro store2 r0, r1, r2, r3, r4, r5, r6, r7, shift
vld1.32 {q0, q1}, [r6, :128]!
vld1.32 {q2, q3}, [r6, :128]
sub r6, r6, #32
vqsub.s32 d15, d0, \r0
vqadd.s32 d0, d0, \r0
vqsub.s32 d14, d1, \r1
vqadd.s32 d1, d1, \r1
vqsub.s32 d13, d2, \r2
vqadd.s32 d2, d2, \r2
vqsub.s32 d12, d3, \r3
vqadd.s32 d3, d3, \r3
vqsub.s32 d11, d4, \r4
vqadd.s32 d4, d4, \r4
vqsub.s32 d10, d5, \r5
vqadd.s32 d5, d5, \r5
vqsub.s32 d9, d6, \r6
vqadd.s32 d6, d6, \r6
vqsub.s32 d8, d7, \r7
vqadd.s32 d7, d7, \r7
vqrshrn.s32 d0, q0, #\shift
vqrshrn.s32 d1, q1, #\shift
vqrshrn.s32 d2, q2, #\shift
vqrshrn.s32 d3, q3, #\shift
vqrshrn.s32 d4, q4, #\shift
vqrshrn.s32 d5, q5, #\shift
vqrshrn.s32 d6, q6, #\shift
vqrshrn.s32 d7, q7, #\shift
vrev32.16 q2, q2
vrev32.16 q3, q3
vst1.16 {q0, q1}, [r6, :128]!
vst1.16 {q2, q3}, [r6, :128]!
.endm
store2 d31, d29, d27, d25, d23, d21, d19, d17, \shift
store2 d30, d28, d26, d24, d22, d20, d18, d16, \shift
.purgem store2
pop {pc}
endfunc
.endm
def_horz_32 scale=0, shift=2
def_horz_32 scale=1, shift=1, suffix=_scale
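// Vertical 32 point DCT for a 4 pixel wide strip, with the final even/odd
// add/sub folded into the add-to-destination (combine) step.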
function inv_txfm_add_vert_dct_4x32_neon
push {r10-r11,lr}
lsl r8, r8, #1
.irp i, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
vld1.16 {\i}, [r7, :64], r8
.endr
sub r7, r7, r8, lsl #4
bl X(inv_dct_4h_x16_neon)
.irp i, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
vst1.16 {\i}, [r7, :64], r8
.endr
sub r7, r7, r8, lsl #4
add r7, r7, r8, lsr #1
.irp i, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
vld1.16 {\i}, [r7, :64], r8
.endr
sub r7, r7, r8, lsl #4
sub r7, r7, r8, lsr #1
bl X(inv_dct32_odd_4h_x16_neon)
neg r9, r8
mov r10, r6
vmov.i16 q6, #0
vmvn.i16 q7, #0xfc00 // 0x3ff
.macro combine r0, r1, r2, r3, op, stride
vld1.16 {d4}, [r7, :64], \stride
vld1.16 {d0}, [r10, :64], r1
vld1.16 {d5}, [r7, :64], \stride
vld1.16 {d1}, [r10, :64], r1
\op\().s16 d4, d4, \r0
vld1.16 {d6}, [r7, :64], \stride
vld1.16 {d2}, [r10, :64], r1
\op\().s16 d5, d5, \r1
vld1.16 {d3}, [r10, :64], r1
vrshr.s16 q2, q2, #4
\op\().s16 d6, d6, \r2
vld1.16 {d7}, [r7, :64], \stride
vqadd.s16 q0, q0, q2
\op\().s16 d7, d7, \r3
vmax.s16 q0, q0, q6
vrshr.s16 q3, q3, #4
vmin.s16 q0, q0, q7
vqadd.s16 q1, q1, q3
vst1.16 {d0}, [r6, :64], r1
vmax.s16 q1, q1, q6
vst1.16 {d1}, [r6, :64], r1
vmin.s16 q1, q1, q7
vst1.16 {d2}, [r6, :64], r1
vst1.16 {d3}, [r6, :64], r1
.endm
combine d31, d30, d29, d28, vqadd, r8
combine d27, d26, d25, d24, vqadd, r8
combine d23, d22, d21, d20, vqadd, r8
combine d19, d18, d17, d16, vqadd, r8
sub r7, r7, r8
combine d16, d17, d18, d19, vqsub, r9
combine d20, d21, d22, d23, vqsub, r9
combine d24, d25, d26, d27, vqsub, r9
combine d28, d29, d30, d31, vqsub, r9
.purgem combine
pop {r10-r11,pc}
endfunc
const eob_32x32
.short 3, 10, 21, 36, 55, 78, 105, 136, 171, 210, 253, 300, 351, 406, 465, 1024
endconst
const eob_16x32
.short 3, 10, 21, 36, 55, 78, 105, 151, 183, 215, 247, 279, 311, 343, 375, 512
endconst
const eob_16x32_shortside
.short 3, 10, 21, 36, 55, 78, 105, 512
endconst
const eob_8x32
.short 3, 10, 21, 43, 59, 75, 91, 107, 123, 139, 155, 171, 187, 203, 219, 256
endconst
function inv_txfm_add_identity_identity_32x32_16bpc_neon, export=1
push {r4-r7,lr}
vpush {q6-q7}
movrel_local r5, eob_32x32, 2
mov r6, #4*32
1:
mov r12, #0
movrel_local r4, eob_32x32, 6
2:
vmov.i32 q0, #0
add r12, r12, #8
.irp i, q8, q9, q10, q11, q12, q13, q14, q15
vld1.32 {\i}, [r2, :128]
vst1.32 {q0}, [r2, :128], r6
.endr
vqmovn.s32 d16, q8
vqmovn.s32 d17, q12
vqmovn.s32 d18, q9
vqmovn.s32 d19, q13
vqmovn.s32 d20, q10
vqmovn.s32 d21, q14
vqmovn.s32 d22, q11
vqmovn.s32 d23, q15
transpose_4x8h q8, q9, q10, q11
load_add_store_8x4 r0, r7, shiftbits=2
ldrh lr, [r4], #8
sub r0, r0, r1, lsl #2
cmp r3, lr
add r0, r0, #2*8
bge 2b
ldrh lr, [r5], #4
cmp r3, lr
blt 9f
sub r0, r0, r12, lsl #1
add r0, r0, r1, lsl #2
mls r2, r6, r12, r2
add r2, r2, #4*4
b 1b
9:
vpop {q6-q7}
pop {r4-r7,pc}
endfunc
.macro shift_8_regs op, shift
.irp i, q8, q9, q10, q11, q12, q13, q14, q15
\op \i, \i, #\shift
.endr
.endm
.macro def_identity_1632 w, h, wshort, hshort
function inv_txfm_add_identity_identity_\w\()x\h\()_16bpc_neon, export=1
push {r4-r9,lr}
vpush {q6-q7}
mov r9, #0
mov_const r8, 2896*8*(1<<16)
movt r9, #2*(5793-4096)*8
movrel_local r5, eob_16x32\hshort, 2
mov r6, #4*\h
1:
mov r12, #0
movrel_local r4, eob_16x32\wshort, 6
2:
vdup.i32 d0, r8
vmov.i32 q1, #0
vmov.32 d0[1], r9
add r12, r12, #8
.irp i, q8, q9, q10, q11, q12, q13, q14, q15
vld1.32 {\i}, [r2, :128]
vst1.32 {q1}, [r2, :128], r6
.endr
scale_input d0[0], q8, q9, q10, q11, q12, q13, q14, q15
.if \w == 16
// 16x32
identity_8x4_shift1 d0[1]
.else
// 32x16
shift_8_regs vqshl.s32, 1
identity_8x4 d0[1]
.endif
vqmovn.s32 d16, q8
vqmovn.s32 d17, q12
vqmovn.s32 d18, q9
vqmovn.s32 d19, q13
vqmovn.s32 d20, q10
vqmovn.s32 d21, q14
vqmovn.s32 d22, q11
vqmovn.s32 d23, q15
transpose_4x8h q8, q9, q10, q11
.if \w == 16
load_add_store_8x4 r0, r7, shiftbits=2
.else
load_add_store_8x4 r0, r7, shiftbits=4
.endif
ldrh lr, [r4], #8
sub r0, r0, r1, lsl #2
cmp r3, lr
add r0, r0, #2*8
bge 2b
ldrh lr, [r5], #4
cmp r3, lr
blt 9f
sub r0, r0, r12, lsl #1
add r0, r0, r1, lsl #2
mls r2, r6, r12, r2
add r2, r2, #4*4
b 1b
9:
vpop {q6-q7}
pop {r4-r9,pc}
endfunc
.endm
def_identity_1632 16, 32, _shortside,
def_identity_1632 32, 16, , _shortside
.macro def_identity_832 w, h
function inv_txfm_add_identity_identity_\w\()x\h\()_16bpc_neon, export=1
push {r4-r5,lr}
vpush {q6-q7}
movrel_local r4, eob_8x32, 2
mov r12, #4*\h
1:
ldrh lr, [r4], #4
.if \w == 8
vmov.i32 q0, #0
.irp i, q8, q9, q10, q11, q12, q13, q14, q15
vld1.32 {\i}, [r2, :128]
vst1.32 {q0}, [r2, :128], r12
.endr
vqrshrn.s32 d16, q8, #1
vqrshrn.s32 d17, q12, #1
vqrshrn.s32 d18, q9, #1
vqrshrn.s32 d19, q13, #1
vqrshrn.s32 d20, q10, #1
vqrshrn.s32 d21, q14, #1
vqrshrn.s32 d22, q11, #1
vqrshrn.s32 d23, q15, #1
transpose_4x8h q8, q9, q10, q11
cmp r3, lr
load_add_store_8x4 r0, r5, shiftbits=2
blt 9f
sub r2, r2, r12, lsl #3
add r2, r2, #4*4
.else
vmov.i32 q0, #0
vmov.i32 q1, #0
vld1.32 {q8, q9}, [r2, :128]
vst1.32 {q0, q1}, [r2, :128], r12
vld1.32 {q10, q11}, [r2, :128]
vst1.32 {q0, q1}, [r2, :128], r12
vld1.32 {q12, q13}, [r2, :128]
vst1.32 {q0, q1}, [r2, :128], r12
vld1.32 {q14, q15}, [r2, :128]
vst1.32 {q0, q1}, [r2, :128], r12
vqmovn.s32 d16, q8
vqmovn.s32 d17, q10
vqmovn.s32 d20, q9
vqmovn.s32 d21, q11
vqmovn.s32 d18, q12
vqmovn.s32 d19, q14
vqmovn.s32 d22, q13
vqmovn.s32 d23, q15
transpose_4x4h q8, q9, d16, d17, d18, d19
transpose_4x4h q10, q11, d20, d21, d22, d23
cmp r3, lr
load_add_store_4x8 r0, r5, shiftbits=3
blt 9f
sub r0, r0, r1, lsl #3
add r0, r0, #2*4
.endif
b 1b
9:
vpop {q6-q7}
pop {r4-r5,pc}
endfunc
.endm
def_identity_832 8, 32
def_identity_832 32, 8
function inv_txfm_add_dct_dct_32x32_16bpc_neon, export=1
idct_dc 32, 32, 2
push {r4-r11,lr}
vpush {q4-q7}
sub_sp_align 2048
movrel_local r10, eob_32x32
ldrh r11, [r10], #2
.irp i, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
add r6, sp, #(\i*32*2)
.if \i > 0
mov r8, #(32 - \i)
cmp r3, r11
blt 1f
.if \i < 30
ldrh r11, [r10], #2
.endif
.endif
add r7, r2, #(\i*4)
mov r8, #32*4
bl inv_txfm_horz_dct_32x2_neon
.endr
b 3f
1:
vmov.i16 q2, #0
vmov.i16 q3, #0
2:
subs r8, r8, #2
.rept 4
vst1.16 {q2, q3}, [r6, :128]!
.endr
bgt 2b
3:
.irp i, 0, 4, 8, 12, 16, 20, 24, 28
add r6, r0, #(\i*2)
add r7, sp, #(\i*2)
mov r8, #32*2
bl inv_txfm_add_vert_dct_4x32_neon
.endr
add_sp_align 2048
vpop {q4-q7}
pop {r4-r11,pc}
endfunc
function inv_txfm_add_dct_dct_16x32_16bpc_neon, export=1
idct_dc 16, 32, 1
push {r4-r11,lr}
vpush {q4-q7}
sub_sp_align 1024
movrel_local r10, eob_16x32
ldrh r11, [r10], #2
movrel_local r4, inv_dct_2s_x16_neon
.irp i, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
add r6, sp, #(\i*16*2)
add r7, r2, #(\i*4)
.if \i > 0
mov r8, #(32 - \i)
cmp r3, r11
blt 1f
.if \i < 30
ldrh r11, [r10], #2
.endif
.endif
mov r8, #4*32
bl inv_txfm_horz_scale_16x2_neon
.endr
b 3f
1:
vmov.i16 q2, #0
vmov.i16 q3, #0
2:
subs r8, r8, #2
.rept 2
vst1.16 {q2, q3}, [r6, :128]!
.endr
bgt 2b
3:
.irp i, 0, 4, 8, 12
add r6, r0, #(\i*2)
add r7, sp, #(\i*2)
mov r8, #16*2
bl inv_txfm_add_vert_dct_4x32_neon
.endr
add_sp_align 1024
vpop {q4-q7}
pop {r4-r11,pc}
endfunc
function inv_txfm_add_dct_dct_32x16_16bpc_neon, export=1
idct_dc 32, 16, 1
push {r4-r11,lr}
vpush {q4-q7}
sub_sp_align 1024
movrel_local r10, eob_16x32
ldrh r11, [r10], #2
movrel r5, X(inv_dct_4h_x16_neon)
.irp i, 0, 2, 4, 6, 8, 10, 12, 14
add r6, sp, #(\i*32*2)
add r7, r2, #(\i*4)
.if \i > 0
mov r8, #(16 - \i)
cmp r3, r11
blt 1f
.if \i < 14
ldrh r11, [r10], #2
.endif
.endif
mov r8, #4*16
bl inv_txfm_horz_scale_dct_32x2_neon
.endr
b 3f
1:
vmov.i16 q2, #0
vmov.i16 q3, #0
2:
subs r8, r8, #2
.rept 4
vst1.16 {q2, q3}, [r6, :128]!
.endr
bgt 2b
3:
.irp i, 0, 4, 8, 12, 16, 20, 24, 28
add r6, r0, #(\i*2)
add r7, sp, #(\i*2)
mov r8, #32*2
bl inv_txfm_add_vert_4x16_neon
.endr
add_sp_align 1024
vpop {q4-q7}
pop {r4-r11,pc}
endfunc
function inv_txfm_add_dct_dct_8x32_16bpc_neon, export=1
idct_dc 8, 32, 2
push {r4-r11,lr}
vpush {q4-q7}
sub_sp_align 512
movrel_local r10, eob_8x32, 2
mov r8, #4*32
mov r9, #32
mov r6, sp
1:
vmov.i32 q0, #0
.irp i, q8, q9, q10, q11, q12, q13, q14, q15
vld1.32 {\i}, [r2, :128]
vst1.32 {q0}, [r2, :128], r8
.endr
ldrh r11, [r10], #4
sub r2, r2, r8, lsl #3
sub r9, r9, #4
add r2, r2, #4*4
bl inv_dct_4s_x8_neon
vqrshrn.s32 d16, q8, #2
vqrshrn.s32 d18, q9, #2
vqrshrn.s32 d20, q10, #2
vqrshrn.s32 d22, q11, #2
vqrshrn.s32 d17, q12, #2
vqrshrn.s32 d19, q13, #2
vqrshrn.s32 d21, q14, #2
vqrshrn.s32 d23, q15, #2
transpose_4x8h q8, q9, q10, q11
vst1.16 {q8, q9}, [r6, :128]!
cmp r3, r11
vst1.16 {q10, q11}, [r6, :128]!
bge 1b
cmp r9, #0
beq 3f
vmov.i16 q2, #0
vmov.i16 q3, #0
2:
subs r9, r9, #4
.rept 2
vst1.16 {q2, q3}, [r6, :128]!
.endr
bgt 2b
3:
.irp i, 0, 4
add r6, r0, #(\i*2)
add r7, sp, #(\i*2)
mov r8, #8*2
bl inv_txfm_add_vert_dct_4x32_neon
.endr
add_sp_align 512
vpop {q4-q7}
pop {r4-r11,pc}
endfunc
function inv_txfm_add_dct_dct_32x8_16bpc_neon, export=1
idct_dc 32, 8, 2
push {r4-r11,lr}
vpush {q4-q7}
movrel_local r10, eob_8x32
sub_sp_align 512
ldrh r11, [r10], #2
.irp i, 0, 2, 4, 6
add r6, sp, #(\i*32*2)
add r7, r2, #(\i*4)
.if \i > 0
cmp r3, r11
mov r8, #(8 - \i)
blt 1f
.if \i < 6
ldrh r11, [r10], #2
.endif
.endif
mov r8, #8*4
bl inv_txfm_horz_dct_32x2_neon
.endr
b 3f
1:
vmov.i16 q2, #0
vmov.i16 q3, #0
2:
subs r8, r8, #2
.rept 4
vst1.16 {q2, q3}, [r6, :128]!
.endr
bgt 2b
3:
mov r8, #2*32
mov r9, #0
1:
add r6, r0, r9, lsl #1
add r7, sp, r9, lsl #1 // #(\i*2)
.irp i, q8, q9, q10, q11, q12, q13, q14, q15
vld1.16 {\i}, [r7, :128], r8
.endr
add r9, r9, #8
bl X(inv_dct_8h_x8_neon)
cmp r9, #32
load_add_store_8x8 r6, r7
blt 1b
add_sp_align 512
vpop {q4-q7}
pop {r4-r11,pc}
endfunc
function inv_dct64_step1_neon
// in1/31/17/15 -> t32a/33/34a/35/60/61a/62/63a
// in7/25/23/ 9 -> t56a/57/58a/59/36/37a/38/39a
// in5/27/21/11 -> t40a/41/42a/43/52/53a/54/55a
// in3/29/19/13 -> t48a/49/50a/51/44/45a/46/47a
vld1.32 {q0, q1}, [r12, :128]!
vqrdmulh.s32 d23, d16, d0[1] // t63a
vqrdmulh.s32 d16, d16, d0[0] // t32a
vqrdmulh.s32 d22, d17, d1[0] // t62a
vqrdmulh.s32 d17, d17, d1[1] // t33a
vqrdmulh.s32 d21, d18, d2[1] // t61a
vqrdmulh.s32 d18, d18, d2[0] // t34a
vqrdmulh.s32 d20, d19, d3[0] // t60a
vqrdmulh.s32 d19, d19, d3[1] // t35a
vld1.32 {q0}, [r12, :128]!
vqadd.s32 d24, d16, d17 // t32
vqsub.s32 d25, d16, d17 // t33
vqsub.s32 d26, d19, d18 // t34
vqadd.s32 d27, d19, d18 // t35
vqadd.s32 d28, d20, d21 // t60
vqsub.s32 d29, d20, d21 // t61
vqsub.s32 d30, d23, d22 // t62
vqadd.s32 d31, d23, d22 // t63
.irp r, q12, q13, q14, q15
vmin.s32 \r, \r, q5
.endr
.irp r, q12, q13, q14, q15
vmax.s32 \r, \r, q4
.endr
vmul_vmla d4, d29, d26, d0[0], d0[1] // -> t34a
vmul_vmls d6, d29, d26, d0[1], d0[0] // -> t61a
vneg.s32 d4, d4 // t34a
vmul_vmls d7, d30, d25, d0[1], d0[0] // -> t33a
vrshr.s32 d26, d4, #12 // t34a
vmul_vmla d4, d30, d25, d0[0], d0[1] // -> t62a
vrshr.s32 d29, d6, #12 // t61a
vrshr.s32 d25, d7, #12 // t33a
vrshr.s32 d30, d4, #12 // t62a
vqadd.s32 d16, d24, d27 // t32a
vqsub.s32 d19, d24, d27 // t35a
vqadd.s32 d17, d25, d26 // t33
vqsub.s32 d18, d25, d26 // t34
vqsub.s32 d20, d31, d28 // t60a
vqadd.s32 d23, d31, d28 // t63a
vqsub.s32 d21, d30, d29 // t61
vqadd.s32 d22, d30, d29 // t62
.irp r, q8, q9, q10, q11
vmin.s32 \r, \r, q5
.endr
.irp r, q8, q9, q10, q11
vmax.s32 \r, \r, q4
.endr
vmul_vmla d4, d21, d18, d1[0], d1[1] // -> t61a
vmul_vmls d6, d21, d18, d1[1], d1[0] // -> t34a
vmul_vmla d7, d20, d19, d1[0], d1[1] // -> t60
vrshr.s32 d21, d4, #12 // t61a
vrshr.s32 d18, d6, #12 // t34a
vmul_vmls d4, d20, d19, d1[1], d1[0] // -> t35
vrshr.s32 d20, d7, #12 // t60
vrshr.s32 d19, d4, #12 // t35
vst1.32 {d16, d17, d18, d19}, [r6, :128]!
vst1.32 {d20, d21, d22, d23}, [r6, :128]!
bx lr
endfunc
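// Second butterfly stage of the 64 point DCT: combines the t32-t63 values
// produced by the four step1 calls, walking the buffer from both ends
// (r6 forwards, r9 backwards).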
function inv_dct64_step2_neon
movrel_local r12, idct_coeffs
vld1.32 {q0}, [r12, :128]
1:
// t32a/33/34a/35/60/61a/62/63a
// t56a/57/58a/59/36/37a/38/39a
// t40a/41/42a/43/52/53a/54/55a
// t48a/49/50a/51/44/45a/46/47a
vldr d16, [r6, #4*2*0] // t32a
vldr d17, [r9, #4*2*8] // t39a
vldr d18, [r9, #4*2*0] // t63a
vldr d19, [r6, #4*2*8] // t56a
vldr d20, [r6, #4*2*16] // t40a
vldr d21, [r9, #4*2*24] // t47a
vldr d22, [r9, #4*2*16] // t55a
vldr d23, [r6, #4*2*24] // t48a
vqadd.s32 d24, d16, d17 // t32
vqsub.s32 d25, d16, d17 // t39
vqadd.s32 d26, d18, d19 // t63
vqsub.s32 d27, d18, d19 // t56
vqsub.s32 d28, d21, d20 // t40
vqadd.s32 d29, d21, d20 // t47
vqadd.s32 d30, d23, d22 // t48
vqsub.s32 d31, d23, d22 // t55
.irp r, q12, q13, q14, q15
vmin.s32 \r, \r, q5
.endr
.irp r, q12, q13, q14, q15
vmax.s32 \r, \r, q4
.endr
vmul_vmla d4, d27, d25, d1[1], d1[0] // -> t56a
vmul_vmls d6, d27, d25, d1[0], d1[1] // -> t39a
vmul_vmla d7, d31, d28, d1[1], d1[0] // -> t40a
vrshr.s32 d25, d4, #12 // t56a
vrshr.s32 d27, d6, #12 // t39a
vneg.s32 d7, d7 // t40a
vmul_vmls d4, d31, d28, d1[0], d1[1] // -> t55a
vrshr.s32 d31, d7, #12 // t40a
vrshr.s32 d28, d4, #12 // t55a
vqadd.s32 d16, d24, d29 // t32a
vqsub.s32 d19, d24, d29 // t47a
vqadd.s32 d17, d27, d31 // t39
vqsub.s32 d18, d27, d31 // t40
vqsub.s32 d20, d26, d30 // t48a
vqadd.s32 d23, d26, d30 // t63a
vqsub.s32 d21, d25, d28 // t55
vqadd.s32 d22, d25, d28 // t56
.irp r, q8, q9, q10, q11
vmin.s32 \r, \r, q5
.endr
.irp r, q8, q9, q10, q11
vmax.s32 \r, \r, q4
.endr
vmul_vmls d4, d21, d18, d0[0], d0[0] // -> t40a
vmul_vmla d6, d21, d18, d0[0], d0[0] // -> t55a
vmul_vmls d7, d20, d19, d0[0], d0[0] // -> t47
vrshr.s32 d18, d4, #12 // t40a
vrshr.s32 d21, d6, #12 // t55a
vmul_vmla d4, d20, d19, d0[0], d0[0] // -> t48
vrshr.s32 d19, d7, #12 // t47
vrshr.s32 d20, d4, #12 // t48
vstr d16, [r6, #4*2*0] // t32a
vstr d17, [r9, #4*2*0] // t39
vstr d18, [r6, #4*2*8] // t40a
vstr d19, [r9, #4*2*8] // t47
vstr d20, [r6, #4*2*16] // t48
vstr d21, [r9, #4*2*16] // t55a
vstr d22, [r6, #4*2*24] // t56
vstr d23, [r9, #4*2*24] // t63a
add r6, r6, #4*2
sub r9, r9, #4*2
cmp r6, r9
blt 1b
bx lr
endfunc
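// Helpers for the 64 point DCT first pass below: load8/store16 move the
// 32 bit coefficients, and the *_if macros let a single body serve the
// clear/scale variants generated by def_dct64_func.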
.macro load8 src, strd, zero, clear
.irp i, d16, d17, d18, d19, d20, d21, d22, d23
.if \clear
vld1.32 {\i}, [\src, :64]
vst1.32 {\zero}, [\src, :64], \strd
.else
vld1.32 {\i}, [\src, :64], \strd
.endif
.endr
.endm
.macro store16 dst
vst1.32 {q8, q9}, [\dst, :128]!
vst1.32 {q10, q11}, [\dst, :128]!
vst1.32 {q12, q13}, [\dst, :128]!
vst1.32 {q14, q15}, [\dst, :128]!
.endm
.macro clear_upper8
.irp i, q12, q13, q14, q15
vmov.i32 \i, #0
.endr
.endm
.macro vmov_if reg, val, cond
.if \cond
vmov.i32 \reg, \val
.endif
.endm
.macro movdup_if reg, gpr, val, cond
.if \cond
mov_const \gpr, \val
vdup.32 \reg, \gpr
.endif
.endm
.macro vst1_if regs, dst, dstalign, cond
.if \cond
vst1.32 \regs, \dst, \dstalign
.endif
.endm
.macro scale_if cond, c, r0, r1, r2, r3, r4, r5, r6, r7
.if \cond
scale_input \c, \r0, \r1, \r2, \r3, \r4, \r5, \r6, \r7
.endif
.endm
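// First pass 64 point DCT on two columns: built from the 16 point DCT, the
// dct32 odd half and four inv_dct64_step1 calls, merged by inv_dct64_step2.
// The clear variant zeroes the coefficient buffer as it is read; the scale
// variant also pre-multiplies the input by 2896/4096 for rectangular sizes.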
.macro def_dct64_func suffix, clear=0, scale=0
function inv_txfm_dct\suffix\()_2s_x64_neon
mov r6, sp
push {r10-r11,lr}
lsl r8, r8, #2
movdup_if d0, r12, 2896*8*(1<<16), \scale
vmov_if d7, #0, \clear
load8 r7, r8, d7, \clear
clear_upper8
sub r7, r7, r8, lsl #3
add r7, r7, r8, lsr #1
scale_if \scale, d0[0], q8, q9, q10, q11
bl inv_dct_2s_x16_neon
// idct_16 leaves the row_clip_max/min constants in d9 and d8,
// but here we want to use full q registers for clipping.
vmov.i32 q3, #0x1ffff // row_clip_max = ~(~bdmax << 7), 0x1ffff
vmvn.i32 q2, #0x1ffff // row_clip_min = (~bdmax << 7), 0xfffe0000
.irp r, q8, q9, q10, q11, q12, q13, q14, q15
vmin.s32 \r, \r, q3
.endr
.irp r, q8, q9, q10, q11, q12, q13, q14, q15
vmax.s32 \r, \r, q2
.endr
store16 r6
movdup_if d0, r12, 2896*8*(1<<16), \scale
vmov_if d7, #0, \clear
load8 r7, r8, d7, \clear
clear_upper8
sub r7, r7, r8, lsl #3
lsr r8, r8, #1
sub r7, r7, r8, lsr #1
scale_if \scale, d0[0], q8, q9, q10, q11
bl inv_dct32_odd_2s_x16_neon
add r10, r6, #8*15
sub r6, r6, #8*16
mov r9, #-8
vmov.i32 d1, #0x1ffff // row_clip_max = ~(~bdmax << 7), 0x1ffff
vmvn.i32 d0, #0x1ffff // row_clip_min = (~bdmax << 7), 0xfffe0000
.macro store_addsub r0, r1, r2, r3
vld1.32 {d2}, [r6, :64]!
vld1.32 {d3}, [r6, :64]!
vqadd.s32 d6, d2, \r0
vqsub.s32 \r0, d2, \r0
vld1.32 {d4}, [r6, :64]!
vqadd.s32 d7, d3, \r1
vqsub.s32 \r1, d3, \r1
vmin.s32 d6, d6, d1
vmin.s32 \r0, \r0, d1
vld1.32 {d5}, [r6, :64]!
vqadd.s32 d2, d4, \r2
sub r6, r6, #8*4
vmax.s32 d6, d6, d0
vmax.s32 \r0, \r0, d0
vqsub.s32 \r2, d4, \r2
vmin.s32 d7, d7, d1
vmin.s32 \r1, \r1, d1
vst1.32 {d6}, [r6, :64]!
vst1.32 {\r0}, [r10, :64], r9
vmin.s32 d2, d2, d1
vmin.s32 \r2, \r2, d1
vmax.s32 d7, d7, d0
vmax.s32 \r1, \r1, d0
vqadd.s32 d3, d5, \r3
vqsub.s32 \r3, d5, \r3
vmax.s32 d2, d2, d0
vmax.s32 \r2, \r2, d0
vmin.s32 d3, d3, d1
vmin.s32 \r3, \r3, d1
vst1.32 {d7}, [r6, :64]!
vst1.32 {\r1}, [r10, :64], r9
vmax.s32 d3, d3, d0
vmax.s32 \r3, \r3, d0
vst1.32 {d2}, [r6, :64]!
vst1.32 {\r2}, [r10, :64], r9
vst1.32 {d3}, [r6, :64]!
vst1.32 {\r3}, [r10, :64], r9
.endm
store_addsub d31, d30, d29, d28
store_addsub d27, d26, d25, d24
store_addsub d23, d22, d21, d20
store_addsub d19, d18, d17, d16
.purgem store_addsub
add r6, r6, #2*4*16
movrel_local r12, idct64_coeffs
vmov.i32 q5, #0x1ffff // row_clip_max = ~(~bdmax << 7), 0x1ffff
vmvn.i32 q4, #0x1ffff // row_clip_min = (~bdmax << 7), 0xfffe0000
movdup_if d0, lr, 2896*8*(1<<16), \scale
vmov_if d7, #0, \clear
add r9, r7, r8, lsl #4 // offset 16
add r10, r7, r8, lsl #3 // offset 8
sub r9, r9, r8 // offset 15
sub r11, r10, r8 // offset 7
vld1.32 {d16}, [r7, :64] // in1 (offset 0)
vld1.32 {d17}, [r9, :64] // in31 (offset 15)
vld1.32 {d18}, [r10, :64] // in17 (offset 8)
vld1.32 {d19}, [r11, :64] // in15 (offset 7)
vst1_if {d7}, [r7, :64], \clear
vst1_if {d7}, [r9, :64], \clear
vst1_if {d7}, [r10, :64], \clear
vst1_if {d7}, [r11, :64], \clear
scale_if \scale, d0[0], q8, q9
bl inv_dct64_step1_neon
movdup_if d0, lr, 2896*8*(1<<16), \scale
vmov_if d7, #0, \clear
add r7, r7, r8, lsl #2 // offset 4
sub r9, r9, r8, lsl #2 // offset 11
sub r10, r7, r8 // offset 3
add r11, r9, r8 // offset 12
vld1.32 {d16}, [r10, :64] // in7 (offset 3)
vld1.32 {d17}, [r11, :64] // in25 (offset 12)
vld1.32 {d18}, [r9, :64] // in23 (offset 11)
vld1.32 {d19}, [r7, :64] // in9 (offset 4)
vst1_if {d7}, [r7, :64], \clear
vst1_if {d7}, [r9, :64], \clear
vst1_if {d7}, [r10, :64], \clear
vst1_if {d7}, [r11, :64], \clear
scale_if \scale, d0[0], q8, q9
bl inv_dct64_step1_neon
movdup_if d0, lr, 2896*8*(1<<16), \scale
vmov_if d7, #0, \clear
sub r10, r10, r8, lsl #1 // offset 1
sub r9, r9, r8, lsl #1 // offset 9
add r10, r10, r8 // offset 2
add r9, r9, r8 // offset 10
add r7, r7, r8 // offset 5
add r11, r11, r8 // offset 13
vld1.32 {d16}, [r10, :64] // in5 (offset 2)
vld1.32 {d17}, [r11, :64] // in27 (offset 13)
vld1.32 {d18}, [r9, :64] // in21 (offset 10)
vld1.32 {d19}, [r7, :64] // in11 (offset 5)
vst1_if {d7}, [r10, :64], \clear
vst1_if {d7}, [r11, :64], \clear
vst1_if {d7}, [r9, :64], \clear
vst1_if {d7}, [r7, :64], \clear
scale_if \scale, d0[0], q8, q9
bl inv_dct64_step1_neon
movdup_if d0, lr, 2896*8*(1<<16), \scale
vmov_if d7, #0, \clear
sub r10, r10, r8 // offset 1
sub r9, r9, r8 // offset 9
add r11, r11, r8 // offset 14
add r7, r7, r8 // offset 6
vld1.32 {d16}, [r10, :64] // in3 (offset 1)
vld1.32 {d17}, [r11, :64] // in29 (offset 14)
vld1.32 {d18}, [r9, :64] // in19 (offset 9)
vld1.32 {d19}, [r7, :64] // in13 (offset 6)
vst1_if {d7}, [r10, :64], \clear
vst1_if {d7}, [r11, :64], \clear
vst1_if {d7}, [r9, :64], \clear
vst1_if {d7}, [r7, :64], \clear
scale_if \scale, d0[0], q8, q9
bl inv_dct64_step1_neon
sub r6, r6, #2*4*32
add r9, r6, #2*4*7
bl inv_dct64_step2_neon
pop {r10-r11,pc}
endfunc
.endm
def_dct64_func _clear, clear=1
def_dct64_func _clear_scale, clear=1, scale=1
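// Finishes the 64 point horizontal pass: reads the two halves of the stack
// buffer from both ends, adds/subtracts them, applies the (negative)
// rounding shift passed in r9 and stores the narrowed 16 bit results to the
// output row at r6.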
function inv_txfm_horz_dct_64x2_neon
vdup.32 q4, r9
mov r7, sp
add r8, sp, #2*4*(64 - 4)
add r9, r6, #2*56
push {r10-r11,lr}
mov r10, #2*64
mov r11, #-2*4*4
1:
vld1.32 {d16, d17, d18, d19}, [r7, :128]!
vld1.32 {d28, d29, d30, d31}, [r8, :128], r11
vld1.32 {d20, d21, d22, d23}, [r7, :128]!
vld1.32 {d24, d25, d26, d27}, [r8, :128], r11
vtrn.32 d16, d17
vtrn.32 d18, d19
vtrn.32 d20, d21
vtrn.32 d22, d23
vtrn.32 d31, d30
vtrn.32 d29, d28
vtrn.32 d27, d26
vtrn.32 d25, d24
.macro store_addsub src0, src1, src2, src3, src4, src5, src6, src7
vqsub.s32 d7, \src0, \src1
vqsub.s32 d6, \src2, \src3
vqsub.s32 d5, \src4, \src5
vqsub.s32 d4, \src6, \src7
vqadd.s32 d0, \src0, \src1
vqadd.s32 d1, \src2, \src3
vqadd.s32 d2, \src4, \src5
vqadd.s32 d3, \src6, \src7
vrshl.s32 q3, q3, q4
vrshl.s32 q2, q2, q4
vrshl.s32 q0, q0, q4
vrshl.s32 q1, q1, q4
vqmovn.s32 d7, q3
vqmovn.s32 d6, q2
vqmovn.s32 d0, q0
vqmovn.s32 d1, q1
vrev32.16 q3, q3
vst1.16 {q0}, [r6, :128], r10
vst1.16 {q3}, [r9, :128], r10
.endm
store_addsub d16, d31, d18, d29, d20, d27, d22, d25
store_addsub d17, d30, d19, d28, d21, d26, d23, d24
.purgem store_addsub
sub r6, r6, r10, lsl #1
sub r9, r9, r10, lsl #1
add r6, r6, #16
sub r9, r9, #16
cmp r7, r8
blt 1b
pop {r10-r11,pc}
endfunc
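// Vertical 64 point DCT tail for a 4 pixel wide strip: combines the two
// halves produced by the preceding inv_txfm_dct_4h_x64_neon call (read from
// both ends of the stack buffer) and adds them to the destination rows at
// r6 (top) and r9 (bottom).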
function inv_txfm_add_vert_dct_4x64_neon
lsl r8, r8, #1
mov r7, sp
add r8, sp, #2*4*(64 - 4)
add r9, r6, r1, lsl #6
sub r9, r9, r1
push {r10-r11,lr}
neg r10, r1
mov r11, #-2*4*4
1:
vld1.16 {d16, d17, d18, d19}, [r7, :128]!
vld1.16 {d28, d29, d30, d31}, [r8, :128], r11
vld1.16 {d20, d21, d22, d23}, [r7, :128]!
vld1.16 {d24, d25, d26, d27}, [r8, :128], r11
vmov.i16 q6, #0
vmvn.i16 q7, #0xfc00 // 0x3ff
.macro add_dest_addsub src0, src1, src2, src3
vld1.16 {d0}, [r6, :64], r1
vld1.16 {d1}, [r9, :64], r10
vqadd.s16 d4, \src0, \src1
vld1.16 {d2}, [r6, :64]
vqsub.s16 d5, \src0, \src1
vld1.16 {d3}, [r9, :64]
vqadd.s16 d6, \src2, \src3
vqsub.s16 d7, \src2, \src3
sub r6, r6, r1
sub r9, r9, r10
vrshr.s16 q2, q2, #4
vrshr.s16 q3, q3, #4
vqadd.s16 q2, q2, q0
vqadd.s16 q3, q3, q1
vmax.s16 q2, q2, q6
vmax.s16 q3, q3, q6
vmin.s16 q2, q2, q7
vmin.s16 q3, q3, q7
vst1.16 {d4}, [r6, :64], r1
vst1.16 {d5}, [r9, :64], r10
vst1.16 {d6}, [r6, :64], r1
vst1.16 {d7}, [r9, :64], r10
.endm
add_dest_addsub d16, d31, d17, d30
add_dest_addsub d18, d29, d19, d28
add_dest_addsub d20, d27, d21, d26
add_dest_addsub d22, d25, d23, d24
.purgem add_dest_addsub
cmp r7, r8
blt 1b
pop {r10-r11,pc}
endfunc
function inv_txfm_add_dct_dct_64x64_16bpc_neon, export=1
idct_dc 64, 64, 2
push {r4-r11,lr}
vpush {q4-q7}
sub_sp_align 64*32*2+64*4*2
add r5, sp, #64*4*2
movrel_local r10, eob_32x32
.irp i, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
add r6, r5, #(\i*64*2)
.if \i > 0
mov r8, #(32 - \i)
cmp r3, r11
blt 1f
.endif
add r7, r2, #(\i*4)
mov r8, #32*4
bl inv_txfm_dct_clear_2s_x64_neon
add r6, r5, #(\i*64*2)
mov r9, #-2 // shift
bl inv_txfm_horz_dct_64x2_neon
.if \i < 30
ldrh r11, [r10], #2
.endif
.endr
b 3f
1:
vmov.i16 q2, #0
vmov.i16 q3, #0
2:
subs r8, r8, #2
.rept 8
vst1.16 {q2, q3}, [r6, :128]!
.endr
bgt 2b
3:
.irp i, 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60
add r7, r5, #(\i*2)
mov r8, #64*2
bl X(inv_txfm_dct_4h_x64_neon)
add r6, r0, #(\i*2)
bl inv_txfm_add_vert_dct_4x64_neon
.endr
add_sp_align 64*32*2+64*4*2
vpop {q4-q7}
pop {r4-r11,pc}
endfunc
function inv_txfm_add_dct_dct_64x32_16bpc_neon, export=1
idct_dc 64, 32, 1
push {r4-r11,lr}
vpush {q4-q7}
sub_sp_align 64*32*2+64*4*2
add r5, sp, #64*4*2
movrel_local r10, eob_32x32
.irp i, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
add r6, r5, #(\i*64*2)
.if \i > 0
mov r8, #(32 - \i)
cmp r3, r11
blt 1f
.endif
add r7, r2, #(\i*4)
mov r8, #32*4
bl inv_txfm_dct_clear_scale_2s_x64_neon
add r6, r5, #(\i*64*2)
mov r9, #-1 // shift
bl inv_txfm_horz_dct_64x2_neon
.if \i < 30
ldrh r11, [r10], #2
.endif
.endr
b 3f
1:
vmov.i16 q2, #0
vmov.i16 q3, #0
2:
subs r8, r8, #2
.rept 8
vst1.16 {q2, q3}, [r6, :128]!
.endr
bgt 2b
3:
.irp i, 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60
add r6, r0, #(\i*2)
add r7, r5, #(\i*2)
mov r8, #64*2
bl inv_txfm_add_vert_dct_4x32_neon
.endr
add_sp_align 64*32*2+64*4*2
vpop {q4-q7}
pop {r4-r11,pc}
endfunc
function inv_txfm_add_dct_dct_32x64_16bpc_neon, export=1
idct_dc 32, 64, 1
push {r4-r11,lr}
vpush {q4-q7}
sub_sp_align 32*32*2+64*4*2
add r5, sp, #64*4*2
movrel_local r10, eob_32x32
ldrh r11, [r10], #2
.irp i, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
add r6, r5, #(\i*32*2)
.if \i > 0
mov r8, #(32 - \i)
cmp r3, r11
blt 1f
.if \i < 30
ldrh r11, [r10], #2
.endif
.endif
add r7, r2, #(\i*4)
mov r8, #32*4
bl inv_txfm_horz_scale_dct_32x2_neon
.endr
b 3f
1:
vmov.i16 q2, #0
vmov.i16 q3, #0
2:
subs r8, r8, #2
.rept 4
vst1.16 {q2, q3}, [r6, :128]!
.endr
bgt 2b
3:
.irp i, 0, 4, 8, 12, 16, 20, 24, 28
add r7, r5, #(\i*2)
mov r8, #32*2
bl X(inv_txfm_dct_4h_x64_neon)
add r6, r0, #(\i*2)
bl inv_txfm_add_vert_dct_4x64_neon
.endr
add_sp_align 32*32*2+64*4*2
vpop {q4-q7}
pop {r4-r11,pc}
endfunc
function inv_txfm_add_dct_dct_64x16_16bpc_neon, export=1
idct_dc 64, 16, 2
push {r4-r11,lr}
vpush {q4-q7}
sub_sp_align 64*16*2+64*4*2
add r4, sp, #64*4*2
movrel_local r10, eob_16x32
.irp i, 0, 2, 4, 6, 8, 10, 12, 14
add r6, r4, #(\i*64*2)
.if \i > 0
mov r8, #(16 - \i)
cmp r3, r11
blt 1f
.endif
add r7, r2, #(\i*4)
mov r8, #16*4
bl inv_txfm_dct_clear_2s_x64_neon
add r6, r4, #(\i*64*2)
mov r9, #-2 // shift
bl inv_txfm_horz_dct_64x2_neon
.if \i < 8
ldrh r11, [r10], #2
.endif
.endr
b 3f
1:
vmov.i16 q2, #0
vmov.i16 q3, #0
2:
subs r8, r8, #2
.rept 8
vst1.16 {q2, q3}, [r6, :128]!
.endr
bgt 2b
3:
movrel r5, X(inv_dct_4h_x16_neon)
.irp i, 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60
add r6, r0, #(\i*2)
add r7, r4, #(\i*2)
mov r8, #64*2
bl inv_txfm_add_vert_4x16_neon
.endr
add_sp_align 64*16*2+64*4*2
vpop {q4-q7}
pop {r4-r11,pc}
endfunc
function inv_txfm_add_dct_dct_16x64_16bpc_neon, export=1
idct_dc 16, 64, 2
push {r4-r11,lr}
vpush {q4-q7}
sub_sp_align 16*32*2+64*4*2
add r5, sp, #64*4*2
movrel_local r10, eob_16x32
ldrh r11, [r10], #2
movrel_local r4, inv_dct_2s_x16_neon
.irp i, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
add r6, r5, #(\i*16*2)
.if \i > 0
mov r8, #(32 - \i)
cmp r3, r11
blt 1f
.if \i < 30
ldrh r11, [r10], #2
.endif
.endif
add r7, r2, #(\i*4)
mov r8, #32*4
bl inv_txfm_horz_16x2_neon
.endr
b 3f
1:
vmov.i16 q2, #0
vmov.i16 q3, #0
2:
subs r8, r8, #2
.rept 2
vst1.16 {q2, q3}, [r6, :128]!
.endr
bgt 2b
3:
.irp i, 0, 4, 8, 12
add r7, r5, #(\i*2)
mov r8, #16*2
bl X(inv_txfm_dct_4h_x64_neon)
add r6, r0, #(\i*2)
bl inv_txfm_add_vert_dct_4x64_neon
.endr
add_sp_align 16*32*2+64*4*2
vpop {q4-q7}
pop {r4-r11,pc}
endfunc
| Admenri/urge | 131,893 | third_party/dav1d/src/arm/32/mc16.S |
/*
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2018, Janne Grunau
* Copyright © 2020, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm.S"
#include "util.S"
#define PREP_BIAS 8192
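// prep output is stored as (pixel << intermediate_bits) - PREP_BIAS, which
// keeps the 16 bpc intermediates within signed 16-bit range (see prep_neon below).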
.macro avg d0, d00, d01, d1, d10, d11
vld1.16 {q0, q1}, [r2, :128]!
vld1.16 {q2, q3}, [r3, :128]!
vqadd.s16 q0, q0, q2
vqadd.s16 q1, q1, q3
vmax.s16 q0, q0, q12 // -2*PREP_BIAS - 1 << intermediate_bits
vmax.s16 q1, q1, q12 // -2*PREP_BIAS - 1 << intermediate_bits
vqsub.s16 q0, q0, q12 // -2*PREP_BIAS - 1 << intermediate_bits
vqsub.s16 q1, q1, q12 // -2*PREP_BIAS - 1 << intermediate_bits
vshl.s16 \d0, q0, q13 // -(intermediate_bits+1)
vshl.s16 \d1, q1, q13 // -(intermediate_bits+1)
.endm
.macro w_avg d0, d00, d01, d1, d10, d11
vld1.16 {q0, q1}, [r2, :128]!
vld1.16 {q2, q3}, [r3, :128]!
// This difference requires a 17 bit range, and all bits are
// significant for the following multiplication.
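// Hence the vsubl.s16 widening to 32 bit below: a plain 16-bit subtract
// could wrap for extreme tmp1/tmp2 values before the weight multiply.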
vsubl.s16 \d0, d4, d0
vsubl.s16 q0, d5, d1
vsubl.s16 \d1, d6, d2
vsubl.s16 q1, d7, d3
vmul.s32 \d0, \d0, q4
vmul.s32 q0, q0, q4
vmul.s32 \d1, \d1, q4
vmul.s32 q1, q1, q4
vshr.s32 \d0, \d0, #4
vshr.s32 q0, q0, #4
vshr.s32 \d1, \d1, #4
vshr.s32 q1, q1, #4
vaddw.s16 \d0, \d0, d4
vaddw.s16 q0, q0, d5
vaddw.s16 \d1, \d1, d6
vaddw.s16 q1, q1, d7
vmovn.i32 \d00, \d0
vmovn.i32 \d01, q0
vmovn.i32 \d10, \d1
vmovn.i32 \d11, q1
vrshl.s16 \d0, \d0, q13 // -intermediate_bits
vrshl.s16 \d1, \d1, q13 // -intermediate_bits
vadd.s16 \d0, \d0, q12 // PREP_BIAS >> intermediate_bits
vadd.s16 \d1, \d1, q12 // PREP_BIAS >> intermediate_bits
vmin.s16 \d0, \d0, q15 // bitdepth_max
vmin.s16 \d1, \d1, q15 // bitdepth_max
vmax.s16 \d0, \d0, q14 // 0
vmax.s16 \d1, \d1, q14 // 0
.endm
.macro mask d0, d00, d01, d1, d10, d11
vld1.8 {q7}, [r6, :128]!
vld1.16 {q0, q1}, [r2, :128]!
vneg.s8 q7, q7
vld1.16 {q2, q3}, [r3, :128]!
vmovl.s8 q6, d14
vmovl.s8 q7, d15
vmovl.s16 q4, d12
vmovl.s16 q5, d13
vmovl.s16 q6, d14
vmovl.s16 q7, d15
vsubl.s16 \d0, d4, d0
vsubl.s16 q0, d5, d1
vsubl.s16 \d1, d6, d2
vsubl.s16 q1, d7, d3
vmul.s32 \d0, \d0, q4
vmul.s32 q0, q0, q5
vmul.s32 \d1, \d1, q6
vmul.s32 q1, q1, q7
vshr.s32 \d0, \d0, #6
vshr.s32 q0, q0, #6
vshr.s32 \d1, \d1, #6
vshr.s32 q1, q1, #6
vaddw.s16 \d0, \d0, d4
vaddw.s16 q0, q0, d5
vaddw.s16 \d1, \d1, d6
vaddw.s16 q1, q1, d7
vmovn.i32 \d00, \d0
vmovn.i32 \d01, q0
vmovn.i32 \d10, \d1
vmovn.i32 \d11, q1
vrshl.s16 \d0, \d0, q13 // -intermediate_bits
vrshl.s16 \d1, \d1, q13 // -intermediate_bits
vadd.s16 \d0, \d0, q12 // PREP_BIAS >> intermediate_bits
vadd.s16 \d1, \d1, q12 // PREP_BIAS >> intermediate_bits
vmin.s16 \d0, \d0, q15 // bitdepth_max
vmin.s16 \d1, \d1, q15 // bitdepth_max
vmax.s16 \d0, \d0, q14 // 0
vmax.s16 \d1, \d1, q14 // 0
.endm
.macro bidir_fn type, bdmax
function \type\()_16bpc_neon, export=1
push {r4-r7,lr}
ldrd r4, r5, [sp, #20]
ldr r6, [sp, #28]
clz r4, r4
.ifnc \type, avg
ldr r7, [sp, #32]
vmov.i16 q14, #0
vdup.16 q15, r7 // bitdepth_max
.endif
.ifc \type, w_avg
vpush {q4}
.endif
.ifc \type, mask
vpush {q4-q7}
.endif
clz r7, \bdmax
sub r7, r7, #18 // intermediate_bits = clz(bitdepth_max) - 18
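// e.g. bitdepth_max = 0x3ff (10 bpc): clz = 22, intermediate_bits = 4;
// bitdepth_max = 0xfff (12 bpc): clz = 20, intermediate_bits = 2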
.ifc \type, avg
mov lr, #1
movw r12, #2*PREP_BIAS
lsl lr, lr, r7 // 1 << intermediate_bits
neg r12, r12 // -2*PREP_BIAS
add r7, r7, #1
sub r12, r12, lr // -2*PREP_BIAS - 1 << intermediate_bits
neg r7, r7 // -(intermediate_bits+1)
vdup.16 q12, r12 // -2*PREP_BIAS - 1 << intermediate_bits
vdup.16 q13, r7 // -(intermediate_bits+1)
.else
mov r12, #PREP_BIAS
lsr r12, r12, r7 // PREP_BIAS >> intermediate_bits
neg r7, r7 // -intermediate_bits
vdup.16 q12, r12 // PREP_BIAS >> intermediate_bits
vdup.16 q13, r7 // -intermediate_bits
.endif
.ifc \type, w_avg
vdup.32 q4, r6
vneg.s32 q4, q4
.endif
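// Dispatch on the block width via clz(w): clz(128)-24 = 0 .. clz(4)-24 = 5,
// indexing the offset table below (entries ordered from w=128 down to w=4).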
adr r7, L(\type\()_tbl)
sub r4, r4, #24
\type q8, d16, d17, q9, d18, d19
ldr r4, [r7, r4, lsl #2]
add r7, r7, r4
bx r7
.align 2
L(\type\()_tbl):
.word 1280f - L(\type\()_tbl) + CONFIG_THUMB
.word 640f - L(\type\()_tbl) + CONFIG_THUMB
.word 320f - L(\type\()_tbl) + CONFIG_THUMB
.word 160f - L(\type\()_tbl) + CONFIG_THUMB
.word 80f - L(\type\()_tbl) + CONFIG_THUMB
.word 40f - L(\type\()_tbl) + CONFIG_THUMB
40:
add r7, r0, r1
lsl r1, r1, #1
4:
subs r5, r5, #4
vst1.16 {d16}, [r0, :64], r1
vst1.16 {d17}, [r7, :64], r1
vst1.16 {d18}, [r0, :64], r1
vst1.16 {d19}, [r7, :64], r1
ble 0f
\type q8, d16, d17, q9, d18, d19
b 4b
80:
add r7, r0, r1
lsl r1, r1, #1
8:
vst1.16 {q8}, [r0, :128], r1
subs r5, r5, #2
vst1.16 {q9}, [r7, :128], r1
ble 0f
\type q8, d16, d17, q9, d18, d19
b 8b
160:
16:
\type q10, d20, d21, q11, d22, d23
vst1.16 {q8, q9}, [r0, :128], r1
subs r5, r5, #2
vst1.16 {q10, q11}, [r0, :128], r1
ble 0f
\type q8, d16, d17, q9, d18, d19
b 16b
320:
add r7, r0, #32
32:
\type q10, d20, d21, q11, d22, d23
vst1.16 {q8, q9}, [r0, :128], r1
subs r5, r5, #1
vst1.16 {q10, q11}, [r7, :128], r1
ble 0f
\type q8, d16, d17, q9, d18, d19
b 32b
640:
add r7, r0, #32
mov r12, #64
sub r1, r1, #64
64:
\type q10, d20, d21, q11, d22, d23
vst1.16 {q8, q9}, [r0, :128], r12
\type q8, d16, d17, q9, d18, d19
vst1.16 {q10, q11}, [r7, :128], r12
\type q10, d20, d21, q11, d22, d23
vst1.16 {q8, q9}, [r0, :128], r1
subs r5, r5, #1
vst1.16 {q10, q11}, [r7, :128], r1
ble 0f
\type q8, d16, d17, q9, d18, d19
b 64b
1280:
add r7, r0, #32
mov r12, #64
sub r1, r1, #192
128:
\type q10, d20, d21, q11, d22, d23
vst1.16 {q8, q9}, [r0, :128], r12
\type q8, d16, d17, q9, d18, d19
vst1.16 {q10, q11}, [r7, :128], r12
\type q10, d20, d21, q11, d22, d23
vst1.16 {q8, q9}, [r0, :128], r12
\type q8, d16, d17, q9, d18, d19
vst1.16 {q10, q11}, [r7, :128], r12
\type q10, d20, d21, q11, d22, d23
vst1.16 {q8, q9}, [r0, :128], r12
\type q8, d16, d17, q9, d18, d19
vst1.16 {q10, q11}, [r7, :128], r12
\type q10, d20, d21, q11, d22, d23
vst1.16 {q8, q9}, [r0, :128], r1
subs r5, r5, #1
vst1.16 {q10, q11}, [r7, :128], r1
ble 0f
\type q8, d16, d17, q9, d18, d19
b 128b
0:
.ifc \type, mask
vpop {q4-q7}
.endif
.ifc \type, w_avg
vpop {q4}
.endif
pop {r4-r7,pc}
endfunc
.endm
bidir_fn avg, r6
bidir_fn w_avg, r7
bidir_fn mask, r7
.macro w_mask_fn type
function w_mask_\type\()_16bpc_neon, export=1
push {r4-r10,lr}
vpush {q4-q7}
ldrd r4, r5, [sp, #96]
ldrd r6, r7, [sp, #104]
ldr r8, [sp, #112]
clz r9, r4
adr lr, L(w_mask_\type\()_tbl)
vdup.16 q15, r8 // bitdepth_max
sub r9, r9, #24
clz r8, r8 // clz(bitdepth_max)
ldr r9, [lr, r9, lsl #2]
add r9, lr, r9
sub r8, r8, #12 // sh = intermediate_bits + 6 = clz(bitdepth_max) - 12
mov r10, #PREP_BIAS*64
neg r8, r8 // -sh
movw r12, #27615 // (64 + 1 - 38)<<mask_sh - 1 - mask_rnd
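// mask_sh works out to 10 for both 10 and 12 bpc, so 27615 =
// (64 + 1 - 38)*1024 - 1 - 32; the saturating subtract and the >> 10 below
// then yield 64 - m, clamped to the 0..26 range.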
vdup.32 q14, r8 // -sh
vdup.16 q0, r12
.if \type == 444
vmov.i8 q1, #64
.elseif \type == 422
vdup.8 d4, r7
vmov.i8 d2, #129
vsub.i16 d2, d2, d4
.elseif \type == 420
vdup.16 q2, r7
vmov.i16 q1, #0x100
vsub.i16 q1, q1, q2
.endif
add r12, r0, r1
lsl r1, r1, #1
bx r9
.align 2
L(w_mask_\type\()_tbl):
.word 1280f - L(w_mask_\type\()_tbl) + CONFIG_THUMB
.word 640f - L(w_mask_\type\()_tbl) + CONFIG_THUMB
.word 320f - L(w_mask_\type\()_tbl) + CONFIG_THUMB
.word 160f - L(w_mask_\type\()_tbl) + CONFIG_THUMB
.word 8f - L(w_mask_\type\()_tbl) + CONFIG_THUMB
.word 4f - L(w_mask_\type\()_tbl) + CONFIG_THUMB
4:
vld1.16 {q2, q3}, [r2, :128]! // tmp1 (four rows at once)
vld1.16 {q4, q5}, [r3, :128]! // tmp2 (four rows at once)
subs r5, r5, #4
vdup.32 q13, r10 // PREP_BIAS*64
vabd.s16 q6, q2, q4 // abs(tmp1 - tmp2)
vabd.s16 q7, q3, q5
vsubl.s16 q8, d8, d4 // tmp2 - tmp1 (requires 17 bit)
vsubl.s16 q9, d9, d5
vsubl.s16 q10, d10, d6
vsubl.s16 q11, d11, d7
vqsub.u16 q6, q0, q6 // 27615 - abs()
vqsub.u16 q7, q0, q7
vshll.s16 q5, d7, #6 // tmp1 << 6
vshll.s16 q4, d6, #6
vshll.s16 q3, d5, #6
vshll.s16 q2, d4, #6
vshr.u16 q6, q6, #10 // 64-m = (27615 - abs()) >> mask_sh
vshr.u16 q7, q7, #10
vadd.i32 q2, q2, q13 // += PREP_BIAS*64
vadd.i32 q3, q3, q13
vadd.i32 q4, q4, q13
vadd.i32 q5, q5, q13
vmovl.u16 q12, d12
vmovl.u16 q13, d13
vmla.i32 q2, q8, q12 // (tmp2-tmp1)*(64-m)
vmovl.u16 q12, d14
vmla.i32 q3, q9, q13
vmovl.u16 q13, d15
vmla.i32 q4, q10, q12
vmla.i32 q5, q11, q13
vrshl.s32 q2, q2, q14 // (tmp1<<6 + (tmp2-tmp1)*(64-m) + (1 << (sh-1)) + PREP_BIAS*64) >> sh
vrshl.s32 q3, q3, q14
vrshl.s32 q4, q4, q14
vrshl.s32 q5, q5, q14
vqmovun.s32 d4, q2 // iclip_pixel
vqmovun.s32 d5, q3
vqmovun.s32 d6, q4
vqmovun.s32 d7, q5
vmin.u16 q2, q2, q15 // iclip_pixel
vmin.u16 q3, q3, q15 // iclip_pixel
.if \type == 444
vmovn.i16 d12, q6 // 64 - m
vmovn.i16 d13, q7
vsub.i16 q6, q1, q6 // m
vst1.8 {q6}, [r6, :128]!
.elseif \type == 422
vpadd.i16 d12, d12, d13 // (64 - m) + (64 - n) (column wise addition)
vpadd.i16 d13, d14, d15
vmovn.i16 d12, q6
vhsub.u8 d12, d2, d12 // ((129 - sign) - ((64 - m) + (64 - n))) >> 1
vst1.8 {d12}, [r6, :64]!
.elseif \type == 420
vadd.i16 d12, d12, d13 // (64 - my1) + (64 - my2) (row wise addition)
vadd.i16 d13, d14, d15
vpadd.i16 d12, d12, d13 // (128 - m) + (128 - n) (column wise addition)
vsub.i16 d12, d2, d12 // (256 - sign) - ((128 - m) + (128 - n))
vrshrn.i16 d12, q6, #2 // ((256 - sign) - ((128 - m) + (128 - n)) + 2) >> 2
vst1.32 {d12[0]}, [r6, :32]!
.endif
vst1.16 {d4}, [r0, :64], r1
vst1.16 {d5}, [r12, :64], r1
vst1.16 {d6}, [r0, :64], r1
vst1.16 {d7}, [r12, :64], r1
bgt 4b
vpop {q4-q7}
pop {r4-r10,pc}
8:
vld1.16 {q2, q3}, [r2, :128]! // tmp1
vld1.16 {q4, q5}, [r3, :128]! // tmp2
subs r5, r5, #2
vdup.32 q13, r10 // PREP_BIAS*64
vabd.s16 q6, q2, q4 // abs(tmp1 - tmp2)
vabd.s16 q7, q3, q5
vsubl.s16 q8, d8, d4 // tmp2 - tmp1 (requires 17 bit)
vsubl.s16 q9, d9, d5
vsubl.s16 q10, d10, d6
vsubl.s16 q11, d11, d7
vqsub.u16 q6, q0, q6 // 27615 - abs()
vqsub.u16 q7, q0, q7
vshll.s16 q5, d7, #6 // tmp1 << 6
vshll.s16 q4, d6, #6
vshll.s16 q3, d5, #6
vshll.s16 q2, d4, #6
vshr.u16 q6, q6, #10 // 64-m = (27615 - abs()) >> mask_sh
vshr.u16 q7, q7, #10
vadd.i32 q2, q2, q13 // += PREP_BIAS*64
vadd.i32 q3, q3, q13
vadd.i32 q4, q4, q13
vadd.i32 q5, q5, q13
vmovl.u16 q12, d12
vmovl.u16 q13, d13
vmla.i32 q2, q8, q12 // (tmp2-tmp1)*(64-m)
vmovl.u16 q12, d14
vmla.i32 q3, q9, q13
vmovl.u16 q13, d15
vmla.i32 q4, q10, q12
vmla.i32 q5, q11, q13
vrshl.s32 q2, q2, q14 // (tmp1<<6 + (tmp2-tmp1)*(64-m) + (1 << (sh-1)) + PREP_BIAS*64) >> sh
vrshl.s32 q3, q3, q14
vrshl.s32 q4, q4, q14
vrshl.s32 q5, q5, q14
vqmovun.s32 d4, q2 // iclip_pixel
vqmovun.s32 d5, q3
vqmovun.s32 d6, q4
vqmovun.s32 d7, q5
vmin.u16 q2, q2, q15 // iclip_pixel
vmin.u16 q3, q3, q15 // iclip_pixel
.if \type == 444
vmovn.i16 d12, q6 // 64 - m
vmovn.i16 d13, q7
vsub.i16 q6, q1, q6 // m
vst1.8 {q6}, [r6, :128]!
.elseif \type == 422
vpadd.i16 d12, d12, d13 // (64 - m) + (64 - n) (column wise addition)
vpadd.i16 d13, d14, d15
vmovn.i16 d12, q6
vhsub.u8 d12, d2, d12 // ((129 - sign) - ((64 - m) + (64 - n))) >> 1
vst1.8 {d12}, [r6, :64]!
.elseif \type == 420
vadd.i16 q6, q6, q7 // (64 - my1) + (64 - my2) (row wise addition)
vpadd.i16 d12, d12, d13 // (128 - m) + (128 - n) (column wise addition)
vsub.i16 d12, d2, d12 // (256 - sign) - ((128 - m) + (128 - n))
vrshrn.i16 d12, q6, #2 // ((256 - sign) - ((128 - m) + (128 - n)) + 2) >> 2
vst1.32 {d12[0]}, [r6, :32]!
.endif
vst1.16 {q2}, [r0, :128], r1
vst1.16 {q3}, [r12, :128], r1
bgt 8b
vpop {q4-q7}
pop {r4-r10,pc}
1280:
640:
320:
160:
sub r1, r1, r4, lsl #1
.if \type == 444
add lr, r6, r4
.elseif \type == 422
add lr, r6, r4, lsr #1
.endif
add r7, r2, r4, lsl #1
add r9, r3, r4, lsl #1
161:
mov r8, r4
16:
vld1.16 {q2}, [r2, :128]! // tmp1
vld1.16 {q4}, [r3, :128]! // tmp2
vld1.16 {q3}, [r7, :128]!
vld1.16 {q5}, [r9, :128]!
subs r8, r8, #8
vdup.32 q13, r10 // PREP_BIAS*64
vabd.s16 q6, q2, q4 // abs(tmp1 - tmp2)
vabd.s16 q7, q3, q5
vsubl.s16 q8, d8, d4 // tmp2 - tmp1 (requires 17 bit)
vsubl.s16 q9, d9, d5
vsubl.s16 q10, d10, d6
vsubl.s16 q11, d11, d7
vqsub.u16 q6, q0, q6 // 27615 - abs()
vqsub.u16 q7, q0, q7
vshll.s16 q5, d7, #6 // tmp1 << 6
vshll.s16 q4, d6, #6
vshll.s16 q3, d5, #6
vshll.s16 q2, d4, #6
vshr.u16 q6, q6, #10 // 64-m = (27615 - abs()) >> mask_sh
vshr.u16 q7, q7, #10
vadd.i32 q2, q2, q13 // += PREP_BIAS*64
vadd.i32 q3, q3, q13
vadd.i32 q4, q4, q13
vadd.i32 q5, q5, q13
vmovl.u16 q12, d12
vmovl.u16 q13, d13
vmla.i32 q2, q8, q12 // (tmp2-tmp1)*(64-m)
vmovl.u16 q12, d14
vmla.i32 q3, q9, q13
vmovl.u16 q13, d15
vmla.i32 q4, q10, q12
vmla.i32 q5, q11, q13
vrshl.s32 q2, q2, q14 // (tmp1<<6 + (tmp2-tmp1)*(64-m) + (1 << (sh-1)) + PREP_BIAS*64) >> sh
vrshl.s32 q3, q3, q14
vrshl.s32 q4, q4, q14
vrshl.s32 q5, q5, q14
vqmovun.s32 d4, q2 // iclip_pixel
vqmovun.s32 d5, q3
vqmovun.s32 d6, q4
vqmovun.s32 d7, q5
vmin.u16 q2, q2, q15 // iclip_pixel
vmin.u16 q3, q3, q15 // iclip_pixel
.if \type == 444
vmovn.i16 d12, q6 // 64 - m
vmovn.i16 d13, q7
vsub.i16 q6, q1, q6 // m
vst1.8 {d12}, [r6, :64]!
vst1.8 {d13}, [lr, :64]!
.elseif \type == 422
vpadd.i16 d12, d12, d13 // (64 - m) + (64 - n) (column wise addition)
vpadd.i16 d13, d14, d15
vmovn.i16 d12, q6
vhsub.u8 d12, d2, d12 // ((129 - sign) - ((64 - m) + (64 - n))) >> 1
vst1.32 {d12[0]}, [r6, :32]!
vst1.32 {d12[1]}, [lr, :32]!
.elseif \type == 420
vadd.i16 q6, q6, q7 // (64 - my1) + (64 - my2) (row wise addition)
vpadd.i16 d12, d12, d13 // (128 - m) + (128 - n) (column wise addition)
vsub.i16 d12, d2, d12 // (256 - sign) - ((128 - m) + (128 - n))
vrshrn.i16 d12, q6, #2 // ((256 - sign) - ((128 - m) + (128 - n)) + 2) >> 2
vst1.32 {d12[0]}, [r6, :32]!
.endif
vst1.16 {q2}, [r0, :128]!
vst1.16 {q3}, [r12, :128]!
bgt 16b
subs r5, r5, #2
add r2, r2, r4, lsl #1
add r3, r3, r4, lsl #1
add r7, r7, r4, lsl #1
add r9, r9, r4, lsl #1
.if \type == 444
add r6, r6, r4
add lr, lr, r4
.elseif \type == 422
add r6, r6, r4, lsr #1
add lr, lr, r4, lsr #1
.endif
add r0, r0, r1
add r12, r12, r1
bgt 161b
vpop {q4-q7}
pop {r4-r10,pc}
endfunc
.endm
w_mask_fn 444
w_mask_fn 422
w_mask_fn 420
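// The blend functions below rely on vqrdmulh: (x * (-m << 9) * 2 + (1 << 15)) >> 16
// equals (x*-m + 32) >> 6, i.e. the rounded ((a-b)*-m + 32) >> 6 term that is
// then added back onto a.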
function blend_16bpc_neon, export=1
push {r4-r5,lr}
ldrd r4, r5, [sp, #12]
clz lr, r3
adr r3, L(blend_tbl)
sub lr, lr, #26
ldr lr, [r3, lr, lsl #2]
add r3, r3, lr
bx r3
.align 2
L(blend_tbl):
.word 320f - L(blend_tbl) + CONFIG_THUMB
.word 160f - L(blend_tbl) + CONFIG_THUMB
.word 80f - L(blend_tbl) + CONFIG_THUMB
.word 40f - L(blend_tbl) + CONFIG_THUMB
40:
add r12, r0, r1
lsl r1, r1, #1
4:
vld1.8 {d4}, [r5, :64]!
vld1.16 {q1}, [r2, :128]!
vld1.16 {d0}, [r0, :64]
vneg.s8 d4, d4 // -m
subs r4, r4, #2
vld1.16 {d1}, [r12, :64]
vmovl.s8 q2, d4
vshl.i16 q2, q2, #9 // -m << 9
vsub.i16 q1, q0, q1 // a - b
vqrdmulh.s16 q1, q1, q2 // ((a-b)*-m + 32) >> 6
vadd.i16 q0, q0, q1
vst1.16 {d0}, [r0, :64], r1
vst1.16 {d1}, [r12, :64], r1
bgt 4b
pop {r4-r5,pc}
80:
add r12, r0, r1
lsl r1, r1, #1
8:
vld1.8 {q8}, [r5, :128]!
vld1.16 {q2, q3}, [r2, :128]!
vneg.s8 q9, q8 // -m
vld1.16 {q0}, [r0, :128]
vld1.16 {q1}, [r12, :128]
vmovl.s8 q8, d18
vmovl.s8 q9, d19
vshl.i16 q8, q8, #9 // -m << 9
vshl.i16 q9, q9, #9
vsub.i16 q2, q0, q2 // a - b
vsub.i16 q3, q1, q3
subs r4, r4, #2
vqrdmulh.s16 q2, q2, q8 // ((a-b)*-m + 32) >> 6
vqrdmulh.s16 q3, q3, q9
vadd.i16 q0, q0, q2
vadd.i16 q1, q1, q3
vst1.16 {q0}, [r0, :128], r1
vst1.16 {q1}, [r12, :128], r1
bgt 8b
pop {r4-r5,pc}
160:
add r12, r0, r1
lsl r1, r1, #1
16:
vld1.8 {q12, q13}, [r5, :128]!
vld1.16 {q8, q9}, [r2, :128]!
subs r4, r4, #2
vneg.s8 q14, q12 // -m
vld1.16 {q0, q1}, [r0, :128]
vneg.s8 q15, q13
vld1.16 {q10, q11}, [r2, :128]!
vmovl.s8 q12, d28
vmovl.s8 q13, d29
vmovl.s8 q14, d30
vmovl.s8 q15, d31
vld1.16 {q2, q3}, [r12, :128]
vshl.i16 q12, q12, #9 // -m << 9
vshl.i16 q13, q13, #9
vshl.i16 q14, q14, #9
vshl.i16 q15, q15, #9
vsub.i16 q8, q0, q8 // a - b
vsub.i16 q9, q1, q9
vsub.i16 q10, q2, q10
vsub.i16 q11, q3, q11
vqrdmulh.s16 q8, q8, q12 // ((a-b)*-m + 32) >> 6
vqrdmulh.s16 q9, q9, q13
vqrdmulh.s16 q10, q10, q14
vqrdmulh.s16 q11, q11, q15
vadd.i16 q0, q0, q8
vadd.i16 q1, q1, q9
vadd.i16 q2, q2, q10
vst1.16 {q0, q1}, [r0, :128], r1
vadd.i16 q3, q3, q11
vst1.16 {q2, q3}, [r12, :128], r1
bgt 16b
pop {r4-r5,pc}
320:
add r12, r0, #32
32:
vld1.8 {q12, q13}, [r5, :128]!
vld1.16 {q8, q9}, [r2, :128]!
subs r4, r4, #1
vneg.s8 q14, q12 // -m
vld1.16 {q0, q1}, [r0, :128]
vneg.s8 q15, q13
vld1.16 {q10, q11}, [r2, :128]!
vmovl.s8 q12, d28
vmovl.s8 q13, d29
vmovl.s8 q14, d30
vmovl.s8 q15, d31
vld1.16 {q2, q3}, [r12, :128]
vshl.i16 q12, q12, #9 // -m << 9
vshl.i16 q13, q13, #9
vshl.i16 q14, q14, #9
vshl.i16 q15, q15, #9
vsub.i16 q8, q0, q8 // a - b
vsub.i16 q9, q1, q9
vsub.i16 q10, q2, q10
vsub.i16 q11, q3, q11
vqrdmulh.s16 q8, q8, q12 // ((a-b)*-m + 32) >> 6
vqrdmulh.s16 q9, q9, q13
vqrdmulh.s16 q10, q10, q14
vqrdmulh.s16 q11, q11, q15
vadd.i16 q0, q0, q8
vadd.i16 q1, q1, q9
vadd.i16 q2, q2, q10
vst1.16 {q0, q1}, [r0, :128], r1
vadd.i16 q3, q3, q11
vst1.16 {q2, q3}, [r12, :128], r1
bgt 32b
pop {r4-r5,pc}
endfunc
function blend_h_16bpc_neon, export=1
push {r4-r5,lr}
ldr r4, [sp, #12]
movrel r5, X(obmc_masks)
add r5, r5, r4
sub r4, r4, r4, lsr #2
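// Only the top 3/4 of the rows are blended (h - h/4); r5 points at the
// obmc_masks run for this block height (table offset h).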
clz lr, r3
adr r12, L(blend_h_tbl)
sub lr, lr, #24
ldr lr, [r12, lr, lsl #2]
add r12, r12, lr
bx r12
.align 2
L(blend_h_tbl):
.word 1280f - L(blend_h_tbl) + CONFIG_THUMB
.word 640f - L(blend_h_tbl) + CONFIG_THUMB
.word 320f - L(blend_h_tbl) + CONFIG_THUMB
.word 160f - L(blend_h_tbl) + CONFIG_THUMB
.word 80f - L(blend_h_tbl) + CONFIG_THUMB
.word 40f - L(blend_h_tbl) + CONFIG_THUMB
.word 20f - L(blend_h_tbl) + CONFIG_THUMB
20:
add r12, r0, r1
lsl r1, r1, #1
2:
vld2.8 {d4[], d5[]}, [r5, :16]!
vld1.16 {d2}, [r2, :64]!
vext.8 d4, d4, d5, #6
subs r4, r4, #2
vneg.s8 d4, d4 // -m
vld1.32 {d0[]}, [r0, :32]
vld1.32 {d0[1]}, [r12, :32]
vmovl.s8 q2, d4
vshl.i16 d4, d4, #9 // -m << 9
vsub.i16 d2, d0, d2 // a - b
vqrdmulh.s16 d2, d2, d4 // ((a-b)*-m + 32) >> 6
vadd.i16 d0, d0, d2
vst1.32 {d0[0]}, [r0, :32], r1
vst1.32 {d0[1]}, [r12, :32], r1
bgt 2b
pop {r4-r5,pc}
40:
add r12, r0, r1
lsl r1, r1, #1
4:
vld2.8 {d4[], d5[]}, [r5, :16]!
vld1.16 {q1}, [r2, :128]!
vext.8 d4, d4, d5, #4
subs r4, r4, #2
vneg.s8 d4, d4 // -m
vld1.16 {d0}, [r0, :64]
vld1.16 {d1}, [r12, :64]
vmovl.s8 q2, d4
vshl.i16 q2, q2, #9 // -m << 9
vsub.i16 q1, q0, q1 // a - b
vqrdmulh.s16 q1, q1, q2 // ((a-b)*-m + 32) >> 6
vadd.i16 q0, q0, q1
vst1.16 {d0}, [r0, :64], r1
vst1.16 {d1}, [r12, :64], r1
bgt 4b
pop {r4-r5,pc}
80:
add r12, r0, r1
lsl r1, r1, #1
8:
vld2.8 {d16[], d17[]}, [r5, :16]!
vld1.16 {q2, q3}, [r2, :128]!
vneg.s8 q9, q8 // -m
vld1.16 {q0}, [r0, :128]
subs r4, r4, #2
vmovl.s8 q8, d18
vmovl.s8 q9, d19
vld1.16 {q1}, [r12, :128]
vshl.i16 q8, q8, #9 // -m << 9
vshl.i16 q9, q9, #9
vsub.i16 q2, q0, q2 // a - b
vsub.i16 q3, q1, q3
vqrdmulh.s16 q2, q2, q8 // ((a-b)*-m + 32) >> 6
vqrdmulh.s16 q3, q3, q9
vadd.i16 q0, q0, q2
vadd.i16 q1, q1, q3
vst1.16 {q0}, [r0, :128], r1
vst1.16 {q1}, [r12, :128], r1
bgt 8b
pop {r4-r5,pc}
160:
add r12, r0, r1
lsl r1, r1, #1
16:
vld2.8 {d24[], d25[]}, [r5, :16]!
vld1.16 {q8, q9}, [r2, :128]!
subs r4, r4, #2
vneg.s8 q13, q12 // -m
vld1.16 {q0, q1}, [r0, :128]
vmovl.s8 q12, d26
vld1.16 {q10, q11}, [r2, :128]!
vmovl.s8 q13, d27
vld1.16 {q2, q3}, [r12, :128]
vshl.i16 q12, q12, #9 // -m << 9
vshl.i16 q13, q13, #9
vsub.i16 q8, q0, q8 // a - b
vsub.i16 q9, q1, q9
vsub.i16 q10, q2, q10
vsub.i16 q11, q3, q11
vqrdmulh.s16 q8, q8, q12 // ((a-b)*-m + 32) >> 6
vqrdmulh.s16 q9, q9, q12
vqrdmulh.s16 q10, q10, q13
vqrdmulh.s16 q11, q11, q13
vadd.i16 q0, q0, q8
vadd.i16 q1, q1, q9
vadd.i16 q2, q2, q10
vadd.i16 q3, q3, q11
vst1.16 {q0, q1}, [r0, :128], r1
vst1.16 {q2, q3}, [r12, :128], r1
bgt 16b
pop {r4-r5,pc}
1280:
640:
320:
sub r1, r1, r3, lsl #1
321:
vld1.8 {d24[]}, [r5]!
mov r12, r3
vneg.s8 d24, d24 // -m
vmovl.s8 q12, d24
vshl.i16 q12, q12, #9 // -m << 9
32:
vld1.16 {q8, q9}, [r2, :128]!
vld1.16 {q0, q1}, [r0, :128]!
subs r12, r12, #32
vld1.16 {q10, q11}, [r2, :128]!
vld1.16 {q2, q3}, [r0, :128]
vsub.i16 q8, q0, q8 // a - b
vsub.i16 q9, q1, q9
vsub.i16 q10, q2, q10
vsub.i16 q11, q3, q11
sub r0, r0, #32
vqrdmulh.s16 q8, q8, q12 // ((a-b)*-m + 32) >> 6
vqrdmulh.s16 q9, q9, q12
vqrdmulh.s16 q10, q10, q12
vqrdmulh.s16 q11, q11, q12
vadd.i16 q0, q0, q8
vadd.i16 q1, q1, q9
vadd.i16 q2, q2, q10
vst1.16 {q0, q1}, [r0, :128]!
vadd.i16 q3, q3, q11
vst1.16 {q2, q3}, [r0, :128]!
bgt 32b
subs r4, r4, #1
add r0, r0, r1
bgt 321b
pop {r4-r5,pc}
endfunc
function blend_v_16bpc_neon, export=1
push {r4,lr}
ldr r4, [sp, #8]
movrel lr, X(obmc_masks)
add lr, lr, r3
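// Likewise for blend_v: the mask run starts at obmc_masks + w, and only the
// leftmost 3/4 of each row is written back (see the partial stores below).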
clz r12, r3
adr r3, L(blend_v_tbl)
sub r12, r12, #26
ldr r12, [r3, r12, lsl #2]
add r3, r3, r12
bx r3
.align 2
L(blend_v_tbl):
.word 320f - L(blend_v_tbl) + CONFIG_THUMB
.word 160f - L(blend_v_tbl) + CONFIG_THUMB
.word 80f - L(blend_v_tbl) + CONFIG_THUMB
.word 40f - L(blend_v_tbl) + CONFIG_THUMB
.word 20f - L(blend_v_tbl) + CONFIG_THUMB
20:
add r12, r0, r1
lsl r1, r1, #1
vld1.8 {d4[]}, [lr]
vneg.s8 d4, d4 // -m
vmovl.s8 q2, d4
vshl.i16 d4, d4, #9 // -m << 9
2:
vld1.32 {d2[]}, [r2, :32]!
vld1.16 {d0[]}, [r0, :16]
subs r4, r4, #2
vld1.16 {d2[1]}, [r2, :16]
vld1.16 {d0[1]}, [r12, :16]
add r2, r2, #4
vsub.i16 d2, d0, d2 // a - b
vqrdmulh.s16 d2, d2, d4 // ((a-b)*-m + 32) >> 6
vadd.i16 d0, d0, d2
vst1.16 {d0[0]}, [r0, :16], r1
vst1.16 {d0[1]}, [r12, :16], r1
bgt 2b
pop {r4,pc}
40:
vld1.32 {d4[]}, [lr, :32]
add r12, r0, r1
vneg.s8 d4, d4 // -m
lsl r1, r1, #1
vmovl.s8 q2, d4
sub r1, r1, #4
vshl.i16 q2, q2, #9 // -m << 9
4:
vld1.16 {q1}, [r2, :128]!
vld1.16 {d0}, [r0, :64]
vld1.16 {d1}, [r12, :64]
subs r4, r4, #2
vsub.i16 q1, q0, q1 // a - b
vqrdmulh.s16 q1, q1, q2 // ((a-b)*-m + 32) >> 6
vadd.i16 q0, q0, q1
vst1.32 {d0[0]}, [r0, :32]!
vst1.32 {d1[0]}, [r12, :32]!
vst1.16 {d0[2]}, [r0, :16], r1
vst1.16 {d1[2]}, [r12, :16], r1
bgt 4b
pop {r4,pc}
80:
vld1.8 {d16}, [lr, :64]
add r12, r0, r1
vneg.s8 d16, d16 // -m
lsl r1, r1, #1
vmovl.s8 q8, d16
sub r1, r1, #8
vshl.i16 q8, q8, #9 // -m << 9
8:
vld1.16 {q2, q3}, [r2, :128]!
vld1.16 {q0}, [r0, :128]
vld1.16 {q1}, [r12, :128]
subs r4, r4, #2
vsub.i16 q2, q0, q2 // a - b
vsub.i16 q3, q1, q3
vqrdmulh.s16 q2, q2, q8 // ((a-b)*-m + 32) >> 6
vqrdmulh.s16 q3, q3, q8
vadd.i16 q0, q0, q2
vadd.i16 q1, q1, q3
vst1.16 {d0}, [r0, :64]!
vst1.16 {d2}, [r12, :64]!
vst1.32 {d1[0]}, [r0, :32], r1
vst1.32 {d3[0]}, [r12, :32], r1
bgt 8b
pop {r4,pc}
160:
vld1.8 {q12}, [lr, :128]
add r12, r0, r1
vneg.s8 q13, q12 // -m
lsl r1, r1, #1
vmovl.s8 q12, d26
vmovl.s8 q13, d27
vshl.i16 q12, q12, #9 // -m << 9
vshl.i16 d26, d26, #9
16:
vld1.16 {q8, q9}, [r2, :128]!
vld1.16 {d0, d1, d2}, [r0, :64]
subs r4, r4, #2
vld1.16 {q10, q11}, [r2, :128]!
vsub.i16 q8, q0, q8 // a - b
vld1.16 {d4, d5, d6}, [r12, :64]
vsub.i16 d18, d2, d18
vsub.i16 q10, q2, q10
vsub.i16 d22, d6, d22
vqrdmulh.s16 q8, q8, q12 // ((a-b)*-m + 32) >> 6
vqrdmulh.s16 d18, d18, d26
vqrdmulh.s16 q10, q10, q12
vqrdmulh.s16 d22, d22, d26
vadd.i16 q0, q0, q8
vadd.i16 d2, d2, d18
vadd.i16 q2, q2, q10
vst1.16 {d0, d1, d2}, [r0, :64], r1
vadd.i16 d6, d6, d22
vst1.16 {d4, d5, d6}, [r12, :64], r1
bgt 16b
pop {r4,pc}
320:
vld1.8 {d24, d25, d26}, [lr, :64]
vneg.s8 q14, q12 // -m
vneg.s8 d30, d26
vmovl.s8 q12, d28
vmovl.s8 q13, d29
vmovl.s8 q14, d30
sub r1, r1, #32
vshl.i16 q12, q12, #9 // -m << 9
vshl.i16 q13, q13, #9
vshl.i16 q14, q14, #9
32:
vld1.16 {q8, q9}, [r2, :128]!
vld1.16 {q0, q1}, [r0, :128]!
subs r4, r4, #1
vld1.16 {q10}, [r2, :128]
vsub.i16 q8, q0, q8 // a - b
vld1.16 {q2}, [r0, :128]
sub r0, r0, #32
vsub.i16 q9, q1, q9
vsub.i16 q10, q2, q10
vqrdmulh.s16 q8, q8, q12 // ((a-b)*-m + 32) >> 6
vqrdmulh.s16 q9, q9, q13
vqrdmulh.s16 q10, q10, q14
vadd.i16 q0, q0, q8
vadd.i16 q1, q1, q9
vadd.i16 q2, q2, q10
vst1.16 {q0, q1}, [r0, :128]!
add r2, r2, #32
vst1.16 {q2}, [r0, :128], r1
bgt 32b
pop {r4,pc}
endfunc
// This has got the same signature as the put_8tap functions,
// and assumes that r9 is set to (clz(w)-24).
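// Register use, as in the code below: r0/r1 = dst/dst_stride,
// r2/r3 = src/src_stride, r5 = h.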
function put_neon
adr r10, L(put_tbl)
ldr r9, [r10, r9, lsl #2]
add r10, r10, r9
bx r10
.align 2
L(put_tbl):
.word 1280f - L(put_tbl) + CONFIG_THUMB
.word 640f - L(put_tbl) + CONFIG_THUMB
.word 320f - L(put_tbl) + CONFIG_THUMB
.word 16f - L(put_tbl) + CONFIG_THUMB
.word 80f - L(put_tbl) + CONFIG_THUMB
.word 4f - L(put_tbl) + CONFIG_THUMB
.word 2f - L(put_tbl) + CONFIG_THUMB
2:
vld1.32 {d0[]}, [r2], r3
vld1.32 {d1[]}, [r2], r3
subs r5, r5, #2
vst1.32 {d0[0]}, [r0, :32], r1
vst1.32 {d1[1]}, [r0, :32], r1
bgt 2b
pop {r4-r11,pc}
4:
vld1.16 {d0}, [r2], r3
vld1.16 {d1}, [r2], r3
subs r5, r5, #2
vst1.16 {d0}, [r0, :64], r1
vst1.16 {d1}, [r0, :64], r1
bgt 4b
pop {r4-r11,pc}
80:
add r8, r0, r1
lsl r1, r1, #1
add r9, r2, r3
lsl r3, r3, #1
8:
vld1.16 {q0}, [r2], r3
vld1.16 {q1}, [r9], r3
subs r5, r5, #2
vst1.16 {q0}, [r0, :128], r1
vst1.16 {q1}, [r8, :128], r1
bgt 8b
pop {r4-r11,pc}
16:
vld1.16 {q0, q1}, [r2], r3
subs r5, r5, #1
vst1.16 {q0, q1}, [r0, :128], r1
bgt 16b
pop {r4-r11,pc}
320:
sub r1, r1, #32
sub r3, r3, #32
32:
vld1.16 {q0, q1}, [r2]!
vst1.16 {q0, q1}, [r0, :128]!
vld1.16 {q2, q3}, [r2], r3
subs r5, r5, #1
vst1.16 {q2, q3}, [r0, :128], r1
bgt 32b
pop {r4-r11,pc}
640:
sub r1, r1, #96
sub r3, r3, #96
64:
vld1.16 {q8, q9}, [r2]!
vst1.16 {q8, q9}, [r0, :128]!
vld1.16 {q10, q11}, [r2]!
vst1.16 {q10, q11}, [r0, :128]!
vld1.16 {q12, q13}, [r2]!
vst1.16 {q12, q13}, [r0, :128]!
vld1.16 {q14, q15}, [r2], r3
subs r5, r5, #1
vst1.16 {q14, q15}, [r0, :128], r1
bgt 64b
pop {r4-r11,pc}
1280:
sub r1, r1, #224
sub r3, r3, #224
128:
vld1.16 {q8, q9}, [r2]!
vst1.16 {q8, q9}, [r0, :128]!
vld1.16 {q10, q11}, [r2]!
vst1.16 {q10, q11}, [r0, :128]!
vld1.16 {q12, q13}, [r2]!
vst1.16 {q12, q13}, [r0, :128]!
vld1.16 {q14, q15}, [r2]!
vst1.16 {q14, q15}, [r0, :128]!
vld1.16 {q8, q9}, [r2]!
vst1.16 {q8, q9}, [r0, :128]!
vld1.16 {q10, q11}, [r2]!
vst1.16 {q10, q11}, [r0, :128]!
vld1.16 {q12, q13}, [r2]!
vst1.16 {q12, q13}, [r0, :128]!
vld1.16 {q14, q15}, [r2], r3
subs r5, r5, #1
vst1.16 {q14, q15}, [r0, :128], r1
bgt 128b
pop {r4-r11,pc}
endfunc
// This has got the same signature as the prep_8tap functions,
// and assumes that r9 is set to (clz(w)-24), r7 to intermediate_bits and
// r8 to w*2.
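// Each output is written as (pixel << intermediate_bits) - PREP_BIAS, packed
// with a row stride of w*2 bytes (r0 simply advances through the buffer).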
function prep_neon
adr r10, L(prep_tbl)
ldr r9, [r10, r9, lsl #2]
vdup.16 q15, r7 // intermediate_bits
vmov.i16 q14, #PREP_BIAS
add r10, r10, r9
bx r10
.align 2
L(prep_tbl):
.word 1280f - L(prep_tbl) + CONFIG_THUMB
.word 640f - L(prep_tbl) + CONFIG_THUMB
.word 320f - L(prep_tbl) + CONFIG_THUMB
.word 16f - L(prep_tbl) + CONFIG_THUMB
.word 80f - L(prep_tbl) + CONFIG_THUMB
.word 40f - L(prep_tbl) + CONFIG_THUMB
40:
add r9, r1, r2
lsl r2, r2, #1
4:
vld1.16 {d0}, [r1], r2
vld1.16 {d1}, [r9], r2
subs r4, r4, #2
vshl.s16 q0, q0, q15
vsub.i16 q0, q0, q14
vst1.16 {q0}, [r0, :128]!
bgt 4b
pop {r4-r11,pc}
80:
add r9, r1, r2
lsl r2, r2, #1
8:
vld1.16 {q0}, [r1], r2
vld1.16 {q1}, [r9], r2
subs r4, r4, #2
vshl.s16 q0, q0, q15
vshl.s16 q1, q1, q15
vsub.i16 q0, q0, q14
vsub.i16 q1, q1, q14
vst1.16 {q0, q1}, [r0, :128]!
bgt 8b
pop {r4-r11,pc}
16:
vld1.16 {q0, q1}, [r1], r2
vshl.s16 q0, q0, q15
vld1.16 {q2, q3}, [r1], r2
subs r4, r4, #2
vshl.s16 q1, q1, q15
vshl.s16 q2, q2, q15
vshl.s16 q3, q3, q15
vsub.i16 q0, q0, q14
vsub.i16 q1, q1, q14
vsub.i16 q2, q2, q14
vst1.16 {q0, q1}, [r0, :128]!
vsub.i16 q3, q3, q14
vst1.16 {q2, q3}, [r0, :128]!
bgt 16b
pop {r4-r11,pc}
320:
sub r2, r2, #32
32:
vld1.16 {q0, q1}, [r1]!
subs r4, r4, #1
vshl.s16 q0, q0, q15
vld1.16 {q2, q3}, [r1], r2
vshl.s16 q1, q1, q15
vshl.s16 q2, q2, q15
vshl.s16 q3, q3, q15
vsub.i16 q0, q0, q14
vsub.i16 q1, q1, q14
vsub.i16 q2, q2, q14
vst1.16 {q0, q1}, [r0, :128]!
vsub.i16 q3, q3, q14
vst1.16 {q2, q3}, [r0, :128]!
bgt 32b
pop {r4-r11,pc}
640:
sub r2, r2, #96
64:
vld1.16 {q0, q1}, [r1]!
subs r4, r4, #1
vshl.s16 q0, q0, q15
vld1.16 {q2, q3}, [r1]!
vshl.s16 q1, q1, q15
vld1.16 {q8, q9}, [r1]!
vshl.s16 q2, q2, q15
vld1.16 {q10, q11}, [r1], r2
vshl.s16 q3, q3, q15
vshl.s16 q8, q8, q15
vshl.s16 q9, q9, q15
vshl.s16 q10, q10, q15
vshl.s16 q11, q11, q15
vsub.i16 q0, q0, q14
vsub.i16 q1, q1, q14
vsub.i16 q2, q2, q14
vsub.i16 q3, q3, q14
vsub.i16 q8, q8, q14
vst1.16 {q0, q1}, [r0, :128]!
vsub.i16 q9, q9, q14
vst1.16 {q2, q3}, [r0, :128]!
vsub.i16 q10, q10, q14
vst1.16 {q8, q9}, [r0, :128]!
vsub.i16 q11, q11, q14
vst1.16 {q10, q11}, [r0, :128]!
bgt 64b
pop {r4-r11,pc}
1280:
sub r2, r2, #224
128:
vld1.16 {q0, q1}, [r1]!
subs r4, r4, #1
vshl.s16 q0, q0, q15
vld1.16 {q2, q3}, [r1]!
vshl.s16 q1, q1, q15
vld1.16 {q8, q9}, [r1]!
vshl.s16 q2, q2, q15
vld1.16 {q10, q11}, [r1]!
vshl.s16 q3, q3, q15
vshl.s16 q8, q8, q15
vshl.s16 q9, q9, q15
vshl.s16 q10, q10, q15
vshl.s16 q11, q11, q15
vsub.i16 q0, q0, q14
vsub.i16 q1, q1, q14
vsub.i16 q2, q2, q14
vsub.i16 q3, q3, q14
vsub.i16 q8, q8, q14
vst1.16 {q0, q1}, [r0, :128]!
vld1.16 {q0, q1}, [r1]!
vsub.i16 q9, q9, q14
vsub.i16 q10, q10, q14
vst1.16 {q2, q3}, [r0, :128]!
vld1.16 {q2, q3}, [r1]!
vsub.i16 q11, q11, q14
vshl.s16 q0, q0, q15
vst1.16 {q8, q9}, [r0, :128]!
vld1.16 {q8, q9}, [r1]!
vshl.s16 q1, q1, q15
vshl.s16 q2, q2, q15
vst1.16 {q10, q11}, [r0, :128]!
vld1.16 {q10, q11}, [r1], r2
vshl.s16 q3, q3, q15
vshl.s16 q8, q8, q15
vshl.s16 q9, q9, q15
vshl.s16 q10, q10, q15
vshl.s16 q11, q11, q15
vsub.i16 q0, q0, q14
vsub.i16 q1, q1, q14
vsub.i16 q2, q2, q14
vsub.i16 q3, q3, q14
vsub.i16 q8, q8, q14
vst1.16 {q0, q1}, [r0, :128]!
vsub.i16 q9, q9, q14
vst1.16 {q2, q3}, [r0, :128]!
vsub.i16 q10, q10, q14
vst1.16 {q8, q9}, [r0, :128]!
vsub.i16 q11, q11, q14
vst1.16 {q10, q11}, [r0, :128]!
bgt 128b
pop {r4-r11,pc}
endfunc
.macro load_slice s0, s1, strd, wd, d0, d1, d2, d3, d4, d5, d6
vld1.\wd {\d0[]}, [\s0], \strd
vld1.\wd {\d1[]}, [\s1], \strd
.ifnb \d2
vld1.\wd {\d2[]}, [\s0], \strd
vld1.\wd {\d3[]}, [\s1], \strd
.endif
.ifnb \d4
vld1.\wd {\d4[]}, [\s0], \strd
.endif
.ifnb \d5
vld1.\wd {\d5[]}, [\s1], \strd
.endif
.ifnb \d6
vld1.\wd {\d6[]}, [\s0], \strd
.endif
.endm
.macro load_reg s0, s1, strd, d0, d1, d2, d3, d4, d5, d6
vld1.16 {\d0}, [\s0], \strd
vld1.16 {\d1}, [\s1], \strd
.ifnb \d2
vld1.16 {\d2}, [\s0], \strd
vld1.16 {\d3}, [\s1], \strd
.endif
.ifnb \d4
vld1.16 {\d4}, [\s0], \strd
.endif
.ifnb \d5
vld1.16 {\d5}, [\s1], \strd
.endif
.ifnb \d6
vld1.16 {\d6}, [\s0], \strd
.endif
.endm
.macro load_regpair s0, s1, strd, d0, d1, d2, d3, d4, d5
vld1.16 {\d0, \d1}, [\s0], \strd
.ifnb \d2
vld1.16 {\d2, \d3}, [\s1], \strd
.endif
.ifnb \d4
vld1.16 {\d4, \d5}, [\s0], \strd
.endif
.endm
.macro load_32 s0, s1, strd, d0, d1, d2, d3, d4, d5, d6
load_slice \s0, \s1, \strd, 32, \d0, \d1, \d2, \d3, \d4, \d5, \d6
.endm
.macro load_16s16 s0, s1, strd, d0, d1, d2, d3, d4, d5
load_regpair \s0, \s1, \strd, \d0, \d1, \d2, \d3, \d4, \d5
.endm
.macro interleave_1_32 r0, r1, r2, r3, r4
vext.8 \r0, \r0, \r1, #4
vext.8 \r1, \r1, \r2, #4
.ifnb \r3
vext.8 \r2, \r2, \r3, #4
vext.8 \r3, \r3, \r4, #4
.endif
.endm
.macro vmin_u16 c, r0, r1, r2, r3
vmin.u16 \r0, \r0, \c
.ifnb \r1
vmin.u16 \r1, \r1, \c
.endif
.ifnb \r2
vmin.u16 \r2, \r2, \c
vmin.u16 \r3, \r3, \c
.endif
.endm
.macro vsub_i16 c, r0, r1, r2, r3
vsub.i16 \r0, \r0, \c
.ifnb \r1
vsub.i16 \r1, \r1, \c
.endif
.ifnb \r2
vsub.i16 \r2, \r2, \c
vsub.i16 \r3, \r3, \c
.endif
.endm
.macro vmull_vmlal_4 d, s0, s1, s2, s3
vmull.s16 \d, \s0, d0[0]
vmlal.s16 \d, \s1, d0[1]
vmlal.s16 \d, \s2, d0[2]
vmlal.s16 \d, \s3, d0[3]
.endm
.macro vmull_vmlal_8 d, s0, s1, s2, s3, s4, s5, s6, s7
vmull.s16 \d, \s0, d0[0]
vmlal.s16 \d, \s1, d0[1]
vmlal.s16 \d, \s2, d0[2]
vmlal.s16 \d, \s3, d0[3]
vmlal.s16 \d, \s4, d1[0]
vmlal.s16 \d, \s5, d1[1]
vmlal.s16 \d, \s6, d1[2]
vmlal.s16 \d, \s7, d1[3]
.endm
.macro vqrshrun_s32 shift, q0, d0, q1, d1, q2, d2, q3, d3
vqrshrun.s32 \d0, \q0, #\shift
.ifnb \q1
vqrshrun.s32 \d1, \q1, #\shift
.endif
.ifnb \q2
vqrshrun.s32 \d2, \q2, #\shift
vqrshrun.s32 \d3, \q3, #\shift
.endif
.endm
.macro vmovn_i32 q0, d0, q1, d1, q2, d2, q3, d3
vmovn.i32 \d0, \q0
.ifnb \q1
vmovn.i32 \d1, \q1
.endif
.ifnb \q2
vmovn.i32 \d2, \q2
vmovn.i32 \d3, \q3
.endif
.endm
.macro vrshl_s32 shift, r0, r1, r2, r3
vrshl.s32 \r0, \r0, \shift
vrshl.s32 \r1, \r1, \shift
.ifnb \r2
vrshl.s32 \r2, \r2, \shift
vrshl.s32 \r3, \r3, \shift
.endif
.endm
.macro vst1_32 strd, r0, r1
vst1.32 {\r0[0]}, [r0, :32], \strd
vst1.32 {\r0[1]}, [r9, :32], \strd
.ifnb \r1
vst1.32 {\r1[0]}, [r0, :32], \strd
vst1.32 {\r1[1]}, [r9, :32], \strd
.endif
.endm
.macro vst1_reg strd, align, r0, r1, r2, r3, r4, r5, r6, r7
vst1.16 {\r0}, [r0, \align], \strd
vst1.16 {\r1}, [r9, \align], \strd
.ifnb \r2
vst1.16 {\r2}, [r0, \align], \strd
vst1.16 {\r3}, [r9, \align], \strd
.endif
.ifnb \r4
vst1.16 {\r4}, [r0, \align], \strd
vst1.16 {\r5}, [r9, \align], \strd
vst1.16 {\r6}, [r0, \align], \strd
vst1.16 {\r7}, [r9, \align], \strd
.endif
.endm
.macro finalize type, q0, q1, d0, d1, q2, q3, d2, d3
.ifc \type, put
vqrshrun_s32 6, \q0, \d0, \q1, \d1, \q2, \d2, \q3, \d3
vmin_u16 q15, \q0, \q1
.else
vrshl_s32 q14, \q0, \q1, \q2, \q3 // -(6-intermediate_bits)
vmovn_i32 \q0, \d0, \q1, \d1, \q2, \d2, \q3, \d3
vsub_i16 q15, \q0, \q1 // PREP_BIAS
.endif
.endm
.macro shift_store_4 type, strd, q0, q1, d0, d1, q2, q3, d2, d3
finalize \type, \q0, \q1, \d0, \d1, \q2, \q3, \d2, \d3
vst1_reg \strd, :64, \d0, \d1, \d2, \d3
.endm
.macro shift_store_8 type, strd, q0, q1, d0, d1, q2, q3, d2, d3
finalize \type, \q0, \q1, \d0, \d1, \q2, \q3, \d2, \d3
vst1_reg \strd, :128, \q0, \q1
.endm
.macro shift_store_16 type, strd, q0, q1, d0, d1, q2, q3, d2, d3
finalize \type, \q0, \q1, \d0, \d1, \q2, \q3, \d2, \d3
vst1.16 {\q0, \q1}, [r0, :128], \strd
.endm
.macro make_8tap_fn op, type, type_h, type_v
function \op\()_8tap_\type\()_16bpc_neon, export=1
push {r4-r11,lr}
movw r9, \type_h
movw r10, \type_v
b \op\()_8tap_neon
endfunc
.endm
// No spaces in these expressions, due to gas-preprocessor.
#define REGULAR ((0*15<<7)|3*15)
#define SMOOTH ((1*15<<7)|4*15)
#define SHARP ((2*15<<7)|3*15)
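// Each constant packs two filter-table offsets (in units of 15 entries):
// bits 7-13 select the 8-tap row and bits 0-6 the 4-tap row of
// mc_subpel_filters; the subpel position is added into both fields via the
// 0x4081 multiply below.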
.macro filter_fn type, dst, d_strd, src, s_strd, w, h, mx, my, bdmax, ds2, sr2
make_8tap_fn \type, regular, REGULAR, REGULAR
make_8tap_fn \type, regular_smooth, REGULAR, SMOOTH
make_8tap_fn \type, regular_sharp, REGULAR, SHARP
make_8tap_fn \type, smooth, SMOOTH, SMOOTH
make_8tap_fn \type, smooth_regular, SMOOTH, REGULAR
make_8tap_fn \type, smooth_sharp, SMOOTH, SHARP
make_8tap_fn \type, sharp, SHARP, SHARP
make_8tap_fn \type, sharp_regular, SHARP, REGULAR
make_8tap_fn \type, sharp_smooth, SHARP, SMOOTH
function \type\()_8tap_neon
ldrd r4, r5, [sp, #36]
ldrd r6, r7, [sp, #44]
.ifc \bdmax, r8
ldr r8, [sp, #52]
.endif
movw r11, #0x4081 // (1 << 14) | (1 << 7) | (1 << 0)
mul \mx, \mx, r11
mul \my, \my, r11
add \mx, \mx, r9 // mx, 8tap_h, 4tap_h
add \my, \my, r10 // my, 8tap_v, 4tap_v
.ifc \type, prep
lsl \d_strd, \w, #1
.endif
vdup.16 q15, \bdmax // bitdepth_max
clz \bdmax, \bdmax
clz r9, \w
sub \bdmax, \bdmax, #18 // intermediate_bits = clz(bitdepth_max) - 18
tst \mx, #(0x7f << 14)
sub r9, r9, #24
add lr, \bdmax, #6 // 6 + intermediate_bits
rsb r12, \bdmax, #6 // 6 - intermediate_bits
movrel r11, X(mc_subpel_filters), -8
bne L(\type\()_8tap_h)
tst \my, #(0x7f << 14)
bne L(\type\()_8tap_v)
b \type\()_neon
L(\type\()_8tap_h):
cmp \w, #4
ubfx r10, \mx, #7, #7
and \mx, \mx, #0x7f
it gt
movgt \mx, r10
tst \my, #(0x7f << 14)
add \mx, r11, \mx, lsl #3
bne L(\type\()_8tap_hv)
adr r10, L(\type\()_8tap_h_tbl)
vdup.32 q14, r12 // 6 - intermediate_bits
ldr r9, [r10, r9, lsl #2]
vneg.s32 q14, q14 // -(6-intermediate_bits)
.ifc \type, put
vdup.16 q13, \bdmax // intermediate_bits
.else
vmov.i16 q13, #PREP_BIAS
.endif
add r10, r10, r9
.ifc \type, put
vneg.s16 q13, q13 // -intermediate_bits
.endif
bx r10
.align 2
L(\type\()_8tap_h_tbl):
.word 1280f - L(\type\()_8tap_h_tbl) + CONFIG_THUMB
.word 640f - L(\type\()_8tap_h_tbl) + CONFIG_THUMB
.word 320f - L(\type\()_8tap_h_tbl) + CONFIG_THUMB
.word 160f - L(\type\()_8tap_h_tbl) + CONFIG_THUMB
.word 80f - L(\type\()_8tap_h_tbl) + CONFIG_THUMB
.word 40f - L(\type\()_8tap_h_tbl) + CONFIG_THUMB
.word 20f - L(\type\()_8tap_h_tbl) + CONFIG_THUMB
20: // 2xN h
.ifc \type, put
add \mx, \mx, #2
vld1.32 {d0[]}, [\mx]
sub \src, \src, #2
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \d_strd, \d_strd, #1
lsl \s_strd, \s_strd, #1
vmovl.s8 q0, d0
2:
vld1.16 {q2}, [\src], \s_strd
vld1.16 {q3}, [\sr2], \s_strd
vext.8 d5, d4, d5, #2
vext.8 d7, d6, d7, #2
subs \h, \h, #2
vtrn.32 d4, d6
vtrn.32 d5, d7
vmull.s16 q1, d4, d0[0]
vmlal.s16 q1, d5, d0[1]
vmlal.s16 q1, d6, d0[2]
vmlal.s16 q1, d7, d0[3]
vrshl.s32 q1, q1, q14 // -(6-intermediate_bits)
vqmovun.s32 d2, q1
vrshl.s16 d2, d2, d26 // -intermediate_bits
vmin.u16 d2, d2, d30
vst1.32 {d2[0]}, [\dst, :32], \d_strd
vst1.32 {d2[1]}, [\ds2, :32], \d_strd
bgt 2b
pop {r4-r11,pc}
.endif
40: // 4xN h
add \mx, \mx, #2
vld1.32 {d0[]}, [\mx]
sub \src, \src, #2
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \d_strd, \d_strd, #1
lsl \s_strd, \s_strd, #1
vmovl.s8 q0, d0
4:
vld1.16 {q8}, [\src], \s_strd
vld1.16 {q11}, [\sr2], \s_strd
vext.8 d18, d16, d17, #2
vext.8 d19, d16, d17, #4
vext.8 d20, d16, d17, #6
vext.8 d24, d22, d23, #2
vext.8 d25, d22, d23, #4
vext.8 d21, d22, d23, #6
subs \h, \h, #2
vmull.s16 q2, d16, d0[0]
vmlal.s16 q2, d18, d0[1]
vmlal.s16 q2, d19, d0[2]
vmlal.s16 q2, d20, d0[3]
vmull.s16 q3, d22, d0[0]
vmlal.s16 q3, d24, d0[1]
vmlal.s16 q3, d25, d0[2]
vmlal.s16 q3, d21, d0[3]
vrshl.s32 q2, q2, q14 // -(6-intermediate_bits)
vrshl.s32 q3, q3, q14 // -(6-intermediate_bits)
.ifc \type, put
vqmovun.s32 d4, q2
vqmovun.s32 d5, q3
vrshl.s16 q2, q2, q13 // -intermediate_bits
vmin.u16 q2, q2, q15
.else
vmovn.s32 d4, q2
vmovn.s32 d5, q3
vsub.i16 q2, q2, q13 // PREP_BIAS
.endif
vst1.16 {d4}, [\dst, :64], \d_strd
vst1.16 {d5}, [\ds2, :64], \d_strd
bgt 4b
pop {r4-r11,pc}
80:
160:
320:
640:
1280: // 8xN, 16xN, 32xN, ... h
vpush {q4-q5}
vld1.8 {d0}, [\mx, :64]
sub \src, \src, #6
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
vmovl.s8 q0, d0
sub \s_strd, \s_strd, \w, lsl #1
sub \s_strd, \s_strd, #16
.ifc \type, put
lsl \d_strd, \d_strd, #1
sub \d_strd, \d_strd, \w, lsl #1
.endif
81:
vld1.16 {q8, q9}, [\src]!
vld1.16 {q10, q11}, [\sr2]!
mov \mx, \w
8:
vmull.s16 q1, d16, d0[0]
vmull.s16 q2, d17, d0[0]
vmull.s16 q3, d20, d0[0]
vmull.s16 q4, d21, d0[0]
.irpc i, 1234567
vext.8 q12, q8, q9, #(2*\i)
vext.8 q5, q10, q11, #(2*\i)
.if \i < 4
vmlal.s16 q1, d24, d0[\i]
vmlal.s16 q2, d25, d0[\i]
vmlal.s16 q3, d10, d0[\i]
vmlal.s16 q4, d11, d0[\i]
.else
vmlal.s16 q1, d24, d1[\i-4]
vmlal.s16 q2, d25, d1[\i-4]
vmlal.s16 q3, d10, d1[\i-4]
vmlal.s16 q4, d11, d1[\i-4]
.endif
.endr
subs \mx, \mx, #8
vrshl.s32 q1, q1, q14 // -(6-intermediate_bits)
vrshl.s32 q2, q2, q14 // -(6-intermediate_bits)
vrshl.s32 q3, q3, q14 // -(6-intermediate_bits)
vrshl.s32 q4, q4, q14 // -(6-intermediate_bits)
.ifc \type, put
vqmovun.s32 d2, q1
vqmovun.s32 d3, q2
vqmovun.s32 d4, q3
vqmovun.s32 d5, q4
vrshl.s16 q1, q1, q13 // -intermediate_bits
vrshl.s16 q2, q2, q13 // -intermediate_bits
vmin.u16 q1, q1, q15
vmin.u16 q2, q2, q15
.else
vmovn.s32 d2, q1
vmovn.s32 d3, q2
vmovn.s32 d4, q3
vmovn.s32 d5, q4
vsub.i16 q1, q1, q13 // PREP_BIAS
vsub.i16 q2, q2, q13 // PREP_BIAS
.endif
vst1.16 {q1}, [\dst, :128]!
vst1.16 {q2}, [\ds2, :128]!
ble 9f
vmov q8, q9
vmov q10, q11
vld1.16 {q9}, [\src]!
vld1.16 {q11}, [\sr2]!
b 8b
9:
add \dst, \dst, \d_strd
add \ds2, \ds2, \d_strd
add \src, \src, \s_strd
add \sr2, \sr2, \s_strd
subs \h, \h, #2
bgt 81b
vpop {q4-q5}
pop {r4-r11,pc}
L(\type\()_8tap_v):
cmp \h, #4
ubfx r10, \my, #7, #7
and \my, \my, #0x7f
it gt
movgt \my, r10
add \my, r11, \my, lsl #3
.ifc \type, prep
vdup.32 q14, r12 // 6 - intermediate_bits
vmov.i16 q15, #PREP_BIAS
.endif
adr r10, L(\type\()_8tap_v_tbl)
ldr r9, [r10, r9, lsl #2]
.ifc \type, prep
vneg.s32 q14, q14 // -(6-intermediate_bits)
.endif
add r10, r10, r9
bx r10
.align 2
L(\type\()_8tap_v_tbl):
.word 1280f - L(\type\()_8tap_v_tbl) + CONFIG_THUMB
.word 640f - L(\type\()_8tap_v_tbl) + CONFIG_THUMB
.word 320f - L(\type\()_8tap_v_tbl) + CONFIG_THUMB
.word 160f - L(\type\()_8tap_v_tbl) + CONFIG_THUMB
.word 80f - L(\type\()_8tap_v_tbl) + CONFIG_THUMB
.word 40f - L(\type\()_8tap_v_tbl) + CONFIG_THUMB
.word 20f - L(\type\()_8tap_v_tbl) + CONFIG_THUMB
20: // 2xN v
.ifc \type, put
bgt 28f
cmp \h, #2
add \my, \my, #2
vld1.32 {d0[]}, [\my]
sub \src, \src, \s_strd
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
vmovl.s8 q0, d0
// 2x2 v
load_32 \src, \sr2, \s_strd, d1, d2, d3, d4, d5
interleave_1_32 d1, d2, d3, d4, d5
bgt 24f
vmull_vmlal_4 q8, d1, d2, d3, d4
vqrshrun_s32 6, q8, d16
vmin_u16 d30, d16
vst1_32 \d_strd, d16
pop {r4-r11,pc}
24: // 2x4 v
load_32 \sr2, \src, \s_strd, d6, d7
interleave_1_32 d5, d6, d7
vmull_vmlal_4 q8, d1, d2, d3, d4
vmull_vmlal_4 q9, d3, d4, d5, d6
vqrshrun_s32 6, q8, d16, q9, d17
vmin_u16 q15, q8
vst1_32 \d_strd, d16, d17
pop {r4-r11,pc}
28: // 2x6, 2x8, 2x12, 2x16 v
vld1.8 {d0}, [\my, :64]
sub \sr2, \src, \s_strd, lsl #1
add \ds2, \dst, \d_strd
sub \src, \sr2, \s_strd
lsl \d_strd, \d_strd, #1
lsl \s_strd, \s_strd, #1
vmovl.s8 q0, d0
load_32 \src, \sr2, \s_strd, d2, d3, d4, d5, d6, d7, d16
interleave_1_32 d2, d3, d4, d5, d6
interleave_1_32 d6, d7, d16
216:
subs \h, \h, #4
load_32 \sr2, \src, \s_strd, d17, d18, d19, d20
interleave_1_32 d16, d17, d18, d19, d20
vmull_vmlal_8 q13, d2, d3, d4, d5, d6, d7, d16, d17
vmull_vmlal_8 q1, d4, d5, d6, d7, d16, d17, d18, d19
vqrshrun_s32 6, q13, d26, q1, d27
vmin_u16 q15, q13
vst1_32 \d_strd, d26, d27
ble 0f
cmp \h, #2
vmov q1, q3
vmov q2, q8
vmov q3, q9
vmov d16, d20
beq 26f
b 216b
26:
load_32 \sr2, \src, \s_strd, d17, d18
interleave_1_32 d16, d17, d18
vmull_vmlal_8 q13, d2, d3, d4, d5, d6, d7, d16, d17
vqrshrun_s32 6, q13, d26
vmin_u16 d30, d26
vst1_32 \d_strd, d26
0:
pop {r4-r11,pc}
.endif
40:
bgt 480f
// 4x2, 4x4 v
cmp \h, #2
add \my, \my, #2
vld1.32 {d0[]}, [\my]
sub \src, \src, \s_strd
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
vmovl.s8 q0, d0
load_reg \src, \sr2, \s_strd, d1, d2, d3, d4, d5
vmull_vmlal_4 q8, d1, d2, d3, d4
vmull_vmlal_4 q9, d2, d3, d4, d5
shift_store_4 \type, \d_strd, q8, q9, d16, d17
ble 0f
load_reg \sr2, \src, \s_strd, d6, d7
vmull_vmlal_4 q8, d3, d4, d5, d6
vmull_vmlal_4 q9, d4, d5, d6, d7
shift_store_4 \type, \d_strd, q8, q9, d16, d17
0:
pop {r4-r11,pc}
480: // 4x6, 4x8, 4x12, 4x16 v
vld1.8 {d0}, [\my, :64]
sub \sr2, \src, \s_strd, lsl #1
add \ds2, \dst, \d_strd
sub \src, \sr2, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
vmovl.s8 q0, d0
load_reg \src, \sr2, \s_strd, d16, d17, d18, d19, d20, d21, d22
48:
subs \h, \h, #4
load_reg \sr2, \src, \s_strd, d23, d24, d25, d26
vmull_vmlal_8 q1, d16, d17, d18, d19, d20, d21, d22, d23
vmull_vmlal_8 q2, d17, d18, d19, d20, d21, d22, d23, d24
vmull_vmlal_8 q3, d18, d19, d20, d21, d22, d23, d24, d25
vmull_vmlal_8 q8, d19, d20, d21, d22, d23, d24, d25, d26
shift_store_4 \type, \d_strd, q1, q2, d2, d3, q3, q8, d4, d5
ble 0f
cmp \h, #2
vmov q8, q10
vmov q9, q11
vmov q10, q12
vmov d22, d26
beq 46f
b 48b
46:
load_reg \sr2, \src, \s_strd, d23, d24
vmull_vmlal_8 q1, d16, d17, d18, d19, d20, d21, d22, d23
vmull_vmlal_8 q2, d17, d18, d19, d20, d21, d22, d23, d24
shift_store_4 \type, \d_strd, q1, q2, d2, d3
0:
pop {r4-r11,pc}
80:
bgt 880f
// 8x2, 8x4 v
cmp \h, #2
add \my, \my, #2
vld1.32 {d0[]}, [\my]
sub \src, \src, \s_strd
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
vmovl.s8 q0, d0
load_reg \src, \sr2, \s_strd, q1, q2, q3, q8, q9
vmull_vmlal_4 q10, d2, d4, d6, d16
vmull_vmlal_4 q11, d3, d5, d7, d17
vmull_vmlal_4 q12, d4, d6, d16, d18
vmull_vmlal_4 q13, d5, d7, d17, d19
shift_store_8 \type, \d_strd, q10, q11, d20, d21, q12, q13, d22, d23
ble 0f
load_reg \sr2, \src, \s_strd, q10, q11
vmull_vmlal_4 q1, d6, d16, d18, d20
vmull_vmlal_4 q2, d7, d17, d19, d21
vmull_vmlal_4 q12, d16, d18, d20, d22
vmull_vmlal_4 q13, d17, d19, d21, d23
shift_store_8 \type, \d_strd, q1, q2, d2, d3, q12, q13, d4, d5
0:
pop {r4-r11,pc}
880: // 8x6, 8x8, 8x16, 8x32 v
1680: // 16x8, 16x16, ...
320: // 32x8, 32x16, ...
640:
1280:
vpush {q4-q7}
vld1.8 {d0}, [\my, :64]
sub \src, \src, \s_strd
sub \src, \src, \s_strd, lsl #1
vmovl.s8 q0, d0
mov \my, \h
168:
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
load_reg \src, \sr2, \s_strd, q5, q6, q7, q8, q9, q10, q11
88:
subs \h, \h, #2
load_reg \sr2, \src, \s_strd, q12, q13
vmull_vmlal_8 q1, d10, d12, d14, d16, d18, d20, d22, d24
vmull_vmlal_8 q2, d11, d13, d15, d17, d19, d21, d23, d25
vmull_vmlal_8 q3, d12, d14, d16, d18, d20, d22, d24, d26
vmull_vmlal_8 q4, d13, d15, d17, d19, d21, d23, d25, d27
shift_store_8 \type, \d_strd, q1, q2, d2, d3, q3, q4, d4, d5
ble 9f
subs \h, \h, #2
load_reg \sr2, \src, \s_strd, q1, q2
vmull_vmlal_8 q3, d14, d16, d18, d20, d22, d24, d26, d2
vmull_vmlal_8 q4, d15, d17, d19, d21, d23, d25, d27, d3
vmull_vmlal_8 q5, d16, d18, d20, d22, d24, d26, d2, d4
vmull_vmlal_8 q6, d17, d19, d21, d23, d25, d27, d3, d5
shift_store_8 \type, \d_strd, q3, q4, d6, d7, q5, q6, d8, d9
ble 9f
vmov q5, q9
vmov q6, q10
vmov q7, q11
vmov q8, q12
vmov q9, q13
vmov q10, q1
vmov q11, q2
b 88b
9:
subs \w, \w, #8
ble 0f
asr \s_strd, \s_strd, #1
asr \d_strd, \d_strd, #1
mls \src, \s_strd, \my, \src
mls \dst, \d_strd, \my, \dst
sub \src, \src, \s_strd, lsl #3
mov \h, \my
add \src, \src, #16
add \dst, \dst, #16
b 168b
0:
vpop {q4-q7}
pop {r4-r11,pc}
160:
bgt 1680b
// 16x2, 16x4 v
vpush {q6-q7}
add \my, \my, #2
vld1.32 {d0[]}, [\my]
sub \src, \src, \s_strd
vmovl.s8 q0, d0
load_16s16 \src, \src, \s_strd, q6, q7, q8, q9, q10, q11
16:
load_16s16 \src, \src, \s_strd, q12, q13
subs \h, \h, #1
vmull_vmlal_4 q1, d12, d16, d20, d24
vmull_vmlal_4 q2, d13, d17, d21, d25
vmull_vmlal_4 q3, d14, d18, d22, d26
vmull_vmlal_4 q6, d15, d19, d23, d27
shift_store_16 \type, \d_strd, q1, q2, d2, d3, q3, q6, d4, d5
ble 0f
vmov q6, q8
vmov q7, q9
vmov q8, q10
vmov q9, q11
vmov q10, q12
vmov q11, q13
b 16b
0:
vpop {q6-q7}
pop {r4-r11,pc}
L(\type\()_8tap_hv):
cmp \h, #4
ubfx r10, \my, #7, #7
and \my, \my, #0x7f
it gt
movgt \my, r10
4:
add \my, r11, \my, lsl #3
adr r10, L(\type\()_8tap_hv_tbl)
neg r12, r12 // -(6-intermediate_bits)
ldr r9, [r10, r9, lsl #2]
vdup.32 q14, r12 // -(6-intermediate_bits)
.ifc \type, put
neg r8, lr // -(6+intermediate_bits)
.else
vmov.i16 q13, #PREP_BIAS
.endif
add r10, r10, r9
.ifc \type, put
vdup.32 q13, r8 // -(6+intermediate_bits)
.endif
bx r10
.align 2
L(\type\()_8tap_hv_tbl):
.word 1280f - L(\type\()_8tap_hv_tbl) + CONFIG_THUMB
.word 640f - L(\type\()_8tap_hv_tbl) + CONFIG_THUMB
.word 320f - L(\type\()_8tap_hv_tbl) + CONFIG_THUMB
.word 160f - L(\type\()_8tap_hv_tbl) + CONFIG_THUMB
.word 80f - L(\type\()_8tap_hv_tbl) + CONFIG_THUMB
.word 40f - L(\type\()_8tap_hv_tbl) + CONFIG_THUMB
.word 20f - L(\type\()_8tap_hv_tbl) + CONFIG_THUMB
20:
.ifc \type, put
add \mx, \mx, #2
vld1.32 {d0[]}, [\mx]
bgt 280f
add \my, \my, #2
vld1.32 {d2[]}, [\my]
// 2x2, 2x4 hv
sub \sr2, \src, #2
sub \src, \sr2, \s_strd
add \ds2, \dst, \d_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
vmovl.s8 q0, d0
vmovl.s8 q1, d2
vld1.16 {q11}, [\src], \s_strd
vext.8 d24, d22, d23, #2
vmull.s16 q11, d22, d0
vmull.s16 q12, d24, d0
vpadd.s32 d22, d22, d23
vpadd.s32 d23, d24, d25
vpadd.s32 d22, d22, d23
vrshl.s32 d16, d22, d28 // -(6-intermediate_bits)
vmovn.i32 d16, q8
bl L(\type\()_8tap_filter_2)
vext.8 d16, d16, d16, #4
vext.8 d16, d16, d24, #4
vmov d17, d24
2:
bl L(\type\()_8tap_filter_2)
vext.8 d18, d17, d24, #4
vmull.s16 q2, d16, d2[0]
vmlal.s16 q2, d17, d2[1]
vmlal.s16 q2, d18, d2[2]
vmlal.s16 q2, d24, d2[3]
vrshl.s32 q2, q2, q13 // -(6+intermediate_bits)
vqmovun.s32 d4, q2
vmin.u16 d4, d4, d30
subs \h, \h, #2
vst1.32 {d4[0]}, [\dst, :32], \d_strd
vst1.32 {d4[1]}, [\ds2, :32], \d_strd
ble 0f
vmov d16, d18
vmov d17, d24
b 2b
280: // 2x8, 2x16, 2x32 hv
vld1.8 {d2}, [\my, :64]
sub \src, \src, #2
sub \sr2, \src, \s_strd, lsl #1
sub \src, \sr2, \s_strd
add \ds2, \dst, \d_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
vmovl.s8 q0, d0
vmovl.s8 q1, d2
vld1.16 {q11}, [\src], \s_strd
vext.8 d24, d22, d23, #2
vmull.s16 q11, d22, d0
vmull.s16 q12, d24, d0
vpadd.s32 d22, d22, d23
vpadd.s32 d23, d24, d25
vpadd.s32 d22, d22, d23
vrshl.s32 d16, d22, d28 // -(6-intermediate_bits)
vmovn.i32 d16, q8
bl L(\type\()_8tap_filter_2)
vext.8 d16, d16, d16, #4
vext.8 d16, d16, d24, #4
vmov d17, d24
bl L(\type\()_8tap_filter_2)
vext.8 d18, d17, d24, #4
vmov d19, d24
bl L(\type\()_8tap_filter_2)
vext.8 d20, d19, d24, #4
vmov d21, d24
28:
bl L(\type\()_8tap_filter_2)
vext.8 d22, d21, d24, #4
vmull.s16 q3, d16, d2[0]
vmlal.s16 q3, d17, d2[1]
vmlal.s16 q3, d18, d2[2]
vmlal.s16 q3, d19, d2[3]
vmlal.s16 q3, d20, d3[0]
vmlal.s16 q3, d21, d3[1]
vmlal.s16 q3, d22, d3[2]
vmlal.s16 q3, d24, d3[3]
vrshl.s32 q3, q3, q13 // -(6+intermediate_bits)
vqmovun.s32 d6, q3
vmin.u16 d6, d6, d30
subs \h, \h, #2
vst1.32 {d6[0]}, [\dst, :32], \d_strd
vst1.32 {d6[1]}, [\ds2, :32], \d_strd
ble 0f
vmov q8, q9
vmov q9, q10
vmov d20, d22
vmov d21, d24
b 28b
0:
pop {r4-r11,pc}
L(\type\()_8tap_filter_2):
vld1.16 {q11}, [\sr2], \s_strd
vld1.16 {q12}, [\src], \s_strd
vext.8 d23, d22, d23, #2
vext.8 d25, d24, d25, #2
vtrn.32 q11, q12
vmull.s16 q3, d22, d0[0]
vmlal.s16 q3, d23, d0[1]
vmlal.s16 q3, d24, d0[2]
vmlal.s16 q3, d25, d0[3]
vrshl.s32 q3, q3, q14 // -(6-intermediate_bits)
vmovn.i32 d24, q3
bx lr
.endif
40:
add \mx, \mx, #2
vld1.32 {d0[]}, [\mx]
bgt 480f
add \my, \my, #2
vld1.32 {d2[]}, [\my]
sub \sr2, \src, #2
sub \src, \sr2, \s_strd
add \ds2, \dst, \d_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
vmovl.s8 q0, d0
vmovl.s8 q1, d2
// 4x2, 4x4 hv
vld1.16 {q11}, [\src], \s_strd
vext.8 d24, d22, d23, #2
vext.8 d25, d22, d23, #4
vext.8 d23, d22, d23, #6
vmull.s16 q10, d22, d0[0]
vmlal.s16 q10, d24, d0[1]
vmlal.s16 q10, d25, d0[2]
vmlal.s16 q10, d23, d0[3]
vrshl.s32 q10, q10, q14 // -(6-intermediate_bits)
vmovn.i32 d17, q10
bl L(\type\()_8tap_filter_4)
vmov q9, q12
4:
bl L(\type\()_8tap_filter_4)
vmull.s16 q2, d17, d2[0]
vmlal.s16 q2, d18, d2[1]
vmlal.s16 q2, d19, d2[2]
vmlal.s16 q2, d24, d2[3]
vmull.s16 q3, d18, d2[0]
vmlal.s16 q3, d19, d2[1]
vmlal.s16 q3, d24, d2[2]
vmlal.s16 q3, d25, d2[3]
.ifc \type, put
vrshl.s32 q2, q2, q13 // -(6+intermediate_bits)
vrshl.s32 q3, q3, q13 // -(6+intermediate_bits)
vqmovun.s32 d4, q2
vqmovun.s32 d5, q3
vmin.u16 q2, q2, q15
.else
vrshrn.i32 d4, q2, #6
vrshrn.i32 d5, q3, #6
vsub.i16 q2, q2, q13 // PREP_BIAS
.endif
subs \h, \h, #2
vst1.16 {d4}, [\dst, :64], \d_strd
vst1.16 {d5}, [\ds2, :64], \d_strd
ble 0f
vmov d17, d19
vmov q9, q12
b 4b
0:
pop {r4-r11,pc}
480: // 4x8, 4x16, 4x32 hv
vpush {d13-d15}
vld1.8 {d2}, [\my, :64]
sub \src, \src, #2
sub \sr2, \src, \s_strd, lsl #1
sub \src, \sr2, \s_strd
add \ds2, \dst, \d_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
vmovl.s8 q0, d0
vmovl.s8 q1, d2
vld1.16 {q11}, [\src], \s_strd
vext.8 d24, d22, d23, #2
vext.8 d25, d22, d23, #4
vext.8 d23, d22, d23, #6
vmull.s16 q10, d22, d0[0]
vmlal.s16 q10, d24, d0[1]
vmlal.s16 q10, d25, d0[2]
vmlal.s16 q10, d23, d0[3]
vrshl.s32 q10, q10, q14 // -(6-intermediate_bits)
vmovn.i32 d13, q10
bl L(\type\()_8tap_filter_4)
vmov q7, q12
bl L(\type\()_8tap_filter_4)
vmov q8, q12
bl L(\type\()_8tap_filter_4)
vmov q9, q12
48:
bl L(\type\()_8tap_filter_4)
vmull.s16 q2, d13, d2[0]
vmlal.s16 q2, d14, d2[1]
vmlal.s16 q2, d15, d2[2]
vmlal.s16 q2, d16, d2[3]
vmlal.s16 q2, d17, d3[0]
vmlal.s16 q2, d18, d3[1]
vmlal.s16 q2, d19, d3[2]
vmlal.s16 q2, d24, d3[3]
vmull.s16 q3, d14, d2[0]
vmlal.s16 q3, d15, d2[1]
vmlal.s16 q3, d16, d2[2]
vmlal.s16 q3, d17, d2[3]
vmlal.s16 q3, d18, d3[0]
vmlal.s16 q3, d19, d3[1]
vmlal.s16 q3, d24, d3[2]
vmlal.s16 q3, d25, d3[3]
.ifc \type, put
vrshl.s32 q2, q2, q13 // -(6+intermediate_bits)
vrshl.s32 q3, q3, q13 // -(6+intermediate_bits)
vqmovun.s32 d4, q2
vqmovun.s32 d5, q3
vmin.u16 q2, q2, q15
.else
vrshrn.i32 d4, q2, #6
vrshrn.i32 d5, q3, #6
vsub.i16 q2, q2, q13 // PREP_BIAS
.endif
subs \h, \h, #2
vst1.16 {d4}, [\dst, :64], \d_strd
vst1.16 {d5}, [\ds2, :64], \d_strd
ble 0f
vmov d13, d15
vmov q7, q8
vmov q8, q9
vmov q9, q12
b 48b
0:
vpop {d13-d15}
pop {r4-r11,pc}
L(\type\()_8tap_filter_4):
vld1.16 {q10}, [\sr2], \s_strd
vld1.16 {q11}, [\src], \s_strd
vext.8 d24, d20, d21, #2
vext.8 d25, d20, d21, #4
vext.8 d21, d20, d21, #6
vmull.s16 q3, d20, d0[0]
vmlal.s16 q3, d24, d0[1]
vmlal.s16 q3, d25, d0[2]
vmlal.s16 q3, d21, d0[3]
vext.8 d24, d22, d23, #2
vext.8 d25, d22, d23, #4
vext.8 d23, d22, d23, #6
vmull.s16 q10, d22, d0[0]
vmlal.s16 q10, d24, d0[1]
vmlal.s16 q10, d25, d0[2]
vmlal.s16 q10, d23, d0[3]
vrshl.s32 q3, q3, q14 // -(6-intermediate_bits)
vrshl.s32 q10, q10, q14 // -(6-intermediate_bits)
vmovn.i32 d24, q3
vmovn.i32 d25, q10
bx lr
80:
160:
320:
bgt 880f
add \my, \my, #2
vld1.8 {d0}, [\mx, :64]
vld1.32 {d2[]}, [\my]
sub \src, \src, #6
sub \src, \src, \s_strd
vmovl.s8 q0, d0
vmovl.s8 q1, d2
mov \my, \h
164: // 8x2, 8x4, 16x2, 16x4, 32x2, 32x4 hv
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \d_strd, \d_strd, #1
lsl \s_strd, \s_strd, #1
vld1.16 {q11, q12}, [\src], \s_strd
vmull.s16 q2, d22, d0[0]
vmull.s16 q3, d23, d0[0]
vdup.32 q14, r12 // -(6-intermediate_bits)
.irpc i, 1234567
vext.8 q10, q11, q12, #(2*\i)
.if \i < 4
vmlal.s16 q2, d20, d0[\i]
vmlal.s16 q3, d21, d0[\i]
.else
vmlal.s16 q2, d20, d1[\i - 4]
vmlal.s16 q3, d21, d1[\i - 4]
.endif
.endr
vrshl.s32 q2, q2, q14 // -(6-intermediate_bits)
vrshl.s32 q3, q3, q14 // -(6-intermediate_bits)
vmovn.i32 d16, q2
vmovn.i32 d17, q3
bl L(\type\()_8tap_filter_8)
vmov q9, q11
vmov q10, q12
8:
bl L(\type\()_8tap_filter_8)
vmull.s16 q2, d16, d2[0]
vmull.s16 q3, d17, d2[0]
vmull.s16 q13, d18, d2[0]
vmull.s16 q14, d19, d2[0]
.ifc \type, put
vdup.32 q8, r8 // -(6+intermediate_bits)
.endif
vmlal.s16 q2, d18, d2[1]
vmlal.s16 q3, d19, d2[1]
vmlal.s16 q13, d20, d2[1]
vmlal.s16 q14, d21, d2[1]
vmlal.s16 q2, d20, d2[2]
vmlal.s16 q3, d21, d2[2]
vmlal.s16 q13, d22, d2[2]
vmlal.s16 q14, d23, d2[2]
vmlal.s16 q2, d22, d2[3]
vmlal.s16 q3, d23, d2[3]
vmlal.s16 q13, d24, d2[3]
vmlal.s16 q14, d25, d2[3]
.ifc \type, put
vdup.16 q9, \bdmax // bitdepth_max
vrshl.s32 q2, q2, q8 // -(6+intermediate_bits)
vrshl.s32 q3, q3, q8 // -(6+intermediate_bits)
vrshl.s32 q13, q13, q8 // -(6+intermediate_bits)
vrshl.s32 q14, q14, q8 // -(6+intermediate_bits)
vqmovun.s32 d4, q2
vqmovun.s32 d5, q3
vqmovun.s32 d6, q13
vqmovun.s32 d7, q14
vmin.u16 q2, q2, q15
vmin.u16 q3, q3, q15
.else
vmov.i16 q9, #PREP_BIAS
vrshrn.i32 d4, q2, #6
vrshrn.i32 d5, q3, #6
vrshrn.i32 d6, q13, #6
vrshrn.i32 d7, q14, #6
vsub.i16 q2, q2, q9 // PREP_BIAS
vsub.i16 q3, q3, q9 // PREP_BIAS
.endif
subs \h, \h, #2
vst1.16 {q2}, [\dst, :128], \d_strd
vst1.16 {q3}, [\ds2, :128], \d_strd
ble 9f
vmov q8, q10
vmov q9, q11
vmov q10, q12
b 8b
9:
subs \w, \w, #8
ble 0f
asr \s_strd, \s_strd, #1
asr \d_strd, \d_strd, #1
mls \src, \s_strd, \my, \src
mls \dst, \d_strd, \my, \dst
sub \src, \src, \s_strd, lsl #2
mov \h, \my
add \src, \src, #16
add \dst, \dst, #16
b 164b
0:
pop {r4-r11,pc}
880: // 8x8, 8x16, ..., 16x8, ..., 32x8, ... hv
640:
1280:
vpush {q4-q7}
vld1.8 {d0}, [\mx, :64]
vld1.8 {d2}, [\my, :64]
sub \src, \src, #6
sub \src, \src, \s_strd
sub \src, \src, \s_strd, lsl #1
vmovl.s8 q0, d0
vmovl.s8 q1, d2
mov \my, \h
168:
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \d_strd, \d_strd, #1
lsl \s_strd, \s_strd, #1
vld1.16 {q11, q12}, [\src], \s_strd
vmull.s16 q2, d22, d0[0]
vmull.s16 q3, d23, d0[0]
vdup.32 q14, r12 // -(6-intermediate_bits)
.irpc i, 1234567
vext.8 q10, q11, q12, #(2*\i)
.if \i < 4
vmlal.s16 q2, d20, d0[\i]
vmlal.s16 q3, d21, d0[\i]
.else
vmlal.s16 q2, d20, d1[\i - 4]
vmlal.s16 q3, d21, d1[\i - 4]
.endif
.endr
vrshl.s32 q2, q2, q14 // -(6-intermediate_bits)
vrshl.s32 q3, q3, q14 // -(6-intermediate_bits)
vmovn.i32 d8, q2
vmovn.i32 d9, q3
bl L(\type\()_8tap_filter_8)
vmov q5, q11
vmov q6, q12
bl L(\type\()_8tap_filter_8)
vmov q7, q11
vmov q8, q12
bl L(\type\()_8tap_filter_8)
vmov q9, q11
vmov q10, q12
88:
bl L(\type\()_8tap_filter_8)
vmull.s16 q2, d8, d2[0]
vmull.s16 q3, d9, d2[0]
vmull.s16 q13, d10, d2[0]
vmull.s16 q14, d11, d2[0]
.ifc \type, put
vdup.32 q4, r8 // -(6+intermediate_bits)
.endif
vmlal.s16 q2, d10, d2[1]
vmlal.s16 q3, d11, d2[1]
vmlal.s16 q13, d12, d2[1]
vmlal.s16 q14, d13, d2[1]
vmlal.s16 q2, d12, d2[2]
vmlal.s16 q3, d13, d2[2]
vmlal.s16 q13, d14, d2[2]
vmlal.s16 q14, d15, d2[2]
vmlal.s16 q2, d14, d2[3]
vmlal.s16 q3, d15, d2[3]
vmlal.s16 q13, d16, d2[3]
vmlal.s16 q14, d17, d2[3]
vmlal.s16 q2, d16, d3[0]
vmlal.s16 q3, d17, d3[0]
vmlal.s16 q13, d18, d3[0]
vmlal.s16 q14, d19, d3[0]
vmlal.s16 q2, d18, d3[1]
vmlal.s16 q3, d19, d3[1]
vmlal.s16 q13, d20, d3[1]
vmlal.s16 q14, d21, d3[1]
vmlal.s16 q2, d20, d3[2]
vmlal.s16 q3, d21, d3[2]
vmlal.s16 q13, d22, d3[2]
vmlal.s16 q14, d23, d3[2]
vmlal.s16 q2, d22, d3[3]
vmlal.s16 q3, d23, d3[3]
vmlal.s16 q13, d24, d3[3]
vmlal.s16 q14, d25, d3[3]
.ifc \type, put
vrshl.s32 q2, q2, q4 // -(6+intermediate_bits)
vrshl.s32 q3, q3, q4 // -(6+intermediate_bits)
vrshl.s32 q13, q13, q4 // -(6+intermediate_bits)
vrshl.s32 q14, q14, q4 // -(6+intermediate_bits)
vqmovun.s32 d4, q2
vqmovun.s32 d5, q3
vqmovun.s32 d6, q13
vqmovun.s32 d7, q14
vmin.u16 q2, q2, q15
vmin.u16 q3, q3, q15
.else
vmov.i16 q5, #PREP_BIAS
vrshrn.i32 d4, q2, #6
vrshrn.i32 d5, q3, #6
vrshrn.i32 d6, q13, #6
vrshrn.i32 d7, q14, #6
vsub.i16 q2, q2, q5 // PREP_BIAS
vsub.i16 q3, q3, q5 // PREP_BIAS
.endif
subs \h, \h, #2
vst1.16 {q2}, [\dst, :128], \d_strd
vst1.16 {q3}, [\ds2, :128], \d_strd
ble 9f
vmov q4, q6
vmov q5, q7
vmov q6, q8
vmov q7, q9
vmov q8, q10
vmov q9, q11
vmov q10, q12
b 88b
9:
subs \w, \w, #8
ble 0f
asr \s_strd, \s_strd, #1
asr \d_strd, \d_strd, #1
mls \src, \s_strd, \my, \src
mls \dst, \d_strd, \my, \dst
sub \src, \src, \s_strd, lsl #3
mov \h, \my
add \src, \src, #16
add \dst, \dst, #16
b 168b
0:
vpop {q4-q7}
pop {r4-r11,pc}
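// Helper for the 8tap hv paths above: horizontally filters one row from
// \sr2 and one row from \src (advancing both by \s_strd) with the 8-tap
// mx coefficients in q0, rounds by 6 - intermediate_bits, and returns the
// two filtered rows as int16 in q11 and q12 (q2, q3, q13 and q14 are used
// as scratch).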
L(\type\()_8tap_filter_8):
vld1.16 {q13, q14}, [\sr2], \s_strd
vmull.s16 q2, d26, d0[0]
vmull.s16 q3, d27, d0[0]
.irpc i, 1234567
vext.8 q12, q13, q14, #(2*\i)
.if \i < 4
vmlal.s16 q2, d24, d0[\i]
vmlal.s16 q3, d25, d0[\i]
.else
vmlal.s16 q2, d24, d1[\i - 4]
vmlal.s16 q3, d25, d1[\i - 4]
.endif
.endr
vdup.32 q12, r12 // -(6-intermediate_bits)
vld1.16 {q13, q14}, [\src], \s_strd
vrshl.s32 q2, q2, q12 // -(6-intermediate_bits)
vrshl.s32 q3, q3, q12 // -(6-intermediate_bits)
vmovn.i32 d4, q2
vmovn.i32 d5, q3
vmull.s16 q3, d26, d0[0]
vmull.s16 q11, d27, d0[0]
.irpc i, 1234567
vext.8 q12, q13, q14, #(2*\i)
.if \i < 4
vmlal.s16 q3, d24, d0[\i]
vmlal.s16 q11, d25, d0[\i]
.else
vmlal.s16 q3, d24, d1[\i - 4]
vmlal.s16 q11, d25, d1[\i - 4]
.endif
.endr
vdup.32 q13, r12 // -(6-intermediate_bits)
vrshl.s32 q3, q3, q13 // -(6-intermediate_bits)
vrshl.s32 q11, q11, q13 // -(6-intermediate_bits)
vmovn.i32 d24, q3
vmovn.i32 d25, q11
vmov q11, q2
bx lr
endfunc
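// Bilinear (2-tap) 16 bpc path. Horizontal weights (16-mx, mx) are kept in
// q0/q1 and vertical weights (16-my, my) in q2/q3. As in the 8tap code,
// intermediate_bits = clz(bitdepth_max) - 18; the horizontal stage rounds
// by 4 - intermediate_bits, and the second stage rounds the rest of the way
// for put, or subtracts PREP_BIAS for prep.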
function \type\()_bilin_16bpc_neon, export=1
push {r4-r11,lr}
ldrd r4, r5, [sp, #36]
ldrd r6, r7, [sp, #44]
.ifc \bdmax, r8
ldr r8, [sp, #52]
.endif
vdup.16 q1, \mx
vdup.16 q3, \my
rsb r9, \mx, #16
rsb r10, \my, #16
vdup.16 q0, r9
vdup.16 q2, r10
.ifc \type, prep
lsl \d_strd, \w, #1
.endif
clz \bdmax, \bdmax // bitdepth_max
clz r9, \w
sub \bdmax, \bdmax, #18 // intermediate_bits = clz(bitdepth_max) - 18
cmp \mx, #0
sub r9, r9, #24
rsb r11, \bdmax, #4 // 4 - intermediate_bits
add r12, \bdmax, #4 // 4 + intermediate_bits
bne L(\type\()_bilin_h)
cmp \my, #0
bne L(\type\()_bilin_v)
b \type\()_neon
L(\type\()_bilin_h):
cmp \my, #0
bne L(\type\()_bilin_hv)
adr r10, L(\type\()_bilin_h_tbl)
vdup.16 q15, r11 // 4 - intermediate_bits
ldr r9, [r10, r9, lsl #2]
vneg.s16 q15, q15 // -(4-intermediate_bits)
.ifc \type, put
vdup.16 q14, \bdmax // intermediate_bits
.else
vmov.i16 q14, #PREP_BIAS
.endif
add r10, r10, r9
.ifc \type, put
vneg.s16 q14, q14 // -intermediate_bits
.endif
bx r10
.align 2
L(\type\()_bilin_h_tbl):
.word 1280f - L(\type\()_bilin_h_tbl) + CONFIG_THUMB
.word 640f - L(\type\()_bilin_h_tbl) + CONFIG_THUMB
.word 320f - L(\type\()_bilin_h_tbl) + CONFIG_THUMB
.word 160f - L(\type\()_bilin_h_tbl) + CONFIG_THUMB
.word 80f - L(\type\()_bilin_h_tbl) + CONFIG_THUMB
.word 40f - L(\type\()_bilin_h_tbl) + CONFIG_THUMB
.word 20f - L(\type\()_bilin_h_tbl) + CONFIG_THUMB
20: // 2xN h
.ifc \type, put
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \d_strd, \d_strd, #1
lsl \s_strd, \s_strd, #1
2:
vld1.16 {d16}, [\src], \s_strd
vld1.16 {d18}, [\sr2], \s_strd
vext.8 d17, d16, d16, #2
vext.8 d19, d18, d18, #2
vtrn.32 d16, d18
vtrn.32 d17, d19
subs \h, \h, #2
vmul.i16 d16, d16, d0
vmla.i16 d16, d17, d2
vrshl.u16 d16, d16, d30
vrshl.u16 d16, d16, d28
vst1.32 {d16[0]}, [\dst, :32], \d_strd
vst1.32 {d16[1]}, [\ds2, :32], \d_strd
bgt 2b
pop {r4-r11,pc}
.endif
40: // 4xN h
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \d_strd, \d_strd, #1
lsl \s_strd, \s_strd, #1
4:
vld1.16 {q8}, [\src], \s_strd
vld1.16 {q10}, [\sr2], \s_strd
vext.8 q9, q8, q8, #2
vext.8 q11, q10, q10, #2
vmov d17, d20
vmov d19, d22
subs \h, \h, #2
vmul.i16 q8, q8, q0
vmla.i16 q8, q9, q1
vrshl.u16 q8, q8, q15
.ifc \type, put
vrshl.u16 q8, q8, q14
.else
vsub.i16 q8, q8, q14
.endif
vst1.16 {d16}, [\dst, :64], \d_strd
vst1.16 {d17}, [\ds2, :64], \d_strd
bgt 4b
pop {r4-r11,pc}
80: // 8xN h
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \d_strd, \d_strd, #1
lsl \s_strd, \s_strd, #1
8:
vld1.16 {d16, d17, d18}, [\src], \s_strd
vld1.16 {d20, d21, d22}, [\sr2], \s_strd
vext.8 q9, q8, q9, #2
vext.8 q11, q10, q11, #2
subs \h, \h, #2
vmul.i16 q8, q8, q0
vmla.i16 q8, q9, q1
vmul.i16 q10, q10, q0
vmla.i16 q10, q11, q1
vrshl.u16 q8, q8, q15
vrshl.u16 q10, q10, q15
.ifc \type, put
vrshl.u16 q8, q8, q14
vrshl.u16 q10, q10, q14
.else
vsub.i16 q8, q8, q14
vsub.i16 q10, q10, q14
.endif
vst1.16 {q8}, [\dst, :128], \d_strd
vst1.16 {q10}, [\ds2, :128], \d_strd
bgt 8b
pop {r4-r11,pc}
160:
320:
640:
1280: // 16xN, 32xN, ... h
vpush {q4-q7}
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
sub \s_strd, \s_strd, \w, lsl #1
sub \s_strd, \s_strd, #16
.ifc \type, put
lsl \d_strd, \d_strd, #1
sub \d_strd, \d_strd, \w, lsl #1
.endif
161:
vld1.16 {q4}, [\src]!
vld1.16 {q9}, [\sr2]!
mov \mx, \w
16:
vld1.16 {q5, q6}, [\src]!
vld1.16 {q10, q11}, [\sr2]!
vext.8 q7, q4, q5, #2
vext.8 q8, q5, q6, #2
vext.8 q12, q9, q10, #2
vext.8 q13, q10, q11, #2
vmul.i16 q4, q4, q0
vmla.i16 q4, q7, q1
vmul.i16 q5, q5, q0
vmla.i16 q5, q8, q1
vmul.i16 q9, q9, q0
vmla.i16 q9, q12, q1
vmul.i16 q10, q10, q0
vmla.i16 q10, q13, q1
vrshl.u16 q4, q4, q15
vrshl.u16 q5, q5, q15
vrshl.u16 q9, q9, q15
vrshl.u16 q10, q10, q15
subs \mx, \mx, #16
.ifc \type, put
vrshl.u16 q4, q4, q14
vrshl.u16 q5, q5, q14
vrshl.u16 q9, q9, q14
vrshl.u16 q10, q10, q14
.else
vsub.i16 q4, q4, q14
vsub.i16 q5, q5, q14
vsub.i16 q9, q9, q14
vsub.i16 q10, q10, q14
.endif
vst1.16 {q4, q5}, [\dst, :128]!
vst1.16 {q9, q10}, [\ds2, :128]!
ble 9f
vmov q4, q6
vmov q9, q11
b 16b
9:
add \dst, \dst, \d_strd
add \ds2, \ds2, \d_strd
add \src, \src, \s_strd
add \sr2, \sr2, \s_strd
subs \h, \h, #2
bgt 161b
vpop {q4-q7}
pop {r4-r11,pc}
L(\type\()_bilin_v):
cmp \h, #4
adr r10, L(\type\()_bilin_v_tbl)
.ifc \type, prep
vdup.16 q15, r11 // 4 - intermediate_bits
.endif
ldr r9, [r10, r9, lsl #2]
.ifc \type, prep
vmov.i16 q14, #PREP_BIAS
vneg.s16 q15, q15 // -(4-intermediate_bits)
.endif
add r10, r10, r9
bx r10
.align 2
L(\type\()_bilin_v_tbl):
.word 1280f - L(\type\()_bilin_v_tbl) + CONFIG_THUMB
.word 640f - L(\type\()_bilin_v_tbl) + CONFIG_THUMB
.word 320f - L(\type\()_bilin_v_tbl) + CONFIG_THUMB
.word 160f - L(\type\()_bilin_v_tbl) + CONFIG_THUMB
.word 80f - L(\type\()_bilin_v_tbl) + CONFIG_THUMB
.word 40f - L(\type\()_bilin_v_tbl) + CONFIG_THUMB
.word 20f - L(\type\()_bilin_v_tbl) + CONFIG_THUMB
20: // 2xN v
.ifc \type, put
cmp \h, #2
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
// 2x2 v
vld1.32 {d16[]}, [\src], \s_strd
bgt 24f
22:
vld1.32 {d17[]}, [\sr2], \s_strd
vld1.32 {d18[]}, [\src], \s_strd
vext.8 d16, d16, d17, #4
vext.8 d17, d17, d18, #4
vmul.i16 d16, d16, d4
vmla.i16 d16, d17, d6
vrshr.u16 d16, d16, #4
vst1.32 {d16[0]}, [\dst, :32]
vst1.32 {d16[1]}, [\ds2, :32]
pop {r4-r11,pc}
24: // 2x4, 2x6, 2x8, ... v
vld1.32 {d17[]}, [\sr2], \s_strd
vld1.32 {d18[]}, [\src], \s_strd
vld1.32 {d19[]}, [\sr2], \s_strd
vld1.32 {d20[]}, [\src], \s_strd
subs \h, \h, #4
vext.8 d16, d16, d17, #4
vext.8 d17, d17, d18, #4
vext.8 d18, d18, d19, #4
vext.8 d19, d19, d20, #4
vswp d17, d18
vmul.i16 q8, q8, q2
vmla.i16 q8, q9, q3
cmp \h, #2
vrshr.u16 q8, q8, #4
vst1.32 {d16[0]}, [\dst, :32], \d_strd
vst1.32 {d16[1]}, [\ds2, :32], \d_strd
vst1.32 {d17[0]}, [\dst, :32], \d_strd
vst1.32 {d17[1]}, [\ds2, :32], \d_strd
blt 0f
vmov d16, d20
beq 22b
b 24b
0:
pop {r4-r11,pc}
.endif
40: // 4xN v
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
vld1.16 {d16}, [\src], \s_strd
4:
vld1.16 {d17}, [\sr2], \s_strd
vld1.16 {d19}, [\src], \s_strd
vmov d18, d17
vmul.i16 q8, q8, q2
vmla.i16 q8, q9, q3
subs \h, \h, #2
.ifc \type, put
vrshr.u16 q8, q8, #4
.else
vrshl.u16 q8, q8, q15
vsub.i16 q8, q8, q14
.endif
vst1.16 {d16}, [\dst, :64], \d_strd
vst1.16 {d17}, [\ds2, :64], \d_strd
ble 0f
vmov d16, d19
b 4b
0:
pop {r4-r11,pc}
80: // 8xN v
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
vld1.16 {q8}, [\src], \s_strd
8:
vld1.16 {q9}, [\sr2], \s_strd
vld1.16 {q10}, [\src], \s_strd
vmul.i16 q8, q8, q2
vmla.i16 q8, q9, q3
vmul.i16 q9, q9, q2
vmla.i16 q9, q10, q3
subs \h, \h, #2
.ifc \type, put
vrshr.u16 q8, q8, #4
vrshr.u16 q9, q9, #4
.else
vrshl.u16 q8, q8, q15
vrshl.u16 q9, q9, q15
vsub.i16 q8, q8, q14
vsub.i16 q9, q9, q14
.endif
vst1.16 {q8}, [\dst, :128], \d_strd
vst1.16 {q9}, [\ds2, :128], \d_strd
ble 0f
vmov q8, q10
b 8b
0:
pop {r4-r11,pc}
160: // 16xN, 32xN, ...
320:
640:
1280:
mov \my, \h
1:
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
vld1.16 {q8, q9}, [\src], \s_strd
2:
vld1.16 {q10, q11}, [\sr2], \s_strd
vld1.16 {q12, q13}, [\src], \s_strd
vmul.i16 q8, q8, q2
vmla.i16 q8, q10, q3
vmul.i16 q9, q9, q2
vmla.i16 q9, q11, q3
vmul.i16 q10, q10, q2
vmla.i16 q10, q12, q3
vmul.i16 q11, q11, q2
vmla.i16 q11, q13, q3
subs \h, \h, #2
.ifc \type, put
vrshr.u16 q8, q8, #4
vrshr.u16 q9, q9, #4
vrshr.u16 q10, q10, #4
vrshr.u16 q11, q11, #4
.else
vrshl.u16 q8, q8, q15
vrshl.u16 q9, q9, q15
vrshl.u16 q10, q10, q15
vrshl.u16 q11, q11, q15
vsub.i16 q8, q8, q14
vsub.i16 q9, q9, q14
vsub.i16 q10, q10, q14
vsub.i16 q11, q11, q14
.endif
vst1.16 {q8, q9}, [\dst, :128], \d_strd
vst1.16 {q10, q11}, [\ds2, :128], \d_strd
ble 9f
vmov q8, q12
vmov q9, q13
b 2b
9:
subs \w, \w, #16
ble 0f
asr \s_strd, \s_strd, #1
asr \d_strd, \d_strd, #1
mls \src, \s_strd, \my, \src
mls \dst, \d_strd, \my, \dst
sub \src, \src, \s_strd, lsl #1
mov \h, \my
add \src, \src, #32
add \dst, \dst, #32
b 1b
0:
pop {r4-r11,pc}
L(\type\()_bilin_hv):
adr r10, L(\type\()_bilin_hv_tbl)
vdup.16 q15, r11 // 4 - intermediate_bits
ldr r9, [r10, r9, lsl #2]
vneg.s16 q15, q15 // -(4-intermediate_bits)
.ifc \type, put
vdup.32 q14, r12 // 4 + intermediate_bits
.else
vmov.i16 q14, #PREP_BIAS
.endif
add r10, r10, r9
.ifc \type, put
vneg.s32 q14, q14 // -(4+intermediate_bits)
.endif
bx r10
.align 2
L(\type\()_bilin_hv_tbl):
.word 1280f - L(\type\()_bilin_hv_tbl) + CONFIG_THUMB
.word 640f - L(\type\()_bilin_hv_tbl) + CONFIG_THUMB
.word 320f - L(\type\()_bilin_hv_tbl) + CONFIG_THUMB
.word 160f - L(\type\()_bilin_hv_tbl) + CONFIG_THUMB
.word 80f - L(\type\()_bilin_hv_tbl) + CONFIG_THUMB
.word 40f - L(\type\()_bilin_hv_tbl) + CONFIG_THUMB
.word 20f - L(\type\()_bilin_hv_tbl) + CONFIG_THUMB
20: // 2xN hv
.ifc \type, put
add \sr2, \src, \s_strd
add \ds2, \dst, \d_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
vld1.16 {d20}, [\src], \s_strd
vext.8 d21, d20, d20, #2
vmul.i16 d16, d20, d0
vmla.i16 d16, d21, d2
vrshl.u16 d16, d16, d30
vext.8 d16, d16, d16, #4
2:
vld1.16 {d20}, [\sr2], \s_strd
vld1.16 {d22}, [\src], \s_strd
vext.8 d21, d20, d20, #2
vext.8 d23, d22, d22, #2
vtrn.32 d20, d22
vtrn.32 d21, d23
vmul.i16 d18, d20, d0
vmla.i16 d18, d21, d2
vrshl.u16 d18, d18, d30
vext.8 d16, d16, d18, #4
vmull.u16 q8, d16, d4
vmlal.u16 q8, d18, d6
vrshl.u32 q8, q8, q14
vmovn.i32 d16, q8
subs \h, \h, #2
vst1.32 {d16[0]}, [\dst, :32], \d_strd
vst1.32 {d16[1]}, [\ds2, :32], \d_strd
ble 0f
vmov d16, d18
b 2b
0:
pop {r4-r11,pc}
.endif
40: // 4xN hv
add \sr2, \src, \s_strd
add \ds2, \dst, \d_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
vld1.16 {q10}, [\src], \s_strd
vext.8 d21, d20, d21, #2
vmul.i16 d16, d20, d0
vmla.i16 d16, d21, d2
vrshl.u16 d16, d16, d30
4:
vld1.16 {q10}, [\sr2], \s_strd
vld1.16 {q11}, [\src], \s_strd
vext.8 d21, d20, d21, #2
vext.8 d23, d22, d23, #2
vswp d21, d22
vmul.i16 q9, q10, q0
vmla.i16 q9, q11, q1
vrshl.u16 q9, q9, q15
vmull.u16 q10, d16, d4
vmlal.u16 q10, d18, d6
vmull.u16 q11, d18, d4
vmlal.u16 q11, d19, d6
.ifc \type, put
vrshl.u32 q10, q10, q14
vrshl.u32 q11, q11, q14
vmovn.i32 d20, q10
vmovn.i32 d21, q11
.else
vrshrn.i32 d20, q10, #4
vrshrn.i32 d21, q11, #4
vsub.i16 q10, q10, q14
.endif
subs \h, \h, #2
vst1.16 {d20}, [\dst, :64], \d_strd
vst1.16 {d21}, [\ds2, :64], \d_strd
ble 0f
vmov d16, d19
b 4b
0:
pop {r4-r11,pc}
80: // 8xN, 16xN, ... hv
160:
320:
640:
1280:
mov \my, \h
1:
add \sr2, \src, \s_strd
add \ds2, \dst, \d_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
vld1.16 {d20, d21, d22}, [\src], \s_strd
vext.8 q11, q10, q11, #2
vmul.i16 q8, q10, q0
vmla.i16 q8, q11, q1
vrshl.u16 q8, q8, q15
2:
vld1.16 {d20, d21, d22}, [\sr2], \s_strd
vld1.16 {d24, d25, d26}, [\src], \s_strd
vext.8 q11, q10, q11, #2
vext.8 q13, q12, q13, #2
vmul.i16 q9, q10, q0
vmla.i16 q9, q11, q1
vmul.i16 q10, q12, q0
vmla.i16 q10, q13, q1
vrshl.u16 q9, q9, q15
vrshl.u16 q10, q10, q15
vmull.u16 q11, d16, d4
vmlal.u16 q11, d18, d6
vmull.u16 q12, d17, d4
vmlal.u16 q12, d19, d6
vmull.u16 q8, d18, d4
vmlal.u16 q8, d20, d6
vmull.u16 q9, d19, d4
vmlal.u16 q9, d21, d6
.ifc \type, put
vrshl.u32 q11, q11, q14
vrshl.u32 q12, q12, q14
vrshl.u32 q8, q8, q14
vrshl.u32 q9, q9, q14
vmovn.i32 d22, q11
vmovn.i32 d23, q12
vmovn.i32 d16, q8
vmovn.i32 d17, q9
.else
vrshrn.i32 d22, q11, #4
vrshrn.i32 d23, q12, #4
vrshrn.i32 d16, q8, #4
vrshrn.i32 d17, q9, #4
vsub.i16 q11, q11, q14
vsub.i16 q8, q8, q14
.endif
subs \h, \h, #2
vst1.16 {q11}, [\dst, :128], \d_strd
vst1.16 {q8}, [\ds2, :128], \d_strd
ble 9f
vmov q8, q10
b 2b
9:
subs \w, \w, #8
ble 0f
asr \s_strd, \s_strd, #1
asr \d_strd, \d_strd, #1
mls \src, \s_strd, \my, \src
mls \dst, \d_strd, \my, \dst
sub \src, \src, \s_strd, lsl #1
mov \h, \my
add \src, \src, #16
add \dst, \dst, #16
b 1b
0:
pop {r4-r11,pc}
endfunc
.endm
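// The macro is instantiated twice below: "put" writes final pixels and takes
// (dst, dst_stride) as its first arguments, while "prep" writes int16
// intermediates with PREP_BIAS subtracted and has no destination stride
// argument, so its d_strd parameter is mapped to the scratch register r8 and
// computed from w inside the macro.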
filter_fn put, r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10
filter_fn prep, r0, r8, r1, r2, r3, r4, r5, r6, r7, r9, r10
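// Helpers for the warp filter below: the fractional position in \src is
// turned into an index into dav1d's mc_warp_filter table (8 int8
// coefficients per entry, hence the lsl #3; r11 is set up with a 64-entry
// bias in the warp function below), and \src is advanced by the per-pixel
// step \inc.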
.macro load_filter_ptr src
asr r12, \src, #10
add r12, r11, r12, lsl #3
.endm
.macro load_filter_coef dst, src, inc
add \src, \src, \inc
vld1.8 {\dst}, [r12, :64]
.endm
.macro load_filter_row dst, src, inc
load_filter_ptr \src
load_filter_coef \dst, \src, \inc
.endm
function warp_filter_horz_neon
load_filter_ptr r5 // filter 0
vld1.16 {q6,q7}, [r2], r3
load_filter_coef d0, r5, r7 // filter 0
load_filter_row d2, r5, r7 // filter 1
vmovl.s8 q0, d0 // filter 0
vext.8 q3, q6, q7, #2*1 // filter 1 pixels
vmovl.s8 q1, d2 // filter 1
vmull.s16 q4, d12, d0 // filter 0 output (0-3)
vmull.s16 q5, d13, d1 // filter 0 output (4-7)
load_filter_ptr r5 // filter 2
vmull.s16 q2, d6, d2 // filter 1 output (0-3)
vmull.s16 q3, d7, d3 // filter 1 output (4-7)
load_filter_coef d0, r5, r7 // filter 2
vpadd.i32 d8, d8, d9 // half pixel 0 (2x32)
vpadd.i32 d9, d10, d11 // half pixel 0 (2x32)
load_filter_ptr r5 // filter 3
vpadd.i32 d4, d4, d5 // half pixel 1 (2x32)
vpadd.i32 d5, d6, d7 // half pixel 1 (2x32)
vmovl.s8 q0, d0 // filter 2
vext.8 q3, q6, q7, #2*2 // filter 2 pixels
vpadd.i32 d8, d8, d9 // pixel 0 (2x32)
vpadd.i32 d9, d4, d5 // pixel 1 (2x32)
load_filter_coef d2, r5, r7 // filter 3
vmull.s16 q2, d6, d0 // filter 2 output (0-3)
vmull.s16 q3, d7, d1 // filter 2 output (4-7)
load_filter_ptr r5 // filter 4
vpadd.i32 d8, d8, d9 // pixel 0,1
vpadd.i32 d9, d4, d5 // half pixel 2 (2x32)
vpadd.i32 d10, d6, d7 // half pixel 2 (2x32)
vmovl.s8 q1, d2 // filter 3
vext.8 q3, q6, q7, #2*3 // filter 3 pixels
load_filter_coef d0, r5, r7 // filter 4
vpadd.i32 d9, d9, d10 // pixel 2 (2x32)
vmull.s16 q2, d6, d2 // filter 3 output (0-3)
vmull.s16 q3, d7, d3 // filter 3 output (4-7)
vmovl.s8 q0, d0 // filter 4
load_filter_ptr r5 // filter 5
vpadd.i32 d10, d4, d5 // half pixel 3 (2x32)
vpadd.i32 d11, d6, d7 // half pixel 3 (2x32)
vext.8 q3, q6, q7, #2*4 // filter 4 pixels
load_filter_coef d2, r5, r7 // filter 5
vpadd.i32 d10, d10, d11 // pixel 3 (2x32)
vpadd.i32 d9, d9, d10 // pixel 2,3
vmull.s16 q2, d6, d0 // filter 4 output (0-3)
vmull.s16 q3, d7, d1 // filter 4 output (4-7)
vmovl.s8 q1, d2 // filter 5
load_filter_ptr r5 // filter 6
vpadd.i32 d10, d4, d5 // half pixel 4 (2x32)
vpadd.i32 d11, d6, d7 // half pixel 4 (2x32)
vext.8 q3, q6, q7, #2*5 // filter 5 pixels
load_filter_coef d0, r5, r7 // filter 6
vpadd.i32 d10, d10, d11 // pixel 4 (2x32)
vmull.s16 q2, d6, d2 // filter 5 output (0-3)
vmull.s16 q3, d7, d3 // filter 5 output (4-7)
vmovl.s8 q0, d0 // filter 6
load_filter_ptr r5 // filter 7
vpadd.i32 d4, d4, d5 // half pixel 5 (2x32)
vpadd.i32 d5, d6, d7 // half pixel 5 (2x32)
vext.8 q3, q6, q7, #2*6 // filter 6 pixels
load_filter_coef d2, r5, r7 // filter 7
vpadd.i32 d11, d4, d5 // pixel 5 (2x32)
vmull.s16 q2, d6, d0 // filter 6 output (0-3)
vmull.s16 q3, d7, d1 // filter 6 output (4-7)
vmovl.s8 q1, d2 // filter 7
vpadd.i32 d10, d10, d11 // pixel 4,5
vpadd.i32 d4, d4, d5 // half pixel 6 (2x32)
vpadd.i32 d5, d6, d7 // half pixel 6 (2x32)
vext.8 q3, q6, q7, #2*7 // filter 7 pixels
vpadd.i32 d11, d4, d5 // pixel 6 (2x32)
vmull.s16 q2, d6, d2 // filter 7 output (0-3)
vmull.s16 q3, d7, d3 // filter 7 output (4-7)
vld1.32 {d14[],d15[]}, [sp] // -(7 - intermediate_bits)
vpadd.i32 d4, d4, d5 // half pixel 7 (2x32)
vpadd.i32 d5, d6, d7 // half pixel 7 (2x32)
sub r5, r5, r7, lsl #3
vpadd.i32 d4, d4, d5 // pixel 7 (2x32)
add r5, r5, r8
vpadd.i32 d11, d11, d4 // pixel 6,7
vrshl.s32 q4, q4, q7 // -(7 - intermediate_bits)
vrshl.s32 q5, q5, q7 // -(7 - intermediate_bits)
bx lr
endfunc
// void dav1d_warp_affine_8x8_16bpc_neon(
// pixel *dst, const ptrdiff_t dst_stride,
// const pixel *src, const ptrdiff_t src_stride,
// const int16_t *const abcd, int mx, int my,
// const int bitdepth_max)
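// The macro below emits both variants: the plain one (put, clamping to
// bitdepth_max after rounding by 7 + intermediate_bits) and the "t" one
// (prep, rounding by 7 and subtracting PREP_BIAS). Seven source rows are
// filtered horizontally up front; each of the 8 output rows then filters one
// more source row and runs a vertical 8-tap filter with one filter per
// column, selected from the my accumulator (stepped by abcd[2] per column
// and abcd[3] per row, just as the horizontal position is stepped by
// abcd[0]/abcd[1]).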
.macro warp t
function warp_affine_8x8\t\()_16bpc_neon, export=1
push {r4-r11,lr}
vpush {q4-q7}
ldrd r4, r5, [sp, #100]
ldrd r6, r7, [sp, #108]
sub sp, sp, #8
clz r7, r7
// intermediate_bits = clz(bitdepth_max) - 18
.ifb \t
sub r8, r7, #11 // 7 + intermediate_bits = clz(bitdepth_max) - 18 + 7
.endif
sub r7, r7, #25 // -(7 - intermediate_bits)
.ifb \t
neg r8, r8 // -(7 + intermediate_bits)
.endif
str r7, [sp] // spill -(7 - intermediate_bits) on stack
.ifb \t
str r8, [sp, #4] // spill -(7 + intermediate_bits) on stack
.endif
ldrd r8, r9, [r4]
sxth r7, r8
asr r8, r8, #16
asr r4, r9, #16
sxth r9, r9
mov r10, #8
sub r2, r2, r3, lsl #1
sub r2, r2, r3
sub r2, r2, #6
movrel r11, X(mc_warp_filter), 64*8
.ifnb \t
lsl r1, r1, #1
.endif
add r5, r5, #512
add r6, r6, #512
bl warp_filter_horz_neon
vmovn.i32 d16, q4
vmovn.i32 d17, q5
bl warp_filter_horz_neon
vmovn.i32 d18, q4
vmovn.i32 d19, q5
bl warp_filter_horz_neon
vmovn.i32 d20, q4
vmovn.i32 d21, q5
bl warp_filter_horz_neon
vmovn.i32 d22, q4
vmovn.i32 d23, q5
bl warp_filter_horz_neon
vmovn.i32 d24, q4
vmovn.i32 d25, q5
bl warp_filter_horz_neon
vmovn.i32 d26, q4
vmovn.i32 d27, q5
bl warp_filter_horz_neon
vmovn.i32 d28, q4
vmovn.i32 d29, q5
1:
bl warp_filter_horz_neon
vmovn.i32 d30, q4
vmovn.i32 d31, q5
load_filter_row d8, r6, r9
load_filter_row d9, r6, r9
load_filter_row d10, r6, r9
load_filter_row d11, r6, r9
load_filter_row d12, r6, r9
load_filter_row d13, r6, r9
load_filter_row d14, r6, r9
load_filter_row d15, r6, r9
transpose_8x8b q4, q5, q6, q7, d8, d9, d10, d11, d12, d13, d14, d15
vmovl.s8 q1, d8
vmovl.s8 q2, d9
vmovl.s8 q3, d10
vmovl.s8 q4, d11
vmovl.s8 q5, d12
vmovl.s8 q6, d13
sub r6, r6, r9, lsl #3
// This ordering of vmull/vmlal is highly beneficial for
// Cortex A8/A9/A53 here, but harmful for Cortex A7.
vmull.s16 q0, d16, d2
vmlal.s16 q0, d18, d4
vmlal.s16 q0, d20, d6
vmlal.s16 q0, d22, d8
vmlal.s16 q0, d24, d10
vmlal.s16 q0, d26, d12
vmull.s16 q1, d17, d3
vmlal.s16 q1, d19, d5
vmlal.s16 q1, d21, d7
vmlal.s16 q1, d23, d9
vmlal.s16 q1, d25, d11
vmlal.s16 q1, d27, d13
vmovl.s8 q2, d14
vmovl.s8 q3, d15
vmlal.s16 q0, d28, d4
vmlal.s16 q0, d30, d6
vmlal.s16 q1, d29, d5
vmlal.s16 q1, d31, d7
.ifb \t
ldr lr, [sp, #4] // -(7 + intermediate_bits)
ldr r12, [sp, #120] // bitdepth_max
vdup.32 q2, lr // -(7 + intermediate_bits)
vdup.16 q3, r12 // bitdepth_max
.endif
vmov q8, q9
vmov q9, q10
.ifb \t
vrshl.s32 q0, q0, q2 // -(7 + intermediate_bits)
vrshl.s32 q1, q1, q2 // -(7 + intermediate_bits)
.else
vrshrn.s32 d0, q0, #7
vrshrn.s32 d1, q1, #7
vmov.i16 q3, #PREP_BIAS
.endif
vmov q10, q11
.ifb \t
vqmovun.s32 d0, q0
vqmovun.s32 d1, q1
.else
vsub.i16 q0, q0, q3 // PREP_BIAS
.endif
vmov q11, q12
vmov q12, q13
.ifb \t
vmin.u16 q0, q0, q3 // bitdepth_max
.endif
vmov q13, q14
vmov q14, q15
subs r10, r10, #1
vst1.16 {q0}, [r0, :128], r1
add r6, r6, r4
bgt 1b
add sp, sp, #8
vpop {q4-q7}
pop {r4-r11,pc}
endfunc
.endm
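// Instantiate the two variants declared above: warp_affine_8x8_16bpc_neon
// (put) and warp_affine_8x8t_16bpc_neon (prep/tmp output).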
warp
warp t
// void dav1d_emu_edge_16bpc_neon(
// const intptr_t bw, const intptr_t bh,
// const intptr_t iw, const intptr_t ih,
// const intptr_t x, const intptr_t y,
// pixel *dst, const ptrdiff_t dst_stride,
// const pixel *ref, const ptrdiff_t ref_stride)
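// The block is built in three steps: the reference pointer is first clamped
// into the valid iw x ih area, the center rows are then copied (in 32-pixel
// chunks) with the leftmost/rightmost valid pixel replicated into the
// left/right extensions, and finally the first/last written row is
// replicated upwards/downwards to fill the top/bottom extensions.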
function emu_edge_16bpc_neon, export=1
push {r4-r11,lr}
ldrd r4, r5, [sp, #36]
ldrd r6, r7, [sp, #44]
ldrd r8, r9, [sp, #52]
// ref += iclip(y, 0, ih - 1) * PXSTRIDE(ref_stride)
// ref += iclip(x, 0, iw - 1)
sub r12, r3, #1 // ih - 1
cmp r5, r3
sub lr, r2, #1 // iw - 1
it lt
movlt r12, r5 // min(y, ih - 1)
cmp r4, r2
bic r12, r12, r12, asr #31 // max(min(y, ih - 1), 0)
it lt
movlt lr, r4 // min(x, iw - 1)
bic lr, lr, lr, asr #31 // max(min(x, iw - 1), 0)
mla r8, r12, r9, r8 // ref += iclip() * stride
add r8, r8, lr, lsl #1 // ref += iclip()
// bottom_ext = iclip(y + bh - ih, 0, bh - 1)
// top_ext = iclip(-y, 0, bh - 1)
add r10, r5, r1 // y + bh
neg r5, r5 // -y
sub r10, r10, r3 // y + bh - ih
sub r12, r1, #1 // bh - 1
cmp r10, r1
bic r5, r5, r5, asr #31 // max(-y, 0)
it ge
movge r10, r12 // min(y + bh - ih, bh-1)
cmp r5, r1
bic r10, r10, r10, asr #31 // max(min(y + bh - ih, bh-1), 0)
it ge
movge r5, r12 // min(max(-y, 0), bh-1)
// right_ext = iclip(x + bw - iw, 0, bw - 1)
// left_ext = iclip(-x, 0, bw - 1)
add r11, r4, r0 // x + bw
neg r4, r4 // -x
sub r11, r11, r2 // x + bw - iw
sub lr, r0, #1 // bw - 1
cmp r11, r0
bic r4, r4, r4, asr #31 // max(-x, 0)
it ge
movge r11, lr // min(x + bw - iw, bw-1)
cmp r4, r0
bic r11, r11, r11, asr #31 // max(min(x + bw - iw, bw-1), 0)
it ge
movge r4, lr // min(max(-x, 0), bw - 1)
// center_h = bh - top_ext - bottom_ext
// dst += top_ext * PXSTRIDE(dst_stride)
// center_w = bw - left_ext - right_ext
sub r1, r1, r5 // bh - top_ext
mla r6, r5, r7, r6
sub r2, r0, r4 // bw - left_ext
sub r1, r1, r10 // center_h = bh - top_ext - bottom_ext
sub r2, r2, r11 // center_w = bw - left_ext - right_ext
mov r0, r6 // backup of dst
.macro v_loop need_left, need_right
0:
.if \need_left
vld1.16 {d0[], d1[]}, [r8]
mov r12, r6 // out = dst
mov r3, r4
vmov q1, q0
1:
subs r3, r3, #16
vst1.16 {q0, q1}, [r12, :128]!
bgt 1b
.endif
mov lr, r8
add r12, r6, r4, lsl #1 // out = dst + left_ext
mov r3, r2
1:
vld1.16 {q0, q1}, [lr]!
subs r3, r3, #32
vld1.16 {q2, q3}, [lr]!
.if \need_left
vst1.16 {q0, q1}, [r12]!
vst1.16 {q2, q3}, [r12]!
.else
vst1.16 {q0, q1}, [r12, :128]!
vst1.16 {q2, q3}, [r12, :128]!
.endif
bgt 1b
.if \need_right
add r3, r8, r2, lsl #1 // in + center_w
sub r3, r3, #2 // in + center_w - 1
add r12, r6, r4, lsl #1 // dst + left_ext
vld1.16 {d0[], d1[]}, [r3]
add r12, r12, r2, lsl #1 // out = dst + left_ext + center_w
mov r3, r11
vmov q1, q0
1:
subs r3, r3, #16
vst1.16 {q0, q1}, [r12]!
bgt 1b
.endif
subs r1, r1, #1 // center_h--
add r6, r6, r7
add r8, r8, r9
bgt 0b
.endm
cmp r4, #0
beq 2f
// need_left
cmp r11, #0
beq 3f
// need_left + need_right
v_loop 1, 1
b 5f
2:
// !need_left
cmp r11, #0
beq 4f
// !need_left + need_right
v_loop 0, 1
b 5f
3:
// need_left + !need_right
v_loop 1, 0
b 5f
4:
// !need_left + !need_right
v_loop 0, 0
5:
cmp r10, #0
// Storing the original dst in r0 overwrote bw, recalculate it here
add r2, r2, r4 // center_w + left_ext
add r2, r2, r11 // bw = center_w + left_ext + right_ext
beq 3f
// need_bottom
sub r8, r6, r7 // ref = dst - stride
mov r4, r2
sub r12, r7, #32
1:
vld1.16 {q0, q1}, [r8, :128]!
mov r3, r10
vld1.16 {q2, q3}, [r8, :128]!
2:
vst1.16 {q0, q1}, [r6, :128]!
subs r3, r3, #1
vst1.16 {q2, q3}, [r6, :128], r12
bgt 2b
mls r6, r7, r10, r6 // dst -= bottom_ext * stride
subs r4, r4, #32 // bw -= 32
add r6, r6, #64 // dst += 32
bgt 1b
3:
cmp r5, #0
beq 3f
// need_top
mls r6, r7, r5, r0 // dst = stored_dst - top_ext * stride
sub r12, r7, #32
1:
vld1.16 {q0, q1}, [r0, :128]!
mov r3, r5
vld1.16 {q2, q3}, [r0, :128]!
2:
vst1.16 {q0, q1}, [r6, :128]!
subs r3, r3, #1
vst1.16 {q2, q3}, [r6, :128], r12
bgt 2b
mls r6, r7, r5, r6 // dst -= top_ext * stride
subs r2, r2, #32 // bw -= 32
add r6, r6, #64 // dst += 32
bgt 1b
3:
pop {r4-r11,pc}
endfunc
// File: third_party/dav1d/src/arm/32/loopfilter.S
/*
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2019, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm.S"
#include "util.S"
.macro loop_filter wd
function lpf_8_wd\wd\()_neon
vabd.u8 d0, d22, d23 // abs(p1 - p0)
vabd.u8 d1, d25, d24 // abs(q1 - q0)
vabd.u8 d2, d23, d24 // abs(p0 - q0)
vabd.u8 d3, d22, d25 // abs(p1 - q1)
.if \wd >= 6
vabd.u8 d4, d21, d22 // abs(p2 - p1)
vabd.u8 d5, d26, d25 // abs(q2 - q1)
.endif
.if \wd >= 8
vabd.u8 d6, d20, d21 // abs(p3 - p2)
vabd.u8 d7, d27, d26 // abs(q3 - q2)
.endif
.if \wd >= 6
vmax.u8 d4, d4, d5
.endif
vqadd.u8 d2, d2, d2 // abs(p0 - q0) * 2
.if \wd >= 8
vmax.u8 d6, d6, d7
.endif
vshr.u8 d3, d3, #1
.if \wd >= 8
vmax.u8 d4, d4, d6
.endif
.if \wd >= 6
vand d4, d4, d14
.endif
vmax.u8 d0, d0, d1 // max(abs(p1 - p0), abs(q1 - q0))
vqadd.u8 d2, d2, d3 // abs(p0 - q0) * 2 + abs(p1 - q1) >> 1
.if \wd >= 6
vmax.u8 d4, d0, d4
vcge.u8 d1, d11, d4 // max(abs(p1 - p0), abs(q1 - q0), abs(), abs(), ...) <= I
.else
vcge.u8 d1, d11, d0 // max(abs(p1 - p0), abs(q1 - q0)) <= I
.endif
vcge.u8 d2, d10, d2 // abs(p0 - q0) * 2 + abs(p1 - q1) >> 1 <= E
vand d1, d1, d2 // fm
vand d1, d1, d13 // fm && wd >= 4
.if \wd >= 6
vand d14, d14, d1 // fm && wd > 4
.endif
.if \wd >= 16
vand d15, d15, d1 // fm && wd == 16
.endif
vmov r10, r11, d1
orrs r10, r10, r11
beq 9f // if (!fm || wd < 4) return;
.if \wd >= 6
vmov.i8 d10, #1
vabd.u8 d2, d21, d23 // abs(p2 - p0)
vabd.u8 d3, d22, d23 // abs(p1 - p0)
vabd.u8 d4, d25, d24 // abs(q1 - q0)
vabd.u8 d5, d26, d24 // abs(q2 - q0)
.if \wd >= 8
vabd.u8 d6, d20, d23 // abs(p3 - p0)
vabd.u8 d7, d27, d24 // abs(q3 - q0)
.endif
vmax.u8 d2, d2, d3
vmax.u8 d4, d4, d5
.if \wd >= 8
vmax.u8 d6, d6, d7
.endif
vmax.u8 d2, d2, d4
.if \wd >= 8
vmax.u8 d2, d2, d6
.endif
.if \wd == 16
vabd.u8 d3, d17, d23 // abs(p6 - p0)
vabd.u8 d4, d18, d23 // abs(p5 - p0)
vabd.u8 d5, d19, d23 // abs(p4 - p0)
.endif
vcge.u8 d2, d10, d2 // flat8in
.if \wd == 16
vabd.u8 d6, d28, d24 // abs(q4 - q0)
vabd.u8 d7, d29, d24 // abs(q5 - q0)
vabd.u8 d8, d30, d24 // abs(q6 - q0)
.endif
vand d14, d2, d14 // flat8in && fm && wd > 4
vbic d1, d1, d14 // fm && wd >= 4 && !flat8in
.if \wd == 16
vmax.u8 d3, d3, d4
vmax.u8 d5, d5, d6
.endif
vmov r10, r11, d1
.if \wd == 16
vmax.u8 d7, d7, d8
vmax.u8 d3, d3, d5
vmax.u8 d3, d3, d7
vcge.u8 d3, d10, d3 // flat8out
.endif
orrs r10, r10, r11
.if \wd == 16
vand d15, d15, d3 // flat8out && fm && wd == 16
vand d15, d15, d14 // flat8out && flat8in && fm && wd == 16
vbic d14, d14, d15 // flat8in && fm && wd >= 4 && !flat8out
.endif
beq 1f // skip wd == 4 case
.endif
vsubl.u8 q1, d22, d25 // p1 - q1
vcgt.u8 d0, d0, d12 // hev
vqmovn.s16 d2, q1
vand d4, d2, d0 // if (hev) iclip_diff(p1 - q1)
vbic d0, d1, d0 // (fm && wd >= 4 && !hev)
vsubl.u8 q1, d24, d23
vmov.i16 q3, #3
vmul.i16 q1, q1, q3
vmov.i8 d6, #4
vaddw.s8 q1, q1, d4
vmov.i8 d7, #3
vqmovn.s16 d2, q1 // f
vqadd.s8 d4, d6, d2 // imin(f + 4, 127)
vqadd.s8 d5, d7, d2 // imin(f + 3, 127)
vshr.s8 d4, d4, #3 // f1
vshr.s8 d5, d5, #3 // f2
vmovl.u8 q1, d23 // p0
vmovl.u8 q3, d24 // q0
vaddw.s8 q1, q1, d5
vsubw.s8 q3, q3, d4
vrshr.s8 d4, d4, #1 // (f1 + 1) >> 1
vqmovun.s16 d2, q1 // out p0
vqmovun.s16 d6, q3 // out q0
vbit d23, d2, d1 // if (fm && wd >= 4)
vmovl.u8 q1, d22 // p1
vbit d24, d6, d1 // if (fm && wd >= 4)
vmovl.u8 q3, d25 // q1
vaddw.s8 q1, q1, d4
vsubw.s8 q3, q3, d4
vqmovun.s16 d2, q1 // out p1
vqmovun.s16 d6, q3 // out q1
vbit d22, d2, d0 // if (fm && wd >= 4 && !hev)
vbit d25, d6, d0 // if (fm && wd >= 4 && !hev)
1:
.if \wd == 6
vmov r10, r11, d14
orrs r10, r10, r11
beq 2f // skip if there's no flat8in
vaddl.u8 q0, d21, d21 // p2 * 2
vaddl.u8 q1, d21, d22 // p2 + p1
vaddl.u8 q2, d22, d23 // p1 + p0
vaddl.u8 q3, d23, d24 // p0 + q0
vadd.i16 q4, q0, q1
vadd.i16 q5, q2, q3
vaddl.u8 q6, d24, d25 // q0 + q1
vadd.i16 q4, q4, q5
vsub.i16 q6, q6, q0
vaddl.u8 q5, d25, d26 // q1 + q2
vrshrn.i16 d0, q4, #3 // out p1
vadd.i16 q4, q4, q6
vsub.i16 q5, q5, q1
vaddl.u8 q6, d26, d26 // q2 + q2
vrshrn.i16 d1, q4, #3 // out p0
vadd.i16 q4, q4, q5
vsub.i16 q6, q6, q2
vrshrn.i16 d2, q4, #3 // out q0
vbit d22, d0, d14 // p1 if (flat8in)
vadd.i16 q4, q4, q6
vbit d23, d1, d14 // p0 if (flat8in)
vrshrn.i16 d3, q4, #3 // out q1
vbit d24, d2, d14 // q0 if (flat8in)
vbit d25, d3, d14 // q1 if (flat8in)
.elseif \wd >= 8
vmov r10, r11, d14
orrs r10, r10, r11
.if \wd == 8
beq 8f // skip if there's no flat8in
.else
beq 2f // skip if there's no flat8in
.endif
vaddl.u8 q0, d20, d21 // p3 + p2
vaddl.u8 q1, d22, d25 // p1 + q1
vaddl.u8 q2, d20, d22 // p3 + p1
vaddl.u8 q3, d23, d26 // p0 + q2
vadd.i16 q4, q0, q0 // 2 * (p3 + p2)
vaddw.u8 q4, q4, d23 // + p0
vaddw.u8 q4, q4, d24 // + q0
vadd.i16 q4, q4, q2 // + p3 + p1
vsub.i16 q1, q1, q0 // p1 + q1 - p3 - p2
vsub.i16 q3, q3, q2 // p0 + q2 - p3 - p1
vrshrn.i16 d10, q4, #3 // out p2
vadd.i16 q4, q4, q1
vaddl.u8 q0, d20, d23 // p3 + p0
vaddl.u8 q1, d24, d27 // q0 + q3
vrshrn.i16 d11, q4, #3 // out p1
vadd.i16 q4, q4, q3
vsub.i16 q1, q1, q0 // q0 + q3 - p3 - p0
vaddl.u8 q2, d21, d24 // p2 + q0
vaddl.u8 q3, d25, d27 // q1 + q3
vrshrn.i16 d12, q4, #3 // out p0
vadd.i16 q4, q4, q1
vsub.i16 q3, q3, q2 // q1 + q3 - p2 - q0
vaddl.u8 q0, d22, d25 // p1 + q1
vaddl.u8 q1, d26, d27 // q2 + q3
vrshrn.i16 d13, q4, #3 // out q0
vadd.i16 q4, q4, q3
vsub.i16 q1, q1, q0 // q2 + q3 - p1 - q1
vrshrn.i16 d0, q4, #3 // out q1
vadd.i16 q4, q4, q1
vbit d21, d10, d14
vbit d22, d11, d14
vbit d23, d12, d14
vrshrn.i16 d1, q4, #3 // out q2
vbit d24, d13, d14
vbit d25, d0, d14
vbit d26, d1, d14
.endif
2:
.if \wd == 16
vmov r10, r11, d15
orrs r10, r10, r11
bne 1f // check if flat8out is needed
vmov r10, r11, d14
orrs r10, r10, r11
beq 8f // if there was no flat8in, just write the inner 4 pixels
b 7f // if flat8in was used, write the inner 6 pixels
1:
vaddl.u8 q1, d17, d17 // p6 + p6
vaddl.u8 q2, d17, d18 // p6 + p5
vaddl.u8 q3, d17, d19 // p6 + p4
vaddl.u8 q4, d17, d20 // p6 + p3
vadd.i16 q6, q1, q2
vadd.i16 q5, q3, q4
vaddl.u8 q3, d17, d21 // p6 + p2
vadd.i16 q6, q6, q5
vaddl.u8 q4, d17, d22 // p6 + p1
vaddl.u8 q5, d18, d23 // p5 + p0
vadd.i16 q3, q3, q4
vaddl.u8 q4, d19, d24 // p4 + q0
vadd.i16 q6, q6, q3
vadd.i16 q5, q5, q4
vaddl.u8 q3, d20, d25 // p3 + q1
vadd.i16 q6, q6, q5
vsub.i16 q3, q3, q1
vaddl.u8 q1, d21, d26 // p2 + q2
vrshrn.i16 d0, q6, #4 // out p5
vadd.i16 q6, q6, q3 // - (p6 + p6) + (p3 + q1)
vsub.i16 q1, q1, q2
vaddl.u8 q2, d22, d27 // p1 + q3
vaddl.u8 q3, d17, d19 // p6 + p4
vrshrn.i16 d1, q6, #4 // out p4
vadd.i16 q6, q6, q1 // - (p6 + p5) + (p2 + q2)
vsub.i16 q2, q2, q3
vaddl.u8 q3, d23, d28 // p0 + q4
vaddl.u8 q4, d17, d20 // p6 + p3
vrshrn.i16 d2, q6, #4 // out p3
vadd.i16 q6, q6, q2 // - (p6 + p4) + (p1 + q3)
vsub.i16 q3, q3, q4
vaddl.u8 q4, d24, d29 // q0 + q5
vaddl.u8 q2, d17, d21 // p6 + p2
vrshrn.i16 d3, q6, #4 // out p2
vadd.i16 q6, q6, q3 // - (p6 + p3) + (p0 + q4)
vsub.i16 q4, q4, q2
vaddl.u8 q3, d25, d30 // q1 + q6
vaddl.u8 q5, d17, d22 // p6 + p1
vrshrn.i16 d4, q6, #4 // out p1
vadd.i16 q6, q6, q4 // - (p6 + p2) + (q0 + q5)
vsub.i16 q3, q3, q5
vaddl.u8 q4, d26, d30 // q2 + q6
vbif d0, d18, d15 // out p5
vaddl.u8 q5, d18, d23 // p5 + p0
vrshrn.i16 d5, q6, #4 // out p0
vadd.i16 q6, q6, q3 // - (p6 + p1) + (q1 + q6)
vsub.i16 q4, q4, q5
vaddl.u8 q5, d27, d30 // q3 + q6
vbif d1, d19, d15 // out p4
vaddl.u8 q9, d19, d24 // p4 + q0
vrshrn.i16 d6, q6, #4 // out q0
vadd.i16 q6, q6, q4 // - (p5 + p0) + (q2 + q6)
vsub.i16 q5, q5, q9
vaddl.u8 q4, d28, d30 // q4 + q6
vbif d2, d20, d15 // out p3
vaddl.u8 q9, d20, d25 // p3 + q1
vrshrn.i16 d7, q6, #4 // out q1
vadd.i16 q6, q6, q5 // - (p4 + q0) + (q3 + q6)
vsub.i16 q9, q4, q9
vaddl.u8 q5, d29, d30 // q5 + q6
vbif d3, d21, d15 // out p2
vaddl.u8 q10, d21, d26 // p2 + q2
vrshrn.i16 d8, q6, #4 // out q2
vadd.i16 q6, q6, q9 // - (p3 + q1) + (q4 + q6)
vsub.i16 q5, q5, q10
vaddl.u8 q9, d30, d30 // q6 + q6
vbif d4, d22, d15 // out p1
vaddl.u8 q10, d22, d27 // p1 + q3
vrshrn.i16 d9, q6, #4 // out q3
vadd.i16 q6, q6, q5 // - (p2 + q2) + (q5 + q6)
vsub.i16 q9, q9, q10
vbif d5, d23, d15 // out p0
vrshrn.i16 d10, q6, #4 // out q4
vadd.i16 q6, q6, q9 // - (p1 + q3) + (q6 + q6)
vrshrn.i16 d11, q6, #4 // out q5
vbif d6, d24, d15 // out q0
vbif d7, d25, d15 // out q1
vbif d8, d26, d15 // out q2
vbif d9, d27, d15 // out q3
vbif d10, d28, d15 // out q4
vbif d11, d29, d15 // out q5
.endif
bx lr
.if \wd == 16
7:
// Return to a shorter epilogue, writing only the inner 6 pixels
bx r8
.endif
.if \wd >= 8
8:
// Return to a shorter epilogue, writing only the inner 4 pixels
bx r9
.endif
9:
// Return directly without writing back any pixels
bx r12
endfunc
.endm
loop_filter 16
loop_filter 8
loop_filter 6
loop_filter 4
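// Wrappers around the functions above: they preload the alternative return
// addresses (7f: write back 6 pixels, 8f: write back 4 pixels) expected in
// r8/r9 before calling the shared filter; the 7:/8: labels are defined in
// each lpf_{v,h}_*_8_neon function below.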
.macro lpf_8_wd16
adr r8, 7f + CONFIG_THUMB
adr r9, 8f + CONFIG_THUMB
bl lpf_8_wd16_neon
.endm
.macro lpf_8_wd8
adr r9, 8f + CONFIG_THUMB
bl lpf_8_wd8_neon
.endm
.macro lpf_8_wd6
bl lpf_8_wd6_neon
.endm
.macro lpf_8_wd4
bl lpf_8_wd4_neon
.endm
function lpf_v_4_8_neon
mov r12, lr
sub r10, r0, r1, lsl #1
vld1.8 {d22}, [r10, :64], r1 // p1
vld1.8 {d24}, [r0, :64], r1 // q0
vld1.8 {d23}, [r10, :64], r1 // p0
vld1.8 {d25}, [r0, :64], r1 // q1
sub r0, r0, r1, lsl #1
lpf_8_wd4
sub r10, r0, r1, lsl #1
vst1.8 {d22}, [r10, :64], r1 // p1
vst1.8 {d24}, [r0, :64], r1 // q0
vst1.8 {d23}, [r10, :64], r1 // p0
vst1.8 {d25}, [r0, :64], r1 // q1
sub r0, r0, r1, lsl #1
bx r12
endfunc
function lpf_h_4_8_neon
mov r12, lr
sub r10, r0, #2
add r0, r10, r1, lsl #2
vld1.32 {d22[0]}, [r10], r1
vld1.32 {d22[1]}, [r0], r1
vld1.32 {d23[0]}, [r10], r1
vld1.32 {d23[1]}, [r0], r1
vld1.32 {d24[0]}, [r10], r1
vld1.32 {d24[1]}, [r0], r1
vld1.32 {d25[0]}, [r10], r1
vld1.32 {d25[1]}, [r0], r1
add r0, r0, #2
transpose_4x8b q11, q12, d22, d23, d24, d25
lpf_8_wd4
sub r10, r0, r1, lsl #3
sub r10, r10, #2
transpose_4x8b q11, q12, d22, d23, d24, d25
add r0, r10, r1, lsl #2
vst1.32 {d22[0]}, [r10], r1
vst1.32 {d22[1]}, [r0], r1
vst1.32 {d23[0]}, [r10], r1
vst1.32 {d23[1]}, [r0], r1
vst1.32 {d24[0]}, [r10], r1
vst1.32 {d24[1]}, [r0], r1
vst1.32 {d25[0]}, [r10], r1
vst1.32 {d25[1]}, [r0], r1
add r0, r0, #2
bx r12
endfunc
function lpf_v_6_8_neon
mov r12, lr
sub r10, r0, r1, lsl #1
sub r10, r10, r1
vld1.8 {d21}, [r10, :64], r1 // p2
vld1.8 {d24}, [r0, :64], r1 // q0
vld1.8 {d22}, [r10, :64], r1 // p1
vld1.8 {d25}, [r0, :64], r1 // q1
vld1.8 {d23}, [r10, :64], r1 // p0
vld1.8 {d26}, [r0, :64], r1 // q2
sub r0, r0, r1, lsl #1
sub r0, r0, r1
lpf_8_wd6
sub r10, r0, r1, lsl #1
vst1.8 {d22}, [r10, :64], r1 // p1
vst1.8 {d24}, [r0, :64], r1 // q0
vst1.8 {d23}, [r10, :64], r1 // p0
vst1.8 {d25}, [r0, :64], r1 // q1
sub r0, r0, r1, lsl #1
bx r12
endfunc
function lpf_h_6_8_neon
mov r12, lr
sub r10, r0, #4
add r0, r10, r1, lsl #2
vld1.8 {d20}, [r10], r1
vld1.8 {d24}, [r0], r1
vld1.8 {d21}, [r10], r1
vld1.8 {d25}, [r0], r1
vld1.8 {d22}, [r10], r1
vld1.8 {d26}, [r0], r1
vld1.8 {d23}, [r10], r1
vld1.8 {d27}, [r0], r1
add r0, r0, #4
transpose_8x8b q10, q11, q12, q13, d20, d21, d22, d23, d24, d25, d26, d27
lpf_8_wd6
sub r10, r0, r1, lsl #3
sub r10, r10, #2
transpose_4x8b q11, q12, d22, d23, d24, d25
add r0, r10, r1, lsl #2
vst1.32 {d22[0]}, [r10], r1
vst1.32 {d22[1]}, [r0], r1
vst1.32 {d23[0]}, [r10], r1
vst1.32 {d23[1]}, [r0], r1
vst1.32 {d24[0]}, [r10], r1
vst1.32 {d24[1]}, [r0], r1
vst1.32 {d25[0]}, [r10], r1
vst1.32 {d25[1]}, [r0], r1
add r0, r0, #2
bx r12
endfunc
function lpf_v_8_8_neon
mov r12, lr
sub r10, r0, r1, lsl #2
vld1.8 {d20}, [r10, :64], r1 // p3
vld1.8 {d24}, [r0, :64], r1 // q0
vld1.8 {d21}, [r10, :64], r1 // p2
vld1.8 {d25}, [r0, :64], r1 // q1
vld1.8 {d22}, [r10, :64], r1 // p1
vld1.8 {d26}, [r0, :64], r1 // q2
vld1.8 {d23}, [r10, :64], r1 // p0
vld1.8 {d27}, [r0, :64], r1 // q3
sub r0, r0, r1, lsl #2
lpf_8_wd8
sub r10, r0, r1, lsl #1
sub r10, r10, r1
vst1.8 {d21}, [r10, :64], r1 // p2
vst1.8 {d24}, [r0, :64], r1 // q0
vst1.8 {d22}, [r10, :64], r1 // p1
vst1.8 {d25}, [r0, :64], r1 // q1
vst1.8 {d23}, [r10, :64], r1 // p0
vst1.8 {d26}, [r0, :64], r1 // q2
sub r0, r0, r1, lsl #1
sub r0, r0, r1
bx r12
8:
sub r10, r0, r1, lsl #1
vst1.8 {d22}, [r10, :64], r1 // p1
vst1.8 {d24}, [r0, :64], r1 // q0
vst1.8 {d23}, [r10, :64], r1 // p0
vst1.8 {d25}, [r0, :64], r1 // q1
sub r0, r0, r1, lsl #1
bx r12
endfunc
function lpf_h_8_8_neon
mov r12, lr
sub r10, r0, #4
add r0, r10, r1, lsl #2
vld1.8 {d20}, [r10], r1
vld1.8 {d24}, [r0], r1
vld1.8 {d21}, [r10], r1
vld1.8 {d25}, [r0], r1
vld1.8 {d22}, [r10], r1
vld1.8 {d26}, [r0], r1
vld1.8 {d23}, [r10], r1
vld1.8 {d27}, [r0], r1
add r0, r0, #4
transpose_8x8b q10, q11, q12, q13, d20, d21, d22, d23, d24, d25, d26, d27
lpf_8_wd8
sub r10, r0, r1, lsl #3
sub r10, r10, #4
transpose_8x8b q10, q11, q12, q13, d20, d21, d22, d23, d24, d25, d26, d27
add r0, r10, r1, lsl #2
vst1.8 {d20}, [r10], r1
vst1.8 {d24}, [r0], r1
vst1.8 {d21}, [r10], r1
vst1.8 {d25}, [r0], r1
vst1.8 {d22}, [r10], r1
vst1.8 {d26}, [r0], r1
vst1.8 {d23}, [r10], r1
vst1.8 {d27}, [r0], r1
add r0, r0, #4
bx r12
8:
sub r10, r0, r1, lsl #3
sub r10, r10, #2
transpose_4x8b q11, q12, d22, d23, d24, d25
add r0, r10, r1, lsl #2
vst1.32 {d22[0]}, [r10], r1
vst1.32 {d22[1]}, [r0], r1
vst1.32 {d23[0]}, [r10], r1
vst1.32 {d23[1]}, [r0], r1
vst1.32 {d24[0]}, [r10], r1
vst1.32 {d24[1]}, [r0], r1
vst1.32 {d25[0]}, [r10], r1
vst1.32 {d25[1]}, [r0], r1
add r0, r0, #2
bx r12
endfunc
function lpf_v_16_8_neon
mov r12, lr
sub r10, r0, r1, lsl #3
add r10, r10, r1
vld1.8 {d17}, [r10, :64], r1 // p6
vld1.8 {d24}, [r0, :64], r1 // q0
vld1.8 {d18}, [r10, :64], r1 // p5
vld1.8 {d25}, [r0, :64], r1 // q1
vld1.8 {d19}, [r10, :64], r1 // p4
vld1.8 {d26}, [r0, :64], r1 // q2
vld1.8 {d20}, [r10, :64], r1 // p3
vld1.8 {d27}, [r0, :64], r1 // q3
vld1.8 {d21}, [r10, :64], r1 // p2
vld1.8 {d28}, [r0, :64], r1 // q4
vld1.8 {d22}, [r10, :64], r1 // p1
vld1.8 {d29}, [r0, :64], r1 // q5
vld1.8 {d23}, [r10, :64], r1 // p0
vld1.8 {d30}, [r0, :64], r1 // q6
sub r0, r0, r1, lsl #3
add r0, r0, r1
lpf_8_wd16
sub r10, r0, r1, lsl #2
sub r10, r10, r1, lsl #1
vst1.8 {d0}, [r10, :64], r1 // p5
vst1.8 {d6}, [r0, :64], r1 // q0
vst1.8 {d1}, [r10, :64], r1 // p4
vst1.8 {d7}, [r0, :64], r1 // q1
vst1.8 {d2}, [r10, :64], r1 // p3
vst1.8 {d8}, [r0, :64], r1 // q2
vst1.8 {d3}, [r10, :64], r1 // p2
vst1.8 {d9}, [r0, :64], r1 // q3
vst1.8 {d4}, [r10, :64], r1 // p1
vst1.8 {d10}, [r0, :64], r1 // q4
vst1.8 {d5}, [r10, :64], r1 // p0
vst1.8 {d11}, [r0, :64], r1 // q5
sub r0, r0, r1, lsl #2
sub r0, r0, r1, lsl #1
bx r12
7:
sub r10, r0, r1
sub r10, r10, r1, lsl #1
vst1.8 {d21}, [r10, :64], r1 // p2
vst1.8 {d24}, [r0, :64], r1 // q0
vst1.8 {d22}, [r10, :64], r1 // p1
vst1.8 {d25}, [r0, :64], r1 // q1
vst1.8 {d23}, [r10, :64], r1 // p0
vst1.8 {d26}, [r0, :64], r1 // q2
sub r0, r0, r1, lsl #1
sub r0, r0, r1
bx r12
8:
sub r10, r0, r1, lsl #1
vst1.8 {d22}, [r10, :64], r1 // p1
vst1.8 {d24}, [r0, :64], r1 // q0
vst1.8 {d23}, [r10, :64], r1 // p0
vst1.8 {d25}, [r0, :64], r1 // q1
sub r0, r0, r1, lsl #1
bx r12
endfunc
function lpf_h_16_8_neon
mov r12, lr
sub r10, r0, #8
vld1.8 {d16}, [r10, :64], r1
vld1.8 {d24}, [r0, :64], r1
vld1.8 {d17}, [r10, :64], r1
vld1.8 {d25}, [r0, :64], r1
vld1.8 {d18}, [r10, :64], r1
vld1.8 {d26}, [r0, :64], r1
vld1.8 {d19}, [r10, :64], r1
vld1.8 {d27}, [r0, :64], r1
vld1.8 {d20}, [r10, :64], r1
vld1.8 {d28}, [r0, :64], r1
vld1.8 {d21}, [r10, :64], r1
vld1.8 {d29}, [r0, :64], r1
vld1.8 {d22}, [r10, :64], r1
vld1.8 {d30}, [r0, :64], r1
vld1.8 {d23}, [r10, :64], r1
vld1.8 {d31}, [r0, :64], r1
transpose_8x8b q8, q9, q10, q11, d16, d17, d18, d19, d20, d21, d22, d23
transpose_8x8b q12, q13, q14, q15, d24, d25, d26, d27, d28, d29, d30, d31
lpf_8_wd16
sub r0, r0, r1, lsl #3
sub r10, r0, #8
transpose_8x8b q8, q0, q1, q2, d16, d17, d0, d1, d2, d3, d4, d5
transpose_8x8b q3, q4, q5, q15, d6, d7, d8, d9, d10, d11, d30, d31
vst1.8 {d16}, [r10, :64], r1
vst1.8 {d6}, [r0, :64], r1
vst1.8 {d17}, [r10, :64], r1
vst1.8 {d7}, [r0, :64], r1
vst1.8 {d0}, [r10, :64], r1
vst1.8 {d8}, [r0, :64], r1
vst1.8 {d1}, [r10, :64], r1
vst1.8 {d9}, [r0, :64], r1
vst1.8 {d2}, [r10, :64], r1
vst1.8 {d10}, [r0, :64], r1
vst1.8 {d3}, [r10, :64], r1
vst1.8 {d11}, [r0, :64], r1
vst1.8 {d4}, [r10, :64], r1
vst1.8 {d30}, [r0, :64], r1
vst1.8 {d5}, [r10, :64], r1
vst1.8 {d31}, [r0, :64], r1
bx r12
7:
sub r10, r0, r1, lsl #3
sub r10, r10, #4
transpose_8x8b q10, q11, q12, q13, d20, d21, d22, d23, d24, d25, d26, d27
add r0, r10, r1, lsl #2
vst1.8 {d20}, [r10], r1
vst1.8 {d24}, [r0], r1
vst1.8 {d21}, [r10], r1
vst1.8 {d25}, [r0], r1
vst1.8 {d22}, [r10], r1
vst1.8 {d26}, [r0], r1
vst1.8 {d23}, [r10], r1
vst1.8 {d27}, [r0], r1
add r0, r0, #4
bx r12
8:
sub r10, r0, r1, lsl #3
sub r10, r10, #2
transpose_4x8b q11, q12, d22, d23, d24, d25
add r0, r10, r1, lsl #2
vst1.32 {d22[0]}, [r10], r1
vst1.32 {d22[1]}, [r0], r1
vst1.32 {d23[0]}, [r10], r1
vst1.32 {d23[1]}, [r0], r1
vst1.32 {d24[0]}, [r10], r1
vst1.32 {d24[1]}, [r0], r1
vst1.32 {d25[0]}, [r10], r1
vst1.32 {d25[1]}, [r0], r1
add r0, r0, #2
bx r12
endfunc
// void dav1d_lpf_v_sb_y_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const uint32_t *const vmask,
// const uint8_t (*l)[4], ptrdiff_t b4_stride,
// const Av1FilterLUT *lut, const int w)
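// The macro below walks the edge of a superblock in units of 4 pixels. For
// each unit the corresponding bits of vmask[0]/vmask[1] (plus vmask[2] for
// luma) select between the wd4, wd6/wd8 (chroma/luma) and wd16 (luma only)
// filters; the filter level L comes from l[0][0], falling back to the
// neighbouring block's value when it is zero, and H, I and E are derived
// from L and the sharpness entries of the lut as annotated below.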
.macro lpf_func dir, type
function lpf_\dir\()_sb_\type\()_8bpc_neon, export=1
push {r4-r11,lr}
vpush {q4-q7}
ldrd r4, r5, [sp, #100]
ldrd r6, r7, [r2] // vmask[0], vmask[1]
.ifc \type, y
ldr r2, [r2, #8] // vmask[2]
.endif
add r5, r5, #128 // Move to sharp part of lut
.ifc \type, y
orr r7, r7, r2 // vmask[1] |= vmask[2]
.endif
.ifc \dir, v
sub r4, r3, r4, lsl #2
.else
sub r3, r3, #4
lsl r4, r4, #2
.endif
orr r6, r6, r7 // vmask[0] |= vmask[1]
1:
tst r6, #0x03
.ifc \dir, v
vld1.8 {d0}, [r4]!
vld1.8 {d1}, [r3]!
.else
vld2.32 {d0[0], d1[0]}, [r3], r4
vld2.32 {d0[1], d1[1]}, [r3], r4
.endif
beq 7f // if (!(vm & bits)) continue;
vld1.8 {d5[]}, [r5] // sharp[0]
add r5, r5, #8
vmov.i32 d2, #0xff
vdup.32 d13, r6 // vmask[0]
vand d0, d0, d2 // Keep only lowest byte in each 32 bit word
vand d1, d1, d2
vtst.8 d3, d1, d2 // Check for nonzero values in l[0][0]
vmov.i8 d4, #1
vld1.8 {d6[]}, [r5] // sharp[1]
sub r5, r5, #8
vbif d1, d0, d3 // if (!l[0][0]) L = l[offset][0]
vtst.32 d2, d1, d2 // L != 0
vmul.i32 d1, d1, d4 // L
.ifc \type, y
vdup.32 d15, r2 // vmask[2]
.endif
vdup.32 d14, r7 // vmask[1]
vmov r10, r11, d2
orrs r10, r10, r11
beq 7f // if (!L) continue;
vneg.s8 d5, d5 // -sharp[0]
movrel_local r10, word_12
vshr.u8 d12, d1, #4 // H
vld1.32 {d16}, [r10, :64]
vshl.s8 d3, d1, d5 // L >> sharp[0]
.ifc \type, y
vtst.32 d15, d15, d16 // if (vmask[2] & bits)
.endif
vmov.i8 d7, #2
vmin.u8 d3, d3, d6 // imin(L >> sharp[0], sharp[1])
vadd.i8 d0, d1, d7 // L + 2
vmax.u8 d11, d3, d4 // imax(imin(), 1) = limit = I
vadd.u8 d0, d0, d0 // 2*(L + 2)
vtst.32 d14, d14, d16 // if (vmask[1] & bits)
vadd.i8 d10, d0, d11 // 2*(L + 2) + limit = E
vtst.32 d13, d13, d16 // if (vmask[0] & bits)
vand d13, d13, d2 // vmask[0] &= L != 0
.ifc \type, y
tst r2, #0x03
beq 2f
// wd16
bl lpf_\dir\()_16_8_neon
b 8f
2:
.endif
tst r7, #0x03
beq 3f
.ifc \type, y
// wd8
bl lpf_\dir\()_8_8_neon
.else
// wd6
bl lpf_\dir\()_6_8_neon
.endif
b 8f
3:
// wd4
bl lpf_\dir\()_4_8_neon
.ifc \dir, h
b 8f
7:
// For dir h, the functions above increment r0.
// If the whole function is skipped, increment it here instead.
add r0, r0, r1, lsl #3
.else
7:
.endif
8:
lsrs r6, r6, #2 // vmask[0] >>= 2
lsr r7, r7, #2 // vmask[1] >>= 2
.ifc \type, y
lsr r2, r2, #2 // vmask[2] >>= 2
.endif
.ifc \dir, v
add r0, r0, #8
.else
// For dir h, r0 is returned incremented
.endif
bne 1b
vpop {q4-q7}
pop {r4-r11,pc}
endfunc
.endm
lpf_func v, y
lpf_func h, y
lpf_func v, uv
lpf_func h, uv
const word_12, align=4
.word 1, 2
endconst
// File: third_party/dav1d/src/arm/32/looprestoration16.S
/*
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2020, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm.S"
#include "util.S"
const right_ext_mask_buf
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
right_ext_mask:
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
endconst
// void dav1d_wiener_filter_h_16bpc_neon(int16_t *dst, const pixel (*left)[4],
// const pixel *src, const int16_t fh[8],
// const int w,
// enum LrEdgeFlags edges,
// const int bitdepth_max);
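// Horizontal 7-tap Wiener filter on 16-bit pixels. The filter is symmetric,
// so mirrored pixel pairs share a single multiply by fh[4..6] and only the
// center pixel is multiplied by fh[3]. Each result has 1 << (bitdepth + 6)
// added, is rounded down by round_bits_h, clamped to 0x7fff and offset by
// -8192 so it fits the int16 intermediate buffer. Missing edge pixels are
// replicated according to the LR_HAVE_LEFT/LR_HAVE_RIGHT flags in edges.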
function wiener_filter_h_16bpc_neon, export=1
push {r4-r6,lr}
ldrd r4, r5, [sp, #16]
ldr r6, [sp, #24] // bitdepth_max
vld1.16 {q0}, [r3, :128]
clz r6, r6
vmov.i32 q14, #1
sub r12, r6, #38 // -(bitdepth + 6)
sub r6, r6, #25 // -round_bits_h
neg r12, r12 // bitdepth + 6
vdup.32 q1, r12
vdup.32 q13, r6 // -round_bits_h
vmov.i16 q15, #8192
vshl.u32 q14, q14, q1 // 1 << (bitdepth + 6)
vmvn.i16 q12, #0x8000 // 0x7fff = (1 << 15) - 1
// Set up the src pointers to include the left edge, for LR_HAVE_LEFT, left == NULL
tst r5, #1 // LR_HAVE_LEFT
beq 1f
// LR_HAVE_LEFT
cmp r1, #0
bne 0f
// left == NULL
sub r2, r2, #6
vld1.16 {q2, q3}, [r2]!
b 2f
0:
// LR_HAVE_LEFT, left != NULL
vld1.16 {q2, q3}, [r2]!
vld1.16 {d3}, [r1]!
// Move r2 back to account for the last 3 pixels we loaded earlier,
// which we'll shift out.
sub r2, r2, #6
vext.8 q3, q2, q3, #10
vext.8 q2, q1, q2, #10
b 2f
1:
vld1.16 {q2, q3}, [r2]!
// !LR_HAVE_LEFT, fill q1 with the leftmost pixel
// and shift q2/q3 to have 3x the first pixel at the front.
vdup.16 q1, d4[0]
// Move r2 back to account for the last 3 pixels we loaded before,
// which we shifted out.
sub r2, r2, #6
vext.8 q3, q2, q3, #10
vext.8 q2, q1, q2, #10
2:
tst r5, #2 // LR_HAVE_RIGHT
bne 4f
3: // !LR_HAVE_RIGHT
// Check whether we need to pad the right edge
cmp r4, #11
bge 4f // If w >= 11, all used input pixels are valid
// 1 <= w < 11, w+3 pixels valid in q2-q3. For w=9 or w=10,
// this ends up called again; it's not strictly needed in those
// cases (we pad enough here), but keeping the code as simple as possible.
// The padding pixel is q2/3.h[w+2]. r2 points at the next input, ie
// q2/3.h[16]. Thus read from r2[w-14] to find the padding pixel.
sub r12, r4, #14
lsl r12, r12, #1
// Insert padding in q2/3.h[w+3] onwards; fuse the +3 (*2) into the
// buffer pointer.
movrel_local r3, right_ext_mask, -6
ldrh r12, [r2, r12]
sub r3, r3, r4, lsl #1
vdup.16 q11, r12
vld1.8 {q9, q10}, [r3]
vbit q2, q11, q9
vbit q3, q11, q10
4: // Loop horizontally
vext.8 q9, q2, q3, #4
vext.8 q10, q2, q3, #8
vext.8 q8, q2, q3, #2
vext.8 q11, q2, q3, #10
vadd.i16 q10, q10, q9
vadd.i16 q11, q11, q8
vext.8 q8, q2, q3, #12
vext.8 q9, q2, q3, #6
vadd.i16 q2, q2, q8
vmull.s16 q8, d18, d0[3]
vmlal.s16 q8, d20, d1[0]
vmlal.s16 q8, d22, d1[1]
vmlal.s16 q8, d4, d1[2]
vmull.s16 q9, d19, d0[3]
vmlal.s16 q9, d21, d1[0]
vmlal.s16 q9, d23, d1[1]
vmlal.s16 q9, d5, d1[2]
vadd.i32 q8, q8, q14
vadd.i32 q9, q9, q14
vrshl.s32 q8, q8, q13
vrshl.s32 q9, q9, q13
vqmovun.s32 d16, q8
vqmovun.s32 d17, q9
vmin.u16 q8, q8, q12
vsub.i16 q8, q8, q15
subs r4, r4, #8
vst1.16 {q8}, [r0, :128]!
ble 9f
vmov q2, q3
tst r5, #2 // LR_HAVE_RIGHT
vld1.16 {q3}, [r2]!
bne 4b // If we don't need to pad, just keep filtering.
b 3b // If we need to pad, check how many pixels we have left.
9:
pop {r4-r6,pc}
endfunc
// void dav1d_wiener_filter_v_16bpc_neon(pixel *dst, int16_t **ptrs,
// const int16_t fv[8], const int w,
// const int bitdepth_max);
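// Vertical 7-tap Wiener pass over the int16 rows produced by the horizontal
// filter. ptrs[] holds the six input row pointers; the seventh tap reuses
// the sixth row (see the pointer rotation at the end). Results are rounded
// by round_bits_v = clz(bitdepth_max) - 11 and clamped to bitdepth_max.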
function wiener_filter_v_16bpc_neon, export=1
push {r4-r9,lr}
vpush {q4-q7}
ldr lr, [sp, #92] // bitdepth_max
vld1.16 {q0}, [r2, :128]
vdup.16 q2, lr
clz lr, lr
sub lr, lr, #11 // round_bits_v
vdup.32 q1, lr
ldrd r4, r5, [r1]
ldrd r6, r7, [r1, #8]
ldrd r8, r9, [r1, #16]
vneg.s32 q1, q1 // -round_bits_v
1:
vld1.16 {q4, q5}, [r4, :128]!
vld1.16 {q6, q7}, [r5, :128]!
vld1.16 {q8, q9}, [r6, :128]!
vld1.16 {q10, q11}, [r7, :128]!
vld1.16 {q12, q13}, [r8, :128]!
vld1.16 {q14, q15}, [r9, :128]!
subs r3, r3, #16
vmull.s16 q3, d8, d0[0]
vmlal.s16 q3, d12, d0[1]
vmlal.s16 q3, d16, d0[2]
vmlal.s16 q3, d20, d0[3]
vmlal.s16 q3, d24, d1[0]
vmlal.s16 q3, d28, d1[1]
vmlal.s16 q3, d28, d1[2]
vmull.s16 q4, d9, d0[0]
vmlal.s16 q4, d13, d0[1]
vmlal.s16 q4, d17, d0[2]
vmlal.s16 q4, d21, d0[3]
vmlal.s16 q4, d25, d1[0]
vmlal.s16 q4, d29, d1[1]
vmlal.s16 q4, d29, d1[2]
vmull.s16 q6, d10, d0[0]
vmlal.s16 q6, d14, d0[1]
vmlal.s16 q6, d18, d0[2]
vmlal.s16 q6, d22, d0[3]
vmlal.s16 q6, d26, d1[0]
vmlal.s16 q6, d30, d1[1]
vmlal.s16 q6, d30, d1[2]
vmull.s16 q5, d11, d0[0]
vmlal.s16 q5, d15, d0[1]
vmlal.s16 q5, d19, d0[2]
vmlal.s16 q5, d23, d0[3]
vmlal.s16 q5, d27, d1[0]
vmlal.s16 q5, d31, d1[1]
vmlal.s16 q5, d31, d1[2]
vrshl.s32 q3, q3, q1 // round_bits_v
vrshl.s32 q4, q4, q1
vrshl.s32 q6, q6, q1
vrshl.s32 q5, q5, q1
vqmovun.s32 d6, q3
vqmovun.s32 d7, q4
vqmovun.s32 d8, q6
vqmovun.s32 d9, q5
vmin.u16 q3, q3, q2 // bitdepth_max
vmin.u16 q4, q4, q2
vst1.16 {q3, q4}, [r0, :128]!
bgt 1b
// Shift the pointers, but only update the first 5; the 6th pointer is
// kept as it was before (and the 7th is implicitly identical to the
// 6th).
ldrd r4, r5, [r1, #4]
ldrd r6, r7, [r1, #12]
ldr r8, [r1, #20]
strd r4, r5, [r1]
strd r6, r7, [r1, #8]
str r8, [r1, #16]
vpop {q4-q7}
pop {r4-r9,pc}
endfunc
// void dav1d_wiener_filter_hv_16bpc_neon(pixel *dst, const pixel (*left)[4],
// const pixel *src,
// const int16_t filter[2][8],
// const int w,
// const enum LrEdgeFlags edges,
// int16_t **ptrs,
// const int bitdepth_max);
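// Fused pass: one new input row is filtered horizontally exactly as in
// wiener_filter_h and written to ptrs[6], while one output row is produced
// by the vertical filter from ptrs[0..5] plus the freshly filtered row.
// The window of row pointers is rotated down one step before returning.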
function wiener_filter_hv_16bpc_neon, export=1
push {r4-r11,lr}
vpush {q4-q7}
ldrd r4, r5, [sp, #100]
ldrd r6, r7, [sp, #108]
vld1.16 {q0, q1}, [r3, :128]
vdup.16 q11, r7 // bitdepth_max
clz r7, r7
vmov.i32 q14, #1
sub r12, r7, #38 // -(bitdepth + 6)
sub lr, r7, #11 // round_bits_v
sub r7, r7, #25 // -round_bits_h
neg r12, r12 // bitdepth + 6
vdup.32 q2, r12
vdup.32 q13, r7 // -round_bits_h
vdup.32 q10, lr // round_bits_v
mov lr, r6
vmov.i16 q15, #8192
vshl.u32 q14, q14, q2 // 1 << (bitdepth + 6)
vneg.s32 q10, q10 // -round_bits_v
ldrd r6, r7, [lr]
ldrd r8, r9, [lr, #8]
ldrd r10, r11, [lr, #16]
ldr r12, [lr, #24]
// Set up the src pointers to include the left edge, for LR_HAVE_LEFT, left == NULL
tst r5, #1 // LR_HAVE_LEFT
beq 1f
// LR_HAVE_LEFT
cmp r1, #0
bne 0f
// left == NULL
sub r2, r2, #6
vld1.16 {q2, q3}, [r2]!
b 2f
0:
// LR_HAVE_LEFT, left != NULL
vld1.16 {q2, q3}, [r2]!
vld1.16 {d9}, [r1]!
// Move r2 back to account for the last 3 pixels we loaded earlier,
// which we'll shift out.
sub r2, r2, #6
vext.8 q3, q2, q3, #10
vext.8 q2, q4, q2, #10
b 2f
1:
vld1.16 {q2, q3}, [r2]!
// !LR_HAVE_LEFT, fill q4 with the leftmost pixel
// and shift q2/q3 to have 3x the first pixel at the front.
vdup.16 q4, d4[0]
// Move r2 back to account for the last 3 pixels we loaded before,
// which we shifted out.
sub r2, r2, #6
vext.8 q3, q2, q3, #10
vext.8 q2, q4, q2, #10
2:
tst r5, #2 // LR_HAVE_RIGHT
bne 4f
3: // !LR_HAVE_RIGHT
// Check whether we need to pad the right edge
cmp r4, #11
bge 4f // If w >= 11, all used input pixels are valid
// 1 <= w < 11, w+3 pixels valid in q2-q3. For w=9 or w=10,
// this ends up called again; it's not strictly needed in those
// cases (we pad enough here), but keeping the code as simple as possible.
// The padding pixel is q2/3.h[w+2]. r2 points at the next input, ie
// q2/3.h[16]. Thus read from r2[w-14] to find the padding pixel.
sub lr, r4, #14
lsl lr, lr, #1
// Insert padding in q2/3.h[w+3] onwards; fuse the +3 (*2) into the
// buffer pointer.
movrel_local r3, right_ext_mask, -6
ldrh lr, [r2, lr]
sub r3, r3, r4, lsl #1
vdup.16 q4, lr
vld1.8 {q8, q9}, [r3]
vbit q2, q4, q8
vbit q3, q4, q9
4: // Loop horizontally
vext.8 q5, q2, q3, #4
vext.8 q6, q2, q3, #8
vext.8 q4, q2, q3, #2
vext.8 q7, q2, q3, #10
vadd.i16 q6, q6, q5
vadd.i16 q7, q7, q4
vext.8 q4, q2, q3, #12
vext.8 q5, q2, q3, #6
vadd.i16 q2, q2, q4
vld1.16 {q4}, [r6, :128]!
vmull.s16 q8, d10, d0[3]
vmlal.s16 q8, d12, d1[0]
vmlal.s16 q8, d14, d1[1]
vmlal.s16 q8, d4, d1[2]
vmull.s16 q9, d11, d0[3]
vmlal.s16 q9, d13, d1[0]
vmlal.s16 q9, d15, d1[1]
vmlal.s16 q9, d5, d1[2]
vld1.16 {q5}, [r7, :128]!
vmvn.i16 q12, #0x8000 // 0x7fff = (1 << 15) - 1
vadd.i32 q8, q8, q14
vadd.i32 q9, q9, q14
vld1.16 {q6}, [r8, :128]!
vrshl.s32 q8, q8, q13
vrshl.s32 q9, q9, q13
vqmovun.s32 d16, q8
vqmovun.s32 d17, q9
vld1.16 {q7}, [r9, :128]!
vmin.u16 q8, q8, q12
vld1.16 {q9}, [r10, :128]!
vsub.i16 q8, q8, q15
vld1.16 {q2}, [r11, :128]!
vmull.s16 q12, d8, d2[0]
vmlal.s16 q12, d10, d2[1]
vmlal.s16 q12, d12, d2[2]
vmlal.s16 q12, d14, d2[3]
vmlal.s16 q12, d18, d3[0]
vmlal.s16 q12, d4, d3[1]
vmlal.s16 q12, d16, d3[2]
vmull.s16 q4, d9, d2[0]
vmlal.s16 q4, d11, d2[1]
vmlal.s16 q4, d13, d2[2]
vmlal.s16 q4, d15, d2[3]
vmlal.s16 q4, d19, d3[0]
vmlal.s16 q4, d5, d3[1]
vmlal.s16 q4, d17, d3[2]
vrshl.s32 q12, q12, q10 // round_bits_v
vrshl.s32 q4, q4, q10
vqmovun.s32 d24, q12
vqmovun.s32 d25, q4
vst1.16 {q8}, [r12, :128]!
vmin.u16 q12, q12, q11 // bitdepth_max
subs r4, r4, #8
vst1.16 {q12}, [r0, :128]!
ble 9f
vmov q2, q3
tst r5, #2 // LR_HAVE_RIGHT
vld1.16 {q3}, [r2]!
bne 4b // If we don't need to pad, just keep filtering.
b 3b // If we need to pad, check how many pixels we have left.
9:
// Reload ptrs from arguments on the stack
ldr lr, [sp, #108]
// Rotate the window of pointers. Shift the 6 pointers downwards one step.
ldrd r6, r7, [lr, #4]
ldrd r8, r9, [lr, #12]
ldrd r10, r11, [lr, #20]
strd r6, r7, [lr]
strd r8, r9, [lr, #8]
strd r10, r11, [lr, #16]
// The topmost pointer, ptrs[6], which isn't used as input, is set to
// ptrs[0], which will be used as output for the next _hv call.
// At the start of the filtering, the caller may set ptrs[6] to the
// right next buffer to fill in, instead.
str r6, [lr, #24]
vpop {q4-q7}
pop {r4-r11,pc}
endfunc
#include "looprestoration_tmpl.S"
// void dav1d_sgr_box3_row_h_16bpc_neon(int32_t *sumsq, int16_t *sum,
// const pixel (*left)[4],
// const pixel *src, const int w,
// const enum LrEdgeFlags edges);
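// One row of 3-wide horizontal box sums for SGR: for every output position
// the sum of 3 neighbouring pixels goes to sum[] and the sum of their
// squares to sumsq[]. w is widened by 2 because the box extends one pixel
// past each side of the unit, and the left/right edges are replicated like
// in the Wiener horizontal filter.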
function sgr_box3_row_h_16bpc_neon, export=1
push {r4-r5,lr}
ldrd r4, r5, [sp, #12]
add r4, r4, #2 // w += 2
tst r5, #1 // LR_HAVE_LEFT
beq 1f
cmp r2, #0
bne 0f
// LR_HAVE_LEFT && left == NULL
sub r3, r3, #4
vld1.8 {q0, q1}, [r3]!
b 2f
0:
// LR_HAVE_LEFT, left != NULL
vld1.8 {q0, q1}, [r3]!
vld1.16 {d5}, [r2]
// Move r3 back to account for the last 2 pixels we loaded earlier,
// which we'll shift out.
sub r3, r3, #4
vext.8 q1, q0, q1, #12
vext.8 q0, q2, q0, #12
b 2f
1:
vld1.8 {q0, q1}, [r3]!
// !LR_HAVE_LEFT, fill q2 with the leftmost pixel
// and shift q0/q1 to have 2x the first pixel at the front.
vdup.16 q2, d0[0]
// Move r3 back to account for the last 2 pixels we loaded before,
// which we shifted out.
sub r3, r3, #4
vext.8 q1, q0, q1, #12
vext.8 q0, q2, q0, #12
2:
tst r5, #2 // LR_HAVE_RIGHT
bne 4f
// If we'll need to pad the right edge, load that pixel to pad with
// here since we can find it pretty easily from here.
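// In all three left-edge paths above r3 ends up pointing 14 pixels into the
// row, and w includes the 2 prepended pixels, so the last row pixel (pixel
// w-3 counting from the first src pixel) lies w - 17 halfwords from r3;
// that is what the constant below computes.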
sub lr, r4, #(2 + 16 - 2 + 1)
lsl lr, lr, #1
ldrh lr, [r3, lr]
// Fill q14 with the right padding pixel
vdup.16 q14, lr
3: // !LR_HAVE_RIGHT
// Check whether we need to pad the right edge
cmp r4, #10
bge 4f // If w >= 10, all used input pixels are valid
// 1 <= w < 10, w pixels valid in q0-q1. For w=9, this ends up called
// again; it's not strictly needed in those cases (we pad enough here),
// but keeping the code as simple as possible.
// Insert padding in q0.h[w] onwards
movrel_local lr, right_ext_mask
sub lr, lr, r4, lsl #1
vld1.8 {q12, q13}, [lr]
vbit q0, q14, q12
vbit q1, q14, q13
4: // Loop horizontally
vext.8 q8, q0, q1, #2
vext.8 q9, q0, q1, #4
vadd.i16 q2, q0, q8
vmull.u16 q12, d0, d0
vmlal.u16 q12, d16, d16
vmlal.u16 q12, d18, d18
vadd.i16 q2, q2, q9
vmull.u16 q13, d1, d1
vmlal.u16 q13, d17, d17
vmlal.u16 q13, d19, d19
subs r4, r4, #8
vst1.16 {q2}, [r1, :128]!
vst1.32 {q12, q13}, [r0, :128]!
ble 9f
tst r5, #2 // LR_HAVE_RIGHT
vmov q0, q1
vld1.16 {q1}, [r3]!
bne 4b // If we don't need to pad, just keep summing.
b 3b // If we need to pad, check how many pixels we have left.
9:
pop {r4-r5,pc}
endfunc
// void dav1d_sgr_box5_row_h_16bpc_neon(int32_t *sumsq, int16_t *sum,
// const pixel (*left)[4],
// const pixel *src, const int w,
// const enum LrEdgeFlags edges);
function sgr_box5_row_h_16bpc_neon, export=1
push {r4-r5,lr}
ldrd r4, r5, [sp, #12]
add r4, r4, #2 // w += 2
tst r5, #1 // LR_HAVE_LEFT
beq 1f
cmp r2, #0
bne 0f
// LR_HAVE_LEFT && left == NULL
sub r3, r3, #6
vld1.8 {q0, q1}, [r3]!
b 2f
0:
// LR_HAVE_LEFT, left != NULL
vld1.8 {q0, q1}, [r3]!
vld1.16 {d5}, [r2]
// Move r3 back to account for the last 3 pixels we loaded earlier,
// which we'll shift out.
sub r3, r3, #6
vext.8 q1, q0, q1, #10
vext.8 q0, q2, q0, #10
b 2f
1:
vld1.8 {q0, q1}, [r3]!
// !LR_HAVE_LEFT, fill q2 with the leftmost pixel
// and shift q0/q1 to have 3x the first pixel at the front.
vdup.16 q2, d0[0]
// Move r3 back to account for the last 3 pixels we loaded before,
// which we shifted out.
sub r3, r3, #6
vext.8 q1, q0, q1, #10
vext.8 q0, q2, q0, #10
2:
tst r5, #2 // LR_HAVE_RIGHT
bne 4f
// If we'll need to pad the right edge, load that pixel to pad with
// here since we can find it pretty easily from here.
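// Same idea as in sgr_box3_row_h above: r3 ends up pointing 13 pixels into
// the row (3 pixels were prepended here), so the last row pixel lies
// w - 16 halfwords from r3, matching the constant below.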
sub lr, r4, #(2 + 16 - 3 + 1)
lsl lr, lr, #1
ldrh lr, [r3, lr]
// Fill q14 with the right padding pixel
vdup.16 q14, lr
3: // !LR_HAVE_RIGHT
// Check whether we need to pad the right edge
cmp r4, #11
bge 4f // If w >= 11, all used input pixels are valid
// 1 <= w < 11, w+1 pixels valid in q0-q1. For w=9 or w=10,
// this ends up called again; it's not strictly needed in those
// cases (we pad enough here), but keeping the code as simple as possible.
// Insert padding in q0.h[w+1] onwards; fuse the +1 into the
// buffer pointer.
movrel_local lr, right_ext_mask, -2
sub lr, lr, r4, lsl #1
vld1.8 {q12, q13}, [lr]
vbit q0, q14, q12
vbit q1, q14, q13
4: // Loop horizontally
vext.8 q8, q0, q1, #2
vext.8 q9, q0, q1, #4
vadd.i16 q2, q0, q8
vmull.u16 q12, d0, d0
vmlal.u16 q12, d16, d16
vmlal.u16 q12, d18, d18
vadd.i16 q2, q2, q9
vmull.u16 q13, d1, d1
vmlal.u16 q13, d17, d17
vmlal.u16 q13, d19, d19
vext.8 q8, q0, q1, #6
vext.8 q9, q0, q1, #8
vadd.i16 q2, q2, q8
vmlal.u16 q12, d16, d16
vmlal.u16 q12, d1, d1
vadd.i16 q2, q2, q9
vmlal.u16 q13, d17, d17
vmlal.u16 q13, d19, d19
subs r4, r4, #8
vst1.16 {q2}, [r1, :128]!
vst1.32 {q12, q13}, [r0, :128]!
ble 9f
tst r5, #2 // LR_HAVE_RIGHT
vmov q0, q1
vld1.16 {q1}, [r3]!
bne 4b // If we don't need to pad, just keep summing.
b 3b // If we need to pad, check how many pixels we have left.
9:
pop {r4-r5,pc}
endfunc
// void dav1d_sgr_box35_row_h_16bpc_neon(int32_t *sumsq3, int16_t *sum3,
// int32_t *sumsq5, int16_t *sum5,
// const pixel (*left)[4],
// const pixel *src, const int w,
// const enum LrEdgeFlags edges);
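// Single-pass combination of the two helpers above: q2/q12/q13 first
// accumulate the sums and sums of squares of the three middle taps (stored
// to sum3/sumsq3 via r1/r0), then the two outer taps are added on top to
// form the 5-tap results (stored to sum5/sumsq5 via r3/r2).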
function sgr_box35_row_h_16bpc_neon, export=1
push {r4-r7,lr}
ldrd r4, r5, [sp, #20]
ldrd r6, r7, [sp, #28]
add r6, r6, #2 // w += 2
tst r7, #1 // LR_HAVE_LEFT
beq 1f
cmp r4, #0
bne 0f
// LR_HAVE_LEFT && left == NULL
sub r5, r5, #6
vld1.8 {q0, q1}, [r5]!
b 2f
0:
// LR_HAVE_LEFT, left != NULL
vld1.8 {q0, q1}, [r5]!
vld1.16 {d5}, [r4]
// Move r5 back to account for the last 3 pixels we loaded earlier,
// which we'll shift out.
sub r5, r5, #6
vext.8 q1, q0, q1, #10
vext.8 q0, q2, q0, #10
b 2f
1:
vld1.8 {q0, q1}, [r5]!
// !LR_HAVE_LEFT, fill q2 with the leftmost pixel
// and shift q0/q1 to have 3x the first pixel at the front.
vdup.16 q2, d0[0]
// Move r5 back to account for the last 3 pixels we loaded before,
// which we shifted out.
sub r5, r5, #6
vext.8 q1, q0, q1, #10
vext.8 q0, q2, q0, #10
2:
tst r7, #2 // LR_HAVE_RIGHT
bne 4f
// If we'll need to pad the right edge, load that pixel to pad with
// here since we can find it pretty easily from here.
sub lr, r6, #(2 + 16 - 3 + 1)
lsl lr, lr, #1
ldrh lr, [r5, lr]
// Fill q14 with the right padding pixel
vdup.16 q14, lr
3: // !LR_HAVE_RIGHT
// Check whether we need to pad the right edge
cmp r6, #11
bge 4f // If w >= 11, all used input pixels are valid
// 1 <= w < 11, w+1 pixels valid in q0-q1. For w=9 or w=10,
// this ends up called again; it's not strictly needed in those
// cases (we pad enough here), but keeping the code as simple as possible.
// Insert padding in q0.h[w+1] onwards; fuse the +1 into the
// buffer pointer.
movrel_local lr, right_ext_mask, -2
sub lr, lr, r6, lsl #1
vld1.8 {q12, q13}, [lr]
vbit q0, q14, q12
vbit q1, q14, q13
4: // Loop horizontally
vext.8 q8, q0, q1, #2
vext.8 q9, q0, q1, #4
vext.8 q10, q0, q1, #6
vext.8 q11, q0, q1, #8
vadd.i16 q2, q8, q9
vadd.i16 q3, q0, q11
vadd.i16 q2, q2, q10
vmull.u16 q12, d16, d16
vmlal.u16 q12, d18, d18
vmlal.u16 q12, d20, d20
vmull.u16 q13, d17, d17
vmlal.u16 q13, d19, d19
vmlal.u16 q13, d21, d21
vadd.i16 q3, q3, q2
vst1.16 {q2}, [r1, :128]!
vst1.32 {q12, q13}, [r0, :128]!
vmlal.u16 q12, d0, d0
vmlal.u16 q12, d22, d22
vmlal.u16 q13, d1, d1
vmlal.u16 q13, d23, d23
subs r6, r6, #8
vst1.16 {q3}, [r3, :128]!
vst1.32 {q12, q13}, [r2, :128]!
ble 9f
tst r7, #2 // LR_HAVE_RIGHT
vmov q0, q1
vld1.16 {q1}, [r5]!
bne 4b // If we don't need to pad, just keep summing.
b 3b // If we need to pad, check how many pixels we have left.
9:
pop {r4-r7,pc}
endfunc
sgr_funcs 16
|
Admenri/urge
| 114,897
|
third_party/dav1d/src/arm/32/itx.S
|
/******************************************************************************
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2020, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#include "src/arm/asm.S"
#include "util.S"
// The exported functions in this file have got the following signature:
// void itxfm_add(pixel *dst, ptrdiff_t dst_stride, coef *coeff, int eob);
// Most of the functions use the following register layout:
// r0-r3 external parameters
// r4 function pointer to first transform
// r5 function pointer to second transform
// r6 output parameter for helper function
// r7 input parameter for helper function
// r8 input stride for helper function
// r9 scratch variable for helper functions
// r10-r11 pointer to list of eob thresholds, eob threshold value,
// scratch variables within helper functions (backed up)
// The SIMD registers most often use the following layout:
// d0-d3 multiplication coefficients
// d4-d7 scratch registers
// d8-d15 unused in some transforms, used for scratch registers in others
// d16-d31 inputs/outputs of transforms
// Potential further optimizations, that are left unimplemented for now:
// - Trying to keep multiplication coefficients in registers across multiple
// transform functions. (The register layout is designed to potentially
// allow this.)
// - Use a simplified version of the transforms themselves for cases where
// we know a significant number of inputs are zero. E.g. if the eob value
// indicates only a quarter of input values are set, for idct16 and up,
// a significant amount of calculation can be skipped, at the cost of more
// code duplication and special casing.
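// Illustrative C-side call matching the signature above (names and sizes are
// only an example; the exported symbols carry the usual dav1d_ prefix and
// this file handles 8 bpc data):
//
//   uint8_t *dst;            // destination pixels
//   ptrdiff_t stride;        // dst stride in bytes
//   int16_t coeff[8 * 8];    // dequantized coefficients; cleared by the call
//   int eob;                 // end-of-block value as passed by the C wrappers
//
//   dav1d_inv_txfm_add_dct_dct_8x8_8bpc_neon(dst, stride, coeff, eob);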
const idct_coeffs, align=4
// idct4
.short 2896, 2896*8, 1567, 3784
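// (12-bit fixed point cosine constants: 2896 ~ 4096/sqrt(2),
//  1567 ~ 4096*cos(3*pi/8), 3784 ~ 4096*sin(3*pi/8); the *8 entry is
//  presumably the same constant pre-scaled for vqrdmulh, cf. the
//  movw #2896*8 immediates used below.)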
// idct8
.short 799, 4017, 3406, 2276
// idct16
.short 401, 4076, 3166, 2598
.short 1931, 3612, 3920, 1189
// idct32
.short 201, 4091, 3035, 2751
.short 1751, 3703, 3857, 1380
.short 995, 3973, 3513, 2106
.short 2440, 3290, 4052, 601
endconst
const idct64_coeffs, align=4
.short 101*8, 4095*8, 2967*8, -2824*8
.short 1660*8, 3745*8, 3822*8, -1474*8
.short 4076, 401, 4017, 799
.short 4036*8, -700*8, 2359*8, 3349*8
.short 3461*8, -2191*8, 897*8, 3996*8
.short -3166, -2598, -799, -4017
.short 501*8, 4065*8, 3229*8, -2520*8
.short 2019*8, 3564*8, 3948*8, -1092*8
.short 3612, 1931, 2276, 3406
.short 4085*8, -301*8, 2675*8, 3102*8
.short 3659*8, -1842*8, 1285*8, 3889*8
.short -3920, -1189, -3406, -2276
endconst
const iadst4_coeffs, align=4
// .h[4-5] can be interpreted as .s[2]
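// (i.e. the trailing pair (3344, 0), read as one little-endian 32-bit lane,
//  is simply 3344, which iadst_4x4/iadst_8x4 below pick up via
//  vmul.s32 ..., d1[0])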
.short 1321, 3803, 2482, 3344, 3344, 0
endconst
const iadst8_coeffs, align=4
.short 4076, 401, 3612, 1931
.short 2598, 3166, 1189, 3920
// idct_coeffs
.short 2896, 0, 1567, 3784, 0, 0, 0, 0
endconst
const iadst16_coeffs, align=4
.short 4091, 201, 3973, 995
.short 3703, 1751, 3290, 2440
.short 2751, 3035, 2106, 3513
.short 1380, 3857, 601, 4052
endconst
.macro vmull_vmlal d0, s0, s1, c0, c1
vmull.s16 \d0, \s0, \c0
vmlal.s16 \d0, \s1, \c1
.endm
.macro vmull_vmlal_8h d0, d1, s0, s1, s2, s3, c0, c1
vmull.s16 \d0, \s0, \c0
vmlal.s16 \d0, \s2, \c1
vmull.s16 \d1, \s1, \c0
vmlal.s16 \d1, \s3, \c1
.endm
.macro vmull_vmlsl d0, s0, s1, c0, c1
vmull.s16 \d0, \s0, \c0
vmlsl.s16 \d0, \s1, \c1
.endm
.macro vmull_vmlsl_8h d0, d1, s0, s1, s2, s3, c0, c1
vmull.s16 \d0, \s0, \c0
vmlsl.s16 \d0, \s2, \c1
vmull.s16 \d1, \s1, \c0
vmlsl.s16 \d1, \s3, \c1
.endm
.macro vqrshrn_8h d0, d1, s0, s1, shift
vqrshrn.s32 \d0, \s0, \shift
vqrshrn.s32 \d1, \s1, \shift
.endm
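// A butterfly rotation is built from one vmull_vmlal and one vmull_vmlsl on
// the same inputs (usually with the coefficient pair swapped), followed by
// vqrshrn/vqrshrn_8h to round the 32-bit products back down by #12 into
// 16-bit lanes; the coefficients are 12-bit fixed point values.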
.macro scale_input c, r0, r1, r2, r3, r4, r5, r6, r7
vqrdmulh.s16 \r0, \r0, \c
vqrdmulh.s16 \r1, \r1, \c
.ifnb \r2
vqrdmulh.s16 \r2, \r2, \c
vqrdmulh.s16 \r3, \r3, \c
.endif
.ifnb \r4
vqrdmulh.s16 \r4, \r4, \c
vqrdmulh.s16 \r5, \r5, \c
vqrdmulh.s16 \r6, \r6, \c
vqrdmulh.s16 \r7, \r7, \c
.endif
.endm
.macro load_add_store load, shift, addsrc, adddst, narrowsrc, narrowdst, store, dst, src, shiftbits=4
.ifnb \load
vld1.8 {\load}, [\src, :64], r1
.endif
.ifnb \shift
vrshr.s16 \shift, \shift, #\shiftbits
.endif
.ifnb \addsrc
vaddw.u8 \adddst, \adddst, \addsrc
.endif
.ifnb \narrowsrc
vqmovun.s16 \narrowdst, \narrowsrc
.endif
.ifnb \store
vst1.8 {\store}, [\dst, :64], r1
.endif
.endm
.macro load_add_store_8x8 dst, src, shiftbits=4
mov \src, \dst
load_add_store d2, q8, , , , , , \dst, \src, \shiftbits
load_add_store d3, q9, , , , , , \dst, \src, \shiftbits
load_add_store d4, q10, d2, q8, , , , \dst, \src, \shiftbits
load_add_store d5, q11, d3, q9, q8, d2, , \dst, \src, \shiftbits
load_add_store d6, q12, d4, q10, q9, d3, d2, \dst, \src, \shiftbits
load_add_store d7, q13, d5, q11, q10, d4, d3, \dst, \src, \shiftbits
load_add_store d2, q14, d6, q12, q11, d5, d4, \dst, \src, \shiftbits
load_add_store d3, q15, d7, q13, q12, d6, d5, \dst, \src, \shiftbits
load_add_store , , d2, q14, q13, d7, d6, \dst, \src, \shiftbits
load_add_store , , d3, q15, q14, d2, d7, \dst, \src, \shiftbits
load_add_store , , , , q15, d3, d2, \dst, \src, \shiftbits
load_add_store , , , , , , d3, \dst, \src, \shiftbits
.endm
.macro load_add_store_8x4 dst, src
mov \src, \dst
load_add_store d2, q8, , , , , , \dst, \src
load_add_store d3, q9, , , , , , \dst, \src
load_add_store d4, q10, d2, q8, , , , \dst, \src
load_add_store d5, q11, d3, q9, q8, d2, , \dst, \src
load_add_store , , d4, q10, q9, d3, d2, \dst, \src
load_add_store , , d5, q11, q10, d4, d3, \dst, \src
load_add_store , , , , q11, d5, d4, \dst, \src
load_add_store , , , , , , d5, \dst, \src
.endm
.macro load_add_store4 load, shift, addsrc, adddst, narrowsrc, narrowdst, store, dst, src
.ifnb \load
vld1.32 {\load[0]}, [\src, :32], r1
.endif
.ifnb \shift
vrshr.s16 \shift, \shift, #4
.endif
.ifnb \load
vld1.32 {\load[1]}, [\src, :32], r1
.endif
.ifnb \addsrc
vaddw.u8 \adddst, \adddst, \addsrc
.endif
.ifnb \store
vst1.32 {\store[0]}, [\dst, :32], r1
.endif
.ifnb \narrowsrc
vqmovun.s16 \narrowdst, \narrowsrc
.endif
.ifnb \store
vst1.32 {\store[1]}, [\dst, :32], r1
.endif
.endm
.macro load_add_store_4x16 dst, src
mov \src, \dst
load_add_store4 d0, , , , , , , \dst, \src
load_add_store4 d1, q8, , , , , , \dst, \src
load_add_store4 d2, q9, d0, q8, , , , \dst, \src
load_add_store4 d3, q10, d1, q9, q8, d0, , \dst, \src
load_add_store4 d4, q11, d2, q10, q9, d1, d0, \dst, \src
load_add_store4 d5, q12, d3, q11, q10, d2, d1, \dst, \src
load_add_store4 d6, q13, d4, q12, q11, d3, d2, \dst, \src
load_add_store4 d7, q14, d5, q13, q12, d4, d3, \dst, \src
load_add_store4 , q15, d6, q14, q13, d5, d4, \dst, \src
load_add_store4 , , d7, q15, q14, d6, d5, \dst, \src
load_add_store4 , , , , q15, d7, d6, \dst, \src
load_add_store4 , , , , , , d7, \dst, \src
.endm
.macro load_add_store_4x8 dst, src
mov \src, \dst
load_add_store4 d0, , , , , , , \dst, \src
load_add_store4 d1, q8, , , , , , \dst, \src
load_add_store4 d2, q9, d0, q8, , , , \dst, \src
load_add_store4 d3, q10, d1, q9, q8, d0, , \dst, \src
load_add_store4 , q11, d2, q10, q9, d1, d0, \dst, \src
load_add_store4 , , d3, q11, q10, d2, d1, \dst, \src
load_add_store4 , , , , q11, d3, d2, \dst, \src
load_add_store4 , , , , , , d3, \dst, \src
.endm
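// idct_dc handles the eob == 0 case, where only the DC coefficient can be
// non-zero: vqrdmulh with 2896*8 multiplies by 2896/4096 ~= 1/sqrt(2) once
// per transform pass (plus once more for rectangular blocks), \shift matches
// the inter-pass downshift and the final #4 shift matches the output stage,
// so the resulting constant is simply added to every destination pixel by
// the idct_dc_w*_neon helpers below.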
.macro idct_dc w, h, shift
cmp r3, #0
bne 1f
vmov.i16 d30, #0
movw r12, #2896*8
vld1.16 {d16[]}, [r2, :16]
vdup.16 d0, r12
vqrdmulh.s16 d16, d16, d0[0]
vst1.16 {d30[0]}, [r2, :16]
.if (\w == 2*\h) || (2*\w == \h)
vqrdmulh.s16 d16, d16, d0[0]
.endif
.if \shift > 0
vrshr.s16 d16, d16, #\shift
.endif
vqrdmulh.s16 d20, d16, d0[0]
mov r3, #\h
vrshr.s16 d16, d20, #4
vrshr.s16 d17, d20, #4
b idct_dc_w\w\()_neon
1:
.endm
function idct_dc_w4_neon
1:
vld1.32 {d0[0]}, [r0, :32], r1
vld1.32 {d0[1]}, [r0, :32], r1
vld1.32 {d1[0]}, [r0, :32], r1
vld1.32 {d1[1]}, [r0, :32], r1
subs r3, r3, #4
sub r0, r0, r1, lsl #2
vaddw.u8 q10, q8, d0
vqmovun.s16 d0, q10
vaddw.u8 q11, q8, d1
vst1.32 {d0[0]}, [r0, :32], r1
vqmovun.s16 d1, q11
vst1.32 {d0[1]}, [r0, :32], r1
vst1.32 {d1[0]}, [r0, :32], r1
vst1.32 {d1[1]}, [r0, :32], r1
bgt 1b
bx lr
endfunc
function idct_dc_w8_neon
1:
vld1.8 {d0}, [r0, :64], r1
vld1.8 {d1}, [r0, :64], r1
vld1.8 {d2}, [r0, :64], r1
vaddw.u8 q10, q8, d0
vld1.8 {d3}, [r0, :64], r1
sub r0, r0, r1, lsl #2
subs r3, r3, #4
vaddw.u8 q11, q8, d1
vqmovun.s16 d0, q10
vaddw.u8 q12, q8, d2
vqmovun.s16 d1, q11
vaddw.u8 q13, q8, d3
vst1.8 {d0}, [r0, :64], r1
vqmovun.s16 d2, q12
vst1.8 {d1}, [r0, :64], r1
vqmovun.s16 d3, q13
vst1.8 {d2}, [r0, :64], r1
vst1.8 {d3}, [r0, :64], r1
bgt 1b
bx lr
endfunc
function idct_dc_w16_neon
1:
vld1.8 {q0}, [r0, :128], r1
vld1.8 {q1}, [r0, :128], r1
vld1.8 {q2}, [r0, :128], r1
subs r3, r3, #4
vaddw.u8 q10, q8, d0
vaddw.u8 q11, q8, d1
vld1.8 {q3}, [r0, :128], r1
vaddw.u8 q12, q8, d2
vaddw.u8 q13, q8, d3
sub r0, r0, r1, lsl #2
vaddw.u8 q14, q8, d4
vaddw.u8 q15, q8, d5
vqmovun.s16 d0, q10
vqmovun.s16 d1, q11
vaddw.u8 q10, q8, d6
vaddw.u8 q11, q8, d7
vqmovun.s16 d2, q12
vqmovun.s16 d3, q13
vqmovun.s16 d4, q14
vqmovun.s16 d5, q15
vst1.8 {q0}, [r0, :128], r1
vqmovun.s16 d6, q10
vqmovun.s16 d7, q11
vst1.8 {q1}, [r0, :128], r1
vst1.8 {q2}, [r0, :128], r1
vst1.8 {q3}, [r0, :128], r1
bgt 1b
bx lr
endfunc
function idct_dc_w32_neon
1:
vld1.8 {q0, q1}, [r0, :128], r1
subs r3, r3, #2
vld1.8 {q2, q3}, [r0, :128], r1
vaddw.u8 q10, q8, d0
vaddw.u8 q11, q8, d1
vaddw.u8 q12, q8, d2
vaddw.u8 q13, q8, d3
sub r0, r0, r1, lsl #1
vaddw.u8 q14, q8, d4
vaddw.u8 q15, q8, d5
vqmovun.s16 d0, q10
vqmovun.s16 d1, q11
vaddw.u8 q10, q8, d6
vaddw.u8 q11, q8, d7
vqmovun.s16 d2, q12
vqmovun.s16 d3, q13
vqmovun.s16 d4, q14
vqmovun.s16 d5, q15
vst1.8 {q0, q1}, [r0, :128], r1
vqmovun.s16 d6, q10
vqmovun.s16 d7, q11
vst1.8 {q2, q3}, [r0, :128], r1
bgt 1b
bx lr
endfunc
function idct_dc_w64_neon
sub r1, r1, #32
1:
vld1.8 {q0, q1}, [r0, :128]!
subs r3, r3, #1
vld1.8 {q2, q3}, [r0, :128]
vaddw.u8 q10, q8, d0
vaddw.u8 q11, q8, d1
vaddw.u8 q12, q8, d2
vaddw.u8 q13, q8, d3
sub r0, r0, #32
vaddw.u8 q14, q8, d4
vaddw.u8 q15, q8, d5
vqmovun.s16 d0, q10
vqmovun.s16 d1, q11
vaddw.u8 q10, q8, d6
vaddw.u8 q11, q8, d7
vqmovun.s16 d2, q12
vqmovun.s16 d3, q13
vqmovun.s16 d4, q14
vqmovun.s16 d5, q15
vst1.8 {q0, q1}, [r0, :128]!
vqmovun.s16 d6, q10
vqmovun.s16 d7, q11
vst1.8 {q2, q3}, [r0, :128], r1
bgt 1b
bx lr
endfunc
.macro iwht4
vadd.i16 d16, d16, d17
vsub.i16 d21, d18, d19
vsub.i16 d20, d16, d21
vshr.s16 d20, d20, #1
vsub.i16 d18, d20, d17
vsub.i16 d17, d20, d19
vadd.i16 d19, d21, d18
vsub.i16 d16, d16, d17
.endm
.macro idct_4h_x4 r0, r1, r2, r3
vmull_vmlal q3, \r1, \r3, d0[3], d0[2]
vmull_vmlsl q2, \r1, \r3, d0[2], d0[3]
vmull_vmlal q1, \r0, \r2, d0[0], d0[0]
vqrshrn.s32 d6, q3, #12
vqrshrn.s32 d7, q2, #12
vmull_vmlsl q2, \r0, \r2, d0[0], d0[0]
vqrshrn.s32 d2, q1, #12
vqrshrn.s32 d3, q2, #12
vqadd.s16 \r0, d2, d6
vqsub.s16 \r3, d2, d6
vqadd.s16 \r1, d3, d7
vqsub.s16 \r2, d3, d7
.endm
.macro idct_8h_x4 q0, q1, q2, q3, r0, r1, r2, r3, r4, r5, r6, r7
vmull_vmlal_8h q6, q7, \r2, \r3, \r6, \r7, d0[3], d0[2]
vmull_vmlsl_8h q4, q5, \r2, \r3, \r6, \r7, d0[2], d0[3]
vmull_vmlal_8h q2, q3, \r0, \r1, \r4, \r5, d0[0], d0[0]
vqrshrn_8h d12, d13, q6, q7, #12
vqrshrn_8h d14, d15, q4, q5, #12
vmull_vmlsl_8h q4, q5, \r0, \r1, \r4, \r5, d0[0], d0[0]
vqrshrn_8h d4, d5, q2, q3, #12
vqrshrn_8h d6, d7, q4, q5, #12
vqadd.s16 \q0, q2, q6
vqsub.s16 \q3, q2, q6
vqadd.s16 \q1, q3, q7
vqsub.s16 \q2, q3, q7
.endm
function inv_dct_4h_x4_neon, export=1
movrel_local r12, idct_coeffs
vld1.16 {d0}, [r12, :64]
idct_4h_x4 d16, d17, d18, d19
bx lr
endfunc
function inv_dct_8h_x4_neon, export=1
movrel_local r12, idct_coeffs
vld1.16 {d0}, [r12, :64]
idct_8h_x4 q8, q9, q10, q11, d16, d17, d18, d19, d20, d21, d22, d23
bx lr
endfunc
.macro iadst_4x4 o0, o1, o2, o3
movrel_local r12, iadst4_coeffs
vld1.16 {d0, d1}, [r12, :128]
vsubl.s16 q1, d16, d18
vmull.s16 q2, d16, d0[0]
vmlal.s16 q2, d18, d0[1]
vmlal.s16 q2, d19, d0[2]
vmull.s16 q10, d17, d0[3]
vaddw.s16 q1, q1, d19
vmull.s16 q3, d16, d0[2]
vmlsl.s16 q3, d18, d0[0]
vmlsl.s16 q3, d19, d0[1]
vadd.s32 q11, q2, q3
vmul.s32 q1, q1, d1[0]
vadd.s32 q2, q2, q10
vadd.s32 q3, q3, q10
vsub.s32 q11, q11, q10
vqrshrn.s32 \o0, q2, #12
vqrshrn.s32 \o2, q1, #12
vqrshrn.s32 \o1, q3, #12
vqrshrn.s32 \o3, q11, #12
.endm
function inv_adst_4h_x4_neon, export=1
iadst_4x4 d16, d17, d18, d19
bx lr
endfunc
function inv_flipadst_4h_x4_neon, export=1
iadst_4x4 d19, d18, d17, d16
bx lr
endfunc
.macro iadst_8x4 o0, o1, o2, o3, o4, o5, o6, o7
movrel_local r12, iadst4_coeffs
vld1.16 {d0, d1}, [r12, :128]
vsubl.s16 q2, d16, d20
vsubl.s16 q3, d17, d21
vmull.s16 q4, d16, d0[0]
vmlal.s16 q4, d20, d0[1]
vmlal.s16 q4, d22, d0[2]
vmull.s16 q5, d17, d0[0]
vmlal.s16 q5, d21, d0[1]
vmlal.s16 q5, d23, d0[2]
vaddw.s16 q2, q2, d22
vaddw.s16 q3, q3, d23
vmull.s16 q6, d16, d0[2]
vmlsl.s16 q6, d20, d0[0]
vmlsl.s16 q6, d22, d0[1]
vmull.s16 q7, d17, d0[2]
vmlsl.s16 q7, d21, d0[0]
vmlsl.s16 q7, d23, d0[1]
vmul.s32 q10, q2, d1[0]
vmul.s32 q11, q3, d1[0]
vmull.s16 q2, d18, d0[3]
vmull.s16 q3, d19, d0[3]
vadd.s32 q8, q4, q2 // out0
vadd.s32 q9, q5, q3
vadd.s32 q4, q4, q6 // out3
vadd.s32 q5, q5, q7
vadd.s32 q6, q6, q2 // out1
vadd.s32 q7, q7, q3
vsub.s32 q4, q4, q2 // out3
vsub.s32 q5, q5, q3
vqrshrn.s32 d20, q10, #12
vqrshrn.s32 d21, q11, #12
vqrshrn.s32 \o0, q8, #12
vqrshrn.s32 \o1, q9, #12
.ifc \o4, d18
vmov q9, q10
.endif
vqrshrn.s32 \o2, q6, #12
vqrshrn.s32 \o3, q7, #12
vqrshrn.s32 \o6, q4, #12
vqrshrn.s32 \o7, q5, #12
.endm
function inv_adst_8h_x4_neon, export=1
iadst_8x4 d16, d17, d18, d19, d20, d21, d22, d23
bx lr
endfunc
function inv_flipadst_8h_x4_neon, export=1
iadst_8x4 d22, d23, d20, d21, d18, d19, d16, d17
bx lr
endfunc
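// The 4-point identity transform scales by 5793/4096 ~= sqrt(2). 5793*8 does
// not fit in a signed 16-bit vqrdmulh constant, so the multiply is split:
// x + vqrdmulh(x, (5793-4096)*8) = x + x*(5793-4096)/4096 = x*5793/4096.
// The 16-point variant further below uses 2*(5793-4096)*8 and adds x twice
// for the overall 2*sqrt(2) scale.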
function inv_identity_4h_x4_neon, export=1
movw r12, #(5793-4096)*8
vdup.16 d0, r12
vqrdmulh.s16 q2, q8, d0[0]
vqrdmulh.s16 q3, q9, d0[0]
vqadd.s16 q8, q8, q2
vqadd.s16 q9, q9, q3
bx lr
endfunc
function inv_identity_8h_x4_neon, export=1
movw r12, #(5793-4096)*8
vdup.16 d0, r12
vqrdmulh.s16 q1, q8, d0[0]
vqrdmulh.s16 q2, q9, d0[0]
vqrdmulh.s16 q3, q10, d0[0]
vqadd.s16 q8, q8, q1
vqrdmulh.s16 q1, q11, d0[0]
vqadd.s16 q9, q9, q2
vqadd.s16 q10, q10, q3
vqadd.s16 q11, q11, q1
bx lr
endfunc
.macro identity_8x4_shift1 r0, r1, r2, r3, c
.irp i, \r0, \r1, \r2, \r3
vqrdmulh.s16 q1, \i, \c
vrhadd.s16 \i, \i, q1
.endr
.endm
function inv_txfm_add_wht_wht_4x4_8bpc_neon, export=1
push {r4-r5,lr}
vmov.i16 q15, #0
vld1.16 {d16, d17, d18, d19}, [r2, :128]
vst1.16 {q15}, [r2, :128]!
vshr.s16 q8, q8, #2
vshr.s16 q9, q9, #2
iwht4
vst1.16 {q15}, [r2, :128]!
transpose_4x4h q8, q9, d16, d17, d18, d19
iwht4
vld1.32 {d0[]}, [r0, :32], r1
vld1.32 {d0[1]}, [r0, :32], r1
vld1.32 {d1[]}, [r0, :32], r1
vld1.32 {d1[1]}, [r0, :32], r1
b L(itx_4x4_end)
endfunc
function inv_txfm_add_4x4_neon
vmov.i16 q15, #0
vld1.16 {d16, d17, d18, d19}, [r2, :128]
vst1.16 {q15}, [r2, :128]!
blx r4
vst1.16 {q15}, [r2, :128]!
transpose_4x4h q8, q9, d16, d17, d18, d19
blx r5
vld1.32 {d0[]}, [r0, :32], r1
vld1.32 {d0[1]}, [r0, :32], r1
vld1.32 {d1[]}, [r0, :32], r1
vld1.32 {d1[1]}, [r0, :32], r1
vrshr.s16 q8, q8, #4
vrshr.s16 q9, q9, #4
L(itx_4x4_end):
sub r0, r0, r1, lsl #2
vaddw.u8 q8, q8, d0
vqmovun.s16 d0, q8
vaddw.u8 q9, q9, d1
vst1.32 {d0[0]}, [r0, :32], r1
vqmovun.s16 d1, q9
vst1.32 {d0[1]}, [r0, :32], r1
vst1.32 {d1[0]}, [r0, :32], r1
vst1.32 {d1[1]}, [r0, :32], r1
pop {r4-r5,pc}
endfunc
.macro def_fn_4x4 txfm1, txfm2
function inv_txfm_add_\txfm1\()_\txfm2\()_4x4_8bpc_neon, export=1
push {r4-r5,lr}
.ifc \txfm1\()_\txfm2, dct_dct
cmp r3, #0
bne 1f
vmov.i16 d30, #0
movw r12, #2896*8
vld1.16 {d16[]}, [r2, :16]
vdup.16 d4, r12
vst1.16 {d30[0]}, [r2, :16]
vqrdmulh.s16 d16, d16, d4[0]
vld1.32 {d0[0]}, [r0, :32], r1
vqrdmulh.s16 d20, d16, d4[0]
vld1.32 {d0[1]}, [r0, :32], r1
vrshr.s16 d16, d20, #4
vrshr.s16 d17, d20, #4
vld1.32 {d1[0]}, [r0, :32], r1
vmov q9, q8
vld1.32 {d1[1]}, [r0, :32], r1
b L(itx_4x4_end)
1:
.endif
movrel_local r4, inv_\txfm1\()_4h_x4_neon
movrel_local r5, inv_\txfm2\()_4h_x4_neon
b inv_txfm_add_4x4_neon
endfunc
.endm
def_fn_4x4 dct, dct
def_fn_4x4 identity, identity
def_fn_4x4 dct, adst
def_fn_4x4 dct, flipadst
def_fn_4x4 dct, identity
def_fn_4x4 adst, dct
def_fn_4x4 adst, adst
def_fn_4x4 adst, flipadst
def_fn_4x4 flipadst, dct
def_fn_4x4 flipadst, adst
def_fn_4x4 flipadst, flipadst
def_fn_4x4 identity, dct
def_fn_4x4 adst, identity
def_fn_4x4 flipadst, identity
def_fn_4x4 identity, adst
def_fn_4x4 identity, flipadst
.macro idct_8h_x8 q0, q1, q2, q3, q4, q5, q6, q7, r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15
idct_8h_x4 \q0, \q2, \q4, \q6, \r0, \r1, \r4, \r5, \r8, \r9, \r12, \r13
vmull_vmlsl_8h q2, q3, \r2, \r3, \r14, \r15, d1[0], d1[1] // -> t4a
vmull_vmlal_8h q4, q5, \r2, \r3, \r14, \r15, d1[1], d1[0] // -> t7a
vmull_vmlsl_8h q6, q7, \r10, \r11, \r6, \r7, d1[2], d1[3] // -> t5a
vqrshrn_8h \r2, \r3, q2, q3, #12 // t4a
vqrshrn_8h \r14, \r15, q4, q5, #12 // t7a
vmull_vmlal_8h q2, q3, \r10, \r11, \r6, \r7, d1[3], d1[2] // -> t6a
vqrshrn_8h \r6, \r7, q6, q7, #12 // t5a
vqrshrn_8h \r10, \r11, q2, q3, #12 // t6a
vqadd.s16 q2, \q1, \q3 // t4
vqsub.s16 \q1, \q1, \q3 // t5a
vqadd.s16 q3, \q7, \q5 // t7
vqsub.s16 \q3, \q7, \q5 // t6a
vmull_vmlsl_8h q4, q5, \r6, \r7, \r2, \r3, d0[0], d0[0] // -> t5
vmull_vmlal_8h q6, q7, \r6, \r7, \r2, \r3, d0[0], d0[0] // -> t6
vqrshrn_8h d8, d9, q4, q5, #12 // t5
vqrshrn_8h d10, d11, q6, q7, #12 // t6
vqsub.s16 \q7, \q0, q3 // out7
vqadd.s16 \q0, \q0, q3 // out0
vqadd.s16 \q1, \q2, q5 // out1
vqsub.s16 q6, \q2, q5 // out6
vqadd.s16 \q2, \q4, q4 // out2
vqsub.s16 \q5, \q4, q4 // out5
vqadd.s16 \q3, \q6, q2 // out3
vqsub.s16 \q4, \q6, q2 // out4
vmov \q6, q6 // out6
.endm
.macro idct_4h_x8 r0, r1, r2, r3, r4, r5, r6, r7
idct_4h_x4 \r0, \r2, \r4, \r6
vmull_vmlsl q1, \r1, \r7, d1[0], d1[1] // -> t4a
vmull_vmlal q2, \r1, \r7, d1[1], d1[0] // -> t7a
vmull_vmlsl q3, \r5, \r3, d1[2], d1[3] // -> t5a
vqrshrn.s32 \r1, q1, #12 // t4a
vmull_vmlal q1, \r5, \r3, d1[3], d1[2] // -> t6a
vqrshrn.s32 \r7, q2, #12 // t7a
vqrshrn.s32 \r3, q3, #12 // t5a
vqrshrn.s32 \r5, q1, #12 // t6a
vqadd.s16 d2, \r1, \r3 // t4
vqsub.s16 \r1, \r1, \r3 // t5a
vqadd.s16 d3, \r7, \r5 // t7
vqsub.s16 \r3, \r7, \r5 // t6a
vmull_vmlsl q2, \r3, \r1, d0[0], d0[0] // -> t5
vmull_vmlal q3, \r3, \r1, d0[0], d0[0] // -> t6
vqrshrn.s32 d4, q2, #12 // t5
vqrshrn.s32 d5, q3, #12 // t6
vqsub.s16 \r7, \r0, d3 // out7
vqadd.s16 \r0, \r0, d3 // out0
vqadd.s16 \r1, \r2, d5 // out1
vqsub.s16 d6, \r2, d5 // out6
vqadd.s16 \r2, \r4, d4 // out2
vqsub.s16 \r5, \r4, d4 // out5
vqadd.s16 \r3, \r6, d2 // out3
vqsub.s16 \r4, \r6, d2 // out4
vmov \r6, d6 // out6
.endm
function inv_dct_8h_x8_neon, export=1
movrel_local r12, idct_coeffs
vld1.16 {q0}, [r12, :128]
idct_8h_x8 q8, q9, q10, q11, q12, q13, q14, q15, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
bx lr
endfunc
function inv_dct_4h_x8_neon, export=1
movrel_local r12, idct_coeffs
vld1.16 {q0}, [r12, :128]
idct_4h_x8 d16, d17, d18, d19, d20, d21, d22, d23
bx lr
endfunc
.macro iadst_8h_x8 q0, q1, q2, q3, q4, q5, q6, q7, r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15
movrel_local r12, iadst8_coeffs
vld1.16 {d0, d1, d2}, [r12, :64]
vmull_vmlal_8h q2, q3, d30, d31, d16, d17, d0[0], d0[1]
vmull_vmlsl_8h q4, q5, d30, d31, d16, d17, d0[1], d0[0]
vmull_vmlal_8h q6, q7, d26, d27, d20, d21, d0[2], d0[3]
vqrshrn_8h d16, d17, q2, q3, #12 // t0a
vqrshrn_8h d30, d31, q4, q5, #12 // t1a
vmull_vmlsl_8h q2, q3, d26, d27, d20, d21, d0[3], d0[2]
vmull_vmlal_8h q4, q5, d22, d23, d24, d25, d1[0], d1[1]
vqrshrn_8h d20, d21, q6, q7, #12 // t2a
vqrshrn_8h d26, d27, q2, q3, #12 // t3a
vmull_vmlsl_8h q6, q7, d22, d23, d24, d25, d1[1], d1[0]
vmull_vmlal_8h q2, q3, d18, d19, d28, d29, d1[2], d1[3]
vqrshrn_8h d24, d25, q4, q5, #12 // t4a
vqrshrn_8h d22, d23, q6, q7, #12 // t5a
vmull_vmlsl_8h q4, q5, d18, d19, d28, d29, d1[3], d1[2]
vqrshrn_8h d28, d29, q2, q3, #12 // t6a
vqrshrn_8h d18, d19, q4, q5, #12 // t7a
vqadd.s16 q2, q8, q12 // t0
vqsub.s16 q3, q8, q12 // t4
vqadd.s16 q4, q15, q11 // t1
vqsub.s16 q5, q15, q11 // t5
vqadd.s16 q6, q10, q14 // t2
vqsub.s16 q7, q10, q14 // t6
vqadd.s16 q10, q13, q9 // t3
vqsub.s16 q11, q13, q9 // t7
vmull_vmlal_8h q8, q9, d6, d7, d10, d11, d2[3], d2[2]
vmull_vmlsl_8h q12, q13, d6, d7, d10, d11, d2[2], d2[3]
vmull_vmlsl_8h q14, q15, d22, d23, d14, d15, d2[3], d2[2]
vqrshrn_8h d6, d7, q8, q9, #12 // t4a
vqrshrn_8h d10, d11, q12, q13, #12 // t5a
vmull_vmlal_8h q8, q9, d22, d23, d14, d15, d2[2], d2[3]
vqrshrn_8h d14, d15, q14, q15, #12 // t6a
vqrshrn_8h d22, d23, q8, q9, #12 // t7a
vqadd.s16 \q0, q2, q6 // out0
vqsub.s16 q2, q2, q6 // t2
vqadd.s16 \q7, q4, q10 // out7
vqsub.s16 q4, q4, q10 // t3
vqneg.s16 \q7, \q7 // out7
vqadd.s16 \q1, q3, q7 // out1
vqsub.s16 q3, q3, q7 // t6
vqadd.s16 \q6, q5, q11 // out6
vqsub.s16 q5, q5, q11 // t7
vqneg.s16 \q1, \q1 // out1
vmull_vmlal_8h q10, q11, d4, d5, d8, d9, d2[0], d2[0] // -> out3 (q11 or q12)
vmull_vmlsl_8h q6, q7, d4, d5, d8, d9, d2[0], d2[0] // -> out4 (q12 or q11)
vmull_vmlsl_8h q12, q13, d6, d7, d10, d11, d2[0], d2[0] // -> out5 (q13 or q10)
vqrshrn_8h d4, d5, q10, q11, #12 // out3
vmull_vmlal_8h q10, q11, d6, d7, d10, d11, d2[0], d2[0] // -> out2 (q10 or q13)
vqrshrn_8h d6, d7, q12, q13, #12 // out5
vqrshrn_8h \r4, \r5, q10, q11, #12 // out2 (q10 or q13)
vqrshrn_8h \r8, \r9, q6, q7, #12 // out4 (q12 or q11)
vqneg.s16 \q3, q2 // out3
vqneg.s16 \q5, q3 // out5
.endm
.macro iadst_4h_x8 r0, r1, r2, r3, r4, r5, r6, r7
movrel_local r12, iadst8_coeffs
vld1.16 {d0, d1, d2}, [r12, :64]
vmull_vmlal q2, d23, d16, d0[0], d0[1]
vmull_vmlsl q3, d23, d16, d0[1], d0[0]
vmull_vmlal q4, d21, d18, d0[2], d0[3]
vqrshrn.s32 d16, q2, #12 // t0a
vqrshrn.s32 d23, q3, #12 // t1a
vmull_vmlsl q5, d21, d18, d0[3], d0[2]
vmull_vmlal q6, d19, d20, d1[0], d1[1]
vqrshrn.s32 d18, q4, #12 // t2a
vqrshrn.s32 d21, q5, #12 // t3a
vmull_vmlsl q7, d19, d20, d1[1], d1[0]
vmull_vmlal q2, d17, d22, d1[2], d1[3]
vqrshrn.s32 d20, q6, #12 // t4a
vqrshrn.s32 d19, q7, #12 // t5a
vmull_vmlsl q3, d17, d22, d1[3], d1[2]
vqrshrn.s32 d22, q2, #12 // t6a
vqrshrn.s32 d17, q3, #12 // t7a
vqadd.s16 d4, d16, d20 // t0
vqsub.s16 d5, d16, d20 // t4
vqadd.s16 d6, d23, d19 // t1
vqsub.s16 d7, d23, d19 // t5
vqadd.s16 d8, d18, d22 // t2
vqsub.s16 d9, d18, d22 // t6
vqadd.s16 d18, d21, d17 // t3
vqsub.s16 d19, d21, d17 // t7
vmull_vmlal q8, d5, d7, d2[3], d2[2]
vmull_vmlsl q10, d5, d7, d2[2], d2[3]
vmull_vmlsl q11, d19, d9, d2[3], d2[2]
vqrshrn.s32 d5, q8, #12 // t4a
vqrshrn.s32 d7, q10, #12 // t5a
vmull_vmlal q8, d19, d9, d2[2], d2[3]
vqrshrn.s32 d9, q11, #12 // t6a
vqrshrn.s32 d19, q8, #12 // t7a
vqadd.s16 \r0, d4, d8 // out0
vqsub.s16 d4, d4, d8 // t2
vqadd.s16 \r7, d6, d18 // out7
vqsub.s16 d6, d6, d18 // t3
vqneg.s16 \r7, \r7 // out7
vqadd.s16 \r1, d5, d9 // out1
vqsub.s16 d5, d5, d9 // t6
vqadd.s16 \r6, d7, d19 // out6
vqsub.s16 d7, d7, d19 // t7
vqneg.s16 \r1, \r1 // out1
vmull_vmlal q9, d4, d6, d2[0], d2[0] // -> out3 (d19 or d20)
vmull_vmlsl q4, d4, d6, d2[0], d2[0] // -> out4 (d20 or d19)
vmull_vmlsl q10, d5, d7, d2[0], d2[0] // -> out5 (d21 or d18)
vqrshrn.s32 d4, q9, #12 // out3
vmull_vmlal q9, d5, d7, d2[0], d2[0] // -> out2 (d18 or d21)
vqrshrn.s32 d5, q10, #12 // out5
vqrshrn.s32 \r2, q9, #12 // out2 (d18 or d21)
vqrshrn.s32 \r4, q4, #12 // out4 (d20 or d19)
vqneg.s16 \r3, d4 // out3
vqneg.s16 \r5, d5 // out5
.endm
function inv_adst_8h_x8_neon, export=1
iadst_8h_x8 q8, q9, q10, q11, q12, q13, q14, q15, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
bx lr
endfunc
function inv_flipadst_8h_x8_neon, export=1
iadst_8h_x8 q15, q14, q13, q12, q11, q10, q9, q8, d30, d31, d28, d29, d26, d27, d24, d25, d22, d23, d20, d21, d18, d19, d16, d17
bx lr
endfunc
function inv_adst_4h_x8_neon, export=1
iadst_4h_x8 d16, d17, d18, d19, d20, d21, d22, d23
bx lr
endfunc
function inv_flipadst_4h_x8_neon, export=1
iadst_4h_x8 d23, d22, d21, d20, d19, d18, d17, d16
bx lr
endfunc
function inv_identity_8h_x8_neon, export=1
vqshl.s16 q8, q8, #1
vqshl.s16 q9, q9, #1
vqshl.s16 q10, q10, #1
vqshl.s16 q11, q11, #1
vqshl.s16 q12, q12, #1
vqshl.s16 q13, q13, #1
vqshl.s16 q14, q14, #1
vqshl.s16 q15, q15, #1
bx lr
endfunc
function inv_identity_4h_x8_neon, export=1
vqshl.s16 q8, q8, #1
vqshl.s16 q9, q9, #1
vqshl.s16 q10, q10, #1
vqshl.s16 q11, q11, #1
bx lr
endfunc
.macro def_fn_8x8_base variant
function inv_txfm_\variant\()add_8x8_neon
vmov.i16 q0, #0
vmov.i16 q1, #0
vld1.16 {q8, q9}, [r2, :128]
vst1.16 {q0, q1}, [r2, :128]!
vld1.16 {q10, q11}, [r2, :128]
vst1.16 {q0, q1}, [r2, :128]!
vld1.16 {q12, q13}, [r2, :128]
vst1.16 {q0, q1}, [r2, :128]!
vld1.16 {q14, q15}, [r2, :128]
vst1.16 {q0, q1}, [r2, :128]
.ifc \variant, identity_
// The identity shl #1 and downshift vrshr #1 cancel out
b L(itx_8x8_epilog)
.else
blx r4
vrshr.s16 q8, q8, #1
vrshr.s16 q9, q9, #1
vrshr.s16 q10, q10, #1
vrshr.s16 q11, q11, #1
vrshr.s16 q12, q12, #1
vrshr.s16 q13, q13, #1
vrshr.s16 q14, q14, #1
vrshr.s16 q15, q15, #1
L(itx_8x8_epilog):
transpose_8x8h q8, q9, q10, q11, q12, q13, q14, q15, d17, d19, d21, d23, d24, d26, d28, d30
blx r5
load_add_store_8x8 r0, r7
vpop {q4-q7}
pop {r4-r5,r7,pc}
.endif
endfunc
.endm
def_fn_8x8_base identity_
def_fn_8x8_base
.macro def_fn_8x8 txfm1, txfm2
function inv_txfm_add_\txfm1\()_\txfm2\()_8x8_8bpc_neon, export=1
.ifc \txfm1\()_\txfm2, dct_dct
idct_dc 8, 8, 1
.endif
push {r4-r5,r7,lr}
vpush {q4-q7}
movrel_local r5, inv_\txfm2\()_8h_x8_neon
.ifc \txfm1, identity
b inv_txfm_identity_add_8x8_neon
.else
movrel_local r4, inv_\txfm1\()_8h_x8_neon
b inv_txfm_add_8x8_neon
.endif
endfunc
.endm
def_fn_8x8 dct, dct
def_fn_8x8 identity, identity
def_fn_8x8 dct, adst
def_fn_8x8 dct, flipadst
def_fn_8x8 dct, identity
def_fn_8x8 adst, dct
def_fn_8x8 adst, adst
def_fn_8x8 adst, flipadst
def_fn_8x8 flipadst, dct
def_fn_8x8 flipadst, adst
def_fn_8x8 flipadst, flipadst
def_fn_8x8 identity, dct
def_fn_8x8 adst, identity
def_fn_8x8 flipadst, identity
def_fn_8x8 identity, adst
def_fn_8x8 identity, flipadst
function inv_txfm_add_8x4_neon
vmov.i16 q14, #0
vmov.i16 q15, #0
movw r12, #2896*8
vdup.16 d0, r12
vld1.16 {d16, d17, d18, d19}, [r2, :128]
vst1.16 {q14, q15}, [r2, :128]!
vld1.16 {d20, d21, d22, d23}, [r2, :128]
vst1.16 {q14, q15}, [r2, :128]
scale_input d0[0], q8, q9, q10, q11
blx r4
transpose_4x4h q8, q9, d16, d17, d18, d19
transpose_4x4h q10, q11, d20, d21, d22, d23
vswp d17, d20
vswp d19, d21
vswp d18, d20
vswp d21, d22
blx r5
load_add_store_8x4 r0, r7
vpop {q4-q7}
pop {r4-r5,r7,pc}
endfunc
function inv_txfm_add_4x8_neon
vmov.i16 q14, #0
vmov.i16 q15, #0
movw r12, #2896*8
vdup.16 d0, r12
vld1.16 {q8, q9}, [r2, :128]
vst1.16 {q14, q15}, [r2, :128]!
vld1.16 {q10, q11}, [r2, :128]
vst1.16 {q14, q15}, [r2, :128]
scale_input d0[0], q8, q9, q10, q11
blx r4
transpose_4x8h q8, q9, q10, q11
vswp d17, d20
vswp d19, d21
vswp d17, d18
vswp d19, d22
blx r5
load_add_store_4x8 r0, r7
vpop {q4-q7}
pop {r4-r5,r7,pc}
endfunc
.macro def_fn_48 w, h, txfm1, txfm2
function inv_txfm_add_\txfm1\()_\txfm2\()_\w\()x\h\()_8bpc_neon, export=1
.ifc \txfm1\()_\txfm2, dct_dct
idct_dc \w, \h, 0
.endif
push {r4-r5,r7,lr}
vpush {q4-q7}
movrel_local r4, inv_\txfm1\()_\h\()h_x\w\()_neon
movrel_local r5, inv_\txfm2\()_\w\()h_x\h\()_neon
b inv_txfm_add_\w\()x\h\()_neon
endfunc
.endm
.macro def_fns_48 w, h
def_fn_48 \w, \h, dct, dct
def_fn_48 \w, \h, identity, identity
def_fn_48 \w, \h, dct, adst
def_fn_48 \w, \h, dct, flipadst
def_fn_48 \w, \h, dct, identity
def_fn_48 \w, \h, adst, dct
def_fn_48 \w, \h, adst, adst
def_fn_48 \w, \h, adst, flipadst
def_fn_48 \w, \h, flipadst, dct
def_fn_48 \w, \h, flipadst, adst
def_fn_48 \w, \h, flipadst, flipadst
def_fn_48 \w, \h, identity, dct
def_fn_48 \w, \h, adst, identity
def_fn_48 \w, \h, flipadst, identity
def_fn_48 \w, \h, identity, adst
def_fn_48 \w, \h, identity, flipadst
.endm
def_fns_48 4, 8
def_fns_48 8, 4
function inv_dct_4h_x16_neon, export=1
movrel_local r12, idct_coeffs
vld1.16 {q0, q1}, [r12, :128]
vmull_vmlsl q2, d17, d31, d2[0], d2[1] // -> t8a
vmull_vmlal q3, d17, d31, d2[1], d2[0] // -> t15a
vmull_vmlsl q4, d25, d23, d2[2], d2[3] // -> t9a
vqrshrn.s32 d17, q2, #12 // t8a
vqrshrn.s32 d31, q3, #12 // t15a
vmull_vmlal q2, d25, d23, d2[3], d2[2] // -> t14a
vmull_vmlsl q3, d21, d27, d3[0], d3[1] // -> t10a
vqrshrn.s32 d23, q4, #12 // t9a
vqrshrn.s32 d25, q2, #12 // t14a
vmull_vmlal q4, d21, d27, d3[1], d3[0] // -> t13a
vmull_vmlsl q2, d29, d19, d3[2], d3[3] // -> t11a
vqrshrn.s32 d21, q3, #12 // t10a
vqrshrn.s32 d27, q4, #12 // t13a
vmull_vmlal q3, d29, d19, d3[3], d3[2] // -> t12a
vqrshrn.s32 d19, q2, #12 // t11a
vqrshrn.s32 d29, q3, #12 // t12a
idct_4h_x8 d16, d18, d20, d22, d24, d26, d28, d30
vqsub.s16 d4, d17, d23 // t9
vqadd.s16 d17, d17, d23 // t8
vqsub.s16 d5, d31, d25 // t14
vqadd.s16 d31, d31, d25 // t15
vqsub.s16 d23, d19, d21 // t10
vqadd.s16 d19, d19, d21 // t11
vqadd.s16 d25, d29, d27 // t12
vqsub.s16 d29, d29, d27 // t13
vmull_vmlsl q3, d5, d4, d0[2], d0[3] // -> t9a
vmull_vmlal q4, d5, d4, d0[3], d0[2] // -> t14a
vqrshrn.s32 d21, q3, #12 // t9a
vqrshrn.s32 d27, q4, #12 // t14a
vmull_vmlsl q3, d29, d23, d0[2], d0[3] // -> t13a
vmull_vmlal q4, d29, d23, d0[3], d0[2] // -> t10a
vqrshrn.s32 d29, q3, #12 // t13a
vneg.s32 q4, q4
vqrshrn.s32 d23, q4, #12 // t10a
vqsub.s16 d4, d17, d19 // t11a
vqadd.s16 d17, d17, d19 // t8a
vqsub.s16 d5, d31, d25 // t12a
vqadd.s16 d31, d31, d25 // t15a
vqadd.s16 d19, d21, d23 // t9
vqsub.s16 d21, d21, d23 // t10
vqsub.s16 d25, d27, d29 // t13
vqadd.s16 d27, d27, d29 // t14
vmull_vmlsl q3, d5, d4, d0[0], d0[0] // -> t11
vmull_vmlal q4, d5, d4, d0[0], d0[0] // -> t12
vmull_vmlsl q2, d25, d21, d0[0], d0[0] // -> t10a
vqrshrn.s32 d6, q3, #12 // t11
vqrshrn.s32 d7, q4, #12 // t12
vmull_vmlal q4, d25, d21, d0[0], d0[0] // -> t13a
vqrshrn.s32 d4, q2, #12 // t10a
vqrshrn.s32 d5, q4, #12 // t13a
vqadd.s16 d8, d16, d31 // out0
vqsub.s16 d31, d16, d31 // out15
vmov d16, d8
vqadd.s16 d23, d30, d17 // out7
vqsub.s16 d9, d30, d17 // out8
vqadd.s16 d17, d18, d27 // out1
vqsub.s16 d30, d18, d27 // out14
vqadd.s16 d18, d20, d5 // out2
vqsub.s16 d29, d20, d5 // out13
vqadd.s16 d5, d28, d19 // out6
vqsub.s16 d25, d28, d19 // out9
vqadd.s16 d19, d22, d7 // out3
vqsub.s16 d28, d22, d7 // out12
vqadd.s16 d20, d24, d6 // out4
vqsub.s16 d27, d24, d6 // out11
vqadd.s16 d21, d26, d4 // out5
vqsub.s16 d26, d26, d4 // out10
vmov d24, d9
vmov d22, d5
bx lr
endfunc
.macro iadst_16 o0, o1, o2, o3, o4, o5, o6, o7, o8, o9, o10, o11, o12, o13, o14, o15
movrel_local r12, iadst16_coeffs
vld1.16 {q0, q1}, [r12, :128]
movrel_local r12, idct_coeffs
vmull_vmlal q2, d31, d16, d0[0], d0[1] // -> t0
vmull_vmlsl q3, d31, d16, d0[1], d0[0] // -> t1
vmull_vmlal q4, d29, d18, d0[2], d0[3] // -> t2
vqrshrn.s32 d16, q2, #12 // t0
vqrshrn.s32 d31, q3, #12 // t1
vmull_vmlsl q2, d29, d18, d0[3], d0[2] // -> t3
vmull_vmlal q3, d27, d20, d1[0], d1[1] // -> t4
vqrshrn.s32 d18, q4, #12 // t2
vqrshrn.s32 d29, q2, #12 // t3
vmull_vmlsl q4, d27, d20, d1[1], d1[0] // -> t5
vmull_vmlal q2, d25, d22, d1[2], d1[3] // -> t6
vqrshrn.s32 d20, q3, #12 // t4
vqrshrn.s32 d27, q4, #12 // t5
vmull_vmlsl q3, d25, d22, d1[3], d1[2] // -> t7
vmull_vmlal q4, d23, d24, d2[0], d2[1] // -> t8
vqrshrn.s32 d22, q2, #12 // t6
vqrshrn.s32 d25, q3, #12 // t7
vmull_vmlsl q2, d23, d24, d2[1], d2[0] // -> t9
vmull_vmlal q3, d21, d26, d2[2], d2[3] // -> t10
vqrshrn.s32 d23, q4, #12 // t8
vqrshrn.s32 d24, q2, #12 // t9
vmull_vmlsl q4, d21, d26, d2[3], d2[2] // -> t11
vmull_vmlal q2, d19, d28, d3[0], d3[1] // -> t12
vqrshrn.s32 d21, q3, #12 // t10
vqrshrn.s32 d26, q4, #12 // t11
vmull_vmlsl q3, d19, d28, d3[1], d3[0] // -> t13
vmull_vmlal q4, d17, d30, d3[2], d3[3] // -> t14
vqrshrn.s32 d19, q2, #12 // t12
vqrshrn.s32 d28, q3, #12 // t13
vmull_vmlsl q2, d17, d30, d3[3], d3[2] // -> t15
vqrshrn.s32 d17, q4, #12 // t14
vqrshrn.s32 d30, q2, #12 // t15
vld1.16 {q0}, [r12, :128]
vqsub.s16 d2, d16, d23 // t8a
vqadd.s16 d16, d16, d23 // t0a
vqsub.s16 d3, d31, d24 // t9a
vqadd.s16 d31, d31, d24 // t1a
vqadd.s16 d23, d18, d21 // t2a
vqsub.s16 d18, d18, d21 // t10a
vqadd.s16 d24, d29, d26 // t3a
vqsub.s16 d29, d29, d26 // t11a
vqadd.s16 d21, d20, d19 // t4a
vqsub.s16 d20, d20, d19 // t12a
vqadd.s16 d26, d27, d28 // t5a
vqsub.s16 d27, d27, d28 // t13a
vqadd.s16 d19, d22, d17 // t6a
vqsub.s16 d22, d22, d17 // t14a
vqadd.s16 d28, d25, d30 // t7a
vqsub.s16 d25, d25, d30 // t15a
vmull_vmlal q2, d2, d3, d1[1], d1[0] // -> t8
vmull_vmlsl q3, d2, d3, d1[0], d1[1] // -> t9
vmull_vmlal q4, d18, d29, d1[3], d1[2] // -> t10
vqrshrn.s32 d17, q2, #12 // t8
vqrshrn.s32 d30, q3, #12 // t9
vmull_vmlsl q2, d18, d29, d1[2], d1[3] // -> t11
vmull_vmlsl q3, d27, d20, d1[1], d1[0] // -> t12
vqrshrn.s32 d18, q4, #12 // t10
vqrshrn.s32 d29, q2, #12 // t11
vmull_vmlal q4, d27, d20, d1[0], d1[1] // -> t13
vmull_vmlsl q2, d25, d22, d1[3], d1[2] // -> t14
vqrshrn.s32 d27, q3, #12 // t12
vqrshrn.s32 d20, q4, #12 // t13
vmull_vmlal q3, d25, d22, d1[2], d1[3] // -> t15
vqrshrn.s32 d25, q2, #12 // t14
vqrshrn.s32 d22, q3, #12 // t15
vqsub.s16 d2, d16, d21 // t4
vqadd.s16 d16, d16, d21 // t0
vqsub.s16 d3, d31, d26 // t5
vqadd.s16 d31, d31, d26 // t1
vqadd.s16 d21, d23, d19 // t2
vqsub.s16 d23, d23, d19 // t6
vqadd.s16 d26, d24, d28 // t3
vqsub.s16 d24, d24, d28 // t7
vqadd.s16 d19, d17, d27 // t8a
vqsub.s16 d17, d17, d27 // t12a
vqadd.s16 d28, d30, d20 // t9a
vqsub.s16 d30, d30, d20 // t13a
vqadd.s16 d27, d18, d25 // t10a
vqsub.s16 d18, d18, d25 // t14a
vqadd.s16 d20, d29, d22 // t11a
vqsub.s16 d29, d29, d22 // t15a
vmull_vmlal q2, d2, d3, d0[3], d0[2] // -> t4a
vmull_vmlsl q3, d2, d3, d0[2], d0[3] // -> t5a
vmull_vmlsl q4, d24, d23, d0[3], d0[2] // -> t6a
vqrshrn.s32 d22, q2, #12 // t4a
vqrshrn.s32 d25, q3, #12 // t5a
vmull_vmlal q2, d24, d23, d0[2], d0[3] // -> t7a
vmull_vmlal q3, d17, d30, d0[3], d0[2] // -> t12
vqrshrn.s32 d24, q4, #12 // t6a
vqrshrn.s32 d23, q2, #12 // t7a
vmull_vmlsl q4, d17, d30, d0[2], d0[3] // -> t13
vmull_vmlsl q2, d29, d18, d0[3], d0[2] // -> t14
vqrshrn.s32 d17, q3, #12 // t12
vmull_vmlal q3, d29, d18, d0[2], d0[3] // -> t15
vqrshrn.s32 d29, q4, #12 // t13
vqrshrn.s32 d30, q2, #12 // t14
vqrshrn.s32 d18, q3, #12 // t15
vqsub.s16 d2, d16, d21 // t2a
.ifc \o0, d16
vqadd.s16 \o0, d16, d21 // out0
vqsub.s16 d21, d31, d26 // t3a
vqadd.s16 \o15,d31, d26 // out15
.else
vqadd.s16 d4, d16, d21 // out0
vqsub.s16 d21, d31, d26 // t3a
vqadd.s16 \o15,d31, d26 // out15
vmov \o0, d4
.endif
vqneg.s16 \o15, \o15 // out15
vqsub.s16 d3, d29, d18 // t15a
vqadd.s16 \o13,d29, d18 // out13
vqadd.s16 \o2, d17, d30 // out2
vqsub.s16 d26, d17, d30 // t14a
vqneg.s16 \o13,\o13 // out13
vqadd.s16 \o1, d19, d27 // out1
vqsub.s16 d27, d19, d27 // t10
vqadd.s16 \o14,d28, d20 // out14
vqsub.s16 d20, d28, d20 // t11
vqneg.s16 \o1, \o1 // out1
vqadd.s16 \o3, d22, d24 // out3
vqsub.s16 d22, d22, d24 // t6
vqadd.s16 \o12,d25, d23 // out12
vqsub.s16 d23, d25, d23 // t7
vqneg.s16 \o3, \o3 // out3
vmull_vmlsl q12, d2, d21, d0[0], d0[0] // -> out8 (d24 or d23)
vmull_vmlal q2, d2, d21, d0[0], d0[0] // -> out7 (d23 or d24)
vmull_vmlal q3, d26, d3, d0[0], d0[0] // -> out5 (d21 or d26)
vqrshrn.s32 d24, q12, #12 // out8
vqrshrn.s32 d4, q2, #12 // out7
vqrshrn.s32 d5, q3, #12 // out5
vmull_vmlsl q4, d26, d3, d0[0], d0[0] // -> out10 (d26 or d21)
vmull_vmlal q1, d22, d23, d0[0], d0[0] // -> out4 (d20 or d27)
vqrshrn.s32 d26, q4, #12 // out10
vmull_vmlsl q4, d22, d23, d0[0], d0[0] // -> out11 (d27 or d20)
vmull_vmlal q11, d27, d20, d0[0], d0[0] // -> out6 (d22 or d25)
vmull_vmlsl q3, d27, d20, d0[0], d0[0] // -> out9 (d25 or d22)
vqrshrn.s32 \o4, q1, #12 // out4
vqrshrn.s32 d7, q3, #12 // out9
vqrshrn.s32 d6, q4, #12 // out11
vqrshrn.s32 \o6, q11, #12 // out6
.ifc \o8, d23
vmov \o8, d24
vmov \o10,d26
.endif
vqneg.s16 \o7, d4 // out7
vqneg.s16 \o5, d5 // out5
vqneg.s16 \o11,d6 // out11
vqneg.s16 \o9, d7 // out9
.endm
function inv_adst_4h_x16_neon, export=1
iadst_16 d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
bx lr
endfunc
function inv_flipadst_4h_x16_neon, export=1
iadst_16 d31, d30, d29, d28, d27, d26, d25, d24, d23, d22, d21, d20, d19, d18, d17, d16
bx lr
endfunc
function inv_identity_4h_x16_neon, export=1
movw r12, #2*(5793-4096)*8
vdup.16 d0, r12
.irp i, q8, q9, q10, q11, q12, q13, q14, q15
vqrdmulh.s16 q1, \i, d0[0]
vqadd.s16 \i, \i, \i
vqadd.s16 \i, \i, q1
.endr
bx lr
endfunc
.macro identity_4x16_shift2 c
.irp i, q8, q9, q10, q11, q12, q13, q14, q15
vqrdmulh.s16 q2, \i, \c
vshr.s16 q2, q2, #1
vrhadd.s16 \i, \i, q2
.endr
.endm
.macro identity_4x16_shift1 c
.irp i, q8, q9, q10, q11, q12, q13, q14, q15
vqrdmulh.s16 q2, \i, \c
vrshr.s16 q2, q2, #1
vqadd.s16 \i, \i, q2
.endr
.endm
.macro identity_8x8_shift1 c
identity_4x16_shift1 \c
.endm
.macro identity_8x8 c
.irp i, q8, q9, q10, q11, q12, q13, q14, q15
vqrdmulh.s16 q2, \i, \c
vqadd.s16 \i, \i, \i
vqadd.s16 \i, \i, q2
.endr
.endm
.macro def_horz_16 scale=0, identity=0, shift=2, suffix
function inv_txfm_horz\suffix\()_16x4_neon
push {lr}
vmov.i16 d7, #0
.if \identity
movw r12, #2*(5793-4096)*8
vdup.16 d0, r12
.endif
.if \scale
movw r12, #2896*8
vdup.16 d1, r12
.endif
.irp i, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
vld1.16 {\i}, [r7, :64]
vst1.16 {d7}, [r7, :64], r8
.endr
.if \scale
scale_input d1[0], q8, q9, q10, q11, q12, q13, q14, q15
.endif
.if \identity
.if \shift == -2
identity_4x16_shift2 d0[0]
.else
identity_4x16_shift1 d0[0]
.endif
b L(horz_16x4_epilog)
.else
blx r4
.irp i, q8, q9, q10, q11, q12, q13, q14, q15
vrshr.s16 \i, \i, #\shift
.endr
.if \shift == 1
b L(horz_16x4_epilog)
.else
L(horz_16x4_epilog):
transpose_4x4h q8, q9, d16, d17, d18, d19
transpose_4x4h q10, q11, d20, d21, d22, d23
transpose_4x4h q12, q13, d24, d25, d26, d27
transpose_4x4h q14, q15, d28, d29, d30, d31
.irp i, d16, d20, d24, d28, d17, d21, d25, d29, d18, d22, d26, d30, d19, d23, d27, d31
vst1.16 {\i}, [r6, :64]!
.endr
pop {pc}
.endif
.endif
endfunc
.endm
def_horz_16 scale=1, identity=1, shift=-1, suffix=_scale_identity
def_horz_16 scale=0, identity=1, shift=-2, suffix=_identity
def_horz_16 scale=1, identity=0, shift=1, suffix=_scale
def_horz_16 scale=0, identity=0, shift=2
function inv_txfm_add_vert_4x16_neon
push {lr}
.irp i, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
vld1.16 {\i}, [r7, :64], r8
.endr
blx r5
load_add_store_4x16 r6, r7
pop {pc}
endfunc
function inv_txfm_add_16x16_neon
sub_sp_align 512
ldrh r11, [r10], #2
.irp i, 0, 4, 8, 12
add r6, sp, #(\i*16*2)
.if \i > 0
mov r8, #(16 - \i)
cmp r3, r11
blt 1f
.if \i < 12
ldrh r11, [r10], #2
.endif
.endif
add r7, r2, #(\i*2)
mov r8, #16*2
blx r9
.endr
b 3f
1:
vmov.i16 q2, #0
vmov.i16 q3, #0
2:
subs r8, r8, #4
.rept 4
vst1.16 {q2, q3}, [r6, :128]!
.endr
bgt 2b
3:
.irp i, 0, 4, 8, 12
add r6, r0, #(\i)
add r7, sp, #(\i*2)
mov r8, #32
bl inv_txfm_add_vert_4x16_neon
.endr
add_sp_align 512
vpop {q4}
pop {r4-r11,pc}
endfunc
const eob_16x16
.short 10, 36, 78, 256
endconst
const eob_16x16_identity
.short 4, 8, 12, 256
endconst
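// eob thresholds: while the eob passed in r3 is below the next entry, the
// remaining 16x4 coefficient slices are known to be all zero, so
// inv_txfm_add_16x16_neon above zero-fills that part of the intermediate
// buffer instead of running the horizontal pass on it. Mixed identity/
// non-identity combinations select eob_16x16_identity instead (see
// def_fn_16x16 below).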
.macro def_fn_16x16 txfm1, txfm2
function inv_txfm_add_\txfm1\()_\txfm2\()_16x16_8bpc_neon, export=1
.ifc \txfm1\()_\txfm2, dct_dct
idct_dc 16, 16, 2
.endif
push {r4-r11,lr}
vpush {q4}
.ifc \txfm1, identity
movrel_local r9, inv_txfm_horz_identity_16x4_neon
.else
movrel_local r9, inv_txfm_horz_16x4_neon
movrel_local r4, inv_\txfm1\()_4h_x16_neon
.endif
movrel_local r5, inv_\txfm2\()_4h_x16_neon
.ifc \txfm1, identity
.ifc \txfm2, identity
movrel_local r10, eob_16x16
.else
movrel_local r10, eob_16x16_identity
.endif
.else
.ifc \txfm2, identity
movrel_local r10, eob_16x16_identity
.else
movrel_local r10, eob_16x16
.endif
.endif
b inv_txfm_add_16x16_neon
endfunc
.endm
def_fn_16x16 dct, dct
def_fn_16x16 identity, identity
def_fn_16x16 dct, adst
def_fn_16x16 dct, flipadst
def_fn_16x16 dct, identity
def_fn_16x16 adst, dct
def_fn_16x16 adst, adst
def_fn_16x16 adst, flipadst
def_fn_16x16 flipadst, dct
def_fn_16x16 flipadst, adst
def_fn_16x16 flipadst, flipadst
def_fn_16x16 identity, dct
.macro def_fn_416_base variant
function inv_txfm_\variant\()add_16x4_neon
.ifc \variant, identity_
vmov.i16 d4, #0
.irp i, d16, d18, d20, d22
vld1.16 {\i}, [r2, :64]
vst1.16 {d4}, [r2, :64]!
.endr
.irp i, d17, d19, d21, d23
vld1.16 {\i}, [r2, :64]
vst1.16 {d4}, [r2, :64]!
.endr
movw r12, #2*(5793-4096)*8
vdup.16 d0, r12
.irp i, d24, d26, d28, d30
vld1.16 {\i}, [r2, :64]
vst1.16 {d4}, [r2, :64]!
.endr
.irp i, d25, d27, d29, d31
vld1.16 {\i}, [r2, :64]
vst1.16 {d4}, [r2, :64]!
.endr
identity_4x16_shift1 d0[0]
b L(itx_16x4_epilog)
.else
vmov.i16 q2, #0
vmov.i16 q3, #0
vld1.16 {d16, d17, d18, d19}, [r2, :128]
vst1.16 {q2, q3}, [r2, :128]!
vld1.16 {d20, d21, d22, d23}, [r2, :128]
vst1.16 {q2, q3}, [r2, :128]!
vld1.16 {d24, d25, d26, d27}, [r2, :128]
vst1.16 {q2, q3}, [r2, :128]!
vld1.16 {d28, d29, d30, d31}, [r2, :128]
vst1.16 {q2, q3}, [r2, :128]!
blx r4
vswp d17, d20
vswp d19, d22
vswp d18, d20
vswp d19, d21
vswp d25, d28
vswp d27, d30
vswp d26, d28
vswp d27, d29
.irp i, q8, q9, q10, q11, q12, q13, q14, q15
vrshr.s16 \i, \i, #1
.endr
L(itx_16x4_epilog):
transpose_4x8h q8, q9, q10, q11
blx r5
mov r6, r0
load_add_store_8x4 r6, r7
vmov q8, q12
vmov q9, q13
vmov q10, q14
vmov q11, q15
transpose_4x8h q8, q9, q10, q11
blx r5
add r6, r0, #8
load_add_store_8x4 r6, r7
vpop {q4-q7}
pop {r4-r11,pc}
.endif
endfunc
function inv_txfm_\variant\()add_4x16_neon
vmov.i16 q2, #0
mov r11, #32
cmp r3, r10
blt 1f
add r6, r2, #16
.ifc \variant, identity_
.irp i, q12, q13, q14, q15
vld1.16 {\i}, [r6, :128]
vst1.16 {q2}, [r6, :128], r11
.endr
movw r12, #(5793-4096)*8
vdup.16 d0, r12
identity_8x4_shift1 q12, q13, q14, q15, d0[0]
.else
.irp i, q8, q9, q10, q11
vld1.16 {\i}, [r6, :128]
vst1.16 {q2}, [r6, :128], r11
.endr
blx r4
vrshr.s16 q12, q8, #1
vrshr.s16 q13, q9, #1
vrshr.s16 q14, q10, #1
vrshr.s16 q15, q11, #1
.endif
transpose_4x8h q12, q13, q14, q15
vswp d27, d29
vswp d26, d28
vswp d27, d30
vswp d25, d28
b 2f
1:
.irp i, q12, q13, q14, q15
vmov.i16 \i, #0
.endr
2:
vmov.i16 q2, #0
.irp i, q8, q9, q10, q11
vld1.16 {\i}, [r2, :128]
vst1.16 {q2}, [r2, :128], r11
.endr
.ifc \variant, identity_
movw r12, #(5793-4096)*8
vdup.16 d0, r12
identity_8x4_shift1 q8, q9, q10, q11, d0[0]
b L(itx_4x16_epilog)
.else
blx r4
.irp i, q8, q9, q10, q11
vrshr.s16 \i, \i, #1
.endr
L(itx_4x16_epilog):
transpose_4x8h q8, q9, q10, q11
vswp d19, d21
vswp d18, d20
vswp d19, d22
vswp d17, d20
blx r5
load_add_store_4x16 r0, r6
vpop {q4-q7}
pop {r4-r11,pc}
.endif
endfunc
.endm
def_fn_416_base identity_
def_fn_416_base
.macro def_fn_416 w, h, txfm1, txfm2, eob_half
function inv_txfm_add_\txfm1\()_\txfm2\()_\w\()x\h\()_8bpc_neon, export=1
.ifc \txfm1\()_\txfm2, dct_dct
idct_dc \w, \h, 1
.endif
push {r4-r11,lr}
vpush {q4-q7}
.if \w == 4
.ifnc \txfm1, identity
movrel_local r4, inv_\txfm1\()_8h_x\w\()_neon
.endif
movrel_local r5, inv_\txfm2\()_4h_x\h\()_neon
mov r10, #\eob_half
.else
.ifnc \txfm1, identity
movrel_local r4, inv_\txfm1\()_4h_x\w\()_neon
.endif
movrel_local r5, inv_\txfm2\()_8h_x\h\()_neon
.endif
.ifc \txfm1, identity
b inv_txfm_identity_add_\w\()x\h\()_neon
.else
b inv_txfm_add_\w\()x\h\()_neon
.endif
endfunc
.endm
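// \eob_half is the eob value below which the second half of the coefficient
// block is known to be all zero; inv_txfm_add_4x16_neon above compares the
// eob in r3 against it (kept in r10) and skips the first pass for that half
// entirely.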
.macro def_fns_416 w, h
def_fn_416 \w, \h, dct, dct, 29
def_fn_416 \w, \h, identity, identity, 29
def_fn_416 \w, \h, dct, adst, 29
def_fn_416 \w, \h, dct, flipadst, 29
def_fn_416 \w, \h, dct, identity, 8
def_fn_416 \w, \h, adst, dct, 29
def_fn_416 \w, \h, adst, adst, 29
def_fn_416 \w, \h, adst, flipadst, 29
def_fn_416 \w, \h, flipadst, dct, 29
def_fn_416 \w, \h, flipadst, adst, 29
def_fn_416 \w, \h, flipadst, flipadst, 29
def_fn_416 \w, \h, identity, dct, 32
def_fn_416 \w, \h, adst, identity, 8
def_fn_416 \w, \h, flipadst, identity, 8
def_fn_416 \w, \h, identity, adst, 32
def_fn_416 \w, \h, identity, flipadst, 32
.endm
def_fns_416 4, 16
def_fns_416 16, 4
function inv_txfm_add_16x8_neon
sub_sp_align 256
.irp i, 0, 4
add r6, sp, #(\i*16*2)
.if \i > 0
cmp r3, r10
blt 1f
.endif
add r7, r2, #(\i*2)
mov r8, #8*2
blx r9
.endr
b 2f
1:
vmov.i16 q2, #0
vmov.i16 q3, #0
.rept 4
vst1.16 {q2, q3}, [r6, :128]!
.endr
2:
.irp i, 0, 8
add r7, sp, #(\i*2)
mov r8, #32
.irp j, q8, q9, q10, q11, q12, q13, q14, q15
vld1.16 {\j}, [r7, :128], r8
.endr
blx r5
add r6, r0, #(\i)
load_add_store_8x8 r6, r7
.endr
add_sp_align 256
vpop {q4-q7}
pop {r4-r11,pc}
endfunc
.macro def_fn_816_base variant
function inv_txfm_\variant\()add_8x16_neon
sub_sp_align 256
.irp i, 0, 8
add r6, sp, #(\i*8*2)
.if \i > 0
cmp r3, r10
blt 1f
.endif
add r7, r2, #(\i*2)
mov r8, #16*2
vmov.i16 q2, #0
movw r12, #2896*8
vdup.16 d0, r12
.irp j, q8, q9, q10, q11, q12, q13, q14, q15
vld1.16 {\j}, [r7, :128]
vst1.16 {q2}, [r7, :128], r8
.endr
scale_input d0[0], q8, q9, q10, q11, q12, q13, q14, q15
.ifc \variant, identity_
// The identity shl #1 and downshift vrshr #1 cancel out
.else
blx r4
.irp j, q8, q9, q10, q11, q12, q13, q14, q15
vrshr.s16 \j, \j, #1
.endr
.endif
transpose_8x8h q8, q9, q10, q11, q12, q13, q14, q15, d17, d19, d21, d23, d24, d26, d28, d30
vst1.16 {q8, q9}, [r6, :128]!
vst1.16 {q10, q11}, [r6, :128]!
vst1.16 {q12, q13}, [r6, :128]!
vst1.16 {q14, q15}, [r6, :128]!
.endr
b 2f
1:
vmov.i16 q2, #0
vmov.i16 q3, #0
.rept 4
vst1.16 {q2, q3}, [r6, :128]!
.endr
2:
.ifc \variant, identity_
b L(itx_8x16_epilog)
.else
L(itx_8x16_epilog):
.irp i, 0, 4
add r6, r0, #(\i)
add r7, sp, #(\i*2)
mov r8, #16
bl inv_txfm_add_vert_4x16_neon
.endr
add_sp_align 256
vpop {q4-q7}
pop {r4-r11,pc}
.endif
endfunc
.endm
def_fn_816_base identity_
def_fn_816_base
/* Define symbols used in .if statement */
.equ dct, 1
.equ identity, 2
.equ adst, 3
.equ flipadst, 4
.macro def_fn_816 w, h, txfm1, txfm2, eob_8x8, eob_4x4
function inv_txfm_add_\txfm1\()_\txfm2\()_\w\()x\h\()_8bpc_neon, export=1
.ifc \txfm1\()_\txfm2, dct_dct
idct_dc \w, \h, 1
.endif
push {r4-r11,lr}
vpush {q4-q7}
.if \w == 8
.ifnc \txfm1, identity
movrel_local r4, inv_\txfm1\()_8h_x8_neon
.endif
movrel_local r5, inv_\txfm2\()_4h_x16_neon
.else
.ifc \txfm1, identity
movrel_local r9, inv_txfm_horz_scale_identity_16x4_neon
.else
movrel_local r4, inv_\txfm1\()_4h_x16_neon
movrel_local r9, inv_txfm_horz_scale_16x4_neon
.endif
movrel_local r5, inv_\txfm2\()_8h_x8_neon
.endif
.if \w == 8
mov r10, #\eob_8x8
.else
mov r10, #\eob_4x4
.endif
.if \w == 8 && \txfm1 == identity
b inv_txfm_identity_add_\w\()x\h\()_neon
.else
b inv_txfm_add_\w\()x\h\()_neon
.endif
endfunc
.endm
.macro def_fns_816 w, h
def_fn_816 \w, \h, dct, dct, 43, 10
def_fn_816 \w, \h, identity, identity, 43, 10
def_fn_816 \w, \h, dct, adst, 43, 10
def_fn_816 \w, \h, dct, flipadst, 43, 10
def_fn_816 \w, \h, dct, identity, 8, 4
def_fn_816 \w, \h, adst, dct, 43, 10
def_fn_816 \w, \h, adst, adst, 43, 10
def_fn_816 \w, \h, adst, flipadst, 43, 10
def_fn_816 \w, \h, flipadst, dct, 43, 10
def_fn_816 \w, \h, flipadst, adst, 43, 10
def_fn_816 \w, \h, flipadst, flipadst, 43, 10
def_fn_816 \w, \h, identity, dct, 64, 4
def_fn_816 \w, \h, adst, identity, 8, 4
def_fn_816 \w, \h, flipadst, identity, 8, 4
def_fn_816 \w, \h, identity, adst, 64, 4
def_fn_816 \w, \h, identity, flipadst, 64, 4
.endm
def_fns_816 8, 16
def_fns_816 16, 8
function inv_dct32_odd_4h_x16_neon, export=1
movrel_local r12, idct_coeffs, 2*16
vld1.16 {q0, q1}, [r12, :128]
sub r12, r12, #2*16
vmull_vmlsl q2, d16, d31, d0[0], d0[1] // -> t16a
vmull_vmlal q3, d16, d31, d0[1], d0[0] // -> t31a
vmull_vmlsl q4, d24, d23, d0[2], d0[3] // -> t17a
vqrshrn.s32 d16, q2, #12 // t16a
vqrshrn.s32 d31, q3, #12 // t31a
vmull_vmlal q2, d24, d23, d0[3], d0[2] // -> t30a
vmull_vmlsl q3, d20, d27, d1[0], d1[1] // -> t18a
vqrshrn.s32 d24, q4, #12 // t17a
vqrshrn.s32 d23, q2, #12 // t30a
vmull_vmlal q4, d20, d27, d1[1], d1[0] // -> t29a
vmull_vmlsl q2, d28, d19, d1[2], d1[3] // -> t19a
vqrshrn.s32 d20, q3, #12 // t18a
vqrshrn.s32 d27, q4, #12 // t29a
vmull_vmlal q3, d28, d19, d1[3], d1[2] // -> t28a
vmull_vmlsl q4, d18, d29, d2[0], d2[1] // -> t20a
vqrshrn.s32 d28, q2, #12 // t19a
vqrshrn.s32 d19, q3, #12 // t28a
vmull_vmlal q2, d18, d29, d2[1], d2[0] // -> t27a
vmull_vmlsl q3, d26, d21, d2[2], d2[3] // -> t21a
vqrshrn.s32 d18, q4, #12 // t20a
vqrshrn.s32 d29, q2, #12 // t27a
vmull_vmlal q4, d26, d21, d2[3], d2[2] // -> t26a
vmull_vmlsl q2, d22, d25, d3[0], d3[1] // -> t22a
vqrshrn.s32 d26, q3, #12 // t21a
vqrshrn.s32 d21, q4, #12 // t26a
vmull_vmlal q3, d22, d25, d3[1], d3[0] // -> t25a
vmull_vmlsl q4, d30, d17, d3[2], d3[3] // -> t23a
vqrshrn.s32 d22, q2, #12 // t22a
vqrshrn.s32 d25, q3, #12 // t25a
vmull_vmlal q2, d30, d17, d3[3], d3[2] // -> t24a
vqrshrn.s32 d30, q4, #12 // t23a
vqrshrn.s32 d17, q2, #12 // t24a
vld1.16 {q0}, [r12, :128]
vqsub.s16 d2, d16, d24 // t17
vqadd.s16 d16, d16, d24 // t16
vqsub.s16 d3, d31, d23 // t30
vqadd.s16 d31, d31, d23 // t31
vqsub.s16 d24, d28, d20 // t18
vqadd.s16 d28, d28, d20 // t19
vqadd.s16 d23, d18, d26 // t20
vqsub.s16 d18, d18, d26 // t21
vqsub.s16 d20, d30, d22 // t22
vqadd.s16 d30, d30, d22 // t23
vqadd.s16 d26, d17, d25 // t24
vqsub.s16 d17, d17, d25 // t25
vqsub.s16 d22, d29, d21 // t26
vqadd.s16 d29, d29, d21 // t27
vqadd.s16 d25, d19, d27 // t28
vqsub.s16 d19, d19, d27 // t29
vmull_vmlsl q2, d3, d2, d1[0], d1[1] // -> t17a
vmull_vmlal q3, d3, d2, d1[1], d1[0] // -> t30a
vmull_vmlal q4, d19, d24, d1[1], d1[0] // -> t18a
vqrshrn.s32 d21, q2, #12 // t17a
vqrshrn.s32 d27, q3, #12 // t30a
vneg.s32 q4, q4 // -> t18a
vmull_vmlsl q1, d19, d24, d1[0], d1[1] // -> t29a
vmull_vmlsl q2, d22, d18, d1[2], d1[3] // -> t21a
vqrshrn.s32 d19, q4, #12 // t18a
vqrshrn.s32 d24, q1, #12 // t29a
vmull_vmlal q3, d22, d18, d1[3], d1[2] // -> t26a
vmull_vmlal q4, d17, d20, d1[3], d1[2] // -> t22a
vqrshrn.s32 d22, q2, #12 // t21a
vqrshrn.s32 d18, q3, #12 // t26a
vneg.s32 q4, q4 // -> t22a
vmull_vmlsl q1, d17, d20, d1[2], d1[3] // -> t25a
vqrshrn.s32 d17, q4, #12 // t22a
vqrshrn.s32 d20, q1, #12 // t25a
vqsub.s16 d2, d27, d24 // t29
vqadd.s16 d27, d27, d24 // t30
vqsub.s16 d3, d21, d19 // t18
vqadd.s16 d21, d21, d19 // t17
vqsub.s16 d24, d16, d28 // t19a
vqadd.s16 d16, d16, d28 // t16a
vqsub.s16 d19, d30, d23 // t20a
vqadd.s16 d30, d30, d23 // t23a
vqsub.s16 d28, d17, d22 // t21
vqadd.s16 d17, d17, d22 // t22
vqadd.s16 d23, d26, d29 // t24a
vqsub.s16 d26, d26, d29 // t27a
vqadd.s16 d22, d20, d18 // t25
vqsub.s16 d20, d20, d18 // t26
vqsub.s16 d29, d31, d25 // t28a
vqadd.s16 d31, d31, d25 // t31a
vmull_vmlsl q2, d2, d3, d0[2], d0[3] // -> t18a
vmull_vmlal q3, d2, d3, d0[3], d0[2] // -> t29a
vmull_vmlsl q4, d29, d24, d0[2], d0[3] // -> t19
vqrshrn.s32 d18, q2, #12 // t18a
vqrshrn.s32 d25, q3, #12 // t29a
vmull_vmlal q1, d29, d24, d0[3], d0[2] // -> t28
vmull_vmlal q2, d26, d19, d0[3], d0[2] // -> t20
vqrshrn.s32 d29, q4, #12 // t19
vqrshrn.s32 d24, q1, #12 // t28
vneg.s32 q2, q2 // -> t20
vmull_vmlsl q3, d26, d19, d0[2], d0[3] // -> t27
vmull_vmlal q4, d20, d28, d0[3], d0[2] // -> t21a
vqrshrn.s32 d26, q2, #12 // t20
vqrshrn.s32 d19, q3, #12 // t27
vneg.s32 q4, q4 // -> t21a
vmull_vmlsl q1, d20, d28, d0[2], d0[3] // -> t26a
vqrshrn.s32 d20, q4, #12 // t21a
vqrshrn.s32 d28, q1, #12 // t26a
vqsub.s16 d2, d16, d30 // t23
vqadd.s16 d16, d16, d30 // t16 = out16
vqsub.s16 d3, d31, d23 // t24
vqadd.s16 d31, d31, d23 // t31 = out31
vqsub.s16 d23, d21, d17 // t22a
vqadd.s16 d17, d21, d17 // t17a = out17
vqadd.s16 d30, d27, d22 // t30a = out30
vqsub.s16 d21, d27, d22 // t25a
vqsub.s16 d27, d18, d20 // t21
vqadd.s16 d18, d18, d20 // t18 = out18
vqadd.s16 d4, d29, d26 // t19a = out19
vqsub.s16 d26, d29, d26 // t20a
vqadd.s16 d29, d25, d28 // t29 = out29
vqsub.s16 d25, d25, d28 // t26
vqadd.s16 d28, d24, d19 // t28a = out28
vqsub.s16 d24, d24, d19 // t27a
vmov d19, d4 // out19
vmull_vmlsl q2, d24, d26, d0[0], d0[0] // -> t20
vmull_vmlal q3, d24, d26, d0[0], d0[0] // -> t27
vqrshrn.s32 d20, q2, #12 // t20
vqrshrn.s32 d22, q3, #12 // t27
vmull_vmlal q2, d25, d27, d0[0], d0[0] // -> t26a
vmull_vmlsl q3, d25, d27, d0[0], d0[0] // -> t21a
vmov d27, d22 // t27
vqrshrn.s32 d26, q2, #12 // t26a
vmull_vmlsl q12, d21, d23, d0[0], d0[0] // -> t22
vmull_vmlal q2, d21, d23, d0[0], d0[0] // -> t25
vqrshrn.s32 d21, q3, #12 // t21a
vqrshrn.s32 d22, q12, #12 // t22
vqrshrn.s32 d25, q2, #12 // t25
vmull_vmlsl q2, d3, d2, d0[0], d0[0] // -> t23a
vmull_vmlal q3, d3, d2, d0[0], d0[0] // -> t24a
vqrshrn.s32 d23, q2, #12 // t23a
vqrshrn.s32 d24, q3, #12 // t24a
bx lr
endfunc
.macro def_horz_32 scale=0, shift=2, suffix
function inv_txfm_horz\suffix\()_dct_32x4_neon
push {lr}
vmov.i16 d7, #0
lsl r8, r8, #1
.if \scale
movw r12, #2896*8
vdup.16 d0, r12
.endif
.irp i, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
vld1.16 {\i}, [r7, :64]
vst1.16 {d7}, [r7, :64], r8
.endr
sub r7, r7, r8, lsl #4
add r7, r7, r8, lsr #1
.if \scale
scale_input d0[0], q8, q9, q10, q11, q12, q13, q14, q15
.endif
bl inv_dct_4h_x16_neon
transpose_4x4h q8, q9, d16, d17, d18, d19
transpose_4x4h q10, q11, d20, d21, d22, d23
transpose_4x4h q12, q13, d24, d25, d26, d27
transpose_4x4h q14, q15, d28, d29, d30, d31
.macro store1 r0, r1, r2, r3
vst1.16 {\r0}, [r6, :64]!
vst1.16 {\r1}, [r6, :64]!
vst1.16 {\r2}, [r6, :64]!
vst1.16 {\r3}, [r6, :64]!
add r6, r6, #32
.endm
store1 d16, d20, d24, d28
store1 d17, d21, d25, d29
store1 d18, d22, d26, d30
store1 d19, d23, d27, d31
.purgem store1
sub r6, r6, #64*4
vmov.i16 d7, #0
.irp i, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
vld1.16 {\i}, [r7, :64]
vst1.16 {d7}, [r7, :64], r8
.endr
.if \scale
// This relies on the fact that the idct also leaves the correct coefficient in d0[1]
scale_input d0[1], q8, q9, q10, q11, q12, q13, q14, q15
.endif
bl inv_dct32_odd_4h_x16_neon
transpose_4x4h q15, q14, d31, d30, d29, d28
transpose_4x4h q13, q12, d27, d26, d25, d24
transpose_4x4h q11, q10, d23, d22, d21, d20
transpose_4x4h q9, q8, d19, d18, d17, d16
.macro store2 r0, r1, r2, r3, shift
vld1.16 {q0, q1}, [r6, :128]
vqsub.s16 d7, d0, \r0
vqadd.s16 d0, d0, \r0
vqsub.s16 d6, d1, \r1
vqadd.s16 d1, d1, \r1
vqsub.s16 d5, d2, \r2
vqadd.s16 d2, d2, \r2
vqsub.s16 d4, d3, \r3
vqadd.s16 d3, d3, \r3
vrev64.16 q2, q2
vrev64.16 q3, q3
vrshr.s16 q0, q0, #\shift
vrshr.s16 q1, q1, #\shift
vrshr.s16 q2, q2, #\shift
vrshr.s16 q3, q3, #\shift
vst1.16 {q0, q1}, [r6, :128]!
vst1.16 {q2, q3}, [r6, :128]!
.endm
store2 d31, d27, d23, d19, \shift
store2 d30, d26, d22, d18, \shift
store2 d29, d25, d21, d17, \shift
store2 d28, d24, d20, d16, \shift
.purgem store2
pop {pc}
endfunc
.endm
def_horz_32 scale=0, shift=2
def_horz_32 scale=1, shift=1, suffix=_scale
function inv_txfm_add_vert_dct_4x32_neon
push {r10-r11,lr}
lsl r8, r8, #1
.irp i, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
vld1.16 {\i}, [r7, :64], r8
.endr
sub r7, r7, r8, lsl #4
bl inv_dct_4h_x16_neon
.irp i, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
vst1.16 {\i}, [r7, :64], r8
.endr
sub r7, r7, r8, lsl #4
add r7, r7, r8, lsr #1
.irp i, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
vld1.16 {\i}, [r7, :64], r8
.endr
sub r7, r7, r8, lsl #4
sub r7, r7, r8, lsr #1
bl inv_dct32_odd_4h_x16_neon
neg r9, r8
mov r10, r6
.macro combine r0, r1, r2, r3, op, stride
vld1.16 {d4}, [r7, :64], \stride
vld1.32 {d2[0]}, [r10, :32], r1
vld1.16 {d5}, [r7, :64], \stride
vld1.32 {d2[1]}, [r10, :32], r1
\op\().s16 d4, d4, \r0
vld1.16 {d6}, [r7, :64], \stride
vld1.32 {d3[0]}, [r10, :32], r1
\op\().s16 d5, d5, \r1
vld1.32 {d3[1]}, [r10, :32], r1
vrshr.s16 q2, q2, #4
\op\().s16 d6, d6, \r2
vld1.16 {d7}, [r7, :64], \stride
vaddw.u8 q2, q2, d2
\op\().s16 d7, d7, \r3
vqmovun.s16 d2, q2
vrshr.s16 q3, q3, #4
vst1.32 {d2[0]}, [r6, :32], r1
vaddw.u8 q3, q3, d3
vst1.32 {d2[1]}, [r6, :32], r1
vqmovun.s16 d3, q3
vst1.32 {d3[0]}, [r6, :32], r1
vst1.32 {d3[1]}, [r6, :32], r1
.endm
combine d31, d30, d29, d28, vqadd, r8
combine d27, d26, d25, d24, vqadd, r8
combine d23, d22, d21, d20, vqadd, r8
combine d19, d18, d17, d16, vqadd, r8
sub r7, r7, r8
combine d16, d17, d18, d19, vqsub, r9
combine d20, d21, d22, d23, vqsub, r9
combine d24, d25, d26, d27, vqsub, r9
combine d28, d29, d30, d31, vqsub, r9
.purgem combine
pop {r10-r11,pc}
endfunc
const eob_32x32
.short 10, 36, 78, 136, 210, 300, 406, 1024
endconst
const eob_16x32
.short 10, 36, 78, 151, 215, 279, 343, 512
endconst
const eob_16x32_shortside
.short 10, 36, 78, 512
endconst
const eob_8x32
// Unlike the other eob tables, this one is only ever used in increments of 8x8
.short 43, 107, 171, 256
endconst
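// Rough sketch of how the eob tables above are used (illustrative C-like
// pseudocode, names assumed, not dav1d API): the first pass walks the
// coefficients in groups of a few lines, and each threshold says whether the
// next group can still contain nonzero coefficients for the given eob (r3).
//
//   for (int i = 0; i < n; i += step) {
//       if (i > 0 && eob < *thresh++)
//           break;                 // remaining input is known to be zero
//       process_group(i);
//   }
//   // skipped groups are zero-filled instead of being computed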
function inv_txfm_add_identity_identity_32x32_8bpc_neon, export=1
push {r4-r7,lr}
vmov.i16 q0, #0
movrel_local r5, eob_32x32, 2
mov r6, #2*32
1:
mov r12, #0
movrel_local r4, eob_32x32, 2
2:
add r12, r12, #8
.irp i, q8, q9, q10, q11, q12, q13, q14, q15
vld1.16 {\i}, [r2, :128]
vst1.16 {q0}, [r2, :128], r6
.endr
transpose_8x8h q8, q9, q10, q11, q12, q13, q14, q15, d17, d19, d21, d23, d24, d26, d28, d30
load_add_store_8x8 r0, r7, shiftbits=2
ldrh lr, [r4], #4
sub r0, r0, r1, lsl #3
cmp r3, lr
add r0, r0, #8
bge 2b
ldrh lr, [r5], #4
cmp r3, lr
blt 9f
sub r0, r0, r12
add r0, r0, r1, lsl #3
mls r2, r6, r12, r2
add r2, r2, #2*8
b 1b
9:
pop {r4-r7,pc}
endfunc
.macro shift_8_regs op, shift
.irp i, q8, q9, q10, q11, q12, q13, q14, q15
\op \i, \i, #\shift
.endr
.endm
.macro def_identity_1632 w, h, wshort, hshort
function inv_txfm_add_identity_identity_\w\()x\h\()_8bpc_neon, export=1
push {r4-r7,lr}
movw r6, #2896*8
movw r7, #2*(5793-4096)*8
vdup.i16 d0, r6
movrel_local r5, eob_16x32\hshort, 2
vmov.16 d0[1], r7
mov r6, #2*\h
1:
mov r12, #0
movrel_local r4, eob_16x32\wshort, 2
2:
vmov.i16 q1, #0
add r12, r12, #8
.irp i, q8, q9, q10, q11, q12, q13, q14, q15
vld1.16 {\i}, [r2, :128]
vst1.16 {q1}, [r2, :128], r6
.endr
scale_input d0[0], q8, q9, q10, q11, q12, q13, q14, q15
.if \w == 16
// 16x32
identity_8x8_shift1 d0[1]
.else
// 32x16
shift_8_regs vqshl.s16, 1
identity_8x8 d0[1]
.endif
transpose_8x8h q8, q9, q10, q11, q12, q13, q14, q15, d17, d19, d21, d23, d24, d26, d28, d30
.if \w == 16
load_add_store_8x8 r0, r7, shiftbits=2
.else
load_add_store_8x8 r0, r7, shiftbits=4
.endif
ldrh lr, [r4], #4
sub r0, r0, r1, lsl #3
cmp r3, lr
add r0, r0, #8
bge 2b
ldrh lr, [r5], #4
cmp r3, lr
blt 9f
sub r0, r0, r12
add r0, r0, r1, lsl #3
mls r2, r6, r12, r2
add r2, r2, #2*8
b 1b
9:
pop {r4-r7,pc}
endfunc
.endm
def_identity_1632 16, 32, _shortside,
def_identity_1632 32, 16, , _shortside
.macro def_identity_832 w, h
function inv_txfm_add_identity_identity_\w\()x\h\()_8bpc_neon, export=1
push {r4-r5,lr}
vmov.i16 q0, #0
movrel_local r4, eob_8x32
mov r12, #2*\h
1:
ldrh lr, [r4], #2
.irp i, q8, q9, q10, q11, q12, q13, q14, q15
vld1.16 {\i}, [r2, :128]
vst1.16 {q0}, [r2, :128], r12
.endr
.if \w == 8
// 8x32
shift_8_regs vrshr.s16, 1
.endif
transpose_8x8h q8, q9, q10, q11, q12, q13, q14, q15, d17, d19, d21, d23, d24, d26, d28, d30
cmp r3, lr
.if \w == 8
load_add_store_8x8 r0, r5, shiftbits=2
.else
load_add_store_8x8 r0, r5, shiftbits=3
.endif
blt 9f
.if \w == 8
sub r2, r2, r12, lsl #3
add r2, r2, #2*8
.else
sub r0, r0, r1, lsl #3
add r0, r0, #8
.endif
b 1b
9:
pop {r4-r5,pc}
endfunc
.endm
def_identity_832 8, 32
def_identity_832 32, 8
function inv_txfm_add_dct_dct_32x32_8bpc_neon, export=1
idct_dc 32, 32, 2
push {r4-r11,lr}
vpush {q4}
sub_sp_align 2048
movrel_local r10, eob_32x32
ldrh r11, [r10], #2
.irp i, 0, 4, 8, 12, 16, 20, 24, 28
add r6, sp, #(\i*32*2)
.if \i > 0
mov r8, #(32 - \i)
cmp r3, r11
blt 1f
.if \i < 28
ldrh r11, [r10], #2
.endif
.endif
add r7, r2, #(\i*2)
mov r8, #32*2
bl inv_txfm_horz_dct_32x4_neon
.endr
b 3f
1:
vmov.i16 q2, #0
vmov.i16 q3, #0
2:
subs r8, r8, #2
.rept 4
vst1.16 {q2, q3}, [r6, :128]!
.endr
bgt 2b
3:
.irp i, 0, 4, 8, 12, 16, 20, 24, 28
add r6, r0, #(\i)
add r7, sp, #(\i*2)
mov r8, #32*2
bl inv_txfm_add_vert_dct_4x32_neon
.endr
add_sp_align 2048
vpop {q4}
pop {r4-r11,pc}
endfunc
function inv_txfm_add_dct_dct_16x32_8bpc_neon, export=1
idct_dc 16, 32, 1
push {r4-r11,lr}
vpush {q4}
sub_sp_align 1024
movrel_local r10, eob_16x32
ldrh r11, [r10], #2
movrel_local r4, inv_dct_4h_x16_neon
.irp i, 0, 4, 8, 12, 16, 20, 24, 28
add r6, sp, #(\i*16*2)
add r7, r2, #(\i*2)
.if \i > 0
mov r8, #(32 - \i)
cmp r3, r11
blt 1f
.if \i < 28
ldrh r11, [r10], #2
.endif
.endif
mov r8, #2*32
bl inv_txfm_horz_scale_16x4_neon
.endr
b 3f
1:
vmov.i16 q2, #0
vmov.i16 q3, #0
2:
subs r8, r8, #4
.rept 4
vst1.16 {q2, q3}, [r6, :128]!
.endr
bgt 2b
3:
.irp i, 0, 4, 8, 12
add r6, r0, #(\i)
add r7, sp, #(\i*2)
mov r8, #16*2
bl inv_txfm_add_vert_dct_4x32_neon
.endr
add_sp_align 1024
vpop {q4}
pop {r4-r11,pc}
endfunc
function inv_txfm_add_dct_dct_32x16_8bpc_neon, export=1
idct_dc 32, 16, 1
push {r4-r11,lr}
vpush {q4}
sub_sp_align 1024
movrel_local r10, eob_16x32
ldrh r11, [r10], #2
movrel_local r5, inv_dct_4h_x16_neon
.irp i, 0, 4, 8, 12
add r6, sp, #(\i*32*2)
add r7, r2, #(\i*2)
.if \i > 0
mov r8, #(16 - \i)
cmp r3, r11
blt 1f
.if \i < 12
ldrh r11, [r10], #2
.endif
.endif
mov r8, #2*16
bl inv_txfm_horz_scale_dct_32x4_neon
.endr
b 3f
1:
vmov.i16 q2, #0
vmov.i16 q3, #0
2:
subs r8, r8, #2
.rept 4
vst1.16 {q2, q3}, [r6, :128]!
.endr
bgt 2b
3:
.irp i, 0, 4, 8, 12, 16, 20, 24, 28
add r6, r0, #(\i)
add r7, sp, #(\i*2)
mov r8, #32*2
bl inv_txfm_add_vert_4x16_neon
.endr
add_sp_align 1024
vpop {q4}
pop {r4-r11,pc}
endfunc
function inv_txfm_add_dct_dct_8x32_8bpc_neon, export=1
idct_dc 8, 32, 2
push {r4-r11,lr}
vpush {q4-q7}
sub_sp_align 512
movrel_local r10, eob_8x32
mov r8, #2*32
mov r9, #32
mov r6, sp
1:
vmov.i16 q0, #0
.irp i, q8, q9, q10, q11, q12, q13, q14, q15
vld1.16 {\i}, [r2, :128]
vst1.16 {q0}, [r2, :128], r8
.endr
ldrh r11, [r10], #2
sub r2, r2, r8, lsl #3
sub r9, r9, #8
add r2, r2, #2*8
bl inv_dct_8h_x8_neon
.irp i, q8, q9, q10, q11, q12, q13, q14, q15
vrshr.s16 \i, \i, #2
.endr
transpose_8x8h q8, q9, q10, q11, q12, q13, q14, q15, d17, d19, d21, d23, d24, d26, d28, d30
vst1.16 {q8, q9}, [r6, :128]!
cmp r3, r11
vst1.16 {q10, q11}, [r6, :128]!
vst1.16 {q12, q13}, [r6, :128]!
vst1.16 {q14, q15}, [r6, :128]!
bge 1b
cmp r9, #0
beq 3f
vmov.i16 q2, #0
vmov.i16 q3, #0
2:
subs r9, r9, #8
.rept 4
vst1.16 {q2, q3}, [r6, :128]!
.endr
bgt 2b
3:
.irp i, 0, 4
add r6, r0, #(\i)
add r7, sp, #(\i*2)
mov r8, #8*2
bl inv_txfm_add_vert_dct_4x32_neon
.endr
add_sp_align 512
vpop {q4-q7}
pop {r4-r11,pc}
endfunc
function inv_txfm_add_dct_dct_32x8_8bpc_neon, export=1
idct_dc 32, 8, 2
push {r4-r11,lr}
vpush {q4-q7}
sub_sp_align 512
.irp i, 0, 4
add r6, sp, #(\i*32*2)
add r7, r2, #(\i*2)
.if \i > 0
cmp r3, #10
blt 1f
.endif
mov r8, #8*2
bl inv_txfm_horz_dct_32x4_neon
.endr
b 2f
1:
vmov.i16 q2, #0
vmov.i16 q3, #0
.rept 8
vst1.16 {q2, q3}, [r6, :128]!
.endr
2:
mov r8, #2*32
mov r9, #0
1:
add r6, r0, r9
add r7, sp, r9, lsl #1 // #(\i*2)
.irp i, q8, q9, q10, q11, q12, q13, q14, q15
vld1.16 {\i}, [r7, :128], r8
.endr
add r9, r9, #8
bl inv_dct_8h_x8_neon
cmp r9, #32
load_add_store_8x8 r6, r7
blt 1b
add_sp_align 512
vpop {q4-q7}
pop {r4-r11,pc}
endfunc
function inv_dct64_step1_neon
// in1/31/17/15 -> t32a/33/34a/35/60/61a/62/63a
// in7/25/23/ 9 -> t56a/57/58a/59/36/37a/38/39a
// in5/27/21/11 -> t40a/41/42a/43/52/53a/54/55a
// in3/29/19/13 -> t48a/49/50a/51/44/45a/46/47a
vld1.16 {d0, d1, d2}, [r12, :64]!
vqrdmulh.s16 d23, d16, d0[1] // t63a
vqrdmulh.s16 d16, d16, d0[0] // t32a
vqrdmulh.s16 d22, d17, d0[2] // t62a
vqrdmulh.s16 d17, d17, d0[3] // t33a
vqrdmulh.s16 d21, d18, d1[1] // t61a
vqrdmulh.s16 d18, d18, d1[0] // t34a
vqrdmulh.s16 d20, d19, d1[2] // t60a
vqrdmulh.s16 d19, d19, d1[3] // t35a
vqadd.s16 d24, d16, d17 // t32
vqsub.s16 d25, d16, d17 // t33
vqsub.s16 d26, d19, d18 // t34
vqadd.s16 d27, d19, d18 // t35
vqadd.s16 d28, d20, d21 // t60
vqsub.s16 d29, d20, d21 // t61
vqsub.s16 d30, d23, d22 // t62
vqadd.s16 d31, d23, d22 // t63
vmull_vmlal q2, d29, d26, d2[0], d2[1] // -> t34a
vmull_vmlsl q3, d29, d26, d2[1], d2[0] // -> t61a
vneg.s32 q2, q2 // t34a
vmull_vmlsl q4, d30, d25, d2[1], d2[0] // -> t33a
vqrshrn.s32 d26, q2, #12 // t34a
vmull_vmlal q2, d30, d25, d2[0], d2[1] // -> t62a
vqrshrn.s32 d29, q3, #12 // t61a
vqrshrn.s32 d25, q4, #12 // t33a
vqrshrn.s32 d30, q2, #12 // t62a
vqadd.s16 d16, d24, d27 // t32a
vqsub.s16 d19, d24, d27 // t35a
vqadd.s16 d17, d25, d26 // t33
vqsub.s16 d18, d25, d26 // t34
vqsub.s16 d20, d31, d28 // t60a
vqadd.s16 d23, d31, d28 // t63a
vqsub.s16 d21, d30, d29 // t61
vqadd.s16 d22, d30, d29 // t62
vmull_vmlal q2, d21, d18, d2[2], d2[3] // -> t61a
vmull_vmlsl q3, d21, d18, d2[3], d2[2] // -> t34a
vmull_vmlal q4, d20, d19, d2[2], d2[3] // -> t60
vqrshrn.s32 d21, q2, #12 // t61a
vqrshrn.s32 d18, q3, #12 // t34a
vmull_vmlsl q2, d20, d19, d2[3], d2[2] // -> t35
vqrshrn.s32 d20, q4, #12 // t60
vqrshrn.s32 d19, q2, #12 // t35
vst1.16 {d16, d17, d18, d19}, [r6, :128]!
vst1.16 {d20, d21, d22, d23}, [r6, :128]!
bx lr
endfunc
function inv_dct64_step2_neon
movrel_local r12, idct_coeffs
vld1.16 {d0}, [r12, :64]
1:
// t32a/33/34a/35/60/61a/62/63a
// t56a/57/58a/59/36/37a/38/39a
// t40a/41/42a/43/52/53a/54/55a
// t48a/49/50a/51/44/45a/46/47a
vldr d16, [r6, #2*4*0] // t32a
vldr d17, [r9, #2*4*8] // t39a
vldr d18, [r9, #2*4*0] // t63a
vldr d19, [r6, #2*4*8] // t56a
vldr d20, [r6, #2*4*16] // t40a
vldr d21, [r9, #2*4*24] // t47a
vldr d22, [r9, #2*4*16] // t55a
vldr d23, [r6, #2*4*24] // t48a
vqadd.s16 d24, d16, d17 // t32
vqsub.s16 d25, d16, d17 // t39
vqadd.s16 d26, d18, d19 // t63
vqsub.s16 d27, d18, d19 // t56
vqsub.s16 d28, d21, d20 // t40
vqadd.s16 d29, d21, d20 // t47
vqadd.s16 d30, d23, d22 // t48
vqsub.s16 d31, d23, d22 // t55
vmull_vmlal q2, d27, d25, d0[3], d0[2] // -> t56a
vmull_vmlsl q3, d27, d25, d0[2], d0[3] // -> t39a
vmull_vmlal q4, d31, d28, d0[3], d0[2] // -> t40a
vqrshrn.s32 d25, q2, #12 // t56a
vqrshrn.s32 d27, q3, #12 // t39a
vneg.s32 q4, q4 // t40a
vmull_vmlsl q2, d31, d28, d0[2], d0[3] // -> t55a
vqrshrn.s32 d31, q4, #12 // t40a
vqrshrn.s32 d28, q2, #12 // t55a
vqadd.s16 d16, d24, d29 // t32a
vqsub.s16 d19, d24, d29 // t47a
vqadd.s16 d17, d27, d31 // t39
vqsub.s16 d18, d27, d31 // t40
vqsub.s16 d20, d26, d30 // t48a
vqadd.s16 d23, d26, d30 // t63a
vqsub.s16 d21, d25, d28 // t55
vqadd.s16 d22, d25, d28 // t56
vmull_vmlsl q2, d21, d18, d0[0], d0[0] // -> t40a
vmull_vmlal q3, d21, d18, d0[0], d0[0] // -> t55a
vmull_vmlsl q4, d20, d19, d0[0], d0[0] // -> t47
vqrshrn.s32 d18, q2, #12 // t40a
vqrshrn.s32 d21, q3, #12 // t55a
vmull_vmlal q2, d20, d19, d0[0], d0[0] // -> t48
vqrshrn.s32 d19, q4, #12 // t47
vqrshrn.s32 d20, q2, #12 // t48
vstr d16, [r6, #2*4*0] // t32a
vstr d17, [r9, #2*4*0] // t39
vstr d18, [r6, #2*4*8] // t40a
vstr d19, [r9, #2*4*8] // t47
vstr d20, [r6, #2*4*16] // t48
vstr d21, [r9, #2*4*16] // t55a
vstr d22, [r6, #2*4*24] // t56
vstr d23, [r9, #2*4*24] // t63a
add r6, r6, #2*4
sub r9, r9, #2*4
cmp r6, r9
blt 1b
bx lr
endfunc
.macro load8 src, strd, zero, clear
.irp i, d16, d17, d18, d19, d20, d21, d22, d23
.if \clear
vld1.16 {\i}, [\src, :64]
vst1.16 {\zero}, [\src, :64], \strd
.else
vld1.16 {\i}, [\src, :64], \strd
.endif
.endr
.endm
.macro store16 dst
vst1.16 {q8, q9}, [\dst, :128]!
vst1.16 {q10, q11}, [\dst, :128]!
vst1.16 {q12, q13}, [\dst, :128]!
vst1.16 {q14, q15}, [\dst, :128]!
.endm
.macro clear_upper8
.irp i, q12, q13, q14, q15
vmov.i16 \i, #0
.endr
.endm
.macro vmov_if reg, val, cond
.if \cond
vmov.i16 \reg, \val
.endif
.endm
.macro movdup_if reg, gpr, val, cond
.if \cond
movw \gpr, \val
vdup.16 \reg, \gpr
.endif
.endm
.macro vst1_if regs, dst, dstalign, cond
.if \cond
vst1.16 \regs, \dst, \dstalign
.endif
.endm
.macro scale_if cond, c, r0, r1, r2, r3, r4, r5, r6, r7
.if \cond
scale_input \c, \r0, \r1, \r2, \r3, \r4, \r5, \r6, \r7
.endif
.endm
.macro def_dct64_func suffix, clear=0, scale=0
function inv_txfm_dct\suffix\()_4h_x64_neon, export=1
mov r6, sp
push {r10-r11,lr}
lsl r8, r8, #2
movdup_if d0, r12, #2896*8, \scale
vmov_if d7, #0, \clear
load8 r7, r8, d7, \clear
clear_upper8
sub r7, r7, r8, lsl #3
add r7, r7, r8, lsr #1
scale_if \scale, d0[0], q8, q9, q10, q11
bl inv_dct_4h_x16_neon
store16 r6
movdup_if d0, r12, #2896*8, \scale
vmov_if d7, #0, \clear
load8 r7, r8, d7, \clear
clear_upper8
sub r7, r7, r8, lsl #3
lsr r8, r8, #1
sub r7, r7, r8, lsr #1
scale_if \scale, d0[0], q8, q9, q10, q11
bl inv_dct32_odd_4h_x16_neon
add r10, r6, #8*15
sub r6, r6, #8*16
mov r9, #-8
.macro store_addsub r0, r1, r2, r3
vld1.16 {d2}, [r6, :64]!
vld1.16 {d3}, [r6, :64]!
vqadd.s16 d6, d2, \r0
vqsub.s16 \r0, d2, \r0
vld1.16 {d4}, [r6, :64]!
vqadd.s16 d7, d3, \r1
vqsub.s16 \r1, d3, \r1
vld1.16 {d5}, [r6, :64]!
vqadd.s16 d2, d4, \r2
sub r6, r6, #8*4
vqsub.s16 \r2, d4, \r2
vst1.16 {d6}, [r6, :64]!
vst1.16 {\r0}, [r10, :64], r9
vqadd.s16 d3, d5, \r3
vqsub.s16 \r3, d5, \r3
vst1.16 {d7}, [r6, :64]!
vst1.16 {\r1}, [r10, :64], r9
vst1.16 {d2}, [r6, :64]!
vst1.16 {\r2}, [r10, :64], r9
vst1.16 {d3}, [r6, :64]!
vst1.16 {\r3}, [r10, :64], r9
.endm
store_addsub d31, d30, d29, d28
store_addsub d27, d26, d25, d24
store_addsub d23, d22, d21, d20
store_addsub d19, d18, d17, d16
.purgem store_addsub
add r6, r6, #2*4*16
movrel_local r12, idct64_coeffs
movdup_if d0, lr, #2896*8, \scale
vmov_if d7, #0, \clear
add r9, r7, r8, lsl #4 // offset 16
add r10, r7, r8, lsl #3 // offset 8
sub r9, r9, r8 // offset 15
sub r11, r10, r8 // offset 7
vld1.16 {d16}, [r7, :64] // in1 (offset 0)
vld1.16 {d17}, [r9, :64] // in31 (offset 15)
vld1.16 {d18}, [r10, :64] // in17 (offset 8)
vld1.16 {d19}, [r11, :64] // in15 (offset 7)
vst1_if {d7}, [r7, :64], \clear
vst1_if {d7}, [r9, :64], \clear
vst1_if {d7}, [r10, :64], \clear
vst1_if {d7}, [r11, :64], \clear
scale_if \scale, d0[0], q8, q9
bl inv_dct64_step1_neon
movdup_if d0, lr, #2896*8, \scale
vmov_if d7, #0, \clear
add r7, r7, r8, lsl #2 // offset 4
sub r9, r9, r8, lsl #2 // offset 11
sub r10, r7, r8 // offset 3
add r11, r9, r8 // offset 12
vld1.16 {d16}, [r10, :64] // in7 (offset 3)
vld1.16 {d17}, [r11, :64] // in25 (offset 12)
vld1.16 {d18}, [r9, :64] // in23 (offset 11)
vld1.16 {d19}, [r7, :64] // in9 (offset 4)
vst1_if {d7}, [r7, :64], \clear
vst1_if {d7}, [r9, :64], \clear
vst1_if {d7}, [r10, :64], \clear
vst1_if {d7}, [r11, :64], \clear
scale_if \scale, d0[0], q8, q9
bl inv_dct64_step1_neon
movdup_if d0, lr, #2896*8, \scale
vmov_if d7, #0, \clear
sub r10, r10, r8, lsl #1 // offset 1
sub r9, r9, r8, lsl #1 // offset 9
add r10, r10, r8 // offset 2
add r9, r9, r8 // offset 10
add r7, r7, r8 // offset 5
add r11, r11, r8 // offset 13
vld1.16 d16, [r10, :64] // in5 (offset 2)
vld1.16 d17, [r11, :64] // in27 (offset 13)
vld1.16 d18, [r9, :64] // in21 (offset 10)
vld1.16 d19, [r7, :64] // in11 (offset 5)
vst1_if d7, [r10, :64], \clear
vst1_if d7, [r11, :64], \clear
vst1_if d7, [r9, :64], \clear
vst1_if d7, [r7, :64], \clear
scale_if \scale, d0[0], q8, q9
bl inv_dct64_step1_neon
movdup_if d0, lr, #2896*8, \scale
vmov_if d7, #0, \clear
sub r10, r10, r8 // offset 1
sub r9, r9, r8 // offset 9
add r11, r11, r8 // offset 14
add r7, r7, r8 // offset 6
vld1.16 d16, [r10, :64] // in3 (offset 1)
vld1.16 d17, [r11, :64] // in29 (offset 14)
vld1.16 d18, [r9, :64] // in19 (offset 9)
vld1.16 d19, [r7, :64] // in13 (offset 6)
vst1_if d7, [r10, :64], \clear
vst1_if d7, [r11, :64], \clear
vst1_if d7, [r9, :64], \clear
vst1_if d7, [r7, :64], \clear
scale_if \scale, d0[0], q8, q9
bl inv_dct64_step1_neon
sub r6, r6, #2*4*32
add r9, r6, #2*4*7
bl inv_dct64_step2_neon
pop {r10-r11,pc}
endfunc
.endm
def_dct64_func
def_dct64_func _clear, clear=1
def_dct64_func _clear_scale, clear=1, scale=1
function inv_txfm_horz_dct_64x4_neon
vdup.16 q3, r9
mov r7, sp
add r8, sp, #2*4*(64 - 4)
add r9, r6, #2*56
push {r10-r11,lr}
mov r10, #2*64
mov r11, #-2*4*4
1:
vld1.16 {d16, d17, d18, d19}, [r7, :128]!
vld1.16 {d28, d29, d30, d31}, [r8, :128], r11
vld1.16 {d20, d21, d22, d23}, [r7, :128]!
vld1.16 {d24, d25, d26, d27}, [r8, :128], r11
transpose_4x4h q8, q9, d16, d17, d18, d19
transpose_4x4h q15, q14, d31, d30, d29, d28
transpose_4x4h q10, q11, d20, d21, d22, d23
transpose_4x4h q13, q12, d27, d26, d25, d24
.macro store_addsub src0, src1, src2, src3
vqsub.s16 d3, \src0, \src1
vqsub.s16 d2, \src2, \src3
vqadd.s16 d0, \src0, \src1
vqadd.s16 d1, \src2, \src3
vrshl.s16 q1, q1, q3
vrshl.s16 q0, q0, q3
vrev64.16 q1, q1
vst1.16 {q0}, [r6, :128], r10
vst1.16 {q1}, [r9, :128], r10
.endm
store_addsub d16, d31, d20, d27
store_addsub d17, d30, d21, d26
store_addsub d18, d29, d22, d25
store_addsub d19, d28, d23, d24
.purgem store_addsub
sub r6, r6, r10, lsl #2
sub r9, r9, r10, lsl #2
add r6, r6, #16
sub r9, r9, #16
cmp r7, r8
blt 1b
pop {r10-r11,pc}
endfunc
function inv_txfm_add_vert_dct_4x64_neon
lsl r8, r8, #1
mov r7, sp
add r8, sp, #2*4*(64 - 4)
add r9, r6, r1, lsl #6
sub r9, r9, r1
push {r10-r11,lr}
neg r10, r1
mov r11, #-2*4*4
1:
vld1.16 {d16, d17, d18, d19}, [r7, :128]!
vld1.16 {d28, d29, d30, d31}, [r8, :128], r11
vld1.16 {d20, d21, d22, d23}, [r7, :128]!
vld1.16 {d24, d25, d26, d27}, [r8, :128], r11
.macro add_dest_addsub src0, src1, src2, src3
vld1.32 {d0[0]}, [r6, :32], r1
vld1.32 {d1[0]}, [r9, :32], r10
vqadd.s16 d4, \src0, \src1
vld1.32 {d0[1]}, [r6, :32]
vqadd.s16 d5, \src2, \src3
vld1.32 {d1[1]}, [r9, :32]
vqsub.s16 d6, \src0, \src1
vqsub.s16 d7, \src2, \src3
sub r6, r6, r1
sub r9, r9, r10
vrshr.s16 q2, q2, #4
vrshr.s16 q3, q3, #4
vaddw.u8 q2, q2, d0
vaddw.u8 q3, q3, d1
vqmovun.s16 d0, q2
vqmovun.s16 d1, q3
vst1.32 {d0[0]}, [r6, :32], r1
vst1.32 {d1[0]}, [r9, :32], r10
vst1.32 {d0[1]}, [r6, :32], r1
vst1.32 {d1[1]}, [r9, :32], r10
.endm
add_dest_addsub d16, d31, d17, d30
add_dest_addsub d18, d29, d19, d28
add_dest_addsub d20, d27, d21, d26
add_dest_addsub d22, d25, d23, d24
.purgem add_dest_addsub
cmp r7, r8
blt 1b
pop {r10-r11,pc}
endfunc
function inv_txfm_add_dct_dct_64x64_8bpc_neon, export=1
idct_dc 64, 64, 2
push {r4-r11,lr}
vpush {q4}
sub_sp_align 64*32*2+64*4*2
add r5, sp, #64*4*2
movrel_local r10, eob_32x32
.irp i, 0, 4, 8, 12, 16, 20, 24, 28
add r6, r5, #(\i*64*2)
.if \i > 0
mov r8, #(32 - \i)
cmp r3, r11
blt 1f
.endif
add r7, r2, #(\i*2)
mov r8, #32*2
bl inv_txfm_dct_clear_4h_x64_neon
add r6, r5, #(\i*64*2)
mov r9, #-2 // shift
bl inv_txfm_horz_dct_64x4_neon
.if \i < 28
ldrh r11, [r10], #2
.endif
.endr
b 3f
1:
vmov.i16 q2, #0
vmov.i16 q3, #0
2:
subs r8, r8, #2
.rept 8
vst1.16 {q2, q3}, [r6, :128]!
.endr
bgt 2b
3:
.irp i, 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60
add r7, r5, #(\i*2)
mov r8, #64*2
bl inv_txfm_dct_4h_x64_neon
add r6, r0, #(\i)
bl inv_txfm_add_vert_dct_4x64_neon
.endr
add_sp_align 64*32*2+64*4*2
vpop {q4}
pop {r4-r11,pc}
endfunc
function inv_txfm_add_dct_dct_64x32_8bpc_neon, export=1
idct_dc 64, 32, 1
push {r4-r11,lr}
vpush {q4}
sub_sp_align 64*32*2+64*4*2
add r5, sp, #64*4*2
movrel_local r10, eob_32x32
.irp i, 0, 4, 8, 12, 16, 20, 24, 28
add r6, r5, #(\i*64*2)
.if \i > 0
mov r8, #(32 - \i)
cmp r3, r11
blt 1f
.endif
add r7, r2, #(\i*2)
mov r8, #32*2
bl inv_txfm_dct_clear_scale_4h_x64_neon
add r6, r5, #(\i*64*2)
mov r9, #-1 // shift
bl inv_txfm_horz_dct_64x4_neon
.if \i < 28
ldrh r11, [r10], #2
.endif
.endr
b 3f
1:
vmov.i16 q2, #0
vmov.i16 q3, #0
2:
subs r8, r8, #2
.rept 8
vst1.16 {q2, q3}, [r6, :128]!
.endr
bgt 2b
3:
.irp i, 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60
add r6, r0, #(\i)
add r7, r5, #(\i*2)
mov r8, #64*2
bl inv_txfm_add_vert_dct_4x32_neon
.endr
add_sp_align 64*32*2+64*4*2
vpop {q4}
pop {r4-r11,pc}
endfunc
function inv_txfm_add_dct_dct_32x64_8bpc_neon, export=1
idct_dc 32, 64, 1
push {r4-r11,lr}
vpush {q4}
sub_sp_align 32*32*2+64*4*2
add r5, sp, #64*4*2
movrel_local r10, eob_32x32
ldrh r11, [r10], #2
.irp i, 0, 4, 8, 12, 16, 20, 24, 28
add r6, r5, #(\i*32*2)
.if \i > 0
mov r8, #(32 - \i)
cmp r3, r11
blt 1f
.if \i < 28
ldrh r11, [r10], #2
.endif
.endif
add r7, r2, #(\i*2)
mov r8, #32*2
bl inv_txfm_horz_scale_dct_32x4_neon
.endr
b 3f
1:
vmov.i16 q2, #0
vmov.i16 q3, #0
2:
subs r8, r8, #2
.rept 4
vst1.16 {q2, q3}, [r6, :128]!
.endr
bgt 2b
3:
.irp i, 0, 4, 8, 12, 16, 20, 24, 28
add r7, r5, #(\i*2)
mov r8, #32*2
bl inv_txfm_dct_4h_x64_neon
add r6, r0, #(\i)
bl inv_txfm_add_vert_dct_4x64_neon
.endr
add_sp_align 32*32*2+64*4*2
vpop {q4}
pop {r4-r11,pc}
endfunc
function inv_txfm_add_dct_dct_64x16_8bpc_neon, export=1
idct_dc 64, 16, 2
push {r4-r11,lr}
vpush {q4}
sub_sp_align 64*16*2+64*4*2
add r4, sp, #64*4*2
movrel_local r10, eob_16x32
.irp i, 0, 4, 8, 12
add r6, r4, #(\i*64*2)
.if \i > 0
mov r8, #(16 - \i)
cmp r3, r11
blt 1f
.endif
add r7, r2, #(\i*2)
mov r8, #16*2
bl inv_txfm_dct_clear_4h_x64_neon
add r6, r4, #(\i*64*2)
mov r9, #-2 // shift
bl inv_txfm_horz_dct_64x4_neon
.if \i < 12
ldrh r11, [r10], #2
.endif
.endr
b 3f
1:
vmov.i16 q2, #0
vmov.i16 q3, #0
2:
subs r8, r8, #2
.rept 8
vst1.16 {q2, q3}, [r6, :128]!
.endr
bgt 2b
3:
movrel_local r5, inv_dct_4h_x16_neon
.irp i, 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60
add r6, r0, #(\i)
add r7, r4, #(\i*2)
mov r8, #64*2
bl inv_txfm_add_vert_4x16_neon
.endr
add_sp_align 64*16*2+64*4*2
vpop {q4}
pop {r4-r11,pc}
endfunc
function inv_txfm_add_dct_dct_16x64_8bpc_neon, export=1
idct_dc 16, 64, 2
push {r4-r11,lr}
vpush {q4}
sub_sp_align 16*32*2+64*4*2
add r5, sp, #64*4*2
movrel_local r10, eob_16x32
ldrh r11, [r10], #2
movrel_local r4, inv_dct_4h_x16_neon
.irp i, 0, 4, 8, 12, 16, 20, 24, 28
add r6, r5, #(\i*16*2)
.if \i > 0
mov r8, #(32 - \i)
cmp r3, r11
blt 1f
.if \i < 28
ldrh r11, [r10], #2
.endif
.endif
add r7, r2, #(\i*2)
mov r8, #32*2
bl inv_txfm_horz_16x4_neon
.endr
b 3f
1:
vmov.i16 q2, #0
vmov.i16 q3, #0
2:
subs r8, r8, #4
.rept 4
vst1.16 {q2, q3}, [r6, :128]!
.endr
bgt 2b
3:
.irp i, 0, 4, 8, 12
add r7, r5, #(\i*2)
mov r8, #16*2
bl inv_txfm_dct_4h_x64_neon
add r6, r0, #(\i)
bl inv_txfm_add_vert_dct_4x64_neon
.endr
add_sp_align 16*32*2+64*4*2
vpop {q4}
pop {r4-r11,pc}
endfunc
|
Admenri/urge
| 67,565
|
third_party/dav1d/src/arm/32/filmgrain.S
|
/*
* Copyright © 2021, VideoLAN and dav1d authors
* Copyright © 2021, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm.S"
#include "util.S"
#include "src/arm/asm-offsets.h"
#define GRAIN_WIDTH 82
#define GRAIN_HEIGHT 73
#define SUB_GRAIN_WIDTH 44
#define SUB_GRAIN_HEIGHT 38
.macro increment_seed steps, shift=1
lsr r11, r2, #3
lsr r12, r2, #12
lsr lr, r2, #1
eor r11, r2, r11 // (r >> 0) ^ (r >> 3)
eor r12, r12, lr // (r >> 12) ^ (r >> 1)
eor r11, r11, r12 // (r >> 0) ^ (r >> 3) ^ (r >> 12) ^ (r >> 1)
.if \shift
lsr r2, r2, #\steps
.endif
and r11, r11, #((1 << \steps) - 1) // bit
.if \shift
orr r2, r2, r11, lsl #(16 - \steps) // *state
.else
orr r2, r2, r11, lsl #16 // *state
.endif
.endm
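// Rough scalar equivalent of the shift=1 case (a sketch, not dav1d's C code):
//   unsigned bit = ((state >> 0) ^ (state >> 1) ^
//                   (state >> 3) ^ (state >> 12)) & ((1u << steps) - 1);
//   state = (state >> steps) | (bit << (16 - steps));
// With shift=0 the new bits are parked above bit 16 and the state is shifted
// down later by the caller (see read_shift_rand and the explicit shifts below).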
.macro read_rand dest, bits, age
ubfx \dest, r2, #16 - \bits - \age, #\bits
.endm
.macro read_shift_rand dest, bits
ubfx \dest, r2, #17 - \bits, #\bits
lsr r2, r2, #1
.endm
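// read_rand extracts a \bits-bit value from a state that has already been
// advanced, \age steps back in time; roughly (sketch):
//   dest = (state >> (16 - bits - age)) & ((1 << bits) - 1);
// read_shift_rand does the same for the freshest value and then retires one
// bit of state.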
// special calling convention:
// r2 holds seed
// r3 holds dav1d_gaussian_sequence
// clobbers r11-r12
// returns in d0-d1
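// Roughly equivalent scalar loop (a sketch; get_random_number as in the AV1
// spec, batched here for speed):
//   for (int i = 0; i < 8; i++)
//       out[i] = dav1d_gaussian_sequence[get_random_number(11, &seed)];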
function get_gaussian_neon
push {r5-r6,lr}
increment_seed 4
read_rand r5, 11, 3
read_rand r6, 11, 2
add r5, r3, r5, lsl #1
add r6, r3, r6, lsl #1
vld1.16 {d0[0]}, [r5]
read_rand r5, 11, 1
vld1.16 {d0[1]}, [r6]
add r5, r3, r5, lsl #1
read_rand r6, 11, 0
increment_seed 4
add r6, r3, r6, lsl #1
vld1.16 {d0[2]}, [r5]
read_rand r5, 11, 3
vld1.16 {d0[3]}, [r6]
add r5, r3, r5, lsl #1
read_rand r6, 11, 2
vld1.16 {d1[0]}, [r5]
add r6, r3, r6, lsl #1
read_rand r5, 11, 1
vld1.16 {d1[1]}, [r6]
read_rand r6, 11, 0
add r5, r3, r5, lsl #1
add r6, r3, r6, lsl #1
vld1.16 {d1[2]}, [r5]
vld1.16 {d1[3]}, [r6]
pop {r5-r6,pc}
endfunc
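// get_grain_row/store_grain_row below produce and store one full luma grain
// row of GRAIN_WIDTH (82) values: ten 8-sample groups from get_gaussian_neon
// plus a final pair of samples.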
.macro get_grain_row r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10
bl get_gaussian_neon
vrshl.s16 q0, q0, q15
vmovn.i16 \r0, q0
bl get_gaussian_neon
vrshl.s16 q0, q0, q15
vmovn.i16 \r1, q0
bl get_gaussian_neon
vrshl.s16 q0, q0, q15
vmovn.i16 \r2, q0
bl get_gaussian_neon
vrshl.s16 q0, q0, q15
vmovn.i16 \r3, q0
bl get_gaussian_neon
vrshl.s16 q0, q0, q15
vmovn.i16 \r4, q0
bl get_gaussian_neon
vrshl.s16 q0, q0, q15
vmovn.i16 \r5, q0
bl get_gaussian_neon
vrshl.s16 q0, q0, q15
vmovn.i16 \r6, q0
bl get_gaussian_neon
vrshl.s16 q0, q0, q15
vmovn.i16 \r7, q0
bl get_gaussian_neon
vrshl.s16 q0, q0, q15
vmovn.i16 \r8, q0
bl get_gaussian_neon
vrshl.s16 q0, q0, q15
vmovn.i16 \r9, q0
increment_seed 2
read_rand r11, 11, 1
read_rand r12, 11, 0
add r11, r3, r11, lsl #1
add r12, r3, r12, lsl #1
vld1.16 {d0[0]}, [r11]
vld1.16 {d0[1]}, [r12]
vrshl.s16 d0, d0, d30
vmovn.i16 \r10, q0
.endm
.macro store_grain_row r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10
vst1.16 {\r0, \r1, \r2, \r3}, [r0]!
vst1.16 {\r4, \r5, \r6, \r7}, [r0]!
vst1.16 {\r8, \r9}, [r0]!
vst1.16 {\r10[0]}, [r0]!
.endm
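// The _44 variants produce one chroma sub-grain row of SUB_GRAIN_WIDTH (44)
// values: five 8-sample groups plus a final group of four.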
.macro get_grain_row_44 r0, r1, r2, r3, r4, r5
bl get_gaussian_neon
vrshl.s16 q0, q0, q15
vmovn.i16 \r0, q0
bl get_gaussian_neon
vrshl.s16 q0, q0, q15
vmovn.i16 \r1, q0
bl get_gaussian_neon
vrshl.s16 q0, q0, q15
vmovn.i16 \r2, q0
bl get_gaussian_neon
vrshl.s16 q0, q0, q15
vmovn.i16 \r3, q0
bl get_gaussian_neon
vrshl.s16 q0, q0, q15
vmovn.i16 \r4, q0
increment_seed 4
read_rand r11, 11, 3
read_rand r12, 11, 2
add r11, r3, r11, lsl #1
add r12, r3, r12, lsl #1
vld1.16 {d0[]}, [r11]
read_rand r11, 11, 1
vld1.16 {d0[1]}, [r12]
add r11, r3, r11, lsl #1
read_rand r12, 11, 0
vld1.16 {d0[2]}, [r11]
add r12, r3, r12, lsl #1
vld1.16 {d0[3]}, [r12]
vrshl.s16 d0, d0, d30
vmovn.i16 \r5, q0
.endm
.macro store_grain_row_44 r0, r1, r2, r3, r4, r5
vst1.16 {\r0, \r1, \r2, \r3}, [r0]!
vst1.16 {\r4, \r5}, [r0]
add r0, r0, #GRAIN_WIDTH-32
.endm
function get_grain_2_neon
push {r11,lr}
increment_seed 2
read_rand r11, 11, 1
read_rand r12, 11, 0
add r11, r3, r11, lsl #1
add r12, r3, r12, lsl #1
vld1.16 {d0[0]}, [r11]
vld1.16 {d0[1]}, [r12]
vrshl.s16 d0, d0, d30
vmovn.i16 d0, q0
pop {r11,pc}
endfunc
.macro get_grain_2 dst
bl get_grain_2_neon
.ifnc \dst, d0
vmov \dst, d0
.endif
.endm
// r1 holds the number of entries to produce
// r6, r8 and r10 hold the previous output entries
// q0 holds the vector of produced entries
// q1 holds the input vector of sums from above
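// Per produced element this is roughly (a sketch, 8bpc value range assumed):
//   sum = sum_above + coeff[0]*prev1 (+ coeff[1]*prev2 (+ coeff[2]*prev3));
//   g   = round2(sum, ar_coeff_shift)
//       + round2(gaussian_sequence[rand11], 4 + grain_scale_shift);
//   out = iclip(g, -128, 127);
// where round2(x, s) = (x + (1 << (s - 1))) >> s.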
.macro output_lag n
function output_lag\n\()_neon
push {r0, lr}
.if \n == 1
mov lr, #-128
.else
mov r0, #1
mov lr, #1
sub r7, r7, #1
sub r9, r9, #1
lsl r0, r0, r7
lsl lr, lr, r9
add r7, r7, #1
add r9, r9, #1
.endif
1:
read_shift_rand r12, 11
vmov.32 r11, d2[0]
lsl r12, r12, #1
vext.8 q0, q0, q0, #1
ldrsh r12, [r3, r12]
.if \n == 1
mla r11, r6, r4, r11 // sum (above) + *coeff * prev output
add r6, r11, r8 // 1 << (ar_coeff_shift - 1)
add r12, r12, r10
asr r6, r6, r7 // >> ar_coeff_shift
asr r12, r12, r9 // >> (4 + grain_scale_shift)
add r6, r6, r12
cmp r6, r5
.elseif \n == 2
mla r11, r8, r4, r11 // sum (above) + *coeff * prev output 1
mla r11, r6, r10, r11 // += *coeff * prev output 2
mov r8, r6
add r6, r11, r0 // 1 << (ar_coeff_shift - 1)
add r12, r12, lr // 1 << (4 + grain_scale_shift - 1)
asr r6, r6, r7 // >> ar_coeff_shift
asr r12, r12, r9 // >> (4 + grain_scale_shift)
add r6, r6, r12
push {lr}
cmp r6, r5
mov lr, #-128
.else
push {r1-r3}
sbfx r1, r4, #0, #8
sbfx r2, r4, #8, #8
sbfx r3, r4, #16, #8
mla r11, r10, r1, r11 // sum (above) + *coeff * prev output 1
mla r11, r8, r2, r11 // sum (above) + *coeff * prev output 2
mla r11, r6, r3, r11 // += *coeff * prev output 3
pop {r1-r3}
mov r10, r8
mov r8, r6
add r6, r11, r0 // 1 << (ar_coeff_shift - 1)
add r12, r12, lr // 1 << (4 + grain_scale_shift - 1)
asr r6, r6, r7 // >> ar_coeff_shift
asr r12, r12, r9 // >> (4 + grain_scale_shift)
add r6, r6, r12
push {lr}
cmp r6, r5
mov lr, #-128
.endif
it gt
movgt r6, r5
cmp r6, lr
it lt
movlt r6, lr
.if \n >= 2
pop {lr}
.endif
subs r1, r1, #1
vext.8 q1, q1, q1, #4
vmov.8 d1[7], r6
bgt 1b
pop {r0, pc}
endfunc
.endm
output_lag 1
output_lag 2
output_lag 3
function sum_lag1_above_neon
vmull.s8 q2, d6, d28
vmull.s8 q3, d7, d28
vmull.s8 q4, d0, d27
vmull.s8 q5, d1, d27
vaddl.s16 q0, d4, d8
vaddl.s16 q2, d5, d9
vaddl.s16 q4, d6, d10
vaddl.s16 q5, d7, d11
vmull.s8 q3, d3, d29
vmull.s8 q1, d2, d29
vaddw.s16 q4, q4, d6
vaddw.s16 q5, q5, d7
vaddw.s16 q3, q2, d3
vaddw.s16 q2, q0, d2
bx lr
endfunc
.macro sum_lag_n_body lag, type, uv_layout, edge, elems, store, uv_coeff
.ifc \lag\()_\edge, lag3_left
bl sum_lag3_left_above_neon
.else
bl sum_\lag\()_above_neon
.endif
.ifc \type, uv_420
vpush {q6-q7}
add r12, r11, #GRAIN_WIDTH
vld1.16 {q0, q1}, [r11]!
vld1.16 {q6, q7}, [r12]!
vpaddl.s8 q0, q0
vpaddl.s8 q1, q1
vpaddl.s8 q6, q6
vpaddl.s8 q7, q7
vadd.i16 q0, q0, q6
vadd.i16 q1, q1, q7
vpop {q6-q7}
vrshrn.s16 d0, q0, #2
vrshrn.s16 d1, q1, #2
.endif
.ifc \type, uv_422
vld1.8 {q0, q1}, [r11]!
vpaddl.s8 q0, q0
vpaddl.s8 q1, q1
vrshrn.s16 d0, q0, #1
vrshrn.s16 d1, q1, #1
.endif
.ifc \type, uv_444
vld1.8 {q0}, [r11]!
.endif
.if \uv_layout
.ifnb \uv_coeff
vdup.8 d13, \uv_coeff
.endif
vmull.s8 q1, d0, d13
vmull.s8 q0, d1, d13
vaddw.s16 q2, q2, d2
vaddw.s16 q3, q3, d3
vaddw.s16 q4, q4, d0
vaddw.s16 q5, q5, d1
.endif
.if \uv_layout && \elems == 16
b sum_\lag\()_y_\edge\()_start
.elseif \uv_layout == 444 && \elems == 15
b sum_\lag\()_y_\edge\()_start
.elseif \uv_layout == 422 && \elems == 9
b sum_\lag\()_uv_420_\edge\()_start
.else
sum_\lag\()_\type\()_\edge\()_start:
push {r11}
.ifc \edge, left
increment_seed 4
read_rand r11, 11, 3
read_rand r12, 11, 2
add r11, r3, r11, lsl #1
add r12, r3, r12, lsl #1
vld1.16 {d1[1]}, [r11]
read_rand r11, 11, 1
vld1.16 {d1[2]}, [r12]
add r11, r3, r11, lsl #1
vld1.16 {d1[3]}, [r11]
lsl r2, r2, #1 // shift back the state as if we'd done increment_seed with shift=0
vrshl.s16 d1, d1, d30
vmovn.i16 d1, q0
vext.8 q2, q2, q2, #12
.ifc \lag, lag3
vmov.s8 r10, d1[5]
.endif
.ifnc \lag, lag1
vmov.s8 r8, d1[6]
.endif
vmov.s8 r6, d1[7]
vmov q1, q2
mov r1, #1
bl output_\lag\()_neon
.else
increment_seed 4, shift=0
vmov q1, q2
mov r1, #4
bl output_\lag\()_neon
.endif
increment_seed 4, shift=0
vmov q1, q3
mov r1, #4
bl output_\lag\()_neon
increment_seed 4, shift=0
vmov q1, q4
.if \elems == 9
mov r1, #1
bl output_\lag\()_neon
lsr r2, r2, #3
read_rand r11, 11, 2
read_rand r12, 11, 1
add r11, r3, r11, lsl #1
add r12, r3, r12, lsl #1
vld1.16 {d2[0]}, [r11]
read_rand r11, 11, 0
vld1.16 {d2[1]}, [r12]
add r11, r3, r11, lsl #1
vld1.16 {d2[2]}, [r11]
vrshl.s16 d2, d2, d30
vmovn.i16 d2, q1
vext.8 q0, q0, q1, #7
.else
mov r1, #4
bl output_\lag\()_neon
increment_seed 4, shift=0
vmov q1, q5
.ifc \edge, right
mov r1, #3
bl output_\lag\()_neon
read_shift_rand r11, 11
add r11, r3, r11, lsl #1
vld1.16 {d2[0]}, [r11]
vrshl.s16 d2, d2, d30
vext.8 q0, q0, q1, #1
.else
mov r1, #4
bl output_\lag\()_neon
.endif
.endif
.if \store
vst1.8 {q0}, [r0]!
.endif
pop {r11}
pop {r1, pc}
.endif
.endm
.macro sum_lag1_func type, uv_layout, edge, elems=16
function sum_\type\()_lag1_\edge\()_neon
push {r1, lr}
sum_lag_n_body lag1, \type, \uv_layout, \edge, \elems, store=0
endfunc
.endm
sum_lag1_func y, 0, left
sum_lag1_func y, 0, mid
sum_lag1_func y, 0, right, 15
sum_lag1_func uv_444, 444, left
sum_lag1_func uv_444, 444, mid
sum_lag1_func uv_444, 444, right, 15
sum_lag1_func uv_422, 422, left
sum_lag1_func uv_422, 422, mid
sum_lag1_func uv_422, 422, right, 9
sum_lag1_func uv_420, 420, left
sum_lag1_func uv_420, 420, mid
sum_lag1_func uv_420, 420, right, 9
.macro sum_lag1 type, dst, left, mid, right, edge=mid
vmov q3, \mid
vext.8 q0, \left, \mid, #15
vext.8 q1, \mid, \right, #1
bl sum_\type\()_lag1_\edge\()_neon
vmov \dst, q0
.endm
.macro sum_y_lag1 dst, left, mid, right, edge=mid
sum_lag1 y, \dst, \left, \mid, \right, \edge
.endm
.macro sum_uv_444_lag1 dst, left, mid, right, edge=mid
sum_lag1 uv_444, \dst, \left, \mid, \right, \edge
.endm
.macro sum_uv_422_lag1 dst, left, mid, right, edge=mid
sum_lag1 uv_422, \dst, \left, \mid, \right, \edge
.endm
.macro sum_uv_420_lag1 dst, left, mid, right, edge=mid
sum_lag1 uv_420, \dst, \left, \mid, \right, \edge
.endm
function sum_lag2_above_neon
push {lr}
sub r12, r0, #2*GRAIN_WIDTH - 16
sub lr, r0, #1*GRAIN_WIDTH - 16
vld1.8 {q10}, [r12] // load top right
vld1.8 {q13}, [lr]
vext.8 q6, q8, q9, #14 // top left, top mid
vdup.8 d14, d28[0]
vext.8 q8, q8, q9, #15
vdup.8 d15, d28[1]
vmull.s8 q0, d12, d14
vmull.s8 q1, d13, d14
vmull.s8 q6, d16, d15
vmull.s8 q8, d17, d15
vaddl.s16 q2, d0, d12
vaddl.s16 q3, d1, d13
vaddl.s16 q4, d2, d16
vaddl.s16 q5, d3, d17
vext.8 q6, q9, q10, #1 // top mid, top right
vdup.8 d14, d28[3]
vext.8 q8, q9, q10, #2
vdup.8 d15, d28[4]
vmull.s8 q0, d12, d14
vmull.s8 q1, d13, d14
vmull.s8 q6, d16, d15
vmull.s8 q8, d17, d15
vaddl.s16 q7, d0, d12
vaddl.s16 q0, d1, d13
vaddl.s16 q6, d2, d16
vaddl.s16 q1, d3, d17
vadd.i32 q2, q2, q7
vadd.i32 q3, q3, q0
vadd.i32 q4, q4, q6
vadd.i32 q5, q5, q1
vext.8 q6, q11, q12, #14 // top left, top mid
vdup.8 d14, d28[5]
vext.8 q8, q11, q12, #15
vdup.8 d15, d28[6]
vmull.s8 q0, d12, d14
vmull.s8 q1, d13, d14
vmull.s8 q6, d16, d15
vmull.s8 q8, d17, d15
vaddl.s16 q7, d0, d12
vaddl.s16 q0, d1, d13
vaddl.s16 q6, d2, d16
vaddl.s16 q1, d3, d17
vadd.i32 q2, q2, q7
vadd.i32 q3, q3, q0
vadd.i32 q4, q4, q6
vadd.i32 q5, q5, q1
vext.8 q6, q12, q13, #1 // top mid, top right
vdup.8 d14, d29[0]
vext.8 q8, q12, q13, #2
vdup.8 d15, d29[1]
vmull.s8 q0, d12, d14
vmull.s8 q1, d13, d14
vmull.s8 q6, d16, d15
vmull.s8 q8, d17, d15
vaddl.s16 q7, d0, d12
vaddl.s16 q0, d1, d13
vaddl.s16 q6, d2, d16
vaddl.s16 q1, d3, d17
vadd.i32 q2, q2, q7
vadd.i32 q3, q3, q0
vadd.i32 q4, q4, q6
vadd.i32 q5, q5, q1
vdup.8 d14, d28[2]
vdup.8 d15, d28[7]
vmull.s8 q0, d18, d14
vmull.s8 q1, d19, d14
vmull.s8 q6, d24, d15
vmull.s8 q8, d25, d15
vaddl.s16 q7, d0, d12
vaddl.s16 q0, d1, d13
vaddl.s16 q6, d2, d16
vaddl.s16 q1, d3, d17
vmov q8, q9
vmov q9, q10
vadd.i32 q2, q2, q7
vadd.i32 q3, q3, q0
vadd.i32 q4, q4, q6
vadd.i32 q5, q5, q1
vmov q11, q12
vmov q12, q13
pop {pc}
endfunc
.macro sum_lag2_func type, uv_layout, edge, elems=16
function sum_\type\()_lag2_\edge\()_neon
push {r1, lr}
.ifc \edge, left
sub r12, r0, #2*GRAIN_WIDTH
sub lr, r0, #1*GRAIN_WIDTH
vld1.8 {q9}, [r12] // load the previous block right above
vld1.8 {q12}, [lr]
.endif
sum_lag_n_body lag2, \type, \uv_layout, \edge, \elems, store=1, uv_coeff=d29[4]
endfunc
.endm
sum_lag2_func y, 0, left
sum_lag2_func y, 0, mid
sum_lag2_func y, 0, right, 15
sum_lag2_func uv_444, 444, left
sum_lag2_func uv_444, 444, mid
sum_lag2_func uv_444, 444, right, 15
sum_lag2_func uv_422, 422, left
sum_lag2_func uv_422, 422, mid
sum_lag2_func uv_422, 422, right, 9
sum_lag2_func uv_420, 420, left
sum_lag2_func uv_420, 420, mid
sum_lag2_func uv_420, 420, right, 9
function sum_lag3_left_above_neon
// A separate codepath for the left edge, to avoid reading outside
// of the edge of the buffer.
sub r12, r0, #3*GRAIN_WIDTH
vld1.8 {q11, q12}, [r12]
vext.8 q12, q11, q12, #13
vext.8 q11, q11, q11, #13
b sum_lag3_above_start
endfunc
function sum_lag3_above_neon
sub r12, r0, #3*GRAIN_WIDTH + 3
vld1.8 {q11, q12}, [r12]
sum_lag3_above_start:
vdup.8 d20, d26[0]
vext.8 q9, q11, q12, #1
vdup.8 d21, d26[1]
vmull.s8 q0, d22, d20
vmull.s8 q1, d23, d20
vmull.s8 q6, d18, d21
vmull.s8 q7, d19, d21
vext.8 q8, q11, q12, #2
vdup.8 d20, d26[2]
vext.8 q9, q11, q12, #3
vdup.8 d21, d26[3]
vaddl.s16 q2, d0, d12
vaddl.s16 q3, d1, d13
vaddl.s16 q4, d2, d14
vaddl.s16 q5, d3, d15
vmull.s8 q0, d16, d20
vmull.s8 q1, d17, d20
vmull.s8 q6, d18, d21
vmull.s8 q7, d19, d21
vaddl.s16 q8, d0, d12
vaddl.s16 q9, d1, d13
vaddl.s16 q0, d2, d14
vaddl.s16 q1, d3, d15
vext.8 q6, q11, q12, #4
vdup.8 d20, d26[4]
vext.8 q7, q11, q12, #5
vdup.8 d21, d26[5]
vadd.i32 q2, q2, q8
vadd.i32 q3, q3, q9
vadd.i32 q4, q4, q0
vadd.i32 q5, q5, q1
vmull.s8 q0, d12, d20
vmull.s8 q1, d13, d20
vmull.s8 q8, d14, d21
vmull.s8 q9, d15, d21
sub r12, r0, #2*GRAIN_WIDTH + 3
vaddl.s16 q6, d0, d16
vaddl.s16 q7, d1, d17
vaddl.s16 q0, d2, d18
vaddl.s16 q1, d3, d19
vext.8 q8, q11, q12, #6
vld1.8 {q11, q12}, [r12]
vdup.8 d20, d26[6]
vdup.8 d21, d26[7]
vadd.i32 q2, q2, q6
vadd.i32 q3, q3, q7
vadd.i32 q4, q4, q0
vadd.i32 q5, q5, q1
vmull.s8 q0, d16, d20
vmull.s8 q1, d17, d20
vmull.s8 q6, d22, d21
vmull.s8 q7, d23, d21
vaddl.s16 q8, d0, d12
vaddl.s16 q9, d1, d13
vaddl.s16 q0, d2, d14
vaddl.s16 q1, d3, d15
vext.8 q6, q11, q12, #1
vdup.8 d20, d27[0]
vext.8 q7, q11, q12, #2
vdup.8 d21, d27[1]
vadd.i32 q2, q2, q8
vadd.i32 q3, q3, q9
vadd.i32 q4, q4, q0
vadd.i32 q5, q5, q1
vmull.s8 q0, d12, d20
vmull.s8 q1, d13, d20
vmull.s8 q8, d14, d21
vmull.s8 q9, d15, d21
vaddl.s16 q6, d0, d16
vaddl.s16 q7, d1, d17
vaddl.s16 q0, d2, d18
vaddl.s16 q1, d3, d19
vext.8 q8, q11, q12, #3
vdup.8 d20, d27[2]
vext.8 q9, q11, q12, #4
vdup.8 d21, d27[3]
vadd.i32 q2, q2, q6
vadd.i32 q3, q3, q7
vadd.i32 q4, q4, q0
vadd.i32 q5, q5, q1
vmull.s8 q0, d16, d20
vmull.s8 q1, d17, d20
vmull.s8 q6, d18, d21
vmull.s8 q7, d19, d21
sub r12, r0, #1*GRAIN_WIDTH + 3
vaddl.s16 q8, d0, d12
vaddl.s16 q9, d1, d13
vaddl.s16 q0, d2, d14
vaddl.s16 q1, d3, d15
vext.8 q6, q11, q12, #5
vdup.8 d20, d27[4]
vext.8 q7, q11, q12, #6
vdup.8 d21, d27[5]
vld1.8 {q11, q12}, [r12]
vadd.i32 q2, q2, q8
vadd.i32 q3, q3, q9
vadd.i32 q4, q4, q0
vadd.i32 q5, q5, q1
vmull.s8 q0, d12, d20
vmull.s8 q1, d13, d20
vmull.s8 q8, d14, d21
vmull.s8 q9, d15, d21
vaddl.s16 q6, d0, d16
vaddl.s16 q7, d1, d17
vaddl.s16 q0, d2, d18
vaddl.s16 q1, d3, d19
vdup.8 d20, d27[6]
vext.8 q9, q11, q12, #1
vdup.8 d21, d27[7]
vadd.i32 q2, q2, q6
vadd.i32 q3, q3, q7
vadd.i32 q4, q4, q0
vadd.i32 q5, q5, q1
vmull.s8 q0, d22, d20
vmull.s8 q1, d23, d20
vmull.s8 q6, d18, d21
vmull.s8 q7, d19, d21
vaddl.s16 q8, d0, d12
vaddl.s16 q9, d1, d13
vaddl.s16 q0, d2, d14
vaddl.s16 q1, d3, d15
vext.8 q6, q11, q12, #2
vdup.8 d20, d28[0]
vext.8 q7, q11, q12, #3
vdup.8 d21, d28[1]
vadd.i32 q2, q2, q8
vadd.i32 q3, q3, q9
vadd.i32 q4, q4, q0
vadd.i32 q5, q5, q1
vmull.s8 q0, d12, d20
vmull.s8 q1, d13, d20
vmull.s8 q8, d14, d21
vmull.s8 q9, d15, d21
vaddl.s16 q6, d0, d16
vaddl.s16 q7, d1, d17
vaddl.s16 q0, d2, d18
vaddl.s16 q1, d3, d19
vext.8 q8, q11, q12, #4
vdup.8 d20, d28[2]
vext.8 q9, q11, q12, #5
vdup.8 d21, d28[3]
vadd.i32 q2, q2, q6
vadd.i32 q3, q3, q7
vadd.i32 q4, q4, q0
vadd.i32 q5, q5, q1
vmull.s8 q0, d16, d20
vmull.s8 q1, d17, d20
vmull.s8 q6, d18, d21
vmull.s8 q7, d19, d21
vaddl.s16 q8, d0, d12
vaddl.s16 q9, d1, d13
vaddl.s16 q0, d2, d14
vaddl.s16 q1, d3, d15
vext.8 q6, q11, q12, #6
vdup.8 d20, d28[4]
vadd.i32 q2, q2, q8
vadd.i32 q3, q3, q9
vadd.i32 q4, q4, q0
vadd.i32 q5, q5, q1
vmull.s8 q0, d12, d20
vmull.s8 q1, d13, d20
vaddw.s16 q2, q2, d0
vaddw.s16 q3, q3, d1
vaddw.s16 q4, q4, d2
vaddw.s16 q5, q5, d3
bx lr
endfunc
.macro sum_lag3_func type, uv_layout, edge, elems=16
function sum_\type\()_lag3_\edge\()_neon
push {r1, lr}
sum_lag_n_body lag3, \type, \uv_layout, \edge, \elems, store=1, uv_coeff=d29[0]
endfunc
.endm
sum_lag3_func y, 0, left
sum_lag3_func y, 0, mid
sum_lag3_func y, 0, right, 15
sum_lag3_func uv_444, 444, left
sum_lag3_func uv_444, 444, mid
sum_lag3_func uv_444, 444, right, 15
sum_lag3_func uv_422, 422, left
sum_lag3_func uv_422, 422, mid
sum_lag3_func uv_422, 422, right, 9
sum_lag3_func uv_420, 420, left
sum_lag3_func uv_420, 420, mid
sum_lag3_func uv_420, 420, right, 9
function generate_grain_rows_neon
push {r11,lr}
1:
get_grain_row d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26
subs r1, r1, #1
store_grain_row d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26
bgt 1b
pop {r11,pc}
endfunc
function generate_grain_rows_44_neon
push {r11,lr}
1:
get_grain_row_44 d16, d17, d18, d19, d20, d21
subs r1, r1, #1
store_grain_row_44 d16, d17, d18, d19, d20, d21
bgt 1b
pop {r11,pc}
endfunc
function gen_grain_uv_444_lag0_neon
vld1.8 {q3}, [r11]!
push {r11,lr}
bl get_gaussian_neon
vrshl.s16 q8, q0, q15
bl get_gaussian_neon
vrshl.s16 q9, q0, q15
vqmovn.s16 d0, q8
vqmovn.s16 d1, q9
vand q3, q3, q1
vmull.s8 q2, d6, d22
vmull.s8 q3, d7, d22
vrshl.s16 q2, q2, q12
vrshl.s16 q3, q3, q12
vaddw.s8 q2, q2, d0
vaddw.s8 q3, q3, d1
vqmovn.s16 d4, q2
vqmovn.s16 d5, q3
vst1.8 {q2}, [r0]!
pop {r11,pc}
endfunc
function get_grain_row_44_neon
push {r11,lr}
get_grain_row_44 d16, d17, d18, d19, d20, d21
pop {r11,pc}
endfunc
function add_uv_420_coeff_lag0_neon
vld1.16 {q2, q3}, [r11]!
vld1.16 {q4, q5}, [r12]!
vpaddl.s8 q2, q2
vpaddl.s8 q3, q3
vpaddl.s8 q4, q4
vpaddl.s8 q5, q5
vadd.i16 q2, q2, q4
vadd.i16 q3, q3, q5
vrshrn.s16 d4, q2, #2
vrshrn.s16 d5, q3, #2
b add_coeff_lag0_start
endfunc
function add_uv_422_coeff_lag0_neon
vld1.16 {q2, q3}, [r11]!
vpaddl.s8 q2, q2
vpaddl.s8 q3, q3
vrshrn.s16 d4, q2, #1
vrshrn.s16 d5, q3, #1
add_coeff_lag0_start:
vand q3, q2, q1
vmull.s8 q2, d6, d22
vmull.s8 q3, d7, d22
vrshl.s16 q2, q2, q12
vrshl.s16 q3, q3, q12
vaddw.s8 q2, q2, d0
vaddw.s8 q3, q3, d1
vqmovn.s16 d4, q2
vqmovn.s16 d5, q3
bx lr
endfunc
.macro gen_grain_82 type
function generate_grain_\type\()_8bpc_neon, export=1
push {r4-r11,lr}
.ifc \type, uv_444
mov r12, r3
mov lr, #28
add r11, r1, #3*GRAIN_WIDTH
mov r1, r2
mul r12, r12, lr
.endif
movrel r3, X(gaussian_sequence)
ldr r2, [r1, #FGD_SEED]
ldr r9, [r1, #FGD_GRAIN_SCALE_SHIFT]
.ifc \type, y
add r4, r1, #FGD_AR_COEFFS_Y
.else
add r4, r1, #FGD_AR_COEFFS_UV
.endif
adr r5, L(gen_grain_\type\()_tbl)
ldr r6, [r1, #FGD_AR_COEFF_LAG]
add r9, r9, #4
ldr r6, [r5, r6, lsl #2]
vdup.16 q15, r9 // 4 + data->grain_scale_shift
add r5, r5, r6
vneg.s16 q15, q15
.ifc \type, uv_444
cmp r12, #0
movw r10, #0x49d8
movw lr, #0xb524
// Intentionally using a separate register instead of moveq with an
// immediate constant, to avoid the IT instruction forms deprecated in ARMv8.
it eq
moveq r10, lr
add r4, r4, r12 // Add offset to ar_coeffs_uv[1]
eor r2, r2, r10
.endif
ldr r7, [r1, #FGD_AR_COEFF_SHIFT]
mov r8, #1
mov r10, #1
lsl r8, r8, r7 // 1 << ar_coeff_shift
lsl r10, r10, r9 // 1 << (4 + data->grain_scale_shift)
lsr r8, r8, #1 // 1 << (ar_coeff_shift - 1)
lsr r10, r10, #1 // 1 << (4 + data->grain_scale_shift - 1)
bx r5
.align 2
L(gen_grain_\type\()_tbl):
.word L(generate_grain_\type\()_lag0) - L(gen_grain_\type\()_tbl) + CONFIG_THUMB
.word L(generate_grain_\type\()_lag1) - L(gen_grain_\type\()_tbl) + CONFIG_THUMB
.word L(generate_grain_\type\()_lag2) - L(gen_grain_\type\()_tbl) + CONFIG_THUMB
.word L(generate_grain_\type\()_lag3) - L(gen_grain_\type\()_tbl) + CONFIG_THUMB
L(generate_grain_\type\()_lag0):
.ifc \type, y
mov r1, #GRAIN_HEIGHT
bl generate_grain_rows_neon
.else
mov r1, #3
bl generate_grain_rows_neon
mov r1, #GRAIN_HEIGHT-3
vdup.16 q12, r7
vld1.8 {d22[]}, [r4] // ar_coeffs_uv[0]
vmov.i8 q0, #0
vmov.i8 q1, #255
vext.8 q13, q0, q1, #13
vext.8 q14, q1, q0, #1
vneg.s16 q12, q12
1:
vmov q1, q13
bl gen_grain_uv_444_lag0_neon // 16
vmov.i8 q1, #255
bl gen_grain_uv_444_lag0_neon // 32
bl gen_grain_uv_444_lag0_neon // 48
bl gen_grain_uv_444_lag0_neon // 64
vmov q1, q14
bl gen_grain_uv_444_lag0_neon // 80
get_grain_2 d16
subs r1, r1, #1
add r11, r11, #2
vst1.16 {d16[0]}, [r0]!
bgt 1b
.endif
pop {r4-r11,pc}
L(generate_grain_\type\()_lag1):
vpush {q4-q7}
mov r5, #127
vld1.8 {d27[]}, [r4]! // ar_coeffs_y[0]
vld1.8 {d28[]}, [r4]! // ar_coeffs_y[1]
vld1.8 {d29[]}, [r4] // ar_coeffs_y[2]
.ifc \type, y
ldrsb r4, [r4, #1] // ar_coeffs_y[3]
.else
add r4, r4, #2
.endif
mov r1, #3
.ifc \type, uv_444
vld1.8 {d13[]}, [r4] // ar_coeffs_uv[4]
ldrsb r4, [r4, #-1] // ar_coeffs_uv[3]
.endif
bl generate_grain_rows_neon
mov r1, #GRAIN_HEIGHT - 3
1:
sum_\type\()_lag1 q7, q8, q8, q9, left
sum_\type\()_lag1 q8, q8, q9, q10
sum_\type\()_lag1 q9, q9, q10, q11
sum_\type\()_lag1 q10, q10, q11, q12
sum_\type\()_lag1 q12, q11, q12, q13, right
get_grain_2 d26
subs r1, r1, #1
.ifc \type, uv_444
add r11, r11, #2
.endif
store_grain_row d14, d15, d16, d17, d18, d19, d20, d21, d24, d25, d26
vmov q11, q10
vmov q10, q9
vmov q9, q8
vmov q8, q7
bgt 1b
vpop {q4-q7}
pop {r4-r11,pc}
L(generate_grain_\type\()_lag2):
vpush {q4-q7}
mov r5, #127
vld1.8 {d28,d29}, [r4] // ar_coeffs_y[0-11], ar_coeffs_uv[0-12]
vmov.s8 r4, d29[2]
vmov.s8 r10, d29[3]
mov r1, #3
bl generate_grain_rows_neon
mov r1, #GRAIN_HEIGHT - 3
1:
bl sum_\type\()_lag2_left_neon
bl sum_\type\()_lag2_mid_neon
bl sum_\type\()_lag2_mid_neon
bl sum_\type\()_lag2_mid_neon
bl sum_\type\()_lag2_right_neon
get_grain_2 d16
subs r1, r1, #1
.ifc \type, uv_444
add r11, r11, #2
.endif
vst1.16 {d16[0]}, [r0]!
bgt 1b
vpop {q4-q7}
pop {r4-r11,pc}
L(generate_grain_\type\()_lag3):
vpush {q4-q7}
mov r5, #127
vld1.8 {q13, q14}, [r4] // ar_coeffs_y[0-23], ar_coeffs_uv[0-24]
vmov.u8 r4, d28[5]
vmov.u8 r10, d28[6]
vmov.u8 r12, d28[7]
orr r4, r4, r10, lsl #8
orr r4, r4, r12, lsl #16
mov r1, #3
vpush {d26}
bl generate_grain_rows_neon
vpop {d26}
mov r1, #GRAIN_HEIGHT - 3
1:
bl sum_\type\()_lag3_left_neon
bl sum_\type\()_lag3_mid_neon
bl sum_\type\()_lag3_mid_neon
bl sum_\type\()_lag3_mid_neon
bl sum_\type\()_lag3_right_neon
get_grain_2 d16
subs r1, r1, #1
.ifc \type, uv_444
add r11, r11, #2
.endif
vst1.16 {d16[0]}, [r0]!
bgt 1b
vpop {q4-q7}
pop {r4-r11,pc}
endfunc
.endm
gen_grain_82 y
gen_grain_82 uv_444
.macro set_height dst, type
.ifc \type, uv_420
mov \dst, #SUB_GRAIN_HEIGHT-3
.else
mov \dst, #GRAIN_HEIGHT-3
.endif
.endm
.macro increment_y_ptr reg, type
.ifc \type, uv_420
add \reg, \reg, #2*GRAIN_WIDTH-(3*32)
.else
sub \reg, \reg, #3*32-GRAIN_WIDTH
.endif
.endm
.macro gen_grain_44 type
function generate_grain_\type\()_8bpc_neon, export=1
push {r4-r11,lr}
mov r12, r3
mov lr, #28
add r11, r1, #3*GRAIN_WIDTH-3
mov r1, r2
mul r12, r12, lr
movrel r3, X(gaussian_sequence)
ldr r2, [r1, #FGD_SEED]
ldr r9, [r1, #FGD_GRAIN_SCALE_SHIFT]
add r4, r1, #FGD_AR_COEFFS_UV
adr r5, L(gen_grain_\type\()_tbl)
ldr r6, [r1, #FGD_AR_COEFF_LAG]
add r9, r9, #4
ldr r6, [r5, r6, lsl #2]
vdup.16 q15, r9 // 4 + data->grain_scale_shift
add r5, r5, r6
vneg.s16 q15, q15
cmp r12, #0
movw r10, #0x49d8
movw lr, #0xb524
// Intentionally using a separate register instead of moveq with an
// immediate constant, to avoid the IT instruction forms deprecated in ARMv8.
it eq
moveq r10, lr
add r4, r4, r12 // Add offset to ar_coeffs_uv[1]
eor r2, r2, r10
ldr r7, [r1, #FGD_AR_COEFF_SHIFT]
mov r8, #1
mov r10, #1
lsl r8, r8, r7 // 1 << ar_coeff_shift
lsl r10, r10, r9 // 1 << (4 + data->grain_scale_shift)
lsr r8, r8, #1 // 1 << (ar_coeff_shift - 1)
lsr r10, r10, #1 // 1 << (4 + data->grain_scale_shift - 1)
bx r5
.align 2
L(gen_grain_\type\()_tbl):
.word L(generate_grain_\type\()_lag0) - L(gen_grain_\type\()_tbl) + CONFIG_THUMB
.word L(generate_grain_\type\()_lag1) - L(gen_grain_\type\()_tbl) + CONFIG_THUMB
.word L(generate_grain_\type\()_lag2) - L(gen_grain_\type\()_tbl) + CONFIG_THUMB
.word L(generate_grain_\type\()_lag3) - L(gen_grain_\type\()_tbl) + CONFIG_THUMB
L(generate_grain_\type\()_lag0):
.ifc \type, uv_420
vpush {q4-q5}
.endif
mov r1, #3
bl generate_grain_rows_44_neon
set_height r1, \type
vdup.16 q12, r7
vld1.8 {d22[]}, [r4] // ar_coeffs_uv[0]
vmov.i8 q0, #0
vmov.i8 q1, #255
vext.8 q13, q0, q1, #13
vext.8 q14, q1, q0, #7
vneg.s16 q12, q12
1:
bl get_grain_row_44_neon
.ifc \type, uv_420
add r12, r11, #GRAIN_WIDTH
.endif
vmov q1, q13
vmov q0, q8
bl add_\type\()_coeff_lag0_neon
vmov.i8 q1, #255
vmov q0, q9
vmov q8, q2
bl add_\type\()_coeff_lag0_neon
vmov q1, q14
vmov q0, q10
vmov q9, q2
bl add_\type\()_coeff_lag0_neon
vmov q10, q2
subs r1, r1, #1
increment_y_ptr r11, \type
store_grain_row_44 d16, d17, d18, d19, d20, d21
bgt 1b
.ifc \type, uv_420
vpop {q4-q5}
.endif
pop {r4-r11,pc}
L(generate_grain_\type\()_lag1):
vpush {q4-q7}
mov r5, #127
vld1.8 {d27[]}, [r4]! // ar_coeffs_uv[0]
vld1.8 {d28[]}, [r4]! // ar_coeffs_uv[1]
vld1.8 {d29[]}, [r4] // ar_coeffs_uv[2]
add r4, r4, #2
mov r1, #3
vld1.8 {d13[]}, [r4] // ar_coeffs_uv[4]
ldrsb r4, [r4, #-1] // ar_coeffs_uv[3]
bl generate_grain_rows_44_neon
set_height r1, \type
1:
sum_\type\()_lag1 q7, q8, q8, q9, left
sum_\type\()_lag1 q8, q8, q9, q10
sum_\type\()_lag1 q10, q9, q10, q11, right
subs r1, r1, #1
increment_y_ptr r11, \type
store_grain_row_44 d14, d15, d16, d17, d20, d21
vmov q9, q8
vmov q8, q7
bgt 1b
vpop {q4-q7}
pop {r4-r11,pc}
L(generate_grain_\type\()_lag2):
vpush {q4-q7}
mov r5, #127
vld1.8 {d28,d29}, [r4] // ar_coeffs_uv[0-12]
vmov.s8 r4, d29[2]
vmov.s8 r10, d29[3]
mov r1, #3
bl generate_grain_rows_44_neon
set_height r1, \type
1:
bl sum_\type\()_lag2_left_neon
bl sum_\type\()_lag2_mid_neon
bl sum_\type\()_lag2_right_neon
subs r1, r1, #1
increment_y_ptr r11, \type
add r0, r0, #GRAIN_WIDTH-48
bgt 1b
vpop {q4-q7}
pop {r4-r11,pc}
L(generate_grain_\type\()_lag3):
vpush {q4-q7}
mov r5, #127
vld1.8 {q13, q14}, [r4] // ar_coeffs_y[0-23], ar_coeffs_uv[0-24]
vmov.u8 r4, d28[5]
vmov.u8 r10, d28[6]
vmov.u8 r12, d28[7]
orr r4, r4, r10, lsl #8
orr r4, r4, r12, lsl #16
mov r1, #3
bl generate_grain_rows_44_neon
set_height r1, \type
1:
bl sum_\type\()_lag3_left_neon
bl sum_\type\()_lag3_mid_neon
bl sum_\type\()_lag3_right_neon
subs r1, r1, #1
increment_y_ptr r11, \type
add r0, r0, #GRAIN_WIDTH-48
bgt 1b
vpop {q4-q7}
pop {r4-r11,pc}
endfunc
.endm
gen_grain_44 uv_420
gen_grain_44 uv_422
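// NEON has no byte gather, so the scaling look-ups go through scalar loads;
// gather_interleaved/gather below are roughly (sketch):
//   for (int i = 0; i < n; i++)
//       dst[i] = scaling[src[i]];    // r3 points at the scaling table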
.macro gather_interleaved dst1, dst2, src1, src2, off
vmov.u8 r11, \src1[0+\off]
vmov.u8 r12, \src2[0+\off]
add r11, r11, r3
vmov.u8 lr, \src1[2+\off]
add r12, r12, r3
vld1.8 {\dst1[0+\off]}, [r11]
vmov.u8 r11, \src2[2+\off]
add lr, lr, r3
vld1.8 {\dst2[0+\off]}, [r12]
vmov.u8 r12, \src1[4+\off]
add r11, r11, r3
vld1.8 {\dst1[2+\off]}, [lr]
vmov.u8 lr, \src2[4+\off]
add r12, r12, r3
vld1.8 {\dst2[2+\off]}, [r11]
vmov.u8 r11, \src1[6+\off]
add lr, lr, r3
vld1.8 {\dst1[4+\off]}, [r12]
vmov.u8 r12, \src2[6+\off]
add r11, r11, r3
vld1.8 {\dst2[4+\off]}, [lr]
add r12, r12, r3
vld1.8 {\dst1[6+\off]}, [r11]
vld1.8 {\dst2[6+\off]}, [r12]
.endm
.macro gather dst1, dst2, dst3, dst4, src1, src2, src3, src4
gather_interleaved \dst1, \dst3, \src1, \src3, 0
gather_interleaved \dst1, \dst3, \src1, \src3, 1
gather_interleaved \dst2, \dst4, \src2, \src4, 0
gather_interleaved \dst2, \dst4, \src2, \src4, 1
.endm
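// The gather macros use each byte of \src1/\src2 as an index into the
// scaling LUT pointed to by r3 and insert the looked-up byte into the
// corresponding lane of \dst1/\dst2.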
function gather32_neon
push {r11-r12,lr}
gather d8, d9, d10, d11, d0, d1, d2, d3
pop {r11-r12,pc}
endfunc
function gather16_neon
push {r11-r12,lr}
gather_interleaved d8, d9, d0, d1, 0
gather_interleaved d8, d9, d0, d1, 1
pop {r11-r12,pc}
endfunc
const overlap_coeffs_0, align=4
.byte 27, 17, 0, 0, 0, 0, 0, 0
.byte 17, 27, 32, 32, 32, 32, 32, 32
endconst
const overlap_coeffs_1, align=4
.byte 23, 0, 0, 0, 0, 0, 0, 0
.byte 22, 32, 32, 32, 32, 32, 32, 32
endconst
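// Film grain overlap blend weights (old grain, new grain): overlap_coeffs_0
// for a non-subsampled axis, overlap_coeffs_1 for a horizontally subsampled
// one. The overlap blends below compute round2(old*w_old + new*w_new, 5).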
.macro calc_offset offx, offy, src, sx, sy
and \offy, \src, #0xF // randval & 0xF
lsr \offx, \src, #4 // randval >> 4
.if \sy == 0
add \offy, \offy, \offy // 2 * (randval & 0xF)
.endif
.if \sx == 0
add \offx, \offx, \offx // 2 * (randval >> 4)
.endif
.endm
.macro add_offset dst, offx, offy, src, stride
mla \dst, \stride, \offy, \src // grain_lut += grain_stride * offy
add \dst, \dst, \offx // grain_lut += offx
.endm
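// calc_offset/add_offset convert one random value into a grain LUT position:
// offx = randval >> 4, offy = randval & 0xF (each doubled on a non-subsampled
// axis), then dst = grain_lut + offy * grain_stride + offx.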
// void dav1d_fgy_32x32_8bpc_neon(pixel *const dst, const pixel *const src,
// const ptrdiff_t stride,
// const uint8_t scaling[SCALING_SIZE],
// const int scaling_shift,
// const entry grain_lut[][GRAIN_WIDTH],
// const int offsets[][2],
// const int h, const ptrdiff_t clip,
// const ptrdiff_t type);
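// Per row: the grain is optionally blended with the neighbouring blocks'
// grain (x/y overlap), multiplied by scaling[src] and rounded down by
// scaling_shift, added to the source pixels, and clamped either to the full
// 8 bit range or to 16..235 when clip is set.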
function fgy_32x32_8bpc_neon, export=1
push {r4-r11,lr}
vpush {q4-q7}
ldrd r4, r5, [sp, #100] // scaling_shift, grain_lut
ldrd r6, r7, [sp, #108] // offsets, h
ldr r8, [sp, #116] // clip
mov r9, #GRAIN_WIDTH // grain_lut stride
neg r4, r4
vdup.16 q13, r4 // -scaling_shift
cmp r8, #0
movrel_local r12, overlap_coeffs_0
beq 1f
// clip
vmov.i8 q14, #16
vmov.i8 q15, #235
b 2f
1:
// no clip
vmov.i8 q14, #0
vmov.i8 q15, #255
2:
vld1.8 {d24, d25}, [r12, :128] // overlap_coeffs
add r5, r5, #9 // grain_lut += 9
add r5, r5, r9, lsl #3 // grain_lut += 8 * grain_stride
add r5, r5, r9 // grain_lut += grain_stride
ldr r10, [r6, #8] // offsets[1][0]
calc_offset r10, r4, r10, 0, 0
add_offset r4, r10, r4, r5, r9
ldr r10, [r6, #4] // offsets[0][1]
calc_offset r10, r11, r10, 0, 0
add_offset r11, r10, r11, r5, r9
ldr r10, [r6, #12] // offsets[1][1]
calc_offset r10, r8, r10, 0, 0
add_offset r8, r10, r8, r5, r9
ldr r6, [r6] // offsets[0][0]
calc_offset r6, lr, r6, 0, 0
add_offset r5, r6, lr, r5, r9
add r4, r4, #32 // grain_lut += FG_BLOCK_SIZE * bx
add r6, r11, r9, lsl #5 // grain_lut += grain_stride * FG_BLOCK_SIZE * by
ldr r10, [sp, #120] // type
adr r11, L(fgy_loop_tbl)
tst r10, #1
ldr r10, [r11, r10, lsl #2]
add r8, r8, r9, lsl #5 // grain_lut += grain_stride * FG_BLOCK_SIZE * by
add r8, r8, #32 // grain_lut += FG_BLOCK_SIZE * bx
add r11, r11, r10
beq 1f
// y overlap
vdup.8 d14, d24[0]
vdup.8 d15, d24[1]
mov r10, r7 // backup actual h
mov r7, #2
1:
bx r11
endfunc
function fgy_loop_neon
L(fgy_loop_tbl):
.word L(loop_00) - L(fgy_loop_tbl) + CONFIG_THUMB
.word L(loop_01) - L(fgy_loop_tbl) + CONFIG_THUMB
.word L(loop_10) - L(fgy_loop_tbl) + CONFIG_THUMB
.word L(loop_11) - L(fgy_loop_tbl) + CONFIG_THUMB
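// The table above is indexed directly by the type argument: bit 0 selects
// vertical (top) overlap, bit 1 horizontal (left) overlap.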
.macro fgy ox, oy
L(loop_\ox\oy):
1:
.if \ox
vld1.8 {d8}, [r4], r9 // grain_lut old
.endif
.if \oy
vld1.8 {q2, q3}, [r6], r9 // grain_lut top
.endif
.if \ox && \oy
vld1.8 {d10}, [r8], r9 // grain_lut top old
.endif
vld1.8 {q0, q1}, [r1, :128], r2 // src
vld1.8 {q10, q11}, [r5], r9 // grain_lut
.if \ox
vmull.s8 q4, d8, d24
vmlal.s8 q4, d20, d25
.endif
.if \oy
.if \ox
vmull.s8 q5, d10, d24
vmlal.s8 q5, d4, d25
vqrshrn.s16 d20, q4, #5
vqrshrn.s16 d4, q5, #5
.endif
vmull.s8 q4, d20, d15
vmull.s8 q5, d21, d15
vmull.s8 q8, d22, d15
vmull.s8 q9, d23, d15
vmlal.s8 q4, d4, d14
vmlal.s8 q5, d5, d14
vmlal.s8 q8, d6, d14
vmlal.s8 q9, d7, d14
vqrshrn.s16 d20, q4, #5
vqrshrn.s16 d21, q5, #5
vqrshrn.s16 d22, q8, #5
vqrshrn.s16 d23, q9, #5
.elseif \ox
vqrshrn.s16 d20, q4, #5
.endif
bl gather32_neon
vmovl.s8 q8, d20 // grain
vmovl.s8 q9, d21
vmovl.s8 q10, d22
vmovl.s8 q11, d23
vmovl.u8 q2, d8 // scaling
vmovl.u8 q3, d9
vmovl.u8 q4, d10
vmovl.u8 q5, d11
vmul.i16 q8, q8, q2 // scaling * grain
vmul.i16 q9, q9, q3
vmul.i16 q10, q10, q4
vmul.i16 q11, q11, q5
vrshl.s16 q8, q8, q13 // round2(scaling * grain, scaling_shift)
vrshl.s16 q9, q9, q13
vrshl.s16 q10, q10, q13
vrshl.s16 q11, q11, q13
vaddw.u8 q8, q8, d0 // *src + noise
vaddw.u8 q9, q9, d1
vaddw.u8 q10, q10, d2
vaddw.u8 q11, q11, d3
vqmovun.s16 d0, q8
vqmovun.s16 d1, q9
vqmovun.s16 d2, q10
vqmovun.s16 d3, q11
vmax.u8 q0, q0, q14
vmax.u8 q1, q1, q14
vmin.u8 q0, q0, q15
vmin.u8 q1, q1, q15
subs r7, r7, #1
.if \oy
vdup.8 d14, d25[0]
vdup.8 d15, d25[1]
.endif
vst1.8 {q0, q1}, [r0, :128], r2 // dst
bgt 1b
.if \oy
cmp r10, #2
sub r7, r10, #2 // restore actual remaining h
bgt L(loop_\ox\()0)
.endif
vpop {q4-q7}
pop {r4-r11,pc}
.endm
fgy 0, 0
fgy 0, 1
fgy 1, 0
fgy 1, 1
endfunc
// void dav1d_fguv_32x32_420_8bpc_neon(pixel *const dst,
// const pixel *const src,
// const ptrdiff_t stride,
// const uint8_t scaling[SCALING_SIZE],
// const Dav1dFilmGrainData *const data,
// const entry grain_lut[][GRAIN_WIDTH],
// const pixel *const luma_row,
// const ptrdiff_t luma_stride,
// const int offsets[][2],
// const ptrdiff_t h, const ptrdiff_t uv,
// const ptrdiff_t is_id,
// const ptrdiff_t type);
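// Chroma variant of the function above: luma is averaged down to the chroma
// resolution for subsampled layouts, and on the !csfl paths the scaling LUT
// index is ((luma*uv_luma_mult + src*uv_mult) >> 6) + uv_offset instead of
// the luma value itself; the noise is then applied as in the luma function.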
.macro fguv layout, sx, sy
function fguv_32x32_\layout\()_8bpc_neon, export=1
push {r4-r11,lr}
vpush {q4-q7}
ldrd r4, r5, [sp, #100] // data, grain_lut
ldrd r6, r7, [sp, #108] // luma_row, luma_stride
ldrd r8, r9, [sp, #116] // offsets, h
ldrd r10, r11, [sp, #124] // uv, is_id
// !csfl
add r10, r4, r10, lsl #2 // + 4*uv
add r12, r10, #FGD_UV_LUMA_MULT
add lr, r10, #FGD_UV_MULT
add r10, r10, #FGD_UV_OFFSET
vld1.16 {d4[]}, [r12] // uv_luma_mult
vld1.16 {d4[2]}, [r10] // uv_offset
vld1.16 {d4[1]}, [lr] // uv_mult
ldr lr, [r4, #FGD_SCALING_SHIFT]
ldr r12, [r4, #FGD_CLIP_TO_RESTRICTED_RANGE]
neg lr, lr // -scaling_shift
cmp r12, #0
vdup.16 q13, lr // -scaling_shift
beq 1f
// clip
cmp r11, #0
vmov.i8 q14, #16
vmov.i8 q15, #240
beq 2f
// is_id
vmov.i8 q15, #235
b 2f
1:
// no clip
vmov.i8 q14, #0
vmov.i8 q15, #255
2:
mov r10, #GRAIN_WIDTH // grain_lut stride
add r5, r5, #(3 + (2 >> \sx)*3) // grain_lut += 9 or 6
.if \sy
add r5, r5, r10, lsl #2 // grain_lut += 4 * grain_stride
add r5, r5, r10, lsl #1 // grain_lut += 2 * grain_stride
.else
add r5, r5, r10, lsl #3 // grain_lut += 8 * grain_stride
add r5, r5, r10 // grain_lut += grain_stride
.endif
ldr r12, [r8, #8] // offsets[1][0]
calc_offset r12, r4, r12, \sx, \sy
add_offset r4, r12, r4, r5, r10
ldr r12, [r8, #4] // offsets[0][1]
calc_offset r12, lr, r12, \sx, \sy
add_offset lr, r12, lr, r5, r10
ldr r12, [r8, #12] // offsets[1][1]
calc_offset r12, r11, r12, \sx, \sy
add_offset r11, r12, r11, r5, r10
ldr r8, [r8] // offsets[0][0]
calc_offset r8, r12, r8, \sx, \sy
add_offset r5, r8, r12, r5, r10
add r4, r4, #(32 >> \sx) // grain_lut += FG_BLOCK_SIZE * bx
add r8, lr, r10, lsl #(5 - \sy) // grain_lut += grain_stride * FG_BLOCK_SIZE * by
add r11, r11, r10, lsl #(5 - \sy) // grain_lut += grain_stride * FG_BLOCK_SIZE * by
add r11, r11, #(32 >> \sx) // grain_lut += FG_BLOCK_SIZE * bx
movrel_local r12, overlap_coeffs_\sx
ldr lr, [sp, #132] // type
vld1.8 {d24, d25}, [r12, :128] // overlap_coeffs
movrel_local r12, L(fguv_loop_sx\sx\()_tbl)
#if CONFIG_THUMB
// This uses movrel_local instead of adr above, because the target
// can be out of range for adr. But movrel_local leaves the thumb bit
// set on COFF (but probably wouldn't if building for thumb on ELF),
// thus try to clear the bit for robustness.
bic r12, r12, #1
#endif
tst lr, #1
ldr lr, [r12, lr, lsl #2]
add r12, r12, lr
beq 1f
// y overlap
sub lr, r9, #(2 >> \sy) // backup remaining h
mov r9, #(2 >> \sy)
1:
.if \sy
vmov.i8 d6, #23
vmov.i8 d7, #22
.else
vmov.i8 d6, #27
vmov.i8 d7, #17
.endif
.if \sy
add r7, r7, r7 // luma_stride *= 2
.endif
bx r12
endfunc
.endm
fguv 420, 1, 1
fguv 422, 1, 0
fguv 444, 0, 0
function fguv_loop_sx0_neon
L(fguv_loop_sx0_tbl):
.word L(fguv_loop_sx0_csfl0_00) - L(fguv_loop_sx0_tbl) + CONFIG_THUMB
.word L(fguv_loop_sx0_csfl0_01) - L(fguv_loop_sx0_tbl) + CONFIG_THUMB
.word L(fguv_loop_sx0_csfl0_10) - L(fguv_loop_sx0_tbl) + CONFIG_THUMB
.word L(fguv_loop_sx0_csfl0_11) - L(fguv_loop_sx0_tbl) + CONFIG_THUMB
.word L(fguv_loop_sx0_csfl1_00) - L(fguv_loop_sx0_tbl) + CONFIG_THUMB
.word L(fguv_loop_sx0_csfl1_01) - L(fguv_loop_sx0_tbl) + CONFIG_THUMB
.word L(fguv_loop_sx0_csfl1_10) - L(fguv_loop_sx0_tbl) + CONFIG_THUMB
.word L(fguv_loop_sx0_csfl1_11) - L(fguv_loop_sx0_tbl) + CONFIG_THUMB
.macro fguv_loop_sx0 csfl, ox, oy
L(fguv_loop_sx0_csfl\csfl\()_\ox\oy):
.if \oy
mov r12, lr
.endif
1:
.if \ox
vld1.8 {d8}, [r4], r10 // grain_lut old
.endif
.if \oy
vld1.8 {q8, q9}, [r8], r10 // grain_lut top
.endif
.if \ox && \oy
vld1.8 {d10}, [r11], r10 // grain_lut top old
.endif
vld1.8 {q0, q1}, [r6, :128], r7 // luma
vld1.8 {q10, q11}, [r5], r10 // grain_lut
.if \ox
vmull.s8 q4, d8, d24
vmlal.s8 q4, d20, d25
.endif
.if \oy
.if \ox
vmull.s8 q5, d10, d24
vmlal.s8 q5, d16, d25
vqrshrn.s16 d20, q4, #5
vqrshrn.s16 d16, q5, #5
.endif
vmull.s8 q4, d20, d7
vmull.s8 q5, d21, d7
vmull.s8 q6, d22, d7
vmull.s8 q7, d23, d7
vmlal.s8 q4, d16, d6
vmlal.s8 q5, d17, d6
vmlal.s8 q6, d18, d6
vmlal.s8 q7, d19, d6
vqrshrn.s16 d20, q4, #5
vqrshrn.s16 d21, q5, #5
vqrshrn.s16 d22, q6, #5
vqrshrn.s16 d23, q7, #5
.elseif \ox
vqrshrn.s16 d20, q4, #5
.endif
.if !\csfl
vld1.8 {q8, q9}, [r1, :128] // src
vmovl.u8 q4, d0
vmovl.u8 q5, d1
vmovl.u8 q6, d2
vmovl.u8 q7, d3
vmovl.u8 q0, d16
vmovl.u8 q1, d17
vmovl.u8 q8, d18
vmovl.u8 q9, d19
vmul.i16 q4, q4, d4[0]
vmul.i16 q5, q5, d4[0]
vmul.i16 q6, q6, d4[0]
vmul.i16 q7, q7, d4[0]
vmul.i16 q0, q0, d4[1]
vmul.i16 q1, q1, d4[1]
vmul.i16 q8, q8, d4[1]
vmul.i16 q9, q9, d4[1]
vqadd.s16 q4, q4, q0
vqadd.s16 q5, q5, q1
vqadd.s16 q6, q6, q8
vqadd.s16 q7, q7, q9
vdup.16 q0, d4[2]
vshr.s16 q4, q4, #6
vshr.s16 q5, q5, #6
vshr.s16 q6, q6, #6
vshr.s16 q7, q7, #6
vadd.i16 q4, q4, q0
vadd.i16 q5, q5, q0
vadd.i16 q6, q6, q0
vadd.i16 q7, q7, q0
vqmovun.s16 d0, q4
vqmovun.s16 d1, q5
vqmovun.s16 d2, q6
vqmovun.s16 d3, q7
.endif
bl gather32_neon
vld1.8 {q0, q1}, [r1, :128], r2 // src
vmovl.s8 q8, d20 // grain
vmovl.s8 q9, d21
vmovl.s8 q10, d22
vmovl.s8 q11, d23
vmovl.u8 q6, d8 // scaling
vmovl.u8 q7, d9
vmovl.u8 q4, d10
vmovl.u8 q5, d11
vmul.i16 q8, q8, q6 // scaling * grain
vmul.i16 q9, q9, q7
vmul.i16 q10, q10, q4
vmul.i16 q11, q11, q5
vrshl.s16 q8, q8, q13 // round2(scaling * grain, scaling_shift)
vrshl.s16 q9, q9, q13
vrshl.s16 q10, q10, q13
vrshl.s16 q11, q11, q13
vaddw.u8 q8, q8, d0 // *src + noise
vaddw.u8 q9, q9, d1
vaddw.u8 q10, q10, d2
vaddw.u8 q11, q11, d3
vqmovun.s16 d0, q8
vqmovun.s16 d1, q9
vqmovun.s16 d2, q10
vqmovun.s16 d3, q11
vmax.u8 q0, q0, q14
vmax.u8 q1, q1, q14
vmin.u8 q0, q0, q15
vmin.u8 q1, q1, q15
subs r9, r9, #1
.if \oy
vdup.8 d6, d25[0]
vdup.8 d7, d25[1]
.endif
vst1.8 {q0, q1}, [r0, :128], r2 // dst
bgt 1b
.if \oy
cmp r12, #0
mov r9, r12 // restore actual remaining h
bgt L(fguv_loop_sx0_csfl\csfl\()_\ox\()0)
.endif
b 9f
.endm
fguv_loop_sx0 0, 0, 0
fguv_loop_sx0 0, 0, 1
fguv_loop_sx0 0, 1, 0
fguv_loop_sx0 0, 1, 1
fguv_loop_sx0 1, 0, 0
fguv_loop_sx0 1, 0, 1
fguv_loop_sx0 1, 1, 0
fguv_loop_sx0 1, 1, 1
9:
vpop {q4-q7}
pop {r4-r11,pc}
endfunc
function fguv_loop_sx1_neon
L(fguv_loop_sx1_tbl):
.word L(fguv_loop_sx1_csfl0_00) - L(fguv_loop_sx1_tbl) + CONFIG_THUMB
.word L(fguv_loop_sx1_csfl0_01) - L(fguv_loop_sx1_tbl) + CONFIG_THUMB
.word L(fguv_loop_sx1_csfl0_10) - L(fguv_loop_sx1_tbl) + CONFIG_THUMB
.word L(fguv_loop_sx1_csfl0_11) - L(fguv_loop_sx1_tbl) + CONFIG_THUMB
.word L(fguv_loop_sx1_csfl1_00) - L(fguv_loop_sx1_tbl) + CONFIG_THUMB
.word L(fguv_loop_sx1_csfl1_01) - L(fguv_loop_sx1_tbl) + CONFIG_THUMB
.word L(fguv_loop_sx1_csfl1_10) - L(fguv_loop_sx1_tbl) + CONFIG_THUMB
.word L(fguv_loop_sx1_csfl1_11) - L(fguv_loop_sx1_tbl) + CONFIG_THUMB
.macro fguv_loop_sx1 csfl, ox, oy
L(fguv_loop_sx1_csfl\csfl\()_\ox\oy):
.if \oy
mov r12, lr
.endif
1:
.if \ox
vld1.8 {d8}, [r4], r10 // grain_lut old
.endif
.if \oy
vld1.8 {q8}, [r8], r10 // grain_lut top
.endif
.if \ox && \oy
vld1.8 {d10}, [r11], r10 // grain_lut top old
.endif
vld1.8 {q0, q1}, [r6, :128], r7 // luma
vld1.8 {q10}, [r5], r10 // grain_lut
vld1.8 {q11}, [r1, :128], r2 // src
.if \ox
vmull.s8 q4, d8, d24
vmlal.s8 q4, d20, d25
.endif
vpaddl.u8 q0, q0
vpaddl.u8 q1, q1
.if \oy
.if \ox
vmull.s8 q5, d10, d24
vmlal.s8 q5, d16, d25
vqrshrn.s16 d20, q4, #5
vqrshrn.s16 d16, q5, #5
.endif
vmull.s8 q4, d20, d7
vmull.s8 q5, d21, d7
vmlal.s8 q4, d16, d6
vmlal.s8 q5, d17, d6
vqrshrn.s16 d20, q4, #5
vqrshrn.s16 d21, q5, #5
.elseif \ox
vqrshrn.s16 d20, q4, #5
.endif
.if \csfl
vrshrn.u16 d0, q0, #1
vrshrn.u16 d1, q1, #1
.else
vrshr.u16 q4, q0, #1
vrshr.u16 q5, q1, #1
vmovl.u8 q0, d22
vmovl.u8 q1, d23
vmul.i16 q4, q4, d4[0]
vmul.i16 q5, q5, d4[0]
vmul.i16 q0, q0, d4[1]
vmul.i16 q1, q1, d4[1]
vqadd.s16 q4, q4, q0
vqadd.s16 q5, q5, q1
vdup.16 q0, d4[2]
vshr.s16 q4, q4, #6
vshr.s16 q5, q5, #6
vadd.i16 q4, q4, q0
vadd.i16 q5, q5, q0
vqmovun.s16 d0, q4
vqmovun.s16 d1, q5
.endif
bl gather16_neon
vmovl.s8 q8, d20 // grain
vmovl.s8 q9, d21
vmovl.u8 q6, d8 // scaling
vmovl.u8 q7, d9
vmul.i16 q8, q8, q6 // scaling * grain
vmul.i16 q9, q9, q7
vrshl.s16 q8, q8, q13 // round2(scaling * grain, scaling_shift)
vrshl.s16 q9, q9, q13
vaddw.u8 q8, q8, d22 // *src + noise
vaddw.u8 q9, q9, d23
vqmovun.s16 d0, q8
vqmovun.s16 d1, q9
vmax.u8 q0, q0, q14
vmin.u8 q0, q0, q15
subs r9, r9, #1
.if \oy
vswp d6, d7
.endif
vst1.8 {q0}, [r0, :128], r2 // dst
bgt 1b
.if \oy
cmp r12, #0
mov r9, r12 // restore actual remaining h
bgt L(fguv_loop_sx1_csfl\csfl\()_\ox\()0)
.endif
b 9f
.endm
fguv_loop_sx1 0, 0, 0
fguv_loop_sx1 0, 0, 1
fguv_loop_sx1 0, 1, 0
fguv_loop_sx1 0, 1, 1
fguv_loop_sx1 1, 0, 0
fguv_loop_sx1 1, 0, 1
fguv_loop_sx1 1, 1, 0
fguv_loop_sx1 1, 1, 1
9:
vpop {q4-q7}
pop {r4-r11,pc}
endfunc
Admenri/urge | 130,500 | third_party/dav1d/src/arm/32/ipred16.S
/*
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2019, B Krishnan Iyer
* Copyright © 2020, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm.S"
#include "util.S"
// void ipred_dc_128_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height,
// const int bitdepth_max);
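// DC_128 prediction: every output pixel is the mid-grey value
// (bitdepth_max + 1) >> 1, derived below with a rounding shift of the
// bitdepth_max argument (read from the stack) by one.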
function ipred_dc_128_16bpc_neon, export=1
push {r4, lr}
ldr r4, [sp, #8]
ldr r12, [sp, #24]
clz r3, r3
adr r2, L(ipred_dc_128_tbl)
sub r3, r3, #25
vdup.16 q0, r12
ldr r3, [r2, r3, lsl #2]
add r12, r0, r1
vrshr.u16 q0, q0, #1
add r2, r2, r3
lsl r1, r1, #1
bx r2
.align 2
L(ipred_dc_128_tbl):
.word 640f - L(ipred_dc_128_tbl) + CONFIG_THUMB
.word 320f - L(ipred_dc_128_tbl) + CONFIG_THUMB
.word 160f - L(ipred_dc_128_tbl) + CONFIG_THUMB
.word 8f - L(ipred_dc_128_tbl) + CONFIG_THUMB
.word 4f - L(ipred_dc_128_tbl) + CONFIG_THUMB
4:
vst1.16 {d0}, [r0, :64], r1
vst1.16 {d0}, [r12, :64], r1
subs r4, r4, #4
vst1.16 {d0}, [r0, :64], r1
vst1.16 {d0}, [r12, :64], r1
bgt 4b
pop {r4, pc}
8:
vst1.16 {d0, d1}, [r0, :128], r1
vst1.16 {d0, d1}, [r12, :128], r1
subs r4, r4, #4
vst1.16 {d0, d1}, [r0, :128], r1
vst1.16 {d0, d1}, [r12, :128], r1
bgt 8b
pop {r4, pc}
160:
vmov q1, q0
16:
vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
subs r4, r4, #4
vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
bgt 16b
pop {r4, pc}
320:
vmov q1, q0
sub r1, r1, #32
32:
vst1.16 {d0, d1, d2, d3}, [r0, :128]!
vst1.16 {d0, d1, d2, d3}, [r12, :128]!
vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
subs r4, r4, #4
vst1.16 {d0, d1, d2, d3}, [r0, :128]!
vst1.16 {d0, d1, d2, d3}, [r12, :128]!
vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
bgt 32b
pop {r4, pc}
640:
vmov q1, q0
sub r1, r1, #96
64:
vst1.16 {d0, d1, d2, d3}, [r0, :128]!
vst1.16 {d0, d1, d2, d3}, [r12, :128]!
vst1.16 {d0, d1, d2, d3}, [r0, :128]!
vst1.16 {d0, d1, d2, d3}, [r12, :128]!
subs r4, r4, #2
vst1.16 {d0, d1, d2, d3}, [r0, :128]!
vst1.16 {d0, d1, d2, d3}, [r12, :128]!
vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
bgt 64b
pop {r4, pc}
endfunc
// void ipred_v_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
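// Vertical prediction: the row above the block (starting at topleft + 1) is
// loaded once and replicated down all `height` rows.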
function ipred_v_16bpc_neon, export=1
push {r4, lr}
ldr lr, [sp, #8]
clz r3, r3
adr r4, L(ipred_v_tbl)
sub r3, r3, #25
ldr r3, [r4, r3, lsl #2]
add r2, r2, #2
add r4, r4, r3
add r12, r0, r1
lsl r1, r1, #1
bx r4
.align 2
L(ipred_v_tbl):
.word 640f - L(ipred_v_tbl) + CONFIG_THUMB
.word 320f - L(ipred_v_tbl) + CONFIG_THUMB
.word 160f - L(ipred_v_tbl) + CONFIG_THUMB
.word 80f - L(ipred_v_tbl) + CONFIG_THUMB
.word 40f - L(ipred_v_tbl) + CONFIG_THUMB
40:
vld1.16 {d0}, [r2]
4:
vst1.16 {d0}, [r0, :64], r1
vst1.16 {d0}, [r12, :64], r1
subs lr, lr, #4
vst1.16 {d0}, [r0, :64], r1
vst1.16 {d0}, [r12, :64], r1
bgt 4b
pop {r4, pc}
80:
vld1.16 {q0}, [r2]
8:
vst1.16 {d0, d1}, [r0, :128], r1
vst1.16 {d0, d1}, [r12, :128], r1
subs lr, lr, #4
vst1.16 {d0, d1}, [r0, :128], r1
vst1.16 {d0, d1}, [r12, :128], r1
bgt 8b
pop {r4, pc}
160:
vld1.16 {q0, q1}, [r2]
16:
vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
subs lr, lr, #4
vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
bgt 16b
pop {r4, pc}
320:
vld1.16 {q0, q1}, [r2]!
sub r1, r1, #32
vld1.16 {q2, q3}, [r2]
32:
vst1.16 {d0, d1, d2, d3}, [r0, :128]!
vst1.16 {d0, d1, d2, d3}, [r12, :128]!
vst1.16 {d4, d5, d6, d7}, [r0, :128], r1
vst1.16 {d4, d5, d6, d7}, [r12, :128], r1
subs lr, lr, #4
vst1.16 {d0, d1, d2, d3}, [r0, :128]!
vst1.16 {d0, d1, d2, d3}, [r12, :128]!
vst1.16 {d4, d5, d6, d7}, [r0, :128], r1
vst1.16 {d4, d5, d6, d7}, [r12, :128], r1
bgt 32b
pop {r4, pc}
640:
vld1.16 {q0, q1}, [r2]!
sub r1, r1, #96
vld1.16 {q2, q3}, [r2]!
vld1.16 {q8, q9}, [r2]!
vld1.16 {q10, q11}, [r2]!
64:
vst1.16 {d0, d1, d2, d3}, [r0, :128]!
vst1.16 {d0, d1, d2, d3}, [r12, :128]!
vst1.16 {d4, d5, d6, d7}, [r0, :128]!
vst1.16 {d4, d5, d6, d7}, [r12, :128]!
subs lr, lr, #2
vst1.16 {d16, d17, d18, d19}, [r0, :128]!
vst1.16 {d16, d17, d18, d19}, [r12, :128]!
vst1.16 {d20, d21, d22, d23}, [r0, :128], r1
vst1.16 {d20, d21, d22, d23}, [r12, :128], r1
bgt 64b
pop {r4, pc}
endfunc
// void ipred_h_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
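// Horizontal prediction: each row is filled with its left neighbour pixel;
// the left column is read with a negative stride so that the left pixels of
// several rows can be fetched in a single load.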
function ipred_h_16bpc_neon, export=1
push {r4-r5, lr}
ldr r4, [sp, #12]
clz r3, r3
adr r5, L(ipred_h_tbl)
sub r3, r3, #25
ldr r3, [r5, r3, lsl #2]
sub r2, r2, #2
mov lr, #-2
add r5, r5, r3
add r12, r0, r1
lsl r1, r1, #1
bx r5
.align 2
L(ipred_h_tbl):
.word 640f - L(ipred_h_tbl) + CONFIG_THUMB
.word 320f - L(ipred_h_tbl) + CONFIG_THUMB
.word 160f - L(ipred_h_tbl) + CONFIG_THUMB
.word 8f - L(ipred_h_tbl) + CONFIG_THUMB
.word 40f - L(ipred_h_tbl) + CONFIG_THUMB
40:
sub r2, r2, #6
mov lr, #-8
4:
vld4.16 {d0[], d1[], d2[], d3[]}, [r2], lr
vst1.16 {d3}, [r0, :64], r1
vst1.16 {d2}, [r12, :64], r1
subs r4, r4, #4
vst1.16 {d1}, [r0, :64], r1
vst1.16 {d0}, [r12, :64], r1
bgt 4b
pop {r4-r5, pc}
8:
vld1.16 {d0[], d1[]}, [r2], lr
subs r4, r4, #4
vld1.16 {d2[], d3[]}, [r2], lr
vst1.16 {q0}, [r0, :128], r1
vld1.16 {d4[], d5[]}, [r2], lr
vst1.16 {q1}, [r12, :128], r1
vld1.16 {d6[], d7[]}, [r2], lr
vst1.16 {q2}, [r0, :128], r1
vst1.16 {q3}, [r12, :128], r1
bgt 8b
pop {r4-r5, pc}
160:
sub r1, r1, #16
16:
vld1.16 {d0[], d1[]}, [r2], lr
subs r4, r4, #4
vld1.16 {d2[], d3[]}, [r2], lr
vst1.16 {q0}, [r0, :128]!
vld1.16 {d4[], d5[]}, [r2], lr
vst1.16 {q1}, [r12, :128]!
vld1.16 {d6[], d7[]}, [r2], lr
vst1.16 {q0}, [r0, :128], r1
vst1.16 {q1}, [r12, :128], r1
vst1.16 {q2}, [r0, :128]!
vst1.16 {q3}, [r12, :128]!
vst1.16 {q2}, [r0, :128], r1
vst1.16 {q3}, [r12, :128], r1
bgt 16b
pop {r4-r5, pc}
320:
sub r1, r1, #48
32:
vld1.16 {d0[], d1[]}, [r2], lr
subs r4, r4, #4
vld1.16 {d2[], d3[]}, [r2], lr
vst1.16 {q0}, [r0, :128]!
vld1.16 {d4[], d5[]}, [r2], lr
vst1.16 {q1}, [r12, :128]!
vld1.16 {d6[], d7[]}, [r2], lr
vst1.16 {q0}, [r0, :128]!
vst1.16 {q1}, [r12, :128]!
vst1.16 {q0}, [r0, :128]!
vst1.16 {q1}, [r12, :128]!
vst1.16 {q0}, [r0, :128], r1
vst1.16 {q1}, [r12, :128], r1
vst1.16 {q2}, [r0, :128]!
vst1.16 {q3}, [r12, :128]!
vst1.16 {q2}, [r0, :128]!
vst1.16 {q3}, [r12, :128]!
vst1.16 {q2}, [r0, :128]!
vst1.16 {q3}, [r12, :128]!
vst1.16 {q2}, [r0, :128], r1
vst1.16 {q3}, [r12, :128], r1
bgt 32b
pop {r4-r5, pc}
640:
sub r1, r1, #96
64:
vld1.16 {d0[], d1[]}, [r2], lr
subs r4, r4, #2
vld1.16 {d4[], d5[]}, [r2], lr
vmov q1, q0
vmov q3, q2
vst1.16 {q0, q1}, [r0, :128]!
vst1.16 {q2, q3}, [r12, :128]!
vst1.16 {q0, q1}, [r0, :128]!
vst1.16 {q2, q3}, [r12, :128]!
vst1.16 {q0, q1}, [r0, :128]!
vst1.16 {q2, q3}, [r12, :128]!
vst1.16 {q0, q1}, [r0, :128], r1
vst1.16 {q2, q3}, [r12, :128], r1
bgt 64b
pop {r4-r5, pc}
endfunc
// void ipred_dc_top_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
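// DC_TOP prediction: the whole block is filled with the rounded average of
// the `width` pixels above it.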
function ipred_dc_top_16bpc_neon, export=1
push {r4-r5, lr}
ldr r4, [sp, #12]
clz r3, r3
adr r5, L(ipred_dc_top_tbl)
sub r3, r3, #25
ldr r3, [r5, r3, lsl #2]
add r2, r2, #2
add r5, r5, r3
add r12, r0, r1
lsl r1, r1, #1
bx r5
.align 2
L(ipred_dc_top_tbl):
.word 640f - L(ipred_dc_top_tbl) + CONFIG_THUMB
.word 320f - L(ipred_dc_top_tbl) + CONFIG_THUMB
.word 160f - L(ipred_dc_top_tbl) + CONFIG_THUMB
.word 80f - L(ipred_dc_top_tbl) + CONFIG_THUMB
.word 40f - L(ipred_dc_top_tbl) + CONFIG_THUMB
40:
vld1.16 {d0}, [r2]
vpadd.i16 d0, d0, d0
vpadd.i16 d0, d0, d0
vrshr.u16 d0, d0, #2
vdup.16 d0, d0[0]
4:
vst1.16 {d0}, [r0, :64], r1
vst1.16 {d0}, [r12, :64], r1
subs r4, r4, #4
vst1.16 {d0}, [r0, :64], r1
vst1.16 {d0}, [r12, :64], r1
bgt 4b
pop {r4-r5, pc}
80:
vld1.16 {d0, d1}, [r2]
vadd.i16 d0, d0, d1
vpadd.i16 d0, d0, d0
vpadd.i16 d0, d0, d0
vrshr.u16 d0, d0, #3
vdup.16 q0, d0[0]
8:
vst1.16 {d0, d1}, [r0, :128], r1
vst1.16 {d0, d1}, [r12, :128], r1
subs r4, r4, #4
vst1.16 {d0, d1}, [r0, :128], r1
vst1.16 {d0, d1}, [r12, :128], r1
bgt 8b
pop {r4-r5, pc}
160:
vld1.16 {d0, d1, d2, d3}, [r2]
vadd.i16 q0, q0, q1
vadd.i16 d0, d0, d1
vpadd.i16 d0, d0, d0
vpadd.i16 d0, d0, d0
vrshr.u16 d4, d0, #4
vdup.16 q0, d4[0]
vdup.16 q1, d4[0]
16:
vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
subs r4, r4, #4
vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
bgt 16b
pop {r4-r5, pc}
320:
vld1.16 {d0, d1, d2, d3}, [r2]!
vld1.16 {d4, d5, d6, d7}, [r2]
vadd.i16 q0, q0, q1
vadd.i16 q2, q2, q3
vadd.i16 q0, q0, q2
vadd.i16 d0, d0, d1
vpadd.i16 d0, d0, d0
vpaddl.u16 d0, d0
vrshrn.i32 d18, q0, #5
vdup.16 q0, d18[0]
vdup.16 q1, d18[0]
sub r1, r1, #32
32:
vst1.16 {d0, d1, d2, d3}, [r0, :128]!
vst1.16 {d0, d1, d2, d3}, [r12, :128]!
vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
subs r4, r4, #4
vst1.16 {d0, d1, d2, d3}, [r0, :128]!
vst1.16 {d0, d1, d2, d3}, [r12, :128]!
vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
bgt 32b
pop {r4-r5, pc}
640:
vld1.16 {d0, d1, d2, d3}, [r2]!
vld1.16 {d4, d5, d6, d7}, [r2]!
vadd.i16 q0, q0, q1
vld1.16 {d16, d17, d18, d19}, [r2]!
vadd.i16 q2, q2, q3
vld1.16 {d20, d21, d22, d23}, [r2]
vadd.i16 q8, q8, q9
vadd.i16 q10, q10, q11
vadd.i16 q0, q0, q2
vadd.i16 q8, q8, q10
vadd.i16 q0, q0, q8
vadd.i16 d0, d0, d1
vpaddl.u16 d0, d0
vpadd.i32 d0, d0, d0
vrshrn.i32 d18, q0, #6
vdup.16 q0, d18[0]
vdup.16 q1, d18[0]
sub r1, r1, #96
64:
vst1.16 {d0, d1, d2, d3}, [r0, :128]!
vst1.16 {d0, d1, d2, d3}, [r12, :128]!
vst1.16 {d0, d1, d2, d3}, [r0, :128]!
vst1.16 {d0, d1, d2, d3}, [r12, :128]!
subs r4, r4, #2
vst1.16 {d0, d1, d2, d3}, [r0, :128]!
vst1.16 {d0, d1, d2, d3}, [r12, :128]!
vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
bgt 64b
pop {r4-r5, pc}
endfunc
// void ipred_dc_left_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
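// DC_LEFT prediction: the whole block is filled with the rounded average of
// the `height` pixels to its left. The table below holds both the per-height
// summing entries and the per-width store loops; the former jump to the
// latter through r3.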
function ipred_dc_left_16bpc_neon, export=1
push {r4-r5, lr}
ldr r4, [sp, #12]
sub r2, r2, r4, lsl #1
clz r3, r3
clz lr, r4
sub lr, lr, #25
adr r5, L(ipred_dc_left_tbl)
sub r3, r3, #20
ldr r3, [r5, r3, lsl #2]
ldr lr, [r5, lr, lsl #2]
add r3, r5, r3
add r5, r5, lr
add r12, r0, r1
lsl r1, r1, #1
bx r5
.align 2
L(ipred_dc_left_tbl):
.word L(ipred_dc_left_h64) - L(ipred_dc_left_tbl) + CONFIG_THUMB
.word L(ipred_dc_left_h32) - L(ipred_dc_left_tbl) + CONFIG_THUMB
.word L(ipred_dc_left_h16) - L(ipred_dc_left_tbl) + CONFIG_THUMB
.word L(ipred_dc_left_h8) - L(ipred_dc_left_tbl) + CONFIG_THUMB
.word L(ipred_dc_left_h4) - L(ipred_dc_left_tbl) + CONFIG_THUMB
.word L(ipred_dc_left_w64) - L(ipred_dc_left_tbl) + CONFIG_THUMB
.word L(ipred_dc_left_w32) - L(ipred_dc_left_tbl) + CONFIG_THUMB
.word L(ipred_dc_left_w16) - L(ipred_dc_left_tbl) + CONFIG_THUMB
.word L(ipred_dc_left_w8) - L(ipred_dc_left_tbl) + CONFIG_THUMB
.word L(ipred_dc_left_w4) - L(ipred_dc_left_tbl) + CONFIG_THUMB
L(ipred_dc_left_h4):
vld1.16 {d0}, [r2, :64]
vpadd.i16 d0, d0, d0
vpadd.i16 d0, d0, d0
vrshr.u16 d0, d0, #2
vdup.16 q0, d0[0]
bx r3
L(ipred_dc_left_w4):
vst1.16 {d0}, [r0, :64], r1
vst1.16 {d0}, [r12, :64], r1
subs r4, r4, #4
vst1.16 {d0}, [r0, :64], r1
vst1.16 {d0}, [r12, :64], r1
bgt L(ipred_dc_left_w4)
pop {r4-r5, pc}
L(ipred_dc_left_h8):
vld1.16 {d0, d1}, [r2, :128]
vadd.i16 d0, d0, d1
vpadd.i16 d0, d0, d0
vpadd.i16 d0, d0, d0
vrshr.u16 d0, d0, #3
vdup.16 q0, d0[0]
bx r3
L(ipred_dc_left_w8):
vst1.16 {d0, d1}, [r0, :128], r1
vst1.16 {d0, d1}, [r12, :128], r1
subs r4, r4, #4
vst1.16 {d0, d1}, [r0, :128], r1
vst1.16 {d0, d1}, [r12, :128], r1
bgt L(ipred_dc_left_w8)
pop {r4-r5, pc}
L(ipred_dc_left_h16):
vld1.16 {d0, d1, d2, d3}, [r2, :128]
vadd.i16 q0, q0, q1
vadd.i16 d0, d0, d1
vpadd.i16 d0, d0, d0
vpadd.i16 d0, d0, d0
vrshr.u16 d0, d0, #4
vdup.16 q0, d0[0]
bx r3
L(ipred_dc_left_w16):
vmov q1, q0
1:
vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
subs r4, r4, #4
vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
bgt 1b
pop {r4-r5, pc}
L(ipred_dc_left_h32):
vld1.16 {d0, d1, d2, d3}, [r2, :128]!
vld1.16 {d4, d5, d6, d7}, [r2, :128]
vadd.i16 q0, q0, q1
vadd.i16 q2, q2, q3
vadd.i16 q0, q0, q2
vadd.i16 d0, d0, d1
vpadd.i16 d0, d0, d0
vpaddl.u16 d0, d0
vrshrn.i32 d0, q0, #5
vdup.16 q0, d0[0]
bx r3
L(ipred_dc_left_w32):
sub r1, r1, #32
vmov q1, q0
1:
vst1.16 {d0, d1, d2, d3}, [r0, :128]!
vst1.16 {d0, d1, d2, d3}, [r12, :128]!
vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
subs r4, r4, #4
vst1.16 {d0, d1, d2, d3}, [r0, :128]!
vst1.16 {d0, d1, d2, d3}, [r12, :128]!
vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
bgt 1b
pop {r4-r5, pc}
L(ipred_dc_left_h64):
vld1.16 {d0, d1, d2, d3}, [r2, :128]!
vld1.16 {d4, d5, d6, d7}, [r2, :128]!
vadd.i16 q0, q0, q1
vld1.16 {d16, d17, d18, d19}, [r2, :128]!
vadd.i16 q2, q2, q3
vld1.16 {d20, d21, d22, d23}, [r2, :128]
vadd.i16 q8, q8, q9
vadd.i16 q10, q10, q11
vadd.i16 q0, q0, q2
vadd.i16 q8, q8, q10
vadd.i16 q0, q0, q8
vadd.i16 d0, d0, d1
vpaddl.u16 d0, d0
vpadd.i32 d0, d0, d0
vrshrn.i32 d0, q0, #6
vdup.16 q0, d0[0]
bx r3
L(ipred_dc_left_w64):
sub r1, r1, #96
vmov q1, q0
1:
vst1.16 {d0, d1, d2, d3}, [r0, :128]!
vst1.16 {d0, d1, d2, d3}, [r12, :128]!
vst1.16 {d0, d1, d2, d3}, [r0, :128]!
vst1.16 {d0, d1, d2, d3}, [r12, :128]!
subs r4, r4, #2
vst1.16 {d0, d1, d2, d3}, [r0, :128]!
vst1.16 {d0, d1, d2, d3}, [r12, :128]!
vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
bgt 1b
pop {r4-r5, pc}
endfunc
// void ipred_dc_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
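// DC prediction: the `width` top and `height` left neighbours are summed,
// (width + height) >> 1 is added for rounding, and the result is shifted by
// ctz(width + height). For rectangular blocks the remaining factor of 3 or 5
// is removed with the fixed-point reciprocals 0xAAAB (~1/3) and 0x6667
// (~1/5) followed by a shift of 17.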
function ipred_dc_16bpc_neon, export=1
push {r4-r6, lr}
ldr r4, [sp, #16]
sub r2, r2, r4, lsl #1
add lr, r3, r4 // width + height
clz r3, r3
clz r12, r4
vdup.32 q15, lr // width + height
adr r5, L(ipred_dc_tbl)
rbit lr, lr // rbit(width + height)
sub r3, r3, #20 // 25 leading bits, minus table offset 5
sub r12, r12, #25
clz lr, lr // ctz(width + height)
ldr r3, [r5, r3, lsl #2]
ldr r12, [r5, r12, lsl #2]
neg lr, lr // -ctz(width + height)
add r3, r5, r3
add r5, r5, r12
vshr.u32 q15, q15, #1 // (width + height) >> 1
vdup.32 q14, lr // -ctz(width + height)
add r12, r0, r1
lsl r1, r1, #1
bx r5
.align 2
L(ipred_dc_tbl):
.word L(ipred_dc_h64) - L(ipred_dc_tbl) + CONFIG_THUMB
.word L(ipred_dc_h32) - L(ipred_dc_tbl) + CONFIG_THUMB
.word L(ipred_dc_h16) - L(ipred_dc_tbl) + CONFIG_THUMB
.word L(ipred_dc_h8) - L(ipred_dc_tbl) + CONFIG_THUMB
.word L(ipred_dc_h4) - L(ipred_dc_tbl) + CONFIG_THUMB
.word L(ipred_dc_w64) - L(ipred_dc_tbl) + CONFIG_THUMB
.word L(ipred_dc_w32) - L(ipred_dc_tbl) + CONFIG_THUMB
.word L(ipred_dc_w16) - L(ipred_dc_tbl) + CONFIG_THUMB
.word L(ipred_dc_w8) - L(ipred_dc_tbl) + CONFIG_THUMB
.word L(ipred_dc_w4) - L(ipred_dc_tbl) + CONFIG_THUMB
L(ipred_dc_h4):
vld1.16 {d0}, [r2, :64]!
vpadd.i16 d0, d0, d0
add r2, r2, #2
vpaddl.u16 d0, d0
bx r3
L(ipred_dc_w4):
vld1.16 {d2}, [r2]
vadd.i32 d0, d0, d30
vpadd.i16 d2, d2, d2
vpaddl.u16 d2, d2
cmp r4, #4
vadd.i32 d0, d0, d2
vshl.u32 d0, d0, d28
beq 1f
// h = 8/16
cmp r4, #16
movw lr, #0x6667
movw r5, #0xAAAB
it ne
movne lr, r5
vdup.32 d24, lr
vmul.i32 d0, d0, d24
vshr.u32 d0, d0, #17
1:
vdup.16 d0, d0[0]
2:
vst1.16 {d0}, [r0, :64], r1
vst1.16 {d0}, [r12, :64], r1
subs r4, r4, #4
vst1.16 {d0}, [r0, :64], r1
vst1.16 {d0}, [r12, :64], r1
bgt 2b
pop {r4-r6, pc}
L(ipred_dc_h8):
vld1.16 {d0, d1}, [r2, :128]!
vadd.i16 d0, d0, d1
vpadd.i16 d0, d0, d0
add r2, r2, #2
vpaddl.u16 d0, d0
bx r3
L(ipred_dc_w8):
vld1.16 {d2, d3}, [r2]
vadd.i32 d0, d0, d30
vadd.i16 d2, d2, d3
vpadd.i16 d2, d2, d2
vpaddl.u16 d2, d2
cmp r4, #8
vadd.i32 d0, d0, d2
vshl.u32 d0, d0, d28
beq 1f
// h = 4/16/32
cmp r4, #32
movw lr, #0x6667
movw r5, #0xAAAB
it ne
movne lr, r5
vdup.32 d24, lr
vmul.i32 d0, d0, d24
vshr.u32 d0, d0, #17
1:
vdup.16 q0, d0[0]
2:
vst1.16 {d0, d1}, [r0, :128], r1
vst1.16 {d0, d1}, [r12, :128], r1
subs r4, r4, #4
vst1.16 {d0, d1}, [r0, :128], r1
vst1.16 {d0, d1}, [r12, :128], r1
bgt 2b
pop {r4-r6, pc}
L(ipred_dc_h16):
vld1.16 {d0, d1, d2, d3}, [r2, :128]!
vadd.i16 q0, q0, q1
vadd.i16 d0, d0, d1
vpadd.i16 d0, d0, d0
add r2, r2, #2
vpaddl.u16 d0, d0
bx r3
L(ipred_dc_w16):
vld1.16 {d2, d3, d4, d5}, [r2]
vadd.i32 d0, d0, d30
vadd.i16 q1, q1, q2
vadd.i16 d2, d2, d3
vpadd.i16 d2, d2, d2
vpaddl.u16 d2, d2
cmp r4, #16
vadd.i32 d0, d0, d2
vshl.u32 d4, d0, d28
beq 1f
// h = 4/8/32/64
tst r4, #(32+16+8) // 16 added to make a consecutive bitmask
movw lr, #0x6667
movw r5, #0xAAAB
it ne
movne lr, r5
vdup.32 d24, lr
vmul.i32 d4, d4, d24
vshr.u32 d4, d4, #17
1:
vdup.16 q0, d4[0]
vdup.16 q1, d4[0]
2:
vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
subs r4, r4, #4
vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
bgt 2b
pop {r4-r6, pc}
L(ipred_dc_h32):
vld1.16 {d0, d1, d2, d3}, [r2, :128]!
vld1.16 {d4, d5, d6, d7}, [r2, :128]!
vadd.i16 q0, q0, q1
vadd.i16 q2, q2, q3
vadd.i16 q0, q0, q2
vadd.i16 d0, d0, d1
vpadd.i16 d0, d0, d0
add r2, r2, #2
vpaddl.u16 d0, d0
bx r3
L(ipred_dc_w32):
vld1.16 {d2, d3, d4, d5}, [r2]!
vadd.i32 d0, d0, d30
vld1.16 {d16, d17, d18, d19}, [r2]
vadd.i16 q1, q1, q2
vadd.i16 q8, q8, q9
vadd.i16 q1, q1, q8
vadd.i16 d2, d2, d3
vpadd.i16 d2, d2, d2
vpaddl.u16 d2, d2
cmp r4, #32
vadd.i32 d0, d0, d2
vshl.u32 d4, d0, d28
beq 1f
// h = 8/16/64
cmp r4, #8
movw lr, #0x6667
movw r5, #0xAAAB
it ne
movne lr, r5
vdup.32 d24, lr
vmul.i32 d4, d4, d24
vshr.u32 d4, d4, #17
1:
sub r1, r1, #32
vdup.16 q0, d4[0]
vdup.16 q1, d4[0]
2:
vst1.16 {d0, d1, d2, d3}, [r0, :128]!
vst1.16 {d0, d1, d2, d3}, [r12, :128]!
vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
subs r4, r4, #4
vst1.16 {d0, d1, d2, d3}, [r0, :128]!
vst1.16 {d0, d1, d2, d3}, [r12, :128]!
vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
bgt 2b
pop {r4-r6, pc}
L(ipred_dc_h64):
vld1.16 {d0, d1, d2, d3}, [r2, :128]!
vld1.16 {d4, d5, d6, d7}, [r2, :128]!
vadd.i16 q0, q0, q1
vld1.16 {d16, d17, d18, d19}, [r2, :128]!
vadd.i16 q2, q2, q3
vld1.16 {d20, d21, d22, d23}, [r2, :128]!
vadd.i16 q8, q8, q9
vadd.i16 q10, q10, q11
vadd.i16 q0, q0, q2
vadd.i16 q8, q8, q10
vadd.i16 q0, q0, q8
vadd.i16 d0, d0, d1
vpaddl.u16 d0, d0
add r2, r2, #2
vpadd.i32 d0, d0, d0
bx r3
L(ipred_dc_w64):
vld1.16 {d2, d3, d4, d5}, [r2]!
vadd.i32 d0, d0, d30
vld1.16 {d16, d17, d18, d19}, [r2]!
vadd.i16 q1, q1, q2
vld1.16 {d20, d21, d22, d23}, [r2]!
vadd.i16 q8, q8, q9
vld1.16 {d24, d25, d26, d27}, [r2]!
vadd.i16 q10, q10, q11
vadd.i16 q12, q12, q13
vadd.i16 q1, q1, q8
vadd.i16 q10, q10, q12
vadd.i16 q1, q1, q10
vadd.i16 d2, d2, d3
vpaddl.u16 d2, d2
vpadd.i32 d2, d2, d2
cmp r4, #64
vadd.i32 d0, d0, d2
vshl.u32 d4, d0, d28
beq 1f
// h = 16/32
cmp r4, #16
movw lr, #0x6667
movw r5, #0xAAAB
it ne
movne lr, r5
vdup.32 d24, lr
vmul.i32 d4, d4, d24
vshr.u32 d4, d4, #17
1:
sub r1, r1, #96
vdup.16 q0, d4[0]
vdup.16 q1, d4[0]
2:
vst1.16 {d0, d1, d2, d3}, [r0, :128]!
vst1.16 {d0, d1, d2, d3}, [r12, :128]!
vst1.16 {d0, d1, d2, d3}, [r0, :128]!
vst1.16 {d0, d1, d2, d3}, [r12, :128]!
subs r4, r4, #2
vst1.16 {d0, d1, d2, d3}, [r0, :128]!
vst1.16 {d0, d1, d2, d3}, [r12, :128]!
vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
bgt 2b
pop {r4-r6, pc}
endfunc
// void ipred_paeth_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
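// Paeth prediction: base = left + top - topleft, and each output pixel is
// whichever of left, top and topleft is closest to base (left preferred,
// then top, then topleft).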
function ipred_paeth_16bpc_neon, export=1
push {r4-r6, lr}
vpush {q4}
ldr r4, [sp, #32]
clz lr, r3
adr r12, L(ipred_paeth_tbl)
sub lr, lr, #25
ldr lr, [r12, lr, lsl #2]
vld1.16 {d4[], d5[]}, [r2]
add r6, r2, #2
sub r2, r2, #4
add r12, r12, lr
mov r5, #-4
add lr, r0, r1
lsl r1, r1, #1
bx r12
.align 2
L(ipred_paeth_tbl):
.word 640f - L(ipred_paeth_tbl) + CONFIG_THUMB
.word 320f - L(ipred_paeth_tbl) + CONFIG_THUMB
.word 160f - L(ipred_paeth_tbl) + CONFIG_THUMB
.word 80f - L(ipred_paeth_tbl) + CONFIG_THUMB
.word 40f - L(ipred_paeth_tbl) + CONFIG_THUMB
40:
sub r2, r2, #4
mov r5, #-8
vld1.16 {d6}, [r6]
vsub.i16 d16, d6, d4 // top - topleft
vmov d7, d6
vmov d17, d16
4:
vld4.16 {d0[], d1[], d2[], d3[]}, [r2, :64], r5
vadd.i16 q9, q8, q0 // base
vadd.i16 q10, q8, q1
vabd.s16 q11, q3, q9 // tdiff
vabd.s16 q12, q3, q10
vabd.s16 q13, q2, q9 // tldiff
vabd.s16 q14, q2, q10
vabd.s16 q9, q0, q9 // ldiff
vabd.s16 q10, q1, q10
vmin.u16 q15, q11, q13 // min(tdiff, tldiff)
vmin.u16 q4, q12, q14
vcge.u16 q11, q13, q11 // tldiff >= tdiff
vcge.u16 q12, q14, q12
vcge.u16 q9, q15, q9 // min(tdiff, tldiff) >= ldiff
vcge.u16 q10, q4, q10
vbsl q12, q3, q2 // tdiff <= tldiff ? top : topleft
vbsl q11, q3, q2
vbit q12, q1, q10 // ldiff <= min ? left : ...
vbit q11, q0, q9
vst1.16 {d25}, [r0, :64], r1
vst1.16 {d24}, [lr, :64], r1
subs r4, r4, #4
vst1.16 {d23}, [r0, :64], r1
vst1.16 {d22}, [lr, :64], r1
bgt 4b
vpop {q4}
pop {r4-r6, pc}
80:
160:
320:
640:
vld1.16 {q3}, [r6]!
mov r12, r3
sub r1, r1, r3, lsl #1
1:
vld2.16 {d0[], d2[]}, [r2, :32], r5
vmov d1, d0
vmov d3, d2
2:
vsub.i16 q8, q3, q2 // top - topleft
vadd.i16 q9, q8, q0 // base
vadd.i16 q10, q8, q1
vabd.s16 q11, q3, q9 // tdiff
vabd.s16 q12, q3, q10
vabd.s16 q13, q2, q9 // tldiff
vabd.s16 q14, q2, q10
vabd.s16 q9, q0, q9 // ldiff
vabd.s16 q10, q1, q10
vmin.u16 q15, q11, q13 // min(tdiff, tldiff)
vmin.u16 q4, q12, q14
vcge.u16 q11, q13, q11 // tldiff >= tdiff
vcge.u16 q12, q14, q12
vcge.u16 q9, q15, q9 // min(tdiff, tldiff) >= ldiff
vcge.u16 q10, q4, q10
vbsl q12, q3, q2 // tdiff <= tldiff ? top : topleft
vbsl q11, q3, q2
vbit q12, q1, q10 // ldiff <= min ? left : ...
vbit q11, q0, q9
subs r3, r3, #8
vst1.16 {q12}, [r0, :128]!
vst1.16 {q11}, [lr, :128]!
ble 8f
vld1.16 {q3}, [r6]!
b 2b
8:
subs r4, r4, #2
ble 9f
// End of horizontal loop, move pointers to next two rows
sub r6, r6, r12, lsl #1
add r0, r0, r1
add lr, lr, r1
vld1.16 {q3}, [r6]!
mov r3, r12
b 1b
9:
vpop {q4}
pop {r4-r6, pc}
endfunc
// void ipred_smooth_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
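// SMOOTH prediction: a two-directional blend of the top row, the bottom-left
// pixel, the left column and the top-right pixel using the sm_weights table,
// computed per pixel as
// round2((bottom+right)*256 + (left-right)*w_hor + (top-bottom)*w_ver, 9).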
function ipred_smooth_16bpc_neon, export=1
push {r4-r10, lr}
ldr r4, [sp, #32]
movrel r10, X(sm_weights)
add r12, r10, r4
add r10, r10, r3
clz r9, r3
adr r5, L(ipred_smooth_tbl)
sub lr, r2, r4, lsl #1
sub r9, r9, #25
ldr r9, [r5, r9, lsl #2]
vld1.16 {d4[], d5[]}, [lr] // bottom
add r8, r2, #2
add r5, r5, r9
add r6, r0, r1
lsl r1, r1, #1
bx r5
.align 2
L(ipred_smooth_tbl):
.word 640f - L(ipred_smooth_tbl) + CONFIG_THUMB
.word 320f - L(ipred_smooth_tbl) + CONFIG_THUMB
.word 160f - L(ipred_smooth_tbl) + CONFIG_THUMB
.word 80f - L(ipred_smooth_tbl) + CONFIG_THUMB
.word 40f - L(ipred_smooth_tbl) + CONFIG_THUMB
40:
vld1.16 {d16}, [r8] // top
vld1.32 {d18[]}, [r10, :32] // weights_hor
sub r2, r2, #8
mov r7, #-8
vdup.16 q3, d16[3] // right
vsub.i16 q8, q8, q2 // top-bottom
vmovl.u8 q9, d18 // weights_hor
vadd.i16 d19, d4, d6 // bottom+right
4:
vld4.16 {d0[], d1[], d2[], d3[]}, [r2, :64], r7 // left
vld4.8 {d20[], d21[], d22[], d23[]}, [r12, :32]! // weights_ver
vshll.u16 q12, d19, #8 // (bottom+right)*256
vshll.u16 q13, d19, #8
vshll.u16 q14, d19, #8
vshll.u16 q15, d19, #8
vzip.32 d20, d21 // weights_ver
vzip.32 d22, d23
vsub.i16 q1, q1, q3 // left-right
vsub.i16 q0, q0, q3
vmovl.u8 q10, d20 // weights_ver
vmovl.u8 q11, d22
vmlal.s16 q12, d3, d18 // += (left-right)*weights_hor
vmlal.s16 q13, d2, d18 // (left flipped)
vmlal.s16 q14, d1, d18
vmlal.s16 q15, d0, d18
vmlal.s16 q12, d16, d20 // += (top-bottom)*weights_ver
vmlal.s16 q13, d16, d21
vmlal.s16 q14, d16, d22
vmlal.s16 q15, d16, d23
vrshrn.i32 d24, q12, #9
vrshrn.i32 d25, q13, #9
vrshrn.i32 d26, q14, #9
vrshrn.i32 d27, q15, #9
vst1.16 {d24}, [r0, :64], r1
vst1.16 {d25}, [r6, :64], r1
subs r4, r4, #4
vst1.16 {d26}, [r0, :64], r1
vst1.16 {d27}, [r6, :64], r1
bgt 4b
pop {r4-r10, pc}
80:
vld1.16 {q8}, [r8] // top
vld1.8 {d18}, [r10, :64] // weights_hor
sub r2, r2, #4
mov r7, #-4
vdup.16 q3, d17[3] // right
vsub.i16 q8, q8, q2 // top-bottom
vmovl.u8 q9, d18 // weights_hor
vadd.i16 d3, d4, d6 // bottom+right
8:
vld2.16 {d0[], d1[]}, [r2, :32], r7 // left
vld2.8 {d20[], d22[]}, [r12, :16]! // weights_ver
vshll.u16 q12, d3, #8 // (bottom+right)*256
vshll.u16 q13, d3, #8
vshll.u16 q14, d3, #8
vshll.u16 q15, d3, #8
vsub.i16 q0, q0, q3 // left-right
vmovl.u8 q10, d20 // weights_ver
vmovl.u8 q11, d22
vmlal.s16 q12, d1, d18 // += (left-right)*weights_hor
vmlal.s16 q13, d1, d19 // (left flipped)
vmlal.s16 q14, d0, d18
vmlal.s16 q15, d0, d19
vmlal.s16 q12, d16, d20 // += (top-bottom)*weights_ver
vmlal.s16 q13, d17, d20
vmlal.s16 q14, d16, d22
vmlal.s16 q15, d17, d22
vrshrn.i32 d24, q12, #9
vrshrn.i32 d25, q13, #9
vrshrn.i32 d26, q14, #9
vrshrn.i32 d27, q15, #9
subs r4, r4, #2
vst1.16 {q12}, [r0, :128], r1
vst1.16 {q13}, [r6, :128], r1
bgt 8b
pop {r4-r10, pc}
160:
320:
640:
add lr, r2, r3, lsl #1
sub r2, r2, #4
mov r7, #-4
vld1.16 {d6[], d7[]}, [lr] // right
sub r1, r1, r3, lsl #1
mov r9, r3
vadd.i16 d3, d4, d6 // bottom+right
1:
vld2.16 {d0[], d1[]}, [r2, :32], r7 // left
vld2.8 {d20[], d22[]}, [r12, :16]! // weights_ver
vsub.i16 q0, q0, q3 // left-right
vmovl.u8 q10, d20 // weights_ver
vmovl.u8 q11, d22
2:
vld1.8 {d18}, [r10, :64]! // weights_hor
vld1.16 {q8}, [r8]! // top
vshll.u16 q12, d3, #8 // (bottom+right)*256
vshll.u16 q13, d3, #8
vmovl.u8 q9, d18 // weights_hor
vshll.u16 q14, d3, #8
vshll.u16 q15, d3, #8
vsub.i16 q8, q8, q2 // top-bottom
vmlal.s16 q12, d1, d18 // += (left-right)*weights_hor
vmlal.s16 q13, d1, d19 // (left flipped)
vmlal.s16 q14, d0, d18
vmlal.s16 q15, d0, d19
vmlal.s16 q12, d16, d20 // += (top-bottom)*weights_ver
vmlal.s16 q13, d17, d20
vmlal.s16 q14, d16, d22
vmlal.s16 q15, d17, d22
vrshrn.i32 d24, q12, #9
vrshrn.i32 d25, q13, #9
vrshrn.i32 d26, q14, #9
vrshrn.i32 d27, q15, #9
subs r3, r3, #8
vst1.16 {q12}, [r0, :128]!
vst1.16 {q13}, [r6, :128]!
bgt 2b
subs r4, r4, #2
ble 9f
sub r8, r8, r9, lsl #1
sub r10, r10, r9
add r0, r0, r1
add r6, r6, r1
mov r3, r9
b 1b
9:
pop {r4-r10, pc}
endfunc
// void ipred_smooth_v_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
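// SMOOTH_V prediction: each row blends the top row with the bottom-left
// pixel using per-row sm_weights. The weight is shifted left by 7 so that
// vqrdmulh computes roughly ((top-bottom)*weight + 128) >> 8, after which
// bottom is added back.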
function ipred_smooth_v_16bpc_neon, export=1
push {r4-r7, lr}
ldr r4, [sp, #20]
movrel r7, X(sm_weights)
add r7, r7, r4
clz lr, r3
adr r5, L(ipred_smooth_v_tbl)
sub r12, r2, r4, lsl #1
sub lr, lr, #25
ldr lr, [r5, lr, lsl #2]
vld1.16 {d4[], d5[]}, [r12] // bottom
add r2, r2, #2
add r5, r5, lr
add r6, r0, r1
lsl r1, r1, #1
bx r5
.align 2
L(ipred_smooth_v_tbl):
.word 640f - L(ipred_smooth_v_tbl) + CONFIG_THUMB
.word 320f - L(ipred_smooth_v_tbl) + CONFIG_THUMB
.word 160f - L(ipred_smooth_v_tbl) + CONFIG_THUMB
.word 80f - L(ipred_smooth_v_tbl) + CONFIG_THUMB
.word 40f - L(ipred_smooth_v_tbl) + CONFIG_THUMB
40:
vld1.16 {d6}, [r2] // top
vsub.i16 d6, d6, d4 // top-bottom
vmov d7, d6
4:
vld4.8 {d16[], d17[], d18[], d19[]}, [r7, :32]! // weights_ver
vzip.32 d16, d17 // weights_ver
vzip.32 d18, d19
vshll.u8 q8, d16, #7 // weights_ver << 7
vshll.u8 q9, d18, #7
vqrdmulh.s16 q10, q3, q8 // ((top-bottom)*weights_ver + 128) >> 8
vqrdmulh.s16 q11, q3, q9
vadd.i16 q10, q10, q2
vadd.i16 q11, q11, q2
vst1.16 {d20}, [r0, :64], r1
vst1.16 {d21}, [r6, :64], r1
subs r4, r4, #4
vst1.16 {d22}, [r0, :64], r1
vst1.16 {d23}, [r6, :64], r1
bgt 4b
pop {r4-r7, pc}
80:
vld1.16 {q3}, [r2] // top
vsub.i16 q3, q3, q2 // top-bottom
8:
vld4.8 {d16[], d18[], d20[], d22[]}, [r7, :32]! // weights_ver
vshll.u8 q8, d16, #7 // weights_ver << 7
vshll.u8 q9, d18, #7
vshll.u8 q10, d20, #7
vshll.u8 q11, d22, #7
vqrdmulh.s16 q8, q3, q8 // ((top-bottom)*weights_ver + 128) >> 8
vqrdmulh.s16 q9, q3, q9
vqrdmulh.s16 q10, q3, q10
vqrdmulh.s16 q11, q3, q11
vadd.i16 q8, q8, q2
vadd.i16 q9, q9, q2
vadd.i16 q10, q10, q2
vadd.i16 q11, q11, q2
vst1.16 {q8}, [r0, :128], r1
vst1.16 {q9}, [r6, :128], r1
subs r4, r4, #4
vst1.16 {q10}, [r0, :128], r1
vst1.16 {q11}, [r6, :128], r1
bgt 8b
pop {r4-r7, pc}
160:
320:
640:
vpush {q4-q7}
// Set up pointers for four rows in parallel; r0, r6, r5, lr
add r5, r0, r1
add lr, r6, r1
lsl r1, r1, #1
sub r1, r1, r3, lsl #1
mov r12, r3
1:
vld4.8 {d8[], d10[], d12[], d14[]}, [r7, :32]! // weights_ver
vshll.u8 q4, d8, #7 // weights_ver << 7
vshll.u8 q5, d10, #7
vshll.u8 q6, d12, #7
vshll.u8 q7, d14, #7
2:
vld1.16 {q0, q1}, [r2]! // top
vsub.i16 q0, q0, q2 // top-bottom
vsub.i16 q1, q1, q2
vqrdmulh.s16 q8, q0, q4 // ((top-bottom)*weights_ver + 128) >> 8
vqrdmulh.s16 q9, q1, q4
vqrdmulh.s16 q10, q0, q5
vqrdmulh.s16 q11, q1, q5
vqrdmulh.s16 q12, q0, q6
vqrdmulh.s16 q13, q1, q6
vqrdmulh.s16 q14, q0, q7
vqrdmulh.s16 q15, q1, q7
vadd.i16 q8, q8, q2
vadd.i16 q9, q9, q2
vadd.i16 q10, q10, q2
vadd.i16 q11, q11, q2
vadd.i16 q12, q12, q2
vadd.i16 q13, q13, q2
vadd.i16 q14, q14, q2
vadd.i16 q15, q15, q2
subs r3, r3, #16
vst1.16 {q8, q9}, [r0, :128]!
vst1.16 {q10, q11}, [r6, :128]!
vst1.16 {q12, q13}, [r5, :128]!
vst1.16 {q14, q15}, [lr, :128]!
bgt 2b
subs r4, r4, #4
ble 9f
sub r2, r2, r12, lsl #1
add r0, r0, r1
add r6, r6, r1
add r5, r5, r1
add lr, lr, r1
mov r3, r12
b 1b
9:
vpop {q4-q7}
pop {r4-r7, pc}
endfunc
// void ipred_smooth_h_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
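// SMOOTH_H prediction: each column blends the row's left pixel with the
// top-right pixel using per-column sm_weights, with the same vqrdmulh trick
// as in SMOOTH_V above.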
function ipred_smooth_h_16bpc_neon, export=1
push {r4-r8, lr}
ldr r4, [sp, #24]
movrel r8, X(sm_weights)
add r8, r8, r3
clz lr, r3
adr r5, L(ipred_smooth_h_tbl)
add r12, r2, r3, lsl #1
sub lr, lr, #25
ldr lr, [r5, lr, lsl #2]
vld1.16 {d4[], d5[]}, [r12] // right
add r5, r5, lr
add r6, r0, r1
lsl r1, r1, #1
bx r5
.align 2
L(ipred_smooth_h_tbl):
.word 640f - L(ipred_smooth_h_tbl) + CONFIG_THUMB
.word 320f - L(ipred_smooth_h_tbl) + CONFIG_THUMB
.word 160f - L(ipred_smooth_h_tbl) + CONFIG_THUMB
.word 80f - L(ipred_smooth_h_tbl) + CONFIG_THUMB
.word 40f - L(ipred_smooth_h_tbl) + CONFIG_THUMB
40:
vld1.32 {d6[]}, [r8, :32] // weights_hor
sub r2, r2, #8
mov r7, #-8
vshll.u8 q3, d6, #7 // weights_hor << 7
4:
vld4.16 {d0[], d1[], d2[], d3[]}, [r2, :64], r7 // left
vsub.i16 q0, q0, q2 // left-right
vsub.i16 q1, q1, q2
subs r4, r4, #4
vqrdmulh.s16 q8, q1, q3 // ((left-right)*weights_hor + 128) >> 8
vqrdmulh.s16 q9, q0, q3 // (left flipped)
vadd.i16 q8, q8, q2
vadd.i16 q9, q9, q2
vst1.16 {d17}, [r0, :64], r1
vst1.16 {d16}, [r6, :64], r1
vst1.16 {d19}, [r0, :64], r1
vst1.16 {d18}, [r6, :64], r1
bgt 4b
pop {r4-r8, pc}
80:
vld1.8 {d6}, [r8, :64] // weights_hor
sub r2, r2, #8
mov r7, #-8
vshll.u8 q3, d6, #7 // weights_hor << 7
8:
vld1.16 {d23}, [r2, :64], r7 // left
subs r4, r4, #4
vsub.i16 d23, d23, d4 // left-right
vdup.16 q8, d23[3] // flip left
vdup.16 q9, d23[2]
vdup.16 q10, d23[1]
vdup.16 q11, d23[0]
vqrdmulh.s16 q8, q8, q3 // ((left-right)*weights_hor + 128) >> 8
vqrdmulh.s16 q9, q9, q3
vqrdmulh.s16 q10, q10, q3
vqrdmulh.s16 q11, q11, q3
vadd.i16 q8, q8, q2
vadd.i16 q9, q9, q2
vadd.i16 q10, q10, q2
vadd.i16 q11, q11, q2
vst1.16 {q8}, [r0, :128], r1
vst1.16 {q9}, [r6, :128], r1
vst1.16 {q10}, [r0, :128], r1
vst1.16 {q11}, [r6, :128], r1
bgt 8b
pop {r4-r8, pc}
160:
320:
640:
vpush {q4-q7}
sub r2, r2, #8
mov r7, #-8
// Set up pointers for four rows in parallel; r0, r6, r5, lr
add r5, r0, r1
add lr, r6, r1
lsl r1, r1, #1
sub r1, r1, r3, lsl #1
mov r12, r3
1:
vld1.16 {d15}, [r2, :64], r7 // left
vsub.i16 d15, d15, d4 // left-right
vdup.16 q4, d15[3] // flip left
vdup.16 q5, d15[2]
vdup.16 q6, d15[1]
vdup.16 q7, d15[0]
2:
vld1.8 {q1}, [r8, :128]! // weights_hor
subs r3, r3, #16
vshll.u8 q0, d2, #7 // weights_hor << 7
vshll.u8 q1, d3, #7
vqrdmulh.s16 q8, q0, q4 // ((left-right)*weights_hor + 128) >> 8
vqrdmulh.s16 q9, q1, q4
vqrdmulh.s16 q10, q0, q5
vqrdmulh.s16 q11, q1, q5
vqrdmulh.s16 q12, q0, q6
vqrdmulh.s16 q13, q1, q6
vqrdmulh.s16 q14, q0, q7
vqrdmulh.s16 q15, q1, q7
vadd.i16 q8, q8, q2
vadd.i16 q9, q9, q2
vadd.i16 q10, q10, q2
vadd.i16 q11, q11, q2
vadd.i16 q12, q12, q2
vadd.i16 q13, q13, q2
vadd.i16 q14, q14, q2
vadd.i16 q15, q15, q2
vst1.16 {q8, q9}, [r0, :128]!
vst1.16 {q10, q11}, [r6, :128]!
vst1.16 {q12, q13}, [r5, :128]!
vst1.16 {q14, q15}, [lr, :128]!
bgt 2b
subs r4, r4, #4
ble 9f
sub r8, r8, r12
add r0, r0, r1
add r6, r6, r1
add r5, r5, r1
add lr, lr, r1
mov r3, r12
b 1b
9:
vpop {q4-q7}
pop {r4-r8, pc}
endfunc
// void ipred_filter_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int filt_idx,
// const int max_width, const int max_height,
// const int bitdepth_max);
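// FILTER_INTRA prediction: the block is built from 4x2 sub-blocks, each
// output pixel being a 7-tap combination of the topleft pixel, two left
// pixels and four top pixels with coefficients from
// filter_intra_taps[filt_idx], rounded by 4 and clamped to [0, bitdepth_max].
// The 10 bpc variant accumulates in 16 bit (vmla), 12 bpc widens to 32 bit
// (vmlal).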
.macro filter_fn bpc
function ipred_filter_\bpc\()bpc_neon, export=1
movw r12, #511
ldrd r4, r5, [sp, #88]
and r5, r5, r12 // 511
movrel r6, X(filter_intra_taps)
lsl r5, r5, #6
add r6, r6, r5
vld1.8 {d20, d21, d22, d23}, [r6, :128]!
clz lr, r3
adr r5, L(ipred_filter\bpc\()_tbl)
vld1.8 {d27, d28, d29}, [r6, :64]
sub lr, lr, #26
ldr lr, [r5, lr, lsl #2]
vmovl.s8 q8, d20
vmovl.s8 q9, d21
add r5, r5, lr
vmovl.s8 q10, d22
vmovl.s8 q11, d23
add r6, r0, r1
lsl r1, r1, #1
vmovl.s8 q12, d27
vmovl.s8 q13, d28
vmovl.s8 q14, d29
mov r7, #-4
vdup.16 q15, r8
add r8, r2, #2
sub r2, r2, #4
.if \bpc == 10
vmov.i16 q7, #0
.endif
bx r5
.align 2
L(ipred_filter\bpc\()_tbl):
.word 320f - L(ipred_filter\bpc\()_tbl) + CONFIG_THUMB
.word 160f - L(ipred_filter\bpc\()_tbl) + CONFIG_THUMB
.word 80f - L(ipred_filter\bpc\()_tbl) + CONFIG_THUMB
.word 40f - L(ipred_filter\bpc\()_tbl) + CONFIG_THUMB
40:
vld1.16 {d0}, [r8] // top (0-3)
4:
vld1.16 {d2}, [r2], r7 // left (0-1) + topleft (2)
.if \bpc == 10
vmul.i16 q2, q9, d0[0] // p1(top[0]) * filter(1)
vmla.i16 q2, q10, d0[1] // p2(top[1]) * filter(2)
vmla.i16 q2, q11, d0[2] // p3(top[2]) * filter(3)
vmla.i16 q2, q12, d0[3] // p4(top[3]) * filter(4)
vmla.i16 q2, q8, d2[2] // p0(topleft) * filter(0)
vmla.i16 q2, q13, d2[1] // p5(left[0]) * filter(5)
vmla.i16 q2, q14, d2[0] // p6(left[1]) * filter(6)
vrshr.s16 q2, q2, #4
vmax.s16 q2, q2, q7
.else
vmull.s16 q2, d18, d0[0] // p1(top[0]) * filter(1)
vmlal.s16 q2, d20, d0[1] // p2(top[1]) * filter(2)
vmlal.s16 q2, d22, d0[2] // p3(top[2]) * filter(3)
vmlal.s16 q2, d24, d0[3] // p4(top[3]) * filter(4)
vmlal.s16 q2, d16, d2[2] // p0(topleft) * filter(0)
vmlal.s16 q2, d26, d2[1] // p5(left[0]) * filter(5)
vmlal.s16 q2, d28, d2[0] // p6(left[1]) * filter(6)
vmull.s16 q3, d19, d0[0] // p1(top[0]) * filter(1)
vmlal.s16 q3, d21, d0[1] // p2(top[1]) * filter(2)
vmlal.s16 q3, d23, d0[2] // p3(top[2]) * filter(3)
vmlal.s16 q3, d25, d0[3] // p4(top[3]) * filter(4)
vmlal.s16 q3, d17, d2[2] // p0(topleft) * filter(0)
vmlal.s16 q3, d27, d2[1] // p5(left[0]) * filter(5)
vmlal.s16 q3, d29, d2[0] // p6(left[1]) * filter(6)
vqrshrun.s32 d4, q2, #4
vqrshrun.s32 d5, q3, #4
.endif
vmin.s16 q2, q2, q15
subs r4, r4, #2
vst1.16 {d4}, [r0, :64], r1
vst1.16 {d5}, [r6, :64], r1
vmov d0, d5 // move top from [4-7] to [0-3]
bgt 4b
vpop {q4-q7}
pop {r4-r8, pc}
80:
vld1.16 {q0}, [r8] // top (0-7)
8:
vld1.16 {d2}, [r2], r7 // left (0-1) + topleft (2)
.if \bpc == 10
vmul.i16 q2, q9, d0[0] // p1(top[0]) * filter(1)
vmla.i16 q2, q10, d0[1] // p2(top[1]) * filter(2)
vmla.i16 q2, q11, d0[2] // p3(top[2]) * filter(3)
vmla.i16 q2, q12, d0[3] // p4(top[3]) * filter(4)
vmla.i16 q2, q8, d2[2] // p0(topleft) * filter(0)
vmla.i16 q2, q13, d2[1] // p5(left[0]) * filter(5)
vmla.i16 q2, q14, d2[0] // p6(left[1]) * filter(6)
vmul.i16 q3, q9, d1[0] // p1(top[0]) * filter(1)
vmla.i16 q3, q10, d1[1] // p2(top[1]) * filter(2)
vmla.i16 q3, q11, d1[2] // p3(top[2]) * filter(3)
vrshr.s16 q2, q2, #4
vmax.s16 q2, q2, q7
vmin.s16 q2, q2, q15
vmla.i16 q3, q12, d1[3] // p4(top[3]) * filter(4)
vmla.i16 q3, q8, d0[3] // p0(topleft) * filter(0)
vmla.i16 q3, q13, d4[3] // p5(left[0]) * filter(5)
vmla.i16 q3, q14, d5[3] // p6(left[1]) * filter(6)
vrshr.s16 q3, q3, #4
vmax.s16 q3, q3, q7
.else
vmull.s16 q2, d18, d0[0] // p1(top[0]) * filter(1)
vmlal.s16 q2, d20, d0[1] // p2(top[1]) * filter(2)
vmlal.s16 q2, d22, d0[2] // p3(top[2]) * filter(3)
vmlal.s16 q2, d24, d0[3] // p4(top[3]) * filter(4)
vmlal.s16 q2, d16, d2[2] // p0(topleft) * filter(0)
vmlal.s16 q2, d26, d2[1] // p5(left[0]) * filter(5)
vmlal.s16 q2, d28, d2[0] // p6(left[1]) * filter(6)
vmull.s16 q3, d19, d0[0] // p1(top[0]) * filter(1)
vmlal.s16 q3, d21, d0[1] // p2(top[1]) * filter(2)
vmlal.s16 q3, d23, d0[2] // p3(top[2]) * filter(3)
vmlal.s16 q3, d25, d0[3] // p4(top[3]) * filter(4)
vmlal.s16 q3, d17, d2[2] // p0(topleft) * filter(0)
vmlal.s16 q3, d27, d2[1] // p5(left[0]) * filter(5)
vmlal.s16 q3, d29, d2[0] // p6(left[1]) * filter(6)
vqrshrun.s32 d4, q2, #4
vmull.s16 q4, d18, d1[0] // p1(top[0]) * filter(1)
vmlal.s16 q4, d20, d1[1] // p2(top[1]) * filter(2)
vmlal.s16 q4, d22, d1[2] // p3(top[2]) * filter(3)
vqrshrun.s32 d5, q3, #4
vmin.s16 q2, q2, q15
vmlal.s16 q4, d24, d1[3] // p4(top[3]) * filter(4)
vmlal.s16 q4, d16, d0[3] // p0(topleft) * filter(0)
vmlal.s16 q4, d26, d4[3] // p5(left[0]) * filter(5)
vmlal.s16 q4, d28, d5[3] // p6(left[1]) * filter(6)
vmull.s16 q5, d19, d1[0] // p1(top[0]) * filter(1)
vmlal.s16 q5, d21, d1[1] // p2(top[1]) * filter(2)
vmlal.s16 q5, d23, d1[2] // p3(top[2]) * filter(3)
vmlal.s16 q5, d25, d1[3] // p4(top[3]) * filter(4)
vmlal.s16 q5, d17, d0[3] // p0(topleft) * filter(0)
vmlal.s16 q5, d27, d4[3] // p5(left[0]) * filter(5)
vmlal.s16 q5, d29, d5[3] // p6(left[1]) * filter(6)
vqrshrun.s32 d6, q4, #4
vqrshrun.s32 d7, q5, #4
.endif
vmin.s16 q3, q3, q15
vswp d5, d6
subs r4, r4, #2
vst1.16 {q2}, [r0, :128], r1
vmov q0, q3
vst1.16 {q3}, [r6, :128], r1
bgt 8b
vpop {q4-q7}
pop {r4-r8, pc}
160:
320:
sub r1, r1, r3, lsl #1
mov lr, r3
1:
vld1.16 {d0}, [r2], r7 // left (0-1) + topleft (2)
2:
vld1.16 {q1, q2}, [r8]! // top(0-15)
.if \bpc == 10
vmul.i16 q3, q8, d0[2] // p0(topleft) * filter(0)
vmla.i16 q3, q13, d0[1] // p5(left[0]) * filter(5)
vmla.i16 q3, q14, d0[0] // p6(left[1]) * filter(6)
vmla.i16 q3, q9, d2[0] // p1(top[0]) * filter(1)
vmla.i16 q3, q10, d2[1] // p2(top[1]) * filter(2)
vmla.i16 q3, q11, d2[2] // p3(top[2]) * filter(3)
vmla.i16 q3, q12, d2[3] // p4(top[3]) * filter(4)
vmul.i16 q4, q9, d3[0] // p1(top[0]) * filter(1)
vmla.i16 q4, q10, d3[1] // p2(top[1]) * filter(2)
vmla.i16 q4, q11, d3[2] // p3(top[2]) * filter(3)
vrshr.s16 q3, q3, #4
vmax.s16 q3, q3, q7
vmin.s16 q3, q3, q15
vmla.i16 q4, q12, d3[3] // p4(top[3]) * filter(4)
vmla.i16 q4, q8, d2[3] // p0(topleft) * filter(0)
vmla.i16 q4, q13, d6[3] // p5(left[0]) * filter(5)
vmla.i16 q4, q14, d7[3] // p6(left[1]) * filter(6)
vmul.i16 q5, q9, d4[0] // p1(top[0]) * filter(1)
vmla.i16 q5, q10, d4[1] // p2(top[1]) * filter(2)
vmla.i16 q5, q11, d4[2] // p3(top[2]) * filter(3)
vrshr.s16 q4, q4, #4
vmax.s16 q4, q4, q7
vmin.s16 q4, q4, q15
vmov q0, q4
vmla.i16 q5, q12, d4[3] // p4(top[3]) * filter(4)
vmla.i16 q5, q8, d3[3] // p0(topleft) * filter(0)
vmla.i16 q5, q13, d0[3] // p5(left[0]) * filter(5)
vmla.i16 q5, q14, d1[3] // p6(left[1]) * filter(6)
vmul.i16 q6, q9, d5[0] // p1(top[0]) * filter(1)
vmla.i16 q6, q10, d5[1] // p2(top[1]) * filter(2)
vmla.i16 q6, q11, d5[2] // p3(top[2]) * filter(3)
vrshr.s16 q5, q5, #4
vmax.s16 q5, q5, q7
vmin.s16 q5, q5, q15
vmov q0, q5
vmov.u16 r12, d5[3]
vmla.i16 q6, q12, d5[3] // p4(top[3]) * filter(4)
vmla.i16 q6, q8, d4[3] // p0(topleft) * filter(0)
vmla.i16 q6, q13, d0[3] // p5(left[0]) * filter(5)
vmla.i16 q6, q14, d1[3] // p6(left[1]) * filter(6)
vmov.16 d0[2], r12
subs r3, r3, #16
vrshr.s16 q6, q6, #4
.else
vmull.s16 q3, d16, d0[2] // p0(topleft) * filter(0)
vmlal.s16 q3, d26, d0[1] // p5(left[0]) * filter(5)
vmlal.s16 q3, d28, d0[0] // p6(left[1]) * filter(6)
vmlal.s16 q3, d18, d2[0] // p1(top[0]) * filter(1)
vmlal.s16 q3, d20, d2[1] // p2(top[1]) * filter(2)
vmlal.s16 q3, d22, d2[2] // p3(top[2]) * filter(3)
vmlal.s16 q3, d24, d2[3] // p4(top[3]) * filter(4)
vmull.s16 q4, d17, d0[2] // p0(topleft) * filter(0)
vmlal.s16 q4, d27, d0[1] // p5(left[0]) * filter(5)
vmlal.s16 q4, d29, d0[0] // p6(left[1]) * filter(6)
vmlal.s16 q4, d19, d2[0] // p1(top[0]) * filter(1)
vmlal.s16 q4, d21, d2[1] // p2(top[1]) * filter(2)
vmlal.s16 q4, d23, d2[2] // p3(top[2]) * filter(3)
vmlal.s16 q4, d25, d2[3] // p4(top[3]) * filter(4)
vqrshrun.s32 d6, q3, #4
vmull.s16 q5, d18, d3[0] // p1(top[0]) * filter(1)
vmlal.s16 q5, d20, d3[1] // p2(top[1]) * filter(2)
vqrshrun.s32 d7, q4, #4
vmin.s16 q3, q3, q15
vmlal.s16 q5, d22, d3[2] // p3(top[2]) * filter(3)
vmlal.s16 q5, d24, d3[3] // p4(top[3]) * filter(4)
vmlal.s16 q5, d16, d2[3] // p0(topleft) * filter(0)
vmlal.s16 q5, d26, d6[3] // p5(left[0]) * filter(5)
vmlal.s16 q5, d28, d7[3] // p6(left[1]) * filter(6)
vmull.s16 q6, d19, d3[0] // p1(top[0]) * filter(1)
vmlal.s16 q6, d21, d3[1] // p2(top[1]) * filter(2)
vmlal.s16 q6, d23, d3[2] // p3(top[2]) * filter(3)
vmlal.s16 q6, d25, d3[3] // p4(top[3]) * filter(4)
vmlal.s16 q6, d17, d2[3] // p0(topleft) * filter(0)
vmlal.s16 q6, d27, d6[3] // p5(left[0]) * filter(5)
vmlal.s16 q6, d29, d7[3] // p6(left[1]) * filter(6)
vqrshrun.s32 d8, q5, #4
vmull.s16 q7, d18, d4[0] // p1(top[0]) * filter(1)
vmlal.s16 q7, d20, d4[1] // p2(top[1]) * filter(2)
vmlal.s16 q7, d22, d4[2] // p3(top[2]) * filter(3)
vqrshrun.s32 d9, q6, #4
vmin.s16 q0, q4, q15
vmlal.s16 q7, d24, d4[3] // p4(top[3]) * filter(4)
vmlal.s16 q7, d16, d3[3] // p0(topleft) * filter(0)
vmlal.s16 q7, d26, d0[3] // p5(left[0]) * filter(5)
vmlal.s16 q7, d28, d1[3] // p6(left[1]) * filter(6)
vmin.s16 q4, q4, q15
vmull.s16 q6, d19, d4[0] // p1(top[0]) * filter(1)
vmlal.s16 q6, d21, d4[1] // p2(top[1]) * filter(2)
vmlal.s16 q6, d23, d4[2] // p3(top[2]) * filter(3)
vmlal.s16 q6, d25, d4[3] // p4(top[3]) * filter(4)
vmlal.s16 q6, d17, d3[3] // p0(topleft) * filter(0)
vmlal.s16 q6, d27, d0[3] // p5(left[0]) * filter(5)
vmlal.s16 q6, d29, d1[3] // p6(left[1]) * filter(6)
vqrshrun.s32 d10, q7, #4
vmull.s16 q1, d18, d5[0] // p1(top[0]) * filter(1)
vmlal.s16 q1, d20, d5[1] // p2(top[1]) * filter(2)
vmlal.s16 q1, d22, d5[2] // p3(top[2]) * filter(3)
vqrshrun.s32 d11, q6, #4
vmin.s16 q0, q5, q15
vmlal.s16 q1, d24, d5[3] // p4(top[3]) * filter(4)
vmlal.s16 q1, d16, d4[3] // p0(topleft) * filter(0)
vmlal.s16 q1, d26, d0[3] // p5(left[0]) * filter(5)
vmlal.s16 q1, d28, d1[3] // p6(left[1]) * filter(6)
vmin.s16 q5, q5, q15
vmov.u16 r12, d5[3]
vmull.s16 q7, d19, d5[0] // p1(top[0]) * filter(1)
vmlal.s16 q7, d21, d5[1] // p2(top[1]) * filter(2)
vmlal.s16 q7, d23, d5[2] // p3(top[2]) * filter(3)
vmlal.s16 q7, d25, d5[3] // p4(top[3]) * filter(4)
vmlal.s16 q7, d17, d4[3] // p0(topleft) * filter(0)
vmlal.s16 q7, d27, d0[3] // p5(left[0]) * filter(5)
vmlal.s16 q7, d29, d1[3] // p6(left[1]) * filter(6)
vmov.16 d0[2], r12
vqrshrun.s32 d12, q1, #4
subs r3, r3, #16
vqrshrun.s32 d13, q7, #4
.endif
vswp q4, q5
.if \bpc == 10
vmax.s16 q6, q6, q7
.endif
vswp d7, d10
vmin.s16 q6, q6, q15
vswp d9, d12
vst1.16 {q3, q4}, [r0, :128]!
vst1.16 {q5, q6}, [r6, :128]!
ble 8f
vmov.u16 r12, d13[3]
vmov.16 d0[0], r12
vmov.u16 r12, d9[3]
vmov.16 d0[1], r12
b 2b
8:
subs r4, r4, #2
ble 9f
sub r8, r6, lr, lsl #1
add r0, r0, r1
add r6, r6, r1
mov r3, lr
b 1b
9:
vpop {q4-q7}
pop {r4-r8, pc}
endfunc
.endm
filter_fn 10
filter_fn 12
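// Generic 16 bpc entry point: bitdepth_max <= 0x3ff selects the 10 bpc variant
// (which can accumulate in 16 bits with vmla), larger values the 12 bpc variant
// (which widens to 32 bits with vmull/vmlal).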
function ipred_filter_16bpc_neon, export=1
push {r4-r8, lr}
vpush {q4-q7}
movw r12, 0x3ff
ldr r8, [sp, #104]
cmp r8, r12
ble ipred_filter_10bpc_neon
b ipred_filter_12bpc_neon
endfunc
// void pal_pred_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const pal, const uint8_t *idx,
// const int w, const int h);
function pal_pred_16bpc_neon, export=1
push {r4-r5, lr}
ldr r4, [sp, #12]
ldr r5, [sp, #16]
vld1.16 {q14}, [r2, :128]
clz lr, r4
adr r12, L(pal_pred_tbl)
sub lr, lr, #25
vmov.i8 q13, #7
ldr lr, [r12, lr, lsl #2]
vmov.i16 q15, #0x100
add r12, r12, lr
add r2, r0, r1
bx r12
.align 2
L(pal_pred_tbl):
.word 640f - L(pal_pred_tbl) + CONFIG_THUMB
.word 320f - L(pal_pred_tbl) + CONFIG_THUMB
.word 160f - L(pal_pred_tbl) + CONFIG_THUMB
.word 80f - L(pal_pred_tbl) + CONFIG_THUMB
.word 40f - L(pal_pred_tbl) + CONFIG_THUMB
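// Each idx byte packs two palette indices (low nibble first). The indices are
// doubled into byte offsets 2*i, and the vadd with q15 = 0x100 sets the +1 in
// the high byte of every 16-bit lane, so vtbl.8 over the 16-bit palette in q14
// picks up the low and high byte of each palette entry.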
40:
lsl r1, r1, #1
4:
vld1.8 {d2}, [r3, :64]!
subs r5, r5, #4
vshr.u8 d3, d2, #4
vand.u8 d2, d2, d26
vzip.8 d2, d3
// Restructure q1 from a, b, c, ... into 2*a, 2*a+1, 2*b, 2*b+1, 2*c, 2*c+1, ...
vadd.i8 q0, q1, q1
vadd.i8 q1, q1, q1
vzip.8 q0, q1
vadd.i16 q0, q0, q15
vadd.i16 q1, q1, q15
vtbl.8 d0, {q14}, d0
vtbl.8 d1, {q14}, d1
vst1.16 {d0}, [r0, :64], r1
vtbl.8 d2, {q14}, d2
vst1.16 {d1}, [r2, :64], r1
vtbl.8 d3, {q14}, d3
vst1.16 {d2}, [r0, :64], r1
vst1.16 {d3}, [r2, :64], r1
bgt 4b
pop {r4-r5, pc}
80:
lsl r1, r1, #1
8:
vld1.8 {q1}, [r3, :64]!
subs r5, r5, #4
vshr.u8 q2, q1, #4
vand.u8 q1, q1, q13
vzip.8 q1, q2
// Prefer doing the adds twice, instead of chaining a vmov after
// the add.
vadd.i8 q0, q1, q1
vadd.i8 q1, q1, q1
vadd.i8 q3, q2, q2
vadd.i8 q2, q2, q2
vzip.8 q0, q1
vzip.8 q2, q3
vadd.i16 q0, q0, q15
vadd.i16 q1, q1, q15
vtbl.8 d0, {q14}, d0
vadd.i16 q2, q2, q15
vtbl.8 d1, {q14}, d1
vadd.i16 q3, q3, q15
vtbl.8 d2, {q14}, d2
vtbl.8 d3, {q14}, d3
vtbl.8 d4, {q14}, d4
vtbl.8 d5, {q14}, d5
vst1.16 {q0}, [r0, :128], r1
vtbl.8 d6, {q14}, d6
vst1.16 {q1}, [r2, :128], r1
vtbl.8 d7, {q14}, d7
vst1.16 {q2}, [r0, :128], r1
vst1.16 {q3}, [r2, :128], r1
bgt 8b
pop {r4-r5, pc}
160:
lsl r1, r1, #1
16:
vld1.8 {q10, q11}, [r3, :64]!
subs r5, r5, #4
vand.u8 q2, q10, q13
vshr.u8 q3, q10, #4
vand.u8 q10, q11, q13
vshr.u8 q11, q11, #4
vzip.8 q2, q3
vzip.8 q10, q11
vadd.i8 q0, q2, q2
vadd.i8 q1, q2, q2
vadd.i8 q2, q3, q3
vadd.i8 q3, q3, q3
vadd.i8 q8, q10, q10
vadd.i8 q9, q10, q10
vadd.i8 q10, q11, q11
vzip.8 q0, q1
vadd.i8 q11, q11, q11
vzip.8 q2, q3
vzip.8 q8, q9
vadd.i16 q0, q0, q15
vzip.8 q10, q11
vadd.i16 q1, q1, q15
vadd.i16 q2, q2, q15
vadd.i16 q3, q3, q15
vadd.i16 q8, q8, q15
vadd.i16 q9, q9, q15
vadd.i16 q10, q10, q15
vtbl.8 d0, {q14}, d0
vadd.i16 q11, q11, q15
vtbl.8 d1, {q14}, d1
vtbl.8 d2, {q14}, d2
vtbl.8 d3, {q14}, d3
vtbl.8 d4, {q14}, d4
vtbl.8 d5, {q14}, d5
vtbl.8 d6, {q14}, d6
vtbl.8 d7, {q14}, d7
vtbl.8 d16, {q14}, d16
vtbl.8 d17, {q14}, d17
vtbl.8 d18, {q14}, d18
vst1.16 {q0, q1}, [r0, :128], r1
vtbl.8 d19, {q14}, d19
vtbl.8 d20, {q14}, d20
vst1.16 {q2, q3}, [r2, :128], r1
vtbl.8 d21, {q14}, d21
vtbl.8 d22, {q14}, d22
vst1.16 {q8, q9}, [r0, :128], r1
vtbl.8 d23, {q14}, d23
vst1.16 {q10, q11}, [r2, :128], r1
bgt 16b
pop {r4-r5, pc}
320:
lsl r1, r1, #1
sub r1, r1, #32
32:
vld1.8 {q10, q11}, [r3, :64]!
subs r5, r5, #2
vand.u8 q2, q10, q13
vshr.u8 q3, q10, #4
vand.u8 q10, q11, q13
vshr.u8 q11, q11, #4
vzip.8 q2, q3
vzip.8 q10, q11
vadd.i8 q0, q2, q2
vadd.i8 q1, q2, q2
vadd.i8 q2, q3, q3
vadd.i8 q3, q3, q3
vadd.i8 q8, q10, q10
vadd.i8 q9, q10, q10
vadd.i8 q10, q11, q11
vzip.8 q0, q1
vadd.i8 q11, q11, q11
vzip.8 q2, q3
vzip.8 q8, q9
vadd.i16 q0, q0, q15
vzip.8 q10, q11
vadd.i16 q1, q1, q15
vadd.i16 q2, q2, q15
vadd.i16 q3, q3, q15
vadd.i16 q8, q8, q15
vadd.i16 q9, q9, q15
vadd.i16 q10, q10, q15
vtbl.8 d0, {q14}, d0
vadd.i16 q11, q11, q15
vtbl.8 d1, {q14}, d1
vtbl.8 d2, {q14}, d2
vtbl.8 d3, {q14}, d3
vtbl.8 d4, {q14}, d4
vtbl.8 d5, {q14}, d5
vtbl.8 d6, {q14}, d6
vtbl.8 d7, {q14}, d7
vtbl.8 d16, {q14}, d16
vtbl.8 d17, {q14}, d17
vtbl.8 d18, {q14}, d18
vst1.16 {q0, q1}, [r0, :128]!
vtbl.8 d19, {q14}, d19
vtbl.8 d20, {q14}, d20
vst1.16 {q2, q3}, [r0, :128], r1
vtbl.8 d21, {q14}, d21
vtbl.8 d22, {q14}, d22
vst1.16 {q8, q9}, [r2, :128]!
vtbl.8 d23, {q14}, d23
vst1.16 {q10, q11}, [r2, :128], r1
bgt 32b
pop {r4-r5, pc}
640:
sub r1, r1, #96
64:
vld1.8 {q10, q11}, [r3, :64]!
subs r5, r5, #1
vand.u8 q2, q10, q13
vshr.u8 q3, q10, #4
vand.u8 q10, q11, q13
vshr.u8 q11, q11, #4
vzip.8 q2, q3
vzip.8 q10, q11
vadd.i8 q0, q2, q2
vadd.i8 q1, q2, q2
vadd.i8 q2, q3, q3
vadd.i8 q3, q3, q3
vadd.i8 q8, q10, q10
vadd.i8 q9, q10, q10
vadd.i8 q10, q11, q11
vzip.8 q0, q1
vadd.i8 q11, q11, q11
vzip.8 q2, q3
vzip.8 q8, q9
vadd.i16 q0, q0, q15
vzip.8 q10, q11
vadd.i16 q1, q1, q15
vadd.i16 q2, q2, q15
vadd.i16 q3, q3, q15
vadd.i16 q8, q8, q15
vadd.i16 q9, q9, q15
vadd.i16 q10, q10, q15
vtbl.8 d0, {q14}, d0
vadd.i16 q11, q11, q15
vtbl.8 d1, {q14}, d1
vtbl.8 d2, {q14}, d2
vtbl.8 d3, {q14}, d3
vtbl.8 d4, {q14}, d4
vtbl.8 d5, {q14}, d5
vtbl.8 d6, {q14}, d6
vtbl.8 d7, {q14}, d7
vtbl.8 d16, {q14}, d16
vtbl.8 d17, {q14}, d17
vtbl.8 d18, {q14}, d18
vst1.16 {q0, q1}, [r0, :128]!
vtbl.8 d19, {q14}, d19
vtbl.8 d20, {q14}, d20
vst1.16 {q2, q3}, [r0, :128]!
vtbl.8 d21, {q14}, d21
vtbl.8 d22, {q14}, d22
vst1.16 {q8, q9}, [r0, :128]!
vtbl.8 d23, {q14}, d23
vst1.16 {q10, q11}, [r0, :128], r1
bgt 64b
pop {r4-r5, pc}
endfunc
// void ipred_cfl_128_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height,
// const int16_t *ac, const int alpha,
// const int bitdepth_max);
function ipred_cfl_128_16bpc_neon, export=1
push {r4-r8, lr}
ldrd r4, r5, [sp, #24]
ldrd r6, r7, [sp, #32]
clz lr, r3
vdup.16 q15, r7 // bitdepth_max
adr r12, L(ipred_cfl_128_tbl)
sub lr, lr, #26
ldr lr, [r12, lr, lsl #2]
vrshr.u16 q0, q15, #1
vdup.16 q1, r6 // alpha
add r12, r12, lr
add r6, r0, r1
lsl r1, r1, #1
vmov.i16 q14, #0
bx r12
.align 2
L(ipred_cfl_128_tbl):
L(ipred_cfl_splat_tbl):
.word L(ipred_cfl_splat_w16) - L(ipred_cfl_128_tbl) + CONFIG_THUMB
.word L(ipred_cfl_splat_w16) - L(ipred_cfl_128_tbl) + CONFIG_THUMB
.word L(ipred_cfl_splat_w8) - L(ipred_cfl_128_tbl) + CONFIG_THUMB
.word L(ipred_cfl_splat_w4) - L(ipred_cfl_128_tbl) + CONFIG_THUMB
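// diff = ac * alpha (32 bit). Adding the sign bit before the rounding shift by
// 6 makes the division round towards zero, i.e. apply_sign((abs(diff) + 32) >> 6,
// diff) as in the C reference; dc is then added and the result clamped to
// [0, bitdepth_max].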
L(ipred_cfl_splat_w4):
vld1.16 {q8, q9}, [r5, :128]!
vmull.s16 q2, d16, d2 // diff = ac * alpha
vmull.s16 q3, d17, d3
vmull.s16 q8, d18, d2
vmull.s16 q9, d19, d3
vshr.s32 q10, q2, #31 // sign = diff >> 31
vshr.s32 q11, q3, #31
vshr.s32 q12, q8, #31
vshr.s32 q13, q9, #31
vadd.i32 q2, q2, q10 // diff + sign
vadd.i32 q3, q3, q11
vadd.i32 q8, q8, q12
vadd.i32 q9, q9, q13
vrshrn.i32 d4, q2, #6 // (diff + sign + 32) >> 6 = apply_sign()
vrshrn.i32 d5, q3, #6
vrshrn.i32 d6, q8, #6
vrshrn.i32 d7, q9, #6
vadd.i16 q2, q2, q0 // dc + apply_sign()
vadd.i16 q3, q3, q0
vmax.s16 q2, q2, q14
vmax.s16 q3, q3, q14
vmin.s16 q2, q2, q15
vmin.s16 q3, q3, q15
vst1.16 {d4}, [r0, :64], r1
vst1.16 {d5}, [r6, :64], r1
subs r4, r4, #4
vst1.16 {d6}, [r0, :64], r1
vst1.16 {d7}, [r6, :64], r1
bgt L(ipred_cfl_splat_w4)
pop {r4-r8, pc}
L(ipred_cfl_splat_w8):
vld1.16 {q8, q9}, [r5, :128]!
subs r4, r4, #2
vmull.s16 q2, d16, d2 // diff = ac * alpha
vmull.s16 q3, d17, d3
vmull.s16 q8, d18, d2
vmull.s16 q9, d19, d3
vshr.s32 q10, q2, #31 // sign = diff >> 31
vshr.s32 q11, q3, #31
vshr.s32 q12, q8, #31
vshr.s32 q13, q9, #31
vadd.i32 q2, q2, q10 // diff + sign
vadd.i32 q3, q3, q11
vadd.i32 q8, q8, q12
vadd.i32 q9, q9, q13
vrshrn.i32 d4, q2, #6 // (diff + sign + 32) >> 6 = apply_sign()
vrshrn.i32 d5, q3, #6
vrshrn.i32 d6, q8, #6
vrshrn.i32 d7, q9, #6
vadd.i16 q2, q2, q0 // dc + apply_sign()
vadd.i16 q3, q3, q0
vmax.s16 q2, q2, q14
vmax.s16 q3, q3, q14
vmin.s16 q2, q2, q15
vmin.s16 q3, q3, q15
vst1.16 {q2}, [r0, :128], r1
vst1.16 {q3}, [r6, :128], r1
bgt L(ipred_cfl_splat_w8)
pop {r4-r8, pc}
L(ipred_cfl_splat_w16):
vpush {q4-q7}
add r12, r5, r3, lsl #1
sub r1, r1, r3, lsl #1
mov lr, r3
1:
vld1.16 {q6, q7}, [r5, :128]!
vmull.s16 q2, d12, d2 // diff = ac * alpha
vld1.16 {q8, q9}, [r12, :128]!
vmull.s16 q3, d13, d3
vmull.s16 q4, d14, d2
vmull.s16 q5, d15, d3
vmull.s16 q6, d16, d2
vmull.s16 q7, d17, d3
vmull.s16 q8, d18, d2
vmull.s16 q9, d19, d3
vshr.s32 q10, q2, #31 // sign = diff >> 31
vshr.s32 q11, q3, #31
vshr.s32 q12, q4, #31
vshr.s32 q13, q5, #31
vadd.i32 q2, q2, q10 // diff + sign
vshr.s32 q10, q6, #31
vadd.i32 q3, q3, q11
vshr.s32 q11, q7, #31
vadd.i32 q4, q4, q12
vshr.s32 q12, q8, #31
vadd.i32 q5, q5, q13
vshr.s32 q13, q9, #31
vadd.i32 q6, q6, q10
vadd.i32 q7, q7, q11
vadd.i32 q8, q8, q12
vadd.i32 q9, q9, q13
vrshrn.i32 d4, q2, #6 // (diff + sign + 32) >> 6 = apply_sign()
vrshrn.i32 d5, q3, #6
vrshrn.i32 d6, q4, #6
vrshrn.i32 d7, q5, #6
vadd.i16 q2, q2, q0 // dc + apply_sign()
vrshrn.i32 d8, q6, #6
vrshrn.i32 d9, q7, #6
vadd.i16 q3, q3, q0
vrshrn.i32 d10, q8, #6
vrshrn.i32 d11, q9, #6
vadd.i16 q4, q4, q0
vadd.i16 q5, q5, q0
vmax.s16 q2, q2, q14
vmax.s16 q3, q3, q14
vmax.s16 q4, q4, q14
vmax.s16 q5, q5, q14
vmin.s16 q2, q2, q15
vmin.s16 q3, q3, q15
vmin.s16 q4, q4, q15
vmin.s16 q5, q5, q15
subs r3, r3, #16
vst1.16 {q2, q3}, [r0, :128]!
vst1.16 {q4, q5}, [r6, :128]!
bgt 1b
subs r4, r4, #2
add r5, r5, lr, lsl #1
add r12, r12, lr, lsl #1
add r0, r0, r1
add r6, r6, r1
mov r3, lr
bgt 1b
vpop {q4-q7}
pop {r4-r8, pc}
endfunc
// void ipred_cfl_top_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height,
// const int16_t *ac, const int alpha,
// const int bitdepth_max);
function ipred_cfl_top_16bpc_neon, export=1
push {r4-r8, lr}
ldrd r4, r5, [sp, #24]
ldrd r6, r7, [sp, #32]
clz lr, r3
vdup.16 q15, r7 // bitdepth_max
adr r12, L(ipred_cfl_top_tbl)
sub lr, lr, #26
ldr lr, [r12, lr, lsl #2]
vdup.16 q1, r6 // alpha
add r2, r2, #2
add r12, r12, lr
add r6, r0, r1
lsl r1, r1, #1
vmov.i16 q14, #0
bx r12
.align 2
L(ipred_cfl_top_tbl):
.word 32f - L(ipred_cfl_top_tbl) + CONFIG_THUMB
.word 16f - L(ipred_cfl_top_tbl) + CONFIG_THUMB
.word 8f - L(ipred_cfl_top_tbl) + CONFIG_THUMB
.word 4f - L(ipred_cfl_top_tbl) + CONFIG_THUMB
4:
vld1.16 {d0}, [r2]
vpadd.i16 d0, d0, d0
vpadd.i16 d0, d0, d0
vrshr.u16 d0, d0, #2
vdup.16 q0, d0[0]
b L(ipred_cfl_splat_w4)
8:
vld1.16 {q0}, [r2]
vadd.i16 d0, d0, d1
vpadd.i16 d0, d0, d0
vpadd.i16 d0, d0, d0
vrshr.u16 d0, d0, #3
vdup.16 q0, d0[0]
b L(ipred_cfl_splat_w8)
16:
vld1.16 {q2, q3}, [r2]
vadd.i16 q0, q2, q3
vadd.i16 d0, d0, d1
vpadd.i16 d0, d0, d0
vpadd.i16 d0, d0, d0
vrshr.u16 d0, d0, #4
vdup.16 q0, d0[0]
b L(ipred_cfl_splat_w16)
32:
vld1.16 {q8, q9}, [r2]!
vld1.16 {q10, q11}, [r2]
vadd.i16 q8, q8, q9
vadd.i16 q10, q10, q11
vadd.i16 q0, q8, q10
vadd.i16 d0, d0, d1
vpadd.i16 d0, d0, d0
vpaddl.u16 d0, d0
vrshrn.i32 d0, q0, #5
vdup.16 q0, d0[0]
b L(ipred_cfl_splat_w16)
endfunc
// void ipred_cfl_left_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height,
// const int16_t *ac, const int alpha,
// const int bitdepth_max);
function ipred_cfl_left_16bpc_neon, export=1
push {r4-r8, lr}
ldrd r4, r5, [sp, #24]
ldrd r6, r7, [sp, #32]
sub r2, r2, r4, lsl #1
clz lr, r3
clz r8, r4
vdup.16 q15, r7 // bitdepth_max
adr r12, L(ipred_cfl_splat_tbl)
adr r7, L(ipred_cfl_left_tbl)
sub lr, lr, #26
sub r8, r8, #26
ldr lr, [r12, lr, lsl #2]
ldr r8, [r7, r8, lsl #2]
vdup.16 q1, r6 // alpha
add r12, r12, lr
add r7, r7, r8
add r6, r0, r1
lsl r1, r1, #1
vmov.i16 q14, #0
bx r7
.align 2
L(ipred_cfl_left_tbl):
.word L(ipred_cfl_left_h32) - L(ipred_cfl_left_tbl) + CONFIG_THUMB
.word L(ipred_cfl_left_h16) - L(ipred_cfl_left_tbl) + CONFIG_THUMB
.word L(ipred_cfl_left_h8) - L(ipred_cfl_left_tbl) + CONFIG_THUMB
.word L(ipred_cfl_left_h4) - L(ipred_cfl_left_tbl) + CONFIG_THUMB
L(ipred_cfl_left_h4):
vld1.16 {d0}, [r2, :64]
vpadd.i16 d0, d0, d0
vpadd.i16 d0, d0, d0
vrshr.u16 d0, d0, #2
vdup.16 q0, d0[0]
bx r12
L(ipred_cfl_left_h8):
vld1.16 {q0}, [r2, :128]
vadd.i16 d0, d0, d1
vpadd.i16 d0, d0, d0
vpadd.i16 d0, d0, d0
vrshr.u16 d0, d0, #3
vdup.16 q0, d0[0]
bx r12
L(ipred_cfl_left_h16):
vld1.16 {q2, q3}, [r2, :128]
vadd.i16 q0, q2, q3
vadd.i16 d0, d0, d1
vpadd.i16 d0, d0, d0
vpadd.i16 d0, d0, d0
vrshr.u16 d0, d0, #4
vdup.16 q0, d0[0]
bx r12
L(ipred_cfl_left_h32):
vld1.16 {q8, q9}, [r2, :128]!
vld1.16 {q10, q11}, [r2, :128]
vadd.i16 q8, q8, q9
vadd.i16 q10, q10, q11
vadd.i16 q0, q8, q10
vadd.i16 d0, d0, d1
vpadd.i16 d0, d0, d0
vpaddl.u16 d0, d0
vrshrn.i32 d0, q0, #5
vdup.16 q0, d0[0]
bx r12
endfunc
// void ipred_cfl_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height,
// const int16_t *ac, const int alpha,
// const int bitdepth_max);
function ipred_cfl_16bpc_neon, export=1
push {r4-r8, lr}
ldrd r4, r5, [sp, #24]
ldrd r6, r7, [sp, #32]
sub r2, r2, r4, lsl #1
add r8, r3, r4 // width + height
vdup.16 q1, r6 // alpha
clz lr, r3
clz r6, r4
vdup.32 d16, r8 // width + height
vdup.16 q15, r7 // bitdepth_max
adr r7, L(ipred_cfl_tbl)
rbit r8, r8 // rbit(width + height)
sub lr, lr, #22 // 26 leading bits, minus table offset 4
sub r6, r6, #26
clz r8, r8 // ctz(width + height)
ldr lr, [r7, lr, lsl #2]
ldr r6, [r7, r6, lsl #2]
neg r8, r8 // -ctz(width + height)
add r12, r7, lr
add r7, r7, r6
vshr.u32 d16, d16, #1 // (width + height) >> 1
vdup.32 d17, r8 // -ctz(width + height)
add r6, r0, r1
lsl r1, r1, #1
vmov.i16 q14, #0
bx r7
.align 2
L(ipred_cfl_tbl):
.word L(ipred_cfl_h32) - L(ipred_cfl_tbl) + CONFIG_THUMB
.word L(ipred_cfl_h16) - L(ipred_cfl_tbl) + CONFIG_THUMB
.word L(ipred_cfl_h8) - L(ipred_cfl_tbl) + CONFIG_THUMB
.word L(ipred_cfl_h4) - L(ipred_cfl_tbl) + CONFIG_THUMB
.word L(ipred_cfl_w32) - L(ipred_cfl_tbl) + CONFIG_THUMB
.word L(ipred_cfl_w16) - L(ipred_cfl_tbl) + CONFIG_THUMB
.word L(ipred_cfl_w8) - L(ipred_cfl_tbl) + CONFIG_THUMB
.word L(ipred_cfl_w4) - L(ipred_cfl_tbl) + CONFIG_THUMB
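// dc = (sum(top) + sum(left) + ((w + h) >> 1)) / (w + h). The shift by
// -ctz(w + h) only divides by the power-of-two factor; for rectangular blocks
// the remaining division by 3 or 5 is a fixed-point multiply (0xAAAB ~ 2^17/3,
// 0x6667 ~ 2^17/5) followed by a right shift by 17.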
L(ipred_cfl_h4):
vld1.16 {d0}, [r2, :64]!
vpadd.i16 d0, d0, d0
add r2, r2, #2
vpaddl.u16 d0, d0
bx r12
L(ipred_cfl_w4):
vld1.16 {d1}, [r2]
vadd.i32 d0, d0, d16
vpadd.i16 d1, d1, d1
vpaddl.u16 d1, d1
cmp r4, #4
vadd.i32 d0, d0, d1
vshl.u32 d0, d0, d17
beq 1f
// h = 8/16
cmp r4, #16
movw lr, #0x6667
movw r8, #0xAAAB
it ne
movne lr, r8
vdup.32 d18, lr
vmul.i32 d0, d0, d18
vshr.u32 d0, d0, #17
1:
vdup.16 q0, d0[0]
b L(ipred_cfl_splat_w4)
L(ipred_cfl_h8):
vld1.16 {q0}, [r2, :128]!
vadd.i16 d0, d0, d1
vpadd.i16 d0, d0, d0
add r2, r2, #2
vpaddl.u16 d0, d0
bx r12
L(ipred_cfl_w8):
vld1.16 {q2}, [r2]
vadd.i32 d0, d0, d16
vadd.i16 d1, d4, d5
vpadd.i16 d1, d1, d1
vpaddl.u16 d1, d1
cmp r4, #8
vadd.i32 d0, d0, d1
vshl.u32 d0, d0, d17
beq 1f
// h = 4/16/32
cmp r4, #32
movw lr, #0x6667
movw r8, #0xAAAB
it ne
movne lr, r8
vdup.32 d18, lr
vmul.i32 d0, d0, d18
vshr.u32 d0, d0, #17
1:
vdup.16 q0, d0[0]
b L(ipred_cfl_splat_w8)
L(ipred_cfl_h16):
vld1.16 {q2, q3}, [r2, :128]!
vadd.i16 q0, q2, q3
vadd.i16 d0, d0, d1
vpadd.i16 d0, d0, d0
add r2, r2, #2
vpaddl.u16 d0, d0
bx r12
L(ipred_cfl_w16):
vld1.16 {q2, q3}, [r2]
vadd.i32 d0, d0, d16
vadd.i16 q2, q2, q3
vadd.i16 d1, d4, d5
vpadd.i16 d1, d1, d1
vpaddl.u16 d1, d1
cmp r4, #16
vadd.i32 d0, d0, d1
vshl.u32 d0, d0, d17
beq 1f
// h = 4/8/32/64
tst r4, #(32+16+8) // 16 added to make a consecutive bitmask
movw lr, #0x6667
movw r8, #0xAAAB
it ne
movne lr, r8
vdup.32 d18, lr
vmul.i32 d0, d0, d18
vshr.u32 d0, d0, #17
1:
vdup.16 q0, d0[0]
b L(ipred_cfl_splat_w16)
L(ipred_cfl_h32):
vld1.16 {q2, q3}, [r2, :128]!
vld1.16 {q10, q11}, [r2, :128]!
vadd.i16 q2, q2, q3
vadd.i16 q10, q10, q11
vadd.i16 q0, q2, q10
vadd.i16 d0, d0, d1
vpadd.i16 d0, d0, d0
add r2, r2, #2
vpaddl.u16 d0, d0
bx r12
L(ipred_cfl_w32):
vld1.16 {q2, q3}, [r2]!
vadd.i32 d0, d0, d16
vld1.16 {q10, q11}, [r2]!
vadd.i16 q2, q2, q3
vadd.i16 q10, q10, q11
vadd.i16 q2, q2, q10
vadd.i16 d1, d4, d5
vpadd.i16 d1, d1, d1
vpaddl.u16 d1, d1
cmp r4, #32
vadd.i32 d0, d0, d1
vshl.u32 d0, d0, d17
beq 1f
// h = 8/16/64
cmp r4, #8
movw lr, #0x6667
movw r8, #0xAAAB
it ne
movne lr, r8
vdup.32 d18, lr
vmul.i32 d0, d0, d18
vshr.u32 d0, d0, #17
1:
vdup.16 q0, d0[0]
b L(ipred_cfl_splat_w16)
endfunc
// void cfl_ac_420_16bpc_neon(int16_t *const ac, const pixel *const ypx,
// const ptrdiff_t stride, const int w_pad,
// const int h_pad, const int cw, const int ch);
function ipred_cfl_ac_420_16bpc_neon, export=1
push {r4-r8,lr}
ldrd r4, r5, [sp, #24]
ldr r6, [sp, #32]
clz r8, r5
lsl r4, r4, #2
adr r7, L(ipred_cfl_ac_420_tbl)
sub r8, r8, #27
ldr r8, [r7, r8, lsl #2]
vmov.i32 q8, #0
vmov.i32 q9, #0
vmov.i32 q10, #0
vmov.i32 q11, #0
add r7, r7, r8
sub r8, r6, r4 // height - h_pad
rbit lr, r5 // rbit(width)
rbit r12, r6 // rbit(height)
clz lr, lr // ctz(width)
clz r12, r12 // ctz(height)
add lr, lr, r12 // log2sz
add r12, r1, r2
vdup.32 d31, lr
lsl r2, r2, #1
vneg.s32 d31, d31 // -log2sz
bx r7
.align 2
L(ipred_cfl_ac_420_tbl):
.word L(ipred_cfl_ac_420_w16) - L(ipred_cfl_ac_420_tbl) + CONFIG_THUMB
.word L(ipred_cfl_ac_420_w8) - L(ipred_cfl_ac_420_tbl) + CONFIG_THUMB
.word L(ipred_cfl_ac_420_w4) - L(ipred_cfl_ac_420_tbl) + CONFIG_THUMB
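// 4:2:0: each AC sample is the sum of a 2x2 luma block << 1, i.e. the luma
// average scaled by 8 (3 bits of fractional precision). q8-q11 accumulate the
// samples so that the rounded average can be subtracted at the end.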
L(ipred_cfl_ac_420_w4):
1: // Copy and subsample input
vld1.16 {q0}, [r1, :128], r2
vld1.16 {q1}, [r12, :128], r2
vld1.16 {q2}, [r1, :128], r2
vld1.16 {q3}, [r12, :128], r2
vadd.i16 q0, q0, q1
vadd.i16 q2, q2, q3
vpadd.i16 d0, d0, d1
vpadd.i16 d1, d4, d5
vshl.i16 q0, q0, #1
subs r8, r8, #2
vst1.16 {q0}, [r0, :128]!
vaddw.u16 q8, q8, d0
vaddw.u16 q9, q9, d1
bgt 1b
cmp r4, #0
vmov d0, d1
vmov d2, d1
vmov d3, d1
L(ipred_cfl_ac_420_w4_hpad):
beq 3f // This assumes that all callers already did "cmp r4, #0"
2: // Vertical padding (h_pad > 0)
subs r4, r4, #4
vst1.16 {q0, q1}, [r0, :128]!
vaddw.u16 q8, q8, d0
vaddw.u16 q9, q9, d1
vaddw.u16 q10, q10, d2
vaddw.u16 q11, q11, d3
bgt 2b
3:
L(ipred_cfl_ac_420_w4_calc_subtract_dc):
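// r6 has been scaled by the callers so that r6 * 8 bytes covers the whole AC
// buffer. The rounded average is computed with a rounding shift by log2sz
// (vrshl by -log2sz) and subtracted from every sample, leaving a zero-mean
// AC signal.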
// Aggregate the sums
vadd.i32 q8, q8, q9
vadd.i32 q10, q10, q11
vadd.i32 q0, q8, q10
vadd.i32 d0, d0, d1
vpadd.i32 d0, d0, d0 // sum
sub r0, r0, r6, lsl #3
vrshl.u32 d16, d0, d31 // (sum + (1 << (log2sz - 1))) >>= log2sz
vdup.16 q8, d16[0]
6: // Subtract dc from ac
vld1.16 {q0, q1}, [r0, :128]
subs r6, r6, #4
vsub.i16 q0, q0, q8
vsub.i16 q1, q1, q8
vst1.16 {q0, q1}, [r0, :128]!
bgt 6b
pop {r4-r8, pc}
L(ipred_cfl_ac_420_w8):
cmp r3, #0
bne L(ipred_cfl_ac_420_w8_wpad)
1: // Copy and subsample input, without padding
vld1.16 {q0, q1}, [r1, :128], r2
vld1.16 {q2, q3}, [r12, :128], r2
vld1.16 {q12, q13}, [r1, :128], r2
vadd.i16 q0, q0, q2
vadd.i16 q1, q1, q3
vld1.16 {q2, q3}, [r12, :128], r2
vpadd.i16 d0, d0, d1
vpadd.i16 d1, d2, d3
vadd.i16 q12, q12, q2
vadd.i16 q13, q13, q3
vpadd.i16 d2, d24, d25
vpadd.i16 d3, d26, d27
vshl.i16 q0, q0, #1
vshl.i16 q1, q1, #1
subs r8, r8, #2
vst1.16 {q0, q1}, [r0, :128]!
vaddw.u16 q8, q8, d0
vaddw.u16 q9, q9, d1
vaddw.u16 q10, q10, d2
vaddw.u16 q11, q11, d3
bgt 1b
cmp r4, #0
vmov q0, q1
b L(ipred_cfl_ac_420_w8_hpad)
L(ipred_cfl_ac_420_w8_wpad):
1: // Copy and subsample input, padding 4
vld1.16 {q0}, [r1, :128], r2
vld1.16 {q1}, [r12, :128], r2
vld1.16 {q2}, [r1, :128], r2
vld1.16 {q3}, [r12, :128], r2
vadd.i16 q0, q0, q1
vadd.i16 q2, q2, q3
vpadd.i16 d0, d0, d1
vpadd.i16 d1, d4, d5
vshl.i16 q0, q0, #1
vdup.16 d3, d1[3]
vmov d2, d1
vdup.16 d1, d0[3]
subs r8, r8, #2
vst1.16 {q0, q1}, [r0, :128]!
vaddw.u16 q8, q8, d0
vaddw.u16 q9, q9, d1
vaddw.u16 q10, q10, d2
vaddw.u16 q11, q11, d3
bgt 1b
cmp r4, #0
vmov q0, q1
L(ipred_cfl_ac_420_w8_hpad):
beq 3f // This assumes that all callers already did "cmp r4, #0"
2: // Vertical padding (h_pad > 0)
subs r4, r4, #4
vst1.16 {q0, q1}, [r0, :128]!
vaddw.u16 q8, q8, d0
vaddw.u16 q9, q9, d1
vaddw.u16 q10, q10, d2
vaddw.u16 q11, q11, d3
vst1.16 {q0, q1}, [r0, :128]!
vaddw.u16 q8, q8, d0
vaddw.u16 q9, q9, d1
vaddw.u16 q10, q10, d2
vaddw.u16 q11, q11, d3
bgt 2b
3:
// Double the height and reuse the w4 summing/subtracting
lsl r6, r6, #1
b L(ipred_cfl_ac_420_w4_calc_subtract_dc)
L(ipred_cfl_ac_420_w16):
adr r7, L(ipred_cfl_ac_420_w16_tbl)
ldr r3, [r7, r3, lsl #2]
add r7, r7, r3
bx r7
.align 2
L(ipred_cfl_ac_420_w16_tbl):
.word L(ipred_cfl_ac_420_w16_wpad0) - L(ipred_cfl_ac_420_w16_tbl) + CONFIG_THUMB
.word L(ipred_cfl_ac_420_w16_wpad1) - L(ipred_cfl_ac_420_w16_tbl) + CONFIG_THUMB
.word L(ipred_cfl_ac_420_w16_wpad2) - L(ipred_cfl_ac_420_w16_tbl) + CONFIG_THUMB
.word L(ipred_cfl_ac_420_w16_wpad3) - L(ipred_cfl_ac_420_w16_tbl) + CONFIG_THUMB
L(ipred_cfl_ac_420_w16_wpad0):
sub r2, r2, #32
1: // Copy and subsample input, without padding
vld1.16 {q0, q1}, [r1, :128]!
vld1.16 {q12, q13}, [r12, :128]!
vld1.16 {q2, q3}, [r1, :128], r2
vadd.i16 q0, q0, q12
vadd.i16 q1, q1, q13
vld1.16 {q12, q13}, [r12, :128], r2
vpadd.i16 d0, d0, d1
vpadd.i16 d1, d2, d3
vadd.i16 q2, q2, q12
vadd.i16 q3, q3, q13
vpadd.i16 d2, d4, d5
vpadd.i16 d3, d6, d7
vshl.i16 q0, q0, #1
vshl.i16 q1, q1, #1
subs r8, r8, #1
vst1.16 {q0, q1}, [r0, :128]!
vaddw.u16 q8, q8, d0
vaddw.u16 q9, q9, d1
vaddw.u16 q10, q10, d2
vaddw.u16 q11, q11, d3
bgt 1b
cmp r4, #0
b L(ipred_cfl_ac_420_w16_hpad)
L(ipred_cfl_ac_420_w16_wpad1):
sub r2, r2, #32
1: // Copy and subsample input, padding 4
vld1.16 {q0, q1}, [r1, :128]!
vld1.16 {q12, q13}, [r12, :128]!
vld1.16 {q2}, [r1, :128], r2
vadd.i16 q0, q0, q12
vadd.i16 q1, q1, q13
vld1.16 {q12}, [r12, :128], r2
vpadd.i16 d0, d0, d1
vadd.i16 q2, q2, q12
vpadd.i16 d1, d2, d3
vpadd.i16 d2, d4, d5
vshl.i16 q0, q0, #1
vshl.i16 d2, d2, #1
subs r8, r8, #1
vdup.16 d3, d2[3]
vst1.16 {q0, q1}, [r0, :128]!
vaddw.u16 q8, q8, d0
vaddw.u16 q9, q9, d1
vaddw.u16 q10, q10, d2
vaddw.u16 q11, q11, d3
bgt 1b
cmp r4, #0
b L(ipred_cfl_ac_420_w16_hpad)
L(ipred_cfl_ac_420_w16_wpad2):
1: // Copy and subsample input, padding 8
vld1.16 {q0, q1}, [r1, :128], r2
vld1.16 {q12, q13}, [r12, :128], r2
vadd.i16 q0, q0, q12
vadd.i16 q1, q1, q13
vpadd.i16 d0, d0, d1
vpadd.i16 d1, d2, d3
vshl.i16 q0, q0, #1
subs r8, r8, #1
vdup.16 q1, d1[3]
vst1.16 {q0, q1}, [r0, :128]!
vaddw.u16 q8, q8, d0
vaddw.u16 q9, q9, d1
vaddw.u16 q10, q10, d2
vaddw.u16 q11, q11, d3
bgt 1b
cmp r4, #0
b L(ipred_cfl_ac_420_w16_hpad)
L(ipred_cfl_ac_420_w16_wpad3):
1: // Copy and subsample input, padding 12
vld1.16 {q0}, [r1, :128], r2
vld1.16 {q12}, [r12, :128], r2
vadd.i16 q0, q0, q12
vpadd.i16 d0, d0, d1
vshl.i16 d0, d0, #1
subs r8, r8, #1
vdup.16 q1, d0[3]
vdup.16 d1, d0[3]
vst1.16 {q0, q1}, [r0, :128]!
vaddw.u16 q8, q8, d0
vaddw.u16 q9, q9, d1
vaddw.u16 q10, q10, d2
vaddw.u16 q11, q11, d3
bgt 1b
cmp r4, #0
b L(ipred_cfl_ac_420_w16_hpad)
L(ipred_cfl_ac_420_w16_hpad):
beq 3f // This assumes that all callers already did "cmp r4, #0"
2: // Vertical padding (h_pad > 0)
subs r4, r4, #2
vst1.16 {q0, q1}, [r0, :128]!
vaddw.u16 q8, q8, d0
vaddw.u16 q9, q9, d1
vaddw.u16 q10, q10, d2
vaddw.u16 q11, q11, d3
vst1.16 {q0, q1}, [r0, :128]!
vaddw.u16 q8, q8, d0
vaddw.u16 q9, q9, d1
vaddw.u16 q10, q10, d2
vaddw.u16 q11, q11, d3
bgt 2b
3:
// Quadruple the height and reuse the w4 summing/subtracting
lsl r6, r6, #2
b L(ipred_cfl_ac_420_w4_calc_subtract_dc)
endfunc
// void cfl_ac_422_16bpc_neon(int16_t *const ac, const pixel *const ypx,
// const ptrdiff_t stride, const int w_pad,
// const int h_pad, const int cw, const int ch);
function ipred_cfl_ac_422_16bpc_neon, export=1
push {r4-r8,lr}
ldrd r4, r5, [sp, #24]
ldr r6, [sp, #32]
clz r8, r5
lsl r4, r4, #2
adr r7, L(ipred_cfl_ac_422_tbl)
sub r8, r8, #27
ldr r8, [r7, r8, lsl #2]
vmov.i16 q8, #0
vmov.i16 q9, #0
vmov.i16 q10, #0
vmov.i16 q11, #0
add r7, r7, r8
sub r8, r6, r4 // height - h_pad
rbit lr, r5 // rbit(width)
rbit r12, r6 // rbit(height)
clz lr, lr // ctz(width)
clz r12, r12 // ctz(height)
add lr, lr, r12 // log2sz
add r12, r1, r2
vdup.32 d31, lr
lsl r2, r2, #1
vneg.s32 d31, d31 // -log2sz
bx r7
.align 2
L(ipred_cfl_ac_422_tbl):
.word L(ipred_cfl_ac_422_w16) - L(ipred_cfl_ac_422_tbl) + CONFIG_THUMB
.word L(ipred_cfl_ac_422_w8) - L(ipred_cfl_ac_422_tbl) + CONFIG_THUMB
.word L(ipred_cfl_ac_422_w4) - L(ipred_cfl_ac_422_tbl) + CONFIG_THUMB
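// 4:2:2: only horizontal subsampling; each AC sample is a horizontal pair sum
// << 2, giving the same 8x scale as the 4:2:0 path so that its hpad and
// subtract code can be reused.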
L(ipred_cfl_ac_422_w4):
1: // Copy and subsample input
vld1.16 {q0}, [r1, :128], r2
vld1.16 {q1}, [r12, :128], r2
vld1.16 {q2}, [r1, :128], r2
vld1.16 {q3}, [r12, :128], r2
vpadd.i16 d0, d0, d1
vpadd.i16 d1, d2, d3
vpadd.i16 d2, d4, d5
vpadd.i16 d3, d6, d7
vshl.i16 q0, q0, #2
vshl.i16 q1, q1, #2
subs r8, r8, #4
vst1.16 {q0, q1}, [r0, :128]!
vaddw.u16 q8, q8, d0
vaddw.u16 q9, q9, d1
vaddw.u16 q10, q10, d2
vaddw.u16 q11, q11, d3
bgt 1b
cmp r4, #0
vmov d0, d3
vmov d1, d3
vmov d2, d3
b L(ipred_cfl_ac_420_w4_hpad)
L(ipred_cfl_ac_422_w8):
cmp r3, #0
bne L(ipred_cfl_ac_422_w8_wpad)
1: // Copy and subsample input, without padding
vld1.16 {q0, q1}, [r1, :128], r2
vld1.16 {q2, q3}, [r12, :128], r2
vld1.16 {q12, q13}, [r1, :128], r2
vpadd.i16 d0, d0, d1
vpadd.i16 d1, d2, d3
vpadd.i16 d2, d4, d5
vpadd.i16 d3, d6, d7
vld1.16 {q2, q3}, [r12, :128], r2
vpadd.i16 d24, d24, d25
vpadd.i16 d25, d26, d27
vpadd.i16 d26, d4, d5
vpadd.i16 d27, d6, d7
vshl.i16 q0, q0, #2
vshl.i16 q1, q1, #2
vshl.i16 q2, q12, #2
vshl.i16 q3, q13, #2
subs r8, r8, #4
vst1.16 {q0, q1}, [r0, :128]!
vaddw.u16 q8, q8, d0
vaddw.u16 q9, q9, d1
vaddw.u16 q10, q10, d2
vaddw.u16 q11, q11, d3
vst1.16 {q2, q3}, [r0, :128]!
vaddw.u16 q8, q8, d4
vaddw.u16 q9, q9, d5
vaddw.u16 q10, q10, d6
vaddw.u16 q11, q11, d7
bgt 1b
cmp r4, #0
vmov q0, q3
vmov q1, q3
b L(ipred_cfl_ac_420_w8_hpad)
L(ipred_cfl_ac_422_w8_wpad):
1: // Copy and subsample input, padding 4
vld1.16 {q0}, [r1, :128], r2
vld1.16 {q2}, [r12, :128], r2
vld1.16 {q12}, [r1, :128], r2
vpadd.i16 d0, d0, d1
vpadd.i16 d1, d4, d5
vld1.16 {q2, q3}, [r12, :128], r2
vpadd.i16 d24, d24, d25
vpadd.i16 d25, d4, d5
vshl.i16 q0, q0, #2
vshl.i16 q12, q12, #2
vdup.16 d7, d25[3]
vmov d6, d25
vdup.16 d5, d24[3]
vmov d4, d24
vdup.16 d3, d1[3]
vmov d2, d1
vdup.16 d1, d0[3]
subs r8, r8, #4
vst1.16 {q0, q1}, [r0, :128]!
vaddw.u16 q8, q8, d0
vaddw.u16 q9, q9, d1
vaddw.u16 q10, q10, d2
vaddw.u16 q11, q11, d3
vst1.16 {q2, q3}, [r0, :128]!
vaddw.u16 q8, q8, d4
vaddw.u16 q9, q9, d5
vaddw.u16 q10, q10, d6
vaddw.u16 q11, q11, d7
bgt 1b
cmp r4, #0
vmov q0, q3
vmov q1, q3
b L(ipred_cfl_ac_420_w8_hpad)
L(ipred_cfl_ac_422_w16):
adr r7, L(ipred_cfl_ac_422_w16_tbl)
ldr r3, [r7, r3, lsl #2]
add r7, r7, r3
bx r7
.align 2
L(ipred_cfl_ac_422_w16_tbl):
.word L(ipred_cfl_ac_422_w16_wpad0) - L(ipred_cfl_ac_422_w16_tbl) + CONFIG_THUMB
.word L(ipred_cfl_ac_422_w16_wpad1) - L(ipred_cfl_ac_422_w16_tbl) + CONFIG_THUMB
.word L(ipred_cfl_ac_422_w16_wpad2) - L(ipred_cfl_ac_422_w16_tbl) + CONFIG_THUMB
.word L(ipred_cfl_ac_422_w16_wpad3) - L(ipred_cfl_ac_422_w16_tbl) + CONFIG_THUMB
L(ipred_cfl_ac_422_w16_wpad0):
sub r2, r2, #32
1: // Copy and subsample input, without padding
vld1.16 {q0, q1}, [r1, :128]!
vld1.16 {q2, q3}, [r12, :128]!
vld1.16 {q12, q13}, [r1, :128], r2
vpadd.i16 d0, d0, d1
vpadd.i16 d1, d2, d3
vpadd.i16 d2, d24, d25
vpadd.i16 d3, d26, d27
vld1.16 {q12, q13}, [r12, :128], r2
vpadd.i16 d4, d4, d5
vpadd.i16 d5, d6, d7
vpadd.i16 d6, d24, d25
vpadd.i16 d7, d26, d27
vshl.i16 q0, q0, #2
vshl.i16 q1, q1, #2
vshl.i16 q2, q2, #2
vshl.i16 q3, q3, #2
subs r8, r8, #2
vst1.16 {q0, q1}, [r0, :128]!
vaddw.u16 q8, q8, d0
vaddw.u16 q9, q9, d1
vaddw.u16 q10, q10, d2
vaddw.u16 q11, q11, d3
vst1.16 {q2, q3}, [r0, :128]!
vaddw.u16 q8, q8, d4
vaddw.u16 q9, q9, d5
vaddw.u16 q10, q10, d6
vaddw.u16 q11, q11, d7
bgt 1b
cmp r4, #0
vmov q0, q2
vmov q1, q3
b L(ipred_cfl_ac_420_w16_hpad)
L(ipred_cfl_ac_422_w16_wpad1):
sub r2, r2, #32
1: // Copy and subsample input, padding 4
vld1.16 {q0, q1}, [r1, :128]!
vld1.16 {q2, q3}, [r12, :128]!
vld1.16 {q12}, [r1, :128], r2
vpadd.i16 d0, d0, d1
vpadd.i16 d1, d2, d3
vpadd.i16 d2, d24, d25
vld1.16 {q12}, [r12, :128], r2
vpadd.i16 d4, d4, d5
vpadd.i16 d5, d6, d7
vpadd.i16 d6, d24, d25
vshl.i16 q0, q0, #2
vshl.i16 d2, d2, #2
vshl.i16 q2, q2, #2
vshl.i16 d6, d6, #2
vdup.16 d3, d2[3]
vdup.16 d7, d6[3]
subs r8, r8, #2
vst1.16 {q0, q1}, [r0, :128]!
vaddw.u16 q8, q8, d0
vaddw.u16 q9, q9, d1
vaddw.u16 q10, q10, d2
vaddw.u16 q11, q11, d3
vst1.16 {q2, q3}, [r0, :128]!
vaddw.u16 q8, q8, d4
vaddw.u16 q9, q9, d5
vaddw.u16 q10, q10, d6
vaddw.u16 q11, q11, d7
bgt 1b
cmp r4, #0
vmov q0, q2
vmov q1, q3
b L(ipred_cfl_ac_420_w16_hpad)
L(ipred_cfl_ac_422_w16_wpad2):
1: // Copy and subsample input, padding 8
vld1.16 {q0, q1}, [r1, :128], r2
vld1.16 {q2, q3}, [r12, :128], r2
vpadd.i16 d0, d0, d1
vpadd.i16 d1, d2, d3
vpadd.i16 d4, d4, d5
vpadd.i16 d5, d6, d7
vshl.i16 q0, q0, #2
vshl.i16 q2, q2, #2
vdup.16 q1, d1[3]
vdup.16 q3, d5[3]
subs r8, r8, #2
vst1.16 {q0, q1}, [r0, :128]!
vaddw.u16 q8, q8, d0
vaddw.u16 q9, q9, d1
vaddw.u16 q10, q10, d2
vaddw.u16 q11, q11, d3
vst1.16 {q2, q3}, [r0, :128]!
vaddw.u16 q8, q8, d4
vaddw.u16 q9, q9, d5
vaddw.u16 q10, q10, d6
vaddw.u16 q11, q11, d7
bgt 1b
cmp r4, #0
vmov q0, q2
vmov q1, q3
b L(ipred_cfl_ac_420_w16_hpad)
L(ipred_cfl_ac_422_w16_wpad3):
1: // Copy and subsample input, padding 12
vld1.16 {q0}, [r1, :128], r2
vld1.16 {q2}, [r12, :128], r2
vpadd.i16 d0, d0, d1
vpadd.i16 d1, d4, d5
vshl.i16 q0, q0, #2
vdup.16 q3, d1[3]
vdup.16 q1, d0[3]
vdup.16 d5, d1[3]
vmov d4, d1
vdup.16 d1, d0[3]
subs r8, r8, #2
vst1.16 {q0, q1}, [r0, :128]!
vaddw.u16 q8, q8, d0
vaddw.u16 q9, q9, d1
vaddw.u16 q10, q10, d2
vaddw.u16 q11, q11, d3
vst1.16 {q2, q3}, [r0, :128]!
vaddw.u16 q8, q8, d4
vaddw.u16 q9, q9, d5
vaddw.u16 q10, q10, d6
vaddw.u16 q11, q11, d7
bgt 1b
cmp r4, #0
vmov q0, q2
vmov q1, q3
b L(ipred_cfl_ac_420_w16_hpad)
endfunc
// void cfl_ac_444_16bpc_neon(int16_t *const ac, const pixel *const ypx,
// const ptrdiff_t stride, const int w_pad,
// const int h_pad, const int cw, const int ch);
function ipred_cfl_ac_444_16bpc_neon, export=1
push {r4-r8,lr}
ldrd r4, r5, [sp, #24]
ldr r6, [sp, #32]
clz r8, r5
lsl r4, r4, #2
adr r7, L(ipred_cfl_ac_444_tbl)
sub r8, r8, #26
ldr r8, [r7, r8, lsl #2]
vmov.i16 q8, #0
vmov.i16 q9, #0
vmov.i16 q10, #0
vmov.i16 q11, #0
add r7, r7, r8
sub r8, r6, r4 // height - h_pad
rbit lr, r5 // rbit(width)
rbit r12, r6 // rbit(height)
clz lr, lr // ctz(width)
clz r12, r12 // ctz(height)
add lr, lr, r12 // log2sz
add r12, r1, r2
vdup.32 d31, lr
lsl r2, r2, #1
vneg.s32 d31, d31 // -log2sz
bx r7
.align 2
L(ipred_cfl_ac_444_tbl):
.word L(ipred_cfl_ac_444_w32) - L(ipred_cfl_ac_444_tbl) + CONFIG_THUMB
.word L(ipred_cfl_ac_444_w16) - L(ipred_cfl_ac_444_tbl) + CONFIG_THUMB
.word L(ipred_cfl_ac_444_w8) - L(ipred_cfl_ac_444_tbl) + CONFIG_THUMB
.word L(ipred_cfl_ac_444_w4) - L(ipred_cfl_ac_444_tbl) + CONFIG_THUMB
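// 4:4:4: no subsampling; each luma pixel is simply << 3, again matching the
// common 8x scale of the 4:2:0/4:2:2 paths.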
L(ipred_cfl_ac_444_w4):
1: // Copy and expand input
vld1.16 {d0}, [r1, :64], r2
vld1.16 {d1}, [r12, :64], r2
vld1.16 {d2}, [r1, :64], r2
vld1.16 {d3}, [r12, :64], r2
vshl.i16 q0, q0, #3
vshl.i16 q1, q1, #3
subs r8, r8, #4
vst1.16 {q0, q1}, [r0, :128]!
vaddw.u16 q8, q8, d0
vaddw.u16 q9, q9, d1
vaddw.u16 q10, q10, d2
vaddw.u16 q11, q11, d3
bgt 1b
cmp r4, #0
vmov d0, d3
vmov d1, d3
vmov d2, d3
b L(ipred_cfl_ac_420_w4_hpad)
L(ipred_cfl_ac_444_w8):
1: // Copy and expand input
vld1.16 {q0}, [r1, :128], r2
vld1.16 {q1}, [r12, :128], r2
vld1.16 {q2}, [r1, :128], r2
vld1.16 {q3}, [r12, :128], r2
vshl.i16 q0, q0, #3
vshl.i16 q1, q1, #3
vshl.i16 q2, q2, #3
vshl.i16 q3, q3, #3
subs r8, r8, #4
vst1.16 {q0, q1}, [r0, :128]!
vaddw.u16 q8, q8, d0
vaddw.u16 q9, q9, d1
vaddw.u16 q10, q10, d2
vaddw.u16 q11, q11, d3
vst1.16 {q2, q3}, [r0, :128]!
vaddw.u16 q8, q8, d4
vaddw.u16 q9, q9, d5
vaddw.u16 q10, q10, d6
vaddw.u16 q11, q11, d7
bgt 1b
cmp r4, #0
vmov q0, q3
vmov q1, q3
b L(ipred_cfl_ac_420_w8_hpad)
L(ipred_cfl_ac_444_w16):
cmp r3, #0
bne L(ipred_cfl_ac_444_w16_wpad)
1: // Copy and expand input, without padding
vld1.16 {q0, q1}, [r1, :128], r2
vld1.16 {q2, q3}, [r12, :128], r2
vshl.i16 q0, q0, #3
vshl.i16 q1, q1, #3
vshl.i16 q2, q2, #3
vshl.i16 q3, q3, #3
subs r8, r8, #2
vst1.16 {q0, q1}, [r0, :128]!
vaddw.u16 q8, q8, d0
vaddw.u16 q9, q9, d1
vaddw.u16 q10, q10, d2
vaddw.u16 q11, q11, d3
vst1.16 {q2, q3}, [r0, :128]!
vaddw.u16 q8, q8, d4
vaddw.u16 q9, q9, d5
vaddw.u16 q10, q10, d6
vaddw.u16 q11, q11, d7
bgt 1b
cmp r4, #0
vmov q0, q2
vmov q1, q3
b L(ipred_cfl_ac_420_w16_hpad)
L(ipred_cfl_ac_444_w16_wpad):
1: // Copy and expand input, padding 8
vld1.16 {q0}, [r1, :128], r2
vld1.16 {q2}, [r12, :128], r2
vshl.i16 q0, q0, #3
vshl.i16 q2, q2, #3
vdup.16 q1, d1[3]
vdup.16 q3, d5[3]
subs r8, r8, #2
vst1.16 {q0, q1}, [r0, :128]!
vaddw.u16 q8, q8, d0
vaddw.u16 q9, q9, d1
vaddw.u16 q10, q10, d2
vaddw.u16 q11, q11, d3
vst1.16 {q2, q3}, [r0, :128]!
vaddw.u16 q8, q8, d4
vaddw.u16 q9, q9, d5
vaddw.u16 q10, q10, d6
vaddw.u16 q11, q11, d7
bgt 1b
cmp r4, #0
vmov q0, q2
vmov q1, q3
b L(ipred_cfl_ac_420_w16_hpad)
L(ipred_cfl_ac_444_w32):
adr r7, L(ipred_cfl_ac_444_w32_tbl)
ldr r3, [r7, r3, lsl #1] // (wpad>>1) << 2
asr r2, r2, #1
add r7, r7, r3
bx r7
.align 2
L(ipred_cfl_ac_444_w32_tbl):
.word L(ipred_cfl_ac_444_w32_wpad0) - L(ipred_cfl_ac_444_w32_tbl) + CONFIG_THUMB
.word L(ipred_cfl_ac_444_w32_wpad2) - L(ipred_cfl_ac_444_w32_tbl) + CONFIG_THUMB
.word L(ipred_cfl_ac_444_w32_wpad4) - L(ipred_cfl_ac_444_w32_tbl) + CONFIG_THUMB
.word L(ipred_cfl_ac_444_w32_wpad6) - L(ipred_cfl_ac_444_w32_tbl) + CONFIG_THUMB
L(ipred_cfl_ac_444_w32_wpad0):
sub r2, r2, #32
1: // Copy and expand input, without padding
vld1.16 {q0, q1}, [r1, :128]!
vld1.16 {q2, q3}, [r1, :128], r2
vshl.i16 q0, q0, #3
vshl.i16 q1, q1, #3
vshl.i16 q2, q2, #3
vshl.i16 q3, q3, #3
subs r8, r8, #1
vst1.16 {q0, q1}, [r0, :128]!
vaddw.u16 q8, q8, d0
vaddw.u16 q9, q9, d1
vaddw.u16 q10, q10, d2
vaddw.u16 q11, q11, d3
vst1.16 {q2, q3}, [r0, :128]!
vaddw.u16 q8, q8, d4
vaddw.u16 q9, q9, d5
vaddw.u16 q10, q10, d6
vaddw.u16 q11, q11, d7
bgt 1b
cmp r4, #0
b L(ipred_cfl_ac_444_w32_hpad)
L(ipred_cfl_ac_444_w32_wpad2):
sub r2, r2, #32
1: // Copy and expand input, padding 8
vld1.16 {q0, q1}, [r1, :128]!
vld1.16 {q2}, [r1, :128], r2
vshl.i16 q0, q0, #3
vshl.i16 q1, q1, #3
vshl.i16 q2, q2, #3
subs r8, r8, #1
vst1.16 {q0, q1}, [r0, :128]!
vdup.16 q3, d5[3]
vaddw.u16 q8, q8, d0
vaddw.u16 q9, q9, d1
vaddw.u16 q10, q10, d2
vaddw.u16 q11, q11, d3
vst1.16 {q2, q3}, [r0, :128]!
vaddw.u16 q8, q8, d4
vaddw.u16 q9, q9, d5
vaddw.u16 q10, q10, d6
vaddw.u16 q11, q11, d7
bgt 1b
cmp r4, #0
b L(ipred_cfl_ac_444_w32_hpad)
L(ipred_cfl_ac_444_w32_wpad4):
1: // Copy and expand input, padding 16
vld1.16 {q0, q1}, [r1, :128], r2
vshl.i16 q0, q0, #3
vshl.i16 q1, q1, #3
subs r8, r8, #1
vst1.16 {q0, q1}, [r0, :128]!
vdup.16 q2, d3[3]
vdup.16 q3, d3[3]
vaddw.u16 q8, q8, d0
vaddw.u16 q9, q9, d1
vaddw.u16 q10, q10, d2
vaddw.u16 q11, q11, d3
vst1.16 {q2, q3}, [r0, :128]!
vaddw.u16 q8, q8, d4
vaddw.u16 q9, q9, d5
vaddw.u16 q10, q10, d6
vaddw.u16 q11, q11, d7
bgt 1b
cmp r4, #0
b L(ipred_cfl_ac_444_w32_hpad)
L(ipred_cfl_ac_444_w32_wpad6):
1: // Copy and expand input, padding 24
vld1.16 {q0}, [r1, :128], r2
vshl.i16 q0, q0, #3
subs r8, r8, #1
vdup.16 q1, d1[3]
vst1.16 {q0, q1}, [r0, :128]!
vdup.16 q2, d1[3]
vdup.16 q3, d1[3]
vaddw.u16 q8, q8, d0
vaddw.u16 q9, q9, d1
vaddw.u16 q10, q10, d2
vaddw.u16 q11, q11, d3
vst1.16 {q2, q3}, [r0, :128]!
vaddw.u16 q8, q8, d4
vaddw.u16 q9, q9, d5
vaddw.u16 q10, q10, d6
vaddw.u16 q11, q11, d7
bgt 1b
cmp r4, #0
L(ipred_cfl_ac_444_w32_hpad):
beq 3f // This assumes that all callers already did "cmp r4, #0"
2: // Vertical padding (h_pad > 0)
subs r4, r4, #1
vst1.16 {q0, q1}, [r0, :128]!
vaddw.u16 q8, q8, d0
vaddw.u16 q9, q9, d1
vaddw.u16 q10, q10, d2
vaddw.u16 q11, q11, d3
vst1.16 {q2, q3}, [r0, :128]!
vaddw.u16 q8, q8, d4
vaddw.u16 q9, q9, d5
vaddw.u16 q10, q10, d6
vaddw.u16 q11, q11, d7
bgt 2b
3:
// Multiply the height by eight and reuse the w4 subtracting
lsl r6, r6, #3
b L(ipred_cfl_ac_420_w4_calc_subtract_dc)
endfunc
|
Admenri/urge
| 8,381
|
third_party/dav1d/src/arm/32/looprestoration_common.S
|
/*
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2019, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm.S"
#include "util.S"
// void dav1d_sgr_box3_row_v_neon(int32_t **sumsq, int16_t **sum,
// int32_t *sumsq_out, int16_t *sum_out,
// const int w);
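// Vertical 3-row box sum: out[i] = row0[i] + row1[i] + row2[i], for both the
// sum-of-squares and the sum buffers, over w + 2 elements.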
function sgr_box3_row_v_neon, export=1
push {r4-r9,lr}
ldr r4, [sp, #28]
ldrd r6, r7, [r0]
ldr r0, [r0, #8]
add r4, r4, #2
ldrd r8, r9, [r1]
ldr r1, [r1, #8]
1:
vld1.32 {q8, q9}, [r6]!
vld1.32 {q10, q11}, [r7]!
vld1.16 {q14}, [r8]!
vld1.16 {q15}, [r9]!
subs r4, r4, #8
vadd.i32 q8, q8, q10
vadd.i32 q9, q9, q11
vld1.32 {q12, q13}, [r0]!
vadd.i16 q14, q14, q15
vld1.16 {q15}, [r1]!
vadd.i32 q8, q8, q12
vadd.i32 q9, q9, q13
vadd.i16 q14, q14, q15
vst1.32 {q8, q9}, [r2]!
vst1.16 {q14}, [r3]!
bgt 1b
pop {r4-r9,pc}
endfunc
// void dav1d_sgr_box5_row_v_neon(int32_t **sumsq, int16_t **sum,
// int32_t *sumsq_out, int16_t *sum_out,
// const int w);
function sgr_box5_row_v_neon, export=1
push {r4-r11,lr}
ldr lr, [sp, #36]
ldrd r4, r5, [r0]
ldrd r6, r7, [r0, #8]
ldr r0, [r0, #16]
add lr, lr, #2
ldrd r8, r9, [r1]
ldrd r10, r11, [r1, #8]
ldr r1, [r1, #16]
1:
vld1.32 {q8, q9}, [r4]!
vld1.32 {q10, q11}, [r5]!
vld1.32 {q12, q13}, [r6]!
vld1.32 {q14, q15}, [r7]!
vld1.16 {q0}, [r8]!
vld1.16 {q1}, [r9]!
vld1.16 {q2}, [r10]!
vld1.16 {q3}, [r11]!
subs lr, lr, #8
vadd.i32 q8, q8, q10
vadd.i32 q9, q9, q11
vadd.i32 q12, q12, q14
vadd.i32 q13, q13, q15
vld1.32 {q14, q15}, [r0]!
vadd.i16 q0, q0, q1
vadd.i16 q2, q2, q3
vld1.16 {q3}, [r1]!
vadd.i32 q8, q8, q12
vadd.i32 q9, q9, q13
vadd.i16 q0, q0, q2
vadd.i32 q8, q8, q14
vadd.i32 q9, q9, q15
vadd.i16 q0, q0, q3
vst1.32 {q8, q9}, [r2]!
vst1.16 {q0}, [r3]!
bgt 1b
pop {r4-r11,pc}
endfunc
// void dav1d_sgr_calc_row_ab1_neon(int32_t *a, int16_t *b,
// const int w, const int strength,
// const int bitdepth_max);
// void dav1d_sgr_calc_row_ab2_neon(int32_t *a, int16_t *b,
// const int w, const int strength,
// const int bitdepth_max);
function sgr_calc_row_ab1_neon, export=1
push {r4-r7,lr}
vpush {q4-q7}
ldr r4, [sp, #84]
clz r6, r4
vmov.i32 q15, #9 // n
movw r5, #455
b sgr_calc_ab_neon
endfunc
function sgr_calc_row_ab2_neon, export=1
push {r4-r7,lr}
vpush {q4-q7}
ldr r4, [sp, #84]
clz r6, r4
vmov.i32 q15, #25 // n
mov r5, #164
endfunc
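// Shared tail for the ab1/ab2 entry points above: p = max(a*n - b*b, 0),
// z = (p*s + (1 << 19)) >> 20 clamped to 255 (implemented as a saturating >> 16
// plus a rounding, saturating >> 4), x = sgr_x_by_x[z], then the a buffer gets
// (x * b * one_by_x + (1 << 11)) >> 12 and the b buffer gets x. Only the first
// 48 table entries are looked up with vtbl/vtbx (minus 5); the +5 and the -1
// per exceeded "idx of last N" threshold added afterwards restore those values
// and synthesize the piecewise-constant tail of the table for larger z.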
function sgr_calc_ab_neon
movrel r12, X(sgr_x_by_x)
sub r6, r6, #24 // -bitdepth_min_8
vld1.8 {q8, q9}, [r12, :128]!
add r7, r6, r6 // -2*bitdepth_min_8
vmov.i8 q11, #5
vmov.i8 d10, #55 // idx of last 5
vld1.8 {q10}, [r12, :128]
vmov.i8 d11, #72 // idx of last 4
vmov.i8 d12, #101 // idx of last 3
vmov.i8 d13, #169 // idx of last 2
vmov.i8 d14, #254 // idx of last 1
vmov.i8 d15, #32 // elements consumed in first vtbl
add r2, r2, #2 // w += 2
vdup.32 q12, r3
vsub.i8 q8, q8, q11
vsub.i8 q9, q9, q11
vsub.i8 q10, q10, q11
vdup.32 q13, r7 // -2*bitdepth_min_8
1:
vld1.32 {q0, q1}, [r0, :128] // a
vld1.16 {q2}, [r1, :128] // b
vdup.16 q14, r6 // -bitdepth_min_8
subs r2, r2, #8
vrshl.s32 q0, q0, q13
vrshl.s32 q1, q1, q13
vrshl.s16 q4, q2, q14
vmul.i32 q0, q0, q15 // a * n
vmul.i32 q1, q1, q15 // a * n
vmull.u16 q3, d8, d8 // b * b
vmull.u16 q4, d9, d9 // b * b
vqsub.u32 q0, q0, q3 // imax(a * n - b * b, 0)
vqsub.u32 q1, q1, q4 // imax(a * n - b * b, 0)
vmul.i32 q0, q0, q12 // p * s
vmul.i32 q1, q1, q12 // p * s
vqshrn.u32 d0, q0, #16
vqshrn.u32 d1, q1, #16
vqrshrn.u16 d0, q0, #4 // imin(z, 255)
vcgt.u8 d2, d0, d10 // = -1 if sgr_x_by_x[d0] < 5
vcgt.u8 d3, d0, d11 // = -1 if sgr_x_by_x[d0] < 4
vtbl.8 d1, {q8, q9}, d0
vcgt.u8 d6, d0, d12 // = -1 if sgr_x_by_x[d0] < 3
vsub.i8 d9, d0, d15 // indices for vtbx
vcgt.u8 d7, d0, d13 // = -1 if sgr_x_by_x[d0] < 2
vadd.i8 d2, d2, d3
vtbx.8 d1, {q10}, d9
vcgt.u8 d8, d0, d14 // = -1 if sgr_x_by_x[d0] < 1
vadd.i8 d6, d6, d7
vadd.i8 d8, d8, d22
vadd.i8 d2, d2, d6
vadd.i8 d1, d1, d8
vadd.i8 d1, d1, d2
vmovl.u8 q0, d1 // x
vdup.32 q14, r5 // one_by_x
vmull.u16 q1, d0, d4 // x * BB[i]
vmull.u16 q2, d1, d5 // x * BB[i]
vmul.i32 q1, q1, q14 // x * BB[i] * sgr_one_by_x
vmul.i32 q2, q2, q14 // x * BB[i] * sgr_one_by_x
vrshr.s32 q1, q1, #12 // AA[i]
vrshr.s32 q2, q2, #12 // AA[i]
vst1.32 {q1, q2}, [r0, :128]!
vst1.16 {q0}, [r1, :128]!
bgt 1b
vpop {q4-q7}
pop {r4-r7,pc}
endfunc
|
Admenri/urge
| 26,502
|
third_party/dav1d/src/arm/32/looprestoration.S
|
/*
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2019, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm.S"
#include "util.S"
const right_ext_mask_buf
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
right_ext_mask:
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
endconst
// void dav1d_wiener_filter_h_8bpc_neon(int16_t *dst, const pixel (*left)[4],
// const pixel *src, const int16_t fh[8],
// const int w,
// const enum LrEdgeFlags edges);
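// One row of the horizontal 7-tap Wiener filter. The filter is symmetric, so
// mirrored pixel pairs are summed once and multiplied by a single coefficient;
// the centre pixel also contributes a fixed << 7 term, and the result is
// narrowed with a >> 3 shift into a 16-bit intermediate biased by +2048 for the
// vertical pass.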
function wiener_filter_h_8bpc_neon, export=1
push {r4-r5,lr}
ldrd r4, r5, [sp, #12]
vld1.16 {q0}, [r3, :128]
movw r12, #(1 << 14) - (1 << 2)
vdup.16 q14, r12
vmov.s16 q15, #2048
// Set up the src pointers to include the left edge, for LR_HAVE_LEFT, left == NULL
tst r5, #1 // LR_HAVE_LEFT
beq 1f
// LR_HAVE_LEFT
cmp r1, #0
bne 0f
// left == NULL
sub r2, r2, #3
vld1.8 {q2}, [r2]!
b 2f
0:
// LR_HAVE_LEFT, left != NULL
vld1.8 {q2}, [r2]!
vld1.32 {d3[1]}, [r1]
// Move r2 back to account for the last 3 bytes we loaded earlier,
// which we'll shift out.
sub r2, r2, #3
vext.8 q2, q1, q2, #13
b 2f
1:
vld1.8 {q2}, [r2]!
// !LR_HAVE_LEFT, fill q1 with the leftmost byte
// and shift q2 to have 3x the first byte at the front.
vdup.8 q1, d4[0]
// Move r2 back to account for the last 3 bytes we loaded before,
// which we shifted out.
sub r2, r2, #3
vext.8 q2, q1, q2, #13
2:
vmovl.u8 q1, d4
vmovl.u8 q2, d5
tst r5, #2 // LR_HAVE_RIGHT
bne 4f
3: // !LR_HAVE_RIGHT
// Check whether we need to pad the right edge
cmp r4, #11
bge 4f // If w >= 11, all used input pixels are valid
// 1 <= w < 11, w+3 pixels valid in q1-q2. For w=9 or w=10,
// this ends up called again; it's not strictly needed in those
// cases (we pad enough here), but this keeps the code as simple as possible.
// The padding pixel is q1/2.h[w+2]. r2 points at the next input, ie
// q1/2.h[16]. Thus read from r2[w-14] to find the padding pixel.
sub r12, r4, #14
// Insert padding in q1/2.h[w+3] onwards; fuse the +3 (*2) into the
// buffer pointer.
movrel_local r3, right_ext_mask, -6
ldrb r12, [r2, r12]
sub r3, r3, r4, lsl #1
vdup.16 q13, r12
vld1.8 {q10, q11}, [r3]
vbit q1, q13, q10
vbit q2, q13, q11
4: // Loop horizontally
vext.8 q10, q1, q2, #4
vext.8 q11, q1, q2, #8
vext.8 q9, q1, q2, #2
vext.8 q12, q1, q2, #10
vext.8 q13, q1, q2, #12
vext.8 q8, q1, q2, #6
vadd.i16 q10, q10, q11
vadd.i16 q9, q9, q12
vadd.i16 q13, q13, q1
vshl.s16 q1, q8, #7
vmul.s16 q3, q8, d0[3]
vmla.s16 q3, q10, d1[0]
vmla.s16 q3, q9, d1[1]
vmla.s16 q3, q13, d1[2]
vsub.s16 q1, q1, q14
vqadd.s16 q3, q3, q1
vshr.s16 q3, q3, #3
vadd.s16 q3, q3, q15
subs r4, r4, #8
vst1.16 {q3}, [r0, :128]!
ble 9f
vmov q1, q2
vld1.8 {d4}, [r2]!
tst r5, #2 // LR_HAVE_RIGHT
vmovl.u8 q2, d4
bne 4b // If we don't need to pad, just keep filtering.
b 3b // If we need to pad, check how many pixels we have left.
9:
pop {r4-r5,pc}
endfunc
// void dav1d_wiener_filter_v_8bpc_neon(pixel *dst, int16_t **ptrs,
// const int16_t fv[8], const int w);
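// Vertical pass of the 7-tap Wiener filter over six row pointers; the filter
// is symmetric, and since only six rows are held the last one is used twice
// (rows 5 and 6 of the 7-tap window are identical). The pointer window in ptrs
// is rotated down by one row after each call.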
function wiener_filter_v_8bpc_neon, export=1
push {r4-r9,lr}
vpush {q4-q6}
vld1.16 {q0}, [r2, :128]
ldrd r4, r5, [r1]
ldrd r6, r7, [r1, #8]
ldrd r8, r9, [r1, #16]
1:
vld1.16 {q1, q2}, [r4, :128]!
vld1.16 {q8, q9}, [r9, :128]!
vld1.16 {q5, q6}, [r5, :128]!
vld1.16 {q10, q11}, [r6, :128]!
vld1.16 {q12, q13}, [r8, :128]!
vld1.16 {q14, q15}, [r7, :128]!
subs r3, r3, #16
vadd.i16 q1, q1, q8
vadd.i16 q2, q2, q9
vadd.i16 q5, q5, q8
vadd.i16 q6, q6, q9
vadd.i16 q10, q10, q12
vadd.i16 q11, q11, q13
vmull.s16 q3, d28, d0[3]
vmlal.s16 q3, d2, d0[0]
vmlal.s16 q3, d10, d0[1]
vmlal.s16 q3, d20, d0[2]
vmull.s16 q4, d29, d0[3]
vmlal.s16 q4, d3, d0[0]
vmlal.s16 q4, d11, d0[1]
vmlal.s16 q4, d21, d0[2]
vmull.s16 q8, d30, d0[3]
vmlal.s16 q8, d4, d0[0]
vmlal.s16 q8, d12, d0[1]
vmlal.s16 q8, d22, d0[2]
vmull.s16 q9, d31, d0[3]
vmlal.s16 q9, d5, d0[0]
vmlal.s16 q9, d13, d0[1]
vmlal.s16 q9, d23, d0[2]
vqrshrun.s32 d6, q3, #11
vqrshrun.s32 d7, q4, #11
vqrshrun.s32 d16, q8, #11
vqrshrun.s32 d17, q9, #11
vqmovun.s16 d6, q3
vqmovun.s16 d7, q8
vst1.8 {q3}, [r0, :128]!
bgt 1b
// Shift the pointers, but only update the first 5; the 6th pointer is
// kept as it was before (and the 7th is implicitly identical to the
// 6th).
ldrd r4, r5, [r1, #4]
ldrd r6, r7, [r1, #12]
ldr r8, [r1, #20]
strd r4, r5, [r1]
strd r6, r7, [r1, #8]
str r8, [r1, #16]
vpop {q4-q6}
pop {r4-r9,pc}
endfunc
// void dav1d_wiener_filter_hv_8bpc_neon(pixel *dst, const pixel (*left)[4],
// const pixel *src,
// const int16_t filter[2][8],
// const int w,
// const enum LrEdgeFlags edges,
// int16_t **ptrs);
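// Fused pass: the horizontal filter is applied to one new input row, which is
// stored to ptrs[6] for later rows and also acts as the last row of the
// vertical 7-tap window, so one output row is produced per call before the
// pointer window is rotated.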
function wiener_filter_hv_8bpc_neon, export=1
push {r4-r11,lr}
vpush {q4-q7}
ldrd r4, r5, [sp, #100]
ldr lr, [sp, #108]
vld1.16 {q0, q1}, [r3, :128]
movw r12, #(1 << 14) - (1 << 2)
vdup.16 q14, r12
vmov.s16 q15, #2048
ldrd r6, r7, [lr]
ldrd r8, r9, [lr, #8]
ldrd r10, r11, [lr, #16]
ldr r12, [lr, #24]
// Set up the src pointers to include the left edge, for LR_HAVE_LEFT, left == NULL
tst r5, #1 // LR_HAVE_LEFT
beq 1f
// LR_HAVE_LEFT
cmp r1, #0
bne 0f
// left == NULL
sub r2, r2, #3
vld1.8 {q2}, [r2]!
b 2f
0:
// LR_HAVE_LEFT, left != NULL
vld1.8 {q2}, [r2]!
vld1.32 {d3[1]}, [r1]
// Move r2 back to account for the last 3 bytes we loaded earlier,
// which we'll shift out.
sub r2, r2, #3
vext.8 q2, q1, q2, #13
b 2f
1:
vld1.8 {q2}, [r2]!
// !LR_HAVE_LEFT, fill q3 with the leftmost byte
// and shift q2 to have 3x the first byte at the front.
vdup.8 q3, d4[0]
// Move r2 back to account for the last 3 bytes we loaded before,
// which we shifted out.
sub r2, r2, #3
vext.8 q2, q3, q2, #13
2:
vmovl.u8 q3, d5
vmovl.u8 q2, d4
tst r5, #2 // LR_HAVE_RIGHT
bne 4f
3: // !LR_HAVE_RIGHT
// Check whether we need to pad the right edge
cmp r4, #11
bge 4f // If w >= 11, all used input pixels are valid
// 1 <= w < 11, w+3 pixels valid in q1-q2. For w=9 or w=10,
// this ends up called again; it's not strictly needed in those
// cases (we pad enough here), but this keeps the code as simple as possible.
// The padding pixel is q2/3.h[w+2]. r2 points at the next input, ie
// q2/3.h[16]. Thus read from r2[w-14] to find the padding pixel.
sub lr, r4, #14
// Insert padding in q2/3.h[w+3] onwards; fuse the +3 (*2) into the
// buffer pointer.
movrel_local r3, right_ext_mask, -6
ldrb lr, [r2, lr]
sub r3, r3, r4, lsl #1
vdup.16 q13, lr
vld1.8 {q10, q11}, [r3]
vbit q2, q13, q10
vbit q3, q13, q11
4: // Loop horizontally
vext.8 q10, q2, q3, #4
vext.8 q11, q2, q3, #8
vext.8 q9, q2, q3, #2
vext.8 q12, q2, q3, #10
vext.8 q13, q2, q3, #12
vext.8 q8, q2, q3, #6
vadd.i16 q10, q10, q11
vadd.i16 q9, q9, q12
vadd.i16 q13, q13, q2
vld1.16 {q6}, [r7, :128]!
vshl.s16 q2, q8, #7
vld1.16 {q11}, [r11, :128]!
vsub.s16 q2, q2, q14
vld1.16 {q7}, [r8, :128]!
vmul.s16 q4, q8, d0[3]
vmla.s16 q4, q10, d1[0]
vmla.s16 q4, q9, d1[1]
vmla.s16 q4, q13, d1[2]
vld1.16 {q10}, [r10, :128]!
vqadd.s16 q4, q4, q2
vld1.16 {q9}, [r9, :128]!
vshr.s16 q4, q4, #3
vld1.16 {q5}, [r6, :128]!
vadd.s16 q4, q4, q15
vadd.s16 q6, q6, q11
vadd.s16 q7, q7, q10
vadd.s16 q5, q5, q4
vmull.s16 q8, d18, d2[3]
vmlal.s16 q8, d12, d2[1]
vmlal.s16 q8, d14, d2[2]
vmlal.s16 q8, d10, d2[0]
vmull.s16 q9, d19, d2[3]
vmlal.s16 q9, d13, d2[1]
vmlal.s16 q9, d15, d2[2]
vmlal.s16 q9, d11, d2[0]
vqrshrun.s32 d16, q8, #11
vqrshrun.s32 d17, q9, #11
vst1.16 {q4}, [r12, :128]!
vqmovun.s16 d16, q8
subs r4, r4, #8
vst1.8 {d16}, [r0, :64]!
ble 9f
vmov q2, q3
vld1.8 {d6}, [r2]!
tst r5, #2 // LR_HAVE_RIGHT
vmovl.u8 q3, d6
bne 4b // If we don't need to pad, just keep filtering.
b 3b // If we need to pad, check how many pixels we have left.
9:
// Reload ptrs from arguments on the stack
ldr lr, [sp, #108]
// Rotate the window of pointers. Shift the 6 pointers downwards one step.
ldrd r6, r7, [lr, #4]
ldrd r8, r9, [lr, #12]
ldrd r10, r11, [lr, #20]
strd r6, r7, [lr]
strd r8, r9, [lr, #8]
strd r10, r11, [lr, #16]
// The topmost pointer, ptrs[6], which isn't used as input, is set to
// ptrs[0], which will be used as output for the next _hv call.
// At the start of the filtering, the caller may set ptrs[6] to the
// right next buffer to fill in, instead.
str r6, [lr, #24]
vpop {q4-q7}
pop {r4-r11,pc}
endfunc
#include "looprestoration_tmpl.S"
// void dav1d_sgr_box3_row_h_8bpc_neon(int32_t *sumsq, int16_t *sum,
// const pixel (*left)[4],
// const pixel *src, const int w,
// const enum LrEdgeFlags edges);
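// Horizontal 3-pixel box sums of one row, both of the pixels (sum) and of
// their squares (sumsq); w + 2 output entries are produced, with the left edge
// taken from the left buffer or replicated, as requested by the edges flags.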
function sgr_box3_row_h_8bpc_neon, export=1
push {r4-r5,lr}
ldrd r4, r5, [sp, #12]
add r4, r4, #2 // w += 2
tst r5, #1 // LR_HAVE_LEFT
beq 1f
cmp r2, #0
bne 0f
// LR_HAVE_LEFT && left == NULL
sub r3, r3, #2
vld1.8 {q0}, [r3]!
b 2f
0:
// LR_HAVE_LEFT, left != NULL
vld1.8 {q0}, [r3]!
vld1.32 {d3[]}, [r2]
// Move r3 back to account for the last 2 bytes we loaded earlier,
// which we'll shift out.
sub r3, r3, #2
vext.8 q0, q1, q0, #14
b 2f
1:
vld1.8 {q0}, [r3]!
// !LR_HAVE_LEFT, fill q1 with the leftmost byte
// and shift q0 to have 2x the first byte at the front.
vdup.8 q1, d0[0]
// Move r3 back to account for the last 2 bytes we loaded before,
// which we shifted out.
sub r3, r3, #2
vext.8 q0, q1, q0, #14
2:
vmull.u8 q1, d0, d0
vmull.u8 q2, d1, d1
tst r5, #2 // LR_HAVE_RIGHT
bne 4f
// If we'll need to pad the right edge, load that byte to pad with
// here since we can find it pretty easily from here.
sub lr, r4, #(2 + 16 - 2 + 1)
ldrb lr, [r3, lr]
// Fill q14 with the right padding pixel
vdup.8 q14, lr
3: // !LR_HAVE_RIGHT
// Check whether we need to pad the right edge
cmp r4, #10
bge 4f // If w >= 10, all used input pixels are valid
// 1 <= w < 10, w pixels valid in q0. For w=9, this ends up called
// again; it's not strictly needed in those cases (we pad enough here),
// but this keeps the code as simple as possible.
// Insert padding in q0.b[w] onwards
movrel_local lr, right_ext_mask
sub lr, lr, r4
vld1.8 {q13}, [lr]
vbit q0, q14, q13
// Update the precalculated squares
vmull.u8 q1, d0, d0
vmull.u8 q2, d1, d1
4: // Loop horizontally
vext.8 d16, d0, d1, #1
vext.8 d17, d0, d1, #2
vaddl.u8 q3, d0, d16
vext.8 q9, q1, q2, #2
vaddw.u8 q3, q3, d17
vext.8 q10, q1, q2, #4
vaddl.u16 q12, d2, d18
vaddl.u16 q13, d3, d19
vaddw.u16 q12, q12, d20
vaddw.u16 q13, q13, d21
subs r4, r4, #8
vst1.16 {q3}, [r1, :128]!
vst1.32 {q12, q13}, [r0, :128]!
ble 9f
tst r5, #2 // LR_HAVE_RIGHT
vld1.8 {d6}, [r3]!
vmov q1, q2
vext.8 q0, q0, q3, #8
vmull.u8 q2, d6, d6
bne 4b // If we don't need to pad, just keep summing.
b 3b // If we need to pad, check how many pixels we have left.
9:
pop {r4-r5,pc}
endfunc
// void dav1d_sgr_box5_row_h_8bpc_neon(int32_t *sumsq, int16_t *sum,
// const pixel (*left)[4],
// const pixel *src, const int w,
// const enum LrEdgeFlags edges);
function sgr_box5_row_h_8bpc_neon, export=1
push {r4-r5,lr}
ldrd r4, r5, [sp, #12]
add r4, r4, #2 // w += 2
tst r5, #1 // LR_HAVE_LEFT
beq 1f
cmp r2, #0
bne 0f
// LR_HAVE_LEFT && left == NULL
sub r3, r3, #3
vld1.8 {q0}, [r3]!
b 2f
0:
// LR_HAVE_LEFT, left != NULL
vld1.8 {q0}, [r3]!
vld1.32 {d3[]}, [r2]
// Move r3 back to account for the last 3 bytes we loaded earlier,
// which we'll shift out.
sub r3, r3, #3
vext.8 q0, q1, q0, #13
b 2f
1:
vld1.8 {q0}, [r3]!
// !LR_HAVE_LEFT, fill q1 with the leftmost byte
// and shift q0 to have 3x the first byte at the front.
vdup.8 q1, d0[0]
// Move r3 back to account for the last 3 bytes we loaded before,
// which we shifted out.
sub r3, r3, #3
vext.8 q0, q1, q0, #13
2:
vmull.u8 q1, d0, d0
vmull.u8 q2, d1, d1
tst r5, #2 // LR_HAVE_RIGHT
bne 4f
// If we'll need to pad the right edge, load that byte to pad with
// here since we can find it pretty easily from here.
sub lr, r4, #(2 + 16 - 3 + 1)
ldrb lr, [r3, lr]
// Fill q14 with the right padding pixel
vdup.8 q14, lr
3: // !LR_HAVE_RIGHT
// Check whether we need to pad the right edge
cmp r4, #11
bge 4f // If w >= 11, all used input pixels are valid
// 1 <= w < 11, w+1 pixels valid in q0. For w=9 or w=10,
// this ends up called again; it's not strictly needed in those
// cases (we pad enough here), but this keeps the code as simple as possible.
// Insert padding in q0.b[w+1] onwards; fuse the +1 into the
// buffer pointer.
movrel_local lr, right_ext_mask, -1
sub lr, lr, r4
vld1.8 {q13}, [lr]
vbit q0, q14, q13
// Update the precalculated squares
vmull.u8 q1, d0, d0
vmull.u8 q2, d1, d1
4: // Loop horizontally
vext.8 d16, d0, d1, #1
vext.8 d17, d0, d1, #2
vext.8 d18, d0, d1, #3
vext.8 d19, d0, d1, #4
vaddl.u8 q3, d0, d16
vaddl.u8 q12, d17, d18
vaddw.u8 q3, q3, d19
vadd.u16 q3, q3, q12
vext.8 q8, q1, q2, #2
vext.8 q9, q1, q2, #4
vext.8 q10, q1, q2, #6
vext.8 q11, q1, q2, #8
vaddl.u16 q12, d2, d16
vaddl.u16 q13, d3, d17
vaddl.u16 q8, d18, d20
vaddl.u16 q9, d19, d21
vaddw.u16 q12, q12, d22
vaddw.u16 q13, q13, d23
vadd.i32 q12, q12, q8
vadd.i32 q13, q13, q9
subs r4, r4, #8
vst1.16 {q3}, [r1, :128]!
vst1.32 {q12, q13}, [r0, :128]!
ble 9f
tst r5, #2 // LR_HAVE_RIGHT
vld1.8 {d6}, [r3]!
vmov q1, q2
vext.8 q0, q0, q3, #8
vmull.u8 q2, d6, d6
bne 4b // If we don't need to pad, just keep summing.
b 3b // If we need to pad, check how many pixels we have left.
9:
pop {r4-r5,pc}
endfunc
// void dav1d_sgr_box35_row_h_8bpc_neon(int32_t *sumsq3, int16_t *sum3,
// int32_t *sumsq5, int16_t *sum5,
// const pixel (*left)[4],
// const pixel *src, const int w,
// const enum LrEdgeFlags edges);
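// Single pass producing both box sizes for the same row: the 3-pixel sums
// and sums of squares go to sum3/sumsq3 (r1/r0) and the 5-pixel ones to
// sum5/sumsq5 (r3/r2), 8 outputs per iteration.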
function sgr_box35_row_h_8bpc_neon, export=1
push {r4-r7,lr}
ldrd r4, r5, [sp, #20]
ldrd r6, r7, [sp, #28]
add r6, r6, #2 // w += 2
tst r7, #1 // LR_HAVE_LEFT
beq 1f
cmp r4, #0
bne 0f
// LR_HAVE_LEFT && left == NULL
sub r5, r5, #3
vld1.8 {q0}, [r5]!
b 2f
0:
// LR_HAVE_LEFT, left != NULL
vld1.8 {q0}, [r5]!
vld1.32 {d3[]}, [r4]
// Move r5 back to account for the last 3 bytes we loaded earlier,
// which we'll shift out.
sub r5, r5, #3
vext.8 q0, q1, q0, #13
b 2f
1:
vld1.8 {q0}, [r5]!
// !LR_HAVE_LEFT, fill q1 with the leftmost byte
// and shift q0 to have 3x the first byte at the front.
vdup.8 q1, d0[0]
// Move r5 back to account for the last 3 bytes we loaded before,
// which we shifted out.
sub r5, r5, #3
vext.8 q0, q1, q0, #13
2:
vmull.u8 q1, d0, d0
vmull.u8 q2, d1, d1
tst r7, #2 // LR_HAVE_RIGHT
bne 4f
// If we'll need to pad the right edge, load that byte to pad with
// here since we can find it pretty easily from here.
sub lr, r6, #(2 + 16 - 3 + 1)
ldrb lr, [r5, lr]
// Fill q14 with the right padding pixel
vdup.8 q14, lr
3: // !LR_HAVE_RIGHT
// Check whether we need to pad the right edge
cmp r6, #11
bge 4f // If w >= 11, all used input pixels are valid
// 1 <= w < 11, w+1 pixels valid in q0. For w=9 or w=10,
// this ends up called again; it's not strictly needed in those
// cases (we pad enough here), but keeping the code as simple as possible.
// Insert padding in q0.b[w+1] onwards; fuse the +1 into the
// buffer pointer.
movrel_local lr, right_ext_mask, -1
sub lr, lr, r6
vld1.8 {q13}, [lr]
vbit q0, q14, q13
// Update the precalculated squares
vmull.u8 q1, d0, d0
vmull.u8 q2, d1, d1
4: // Loop horizontally
vext.8 d16, d0, d1, #1
vext.8 d17, d0, d1, #2
vext.8 d18, d0, d1, #3
vext.8 d19, d0, d1, #4
vaddl.u8 q3, d16, d17
vaddl.u8 q12, d0, d19
vaddw.u8 q3, q3, d18
vext.8 q8, q1, q2, #2
vext.8 q9, q1, q2, #4
vext.8 q10, q1, q2, #6
vext.8 q11, q1, q2, #8
vst1.16 {q3}, [r1, :128]!
vadd.u16 q3, q3, q12
vaddl.u16 q12, d16, d18
vaddl.u16 q13, d17, d19
vaddl.u16 q8, d2, d22
vaddl.u16 q9, d3, d23
vaddw.u16 q12, q12, d20
vaddw.u16 q13, q13, d21
vst1.32 {q12, q13}, [r0, :128]!
vadd.i32 q12, q12, q8
vadd.i32 q13, q13, q9
subs r6, r6, #8
vst1.16 {q3}, [r3, :128]!
vst1.32 {q12, q13}, [r2, :128]!
ble 9f
tst r7, #2 // LR_HAVE_RIGHT
vld1.8 {d6}, [r5]!
vmov q1, q2
vext.8 q0, q0, q3, #8
vmull.u8 q2, d6, d6
bne 4b // If we don't need to pad, just keep summing.
b 3b // If we need to pad, check how many pixels we have left.
9:
pop {r4-r7,pc}
endfunc
sgr_funcs 8
|
Admenri/urge | 6,216 | third_party/dav1d/src/arm/32/util.S |
/******************************************************************************
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2015 Martin Storsjo
* Copyright © 2015 Janne Grunau
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#ifndef DAV1D_SRC_ARM_32_UTIL_S
#define DAV1D_SRC_ARM_32_UTIL_S
#include "config.h"
#include "src/arm/asm.S"
#include "src/arm/arm-arch.h"
.macro v4bx rd
#if __ARM_ARCH >= 5 || defined(__ARM_ARCH_4T__)
bx \rd
#else
mov pc, \rd
#endif
.endm
.macro v4blx rd
#if __ARM_ARCH >= 5
blx \rd
#else
mov lr, pc
v4bx \rd
#endif
.endm
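// movrel_local: load the address of a local symbol into \rd. On ARMv6T2+
// non-PIC builds this is a movw/movt pair; the fallback stores a
// PC-relative offset in a literal, with the 8-byte (ARM) or 4-byte (Thumb)
// PC read-ahead folded into the stored word via the CONFIG_THUMB term.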
.macro movrel_local rd, val, offset=0
#if (__ARM_ARCH >= 7 || defined(__ARM_ARCH_6T2__)) && !defined(PIC)
movw \rd, #:lower16:\val+\offset
movt \rd, #:upper16:\val+\offset
#else
ldr \rd, 90001f
b 90002f
90001:
.word \val + \offset - (90002f + 8 - 4 * CONFIG_THUMB)
90002:
add \rd, \rd, pc
#endif
.endm
.macro movrel rd, val, offset=0
#if defined(PIC) && defined(__APPLE__)
ldr \rd, 1f
b 2f
1:
.word 3f - (2f + 8 - 4 * CONFIG_THUMB)
2:
ldr \rd, [pc, \rd]
.if \offset < 0
sub \rd, \rd, #-(\offset)
.elseif \offset > 0
add \rd, \rd, #\offset
.endif
.non_lazy_symbol_pointer
3:
.indirect_symbol \val
.word 0
.text
#else
movrel_local \rd, \val, \offset
#endif
.endm
// This macro clobbers r7 (and r12 on windows) and stores data at the
// bottom of the stack; sp is the start of the space allocated that
// the caller can use.
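// A minimal usage sketch (the size is illustrative): pair each allocation
// with an add_sp_align of the same constant, e.g.
// sub_sp_align 384
// ... use [sp] .. [sp, #383] as scratch ...
// add_sp_align 384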
.macro sub_sp_align space
#if CONFIG_THUMB
mov r7, sp
and r7, r7, #15
#else
and r7, sp, #15
#endif
sub sp, sp, r7
// Now the stack is aligned, store the amount of adjustment back
// on the stack, as we don't want to waste a register as frame
// pointer.
str r7, [sp, #-16]!
#ifdef _WIN32
.if \space > 8192
// Here, we'd need to touch two (or more) pages while decrementing
// the stack pointer.
.error "sub_sp_align doesn't support values over 8K at the moment"
.elseif \space > 4096
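// Touch the page just below sp before moving sp past it, so that the
// stack guard page is committed in order, as Windows requires.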
sub r7, sp, #4096
ldr r12, [r7]
sub r7, r7, #(\space - 4096)
mov sp, r7
.else
sub sp, sp, #\space
.endif
#else
.if \space >= 4096
sub sp, sp, #(\space)/4096*4096
.endif
.if (\space % 4096) != 0
sub sp, sp, #(\space)%4096
.endif
#endif
.endm
.macro add_sp_align space
.if \space >= 4096
add sp, sp, #(\space)/4096*4096
.endif
.if (\space % 4096) != 0
add sp, sp, #(\space)%4096
.endif
ldr r7, [sp], #16
// Add back the original stack adjustment
add sp, sp, r7
.endm
.macro transpose_8x8b q0, q1, q2, q3, r0, r1, r2, r3, r4, r5, r6, r7
vtrn.32 \q0, \q2
vtrn.32 \q1, \q3
vtrn.16 \r0, \r2
vtrn.16 \r1, \r3
vtrn.16 \r4, \r6
vtrn.16 \r5, \r7
vtrn.8 \r0, \r1
vtrn.8 \r2, \r3
vtrn.8 \r4, \r5
vtrn.8 \r6, \r7
.endm
.macro transpose_8x8h r0, r1, r2, r3, r4, r5, r6, r7, d0, d1, d2, d3, d4, d5, d6, d7
vswp \d0, \d4
vswp \d1, \d5
vswp \d2, \d6
vswp \d3, \d7
vtrn.32 \r0, \r2
vtrn.32 \r1, \r3
vtrn.32 \r4, \r6
vtrn.32 \r5, \r7
vtrn.16 \r0, \r1
vtrn.16 \r2, \r3
vtrn.16 \r4, \r5
vtrn.16 \r6, \r7
.endm
.macro transpose_4x8b q0, q1, r0, r1, r2, r3
vtrn.16 \q0, \q1
vtrn.8 \r0, \r1
vtrn.8 \r2, \r3
.endm
.macro transpose_4x4s q0, q1, q2, q3, r0, r1, r2, r3, r4, r5, r6, r7
vswp \r1, \r4 // vtrn.64 \q0, \q2
vswp \r3, \r6 // vtrn.64 \q1, \q3
vtrn.32 \q0, \q1
vtrn.32 \q2, \q3
.endm
.macro transpose_4x4h q0, q1, r0, r1, r2, r3
vtrn.32 \q0, \q1
vtrn.16 \r0, \r1
vtrn.16 \r2, \r3
.endm
.macro transpose_4x8h r0, r1, r2, r3
vtrn.32 \r0, \r2
vtrn.32 \r1, \r3
vtrn.16 \r0, \r1
vtrn.16 \r2, \r3
.endm
#endif /* DAV1D_SRC_ARM_32_UTIL_S */
|
Admenri/urge | 119,791 | third_party/dav1d/src/arm/32/mc.S |
/*
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2018, Janne Grunau
* Copyright © 2018, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm.S"
#include "util.S"
.macro avg dst0, dst1, t0, t1, t2, t3
vld1.16 {\t0,\t1}, [r2, :128]!
vld1.16 {\t2,\t3}, [r3, :128]!
vadd.i16 \t0, \t0, \t2
vadd.i16 \t1, \t1, \t3
vqrshrun.s16 \dst0, \t0, #5
vqrshrun.s16 \dst1, \t1, #5
.endm
.macro w_avg dst0, dst1, t0, t1, t2, t3
vld1.16 {\t0,\t1}, [r2, :128]!
vld1.16 {\t2,\t3}, [r3, :128]!
vsub.i16 \t0, \t2, \t0
vsub.i16 \t1, \t3, \t1
vqdmulh.s16 \t0, \t0, q15
vqdmulh.s16 \t1, \t1, q15
vadd.i16 \t0, \t2, \t0
vadd.i16 \t1, \t3, \t1
vqrshrun.s16 \dst0, \t0, #4
vqrshrun.s16 \dst1, \t1, #4
.endm
.macro mask dst0, dst1, t0, t1, t2, t3
vld1.8 {q14}, [lr, :128]!
vld1.16 {\t0,\t1}, [r2, :128]!
vmul.i8 q14, q14, q15
vld1.16 {\t2,\t3}, [r3, :128]!
vshll.i8 q13, d28, #8
vshll.i8 q14, d29, #8
vsub.i16 \t0, \t2, \t0
vsub.i16 \t1, \t3, \t1
vqdmulh.s16 \t0, \t0, q13
vqdmulh.s16 \t1, \t1, q14
vadd.i16 \t0, \t2, \t0
vadd.i16 \t1, \t3, \t1
vqrshrun.s16 \dst0, \t0, #4
vqrshrun.s16 \dst1, \t1, #4
.endm
.macro bidir_fn type
function \type\()_8bpc_neon, export=1
push {r4-r6,lr}
ldrd r4, r5, [sp, #16]
clz r4, r4
.ifnc \type, avg
ldr lr, [sp, #24]
.endif
.ifc \type, w_avg
vdup.s16 q15, lr
vneg.s16 q15, q15
vshl.i16 q15, q15, #11
.endif
.ifc \type, mask
vmov.i8 q15, #256-2
.endif
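// Jump table indexed by clz(w) - 24: w == 128 selects entry 0 and w == 4
// entry 5, giving each power-of-two width its own store loop.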
adr r12, L(\type\()_tbl)
sub r4, r4, #24
ldr r4, [r12, r4, lsl #2]
\type d16, d17, q0, q1, q2, q3
add r12, r12, r4
bx r12
.align 2
L(\type\()_tbl):
.word 1280f - L(\type\()_tbl) + CONFIG_THUMB
.word 640f - L(\type\()_tbl) + CONFIG_THUMB
.word 320f - L(\type\()_tbl) + CONFIG_THUMB
.word 160f - L(\type\()_tbl) + CONFIG_THUMB
.word 80f - L(\type\()_tbl) + CONFIG_THUMB
.word 4f - L(\type\()_tbl) + CONFIG_THUMB
4:
add r6, r0, r1
lsl r1, r1, #1
cmp r5, #4
vst1.32 {d16[0]}, [r0, :32], r1
vst1.32 {d16[1]}, [r6, :32], r1
vst1.32 {d17[0]}, [r0, :32], r1
vst1.32 {d17[1]}, [r6, :32], r1
beq 0f
\type d18, d19, q0, q1, q2, q3
cmp r5, #8
vst1.32 {d18[0]}, [r0, :32], r1
vst1.32 {d18[1]}, [r6, :32], r1
vst1.32 {d19[0]}, [r0, :32], r1
vst1.32 {d19[1]}, [r6, :32], r1
beq 0f
\type d16, d17, q0, q1, q2, q3
vst1.32 {d16[0]}, [r0, :32], r1
vst1.32 {d16[1]}, [r6, :32], r1
\type d18, d19, q0, q1, q2, q3
vst1.32 {d17[0]}, [r0, :32], r1
vst1.32 {d17[1]}, [r6, :32], r1
vst1.32 {d18[0]}, [r0, :32], r1
vst1.32 {d18[1]}, [r6, :32], r1
vst1.32 {d19[0]}, [r0, :32], r1
vst1.32 {d19[1]}, [r6, :32], r1
pop {r4-r6,pc}
80:
add r6, r0, r1
lsl r1, r1, #1
8:
vst1.8 {d16}, [r0, :64], r1
\type d18, d19, q0, q1, q2, q3
vst1.8 {d17}, [r6, :64], r1
vst1.8 {d18}, [r0, :64], r1
subs r5, r5, #4
vst1.8 {d19}, [r6, :64], r1
ble 0f
\type d16, d17, q0, q1, q2, q3
b 8b
160:
add r6, r0, r1
lsl r1, r1, #1
16:
\type d18, d19, q0, q1, q2, q3
vst1.8 {q8}, [r0, :128], r1
\type d20, d21, q0, q1, q2, q3
vst1.8 {q9}, [r6, :128], r1
\type d22, d23, q0, q1, q2, q3
vst1.8 {q10}, [r0, :128], r1
subs r5, r5, #4
vst1.8 {q11}, [r6, :128], r1
ble 0f
\type d16, d17, q0, q1, q2, q3
b 16b
320:
add r6, r0, r1
lsl r1, r1, #1
32:
\type d18, d19, q0, q1, q2, q3
\type d20, d21, q0, q1, q2, q3
vst1.8 {q8, q9}, [r0, :128], r1
\type d22, d23, q0, q1, q2, q3
subs r5, r5, #2
vst1.8 {q10, q11}, [r6, :128], r1
ble 0f
\type d16, d17, q0, q1, q2, q3
b 32b
640:
add r6, r0, #32
64:
\type d18, d19, q0, q1, q2, q3
\type d20, d21, q0, q1, q2, q3
\type d22, d23, q0, q1, q2, q3
vst1.8 {q8, q9}, [r0, :128], r1
\type d16, d17, q0, q1, q2, q3
vst1.8 {q10, q11}, [r6, :128], r1
\type d18, d19, q0, q1, q2, q3
\type d20, d21, q0, q1, q2, q3
vst1.8 {q8, q9}, [r0, :128], r1
\type d22, d23, q0, q1, q2, q3
subs r5, r5, #2
vst1.8 {q10, q11}, [r6, :128], r1
ble 0f
\type d16, d17, q0, q1, q2, q3
b 64b
1280:
sub r1, r1, #32
add r6, r0, #64
128:
\type d18, d19, q0, q1, q2, q3
\type d20, d21, q0, q1, q2, q3
\type d22, d23, q0, q1, q2, q3
vst1.8 {q8, q9}, [r0, :128]!
\type d16, d17, q0, q1, q2, q3
vst1.8 {q10, q11}, [r0, :128], r1
\type d18, d19, q0, q1, q2, q3
\type d20, d21, q0, q1, q2, q3
vst1.8 {q8, q9}, [r6, :128]!
\type d22, d23, q0, q1, q2, q3
subs r5, r5, #1
vst1.8 {q10, q11}, [r6, :128], r1
ble 0f
\type d16, d17, q0, q1, q2, q3
b 128b
0:
pop {r4-r6,pc}
endfunc
.endm
bidir_fn avg
bidir_fn w_avg
bidir_fn mask
.macro w_mask_fn type
function w_mask_\type\()_8bpc_neon, export=1
push {r4-r9,lr}
ldrd r4, r5, [sp, #28]
ldrd r6, r7, [sp, #36]
clz r8, r4
adr r9, L(w_mask_\type\()_tbl)
sub r8, r8, #24
ldr r8, [r9, r8, lsl #2]
add r9, r9, r8
movw r12, #6903
vdup.16 q14, r12
.if \type == 444
vmov.i8 q15, #64
.elseif \type == 422
vdup.8 d0, r7 // d0[] <- sign
vmov.i8 d30, #129
vsub.i8 d30, d30, d0 // 129 - sign
.elseif \type == 420
vdup.16 q0, r7 // d0[] <- sign
vmov.i16 q15, #256
vsub.i16 q15, q15, q0 // 256 - sign
.endif
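// Mask output resolution differs per type: 444 stores the full-resolution
// mask, 422 halves it horizontally (vpadd) and 420 halves it both
// horizontally and vertically, with the sign term folded into the rounding.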
add r12, r0, r1
lsl r1, r1, #1
bx r9
.align 2
L(w_mask_\type\()_tbl):
.word 1280f - L(w_mask_\type\()_tbl) + CONFIG_THUMB
.word 640f - L(w_mask_\type\()_tbl) + CONFIG_THUMB
.word 320f - L(w_mask_\type\()_tbl) + CONFIG_THUMB
.word 160f - L(w_mask_\type\()_tbl) + CONFIG_THUMB
.word 8f - L(w_mask_\type\()_tbl) + CONFIG_THUMB
.word 4f - L(w_mask_\type\()_tbl) + CONFIG_THUMB
4:
vld1.16 {d0, d1, d2, d3}, [r2, :128]! // tmp1 (four rows at once)
vld1.16 {d4, d5, d6, d7}, [r3, :128]! // tmp2 (four rows at once)
subs r5, r5, #4
vsub.i16 q8, q2, q0 // tmp2-tmp1
vsub.i16 q9, q3, q1
vabd.s16 q10, q0, q2 // (abs(tmp1[x] - tmp2[x]))
vabd.s16 q11, q1, q3
vqsub.u16 q10, q14, q10 // 6903 - abs ()
vqsub.u16 q11, q14, q11
vshr.s16 q10, q10, #8 // 64-m = (6903 - abs()) >> 8
vshr.s16 q11, q11, #8
vshl.s16 q12, q10, #9 // (64-m)<<9
vshl.s16 q13, q11, #9
vqdmulh.s16 q12, q12, q8 // ((tmp2-tmp1)*(64-m)<<9)>>15
vqdmulh.s16 q13, q13, q9
vadd.i16 q12, q12, q0 // (((tmp2-tmp1)*(64-m)<<9)>>15) + tmp1
vadd.i16 q13, q13, q1
vqrshrun.s16 d24, q12, #4 // (((((tmp2-tmp1)*(64-m)<<9)>>15) + tmp1) + 8) >> 4
vqrshrun.s16 d25, q13, #4
.if \type == 444
vmovn.u16 d20, q10 // 64 - m
vmovn.u16 d21, q11
vsub.i8 q10, q15, q10 // m
vst1.8 {d20, d21}, [r6, :128]!
.elseif \type == 422
vpadd.s16 d20, d20, d21 // (64 - m) + (64 - n) (column wise addition)
vpadd.s16 d21, d22, d23
vmovn.s16 d6, q10
vhsub.u8 d6, d30, d6 // ((129 - sign) - ((64 - m) + (64 - n))) >> 1
vst1.8 {d6}, [r6, :64]!
.elseif \type == 420
vadd.s16 d20, d20, d21 // (64 - my1) + (64 - my2) (row wise addition)
vadd.s16 d21, d22, d23
vpadd.s16 d20, d20, d21 // (128 - m) + (128 - n) (column wise addition)
vsub.s16 d20, d30, d20 // (256 - sign) - ((128 - m) + (128 - n))
vrshrn.u16 d20, q10, #2 // ((256 - sign) - ((128 - m) + (128 - n)) + 2) >> 2
vst1.32 {d20[0]}, [r6, :32]!
.endif
vst1.32 {d24[0]}, [r0, :32], r1
vst1.32 {d24[1]}, [r12, :32], r1
vst1.32 {d25[0]}, [r0, :32], r1
vst1.32 {d25[1]}, [r12, :32], r1
bgt 4b
pop {r4-r9,pc}
8:
vld1.16 {d0, d1, d2, d3}, [r2, :128]! // tmp1y1, tmp1y2
vld1.16 {d4, d5, d6, d7}, [r3, :128]! // tmp2y1, tmp2y2
subs r5, r5, #2
vsub.i16 q8, q2, q0 // tmp2y1 - tmp1y1
vsub.i16 q9, q3, q1 // tmp2y2 - tmp1y2
vabd.s16 q10, q0, q2 // abs(tmp1y1 - tmp2y1)
vabd.s16 q11, q1, q3 // abs(tmp1y2 - tmp2y2)
vqsub.u16 q10, q14, q10 // 6903 - abs(tmp1y1 - tmp2y1)
vqsub.u16 q11, q14, q11 // 6903 - abs(tmp1y2 - tmp2y2)
vshr.s16 q10, q10, #8 // 64 - my1 = 6903 - abs(tmp1y1 - tmp2y1) >> 8
vshr.s16 q11, q11, #8 // 64 - my2 = 6903 - abs(tmp1y2 - tmp2y2) >> 8
vshl.s16 q12, q10, #9 // (64 - my1) << 9
vshl.s16 q13, q11, #9 // (64 - my2) << 9
vqdmulh.s16 q12, q12, q8 // ((tmp2y1 - tmp1y1) * (64 - my1) << 9) >> 15
vqdmulh.s16 q13, q13, q9 // ((tmp2y2 - tmp1y2) * (64 - my2) << 9) >> 15
vadd.s16 q12, q12, q0 // (((tmp2y1 - tmp1y1) * (64 - my1) << 9) >> 15) + tmp1y1
vadd.s16 q13, q13, q1 // (((tmp2y2 - tmp1y2) * (64 - my2) << 9) >> 15) + tmp1y2
vqrshrun.s16 d24, q12, #4 // (((((tmp2y1 - tmp1y1) * (64 - my1) << 9) >> 15) + tmp1y1) + 8) >> 4
vqrshrun.s16 d25, q13, #4 // (((((tmp2y2 - tmp1y2) * (64 - my2) << 9) >> 15) + tmp1y2) + 8) >> 4
.if \type == 444
vmovn.u16 d20, q10 // 64 - m
vmovn.u16 d21, q11
vsub.i8 q10, q15, q10 // m
vst1.8 {d20, d21}, [r6, :128]!
.elseif \type == 422
vpadd.s16 d20, d20, d21 // (64 - my1) + (64 - ny1) (column wise addition)
vpadd.s16 d21, d22, d23 // (64 - my2) + (64 - ny2)
vmovn.s16 d20, q10
vhsub.u8 d20, d30, d20 // ((129 - sign) - ((64 - my1/y2) + (64 - ny1/y2))) >> 1
vst1.8 {d20}, [r6, :64]!
.elseif \type == 420
vadd.s16 q10, q10, q11 // (64 - my1) + (64 - my2) (row wise addition)
vpadd.s16 d20, d20, d21 // (128 - m) + (128 - n) (column wise addition)
vsub.s16 d20, d30, d20 // (256 - sign) - ((128 - m) + (128 - n))
vrshrn.u16 d20, q10, #2 // ((256 - sign) - ((128 - m) + (128 - n)) + 2) >> 2
vst1.32 {d20[0]}, [r6, :32]!
.endif
vst1.16 {d24}, [r0, :64], r1
vst1.16 {d25}, [r12, :64], r1
bgt 8b
pop {r4-r9,pc}
1280:
640:
320:
160:
sub r1, r1, r4
.if \type == 444
add lr, r6, r4
.elseif \type == 422
add lr, r6, r4, lsr #1
.endif
add r9, r3, r4, lsl #1
add r7, r2, r4, lsl #1
161:
mov r8, r4
16:
vld1.16 {d0, d1, d2, d3}, [r2, :128]! // tmp1y1
vld1.16 {d4, d5, d6, d7}, [r3, :128]! // tmp2y1
vld1.16 {d16, d17, d18, d19}, [r7, :128]! // tmp1y2
subs r8, r8, #16
vsub.i16 q2, q2, q0 // tmp2y1 - tmp1y1
vsub.i16 q3, q3, q1
vabs.s16 q10, q2 // abs(tmp2y1 - tmp1y1)
vabs.s16 q11, q3
vqsub.u16 q10, q14, q10 // 6903 - abs(tmp1y1 - tmp2y1)
vqsub.u16 q11, q14, q11
vshr.s16 q10, q10, #8 // 64 - my1 = 6903 - abs(tmp1y1 - tmp2y1) >> 8
vshr.s16 q11, q11, #8
vshl.s16 q12, q10, #9 // (64 - my1) << 9
vshl.s16 q13, q11, #9
vqdmulh.s16 q12, q12, q2 // ((tmp2y1 - tmp1y1) * (64 - my1) << 9) >> 15
vqdmulh.s16 q13, q13, q3
vadd.i16 q12, q12, q0 // (((tmp2y1 - tmp1y1) * (64 - my1) << 9) >> 15) + tmp1y1
vadd.i16 q13, q13, q1
vld1.16 {d0, d1, d2, d3}, [r9, :128]! // tmp2y2
.if \type == 444
vmovn.u16 d20, q10 // 64 - my1
vmovn.u16 d21, q11
vsub.i8 q10, q15, q10 // my1
vst1.8 {d20, d21}, [r6, :128]!
.elseif \type == 422
vpadd.s16 d20, d20, d21 // (64 - my1) + (64 - ny1) (column wise addition)
vpadd.s16 d21, d22, d23
vmovn.s16 d20, q10
vhsub.u8 d20, d30, d20 // ((129 - sign) - ((64 - my1) + (64 - ny1))) >> 1
vst1.8 {d20}, [r6, :64]!
.endif
vqrshrun.s16 d24, q12, #4 // (((((tmp2y1 - tmp1y1)*(64 - my1) << 9) >> 15) + tmp1y1) + 8) >> 4
vqrshrun.s16 d25, q13, #4
vsub.i16 q0, q0, q8 // tmp2y2 - tmp1y2
vsub.i16 q1, q1, q9
vst1.16 {d24, d25}, [r0, :128]! // store dsty1
vabs.s16 q2, q0 // abs(tmp2y2 - tmp1y2)
vabs.s16 q3, q1
vqsub.u16 q2, q14, q2 // 6903 - abs(tmp2y2 - tmp1y2)
vqsub.u16 q3, q14, q3
vshr.s16 q2, q2, #8 // (6903 - abs(tmp2y2 - tmp1y2)) >> 8
vshr.s16 q3, q3, #8
vshl.s16 q12, q2, #9 // (64 - my2) << 9
vshl.s16 q13, q3, #9
.if \type == 444
vmovn.u16 d4, q2 // 64 - my2
vmovn.u16 d5, q3
vsub.i8 q2, q15, q2 // my2
vst1.8 {d4, d5}, [lr, :128]!
.elseif \type == 422
vpadd.s16 d4, d4, d5 // (64 - my2) + (64 - ny2) (column wise addition)
vpadd.s16 d5, d6, d7
vmovn.s16 d4, q2
vhsub.u8 d4, d30, d4 // ((129 - sign) - ((64 - my2) + (64 - ny2))) >> 1
vst1.8 {d4}, [lr, :64]!
.elseif \type == 420
vadd.s16 q10, q10, q2 // (64 - my1) + (64 - my2) (row wise addition)
vadd.s16 q11, q11, q3
vpadd.s16 d20, d20, d21 // (128 - m) + (128 - n) (column wise addition)
vpadd.s16 d21, d22, d23
vsub.s16 q10, q15, q10 // (256 - sign) - ((128 - m) + (128 - n))
vrshrn.u16 d20, q10, #2 // ((256 - sign) - ((128 - m) + (128 - n)) + 2) >> 2
vst1.8 {d20}, [r6, :64]!
.endif
vqdmulh.s16 q12, q12, q0 // ((tmp2y2 - tmp1y2) * (64 - my2) << 9) >> 15
vqdmulh.s16 q13, q13, q1
vadd.i16 q12, q12, q8 // (((tmp2y2 - tmp1y2) * (64 - my2) << 9) >> 15) + tmp1y2
vadd.i16 q13, q13, q9
vqrshrun.s16 d24, q12, #4 // (((((tmp2y2 - tmp1y2)*(64 - my2) << 9) >> 15) + tmp1y2) + 8) >> 4
vqrshrun.s16 d25, q13, #4
vst1.16 {d24, d25}, [r12, :128]! // store dsty2
bgt 16b
subs r5, r5, #2
add r2, r2, r4, lsl #1
add r3, r3, r4, lsl #1
add r7, r7, r4, lsl #1
add r9, r9, r4, lsl #1
.if \type == 444
add r6, r6, r4
add lr, lr, r4
.elseif \type == 422
add r6, r6, r4, lsr #1
add lr, lr, r4, lsr #1
.endif
add r0, r0, r1
add r12, r12, r1
bgt 161b
pop {r4-r9,pc}
endfunc
.endm
w_mask_fn 444
w_mask_fn 422
w_mask_fn 420
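// blend: dst = (tmp * mask + dst * (64 - mask) + 32) >> 6, with a separate
// 6-bit mask value per pixel.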
function blend_8bpc_neon, export=1
push {r4-r5,lr}
ldrd r4, r5, [sp, #12]
clz lr, r3
adr r3, L(blend_tbl)
sub lr, lr, #26
ldr lr, [r3, lr, lsl #2]
add r3, r3, lr
bx r3
.align 2
L(blend_tbl):
.word 320f - L(blend_tbl) + CONFIG_THUMB
.word 160f - L(blend_tbl) + CONFIG_THUMB
.word 80f - L(blend_tbl) + CONFIG_THUMB
.word 40f - L(blend_tbl) + CONFIG_THUMB
40:
vmov.i8 d22, #64
add r12, r0, r1
lsl r1, r1, #1
4:
vld1.u8 {d2}, [r5, :64]!
vld1.u8 {d1}, [r2, :64]!
vld1.32 {d0[]}, [r0, :32]
subs r4, r4, #2
vld1.32 {d0[1]}, [r12, :32]
vsub.i8 d3, d22, d2
vmull.u8 q8, d1, d2
vmlal.u8 q8, d0, d3
vrshrn.i16 d20, q8, #6
vst1.32 {d20[0]}, [r0, :32], r1
vst1.32 {d20[1]}, [r12, :32], r1
bgt 4b
pop {r4-r5,pc}
80:
vmov.i8 d16, #64
add r12, r0, r1
lsl r1, r1, #1
8:
vld1.u8 {q1}, [r5, :128]!
vld1.u8 {q2}, [r2, :128]!
vld1.u8 {d0}, [r0, :64]
vsub.i8 d17, d16, d2
vld1.u8 {d1}, [r12, :64]
subs r4, r4, #2
vsub.i8 d18, d16, d3
vmull.u8 q3, d2, d4
vmlal.u8 q3, d0, d17
vmull.u8 q10, d3, d5
vmlal.u8 q10, d1, d18
vrshrn.i16 d22, q3, #6
vrshrn.i16 d23, q10, #6
vst1.u8 {d22}, [r0, :64], r1
vst1.u8 {d23}, [r12, :64], r1
bgt 8b
pop {r4-r5,pc}
160:
vmov.i8 q12, #64
add r12, r0, r1
lsl r1, r1, #1
16:
vld1.u8 {q1, q2}, [r5, :128]!
vld1.u8 {q8, q9}, [r2, :128]!
vld1.u8 {q0}, [r0, :128]
subs r4, r4, #2
vsub.i8 q15, q12, q1
vld1.u8 {q13}, [r12, :128]
vmull.u8 q3, d16, d2
vmlal.u8 q3, d0, d30
vmull.u8 q14, d17, d3
vmlal.u8 q14, d1, d31
vsub.i8 q15, q12, q2
vrshrn.i16 d20, q3, #6
vrshrn.i16 d21, q14, #6
vmull.u8 q3, d18, d4
vmlal.u8 q3, d26, d30
vmull.u8 q14, d19, d5
vmlal.u8 q14, d27, d31
vrshrn.i16 d22, q3, #6
vrshrn.i16 d23, q14, #6
vst1.u8 {q10}, [r0, :128], r1
vst1.u8 {q11}, [r12, :128], r1
bgt 16b
pop {r4-r5,pc}
320:
vmov.i8 q10, #64
32:
vld1.u8 {q2, q3}, [r5, :128]!
vld1.u8 {q8, q9}, [r2, :128]!
vld1.u8 {q0, q1}, [r0, :128]
subs r4, r4, #1
vsub.i8 q11, q10, q2
vmull.u8 q15, d16, d4
vmlal.u8 q15, d0, d22
vmull.u8 q14, d17, d5
vmlal.u8 q14, d1, d23
vsub.i8 q11, q10, q3
vrshrn.i16 d24, q15, #6
vrshrn.i16 d25, q14, #6
vmull.u8 q15, d18, d6
vmlal.u8 q15, d2, d22
vmull.u8 q14, d19, d7
vmlal.u8 q14, d3, d23
vrshrn.i16 d26, q15, #6
vrshrn.i16 d27, q14, #6
vst1.u8 {q12, q13}, [r0, :128], r1
bgt 32b
pop {r4-r5,pc}
endfunc
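// blend_h: the same blend as above, but with one mask value per row taken
// from obmc_masks; only the first h - h/4 rows are written (see the sub
// below).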
function blend_h_8bpc_neon, export=1
push {r4-r5,lr}
ldr r4, [sp, #12]
movrel r5, X(obmc_masks)
add r5, r5, r4
sub r4, r4, r4, lsr #2
clz lr, r3
adr r12, L(blend_h_tbl)
sub lr, lr, #24
ldr lr, [r12, lr, lsl #2]
add r12, r12, lr
bx r12
.align 2
L(blend_h_tbl):
.word 1280f - L(blend_h_tbl) + CONFIG_THUMB
.word 640f - L(blend_h_tbl) + CONFIG_THUMB
.word 320f - L(blend_h_tbl) + CONFIG_THUMB
.word 160f - L(blend_h_tbl) + CONFIG_THUMB
.word 80f - L(blend_h_tbl) + CONFIG_THUMB
.word 40f - L(blend_h_tbl) + CONFIG_THUMB
.word 20f - L(blend_h_tbl) + CONFIG_THUMB
20:
vmov.i8 d22, #64
add r12, r0, r1
lsl r1, r1, #1
2:
vld1.16 {d2[], d3[]}, [r5, :16]!
vld1.32 {d1[]}, [r2, :32]!
subs r4, r4, #2
vld1.16 {d0[]}, [r0, :16]
vzip.8 d2, d3
vsub.i8 d4, d22, d2
vld1.16 {d0[1]}, [r12, :16]
vmull.u8 q8, d1, d2
vmlal.u8 q8, d0, d4
vrshrn.i16 d20, q8, #6
vst1.16 {d20[0]}, [r0, :16], r1
vst1.16 {d20[1]}, [r12, :16], r1
bgt 2b
pop {r4-r5,pc}
40:
vmov.i8 d22, #64
add r12, r0, r1
lsl r1, r1, #1
4:
vld2.u8 {d2[], d3[]}, [r5, :16]!
vld1.u8 {d1}, [r2, :64]!
subs r4, r4, #2
vext.u8 d2, d2, d3, #4
vld1.32 {d0[]}, [r0, :32]
vsub.i8 d6, d22, d2
vld1.32 {d0[1]}, [r12, :32]
vmull.u8 q8, d1, d2
vmlal.u8 q8, d0, d6
vrshrn.i16 d20, q8, #6
vst1.32 {d20[0]}, [r0, :32], r1
vst1.32 {d20[1]}, [r12, :32], r1
bgt 4b
pop {r4-r5,pc}
80:
vmov.i8 q8, #64
add r12, r0, r1
lsl r1, r1, #1
8:
vld2.u8 {d2[], d3[]}, [r5, :16]!
vld1.u8 {d4, d5}, [r2, :128]!
vld1.u8 {d0}, [r0, :64]
vsub.i8 q9, q8, q1
vld1.u8 {d1}, [r12, :64]
subs r4, r4, #2
vmull.u8 q3, d2, d4
vmlal.u8 q3, d0, d18
vmull.u8 q10, d3, d5
vmlal.u8 q10, d1, d19
vrshrn.i16 d22, q3, #6
vrshrn.i16 d23, q10, #6
vst1.u8 {d22}, [r0, :64], r1
vst1.u8 {d23}, [r12, :64], r1
bgt 8b
pop {r4-r5,pc}
160:
vmov.i8 q12, #64
add r12, r0, r1
lsl r1, r1, #1
16:
vld2.u8 {d28[], d29[]}, [r5, :16]!
vld1.u8 {d2, d3, d4, d5}, [r2, :128]!
vsub.i8 q15, q12, q14
vld1.u8 {q0}, [r0, :128]
subs r4, r4, #2
vld1.u8 {q13}, [r12, :128]
vmull.u8 q3, d2, d28
vmlal.u8 q3, d0, d30
vmull.u8 q8, d3, d28
vmlal.u8 q8, d1, d30
vrshrn.i16 d18, q3, #6
vrshrn.i16 d19, q8, #6
vmull.u8 q3, d4, d29
vmlal.u8 q3, d26, d31
vmull.u8 q8, d5, d29
vmlal.u8 q8, d27, d31
vrshrn.i16 d20, q3, #6
vrshrn.i16 d21, q8, #6
vst1.u8 {q9}, [r0, :128], r1
vst1.u8 {q10}, [r12, :128], r1
bgt 16b
pop {r4-r5,pc}
320:
640:
1280:
vmov.i8 d20, #64
sub r1, r1, r3
321:
vld1.u8 {d6[]}, [r5]!
vsub.i8 d7, d20, d6
mov r12, r3
32:
vld1.u8 {q8, q9}, [r2, :128]!
vld1.u8 {q0, q1}, [r0, :128]
vmull.u8 q15, d16, d6
vmlal.u8 q15, d0, d7
vmull.u8 q14, d17, d6
vmlal.u8 q14, d1, d7
vrshrn.i16 d0, q15, #6
vrshrn.i16 d1, q14, #6
vmull.u8 q15, d18, d6
vmlal.u8 q15, d2, d7
vmull.u8 q14, d19, d6
vmlal.u8 q14, d3, d7
vrshrn.i16 d2, q15, #6
vrshrn.i16 d3, q14, #6
subs r12, r12, #32
vst1.u8 {q0, q1}, [r0, :128]!
bgt 32b
add r0, r0, r1
subs r4, r4, #1
bgt 321b
pop {r4-r5,pc}
endfunc
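// blend_v: the same blend again, but with one mask value per column taken
// from obmc_masks; only the leftmost 3/4 of each row is written
// (e.g. 3 of 4, 6 of 8, 24 of 32 pixels).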
function blend_v_8bpc_neon, export=1
push {r4,lr}
ldr r4, [sp, #8]
movrel lr, X(obmc_masks)
add lr, lr, r3
clz r12, r3
adr r3, L(blend_v_tbl)
sub r12, r12, #26
ldr r12, [r3, r12, lsl #2]
add r3, r3, r12
bx r3
.align 2
L(blend_v_tbl):
.word 320f - L(blend_v_tbl) + CONFIG_THUMB
.word 160f - L(blend_v_tbl) + CONFIG_THUMB
.word 80f - L(blend_v_tbl) + CONFIG_THUMB
.word 40f - L(blend_v_tbl) + CONFIG_THUMB
.word 20f - L(blend_v_tbl) + CONFIG_THUMB
20:
vmov.i8 d22, #64
vld1.8 {d2[]}, [lr]
add r12, r0, r1
lsl r1, r1, #1
vsub.i8 d3, d22, d2
2:
vld1.16 {d1[0]}, [r2, :16]!
vld1.8 {d0[]}, [r0]
subs r4, r4, #2
vld1.8 {d1[1]}, [r2]
vld1.8 {d0[1]}, [r12]
vmull.u8 q2, d1, d2
vmlal.u8 q2, d0, d3
vrshrn.i16 d6, q2, #6
add r2, r2, #2
vst1.8 {d6[0]}, [r0], r1
vst1.8 {d6[1]}, [r12], r1
bgt 2b
pop {r4,pc}
40:
vmov.i8 d22, #64
vld1.32 {d4[]}, [lr, :32]
add r12, r0, r1
lsl r1, r1, #1
vsub.i8 d5, d22, d4
sub r1, r1, #2
4:
vld1.u8 {d2}, [r2, :64]!
vld1.32 {d0[]}, [r0, :32]
vld1.32 {d0[1]}, [r12, :32]
subs r4, r4, #2
vmull.u8 q3, d2, d4
vmlal.u8 q3, d0, d5
vrshrn.i16 d20, q3, #6
vst1.16 {d20[0]}, [r0, :16]!
vst1.16 {d20[2]}, [r12, :16]!
vst1.8 {d20[2]}, [r0], r1
vst1.8 {d20[6]}, [r12], r1
bgt 4b
pop {r4,pc}
80:
vmov.i8 d16, #64
vld1.u8 {d2}, [lr, :64]
add r12, r0, r1
lsl r1, r1, #1
vsub.i8 d17, d16, d2
sub r1, r1, #4
8:
vld1.u8 {d4, d5}, [r2, :128]!
vld1.u8 {d0}, [r0, :64]
vld1.u8 {d1}, [r12, :64]
subs r4, r4, #2
vmull.u8 q3, d2, d4
vmlal.u8 q3, d0, d17
vmull.u8 q10, d2, d5
vmlal.u8 q10, d1, d17
vrshrn.i16 d22, q3, #6
vrshrn.i16 d23, q10, #6
vst1.32 {d22[0]}, [r0, :32]!
vst1.32 {d23[0]}, [r12, :32]!
vst1.16 {d22[2]}, [r0, :16], r1
vst1.16 {d23[2]}, [r12, :16], r1
bgt 8b
pop {r4,pc}
160:
vmov.i8 q12, #64
vld1.u8 {q14}, [lr, :128]
add r12, r0, r1
lsl r1, r1, #1
vsub.i8 q11, q12, q14
sub r1, r1, #8
16:
vld1.u8 {q1, q2}, [r2, :128]!
vld1.u8 {q0}, [r0, :128]
subs r4, r4, #2
vld1.u8 {q13}, [r12, :128]
vmull.u8 q3, d2, d28
vmlal.u8 q3, d0, d22
vmull.u8 q8, d3, d29
vmlal.u8 q8, d1, d23
vrshrn.i16 d18, q3, #6
vrshrn.i16 d19, q8, #6
vmull.u8 q3, d4, d28
vmlal.u8 q3, d26, d22
vmull.u8 q8, d5, d29
vmlal.u8 q8, d27, d23
vrshrn.i16 d20, q3, #6
vrshrn.i16 d21, q8, #6
vst1.u8 {d18}, [r0, :64]!
vst1.u8 {d20}, [r12, :64]!
vst1.32 {d19[0]}, [r0, :32], r1
vst1.32 {d21[0]}, [r12, :32], r1
bgt 16b
pop {r4,pc}
320:
vmov.i8 q10, #64
vld1.u8 {q2, q3}, [lr, :128]
vsub.i8 q11, q10, q2
vsub.i8 d24, d20, d6
32:
vld1.u8 {q8, q9}, [r2, :128]!
vld1.u8 {d0, d1, d2}, [r0, :64]
subs r4, r4, #1
vmull.u8 q15, d16, d4
vmlal.u8 q15, d0, d22
vmull.u8 q14, d17, d5
vmlal.u8 q14, d1, d23
vrshrn.i16 d0, q15, #6
vrshrn.i16 d1, q14, #6
vmull.u8 q15, d18, d6
vmlal.u8 q15, d2, d24
vrshrn.i16 d2, q15, #6
vst1.u8 {d0, d1, d2}, [r0, :64], r1
bgt 32b
pop {r4,pc}
endfunc
// This has got the same signature as the put_8tap functions,
// assumes that the caller has loaded the h argument into r5,
// and assumes that r8 is set to (clz(w)-24).
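// (The body is an unfiltered copy: w pixels per row for h rows, with the
// larger widths unrolled.)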
function put_neon
adr r9, L(put_tbl)
ldr r8, [r9, r8, lsl #2]
add r9, r9, r8
bx r9
.align 2
L(put_tbl):
.word 1280f - L(put_tbl) + CONFIG_THUMB
.word 640f - L(put_tbl) + CONFIG_THUMB
.word 32f - L(put_tbl) + CONFIG_THUMB
.word 160f - L(put_tbl) + CONFIG_THUMB
.word 8f - L(put_tbl) + CONFIG_THUMB
.word 4f - L(put_tbl) + CONFIG_THUMB
.word 2f - L(put_tbl) + CONFIG_THUMB
2:
vld1.16 {d0[]}, [r2], r3
vld1.16 {d1[]}, [r2], r3
subs r5, r5, #2
vst1.16 {d0[0]}, [r0, :16], r1
vst1.16 {d1[0]}, [r0, :16], r1
bgt 2b
pop {r4-r11,pc}
4:
vld1.32 {d0[]}, [r2], r3
vld1.32 {d1[]}, [r2], r3
subs r5, r5, #2
vst1.32 {d0[0]}, [r0, :32], r1
vst1.32 {d1[0]}, [r0, :32], r1
bgt 4b
pop {r4-r11,pc}
8:
vld1.8 {d0}, [r2], r3
vld1.8 {d1}, [r2], r3
subs r5, r5, #2
vst1.8 {d0}, [r0, :64], r1
vst1.8 {d1}, [r0, :64], r1
bgt 8b
pop {r4-r11,pc}
160:
add r8, r0, r1
lsl r1, r1, #1
add r9, r2, r3
lsl r3, r3, #1
16:
vld1.8 {q0}, [r2], r3
vld1.8 {q1}, [r9], r3
subs r5, r5, #2
vst1.8 {q0}, [r0, :128], r1
vst1.8 {q1}, [r8, :128], r1
bgt 16b
pop {r4-r11,pc}
32:
vld1.8 {q0, q1}, [r2], r3
subs r5, r5, #1
vst1.8 {q0, q1}, [r0, :128], r1
bgt 32b
pop {r4-r11,pc}
640:
sub r1, r1, #32
sub r3, r3, #32
64:
vld1.8 {q0, q1}, [r2]!
vst1.8 {q0, q1}, [r0, :128]!
vld1.8 {q2, q3}, [r2], r3
subs r5, r5, #1
vst1.8 {q2, q3}, [r0, :128], r1
bgt 64b
pop {r4-r11,pc}
1280:
sub r1, r1, #96
sub r3, r3, #96
128:
vld1.8 {q8, q9}, [r2]!
vst1.8 {q8, q9}, [r0, :128]!
vld1.8 {q10, q11}, [r2]!
vst1.8 {q10, q11}, [r0, :128]!
vld1.8 {q12, q13}, [r2]!
vst1.8 {q12, q13}, [r0, :128]!
vld1.8 {q14, q15}, [r2], r3
subs r5, r5, #1
vst1.8 {q14, q15}, [r0, :128], r1
bgt 128b
pop {r4-r11,pc}
endfunc
// This has got the same signature as the put_8tap functions,
// assumes that the caller has loaded the h argument into r4,
// and assumes that r8 is set to (clz(w)-24), and r7 to w*2.
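// The prep path widens each pixel to 16 bit and scales it by 1 << 4
// (vshll #4), matching the intermediate format consumed by avg/w_avg/mask.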
function prep_neon
adr r9, L(prep_tbl)
ldr r8, [r9, r8, lsl #2]
add r9, r9, r8
bx r9
.align 2
L(prep_tbl):
.word 1280f - L(prep_tbl) + CONFIG_THUMB
.word 640f - L(prep_tbl) + CONFIG_THUMB
.word 320f - L(prep_tbl) + CONFIG_THUMB
.word 160f - L(prep_tbl) + CONFIG_THUMB
.word 8f - L(prep_tbl) + CONFIG_THUMB
.word 4f - L(prep_tbl) + CONFIG_THUMB
4:
vld1.32 {d0[]}, [r1], r2
vld1.32 {d2[]}, [r1], r2
subs r4, r4, #2
vshll.u8 q0, d0, #4
vshll.u8 q1, d2, #4
vst1.16 {d1, d2}, [r0, :64]!
bgt 4b
pop {r4-r11,pc}
8:
vld1.8 {d0}, [r1], r2
vld1.8 {d2}, [r1], r2
subs r4, r4, #2
vshll.u8 q0, d0, #4
vshll.u8 q1, d2, #4
vst1.16 {q0, q1}, [r0, :128]!
bgt 8b
pop {r4-r11,pc}
160:
add r9, r1, r2
lsl r2, r2, #1
add r8, r0, r7
lsl r7, r7, #1
16:
vld1.8 {q2}, [r1], r2
vld1.8 {q3}, [r9], r2
subs r4, r4, #2
vshll.u8 q0, d4, #4
vshll.u8 q1, d5, #4
vshll.u8 q2, d6, #4
vshll.u8 q3, d7, #4
vst1.16 {q0, q1}, [r0, :128], r7
vst1.16 {q2, q3}, [r8, :128], r7
bgt 16b
pop {r4-r11,pc}
320:
add r8, r0, r3
32:
vld1.8 {q0, q1}, [r1], r2
subs r4, r4, #2
vshll.u8 q8, d0, #4
vshll.u8 q9, d1, #4
vld1.8 {q2, q3}, [r1], r2
vshll.u8 q10, d2, #4
vshll.u8 q11, d3, #4
vshll.u8 q12, d4, #4
vst1.16 {q8, q9}, [r0, :128], r7
vshll.u8 q13, d5, #4
vst1.16 {q10, q11}, [r8, :128], r7
vshll.u8 q14, d6, #4
vst1.16 {q12, q13}, [r0, :128], r7
vshll.u8 q15, d7, #4
vst1.16 {q14, q15}, [r8, :128], r7
bgt 32b
pop {r4-r11,pc}
640:
sub r2, r2, #32
add r8, r0, #32
mov r6, #64
64:
vld1.8 {q0, q1}, [r1]!
subs r4, r4, #1
vshll.u8 q8, d0, #4
vshll.u8 q9, d1, #4
vld1.8 {q2, q3}, [r1], r2
vshll.u8 q10, d2, #4
vshll.u8 q11, d3, #4
vshll.u8 q12, d4, #4
vst1.16 {q8, q9}, [r0, :128], r6
vshll.u8 q13, d5, #4
vshll.u8 q14, d6, #4
vst1.16 {q10, q11}, [r8, :128], r6
vshll.u8 q15, d7, #4
vst1.16 {q12, q13}, [r0, :128], r6
vst1.16 {q14, q15}, [r8, :128], r6
bgt 64b
pop {r4-r11,pc}
1280:
sub r2, r2, #96
add r8, r0, #32
mov r6, #64
128:
vld1.8 {q0, q1}, [r1]!
vld1.8 {q2, q3}, [r1]!
vshll.u8 q10, d0, #4
vshll.u8 q11, d1, #4
vshll.u8 q12, d2, #4
vshll.u8 q13, d3, #4
vshll.u8 q14, d4, #4
vshll.u8 q15, d5, #4
vld1.8 {q8, q9}, [r1]!
vst1.16 {q10, q11}, [r0, :128], r6
vst1.16 {q12, q13}, [r8, :128], r6
vshll.u8 q0, d6, #4
vshll.u8 q1, d7, #4
vshll.u8 q2, d16, #4
vshll.u8 q3, d17, #4
vshll.u8 q8, d18, #4
vshll.u8 q9, d19, #4
vld1.8 {q10, q11}, [r1], r2
vst1.16 {q14, q15}, [r0, :128], r6
vst1.16 {q0, q1}, [r8, :128], r6
vshll.u8 q12, d20, #4
vshll.u8 q13, d21, #4
vshll.u8 q14, d22, #4
vshll.u8 q15, d23, #4
subs r4, r4, #1
vst1.16 {q2, q3}, [r0, :128], r6
vst1.16 {q8, q9}, [r8, :128], r6
vst1.16 {q12, q13}, [r0, :128], r6
vst1.16 {q14, q15}, [r8, :128], r6
bgt 128b
pop {r4-r11,pc}
endfunc
.macro load_slice s0, s1, strd, wd, d0, d1, d2, d3, d4, d5, d6
vld1.\wd {\d0[]}, [\s0], \strd
vld1.\wd {\d1[]}, [\s1], \strd
.ifnb \d2
vld1.\wd {\d2[]}, [\s0], \strd
vld1.\wd {\d3[]}, [\s1], \strd
.endif
.ifnb \d4
vld1.\wd {\d4[]}, [\s0], \strd
.endif
.ifnb \d5
vld1.\wd {\d5[]}, [\s1], \strd
.endif
.ifnb \d6
vld1.\wd {\d6[]}, [\s0], \strd
.endif
.endm
.macro load_reg s0, s1, strd, d0, d1, d2, d3, d4, d5, d6
vld1.8 {\d0}, [\s0], \strd
vld1.8 {\d1}, [\s1], \strd
.ifnb \d2
vld1.8 {\d2}, [\s0], \strd
vld1.8 {\d3}, [\s1], \strd
.endif
.ifnb \d4
vld1.8 {\d4}, [\s0], \strd
.endif
.ifnb \d5
vld1.8 {\d5}, [\s1], \strd
.endif
.ifnb \d6
vld1.8 {\d6}, [\s0], \strd
.endif
.endm
.macro load_16 s0, s1, strd, d0, d1, d2, d3, d4, d5, d6
load_slice \s0, \s1, \strd, 16, \d0, \d1, \d2, \d3, \d4, \d5, \d6
.endm
.macro load_32 s0, s1, strd, d0, d1, d2, d3, d4, d5, d6
load_slice \s0, \s1, \strd, 32, \d0, \d1, \d2, \d3, \d4, \d5, \d6
.endm
.macro interleave_1_16 r0, r1, r2, r3, r4
vext.8 \r0, \r0, \r1, #6
vext.8 \r1, \r1, \r2, #6
.ifnb \r3
vext.8 \r2, \r2, \r3, #6
vext.8 \r3, \r3, \r4, #6
.endif
.endm
.macro interleave_1_32 r0, r1, r2, r3, r4
vext.8 \r0, \r0, \r1, #4
vext.8 \r1, \r1, \r2, #4
.ifnb \r3
vext.8 \r2, \r2, \r3, #4
vext.8 \r3, \r3, \r4, #4
.endif
.endm
.macro vmovl_u8 q0, d0, q1, d1, q2, d2, q3, d3, q4, d4, q5, d5, q6, d6
vmovl.u8 \q0, \d0
vmovl.u8 \q1, \d1
.ifnb \q2
vmovl.u8 \q2, \d2
vmovl.u8 \q3, \d3
.endif
.ifnb \q4
vmovl.u8 \q4, \d4
.endif
.ifnb \q5
vmovl.u8 \q5, \d5
.endif
.ifnb \q6
vmovl.u8 \q6, \d6
.endif
.endm
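// mul_mla_4/mul_mla_8_*: 4- and 8-tap FIR accumulation with the filter
// coefficients held in d0/d1; the _1/_2 variants also produce a second
// output using the same inputs shifted by one or two registers.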
.macro mul_mla_4 d, s0, s1, s2, s3
vmul.s16 \d, \s0, d0[0]
vmla.s16 \d, \s1, d0[1]
vmla.s16 \d, \s2, d0[2]
vmla.s16 \d, \s3, d0[3]
.endm
.macro mul_mla_8_0 d0, s0, s1, s2, s3, s4, s5, s6, s7
vmul.s16 \d0, \s0, d0[0]
vmla.s16 \d0, \s1, d0[1]
vmla.s16 \d0, \s2, d0[2]
vmla.s16 \d0, \s3, d0[3]
vmla.s16 \d0, \s4, d1[0]
vmla.s16 \d0, \s5, d1[1]
vmla.s16 \d0, \s6, d1[2]
vmla.s16 \d0, \s7, d1[3]
.endm
.macro mul_mla_8_1 d0, d1, s0, s1, s2, s3, s4, s5, s6, s7, s8
vmul.s16 \d0, \s0, d0[0]
vmla.s16 \d0, \s1, d0[1]
vmla.s16 \d0, \s2, d0[2]
vmla.s16 \d0, \s3, d0[3]
vmla.s16 \d0, \s4, d1[0]
vmla.s16 \d0, \s5, d1[1]
vmla.s16 \d0, \s6, d1[2]
vmla.s16 \d0, \s7, d1[3]
vmul.s16 \d1, \s1, d0[0]
vmla.s16 \d1, \s2, d0[1]
vmla.s16 \d1, \s3, d0[2]
vmla.s16 \d1, \s4, d0[3]
vmla.s16 \d1, \s5, d1[0]
vmla.s16 \d1, \s6, d1[1]
vmla.s16 \d1, \s7, d1[2]
vmla.s16 \d1, \s8, d1[3]
.endm
.macro mul_mla_8_2 d0, d1, s0, s1, s2, s3, s4, s5, s6, s7, s8, s9
vmul.s16 \d0, \s0, d0[0]
vmla.s16 \d0, \s1, d0[1]
vmla.s16 \d0, \s2, d0[2]
vmla.s16 \d0, \s3, d0[3]
vmla.s16 \d0, \s4, d1[0]
vmla.s16 \d0, \s5, d1[1]
vmla.s16 \d0, \s6, d1[2]
vmla.s16 \d0, \s7, d1[3]
vmul.s16 \d1, \s2, d0[0]
vmla.s16 \d1, \s3, d0[1]
vmla.s16 \d1, \s4, d0[2]
vmla.s16 \d1, \s5, d0[3]
vmla.s16 \d1, \s6, d1[0]
vmla.s16 \d1, \s7, d1[1]
vmla.s16 \d1, \s8, d1[2]
vmla.s16 \d1, \s9, d1[3]
.endm
.macro vqrshrun_s16 shift, q0, d0, q1, d1, q2, d2, q3, d3
vqrshrun.s16 \d0, \q0, #\shift
.ifnb \q1
vqrshrun.s16 \d1, \q1, #\shift
.endif
.ifnb \q2
vqrshrun.s16 \d2, \q2, #\shift
vqrshrun.s16 \d3, \q3, #\shift
.endif
.endm
.macro vrshr_s16 shift, r0, r1, r2, r3
vrshr.s16 \r0, \r0, #\shift
.ifnb \r1
vrshr.s16 \r1, \r1, #\shift
.endif
.ifnb \r2
vrshr.s16 \r2, \r2, #\shift
vrshr.s16 \r3, \r3, #\shift
.endif
.endm
.macro st_16 strd, reg, lanes
vst1.16 {\reg[0]}, [r0, :16], \strd
vst1.16 {\reg[1]}, [r8, :16], \strd
.if \lanes > 2
vst1.16 {\reg[2]}, [r0, :16], \strd
vst1.16 {\reg[3]}, [r8, :16], \strd
.endif
.endm
.macro st_32 strd, r0, r1
vst1.32 {\r0[0]}, [r0, :32], \strd
vst1.32 {\r0[1]}, [r8, :32], \strd
.ifnb \r1
vst1.32 {\r1[0]}, [r0, :32], \strd
vst1.32 {\r1[1]}, [r8, :32], \strd
.endif
.endm
.macro st_reg strd, align, r0, r1, r2, r3, r4, r5, r6, r7
vst1.8 {\r0}, [r0, \align], \strd
vst1.8 {\r1}, [r8, \align], \strd
.ifnb \r2
vst1.8 {\r2}, [r0, \align], \strd
vst1.8 {\r3}, [r8, \align], \strd
.endif
.ifnb \r4
vst1.8 {\r4}, [r0, \align], \strd
vst1.8 {\r5}, [r8, \align], \strd
vst1.8 {\r6}, [r0, \align], \strd
vst1.8 {\r7}, [r8, \align], \strd
.endif
.endm
.macro shift_store_4 type, strd, q0, d0, d1, q1, d2, d3
.ifc \type, put
vqrshrun_s16 6, \q0, \d0, \q1, \d2
st_32 \strd, \d0, \d2
.else
vrshr_s16 2, \q0, \q1
st_reg \strd, :64, \d0, \d1, \d2, \d3
.endif
.endm
.macro shift_store_8 type, strd, q0, d0, q1, d1, q2, d2, q3, d3
.ifc \type, put
vqrshrun_s16 6, \q0, \d0, \q1, \d1, \q2, \d2, \q3, \d3
st_reg \strd, :64, \d0, \d1, \d2, \d3
.else
vrshr_s16 2, \q0, \q1, \q2, \q3
st_reg \strd, :128,\q0, \q1, \q2, \q3
.endif
.endm
.macro shift_store_16 type, strd, q0, d0, d1, q1, q2, d4, d5, q3
.ifc \type, put
vqrshrun.s16 \d0, \q0, #6
vqrshrun.s16 \d1, \q1, #6
vqrshrun.s16 \d4, \q2, #6
vqrshrun.s16 \d5, \q3, #6
st_reg \strd, :128, \q0, \q2
.else
vrshr_s16 2, \q0, \q1, \q2, \q3
vst1.16 {\q0, \q1}, [r0, :128], \strd
vst1.16 {\q2, \q3}, [r8, :128], \strd
.endif
.endm
.macro make_8tap_fn op, type, type_h, type_v
function \op\()_8tap_\type\()_8bpc_neon, export=1
push {r4-r11,lr}
movw r8, \type_h
movw r9, \type_v
b \op\()_8tap_neon
endfunc
.endm
// No spaces in these expressions, due to gas-preprocessor.
#define REGULAR ((0*15<<7)|3*15)
#define SMOOTH ((1*15<<7)|4*15)
#define SHARP ((2*15<<7)|3*15)
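// Each constant packs two offsets into X(mc_subpel_filters): bits 7-13 give
// the 8-tap filter set and bits 0-6 the 4-tap set used for small blocks
// (w or h <= 4); the ubfx/and pair in the setup code below picks one of the
// two and the subpel position is added on top.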
.macro filter_fn type, dst, d_strd, src, s_strd, w, h, mx, my, ds2, sr2, shift_hv
make_8tap_fn \type, regular, REGULAR, REGULAR
make_8tap_fn \type, regular_smooth, REGULAR, SMOOTH
make_8tap_fn \type, regular_sharp, REGULAR, SHARP
make_8tap_fn \type, smooth, SMOOTH, SMOOTH
make_8tap_fn \type, smooth_regular, SMOOTH, REGULAR
make_8tap_fn \type, smooth_sharp, SMOOTH, SHARP
make_8tap_fn \type, sharp, SHARP, SHARP
make_8tap_fn \type, sharp_regular, SHARP, REGULAR
make_8tap_fn \type, sharp_smooth, SHARP, SMOOTH
function \type\()_8tap_neon
ldrd r4, r5, [sp, #36]
ldrd r6, r7, [sp, #44]
movw r10, #0x4081 // (1 << 14) | (1 << 7) | (1 << 0)
mul \mx, \mx, r10
mul \my, \my, r10
add \mx, \mx, r8 // mx, 8tap_h, 4tap_h
add \my, \my, r9 // my, 8tap_v, 4tap_v
.ifc \type, prep
lsl \d_strd, \w, #1
.endif
clz r8, \w
tst \mx, #(0x7f << 14)
sub r8, r8, #24
movrel r10, X(mc_subpel_filters), -8
bne L(\type\()_8tap_h)
tst \my, #(0x7f << 14)
bne L(\type\()_8tap_v)
b \type\()_neon
L(\type\()_8tap_h):
cmp \w, #4
ubfx r9, \mx, #7, #7
and \mx, \mx, #0x7f
it gt
movgt \mx, r9
tst \my, #(0x7f << 14)
add \mx, r10, \mx, lsl #3
bne L(\type\()_8tap_hv)
adr r9, L(\type\()_8tap_h_tbl)
ldr r8, [r9, r8, lsl #2]
add r9, r9, r8
bx r9
.align 2
L(\type\()_8tap_h_tbl):
.word 1280f - L(\type\()_8tap_h_tbl) + CONFIG_THUMB
.word 640f - L(\type\()_8tap_h_tbl) + CONFIG_THUMB
.word 320f - L(\type\()_8tap_h_tbl) + CONFIG_THUMB
.word 160f - L(\type\()_8tap_h_tbl) + CONFIG_THUMB
.word 80f - L(\type\()_8tap_h_tbl) + CONFIG_THUMB
.word 40f - L(\type\()_8tap_h_tbl) + CONFIG_THUMB
.word 20f - L(\type\()_8tap_h_tbl) + CONFIG_THUMB
20: // 2xN h
.ifc \type, put
add \mx, \mx, #2
vld1.32 {d0[]}, [\mx]
sub \src, \src, #1
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \d_strd, \d_strd, #1
lsl \s_strd, \s_strd, #1
vmovl.s8 q0, d0
2:
vld1.8 {d4}, [\src], \s_strd
vld1.8 {d6}, [\sr2], \s_strd
vmovl.u8 q2, d4
vmovl.u8 q3, d6
vext.8 d5, d4, d5, #2
vext.8 d7, d6, d7, #2
subs \h, \h, #2
vtrn.32 d4, d6
vtrn.32 d5, d7
vmul.s16 d2, d4, d0[0]
vmla.s16 d2, d5, d0[1]
vmla.s16 d2, d6, d0[2]
vmla.s16 d2, d7, d0[3]
vrshr.s16 d2, d2, #2
vqrshrun.s16 d2, q1, #4
vst1.16 {d2[0]}, [\dst, :16], \d_strd
vst1.16 {d2[1]}, [\ds2, :16], \d_strd
bgt 2b
pop {r4-r11,pc}
.endif
40: // 4xN h
add \mx, \mx, #2
vld1.32 {d0[]}, [\mx]
sub \src, \src, #1
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \d_strd, \d_strd, #1
lsl \s_strd, \s_strd, #1
vmovl.s8 q0, d0
4:
vld1.8 {d16}, [\src], \s_strd
vld1.8 {d24}, [\sr2], \s_strd
vmovl.u8 q8, d16
vmovl.u8 q12, d24
vext.8 d18, d16, d17, #2
vext.8 d20, d16, d17, #4
vext.8 d22, d16, d17, #6
vext.8 d26, d24, d25, #2
vext.8 d28, d24, d25, #4
vext.8 d30, d24, d25, #6
subs \h, \h, #2
vmul.s16 d4, d16, d0[0]
vmla.s16 d4, d18, d0[1]
vmla.s16 d4, d20, d0[2]
vmla.s16 d4, d22, d0[3]
vmul.s16 d5, d24, d0[0]
vmla.s16 d5, d26, d0[1]
vmla.s16 d5, d28, d0[2]
vmla.s16 d5, d30, d0[3]
vrshr.s16 q2, q2, #2
.ifc \type, put
vqrshrun.s16 d4, q2, #4
vst1.32 {d4[0]}, [\dst, :32], \d_strd
vst1.32 {d4[1]}, [\ds2, :32], \d_strd
.else
vst1.16 {d4}, [\dst, :64], \d_strd
vst1.16 {d5}, [\ds2, :64], \d_strd
.endif
bgt 4b
pop {r4-r11,pc}
80: // 8xN h
vld1.8 {d0}, [\mx, :64]
sub \src, \src, #3
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \d_strd, \d_strd, #1
lsl \s_strd, \s_strd, #1
vmovl.s8 q0, d0
8:
vld1.8 {q8}, [\src], \s_strd
vld1.8 {q12}, [\sr2], \s_strd
vmovl.u8 q9, d17
vmovl.u8 q8, d16
vmovl.u8 q13, d25
vmovl.u8 q12, d24
vmul.s16 q10, q8, d0[0]
vmul.s16 q14, q12, d0[0]
.irpc i, 1234567
vext.8 q11, q8, q9, #(2*\i)
vext.8 q15, q12, q13, #(2*\i)
.if \i < 4
vmla.s16 q10, q11, d0[\i]
vmla.s16 q14, q15, d0[\i]
.else
vmla.s16 q10, q11, d1[\i-4]
vmla.s16 q14, q15, d1[\i-4]
.endif
.endr
subs \h, \h, #2
vrshr.s16 q10, q10, #2
vrshr.s16 q14, q14, #2
.ifc \type, put
vqrshrun.s16 d20, q10, #4
vqrshrun.s16 d28, q14, #4
vst1.8 {d20}, [\dst, :64], \d_strd
vst1.8 {d28}, [\ds2, :64], \d_strd
.else
vst1.16 {q10}, [\dst, :128], \d_strd
vst1.16 {q14}, [\ds2, :128], \d_strd
.endif
bgt 8b
pop {r4-r11,pc}
160:
320:
640:
1280: // 16xN, 32xN, ... h
// This could be done without touching q4-q6, by using only
// one temporary for vext in the loop. That's slower on A7 and A53
// (but surprisingly, marginally faster on A8 and A73).
vpush {q4-q6}
vld1.8 {d0}, [\mx, :64]
sub \src, \src, #3
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
vmovl.s8 q0, d0
sub \s_strd, \s_strd, \w
sub \s_strd, \s_strd, #8
.ifc \type, put
lsl \d_strd, \d_strd, #1
sub \d_strd, \d_strd, \w
.endif
161:
vld1.8 {d16, d17, d18}, [\src]!
vld1.8 {d24, d25, d26}, [\sr2]!
mov \mx, \w
vmovl.u8 q10, d18
vmovl.u8 q9, d17
vmovl.u8 q8, d16
vmovl.u8 q14, d26
vmovl.u8 q13, d25
vmovl.u8 q12, d24
16:
vmul.s16 q1, q8, d0[0]
vmul.s16 q2, q9, d0[0]
vmul.s16 q3, q12, d0[0]
vmul.s16 q4, q13, d0[0]
.irpc i, 1234567
vext.8 q5, q8, q9, #(2*\i)
vext.8 q6, q9, q10, #(2*\i)
vext.8 q11, q12, q13, #(2*\i)
vext.8 q15, q13, q14, #(2*\i)
.if \i < 4
vmla.s16 q1, q5, d0[\i]
vmla.s16 q2, q6, d0[\i]
vmla.s16 q3, q11, d0[\i]
vmla.s16 q4, q15, d0[\i]
.else
vmla.s16 q1, q5, d1[\i-4]
vmla.s16 q2, q6, d1[\i-4]
vmla.s16 q3, q11, d1[\i-4]
vmla.s16 q4, q15, d1[\i-4]
.endif
.endr
vrshr.s16 q1, q1, #2
vrshr.s16 q2, q2, #2
vrshr.s16 q3, q3, #2
vrshr.s16 q4, q4, #2
subs \mx, \mx, #16
.ifc \type, put
vqrshrun.s16 d2, q1, #4
vqrshrun.s16 d3, q2, #4
vqrshrun.s16 d4, q3, #4
vqrshrun.s16 d5, q4, #4
vst1.8 {q1}, [\dst, :128]!
vst1.8 {q2}, [\ds2, :128]!
.else
vst1.16 {q1, q2}, [\dst, :128]!
vst1.16 {q3, q4}, [\ds2, :128]!
.endif
ble 9f
vmov q8, q10
vmov q12, q14
vld1.8 {d18, d19}, [\src]!
vld1.8 {d26, d27}, [\sr2]!
vmovl.u8 q10, d19
vmovl.u8 q9, d18
vmovl.u8 q14, d27
vmovl.u8 q13, d26
b 16b
9:
add \dst, \dst, \d_strd
add \ds2, \ds2, \d_strd
add \src, \src, \s_strd
add \sr2, \sr2, \s_strd
subs \h, \h, #2
bgt 161b
vpop {q4-q6}
pop {r4-r11,pc}
L(\type\()_8tap_v):
cmp \h, #4
ubfx r9, \my, #7, #7
and \my, \my, #0x7f
it gt
movgt \my, r9
add \my, r10, \my, lsl #3
adr r9, L(\type\()_8tap_v_tbl)
ldr r8, [r9, r8, lsl #2]
add r9, r9, r8
bx r9
.align 2
L(\type\()_8tap_v_tbl):
.word 1280f - L(\type\()_8tap_v_tbl) + CONFIG_THUMB
.word 640f - L(\type\()_8tap_v_tbl) + CONFIG_THUMB
.word 320f - L(\type\()_8tap_v_tbl) + CONFIG_THUMB
.word 160f - L(\type\()_8tap_v_tbl) + CONFIG_THUMB
.word 80f - L(\type\()_8tap_v_tbl) + CONFIG_THUMB
.word 40f - L(\type\()_8tap_v_tbl) + CONFIG_THUMB
.word 20f - L(\type\()_8tap_v_tbl) + CONFIG_THUMB
20: // 2xN v
.ifc \type, put
bgt 28f
cmp \h, #2
add \my, \my, #2
vld1.32 {d0[]}, [\my]
sub \src, \src, \s_strd
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
vmovl.s8 q0, d0
// 2x2 v
load_16 \src, \sr2, \s_strd, d1, d2, d3, d4, d5
interleave_1_16 d1, d2, d3, d4, d5
bgt 24f
vmovl_u8 q8, d1, q9, d2, q10, d3, q11, d4
mul_mla_4 d6, d16, d18, d20, d22
vqrshrun_s16 6, q3, d6
st_16 \d_strd, d6, 2
pop {r4-r11,pc}
24: // 2x4 v
load_16 \sr2, \src, \s_strd, d6, d7
interleave_1_16 d5, d6, d7
vmovl_u8 q8, d1, q9, d2, q10, d3, q11, d4, q12, d5, q13, d6
vmov d17, d20
vmov d19, d22
vmov d21, d24
vmov d23, d26
mul_mla_4 q3, q8, q9, q10, q11
vqrshrun_s16 6, q3, d6
st_16 \d_strd, d6, 4
pop {r4-r11,pc}
28: // 2x6, 2x8, 2x12, 2x16 v
vpush {q4-q7}
vld1.8 {d0}, [\my, :64]
sub \sr2, \src, \s_strd, lsl #1
add \ds2, \dst, \d_strd
sub \src, \sr2, \s_strd
lsl \d_strd, \d_strd, #1
lsl \s_strd, \s_strd, #1
vmovl.s8 q0, d0
load_16 \src, \sr2, \s_strd, d2, d4, d6, d8, d10, d12, d14
interleave_1_16 d2, d4, d6, d8, d10
interleave_1_16 d10, d12, d14
vmovl_u8 q1, d2, q2, d4, q3, d6, q4, d8, q5, d10, q6, d12
vmov d3, d6
vmov d5, d8
vmov d7, d10
vmov d9, d12
216:
subs \h, \h, #4
load_16 \sr2, \src, \s_strd, d16, d18, d20, d22
interleave_1_16 d14, d16, d18, d20, d22
vmovl_u8 q7, d14, q8, d16, q9, d18, q10, d20
vmov d11, d14
vmov d13, d16
vmov d15, d18
vmov d17, d20
mul_mla_8_0 q1, q1, q2, q3, q4, q5, q6, q7, q8
vqrshrun_s16 6, q1, d2
st_16 \d_strd, d2, 4
ble 0f
cmp \h, #2
vmov q1, q5
vmov q2, q6
vmov q3, q7
vmov q4, q8
vmov q5, q9
vmov q6, q10
vmov d14, d22
beq 26f
b 216b
26:
load_16 \sr2, \src, \s_strd, d16, d18
interleave_1_16 d14, d16, d18
vmovl_u8 q7, d14, q8, d16
vmov d11, d14
vmov d13, d16
mul_mla_8_0 d2, d2, d4, d6, d8, d10, d12, d14, d16
vqrshrun_s16 6, q1, d2
st_16 \d_strd, d2, 2
0:
vpop {q4-q7}
pop {r4-r11,pc}
.endif
40:
bgt 480f
// 4x2, 4x4 v
cmp \h, #2
add \my, \my, #2
vld1.32 {d0[]}, [\my]
sub \src, \src, \s_strd
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
vmovl.s8 q0, d0
load_32 \src, \sr2, \s_strd, d1, d2, d3, d4, d5
interleave_1_32 d1, d2, d3, d4, d5
vmovl_u8 q8, d1, q9, d2, q10, d3, q11, d4
mul_mla_4 q3, q8, q9, q10, q11
shift_store_4 \type, \d_strd, q3, d6, d7
ble 0f
load_32 \sr2, \src, \s_strd, d6, d7
interleave_1_32 d5, d6, d7
vmovl_u8 q12, d5, q13, d6
mul_mla_4 q3, q10, q11, q12, q13
shift_store_4 \type, \d_strd, q3, d6, d7
0:
pop {r4-r11,pc}
480: // 4x6, 4x8, 4x12, 4x16 v
vpush {q4}
vld1.8 {d0}, [\my, :64]
sub \sr2, \src, \s_strd, lsl #1
add \ds2, \dst, \d_strd
sub \src, \sr2, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
vmovl.s8 q0, d0
load_32 \src, \sr2, \s_strd, d2, d4, d6, d8, d16, d18, d20
interleave_1_32 d2, d4, d6
interleave_1_32 d6, d8, d16, d18, d20
vmovl_u8 q1, d2, q2, d4, q3, d6, q4, d8, q8, d16, q9, d18
48:
subs \h, \h, #4
load_32 \sr2, \src, \s_strd, d22, d24, d26, d28
interleave_1_32 d20, d22, d24, d26, d28
vmovl_u8 q10, d20, q11, d22, q12, d24, q13, d26
mul_mla_8_2 q1, q2, q1, q2, q3, q4, q8, q9, q10, q11, q12, q13
shift_store_4 \type, \d_strd, q1, d2, d3, q2, d4, d5
ble 0f
load_32 \sr2, \src, \s_strd, d30, d2
subs \h, \h, #2
interleave_1_32 d28, d30, d2
vmovl_u8 q14, d28, q15, d30
mul_mla_8_0 q8, q8, q9, q10, q11, q12, q13, q14, q15
shift_store_4 \type, \d_strd, q8, d16, d17
ble 0f
load_32 \sr2, \src, \s_strd, d4, d6
subs \h, \h, #2
interleave_1_32 d2, d4, d6
vmovl_u8 q1, d2, q2, d4
mul_mla_8_0 q9, q10, q11, q12, q13, q14, q15, q1, q2
shift_store_4 \type, \d_strd, q9, d18, d19
ble 0f
subs \h, \h, #4
load_32 \sr2, \src, \s_strd, d8, d16, d18, d20
interleave_1_32 d6, d8, d16, d18, d20
vmovl_u8 q3, d6, q4, d8, q8, d16, q9, d18
mul_mla_8_2 q12, q13, q12, q13, q14, q15, q1, q2, q3, q4, q8, q9
shift_store_4 \type, \d_strd, q12, d24, d25, q13, d26, d27
bgt 48b
0:
vpop {q4}
pop {r4-r11,pc}
80:
bgt 880f
// 8x2, 8x4 v
cmp \h, #2
add \my, \my, #2
vld1.32 {d0[]}, [\my]
sub \src, \src, \s_strd
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
vmovl.s8 q0, d0
load_reg \src, \sr2, \s_strd, d1, d2, d3, d4, d5
vmovl_u8 q8, d1, q9, d2, q10, d3, q11, d4, q12, d5
mul_mla_4 q1, q8, q9, q10, q11
mul_mla_4 q2, q9, q10, q11, q12
shift_store_8 \type, \d_strd, q1, d2, q2, d4
ble 0f
load_reg \sr2, \src, \s_strd, d6, d7
vmovl_u8 q13, d6, q14, d7
mul_mla_4 q1, q10, q11, q12, q13
mul_mla_4 q2, q11, q12, q13, q14
shift_store_8 \type, \d_strd, q1, d2, q2, d4
0:
pop {r4-r11,pc}
880: // 8x6, 8x8, 8x16, 8x32 v
1680: // 16x8, 16x16, ...
320: // 32x8, 32x16, ...
640:
1280:
vpush {q4}
vld1.8 {d0}, [\my, :64]
sub \src, \src, \s_strd
sub \src, \src, \s_strd, lsl #1
vmovl.s8 q0, d0
mov \my, \h
168:
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
load_reg \src, \sr2, \s_strd, d2, d4, d6, d8, d16, d18, d20
vmovl_u8 q1, d2, q2, d4, q3, d6, q4, d8, q8, d16, q9, d18, q10, d20
88:
subs \h, \h, #2
load_reg \sr2, \src, \s_strd, d22, d24
vmovl_u8 q11, d22, q12, d24
mul_mla_8_1 q1, q2, q1, q2, q3, q4, q8, q9, q10, q11, q12
shift_store_8 \type, \d_strd, q1, d2, q2, d4
ble 9f
subs \h, \h, #2
load_reg \sr2, \src, \s_strd, d26, d28
vmovl_u8 q13, d26, q14, d28
mul_mla_8_1 q3, q4, q3, q4, q8, q9, q10, q11, q12, q13, q14
shift_store_8 \type, \d_strd, q3, d6, q4, d8
ble 9f
subs \h, \h, #2
load_reg \sr2, \src, \s_strd, d30, d2
vmovl_u8 q15, d30, q1, d2
mul_mla_8_1 q8, q9, q8, q9, q10, q11, q12, q13, q14, q15, q1
shift_store_8 \type, \d_strd, q8, d16, q9, d18
ble 9f
subs \h, \h, #2
load_reg \sr2, \src, \s_strd, d4, d6
vmovl_u8 q2, d4, q3, d6
mul_mla_8_1 q10, q11, q10, q11, q12, q13, q14, q15, q1, q2, q3
shift_store_8 \type, \d_strd, q10, d20, q11, d22
ble 9f
subs \h, \h, #4
load_reg \sr2, \src, \s_strd, d8, d16, d18, d20
vmovl_u8 q4, d8, q8, d16, q9, d18, q10, d20
mul_mla_8_1 q12, q13, q12, q13, q14, q15, q1, q2, q3, q4, q8
mul_mla_8_1 q14, q15, q14, q15, q1, q2, q3, q4, q8, q9, q10
shift_store_8 \type, \d_strd, q12, d24, q13, d26, q14, d28, q15, d30
bgt 88b
9:
subs \w, \w, #8
ble 0f
asr \s_strd, \s_strd, #1
asr \d_strd, \d_strd, #1
mls \src, \s_strd, \my, \src
mls \dst, \d_strd, \my, \dst
sub \src, \src, \s_strd, lsl #3
mov \h, \my
add \src, \src, #8
.ifc \type, put
add \dst, \dst, #8
.else
add \dst, \dst, #16
.endif
b 168b
0:
vpop {q4}
pop {r4-r11,pc}
160:
bgt 1680b
// 16x2, 16x4 v
add \my, \my, #2
vld1.32 {d0[]}, [\my]
sub \src, \src, \s_strd
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
vmovl.s8 q0, d0
cmp \h, #2
load_reg \src, \sr2, \s_strd, q11, q12, q13, q14, q15
vmovl.u8 q1, d22
vmovl.u8 q2, d24
vmovl.u8 q3, d26
vmovl.u8 q8, d28
vmovl.u8 q9, d30
vmovl.u8 q11, d23
vmovl.u8 q12, d25
vmovl.u8 q13, d27
vmovl.u8 q14, d29
vmovl.u8 q15, d31
mul_mla_4 q1, q1, q2, q3, q8
mul_mla_4 q10, q2, q3, q8, q9
mul_mla_4 q2, q11, q12, q13, q14
mul_mla_4 q11, q12, q13, q14, q15
shift_store_16 \type, \d_strd, q1, d2, d3, q2, q10, d20, d21, q11
ble 0f
load_reg \sr2, \src, \s_strd, q10, q11
vmovl.u8 q1, d20
vmovl.u8 q10, d21
vmovl.u8 q12, d22
vmovl.u8 q11, d23
mul_mla_4 q2, q3, q8, q9, q1
mul_mla_4 q3, q13, q14, q15, q10
mul_mla_4 q13, q8, q9, q1, q12
mul_mla_4 q14, q14, q15, q10, q11
shift_store_16 \type, \d_strd, q2, d4, d5, q3, q13, d26, d27, q14
0:
pop {r4-r11,pc}
L(\type\()_8tap_hv):
cmp \h, #4
ubfx r9, \my, #7, #7
and \my, \my, #0x7f
it gt
movgt \my, r9
add \my, r10, \my, lsl #3
adr r9, L(\type\()_8tap_hv_tbl)
ldr r8, [r9, r8, lsl #2]
add r9, r9, r8
bx r9
.align 2
L(\type\()_8tap_hv_tbl):
.word 1280f - L(\type\()_8tap_hv_tbl) + CONFIG_THUMB
.word 640f - L(\type\()_8tap_hv_tbl) + CONFIG_THUMB
.word 320f - L(\type\()_8tap_hv_tbl) + CONFIG_THUMB
.word 160f - L(\type\()_8tap_hv_tbl) + CONFIG_THUMB
.word 80f - L(\type\()_8tap_hv_tbl) + CONFIG_THUMB
.word 40f - L(\type\()_8tap_hv_tbl) + CONFIG_THUMB
.word 20f - L(\type\()_8tap_hv_tbl) + CONFIG_THUMB
20:
.ifc \type, put
add \mx, \mx, #2
vld1.32 {d0[]}, [\mx]
bgt 280f
add \my, \my, #2
vld1.32 {d2[]}, [\my]
// 2x2, 2x4 hv
sub \sr2, \src, #1
sub \src, \sr2, \s_strd
add \ds2, \dst, \d_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
vmovl.s8 q0, d0
vmovl.s8 q1, d2
vld1.8 {d26}, [\src], \s_strd
vmovl.u8 q13, d26
vext.8 q14, q13, q13, #2
vmul.s16 d26, d26, d0
vmul.s16 d28, d28, d0
vpadd.s16 d26, d26, d28
vpadd.s16 d26, d26, d26
vrshr.s16 d16, d26, #2
bl L(\type\()_8tap_filter_2)
vext.8 d16, d16, d16, #4
vmov d17, d26
vext.8 d16, d16, d26, #4
2:
bl L(\type\()_8tap_filter_2)
vext.8 d18, d17, d26, #4
vmull.s16 q2, d16, d2[0]
vmlal.s16 q2, d17, d2[1]
vmlal.s16 q2, d18, d2[2]
vmlal.s16 q2, d26, d2[3]
vqrshrn.s32 d4, q2, #\shift_hv
vqmovun.s16 d4, q2
subs \h, \h, #2
vst1.16 {d4[0]}, [\dst, :16], \d_strd
vst1.16 {d4[1]}, [\ds2, :16], \d_strd
ble 0f
vmov d16, d18
vmov d17, d26
b 2b
280: // 2x8, 2x16, 2x32 hv
vld1.8 {d2}, [\my, :64]
sub \src, \src, #1
sub \sr2, \src, \s_strd, lsl #1
sub \src, \sr2, \s_strd
add \ds2, \dst, \d_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
vmovl.s8 q0, d0
vmovl.s8 q1, d2
vld1.8 {d26}, [\src], \s_strd
vmovl.u8 q13, d26
vext.8 q14, q13, q13, #2
vmul.s16 d26, d26, d0
vmul.s16 d28, d28, d0
vpadd.s16 d26, d26, d28
vpadd.s16 d26, d26, d26
vrshr.s16 d16, d26, #2
bl L(\type\()_8tap_filter_2)
vext.8 d16, d16, d16, #4
vmov d17, d26
vext.8 d16, d16, d26, #4
bl L(\type\()_8tap_filter_2)
vext.8 d18, d17, d26, #4
vmov d19, d26
bl L(\type\()_8tap_filter_2)
vext.8 d20, d19, d26, #4
vmov d21, d26
28:
bl L(\type\()_8tap_filter_2)
vext.8 d22, d21, d26, #4
vmull.s16 q2, d16, d2[0]
vmlal.s16 q2, d17, d2[1]
vmlal.s16 q2, d18, d2[2]
vmlal.s16 q2, d19, d2[3]
vmlal.s16 q2, d20, d3[0]
vmlal.s16 q2, d21, d3[1]
vmlal.s16 q2, d22, d3[2]
vmlal.s16 q2, d26, d3[3]
vqrshrn.s32 d4, q2, #\shift_hv
vqmovun.s16 d4, q2
subs \h, \h, #2
vst1.16 {d4[0]}, [\dst, :16], \d_strd
vst1.16 {d4[1]}, [\ds2, :16], \d_strd
ble 0f
vmov d16, d18
vmov d17, d19
vmov d18, d20
vmov d19, d21
vmov d20, d22
vmov d21, d26
b 28b
0:
pop {r4-r11,pc}
L(\type\()_8tap_filter_2):
vld1.8 {d28}, [\sr2], \s_strd
vld1.8 {d30}, [\src], \s_strd
vext.8 d29, d28, d28, #1
vext.8 d31, d30, d30, #1
vmovl.u8 q13, d28
vmovl.u8 q14, d29
vmov d27, d28
vmovl.u8 q14, d30
vmovl.u8 q15, d31
vtrn.32 d26, d28
vtrn.32 d27, d30
vmul.s16 d26, d26, d0[0]
vmla.s16 d26, d27, d0[1]
vmla.s16 d26, d28, d0[2]
vmla.s16 d26, d30, d0[3]
vrshr.s16 d26, d26, #2
vext.8 d27, d26, d26, #4
bx lr
.endif
40:
add \mx, \mx, #2
vld1.32 {d0[]}, [\mx]
bgt 480f
add \my, \my, #2
vld1.32 {d2[]}, [\my]
sub \sr2, \src, #1
sub \src, \sr2, \s_strd
add \ds2, \dst, \d_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
vmovl.s8 q0, d0
vmovl.s8 q1, d2
// 4x2, 4x4 hv
vld1.8 {d30}, [\src], \s_strd
vmovl.u8 q14, d30
vext.8 d27, d28, d29, #2
vext.8 d30, d28, d29, #4
vext.8 d31, d28, d29, #6
vmul.s16 d26, d28, d0[0]
vmla.s16 d26, d27, d0[1]
vmla.s16 d26, d30, d0[2]
vmla.s16 d26, d31, d0[3]
vrshr.s16 d16, d26, #2
bl L(\type\()_8tap_filter_4)
vmov d17, d26
vmov d18, d27
4:
bl L(\type\()_8tap_filter_4)
vmull.s16 q2, d16, d2[0]
vmlal.s16 q2, d17, d2[1]
vmlal.s16 q2, d18, d2[2]
vmlal.s16 q2, d26, d2[3]
vmull.s16 q3, d17, d2[0]
vmlal.s16 q3, d18, d2[1]
vmlal.s16 q3, d26, d2[2]
vmlal.s16 q3, d27, d2[3]
vqrshrn.s32 d4, q2, #\shift_hv
vqrshrn.s32 d6, q3, #\shift_hv
subs \h, \h, #2
.ifc \type, put
vqmovun.s16 d4, q2
vqmovun.s16 d6, q3
vst1.32 {d4[0]}, [\dst, :32], \d_strd
vst1.32 {d6[0]}, [\ds2, :32], \d_strd
.else
vst1.16 {d4}, [\dst, :64], \d_strd
vst1.16 {d6}, [\ds2, :64], \d_strd
.endif
ble 0f
vmov d16, d18
vmov d17, d26
vmov d18, d27
b 4b
480: // 4x8, 4x16, 4x32 hv
vld1.8 {d2}, [\my, :64]
sub \src, \src, #1
sub \sr2, \src, \s_strd, lsl #1
sub \src, \sr2, \s_strd
add \ds2, \dst, \d_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
vmovl.s8 q0, d0
vmovl.s8 q1, d2
vld1.8 {d30}, [\src], \s_strd
vmovl.u8 q14, d30
vext.8 d27, d28, d29, #2
vext.8 d30, d28, d29, #4
vext.8 d31, d28, d29, #6
vmul.s16 d26, d28, d0[0]
vmla.s16 d26, d27, d0[1]
vmla.s16 d26, d30, d0[2]
vmla.s16 d26, d31, d0[3]
vrshr.s16 d16, d26, #2
bl L(\type\()_8tap_filter_4)
vmov d17, d26
vmov d18, d27
bl L(\type\()_8tap_filter_4)
vmov d19, d26
vmov d20, d27
bl L(\type\()_8tap_filter_4)
vmov d21, d26
vmov d22, d27
48:
bl L(\type\()_8tap_filter_4)
vmull.s16 q2, d16, d2[0]
vmlal.s16 q2, d17, d2[1]
vmlal.s16 q2, d18, d2[2]
vmlal.s16 q2, d19, d2[3]
vmlal.s16 q2, d20, d3[0]
vmlal.s16 q2, d21, d3[1]
vmlal.s16 q2, d22, d3[2]
vmlal.s16 q2, d26, d3[3]
vmull.s16 q3, d17, d2[0]
vmlal.s16 q3, d18, d2[1]
vmlal.s16 q3, d19, d2[2]
vmlal.s16 q3, d20, d2[3]
vmlal.s16 q3, d21, d3[0]
vmlal.s16 q3, d22, d3[1]
vmlal.s16 q3, d26, d3[2]
vmlal.s16 q3, d27, d3[3]
vqrshrn.s32 d4, q2, #\shift_hv
vqrshrn.s32 d6, q3, #\shift_hv
subs \h, \h, #2
.ifc \type, put
vqmovun.s16 d4, q2
vqmovun.s16 d6, q3
vst1.32 {d4[0]}, [\dst, :32], \d_strd
vst1.32 {d6[0]}, [\ds2, :32], \d_strd
.else
vst1.16 {d4}, [\dst, :64], \d_strd
vst1.16 {d6}, [\ds2, :64], \d_strd
.endif
ble 0f
vmov d16, d18
vmov d17, d19
vmov d18, d20
vmov d19, d21
vmov d20, d22
vmov d21, d26
vmov d22, d27
b 48b
0:
pop {r4-r11,pc}
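// Shared 4-pixel horizontal helper: filters one row from \sr2 and one from \src
// with the 4-tap subpel filter in d0, rounds >>2, and returns them in d26/d27.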
L(\type\()_8tap_filter_4):
vld1.8 {d30}, [\sr2], \s_strd
vld1.8 {d31}, [\src], \s_strd
vmovl.u8 q14, d30
vext.8 d27, d28, d29, #2
vext.8 d30, d28, d29, #4
vext.8 d1, d28, d29, #6
vmul.s16 d26, d28, d0[0]
vmla.s16 d26, d27, d0[1]
vmla.s16 d26, d30, d0[2]
vmla.s16 d26, d1, d0[3]
vmovl.u8 q14, d31
vext.8 d30, d28, d29, #2
vext.8 d31, d28, d29, #4
vext.8 d1, d28, d29, #6
vmul.s16 d27, d28, d0[0]
vmla.s16 d27, d30, d0[1]
vmla.s16 d27, d31, d0[2]
vmla.s16 d27, d1, d0[3]
vrshr.s16 d26, d26, #2
vrshr.s16 d27, d27, #2
bx lr
80:
160:
320:
bgt 880f
vpush {q4-q7}
add \my, \my, #2
vld1.8 {d0}, [\mx, :64]
vld1.32 {d2[]}, [\my]
sub \src, \src, #3
sub \src, \src, \s_strd
vmovl.s8 q0, d0
vmovl.s8 q1, d2
mov \my, \h
164: // 8x2, 8x4, 16x2, 16x4, 32x2, 32x4 hv
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \d_strd, \d_strd, #1
lsl \s_strd, \s_strd, #1
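// Prime the vertical filter: horizontally filter the first source row;
// q10 = sum of pix[x+i]*filter[i] for i = 0..7, rounded >>2 into the
// 16-bit intermediate kept in q3.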
vld1.8 {q14}, [\src], \s_strd
vmovl.u8 q12, d28
vmovl.u8 q13, d29
vmul.s16 q10, q12, d0[0]
.irpc i, 123
vext.8 q14, q12, q13, #(2*\i)
vmla.s16 q10, q14, d0[\i]
.endr
.irpc i, 4567
vext.8 q14, q12, q13, #(2*\i)
vmla.s16 q10, q14, d1[\i-4]
.endr
vrshr.s16 q3, q10, #2
bl L(\type\()_8tap_filter_8)
vmov q4, q10
vmov q5, q11
8:
bl L(\type\()_8tap_filter_8)
vmull.s16 q12, d6, d2[0]
vmull.s16 q13, d7, d2[0]
vmull.s16 q14, d8, d2[0]
vmull.s16 q15, d9, d2[0]
vmlal.s16 q12, d8, d2[1]
vmlal.s16 q13, d9, d2[1]
vmlal.s16 q14, d10, d2[1]
vmlal.s16 q15, d11, d2[1]
vmlal.s16 q12, d10, d2[2]
vmlal.s16 q13, d11, d2[2]
vmlal.s16 q14, d20, d2[2]
vmlal.s16 q15, d21, d2[2]
vmlal.s16 q12, d20, d2[3]
vmlal.s16 q13, d21, d2[3]
vmlal.s16 q14, d22, d2[3]
vmlal.s16 q15, d23, d2[3]
vqrshrn.s32 d24, q12, #\shift_hv
vqrshrn.s32 d25, q13, #\shift_hv
vqrshrn.s32 d28, q14, #\shift_hv
vqrshrn.s32 d29, q15, #\shift_hv
subs \h, \h, #2
.ifc \type, put
vqmovun.s16 d24, q12
vqmovun.s16 d28, q14
vst1.8 {d24}, [\dst, :64], \d_strd
vst1.8 {d28}, [\ds2, :64], \d_strd
.else
vst1.16 {q12}, [\dst, :128], \d_strd
vst1.16 {q14}, [\ds2, :128], \d_strd
.endif
ble 9f
vmov q3, q5
vmov q4, q10
vmov q5, q11
b 8b
9:
subs \w, \w, #8
ble 0f
asr \s_strd, \s_strd, #1
asr \d_strd, \d_strd, #1
mls \src, \s_strd, \my, \src
mls \dst, \d_strd, \my, \dst
sub \src, \src, \s_strd, lsl #2
mov \h, \my
add \src, \src, #8
.ifc \type, put
add \dst, \dst, #8
.else
add \dst, \dst, #16
.endif
b 164b
880: // 8x8, 8x16, ..., 16x8, ..., 32x8, ... hv
640:
1280:
vpush {q4-q7}
vld1.8 {d0}, [\mx, :64]
vld1.8 {d2}, [\my, :64]
sub \src, \src, #3
sub \src, \src, \s_strd
sub \src, \src, \s_strd, lsl #1
vmovl.s8 q0, d0
vmovl.s8 q1, d2
mov \my, \h
168:
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \d_strd, \d_strd, #1
lsl \s_strd, \s_strd, #1
vld1.8 {q14}, [\src], \s_strd
vmovl.u8 q12, d28
vmovl.u8 q13, d29
vmul.s16 q10, q12, d0[0]
.irpc i, 123
vext.8 q14, q12, q13, #(2*\i)
vmla.s16 q10, q14, d0[\i]
.endr
.irpc i, 4567
vext.8 q14, q12, q13, #(2*\i)
vmla.s16 q10, q14, d1[\i-4]
.endr
vrshr.s16 q3, q10, #2
bl L(\type\()_8tap_filter_8)
vmov q4, q10
vmov q5, q11
bl L(\type\()_8tap_filter_8)
vmov q6, q10
vmov q7, q11
bl L(\type\()_8tap_filter_8)
vmov q8, q10
vmov q9, q11
88:
bl L(\type\()_8tap_filter_8)
vmull.s16 q12, d6, d2[0]
vmull.s16 q13, d7, d2[0]
vmull.s16 q14, d8, d2[0]
vmull.s16 q15, d9, d2[0]
vmlal.s16 q12, d8, d2[1]
vmlal.s16 q13, d9, d2[1]
vmlal.s16 q14, d10, d2[1]
vmlal.s16 q15, d11, d2[1]
vmlal.s16 q12, d10, d2[2]
vmlal.s16 q13, d11, d2[2]
vmlal.s16 q14, d12, d2[2]
vmlal.s16 q15, d13, d2[2]
vmlal.s16 q12, d12, d2[3]
vmlal.s16 q13, d13, d2[3]
vmlal.s16 q14, d14, d2[3]
vmlal.s16 q15, d15, d2[3]
vmlal.s16 q12, d14, d3[0]
vmlal.s16 q13, d15, d3[0]
vmlal.s16 q14, d16, d3[0]
vmlal.s16 q15, d17, d3[0]
vmlal.s16 q12, d16, d3[1]
vmlal.s16 q13, d17, d3[1]
vmlal.s16 q14, d18, d3[1]
vmlal.s16 q15, d19, d3[1]
vmlal.s16 q12, d18, d3[2]
vmlal.s16 q13, d19, d3[2]
vmlal.s16 q14, d20, d3[2]
vmlal.s16 q15, d21, d3[2]
vmlal.s16 q12, d20, d3[3]
vmlal.s16 q13, d21, d3[3]
vmlal.s16 q14, d22, d3[3]
vmlal.s16 q15, d23, d3[3]
vqrshrn.s32 d24, q12, #\shift_hv
vqrshrn.s32 d25, q13, #\shift_hv
vqrshrn.s32 d28, q14, #\shift_hv
vqrshrn.s32 d29, q15, #\shift_hv
subs \h, \h, #2
.ifc \type, put
vqmovun.s16 d24, q12
vqmovun.s16 d28, q14
vst1.8 {d24}, [\dst, :64], \d_strd
vst1.8 {d28}, [\ds2, :64], \d_strd
.else
vst1.16 {q12}, [\dst, :128], \d_strd
vst1.16 {q14}, [\ds2, :128], \d_strd
.endif
ble 9f
vmov q3, q5
vmov q4, q6
vmov q5, q7
vmov q6, q8
vmov q7, q9
vmov q8, q10
vmov q9, q11
b 88b
9:
subs \w, \w, #8
ble 0f
asr \s_strd, \s_strd, #1
asr \d_strd, \d_strd, #1
mls \src, \s_strd, \my, \src
mls \dst, \d_strd, \my, \dst
sub \src, \src, \s_strd, lsl #3
mov \h, \my
add \src, \src, #8
.ifc \type, put
add \dst, \dst, #8
.else
add \dst, \dst, #16
.endif
b 168b
0:
vpop {q4-q7}
pop {r4-r11,pc}
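// Shared 8-pixel horizontal helper: applies the 8-tap filter to one row from
// \sr2 and one from \src, rounds >>2, and returns them in q10/q11.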
L(\type\()_8tap_filter_8):
vld1.8 {q14}, [\sr2], \s_strd
vld1.8 {q15}, [\src], \s_strd
vmovl.u8 q12, d28
vmovl.u8 q13, d29
vmul.s16 q10, q12, d0[0]
.irpc i, 123
vext.8 q14, q12, q13, #(2*\i)
vmla.s16 q10, q14, d0[\i]
.endr
.irpc i, 4567
vext.8 q14, q12, q13, #(2*\i)
vmla.s16 q10, q14, d1[\i-4]
.endr
vmovl.u8 q12, d30
vmovl.u8 q13, d31
vmul.s16 q11, q12, d0[0]
.irpc i, 123
vext.8 q14, q12, q13, #(2*\i)
vmla.s16 q11, q14, d0[\i]
.endr
.irpc i, 4567
vext.8 q14, q12, q13, #(2*\i)
vmla.s16 q11, q14, d1[\i-4]
.endr
vrshr.s16 q10, q10, #2
vrshr.s16 q11, q11, #2
bx lr
endfunc
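// Bilinear MC: horizontal weights (16-mx, mx) in d0/d1, vertical weights
// (16-my, my) in d2/d3. With no subpel offset in either direction this falls
// through to the plain copy routine (\type\()_neon).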
function \type\()_bilin_8bpc_neon, export=1
push {r4-r11,lr}
ldrd r4, r5, [sp, #36]
ldrd r6, r7, [sp, #44]
vdup.8 d1, \mx
vdup.8 d3, \my
rsb r8, \mx, #16
rsb r9, \my, #16
vdup.8 d0, r8
vdup.8 d2, r9
.ifc \type, prep
lsl \d_strd, \w, #1
.endif
clz r8, \w
cmp \mx, #0
sub r8, r8, #24
bne L(\type\()_bilin_h)
cmp \my, #0
bne L(\type\()_bilin_v)
b \type\()_neon
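// Horizontal-only bilinear: out = px[x]*(16-mx) + px[x+1]*mx; put rounds >>4
// back to 8 bits, prep stores the 16-bit intermediate (4 fractional bits).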
L(\type\()_bilin_h):
cmp \my, #0
bne L(\type\()_bilin_hv)
adr r9, L(\type\()_bilin_h_tbl)
ldr r8, [r9, r8, lsl #2]
add r9, r9, r8
bx r9
.align 2
L(\type\()_bilin_h_tbl):
.word 1280f - L(\type\()_bilin_h_tbl) + CONFIG_THUMB
.word 640f - L(\type\()_bilin_h_tbl) + CONFIG_THUMB
.word 320f - L(\type\()_bilin_h_tbl) + CONFIG_THUMB
.word 160f - L(\type\()_bilin_h_tbl) + CONFIG_THUMB
.word 80f - L(\type\()_bilin_h_tbl) + CONFIG_THUMB
.word 40f - L(\type\()_bilin_h_tbl) + CONFIG_THUMB
.word 20f - L(\type\()_bilin_h_tbl) + CONFIG_THUMB
20: // 2xN h
.ifc \type, put
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \d_strd, \d_strd, #1
lsl \s_strd, \s_strd, #1
2:
vld1.32 {d4[]}, [\src], \s_strd
vld1.32 {d6[]}, [\sr2], \s_strd
vext.8 d5, d4, d4, #1
vext.8 d7, d6, d6, #1
vtrn.16 q2, q3
subs \h, \h, #2
vmull.u8 q3, d4, d0
vmlal.u8 q3, d5, d1
vqrshrn.u16 d4, q3, #4
vst1.16 {d4[0]}, [\dst, :16], \d_strd
vst1.16 {d4[1]}, [\ds2, :16], \d_strd
bgt 2b
pop {r4-r11,pc}
.endif
40: // 4xN h
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \d_strd, \d_strd, #1
lsl \s_strd, \s_strd, #1
4:
vld1.8 {d4}, [\src], \s_strd
vld1.8 {d6}, [\sr2], \s_strd
vext.8 d5, d4, d4, #1
vext.8 d7, d6, d6, #1
vtrn.32 q2, q3
subs \h, \h, #2
vmull.u8 q3, d4, d0
vmlal.u8 q3, d5, d1
.ifc \type, put
vqrshrn.u16 d4, q3, #4
vst1.32 {d4[0]}, [\dst, :32], \d_strd
vst1.32 {d4[1]}, [\ds2, :32], \d_strd
.else
vst1.16 {d6}, [\dst, :64], \d_strd
vst1.16 {d7}, [\ds2, :64], \d_strd
.endif
bgt 4b
pop {r4-r11,pc}
80: // 8xN h
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \d_strd, \d_strd, #1
lsl \s_strd, \s_strd, #1
8:
vld1.8 {q8}, [\src], \s_strd
vld1.8 {q10}, [\sr2], \s_strd
vext.8 q9, q8, q8, #1
vext.8 q11, q10, q10, #1
subs \h, \h, #2
vmull.u8 q8, d16, d0
vmull.u8 q10, d20, d0
vmlal.u8 q8, d18, d1
vmlal.u8 q10, d22, d1
.ifc \type, put
vqrshrn.u16 d16, q8, #4
vqrshrn.u16 d18, q10, #4
vst1.8 {d16}, [\dst, :64], \d_strd
vst1.8 {d18}, [\ds2, :64], \d_strd
.else
vst1.16 {q8}, [\dst, :128], \d_strd
vst1.16 {q10}, [\ds2, :128], \d_strd
.endif
bgt 8b
pop {r4-r11,pc}
160:
320:
640:
1280: // 16xN, 32xN, ... h
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
sub \s_strd, \s_strd, \w
sub \s_strd, \s_strd, #8
.ifc \type, put
lsl \d_strd, \d_strd, #1
sub \d_strd, \d_strd, \w
.endif
161:
vld1.8 {d16}, [\src]!
vld1.8 {d22}, [\sr2]!
mov \mx, \w
16:
vld1.8 {d17,d18}, [\src]!
vld1.8 {d23,d24}, [\sr2]!
vext.8 q10, q8, q9, #1
vext.8 q13, q11, q12, #1
vmull.u8 q2, d16, d0
vmull.u8 q3, d17, d0
vmull.u8 q14, d22, d0
vmull.u8 q15, d23, d0
vmlal.u8 q2, d20, d1
vmlal.u8 q3, d21, d1
vmlal.u8 q14, d26, d1
vmlal.u8 q15, d27, d1
subs \mx, \mx, #16
.ifc \type, put
vqrshrn.u16 d4, q2, #4
vqrshrn.u16 d5, q3, #4
vqrshrn.u16 d28, q14, #4
vqrshrn.u16 d29, q15, #4
vst1.8 {q2}, [\dst, :128]!
vst1.8 {q14}, [\ds2, :128]!
.else
vst1.16 {q2, q3}, [\dst, :128]!
vst1.16 {q14, q15}, [\ds2, :128]!
.endif
ble 9f
vmov d16, d18
vmov d22, d24
b 16b
9:
add \dst, \dst, \d_strd
add \ds2, \ds2, \d_strd
add \src, \src, \s_strd
add \sr2, \sr2, \s_strd
subs \h, \h, #2
bgt 161b
pop {r4-r11,pc}
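// Vertical-only bilinear: out = row0*(16-my) + row1*my; put rounds >>4 back to
// 8 bits, prep stores the 16-bit intermediate.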
L(\type\()_bilin_v):
cmp \h, #4
adr r9, L(\type\()_bilin_v_tbl)
ldr r8, [r9, r8, lsl #2]
add r9, r9, r8
bx r9
.align 2
L(\type\()_bilin_v_tbl):
.word 1280f - L(\type\()_bilin_v_tbl) + CONFIG_THUMB
.word 640f - L(\type\()_bilin_v_tbl) + CONFIG_THUMB
.word 320f - L(\type\()_bilin_v_tbl) + CONFIG_THUMB
.word 160f - L(\type\()_bilin_v_tbl) + CONFIG_THUMB
.word 80f - L(\type\()_bilin_v_tbl) + CONFIG_THUMB
.word 40f - L(\type\()_bilin_v_tbl) + CONFIG_THUMB
.word 20f - L(\type\()_bilin_v_tbl) + CONFIG_THUMB
20: // 2xN v
.ifc \type, put
cmp \h, #2
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
// 2x2 v
vld1.16 {d16[]}, [\src], \s_strd
bgt 24f
22:
vld1.16 {d17[]}, [\sr2], \s_strd
vld1.16 {d18[]}, [\src], \s_strd
vext.8 d16, d16, d17, #6
vext.8 d17, d17, d18, #6
vmull.u8 q2, d16, d2
vmlal.u8 q2, d17, d3
vqrshrn.u16 d4, q2, #4
vst1.16 {d4[0]}, [\dst, :16]
vst1.16 {d4[1]}, [\ds2, :16]
pop {r4-r11,pc}
24: // 2x4, 2x6, 2x8, ... v
vld1.16 {d17[]}, [\sr2], \s_strd
vld1.16 {d18[]}, [\src], \s_strd
vld1.16 {d19[]}, [\sr2], \s_strd
vld1.16 {d20[]}, [\src], \s_strd
sub \h, \h, #4
vext.8 d16, d16, d17, #6
vext.8 d17, d17, d18, #6
vext.8 d18, d18, d19, #6
vext.8 d19, d19, d20, #6
vtrn.32 d16, d18
vtrn.32 d17, d19
vmull.u8 q2, d16, d2
vmlal.u8 q2, d17, d3
cmp \h, #2
vqrshrn.u16 d4, q2, #4
vst1.16 {d4[0]}, [\dst, :16], \d_strd
vst1.16 {d4[1]}, [\ds2, :16], \d_strd
vst1.16 {d4[2]}, [\dst, :16], \d_strd
vst1.16 {d4[3]}, [\ds2, :16], \d_strd
blt 0f
vmov d16, d20
beq 22b
b 24b
0:
pop {r4-r11,pc}
.endif
40: // 4xN v
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
vld1.32 {d16[]}, [\src], \s_strd
4:
vld1.32 {d17[]}, [\sr2], \s_strd
vld1.32 {d18[]}, [\src], \s_strd
vext.8 d16, d16, d17, #4
vext.8 d17, d17, d18, #4
vmull.u8 q2, d16, d2
vmlal.u8 q2, d17, d3
subs \h, \h, #2
.ifc \type, put
vqrshrn.u16 d4, q2, #4
vst1.32 {d4[0]}, [\dst, :32], \d_strd
vst1.32 {d4[1]}, [\ds2, :32], \d_strd
.else
vst1.16 {d4}, [\dst, :64], \d_strd
vst1.16 {d5}, [\ds2, :64], \d_strd
.endif
ble 0f
vmov d16, d18
b 4b
0:
pop {r4-r11,pc}
80: // 8xN v
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
vld1.8 {d16}, [\src], \s_strd
8:
vld1.8 {d17}, [\sr2], \s_strd
vld1.8 {d18}, [\src], \s_strd
vmull.u8 q2, d16, d2
vmull.u8 q3, d17, d2
vmlal.u8 q2, d17, d3
vmlal.u8 q3, d18, d3
subs \h, \h, #2
.ifc \type, put
vqrshrn.u16 d4, q2, #4
vqrshrn.u16 d6, q3, #4
vst1.8 {d4}, [\dst, :64], \d_strd
vst1.8 {d6}, [\ds2, :64], \d_strd
.else
vst1.16 {q2}, [\dst, :128], \d_strd
vst1.16 {q3}, [\ds2, :128], \d_strd
.endif
ble 0f
vmov d16, d18
b 8b
0:
pop {r4-r11,pc}
160: // 16xN, 32xN, ...
320:
640:
1280:
mov \my, \h
1:
add \ds2, \dst, \d_strd
add \sr2, \src, \s_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
vld1.8 {q8}, [\src], \s_strd
2:
vld1.8 {q9}, [\sr2], \s_strd
vld1.8 {q10}, [\src], \s_strd
vmull.u8 q12, d16, d2
vmull.u8 q13, d17, d2
vmull.u8 q14, d18, d2
vmull.u8 q15, d19, d2
vmlal.u8 q12, d18, d3
vmlal.u8 q13, d19, d3
vmlal.u8 q14, d20, d3
vmlal.u8 q15, d21, d3
subs \h, \h, #2
.ifc \type, put
vqrshrn.u16 d24, q12, #4
vqrshrn.u16 d25, q13, #4
vqrshrn.u16 d28, q14, #4
vqrshrn.u16 d29, q15, #4
vst1.8 {q12}, [\dst, :128], \d_strd
vst1.8 {q14}, [\ds2, :128], \d_strd
.else
vst1.16 {q12, q13}, [\dst, :128], \d_strd
vst1.16 {q14, q15}, [\ds2, :128], \d_strd
.endif
ble 9f
vmov q8, q10
b 2b
9:
subs \w, \w, #16
ble 0f
asr \s_strd, \s_strd, #1
asr \d_strd, \d_strd, #1
mls \src, \s_strd, \my, \src
mls \dst, \d_strd, \my, \dst
sub \src, \src, \s_strd, lsl #1
mov \h, \my
add \src, \src, #16
.ifc \type, put
add \dst, \dst, #16
.else
add \dst, \dst, #32
.endif
b 1b
0:
pop {r4-r11,pc}
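// Horizontal + vertical bilinear: the horizontal pass keeps 4 fractional bits,
// the vertical pass (weights widened into q2/q3) adds 4 more, so put rounds >>8
// to 8 bits while prep rounds >>4 to the usual 16-bit intermediate.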
L(\type\()_bilin_hv):
vmovl.u8 q2, d2
vmovl.u8 q3, d3
adr r9, L(\type\()_bilin_hv_tbl)
ldr r8, [r9, r8, lsl #2]
add r9, r9, r8
bx r9
.align 2
L(\type\()_bilin_hv_tbl):
.word 1280f - L(\type\()_bilin_hv_tbl) + CONFIG_THUMB
.word 640f - L(\type\()_bilin_hv_tbl) + CONFIG_THUMB
.word 320f - L(\type\()_bilin_hv_tbl) + CONFIG_THUMB
.word 160f - L(\type\()_bilin_hv_tbl) + CONFIG_THUMB
.word 80f - L(\type\()_bilin_hv_tbl) + CONFIG_THUMB
.word 40f - L(\type\()_bilin_hv_tbl) + CONFIG_THUMB
.word 20f - L(\type\()_bilin_hv_tbl) + CONFIG_THUMB
20: // 2xN hv
.ifc \type, put
add \sr2, \src, \s_strd
add \ds2, \dst, \d_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
vld1.32 {d28[]}, [\src], \s_strd
vext.8 d29, d28, d28, #1
vmull.u8 q8, d28, d0
vmlal.u8 q8, d29, d1
2:
vld1.32 {d28[]}, [\sr2], \s_strd
vld1.32 {d30[]}, [\src], \s_strd
vext.8 d29, d28, d28, #1
vext.8 d31, d30, d30, #1
vtrn.16 d28, d30
vtrn.16 d29, d31
vmull.u8 q9, d28, d0
vmlal.u8 q9, d29, d1
vtrn.32 d16, d18
vmul.u16 d20, d16, d4
vmla.u16 d20, d19, d6
vqrshrn.u16 d20, q10, #8
subs \h, \h, #2
vst1.16 {d20[0]}, [\dst, :16], \d_strd
vst1.16 {d20[1]}, [\ds2, :16], \d_strd
ble 0f
vtrn.32 d19, d16
b 2b
0:
pop {r4-r11,pc}
.endif
40: // 4xN hv
add \sr2, \src, \s_strd
add \ds2, \dst, \d_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
vld1.8 {d28}, [\src], \s_strd
vext.8 d29, d28, d28, #1
vmull.u8 q8, d28, d0
vmlal.u8 q8, d29, d1
4:
vld1.8 {d28}, [\sr2], \s_strd
vld1.8 {d30}, [\src], \s_strd
vext.8 d29, d28, d28, #1
vext.8 d31, d30, d30, #1
vtrn.32 d28, d30
vtrn.32 d29, d31
vmull.u8 q9, d28, d0
vmlal.u8 q9, d29, d1
vmov d17, d18
vmul.u16 q10, q8, q2
vmla.u16 q10, q9, q3
subs \h, \h, #2
.ifc \type, put
vqrshrn.u16 d20, q10, #8
vst1.32 {d20[0]}, [\dst, :32], \d_strd
vst1.32 {d20[1]}, [\ds2, :32], \d_strd
.else
vrshr.u16 q10, q10, #4
vst1.16 {d20}, [\dst, :64], \d_strd
vst1.16 {d21}, [\ds2, :64], \d_strd
.endif
ble 0f
vmov d16, d19
b 4b
0:
pop {r4-r11,pc}
80: // 8xN, 16xN, ... hv
160:
320:
640:
1280:
mov \my, \h
1:
add \sr2, \src, \s_strd
add \ds2, \dst, \d_strd
lsl \s_strd, \s_strd, #1
lsl \d_strd, \d_strd, #1
vld1.8 {q12}, [\src], \s_strd
vext.8 q13, q12, q12, #1
vmull.u8 q8, d24, d0
vmlal.u8 q8, d26, d1
2:
vld1.8 {q12}, [\sr2], \s_strd
vld1.8 {q14}, [\src], \s_strd
vext.8 q13, q12, q12, #1
vext.8 q15, q14, q14, #1
vmull.u8 q9, d24, d0
vmlal.u8 q9, d26, d1
vmull.u8 q10, d28, d0
vmlal.u8 q10, d30, d1
vmul.u16 q8, q8, q2
vmla.u16 q8, q9, q3
vmul.u16 q9, q9, q2
vmla.u16 q9, q10, q3
subs \h, \h, #2
.ifc \type, put
vqrshrn.u16 d16, q8, #8
vqrshrn.u16 d18, q9, #8
vst1.8 {d16}, [\dst, :64], \d_strd
vst1.8 {d18}, [\ds2, :64], \d_strd
.else
vrshr.u16 q8, q8, #4
vrshr.u16 q9, q9, #4
vst1.16 {q8}, [\dst, :128], \d_strd
vst1.16 {q9}, [\ds2, :128], \d_strd
.endif
ble 9f
vmov q8, q10
b 2b
9:
subs \w, \w, #8
ble 0f
asr \s_strd, \s_strd, #1
asr \d_strd, \d_strd, #1
mls \src, \s_strd, \my, \src
mls \dst, \d_strd, \my, \dst
sub \src, \src, \s_strd, lsl #1
mov \h, \my
add \src, \src, #8
.ifc \type, put
add \dst, \dst, #8
.else
add \dst, \dst, #16
.endif
b 1b
0:
pop {r4-r11,pc}
endfunc
.endm
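// Instantiate the filter macro for both output types: "put" writes 8-bit pixels
// and "prep" writes 16-bit intermediates; the trailing constant is presumably
// \shift_hv, the final rounding shift of the hv paths (10 for put, 6 for prep).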
filter_fn put, r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, 10
filter_fn prep, r0, r7, r1, r2, r3, r4, r5, r6, r8, r9, 6
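// Warp filter lookup: r12 = mc_warp_filter + 64*8 + (\src >> 10)*8, i.e. the
// 8-byte coefficient row for the current position; r11 holds the table base
// already offset by 64 entries (the position values can be negative).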
.macro load_filter_ptr src
asr r12, \src, #10
add r12, r11, r12, lsl #3
.endm
.macro load_filter_coef dst, src, inc
add \src, \src, \inc
vld1.8 {\dst}, [r12, :64]
.endm
.macro load_filter_row dst, src, inc
load_filter_ptr \src
load_filter_coef \dst, \src, \inc
.endm
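// Produces one row of 8 horizontal warp outputs in q2 (int16): each pixel uses
// its own 8-tap filter selected from the position in r5, which is advanced by
// r7 per pixel and, after being rewound, stepped by r8 for the next row
// (presumably the alpha/beta terms of abcd).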
function warp_filter_horz_neon
load_filter_ptr r5 // filter 0
vld1.16 {q7}, [r2], r3
vmov.i8 q6, #128
load_filter_coef d0, r5, r7 // filter 0
load_filter_row d1, r5, r7 // filter 1
load_filter_row d2, r5, r7 // filter 2
load_filter_ptr r5 // filter 3
veor q7, q7, q6 // xor with 128 (= subtract 128) to allow using signed vmull
load_filter_coef d3, r5, r7 // filter 3
vext.8 d12, d14, d15, #1 // filter 1 pixels
vext.8 d13, d14, d15, #2 // filter 2 pixels
load_filter_ptr r5 // filter 4
vmull.s8 q2, d14, d0 // filter 0 output
vmull.s8 q3, d12, d1 // filter 1 output
load_filter_coef d0, r5, r7 // filter 4
load_filter_ptr r5 // filter 5
vext.8 d12, d14, d15, #3 // filter 3 pixels
vmull.s8 q4, d13, d2 // filter 2 output
vext.8 d13, d14, d15, #4 // filter 4 pixels
vpadd.i16 d4, d4, d5 // pixel 0 (4x16)
vpadd.i16 d5, d6, d7 // pixel 1 (4x16)
load_filter_coef d1, r5, r7 // filter 5
load_filter_ptr r5 // filter 6
vmull.s8 q5, d12, d3 // filter 3 output
vext.8 d12, d14, d15, #5 // filter 5 pixels
vmull.s8 q3, d13, d0 // filter 4 output
load_filter_coef d0, r5, r7 // filter 6
vext.8 d13, d14, d15, #6 // filter 6 pixels
load_filter_ptr r5 // filter 7
vpadd.i16 d8, d8, d9 // pixel 2 (4x16)
vpadd.i16 d9, d10, d11 // pixel 3 (4x16)
vmull.s8 q5, d12, d1 // filter 5 output
load_filter_coef d1, r5, r7 // filter 7
vext.8 d14, d14, d15, #7 // filter 7 pixels
vpadd.i16 d6, d6, d7 // pixel 4 (4x16)
vpadd.i16 d10, d10, d11 // pixel 5 (4x16)
vmull.s8 q6, d13, d0 // filter 6 output
vmull.s8 q7, d14, d1 // filter 7 output
sub r5, r5, r7, lsl #3
vpadd.i16 d4, d4, d5 // pixel 0,1 (2x16)
vpadd.i16 d5, d8, d9 // pixel 2,3 (2x16)
vpadd.i16 d12, d12, d13 // pixel 6 (4x16)
vpadd.i16 d14, d14, d15 // pixel 7 (4x16)
vpadd.i16 d6, d6, d10 // pixel 4,5 (2x16)
vpadd.i16 d10, d12, d14 // pixel 6,7 (2x16)
vpadd.i16 d4, d4, d5 // pixel 0-3
vpadd.i16 d5, d6, d10 // pixel 4-7
add r5, r5, r8
bx lr
endfunc
// void dav1d_warp_affine_8x8_8bpc_neon(
// pixel *dst, const ptrdiff_t dst_stride,
// const pixel *src, const ptrdiff_t src_stride,
// const int16_t *const abcd, int mx, int my)
.macro warp t, shift
function warp_affine_8x8\t\()_8bpc_neon, export=1
push {r4-r11,lr}
vpush {q4-q7}
ldrd r4, r5, [sp, #100]
ldr r6, [sp, #108]
ldrd r8, r9, [r4]
sxth r7, r8
asr r8, r8, #16
asr r4, r9, #16
sxth r9, r9
mov r10, #8
sub r2, r2, r3, lsl #1
sub r2, r2, r3
sub r2, r2, #3
movrel r11, X(mc_warp_filter), 64*8
.ifnb \t
lsl r1, r1, #1
.endif
add r5, r5, #512
add r6, r6, #512
bl warp_filter_horz_neon
vrshr.s16 q8, q2, #3
bl warp_filter_horz_neon
vrshr.s16 q9, q2, #3
bl warp_filter_horz_neon
vrshr.s16 q10, q2, #3
bl warp_filter_horz_neon
vrshr.s16 q11, q2, #3
bl warp_filter_horz_neon
vrshr.s16 q12, q2, #3
bl warp_filter_horz_neon
vrshr.s16 q13, q2, #3
bl warp_filter_horz_neon
vrshr.s16 q14, q2, #3
1:
bl warp_filter_horz_neon
vrshr.s16 q15, q2, #3
load_filter_row d8, r6, r9
load_filter_row d9, r6, r9
load_filter_row d10, r6, r9
load_filter_row d11, r6, r9
load_filter_row d12, r6, r9
load_filter_row d13, r6, r9
load_filter_row d14, r6, r9
load_filter_row d15, r6, r9
transpose_8x8b q4, q5, q6, q7, d8, d9, d10, d11, d12, d13, d14, d15
vmovl.s8 q1, d8
vmovl.s8 q2, d9
vmovl.s8 q3, d10
vmovl.s8 q4, d11
vmovl.s8 q5, d12
vmovl.s8 q6, d13
sub r6, r6, r9, lsl #3
// This ordering of vmull/vmlal is highly beneficial for
// Cortex A8/A9/A53 here, but harmful for Cortex A7.
vmull.s16 q0, d16, d2
vmlal.s16 q0, d18, d4
vmlal.s16 q0, d20, d6
vmlal.s16 q0, d22, d8
vmlal.s16 q0, d24, d10
vmlal.s16 q0, d26, d12
vmull.s16 q1, d17, d3
vmlal.s16 q1, d19, d5
vmlal.s16 q1, d21, d7
vmlal.s16 q1, d23, d9
vmlal.s16 q1, d25, d11
vmlal.s16 q1, d27, d13
vmovl.s8 q2, d14
vmovl.s8 q3, d15
vmlal.s16 q0, d28, d4
vmlal.s16 q0, d30, d6
vmlal.s16 q1, d29, d5
vmlal.s16 q1, d31, d7
.ifb \t
vmov.i16 q7, #128
.else
vmov.i16 q7, #0x800
.endif
vmov q8, q9
vmov q9, q10
vqrshrn.s32 d0, q0, #\shift
vmov q10, q11
vqrshrn.s32 d1, q1, #\shift
vmov q11, q12
vadd.i16 q0, q0, q7
vmov q12, q13
.ifb \t
vqmovun.s16 d0, q0
.endif
vmov q13, q14
vmov q14, q15
subs r10, r10, #1
.ifnb \t
vst1.16 {q0}, [r0, :128], r1
.else
vst1.8 {d0}, [r0, :64], r1
.endif
add r6, r6, r4
bgt 1b
vpop {q4-q7}
pop {r4-r11,pc}
endfunc
.endm
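// Two variants: plain warp writes 8-bit pixels (final shift 11, +128 bias and
// vqmovun narrowing), while the "t" variant writes 16-bit intermediates
// (final shift 7, +0x800 bias, no narrowing).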
warp , 11
warp t, 7
// void dav1d_emu_edge_8bpc_neon(
// const intptr_t bw, const intptr_t bh,
// const intptr_t iw, const intptr_t ih,
// const intptr_t x, const intptr_t y,
// pixel *dst, const ptrdiff_t dst_stride,
// const pixel *ref, const ptrdiff_t ref_stride)
function emu_edge_8bpc_neon, export=1
push {r4-r11,lr}
ldrd r4, r5, [sp, #36]
ldrd r6, r7, [sp, #44]
ldrd r8, r9, [sp, #52]
// ref += iclip(y, 0, ih - 1) * PXSTRIDE(ref_stride)
// ref += iclip(x, 0, iw - 1)
sub r12, r3, #1 // ih - 1
cmp r5, r3
sub lr, r2, #1 // iw - 1
it lt
movlt r12, r5 // min(y, ih - 1)
cmp r4, r2
bic r12, r12, r12, asr #31 // max(min(y, ih - 1), 0)
it lt
movlt lr, r4 // min(x, iw - 1)
bic lr, lr, lr, asr #31 // max(min(x, iw - 1), 0)
mla r8, r12, r9, r8 // ref += iclip() * stride
add r8, r8, lr // ref += iclip()
// bottom_ext = iclip(y + bh - ih, 0, bh - 1)
// top_ext = iclip(-y, 0, bh - 1)
add r10, r5, r1 // y + bh
neg r5, r5 // -y
sub r10, r10, r3 // y + bh - ih
sub r12, r1, #1 // bh - 1
cmp r10, r1
bic r5, r5, r5, asr #31 // max(-y, 0)
it ge
movge r10, r12 // min(y + bh - ih, bh-1)
cmp r5, r1
bic r10, r10, r10, asr #31 // max(min(y + bh - ih, bh-1), 0)
it ge
movge r5, r12 // min(max(-y, 0), bh-1)
// right_ext = iclip(x + bw - iw, 0, bw - 1)
// left_ext = iclip(-x, 0, bw - 1)
add r11, r4, r0 // x + bw
neg r4, r4 // -x
sub r11, r11, r2 // x + bw - iw
sub lr, r0, #1 // bw - 1
cmp r11, r0
bic r4, r4, r4, asr #31 // max(-x, 0)
it ge
movge r11, lr // min(x + bw - iw, bw-1)
cmp r4, r0
bic r11, r11, r11, asr #31 // max(min(x + bw - iw, bw-1), 0)
it ge
movge r4, lr // min(max(-x, 0), bw - 1)
// center_h = bh - top_ext - bottom_ext
// dst += top_ext * PXSTRIDE(dst_stride)
// center_w = bw - left_ext - right_ext
sub r1, r1, r5 // bh - top_ext
mla r6, r5, r7, r6
sub r2, r0, r4 // bw - left_ext
sub r1, r1, r10 // center_h = bh - top_ext - bottom_ext
sub r2, r2, r11 // center_w = bw - left_ext - right_ext
mov r0, r6 // backup of dst
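// v_loop copies the center_h rows: optionally replicate the leftmost source
// pixel across left_ext, copy center_w pixels, then optionally replicate the
// rightmost pixel across right_ext.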
.macro v_loop need_left, need_right
0:
.if \need_left
vld1.8 {d0[], d1[]}, [r8]
mov r12, r6 // out = dst
mov r3, r4
1:
subs r3, r3, #16
vst1.8 {q0}, [r12, :128]!
bgt 1b
.endif
mov lr, r8
add r12, r6, r4 // out = dst + left_ext
mov r3, r2
1:
vld1.8 {q0, q1}, [lr]!
subs r3, r3, #32
.if \need_left
vst1.8 {q0, q1}, [r12]!
.else
vst1.8 {q0, q1}, [r12, :128]!
.endif
bgt 1b
.if \need_right
add r3, r8, r2 // in + center_w
sub r3, r3, #1 // in + center_w - 1
add r12, r6, r4 // dst + left_ext
vld1.8 {d0[], d1[]}, [r3]
add r12, r12, r2 // out = dst + left_ext + center_w
mov r3, r11
1:
subs r3, r3, #16
vst1.8 {q0}, [r12]!
bgt 1b
.endif
subs r1, r1, #1 // center_h--
add r6, r6, r7
add r8, r8, r9
bgt 0b
.endm
cmp r4, #0
beq 2f
// need_left
cmp r11, #0
beq 3f
// need_left + need_right
v_loop 1, 1
b 5f
2:
// !need_left
cmp r11, #0
beq 4f
// !need_left + need_right
v_loop 0, 1
b 5f
3:
// need_left + !need_right
v_loop 1, 0
b 5f
4:
// !need_left + !need_right
v_loop 0, 0
5:
cmp r10, #0
// Storing the original dst in r0 overwrote bw, recalculate it here
add r2, r2, r4 // center_w + left_ext
add r2, r2, r11 // bw = center_w + left_ext + right_ext
beq 3f
// need_bottom
sub r8, r6, r7 // ref = dst - stride
mov r4, r2
1:
vld1.8 {q0, q1}, [r8, :128]!
mov r3, r10
2:
subs r3, r3, #1
vst1.8 {q0, q1}, [r6, :128], r7
bgt 2b
mls r6, r7, r10, r6 // dst -= bottom_ext * stride
subs r4, r4, #32 // bw -= 32
add r6, r6, #32 // dst += 32
bgt 1b
3:
cmp r5, #0
beq 3f
// need_top
mls r6, r7, r5, r0 // dst = stored_dst - top_ext * stride
1:
vld1.8 {q0, q1}, [r0, :128]!
mov r3, r5
2:
subs r3, r3, #1
vst1.8 {q0, q1}, [r6, :128], r7
bgt 2b
mls r6, r7, r5, r6 // dst -= top_ext * stride
subs r2, r2, #32 // bw -= 32
add r6, r6, #32 // dst += 32
bgt 1b
3:
pop {r4-r11,pc}
endfunc
| Admenri/urge | 15,076 | third_party/dav1d/src/arm/32/looprestoration_tmpl.S |
/*
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2019, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm.S"
#define FILTER_OUT_STRIDE 384
.macro sgr_funcs bpc
// void dav1d_sgr_finish_filter_row1_Xbpc_neon(int16_t *tmp,
// const pixel *src,
// const int32_t **a, const int16_t **b,
// const int w);
function sgr_finish_filter_row1_\bpc\()bpc_neon, export=1
push {r4-r11,lr}
vpush {q4-q7}
ldr r4, [sp, #100]
ldrd r6, r7, [r2]
ldr r2, [r2, #8]
ldrd r8, r9, [r3]
ldr r3, [r3, #8]
vmov.i16 q14, #3
vmov.i32 q15, #3
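// 3x3 neighbourhood of the box sums: centre plus the four edge neighbours
// weighted 4, the four corners weighted 3, for both the 16-bit (a) and 32-bit
// (b) sums; the output is (b - a*src + (1 << 8)) >> 9.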
1:
vld1.16 {q0}, [r8, :128]!
vld1.16 {q1}, [r9, :128]!
vld1.16 {q2}, [r3, :128]!
vld1.32 {q8, q9}, [r6, :128]!
vld1.32 {q10, q11}, [r7, :128]!
vld1.32 {q12, q13}, [r2, :128]!
2:
subs r4, r4, #4
vext.8 d6, d0, d1, #2 // -stride
vext.8 d7, d2, d3, #2 // 0
vext.8 d8, d4, d5, #2 // +stride
vext.8 d9, d0, d1, #4 // +1-stride
vext.8 d10, d2, d3, #4 // +1
vext.8 d11, d4, d5, #4 // +1+stride
vadd.i16 d2, d2, d6 // -1, -stride
vadd.i16 d7, d7, d8 // 0, +stride
vadd.i16 d0, d0, d9 // -1-stride, +1-stride
vadd.i16 d2, d2, d7
vadd.i16 d4, d4, d11 // -1+stride, +1+stride
vadd.i16 d2, d2, d10 // +1
vadd.i16 d0, d0, d4
vext.8 q3, q8, q9, #4 // -stride
vshl.i16 d2, d2, #2
vext.8 q4, q8, q9, #8 // +1-stride
vext.8 q5, q10, q11, #4 // 0
vext.8 q6, q10, q11, #8 // +1
vmla.i16 d2, d0, d28 // * 3 -> a
vadd.i32 q3, q3, q10 // -stride, -1
vadd.i32 q8, q8, q4 // -1-stride, +1-stride
vadd.i32 q5, q5, q6 // 0, +1
vadd.i32 q8, q8, q12 // -1+stride
vadd.i32 q3, q3, q5
vext.8 q7, q12, q13, #4 // +stride
vext.8 q10, q12, q13, #8 // +1+stride
.if \bpc == 8
vld1.32 {d24[0]}, [r1, :32]! // src
.else
vld1.16 {d24}, [r1, :64]! // src
.endif
vadd.i32 q3, q3, q7 // +stride
vadd.i32 q8, q8, q10 // +1+stride
vshl.i32 q3, q3, #2
vmla.i32 q3, q8, q15 // * 3 -> b
.if \bpc == 8
vmovl.u8 q12, d24 // src
.endif
vmov d0, d1
vmlsl.u16 q3, d2, d24 // b - a * src
vmov d2, d3
vrshrn.i32 d6, q3, #9
vmov d4, d5
vst1.16 {d6}, [r0]!
ble 3f
vmov q8, q9
vmov q10, q11
vmov q12, q13
vld1.16 {d1}, [r8, :64]!
vld1.16 {d3}, [r9, :64]!
vld1.16 {d5}, [r3, :64]!
vld1.32 {q9}, [r6, :128]!
vld1.32 {q11}, [r7, :128]!
vld1.32 {q13}, [r2, :128]!
b 2b
3:
vpop {q4-q7}
pop {r4-r11,pc}
endfunc
// void dav1d_sgr_finish_filter2_2rows_Xbpc_neon(int16_t *tmp,
// const pixel *src, const ptrdiff_t stride,
// const int32_t **a, const int16_t **b,
// const int w, const int h);
function sgr_finish_filter2_2rows_\bpc\()bpc_neon, export=1
push {r4-r11,lr}
vpush {q4-q7}
ldrd r4, r5, [sp, #100]
ldr r6, [sp, #108]
ldrd r8, r9, [r3]
ldrd r10, r11, [r4]
mov r7, #2*FILTER_OUT_STRIDE
add r2, r1, r2
add r7, r7, r0
mov lr, r5
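// The first loop handles rows with a full two-row neighbourhood of box sums
// (corners weighted 5, the samples directly above/below weighted 6, output
// (b - a*src + (1 << 8)) >> 9); the second loop (label 4) handles the
// in-between rows of the 2x2 SGR grid with a single-row 1x3 neighbourhood
// (centre weighted 6, left/right weighted 5) and rounds >>8.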
1:
vld1.16 {q0, q1}, [r10, :128]!
vld1.16 {q2, q3}, [r11, :128]!
vld1.32 {q8, q9}, [r8, :128]!
vld1.32 {q11, q12}, [r9, :128]!
vld1.32 {q10}, [r8, :128]!
vld1.32 {q13}, [r9, :128]!
2:
vmov.i16 q14, #5
vmov.i16 q15, #6
subs r5, r5, #8
vext.8 q4, q0, q1, #4 // +1-stride
vext.8 q5, q2, q3, #4 // +1+stride
vext.8 q6, q0, q1, #2 // -stride
vext.8 q7, q2, q3, #2 // +stride
vadd.i16 q0, q0, q4 // -1-stride, +1-stride
vadd.i16 q5, q2, q5 // -1+stride, +1+stride
vadd.i16 q2, q6, q7 // -stride, +stride
vadd.i16 q0, q0, q5
vext.8 q4, q8, q9, #8 // +1-stride
vext.8 q5, q9, q10, #8
vext.8 q6, q11, q12, #8 // +1+stride
vext.8 q7, q12, q13, #8
vmul.i16 q0, q0, q14 // * 5
vmla.i16 q0, q2, q15 // * 6
vadd.i32 q4, q4, q8 // -1-stride, +1-stride
vadd.i32 q5, q5, q9
vadd.i32 q6, q6, q11 // -1+stride, +1+stride
vadd.i32 q7, q7, q12
vadd.i32 q4, q4, q6
vadd.i32 q5, q5, q7
vext.8 q6, q8, q9, #4 // -stride
vext.8 q7, q9, q10, #4
vext.8 q8, q11, q12, #4 // +stride
vext.8 q11, q12, q13, #4
.if \bpc == 8
vld1.8 {d4}, [r1, :64]!
.else
vld1.8 {q2}, [r1, :128]!
.endif
vmov.i32 q14, #5
vmov.i32 q15, #6
vadd.i32 q6, q6, q8 // -stride, +stride
vadd.i32 q7, q7, q11
vmul.i32 q4, q4, q14 // * 5
vmla.i32 q4, q6, q15 // * 6
vmul.i32 q5, q5, q14 // * 5
vmla.i32 q5, q7, q15 // * 6
.if \bpc == 8
vmovl.u8 q2, d4
.endif
vmlsl.u16 q4, d0, d4 // b - a * src
vmlsl.u16 q5, d1, d5 // b - a * src
vmov q0, q1
vrshrn.i32 d8, q4, #9
vrshrn.i32 d9, q5, #9
vmov q2, q3
vst1.16 {q4}, [r0, :128]!
ble 3f
vmov q8, q10
vmov q11, q13
vld1.16 {q1}, [r10, :128]!
vld1.16 {q3}, [r11, :128]!
vld1.32 {q9, q10}, [r8, :128]!
vld1.32 {q12, q13}, [r9, :128]!
b 2b
3:
subs r6, r6, #1
ble 0f
mov r5, lr
ldrd r8, r9, [r3]
ldrd r10, r11, [r4]
mov r0, r7
mov r1, r2
vld1.32 {q8, q9}, [r9, :128]!
vld1.16 {q0, q1}, [r11, :128]!
vld1.32 {q10}, [r9, :128]!
vmov.i16 q12, #5
vmov.i16 q13, #6
4:
subs r5, r5, #8
vext.8 q3, q0, q1, #4 // +1
vext.8 q2, q0, q1, #2 // 0
vadd.i16 q0, q0, q3 // -1, +1
vext.8 q4, q8, q9, #4 // 0
vext.8 q5, q9, q10, #4
vext.8 q6, q8, q9, #8 // +1
vext.8 q7, q9, q10, #8
vmul.i16 q2, q2, q13 // * 6
vmla.i16 q2, q0, q12 // * 5 -> a
.if \bpc == 8
vld1.8 {d22}, [r1, :64]!
.else
vld1.16 {q11}, [r1, :128]!
.endif
vadd.i32 q8, q8, q6 // -1, +1
vadd.i32 q9, q9, q7
.if \bpc == 8
vmovl.u8 q11, d22
.endif
vmul.i32 q4, q4, q15 // * 6
vmla.i32 q4, q8, q14 // * 5 -> b
vmul.i32 q5, q5, q15 // * 6
vmla.i32 q5, q9, q14 // * 5 -> b
vmlsl.u16 q4, d4, d22 // b - a * src
vmlsl.u16 q5, d5, d23
vmov q0, q1
vrshrn.i32 d8, q4, #8
vrshrn.i32 d9, q5, #8
vmov q8, q10
vst1.16 {q4}, [r0, :128]!
ble 5f
vld1.16 {q1}, [r11, :128]!
vld1.32 {q9, q10}, [r9, :128]!
b 4b
5:
0:
vpop {q4-q7}
pop {r4-r11,pc}
endfunc
// void dav1d_sgr_weighted_row1_Xbpc_neon(pixel *dst,
// const int16_t *t1, const int w,
// const int w1, const int bitdepth_max);
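// dst = clip(dst + ((t1 * w1 + (1 << 10)) >> 11)); the 16 bpc variant clamps
// the result to [0, bitdepth_max].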
function sgr_weighted_row1_\bpc\()bpc_neon, export=1
push {lr}
.if \bpc == 16
ldr lr, [sp, #4]
.endif
vdup.16 d31, r3
.if \bpc == 16
vmov.i16 q13, #0
vdup.16 q14, lr
.endif
1:
.if \bpc == 8
vld1.8 {d0}, [r0, :64]
.else
vld1.16 {q0}, [r0, :128]
.endif
vld1.16 {q1}, [r1, :128]!
subs r2, r2, #8
vmull.s16 q2, d2, d31 // v
vmull.s16 q3, d3, d31 // v
vrshrn.i32 d4, q2, #11
vrshrn.i32 d5, q3, #11
.if \bpc == 8
vaddw.u8 q2, q2, d0
vqmovun.s16 d2, q2
vst1.8 {d2}, [r0, :64]!
.else
vadd.i16 q2, q2, q0
vmax.s16 q2, q2, q13
vmin.u16 q2, q2, q14
vst1.16 {q2}, [r0, :128]!
.endif
bgt 1b
0:
pop {pc}
endfunc
// void dav1d_sgr_weighted2_Xbpc_neon(pixel *dst, const ptrdiff_t stride,
// const int16_t *t1, const int16_t *t2,
// const int w, const int h,
// const int16_t wt[2], const int bitdepth_max);
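// dst = clip(dst + ((t1*wt[0] + t2*wt[1] + (1 << 10)) >> 11)), processed two
// rows per iteration with a single-row path when h < 2.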
function sgr_weighted2_\bpc\()bpc_neon, export=1
push {r4-r8,lr}
ldrd r4, r5, [sp, #24]
.if \bpc == 8
ldr r6, [sp, #32]
.else
ldrd r6, r7, [sp, #32]
.endif
cmp r5, #2
add r8, r0, r1
add r12, r2, #2*FILTER_OUT_STRIDE
add lr, r3, #2*FILTER_OUT_STRIDE
vld2.16 {d30[], d31[]}, [r6] // wt[0], wt[1]
.if \bpc == 16
vdup.16 q14, r7
.endif
blt 2f
1:
.if \bpc == 8
vld1.8 {d0}, [r0, :64]
vld1.8 {d16}, [r8, :64]
.else
vld1.16 {q0}, [r0, :128]
vld1.16 {q8}, [r8, :128]
.endif
vld1.16 {q1}, [r2, :128]!
vld1.16 {q9}, [r12, :128]!
vld1.16 {q2}, [r3, :128]!
vld1.16 {q10}, [lr, :128]!
subs r4, r4, #8
vmull.s16 q3, d2, d30 // wt[0] * t1
vmlal.s16 q3, d4, d31 // wt[1] * t2
vmull.s16 q12, d3, d30 // wt[0] * t1
vmlal.s16 q12, d5, d31 // wt[1] * t2
vmull.s16 q11, d18, d30 // wt[0] * t1
vmlal.s16 q11, d20, d31 // wt[1] * t2
vmull.s16 q13, d19, d30 // wt[0] * t1
vmlal.s16 q13, d21, d31 // wt[1] * t2
vrshrn.i32 d6, q3, #11
vrshrn.i32 d7, q12, #11
vrshrn.i32 d22, q11, #11
vrshrn.i32 d23, q13, #11
.if \bpc == 8
vaddw.u8 q3, q3, d0
vaddw.u8 q11, q11, d16
vqmovun.s16 d6, q3
vqmovun.s16 d22, q11
vst1.8 {d6}, [r0, :64]!
vst1.8 {d22}, [r8, :64]!
.else
vmov.i16 q13, #0
vadd.i16 q3, q3, q0
vadd.i16 q11, q11, q8
vmax.s16 q3, q3, q13
vmax.s16 q11, q11, q13
vmin.u16 q3, q3, q14
vmin.u16 q11, q11, q14
vst1.16 {q3}, [r0, :128]!
vst1.16 {q11}, [r8, :128]!
.endif
bgt 1b
b 0f
2:
.if \bpc == 8
vld1.8 {d0}, [r0, :64]
.else
vld1.16 {q0}, [r0, :128]
.endif
vld1.16 {q1}, [r2, :128]!
vld1.16 {q2}, [r3, :128]!
subs r4, r4, #8
vmull.s16 q3, d2, d30 // wt[0] * t1
vmlal.s16 q3, d4, d31 // wt[1] * t2
vmull.s16 q11, d3, d30 // wt[0] * t1
vmlal.s16 q11, d5, d31 // wt[1] * t2
vrshrn.i32 d6, q3, #11
vrshrn.i32 d7, q11, #11
.if \bpc == 8
vaddw.u8 q3, q3, d0
vqmovun.s16 d6, q3
vst1.8 {d6}, [r0, :64]!
.else
vmov.i16 q13, #0
vadd.i16 q3, q3, q0
vmax.s16 q3, q3, q13
vmin.u16 q3, q3, q14
vst1.16 {q3}, [r0, :128]!
.endif
bgt 2b
0:
pop {r4-r8,pc}
endfunc
.endm
| Admenri/urge | 19,317 | third_party/dav1d/src/arm/32/cdef_tmpl.S |
/*
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2020, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm.S"
#include "util.S"
.macro dir_table w, stride
const directions\w
.byte -1 * \stride + 1, -2 * \stride + 2
.byte 0 * \stride + 1, -1 * \stride + 2
.byte 0 * \stride + 1, 0 * \stride + 2
.byte 0 * \stride + 1, 1 * \stride + 2
.byte 1 * \stride + 1, 2 * \stride + 2
.byte 1 * \stride + 0, 2 * \stride + 1
.byte 1 * \stride + 0, 2 * \stride + 0
.byte 1 * \stride + 0, 2 * \stride - 1
// Repeated, to avoid & 7
.byte -1 * \stride + 1, -2 * \stride + 2
.byte 0 * \stride + 1, -1 * \stride + 2
.byte 0 * \stride + 1, 0 * \stride + 2
.byte 0 * \stride + 1, 1 * \stride + 2
.byte 1 * \stride + 1, 2 * \stride + 2
.byte 1 * \stride + 0, 2 * \stride + 1
endconst
.endm
.macro tables
dir_table 8, 16
dir_table 4, 8
const pri_taps
.byte 4, 2, 3, 3
endconst
.endm
.macro load_px d11, d12, d21, d22, w
.if \w == 8
add r6, r2, r9, lsl #1 // x + off
sub r9, r2, r9, lsl #1 // x - off
vld1.16 {\d11,\d12}, [r6] // p0
vld1.16 {\d21,\d22}, [r9] // p1
.else
add r6, r2, r9, lsl #1 // x + off
sub r9, r2, r9, lsl #1 // x - off
vld1.16 {\d11}, [r6] // p0
add r6, r6, #2*8 // += stride
vld1.16 {\d21}, [r9] // p1
add r9, r9, #2*8 // += stride
vld1.16 {\d12}, [r6] // p0
vld1.16 {\d22}, [r9] // p1
.endif
.endm
.macro handle_pixel s1, s2, thresh_vec, shift, tap, min
.if \min
vmin.u16 q2, q2, \s1
vmax.s16 q3, q3, \s1
vmin.u16 q2, q2, \s2
vmax.s16 q3, q3, \s2
.endif
vabd.u16 q8, q0, \s1 // abs(diff)
vabd.u16 q11, q0, \s2 // abs(diff)
vshl.u16 q9, q8, \shift // abs(diff) >> shift
vshl.u16 q12, q11, \shift // abs(diff) >> shift
vqsub.u16 q9, \thresh_vec, q9 // clip = imax(0, threshold - (abs(diff) >> shift))
vqsub.u16 q12, \thresh_vec, q12// clip = imax(0, threshold - (abs(diff) >> shift))
vsub.i16 q10, \s1, q0 // diff = p0 - px
vsub.i16 q13, \s2, q0 // diff = p1 - px
vneg.s16 q8, q9 // -clip
vneg.s16 q11, q12 // -clip
vmin.s16 q10, q10, q9 // imin(diff, clip)
vmin.s16 q13, q13, q12 // imin(diff, clip)
vdup.16 q9, \tap // taps[k]
vmax.s16 q10, q10, q8 // constrain() = imax(imin(diff, clip), -clip)
vmax.s16 q13, q13, q11 // constrain() = imax(imin(diff, clip), -clip)
vmla.i16 q1, q10, q9 // sum += taps[k] * constrain()
vmla.i16 q1, q13, q9 // sum += taps[k] * constrain()
.endm
// void dav1d_cdef_filterX_Ybpc_neon(pixel *dst, ptrdiff_t dst_stride,
// const uint16_t *tmp, int pri_strength,
// int sec_strength, int dir, int damping,
// int h, size_t edges);
// (the 16 bpc variant additionally takes a trailing int bitdepth_max argument)
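// pri/sec select which of the two strengths are applied; min additionally
// tracks the running min/max of the tap pixels so the result can be clamped
// (only needed when both pri and sec are active).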
.macro filter_func w, bpc, pri, sec, min, suffix
function cdef_filter\w\suffix\()_\bpc\()bpc_neon
.if \bpc == 8
cmp r8, #0xf
beq cdef_filter\w\suffix\()_edged_neon
.endif
.if \pri
.if \bpc == 16
clz r9, r9
sub r9, r9, #24 // -bitdepth_min_8
neg r9, r9 // bitdepth_min_8
.endif
movrel_local r8, pri_taps
.if \bpc == 16
lsr r9, r3, r9 // pri_strength >> bitdepth_min_8
and r9, r9, #1 // (pri_strength >> bitdepth_min_8) & 1
.else
and r9, r3, #1
.endif
add r8, r8, r9, lsl #1
.endif
movrel_local r9, directions\w
add r5, r9, r5, lsl #1
vmov.u16 d17, #15
vdup.16 d16, r6 // damping
.if \pri
vdup.16 q5, r3 // threshold
.endif
.if \sec
vdup.16 q7, r4 // threshold
.endif
vmov.16 d8[0], r3
vmov.16 d8[1], r4
vclz.i16 d8, d8 // clz(threshold)
vsub.i16 d8, d17, d8 // ulog2(threshold)
vqsub.u16 d8, d16, d8 // shift = imax(0, damping - ulog2(threshold))
vneg.s16 d8, d8 // -shift
.if \sec
vdup.16 q6, d8[1]
.endif
.if \pri
vdup.16 q4, d8[0]
.endif
1:
.if \w == 8
vld1.16 {q0}, [r2, :128] // px
.else
add r12, r2, #2*8
vld1.16 {d0}, [r2, :64] // px
vld1.16 {d1}, [r12, :64] // px
.endif
vmov.u16 q1, #0 // sum
.if \min
vmov.u16 q2, q0 // min
vmov.u16 q3, q0 // max
.endif
// Instead of loading sec_taps 2, 1 from memory, just set it
// to 2 initially and decrease for the second round.
// This is also used as the loop counter.
mov lr, #2 // sec_taps[0]
2:
.if \pri
ldrsb r9, [r5] // off1
load_px d28, d29, d30, d31, \w
.endif
.if \sec
add r5, r5, #4 // +2*2
ldrsb r9, [r5] // off2
.endif
.if \pri
ldrb r12, [r8] // *pri_taps
handle_pixel q14, q15, q5, q4, r12, \min
.endif
.if \sec
load_px d28, d29, d30, d31, \w
add r5, r5, #8 // +2*4
ldrsb r9, [r5] // off3
handle_pixel q14, q15, q7, q6, lr, \min
load_px d28, d29, d30, d31, \w
handle_pixel q14, q15, q7, q6, lr, \min
sub r5, r5, #11 // r5 -= 2*(2+4); r5 += 1;
.else
add r5, r5, #1 // r5 += 1
.endif
subs lr, lr, #1 // sec_tap-- (value)
.if \pri
add r8, r8, #1 // pri_taps++ (pointer)
.endif
bne 2b
vshr.s16 q14, q1, #15 // -(sum < 0)
vadd.i16 q1, q1, q14 // sum - (sum < 0)
vrshr.s16 q1, q1, #4 // (8 + sum - (sum < 0)) >> 4
vadd.i16 q0, q0, q1 // px + (8 + sum ...) >> 4
.if \min
vmin.s16 q0, q0, q3
vmax.s16 q0, q0, q2 // iclip(px + .., min, max)
.endif
.if \bpc == 8
vmovn.u16 d0, q0
.endif
.if \w == 8
add r2, r2, #2*16 // tmp += tmp_stride
subs r7, r7, #1 // h--
.if \bpc == 8
vst1.8 {d0}, [r0, :64], r1
.else
vst1.16 {q0}, [r0, :128], r1
.endif
.else
.if \bpc == 8
vst1.32 {d0[0]}, [r0, :32], r1
.else
vst1.16 {d0}, [r0, :64], r1
.endif
add r2, r2, #2*16 // tmp += 2*tmp_stride
subs r7, r7, #2 // h -= 2
.if \bpc == 8
vst1.32 {d0[1]}, [r0, :32], r1
.else
vst1.16 {d1}, [r0, :64], r1
.endif
.endif
// Reset pri_taps and directions back to the original point
sub r5, r5, #2
.if \pri
sub r8, r8, #2
.endif
bgt 1b
vpop {q4-q7}
pop {r4-r9,pc}
endfunc
.endm
.macro filter w, bpc
filter_func \w, \bpc, pri=1, sec=0, min=0, suffix=_pri
filter_func \w, \bpc, pri=0, sec=1, min=0, suffix=_sec
filter_func \w, \bpc, pri=1, sec=1, min=1, suffix=_pri_sec
function cdef_filter\w\()_\bpc\()bpc_neon, export=1
push {r4-r9,lr}
vpush {q4-q7}
ldrd r4, r5, [sp, #92]
ldrd r6, r7, [sp, #100]
.if \bpc == 16
ldrd r8, r9, [sp, #108]
.else
ldr r8, [sp, #108]
.endif
cmp r3, #0 // pri_strength
bne 1f
b cdef_filter\w\()_sec_\bpc\()bpc_neon // only sec
1:
cmp r4, #0 // sec_strength
bne 1f
b cdef_filter\w\()_pri_\bpc\()bpc_neon // only pri
1:
b cdef_filter\w\()_pri_sec_\bpc\()bpc_neon // both pri and sec
endfunc
.endm
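// div_table[n] = 840 / (n + 1), used to normalise the squared directional sums.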
const div_table, align=4
.short 840, 420, 280, 210, 168, 140, 120, 105
endconst
const alt_fact, align=4
.short 420, 210, 140, 105, 105, 105, 105, 105, 140, 210, 420, 0
endconst
.macro cost_alt dest, s1, s2, s3, s4, s5, s6
vmull.s16 q1, \s1, \s1 // sum_alt[n]*sum_alt[n]
vmull.s16 q2, \s2, \s2
vmull.s16 q3, \s3, \s3
vmull.s16 q5, \s4, \s4 // sum_alt[n]*sum_alt[n]
vmull.s16 q12, \s5, \s5
vmull.s16 q6, \s6, \s6 // q6 overlaps the first \s1-\s2 here
vmul.i32 q1, q1, q13 // sum_alt[n]^2*fact
vmla.i32 q1, q2, q14
vmla.i32 q1, q3, q15
vmul.i32 q5, q5, q13 // sum_alt[n]^2*fact
vmla.i32 q5, q12, q14
vmla.i32 q5, q6, q15
vadd.i32 d2, d2, d3
vadd.i32 d3, d10, d11
vpadd.i32 \dest, d2, d3 // *cost_ptr
.endm
.macro find_best s1, s2, s3
.ifnb \s2
vmov.32 lr, \s2
.endif
cmp r12, r1 // cost[n] > best_cost
itt gt
movgt r0, r3 // best_dir = n
movgt r1, r12 // best_cost = cost[n]
.ifnb \s2
add r3, r3, #1 // n++
cmp lr, r1 // cost[n] > best_cost
vmov.32 r12, \s3
itt gt
movgt r0, r3 // best_dir = n
movgt r1, lr // best_cost = cost[n]
add r3, r3, #1 // n++
.endif
.endm
// int dav1d_cdef_find_dir_Xbpc_neon(const pixel *img, const ptrdiff_t stride,
// unsigned *const var)
// (the 16 bpc variant additionally takes const int bitdepth_max as a 4th argument)
.macro find_dir bpc
function cdef_find_dir_\bpc\()bpc_neon, export=1
push {lr}
vpush {q4-q7}
.if \bpc == 16
clz r3, r3 // clz(bitdepth_max)
sub lr, r3, #24 // -bitdepth_min_8
.endif
sub sp, sp, #32 // cost
mov r3, #8
vmov.u16 q1, #0 // q0-q1 sum_diag[0]
vmov.u16 q3, #0 // q2-q3 sum_diag[1]
vmov.u16 q5, #0 // q4-q5 sum_hv[0-1]
vmov.u16 q8, #0 // q6,d16 sum_alt[0]
// q7,d17 sum_alt[1]
vmov.u16 q9, #0 // q9,d22 sum_alt[2]
vmov.u16 q11, #0
vmov.u16 q10, #0 // q10,d23 sum_alt[3]
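// For each of the 8 input rows, accumulate the projections of (img[x] - 128)
// onto the 8 candidate directions: the two diagonals, horizontal/vertical and
// the four in-between ("alt") directions.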
.irpc i, 01234567
.if \bpc == 8
vld1.8 {d30}, [r0, :64], r1
vmov.u8 d31, #128
vsubl.u8 q15, d30, d31 // img[x] - 128
.else
vld1.16 {q15}, [r0, :128], r1
vdup.16 q14, lr // -bitdepth_min_8
vshl.u16 q15, q15, q14
vmov.u16 q14, #128
vsub.i16 q15, q15, q14 // img[x] - 128
.endif
vmov.u16 q14, #0
.if \i == 0
vmov q0, q15 // sum_diag[0]
.else
vext.8 q12, q14, q15, #(16-2*\i)
vext.8 q13, q15, q14, #(16-2*\i)
vadd.i16 q0, q0, q12 // sum_diag[0]
vadd.i16 q1, q1, q13 // sum_diag[0]
.endif
vrev64.16 q13, q15
vswp d26, d27 // [-x]
.if \i == 0
vmov q2, q13 // sum_diag[1]
.else
vext.8 q12, q14, q13, #(16-2*\i)
vext.8 q13, q13, q14, #(16-2*\i)
vadd.i16 q2, q2, q12 // sum_diag[1]
vadd.i16 q3, q3, q13 // sum_diag[1]
.endif
vpadd.u16 d26, d30, d31 // [(x >> 1)]
vmov.u16 d27, #0
vpadd.u16 d24, d26, d28
vpadd.u16 d24, d24, d28 // [y]
vmov.u16 r12, d24[0]
vadd.i16 q5, q5, q15 // sum_hv[1]
.if \i < 4
vmov.16 d8[\i], r12 // sum_hv[0]
.else
vmov.16 d9[\i-4], r12 // sum_hv[0]
.endif
.if \i == 0
vmov.u16 q6, q13 // sum_alt[0]
.else
vext.8 q12, q14, q13, #(16-2*\i)
vext.8 q14, q13, q14, #(16-2*\i)
vadd.i16 q6, q6, q12 // sum_alt[0]
vadd.i16 d16, d16, d28 // sum_alt[0]
.endif
vrev64.16 d26, d26 // [-(x >> 1)]
vmov.u16 q14, #0
.if \i == 0
vmov q7, q13 // sum_alt[1]
.else
vext.8 q12, q14, q13, #(16-2*\i)
vext.8 q13, q13, q14, #(16-2*\i)
vadd.i16 q7, q7, q12 // sum_alt[1]
vadd.i16 d17, d17, d26 // sum_alt[1]
.endif
.if \i < 6
vext.8 q12, q14, q15, #(16-2*(3-(\i/2)))
vext.8 q13, q15, q14, #(16-2*(3-(\i/2)))
vadd.i16 q9, q9, q12 // sum_alt[2]
vadd.i16 d22, d22, d26 // sum_alt[2]
.else
vadd.i16 q9, q9, q15 // sum_alt[2]
.endif
.if \i == 0
vmov q10, q15 // sum_alt[3]
.elseif \i == 1
vadd.i16 q10, q10, q15 // sum_alt[3]
.else
vext.8 q12, q14, q15, #(16-2*(\i/2))
vext.8 q13, q15, q14, #(16-2*(\i/2))
vadd.i16 q10, q10, q12 // sum_alt[3]
vadd.i16 d23, d23, d26 // sum_alt[3]
.endif
.endr
vmov.u32 q15, #105
vmull.s16 q12, d8, d8 // sum_hv[0]*sum_hv[0]
vmlal.s16 q12, d9, d9
vmull.s16 q13, d10, d10 // sum_hv[1]*sum_hv[1]
vmlal.s16 q13, d11, d11
vadd.s32 d8, d24, d25
vadd.s32 d9, d26, d27
vpadd.s32 d8, d8, d9 // cost[2,6] (s16, s17)
vmul.i32 d8, d8, d30 // cost[2,6] *= 105
vrev64.16 q1, q1
vrev64.16 q3, q3
vext.8 q1, q1, q1, #10 // sum_diag[0][14-n]
vext.8 q3, q3, q3, #10 // sum_diag[1][14-n]
vstr s16, [sp, #2*4] // cost[2]
vstr s17, [sp, #6*4] // cost[6]
movrel_local r12, div_table
vld1.16 {q14}, [r12, :128]
vmull.s16 q5, d0, d0 // sum_diag[0]*sum_diag[0]
vmull.s16 q12, d1, d1
vmlal.s16 q5, d2, d2
vmlal.s16 q12, d3, d3
vmull.s16 q0, d4, d4 // sum_diag[1]*sum_diag[1]
vmull.s16 q1, d5, d5
vmlal.s16 q0, d6, d6
vmlal.s16 q1, d7, d7
vmovl.u16 q13, d28 // div_table
vmovl.u16 q14, d29
vmul.i32 q5, q5, q13 // cost[0]
vmla.i32 q5, q12, q14
vmul.i32 q0, q0, q13 // cost[4]
vmla.i32 q0, q1, q14
vadd.i32 d10, d10, d11
vadd.i32 d0, d0, d1
vpadd.i32 d0, d10, d0 // cost[0,4] = s0,s1
movrel_local r12, alt_fact
vld1.16 {d29, d30, d31}, [r12, :64] // div_table[2*m+1] + 105
vstr s0, [sp, #0*4] // cost[0]
vstr s1, [sp, #4*4] // cost[4]
vmovl.u16 q13, d29 // div_table[2*m+1] + 105
vmovl.u16 q14, d30
vmovl.u16 q15, d31
cost_alt d14, d12, d13, d16, d14, d15, d17 // cost[1], cost[3]
cost_alt d15, d18, d19, d22, d20, d21, d23 // cost[5], cost[7]
vstr s28, [sp, #1*4] // cost[1]
vstr s29, [sp, #3*4] // cost[3]
mov r0, #0 // best_dir
vmov.32 r1, d0[0] // best_cost
mov r3, #1 // n
vstr s30, [sp, #5*4] // cost[5]
vstr s31, [sp, #7*4] // cost[7]
vmov.32 r12, d14[0]
find_best d14[0], d8[0], d14[1]
find_best d14[1], d0[1], d15[0]
find_best d15[0], d8[1], d15[1]
find_best d15[1]
eor r3, r0, #4 // best_dir ^4
ldr r12, [sp, r3, lsl #2]
sub r1, r1, r12 // best_cost - cost[best_dir ^ 4]
lsr r1, r1, #10
str r1, [r2] // *var
add sp, sp, #32
vpop {q4-q7}
pop {pc}
endfunc
.endm
| Admenri/urge | 115,266 | third_party/dav1d/src/arm/32/ipred.S |
/*
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2020, Martin Storsjo
* Copyright © 2019, B Krishnan Iyer
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm.S"
#include "util.S"
// void ipred_dc_128_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
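// Fills the block with the constant 128 (the DC prediction when no
// neighbours are available).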
function ipred_dc_128_8bpc_neon, export=1
push {r4, lr}
ldr r4, [sp, #8]
clz r3, r3
adr r2, L(ipred_dc_128_tbl)
sub r3, r3, #25
ldr r3, [r2, r3, lsl #2]
vmov.i8 q0, #128
add r2, r2, r3
add r12, r0, r1
lsl r1, r1, #1
bx r2
.align 2
L(ipred_dc_128_tbl):
.word 640f - L(ipred_dc_128_tbl) + CONFIG_THUMB
.word 320f - L(ipred_dc_128_tbl) + CONFIG_THUMB
.word 16f - L(ipred_dc_128_tbl) + CONFIG_THUMB
.word 8f - L(ipred_dc_128_tbl) + CONFIG_THUMB
.word 4f - L(ipred_dc_128_tbl) + CONFIG_THUMB
4:
vst1.32 {d0[0]}, [r0, :32], r1
vst1.32 {d0[0]}, [r12, :32], r1
subs r4, r4, #4
vst1.32 {d0[0]}, [r0, :32], r1
vst1.32 {d0[0]}, [r12, :32], r1
bgt 4b
pop {r4, pc}
8:
vst1.8 {d0}, [r0, :64], r1
vst1.8 {d0}, [r12, :64], r1
subs r4, r4, #4
vst1.8 {d0}, [r0, :64], r1
vst1.8 {d0}, [r12, :64], r1
bgt 8b
pop {r4, pc}
16:
vst1.8 {d0, d1}, [r0, :128], r1
vst1.8 {d0, d1}, [r12, :128], r1
subs r4, r4, #4
vst1.8 {d0, d1}, [r0, :128], r1
vst1.8 {d0, d1}, [r12, :128], r1
bgt 16b
pop {r4, pc}
320:
vmov.i8 q1, #128
32:
vst1.8 {d0, d1, d2, d3}, [r0, :128], r1
vst1.8 {d0, d1, d2, d3}, [r12, :128], r1
subs r4, r4, #4
vst1.8 {d0, d1, d2, d3}, [r0, :128], r1
vst1.8 {d0, d1, d2, d3}, [r12, :128], r1
bgt 32b
pop {r4, pc}
640:
vmov.i8 q1, #128
sub r1, r1, #32
64:
vst1.8 {d0, d1, d2, d3}, [r0, :128]!
vst1.8 {d0, d1, d2, d3}, [r12, :128]!
vst1.8 {d0, d1, d2, d3}, [r0, :128], r1
vst1.8 {d0, d1, d2, d3}, [r12, :128], r1
subs r4, r4, #4
vst1.8 {d0, d1, d2, d3}, [r0, :128]!
vst1.8 {d0, d1, d2, d3}, [r12, :128]!
vst1.8 {d0, d1, d2, d3}, [r0, :128], r1
vst1.8 {d0, d1, d2, d3}, [r12, :128], r1
bgt 64b
pop {r4, pc}
endfunc
// void ipred_v_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
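// Vertical prediction: every output row is a copy of the top-neighbour row
// (topleft + 1).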
function ipred_v_8bpc_neon, export=1
push {r4, lr}
ldr lr, [sp, #8]
clz r3, r3
adr r4, L(ipred_v_tbl)
sub r3, r3, #25
ldr r3, [r4, r3, lsl #2]
add r2, r2, #1
add r4, r4, r3
add r12, r0, r1
lsl r1, r1, #1
bx r4
.align 2
L(ipred_v_tbl):
.word 640f - L(ipred_v_tbl) + CONFIG_THUMB
.word 320f - L(ipred_v_tbl) + CONFIG_THUMB
.word 160f - L(ipred_v_tbl) + CONFIG_THUMB
.word 80f - L(ipred_v_tbl) + CONFIG_THUMB
.word 40f - L(ipred_v_tbl) + CONFIG_THUMB
40:
vld1.32 {d0[]}, [r2]
4:
vst1.32 {d0[0]}, [r0, :32], r1
vst1.32 {d0[0]}, [r12, :32], r1
subs lr, lr, #4
vst1.32 {d0[0]}, [r0, :32], r1
vst1.32 {d0[0]}, [r12, :32], r1
bgt 4b
pop {r4, pc}
80:
vld1.8 {d0}, [r2]
8:
vst1.8 {d0}, [r0, :64], r1
vst1.8 {d0}, [r12, :64], r1
subs lr, lr, #4
vst1.8 {d0}, [r0, :64], r1
vst1.8 {d0}, [r12, :64], r1
bgt 8b
pop {r4, pc}
160:
vld1.8 {q0}, [r2]
16:
vst1.8 {d0, d1}, [r0, :128], r1
vst1.8 {d0, d1}, [r12, :128], r1
subs lr, lr, #4
vst1.8 {d0, d1}, [r0, :128], r1
vst1.8 {d0, d1}, [r12, :128], r1
bgt 16b
pop {r4, pc}
320:
vld1.8 {q0, q1}, [r2]
32:
vst1.8 {d0, d1, d2, d3}, [r0, :128], r1
vst1.8 {d0, d1, d2, d3}, [r12, :128], r1
subs lr, lr, #4
vst1.8 {d0, d1, d2, d3}, [r0, :128], r1
vst1.8 {d0, d1, d2, d3}, [r12, :128], r1
bgt 32b
pop {r4, pc}
640:
vld1.8 {q0, q1}, [r2]!
sub r1, r1, #32
vld1.8 {q2, q3}, [r2]
64:
vst1.8 {d0, d1, d2, d3}, [r0, :128]!
vst1.8 {d0, d1, d2, d3}, [r12, :128]!
vst1.8 {d4, d5, d6, d7}, [r0, :128], r1
vst1.8 {d4, d5, d6, d7}, [r12, :128], r1
subs lr, lr, #4
vst1.8 {d0, d1, d2, d3}, [r0, :128]!
vst1.8 {d0, d1, d2, d3}, [r12, :128]!
vst1.8 {d4, d5, d6, d7}, [r0, :128], r1
vst1.8 {d4, d5, d6, d7}, [r12, :128], r1
bgt 64b
pop {r4, pc}
endfunc
// void ipred_h_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
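// Horizontal prediction: output row y is the left-neighbour pixel
// topleft[-1-y] broadcast across the row; four left pixels are loaded per
// iteration, walking the pointer backwards.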
function ipred_h_8bpc_neon, export=1
push {r4-r5, lr}
ldr r4, [sp, #12]
clz r3, r3
adr r5, L(ipred_h_tbl)
sub r3, r3, #25
ldr r3, [r5, r3, lsl #2]
sub r2, r2, #4
mov lr, #-4
add r5, r5, r3
add r12, r0, r1
lsl r1, r1, #1
bx r5
.align 2
L(ipred_h_tbl):
.word 640f - L(ipred_h_tbl) + CONFIG_THUMB
.word 320f - L(ipred_h_tbl) + CONFIG_THUMB
.word 160f - L(ipred_h_tbl) + CONFIG_THUMB
.word 8f - L(ipred_h_tbl) + CONFIG_THUMB
.word 4f - L(ipred_h_tbl) + CONFIG_THUMB
4:
vld4.8 {d0[], d1[], d2[], d3[]}, [r2, :32], lr
vst1.32 {d3[0]}, [r0, :32], r1
vst1.32 {d2[0]}, [r12, :32], r1
subs r4, r4, #4
vst1.32 {d1[0]}, [r0, :32], r1
vst1.32 {d0[0]}, [r12, :32], r1
bgt 4b
pop {r4-r5, pc}
8:
vld4.8 {d0[], d1[], d2[], d3[]}, [r2, :32], lr
vst1.8 {d3}, [r0, :64], r1
vst1.8 {d2}, [r12, :64], r1
subs r4, r4, #4
vst1.8 {d1}, [r0, :64], r1
vst1.8 {d0}, [r12, :64], r1
bgt 8b
pop {r4-r5, pc}
160:
add r2, r2, #3
mov lr, #-1
16:
vld1.8 {d0[], d1[]}, [r2], lr
subs r4, r4, #4
vld1.8 {d2[], d3[]}, [r2], lr
vst1.8 {q0}, [r0, :128], r1
vld1.8 {d4[], d5[]}, [r2], lr
vst1.8 {q1}, [r12, :128], r1
vld1.8 {d6[], d7[]}, [r2], lr
vst1.8 {q2}, [r0, :128], r1
vst1.8 {q3}, [r12, :128], r1
bgt 16b
pop {r4-r5, pc}
320:
add r2, r2, #3
mov lr, #-1
sub r1, r1, #16
32:
vld1.8 {d0[], d1[]}, [r2], lr
subs r4, r4, #4
vld1.8 {d2[], d3[]}, [r2], lr
vst1.8 {q0}, [r0, :128]!
vld1.8 {d4[], d5[]}, [r2], lr
vst1.8 {q1}, [r12, :128]!
vld1.8 {d6[], d7[]}, [r2], lr
vst1.8 {q0}, [r0, :128], r1
vst1.8 {q1}, [r12, :128], r1
vst1.8 {q2}, [r0, :128]!
vst1.8 {q3}, [r12, :128]!
vst1.8 {q2}, [r0, :128], r1
vst1.8 {q3}, [r12, :128], r1
bgt 32b
pop {r4-r5, pc}
640:
add r2, r2, #3
mov lr, #-1
sub r1, r1, #48
64:
vld1.8 {d0[], d1[]}, [r2], lr
subs r4, r4, #4
vld1.8 {d2[], d3[]}, [r2], lr
vst1.8 {q0}, [r0, :128]!
vld1.8 {d4[], d5[]}, [r2], lr
vst1.8 {q1}, [r12, :128]!
vld1.8 {d6[], d7[]}, [r2], lr
vst1.8 {q0}, [r0, :128]!
vst1.8 {q1}, [r12, :128]!
vst1.8 {q0}, [r0, :128]!
vst1.8 {q1}, [r12, :128]!
vst1.8 {q0}, [r0, :128], r1
vst1.8 {q1}, [r12, :128], r1
vst1.8 {q2}, [r0, :128]!
vst1.8 {q3}, [r12, :128]!
vst1.8 {q2}, [r0, :128]!
vst1.8 {q3}, [r12, :128]!
vst1.8 {q2}, [r0, :128]!
vst1.8 {q3}, [r12, :128]!
vst1.8 {q2}, [r0, :128], r1
vst1.8 {q3}, [r12, :128], r1
bgt 64b
pop {r4-r5, pc}
endfunc
// void ipred_dc_top_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
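// dc = (sum of the width top-neighbour pixels + width/2) >> log2(width),
// broadcast to the whole block.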
function ipred_dc_top_8bpc_neon, export=1
push {r4-r5, lr}
ldr r4, [sp, #12]
clz r3, r3
adr r5, L(ipred_dc_top_tbl)
sub r3, r3, #25
ldr r3, [r5, r3, lsl #2]
add r2, r2, #1
add r5, r5, r3
add r12, r0, r1
lsl r1, r1, #1
bx r5
.align 2
L(ipred_dc_top_tbl):
.word 640f - L(ipred_dc_top_tbl) + CONFIG_THUMB
.word 320f - L(ipred_dc_top_tbl) + CONFIG_THUMB
.word 160f - L(ipred_dc_top_tbl) + CONFIG_THUMB
.word 80f - L(ipred_dc_top_tbl) + CONFIG_THUMB
.word 40f - L(ipred_dc_top_tbl) + CONFIG_THUMB
40:
vld1.32 {d0[]}, [r2]
vpaddl.u8 d0, d0
vpadd.u16 d0, d0
vrshrn.u16 d0, q0, #2
vdup.8 d0, d0[0]
4:
vst1.32 {d0[0]}, [r0, :32], r1
vst1.32 {d0[0]}, [r12, :32], r1
subs r4, r4, #4
vst1.32 {d0[0]}, [r0, :32], r1
vst1.32 {d0[0]}, [r12, :32], r1
bgt 4b
pop {r4-r5, pc}
80:
vld1.8 {d0}, [r2]
vpaddl.u8 d0, d0
vpadd.u16 d0, d0
vpadd.u16 d0, d0
vrshrn.u16 d0, q0, #3
vdup.8 d0, d0[0]
8:
vst1.8 {d0}, [r0, :64], r1
vst1.8 {d0}, [r12, :64], r1
subs r4, r4, #4
vst1.8 {d0}, [r0, :64], r1
vst1.8 {d0}, [r12, :64], r1
bgt 8b
pop {r4-r5, pc}
160:
vld1.8 {d0, d1}, [r2]
vaddl.u8 q0, d0, d1
vadd.u16 d0, d0, d1
vpadd.u16 d0, d0
vpadd.u16 d0, d0
vrshrn.u16 d0, q0, #4
vdup.8 q0, d0[0]
16:
vst1.8 {d0, d1}, [r0, :128], r1
vst1.8 {d0, d1}, [r12, :128], r1
subs r4, r4, #4
vst1.8 {d0, d1}, [r0, :128], r1
vst1.8 {d0, d1}, [r12, :128], r1
bgt 16b
pop {r4-r5, pc}
320:
vld1.8 {d0, d1, d2, d3}, [r2]
vaddl.u8 q0, d0, d1
vaddl.u8 q1, d2, d3
vadd.u16 q0, q0, q1
vadd.u16 d0, d0, d1
vpadd.u16 d0, d0
vpadd.u16 d0, d0
vrshrn.u16 d4, q0, #5
vdup.8 q0, d4[0]
vdup.8 q1, d4[0]
32:
vst1.8 {d0, d1, d2, d3}, [r0, :128], r1
vst1.8 {d0, d1, d2, d3}, [r12, :128], r1
subs r4, r4, #4
vst1.8 {d0, d1, d2, d3}, [r0, :128], r1
vst1.8 {d0, d1, d2, d3}, [r12, :128], r1
bgt 32b
pop {r4-r5, pc}
640:
vld1.8 {d0, d1, d2, d3}, [r2]!
vaddl.u8 q0, d0, d1
vld1.8 {d4, d5, d6, d7}, [r2]
vaddl.u8 q1, d2, d3
vaddl.u8 q2, d4, d5
vaddl.u8 q3, d6, d7
vadd.u16 q0, q0, q1
vadd.u16 q1, q2, q3
vadd.u16 q0, q0, q1
vadd.u16 d0, d0, d1
vpadd.u16 d0, d0
vpadd.u16 d0, d0
vrshrn.u16 d18, q0, #6
vdup.8 q0, d18[0]
vdup.8 q1, d18[0]
sub r1, r1, #32
64:
vst1.8 {d0, d1, d2, d3}, [r0, :128]!
vst1.8 {d0, d1, d2, d3}, [r12, :128]!
vst1.8 {d0, d1, d2, d3}, [r0, :128], r1
vst1.8 {d0, d1, d2, d3}, [r12, :128], r1
subs r4, r4, #4
vst1.8 {d0, d1, d2, d3}, [r0, :128]!
vst1.8 {d0, d1, d2, d3}, [r12, :128]!
vst1.8 {d0, d1, d2, d3}, [r0, :128], r1
vst1.8 {d0, d1, d2, d3}, [r12, :128], r1
bgt 64b
pop {r4-r5, pc}
endfunc
// void ipred_dc_left_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
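// DC_LEFT: dc = (sum(left[0..height-1]) + (height >> 1)) >> log2(height),
// read from topleft - height. The table is looked up twice: once by height
// for the summing branch (_h*) and once by width for the store branch
// (_w*); the summing branch tail-jumps to the store branch through r3.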
function ipred_dc_left_8bpc_neon, export=1
push {r4-r5, lr}
ldr r4, [sp, #12]
sub r2, r2, r4
clz r3, r3
clz lr, r4
sub lr, lr, #25
adr r5, L(ipred_dc_left_tbl)
sub r3, r3, #20
ldr r3, [r5, r3, lsl #2]
ldr lr, [r5, lr, lsl #2]
add r3, r5, r3
add r5, r5, lr
add r12, r0, r1
lsl r1, r1, #1
bx r5
.align 2
L(ipred_dc_left_tbl):
.word L(ipred_dc_left_h64) - L(ipred_dc_left_tbl) + CONFIG_THUMB
.word L(ipred_dc_left_h32) - L(ipred_dc_left_tbl) + CONFIG_THUMB
.word L(ipred_dc_left_h16) - L(ipred_dc_left_tbl) + CONFIG_THUMB
.word L(ipred_dc_left_h8) - L(ipred_dc_left_tbl) + CONFIG_THUMB
.word L(ipred_dc_left_h4) - L(ipred_dc_left_tbl) + CONFIG_THUMB
.word L(ipred_dc_left_w64) - L(ipred_dc_left_tbl) + CONFIG_THUMB
.word L(ipred_dc_left_w32) - L(ipred_dc_left_tbl) + CONFIG_THUMB
.word L(ipred_dc_left_w16) - L(ipred_dc_left_tbl) + CONFIG_THUMB
.word L(ipred_dc_left_w8) - L(ipred_dc_left_tbl) + CONFIG_THUMB
.word L(ipred_dc_left_w4) - L(ipred_dc_left_tbl) + CONFIG_THUMB
L(ipred_dc_left_h4):
vld1.32 {d0[]}, [r2, :32]
vpaddl.u8 d0, d0
vpadd.u16 d0, d0
vrshrn.u16 d0, q0, #2
vdup.8 q0, d0[0]
bx r3
L(ipred_dc_left_w4):
vst1.32 {d0[0]}, [r0, :32], r1
vst1.32 {d0[0]}, [r12, :32], r1
subs r4, r4, #4
vst1.32 {d0[0]}, [r0, :32], r1
vst1.32 {d0[0]}, [r12, :32], r1
bgt L(ipred_dc_left_w4)
pop {r4-r5, pc}
L(ipred_dc_left_h8):
vld1.8 {d0}, [r2, :64]
vpaddl.u8 d0, d0
vpadd.u16 d0, d0
vpadd.u16 d0, d0
vrshrn.u16 d0, q0, #3
vdup.8 q0, d0[0]
bx r3
L(ipred_dc_left_w8):
vst1.8 {d0}, [r0, :64], r1
vst1.8 {d0}, [r12, :64], r1
subs r4, r4, #4
vst1.8 {d0}, [r0, :64], r1
vst1.8 {d0}, [r12, :64], r1
bgt L(ipred_dc_left_w8)
pop {r4-r5, pc}
L(ipred_dc_left_h16):
vld1.8 {d0, d1}, [r2, :128]
vaddl.u8 q0, d0, d1
vadd.u16 d0, d0, d1
vpadd.u16 d0, d0
vpadd.u16 d0, d0
vrshrn.u16 d0, q0, #4
vdup.8 q0, d0[0]
bx r3
L(ipred_dc_left_w16):
vst1.8 {d0, d1}, [r0, :128], r1
vst1.8 {d0, d1}, [r12, :128], r1
subs r4, r4, #4
vst1.8 {d0, d1}, [r0, :128], r1
vst1.8 {d0, d1}, [r12, :128], r1
bgt L(ipred_dc_left_w16)
pop {r4-r5, pc}
L(ipred_dc_left_h32):
vld1.8 {d0, d1, d2, d3}, [r2, :128]
vaddl.u8 q0, d0, d1
vaddl.u8 q1, d2, d3
vadd.u16 q0, q0, q1
vadd.u16 d0, d0, d1
vpadd.u16 d0, d0
vpadd.u16 d0, d0
vrshrn.u16 d0, q0, #5
vdup.8 q0, d0[0]
bx r3
L(ipred_dc_left_w32):
vmov.8 q1, q0
1:
vst1.8 {d0, d1, d2, d3}, [r0, :128], r1
vst1.8 {d0, d1, d2, d3}, [r12, :128], r1
subs r4, r4, #4
vst1.8 {d0, d1, d2, d3}, [r0, :128], r1
vst1.8 {d0, d1, d2, d3}, [r12, :128], r1
bgt 1b
pop {r4-r5, pc}
L(ipred_dc_left_h64):
vld1.8 {d0, d1, d2, d3}, [r2, :128]!
vld1.8 {d4, d5, d6, d7}, [r2, :128]
vaddl.u8 q0, d0, d1
vaddl.u8 q1, d2, d3
vaddl.u8 q2, d4, d5
vaddl.u8 q3, d6, d7
vadd.u16 q0, q0, q1
vadd.u16 q1, q2, q3
vadd.u16 q0, q0, q1
vadd.u16 d0, d0, d1
vpadd.u16 d0, d0
vpadd.u16 d0, d0
vrshrn.u16 d0, q0, #6
vdup.8 q0, d0[0]
bx r3
L(ipred_dc_left_w64):
vmov.8 q1, q0
sub r1, r1, #32
1:
vst1.8 {d0, d1, d2, d3}, [r0, :128]!
vst1.8 {d0, d1, d2, d3}, [r12, :128]!
vst1.8 {d0, d1, d2, d3}, [r0, :128], r1
vst1.8 {d0, d1, d2, d3}, [r12, :128], r1
subs r4, r4, #4
vst1.8 {d0, d1, d2, d3}, [r0, :128]!
vst1.8 {d0, d1, d2, d3}, [r12, :128]!
vst1.8 {d0, d1, d2, d3}, [r0, :128], r1
vst1.8 {d0, d1, d2, d3}, [r12, :128], r1
bgt 1b
pop {r4-r5, pc}
endfunc
// void ipred_dc_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
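// DC: dc = (sum(top) + sum(left) + ((w + h) >> 1)) >> log2(w + h). For
// square blocks the vshl by -ctz(w + h) (kept in d28) is the whole
// division; for 1:2 and 1:4 rectangles w + h is 3 or 5 times a power of
// two, so the remaining /3 or /5 is approximated with vqdmulh by
// 0x5556/2 (~1/3) or 0x3334/2 (~1/5), the constants being halved because
// vqdmulh doubles. Rough summary of the per-width branches below.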
function ipred_dc_8bpc_neon, export=1
push {r4-r6, lr}
ldr r4, [sp, #16]
sub r2, r2, r4
add lr, r3, r4 // width + height
clz r3, r3
clz r12, r4
vdup.16 q15, lr // width + height
adr r5, L(ipred_dc_tbl)
rbit lr, lr // rbit(width + height)
sub r3, r3, #20 // 25 leading bits, minus table offset 5
sub r12, r12, #25
clz lr, lr // ctz(width + height)
ldr r3, [r5, r3, lsl #2]
ldr r12, [r5, r12, lsl #2]
neg lr, lr // -ctz(width + height)
add r3, r5, r3
add r5, r5, r12
vshr.u16 q15, q15, #1 // (width + height) >> 1
vdup.16 q14, lr // -ctz(width + height)
add r12, r0, r1
lsl r1, r1, #1
bx r5
.align 2
L(ipred_dc_tbl):
.word L(ipred_dc_h64) - L(ipred_dc_tbl) + CONFIG_THUMB
.word L(ipred_dc_h32) - L(ipred_dc_tbl) + CONFIG_THUMB
.word L(ipred_dc_h16) - L(ipred_dc_tbl) + CONFIG_THUMB
.word L(ipred_dc_h8) - L(ipred_dc_tbl) + CONFIG_THUMB
.word L(ipred_dc_h4) - L(ipred_dc_tbl) + CONFIG_THUMB
.word L(ipred_dc_w64) - L(ipred_dc_tbl) + CONFIG_THUMB
.word L(ipred_dc_w32) - L(ipred_dc_tbl) + CONFIG_THUMB
.word L(ipred_dc_w16) - L(ipred_dc_tbl) + CONFIG_THUMB
.word L(ipred_dc_w8) - L(ipred_dc_tbl) + CONFIG_THUMB
.word L(ipred_dc_w4) - L(ipred_dc_tbl) + CONFIG_THUMB
L(ipred_dc_h4):
vld1.32 {d0[]}, [r2, :32]!
vpaddl.u8 d0, d0
add r2, r2, #1
vpadd.u16 d0, d0
bx r3
L(ipred_dc_w4):
vld1.32 {d1[]}, [r2]
vadd.s16 d0, d0, d30
vpaddl.u8 d1, d1
vpadd.u16 d1, d1
cmp r4, #4
vadd.s16 d0, d0, d1
vshl.u16 d0, d0, d28
beq 1f
// h = 8/16
movw lr, #(0x3334/2)
movw r5, #(0x5556/2)
cmp r4, #16
it ne
movne lr, r5
vdup.16 d30, lr
vqdmulh.s16 d0, d0, d30
1:
vdup.8 d0, d0[0]
2:
vst1.32 {d0[0]}, [r0, :32], r1
vst1.32 {d0[0]}, [r12, :32], r1
subs r4, r4, #4
vst1.32 {d0[0]}, [r0, :32], r1
vst1.32 {d0[0]}, [r12, :32], r1
bgt 2b
pop {r4-r6, pc}
L(ipred_dc_h8):
vld1.8 {d0}, [r2, :64]!
vpaddl.u8 d0, d0
vpadd.u16 d0, d0
add r2, r2, #1
vpadd.u16 d0, d0
bx r3
L(ipred_dc_w8):
vld1.8 {d2}, [r2]
vadd.s16 d0, d0, d30
vpaddl.u8 d2, d2
vpadd.u16 d2, d2
vpadd.u16 d2, d2
cmp r4, #8
vadd.s16 d0, d0, d2
vshl.u16 d0, d0, d28
beq 1f
// h = 4/16/32
cmp r4, #32
movw lr, #(0x3334/2)
movw r5, #(0x5556/2)
it ne
movne lr, r5
vdup.16 d24, lr
vqdmulh.s16 d0, d0, d24
1:
vdup.8 d0, d0[0]
2:
vst1.8 {d0}, [r0, :64], r1
vst1.8 {d0}, [r12, :64], r1
subs r4, r4, #4
vst1.8 {d0}, [r0, :64], r1
vst1.8 {d0}, [r12, :64], r1
bgt 2b
pop {r4-r6, pc}
L(ipred_dc_h16):
vld1.8 {d0, d1}, [r2, :128]!
vaddl.u8 q0, d0, d1
vadd.u16 d0, d0, d1
vpadd.u16 d0, d0
add r2, r2, #1
vpadd.u16 d0, d0
bx r3
L(ipred_dc_w16):
vld1.8 {d2, d3}, [r2]
vadd.s16 d0, d0, d30
vaddl.u8 q1, d2, d3
vadd.u16 d2, d2, d3
vpadd.u16 d2, d2
vpadd.u16 d2, d2
cmp r4, #16
vadd.s16 d0, d0, d2
vshl.u16 d0, d0, d28
beq 1f
// h = 4/8/32/64
tst r4, #(32+16+8) // 16 added to make a consecutive bitmask
movw lr, #(0x3334/2)
movw r5, #(0x5556/2)
it ne
movne lr, r5
vdup.16 d24, lr
vqdmulh.s16 d0, d0, d24
1:
vdup.8 q0, d0[0]
2:
vst1.8 {d0, d1}, [r0, :128], r1
vst1.8 {d0, d1}, [r12, :128], r1
subs r4, r4, #4
vst1.8 {d0, d1}, [r0, :128], r1
vst1.8 {d0, d1}, [r12, :128], r1
bgt 2b
pop {r4-r6, pc}
L(ipred_dc_h32):
vld1.8 {d0, d1, d2, d3}, [r2, :128]!
vaddl.u8 q0, d0, d1
vaddl.u8 q1, d2, d3
vadd.u16 q0, q0, q1
vadd.u16 d0, d0, d1
vpadd.u16 d0, d0
add r2, r2, #1
vpadd.u16 d0, d0
bx r3
L(ipred_dc_w32):
vld1.8 {d2, d3, d4, d5}, [r2]
vadd.s16 d0, d0, d30
vaddl.u8 q1, d2, d3
vaddl.u8 q2, d4, d5
vadd.u16 q1, q1, q2
vadd.u16 d2, d2, d3
vpadd.u16 d2, d2
vpadd.u16 d2, d2
cmp r4, #32
vadd.s16 d0, d0, d2
vshl.u16 d4, d0, d28
beq 1f
// h = 8/16/64
cmp r4, #8
movw lr, #(0x3334/2)
movw r5, #(0x5556/2)
it ne
movne lr, r5
vdup.16 d24, lr
vqdmulh.s16 d4, d4, d24
1:
vdup.8 q0, d4[0]
vdup.8 q1, d4[0]
2:
vst1.8 {d0, d1, d2, d3}, [r0, :128], r1
vst1.8 {d0, d1, d2, d3}, [r12, :128], r1
subs r4, r4, #4
vst1.8 {d0, d1, d2, d3}, [r0, :128], r1
vst1.8 {d0, d1, d2, d3}, [r12, :128], r1
bgt 2b
pop {r4-r6, pc}
L(ipred_dc_h64):
vld1.8 {d0, d1, d2, d3}, [r2, :128]!
vaddl.u8 q0, d0, d1
vld1.8 {d4, d5, d6, d7}, [r2, :128]!
vaddl.u8 q1, d2, d3
vaddl.u8 q2, d4, d5
vaddl.u8 q3, d6, d7
vadd.u16 q0, q0, q1
vadd.u16 q1, q2, q3
vadd.u16 q0, q0, q1
vadd.u16 d0, d0, d1
vpadd.u16 d0, d0
add r2, r2, #1
vpadd.u16 d0, d0
bx r3
L(ipred_dc_w64):
vld1.8 {d2, d3, d4, d5}, [r2]!
vadd.s16 d0, d0, d30
vaddl.u8 q2, d4, d5
vaddl.u8 q1, d2, d3
vadd.u16 d4, d4, d5
vadd.u16 d2, d2, d3
vld1.8 {d16, d17, d18, d19}, [r2]
vpadd.u16 d4, d4
vpadd.u16 d2, d2
vpadd.u16 d4, d4
vpadd.u16 d2, d2
vaddl.u8 q8, d16, d17
vaddl.u8 q9, d18, d19
vadd.u16 d16, d16, d17
vadd.u16 d18, d18, d19
vpadd.u16 d16, d16
vpadd.u16 d18, d18
vpadd.u16 d16, d16
vpadd.u16 d18, d18
vadd.u16 d2, d2, d4
vadd.u16 d3, d16, d18
cmp r4, #64
vadd.s16 d0, d0, d2
vadd.s16 d0, d0, d3
vshl.u16 d18, d0, d28
beq 1f
// h = 16/32
movw lr, #(0x5556/2)
movt lr, #(0x3334/2)
and r5, r4, #31
lsr lr, lr, r5
vdup.16 d30, lr
vqdmulh.s16 d18, d18, d30
1:
sub r1, r1, #32
vdup.8 q0, d18[0]
vdup.8 q1, d18[0]
2:
vst1.8 {d0, d1, d2, d3}, [r0, :128]!
vst1.8 {d0, d1, d2, d3}, [r12, :128]!
vst1.8 {d0, d1, d2, d3}, [r0, :128], r1
vst1.8 {d0, d1, d2, d3}, [r12, :128], r1
subs r4, r4, #4
vst1.8 {d0, d1, d2, d3}, [r0, :128]!
vst1.8 {d0, d1, d2, d3}, [r12, :128]!
vst1.8 {d0, d1, d2, d3}, [r0, :128], r1
vst1.8 {d0, d1, d2, d3}, [r12, :128], r1
bgt 2b
pop {r4-r6, pc}
endfunc
// void ipred_paeth_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
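// PAETH: base = left + top - topleft (computed with unsigned saturation
// here); each output picks whichever of left/top/topleft is closest to
// base, with ties resolved towards left, then top. The ldiff/tdiff/tldiff
// values below are the absolute distances and vbsl/vbit do the selection.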
function ipred_paeth_8bpc_neon, export=1
push {r4-r8, lr}
ldr r4, [sp, #24]
clz lr, r3
adr r5, L(ipred_paeth_tbl)
sub lr, lr, #25
ldr lr, [r5, lr, lsl #2]
vld1.8 {d4[], d5[]}, [r2]
add r8, r2, #1
sub r2, r2, #4
add r5, r5, lr
mov r7, #-4
add r6, r0, r1
lsl r1, r1, #1
bx r5
.align 2
L(ipred_paeth_tbl):
.word 640f - L(ipred_paeth_tbl) + CONFIG_THUMB
.word 320f - L(ipred_paeth_tbl) + CONFIG_THUMB
.word 160f - L(ipred_paeth_tbl) + CONFIG_THUMB
.word 80f - L(ipred_paeth_tbl) + CONFIG_THUMB
.word 40f - L(ipred_paeth_tbl) + CONFIG_THUMB
40:
vld1.32 {d6[], d7[]}, [r8]
vsubl.u8 q8, d6, d4 // top - topleft
4:
vld4.8 {d0[], d1[], d2[], d3[]}, [r2, :32], r7
vzip.32 d0, d1
vzip.32 d2, d3
vaddw.u8 q9, q8, d0
vaddw.u8 q10, q8, d2
vqmovun.s16 d18, q9 // base
vqmovun.s16 d19, q10
vmov d1, d2
vabd.u8 q10, q3, q9 // tdiff
vabd.u8 q11, q2, q9 // tldiff
vabd.u8 q9, q0, q9 // ldiff
vmin.u8 q12, q10, q11 // min(tdiff, tldiff)
vcge.u8 q10, q11, q10 // tldiff >= tdiff
vcge.u8 q9, q12, q9 // min(tdiff, tldiff) >= ldiff
vbsl q10, q3, q2 // tdiff <= tldiff ? top : topleft
vbit q10, q0, q9 // ldiff <= min ? left : ...
vst1.32 {d21[1]}, [r0, :32], r1
vst1.32 {d21[0]}, [r6, :32], r1
subs r4, r4, #4
vst1.32 {d20[1]}, [r0, :32], r1
vst1.32 {d20[0]}, [r6, :32], r1
bgt 4b
pop {r4-r8, pc}
80:
vld1.8 {d6}, [r8]
vsubl.u8 q8, d6, d4 // top - topleft
vmov d7, d6
8:
vld4.8 {d0[], d1[], d2[], d3[]}, [r2, :32], r7
vaddw.u8 q9, q8, d0
vaddw.u8 q10, q8, d1
vaddw.u8 q11, q8, d2
vaddw.u8 q12, q8, d3
vqmovun.s16 d18, q9 // base
vqmovun.s16 d19, q10
vqmovun.s16 d20, q11
vqmovun.s16 d21, q12
vabd.u8 q11, q3, q9 // tdiff
vabd.u8 q12, q3, q10
vabd.u8 q13, q2, q9 // tldiff
vabd.u8 q14, q2, q10
vabd.u8 q10, q1, q10 // ldiff
vabd.u8 q9, q0, q9
vmin.u8 q15, q12, q14 // min(tdiff, tldiff)
vcge.u8 q12, q14, q12 // tldiff >= tdiff
vmin.u8 q14, q11, q13 // min(tdiff, tldiff)
vcge.u8 q11, q13, q11 // tldiff >= tdiff
vcge.u8 q10, q15, q10 // min(tdiff, tldiff) >= ldiff
vcge.u8 q9, q14, q9
vbsl q12, q3, q2 // tdiff <= tldiff ? top : topleft
vbsl q11, q3, q2
vbit q12, q1, q10 // ldiff <= min ? left : ...
vbit q11, q0, q9
vst1.8 {d25}, [r0, :64], r1
vst1.8 {d24}, [r6, :64], r1
subs r4, r4, #4
vst1.8 {d23}, [r0, :64], r1
vst1.8 {d22}, [r6, :64], r1
bgt 8b
pop {r4-r8, pc}
160:
320:
640:
vld1.8 {d6}, [r8]!
mov r12, r3
// Set up pointers for four rows in parallel; r0, r6, r5, lr
add r5, r0, r1
add lr, r6, r1
lsl r1, r1, #1
sub r1, r1, r3
1:
vld4.8 {d0[], d1[], d2[], d3[]}, [r2, :32], r7
2:
vsubl.u8 q8, d6, d4 // top - topleft
vmov d7, d6
vaddw.u8 q9, q8, d0
vaddw.u8 q10, q8, d1
vaddw.u8 q11, q8, d2
vaddw.u8 q12, q8, d3
vqmovun.s16 d18, q9 // base
vqmovun.s16 d19, q10
vqmovun.s16 d20, q11
vqmovun.s16 d21, q12
vabd.u8 q11, q3, q9 // tdiff
vabd.u8 q12, q3, q10
vabd.u8 q13, q2, q9 // tldiff
vabd.u8 q14, q2, q10
vabd.u8 q10, q1, q10 // ldiff
vabd.u8 q9, q0, q9
vmin.u8 q15, q12, q14 // min(tdiff, tldiff)
vcge.u8 q12, q14, q12 // tldiff >= tdiff
vmin.u8 q14, q11, q13 // min(tdiff, tldiff)
vcge.u8 q11, q13, q11 // tldiff >= tdiff
vcge.u8 q10, q15, q10 // min(tdiff, tldiff) >= ldiff
vcge.u8 q9, q14, q9
vbsl q12, q3, q2 // tdiff <= tldiff ? top : topleft
vbsl q11, q3, q2
vbit q12, q1, q10 // ldiff <= min ? left : ...
vbit q11, q0, q9
subs r3, r3, #8
vst1.8 {d25}, [r0, :64]!
vst1.8 {d24}, [r6, :64]!
vst1.8 {d23}, [r5, :64]!
vst1.8 {d22}, [lr, :64]!
ble 8f
vld1.8 {d6}, [r8]!
b 2b
8:
subs r4, r4, #4
ble 9f
// End of horizontal loop, move pointers to next four rows
sub r8, r8, r12
add r0, r0, r1
add r6, r6, r1
vld1.8 {d6}, [r8]!
add r5, r5, r1
add lr, lr, r1
mov r3, r12
b 1b
9:
pop {r4-r8, pc}
endfunc
// void ipred_smooth_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
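// SMOOTH: a vertical blend of the top row towards "bottom" (the lowest
// left pixel, topleft[-height]) and a horizontal blend of the left column
// towards "right" (topleft[width]), using the sm_weights entries for the
// block height and width, averaged together:
//   ver = bottom*256 + (top - bottom)*weights_ver[y]
//   hor = right*256  + (left - right)*weights_hor[x]
//   pred ~= (ver + hor + 256) >> 9   (vhadd + vrshrn #8 below)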
function ipred_smooth_8bpc_neon, export=1
push {r4-r10, lr}
ldr r4, [sp, #32]
movrel r10, X(sm_weights)
add r12, r10, r4
add r10, r10, r3
clz r9, r3
adr r5, L(ipred_smooth_tbl)
sub lr, r2, r4
sub r9, r9, #25
ldr r9, [r5, r9, lsl #2]
vld1.8 {d4[]}, [lr] // bottom
add r8, r2, #1
add r5, r5, r9
add r6, r0, r1
lsl r1, r1, #1
bx r5
.align 2
L(ipred_smooth_tbl):
.word 640f - L(ipred_smooth_tbl) + CONFIG_THUMB
.word 320f - L(ipred_smooth_tbl) + CONFIG_THUMB
.word 160f - L(ipred_smooth_tbl) + CONFIG_THUMB
.word 80f - L(ipred_smooth_tbl) + CONFIG_THUMB
.word 40f - L(ipred_smooth_tbl) + CONFIG_THUMB
40:
vld1.32 {d16[]}, [r8] // top
vld1.32 {d18[]}, [r10, :32] // weights_hor
sub r2, r2, #4
mov r7, #-4
vdup.8 q3, d16[3] // right
vsubl.u8 q8, d16, d4 // top-bottom
vmovl.u8 q9, d18 // weights_hor
4:
vld4.8 {d0[], d1[], d2[], d3[]}, [r2, :32], r7 // left
vld4.8 {d20[], d21[], d22[], d23[]}, [r12, :32]! // weights_ver
vshll.i8 q12, d6, #8 // right*256
vshll.i8 q13, d6, #8
vzip.32 d1, d0 // left, flipped
vzip.32 d3, d2
vzip.32 d20, d21 // weights_ver
vzip.32 d22, d23
vshll.i8 q14, d4, #8 // bottom*256
vshll.i8 q15, d4, #8
vsubl.u8 q0, d1, d6 // left-right
vsubl.u8 q1, d3, d6
vmovl.u8 q10, d20 // weights_ver
vmovl.u8 q11, d22
vmla.i16 q12, q1, q9 // right*256 + (left-right)*weights_hor
vmla.i16 q13, q0, q9 // (left flipped)
vmla.i16 q14, q8, q10 // bottom*256 + (top-bottom)*weights_ver
vmla.i16 q15, q8, q11
vhadd.u16 q12, q12, q14
vhadd.u16 q13, q13, q15
vrshrn.i16 d24, q12, #8
vrshrn.i16 d25, q13, #8
vst1.32 {d24[0]}, [r0, :32], r1
vst1.32 {d24[1]}, [r6, :32], r1
subs r4, r4, #4
vst1.32 {d25[0]}, [r0, :32], r1
vst1.32 {d25[1]}, [r6, :32], r1
bgt 4b
pop {r4-r10, pc}
80:
vld1.8 {d16}, [r8] // top
vld1.8 {d18}, [r10, :64] // weights_hor
sub r2, r2, #2
mov r7, #-2
vdup.8 q3, d16[7] // right
vsubl.u8 q8, d16, d4 // top-bottom
vmovl.u8 q9, d18 // weights_hor
8:
vld2.8 {d0[], d1[]}, [r2, :16], r7 // left
vld2.8 {d20[], d22[]}, [r12, :16]! // weights_ver
vshll.i8 q12, d6, #8 // right*256
vshll.i8 q13, d6, #8
vshll.i8 q14, d4, #8 // bottom*256
vshll.i8 q15, d4, #8
vsubl.u8 q1, d0, d6 // left-right (left flipped)
vsubl.u8 q0, d1, d6
vmovl.u8 q10, d20 // weights_ver
vmovl.u8 q11, d22
vmla.i16 q12, q0, q9 // right*256 + (left-right)*weights_hor
vmla.i16 q13, q1, q9
vmla.i16 q14, q8, q10 // bottom*256 + (top-bottom)*weights_ver
vmla.i16 q15, q8, q11
vhadd.u16 q12, q12, q14
vhadd.u16 q13, q13, q15
vrshrn.i16 d24, q12, #8
vrshrn.i16 d25, q13, #8
subs r4, r4, #2
vst1.8 {d24}, [r0, :64], r1
vst1.8 {d25}, [r6, :64], r1
bgt 8b
pop {r4-r10, pc}
160:
320:
640:
add lr, r2, r3
sub r2, r2, #2
mov r7, #-2
vld1.8 {d6[], d7[]}, [lr] // right
sub r1, r1, r3
mov r9, r3
1:
vld2.8 {d0[], d1[]}, [r2, :16], r7 // left
vld2.8 {d20[], d22[]}, [r12, :16]! // weights_ver
vsubl.u8 q1, d0, d6 // left-right (left flipped)
vsubl.u8 q0, d1, d6
vmovl.u8 q10, d20 // weights_ver
vmovl.u8 q11, d22
2:
vld1.8 {d16}, [r8]! // top
vld1.8 {d18}, [r10, :64]! // weights_hor
vshll.i8 q12, d6, #8 // right*256
vshll.i8 q13, d6, #8
vmovl.u8 q9, d18 // weights_hor
vshll.i8 q14, d4, #8 // bottom*256
vshll.i8 q15, d4, #8
vsubl.u8 q8, d16, d4 // top-bottom
vmla.i16 q12, q0, q9 // right*256 + (left-right)*weights_hor
vmla.i16 q13, q1, q9
vmla.i16 q14, q8, q10 // bottom*256 + (top-bottom)*weights_ver
vmla.i16 q15, q8, q11
vhadd.u16 q12, q12, q14
vhadd.u16 q13, q13, q15
vrshrn.i16 d24, q12, #8
vrshrn.i16 d25, q13, #8
subs r3, r3, #8
vst1.8 {d24}, [r0, :64]!
vst1.8 {d25}, [r6, :64]!
bgt 2b
subs r4, r4, #2
ble 9f
sub r8, r8, r9
sub r10, r10, r9
add r0, r0, r1
add r6, r6, r1
mov r3, r9
b 1b
9:
pop {r4-r10, pc}
endfunc
// void ipred_smooth_v_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
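// SMOOTH_V: only the vertical blend of SMOOTH,
//   pred = (bottom*256 + (top - bottom)*weights_ver[y] + 128) >> 8
// with "bottom" = topleft[-height].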
function ipred_smooth_v_8bpc_neon, export=1
push {r4-r7, lr}
ldr r4, [sp, #20]
movrel r7, X(sm_weights)
add r7, r7, r4
clz lr, r3
adr r5, L(ipred_smooth_v_tbl)
sub r12, r2, r4
sub lr, lr, #25
ldr lr, [r5, lr, lsl #2]
vld1.8 {d4[]}, [r12] // bottom
add r2, r2, #1
add r5, r5, lr
add r6, r0, r1
lsl r1, r1, #1
bx r5
.align 2
L(ipred_smooth_v_tbl):
.word 640f - L(ipred_smooth_v_tbl) + CONFIG_THUMB
.word 320f - L(ipred_smooth_v_tbl) + CONFIG_THUMB
.word 160f - L(ipred_smooth_v_tbl) + CONFIG_THUMB
.word 80f - L(ipred_smooth_v_tbl) + CONFIG_THUMB
.word 40f - L(ipred_smooth_v_tbl) + CONFIG_THUMB
40:
vld1.32 {d6[]}, [r2] // top
vsubl.u8 q3, d6, d4 // top-bottom
4:
vld4.8 {d16[], d17[], d18[], d19[]}, [r7, :32]! // weights_ver
vshll.i8 q10, d4, #8 // bottom*256
vshll.i8 q11, d4, #8
vzip.32 d16, d17 // weights_ver
vzip.32 d18, d19
vmovl.u8 q8, d16 // weights_ver
vmovl.u8 q9, d18
subs r4, r4, #4
vmla.i16 q10, q3, q8 // bottom*256 + (top-bottom)*weights_ver
vmla.i16 q11, q3, q9
vrshrn.i16 d20, q10, #8
vrshrn.i16 d21, q11, #8
vst1.32 {d20[0]}, [r0, :32], r1
vst1.32 {d20[1]}, [r6, :32], r1
vst1.32 {d21[0]}, [r0, :32], r1
vst1.32 {d21[1]}, [r6, :32], r1
bgt 4b
pop {r4-r7, pc}
80:
vld1.8 {d6}, [r2] // top
vsubl.u8 q3, d6, d4 // top-bottom
8:
vld4.8 {d16[], d18[], d20[], d22[]}, [r7, :32]! // weights_ver
vshll.i8 q12, d4, #8 // bottom*256
vshll.i8 q13, d4, #8
vshll.i8 q14, d4, #8
vshll.i8 q15, d4, #8
vmovl.u8 q8, d16 // weights_ver
vmovl.u8 q9, d18
vmovl.u8 q10, d20
vmovl.u8 q11, d22
vmla.i16 q12, q3, q8 // bottom*256 + (top-bottom)*weights_ver
vmla.i16 q13, q3, q9
vmla.i16 q14, q3, q10
vmla.i16 q15, q3, q11
vrshrn.i16 d24, q12, #8
vrshrn.i16 d25, q13, #8
vrshrn.i16 d26, q14, #8
vrshrn.i16 d27, q15, #8
vst1.8 {d24}, [r0, :64], r1
vst1.8 {d25}, [r6, :64], r1
subs r4, r4, #4
vst1.8 {d26}, [r0, :64], r1
vst1.8 {d27}, [r6, :64], r1
bgt 8b
pop {r4-r7, pc}
160:
320:
640:
vpush {q4-q7}
// Set up pointers for four rows in parallel; r0, r6, r5, lr
add r5, r0, r1
add lr, r6, r1
lsl r1, r1, #1
sub r1, r1, r3
mov r12, r3
1:
vld4.8 {d8[], d10[], d12[], d14[]}, [r7, :32]! // weights_ver
vmovl.u8 q4, d8 // weights_ver
vmovl.u8 q5, d10
vmovl.u8 q6, d12
vmovl.u8 q7, d14
2:
vld1.8 {q3}, [r2]! // top
vshll.i8 q8, d4, #8 // bottom*256
vshll.i8 q9, d4, #8
vshll.i8 q10, d4, #8
vshll.i8 q11, d4, #8
vsubl.u8 q0, d6, d4 // top-bottom
vsubl.u8 q1, d7, d4
vshll.i8 q12, d4, #8
vshll.i8 q13, d4, #8
vshll.i8 q14, d4, #8
vshll.i8 q15, d4, #8
vmla.i16 q8, q0, q4 // bottom*256 + (top-bottom)*weights_ver
vmla.i16 q9, q1, q4
vmla.i16 q10, q0, q5
vmla.i16 q11, q1, q5
vmla.i16 q12, q0, q6 // bottom*256 + (top-bottom)*weights_ver
vmla.i16 q13, q1, q6
vmla.i16 q14, q0, q7
vmla.i16 q15, q1, q7
vrshrn.i16 d16, q8, #8
vrshrn.i16 d17, q9, #8
vrshrn.i16 d18, q10, #8
vrshrn.i16 d19, q11, #8
vrshrn.i16 d20, q12, #8
vrshrn.i16 d21, q13, #8
vrshrn.i16 d22, q14, #8
vrshrn.i16 d23, q15, #8
subs r3, r3, #16
vst1.8 {q8}, [r0, :128]!
vst1.8 {q9}, [r6, :128]!
vst1.8 {q10}, [r5, :128]!
vst1.8 {q11}, [lr, :128]!
bgt 2b
subs r4, r4, #4
ble 9f
sub r2, r2, r12
add r0, r0, r1
add r6, r6, r1
add r5, r5, r1
add lr, lr, r1
mov r3, r12
b 1b
9:
vpop {q4-q7}
pop {r4-r7, pc}
endfunc
// void ipred_smooth_h_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int a,
// const int max_width, const int max_height);
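// SMOOTH_H: only the horizontal blend of SMOOTH,
//   pred = (right*256 + (left - right)*weights_hor[x] + 128) >> 8
// with "right" = topleft[width].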
function ipred_smooth_h_8bpc_neon, export=1
push {r4-r8, lr}
ldr r4, [sp, #24]
movrel r8, X(sm_weights)
add r8, r8, r3
clz lr, r3
adr r5, L(ipred_smooth_h_tbl)
add r12, r2, r3
sub lr, lr, #25
ldr lr, [r5, lr, lsl #2]
vld1.8 {d4[]}, [r12] // right
add r5, r5, lr
add r6, r0, r1
lsl r1, r1, #1
bx r5
.align 2
L(ipred_smooth_h_tbl):
.word 640f - L(ipred_smooth_h_tbl) + CONFIG_THUMB
.word 320f - L(ipred_smooth_h_tbl) + CONFIG_THUMB
.word 160f - L(ipred_smooth_h_tbl) + CONFIG_THUMB
.word 80f - L(ipred_smooth_h_tbl) + CONFIG_THUMB
.word 40f - L(ipred_smooth_h_tbl) + CONFIG_THUMB
40:
vld1.32 {d6[]}, [r8, :32] // weights_hor
sub r2, r2, #4
mov r7, #-4
vmovl.u8 q3, d6 // weights_hor
4:
vld4.8 {d0[], d1[], d2[], d3[]}, [r2, :32], r7 // left
vshll.i8 q8, d4, #8 // right*256
vshll.i8 q9, d4, #8
vzip.32 d3, d2 // left, flipped
vzip.32 d1, d0
vsubl.u8 q1, d3, d4 // left-right
vsubl.u8 q0, d1, d4
subs r4, r4, #4
vmla.i16 q8, q1, q3 // right*256 + (left-right)*weights_hor
vmla.i16 q9, q0, q3
vrshrn.i16 d16, q8, #8
vrshrn.i16 d17, q9, #8
vst1.32 {d16[0]}, [r0, :32], r1
vst1.32 {d16[1]}, [r6, :32], r1
vst1.32 {d17[0]}, [r0, :32], r1
vst1.32 {d17[1]}, [r6, :32], r1
bgt 4b
pop {r4-r8, pc}
80:
vld1.8 {d6}, [r8, :64] // weights_hor
sub r2, r2, #4
mov r7, #-4
vmovl.u8 q3, d6 // weights_hor
8:
vld4.8 {d16[], d18[], d20[], d22[]}, [r2, :32], r7 // left
vshll.i8 q12, d4, #8 // right*256
vshll.i8 q13, d4, #8
vshll.i8 q14, d4, #8
vshll.i8 q15, d4, #8
vsubl.u8 q11, d22, d4 // left-right
vsubl.u8 q10, d20, d4
vsubl.u8 q9, d18, d4
vsubl.u8 q8, d16, d4
vmla.i16 q12, q11, q3 // right*256 + (left-right)*weights_hor
vmla.i16 q13, q10, q3 // (left flipped)
vmla.i16 q14, q9, q3
vmla.i16 q15, q8, q3
vrshrn.i16 d24, q12, #8
vrshrn.i16 d25, q13, #8
vrshrn.i16 d26, q14, #8
vrshrn.i16 d27, q15, #8
vst1.8 {d24}, [r0, :64], r1
vst1.8 {d25}, [r6, :64], r1
subs r4, r4, #4
vst1.8 {d26}, [r0, :64], r1
vst1.8 {d27}, [r6, :64], r1
bgt 8b
pop {r4-r8, pc}
160:
320:
640:
vpush {q4-q7}
sub r2, r2, #4
mov r7, #-4
// Set up pointers for four rows in parallel; r0, r6, r5, lr
add r5, r0, r1
add lr, r6, r1
lsl r1, r1, #1
sub r1, r1, r3
mov r12, r3
1:
vld4.8 {d8[], d10[], d12[], d14[]}, [r2, :32], r7 // left
vsubl.u8 q4, d8, d4 // left-right
vsubl.u8 q5, d10, d4
vsubl.u8 q6, d12, d4
vsubl.u8 q7, d14, d4
2:
vld1.8 {q1}, [r8, :128]! // weights_hor
vshll.i8 q8, d4, #8 // right*256
vshll.i8 q9, d4, #8
vshll.i8 q10, d4, #8
vshll.i8 q11, d4, #8
vmovl.u8 q0, d2 // weights_hor
vmovl.u8 q1, d3
vshll.i8 q12, d4, #8
vshll.i8 q13, d4, #8
vshll.i8 q14, d4, #8
vshll.i8 q15, d4, #8
vmla.i16 q8, q7, q0 // right*256 + (left-right)*weights_hor
vmla.i16 q9, q7, q1 // (left flipped)
vmla.i16 q10, q6, q0
vmla.i16 q11, q6, q1
vmla.i16 q12, q5, q0
vmla.i16 q13, q5, q1
vmla.i16 q14, q4, q0
vmla.i16 q15, q4, q1
vrshrn.i16 d16, q8, #8
vrshrn.i16 d17, q9, #8
vrshrn.i16 d18, q10, #8
vrshrn.i16 d19, q11, #8
vrshrn.i16 d20, q12, #8
vrshrn.i16 d21, q13, #8
vrshrn.i16 d22, q14, #8
vrshrn.i16 d23, q15, #8
subs r3, r3, #16
vst1.8 {q8}, [r0, :128]!
vst1.8 {q9}, [r6, :128]!
vst1.8 {q10}, [r5, :128]!
vst1.8 {q11}, [lr, :128]!
bgt 2b
subs r4, r4, #4
ble 9f
sub r8, r8, r12
add r0, r0, r1
add r6, r6, r1
add r5, r5, r1
add lr, lr, r1
mov r3, r12
b 1b
9:
vpop {q4-q7}
pop {r4-r8, pc}
endfunc
// void ipred_filter_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height, const int filt_idx,
// const int max_width, const int max_height);
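// FILTER (filter intra): the block is generated 4x2 pixels at a time from
// seven neighbours p0..p6 (topleft, four top pixels, two left pixels).
// Each output is a signed tap sum with filter_intra_taps[filt_idx],
// rounded and clipped with vqrshrun #4, and each finished 4x2 strip feeds
// the next one as its new top/left neighbours.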
function ipred_filter_8bpc_neon, export=1
push {r4-r8, lr}
movw r12, #511
ldrd r4, r5, [sp, #24]
and r5, r5, r12 // 511
movrel r6, X(filter_intra_taps)
lsl r5, r5, #6
add r6, r6, r5
vld1.8 {d20, d21, d22, d23}, [r6, :128]!
clz lr, r3
adr r5, L(ipred_filter_tbl)
vld1.8 {d27, d28, d29}, [r6, :64]
sub lr, lr, #26
ldr lr, [r5, lr, lsl #2]
vmovl.s8 q8, d20
vmovl.s8 q9, d21
add r5, r5, lr
vmovl.s8 q10, d22
vmovl.s8 q11, d23
add r6, r0, r1
lsl r1, r1, #1
vmovl.s8 q12, d27
vmovl.s8 q13, d28
vmovl.s8 q14, d29
add r8, r2, #1
sub r2, r2, #2
mov r7, #-2
bx r5
.align 2
L(ipred_filter_tbl):
.word 320f - L(ipred_filter_tbl) + CONFIG_THUMB
.word 160f - L(ipred_filter_tbl) + CONFIG_THUMB
.word 80f - L(ipred_filter_tbl) + CONFIG_THUMB
.word 40f - L(ipred_filter_tbl) + CONFIG_THUMB
40:
vld1.32 {d0[]}, [r8] // top (0-3)
vmovl.u8 q0, d0 // top (0-3)
4:
vld1.32 {d2[]}, [r2], r7 // left (0-1) + topleft (2)
vmul.i16 q2, q9, d0[0] // p1(top[0]) * filter(1)
vmla.i16 q2, q10, d0[1] // p2(top[1]) * filter(2)
vmla.i16 q2, q11, d0[2] // p3(top[2]) * filter(3)
vmovl.u8 q1, d2 // left (0-1) + topleft (2)
vmla.i16 q2, q12, d0[3] // p4(top[3]) * filter(4)
vmla.i16 q2, q8, d2[2] // p0(topleft) * filter(0)
vmla.i16 q2, q13, d2[1] // p5(left[0]) * filter(5)
vmla.i16 q2, q14, d2[0] // p6(left[1]) * filter(6)
vqrshrun.s16 d4, q2, #4
subs r4, r4, #2
vst1.32 {d4[0]}, [r0, :32], r1
vmovl.u8 q0, d4
vst1.32 {d4[1]}, [r6, :32], r1
vmov d0, d1 // move top from [4-7] to [0-3]
bgt 4b
pop {r4-r8, pc}
80:
vld1.8 {d0}, [r8] // top (0-7)
vmovl.u8 q0, d0 // top (0-7)
8:
vld1.32 {d2[]}, [r2], r7 // left (0-1) + topleft (2)
vmul.i16 q2, q9, d0[0] // p1(top[0]) * filter(1)
vmla.i16 q2, q10, d0[1] // p2(top[1]) * filter(2)
vmla.i16 q2, q11, d0[2] // p3(top[2]) * filter(3)
vmovl.u8 q1, d2 // left (0-1) + topleft (2)
vmla.i16 q2, q12, d0[3] // p4(top[3]) * filter(4)
vmla.i16 q2, q8, d2[2] // p0(topleft) * filter(0)
vmla.i16 q2, q13, d2[1] // p5(left[0]) * filter(5)
vmla.i16 q2, q14, d2[0] // p6(left[1]) * filter(6)
vmul.i16 q3, q9, d1[0] // p1(top[0]) * filter(1)
vmla.i16 q3, q10, d1[1] // p2(top[1]) * filter(2)
vmla.i16 q3, q11, d1[2] // p3(top[2]) * filter(3)
vqrshrun.s16 d4, q2, #4
vmovl.u8 q1, d4 // first block, in 16 bit
vmla.i16 q3, q12, d1[3] // p4(top[3]) * filter(4)
vmla.i16 q3, q8, d0[3] // p0(topleft) * filter(0)
vmla.i16 q3, q13, d2[3] // p5(left[0]) * filter(5)
vmla.i16 q3, q14, d3[3] // p6(left[1]) * filter(6)
vqrshrun.s16 d5, q3, #4
vzip.32 d4, d5
subs r4, r4, #2
vst1.8 {d4}, [r0, :64], r1
vmovl.u8 q0, d5
vst1.8 {d5}, [r6, :64], r1
bgt 8b
pop {r4-r8, pc}
160:
320:
vpush {q4-q5}
sub r1, r1, r3
mov lr, r3
1:
vld1.32 {d0[]}, [r2], r7 // left (0-1) + topleft (2)
vmovl.u8 q0, d0 // left (0-1) + topleft (2)
2:
vld1.8 {q2}, [r8]! // top(0-15)
vmul.i16 q3, q8, d0[2] // p0(topleft) * filter(0)
vmla.i16 q3, q13, d0[1] // p5(left[0]) * filter(5)
vmovl.u8 q1, d4 // top(0-7)
vmovl.u8 q2, d5 // top(8-15)
vmla.i16 q3, q14, d0[0] // p6(left[1]) * filter(6)
vmla.i16 q3, q9, d2[0] // p1(top[0]) * filter(1)
vmla.i16 q3, q10, d2[1] // p2(top[1]) * filter(2)
vmla.i16 q3, q11, d2[2] // p3(top[2]) * filter(3)
vmla.i16 q3, q12, d2[3] // p4(top[3]) * filter(4)
vmul.i16 q4, q9, d3[0] // p1(top[0]) * filter(1)
vmla.i16 q4, q10, d3[1] // p2(top[1]) * filter(2)
vmla.i16 q4, q11, d3[2] // p3(top[2]) * filter(3)
vqrshrun.s16 d6, q3, #4
vmovl.u8 q0, d6 // first block, in 16 bit
vmla.i16 q4, q12, d3[3] // p4(top[3]) * filter(4)
vmla.i16 q4, q8, d2[3] // p0(topleft) * filter(0)
vmla.i16 q4, q13, d0[3] // p5(left[0]) * filter(5)
vmla.i16 q4, q14, d1[3] // p6(left[1]) * filter(6)
vmul.i16 q5, q9, d4[0] // p1(top[0]) * filter(1)
vmla.i16 q5, q10, d4[1] // p2(top[1]) * filter(2)
vmla.i16 q5, q11, d4[2] // p3(top[2]) * filter(3)
vqrshrun.s16 d7, q4, #4
vmovl.u8 q0, d7 // second block, in 16 bit
vmla.i16 q5, q12, d4[3] // p4(top[3]) * filter(4)
vmla.i16 q5, q8, d3[3] // p0(topleft) * filter(0)
vmla.i16 q5, q13, d0[3] // p5(left[0]) * filter(5)
vmla.i16 q5, q14, d1[3] // p6(left[1]) * filter(6)
vmul.i16 q15, q9, d5[0] // p1(top[0]) * filter(1)
vmla.i16 q15, q10, d5[1] // p2(top[1]) * filter(2)
vmla.i16 q15, q11, d5[2] // p3(top[2]) * filter(3)
vqrshrun.s16 d8, q5, #4
vmovl.u8 q0, d8 // third block, in 16 bit
vmov.u8 r12, d5[6]
vmla.i16 q15, q12, d5[3] // p4(top[3]) * filter(4)
vmla.i16 q15, q8, d4[3] // p0(topleft) * filter(0)
vmla.i16 q15, q13, d0[3] // p5(left[0]) * filter(5)
vmla.i16 q15, q14, d1[3] // p6(left[1]) * filter(6)
vmov.8 d0[4], r12
subs r3, r3, #16
vqrshrun.s16 d9, q15, #4
vst4.32 {d6[0], d7[0], d8[0], d9[0]}, [r0, :128]!
vst4.32 {d6[1], d7[1], d8[1], d9[1]}, [r6, :128]!
ble 8f
vmov.u8 r12, d9[7]
vmov.8 d0[0], r12
vmov.u8 r12, d9[3]
vmov.8 d0[2], r12
b 2b
8:
subs r4, r4, #2
ble 9f
sub r8, r6, lr
add r0, r0, r1
add r6, r6, r1
mov r3, lr
b 1b
9:
vpop {q4-q5}
pop {r4-r8, pc}
endfunc
// void pal_pred_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const pal, const uint8_t *idx,
// const int w, const int h);
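// Palette prediction: each byte of idx packs two palette indices, the low
// bits (masked with 7) giving the first pixel and the high nibble the
// second. vand #7 / vshr #4 unpack them, vzip restores pixel order, and
// vtbl.8 maps the indices through the 8-entry palette held in d0.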
function pal_pred_8bpc_neon, export=1
push {r4-r5, lr}
ldrd r4, r5, [sp, #12]
vld1.8 {d0}, [r2, :64]
clz lr, r4
adr r12, L(pal_pred_tbl)
sub lr, lr, #25
vmov.i8 q15, #7
ldr lr, [r12, lr, lsl #2]
add r12, r12, lr
add r2, r0, r1
bx r12
.align 2
L(pal_pred_tbl):
.word 640f - L(pal_pred_tbl) + CONFIG_THUMB
.word 320f - L(pal_pred_tbl) + CONFIG_THUMB
.word 160f - L(pal_pred_tbl) + CONFIG_THUMB
.word 80f - L(pal_pred_tbl) + CONFIG_THUMB
.word 40f - L(pal_pred_tbl) + CONFIG_THUMB
40:
lsl r1, r1, #1
4:
vld1.8 {d2}, [r3, :64]!
subs r5, r5, #4
vshr.u8 d3, d2, #4
vand.u8 d2, d2, d30
vzip.8 d2, d3
vtbl.8 d2, {d0}, d2
vtbl.8 d3, {d0}, d3
vst1.32 {d2[0]}, [r0, :32], r1
vst1.32 {d2[1]}, [r2, :32], r1
vst1.32 {d3[0]}, [r0, :32], r1
vst1.32 {d3[1]}, [r2, :32], r1
bgt 4b
pop {r4-r5, pc}
80:
lsl r1, r1, #1
8:
vld1.8 {q1}, [r3, :64]!
subs r5, r5, #4
vshr.u8 q2, q1, #4
vand.u8 q1, q1, q15
vzip.8 q1, q2
vtbl.8 d2, {d0}, d2
vtbl.8 d3, {d0}, d3
vst1.8 {d2}, [r0, :64], r1
vtbl.8 d4, {d0}, d4
vst1.8 {d3}, [r2, :64], r1
vtbl.8 d5, {d0}, d5
vst1.8 {d4}, [r0, :64], r1
vst1.8 {d5}, [r2, :64], r1
bgt 8b
pop {r4-r5, pc}
160:
lsl r1, r1, #1
16:
vld1.8 {q10, q11}, [r3, :64]!
subs r5, r5, #4
vand.u8 q8, q10, q15
vshr.u8 q9, q10, #4
vand.u8 q10, q11, q15
vshr.u8 q11, q11, #4
vzip.8 q8, q9
vzip.8 q10, q11
vtbl.8 d16, {d0}, d16
vtbl.8 d17, {d0}, d17
vtbl.8 d18, {d0}, d18
vtbl.8 d19, {d0}, d19
vtbl.8 d20, {d0}, d20
vtbl.8 d21, {d0}, d21
vst1.8 {q8}, [r0, :128], r1
vtbl.8 d22, {d0}, d22
vst1.8 {q9}, [r2, :128], r1
vtbl.8 d23, {d0}, d23
vst1.8 {q10}, [r0, :128], r1
vst1.8 {q11}, [r2, :128], r1
bgt 16b
pop {r4-r5, pc}
320:
lsl r1, r1, #1
32:
vld1.8 {q10, q11}, [r3, :64]!
subs r5, r5, #2
vand.u8 q8, q10, q15
vshr.u8 q9, q10, #4
vand.u8 q10, q11, q15
vshr.u8 q11, q11, #4
vzip.8 q8, q9
vzip.8 q10, q11
vtbl.8 d16, {d0}, d16
vtbl.8 d17, {d0}, d17
vtbl.8 d18, {d0}, d18
vtbl.8 d19, {d0}, d19
vtbl.8 d20, {d0}, d20
vtbl.8 d21, {d0}, d21
vst1.8 {q8, q9}, [r0, :128], r1
vtbl.8 d22, {d0}, d22
vtbl.8 d23, {d0}, d23
vst1.8 {q10, q11}, [r2, :128], r1
bgt 32b
pop {r4-r5, pc}
640:
sub r1, r1, #32
64:
vld1.8 {q10, q11}, [r3, :64]!
subs r5, r5, #1
vand.u8 q8, q10, q15
vshr.u8 q9, q10, #4
vand.u8 q10, q11, q15
vshr.u8 q11, q11, #4
vzip.8 q8, q9
vzip.8 q10, q11
vtbl.8 d16, {d0}, d16
vtbl.8 d17, {d0}, d17
vtbl.8 d18, {d0}, d18
vtbl.8 d19, {d0}, d19
vtbl.8 d20, {d0}, d20
vtbl.8 d21, {d0}, d21
vst1.8 {q8, q9}, [r0, :128]!
vtbl.8 d22, {d0}, d22
vtbl.8 d23, {d0}, d23
vst1.8 {q10, q11}, [r0, :128], r1
bgt 64b
pop {r4-r5, pc}
endfunc
// void ipred_cfl_128_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height,
// const int16_t *ac, const int alpha);
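// CFL with an implied dc of 128 (half range at 8 bpc). The shared
// L(ipred_cfl_splat_*) loops below compute, per pixel (hedged pseudo-C of
// the code, matching its inline comments):
//   diff = ac[i] * alpha;              // ac is zero-mean, alpha is signed
//   sign = diff >> 15;                 // arithmetic shift: 0 or -1
//   dst[i] = iclip_pixel(dc + ((diff + sign + 32) >> 6));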
function ipred_cfl_128_8bpc_neon, export=1
push {r4-r8, lr}
ldrd r4, r5, [sp, #24]
ldr r6, [sp, #32]
clz lr, r3
adr r12, L(ipred_cfl_128_tbl)
sub lr, lr, #26
ldr lr, [r12, lr, lsl #2]
vmov.i16 q0, #128 // dc
vdup.i16 q1, r6 // alpha
add r12, r12, lr
add r6, r0, r1
lsl r1, r1, #1
bx r12
.align 2
L(ipred_cfl_128_tbl):
L(ipred_cfl_splat_tbl):
.word L(ipred_cfl_splat_w16) - L(ipred_cfl_128_tbl) + CONFIG_THUMB
.word L(ipred_cfl_splat_w16) - L(ipred_cfl_128_tbl) + CONFIG_THUMB
.word L(ipred_cfl_splat_w8) - L(ipred_cfl_128_tbl) + CONFIG_THUMB
.word L(ipred_cfl_splat_w4) - L(ipred_cfl_128_tbl) + CONFIG_THUMB
L(ipred_cfl_splat_w4):
vld1.16 {q2, q3}, [r5, :128]!
vmul.i16 q2, q2, q1 // diff = ac * alpha
vmul.i16 q3, q3, q1
vshr.s16 q8, q2, #15 // sign = diff >> 15
vshr.s16 q9, q3, #15
vadd.i16 q2, q2, q8 // diff + sign
vadd.i16 q3, q3, q9
vrshr.s16 q2, q2, #6 // (diff + sign + 32) >> 6 = apply_sign()
vrshr.s16 q3, q3, #6
vadd.i16 q2, q2, q0 // dc + apply_sign()
vadd.i16 q3, q3, q0
vqmovun.s16 d4, q2 // iclip_pixel(dc + apply_sign())
vqmovun.s16 d5, q3
vst1.32 {d4[0]}, [r0, :32], r1
vst1.32 {d4[1]}, [r6, :32], r1
subs r4, r4, #4
vst1.32 {d5[0]}, [r0, :32], r1
vst1.32 {d5[1]}, [r6, :32], r1
bgt L(ipred_cfl_splat_w4)
pop {r4-r8, pc}
L(ipred_cfl_splat_w8):
vld1.16 {q8, q9}, [r5, :128]!
vld1.16 {q10, q11}, [r5, :128]!
vmul.i16 q8, q8, q1 // diff = ac * alpha
vmul.i16 q9, q9, q1
vmul.i16 q10, q10, q1
vmul.i16 q11, q11, q1
vshr.s16 q12, q8, #15 // sign = diff >> 15
vshr.s16 q13, q9, #15
vshr.s16 q14, q10, #15
vshr.s16 q15, q11, #15
vadd.i16 q8, q8, q12 // diff + sign
vadd.i16 q9, q9, q13
vadd.i16 q10, q10, q14
vadd.i16 q11, q11, q15
vrshr.s16 q8, q8, #6 // (diff + sign + 32) >> 6 = apply_sign()
vrshr.s16 q9, q9, #6
vrshr.s16 q10, q10, #6
vrshr.s16 q11, q11, #6
vadd.i16 q8, q8, q0 // dc + apply_sign()
vadd.i16 q9, q9, q0
vadd.i16 q10, q10, q0
vadd.i16 q11, q11, q0
vqmovun.s16 d16, q8 // iclip_pixel(dc + apply_sign())
vqmovun.s16 d17, q9
vqmovun.s16 d18, q10
vqmovun.s16 d19, q11
vst1.8 {d16}, [r0, :64], r1
vst1.8 {d17}, [r6, :64], r1
subs r4, r4, #4
vst1.8 {d18}, [r0, :64], r1
vst1.8 {d19}, [r6, :64], r1
bgt L(ipred_cfl_splat_w8)
pop {r4-r8, pc}
L(ipred_cfl_splat_w16):
add r12, r5, r3, lsl #1
sub r1, r1, r3
mov lr, r3
1:
vld1.16 {q8, q9}, [r5, :128]!
vmul.i16 q8, q8, q1 // diff = ac * alpha
vld1.16 {q10, q11}, [r12, :128]!
vmul.i16 q9, q9, q1
vmul.i16 q10, q10, q1
vmul.i16 q11, q11, q1
vshr.s16 q12, q8, #15 // sign = diff >> 15
vshr.s16 q13, q9, #15
vshr.s16 q14, q10, #15
vshr.s16 q15, q11, #15
vadd.i16 q8, q8, q12 // diff + sign
vadd.i16 q9, q9, q13
vadd.i16 q10, q10, q14
vadd.i16 q11, q11, q15
vrshr.s16 q8, q8, #6 // (diff + sign + 32) >> 6 = apply_sign()
vrshr.s16 q9, q9, #6
vrshr.s16 q10, q10, #6
vrshr.s16 q11, q11, #6
vadd.i16 q8, q8, q0 // dc + apply_sign()
vadd.i16 q9, q9, q0
vadd.i16 q10, q10, q0
vadd.i16 q11, q11, q0
vqmovun.s16 d16, q8 // iclip_pixel(dc + apply_sign())
vqmovun.s16 d17, q9
vqmovun.s16 d18, q10
vqmovun.s16 d19, q11
subs r3, r3, #16
vst1.16 {q8}, [r0, :128]!
vst1.16 {q9}, [r6, :128]!
bgt 1b
subs r4, r4, #2
add r5, r5, lr, lsl #1
add r12, r12, lr, lsl #1
add r0, r0, r1
add r6, r6, r1
mov r3, lr
bgt 1b
pop {r4-r8, pc}
endfunc
// void ipred_cfl_top_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height,
// const int16_t *ac, const int alpha);
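// CFL_TOP: dc is the rounded average of the width pixels in the top row,
// then the shared L(ipred_cfl_splat_*) loop applies ac*alpha around it.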
function ipred_cfl_top_8bpc_neon, export=1
push {r4-r8, lr}
ldrd r4, r5, [sp, #24]
ldr r6, [sp, #32]
clz lr, r3
adr r12, L(ipred_cfl_top_tbl)
sub lr, lr, #26
ldr lr, [r12, lr, lsl #2]
vdup.16 q1, r6 // alpha
add r2, r2, #1
add r12, r12, lr
add r6, r0, r1
lsl r1, r1, #1
bx r12
.align 2
L(ipred_cfl_top_tbl):
.word 32f - L(ipred_cfl_top_tbl) + CONFIG_THUMB
.word 16f - L(ipred_cfl_top_tbl) + CONFIG_THUMB
.word 8f - L(ipred_cfl_top_tbl) + CONFIG_THUMB
.word 4f - L(ipred_cfl_top_tbl) + CONFIG_THUMB
4:
vld1.32 {d0[]}, [r2]
vpaddl.u8 d0, d0
vpadd.u16 d0, d0
vrshr.u16 d0, d0, #2
vdup.16 q0, d0[0]
b L(ipred_cfl_splat_w4)
8:
vld1.8 {d0}, [r2]
vpaddl.u8 d0, d0
vpadd.u16 d0, d0
vpadd.u16 d0, d0
vrshr.u16 d0, d0, #3
vdup.16 q0, d0[0]
b L(ipred_cfl_splat_w8)
16:
vld1.8 {q0}, [r2]
vaddl.u8 q0, d0, d1
vadd.u16 d0, d0, d1
vpadd.u16 d0, d0
vpadd.u16 d0, d0
vrshr.u16 d0, d0, #4
vdup.16 q0, d0[0]
b L(ipred_cfl_splat_w16)
32:
vld1.8 {q2, q3}, [r2]
vaddl.u8 q2, d4, d5
vaddl.u8 q3, d6, d7
vadd.u16 q0, q2, q3
vadd.u16 d0, d0, d1
vpadd.u16 d0, d0
vpadd.u16 d0, d0
vrshr.u16 d0, d0, #5
vdup.16 q0, d0[0]
b L(ipred_cfl_splat_w16)
endfunc
// void ipred_cfl_left_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height,
// const int16_t *ac, const int alpha);
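// CFL_LEFT: dc is the rounded average of the height left pixels; the
// summing branch is picked by height and then jumps (bx r12) into the
// shared L(ipred_cfl_splat_*) loop picked by width.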
function ipred_cfl_left_8bpc_neon, export=1
push {r4-r8, lr}
ldrd r4, r5, [sp, #24]
ldr r6, [sp, #32]
sub r2, r2, r4
clz lr, r3
clz r8, r4
adr r12, L(ipred_cfl_splat_tbl)
adr r7, L(ipred_cfl_left_tbl)
sub lr, lr, #26
sub r8, r8, #26
ldr lr, [r12, lr, lsl #2]
ldr r8, [r7, r8, lsl #2]
vdup.16 q1, r6 // alpha
add r12, r12, lr
add r7, r7, r8
add r6, r0, r1
lsl r1, r1, #1
bx r7
.align 2
L(ipred_cfl_left_tbl):
.word L(ipred_cfl_left_h32) - L(ipred_cfl_left_tbl) + CONFIG_THUMB
.word L(ipred_cfl_left_h16) - L(ipred_cfl_left_tbl) + CONFIG_THUMB
.word L(ipred_cfl_left_h8) - L(ipred_cfl_left_tbl) + CONFIG_THUMB
.word L(ipred_cfl_left_h4) - L(ipred_cfl_left_tbl) + CONFIG_THUMB
L(ipred_cfl_left_h4):
vld1.32 {d0[]}, [r2, :32]
vpaddl.u8 d0, d0
vpadd.u16 d0, d0
vrshr.u16 d0, d0, #2
vdup.16 q0, d0[0]
bx r12
L(ipred_cfl_left_h8):
vld1.8 {d0}, [r2, :64]
vpaddl.u8 d0, d0
vpadd.u16 d0, d0
vpadd.u16 d0, d0
vrshr.u16 d0, d0, #3
vdup.16 q0, d0[0]
bx r12
L(ipred_cfl_left_h16):
vld1.8 {q0}, [r2, :128]
vaddl.u8 q0, d0, d1
vadd.u16 d0, d0, d1
vpadd.u16 d0, d0
vpadd.u16 d0, d0
vrshr.u16 d0, d0, #4
vdup.16 q0, d0[0]
bx r12
L(ipred_cfl_left_h32):
vld1.8 {q2, q3}, [r2, :128]
vaddl.u8 q2, d4, d5
vaddl.u8 q3, d6, d7
vadd.u16 q0, q2, q3
vadd.u16 d0, d0, d1
vpadd.u16 d0, d0
vpadd.u16 d0, d0
vrshr.u16 d0, d0, #5
vdup.16 q0, d0[0]
bx r12
endfunc
// void ipred_cfl_8bpc_neon(pixel *dst, const ptrdiff_t stride,
// const pixel *const topleft,
// const int width, const int height,
// const int16_t *ac, const int alpha);
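// CFL: dc = (sum(top) + sum(left) + ((w + h) >> 1)) >> log2(w + h), with
// the same vqdmulh-based /3 or /5 correction for rectangular blocks as in
// ipred_dc_8bpc_neon, followed by the shared L(ipred_cfl_splat_*) loop.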
function ipred_cfl_8bpc_neon, export=1
push {r4-r8, lr}
ldrd r4, r5, [sp, #24]
ldr r6, [sp, #32]
sub r2, r2, r4
add r8, r3, r4 // width + height
vdup.16 q1, r6 // alpha
clz lr, r3
clz r6, r4
vdup.16 d16, r8 // width + height
adr r7, L(ipred_cfl_tbl)
rbit r8, r8 // rbit(width + height)
sub lr, lr, #22 // 26 leading bits, minus table offset 4
sub r6, r6, #26
clz r8, r8 // ctz(width + height)
ldr lr, [r7, lr, lsl #2]
ldr r6, [r7, r6, lsl #2]
neg r8, r8 // -ctz(width + height)
add r12, r7, lr
add r7, r7, r6
vshr.u16 d16, d16, #1 // (width + height) >> 1
vdup.16 d17, r8 // -ctz(width + height)
add r6, r0, r1
lsl r1, r1, #1
bx r7
.align 2
L(ipred_cfl_tbl):
.word L(ipred_cfl_h32) - L(ipred_cfl_tbl) + CONFIG_THUMB
.word L(ipred_cfl_h16) - L(ipred_cfl_tbl) + CONFIG_THUMB
.word L(ipred_cfl_h8) - L(ipred_cfl_tbl) + CONFIG_THUMB
.word L(ipred_cfl_h4) - L(ipred_cfl_tbl) + CONFIG_THUMB
.word L(ipred_cfl_w32) - L(ipred_cfl_tbl) + CONFIG_THUMB
.word L(ipred_cfl_w16) - L(ipred_cfl_tbl) + CONFIG_THUMB
.word L(ipred_cfl_w8) - L(ipred_cfl_tbl) + CONFIG_THUMB
.word L(ipred_cfl_w4) - L(ipred_cfl_tbl) + CONFIG_THUMB
L(ipred_cfl_h4):
vld1.32 {d0[]}, [r2, :32]!
vpaddl.u8 d0, d0
add r2, r2, #1
vpadd.i16 d0, d0
bx r12
L(ipred_cfl_w4):
vld1.32 {d1[]}, [r2]
vadd.i16 d0, d0, d16
vpaddl.u8 d1, d1
vpadd.u16 d1, d1
cmp r4, #4
vadd.i16 d0, d0, d1
vshl.u16 d0, d0, d17
beq 1f
// h = 8/16
movw lr, #(0x3334/2)
movw r8, #(0x5556/2)
cmp r4, #16
it ne
movne lr, r8
vdup.16 d18, lr
vqdmulh.s16 d0, d0, d18
1:
vdup.16 q0, d0[0]
b L(ipred_cfl_splat_w4)
L(ipred_cfl_h8):
vld1.8 {d0}, [r2, :64]!
vpaddl.u8 d0, d0
vpadd.i16 d0, d0
add r2, r2, #1
vpadd.i16 d0, d0
bx r12
L(ipred_cfl_w8):
vld1.8 {d1}, [r2]
vadd.i16 d0, d0, d16
vpaddl.u8 d1, d1
vpadd.i16 d1, d1
vpadd.i16 d1, d1
cmp r4, #8
vadd.i16 d0, d0, d1
vshl.u16 d0, d0, d17
beq 1f
// h = 4/16/32
cmp r4, #32
movw lr, #(0x3334/2)
movw r8, #(0x5556/2)
it ne
movne lr, r8
vdup.16 d18, lr
vqdmulh.s16 d0, d0, d18
1:
vdup.16 q0, d0[0]
b L(ipred_cfl_splat_w8)
L(ipred_cfl_h16):
vld1.8 {q0}, [r2, :128]!
vaddl.u8 q0, d0, d1
vadd.i16 d0, d0, d1
vpadd.i16 d0, d0
add r2, r2, #1
vpadd.i16 d0, d0
bx r12
L(ipred_cfl_w16):
vld1.8 {q2}, [r2]
vadd.i16 d0, d0, d16
vaddl.u8 q2, d4, d5
vadd.i16 d4, d4, d5
vpadd.i16 d4, d4
vpadd.i16 d4, d4
cmp r4, #16
vadd.i16 d0, d0, d4
vshl.u16 d0, d0, d17
beq 1f
// h = 4/8/32/64
tst r4, #(32+16+8) // 16 added to make a consecutive bitmask
movw lr, #(0x3334/2)
movw r8, #(0x5556/2)
it ne
movne lr, r8
vdup.16 d18, lr
vqdmulh.s16 d0, d0, d18
1:
vdup.16 q0, d0[0]
b L(ipred_cfl_splat_w16)
L(ipred_cfl_h32):
vld1.8 {q2, q3}, [r2, :128]!
vaddl.u8 q2, d4, d5
vaddl.u8 q3, d6, d7
vadd.i16 q0, q2, q3
vadd.i16 d0, d0, d1
vpadd.i16 d0, d0
add r2, r2, #1
vpadd.i16 d0, d0
bx r12
L(ipred_cfl_w32):
vld1.8 {q2, q3}, [r2]
vadd.i16 d0, d0, d16
vaddl.u8 q2, d4, d5
vaddl.u8 q3, d6, d7
vadd.i16 q2, q2, q3
vadd.i16 d4, d4, d5
vpadd.i16 d4, d4
vpadd.i16 d4, d4
cmp r4, #32
vadd.i16 d0, d0, d4
vshl.u16 d0, d0, d17
beq 1f
// h = 8/16/64
cmp r4, #8
movw lr, #(0x3334/2)
movw r8, #(0x5556/2)
it ne
movne lr, r8
vdup.16 d18, lr
vqdmulh.s16 d0, d0, d18
1:
vdup.16 q0, d0[0]
b L(ipred_cfl_splat_w16)
endfunc
// void cfl_ac_420_8bpc_neon(int16_t *const ac, const pixel *const ypx,
// const ptrdiff_t stride, const int w_pad,
// const int h_pad, const int cw, const int ch);
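// 4:2:0 ac: every 2x2 luma block is box-summed (vpaddl plus an add across
// the two rows) and doubled (<<1), i.e. 8*average, so the ac samples carry
// 3 fractional bits. The right/bottom padding repeats the last valid
// column/row, and finally the rounded mean of the whole block (vrshl by
// -log2sz kept in d31) is subtracted so the output is zero-mean.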
function ipred_cfl_ac_420_8bpc_neon, export=1
push {r4-r8,lr}
ldrd r4, r5, [sp, #24]
ldr r6, [sp, #32]
clz r8, r5
lsl r4, r4, #2
adr r7, L(ipred_cfl_ac_420_tbl)
sub r8, r8, #27
ldr r8, [r7, r8, lsl #2]
vmov.i16 q8, #0
vmov.i16 q9, #0
vmov.i16 q10, #0
vmov.i16 q11, #0
add r7, r7, r8
sub r8, r6, r4 // height - h_pad
rbit lr, r5 // rbit(width)
rbit r12, r6 // rbit(height)
clz lr, lr // ctz(width)
clz r12, r12 // ctz(height)
add lr, lr, r12 // log2sz
add r12, r1, r2
vdup.32 d31, lr
lsl r2, r2, #1
vneg.s32 d31, d31 // -log2sz
bx r7
.align 2
L(ipred_cfl_ac_420_tbl):
.word L(ipred_cfl_ac_420_w16) - L(ipred_cfl_ac_420_tbl) + CONFIG_THUMB
.word L(ipred_cfl_ac_420_w8) - L(ipred_cfl_ac_420_tbl) + CONFIG_THUMB
.word L(ipred_cfl_ac_420_w4) - L(ipred_cfl_ac_420_tbl) + CONFIG_THUMB
L(ipred_cfl_ac_420_w4):
1: // Copy and subsample input
vld1.8 {d0}, [r1, :64], r2
vld1.8 {d2}, [r12, :64], r2
vld1.8 {d1}, [r1, :64], r2
vld1.8 {d3}, [r12, :64], r2
vpaddl.u8 q0, q0
vpaddl.u8 q1, q1
vadd.i16 q0, q0, q1
vshl.i16 q0, q0, #1
subs r8, r8, #2
vst1.16 {q0}, [r0, :128]!
vadd.i16 q8, q8, q0
bgt 1b
cmp r4, #0
vmov d0, d1
vmov d2, d1
vmov d3, d1
L(ipred_cfl_ac_420_w4_hpad):
beq 3f // This assumes that all callers already did "cmp r4, #0"
2: // Vertical padding (h_pad > 0)
subs r4, r4, #4
vst1.16 {q0, q1}, [r0, :128]!
vadd.i16 q8, q8, q0
vadd.i16 q8, q8, q1
bgt 2b
3:
L(ipred_cfl_ac_420_w4_calc_subtract_dc):
// Aggregate the sums
vadd.i16 q0, q8, q9
vadd.i16 q1, q10, q11
vpaddl.u16 q0, q0
vpaddl.u16 q1, q1
vadd.i32 q0, q1
vadd.i32 d0, d0, d1
vpadd.i32 d0, d0, d0 // sum
sub r0, r0, r6, lsl #3
vrshl.u32 d16, d0, d31 // (sum + (1 << (log2sz - 1))) >>= log2sz
vdup.16 q8, d16[0]
L(ipred_cfl_ac_420_w4_subtract_dc):
6: // Subtract dc from ac
vld1.16 {q0, q1}, [r0, :128]
subs r6, r6, #4
vsub.i16 q0, q0, q8
vsub.i16 q1, q1, q8
vst1.16 {q0, q1}, [r0, :128]!
bgt 6b
pop {r4-r8, pc}
L(ipred_cfl_ac_420_w8):
cmp r3, #0
bne L(ipred_cfl_ac_420_w8_wpad)
1: // Copy and subsample input, without padding
vld1.8 {q0}, [r1, :128], r2
vld1.8 {q1}, [r12, :128], r2
vld1.8 {q2}, [r1, :128], r2
vpaddl.u8 q0, q0
vld1.8 {q3}, [r12, :128], r2
vpaddl.u8 q1, q1
vpaddl.u8 q2, q2
vpaddl.u8 q3, q3
vadd.i16 q0, q0, q1
vadd.i16 q2, q2, q3
vshl.i16 q0, q0, #1
vshl.i16 q1, q2, #1
subs r8, r8, #2
vst1.16 {q0, q1}, [r0, :128]!
vadd.i16 q8, q8, q0
vadd.i16 q9, q9, q1
bgt 1b
cmp r4, #0
vmov q0, q1
b L(ipred_cfl_ac_420_w8_hpad)
L(ipred_cfl_ac_420_w8_wpad):
1: // Copy and subsample input, padding 4
vld1.16 {d0}, [r1, :64], r2
vld1.16 {d2}, [r12, :64], r2
vld1.16 {d1}, [r1, :64], r2
vld1.16 {d3}, [r12, :64], r2
vpaddl.u8 q0, q0
vpaddl.u8 q1, q1
vadd.i16 q0, q0, q1
vshl.i16 q0, q0, #1
vdup.16 d3, d1[3]
vmov d2, d1
vdup.16 d1, d0[3]
subs r8, r8, #2
vst1.16 {q0, q1}, [r0, :128]!
vadd.i16 q8, q8, q0
vadd.i16 q9, q9, q1
bgt 1b
cmp r4, #0
vmov q0, q1
L(ipred_cfl_ac_420_w8_hpad):
beq 3f // This assumes that all callers already did "cmp r4, #0"
2: // Vertical padding (h_pad > 0)
subs r4, r4, #4
vst1.16 {q0, q1}, [r0, :128]!
vadd.i16 q8, q8, q0
vadd.i16 q9, q9, q1
vst1.16 {q0, q1}, [r0, :128]!
vadd.i16 q10, q10, q0
vadd.i16 q11, q11, q1
bgt 2b
3:
// Double the height and reuse the w4 summing/subtracting
lsl r6, r6, #1
b L(ipred_cfl_ac_420_w4_calc_subtract_dc)
L(ipred_cfl_ac_420_w16):
adr r7, L(ipred_cfl_ac_420_w16_tbl)
ldr r3, [r7, r3, lsl #2]
add r7, r7, r3
bx r7
.align 2
L(ipred_cfl_ac_420_w16_tbl):
.word L(ipred_cfl_ac_420_w16_wpad0) - L(ipred_cfl_ac_420_w16_tbl) + CONFIG_THUMB
.word L(ipred_cfl_ac_420_w16_wpad1) - L(ipred_cfl_ac_420_w16_tbl) + CONFIG_THUMB
.word L(ipred_cfl_ac_420_w16_wpad2) - L(ipred_cfl_ac_420_w16_tbl) + CONFIG_THUMB
.word L(ipred_cfl_ac_420_w16_wpad3) - L(ipred_cfl_ac_420_w16_tbl) + CONFIG_THUMB
L(ipred_cfl_ac_420_w16_wpad0):
1: // Copy and subsample input, without padding
vld1.8 {q0, q1}, [r1, :128], r2
vld1.8 {q2, q3}, [r12, :128], r2
vpaddl.u8 q0, q0
vld1.8 {q12, q13}, [r1, :128], r2
vpaddl.u8 q1, q1
vpaddl.u8 q2, q2
vpaddl.u8 q3, q3
vadd.i16 q0, q0, q2
vadd.i16 q1, q1, q3
vld1.8 {q2, q3}, [r12, :128], r2
vpaddl.u8 q12, q12
vpaddl.u8 q13, q13
vpaddl.u8 q2, q2
vpaddl.u8 q3, q3
vadd.i16 q12, q12, q2
vadd.i16 q13, q13, q3
vshl.i16 q0, q0, #1
vshl.i16 q1, q1, #1
vshl.i16 q2, q12, #1
vshl.i16 q3, q13, #1
subs r8, r8, #2
vst1.16 {q0, q1}, [r0, :128]!
vadd.i16 q8, q8, q0
vadd.i16 q9, q9, q1
vst1.16 {q2, q3}, [r0, :128]!
vadd.i16 q10, q10, q2
vadd.i16 q11, q11, q3
bgt 1b
cmp r4, #0
vmov q0, q2
vmov q1, q3
b L(ipred_cfl_ac_420_w16_hpad)
L(ipred_cfl_ac_420_w16_wpad1):
1: // Copy and subsample input, padding 4
vldr d2, [r1, #16]
vld1.8 {q0}, [r1, :128], r2
vldr d6, [r12, #16]
vld1.8 {q2}, [r12, :128], r2
vpaddl.u8 d2, d2
vldr d26, [r1, #16]
vpaddl.u8 q0, q0
vld1.8 {q12}, [r1, :128], r2
vpaddl.u8 d6, d6
vldr d30, [r12, #16]
vpaddl.u8 q2, q2
vld1.8 {q14}, [r12, :128], r2
vpaddl.u8 d26, d26
vpaddl.u8 q12, q12
vpaddl.u8 d30, d30
vpaddl.u8 q14, q14
vadd.i16 d2, d2, d6
vadd.i16 q0, q0, q2
vadd.i16 d26, d26, d30
vadd.i16 q12, q12, q14
vshl.i16 d2, d2, #1
vshl.i16 q0, q0, #1
vshl.i16 d6, d26, #1
vshl.i16 q2, q12, #1
vdup.16 d3, d2[3]
vdup.16 d7, d6[3]
subs r8, r8, #2
vst1.16 {q0, q1}, [r0, :128]!
vadd.i16 q8, q8, q0
vadd.i16 q9, q9, q1
vst1.16 {q2, q3}, [r0, :128]!
vadd.i16 q10, q10, q2
vadd.i16 q11, q11, q3
bgt 1b
cmp r4, #0
vmov q0, q2
vmov q1, q3
b L(ipred_cfl_ac_420_w16_hpad)
L(ipred_cfl_ac_420_w16_wpad2):
1: // Copy and subsample input, padding 8
vld1.8 {q0}, [r1, :128], r2
vld1.8 {q1}, [r12, :128], r2
vld1.8 {q2}, [r1, :128], r2
vpaddl.u8 q0, q0
vld1.8 {q3}, [r12, :128], r2
vpaddl.u8 q1, q1
vpaddl.u8 q2, q2
vpaddl.u8 q3, q3
vadd.i16 q0, q0, q1
vadd.i16 q2, q2, q3
vshl.i16 q0, q0, #1
vshl.i16 q2, q2, #1
vdup.16 q1, d1[3]
vdup.16 q3, d5[3]
subs r8, r8, #2
vst1.16 {q0, q1}, [r0, :128]!
vadd.i16 q8, q8, q0
vadd.i16 q9, q9, q1
vst1.16 {q2, q3}, [r0, :128]!
vadd.i16 q10, q10, q2
vadd.i16 q11, q11, q3
bgt 1b
cmp r4, #0
vmov q0, q2
vmov q1, q3
b L(ipred_cfl_ac_420_w16_hpad)
L(ipred_cfl_ac_420_w16_wpad3):
1: // Copy and subsample input, padding 12
vld1.8 {d0}, [r1, :64], r2
vld1.8 {d1}, [r12, :64], r2
vld1.8 {d4}, [r1, :64], r2
vpaddl.u8 q0, q0
vld1.8 {d5}, [r12, :64], r2
vpaddl.u8 q2, q2
vadd.i16 d0, d0, d1
vadd.i16 d4, d4, d5
vshl.i16 d0, d0, #1
vshl.i16 d4, d4, #1
vdup.16 q1, d0[3]
vdup.16 q3, d4[3]
vdup.16 d1, d0[3]
vdup.16 d5, d4[3]
subs r8, r8, #2
vst1.16 {q0, q1}, [r0, :128]!
vadd.i16 q8, q8, q0
vadd.i16 q9, q9, q1
vst1.16 {q2, q3}, [r0, :128]!
vadd.i16 q10, q10, q2
vadd.i16 q11, q11, q3
bgt 1b
cmp r4, #0
vmov q0, q2
vmov q1, q3
b L(ipred_cfl_ac_420_w16_hpad)
L(ipred_cfl_ac_420_w16_hpad):
beq 3f // This assumes that all callers already did "cmp r4, #0"
2: // Vertical padding (h_pad > 0)
subs r4, r4, #2
vst1.16 {q0, q1}, [r0, :128]!
vadd.i16 q8, q8, q0
vadd.i16 q9, q9, q1
vst1.16 {q2, q3}, [r0, :128]!
vadd.i16 q10, q10, q2
vadd.i16 q11, q11, q3
bgt 2b
3:
// Quadruple the height and reuse the w4 summing/subtracting
lsl r6, r6, #2
b L(ipred_cfl_ac_420_w4_calc_subtract_dc)
endfunc
// void cfl_ac_422_8bpc_neon(int16_t *const ac, const pixel *const ypx,
// const ptrdiff_t stride, const int w_pad,
// const int h_pad, const int cw, const int ch);
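// 4:2:2 ac: only horizontally subsampled; luma pairs are summed (vpaddl)
// and scaled by 4 (<<2), again giving 3 fractional bits. Padding and the
// final mean subtraction reuse the 4:2:0 code paths.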
function ipred_cfl_ac_422_8bpc_neon, export=1
push {r4-r8,lr}
ldrd r4, r5, [sp, #24]
ldr r6, [sp, #32]
clz r8, r5
lsl r4, r4, #2
adr r7, L(ipred_cfl_ac_422_tbl)
sub r8, r8, #27
ldr r8, [r7, r8, lsl #2]
vmov.i16 q8, #0
vmov.i16 q9, #0
vmov.i16 q10, #0
vmov.i16 q11, #0
add r7, r7, r8
sub r8, r6, r4 // height - h_pad
rbit lr, r5 // rbit(width)
rbit r12, r6 // rbit(height)
clz lr, lr // ctz(width)
clz r12, r12 // ctz(height)
add lr, lr, r12 // log2sz
add r12, r1, r2
vdup.32 d31, lr
lsl r2, r2, #1
vneg.s32 d31, d31 // -log2sz
bx r7
.align 2
L(ipred_cfl_ac_422_tbl):
.word L(ipred_cfl_ac_422_w16) - L(ipred_cfl_ac_422_tbl) + CONFIG_THUMB
.word L(ipred_cfl_ac_422_w8) - L(ipred_cfl_ac_422_tbl) + CONFIG_THUMB
.word L(ipred_cfl_ac_422_w4) - L(ipred_cfl_ac_422_tbl) + CONFIG_THUMB
L(ipred_cfl_ac_422_w4):
1: // Copy and subsample input
vld1.8 {d0}, [r1, :64], r2
vld1.8 {d1}, [r12, :64], r2
vld1.8 {d2}, [r1, :64], r2
vld1.8 {d3}, [r12, :64], r2
vpaddl.u8 q0, q0
vpaddl.u8 q1, q1
vshl.i16 q0, q0, #2
vshl.i16 q1, q1, #2
subs r8, r8, #4
vst1.16 {q0, q1}, [r0, :128]!
vadd.i16 q8, q8, q0
vadd.i16 q9, q9, q1
bgt 1b
cmp r4, #0
vmov d0, d3
vmov d1, d3
vmov d2, d3
b L(ipred_cfl_ac_420_w4_hpad)
L(ipred_cfl_ac_422_w8):
cmp r3, #0
bne L(ipred_cfl_ac_422_w8_wpad)
1: // Copy and subsample input, without padding
vld1.8 {q0}, [r1, :128], r2
vld1.8 {q1}, [r12, :128], r2
vld1.8 {q2}, [r1, :128], r2
vpaddl.u8 q0, q0
vld1.8 {q3}, [r12, :128], r2
vpaddl.u8 q1, q1
vpaddl.u8 q2, q2
vpaddl.u8 q3, q3
vshl.i16 q0, q0, #2
vshl.i16 q1, q1, #2
vshl.i16 q2, q2, #2
vshl.i16 q3, q3, #2
subs r8, r8, #4
vst1.16 {q0, q1}, [r0, :128]!
vadd.i16 q8, q8, q0
vadd.i16 q9, q9, q1
vst1.16 {q2, q3}, [r0, :128]!
vadd.i16 q10, q10, q2
vadd.i16 q11, q11, q3
bgt 1b
cmp r4, #0
vmov q0, q3
vmov q1, q3
b L(ipred_cfl_ac_420_w8_hpad)
L(ipred_cfl_ac_422_w8_wpad):
1: // Copy and subsample input, padding 4
vld1.8 {d0}, [r1, :64], r2
vld1.8 {d1}, [r12, :64], r2
vld1.8 {d2}, [r1, :64], r2
vld1.8 {d3}, [r12, :64], r2
vpaddl.u8 q0, q0
vpaddl.u8 q1, q1
vshl.i16 q0, q0, #2
vshl.i16 q1, q1, #2
vdup.16 d7, d3[3]
vmov d6, d3
vdup.16 d5, d2[3]
vmov d4, d2
vdup.16 d3, d1[3]
vmov d2, d1
vdup.16 d1, d0[3]
subs r8, r8, #4
vst1.16 {q0, q1}, [r0, :128]!
vadd.i16 q8, q8, q0
vadd.i16 q9, q9, q1
vst1.16 {q2, q3}, [r0, :128]!
vadd.i16 q10, q10, q2
vadd.i16 q11, q11, q3
bgt 1b
cmp r4, #0
vmov q0, q3
vmov q1, q3
b L(ipred_cfl_ac_420_w8_hpad)
L(ipred_cfl_ac_422_w16):
adr r7, L(ipred_cfl_ac_422_w16_tbl)
ldr r3, [r7, r3, lsl #2]
add r7, r7, r3
bx r7
.align 2
L(ipred_cfl_ac_422_w16_tbl):
.word L(ipred_cfl_ac_422_w16_wpad0) - L(ipred_cfl_ac_422_w16_tbl) + CONFIG_THUMB
.word L(ipred_cfl_ac_422_w16_wpad1) - L(ipred_cfl_ac_422_w16_tbl) + CONFIG_THUMB
.word L(ipred_cfl_ac_422_w16_wpad2) - L(ipred_cfl_ac_422_w16_tbl) + CONFIG_THUMB
.word L(ipred_cfl_ac_422_w16_wpad3) - L(ipred_cfl_ac_422_w16_tbl) + CONFIG_THUMB
L(ipred_cfl_ac_422_w16_wpad0):
1: // Copy and subsample input, without padding
vld1.8 {q0, q1}, [r1, :128], r2
vld1.8 {q2, q3}, [r12, :128], r2
vpaddl.u8 q0, q0
vpaddl.u8 q1, q1
vpaddl.u8 q2, q2
vpaddl.u8 q3, q3
vshl.i16 q0, q0, #2
vshl.i16 q1, q1, #2
vshl.i16 q2, q2, #2
vshl.i16 q3, q3, #2
subs r8, r8, #2
vst1.16 {q0, q1}, [r0, :128]!
vadd.i16 q8, q8, q0
vadd.i16 q9, q9, q1
vst1.16 {q2, q3}, [r0, :128]!
vadd.i16 q10, q10, q2
vadd.i16 q11, q11, q3
bgt 1b
cmp r4, #0
vmov q0, q2
vmov q1, q3
b L(ipred_cfl_ac_420_w16_hpad)
L(ipred_cfl_ac_422_w16_wpad1):
1: // Copy and subsample input, padding 4
vldr d2, [r1, #16]
vld1.8 {q0}, [r1, :128], r2
vldr d6, [r12, #16]
vld1.8 {q2}, [r12, :128], r2
vpaddl.u8 d2, d2
vpaddl.u8 q0, q0
vpaddl.u8 d6, d6
vpaddl.u8 q2, q2
vshl.i16 d2, d2, #2
vshl.i16 q0, q0, #2
vshl.i16 d6, d6, #2
vshl.i16 q2, q2, #2
vdup.16 d3, d2[3]
vdup.16 d7, d6[3]
subs r8, r8, #2
vst1.16 {q0, q1}, [r0, :128]!
vadd.i16 q8, q8, q0
vadd.i16 q9, q9, q1
vst1.16 {q2, q3}, [r0, :128]!
vadd.i16 q10, q10, q2
vadd.i16 q11, q11, q3
bgt 1b
cmp r4, #0
vmov q0, q2
vmov q1, q3
b L(ipred_cfl_ac_420_w16_hpad)
L(ipred_cfl_ac_422_w16_wpad2):
1: // Copy and subsample input, padding 8
vld1.8 {q0}, [r1, :128], r2
vld1.8 {q2}, [r12, :128], r2
vpaddl.u8 q0, q0
vpaddl.u8 q2, q2
vshl.i16 q0, q0, #2
vshl.i16 q2, q2, #2
vdup.16 q1, d1[3]
vdup.16 q3, d5[3]
subs r8, r8, #2
vst1.16 {q0, q1}, [r0, :128]!
vadd.i16 q8, q8, q0
vadd.i16 q9, q9, q1
vst1.16 {q2, q3}, [r0, :128]!
vadd.i16 q10, q10, q2
vadd.i16 q11, q11, q3
bgt 1b
cmp r4, #0
vmov q0, q2
vmov q1, q3
b L(ipred_cfl_ac_420_w16_hpad)
L(ipred_cfl_ac_422_w16_wpad3):
1: // Copy and subsample input, padding 12
vld1.8 {d0}, [r1, :64], r2
vld1.8 {d1}, [r12, :64], r2
vpaddl.u8 q0, q0
vshl.i16 q0, q0, #2
vdup.16 q3, d1[3]
vdup.16 q1, d0[3]
vdup.16 d5, d1[3]
vmov d4, d1
vdup.16 d1, d0[3]
subs r8, r8, #2
vst1.16 {q0, q1}, [r0, :128]!
vadd.i16 q8, q8, q0
vadd.i16 q9, q9, q1
vst1.16 {q2, q3}, [r0, :128]!
vadd.i16 q10, q10, q2
vadd.i16 q11, q11, q3
bgt 1b
cmp r4, #0
vmov q0, q2
vmov q1, q3
b L(ipred_cfl_ac_420_w16_hpad)
endfunc
// void cfl_ac_444_8bpc_neon(int16_t *const ac, const pixel *const ypx,
// const ptrdiff_t stride, const int w_pad,
// const int h_pad, const int cw, const int ch);
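// 4:4:4 ac: no subsampling, luma is just shifted left by 3. The w4/w8/w16
// cases reuse the 4:2:0 padding and mean-subtraction paths; w32 keeps its
// own vertical padding loop and widens the sums to 32 bit earlier.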
function ipred_cfl_ac_444_8bpc_neon, export=1
push {r4-r8,lr}
ldrd r4, r5, [sp, #24]
ldr r6, [sp, #32]
clz r8, r5
lsl r4, r4, #2
adr r7, L(ipred_cfl_ac_444_tbl)
sub r8, r8, #26
ldr r8, [r7, r8, lsl #2]
vmov.i16 q8, #0
vmov.i16 q9, #0
vmov.i16 q10, #0
vmov.i16 q11, #0
add r7, r7, r8
sub r8, r6, r4 // height - h_pad
rbit lr, r5 // rbit(width)
rbit r12, r6 // rbit(height)
clz lr, lr // ctz(width)
clz r12, r12 // ctz(height)
add lr, lr, r12 // log2sz
add r12, r1, r2
vdup.32 d31, lr
lsl r2, r2, #1
vneg.s32 d31, d31 // -log2sz
bx r7
.align 2
L(ipred_cfl_ac_444_tbl):
.word L(ipred_cfl_ac_444_w32) - L(ipred_cfl_ac_444_tbl) + CONFIG_THUMB
.word L(ipred_cfl_ac_444_w16) - L(ipred_cfl_ac_444_tbl) + CONFIG_THUMB
.word L(ipred_cfl_ac_444_w8) - L(ipred_cfl_ac_444_tbl) + CONFIG_THUMB
.word L(ipred_cfl_ac_444_w4) - L(ipred_cfl_ac_444_tbl) + CONFIG_THUMB
L(ipred_cfl_ac_444_w4):
1: // Copy and expand input
vld1.32 {d0[]}, [r1, :32], r2
vld1.32 {d0[1]}, [r12, :32], r2
vld1.32 {d2[]}, [r1, :32], r2
vld1.32 {d2[1]}, [r12, :32], r2
vshll.u8 q0, d0, #3
vshll.u8 q1, d2, #3
subs r8, r8, #4
vst1.16 {q0, q1}, [r0, :128]!
vadd.i16 q8, q8, q0
vadd.i16 q9, q9, q1
bgt 1b
cmp r4, #0
vmov d0, d3
vmov d1, d3
vmov d2, d3
b L(ipred_cfl_ac_420_w4_hpad)
L(ipred_cfl_ac_444_w8):
1: // Copy and expand input
vld1.16 {d0}, [r1, :64], r2
vld1.16 {d2}, [r12, :64], r2
vld1.16 {d4}, [r1, :64], r2
vshll.u8 q0, d0, #3
vld1.16 {d6}, [r12, :64], r2
vshll.u8 q1, d2, #3
vshll.u8 q2, d4, #3
vshll.u8 q3, d6, #3
subs r8, r8, #4
vst1.16 {q0, q1}, [r0, :128]!
vadd.i16 q8, q8, q0
vadd.i16 q9, q9, q1
vst1.16 {q2, q3}, [r0, :128]!
vadd.i16 q10, q10, q2
vadd.i16 q11, q11, q3
bgt 1b
cmp r4, #0
vmov q0, q3
vmov q1, q3
b L(ipred_cfl_ac_420_w8_hpad)
L(ipred_cfl_ac_444_w16):
cmp r3, #0
bne L(ipred_cfl_ac_444_w16_wpad)
1: // Copy and expand input, without padding
vld1.8 {q1}, [r1, :128], r2
vld1.8 {q3}, [r12, :128], r2
vshll.u8 q0, d2, #3
vshll.u8 q1, d3, #3
vshll.u8 q2, d6, #3
vshll.u8 q3, d7, #3
subs r8, r8, #2
vst1.16 {q0, q1}, [r0, :128]!
vadd.i16 q8, q8, q0
vadd.i16 q9, q9, q1
vst1.16 {q2, q3}, [r0, :128]!
vadd.i16 q10, q10, q2
vadd.i16 q11, q11, q3
bgt 1b
cmp r4, #0
vmov q0, q2
vmov q1, q3
b L(ipred_cfl_ac_420_w16_hpad)
L(ipred_cfl_ac_444_w16_wpad):
1: // Copy and expand input, padding 8
vld1.8 {d0}, [r1, :64], r2
vld1.8 {d4}, [r12, :64], r2
vshll.u8 q0, d0, #3
vshll.u8 q2, d4, #3
vdup.16 q1, d1[3]
vdup.16 q3, d5[3]
subs r8, r8, #2
vst1.16 {q0, q1}, [r0, :128]!
vadd.i16 q8, q8, q0
vadd.i16 q9, q9, q1
vst1.16 {q2, q3}, [r0, :128]!
vadd.i16 q10, q10, q2
vadd.i16 q11, q11, q3
bgt 1b
cmp r4, #0
vmov q0, q2
vmov q1, q3
b L(ipred_cfl_ac_420_w16_hpad)
L(ipred_cfl_ac_444_w32):
adr r7, L(ipred_cfl_ac_444_w32_tbl)
ldr r3, [r7, r3, lsl #1] // wpad in r3 is even, so r3, lsl #1 == (wpad >> 1) << 2, i.e. word index wpad/2
add r7, r7, r3
bx r7
.align 2
L(ipred_cfl_ac_444_w32_tbl):
.word L(ipred_cfl_ac_444_w32_wpad0) - L(ipred_cfl_ac_444_w32_tbl) + CONFIG_THUMB
.word L(ipred_cfl_ac_444_w32_wpad2) - L(ipred_cfl_ac_444_w32_tbl) + CONFIG_THUMB
.word L(ipred_cfl_ac_444_w32_wpad4) - L(ipred_cfl_ac_444_w32_tbl) + CONFIG_THUMB
.word L(ipred_cfl_ac_444_w32_wpad6) - L(ipred_cfl_ac_444_w32_tbl) + CONFIG_THUMB
L(ipred_cfl_ac_444_w32_wpad0):
1: // Copy and expand input, without padding
vld1.8 {q2, q3}, [r1, :128], r2
vld1.8 {q13, q14}, [r12, :128], r2
vshll.u8 q0, d4, #3
vshll.u8 q1, d5, #3
vshll.u8 q2, d6, #3
vshll.u8 q3, d7, #3
vshll.u8 q12, d26, #3
vshll.u8 q13, d27, #3
subs r8, r8, #2
vst1.16 {q0, q1}, [r0, :128]!
vadd.i16 q8, q8, q0
vadd.i16 q9, q9, q1
vshll.u8 q0, d28, #3
vshll.u8 q1, d29, #3
vst1.16 {q2, q3}, [r0, :128]!
vadd.i16 q10, q10, q2
vadd.i16 q11, q11, q3
vst1.16 {q12, q13}, [r0, :128]!
vadd.i16 q8, q8, q12
vadd.i16 q9, q9, q13
vst1.16 {q0, q1}, [r0, :128]!
vadd.i16 q10, q10, q0
vadd.i16 q11, q11, q1
bgt 1b
cmp r4, #0
b L(ipred_cfl_ac_444_w32_hpad)
L(ipred_cfl_ac_444_w32_wpad2):
1: // Copy and expand input, padding 8
vldr d4, [r1, #16]
vld1.8 {q1}, [r1, :128], r2
vldr d28, [r12, #16]
vld1.8 {q13}, [r12, :128], r2
vshll.u8 q2, d4, #3
vshll.u8 q0, d2, #3
vshll.u8 q1, d3, #3
vshll.u8 q12, d26, #3
vshll.u8 q13, d27, #3
vdup.16 q3, d5[3]
subs r8, r8, #2
vst1.16 {q0, q1}, [r0, :128]!
vadd.i16 q8, q8, q0
vadd.i16 q9, q9, q1
vshll.u8 q0, d28, #3
vst1.16 {q2, q3}, [r0, :128]!
vadd.i16 q10, q10, q2
vadd.i16 q11, q11, q3
vdup.16 q1, d1[3]
vst1.16 {q12, q13}, [r0, :128]!
vadd.i16 q8, q8, q12
vadd.i16 q9, q9, q13
vst1.16 {q0, q1}, [r0, :128]!
vadd.i16 q10, q10, q0
vadd.i16 q11, q11, q1
bgt 1b
cmp r4, #0
b L(ipred_cfl_ac_444_w32_hpad)
L(ipred_cfl_ac_444_w32_wpad4):
1: // Copy and expand input, padding 16
vld1.8 {q1}, [r1, :128], r2
vld1.8 {q13}, [r12, :128], r2
vshll.u8 q0, d2, #3
vshll.u8 q1, d3, #3
vshll.u8 q12, d26, #3
vshll.u8 q13, d27, #3
vdup.16 q2, d3[3]
vdup.16 q3, d3[3]
subs r8, r8, #2
vst1.16 {q0, q1}, [r0, :128]!
vadd.i16 q8, q8, q0
vadd.i16 q9, q9, q1
vdup.16 q0, d27[3]
vdup.16 q1, d27[3]
vst1.16 {q2, q3}, [r0, :128]!
vadd.i16 q10, q10, q2
vadd.i16 q11, q11, q3
vst1.16 {q12, q13}, [r0, :128]!
vadd.i16 q8, q8, q12
vadd.i16 q9, q9, q13
vst1.16 {q0, q1}, [r0, :128]!
vadd.i16 q10, q10, q0
vadd.i16 q11, q11, q1
bgt 1b
cmp r4, #0
b L(ipred_cfl_ac_444_w32_hpad)
L(ipred_cfl_ac_444_w32_wpad6):
1: // Copy and expand input, padding 24
vld1.8 {d0}, [r1, :64], r2
vld1.8 {d24}, [r12, :64], r2
vshll.u8 q0, d0, #3
vshll.u8 q12, d24, #3
subs r8, r8, #2
vdup.16 q1, d1[3]
vdup.16 q2, d1[3]
vdup.16 q3, d1[3]
vst1.16 {q0, q1}, [r0, :128]!
vadd.i16 q8, q8, q0
vadd.i16 q9, q9, q1
vdup.16 q13, d25[3]
vdup.16 q0, d25[3]
vdup.16 q1, d25[3]
vst1.16 {q2, q3}, [r0, :128]!
vadd.i16 q10, q10, q2
vadd.i16 q11, q11, q3
vst1.16 {q12, q13}, [r0, :128]!
vadd.i16 q8, q8, q12
vadd.i16 q9, q9, q13
vst1.16 {q0, q1}, [r0, :128]!
vadd.i16 q10, q10, q0
vadd.i16 q11, q11, q1
bgt 1b
cmp r4, #0
L(ipred_cfl_ac_444_w32_hpad):
beq 3f // This assumes that all callers already did "cmp r4, #0"
2: // Vertical padding (h_pad > 0)
subs r4, r4, #1
vst1.16 {q12, q13}, [r0, :128]!
vadd.i16 q8, q8, q12
vadd.i16 q9, q9, q13
vst1.16 {q0, q1}, [r0, :128]!
vadd.i16 q10, q10, q0
vadd.i16 q11, q11, q1
bgt 2b
3:
// Multiply the height by eight and reuse the w4 subtracting
lsl r6, r6, #3
// Aggregate the sums, with wider intermediates earlier than in
// ipred_cfl_ac_420_w8_calc_subtract_dc.
vpaddl.u16 q0, q8
vpaddl.u16 q1, q9
vpaddl.u16 q2, q10
vpaddl.u16 q3, q11
vadd.i32 q0, q0, q1
vadd.i32 q2, q2, q3
vadd.i32 q0, q0, q2
vadd.i32 d0, d0, d1
vpadd.i32 d0, d0, d0 // sum
sub r0, r0, r6, lsl #3
vrshl.u32 d16, d0, d31 // (sum + (1 << (log2sz - 1))) >>= log2sz
vdup.16 q8, d16[0]
b L(ipred_cfl_ac_420_w4_subtract_dc)
endfunc
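// Rough outline of the ipred_cfl_ac_444 path above in C-like form (an
// illustrative sketch, not the dav1d C reference; variable names are made up):
//
//   for (y = 0; y < height; y++)         // last valid column/row replicated
//       for (x = 0; x < width; x++) {    // into the padded area
//           ac[y][x] = px[y][x] << 3;
//           sum     += ac[y][x];
//       }
//   log2sz = log2(width) + log2(height);
//   dc     = (sum + (1 << (log2sz - 1))) >> log2sz;
//   ac[y][x] -= dc;                      // done in the shared *_subtract_dc tail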
|
Admenri/urge
| 8,005
|
third_party/dav1d/src/arm/32/cdef16.S
|
/*
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2020, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm.S"
#include "util.S"
#include "cdef_tmpl.S"
// r1 = d0/q0
// r2 = d2/q1
.macro pad_top_bot_16 s1, s2, w, stride, r1, r2, align, ret
tst r7, #1 // CDEF_HAVE_LEFT
beq 2f
// CDEF_HAVE_LEFT
tst r7, #2 // CDEF_HAVE_RIGHT
beq 1f
// CDEF_HAVE_LEFT+CDEF_HAVE_RIGHT
vldr s8, [\s1, #-4]
vld1.16 {\r1}, [\s1, :\align]
vldr s9, [\s1, #2*\w]
vldr s10, [\s2, #-4]
vld1.16 {\r2}, [\s2, :\align]
vldr s11, [\s2, #2*\w]
vstr s8, [r0, #-4]
vst1.16 {\r1}, [r0, :\align]
vstr s9, [r0, #2*\w]
add r0, r0, #2*\stride
vstr s10, [r0, #-4]
vst1.16 {\r2}, [r0, :\align]
vstr s11, [r0, #2*\w]
.if \ret
pop {r4-r8,pc}
.else
add r0, r0, #2*\stride
b 3f
.endif
1:
// CDEF_HAVE_LEFT+!CDEF_HAVE_RIGHT
vldr s8, [\s1, #-4]
vld1.16 {\r1}, [\s1, :\align]
vldr s9, [\s2, #-4]
vld1.16 {\r2}, [\s2, :\align]
vstr s8, [r0, #-4]
vst1.16 {\r1}, [r0, :\align]
vstr s12, [r0, #2*\w]
add r0, r0, #2*\stride
vstr s9, [r0, #-4]
vst1.16 {\r2}, [r0, :\align]
vstr s12, [r0, #2*\w]
.if \ret
pop {r4-r8,pc}
.else
add r0, r0, #2*\stride
b 3f
.endif
2:
// !CDEF_HAVE_LEFT
tst r7, #2 // CDEF_HAVE_RIGHT
beq 1f
// !CDEF_HAVE_LEFT+CDEF_HAVE_RIGHT
vld1.16 {\r1}, [\s1, :\align]
vldr s8, [\s1, #2*\w]
vld1.16 {\r2}, [\s2, :\align]
vldr s9, [\s2, #2*\w]
vstr s12, [r0, #-4]
vst1.16 {\r1}, [r0, :\align]
vstr s8, [r0, #2*\w]
add r0, r0, #2*\stride
vstr s12, [r0, #-4]
vst1.16 {\r2}, [r0, :\align]
vstr s9, [r0, #2*\w]
.if \ret
pop {r4-r8,pc}
.else
add r0, r0, #2*\stride
b 3f
.endif
1:
// !CDEF_HAVE_LEFT+!CDEF_HAVE_RIGHT
vld1.16 {\r1}, [\s1, :\align]
vld1.16 {\r2}, [\s2, :\align]
vstr s12, [r0, #-4]
vst1.16 {\r1}, [r0, :\align]
vstr s12, [r0, #2*\w]
add r0, r0, #2*\stride
vstr s12, [r0, #-4]
vst1.16 {\r2}, [r0, :\align]
vstr s12, [r0, #2*\w]
.if \ret
pop {r4-r8,pc}
.else
add r0, r0, #2*\stride
.endif
3:
.endm
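// In C-like terms, pad_top_bot_16 copies two source rows into the padded tmp
// buffer and substitutes the 0x8000 sentinel (a lane of q3, stored via s12,
// set up by the caller) wherever the CDEF_HAVE_LEFT/CDEF_HAVE_RIGHT edges are
// missing. A sketch, not the exact dav1d C reference:
//
//   for each of the two rows:
//       tmp[-2..-1]  = have_left  ? src[-2..-1] : 0x8000;
//       tmp[0..w-1]  = src[0..w-1];
//       tmp[w..w+1]  = have_right ? src[w..w+1] : 0x8000;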
// void dav1d_cdef_paddingX_16bpc_neon(uint16_t *tmp, const pixel *src,
// ptrdiff_t src_stride, const pixel (*left)[2],
// const pixel *const top,
// const pixel *const bottom, int h,
// enum CdefEdgeFlags edges);
// r1 = d0/q0
// r2 = d2/q1
.macro padding_func_16 w, stride, r1, r2, align
function cdef_padding\w\()_16bpc_neon, export=1
push {r4-r8,lr}
ldrd r4, r5, [sp, #24]
ldrd r6, r7, [sp, #32]
vmov.i16 q3, #0x8000
tst r7, #4 // CDEF_HAVE_TOP
bne 1f
// !CDEF_HAVE_TOP
sub r12, r0, #2*(2*\stride+2)
vmov.i16 q2, #0x8000
vst1.16 {q2,q3}, [r12]!
.if \w == 8
vst1.16 {q2,q3}, [r12]!
.endif
b 3f
1:
// CDEF_HAVE_TOP
add r8, r4, r2
sub r0, r0, #2*(2*\stride)
pad_top_bot_16 r4, r8, \w, \stride, \r1, \r2, \align, 0
// Middle section
3:
tst r7, #1 // CDEF_HAVE_LEFT
beq 2f
// CDEF_HAVE_LEFT
tst r7, #2 // CDEF_HAVE_RIGHT
beq 1f
// CDEF_HAVE_LEFT+CDEF_HAVE_RIGHT
0:
vld1.32 {d2[]}, [r3, :32]!
vldr s5, [r1, #2*\w]
vld1.16 {\r1}, [r1, :\align], r2
subs r6, r6, #1
vstr s4, [r0, #-4]
vst1.16 {\r1}, [r0, :\align]
vstr s5, [r0, #2*\w]
add r0, r0, #2*\stride
bgt 0b
b 3f
1:
// CDEF_HAVE_LEFT+!CDEF_HAVE_RIGHT
vld1.32 {d2[]}, [r3, :32]!
vld1.16 {\r1}, [r1, :\align], r2
subs r6, r6, #1
vstr s4, [r0, #-4]
vst1.16 {\r1}, [r0, :\align]
vstr s12, [r0, #2*\w]
add r0, r0, #2*\stride
bgt 1b
b 3f
2:
tst r7, #2 // CDEF_HAVE_RIGHT
beq 1f
// !CDEF_HAVE_LEFT+CDEF_HAVE_RIGHT
0:
vldr s4, [r1, #2*\w]
vld1.16 {\r1}, [r1, :\align], r2
subs r6, r6, #1
vstr s12, [r0, #-4]
vst1.16 {\r1}, [r0, :\align]
vstr s4, [r0, #2*\w]
add r0, r0, #2*\stride
bgt 0b
b 3f
1:
// !CDEF_HAVE_LEFT+!CDEF_HAVE_RIGHT
vld1.16 {\r1}, [r1, :\align], r2
subs r6, r6, #1
vstr s12, [r0, #-4]
vst1.16 {\r1}, [r0, :\align]
vstr s12, [r0, #2*\w]
add r0, r0, #2*\stride
bgt 1b
3:
tst r7, #8 // CDEF_HAVE_BOTTOM
bne 1f
// !CDEF_HAVE_BOTTOM
sub r12, r0, #4
vmov.i16 q2, #0x8000
vst1.16 {q2,q3}, [r12]!
.if \w == 8
vst1.16 {q2,q3}, [r12]!
.endif
pop {r4-r8,pc}
1:
// CDEF_HAVE_BOTTOM
add r8, r5, r2
pad_top_bot_16 r5, r8, \w, \stride, \r1, \r2, \align, 1
endfunc
.endm
padding_func_16 8, 16, q0, q1, 128
padding_func_16 4, 8, d0, d2, 64
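// Taken together, cdef_padding{4,8}_16bpc_neon fills a tmp buffer with a
// 2-pixel border on every side of the w x h block: two rows from `top`,
// h rows from `src` (with the `left` pixel pairs), and two rows from
// `bottom`. Conceptually (a sketch, not the dav1d C reference):
//
//   for (y = -2; y < h + 2; y++)
//       for (x = -2; x < w + 2; x++)
//           tmp[y * tmp_stride + x] = available(x, y, edges) ? pixel(x, y)
//                                                            : 0x8000;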
tables
filter 8, 16
filter 4, 16
find_dir 16
|
Admenri/urge
| 11,987
|
third_party/dav1d/src/arm/32/refmvs.S
|
/*
* Copyright © 2021, VideoLAN and dav1d authors
* Copyright © 2021, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm.S"
#include "util.S"
// void dav1d_splat_mv_neon(refmvs_block **rr, const refmvs_block *rmv,
// int bx4, int bw4, int bh4)
function splat_mv_neon, export=1
push {r4, lr}
vld1.8 {q3}, [r1]
ldr r4, [sp, #8]
clz r3, r3
adr lr, L(splat_tbl)
sub r3, r3, #26
vext.8 q2, q3, q3, #12
ldr r3, [lr, r3, lsl #2]
add r2, r2, r2, lsl #1
vext.8 q0, q2, q3, #4
add r3, lr, r3
vext.8 q1, q2, q3, #8
lsl r2, r2, #2
vext.8 q2, q2, q3, #12
vmov q3, q0
1:
ldr r1, [r0], #4
subs r4, r4, #1
add r1, r1, r2
bx r3
.align 2
L(splat_tbl):
.word 320f - L(splat_tbl) + CONFIG_THUMB
.word 160f - L(splat_tbl) + CONFIG_THUMB
.word 80f - L(splat_tbl) + CONFIG_THUMB
.word 40f - L(splat_tbl) + CONFIG_THUMB
.word 20f - L(splat_tbl) + CONFIG_THUMB
.word 10f - L(splat_tbl) + CONFIG_THUMB
10:
vst1.8 {d0}, [r1]
vstr s2, [r1, #8]
bgt 1b
pop {r4, pc}
20:
vst1.8 {q0}, [r1]
vstr d2, [r1, #16]
bgt 1b
pop {r4, pc}
40:
vst1.8 {q0, q1}, [r1]!
vst1.8 {q2}, [r1]
bgt 1b
pop {r4, pc}
320:
vst1.8 {q0, q1}, [r1]!
vst1.8 {q2, q3}, [r1]!
vst1.8 {q1, q2}, [r1]!
vst1.8 {q0, q1}, [r1]!
vst1.8 {q2, q3}, [r1]!
vst1.8 {q1, q2}, [r1]!
160:
vst1.8 {q0, q1}, [r1]!
vst1.8 {q2, q3}, [r1]!
vst1.8 {q1, q2}, [r1]!
80:
vst1.8 {q0, q1}, [r1]!
vst1.8 {q2, q3}, [r1]!
vst1.8 {q1, q2}, [r1]
bgt 1b
pop {r4, pc}
endfunc
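// C-equivalent of the splat operation above (a sketch; refmvs_block is a
// 12-byte struct copied whole):
//
//   for (int y = 0; y < bh4; y++) {
//       refmvs_block *row = rr[y] + bx4;
//       for (int x = 0; x < bw4; x++)
//           row[x] = *rmv;
//   }
//
// The NEON version pre-rotates the 12-byte pattern into q0-q3 so the copies
// can be done with full 16-byte stores.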
const mv_tbls, align=4
.byte 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255
.byte 0, 1, 2, 3, 8, 0, 1, 2, 3, 8, 0, 1, 2, 3, 8, 0
.byte 4, 5, 6, 7, 9, 4, 5, 6, 7, 9, 4, 5, 6, 7, 9, 4
.byte 4, 5, 6, 7, 9, 4, 5, 6, 7, 9, 4, 5, 6, 7, 9, 4
endconst
const mask_mult, align=4
.byte 1, 2, 1, 2, 0, 0, 0, 0
endconst
// void dav1d_save_tmvs_neon(refmvs_temporal_block *rp, ptrdiff_t stride,
// refmvs_block **rr, const uint8_t *ref_sign,
// int col_end8, int row_end8,
// int col_start8, int row_start8)
function save_tmvs_neon, export=1
push {r4-r11,lr}
ldrd r4, r5, [sp, #36]
ldrd r6, r7, [sp, #44]
vmov.i8 d30, #0
vld1.8 {d31}, [r3]
adr r8, L(save_tmvs_tbl)
movrel_local lr, mask_mult
movrel_local r12, mv_tbls
vld1.8 {d29}, [lr]
vext.8 d31, d30, d31, #7 // [0, ref_sign]
mov r3, #5
mul r1, r1, r3 // stride *= 5
sub r5, r5, r7 // h = row_end8 - row_start8
lsl r7, r7, #1 // row_start8 <<= 1
1:
mov r3, #5
mov r11, #12*2
and r9, r7, #30 // (y & 15) * 2
ldr r9, [r2, r9, lsl #2] // b = rr[(y & 15) * 2]
add r9, r9, #12 // &b[... + 1]
mla r10, r4, r11, r9 // end_cand_b = &b[col_end8*2 + 1]
mla r9, r6, r11, r9 // cand_b = &b[x*2 + 1]
mla r3, r6, r3, r0 // &rp[x]
push {r2,r4,r6}
2:
ldrb r11, [r9, #10] // cand_b->bs
add lr, r9, #8
vld1.8 {d0, d1}, [r9] // cand_b->mv
add r11, r8, r11, lsl #3
vld1.16 {d2[]}, [lr] // cand_b->ref
ldrh lr, [r11] // bw8
mov r2, r8
add r9, r9, lr, lsl #1 // cand_b += bw8*2
cmp r9, r10
vmov d4, d0
bge 3f
ldrb r2, [r9, #10] // cand_b->bs
add lr, r9, #8
vld1.8 {d6, d7}, [r9] // cand_b->mv
add r2, r8, r2, lsl #3
vld1.16 {d2[1]}, [lr] // cand_b->ref
ldrh lr, [r2] // bw8
add r9, r9, lr, lsl #1 // cand_b += bw8*2
vmov d5, d6
3:
vabs.s16 q2, q2 // abs(mv[].xy)
vtbl.8 d2, {d31}, d2 // ref_sign[ref]
vshr.u16 q2, q2, #12 // abs(mv[].xy) >> 12
vmull.u8 q1, d2, d29 // ref_sign[ref] * {1, 2}
vceq.i32 q2, q2, #0 // abs(mv[].xy) <= 4096
vmovn.i32 d4, q2 // abs() condition to 16 bit
vand d2, d2, d4 // h[0-3] contains conditions for mv[0-1]
vpadd.i16 d2, d2, d2 // Combine condition for [1] and [0]
vmov.u16 r4, d2[0] // Extract case for first block
vmov.u16 r6, d2[1]
ldr r11, [r11, #4] // Fetch jump table entry
ldr r2, [r2, #4]
add r4, r12, r4, lsl #4
add r6, r12, r6, lsl #4
vld1.8 {d2, d3}, [r4] // Load permutation table based on case
vld1.8 {d4, d5}, [r6]
add r11, r8, r11 // Find jump table target
add r2, r8, r2
vtbl.8 d16, {d0, d1}, d2 // Permute cand_b to output refmvs_temporal_block
vtbl.8 d17, {d0, d1}, d3
vtbl.8 d18, {d6, d7}, d4
vtbl.8 d19, {d6, d7}, d5
vmov q0, q8
// q1 follows on q0 (q8), with another 3 full repetitions of the pattern.
vext.8 q1, q8, q8, #1
vext.8 q10, q9, q9, #1
// q2 ends with 3 complete repetitions of the pattern.
vext.8 q2, q8, q1, #4
vext.8 q11, q9, q10, #4
blx r11
bge 4f // if (cand_b >= end)
vmov q0, q9
vmov q1, q10
vmov q2, q11
cmp r9, r10
blx r2
blt 2b // if (cand_b < end)
4:
pop {r2,r4,r6}
subs r5, r5, #1 // h--
add r7, r7, #2 // y += 2
add r0, r0, r1 // rp += stride
bgt 1b
pop {r4-r11,pc}
.align 2
L(save_tmvs_tbl):
.word 16 * 12
.word 160f - L(save_tmvs_tbl) + CONFIG_THUMB
.word 16 * 12
.word 160f - L(save_tmvs_tbl) + CONFIG_THUMB
.word 8 * 12
.word 80f - L(save_tmvs_tbl) + CONFIG_THUMB
.word 8 * 12
.word 80f - L(save_tmvs_tbl) + CONFIG_THUMB
.word 8 * 12
.word 80f - L(save_tmvs_tbl) + CONFIG_THUMB
.word 8 * 12
.word 80f - L(save_tmvs_tbl) + CONFIG_THUMB
.word 4 * 12
.word 40f - L(save_tmvs_tbl) + CONFIG_THUMB
.word 4 * 12
.word 40f - L(save_tmvs_tbl) + CONFIG_THUMB
.word 4 * 12
.word 40f - L(save_tmvs_tbl) + CONFIG_THUMB
.word 4 * 12
.word 40f - L(save_tmvs_tbl) + CONFIG_THUMB
.word 2 * 12
.word 20f - L(save_tmvs_tbl) + CONFIG_THUMB
.word 2 * 12
.word 20f - L(save_tmvs_tbl) + CONFIG_THUMB
.word 2 * 12
.word 20f - L(save_tmvs_tbl) + CONFIG_THUMB
.word 2 * 12
.word 20f - L(save_tmvs_tbl) + CONFIG_THUMB
.word 2 * 12
.word 20f - L(save_tmvs_tbl) + CONFIG_THUMB
.word 1 * 12
.word 10f - L(save_tmvs_tbl) + CONFIG_THUMB
.word 1 * 12
.word 10f - L(save_tmvs_tbl) + CONFIG_THUMB
.word 1 * 12
.word 10f - L(save_tmvs_tbl) + CONFIG_THUMB
.word 1 * 12
.word 10f - L(save_tmvs_tbl) + CONFIG_THUMB
.word 1 * 12
.word 10f - L(save_tmvs_tbl) + CONFIG_THUMB
.word 1 * 12
.word 10f - L(save_tmvs_tbl) + CONFIG_THUMB
.word 1 * 12
.word 10f - L(save_tmvs_tbl) + CONFIG_THUMB
10:
add r4, r3, #4
vst1.32 {d0[0]}, [r3]
vst1.8 {d0[4]}, [r4]
add r3, r3, #5
bx lr
20:
add r4, r3, #8
vst1.8 {d0}, [r3]
vst1.16 {d1[0]}, [r4]
add r3, r3, #2*5
bx lr
40:
add r4, r3, #16
vst1.8 {q0}, [r3]
vst1.32 {d2[0]}, [r4]
add r3, r3, #4*5
bx lr
80:
add r4, r3, #(8*5-16)
// This writes 6 full entries plus 2 extra bytes
vst1.8 {q0, q1}, [r3]
// Write the last few, overlapping with the first write.
vst1.8 {q2}, [r4]
add r3, r3, #8*5
bx lr
160:
add r4, r3, #6*5
add r6, r3, #12*5
// This writes 6 full entries plus 2 extra bytes
vst1.8 {q0, q1}, [r3]
// Write another 6 full entries, slightly overlapping with the first set
vst1.8 {q0, q1}, [r4]
add r4, r3, #(16*5-16)
// Write 8 bytes (one full entry) after the first 12
vst1.8 {d0}, [r6]
// Write the last 3 entries
vst1.8 {q2}, [r4]
add r3, r3, #16*5
bx lr
endfunc
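// High-level sketch of what save_tmvs does per row of 8x8 blocks (simplified;
// the exact reference-selection order is in the dav1d C reference, and the
// names below are illustrative):
//
//   for (y = row_start8; y < row_end8; y++, rp += stride) {
//       b = rr[(y & 15) * 2];
//       for (x = col_start8; x < col_end8; ) {
//           cand = &b[x * 2 + 1];
//           // pick an mv/ref pair from cand whose reference has ref_sign[]
//           // set and whose |mv.x|, |mv.y| are below 4096, else an invalid
//           // entry, and write it as a 5-byte refmvs_temporal_block:
//           for (n = 0; n < bw8; n++, x++)
//               rp[x] = (refmvs_temporal_block) { .mv = mv, .ref = ref };
//       }
//   }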
|
Admenri/urge
| 22,032
|
third_party/dav1d/src/arm/32/msac.S
|
/*
* Copyright © 2019, VideoLAN and dav1d authors
* Copyright © 2020, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm.S"
#include "util.S"
#define BUF_POS 0
#define BUF_END 4
#define DIF 8
#define RNG 12
#define CNT 16
#define ALLOW_UPDATE_CDF 20
const coeffs
.short 60, 56, 52, 48, 44, 40, 36, 32, 28, 24, 20, 16, 12, 8, 4, 0
.short 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
endconst
const bits, align=4
.short 0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80
.short 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000
endconst
.macro vld1_align_n d0, q0, q1, src, n
.if \n == 4
vld1.16 {\d0}, [\src, :64]
.elseif \n == 8
vld1.16 {\q0}, [\src, :128]
.else
vld1.16 {\q0, \q1}, [\src, :128]
.endif
.endm
.macro vld1_n d0, q0, q1, src, n
.if \n == 4
vld1.16 {\d0}, [\src]
.elseif \n == 8
vld1.16 {\q0}, [\src]
.else
vld1.16 {\q0, \q1}, [\src]
.endif
.endm
.macro vst1_align_n d0, q0, q1, src, n
.if \n == 4
vst1.16 {\d0}, [\src, :64]
.elseif \n == 8
vst1.16 {\q0}, [\src, :128]
.else
vst1.16 {\q0, \q1}, [\src, :128]
.endif
.endm
.macro vst1_n d0, q0, q1, src, n
.if \n == 4
vst1.16 {\d0}, [\src]
.elseif \n == 8
vst1.16 {\q0}, [\src]
.else
vst1.16 {\q0, \q1}, [\src]
.endif
.endm
.macro vshr_n d0, d1, d2, s0, s1, s2, s3, s4, s5, n
.if \n == 4
vshr.u16 \d0, \s0, \s3
.else
vshr.u16 \d1, \s1, \s4
.if \n == 16
vshr.u16 \d2, \s2, \s5
.endif
.endif
.endm
.macro vadd_n d0, d1, d2, s0, s1, s2, s3, s4, s5, n
.if \n == 4
vadd.i16 \d0, \s0, \s3
.else
vadd.i16 \d1, \s1, \s4
.if \n == 16
vadd.i16 \d2, \s2, \s5
.endif
.endif
.endm
.macro vsub_n d0, d1, d2, s0, s1, s2, s3, s4, s5, n
.if \n == 4
vsub.i16 \d0, \s0, \s3
.else
vsub.i16 \d1, \s1, \s4
.if \n == 16
vsub.i16 \d2, \s2, \s5
.endif
.endif
.endm
.macro vand_n d0, d1, d2, s0, s1, s2, s3, s4, s5, n
.if \n == 4
vand \d0, \s0, \s3
.else
vand \d1, \s1, \s4
.if \n == 16
vand \d2, \s2, \s5
.endif
.endif
.endm
.macro vcge_n d0, d1, d2, s0, s1, s2, s3, s4, s5, n
.if \n == 4
vcge.u16 \d0, \s0, \s3
.else
vcge.u16 \d1, \s1, \s4
.if \n == 16
vcge.u16 \d2, \s2, \s5
.endif
.endif
.endm
.macro vrhadd_n d0, d1, d2, s0, s1, s2, s3, s4, s5, n
.if \n == 4
vrhadd.u16 \d0, \s0, \s3
.else
vrhadd.u16 \d1, \s1, \s4
.if \n == 16
vrhadd.u16 \d2, \s2, \s5
.endif
.endif
.endm
.macro vshl_n d0, d1, d2, s0, s1, s2, s3, s4, s5, n
.if \n == 4
vshl.s16 \d0, \s0, \s3
.else
vshl.s16 \d1, \s1, \s4
.if \n == 16
vshl.s16 \d2, \s2, \s5
.endif
.endif
.endm
.macro vqdmulh_n d0, d1, d2, s0, s1, s2, s3, s4, s5, n
.if \n == 4
vqdmulh.s16 \d0, \s0, \s3
.else
vqdmulh.s16 \d1, \s1, \s4
.if \n == 16
vqdmulh.s16 \d2, \s2, \s5
.endif
.endif
.endm
// unsigned dav1d_msac_decode_symbol_adapt4_neon(MsacContext *s, uint16_t *cdf,
// size_t n_symbols);
function msac_decode_symbol_adapt4_neon, export=1
.macro decode_update n
push {r4-r10,lr}
sub sp, sp, #48
add r8, r0, #RNG
vld1_align_n d0, q0, q1, r1, \n // cdf
vld1.16 {d16[]}, [r8, :16] // rng
movrel_local r9, coeffs, 30
vmov.i16 d30, #0x7f00 // 0x7f00
sub r9, r9, r2, lsl #1
vmvn.i16 q14, #0x3f // 0xffc0
add r8, sp, #14
vand d22, d16, d30 // rng & 0x7f00
vst1.16 {d16[0]}, [r8, :16] // store original u = s->rng
vand_n d4, q2, q3, d0, q0, q1, d28, q14, q14, \n // cdf & 0xffc0
.if \n > 4
vmov d23, d22
.endif
vld1_n d16, q8, q9, r9, \n // EC_MIN_PROB * (n_symbols - ret)
vqdmulh_n d20, q10, q11, d4, q2, q3, d22, q11, q11, \n // ((cdf >> EC_PROB_SHIFT) * (r - 128)) >> 1
add r8, r0, #DIF + 2
vadd_n d16, q8, q9, d4, q2, q3, d16, q8, q9, \n // v = cdf + EC_MIN_PROB * (n_symbols - ret)
.if \n == 4
vmov.i16 d17, #0
.endif
vadd_n d16, q8, q9, d20, q10, q11, d16, q8, q9, \n // v = ((cdf >> EC_PROB_SHIFT) * r) >> 1 + EC_MIN_PROB * (n_symbols - ret)
add r9, sp, #16
vld1.16 {d20[]}, [r8, :16] // dif >> (EC_WIN_SIZE - 16)
movrel_local r8, bits
vst1_n q8, q8, q9, r9, \n // store v values to allow indexed access
vmov d21, d20
vld1_align_n q12, q12, q13, r8, \n
.if \n == 16
vmov q11, q10
.endif
vcge_n q2, q2, q3, q10, q10, q11, q8, q8, q9, \n // c >= v
vand_n q10, q10, q11, q2, q2, q3, q12, q12, q13, \n // One bit per halfword set in the mask
.if \n == 16
vadd.i16 q10, q10, q11
.endif
vadd.i16 d20, d20, d21 // Aggregate mask bits
ldr r4, [r0, #ALLOW_UPDATE_CDF]
vpadd.i16 d20, d20, d20
lsl r10, r2, #1
vpadd.i16 d20, d20, d20
vmov.u16 r3, d20[0]
cmp r4, #0
rbit r3, r3
clz lr, r3 // ret
beq L(renorm)
// update_cdf
ldrh r3, [r1, r10] // count = cdf[n_symbols]
vmov.i8 q10, #0xff
.if \n == 16
mov r4, #-5
.else
mvn r12, r2
mov r4, #-4
cmn r12, #3 // set C if n_symbols <= 2
.endif
vrhadd_n d16, q8, q9, d20, q10, q10, d4, q2, q3, \n // i >= val ? -1 : 32768
.if \n == 16
sub r4, r4, r3, lsr #4 // -((count >> 4) + 5)
.else
lsr r12, r3, #4 // count >> 4
sbc r4, r4, r12 // -((count >> 4) + (n_symbols > 2) + 4)
.endif
vsub_n d16, q8, q9, d16, q8, q9, d0, q0, q1, \n // (32768 - cdf[i]) or (-1 - cdf[i])
.if \n == 4
vdup.16 d20, r4 // -rate
.else
vdup.16 q10, r4 // -rate
.endif
sub r3, r3, r3, lsr #5 // count - (count == 32)
vsub_n d0, q0, q1, d0, q0, q1, d4, q2, q3, \n // cdf + (i >= val ? 1 : 0)
vshl_n d16, q8, q9, d16, q8, q9, d20, q10, q10, \n // ({32768,-1} - cdf[i]) >> rate
add r3, r3, #1 // count + (count < 32)
vadd_n d0, q0, q1, d0, q0, q1, d16, q8, q9, \n // cdf + (32768 - cdf[i]) >> rate
vst1_align_n d0, q0, q1, r1, \n
strh r3, [r1, r10]
.endm
decode_update 4
L(renorm):
add r8, sp, #16
add r8, r8, lr, lsl #1
ldrh r3, [r8] // v
ldrh r4, [r8, #-2] // u
ldr r6, [r0, #CNT]
ldr r7, [r0, #DIF]
sub r4, r4, r3 // rng = u - v
clz r5, r4 // clz(rng)
eor r5, r5, #16 // d = clz(rng) ^ 16
sub r7, r7, r3, lsl #16 // dif - (v << 16)
L(renorm2):
lsl r4, r4, r5 // rng << d
subs r6, r6, r5 // cnt -= d
lsl r7, r7, r5 // (dif - (v << 16)) << d
str r4, [r0, #RNG]
bhs 4f
// refill
ldr r3, [r0, #BUF_POS] // BUF_POS
ldr r4, [r0, #BUF_END] // BUF_END
add r5, r3, #4
subs r5, r5, r4
bhi 6f
ldr r8, [r3] // next_bits
rsb r5, r6, #16
add r4, r6, #16 // shift_bits = cnt + 16
mvn r8, r8
lsr r5, r5, #3 // num_bytes_read
rev r8, r8 // next_bits = bswap(next_bits)
lsr r8, r8, r4 // next_bits >>= shift_bits
2: // refill_end
add r3, r3, r5
add r6, r6, r5, lsl #3 // cnt += num_bits_read
str r3, [r0, #BUF_POS]
3: // refill_end2
orr r7, r7, r8 // dif |= next_bits
4: // end
str r6, [r0, #CNT]
str r7, [r0, #DIF]
mov r0, lr
add sp, sp, #48
pop {r4-r10,pc}
5: // pad_with_ones
add r8, r6, #-240
lsr r8, r8, r8
b 3b
6: // refill_eob
cmp r3, r4
bhs 5b
ldr r8, [r4, #-4]
lsl r5, r5, #3
lsr r8, r8, r5
add r5, r6, #16
mvn r8, r8
sub r4, r4, r3 // num_bytes_left
rev r8, r8
lsr r8, r8, r5
rsb r5, r6, #16
lsr r5, r5, #3
cmp r5, r4
it hs
movhs r5, r4
b 2b
endfunc
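// Scalar equivalent of the decode step above, following the inline comments
// (a sketch; EC_PROB_SHIFT = 6 and EC_MIN_PROB = 4 in dav1d):
//
//   c = dif >> (EC_WIN_SIZE - 16);   // EC_WIN_SIZE == 32 here, so >> 16
//   for (ret = 0; ; ret++) {
//       v = (((cdf[ret] >> EC_PROB_SHIFT) * (rng >> 8)) >> 1)
//           + EC_MIN_PROB * (n_symbols - ret);
//       if (c >= v)
//           break;
//   }
//   // then optionally update_cdf(), and renormalize in L(renorm):
//   //   rng = u - v;  dif -= v << 16;  shift both left by clz(rng) ^ 16,
//   //   refilling dif from the byte stream when cnt goes negative.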
function msac_decode_symbol_adapt8_neon, export=1
decode_update 8
b L(renorm)
endfunc
function msac_decode_symbol_adapt16_neon, export=1
decode_update 16
b L(renorm)
endfunc
function msac_decode_hi_tok_neon, export=1
push {r4-r10,lr}
vld1.16 {d0}, [r1, :64] // cdf
add r4, r0, #RNG
vmov.i16 d31, #0x7f00 // 0x7f00
movrel_local r5, coeffs, 30-2*3
vmvn.i16 d30, #0x3f // 0xffc0
ldrh r9, [r1, #6] // count = cdf[n_symbols]
vld1.16 {d1[]}, [r4, :16] // rng
movrel_local r4, bits
vld1.16 {d29}, [r5] // EC_MIN_PROB * (n_symbols - ret)
add r5, r0, #DIF + 2
vld1.16 {q8}, [r4, :128]
mov r2, #-24
vand d20, d0, d30 // cdf & 0xffc0
ldr r10, [r0, #ALLOW_UPDATE_CDF]
vld1.16 {d2[]}, [r5, :16] // dif >> (EC_WIN_SIZE - 16)
sub sp, sp, #48
ldr r6, [r0, #CNT]
ldr r7, [r0, #DIF]
vmov d3, d2
1:
vand d23, d1, d31 // rng & 0x7f00
vqdmulh.s16 d18, d20, d23 // ((cdf >> EC_PROB_SHIFT) * (r - 128)) >> 1
add r12, sp, #14
vadd.i16 d6, d20, d29 // v = cdf + EC_MIN_PROB * (n_symbols - ret)
vadd.i16 d6, d18, d6 // v = ((cdf >> EC_PROB_SHIFT) * r) >> 1 + EC_MIN_PROB * (n_symbols - ret)
vmov.i16 d7, #0
vst1.16 {d1[0]}, [r12, :16] // store original u = s->rng
add r12, sp, #16
vcge.u16 q2, q1, q3 // c >= v
vst1.16 {q3}, [r12] // store v values to allow indexed access
vand q9, q2, q8 // One bit per halfword set in the mask
vadd.i16 d18, d18, d19 // Aggregate mask bits
vpadd.i16 d18, d18, d18
vpadd.i16 d18, d18, d18
vmov.u16 r3, d18[0]
cmp r10, #0
add r2, r2, #5
rbit r3, r3
add r8, sp, #16
clz lr, r3 // ret
beq 2f
// update_cdf
vmov.i8 d22, #0xff
mov r4, #-5
vrhadd.u16 d6, d22, d4 // i >= val ? -1 : 32768
sub r4, r4, r9, lsr #4 // -((count >> 4) + 5)
vsub.i16 d6, d6, d0 // (32768 - cdf[i]) or (-1 - cdf[i])
vdup.16 d18, r4 // -rate
sub r9, r9, r9, lsr #5 // count - (count == 32)
vsub.i16 d0, d0, d4 // cdf + (i >= val ? 1 : 0)
vshl.s16 d6, d6, d18 // ({32768,-1} - cdf[i]) >> rate
add r9, r9, #1 // count + (count < 32)
vadd.i16 d0, d0, d6 // cdf + (32768 - cdf[i]) >> rate
vst1.16 {d0}, [r1, :64]
vand d20, d0, d30 // cdf & 0xffc0
strh r9, [r1, #6]
2:
add r8, r8, lr, lsl #1
ldrh r3, [r8] // v
ldrh r4, [r8, #-2] // u
sub r4, r4, r3 // rng = u - v
clz r5, r4 // clz(rng)
eor r5, r5, #16 // d = clz(rng) ^ 16
sub r7, r7, r3, lsl #16 // dif - (v << 16)
lsl r4, r4, r5 // rng << d
subs r6, r6, r5 // cnt -= d
lsl r7, r7, r5 // (dif - (v << 16)) << d
str r4, [r0, #RNG]
vdup.16 d1, r4
bhs 5f
// refill
ldr r3, [r0, #BUF_POS] // BUF_POS
ldr r4, [r0, #BUF_END] // BUF_END
add r5, r3, #4
subs r5, r5, r4
bhi 7f
ldr r8, [r3] // next_bits
rsb r5, r6, #16
add r4, r6, #16 // shift_bits = cnt + 16
mvn r8, r8
lsr r5, r5, #3 // num_bytes_read
rev r8, r8 // next_bits = bswap(next_bits)
lsr r8, r8, r4 // next_bits >>= shift_bits
3: // refill_end
add r3, r3, r5
add r6, r6, r5, lsl #3 // cnt += num_bits_read
str r3, [r0, #BUF_POS]
4: // refill_end2
orr r7, r7, r8 // dif |= next_bits
5: // end
lsl lr, lr, #1
sub lr, lr, #5
lsr r12, r7, #16
adds r2, r2, lr // carry = tok_br < 3 || tok == 15
vdup.16 q1, r12
bcc 1b // loop if !carry
add r2, r2, #30
str r6, [r0, #CNT]
add sp, sp, #48
str r7, [r0, #DIF]
lsr r0, r2, #1
pop {r4-r10,pc}
6: // pad_with_ones
add r8, r6, #-240
lsr r8, r8, r8
b 4b
7: // refill_eob
cmp r3, r4
bhs 6b
ldr r8, [r4, #-4]
lsl r5, r5, #3
lsr r8, r8, r5
add r5, r6, #16
mvn r8, r8
sub r4, r4, r3 // num_bytes_left
rev r8, r8
lsr r8, r8, r5
rsb r5, r6, #16
lsr r5, r5, #3
cmp r5, r4
it hs
movhs r5, r4
b 3b
endfunc
function msac_decode_bool_equi_neon, export=1
push {r4-r10,lr}
ldr r5, [r0, #RNG]
ldr r6, [r0, #CNT]
sub sp, sp, #48
ldr r7, [r0, #DIF]
bic r4, r5, #0xff // r &= 0xff00
add r4, r4, #8
mov r2, #0
subs r8, r7, r4, lsl #15 // dif - vw
lsr r4, r4, #1 // v
sub r5, r5, r4 // r - v
itee lo
movlo r2, #1
movhs r4, r5 // if (ret) v = r - v;
movhs r7, r8 // if (ret) dif = dif - vw;
clz r5, r4 // clz(rng)
eor r5, r5, #16 // d = clz(rng) ^ 16
mov lr, r2
b L(renorm2)
endfunc
function msac_decode_bool_neon, export=1
push {r4-r10,lr}
ldr r5, [r0, #RNG]
ldr r6, [r0, #CNT]
sub sp, sp, #48
ldr r7, [r0, #DIF]
lsr r4, r5, #8 // r >> 8
bic r1, r1, #0x3f // f &= ~63
mul r4, r4, r1
mov r2, #0
lsr r4, r4, #7
add r4, r4, #4 // v
subs r8, r7, r4, lsl #16 // dif - vw
sub r5, r5, r4 // r - v
itee lo
movlo r2, #1
movhs r4, r5 // if (ret) v = r - v;
movhs r7, r8 // if (ret) dif = dif - vw;
clz r5, r4 // clz(rng)
eor r5, r5, #16 // d = clz(rng) ^ 16
mov lr, r2
b L(renorm2)
endfunc
function msac_decode_bool_adapt_neon, export=1
push {r4-r10,lr}
ldr r9, [r1] // cdf[0-1]
ldr r5, [r0, #RNG]
movw lr, #0xffc0
ldr r6, [r0, #CNT]
sub sp, sp, #48
ldr r7, [r0, #DIF]
lsr r4, r5, #8 // r >> 8
and r2, r9, lr // f &= ~63
mul r4, r4, r2
mov r2, #0
lsr r4, r4, #7
add r4, r4, #4 // v
subs r8, r7, r4, lsl #16 // dif - vw
sub r5, r5, r4 // r - v
ldr r10, [r0, #ALLOW_UPDATE_CDF]
itee lo
movlo r2, #1
movhs r4, r5 // if (ret) v = r - v;
movhs r7, r8 // if (ret) dif = dif - vw;
cmp r10, #0
clz r5, r4 // clz(rng)
eor r5, r5, #16 // d = clz(rng) ^ 16
mov lr, r2
beq L(renorm2)
lsr r2, r9, #16 // count = cdf[1]
uxth r9, r9 // cdf[0]
sub r3, r2, r2, lsr #5 // count - (count >= 32)
lsr r2, r2, #4 // count >> 4
add r10, r3, #1 // count + (count < 32)
add r2, r2, #4 // rate = (count >> 4) | 4
sub r9, r9, lr // cdf[0] -= bit
sub r3, r9, lr, lsl #15 // {cdf[0], cdf[0] - 32769}
asr r3, r3, r2 // {cdf[0], cdf[0] - 32769} >> rate
sub r9, r9, r3 // cdf[0]
strh r9, [r1]
strh r10, [r1, #2]
b L(renorm2)
endfunc
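// Scalar sketch of the bool decoders above (msac_decode_bool{,_adapt,_equi});
// f is the probability value (cdf[0] for _adapt, 1 << 14 for _equi):
//
//   v   = (((rng >> 8) * (f & ~63)) >> 7) + 4;
//   bit = dif < (v << 16);
//   if (bit)  rng  = v;
//   else    { rng -= v;  dif -= v << 16; }
//   // then renormalize via L(renorm2); the _adapt variant also nudges
//   // cdf[0] towards the decoded bit by a (... >> rate) step,
//   // rate = 4 + (count >> 4), and updates the counter.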
|
Admenri/urge
| 75,475
|
third_party/dav1d/src/arm/32/filmgrain16.S
|
/*
* Copyright © 2021, VideoLAN and dav1d authors
* Copyright © 2021, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm.S"
#include "util.S"
#include "src/arm/asm-offsets.h"
#define GRAIN_WIDTH 82
#define GRAIN_HEIGHT 73
#define SUB_GRAIN_WIDTH 44
#define SUB_GRAIN_HEIGHT 38
.macro increment_seed steps, shift=1
lsr r11, r2, #3
lsr r12, r2, #12
lsr lr, r2, #1
eor r11, r2, r11 // (r >> 0) ^ (r >> 3)
eor r12, r12, lr // (r >> 12) ^ (r >> 1)
eor r11, r11, r12 // (r >> 0) ^ (r >> 3) ^ (r >> 12) ^ (r >> 1)
.if \shift
lsr r2, r2, #\steps
.endif
and r11, r11, #((1 << \steps) - 1) // bit
.if \shift
orr r2, r2, r11, lsl #(16 - \steps) // *state
.else
orr r2, r2, r11, lsl #16 // *state
.endif
.endm
.macro read_rand dest, bits, age
ubfx \dest, r2, #16 - \bits - \age, #\bits
.endm
.macro read_shift_rand dest, bits
ubfx \dest, r2, #17 - \bits, #\bits
lsr r2, r2, #1
.endm
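// Single-step reference for the LFSR used above (the macros unroll several
// steps at a time); this mirrors the AV1 film grain pseudo-random generator:
//
//   bit   = ((r >> 0) ^ (r >> 1) ^ (r >> 3) ^ (r >> 12)) & 1;
//   r     = (r >> 1) | (bit << 15);
//   value = (r >> (16 - bits)) & ((1 << bits) - 1);   // read_rand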
// special calling convention:
// r2 holds seed
// r3 holds dav1d_gaussian_sequence
// clobbers r11-r12
// returns in d0-d1
function get_gaussian_neon
push {r5-r6,lr}
increment_seed 4
read_rand r5, 11, 3
read_rand r6, 11, 2
add r5, r3, r5, lsl #1
add r6, r3, r6, lsl #1
vld1.16 {d0[0]}, [r5]
read_rand r5, 11, 1
vld1.16 {d0[1]}, [r6]
add r5, r3, r5, lsl #1
read_rand r6, 11, 0
increment_seed 4
add r6, r3, r6, lsl #1
vld1.16 {d0[2]}, [r5]
read_rand r5, 11, 3
vld1.16 {d0[3]}, [r6]
add r5, r3, r5, lsl #1
read_rand r6, 11, 2
vld1.16 {d1[0]}, [r5]
add r6, r3, r6, lsl #1
read_rand r5, 11, 1
vld1.16 {d1[1]}, [r6]
read_rand r6, 11, 0
add r5, r3, r5, lsl #1
add r6, r3, r6, lsl #1
vld1.16 {d1[2]}, [r5]
vld1.16 {d1[3]}, [r6]
pop {r5-r6,pc}
endfunc
function get_grain_2_neon
push {r11,lr}
increment_seed 2
read_rand r11, 11, 1
read_rand r12, 11, 0
add r11, r3, r11, lsl #1
add r12, r3, r12, lsl #1
vld1.16 {d0[0]}, [r11]
vld1.16 {d0[1]}, [r12]
vrshl.s16 d0, d0, d30
pop {r11,pc}
endfunc
.macro get_grain_2 dst
bl get_grain_2_neon
.ifnc \dst, d0
vmov \dst, d0
.endif
.endm
function get_grain_4_neon
push {r11,lr}
increment_seed 4
read_rand r11, 11, 3
read_rand r12, 11, 2
add r11, r3, r11, lsl #1
add r12, r3, r12, lsl #1
vld1.16 {d0[0]}, [r11]
read_rand r11, 11, 1
vld1.16 {d0[1]}, [r12]
read_rand r12, 11, 0
add r11, r3, r11, lsl #1
add r12, r3, r12, lsl #1
vld1.16 {d0[2]}, [r11]
vld1.16 {d0[3]}, [r12]
vrshl.s16 d0, d0, d30
pop {r11,pc}
endfunc
.macro get_grain_4 dst
bl get_grain_4_neon
.ifnc \dst, d0
vmov \dst, d0
.endif
.endm
// r1 holds the number of entries to produce
// r6, r8 and r10 hold the previous output entries
// q0 holds the vector of produced entries
// q1 holds the input vector of sums from above
.macro output_lag n
function output_lag\n\()_neon
push {r0, lr}
.if \n == 1
mvn lr, r5 // grain_min = ~grain_max
.else
mov r0, #1
mov lr, #1
sub r7, r7, #1
sub r9, r9, #1
lsl r0, r0, r7
lsl lr, lr, r9
add r7, r7, #1
add r9, r9, #1
.endif
1:
read_shift_rand r12, 11
vmov.32 r11, d2[0]
lsl r12, r12, #1
vext.8 q0, q0, q0, #2
ldrsh r12, [r3, r12]
.if \n == 1
mla r11, r6, r4, r11 // sum (above) + *coeff * prev output
add r6, r11, r8 // 1 << (ar_coeff_shift - 1)
add r12, r12, r10
asr r6, r6, r7 // >> ar_coeff_shift
asr r12, r12, r9 // >> (4 - bitdepth_min_8 + grain_scale_shift)
add r6, r6, r12
cmp r6, r5
.elseif \n == 2
mla r11, r8, r4, r11 // sum (above) + *coeff * prev output 1
mla r11, r6, r10, r11 // += *coeff * prev output 2
mov r8, r6
add r6, r11, r0 // 1 << (ar_coeff_shift - 1)
add r12, r12, lr // 1 << (4 - bitdepth_min_8 + grain_scale_shift - 1)
asr r6, r6, r7 // >> ar_coeff_shift
asr r12, r12, r9 // >> (4 - bitdepth_min_8 + grain_scale_shift)
add r6, r6, r12
push {lr}
cmp r6, r5
mvn lr, r5 // grain_min = ~grain_max
.else
push {r1-r3}
sbfx r1, r4, #0, #8
sbfx r2, r4, #8, #8
sbfx r3, r4, #16, #8
mla r11, r10, r1, r11 // sum (above) + *coeff * prev output 1
mla r11, r8, r2, r11 // sum (above) + *coeff * prev output 2
mla r11, r6, r3, r11 // += *coeff * prev output 3
pop {r1-r3}
mov r10, r8
mov r8, r6
add r6, r11, r0 // 1 << (ar_coeff_shift - 1)
add r12, r12, lr // 1 << (4 - bitdepth_min_8 + grain_scale_shift - 1)
asr r6, r6, r7 // >> ar_coeff_shift
asr r12, r12, r9 // >> (4 - bitdepth_min_8 + grain_scale_shift)
add r6, r6, r12
push {lr}
cmp r6, r5
mvn lr, r5 // grain_min = ~grain_max
.endif
it gt
movgt r6, r5
cmp r6, lr
it lt
movlt r6, lr
.if \n >= 2
pop {lr}
.endif
subs r1, r1, #1
vext.8 q1, q1, q1, #4
vmov.16 d1[3], r6
bgt 1b
pop {r0, pc}
endfunc
.endm
output_lag 1
output_lag 2
output_lag 3
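// Per-sample C sketch of the AR filter step implemented by output_lagN_neon
// (illustrative only; the vector code keeps the column sums for 8 samples
// in q1 and produces outputs one at a time because of the serial dependency):
//
//   sum    = sum_above + coeff0 * out[-1]
//            (+ coeff1 * out[-2] + coeff2 * out[-3] for lag 2/3);
//   grain  = (sum + (1 << (ar_coeff_shift - 1))) >> ar_coeff_shift;
//   grain += (gauss + (1 << (gs_shift - 1))) >> gs_shift;
//            // gs_shift = 4 - bitdepth_min_8 + grain_scale_shift
//   out[0] = clamp(grain, grain_min, grain_max);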
function sum_lag1_above_neon
sub r12, r0, #1*GRAIN_WIDTH*2 - 16
vld1.16 {q10}, [r12] // load top right
vext.8 q0, q8, q9, #14 // top left, top mid
vext.8 q1, q9, q10, #2 // top left, top mid
vmull.s16 q2, d18, d28
vmlal.s16 q2, d0, d27
vmlal.s16 q2, d2, d29
vmull.s16 q3, d19, d28
vmlal.s16 q3, d1, d27
vmlal.s16 q3, d3, d29
vmov q8, q9
vmov q9, q10
bx lr
endfunc
.macro sum_lag_n_body lag, type, uv_layout, edge, elems, uv_coeff
.ifc \lag\()_\edge, lag3_left
bl sum_lag3_left_above_neon
.else
bl sum_\lag\()_above_neon
.endif
.ifc \type, uv_420
vpush {q6-q7}
add r12, r11, #GRAIN_WIDTH*2
vld1.16 {q0, q1}, [r11]!
vld1.16 {q6, q7}, [r12]!
vpadd.i16 d0, d0, d1
vpadd.i16 d1, d2, d3
vpadd.i16 d12, d12, d13
vpadd.i16 d13, d14, d15
vadd.i16 q0, q0, q6
vpop {q6-q7}
vrshr.s16 q0, q0, #2
.endif
.ifc \type, uv_422
vld1.16 {q0, q1}, [r11]!
vpadd.i16 d0, d0, d1
vpadd.i16 d1, d2, d3
vrshr.s16 q0, q0, #1
.endif
.ifc \type, uv_444
vld1.16 {q0}, [r11]!
.endif
.if \uv_layout
.ifnb \uv_coeff
vdup.8 d13, \uv_coeff
vmovl.s8 q6, d13
.endif
vmlal.s16 q2, d0, d13
vmlal.s16 q3, d1, d13
.endif
.if \uv_layout && \elems == 8
b sum_\lag\()_y_\edge\()_start
.elseif \uv_layout == 444 && \elems == 7
b sum_\lag\()_y_\edge\()_start
.elseif \uv_layout == 422 && \elems == 1
b sum_\lag\()_uv_420_\edge\()_start
.else
sum_\lag\()_\type\()_\edge\()_start:
push {r11}
.if \elems > 4
.ifc \edge, left
increment_seed 4
read_rand r11, 11, 3
read_rand r12, 11, 2
add r11, r3, r11, lsl #1
add r12, r3, r12, lsl #1
vld1.16 {d1[1]}, [r11]
read_rand r11, 11, 1
vld1.16 {d1[2]}, [r12]
add r11, r3, r11, lsl #1
vld1.16 {d1[3]}, [r11]
lsl r2, r2, #1 // shift back the state as if we'd done increment_seed with shift=0
vrshl.s16 d1, d1, d30
vext.8 q2, q2, q2, #12
.ifc \lag, lag3
vmov.s16 r10, d1[1]
.endif
.ifnc \lag, lag1
vmov.s16 r8, d1[2]
.endif
vmov.s16 r6, d1[3]
vmov q1, q2
mov r1, #1
bl output_\lag\()_neon
.else
increment_seed 4, shift=0
vmov q1, q2
mov r1, #4
bl output_\lag\()_neon
.endif
increment_seed 4, shift=0
vmov q1, q3
.ifc \edge, right
mov r1, #3
bl output_\lag\()_neon
read_shift_rand r12, 11
add r12, r3, r12, lsl #1
vld1.16 {d2[0]}, [r12]
vrshl.s16 d2, d2, d30
vext.8 q0, q0, q1, #2
.else
mov r1, #4
bl output_\lag\()_neon
.endif
.else
// elems == 1
increment_seed 4, shift=0
vmov q1, q2
mov r1, #1
bl output_\lag\()_neon
lsr r2, r2, #3
read_rand r11, 11, 2
read_rand r12, 11, 1
add r11, r3, r11, lsl #1
add r12, r3, r12, lsl #1
vld1.16 {d2[0]}, [r11]
read_rand r11, 11, 0
vld1.16 {d2[1]}, [r12]
add r11, r3, r11, lsl #1
vld1.16 {d2[2]}, [r11]
vrshl.s16 d2, d2, d30
vext.8 q0, q0, q1, #14
.endif
vst1.16 {q0}, [r0]!
pop {r11}
pop {r1, pc}
.endif
.endm
.macro sum_lag1_func type, uv_layout, edge, elems=8
function sum_\type\()_lag1_\edge\()_neon
push {r1, lr}
.ifc \edge, left
sub r12, r0, #1*GRAIN_WIDTH*2
vld1.8 {q9}, [r12] // load the previous block right above
.endif
sum_lag_n_body lag1, \type, \uv_layout, \edge, \elems
endfunc
.endm
sum_lag1_func y, 0, left
sum_lag1_func y, 0, mid
sum_lag1_func y, 0, right, 7
sum_lag1_func uv_444, 444, left
sum_lag1_func uv_444, 444, mid
sum_lag1_func uv_444, 444, right, 7
sum_lag1_func uv_422, 422, left
sum_lag1_func uv_422, 422, mid
sum_lag1_func uv_422, 422, right, 1
sum_lag1_func uv_420, 420, left
sum_lag1_func uv_420, 420, mid
sum_lag1_func uv_420, 420, right, 1
function sum_lag2_above_neon
push {lr}
sub r12, r0, #2*GRAIN_WIDTH*2 - 16
sub lr, r0, #1*GRAIN_WIDTH*2 - 16
vld1.16 {q10}, [r12] // load top right
vld1.16 {q13}, [lr]
vdup.8 d10, d28[0]
vext.8 q0, q8, q9, #12 // top left, top mid
vdup.8 d12, d28[1]
vext.8 q1, q8, q9, #14
vdup.8 d14, d28[3]
vext.8 q4, q9, q10, #2 // top mid, top right
vmovl.s8 q5, d10
vmovl.s8 q6, d12
vmovl.s8 q7, d14
vmull.s16 q2, d0, d10
vmlal.s16 q2, d2, d12
vmlal.s16 q2, d8, d14
vmull.s16 q3, d1, d10
vmlal.s16 q3, d3, d12
vmlal.s16 q3, d9, d14
vdup.8 d10, d28[4]
vext.8 q0, q9, q10, #4 // top mid, top right
vdup.8 d12, d28[5]
vext.8 q1, q11, q12, #12 // top left, top mid
vdup.8 d14, d28[6]
vext.8 q4, q11, q12, #14
vmovl.s8 q5, d10
vmovl.s8 q6, d12
vmovl.s8 q7, d14
vmlal.s16 q2, d0, d10
vmlal.s16 q2, d2, d12
vmlal.s16 q2, d8, d14
vmlal.s16 q3, d1, d10
vmlal.s16 q3, d3, d12
vmlal.s16 q3, d9, d14
vdup.8 d10, d29[0]
vext.8 q0, q12, q13, #2 // top mid, top right
vdup.8 d12, d29[1]
vext.8 q1, q12, q13, #4
vdup.8 d14, d28[2]
vdup.8 d8, d28[7]
vmovl.s8 q5, d10
vmovl.s8 q6, d12
vmovl.s8 q7, d14
vmovl.s8 q4, d8
vmlal.s16 q2, d0, d10
vmlal.s16 q2, d2, d12
vmlal.s16 q2, d18, d14
vmlal.s16 q2, d24, d8
vmlal.s16 q3, d1, d10
vmlal.s16 q3, d3, d12
vmlal.s16 q3, d19, d14
vmlal.s16 q3, d25, d8
vmov q8, q9
vmov q9, q10
vmov q11, q12
vmov q12, q13
pop {pc}
endfunc
.macro sum_lag2_func type, uv_layout, edge, elems=8
function sum_\type\()_lag2_\edge\()_neon
push {r1, lr}
.ifc \edge, left
sub r12, r0, #2*GRAIN_WIDTH*2
sub lr, r0, #1*GRAIN_WIDTH*2
vld1.16 {q9}, [r12] // load the previous block right above
vld1.16 {q12}, [lr]
.endif
sum_lag_n_body lag2, \type, \uv_layout, \edge, \elems, uv_coeff=d29[4]
endfunc
.endm
sum_lag2_func y, 0, left
sum_lag2_func y, 0, mid
sum_lag2_func y, 0, right, 7
sum_lag2_func uv_444, 444, left
sum_lag2_func uv_444, 444, mid
sum_lag2_func uv_444, 444, right, 7
sum_lag2_func uv_422, 422, left
sum_lag2_func uv_422, 422, mid
sum_lag2_func uv_422, 422, right, 1
sum_lag2_func uv_420, 420, left
sum_lag2_func uv_420, 420, mid
sum_lag2_func uv_420, 420, right, 1
function sum_lag3_left_above_neon
// A separate codepath for the left edge, to avoid reading outside
// of the edge of the buffer.
sub r12, r0, #3*GRAIN_WIDTH*2
vld1.8 {q11, q12}, [r12]
vext.8 q12, q11, q12, #10
vext.8 q11, q11, q11, #10
b sum_lag3_above_start
endfunc
function sum_lag3_above_neon
movw r12, #(3*GRAIN_WIDTH + 3)*2
sub r12, r0, r12
vld1.8 {q11, q12}, [r12]
sum_lag3_above_start:
vdup.8 d12, d26[0]
vext.8 q1, q11, q12, #2
vdup.8 d14, d26[1]
vext.8 q4, q11, q12, #4
vdup.8 d16, d26[2]
vext.8 q5, q11, q12, #6
vdup.8 d18, d26[3]
vmovl.s8 q6, d12
vmovl.s8 q7, d14
vmovl.s8 q8, d16
vmovl.s8 q9, d18
movw r12, #(2*GRAIN_WIDTH + 3)*2
sub r12, r0, r12
vmull.s16 q2, d22, d12
vmlal.s16 q2, d2, d14
vmlal.s16 q2, d8, d16
vmlal.s16 q2, d10, d18
vmull.s16 q3, d23, d12
vmlal.s16 q3, d3, d14
vmlal.s16 q3, d9, d16
vmlal.s16 q3, d11, d18
vdup.8 d12, d26[4]
vext.8 q0, q11, q12, #8
vdup.8 d14, d26[5]
vext.8 q1, q11, q12, #10
vdup.8 d16, d26[6]
vext.8 q4, q11, q12, #12
vld1.8 {q11, q12}, [r12]
vdup.8 d18, d26[7]
vmovl.s8 q6, d12
vmovl.s8 q7, d14
vmovl.s8 q8, d16
vmovl.s8 q9, d18
vmlal.s16 q2, d0, d12
vmlal.s16 q2, d2, d14
vmlal.s16 q2, d8, d16
vmlal.s16 q2, d22, d18
vmlal.s16 q3, d1, d12
vmlal.s16 q3, d3, d14
vmlal.s16 q3, d9, d16
vmlal.s16 q3, d23, d18
vdup.8 d12, d27[0]
vext.8 q0, q11, q12, #2
vdup.8 d14, d27[1]
vext.8 q1, q11, q12, #4
vdup.8 d16, d27[2]
vext.8 q4, q11, q12, #6
vdup.8 d18, d27[3]
vext.8 q5, q11, q12, #8
vmovl.s8 q6, d12
vmovl.s8 q7, d14
vmovl.s8 q8, d16
vmovl.s8 q9, d18
sub r12, r0, #(1*GRAIN_WIDTH + 3)*2
vmlal.s16 q2, d0, d12
vmlal.s16 q2, d2, d14
vmlal.s16 q2, d8, d16
vmlal.s16 q2, d10, d18
vmlal.s16 q3, d1, d12
vmlal.s16 q3, d3, d14
vmlal.s16 q3, d9, d16
vmlal.s16 q3, d11, d18
vdup.8 d12, d27[4]
vext.8 q0, q11, q12, #10
vdup.8 d14, d27[5]
vext.8 q1, q11, q12, #12
vld1.8 {q11, q12}, [r12]
vdup.8 d16, d27[6]
vdup.8 d18, d27[7]
vmovl.s8 q6, d12
vmovl.s8 q7, d14
vext.8 q5, q11, q12, #2
vmovl.s8 q8, d16
vmovl.s8 q9, d18
vmlal.s16 q2, d0, d12
vmlal.s16 q2, d2, d14
vmlal.s16 q2, d22, d16
vmlal.s16 q2, d10, d18
vmlal.s16 q3, d1, d12
vmlal.s16 q3, d3, d14
vmlal.s16 q3, d23, d16
vmlal.s16 q3, d11, d18
vdup.8 d12, d28[0]
vext.8 q0, q11, q12, #4
vdup.8 d14, d28[1]
vext.8 q1, q11, q12, #6
vdup.8 d16, d28[2]
vext.8 q4, q11, q12, #8
vdup.8 d18, d28[3]
vext.8 q5, q11, q12, #10
vmovl.s8 q6, d12
vmovl.s8 q7, d14
vmovl.s8 q8, d16
vmovl.s8 q9, d18
vmlal.s16 q2, d0, d12
vmlal.s16 q2, d2, d14
vmlal.s16 q2, d8, d16
vmlal.s16 q2, d10, d18
vmlal.s16 q3, d1, d12
vmlal.s16 q3, d3, d14
vmlal.s16 q3, d9, d16
vmlal.s16 q3, d11, d18
vdup.8 d12, d28[4]
vext.8 q0, q11, q12, #12
vmovl.s8 q6, d12
vmlal.s16 q2, d0, d12
vmlal.s16 q3, d1, d12
bx lr
endfunc
.macro sum_lag3_func type, uv_layout, edge, elems=8
function sum_\type\()_lag3_\edge\()_neon
push {r1, lr}
sum_lag_n_body lag3, \type, \uv_layout, \edge, \elems, uv_coeff=d29[0]
endfunc
.endm
sum_lag3_func y, 0, left
sum_lag3_func y, 0, mid
sum_lag3_func y, 0, right, 7
sum_lag3_func uv_444, 444, left
sum_lag3_func uv_444, 444, mid
sum_lag3_func uv_444, 444, right, 7
sum_lag3_func uv_422, 422, left
sum_lag3_func uv_422, 422, mid
sum_lag3_func uv_422, 422, right, 1
sum_lag3_func uv_420, 420, left
sum_lag3_func uv_420, 420, mid
sum_lag3_func uv_420, 420, right, 1
function generate_grain_rows_neon
push {r10-r11,lr}
1:
mov r10, #80
2:
bl get_gaussian_neon
vrshl.s16 q0, q0, q15
subs r10, r10, #8
vst1.16 {q0}, [r0]!
bgt 2b
get_grain_2 d0
subs r1, r1, #1
vst1.32 {d0[0]}, [r0]!
bgt 1b
pop {r10-r11,pc}
endfunc
function generate_grain_rows_44_neon
push {r10-r11,lr}
1:
mov r10, #40
2:
bl get_gaussian_neon
vrshl.s16 q0, q0, q15
subs r10, r10, #8
vst1.16 {q0}, [r0]!
bgt 2b
get_grain_4 d0
subs r1, r1, #1
vst1.16 {d0}, [r0]
add r0, r0, #GRAIN_WIDTH*2-80
bgt 1b
pop {r10-r11,pc}
endfunc
function gen_grain_uv_444_lag0_neon
vld1.16 {q3}, [r11]!
gen_grain_uv_lag0_8_start:
push {r11,lr}
bl get_gaussian_neon
vrshl.s16 q0, q0, q15
gen_grain_uv_lag0_8_add:
vand q3, q3, q1
vmull.s16 q2, d6, d22
vmull.s16 q3, d7, d22
vrshl.s32 q2, q2, q12
vrshl.s32 q3, q3, q12
vqmovn.s32 d4, q2
vqmovn.s32 d5, q3
vqadd.s16 q2, q2, q0
vmin.s16 q2, q2, q9
vmax.s16 q2, q2, q10
vst1.16 {q2}, [r0]!
pop {r11,pc}
endfunc
function gen_grain_uv_420_lag0_8_neon
add r12, r11, #GRAIN_WIDTH*2
vld1.16 {q2,q3}, [r11]!
vld1.16 {q4,q5}, [r12]
vpadd.i16 d4, d4, d5
vpadd.i16 d5, d6, d7
vpadd.i16 d8, d8, d9
vpadd.i16 d9, d10, d11
vadd.i16 q2, q2, q4
vrshr.s16 q3, q2, #2
b gen_grain_uv_lag0_8_start
endfunc
function gen_grain_uv_422_lag0_8_neon
vld1.16 {q2,q3}, [r11]!
vpadd.i16 d4, d4, d5
vpadd.i16 d5, d6, d7
vrshr.s16 q3, q2, #1
b gen_grain_uv_lag0_8_start
endfunc
function gen_grain_uv_420_lag0_4_neon
add r12, r11, #GRAIN_WIDTH*2
vld1.16 {q2}, [r11]
vld1.16 {q0}, [r12]
add r11, r11, #32
vpadd.i16 d4, d4, d5
vpadd.i16 d0, d0, d1
vadd.i16 d4, d4, d0
vrshr.s16 d6, d4, #2
push {r11,lr}
get_grain_4 d0
b gen_grain_uv_lag0_8_add
endfunc
function gen_grain_uv_422_lag0_4_neon
vld1.16 {q2}, [r11]
add r11, r11, #32
vpadd.i16 d4, d4, d5
vrshr.s16 d6, d4, #1
push {r11,lr}
get_grain_4 d0
b gen_grain_uv_lag0_8_add
endfunc
.macro gen_grain_82 type
function generate_grain_\type\()_16bpc_neon, export=1
push {r4-r11,lr}
.ifc \type, uv_444
ldr r4, [sp, #36]
mov r12, r3
mov lr, #28
add r11, r1, #3*GRAIN_WIDTH*2
mov r1, r2
mul r12, r12, lr
clz lr, r4
.else
clz lr, r2
.endif
movrel r3, X(gaussian_sequence)
sub lr, lr, #24 // -bitdepth_min_8
ldr r2, [r1, #FGD_SEED]
ldr r9, [r1, #FGD_GRAIN_SCALE_SHIFT]
.ifc \type, y
add r4, r1, #FGD_AR_COEFFS_Y
.else
add r4, r1, #FGD_AR_COEFFS_UV
.endif
add r9, r9, lr // grain_scale_shift - bitdepth_min_8
adr r5, L(gen_grain_\type\()_tbl)
ldr r6, [r1, #FGD_AR_COEFF_LAG]
add r9, r9, #4
ldr r6, [r5, r6, lsl #2]
vdup.16 q15, r9 // 4 - bitdepth_min_8 + data->grain_scale_shift
add r5, r5, r6
vneg.s16 q15, q15
.ifc \type, uv_444
push {lr}
cmp r12, #0
movw r10, #0x49d8
movw lr, #0xb524
// Intentionally using a separate register instead of moveq with an
// immediate constant, to avoid armv8 deprecated it instruction forms.
it eq
moveq r10, lr
add r4, r4, r12 // Add offset to ar_coeffs_uv[1]
eor r2, r2, r10
pop {lr}
.endif
ldr r7, [r1, #FGD_AR_COEFF_SHIFT]
neg lr, lr // bitdepth_min_8
mov r8, #1
mov r10, #1
lsl r8, r8, r7 // 1 << ar_coeff_shift
lsl r10, r10, r9 // 1 << (4 + data->grain_scale_shift)
lsr r8, r8, #1 // 1 << (ar_coeff_shift - 1)
lsr r10, r10, #1 // 1 << (4 + data->grain_scale_shift - 1)
bx r5
.align 2
L(gen_grain_\type\()_tbl):
.word L(generate_grain_\type\()_lag0) - L(gen_grain_\type\()_tbl) + CONFIG_THUMB
.word L(generate_grain_\type\()_lag1) - L(gen_grain_\type\()_tbl) + CONFIG_THUMB
.word L(generate_grain_\type\()_lag2) - L(gen_grain_\type\()_tbl) + CONFIG_THUMB
.word L(generate_grain_\type\()_lag3) - L(gen_grain_\type\()_tbl) + CONFIG_THUMB
L(generate_grain_\type\()_lag0):
.ifc \type, y
mov r1, #GRAIN_HEIGHT
bl generate_grain_rows_neon
.else
mov r5, #128
lsl r5, r5, lr // 128 << bitdepth_min_8
sub r5, r5, #1 // (128 << bitdepth_min_8) - 1
mvn r6, r5 // grain_min = ~grain_max
mov r1, #3
bl generate_grain_rows_neon
mov r1, #GRAIN_HEIGHT-3
vdup.32 q12, r7
vld1.8 {d22[]}, [r4] // ar_coeffs_uv[0]
vmov.i8 q0, #0
vmov.i8 q1, #255
vdup.16 q9, r5
vdup.16 q10, r6
vext.8 q13, q0, q1, #10
vext.8 q14, q1, q0, #2
vneg.s32 q12, q12
vmovl.s8 q11, d22
1:
vmov q1, q13
bl gen_grain_uv_444_lag0_neon // 8
vmov.i8 q1, #255
bl gen_grain_uv_444_lag0_neon // 16
bl gen_grain_uv_444_lag0_neon // 24
bl gen_grain_uv_444_lag0_neon // 32
bl gen_grain_uv_444_lag0_neon // 40
bl gen_grain_uv_444_lag0_neon // 48
bl gen_grain_uv_444_lag0_neon // 56
bl gen_grain_uv_444_lag0_neon // 64
bl gen_grain_uv_444_lag0_neon // 72
vmov q1, q14
bl gen_grain_uv_444_lag0_neon // 80
get_grain_2 d16
subs r1, r1, #1
add r11, r11, #4
vst1.32 {d16[0]}, [r0]!
bgt 1b
.endif
pop {r4-r11,pc}
L(generate_grain_\type\()_lag1):
vpush {q4-q7}
mov r5, #128
lsl r5, r5, lr // 128 << bitdepth_min_8
sub r5, r5, #1 // (128 << bitdepth_min_8) - 1
vld1.8 {d27[]}, [r4]! // ar_coeffs_y[0]
vld1.8 {d28[]}, [r4]! // ar_coeffs_y[1]
vld1.8 {d29[]}, [r4] // ar_coeffs_y[2]
.ifc \type, y
ldrsb r4, [r4, #1] // ar_coeffs_y[3]
.else
add r4, r4, #2
.endif
mov r1, #3
.ifc \type, uv_444
vld1.8 {d13[]}, [r4] // ar_coeffs_uv[4]
ldrsb r4, [r4, #-1] // ar_coeffs_uv[3]
.endif
bl generate_grain_rows_neon
vmovl.s8 q13, d27
vmovl.s8 q12, d29
vmovl.s8 q14, d28
vmov d29, d24
.ifc \type, uv_444
vmovl.s8 q6, d13
.endif
mov r1, #GRAIN_HEIGHT - 3
1:
bl sum_\type\()_lag1_left_neon // 8
bl sum_\type\()_lag1_mid_neon // 16
bl sum_\type\()_lag1_mid_neon // 24
bl sum_\type\()_lag1_mid_neon // 32
bl sum_\type\()_lag1_mid_neon // 40
bl sum_\type\()_lag1_mid_neon // 48
bl sum_\type\()_lag1_mid_neon // 56
bl sum_\type\()_lag1_mid_neon // 64
bl sum_\type\()_lag1_mid_neon // 72
bl sum_\type\()_lag1_right_neon // 80
get_grain_2 d16
subs r1, r1, #1
.ifc \type, uv_444
add r11, r11, #4
.endif
vst1.32 {d16[0]}, [r0]!
bgt 1b
vpop {q4-q7}
pop {r4-r11,pc}
L(generate_grain_\type\()_lag2):
vpush {q4-q7}
mov r5, #128
lsl r5, r5, lr // 128 << bitdepth_min_8
sub r5, r5, #1 // (128 << bitdepth_min_8) - 1
vld1.8 {d28,d29}, [r4] // ar_coeffs_y[0-11], ar_coeffs_uv[0-12]
vmov.s8 r4, d29[2]
vmov.s8 r10, d29[3]
mov r1, #3
bl generate_grain_rows_neon
mov r1, #GRAIN_HEIGHT - 3
1:
bl sum_\type\()_lag2_left_neon // 8
bl sum_\type\()_lag2_mid_neon // 16
bl sum_\type\()_lag2_mid_neon // 24
bl sum_\type\()_lag2_mid_neon // 32
bl sum_\type\()_lag2_mid_neon // 40
bl sum_\type\()_lag2_mid_neon // 48
bl sum_\type\()_lag2_mid_neon // 56
bl sum_\type\()_lag2_mid_neon // 64
bl sum_\type\()_lag2_mid_neon // 72
bl sum_\type\()_lag2_right_neon // 80
get_grain_2 d16
subs r1, r1, #1
.ifc \type, uv_444
add r11, r11, #4
.endif
vst1.32 {d16[0]}, [r0]!
bgt 1b
vpop {q4-q7}
pop {r4-r11,pc}
L(generate_grain_\type\()_lag3):
vpush {q4-q7}
mov r5, #128
lsl r5, r5, lr // 128 << bitdepth_min_8
sub r5, r5, #1 // (128 << bitdepth_min_8) - 1
vld1.8 {q13, q14}, [r4] // ar_coeffs_y[0-23], ar_coeffs_uv[0-24]
vmov.u8 r4, d28[5]
vmov.u8 r10, d28[6]
vmov.u8 r12, d28[7]
orr r4, r4, r10, lsl #8
orr r4, r4, r12, lsl #16
mov r1, #3
vpush {d26}
bl generate_grain_rows_neon
vpop {d26}
mov r1, #GRAIN_HEIGHT - 3
1:
bl sum_\type\()_lag3_left_neon // 8
bl sum_\type\()_lag3_mid_neon // 16
bl sum_\type\()_lag3_mid_neon // 24
bl sum_\type\()_lag3_mid_neon // 32
bl sum_\type\()_lag3_mid_neon // 40
bl sum_\type\()_lag3_mid_neon // 48
bl sum_\type\()_lag3_mid_neon // 56
bl sum_\type\()_lag3_mid_neon // 64
bl sum_\type\()_lag3_mid_neon // 72
bl sum_\type\()_lag3_right_neon // 80
get_grain_2 d16
subs r1, r1, #1
.ifc \type, uv_444
add r11, r11, #4
.endif
vst1.32 {d16[0]}, [r0]!
bgt 1b
vpop {q4-q7}
pop {r4-r11,pc}
endfunc
.endm
gen_grain_82 y
gen_grain_82 uv_444
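// Shape of the generated luma/444-chroma grain LUT, as a summary of the code
// above (not the dav1d C reference): GRAIN_HEIGHT (73) rows of GRAIN_WIDTH
// (82) int16 entries. Each row is produced as 80 Gaussian-seeded samples plus
// 2 more via get_grain_2; for ar_coeff_lag > 0, every row after the first 3
// is additionally run through the lag-N AR filter (sum_*_lagN_* +
// output_lagN_neon) over already generated neighbours.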
.macro set_height dst, type
.ifc \type, uv_420
mov \dst, #SUB_GRAIN_HEIGHT-3
.else
mov \dst, #GRAIN_HEIGHT-3
.endif
.endm
.macro increment_y_ptr reg, type
.ifc \type, uv_420
add \reg, \reg, #2*GRAIN_WIDTH*2-(6*32)
.else
sub \reg, \reg, #6*32-GRAIN_WIDTH*2
.endif
.endm
.macro gen_grain_44 type
function generate_grain_\type\()_16bpc_neon, export=1
push {r4-r11,lr}
ldr r4, [sp, #36]
mov r12, r3
movw r11, #(3*GRAIN_WIDTH-3)*2
mov lr, #28
add r11, r1, r11
mov r1, r2
mul r12, r12, lr
clz lr, r4
movrel r3, X(gaussian_sequence)
sub lr, lr, #24 // -bitdepth_min_8
ldr r2, [r1, #FGD_SEED]
ldr r9, [r1, #FGD_GRAIN_SCALE_SHIFT]
add r4, r1, #FGD_AR_COEFFS_UV
add r9, r9, lr // grain_scale_shift - bitdepth_min_8
adr r5, L(gen_grain_\type\()_tbl)
ldr r6, [r1, #FGD_AR_COEFF_LAG]
add r9, r9, #4
ldr r6, [r5, r6, lsl #2]
vdup.16 q15, r9 // 4 - bitdepth_min_8 + data->grain_scale_shift
add r5, r5, r6
vneg.s16 q15, q15
push {lr}
cmp r12, #0
movw r10, #0x49d8
movw lr, #0xb524
// Intentionally using a separate register instead of moveq with an
// immediate constant, to avoid armv8 deprecated it instruction forms.
it eq
moveq r10, lr
add r4, r4, r12 // Add offset to ar_coeffs_uv[1]
eor r2, r2, r10
pop {lr}
ldr r7, [r1, #FGD_AR_COEFF_SHIFT]
neg lr, lr
mov r8, #1
mov r10, #1
lsl r8, r8, r7 // 1 << ar_coeff_shift
lsl r10, r10, r9 // 1 << (4 + data->grain_scale_shift)
lsr r8, r8, #1 // 1 << (ar_coeff_shift - 1)
lsr r10, r10, #1 // 1 << (4 + data->grain_scale_shift - 1)
bx r5
.align 2
L(gen_grain_\type\()_tbl):
.word L(generate_grain_\type\()_lag0) - L(gen_grain_\type\()_tbl) + CONFIG_THUMB
.word L(generate_grain_\type\()_lag1) - L(gen_grain_\type\()_tbl) + CONFIG_THUMB
.word L(generate_grain_\type\()_lag2) - L(gen_grain_\type\()_tbl) + CONFIG_THUMB
.word L(generate_grain_\type\()_lag3) - L(gen_grain_\type\()_tbl) + CONFIG_THUMB
L(generate_grain_\type\()_lag0):
.ifc \type, uv_420
vpush {q4-q5}
.endif
mov r5, #128
lsl r5, r5, lr // 128 << bitdepth_min_8
sub r5, r5, #1 // (128 << bitdepth_min_8) - 1
mvn r6, r5 // grain_min = ~grain_max
mov r1, #3
bl generate_grain_rows_44_neon
set_height r1, \type
vdup.32 q12, r7
vld1.8 {d22[]}, [r4] // ar_coeffs_uv[0]
vmov.i8 q0, #0
vmov.i8 q1, #255
vdup.16 q9, r5
vdup.16 q10, r6
vext.8 q13, q0, q1, #10
vext.8 q14, q1, q0, #14
vneg.s32 q12, q12
vmovl.s8 q11, d22
1:
vmov q1, q13
bl gen_grain_\type\()_lag0_8_neon // 8
vmov.i8 q1, #255
bl gen_grain_\type\()_lag0_8_neon // 16
bl gen_grain_\type\()_lag0_8_neon // 24
bl gen_grain_\type\()_lag0_8_neon // 32
bl gen_grain_\type\()_lag0_8_neon // 40
vmov q1, q14
bl gen_grain_\type\()_lag0_4_neon // 44
subs r1, r1, #1
increment_y_ptr r11, \type
add r0, r0, #GRAIN_WIDTH*2-6*16
bgt 1b
.ifc \type, uv_420
vpop {q4-q5}
.endif
pop {r4-r11,pc}
L(generate_grain_\type\()_lag1):
vpush {q4-q7}
mov r5, #128
lsl r5, r5, lr // 128 << bitdepth_min_8
sub r5, r5, #1 // (128 << bitdepth_min_8) - 1
vld1.8 {d27[]}, [r4]! // ar_coeffs_uv[0]
vld1.8 {d28[]}, [r4]! // ar_coeffs_uv[1]
vld1.8 {d29[]}, [r4] // ar_coeffs_uv[2]
add r4, r4, #2
mov r1, #3
vld1.8 {d13[]}, [r4] // ar_coeffs_uv[4]
ldrsb r4, [r4, #-1] // ar_coeffs_uv[3]
bl generate_grain_rows_44_neon
vmovl.s8 q13, d27
vmovl.s8 q12, d29
vmovl.s8 q14, d28
vmov d29, d24
vmovl.s8 q6, d13
set_height r1, \type
1:
bl sum_\type\()_lag1_left_neon // 8
bl sum_\type\()_lag1_mid_neon // 16
bl sum_\type\()_lag1_mid_neon // 24
bl sum_\type\()_lag1_mid_neon // 32
bl sum_\type\()_lag1_mid_neon // 40
bl sum_\type\()_lag1_right_neon // 44
subs r1, r1, #1
increment_y_ptr r11, \type
add r0, r0, #GRAIN_WIDTH*2-6*16
bgt 1b
vpop {q4-q7}
pop {r4-r11,pc}
L(generate_grain_\type\()_lag2):
vpush {q4-q7}
mov r5, #128
lsl r5, r5, lr // 128 << bitdepth_min_8
sub r5, r5, #1 // (128 << bitdepth_min_8) - 1
vld1.8 {d28,d29}, [r4] // ar_coeffs_uv[0-12]
vmov.s8 r4, d29[2]
vmov.s8 r10, d29[3]
mov r1, #3
bl generate_grain_rows_44_neon
set_height r1, \type
1:
bl sum_\type\()_lag2_left_neon // 8
bl sum_\type\()_lag2_mid_neon // 16
bl sum_\type\()_lag2_mid_neon // 24
bl sum_\type\()_lag2_mid_neon // 32
bl sum_\type\()_lag2_mid_neon // 40
bl sum_\type\()_lag2_right_neon // 44
subs r1, r1, #1
increment_y_ptr r11, \type
add r0, r0, #GRAIN_WIDTH*2-6*16
bgt 1b
vpop {q4-q7}
pop {r4-r11,pc}
L(generate_grain_\type\()_lag3):
vpush {q4-q7}
mov r5, #128
lsl r5, r5, lr // 128 << bitdepth_min_8
sub r5, r5, #1 // (128 << bitdepth_min_8) - 1
vld1.8 {q13, q14}, [r4] // ar_coeffs_y[0-23], ar_coeffs_uv[0-24]
vmov.u8 r4, d28[5]
vmov.u8 r10, d28[6]
vmov.u8 r12, d28[7]
orr r4, r4, r10, lsl #8
orr r4, r4, r12, lsl #16
mov r1, #3
bl generate_grain_rows_44_neon
set_height r1, \type
1:
bl sum_\type\()_lag3_left_neon // 8
bl sum_\type\()_lag3_mid_neon // 16
bl sum_\type\()_lag3_mid_neon // 24
bl sum_\type\()_lag3_mid_neon // 32
bl sum_\type\()_lag3_mid_neon // 40
bl sum_\type\()_lag3_right_neon // 44
subs r1, r1, #1
increment_y_ptr r11, \type
add r0, r0, #GRAIN_WIDTH*2-6*16
bgt 1b
vpop {q4-q7}
pop {r4-r11,pc}
endfunc
.endm
gen_grain_44 uv_420
gen_grain_44 uv_422
.macro gather_interleaved dst1, dst2, src1, src2, src3, src4, off
vmov.u16 r11, \src1[0+\off]
vmov.u16 r12, \src3[0+\off]
add r11, r11, r3
vmov.u16 lr, \src1[2+\off]
add r12, r12, r3
vld1.8 {\dst1[0+\off]}, [r11]
vmov.u16 r11, \src3[2+\off]
add lr, lr, r3
vld1.8 {\dst2[0+\off]}, [r12]
vmov.u16 r12, \src2[0+\off]
add r11, r11, r3
vld1.8 {\dst1[2+\off]}, [lr]
vmov.u16 lr, \src4[0+\off]
add r12, r12, r3
vld1.8 {\dst2[2+\off]}, [r11]
vmov.u16 r11, \src2[2+\off]
add lr, lr, r3
vld1.8 {\dst1[4+\off]}, [r12]
vmov.u16 r12, \src4[2+\off]
add r11, r11, r3
vld1.8 {\dst2[4+\off]}, [lr]
add r12, r12, r3
vld1.8 {\dst1[6+\off]}, [r11]
vld1.8 {\dst2[6+\off]}, [r12]
.endm
.macro gather dst1, dst2, dst3, dst4, src1, src2, src3, src4, src5, src6, src7, src8
gather_interleaved \dst1, \dst3, \src1, \src2, \src5, \src6, 0
gather_interleaved \dst1, \dst3, \src1, \src2, \src5, \src6, 1
gather_interleaved \dst2, \dst4, \src3, \src4, \src7, \src8, 0
gather_interleaved \dst2, \dst4, \src3, \src4, \src7, \src8, 1
.endm
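// gather32_neon/gather16_neon emulate a gather load from the scaling LUT:
// each 16-bit pixel value in d0-d7 (d0-d3 for gather16) is added to the
// table base pointer in r3 and a single byte is loaded per lane into
// d8-d11 (d8-d9 for gather16), using r11, r12 and lr as scratch registers.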
function gather32_neon
push {r11-r12,lr}
gather d8, d9, d10, d11, d0, d1, d2, d3, d4, d5, d6, d7
pop {r11-r12,pc}
endfunc
function gather16_neon
push {r11-r12,lr}
gather_interleaved d8, d9, d0, d1, d2, d3, 0
gather_interleaved d8, d9, d0, d1, d2, d3, 1
pop {r11-r12,pc}
endfunc
const overlap_coeffs_0, align=4
.short 27, 17, 0, 0
.short 17, 27, 32, 32
endconst
const overlap_coeffs_1, align=4
.short 23, 0, 0, 0
.short 22, 32, 32, 32
endconst
.macro calc_offset offx, offy, src, sx, sy
and \offy, \src, #0xF // randval & 0xF
lsr \offx, \src, #4 // randval >> 4
.if \sy == 0
add \offy, \offy, \offy // 2 * (randval & 0xF)
.endif
.if \sx == 0
add \offx, \offx, \offx // 2 * (randval >> 4)
.endif
.endm
.macro add_offset dst, offx, offy, src, stride
mla \dst, \stride, \offy, \src // grain_lut += grain_stride * offy
add \dst, \dst, \offx, lsl #1 // grain_lut += offx
.endm
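// Net effect of calc_offset + add_offset: dst = grain_lut + stride*offy + 2*offx
// (the lsl #1 accounts for the 16-bit grain entries in this 16 bpc version).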
// void dav1d_fgy_32x32_16bpc_neon(pixel *const dst, const pixel *const src,
// const ptrdiff_t stride,
// const uint8_t scaling[SCALING_SIZE],
// const int scaling_shift,
// const entry grain_lut[][GRAIN_WIDTH],
// const int offsets[][2],
// const int h, const ptrdiff_t clip,
// const ptrdiff_t type,
// const int bitdepth_max);
function fgy_32x32_16bpc_neon, export=1
push {r4-r11,lr}
vpush {q4-q7}
ldrd r4, r5, [sp, #100] // scaling_shift, grain_lut
ldrd r6, r7, [sp, #108] // offsets, h
ldr r8, [sp, #116] // clip
mov r9, #GRAIN_WIDTH*2 // grain_lut stride
ldr r10, [sp, #124] // bitdepth_max
eor r4, r4, #15 // 15 - scaling_shift
vdup.16 q6, r10 // bitdepth_max
clz r10, r10
vdup.16 q13, r4 // 15 - scaling_shift
rsb r10, r10, #24 // bitdepth_min_8
cmp r8, #0
vdup.16 q12, r10 // bitdepth_min_8
movrel_local r12, overlap_coeffs_0
beq 1f
// clip
vmov.i16 q14, #16
vmov.i16 q15, #235
vshl.s16 q14, q14, q12
vshl.s16 q15, q15, q12
b 2f
1:
// no clip
vmov.i16 q14, #0
vmov q15, q6
2:
vshr.u16 q6, q6, #1 // grain_max
vld1.16 {d24, d25}, [r12, :128] // overlap_coeffs
add r5, r5, #18 // grain_lut += 9
add r5, r5, r9, lsl #3 // grain_lut += 8 * grain_stride
add r5, r5, r9 // grain_lut += grain_stride
ldr r10, [r6, #8] // offsets[1][0]
calc_offset r10, r4, r10, 0, 0
add_offset r4, r10, r4, r5, r9
ldr r10, [r6, #4] // offsets[0][1]
calc_offset r10, r11, r10, 0, 0
add_offset r11, r10, r11, r5, r9
ldr r10, [r6, #12] // offsets[1][1]
calc_offset r10, r8, r10, 0, 0
add_offset r8, r10, r8, r5, r9
ldr r6, [r6] // offsets[0][0]
calc_offset r6, lr, r6, 0, 0
add_offset r5, r6, lr, r5, r9
add r4, r4, #32*2 // grain_lut += FG_BLOCK_SIZE * bx
add r6, r11, r9, lsl #5 // grain_lut += grain_stride * FG_BLOCK_SIZE * by
ldr r10, [sp, #120] // type
adr r11, L(fgy_loop_tbl)
tst r10, #1
ldr r10, [r11, r10, lsl #2]
add r8, r8, r9, lsl #5 // grain_lut += grain_stride * FG_BLOCK_SIZE * by
add r8, r8, #32*2 // grain_lut += FG_BLOCK_SIZE * bx
add r11, r11, r10
beq 1f
// y overlap
vdup.16 d14, d24[0]
vdup.16 d15, d24[1]
mov r10, r7 // backup actual h
mov r7, #2
1:
sub r2, r2, #32 // src_stride -= 32
sub r9, r9, #32 // grain_stride -= 32
bx r11
endfunc
function fgy_loop_neon
L(fgy_loop_tbl):
.word L(loop_00) - L(fgy_loop_tbl) + CONFIG_THUMB
.word L(loop_01) - L(fgy_loop_tbl) + CONFIG_THUMB
.word L(loop_10) - L(fgy_loop_tbl) + CONFIG_THUMB
.word L(loop_11) - L(fgy_loop_tbl) + CONFIG_THUMB
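// The table is indexed by the 2-bit "type" argument: bit 0 selects overlap
// with the rows above (oy), bit 1 overlap with the block to the left (ox).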
.macro fgy ox, oy
L(loop_\ox\oy):
1:
.if \ox
vld1.16 {d0}, [r4], r9 // grain_lut old
.endif
.if \oy
vld1.16 {q2, q3}, [r6]! // grain_lut top
.endif
.if \ox && \oy
vld1.16 {d2}, [r8], r9 // grain_lut top old
.endif
.if \oy
vld1.16 {q4, q5}, [r6], r9 // grain_lut top
.endif
.if !\ox && !\oy
vld1.16 {q0, q1}, [r1, :128]! // src
.endif
vld1.16 {q8, q9}, [r5]! // grain_lut
.if !\ox && !\oy
vld1.16 {q2, q3}, [r1, :128], r2 // src
.endif
.if !\oy
vmvn.i16 q5, #0xf000 // 0x0fff
.endif
vld1.16 {q10, q11}, [r5], r9 // grain_lut
.if \ox
add r4, r4, #32
vmull.s16 q0, d0, d24
vmlal.s16 q0, d16, d25
.endif
.if \oy
.if \ox
add r8, r8, #32
vmull.s16 q1, d2, d24
vmlal.s16 q1, d4, d25
vqrshrn.s32 d16, q0, #5
vmvn d0, d12 // grain_min
vqrshrn.s32 d4, q1, #5
vmin.s16 d16, d16, d12
vmin.s16 d4, d4, d12
vmax.s16 d16, d16, d0
vmax.s16 d4, d4, d0
.endif
vmull.s16 q0, d4, d14
vmull.s16 q1, d5, d14
vmull.s16 q2, d6, d14
vmull.s16 q3, d7, d14
vmlal.s16 q0, d16, d15
vmlal.s16 q1, d17, d15
vmlal.s16 q2, d18, d15
vmlal.s16 q3, d19, d15
vmull.s16 q8, d20, d15
vmull.s16 q9, d21, d15
vmull.s16 q10, d22, d15
vmull.s16 q11, d23, d15
vmlal.s16 q8, d8, d14
vmlal.s16 q9, d9, d14
vmlal.s16 q10, d10, d14
vmlal.s16 q11, d11, d14
vmvn q4, q6 // grain_min
vqrshrn.s32 d0, q0, #5
vqrshrn.s32 d1, q1, #5
vqrshrn.s32 d2, q2, #5
vqrshrn.s32 d3, q3, #5
vqrshrn.s32 d4, q8, #5
vqrshrn.s32 d5, q9, #5
vqrshrn.s32 d6, q10, #5
vqrshrn.s32 d7, q11, #5
vmin.s16 q8, q0, q6
vmin.s16 q9, q1, q6
vld1.16 {q0, q1}, [r1, :128]! // src
vmin.s16 q10, q2, q6
vmin.s16 q11, q3, q6
vmax.s16 q8, q8, q4
vmax.s16 q9, q9, q4
vld1.16 {q2, q3}, [r1, :128], r2 // src
vmvn.i16 q5, #0xf000 // 0x0fff
vmax.s16 q10, q10, q4
vmax.s16 q11, q11, q4
.elseif \ox
vmvn d4, d12 // grain_min
vqrshrn.s32 d16, q0, #5
vld1.16 {q0, q1}, [r1, :128]! // src
vmin.s16 d16, d16, d12
vmax.s16 d16, d16, d4
vld1.16 {q2, q3}, [r1, :128], r2 // src
.endif
// Make sure that uninitialized pixels out of range past the right
// edge are in range; their actual values shouldn't matter.
vand q0, q0, q5
vand q1, q1, q5
vand q2, q2, q5
vand q3, q3, q5
bl gather32_neon
.if \ox || \oy
vpush {q6-q7}
.endif
vmovl.u8 q6, d8 // scaling
vmovl.u8 q7, d9
vmovl.u8 q4, d10
vmovl.u8 q5, d11
vshl.u16 q6, q6, q13 // scaling << (15 - scaling_shift)
vshl.u16 q7, q7, q13
vshl.u16 q4, q4, q13
vshl.u16 q5, q5, q13
vqrdmulh.s16 q8, q8, q6 // round2((scaling << (15 - scaling_shift)) * grain, 15)
vqrdmulh.s16 q9, q9, q7
vqrdmulh.s16 q10, q10, q4
vqrdmulh.s16 q11, q11, q5
.if \ox || \oy
vpop {q6-q7}
.endif
vqadd.s16 q0, q0, q8 // *src + noise
vqadd.s16 q1, q1, q9
vqadd.s16 q2, q2, q10
vqadd.s16 q3, q3, q11
vmax.s16 q0, q0, q14
vmax.s16 q1, q1, q14
vmax.s16 q2, q2, q14
vmax.s16 q3, q3, q14
vmin.s16 q0, q0, q15
vmin.s16 q1, q1, q15
vmin.s16 q2, q2, q15
vmin.s16 q3, q3, q15
vst1.16 {q0, q1}, [r0, :128]! // dst
subs r7, r7, #1
.if \oy
vdup.16 d14, d25[0]
vdup.16 d15, d25[1]
.endif
vst1.16 {q2, q3}, [r0, :128], r2 // dst
bgt 1b
.if \oy
cmp r10, #2
sub r7, r10, #2 // restore actual remaining h
bgt L(loop_\ox\()0)
.endif
vpop {q4-q7}
pop {r4-r11,pc}
.endm
fgy 0, 0
fgy 0, 1
fgy 1, 0
fgy 1, 1
endfunc
// void dav1d_fguv_32x32_420_16bpc_neon(pixel *const dst,
// const pixel *const src,
// const ptrdiff_t stride,
// const uint8_t scaling[SCALING_SIZE],
// const Dav1dFilmGrainData *const data,
// const entry grain_lut[][GRAIN_WIDTH],
// const pixel *const luma_row,
// const ptrdiff_t luma_stride,
// const int offsets[][2],
// const ptrdiff_t h, const ptrdiff_t uv,
// const ptrdiff_t is_id,
// const ptrdiff_t type,
// const int bitdepth_max);
.macro fguv layout, sx, sy
function fguv_32x32_\layout\()_16bpc_neon, export=1
push {r4-r11,lr}
vpush {q4-q7}
ldrd r4, r5, [sp, #100] // data, grain_lut
ldrd r10, r11, [sp, #124] // uv, is_id
ldr r6, [sp, #136] // bitdepth_max
clz r7, r6
rsb r7, r7, #24 // bitdepth_min_8
// !csfl
add r10, r4, r10, lsl #2 // + 4*uv
add r12, r10, #FGD_UV_LUMA_MULT
add lr, r10, #FGD_UV_MULT
ldrh r10, [r10, #FGD_UV_OFFSET] // uv_offset
vld1.16 {d30[]}, [r12] // uv_luma_mult
lsl r10, r10, r7 // uv_offset << bitdepth_min_8
vld1.16 {d30[1]}, [lr] // uv_mult
ldr lr, [r4, #FGD_SCALING_SHIFT]
ldr r12, [r4, #FGD_CLIP_TO_RESTRICTED_RANGE]
eor lr, lr, #15 // 15 - scaling_shift
vmov.16 d30[2], r10 // uv_offset << bitdepth_min_8
cmp r12, #0
vdup.16 q13, lr // 15 - scaling_shift
beq 1f
// clip
cmp r11, #0
mov r8, #16
mov r9, #240
lsl r8, r8, r7
lsl r9, r9, r7
beq 2f
// is_id
mov r9, #235
lsl r9, r9, r7
b 2f
1:
// no clip
mov r8, #0
mov r9, r6 // bitdepth_max
2:
vmov.16 d30[3], r6 // bitdepth_max
vdup.16 d31, r8 // clip_min
mov r10, #GRAIN_WIDTH*2 // grain_lut stride
.if \sy
mov r6, #23
mov r7, #22
.else
mov r6, #27
mov r7, #17
.endif
vmov.16 d31[1], r9 // clip_max
ldrd r8, r9, [sp, #116] // offsets, h
add r5, r5, #(2*(3 + (2 >> \sx)*3)) // grain_lut += 9 or 6
.if \sy
add r5, r5, r10, lsl #2 // grain_lut += 4 * grain_stride
add r5, r5, r10, lsl #1 // grain_lut += 2 * grain_stride
.else
add r5, r5, r10, lsl #3 // grain_lut += 8 * grain_stride
add r5, r5, r10 // grain_lut += grain_stride
.endif
vmov.16 d31[2], r6 // overlap y [0]
ldr r12, [r8, #8] // offsets[1][0]
calc_offset r12, r4, r12, \sx, \sy
add_offset r4, r12, r4, r5, r10
ldr r12, [r8, #4] // offsets[0][1]
calc_offset r12, lr, r12, \sx, \sy
add_offset lr, r12, lr, r5, r10
ldr r12, [r8, #12] // offsets[1][1]
calc_offset r12, r11, r12, \sx, \sy
add_offset r11, r12, r11, r5, r10
ldr r8, [r8] // offsets[0][0]
calc_offset r8, r12, r8, \sx, \sy
add_offset r5, r8, r12, r5, r10
vmov.16 d31[3], r7 // overlap y [1]
add r4, r4, #2*(32 >> \sx) // grain_lut += FG_BLOCK_SIZE * bx
add r8, lr, r10, lsl #(5 - \sy) // grain_lut += grain_stride * FG_BLOCK_SIZE * by
add r11, r11, r10, lsl #(5 - \sy) // grain_lut += grain_stride * FG_BLOCK_SIZE * by
add r11, r11, #2*(32 >> \sx) // grain_lut += FG_BLOCK_SIZE * bx
movrel_local r12, overlap_coeffs_\sx
ldr lr, [sp, #132] // type
ldrd r6, r7, [sp, #108] // luma_row, luma_stride
vld1.16 {d24, d25}, [r12, :128] // overlap_coeffs
movrel_local r12, L(fguv_loop_sx\sx\()_tbl)
#if CONFIG_THUMB
// This uses movrel_local instead of adr above, because the target
// can be out of range for adr. But movrel_local leaves the thumb bit
// set on COFF (but probably wouldn't if building for thumb on ELF),
// thus try to clear the bit for robustness.
bic r12, r12, #1
#endif
tst lr, #1
ldr lr, [r12, lr, lsl #2]
add r12, r12, lr
beq 1f
// y overlap
sub lr, r9, #(2 >> \sy) // backup remaining h
mov r9, #(2 >> \sy)
1:
.if \sy
add r7, r7, r7 // luma_stride *= 2
.endif
sub r7, r7, #32 // luma_stride -= 32
bx r12
endfunc
.endm
fguv 420, 1, 1
fguv 422, 1, 0
fguv 444, 0, 0
function fguv_loop_sx0_neon
L(fguv_loop_sx0_tbl):
.word L(fguv_loop_sx0_csfl0_00) - L(fguv_loop_sx0_tbl) + CONFIG_THUMB
.word L(fguv_loop_sx0_csfl0_01) - L(fguv_loop_sx0_tbl) + CONFIG_THUMB
.word L(fguv_loop_sx0_csfl0_10) - L(fguv_loop_sx0_tbl) + CONFIG_THUMB
.word L(fguv_loop_sx0_csfl0_11) - L(fguv_loop_sx0_tbl) + CONFIG_THUMB
.word L(fguv_loop_sx0_csfl1_00) - L(fguv_loop_sx0_tbl) + CONFIG_THUMB
.word L(fguv_loop_sx0_csfl1_01) - L(fguv_loop_sx0_tbl) + CONFIG_THUMB
.word L(fguv_loop_sx0_csfl1_10) - L(fguv_loop_sx0_tbl) + CONFIG_THUMB
.word L(fguv_loop_sx0_csfl1_11) - L(fguv_loop_sx0_tbl) + CONFIG_THUMB
.macro fguv_loop_sx0 csfl, ox, oy
L(fguv_loop_sx0_csfl\csfl\()_\ox\oy):
sub r2, r2, #32 // src_stride -= 32
sub r10, r10, #32 // grain_stride -= 32
.if \oy
mov r12, lr
.endif
L(fguv_loop_sx0_csfl\csfl\()_\ox\oy\()_loopstart):
1:
.if \ox
vld1.16 {d0}, [r4], r10 // grain_lut old
.endif
.if \oy
vld1.16 {q2, q3}, [r8]! // grain_lut top
.endif
.if \ox && \oy
vld1.16 {d2}, [r11], r10 // grain_lut top old
.endif
.if !\ox && !\oy
vld1.16 {q0, q1}, [r6, :128]! // luma
.endif
vld1.16 {q8, q9}, [r5]! // grain_lut
.if \oy
vld1.16 {q4, q5}, [r8], r10 // grain_lut top
.endif
.if !\ox && !\oy
vld1.16 {q2, q3}, [r6, :128], r7 // luma
.endif
.if \oy
vdup.16 d28, d31[2] // overlap y coeff
vdup.16 d29, d31[3] // overlap y coeff
.endif
vld1.16 {q10, q11}, [r5], r10 // grain_lut
.if \ox
vdup.16 q7, d30[3] // bitdepth_max
add r4, r4, #32
vmull.s16 q0, d0, d24
vshr.u16 q7, q7, #1 // grain_max
vmlal.s16 q0, d16, d25
vmvn q6, q7 // grain_min
.endif
.if \oy
.if \ox
add r11, r11, #32
vmull.s16 q1, d2, d24
vmlal.s16 q1, d4, d25
vqrshrn.s32 d16, q0, #5
vqrshrn.s32 d4, q1, #5
vmin.s16 d4, d4, d14
vmin.s16 d16, d16, d14
vmax.s16 d4, d4, d12
vmax.s16 d16, d16, d12
.endif
vmull.s16 q0, d4, d28
vmull.s16 q1, d5, d28
vmull.s16 q2, d6, d28
vmull.s16 q3, d7, d28
.if !\ox
vdup.16 q7, d30[3] // bitdepth_max
.endif
vmlal.s16 q0, d16, d29
vmlal.s16 q1, d17, d29
vmlal.s16 q2, d18, d29
vmlal.s16 q3, d19, d29
.if !\ox
vshr.u16 q7, q7, #1 // grain_max
.endif
vmull.s16 q8, d20, d29
vmull.s16 q9, d21, d29
vmull.s16 q10, d22, d29
vmull.s16 q11, d23, d29
.if !\ox
vmvn q6, q7 // grain_min
.endif
vmlal.s16 q8, d8, d28
vmlal.s16 q9, d9, d28
vmlal.s16 q10, d10, d28
vmlal.s16 q11, d11, d28
vqrshrn.s32 d0, q0, #5
vqrshrn.s32 d1, q1, #5
vqrshrn.s32 d2, q2, #5
vqrshrn.s32 d3, q3, #5
vqrshrn.s32 d4, q8, #5
vqrshrn.s32 d5, q9, #5
vqrshrn.s32 d6, q10, #5
vqrshrn.s32 d7, q11, #5
vmin.s16 q8, q0, q7
vmin.s16 q9, q1, q7
vld1.16 {q0, q1}, [r6, :128]! // luma
vmin.s16 q10, q2, q7
vmin.s16 q11, q3, q7
vmax.s16 q8, q8, q6
vmax.s16 q9, q9, q6
vld1.16 {q2, q3}, [r6, :128], r7 // luma
vmax.s16 q10, q10, q6
vmax.s16 q11, q11, q6
.elseif \ox
vqrshrn.s32 d16, q0, #5
vld1.16 {q0, q1}, [r6, :128]! // luma
vmin.s16 d16, d16, d14
vld1.16 {q2, q3}, [r6, :128], r7 // luma
vmax.s16 d16, d16, d12
.endif
.if !\csfl
vdup.16 d28, d30[0] // uv_luma_mult
vld1.16 {q4, q5}, [r1, :128]! // src
vdup.16 d29, d30[1] // uv_mult
vmull.s16 q6, d0, d28
vmull.s16 q7, d1, d28
vmull.s16 q0, d2, d28
vmull.s16 q1, d3, d28
vmlal.s16 q6, d8, d29
vmlal.s16 q7, d9, d29
vmlal.s16 q0, d10, d29
vmlal.s16 q1, d11, d29
vld1.16 {q4, q5}, [r1, :128] // src
sub r1, r1, #32
vshrn.s32 d12, q6, #6
vshrn.s32 d13, q7, #6
vshrn.s32 d14, q0, #6
vshrn.s32 d15, q1, #6
vmull.s16 q0, d4, d28
vmull.s16 q1, d5, d28
vmull.s16 q2, d6, d28
vmull.s16 q3, d7, d28
vmlal.s16 q0, d8, d29
vmlal.s16 q1, d9, d29
vmlal.s16 q2, d10, d29
vmlal.s16 q3, d11, d29
vdup.16 q14, d30[2] // uv_offset
vshrn.s32 d0, q0, #6
vshrn.s32 d1, q1, #6
vshrn.s32 d2, q2, #6
vshrn.s32 d3, q3, #6
vdup.16 q4, d30[3] // bitdepth_max
vmov.i16 q5, #0
vadd.i16 q6, q6, q14
vadd.i16 q7, q7, q14
vadd.i16 q2, q0, q14
vadd.i16 q3, q1, q14
vmin.s16 q0, q6, q4
vmin.s16 q1, q7, q4
vmin.s16 q2, q2, q4
vmin.s16 q3, q3, q4
vmax.s16 q0, q0, q5
vmax.s16 q1, q1, q5
vmax.s16 q2, q2, q5
vmax.s16 q3, q3, q5
.else
vdup.16 q14, d30[3] // bitdepth_max
// Make sure that uninitialized pixels out of range past the right
// edge are in range; their actual values shouldn't matter.
vand q0, q0, q14
vand q1, q1, q14
vand q2, q2, q14
vand q3, q3, q14
.endif
bl gather32_neon
vld1.16 {q0, q1}, [r1, :128]! // src
vmovl.u8 q6, d8 // scaling
vmovl.u8 q7, d9
vmovl.u8 q4, d10
vmovl.u8 q5, d11
vld1.16 {q2, q3}, [r1, :128], r2 // src
vshl.u16 q6, q6, q13 // scaling << (15 - scaling_shift)
vshl.u16 q7, q7, q13
vshl.u16 q4, q4, q13
vshl.u16 q5, q5, q13
vqrdmulh.s16 q8, q8, q6 // round2((scaling << (15 - scaling_shift)) * grain, 15)
vqrdmulh.s16 q9, q9, q7
vqrdmulh.s16 q10, q10, q4
vqrdmulh.s16 q11, q11, q5
vdup.16 q4, d31[0] // clip_min
vdup.16 q5, d31[1] // clip_max
vqadd.s16 q0, q0, q8 // *src + noise
vqadd.s16 q1, q1, q9
vqadd.s16 q2, q2, q10
vqadd.s16 q3, q3, q11
.if \oy
vmov.32 lr, d25[0] // 2 first 16 bit coeffs from overlap x
.endif
vmax.s16 q0, q0, q4
vmax.s16 q1, q1, q4
vmax.s16 q2, q2, q4
vmax.s16 q3, q3, q4
vmin.s16 q0, q0, q5
vmin.s16 q1, q1, q5
vmin.s16 q2, q2, q5
vmin.s16 q3, q3, q5
vst1.16 {q0, q1}, [r0, :128]! // dst
subs r9, r9, #1
.if \oy
vmov.32 d31[1], lr // new coeffs for overlap y
.endif
vst1.16 {q2, q3}, [r0, :128], r2 // dst
bgt 1b
.if \oy
cmp r12, #0
mov r9, r12 // restore actual remaining h
bgt L(fguv_loop_sx0_csfl\csfl\()_\ox\()0_loopstart)
.endif
b 9f
.endm
fguv_loop_sx0 0, 0, 0
fguv_loop_sx0 0, 0, 1
fguv_loop_sx0 0, 1, 0
fguv_loop_sx0 0, 1, 1
fguv_loop_sx0 1, 0, 0
fguv_loop_sx0 1, 0, 1
fguv_loop_sx0 1, 1, 0
fguv_loop_sx0 1, 1, 1
9:
vpop {q4-q7}
pop {r4-r11,pc}
endfunc
function fguv_loop_sx1_neon
L(fguv_loop_sx1_tbl):
.word L(fguv_loop_sx1_csfl0_00) - L(fguv_loop_sx1_tbl) + CONFIG_THUMB
.word L(fguv_loop_sx1_csfl0_01) - L(fguv_loop_sx1_tbl) + CONFIG_THUMB
.word L(fguv_loop_sx1_csfl0_10) - L(fguv_loop_sx1_tbl) + CONFIG_THUMB
.word L(fguv_loop_sx1_csfl0_11) - L(fguv_loop_sx1_tbl) + CONFIG_THUMB
.word L(fguv_loop_sx1_csfl1_00) - L(fguv_loop_sx1_tbl) + CONFIG_THUMB
.word L(fguv_loop_sx1_csfl1_01) - L(fguv_loop_sx1_tbl) + CONFIG_THUMB
.word L(fguv_loop_sx1_csfl1_10) - L(fguv_loop_sx1_tbl) + CONFIG_THUMB
.word L(fguv_loop_sx1_csfl1_11) - L(fguv_loop_sx1_tbl) + CONFIG_THUMB
.macro fguv_loop_sx1 csfl, ox, oy
L(fguv_loop_sx1_csfl\csfl\()_\ox\oy):
.if \oy
mov r12, lr
.endif
1:
.if \ox
vld1.16 {d0}, [r4], r10 // grain_lut old
.endif
.if \ox && \oy
vld1.16 {d2}, [r11], r10 // grain_lut top old
.endif
.if \oy
vld1.16 {q2, q3}, [r8], r10 // grain_lut top
.endif
.if !\ox && !\oy
vld1.16 {q0, q1}, [r6, :128]! // luma
.endif
vld1.16 {q8, q9}, [r5], r10 // grain_lut
.if \oy
vdup.16 d28, d31[2] // overlap y coeff
vdup.16 d29, d31[3] // overlap y coeff
.endif
.if !\ox && !\oy
vld1.16 {q2, q3}, [r6, :128], r7 // luma
.endif
.if \ox
vdup.16 q7, d30[3] // bitdepth_max
vmull.s16 q0, d0, d24
vshr.u16 q7, q7, #1 // grain_max
vmlal.s16 q0, d16, d25
vmvn q6, q7 // grain_min
.endif
.if \oy
.if \ox
vmull.s16 q1, d2, d24
vmlal.s16 q1, d4, d25
vqrshrn.s32 d16, q0, #5
vqrshrn.s32 d4, q1, #5
vmin.s16 d4, d4, d14
vmin.s16 d16, d16, d14
vmax.s16 d4, d4, d12
vmax.s16 d16, d16, d12
.endif
vmull.s16 q0, d4, d28
vmull.s16 q1, d5, d28
vmull.s16 q2, d6, d28
vmull.s16 q3, d7, d28
.if !\ox
vdup.16 q7, d30[3] // bitdepth_max
.endif
vmlal.s16 q0, d16, d29
vmlal.s16 q1, d17, d29
vmlal.s16 q2, d18, d29
vmlal.s16 q3, d19, d29
.if !\ox
vshr.u16 q7, q7, #1 // grain_max
.endif
vqrshrn.s32 d16, q0, #5
vqrshrn.s32 d17, q1, #5
vqrshrn.s32 d18, q2, #5
vqrshrn.s32 d19, q3, #5
.if !\ox
vmvn q6, q7 // grain_min
.endif
vld1.16 {q0, q1}, [r6, :128]! // luma
vmin.s16 q8, q8, q7
vmin.s16 q9, q9, q7
vmax.s16 q8, q8, q6
vmax.s16 q9, q9, q6
vld1.16 {q2, q3}, [r6, :128], r7 // luma
.elseif \ox
vqrshrn.s32 d16, q0, #5
vld1.16 {q0, q1}, [r6, :128]! // luma
vmin.s16 d16, d16, d14
vld1.16 {q2, q3}, [r6, :128], r7 // luma
vmax.s16 d16, d16, d12
.endif
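// Average each horizontal pair of luma pixels; chroma is subsampled in x
// in this sx=1 (4:2:0/4:2:2) path.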
vpadd.i16 d0, d0, d1
vpadd.i16 d1, d2, d3
vpadd.i16 d2, d4, d5
vpadd.i16 d3, d6, d7
vrshr.u16 q0, q0, #1
vrshr.u16 q1, q1, #1
.if !\csfl
vdup.16 d28, d30[0] // uv_luma_mult
vld1.16 {q2, q3}, [r1, :128], r2 // src
vdup.16 d29, d30[1] // uv_mult
vmull.s16 q6, d0, d28
vmull.s16 q7, d1, d28
vmull.s16 q0, d2, d28
vmull.s16 q1, d3, d28
vmlal.s16 q6, d4, d29
vmlal.s16 q7, d5, d29
vmlal.s16 q0, d6, d29
vmlal.s16 q1, d7, d29
vshrn.s32 d12, q6, #6
vshrn.s32 d13, q7, #6
vshrn.s32 d14, q0, #6
vshrn.s32 d15, q1, #6
vdup.16 q14, d30[2] // uv_offset
vdup.16 q4, d30[3] // bitdepth_max
vmov.i16 q5, #0
vadd.i16 q6, q6, q14
vadd.i16 q7, q7, q14
vmin.s16 q0, q6, q4
vmin.s16 q1, q7, q4
vmax.s16 q0, q0, q5
vmax.s16 q1, q1, q5
.else
vdup.16 q14, d30[3] // bitdepth_max
vld1.16 {q2, q3}, [r1, :128], r2 // src
// Make sure that uninitialized pixels out of range past the right
// edge are in range; their actual values shouldn't matter.
vand q0, q0, q14
vand q1, q1, q14
.endif
bl gather16_neon
vmovl.u8 q6, d8 // scaling
vmovl.u8 q7, d9
vshl.u16 q6, q6, q13 // scaling << (15 - scaling_shift)
vshl.u16 q7, q7, q13
vqrdmulh.s16 q8, q8, q6 // round2((scaling << (15 - scaling_shift)) * grain, 15)
vqrdmulh.s16 q9, q9, q7
vdup.16 q4, d31[0] // clip_min
vdup.16 q5, d31[1] // clip_max
vqadd.s16 q0, q2, q8 // *src + noise
vqadd.s16 q1, q3, q9
.if \oy
// Swap the two last coefficients of d31, place them first in d28
vrev64.16 d28, d31
.endif
vmax.s16 q0, q0, q4
vmax.s16 q1, q1, q4
vmin.s16 q0, q0, q5
vmin.s16 q1, q1, q5
subs r9, r9, #1
.if \oy
// Take the first two 16 bit coefficients of d28 and place them at the
// end of d31
vtrn.32 d31, d28
.endif
vst1.16 {q0, q1}, [r0, :128], r2 // dst
bgt 1b
.if \oy
cmp r12, #0
mov r9, r12 // restore actual remaining h
bgt L(fguv_loop_sx1_csfl\csfl\()_\ox\()0)
.endif
b 9f
.endm
fguv_loop_sx1 0, 0, 0
fguv_loop_sx1 0, 0, 1
fguv_loop_sx1 0, 1, 0
fguv_loop_sx1 0, 1, 1
fguv_loop_sx1 1, 0, 0
fguv_loop_sx1 1, 0, 1
fguv_loop_sx1 1, 1, 0
fguv_loop_sx1 1, 1, 1
9:
vpop {q4-q7}
pop {r4-r11,pc}
endfunc
|
Admenri/urge
| 32,723
|
third_party/dav1d/src/arm/32/loopfilter16.S
|
/*
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2020, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm.S"
#include "util.S"
.macro loop_filter wd
function lpf_4_wd\wd\()_neon
vabd.u16 d0, d22, d23 // abs(p1 - p0)
vabd.u16 d1, d25, d24 // abs(q1 - q0)
vabd.u16 d2, d23, d24 // abs(p0 - q0)
vabd.u16 d3, d22, d25 // abs(p1 - q1)
.if \wd >= 6
vabd.u16 d4, d21, d22 // abs(p2 - p1)
vabd.u16 d5, d26, d25 // abs(q2 - q1)
.endif
.if \wd >= 8
vabd.u16 d6, d20, d21 // abs(p3 - p2)
vabd.u16 d7, d27, d26 // abs(q3 - q2)
.endif
.if \wd >= 6
vmax.u16 d4, d4, d5
.endif
vqadd.u16 d2, d2, d2 // abs(p0 - q0) * 2
.if \wd >= 8
vmax.u16 d6, d6, d7
.endif
vshr.u16 d3, d3, #1
.if \wd >= 8
vmax.u16 d4, d4, d6
.endif
vmax.u16 d0, d0, d1 // max(abs(p1 - p0), abs(q1 - q0))
vqadd.u16 d2, d2, d3 // abs(p0 - q0) * 2 + abs(p1 - q1) >> 1
.if \wd >= 6
vmax.u16 d4, d0, d4
vcge.u16 d1, d11, d4 // max(abs(p1 - p0), abs(q1 - q0), abs(), abs(), ...) <= I
.else
vcge.u16 d1, d11, d0 // max(abs(p1 - p0), abs(q1 - q0)) <= I
.endif
vcge.u16 d2, d10, d2 // abs(p0 - q0) * 2 + abs(p1 - q1) >> 1 <= E
vand d1, d1, d2 // fm && wd >= 4 (implicit)
.if \wd >= 6
vmov d14, d1 // fm && wd > 4 (implicit)
.endif
.if \wd >= 16
vmov d15, d1 // fm && wd == 16 (implicit)
.endif
vmov r10, r11, d1
orrs r10, r10, r11
beq 9f // if (!fm || wd < 4) return;
.if \wd >= 6
vmov.i16 d10, #1
vabd.u16 d2, d21, d23 // abs(p2 - p0)
vabd.u16 d3, d22, d23 // abs(p1 - p0)
vabd.u16 d4, d25, d24 // abs(q1 - q0)
vabd.u16 d5, d26, d24 // abs(q2 - q0)
vdup.16 d9, r9 // bitdepth_min_8
.if \wd >= 8
vabd.u16 d6, d20, d23 // abs(p3 - p0)
vabd.u16 d7, d27, d24 // abs(q3 - q0)
.endif
vmax.u16 d2, d2, d3
vmax.u16 d4, d4, d5
.if \wd >= 8
vmax.u16 d6, d6, d7
.endif
vmax.u16 d2, d2, d4
vshl.u16 d10, d10, d9 // F = 1 << bitdepth_min_8
.if \wd >= 8
vmax.u16 d2, d2, d6
.endif
.if \wd == 16
vabd.u16 d3, d17, d23 // abs(p6 - p0)
vabd.u16 d4, d18, d23 // abs(p5 - p0)
vabd.u16 d5, d19, d23 // abs(p4 - p0)
.endif
vcge.u16 d2, d10, d2 // flat8in
.if \wd == 16
vabd.u16 d6, d28, d24 // abs(q4 - q0)
vabd.u16 d7, d29, d24 // abs(q5 - q0)
vabd.u16 d8, d30, d24 // abs(q6 - q0)
.endif
vand d14, d2, d14 // flat8in && fm && wd > 4
vbic d1, d1, d14 // fm && wd >= 4 && !flat8in
.if \wd == 16
vmax.u16 d3, d3, d4
vmax.u16 d5, d5, d6
.endif
vmov r10, r11, d1
.if \wd == 16
vmax.u16 d7, d7, d8
vmax.u16 d3, d3, d5
vmax.u16 d3, d3, d7
vcge.u16 d3, d10, d3 // flat8out
.endif
orrs r10, r10, r11
.if \wd == 16
vand d15, d15, d3 // flat8out && fm && wd == 16
vand d15, d15, d14 // flat8out && flat8in && fm && wd == 16
vbic d14, d14, d15 // flat8in && fm && wd >= 4 && !flat8out
.endif
beq 1f // skip wd == 4 case
.endif
vdup.16 d3, r8 // bitdepth_max
vsub.u16 d2, d22, d25 // p1 - q1
vshr.u16 d3, d3, #1 // (128 << bitdepth_min_8) - 1
vcgt.u16 d0, d0, d12 // hev
vmvn d9, d3 // - 128 * (1 << bitdepth_min_8)
vmin.s16 d2, d2, d3 // iclip_diff(p1 - q1)
vmax.s16 d2, d2, d9 // iclip_diff(p1 - q1)
vand d4, d2, d0 // if (hev) iclip_diff(p1 - q1)
vsub.u16 d2, d24, d23
vmov.i16 d6, #3
vbic d0, d1, d0 // (fm && wd >= 4 && !hev)
vmul.i16 d2, d2, d6
vmov.i16 d7, #4
vadd.i16 d2, d2, d4
vmin.s16 d2, d2, d3 // f = iclip_diff()
vmax.s16 d2, d2, d9 // f = iclip_diff()
vqadd.s16 d4, d7, d2 // f + 4
vqadd.s16 d5, d6, d2 // f + 3
vmin.s16 d4, d4, d3 // imin(f + 4, 128 << bitdepth_min_8 - 1)
vmin.s16 d5, d5, d3 // imin(f + 3, 128 << bitdepth_min_8 - 1)
vshr.s16 d4, d4, #3 // f1
vshr.s16 d5, d5, #3 // f2
vmov.i16 d9, #0
vdup.16 d3, r8 // bitdepth_max
vqadd.s16 d2, d23, d5 // p0 + f2
vqsub.s16 d6, d24, d4 // q0 - f1
vrshr.s16 d4, d4, #1 // (f1 + 1) >> 1
vmin.s16 d2, d2, d3 // out p0 = iclip_pixel()
vmin.s16 d6, d6, d3 // out q0 = iclip_pixel()
vmax.s16 d2, d2, d9 // out p0 = iclip_pixel()
vmax.s16 d6, d6, d9 // out q0 = iclip_pixel()
vbit d23, d2, d1 // if (fm && wd >= 4)
vbit d24, d6, d1 // if (fm && wd >= 4)
vqadd.s16 d2, d22, d4 // p1 + f
vqsub.s16 d6, d25, d4 // q1 - f
vmin.s16 d2, d2, d3 // out p1 = iclip_pixel()
vmin.s16 d6, d6, d3 // out q1 = iclip_pixel()
vmax.s16 d2, d2, d9 // out p1 = iclip_pixel()
vmax.s16 d6, d6, d9 // out q1 = iclip_pixel()
vbit d22, d2, d0 // if (fm && wd >= 4 && !hev)
vbit d25, d6, d0 // if (fm && wd >= 4 && !hev)
1:
.if \wd == 6
vmov r10, r11, d14
orrs r10, r10, r11
beq 2f // skip if there's no flat8in
vadd.i16 d0, d21, d21 // p2 * 2
vadd.i16 d2, d21, d22 // p2 + p1
vadd.i16 d4, d22, d23 // p1 + p0
vadd.i16 d6, d23, d24 // p0 + q0
vadd.i16 d8, d0, d2
vadd.i16 d10, d4, d6
vadd.i16 d12, d24, d25 // q0 + q1
vadd.i16 d8, d8, d10
vsub.i16 d12, d12, d0
vadd.i16 d10, d25, d26 // q1 + q2
vrshr.u16 d0, d8, #3 // out p1
vadd.i16 d8, d8, d12
vsub.i16 d10, d10, d2
vadd.i16 d12, d26, d26 // q2 + q2
vrshr.u16 d1, d8, #3 // out p0
vadd.i16 d8, d8, d10
vsub.i16 d12, d12, d4
vrshr.u16 d2, d8, #3 // out q0
vbit d22, d0, d14 // p1 if (flat8in)
vadd.i16 d8, d8, d12
vbit d23, d1, d14 // p0 if (flat8in)
vrshr.u16 d3, d8, #3 // out q1
vbit d24, d2, d14 // q0 if (flat8in)
vbit d25, d3, d14 // q1 if (flat8in)
.elseif \wd >= 8
vmov r10, r11, d14
orrs r10, r10, r11
.if \wd == 8
beq 8f // skip if there's no flat8in
.else
beq 2f // skip if there's no flat8in
.endif
vadd.i16 d0, d20, d21 // p3 + p2
vadd.i16 d2, d22, d25 // p1 + q1
vadd.i16 d4, d20, d22 // p3 + p1
vadd.i16 d6, d23, d26 // p0 + q2
vadd.i16 d8, d0, d0 // 2 * (p3 + p2)
vadd.i16 d9, d23, d24 // p0 + q0
vadd.i16 d8, d8, d4 // + p3 + p1
vsub.i16 d2, d2, d0 // p1 + q1 - p3 - p2
vadd.i16 d8, d8, d9 // + p0 + q0
vsub.i16 d6, d6, d4 // p0 + q2 - p3 - p1
vrshr.u16 d10, d8, #3 // out p2
vadd.i16 d8, d8, d2
vadd.i16 d0, d20, d23 // p3 + p0
vadd.i16 d2, d24, d27 // q0 + q3
vrshr.u16 d11, d8, #3 // out p1
vadd.i16 d8, d8, d6
vsub.i16 d2, d2, d0 // q0 + q3 - p3 - p0
vadd.i16 d4, d21, d24 // p2 + q0
vadd.i16 d6, d25, d27 // q1 + q3
vrshr.u16 d12, d8, #3 // out p0
vadd.i16 d8, d8, d2
vsub.i16 d6, d6, d4 // q1 + q3 - p2 - q0
vadd.i16 d0, d22, d25 // p1 + q1
vadd.i16 d2, d26, d27 // q2 + q3
vrshr.u16 d13, d8, #3 // out q0
vadd.i16 d8, d8, d6
vsub.i16 d2, d2, d0 // q2 + q3 - p1 - q1
vrshr.u16 d0, d8, #3 // out q1
vadd.i16 d8, d8, d2
vbit d21, d10, d14
vbit d22, d11, d14
vbit d23, d12, d14
vrshr.u16 d1, d8, #3 // out q2
vbit d24, d13, d14
vbit d25, d0, d14
vbit d26, d1, d14
.endif
2:
.if \wd == 16
vmov r10, r11, d15
orrs r10, r10, r11
bne 1f // check if flat8out is needed
vmov r10, r11, d14
orrs r10, r10, r11
beq 8f // if there was no flat8in, just write the inner 4 pixels
b 7f // if flat8in was used, write the inner 6 pixels
1:
vadd.i16 d2, d17, d17 // p6 + p6
vadd.i16 d4, d17, d18 // p6 + p5
vadd.i16 d6, d17, d19 // p6 + p4
vadd.i16 d8, d17, d20 // p6 + p3
vadd.i16 d12, d2, d4
vadd.i16 d10, d6, d8
vadd.i16 d6, d17, d21 // p6 + p2
vadd.i16 d12, d12, d10
vadd.i16 d8, d17, d22 // p6 + p1
vadd.i16 d10, d18, d23 // p5 + p0
vadd.i16 d6, d6, d8
vadd.i16 d8, d19, d24 // p4 + q0
vadd.i16 d12, d12, d6
vadd.i16 d10, d10, d8
vadd.i16 d6, d20, d25 // p3 + q1
vadd.i16 d12, d12, d10
vsub.i16 d6, d6, d2
vadd.i16 d2, d21, d26 // p2 + q2
vrshr.u16 d0, d12, #4 // out p5
vadd.i16 d12, d12, d6 // - (p6 + p6) + (p3 + q1)
vsub.i16 d2, d2, d4
vadd.i16 d4, d22, d27 // p1 + q3
vadd.i16 d6, d17, d19 // p6 + p4
vrshr.u16 d1, d12, #4 // out p4
vadd.i16 d12, d12, d2 // - (p6 + p5) + (p2 + q2)
vsub.i16 d4, d4, d6
vadd.i16 d6, d23, d28 // p0 + q4
vadd.i16 d8, d17, d20 // p6 + p3
vrshr.u16 d2, d12, #4 // out p3
vadd.i16 d12, d12, d4 // - (p6 + p4) + (p1 + q3)
vsub.i16 d6, d6, d8
vadd.i16 d8, d24, d29 // q0 + q5
vadd.i16 d4, d17, d21 // p6 + p2
vrshr.u16 d3, d12, #4 // out p2
vadd.i16 d12, d12, d6 // - (p6 + p3) + (p0 + q4)
vsub.i16 d8, d8, d4
vadd.i16 d6, d25, d30 // q1 + q6
vadd.i16 d10, d17, d22 // p6 + p1
vrshr.u16 d4, d12, #4 // out p1
vadd.i16 d12, d12, d8 // - (p6 + p2) + (q0 + q5)
vsub.i16 d6, d6, d10
vadd.i16 d8, d26, d30 // q2 + q6
vbif d0, d18, d15 // out p5
vadd.i16 d10, d18, d23 // p5 + p0
vrshr.u16 d5, d12, #4 // out p0
vadd.i16 d12, d12, d6 // - (p6 + p1) + (q1 + q6)
vsub.i16 d8, d8, d10
vadd.i16 d10, d27, d30 // q3 + q6
vbif d1, d19, d15 // out p4
vadd.i16 d18, d19, d24 // p4 + q0
vrshr.u16 d6, d12, #4 // out q0
vadd.i16 d12, d12, d8 // - (p5 + p0) + (q2 + q6)
vsub.i16 d10, d10, d18
vadd.i16 d8, d28, d30 // q4 + q6
vbif d2, d20, d15 // out p3
vadd.i16 d18, d20, d25 // p3 + q1
vrshr.u16 d7, d12, #4 // out q1
vadd.i16 d12, d12, d10 // - (p4 + q0) + (q3 + q6)
vsub.i16 d18, d8, d18
vadd.i16 d10, d29, d30 // q5 + q6
vbif d3, d21, d15 // out p2
vadd.i16 d20, d21, d26 // p2 + q2
vrshr.u16 d8, d12, #4 // out q2
vadd.i16 d12, d12, d18 // - (p3 + q1) + (q4 + q6)
vsub.i16 d10, d10, d20
vadd.i16 d18, d30, d30 // q6 + q6
vbif d4, d22, d15 // out p1
vadd.i16 d20, d22, d27 // p1 + q3
vrshr.u16 d9, d12, #4 // out q3
vadd.i16 d12, d12, d10 // - (p2 + q2) + (q5 + q6)
vsub.i16 d18, d18, d20
vbif d5, d23, d15 // out p0
vrshr.u16 d10, d12, #4 // out q4
vadd.i16 d12, d12, d18 // - (p1 + q3) + (q6 + q6)
vrshr.u16 d11, d12, #4 // out q5
vbif d6, d24, d15 // out q0
vbif d7, d25, d15 // out q1
vbif d8, d26, d15 // out q2
vbif d9, d27, d15 // out q3
vbif d10, d28, d15 // out q4
vbif d11, d29, d15 // out q5
.endif
bx lr
.if \wd == 16
7:
// Return to a shorter epilogue, writing only the inner 6 pixels
bx r6
.endif
.if \wd >= 8
8:
// Return to a shorter epilogue, writing only the inner 4 pixels
bx r7
.endif
9:
// Return directly without writing back any pixels
bx r12
endfunc
.endm
loop_filter 16
loop_filter 8
loop_filter 6
loop_filter 4
.macro lpf_4_wd16
adr r6, 7f + CONFIG_THUMB
adr r7, 8f + CONFIG_THUMB
bl lpf_4_wd16_neon
.endm
.macro lpf_4_wd8
adr r7, 8f + CONFIG_THUMB
bl lpf_4_wd8_neon
.endm
.macro lpf_4_wd6
bl lpf_4_wd6_neon
.endm
.macro lpf_4_wd4
bl lpf_4_wd4_neon
.endm
function lpf_v_4_4_neon
mov r12, lr
sub r10, r0, r1, lsl #1
vld1.16 {d22}, [r10, :64], r1 // p1
vld1.16 {d24}, [r0, :64], r1 // q0
vld1.16 {d23}, [r10, :64], r1 // p0
vld1.16 {d25}, [r0, :64], r1 // q1
sub r0, r0, r1, lsl #1
lpf_4_wd4
sub r10, r0, r1, lsl #1
vst1.16 {d22}, [r10, :64], r1 // p1
vst1.16 {d24}, [r0, :64], r1 // q0
vst1.16 {d23}, [r10, :64], r1 // p0
vst1.16 {d25}, [r0, :64], r1 // q1
sub r0, r0, r1, lsl #1
bx r12
endfunc
function lpf_h_4_4_neon
mov r12, lr
sub r10, r0, #4
add r0, r10, r1, lsl #1
vld1.16 {d22}, [r10], r1
vld1.16 {d24}, [r0], r1
vld1.16 {d23}, [r10], r1
vld1.16 {d25}, [r0], r1
add r0, r0, #4
transpose_4x4h q11, q12, d22, d23, d24, d25
lpf_4_wd4
sub r10, r0, r1, lsl #2
sub r10, r10, #4
transpose_4x4h q11, q12, d22, d23, d24, d25
add r0, r10, r1, lsl #1
vst1.16 {d22}, [r10], r1
vst1.16 {d24}, [r0], r1
vst1.16 {d23}, [r10], r1
vst1.16 {d25}, [r0], r1
add r0, r0, #4
bx r12
endfunc
function lpf_v_6_4_neon
mov r12, lr
sub r10, r0, r1, lsl #1
sub r10, r10, r1
vld1.16 {d21}, [r10, :64], r1 // p2
vld1.16 {d24}, [r0, :64], r1 // q0
vld1.16 {d22}, [r10, :64], r1 // p1
vld1.16 {d25}, [r0, :64], r1 // q1
vld1.16 {d23}, [r10, :64], r1 // p0
vld1.16 {d26}, [r0, :64], r1 // q2
sub r0, r0, r1, lsl #1
sub r0, r0, r1
lpf_4_wd6
sub r10, r0, r1, lsl #1
vst1.16 {d22}, [r10, :64], r1 // p1
vst1.16 {d24}, [r0, :64], r1 // q0
vst1.16 {d23}, [r10, :64], r1 // p0
vst1.16 {d25}, [r0, :64], r1 // q1
sub r0, r0, r1, lsl #1
bx r12
endfunc
function lpf_h_6_4_neon
mov r12, lr
sub r10, r0, #8
vld1.16 {d20}, [r10, :64], r1
vld1.16 {d24}, [r0, :64], r1
vld1.16 {d21}, [r10, :64], r1
vld1.16 {d25}, [r0, :64], r1
vld1.16 {d22}, [r10, :64], r1
vld1.16 {d26}, [r0, :64], r1
vld1.16 {d23}, [r10, :64], r1
vld1.16 {d27}, [r0, :64], r1
transpose_4x4h q10, q11, d20, d21, d22, d23
transpose_4x4h q12, q13, d24, d25, d26, d27
lpf_4_wd6
sub r0, r0, #4
transpose_4x4h q11, q12, d22, d23, d24, d25
sub r10, r0, r1, lsl #2
sub r0, r0, r1, lsl #1
vst1.16 {d22}, [r10], r1
vst1.16 {d24}, [r0], r1
vst1.16 {d23}, [r10], r1
vst1.16 {d25}, [r0], r1
add r0, r0, #4
bx r12
endfunc
function lpf_v_8_4_neon
mov r12, lr
sub r10, r0, r1, lsl #2
vld1.16 {d20}, [r10, :64], r1 // p3
vld1.16 {d24}, [r0, :64], r1 // q0
vld1.16 {d21}, [r10, :64], r1 // p2
vld1.16 {d25}, [r0, :64], r1 // q1
vld1.16 {d22}, [r10, :64], r1 // p1
vld1.16 {d26}, [r0, :64], r1 // q2
vld1.16 {d23}, [r10, :64], r1 // p0
vld1.16 {d27}, [r0, :64], r1 // q3
sub r0, r0, r1, lsl #2
lpf_4_wd8
sub r10, r0, r1, lsl #1
sub r10, r10, r1
vst1.16 {d21}, [r10, :64], r1 // p2
vst1.16 {d24}, [r0, :64], r1 // q0
vst1.16 {d22}, [r10, :64], r1 // p1
vst1.16 {d25}, [r0, :64], r1 // q1
vst1.16 {d23}, [r10, :64], r1 // p0
vst1.16 {d26}, [r0, :64], r1 // q2
sub r0, r0, r1, lsl #1
sub r0, r0, r1
bx r12
8:
sub r10, r0, r1, lsl #1
vst1.16 {d22}, [r10, :64], r1 // p1
vst1.16 {d24}, [r0, :64], r1 // q0
vst1.16 {d23}, [r10, :64], r1 // p0
vst1.16 {d25}, [r0, :64], r1 // q1
sub r0, r0, r1, lsl #1
bx r12
endfunc
function lpf_h_8_4_neon
mov r12, lr
sub r10, r0, #8
vld1.16 {d20}, [r10, :64], r1
vld1.16 {d24}, [r0, :64], r1
vld1.16 {d21}, [r10, :64], r1
vld1.16 {d25}, [r0, :64], r1
vld1.16 {d22}, [r10, :64], r1
vld1.16 {d26}, [r0, :64], r1
vld1.16 {d23}, [r10, :64], r1
vld1.16 {d27}, [r0, :64], r1
transpose_4x4h q10, q11, d20, d21, d22, d23
transpose_4x4h q12, q13, d24, d25, d26, d27
lpf_4_wd8
sub r0, r0, r1, lsl #2
transpose_4x4h q10, q11, d20, d21, d22, d23
transpose_4x4h q12, q13, d24, d25, d26, d27
sub r10, r0, #8
vst1.16 {d20}, [r10, :64], r1
vst1.16 {d24}, [r0, :64], r1
vst1.16 {d21}, [r10, :64], r1
vst1.16 {d25}, [r0, :64], r1
vst1.16 {d22}, [r10, :64], r1
vst1.16 {d26}, [r0, :64], r1
vst1.16 {d23}, [r10, :64], r1
vst1.16 {d27}, [r0, :64], r1
bx r12
8:
sub r0, r0, #4
transpose_4x4h q11, q12, d22, d23, d24, d25
sub r10, r0, r1, lsl #2
sub r0, r0, r1, lsl #1
vst1.16 {d22}, [r10], r1
vst1.16 {d24}, [r0], r1
vst1.16 {d23}, [r10], r1
vst1.16 {d25}, [r0], r1
add r0, r0, #4
bx r12
endfunc
function lpf_v_16_4_neon
mov r12, lr
sub r10, r0, r1, lsl #3
add r10, r10, r1
vld1.16 {d17}, [r10, :64], r1 // p6
vld1.16 {d24}, [r0, :64], r1 // q0
vld1.16 {d18}, [r10, :64], r1 // p5
vld1.16 {d25}, [r0, :64], r1 // q1
vld1.16 {d19}, [r10, :64], r1 // p4
vld1.16 {d26}, [r0, :64], r1 // q2
vld1.16 {d20}, [r10, :64], r1 // p3
vld1.16 {d27}, [r0, :64], r1 // q3
vld1.16 {d21}, [r10, :64], r1 // p2
vld1.16 {d28}, [r0, :64], r1 // q4
vld1.16 {d22}, [r10, :64], r1 // p1
vld1.16 {d29}, [r0, :64], r1 // q5
vld1.16 {d23}, [r10, :64], r1 // p0
vld1.16 {d30}, [r0, :64], r1 // q6
sub r0, r0, r1, lsl #3
add r0, r0, r1
lpf_4_wd16
sub r10, r0, r1, lsl #2
sub r10, r10, r1, lsl #1
vst1.16 {d0}, [r10, :64], r1 // p5
vst1.16 {d6}, [r0, :64], r1 // q0
vst1.16 {d1}, [r10, :64], r1 // p4
vst1.16 {d7}, [r0, :64], r1 // q1
vst1.16 {d2}, [r10, :64], r1 // p3
vst1.16 {d8}, [r0, :64], r1 // q2
vst1.16 {d3}, [r10, :64], r1 // p2
vst1.16 {d9}, [r0, :64], r1 // q3
vst1.16 {d4}, [r10, :64], r1 // p1
vst1.16 {d10}, [r0, :64], r1 // q4
vst1.16 {d5}, [r10, :64], r1 // p0
vst1.16 {d11}, [r0, :64], r1 // q5
sub r0, r0, r1, lsl #2
sub r0, r0, r1, lsl #1
bx r12
7:
sub r10, r0, r1
sub r10, r10, r1, lsl #1
vst1.16 {d21}, [r10, :64], r1 // p2
vst1.16 {d24}, [r0, :64], r1 // q0
vst1.16 {d22}, [r10, :64], r1 // p1
vst1.16 {d25}, [r0, :64], r1 // q1
vst1.16 {d23}, [r10, :64], r1 // p0
vst1.16 {d26}, [r0, :64], r1 // q2
sub r0, r0, r1, lsl #1
sub r0, r0, r1
bx r12
8:
sub r10, r0, r1, lsl #1
vst1.16 {d22}, [r10, :64], r1 // p1
vst1.16 {d24}, [r0, :64], r1 // q0
vst1.16 {d23}, [r10, :64], r1 // p0
vst1.16 {d25}, [r0, :64], r1 // q1
sub r0, r0, r1, lsl #1
bx r12
endfunc
function lpf_h_16_4_neon
mov r12, lr
sub r10, r0, #16
sub r0, r0, #8
vld1.16 {d16}, [r10, :64], r1
vld1.16 {d20}, [r0, :64], r1
vld1.16 {d17}, [r10, :64], r1
vld1.16 {d21}, [r0, :64], r1
vld1.16 {d18}, [r10, :64], r1
vld1.16 {d22}, [r0, :64], r1
vld1.16 {d19}, [r10, :64], r1
vld1.16 {d23}, [r0, :64], r1
sub r10, r10, r1, lsl #2
sub r0, r0, r1, lsl #2
add r10, r10, #16
add r0, r0, #16
vld1.16 {d24}, [r10, :64], r1
vld1.16 {d28}, [r0, :64], r1
vld1.16 {d25}, [r10, :64], r1
vld1.16 {d29}, [r0, :64], r1
vld1.16 {d26}, [r10, :64], r1
vld1.16 {d30}, [r0, :64], r1
vld1.16 {d27}, [r10, :64], r1
vld1.16 {d31}, [r0, :64], r1
sub r0, r0, #8
transpose_4x4h q8, q9, d16, d17, d18, d19
transpose_4x4h q10, q11, d20, d21, d22, d23
transpose_4x4h q12, q13, d24, d25, d26, d27
transpose_4x4h q14, q15, d28, d29, d30, d31
lpf_4_wd16
sub r0, r0, r1, lsl #2
transpose_4x4h q8, q0, d16, d17, d0, d1
transpose_4x4h q1, q2, d2, d3, d4, d5
transpose_4x4h q3, q4, d6, d7, d8, d9
transpose_4x4h q5, q15, d10, d11, d30, d31
sub r10, r0, #16
sub r0, r0, #8
vst1.16 {d16}, [r10, :64], r1
vst1.16 {d2}, [r0, :64], r1
vst1.16 {d17}, [r10, :64], r1
vst1.16 {d3}, [r0, :64], r1
vst1.16 {d0}, [r10, :64], r1
vst1.16 {d4}, [r0, :64], r1
vst1.16 {d1}, [r10, :64], r1
vst1.16 {d5}, [r0, :64], r1
sub r10, r10, r1, lsl #2
sub r0, r0, r1, lsl #2
add r10, r10, #16
add r0, r0, #16
vst1.16 {d6}, [r10, :64], r1
vst1.16 {d10}, [r0, :64], r1
vst1.16 {d7}, [r10, :64], r1
vst1.16 {d11}, [r0, :64], r1
vst1.16 {d8}, [r10, :64], r1
vst1.16 {d30}, [r0, :64], r1
vst1.16 {d9}, [r10, :64], r1
vst1.16 {d31}, [r0, :64], r1
sub r0, r0, #8
bx r12
7:
sub r0, r0, r1, lsl #2
transpose_4x4h q10, q11, d20, d21, d22, d23
transpose_4x4h q12, q13, d24, d25, d26, d27
sub r10, r0, #8
vst1.16 {d20}, [r10, :64], r1
vst1.16 {d24}, [r0, :64], r1
vst1.16 {d21}, [r10, :64], r1
vst1.16 {d25}, [r0, :64], r1
vst1.16 {d22}, [r10, :64], r1
vst1.16 {d26}, [r0, :64], r1
vst1.16 {d23}, [r10, :64], r1
vst1.16 {d27}, [r0, :64], r1
bx r12
8:
sub r0, r0, #4
transpose_4x4h q11, q12, d22, d23, d24, d25
sub r10, r0, r1, lsl #2
sub r0, r0, r1, lsl #1
vst1.16 {d22}, [r10], r1
vst1.16 {d24}, [r0], r1
vst1.16 {d23}, [r10], r1
vst1.16 {d25}, [r0], r1
add r0, r0, #4
bx r12
endfunc
// void dav1d_lpf_v_sb_y_16bpc_neon(pixel *dst, const ptrdiff_t stride,
// const uint32_t *const vmask,
// const uint8_t (*l)[4], ptrdiff_t b4_stride,
// const Av1FilterLUT *lut, const int w,
// const int bitdepth_max)
.macro lpf_func dir, type
function lpf_\dir\()_sb_\type\()_16bpc_neon, export=1
push {r4-r11,lr}
vpush {q4-q7}
ldrd r4, r5, [sp, #100]
ldr r8, [sp, #112] // bitdepth_max; the 'w' parameter isn't loaded
sub sp, sp, #8
clz r9, r8
rsb r9, r9, #24 // bitdepth_min_8
ldrd r6, r7, [r2] // vmask[0], vmask[1]
.ifc \type, y
ldr r2, [r2, #8] // vmask[2]
.endif
add r5, r5, #128 // Move to sharp part of lut
.ifc \type, y
orr r7, r7, r2 // vmask[1] |= vmask[2]
.endif
.ifc \dir, v
sub r4, r3, r4, lsl #2
.else
sub r3, r3, #4
lsl r4, r4, #2
.endif
orr r6, r6, r7 // vmask[0] |= vmask[1]
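// Each bit of the vmask words corresponds to one 4-pixel filter unit along
// the edge; the loop below consumes one bit (and advances by 4 pixels) per
// iteration.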
1:
tst r6, #0x01
strd r6, r7, [sp]
.ifc \dir, v
ldrb r10, [r4], #4
ldrb r11, [r3], #4
.else
ldrb r10, [r3]
ldrb r11, [r3, #4]
add r3, r3, r4
.endif
beq 7f // if (!(vm & bits)) continue;
orrs r12, r10, r11
vdup.16 d31, r9 // bitdepth_min_8
beq 7f // if (!(l[0][0] | l[offset][0])) continue;
cmp r11, #0 // Check for nonzero values in l[0][0]
ldrb r6, [r5], #8 // sharp[0]
it eq
moveq r11, r10 // if (!l[0][0]) L = l[offset][0]
ldrb r12, [r5] // sharp[1]
lsr r6, r11, r6 // L >> sharp[0]
sub r5, r5, #8
cmp r12, r6
lsr r10, r11, #4 // H
add r11, r11, #2 // L + 2
it lt
movlt r6, r12 // imin(L >> sharp[0], sharp[1])
add r11, r11, r11 // 2*(L + 2)
cmp r6, #1
lsl r10, r10, r9 // H << bitdepth_min_8
it lt
movlt r6, #1 // imax(imin(), 1) = limit = I
vdup.16 d12, r10 // H << bitdepth_min_8
add r11, r11, r6 // 2*(L + 2) + limit = E
lsl r6, r6, r9 // I << bitdepth_min_8
lsl r11, r11, r9 // E << bitdepth_min_8
vdup.16 d11, r6 // I << bitdepth_min_8
vdup.16 d10, r11 // E << bitdepth_min_8
.ifc \type, y
tst r2, #0x01
beq 2f
// wd16
bl lpf_\dir\()_16_4_neon
b 8f
2:
.endif
tst r7, #0x01
beq 3f
.ifc \type, y
// wd8
bl lpf_\dir\()_8_4_neon
.else
// wd6
bl lpf_\dir\()_6_4_neon
.endif
b 8f
3:
// wd4
bl lpf_\dir\()_4_4_neon
.ifc \dir, h
b 8f
7:
// For dir h, the functions above increment r0.
// If the whole function is skipped, increment it here instead.
add r0, r0, r1, lsl #2
.else
7:
.endif
8:
ldrd r6, r7, [sp]
.ifc \type, y
lsr r2, r2, #1 // vmask[2] >>= 1
.endif
.ifc \dir, v
add r0, r0, #8
.else
// For dir h, r0 is returned incremented
.endif
lsrs r6, r6, #1 // vmask[0] >>= 1
lsr r7, r7, #1 // vmask[1] >>= 1
bne 1b
add sp, sp, #8
vpop {q4-q7}
pop {r4-r11,pc}
endfunc
.endm
lpf_func v, y
lpf_func h, y
lpf_func v, uv
lpf_func h, uv
|
Admenri/urge
| 7,680
|
third_party/dav1d/tests/checkasm/arm/checkasm_64.S
|
/******************************************************************************
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2015 Martin Storsjo
* Copyright © 2015 Janne Grunau
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#define PRIVATE_PREFIX checkasm_
#include "src/arm/asm.S"
#include "src/arm/64/util.S"
const register_init, align=4
.quad 0x21f86d66c8ca00ce
.quad 0x75b6ba21077c48ad
.quad 0xed56bb2dcb3c7736
.quad 0x8bda43d3fd1a7e06
.quad 0xb64a9c9e5d318408
.quad 0xdf9a54b303f1d3a3
.quad 0x4a75479abd64e097
.quad 0x249214109d5d1c88
.quad 0x1a1b2550a612b48c
.quad 0x79445c159ce79064
.quad 0x2eed899d5a28ddcd
.quad 0x86b2536fcd8cf636
.quad 0xb0856806085e7943
.quad 0x3f2bf84fc0fcca4e
.quad 0xacbd382dcf5b8de2
.quad 0xd229e1f5b281303f
.quad 0x71aeaff20b095fd9
.quad 0xab63e2e11fa38ed9
endconst
const error_message_register
.asciz "failed to preserve register"
error_message_stack:
.asciz "stack clobbered"
endconst
// max number of args used by any asm function.
#define MAX_ARGS 15
#define CLOBBER_STACK ((8*MAX_ARGS + 15) & ~15)
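// stack_clobber: fill the CLOBBER_STACK bytes below the stack pointer with
// the two argument values, so that a function reading uninitialized stack
// memory is likely to produce a wrong result and be caught.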
function stack_clobber, export=1
mov x3, sp
mov x2, #CLOBBER_STACK
1:
stp x0, x1, [sp, #-16]!
subs x2, x2, #16
b.gt 1b
mov sp, x3
ret
endfunc
// + 16 for stack canary reference
#define ARG_STACK ((8*(MAX_ARGS - 8) + 15) & ~15 + 16)
function checked_call, export=1
stp x29, x30, [sp, #-16]!
mov x29, sp
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x25, x26, [sp, #-16]!
stp x27, x28, [sp, #-16]!
stp d8, d9, [sp, #-16]!
stp d10, d11, [sp, #-16]!
stp d12, d13, [sp, #-16]!
stp d14, d15, [sp, #-16]!
movrel x9, register_init
ldp d8, d9, [x9], #16
ldp d10, d11, [x9], #16
ldp d12, d13, [x9], #16
ldp d14, d15, [x9], #16
ldp x19, x20, [x9], #16
ldp x21, x22, [x9], #16
ldp x23, x24, [x9], #16
ldp x25, x26, [x9], #16
ldp x27, x28, [x9], #16
sub sp, sp, #ARG_STACK
.equ pos, 0
.rept MAX_ARGS-8
// Skip the first 8 args, that are loaded into registers
ldr x9, [x29, #16 + 8*8 + pos]
str x9, [sp, #pos]
.equ pos, pos + 8
.endr
// Fill x8-x17 with garbage. These registers don't have to be preserved,
// but filling them avoids relying on them having any particular value.
movrel x9, register_init
ldp x10, x11, [x9], #32
ldp x12, x13, [x9], #32
ldp x14, x15, [x9], #32
ldp x16, x17, [x9], #32
ldp x8, x9, [x9]
// For stack overflows, the callee is free to overwrite the parameters
// that were passed on the stack (if any), so we can only check after
// that point. First figure out how many parameters the function
// really took on the stack:
ldr w2, [x29, #16 + 8*8 + (MAX_ARGS-8)*8]
// Load the first non-parameter value from the stack, that should be
// left untouched by the function. Store a copy of it inverted, so that
// e.g. overwriting everything with zero would be noticed.
ldr x2, [sp, x2, lsl #3]
mvn x2, x2
str x2, [sp, #ARG_STACK-8]
// Load the in-register arguments
mov x12, x0
ldp x0, x1, [x29, #16]
ldp x2, x3, [x29, #32]
ldp x4, x5, [x29, #48]
ldp x6, x7, [x29, #64]
// Call the target function
blr x12
// Load the number of stack parameters, stack canary and its reference
ldr w2, [x29, #16 + 8*8 + (MAX_ARGS-8)*8]
ldr x2, [sp, x2, lsl #3]
ldr x3, [sp, #ARG_STACK-8]
add sp, sp, #ARG_STACK
stp x0, x1, [sp, #-16]!
mvn x3, x3
cmp x2, x3
b.ne 2f
movrel x9, register_init
movi v3.8h, #0
.macro check_reg_neon reg1, reg2
ldr q1, [x9], #16
uzp1 v2.2d, v\reg1\().2d, v\reg2\().2d
eor v1.16b, v1.16b, v2.16b
orr v3.16b, v3.16b, v1.16b
.endm
check_reg_neon 8, 9
check_reg_neon 10, 11
check_reg_neon 12, 13
check_reg_neon 14, 15
uqxtn v3.8b, v3.8h
umov x3, v3.d[0]
.macro check_reg reg1, reg2
ldp x0, x1, [x9], #16
eor x0, x0, \reg1
eor x1, x1, \reg2
orr x3, x3, x0
orr x3, x3, x1
.endm
check_reg x19, x20
check_reg x21, x22
check_reg x23, x24
check_reg x25, x26
check_reg x27, x28
cbz x3, 0f
movrel x0, error_message_register
b 1f
2:
movrel x0, error_message_stack
1:
#ifdef PREFIX
bl _checkasm_fail_func
#else
bl checkasm_fail_func
#endif
0:
ldp x0, x1, [sp], #16
ldp d14, d15, [sp], #16
ldp d12, d13, [sp], #16
ldp d10, d11, [sp], #16
ldp d8, d9, [sp], #16
ldp x27, x28, [sp], #16
ldp x25, x26, [sp], #16
ldp x23, x24, [sp], #16
ldp x21, x22, [sp], #16
ldp x19, x20, [sp], #16
ldp x29, x30, [sp], #16
ret
endfunc
|
Admenri/urge
| 6,678
|
third_party/dav1d/tests/checkasm/arm/checkasm_32.S
|
/******************************************************************************
* Copyright © 2018, VideoLAN and dav1d authors
* Copyright © 2015 Martin Storsjo
* Copyright © 2015 Janne Grunau
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#define PRIVATE_PREFIX checkasm_
#include "src/arm/asm.S"
#include "src/arm/32/util.S"
const register_init, align=3
.quad 0x21f86d66c8ca00ce
.quad 0x75b6ba21077c48ad
.quad 0xed56bb2dcb3c7736
.quad 0x8bda43d3fd1a7e06
.quad 0xb64a9c9e5d318408
.quad 0xdf9a54b303f1d3a3
.quad 0x4a75479abd64e097
.quad 0x249214109d5d1c88
endconst
const error_message_fpscr
.asciz "failed to preserve register FPSCR, changed bits: %x"
error_message_gpr:
.asciz "failed to preserve register r%d"
error_message_vfp:
.asciz "failed to preserve register d%d"
error_message_stack:
.asciz "failed to preserve stack"
endconst
@ max number of args used by any asm function.
#define MAX_ARGS 15
#define ARG_STACK 4*(MAX_ARGS - 4)
@ Align the used stack space to 8 to preserve the stack alignment.
@ +8 for stack canary reference.
#define ARG_STACK_A (((ARG_STACK + pushed + 7) & ~7) - pushed + 8)
.macro clobbercheck variant
.equ pushed, 4*9
function checked_call_\variant, export=1
push {r4-r11, lr}
.ifc \variant, vfp
vpush {d8-d15}
fmrx r4, FPSCR
push {r4}
.equ pushed, pushed + 16*4 + 4
.endif
movrel r12, register_init
.ifc \variant, vfp
vldm r12, {d8-d15}
.endif
ldm r12, {r4-r11}
sub sp, sp, #ARG_STACK_A
.equ pos, 0
.rept MAX_ARGS-4
ldr r12, [sp, #ARG_STACK_A + pushed + 8 + pos]
str r12, [sp, #pos]
.equ pos, pos + 4
.endr
@ For stack overflows, the callee is free to overwrite the parameters
@ that were passed on the stack (if any), so we can only check after
@ that point. First figure out how many parameters the function
@ really took on the stack:
ldr r12, [sp, #ARG_STACK_A + pushed + 8 + 4*(MAX_ARGS-4)]
@ Load the first non-parameter value from the stack, that should be
@ left untouched by the function. Store a copy of it inverted, so that
@ e.g. overwriting everything with zero would be noticed.
ldr r12, [sp, r12, lsl #2]
mvn r12, r12
str r12, [sp, #ARG_STACK_A - 4]
mov r12, r0
mov r0, r2
mov r1, r3
ldrd r2, r3, [sp, #ARG_STACK_A + pushed]
@ Call the target function
blx r12
@ Load the number of stack parameters, stack canary and its reference
ldr r12, [sp, #ARG_STACK_A + pushed + 8 + 4*(MAX_ARGS-4)]
ldr r2, [sp, r12, lsl #2]
ldr r3, [sp, #ARG_STACK_A - 4]
add sp, sp, #ARG_STACK_A
push {r0, r1}
mvn r3, r3
cmp r2, r3
bne 5f
movrel r12, register_init
.ifc \variant, vfp
.macro check_reg_vfp, dreg, offset
ldrd r2, r3, [r12, #8 * (\offset)]
vmov r0, lr, \dreg
eor r2, r2, r0
eor r3, r3, lr
orrs r2, r2, r3
bne 4f
.endm
.irp n, 8, 9, 10, 11, 12, 13, 14, 15
@ keep track of the checked double/SIMD register
mov r1, #\n
check_reg_vfp d\n, \n-8
.endr
.purgem check_reg_vfp
fmrx r1, FPSCR
ldr r3, [sp, #8]
eor r1, r1, r3
@ Ignore changes in bits 0-4 and 7
bic r1, r1, #0x9f
@ Ignore changes in the topmost 5 bits
bics r1, r1, #0xf8000000
bne 3f
.endif
@ keep track of the checked GPR
mov r1, #4
.macro check_reg reg1, reg2=
ldrd r2, r3, [r12], #8
eors r2, r2, \reg1
bne 2f
add r1, r1, #1
.ifnb \reg2
eors r3, r3, \reg2
bne 2f
.endif
add r1, r1, #1
.endm
check_reg r4, r5
check_reg r6, r7
@ r9 is a volatile register in the ios ABI
#ifdef __APPLE__
check_reg r8
#else
check_reg r8, r9
#endif
check_reg r10, r11
.purgem check_reg
b 0f
5:
movrel r0, error_message_stack
b 1f
4:
movrel r0, error_message_vfp
b 1f
3:
movrel r0, error_message_fpscr
b 1f
2:
movrel r0, error_message_gpr
1:
#ifdef PREFIX
bl _checkasm_fail_func
#else
bl checkasm_fail_func
#endif
0:
pop {r0, r1}
.ifc \variant, vfp
pop {r2}
fmxr FPSCR, r2
vpop {d8-d15}
.endif
pop {r4-r11, pc}
endfunc
.endm
clobbercheck vfp
|
admkopec/BetaOS
| 4,707
|
Kernel/Kernel/asmlib.S
|
//
// asmlib.s
// BetaOS
//
// Created by Adam Kopeć on 3/13/16.
// Copyright © 2016-2018 Adam Kopeć. All rights reserved.
//
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif
#include <i386/asm.h>
#define PAL_RTC_TSC_BASE 0x0
#define PAL_RTC_NS_BASE 0x8
#define PAL_RTC_SCALE 0x10
#define PAL_RTC_SHIFT 0x14
#define PAL_RTC_GENERATION 0x18
#define CPU_ACTIVE_STACK 0x18 // offsetof(cpu_data_t, cpu_kernel_stack)
#define CPU_KERNEL_STACK 0x20 // offsetof(cpu_data_t, cpu_kernel_stack)
//Entry(Load_context)
// movq %rdi, %rcx
// leaq -IKS_SIZE(%rcx), %rdx
// movq %rcx, %gs:CPU_ACTIVE_STACK
// movq %rdx, %gs:CPU_KERNEL_STACK
// movq %rdx, %rsp
// xorl %ebp, %ebp
// xorl %edi,%edi
//
// movq %rax, %rdi
// xorq %rbp,%rbp
// call *%rbx
Entry(x86_64_context_switch)
/* save the old context and restore the new */
cli
// pushq %gs
// pushq %fs
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %r11
pushq %r10
pushq %r9
pushq %r8
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbx
pushq %rax
xor %rax, %rax
mov %ds, %ax // save ds
pushq %rax
mov %es, %ax // save es
pushq %rax
movq %rsp, 8(%rdi)
movq 8(%rsi), %rsp
popq %rax
mov %ax, %es // restore es
popq %rax
mov %ax, %ds // restore ds
popq %rax
popq %rbx
popq %rcx
popq %rdx
popq %rsi
popq %rdi
popq %r8
popq %r9
popq %r10
popq %r11
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
// popq %fs
// popq %gs
// movq %rsp, %gs:CPU_ACTIVE_STACK
// movq %rbp, %gs:CPU_KERNEL_STACK
iretq
Entry(x86_64_context_switch_first)
movq %rsp, %gs:CPU_ACTIVE_STACK
movq %rbp, %gs:CPU_KERNEL_STACK
movq 8(%rdi), %rsp
swapgs
cli
popq %rax
mov %ax, %es
popq %rax
mov %ax, %ds
popq %rax
popq %rbx
popq %rcx
popq %rdx
popq %rsi
popq %rdi
popq %r8
popq %r9
popq %r10
popq %r11
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
// popq %fs
// popq %gs
// movq %rsp, %gs:CPU_ACTIVE_STACK
// movq %rbp, %gs:CPU_KERNEL_STACK
// xorl %ebp, %ebp
// xorl %edi,%edi
// xorq %rbp,%rbp
iretq
/* halt_cpu */
Entry(halt_cpu)
sti
hlt
ret
/* x86_init_wrapper */
Entry(x86_init_wrapper)
xor %rbp, %rbp
movq %rsi, %rsp
callq *%rdi
/*
* uint64_t tmrCvt(uint64_t time, // %rdi
* uint64_t conversion) // %rsi
*
*/
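/* Returns (time * conversion) >> 32, i.e. scales time by a 32.32 fixed-point
 * conversion factor; a conversion of exactly 1 is special-cased to return
 * time unchanged. */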
ENTRY(tmrCvt)
cmpq $1,%rsi /* check for unity fastpath */
je 1f
movq %rdi,%rax
mulq %rsi /* result is %rdx:%rax */
shrdq $32,%rdx,%rax /* %rdx:%rax >>= 32 */
ret
1:
mov %rdi,%rax
ret
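/*
 * Lock-free read of the rtc_nanotime_t pointed to by %rdi: spin while the
 * generation field is 0 (an update is in progress), compute
 * ns_base + (((tsc - tsc_base) << shift) * scale >> 32), and retry if the
 * generation changed while reading.
 */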
ENTRY(_rtc_nanotime_read)
0:
movl PAL_RTC_GENERATION(%rdi),%esi
test %esi,%esi /* info updating? */
jz 0b /* - wait if so */
lfence
rdtsc
lfence
shlq $32,%rdx
movl PAL_RTC_SHIFT(%rdi),%ecx
orq %rdx,%rax /* %rax := tsc */
subq PAL_RTC_TSC_BASE(%rdi),%rax /* tsc - tsc_base */
shlq %cl,%rax
movl PAL_RTC_SCALE(%rdi),%ecx
mulq %rcx /* delta * scale */
shrdq $32,%rdx,%rax /* %rdx:%rax >>= 32 */
addq PAL_RTC_NS_BASE(%rdi),%rax /* add ns_base */
cmpl PAL_RTC_GENERATION(%rdi),%esi /* repeat if changed */
jne 0b
ret
/*
* void pal_rtc_nanotime_store(
* uint64_t tsc, // %rdi
* uint64_t nsec, // %rsi
* uint32_t scale, // %rdx
* uint32_t shift, // %rcx
* rtc_nanotime_t *dst); // %r8
*/
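/* Publishes new timebase values: generation is set to 0 while the fields are
 * updated (readers spin on 0), then bumped to the next non-zero value. */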
ENTRY(pal_rtc_nanotime_store)
movl PAL_RTC_GENERATION(%r8),%eax /* get current generation */
movl $0,PAL_RTC_GENERATION(%r8) /* flag data as being updated */
movq %rdi,PAL_RTC_TSC_BASE(%r8)
movq %rsi,PAL_RTC_NS_BASE(%r8)
movl %edx,PAL_RTC_SCALE(%r8)
movl %ecx,PAL_RTC_SHIFT(%r8)
incl %eax /* next generation */
jnz 1f
incl %eax /* skip 0, which is a flag */
1: movl %eax,PAL_RTC_GENERATION(%r8) /* update generation */
ret
/* x86_triplefault */
Entry(x86_triplefault)
//lidt idt_zero
//int $3 /* Just to interrupt, CPU won't like it */
Entry(do_mfence)
mfence
ret
#ifdef __ELF__
.data
#else
.section __DATA,__data
#endif
idt_zero:
.long 0, 0
#ifdef __ELF__
.text
#else
.section __TEXT,__text
#endif
|
admkopec/BetaOS
| 43,110
|
Kernel/Kernel/x86_64/idt64.S
|
//
// idt64.s
// BetaOS
//
// Created by Adam Kopeć on 6/24/16.
// Copyright © 2016-2018 Adam Kopeć. All rights reserved.
//
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif
#include <i386/asm.h>
#include <i386/thread_status.h>
#include <i386/pmap.h>
#include <i386/seg.h>
#include <i386/trap.h>
#include <i386/proc_reg.h>
#include <i386/vm_param.h>
#include <i386/eflags.h>
#include <i386/syscall.h>
/*
* Handlers:
*/
#define HNDL_ALLINTRS EXT(hndl_allintrs)
#define HNDL_ALLTRAPS EXT(hndl_alltraps)
#define HNDL_SYSENTER EXT(hndl_sysenter)
#define HNDL_SYSCALL EXT(hndl_syscall)
#define HNDL_UNIX_SCALL EXT(hndl_unix_scall)
#define HNDL_MACH_SCALL EXT(hndl_mach_scall)
#define HNDL_MDEP_SCALL EXT(hndl_mdep_scall)
#define HNDL_DOUBLE_FAULT EXT(hndl_double_fault)
#define HNDL_MACHINE_CHECK EXT(hndl_machine_check)
// Not Working (leaq doesn't callq func)
#define PUSH_FUNCTION(func) \
sub $8, %rsp ;\
push %rax ;\
leaq func(%rip), %rax ;\
movq %rax, 8(%rsp) ;\
pop %rax ;\
// Simple jmp is working
//#define PUSH_FUNCTION(func) \
// jmp func
/* The wrapper for all non-special traps/interrupts */
/* Everything up to PUSH_FUNCTION is just to output
 * the interrupt number to the postcode display
 */
#define IDT_ENTRY_WRAPPER(n, f) \
PUSH_FUNCTION(f) ;\
pushq $(n) ;\
jmp L_dispatch
/* A trap that comes with an error code already on the stack */
#define TRAP_ERR(n, f) \
Entry(f) ;\
IDT_ENTRY_WRAPPER(n, HNDL_ALLTRAPS)
/* A normal trap */
#define TRAP(n, f) \
Entry(f) ;\
pushq $0 ;\
IDT_ENTRY_WRAPPER(n, HNDL_ALLTRAPS)
#define USER_TRAP TRAP
/* An interrupt */
#define INTERRUPT(n) \
Entry(_intr_ ## n) ;\
pushq $0 ;\
IDT_ENTRY_WRAPPER(n, HNDL_ALLINTRS)
/* A trap with a special-case handler, hence we don't need to define anything */
#define TRAP_SPC(n, f)
#define TRAP_IST1(n, f)
#define TRAP_IST2(n, f)
#define USER_TRAP_SPC(n, f)
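/*
 * For illustration only (hypothetical stub name; the real list comes from
 * idt_table.h below): TRAP(0x03, my_int3_stub) expands to roughly
 *
 *   Entry(my_int3_stub)
 *       pushq   $0                      // fake error code
 *       PUSH_FUNCTION(HNDL_ALLTRAPS)    // handler address -> ISF64_TRAPFN
 *       pushq   $(0x03)                 // trap number -> ISF64_TRAPNO
 *       jmp     L_dispatch
 */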
/* Generate all the stubs */
#include "idt_table.h"
/* Temporary defines, maybe not so temporary after all */
#define ISF64_TRAPNO 0x0 // offsetof(x86_64_intr_stack_frame_t, trapno)
#define ISF64_TRAPFN 0x8 // offsetof(x86_64_intr_stack_frame_t, trapfn)
#define ISF64_ERR 0x10 // offsetof(x86_64_intr_stack_frame_t, err)
#define ISF64_RIP 0x18 // offsetof(x86_64_intr_stack_frame_t, rip)
#define ISF64_CS 0x20 // offsetof(x86_64_intr_stack_frame_t, cs)
#define ISF64_RFLAGS 0x28 // offsetof(x86_64_intr_stack_frame_t, rflags)
#define ISF64_RSP 0x30 // offsetof(x86_64_intr_stack_frame_t, rsp)
#define ISF64_SS 0x38 // offsetof(x86_64_intr_stack_frame_t, ss)
#define R64_RDI 0x10 // offsetof(x86_saved_state_t, ss_64.rdi)
#define R64_RSI 0x18 // offsetof(x86_saved_state_t, ss_64.rsi)
#define R64_RDX 0x20 // offsetof(x86_saved_state_t, ss_64.rdx)
#define R64_R10 0x28 // offsetof(x86_saved_state_t, ss_64.r10)
#define R64_R8 0x30 // offsetof(x86_saved_state_t, ss_64.r8)
#define R64_R9 0x38 // offsetof(x86_saved_state_t, ss_64.r9)
#define R64_CR2 0x40 // offsetof(x86_saved_state_t, ss_64.cr2)
#define R64_R15 0x48 // offsetof(x86_saved_state_t, ss_64.r15)
#define R64_R14 0x50 // offsetof(x86_saved_state_t, ss_64.r14)
#define R64_R13 0x58 // offsetof(x86_saved_state_t, ss_64.r13)
#define R64_R12 0x60 // offsetof(x86_saved_state_t, ss_64.r12)
#define R64_R11 0x68 // offsetof(x86_saved_state_t, ss_64.r11)
#define R64_RBP 0x70 // offsetof(x86_saved_state_t, ss_64.rbp)
#define R64_RBX 0x78 // offsetof(x86_saved_state_t, ss_64.rbx)
#define R64_RCX 0x80 // offsetof(x86_saved_state_t, ss_64.rcx)
#define R64_RAX 0x88 // offsetof(x86_saved_state_t, ss_64.rax)
#define R64_GS 0x90 // offsetof(x86_saved_state_t, ss_64.gs)
#define R64_FS 0x94 // offsetof(x86_saved_state_t, ss_64.fs)
#define ISS64_OFFSET 0xA0 // offsetof(x86_saved_state_t, ss_64.isf)
#define R64_TRAPNO 0xA0 // offsetof(x86_saved_state_t, ss_64.isf.trapno)
#define R64_TRAPFN 0xA8 // offsetof(x86_saved_state_t, ss_64.isf.trapfn)
#define R64_ERR 0xB0 // offsetof(x86_saved_state_t, ss_64.isf.err)
#define R64_RIP 0xB8 // offsetof(x86_saved_state_t, ss_64.isf.rip)
#define R64_CS 0xC0 // offsetof(x86_saved_state_t, ss_64.isf.cs)
#define R64_RFLAGS 0xC8 // offsetof(x86_saved_state_t, ss_64.isf.rflags)
#define R64_RSP 0xD0 // offsetof(x86_saved_state_t, ss_64.isf.rsp)
#define R64_SS 0xD8 // offsetof(x86_saved_state_t, ss_64.isf.ss)
#define R32_GS 0x10 // offsetof(x86_saved_state_t, ss_32.gs)
#define R32_FS 0x14 // offsetof(x86_saved_state_t, ss_32.fs)
#define R32_ES 0x18 // offsetof(x86_saved_state_t, ss_32.es)
#define R32_DS 0x1C // offsetof(x86_saved_state_t, ss_32.ds)
#define R32_EDI 0x20 // offsetof(x86_saved_state_t, ss_32.edi)
#define R32_ESI 0x24 // offsetof(x86_saved_state_t, ss_32.esi)
#define R32_EBP 0x28 // offsetof(x86_saved_state_t, ss_32.ebp)
#define R32_CR2 0x2C // offsetof(x86_saved_state_t, ss_32.cr2)
#define R32_EBX 0x30 // offsetof(x86_saved_state_t, ss_32.ebx)
#define R32_EDX 0x34 // offsetof(x86_saved_state_t, ss_32.edx)
#define R32_ECX 0x38 // offsetof(x86_saved_state_t, ss_32.ecx)
#define R32_EAX 0x3C // offsetof(x86_saved_state_t, ss_32.eax)
#define R32_TRAPNO 0x40 // offsetof(x86_saved_state_t, ss_32.trapno)
#define R32_CPU 0x42 // offsetof(x86_saved_state_t, ss_32.cpu)
#define R32_ERR 0x44 // offsetof(x86_saved_state_t, ss_32.err)
#define R32_EIP 0x48 // offsetof(x86_saved_state_t, ss_32.eip)
#define R32_CS 0x4C // offsetof(x86_saved_state_t, ss_32.cs)
#define R32_EFLAGS 0x50 // offsetof(x86_saved_state_t, ss_32.efl)
#define R32_UESP 0x54 // offsetof(x86_saved_state_t, ss_32.uesp)
#define R32_SS 0x58 // offsetof(x86_saved_state_t, ss_32.ss)
#define CPU_TASK_MAP 0x10C // offsetof(cpu_data_t, cpu_task_map)
#define CPU_TASK_CR3 0x110 // offsetof(cpu_data_t, cpu_task_cr3)
#define CPU_KERNEL_CR3 0x118 // offsetof(cpu_data_t, cpu_kernel_cr3)
#define CPU_ACTIVE_CR3 0x100 // offsetof(cpu_data_t, cpu_active_cr3)
#define CPU_TLB_INVALID 0x108 // offsetof(cpu_data_t, cpu_tlb_invalid)
#define CPU_TLB_INVALID_LOCAL 0x108 // offsetof(cpu_data_t, cpu_tlb_invalid_local)
#define CPU_KERNEL_STACK 0x20 // offsetof(cpu_data_t, cpu_kernel_stack)
#define CPU_DR7 0x1628 // offsetof(cpu_data_t, cpu_dr7)
#define CPU_INT_STACK_TOP 0x28 // offsetof(cpu_data_t, cpu_int_stack_top)
#define CPU_INT_STATE 0x10 // offsetof(cpu_data_t, cpu_int_state)
#define CPU_UBER_ISF 0x120 // offsetof(cpu_data_t, cpu_uber.cu_isf)
#define CPU_UBER_TMP 0x128 // offsetof(cpu_data_t, cpu_uber.cu_tmp)
#define SS_FLAVOR 0x0 // offsetof(x86_saved_state_t, flavor)
#define hwIntCnt 0x224 // offsetof(cpu_data_t, cpu_hwIntCnt)
#define SS_64 x86_SAVED_STATE64
#define SS_32 x86_SAVED_STATE32
#define TASK_MAP_32BIT 0
#define TASK_MAP_64BIT 1
/*
* Common dispatch point.
* Determine what mode has been interrupted and save state accordingly.
* Here with:
* rsp from user-space: interrupt state in PCB, or
* from kernel-space: interrupt state in kernel or interrupt stack
* GSBASE from user-space: pthread area, or
* from kernel-space: cpu_data
*/
L_dispatch:
cmpl $(KERNEL64_CS), ISF64_CS(%rsp)
je L_dispatch_kernel
swapgs
L_dispatch_user:
cmpl $(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
je L_dispatch_U32 // 32-bit user task
L_dispatch_U64:
subq $(ISS64_OFFSET), %rsp
mov %r15, R64_R15(%rsp)
mov %rsp, %r15
mov %gs:CPU_KERNEL_STACK, %rsp
jmp L_dispatch_64bit
L_dispatch_kernel:
subq $(ISS64_OFFSET), %rsp
mov %r15, R64_R15(%rsp)
mov %rsp, %r15
/*
* Here for 64-bit user task or kernel
*/
L_dispatch_64bit:
movl $(SS_64), SS_FLAVOR(%r15)
/*
 * Save segment regs - for completeness since they're not used.
*/
movl %fs, R64_FS(%r15)
movl %gs, R64_GS(%r15)
/* Save general-purpose registers */
mov %rax, R64_RAX(%r15)
mov %rbx, R64_RBX(%r15)
mov %rcx, R64_RCX(%r15)
mov %rdx, R64_RDX(%r15)
mov %rbp, R64_RBP(%r15)
mov %rdi, R64_RDI(%r15)
mov %rsi, R64_RSI(%r15)
mov %r8, R64_R8(%r15)
mov %r9, R64_R9(%r15)
mov %r10, R64_R10(%r15)
mov %r11, R64_R11(%r15)
mov %r12, R64_R12(%r15)
mov %r13, R64_R13(%r15)
mov %r14, R64_R14(%r15)
/* cr2 is significant only for page-faults */
mov %cr2, %rax
mov %rax, R64_CR2(%r15)
mov R64_TRAPNO(%r15), %ebx /* %ebx := trapno for later */
mov R64_TRAPFN(%r15), %rdx /* %rdx := trapfn for later */
mov R64_CS(%r15), %esi /* %esi := cs for later */
jmp L_common_dispatch
L_64bit_entry_reject:
/*
* Here for a 64-bit user attempting an invalid kernel entry.
*/
pushq %rax
leaq HNDL_ALLTRAPS(%rip), %rax
movq %rax, ISF64_TRAPFN+8(%rsp)
popq %rax
movq $(T_INVALID_OPCODE), ISF64_TRAPNO(%rsp)
jmp L_dispatch_U64
L_32bit_entry_check:
/*
* Check we're not a confused 64-bit user.
*/
cmpl $(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
jne L_64bit_entry_reject
/* fall through to 32-bit handler: */
L_dispatch_U32: /* 32-bit user task */
subq $(ISS64_OFFSET), %rsp
mov %rsp, %r15
mov %gs:CPU_KERNEL_STACK, %rsp
movl $(SS_32), SS_FLAVOR(%r15)
/*
* Save segment regs
*/
movl %ds, R32_DS(%r15)
movl %es, R32_ES(%r15)
movl %fs, R32_FS(%r15)
movl %gs, R32_GS(%r15)
/*
* Save general 32-bit registers
*/
mov %eax, R32_EAX(%r15)
mov %ebx, R32_EBX(%r15)
mov %ecx, R32_ECX(%r15)
mov %edx, R32_EDX(%r15)
mov %ebp, R32_EBP(%r15)
mov %esi, R32_ESI(%r15)
mov %edi, R32_EDI(%r15)
/* Unconditionally save cr2; only meaningful on page faults */
mov %cr2, %rax
mov %eax, R32_CR2(%r15)
/*
* Copy registers already saved in the machine state
* (in the interrupt stack frame) into the compat save area.
*/
mov R64_RIP(%r15), %eax
mov %eax, R32_EIP(%r15)
mov R64_RFLAGS(%r15), %eax
mov %eax, R32_EFLAGS(%r15)
mov R64_RSP(%r15), %eax
mov %eax, R32_UESP(%r15)
mov R64_SS(%r15), %eax
mov %eax, R32_SS(%r15)
L_dispatch_U32_after_fault:
mov R64_CS(%r15), %esi /* %esi := %cs for later */
mov %esi, R32_CS(%r15)
mov R64_TRAPNO(%r15), %ebx /* %ebx := trapno for later */
mov %ebx, R32_TRAPNO(%r15)
mov R64_ERR(%r15), %eax
mov %eax, R32_ERR(%r15)
mov R64_TRAPFN(%r15), %rdx /* %rdx := trapfn for later */
L_common_dispatch:
jmp *%rdx // Workaround: dispatch immediately; something below breaks on real hardware (a VM runs fine without this skip)
cld /* Ensure the direction flag is clear in the kernel */
cmpl $0, EXT(pmap_smap_enabled)(%rip)
je 1f
clac /* Clear EFLAGS.AC if SMAP is present/enabled */
1:
/*
* On entering the kernel, we don't need to switch cr3
* because the kernel shares the user's address space.
* But we mark the kernel's cr3 as "active".
* If, however, the invalid cr3 flag is set, we have to flush tlbs
* since the kernel's mapping was changed while we were in userspace.
*
* But: if global no_shared_cr3 is true we do switch to the kernel's cr3
* so that illicit accesses to userspace can be trapped.
*/
mov %gs:CPU_KERNEL_CR3, %rcx
mov %rcx, %gs:CPU_ACTIVE_CR3
test $3, %esi /* user/kernel? */
jz 2f /* skip cr3 reload from kernel */
xor %rbp, %rbp
cmpl $0, EXT(no_shared_cr3)(%rip)
je 2f
mov %rcx, %cr3 /* load kernel cr3 */
jmp 4f /* and skip tlb flush test */
2:
mov %gs:CPU_ACTIVE_CR3+4, %rcx
shr $32, %rcx
testl %ecx, %ecx
jz 4f
testl $(1<<16), %ecx /* Global? */
jz 3f
movl $0, %gs:CPU_TLB_INVALID
mov %cr4, %rcx /* RMWW CR4, for lack of an alternative*/
and $(~CR4_PGE), %rcx
mov %rcx, %cr4
or $(CR4_PGE), %rcx
mov %rcx, %cr4
jmp 4f
3:
movb $0, %gs:CPU_TLB_INVALID_LOCAL
mov %cr3, %rcx
mov %rcx, %cr3
4:
//mov %gs:CPU_ACTIVE_THREAD, %rcx /* Get the active thread */
//movl $-1, TH_IOTIER_OVERRIDE(%rcx) /* Reset IO tier override to -1 before handling trap */
//cmpq $0, TH_PCB_IDS(%rcx) /* Is there a debug register state? */
//je 5f
//xor %ecx, %ecx /* If so, reset DR7 (the control) */
//mov %rcx, %dr7
5:
incl %gs:hwIntCnt(,%ebx,4) // Bump the trap/intr count
/* Dispatch the designated handler */
jmp *%rdx
/*
* Control is passed here to return to user.
*/
Entry(return_to_user)
//TIME_TRAP_UEXIT RTC_ASM.h
Entry(ret_to_user)
//mov %gs:CPU_ACTIVE_THREAD, %rdx
//movq TH_PCB_IDS(%rdx),%rax /* Obtain this thread's debug state */
test %rax, %rax /* Is there a debug register context? */
je 2f /* branch if not */
cmpl $(TASK_MAP_32BIT), %gs:CPU_TASK_MAP /* Are we a 32-bit task? */
jne 1f
//movl DS_DR0(%rax), %ecx /* If so, load the 32 bit DRs */
movq %rcx, %dr0
//movl DS_DR1(%rax), %ecx
movq %rcx, %dr1
//movl DS_DR2(%rax), %ecx
movq %rcx, %dr2
//movl DS_DR3(%rax), %ecx
movq %rcx, %dr3
//movl DS_DR7(%rax), %ecx
movq %rcx, %gs:CPU_DR7
jmp 2f
1:
//mov DS64_DR0(%rax), %rcx /* Load the full width DRs*/
mov %rcx, %dr0
//mov DS64_DR1(%rax), %rcx
mov %rcx, %dr1
//mov DS64_DR2(%rax), %rcx
mov %rcx, %dr2
//mov DS64_DR3(%rax), %rcx
mov %rcx, %dr3
//mov DS64_DR7(%rax), %rcx
mov %rcx, %gs:CPU_DR7
2:
/*
* On exiting the kernel there's no need to switch cr3 since we're
* already running in the user's address space which includes the
* kernel. Nevertheless, we now mark the task's cr3 as active.
* But, if no_shared_cr3 is set, we do need to switch cr3 at this point.
*/
mov %gs:CPU_TASK_CR3, %rcx
mov %rcx, %gs:CPU_ACTIVE_CR3
movl EXT(no_shared_cr3)(%rip), %eax
test %eax, %eax /* -no_shared_cr3 */
jz 3f
mov %rcx, %cr3
3:
mov %gs:CPU_DR7, %rax /* Is there a debug control register?*/
cmp $0, %rax
je 4f
mov %rax, %dr7 /* Set DR7 */
movq $0, %gs:CPU_DR7
4:
cmpl $(SS_64), SS_FLAVOR(%r15) /* 64-bit state? */
je L_64bit_return
L_32bit_return:
/*
* Restore registers into the machine state for iret.
 * Here on the fault stack with the PCB address in R15.
*/
movl R32_EIP(%r15), %eax
movl %eax, R64_RIP(%r15)
movl R32_EFLAGS(%r15), %eax
movl %eax, R64_RFLAGS(%r15)
movl R32_CS(%r15), %eax
movl %eax, R64_CS(%r15)
movl R32_UESP(%r15), %eax
movl %eax, R64_RSP(%r15)
movl R32_SS(%r15), %eax
movl %eax, R64_SS(%r15)
/*
* Restore general 32-bit registers
*/
movl R32_EAX(%r15), %eax
movl R32_EBX(%r15), %ebx
movl R32_ECX(%r15), %ecx
movl R32_EDX(%r15), %edx
movl R32_EBP(%r15), %ebp
movl R32_ESI(%r15), %esi
movl R32_EDI(%r15), %edi
/*
* Restore segment registers. A segment exception taken here will
* push state on the IST1 stack and will not affect the "PCB stack".
*/
mov %r15, %rsp /* Set the PCB as the stack */
swapgs
EXT(ret32_set_ds):
movl R32_DS(%rsp), %ds
EXT(ret32_set_es):
movl R32_ES(%rsp), %es
EXT(ret32_set_fs):
movl R32_FS(%rsp), %fs
EXT(ret32_set_gs):
movl R32_GS(%rsp), %gs
/* pop compat frame + trapno, trapfn and error */
add $(ISS64_OFFSET)+8+8+8, %rsp
cmpl $(SYSENTER_CS),ISF64_CS-8-8-8(%rsp) /* test for fast entry/exit */
je L_fast_exit
EXT(ret32_iret):
iretq /* return from interrupt */
L_fast_exit:
pop %rdx /* user return eip */
pop %rcx /* pop and toss cs */
andl $(~EFL_IF), (%rsp) /* clear the interrupt-enable flag; sti below */
popf /* flags - carry denotes failure */
pop %rcx /* user return esp */
sti /* interrupts enabled after sysexit */
sysexitl /* 32-bit sysexit */
ret_to_kernel:
L_64bit_return:
/*
* Restore general 64-bit registers.
* Here on fault stack and PCB address in R15.
*/
mov R64_R14(%r15), %r14
mov R64_R13(%r15), %r13
mov R64_R12(%r15), %r12
mov R64_R11(%r15), %r11
mov R64_R10(%r15), %r10
mov R64_R9(%r15), %r9
mov R64_R8(%r15), %r8
mov R64_RSI(%r15), %rsi
mov R64_RDI(%r15), %rdi
mov R64_RBP(%r15), %rbp
mov R64_RDX(%r15), %rdx
mov R64_RCX(%r15), %rcx
mov R64_RBX(%r15), %rbx
mov R64_RAX(%r15), %rax
/*
* We must swap GS base if we're returning to user-space,
* or we're returning from an NMI that occurred in a trampoline
* before the user GS had been swapped. In the latter case, the NMI
* handler will have flagged the high-order 32-bits of the CS.
*/
cmpq $(KERNEL64_CS), R64_CS(%r15)
jz 1f
swapgs
1:
mov R64_R15(%r15), %rsp
xchg %r15, %rsp
add $(ISS64_OFFSET)+24, %rsp /* pop saved state */
/* + trapno/trapfn/error */
cmpl $(SYSCALL_CS),ISF64_CS-24(%rsp) /* test for fast entry/exit */
je L_sysret
.globl _dump_iretq
EXT(ret64_iret):
iretq /* return from interrupt */
L_sysret:
/*
* Here to load rcx/r11/rsp and perform the sysret back to user-space.
* rcx user rip
* r11 user rflags
* rsp user stack pointer
*/
mov ISF64_RIP-24(%rsp), %rcx
mov ISF64_RFLAGS-24(%rsp), %r11
mov ISF64_RSP-24(%rsp), %rsp
sysretq /* return from sys call */
/*
* System call handlers.
* These are entered via a syscall interrupt. The system call number in %rax
* is saved to the error code slot in the stack frame. We then branch to the
* common state saving code.
*/
Entry(idt64_unix_scall)
swapgs /* switch to kernel gs (cpu_data) */
pushq %rax /* save system call number */
PUSH_FUNCTION(HNDL_UNIX_SCALL)
pushq $(UNIX_INT)
jmp L_32bit_entry_check
Entry(idt64_mach_scall)
swapgs /* switch to kernel gs (cpu_data) */
pushq %rax /* save system call number */
PUSH_FUNCTION(HNDL_MACH_SCALL)
pushq $(MACH_INT)
jmp L_32bit_entry_check
Entry(idt64_mdep_scall)
swapgs /* switch to kernel gs (cpu_data) */
pushq %rax /* save system call number */
PUSH_FUNCTION(HNDL_MDEP_SCALL)
pushq $(MACHDEP_INT)
jmp L_32bit_entry_check
/* Programmed into MSR_IA32_LSTAR by mp_desc.c */
Entry(hi64_syscall)
Entry(idt64_syscall)
L_syscall_continue:
swapgs /* Kapow! get per-cpu data area */
mov %rsp, %gs:CPU_UBER_TMP /* save user stack */
mov %gs:CPU_UBER_ISF, %rsp /* switch stack to pcb */
/*
* Save values in the ISF frame in the PCB
* to cons up the saved machine state.
*/
movl $(USER_DS), ISF64_SS(%rsp)
movl $(SYSCALL_CS), ISF64_CS(%rsp) /* cs - a pseudo-segment */
mov %r11, ISF64_RFLAGS(%rsp) /* rflags */
mov %rcx, ISF64_RIP(%rsp) /* rip */
mov %gs:CPU_UBER_TMP, %rcx
mov %rcx, ISF64_RSP(%rsp) /* user stack */
mov %rax, ISF64_ERR(%rsp) /* err/rax - syscall code */
movq $(T_SYSCALL), ISF64_TRAPNO(%rsp) /* trapno */
leaq HNDL_SYSCALL(%rip), %r11
movq %r11, ISF64_TRAPFN(%rsp)
mov ISF64_RFLAGS(%rsp), %r11 /* Avoid leak, restore R11 */
jmp L_dispatch_U64 /* this can only be 64-bit */
/*
* sysenter entry point
* Requires user code to set up:
* edx: user instruction pointer (return address)
* ecx: user stack pointer
* on which is pushed stub ret addr and saved ebx
* Return to user-space is made using sysexit.
* Note: sysenter/sysexit cannot be used for calls returning a value in edx,
* or requiring ecx to be preserved.
*/
Entry(hi64_sysenter)
Entry(idt64_sysenter)
movq (%rsp), %rsp
/*
* Push values on to the PCB stack
* to cons up the saved machine state.
*/
push $(USER_DS) /* ss */
push %rcx /* uesp */
pushf /* flags */
/*
* Clear, among others, the Nested Task (NT) flags bit;
* this is zeroed by INT, but not by SYSENTER.
*/
push $0
popf
push $(SYSENTER_CS) /* cs */
L_sysenter_continue:
swapgs /* switch to kernel gs (cpu_data) */
push %rdx /* eip */
push %rax /* err/eax - syscall code */
PUSH_FUNCTION(HNDL_SYSENTER)
pushq $(T_SYSENTER)
orl $(EFL_IF), ISF64_RFLAGS(%rsp)
jmp L_32bit_entry_check
Entry(idt64_page_fault)
PUSH_FUNCTION(HNDL_ALLTRAPS)
push $(T_PAGE_FAULT)
push %rax /* save %rax temporarily */
testb $3, 8+ISF64_CS(%rsp) /* was trap from kernel? */
jz L_kernel_trap /* - yes, handle with care */
pop %rax /* restore %rax, swapgs, and continue */
swapgs
jmp L_dispatch_user
/*
* Debug trap. Check for single-stepping across system call into
* kernel. If this is the case, taking the debug trap has turned
* off single-stepping - save the flags register with the trace
* bit set.
*/
Entry(idt64_debug)
push $0 /* error code */
PUSH_FUNCTION(HNDL_ALLTRAPS)
pushq $(T_DEBUG)
testb $3, ISF64_CS(%rsp)
jnz L_dispatch
/*
* trap came from kernel mode
*/
push %rax /* save %rax temporarily */
lea EXT(idt64_sysenter)(%rip), %rax
cmp %rax, ISF64_RIP+8(%rsp)
pop %rax
jne L_dispatch
/*
* Interrupt stack frame has been pushed on the temporary stack.
* We have to switch to pcb stack and patch up the saved state.
*/
mov %rcx, ISF64_ERR(%rsp) /* save %rcx in error slot */
mov ISF64_SS+8(%rsp), %rcx /* top of temp stack -> pcb stack */
xchg %rcx,%rsp /* switch to pcb stack */
push $(USER_DS) /* ss */
push ISF64_ERR(%rcx) /* saved %rcx into rsp slot */
push ISF64_RFLAGS(%rcx) /* rflags */
push $(SYSENTER_TF_CS) /* cs - not SYSENTER_CS for iret path */
mov ISF64_ERR(%rcx),%rcx /* restore %rcx */
jmp L_sysenter_continue /* continue sysenter entry */
Entry(idt64_double_fault)
PUSH_FUNCTION(HNDL_DOUBLE_FAULT)
pushq $(T_DOUBLE_FAULT)
jmp L_dispatch_kernel
/*
* For GP/NP/SS faults, we use the IST1 stack.
* For faults from user-space, we have to copy the machine state to the
* PCB stack and then dispatch as normal.
* For faults in kernel-space, we need to scrub for kernel exit faults and
* treat these as user-space faults. But for all other kernel-space faults
* we continue to run on the IST1 stack and we dispatch to handle the fault
* as fatal.
*/
Entry(idt64_gen_prot)
PUSH_FUNCTION(HNDL_ALLTRAPS)
pushq $(T_GENERAL_PROTECTION)
jmp trap_check_kernel_exit /* check for kernel exit sequence */
Entry(idt64_stack_fault)
PUSH_FUNCTION(HNDL_ALLTRAPS)
pushq $(T_STACK_FAULT)
jmp trap_check_kernel_exit /* check for kernel exit sequence */
Entry(idt64_segnp)
PUSH_FUNCTION(HNDL_ALLTRAPS)
pushq $(T_SEGMENT_NOT_PRESENT)
/* indicate fault type */
trap_check_kernel_exit:
testb $3,ISF64_CS(%rsp)
jz L_kernel_gpf
/* Here for fault from user-space. Copy interrupt state to PCB. */
swapgs
push %rax
mov %rcx, %gs:CPU_UBER_TMP /* save user RCX */
mov %gs:CPU_UBER_ISF, %rcx /* PCB stack addr */
mov ISF64_SS+8(%rsp), %rax
mov %rax, ISF64_SS(%rcx)
mov ISF64_RSP+8(%rsp), %rax
mov %rax, ISF64_RSP(%rcx)
mov ISF64_RFLAGS+8(%rsp), %rax
mov %rax, ISF64_RFLAGS(%rcx)
mov ISF64_CS+8(%rsp), %rax
mov %rax, ISF64_CS(%rcx)
mov ISF64_RIP+8(%rsp), %rax
mov %rax, ISF64_RIP(%rcx)
mov ISF64_ERR+8(%rsp), %rax
mov %rax, ISF64_ERR(%rcx)
mov ISF64_TRAPFN+8(%rsp), %rax
mov %rax, ISF64_TRAPFN(%rcx)
mov ISF64_TRAPNO+8(%rsp), %rax
mov %rax, ISF64_TRAPNO(%rcx)
pop %rax
mov %gs:CPU_UBER_TMP, %rsp /* user RCX into RSP */
xchg %rcx, %rsp /* to PCB stack with user RCX */
jmp L_dispatch_user
L_kernel_gpf:
/* Here for GPF from kernel_space. Check for recoverable cases. */
push %rax
leaq EXT(ret32_iret)(%rip), %rax
cmp %rax, 8+ISF64_RIP(%rsp)
je L_fault_iret
leaq EXT(ret64_iret)(%rip), %rax
cmp %rax, 8+ISF64_RIP(%rsp)
je L_fault_iret
leaq EXT(ret32_set_ds)(%rip), %rax
cmp %rax, 8+ISF64_RIP(%rsp)
je L_32bit_fault_set_seg
leaq EXT(ret32_set_es)(%rip), %rax
cmp %rax, 8+ISF64_RIP(%rsp)
je L_32bit_fault_set_seg
leaq EXT(ret32_set_fs)(%rip), %rax
cmp %rax, 8+ISF64_RIP(%rsp)
je L_32bit_fault_set_seg
leaq EXT(ret32_set_gs)(%rip), %rax
cmp %rax, 8+ISF64_RIP(%rsp)
je L_32bit_fault_set_seg
/* Fall through */
L_kernel_trap:
/*
* Here after taking an unexpected trap from kernel mode - perhaps
* while running in the trampolines hereabouts.
* Note: %rax has been pushed on stack.
* Make sure we're not on the PCB stack, if so move to the kernel stack.
* This is likely a fatal condition.
* But first, ensure we have the kernel gs base active...
*/
push %rcx
push %rdx
mov $(MSR_IA32_GS_BASE), %ecx
rdmsr /* read kernel gsbase */
test $0x80000000, %edx /* test MSB of address */
jne 1f
swapgs /* so swap */
1:
pop %rdx
pop %rcx
movq %gs:CPU_UBER_ISF, %rax /* PCB stack addr */
subq %rsp, %rax
cmpq $(PAGE_SIZE), %rax /* current stack in PCB? */
jb 2f /* - yes, deal with it */
pop %rax /* - no, restore %rax */
jmp L_dispatch_kernel
2:
/*
* Here if %rsp is in the PCB
* Copy the interrupt stack frame from PCB stack to kernel stack
*/
movq %gs:CPU_KERNEL_STACK, %rax
xchgq %rax, %rsp
pushq 8+ISF64_SS(%rax)
pushq 8+ISF64_RSP(%rax)
pushq 8+ISF64_RFLAGS(%rax)
pushq 8+ISF64_CS(%rax)
pushq 8+ISF64_RIP(%rax)
pushq 8+ISF64_ERR(%rax)
pushq 8+ISF64_TRAPFN(%rax)
pushq 8+ISF64_TRAPNO(%rax)
movq (%rax), %rax
jmp L_dispatch_kernel
/*
* GP/NP fault on IRET: CS or SS is in error.
* User GSBASE is active.
* On IST1 stack containing:
* (rax saved above, which is immediately popped)
* 0 ISF64_TRAPNO: trap code (NP or GP)
* 8 ISF64_TRAPFN: trap function
* 16 ISF64_ERR: segment number in error (error code)
* 24 ISF64_RIP: kernel RIP
* 32 ISF64_CS: kernel CS
* 40 ISF64_RFLAGS: kernel RFLAGS
* 48 ISF64_RSP: kernel RSP
* 56 ISF64_SS: kernel SS
* On the PCB stack, pointed to by the kernel's RSP is:
* 0 user RIP
* 8 user CS
* 16 user RFLAGS
* 24 user RSP
* 32 user SS
*
* We need to move the kernel's TRAPNO, TRAPFN and ERR to the PCB and handle
* as a user fault with:
* 0 ISF64_TRAPNO: trap code (NP or GP)
* 8 ISF64_TRAPFN: trap function
* 16 ISF64_ERR: segment number in error (error code)
* 24 user RIP
* 32 user CS
* 40 user RFLAGS
* 48 user RSP
* 56 user SS
*/
L_fault_iret:
pop %rax /* recover saved %rax */
mov %rax, ISF64_RIP(%rsp) /* save rax (we don't need the saved rip) */
mov ISF64_RSP(%rsp), %rax
xchg %rax, %rsp /* switch to PCB stack */
push ISF64_ERR(%rax)
push ISF64_TRAPFN(%rax)
push ISF64_TRAPNO(%rax)
mov ISF64_RIP(%rax), %rax /* restore rax */
/* now treat as fault from user */
jmp L_dispatch
/*
* Fault restoring a segment register. All of the saved state is still
* on the stack untouched since we haven't yet moved the stack pointer.
* On IST1 stack containing:
* (rax saved above, which is immediately popped)
* 0 ISF64_TRAPNO: trap code (NP or GP)
* 8 ISF64_TRAPFN: trap function
* 16 ISF64_ERR: segment number in error (error code)
* 24 ISF64_RIP: kernel RIP
* 32 ISF64_CS: kernel CS
* 40 ISF64_RFLAGS: kernel RFLAGS
* 48 ISF64_RSP: kernel RSP
* 56 ISF64_SS: kernel SS
* On the PCB stack, pointed to by the kernel's RSP is:
* 0 user trap code
* 8 user trap function
* 16 user err
* 24 user RIP
* 32 user CS
* 40 user RFLAGS
* 48 user RSP
* 56 user SS
*/
L_32bit_fault_set_seg:
swapgs
pop %rax /* toss saved %rax from stack */
mov ISF64_TRAPNO(%rsp), %rax
mov ISF64_TRAPFN(%rsp), %rcx
mov ISF64_ERR(%rsp), %rdx
mov ISF64_RSP(%rsp), %rsp /* reset stack to saved state */
mov %rax,R64_TRAPNO(%rsp)
mov %rcx,R64_TRAPFN(%rsp)
mov %rdx,R64_ERR(%rsp)
/* now treat as fault from user */
/* except that all the state is */
/* already saved - we just have to */
/* move the trapno and error into */
/* the compatibility frame */
jmp L_dispatch_U32_after_fault
/*
* Fatal exception handlers:
*/
Entry(idt64_db_task_dbl_fault)
PUSH_FUNCTION(HNDL_DOUBLE_FAULT)
pushq $(T_DOUBLE_FAULT)
jmp L_dispatch
Entry(idt64_db_task_stk_fault)
PUSH_FUNCTION(HNDL_DOUBLE_FAULT)
pushq $(T_STACK_FAULT)
jmp L_dispatch
Entry(idt64_mc)
push $(0) /* Error */
PUSH_FUNCTION(HNDL_MACHINE_CHECK)
pushq $(T_MACHINE_CHECK)
jmp L_dispatch
/*
* NMI
* This may or may not be fatal but extreme care is required
 * because it may arrive while control is already in another trampoline.
*
* We get here on IST2 stack which is used for NMIs only.
* We must be aware of the interrupted state:
* - from user-space, we
* - copy state to the PCB and continue;
* - from kernel-space, we
* - copy state to the kernel stack and continue, but
* - check what GSBASE was active, set the kernel base and
* - ensure that the active state is restored when the NMI is dismissed.
*/
Entry(idt64_nmi)
push %rax /* save RAX to ISF64_ERR */
push %rcx /* save RCX to ISF64_TRAPFN */
push %rdx /* save RDX to ISF64_TRAPNO */
testb $3, ISF64_CS(%rsp) /* NMI from user-space? */
je 1f
/* From user-space: copy interrupt state to user PCB */
swapgs
//mov %gs:CPU_UBER_ISF, %rcx /* PCB stack addr */
//add $(ISF64_SIZE), %rcx /* adjust to base of ISF */
//swapgs /* swap back for L_dispatch */
//jmp 4f /* Copy state to PCB */
1:
/*
* From kernel-space:
* Determine whether the kernel or user GS is set.
* Set the kernel and ensure that we'll swap back correctly at IRET.
*/
mov $(MSR_IA32_GS_BASE), %ecx
rdmsr /* read kernel gsbase */
test $0x80000000, %edx /* test MSB of address */
jne 2f
swapgs /* so swap */
movl $1, ISF64_CS+4(%rsp) /* and set flag in CS slot */
2:
/*
* Determine whether we're on the kernel or interrupt stack
* when the NMI hit.
*/
mov ISF64_RSP(%rsp), %rcx
mov %gs:CPU_KERNEL_STACK, %rax
xor %rcx, %rax
//and EXT(kernel_stack_mask)(%rip), %rax
test %rax, %rax /* are we on the kernel stack? */
je 3f /* yes */
mov %gs:CPU_INT_STACK_TOP, %rax
dec %rax /* intr stack top is byte above max */
xor %rcx, %rax
//and EXT(kernel_stack_mask)(%rip), %rax
test %rax, %rax /* are we on the interrupt stack? */
je 3f /* yes */
mov %gs:CPU_KERNEL_STACK, %rcx
3:
/* 16-byte-align kernel/interrupt stack for state push */
and $0xFFFFFFFFFFFFFFF0, %rcx
4:
/*
* Copy state from NMI stack (RSP) to the save area (RCX) which is
* the PCB for user or kernel/interrupt stack from kernel.
* ISF64_ERR(RSP) saved RAX
* ISF64_TRAPFN(RSP) saved RCX
* ISF64_TRAPNO(RSP) saved RDX
*/
xchg %rsp, %rcx /* set for pushes */
push ISF64_SS(%rcx)
push ISF64_RSP(%rcx)
push ISF64_RFLAGS(%rcx)
push ISF64_CS(%rcx)
push ISF64_RIP(%rcx)
push $(0) /* error code 0 */
lea HNDL_ALLINTRS(%rip), %rax
push %rax /* trapfn allintrs */
push $(T_NMI) /* trapno T_NMI */
mov ISF64_ERR(%rcx), %rax
mov ISF64_TRAPNO(%rcx), %rdx
mov ISF64_TRAPFN(%rcx), %rcx
jmp L_dispatch
/* All 'exceptions' enter hndl_alltraps, with:
* r15 x86_saved_state_t address
* rsp kernel stack if user-space, otherwise interrupt or kernel stack
* esi cs at trap
*
* The rest of the state is set up as:
* both rsp and r15 are 16-byte aligned
* interrupts disabled
* direction flag cleared
*/
Entry(hndl_alltraps)
mov %esi, %eax
testb $3, %al
jz trap_from_kernel
//TIME_TRAP_UENTRY
/* Check for active vtimers in the current task */
//mov %gs:CPU_ACTIVE_THREAD, %rcx
//movl $-1, TH_IOTIER_OVERRIDE(%rcx) /* Reset IO tier override to -1 before handling trap/exception */
//mov TH_TASK(%rcx), %rbx
//TASK_VTIMER_CHECK(%rbx, %rcx)
CCALL1(user_trap, %r15) /* call user trap routine */
/* user_trap() unmasks interrupts */
cli /* hold off intrs - critical section */
xorl %ecx, %ecx /* don't check if we're in the PFZ */
Entry(return_from_trap)
//movq %gs:CPU_ACTIVE_THREAD,%r15 /* Get current thread */
//movl $-1, TH_IOTIER_OVERRIDE(%r15) /* Reset IO tier override to -1 before returning to userspace */
//cmpl $0, TH_RWLOCK_COUNT(%r15) /* Check if current thread has pending RW locks held */
//jz 1f
xorq %rbp, %rbp /* clear framepointer */
mov %r15, %rdi /* Set RDI to current thread */
//CCALL(lck_rw_clear_promotions_x86) /* Clear promotions if needed */
1:
//movq TH_PCB_ISS(%r15), %r15 /* PCB stack */
//movl %gs:CPU_PENDING_AST,%eax
testl %eax,%eax
je EXT(return_to_user) /* branch if no AST */
L_return_from_trap_with_ast:
testl %ecx, %ecx /* see if we need to check for an EIP in the PFZ */
je 2f /* no, go handle the AST */
cmpl $(SS_64), SS_FLAVOR(%r15) /* are we a 64-bit task? */
je 1f
/* no... 32-bit user mode */
movl R32_EIP(%r15), %edi
xorq %rbp, %rbp /* clear framepointer */
//CCALL(commpage_is_in_pfz32)
testl %eax, %eax
je 2f /* not in the PFZ... go service AST */
movl %eax, R32_EBX(%r15) /* let the PFZ know we've pended an AST */
jmp EXT(return_to_user)
1:
movq R64_RIP(%r15), %rdi
xorq %rbp, %rbp /* clear framepointer */
//CCALL(commpage_is_in_pfz64)
testl %eax, %eax
je 2f /* not in the PFZ... go service AST */
movl %eax, R64_RBX(%r15) /* let the PFZ know we've pended an AST */
jmp EXT(return_to_user)
2:
sti /* interrupts always enabled on return to user mode */
xor %edi, %edi /* zero %rdi */
xorq %rbp, %rbp /* clear framepointer */
//CCALL(i386_astintr) /* take the AST */
cli
mov %rsp, %r15 /* AST changes stack, saved state */
xorl %ecx, %ecx /* don't check if we're in the PFZ */
jmp EXT(return_from_trap) /* and check again (rare) */
/*
* Trap from kernel mode. No need to switch stacks.
 * Interrupts must be off here; we will restore them to their state at the
 * time of the trap as soon as it is safe to do so without recursing into preemption.
*
*/
trap_from_kernel:
movq %r15, %rdi /* saved state addr */
//pushq R64_RIP(%r15) /* Simulate a CALL from fault point */ /* Page Fault */
pushq %rbp /* Extend framepointer chain */
movq %rsp, %rbp
CCALLWITHSP(kernel_trap) /* to kernel trap routine */
popq %rbp
addq $8, %rsp
mov %rsp, %r15 /* DTrace slides stack/saved-state */
cli
//movl %gs:CPU_PENDING_AST,%eax /* get pending asts */
//testl $(AST_URGENT),%eax /* any urgent preemption? */
//je ret_to_kernel /* no, nothing to do */
cmpl $(T_PREEMPT),R64_TRAPNO(%r15)
je ret_to_kernel /* T_PREEMPT handled in kernel_trap() */
testl $(EFL_IF),R64_RFLAGS(%r15) /* interrupts disabled? */
je ret_to_kernel
//cmpl $0,%gs:CPU_PREEMPTION_LEVEL /* preemption disabled? */
//jne ret_to_kernel
movq %gs:CPU_KERNEL_STACK,%rax
movq %rsp,%rcx
xorq %rax,%rcx
//andq EXT(kernel_stack_mask)(%rip),%rcx
testq %rcx,%rcx /* are we on the kernel stack? */
jne ret_to_kernel /* no, skip it */
//CCALL1(i386_astintr, $1) /* take the AST */
mov %rsp, %r15 /* AST changes stack, saved state */
jmp ret_to_kernel
/*
* All interrupts on all tasks enter here with:
* r15 x86_saved_state_t
* rsp kernel or interrupt stack
* esi cs at trap
*
* both rsp and r15 are 16-byte aligned
* interrupts disabled
* direction flag cleared
*/
Entry(hndl_allintrs)
/*
* test whether already on interrupt stack
*/
movq %gs:CPU_INT_STACK_TOP,%rcx
cmpq %rsp,%rcx
jb 1f
leaq -INTSTACK_SIZE(%rcx),%rdx
cmpq %rsp,%rdx
jb int_from_intstack
1:
xchgq %rcx,%rsp /* switch to interrupt stack */
mov %cr0,%rax /* get cr0 */
orl $(CR0_TS),%eax /* or in TS bit */
mov %rax,%cr0 /* set cr0 */
pushq %rcx /* save pointer to old stack */
pushq %gs:CPU_INT_STATE /* save previous intr state */
movq %r15,%gs:CPU_INT_STATE /* set intr state */
//TIME_INT_ENTRY /* do timing */
/* Check for active vtimers in the current task */
//mov %gs:CPU_ACTIVE_THREAD, %rcx
//mov TH_TASK(%rcx), %rbx
//TASK_VTIMER_CHECK(%rbx, %rcx)
//incl %gs:CPU_PREEMPTION_LEVEL
//incl %gs:CPU_INTERRUPT_LEVEL
CCALL1(interrupt, %r15) /* call generic interrupt routine */
.globl EXT(return_to_iret)
LEXT(return_to_iret) /* (label for kdb_kintr and hardclock) */
//decl %gs:CPU_INTERRUPT_LEVEL
//decl %gs:CPU_PREEMPTION_LEVEL
//TIME_INT_EXIT /* do timing */
popq %gs:CPU_INT_STATE /* reset/clear intr state pointer */
popq %rsp /* switch back to old stack */
//movq %gs:CPU_ACTIVE_THREAD,%rax
//movq TH_PCB_FPS(%rax),%rax /* get pcb's ifps */
cmpq $0,%rax /* Is there a context */
je 1f /* Branch if not */
//movl FP_VALID(%rax),%eax /* Load fp_valid */
cmpl $0,%eax /* Check if valid */
jne 1f /* Branch if valid */
clts /* Clear TS */
jmp 2f
1:
mov %cr0,%rax /* get cr0 */
orl $(CR0_TS),%eax /* or in TS bit */
mov %rax,%cr0 /* set cr0 */
2:
/* Load interrupted code segment into %eax */
movl R32_CS(%r15),%eax /* assume 32-bit state */
cmpl $(SS_64),SS_FLAVOR(%r15)/* 64-bit? */
jne 3f
movl R64_CS(%r15),%eax /* 64-bit user mode */
3:
testb $3,%al /* user mode, */
jnz ast_from_interrupt_user /* go handle potential ASTs */
/*
* we only want to handle preemption requests if
* the interrupt fell in the kernel context
* and preemption isn't disabled
*/
//movl %gs:CPU_PENDING_AST,%eax
//testl $(AST_URGENT),%eax /* any urgent requests? */
//je ret_to_kernel /* no, nothing to do */
//cmpl $0,%gs:CPU_PREEMPTION_LEVEL /* preemption disabled? */
//jne ret_to_kernel /* yes, skip it */
/*
* Take an AST from kernel space. We don't need (and don't want)
* to do as much as the case where the interrupt came from user
* space.
*/
//CCALL1(i386_astintr, $1)
mov %rsp, %r15 /* AST changes stack, saved state */
jmp ret_to_kernel
/*
* nested int - simple path, can't preempt etc on way out
*/
int_from_intstack:
//incl %gs:CPU_PREEMPTION_LEVEL
//incl %gs:CPU_INTERRUPT_LEVEL
//incl %gs:CPU_NESTED_ISTACK
push %gs:CPU_INT_STATE
mov %r15, %gs:CPU_INT_STATE
CCALL1(interrupt, %r15)
pop %gs:CPU_INT_STATE
//decl %gs:CPU_INTERRUPT_LEVEL
//decl %gs:CPU_PREEMPTION_LEVEL
//decl %gs:CPU_NESTED_ISTACK
jmp ret_to_kernel
/*
* Take an AST from an interrupted user
*/
ast_from_interrupt_user:
//movl %gs:CPU_PENDING_AST,%eax
//testl %eax,%eax /* pending ASTs? */
//je EXT(ret_to_user) /* no, nothing to do */
//TIME_TRAP_UENTRY
movl $1, %ecx /* check if we're in the PFZ */
jmp L_return_from_trap_with_ast /* return */
/* Syscall dispatch routines! */
/*
*
* 32bit Tasks
* System call entries via INTR_GATE or sysenter:
*
* r15 x86_saved_state32_t
* rsp kernel stack
*
* both rsp and r15 are 16-byte aligned
* interrupts disabled
* direction flag cleared
*/
Entry(hndl_sysenter)
/*
* We can be here either for a mach syscall or a unix syscall,
* as indicated by the sign of the code:
*/
movl R32_EAX(%r15),%eax
testl %eax,%eax
js EXT(hndl_mach_scall) /* < 0 => mach */
/* > 0 => unix */
Entry(hndl_unix_scall)
//TIME_TRAP_UENTRY
//movq %gs:CPU_ACTIVE_THREAD,%rcx /* get current thread */
//movq TH_TASK(%rcx),%rbx /* point to current task */
//incl TH_SYSCALLS_UNIX(%rcx) /* increment call count */
/* Check for active vtimers in the current task */
//TASK_VTIMER_CHECK(%rbx,%rcx)
sti
//CCALL1(unix_syscall, %r15)
/*
* always returns through thread_exception_return
*/
Entry(hndl_mach_scall)
//TIME_TRAP_UENTRY
//movq %gs:CPU_ACTIVE_THREAD,%rcx /* get current thread */
//movq TH_TASK(%rcx),%rbx /* point to current task */
//incl TH_SYSCALLS_MACH(%rcx) /* increment call count */
/* Check for active vtimers in the current task */
//TASK_VTIMER_CHECK(%rbx,%rcx)
sti
//CCALL1(mach_call_munger, %r15)
/*
* always returns through thread_exception_return
*/
Entry(hndl_mdep_scall)
//TIME_TRAP_UENTRY
/* Check for active vtimers in the current task */
//movq %gs:CPU_ACTIVE_THREAD,%rcx /* get current thread */
//movq TH_TASK(%rcx),%rbx /* point to current task */
//TASK_VTIMER_CHECK(%rbx,%rcx)
sti
//CCALL1(machdep_syscall, %r15)
/*
* always returns through thread_exception_return
*/
/*
* 64bit Tasks
* System call entries via syscall only:
*
* r15 x86_saved_state64_t
* rsp kernel stack
*
* both rsp and r15 are 16-byte aligned
* interrupts disabled
* direction flag cleared
*/
Entry(hndl_syscall)
//TIME_TRAP_UENTRY
//movq %gs:CPU_ACTIVE_THREAD,%rcx /* get current thread */
//movl $-1, TH_IOTIER_OVERRIDE(%rcx) /* Reset IO tier override to -1 before handling syscall */
//movq TH_TASK(%rcx),%rbx /* point to current task */
/* Check for active vtimers in the current task */
//TASK_VTIMER_CHECK(%rbx,%rcx)
/*
 * We can be here for a mach, unix, machdep or diag syscall,
* as indicated by the syscall class:
*/
movl R64_RAX(%r15), %eax /* syscall number/class */
movl %eax, %edx
//andl $(SYSCALL_CLASS_MASK), %edx /* syscall class */
//cmpl $(SYSCALL_CLASS_MACH<<SYSCALL_CLASS_SHIFT), %edx
//je EXT(hndl_mach_scall64)
//cmpl $(SYSCALL_CLASS_UNIX<<SYSCALL_CLASS_SHIFT), %edx
//je EXT(hndl_unix_scall64)
//cmpl $(SYSCALL_CLASS_MDEP<<SYSCALL_CLASS_SHIFT), %edx
//je EXT(hndl_mdep_scall64)
//cmpl $(SYSCALL_CLASS_DIAG<<SYSCALL_CLASS_SHIFT), %edx
//je EXT(hndl_diag_scall64)
/* Syscall class unknown */
sti
CCALL3(i386_exception, $(EXC_SYSCALL), %rax, $1)
/* no return */
Entry(hndl_unix_scall64)
//incl TH_SYSCALLS_UNIX(%rcx) /* increment call count */
sti
CCALL1(unix_syscall64, %r15) // Not yet implemented
/*
* always returns through thread_exception_return
*/
Entry(hndl_mach_scall64)
//incl TH_SYSCALLS_MACH(%rcx) /* increment call count */
sti
CCALL1(mach_call_munger64, %r15)
/*
* always returns through thread_exception_return
*/
Entry(hndl_mdep_scall64)
sti
CCALL1(machdep_syscall64, %r15)
/*
* always returns through thread_exception_return
*/
Entry(hndl_diag_scall64)
//CCALL1(diagCall64, %r15) // Call diagnostics
test %eax, %eax // What kind of return is this?
je 1f // branch if bad (zero)
jmp EXT(return_to_user) // Normal return, do not check asts...
1:
sti
CCALL3(i386_exception, $EXC_SYSCALL, $0x6000, $1)
/* no return */
Entry(hndl_machine_check)
CCALL1(panic_machine_check64, %r15)
hlt
Entry(hndl_double_fault)
CCALL1(panic_double_fault64, %r15)
hlt
|
admkopec/BetaOS
| 5,598
|
Kernel/Kernel/x86_64/start.S
|
#
# start.s
# BetaOS
#
# Created by Adam Kopeć on 9/26/15 and modified for use with x86_64 CPUs on 5/2/16.
# Copyright © 2015-2016 Adam Kopeć. All rights reserved.
#
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif
.code32
# Declare constants used for creating a multiboot header.
.set FLAGS, 1<<0 | 1<<1 //| 1<<2 # this is the Multiboot 'flag' field
.set MAGIC, 0x1BADB002 # 'magic number' lets bootloader find the header
.set CHECKSUM, -(MAGIC + FLAGS) # checksum of above, to prove we are multiboot
# Declare a header as in the Multiboot Standard.
//Multiboot_Header:
//.long 0xE85250D6 // Magic
//.long 0 // Architecture
//.long Multiboot_Header_END - Multiboot_Header // Header length
//.long -(0xE85250D6 + 0 + (Multiboot_Header_END - Multiboot_Header)) // Checksum
//.long 0 // Type
//.long 0 // Flags
//.long 8 // Size
//.long __start // Entry
//Multiboot_Header_END:
//.align 4
//Multiboot_Header:
//.long MAGIC
//.long FLAGS
//.long CHECKSUM
//.long Multiboot_Header, 0x100000, 0x10000000, 0, _start
//.long 0
//Multiboot_Header_END:
//.long 0
//.long 1280, 768, 8
#include <i386/asm.h>
#include <i386/seg.h>
#include <i386/proc_reg.h>
#include <i386/pmap.h>
#include <i386/vm_param.h>
/* in the __HIB section since the hibernate restore code uses this stack. */
//.section .bootstrap_stack, "aw", @nobits
#ifdef __ELF__
.section .data
#else
.section __HIB,__data
#endif
//.align 12
.globl EXT(low_intstack)
EXT(low_intstack):
.globl EXT(gIOHibernateRestoreStack)
EXT(gIOHibernateRestoreStack):
.space INTSTACK_SIZE
.globl EXT(low_eintstack)
EXT(low_eintstack):
.globl EXT(gIOHibernateRestoreStackEnd)
EXT(gIOHibernateRestoreStackEnd):
#ifdef __ELF__
#else
.section __DATA,__data
#endif
/*
* Stack for machine-check handler.
*/
//.align 12
.globl EXT(mc_task_stack)
EXT(mc_task_stack):
.space INTSTACK_SIZE
.globl EXT(mc_task_stack_end)
EXT(mc_task_stack_end):
#define SWITCH_TO_64BIT_MODE \
movl $(CR4_PAE),%eax /* enable PAE */ ;\
movl %eax,%cr4 ;\
movl $MSR_IA32_EFER,%ecx ;\
rdmsr ;\
/* enable long mode, NX */ ;\
orl $(MSR_IA32_EFER_LME | MSR_IA32_EFER_NXE),%eax ;\
wrmsr ;\
movl $EXT(BootPML4),%eax ;\
movl %eax,%cr3 ;\
movl %cr0,%eax ;\
orl $(CR0_PG|CR0_WP),%eax /* enable paging */ ;\
movl %eax,%cr0 ;\
ljmpl $KERNEL64_CS,$64f ;\
64: ;\
.code64
// The kernel entry point.
.code32
#ifdef __ELF__
.section .text
#else
.section __HIB,__text
#endif
.align ALIGN
.globl EXT(_start)
.globl EXT(pstart)
LEXT(_start)
LEXT(pstart)
mov %eax, %edi /* save kernbootstruct */
/* Use low 32-bits of address as 32-bit stack */
movl $EXT(low_eintstack), %esp
movl $EXT(protected_mode_gdtr), %eax
lgdtl (%eax)
movl $EXT(BootPML4), %eax // Level 4:
add %eax, 0*8+0(%eax) // - 1:1
add %eax, KERNEL_PML4_INDEX*8+0(%eax) // - kernel space
movl $EXT(BootPDPT), %edx // Level 3:
add %eax, 0*8+0(%edx)
add %eax, 1*8+0(%edx)
add %eax, 2*8+0(%edx)
add %eax, 3*8+0(%edx)
/* the following code is shared by the master CPU and all slave CPUs */
L_pstart_common:
/* Switch to 64 bit mode */
SWITCH_TO_64BIT_MODE
/* Flush data segment registers */
xor %eax, %eax
mov %ax, %ss
mov %ax, %ds
mov %ax, %es
mov %ax, %fs
mov %ax, %gs
test %edi, %edi /* Populate stack canary on BSP */
jz Lvstartshim
mov $1, %eax
cpuid
test $(1 << 30), %ecx
jz Lnon_rdrand
rdrand %rax /* RAX := 64 bits of DRBG entropy */
jnc Lnon_rdrand
Lstore_random_guard:
xor %ah, %ah /* Security: zero second byte of stack canary */
movq %rax, ___stack_chk_guard(%rip)
/* %edi = boot_args_start if BSP */
Lvstartshim:
/* %edi = boot_args_start */
#ifdef __ELF__
leaq vstart(%rip), %rcx
#else
leaq _vstart(%rip), %rcx
#endif
movq $0xffffff8000000000, %rax /* adjust pointer up high */
or %rax, %rsp /* and stack pointer up there */
or %rcx, %rax
andq $0xfffffffffffffff0, %rsp /* align stack */
xorq %rbp, %rbp /* zero frame pointer */
callq *%rax
Lnon_rdrand:
rdtsc /* EDX:EAX := TSC */
/* Distribute low order bits */
mov %eax, %ecx
xor %al, %ah
shl $16, %rcx
xor %rcx, %rax
xor %eax, %edx
/* Incorporate ASLR entropy, if any */
lea (%rip), %rcx
shr $21, %rcx
movzbl %cl, %ecx
shl $16, %ecx
xor %ecx, %edx
mov %ah, %cl
ror %cl, %edx /* Right rotate EDX (TSC&0xFF ^ (TSC>>8 & 0xFF))&1F */
shl $32, %rdx
xor %rdx, %rax
mov %cl, %al
jmp Lstore_random_guard
#ifdef __ELF__
.size _start, . - _start
#endif
/* Slave CPUs entry point */
.align ALIGN
.globl EXT(slave_pstart)
LEXT(slave_pstart)
.code32
cli /* disable interrupts, so we don't need an IDT for a while */
movl $EXT(mp_slave_stack) + PAGE_SIZE, %esp
xor %edi, %edi /* AP, no "kernbootstruct" */
jmp L_pstart_common /* hop a ride to vstart() */
#ifdef __ELF__
.size slave_pstart, . - slave_pstart
#endif
.code32
#ifdef __ELF__
.section .text
#else
.section __HIB,__text
#endif
Entry(protected_mode_gdtr)
.short 160 /* limit (8*20 segs) */
.quad EXT(master_gdt)
|
AdmiralCurtiss/SenPatcher
| 15,150
|
native/zstd/decompress/huf_decompress_amd64.S
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
#include "../common/portability_macros.h"
#if defined(__ELF__) && defined(__GNUC__)
/* Stack marking
* ref: https://wiki.gentoo.org/wiki/Hardened/GNU_stack_quickstart
*/
.section .note.GNU-stack,"",%progbits
#if defined(__aarch64__)
/* Mark that this assembly supports BTI & PAC, because it is empty for aarch64.
* See: https://github.com/facebook/zstd/issues/3841
* See: https://gcc.godbolt.org/z/sqr5T4ffK
* See: https://lore.kernel.org/linux-arm-kernel/20200429211641.9279-8-broonie@kernel.org/
* See: https://reviews.llvm.org/D62609
*/
.pushsection .note.gnu.property, "a"
.p2align 3
.long 4 /* size of the name - "GNU\0" */
.long 0x10 /* size of descriptor */
.long 0x5 /* NT_GNU_PROPERTY_TYPE_0 */
.asciz "GNU"
.long 0xc0000000 /* pr_type - GNU_PROPERTY_AARCH64_FEATURE_1_AND */
.long 4 /* pr_datasz - 4 bytes */
.long 3 /* pr_data - GNU_PROPERTY_AARCH64_FEATURE_1_BTI | GNU_PROPERTY_AARCH64_FEATURE_1_PAC */
.p2align 3 /* pr_padding - bring everything to 8 byte alignment */
.popsection
#endif
#endif
#if ZSTD_ENABLE_ASM_X86_64_BMI2
/* Calling convention:
*
* %rdi (or %rcx on Windows) contains the first argument: HUF_DecompressAsmArgs*.
* %rbp isn't maintained (no frame pointer).
* %rsp contains the stack pointer that grows down.
* No red-zone is assumed, only addresses >= %rsp are used.
* All register contents are preserved.
*/
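/* Hedged sketch of the argument block layout the loops below assume,
 * inferred purely from the load/store offsets used here; the authoritative
 * HUF_DecompressAsmArgs definition lives in the C sources and may differ:
 *
 *   offset   0.. 24   const BYTE* ip[4]    input cursors, one per stream
 *   offset  32.. 56   BYTE*       op[4]    output cursors, one per stream
 *   offset  64.. 88   U64         bits[4]  bit containers, one per stream
 *   offset  96        const void* dtable   decoding table
 *   offset 104        const BYTE* ilowest  lowest valid input pointer
 *   offset 112        BYTE*       oend     end of the output buffer
 */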
ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X1_usingDTable_internal_fast_asm_loop)
ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X2_usingDTable_internal_fast_asm_loop)
ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X2_usingDTable_internal_fast_asm_loop)
ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X1_usingDTable_internal_fast_asm_loop)
.global HUF_decompress4X1_usingDTable_internal_fast_asm_loop
.global HUF_decompress4X2_usingDTable_internal_fast_asm_loop
.global _HUF_decompress4X1_usingDTable_internal_fast_asm_loop
.global _HUF_decompress4X2_usingDTable_internal_fast_asm_loop
.text
/* Sets up register mappings for clarity.
* op[], bits[], dtable & ip[0] each get their own register.
* ip[1,2,3] & olimit alias var[].
* %rax is a scratch register.
*/
#define op0 rsi
#define op1 rbx
#define op2 rcx
#define op3 rdi
#define ip0 r8
#define ip1 r9
#define ip2 r10
#define ip3 r11
#define bits0 rbp
#define bits1 rdx
#define bits2 r12
#define bits3 r13
#define dtable r14
#define olimit r15
/* var[] aliases ip[1,2,3] & olimit
* ip[1,2,3] are saved every iteration.
* olimit is only used in compute_olimit.
*/
#define var0 r15
#define var1 r9
#define var2 r10
#define var3 r11
/* 32-bit var registers */
#define vard0 r15d
#define vard1 r9d
#define vard2 r10d
#define vard3 r11d
/* Calls X(N) for each stream 0, 1, 2, 3. */
#define FOR_EACH_STREAM(X) \
X(0); \
X(1); \
X(2); \
X(3)
/* Calls X(N, idx) for each stream 0, 1, 2, 3. */
#define FOR_EACH_STREAM_WITH_INDEX(X, idx) \
X(0, idx); \
X(1, idx); \
X(2, idx); \
X(3, idx)
/* Define both _HUF_* & HUF_* symbols because MacOS
* C symbols are prefixed with '_' & Linux symbols aren't.
*/
_HUF_decompress4X1_usingDTable_internal_fast_asm_loop:
HUF_decompress4X1_usingDTable_internal_fast_asm_loop:
ZSTD_CET_ENDBRANCH
/* Save all registers - even if they are callee saved for simplicity. */
push %rax
push %rbx
push %rcx
push %rdx
push %rbp
push %rsi
push %rdi
push %r8
push %r9
push %r10
push %r11
push %r12
push %r13
push %r14
push %r15
/* Read HUF_DecompressAsmArgs* args from %rax */
#if defined(_WIN32)
movq %rcx, %rax
#else
movq %rdi, %rax
#endif
movq 0(%rax), %ip0
movq 8(%rax), %ip1
movq 16(%rax), %ip2
movq 24(%rax), %ip3
movq 32(%rax), %op0
movq 40(%rax), %op1
movq 48(%rax), %op2
movq 56(%rax), %op3
movq 64(%rax), %bits0
movq 72(%rax), %bits1
movq 80(%rax), %bits2
movq 88(%rax), %bits3
movq 96(%rax), %dtable
push %rax /* argument */
push 104(%rax) /* ilowest */
push 112(%rax) /* oend */
push %olimit /* olimit space */
subq $24, %rsp
.L_4X1_compute_olimit:
/* Computes how many iterations we can do safely
* %r15, %rax may be clobbered
* rbx, rdx must be saved
* op3 & ip0 mustn't be clobbered
*/
movq %rbx, 0(%rsp)
movq %rdx, 8(%rsp)
movq 32(%rsp), %rax /* rax = oend */
subq %op3, %rax /* rax = oend - op3 */
/* r15 = (oend - op3) / 5 */
movabsq $-3689348814741910323, %rdx
mulq %rdx
movq %rdx, %r15
shrq $2, %r15
movq %ip0, %rax /* rax = ip0 */
movq 40(%rsp), %rdx /* rdx = ilowest */
subq %rdx, %rax /* rax = ip0 - ilowest */
movq %rax, %rbx /* rbx = ip0 - ilowest */
/* rdx = (ip0 - ilowest) / 7 */
movabsq $2635249153387078803, %rdx
mulq %rdx
subq %rdx, %rbx
shrq %rbx
addq %rbx, %rdx
shrq $2, %rdx
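/* Both divisions above avoid a div instruction by multiplying with a
 * precomputed reciprocal and keeping the high half of the 128-bit product.
 * A hedged C sketch of the identities used (constants copied from the code):
 *
 *   x / 5 == (uint64_t)(((unsigned __int128)x * 0xCCCCCCCCCCCCCCCDull) >> 64) >> 2
 *
 *   // The reciprocal for 7 (0x2492492492492493) needs a fixup step:
 *   t = (uint64_t)(((unsigned __int128)x * 0x2492492492492493ull) >> 64);
 *   x_div_7 = (((x - t) >> 1) + t) >> 2;
 */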
/* r15 = min(%rdx, %r15) */
cmpq %rdx, %r15
cmova %rdx, %r15
/* r15 = r15 * 5 */
leaq (%r15, %r15, 4), %r15
/* olimit = op3 + r15 */
addq %op3, %olimit
movq 8(%rsp), %rdx
movq 0(%rsp), %rbx
/* If (op3 + 20 > olimit) */
movq %op3, %rax /* rax = op3 */
cmpq %rax, %olimit /* op3 == olimit */
je .L_4X1_exit
/* If (ip1 < ip0) go to exit */
cmpq %ip0, %ip1
jb .L_4X1_exit
/* If (ip2 < ip1) go to exit */
cmpq %ip1, %ip2
jb .L_4X1_exit
/* If (ip3 < ip2) go to exit */
cmpq %ip2, %ip3
jb .L_4X1_exit
/* Reads top 11 bits from bits[n]
* Loads dt[bits[n]] into var[n]
*/
#define GET_NEXT_DELT(n) \
movq $53, %var##n; \
shrxq %var##n, %bits##n, %var##n; \
movzwl (%dtable,%var##n,2),%vard##n
/* var[n] must contain the DTable entry computed with GET_NEXT_DELT
* Moves var[n] to %rax
* bits[n] <<= var[n] & 63
* op[n][idx] = %rax >> 8
* %ah is a way to access bits [8, 16) of %rax
*/
#define DECODE_FROM_DELT(n, idx) \
movq %var##n, %rax; \
shlxq %var##n, %bits##n, %bits##n; \
movb %ah, idx(%op##n)
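/* Taken together, GET_NEXT_DELT + DECODE_FROM_DELT decode one X1 symbol.
 * A hedged C sketch with illustrative names (not the zstd C implementation):
 *
 *   entry   = dtable[bits >> 53];       // top 11 bits select the entry
 *   bits  <<= entry & 63;               // low byte: bits consumed
 *   op[idx] = (BYTE)(entry >> 8);       // high byte: decoded symbol
 */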
/* Assumes GET_NEXT_DELT has been called.
* Calls DECODE_FROM_DELT then GET_NEXT_DELT
*/
#define DECODE_AND_GET_NEXT(n, idx) \
DECODE_FROM_DELT(n, idx); \
GET_NEXT_DELT(n) \
/* // ctz & nbBytes is stored in bits[n]
* // nbBits is stored in %rax
* ctz = CTZ[bits[n]]
* nbBits = ctz & 7
* nbBytes = ctz >> 3
* op[n] += 5
* ip[n] -= nbBytes
* // Note: x86-64 is little-endian ==> no bswap
* bits[n] = MEM_readST(ip[n]) | 1
* bits[n] <<= nbBits
*/
#define RELOAD_BITS(n) \
bsfq %bits##n, %bits##n; \
movq %bits##n, %rax; \
andq $7, %rax; \
shrq $3, %bits##n; \
leaq 5(%op##n), %op##n; \
subq %bits##n, %ip##n; \
movq (%ip##n), %bits##n; \
orq $1, %bits##n; \
shlx %rax, %bits##n, %bits##n
/* Store clobbered variables on the stack */
movq %olimit, 24(%rsp)
movq %ip1, 0(%rsp)
movq %ip2, 8(%rsp)
movq %ip3, 16(%rsp)
/* Call GET_NEXT_DELT for each stream */
FOR_EACH_STREAM(GET_NEXT_DELT)
.p2align 6
.L_4X1_loop_body:
/* Decode 5 symbols in each of the 4 streams (20 total)
* Must have called GET_NEXT_DELT for each stream
*/
FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 0)
FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 1)
FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 2)
FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 3)
FOR_EACH_STREAM_WITH_INDEX(DECODE_FROM_DELT, 4)
/* Load ip[1,2,3] from stack (var[] aliases them)
* ip[] is needed for RELOAD_BITS
* Each will be stored back to the stack after RELOAD
*/
movq 0(%rsp), %ip1
movq 8(%rsp), %ip2
movq 16(%rsp), %ip3
/* Reload each stream & fetch the next table entry
* to prepare for the next iteration
*/
RELOAD_BITS(0)
GET_NEXT_DELT(0)
RELOAD_BITS(1)
movq %ip1, 0(%rsp)
GET_NEXT_DELT(1)
RELOAD_BITS(2)
movq %ip2, 8(%rsp)
GET_NEXT_DELT(2)
RELOAD_BITS(3)
movq %ip3, 16(%rsp)
GET_NEXT_DELT(3)
/* If op3 < olimit: continue the loop */
cmp %op3, 24(%rsp)
ja .L_4X1_loop_body
/* Reload ip[1,2,3] from stack */
movq 0(%rsp), %ip1
movq 8(%rsp), %ip2
movq 16(%rsp), %ip3
/* Re-compute olimit */
jmp .L_4X1_compute_olimit
#undef GET_NEXT_DELT
#undef DECODE_FROM_DELT
#undef DECODE
#undef RELOAD_BITS
.L_4X1_exit:
addq $24, %rsp
/* Restore stack (oend & olimit) */
pop %rax /* olimit */
pop %rax /* oend */
pop %rax /* ilowest */
pop %rax /* arg */
/* Save ip / op / bits */
movq %ip0, 0(%rax)
movq %ip1, 8(%rax)
movq %ip2, 16(%rax)
movq %ip3, 24(%rax)
movq %op0, 32(%rax)
movq %op1, 40(%rax)
movq %op2, 48(%rax)
movq %op3, 56(%rax)
movq %bits0, 64(%rax)
movq %bits1, 72(%rax)
movq %bits2, 80(%rax)
movq %bits3, 88(%rax)
/* Restore registers */
pop %r15
pop %r14
pop %r13
pop %r12
pop %r11
pop %r10
pop %r9
pop %r8
pop %rdi
pop %rsi
pop %rbp
pop %rdx
pop %rcx
pop %rbx
pop %rax
ret
_HUF_decompress4X2_usingDTable_internal_fast_asm_loop:
HUF_decompress4X2_usingDTable_internal_fast_asm_loop:
ZSTD_CET_ENDBRANCH
/* Save all registers - even if they are callee saved for simplicity. */
push %rax
push %rbx
push %rcx
push %rdx
push %rbp
push %rsi
push %rdi
push %r8
push %r9
push %r10
push %r11
push %r12
push %r13
push %r14
push %r15
/* Read HUF_DecompressAsmArgs* args from %rax */
#if defined(_WIN32)
movq %rcx, %rax
#else
movq %rdi, %rax
#endif
movq 0(%rax), %ip0
movq 8(%rax), %ip1
movq 16(%rax), %ip2
movq 24(%rax), %ip3
movq 32(%rax), %op0
movq 40(%rax), %op1
movq 48(%rax), %op2
movq 56(%rax), %op3
movq 64(%rax), %bits0
movq 72(%rax), %bits1
movq 80(%rax), %bits2
movq 88(%rax), %bits3
movq 96(%rax), %dtable
push %rax /* argument */
push %rax /* olimit */
push 104(%rax) /* ilowest */
movq 112(%rax), %rax
push %rax /* oend3 */
movq %op3, %rax
push %rax /* oend2 */
movq %op2, %rax
push %rax /* oend1 */
movq %op1, %rax
push %rax /* oend0 */
/* Scratch space */
subq $8, %rsp
.L_4X2_compute_olimit:
/* Computes how many iterations we can do safely
* %r15, %rax may be clobbered
* rdx must be saved
* op[1,2,3,4] & ip0 mustn't be clobbered
*/
movq %rdx, 0(%rsp)
/* We can consume up to 7 input bytes each iteration. */
movq %ip0, %rax /* rax = ip0 */
movq 40(%rsp), %rdx /* rdx = ilowest */
subq %rdx, %rax /* rax = ip0 - ilowest */
movq %rax, %r15 /* r15 = ip0 - ilowest */
/* rdx = rax / 7 */
movabsq $2635249153387078803, %rdx
mulq %rdx
subq %rdx, %r15
shrq %r15
addq %r15, %rdx
shrq $2, %rdx
/* r15 = (ip0 - ilowest) / 7 */
movq %rdx, %r15
/* r15 = min(r15, min(oend0 - op0, oend1 - op1, oend2 - op2, oend3 - op3) / 10) */
movq 8(%rsp), %rax /* rax = oend0 */
subq %op0, %rax /* rax = oend0 - op0 */
movq 16(%rsp), %rdx /* rdx = oend1 */
subq %op1, %rdx /* rdx = oend1 - op1 */
cmpq %rax, %rdx
cmova %rax, %rdx /* rdx = min(%rdx, %rax) */
movq 24(%rsp), %rax /* rax = oend2 */
subq %op2, %rax /* rax = oend2 - op2 */
cmpq %rax, %rdx
cmova %rax, %rdx /* rdx = min(%rdx, %rax) */
movq 32(%rsp), %rax /* rax = oend3 */
subq %op3, %rax /* rax = oend3 - op3 */
cmpq %rax, %rdx
cmova %rax, %rdx /* rdx = min(%rdx, %rax) */
movabsq $-3689348814741910323, %rax
mulq %rdx
shrq $3, %rdx /* rdx = rdx / 10 */
/* r15 = min(%rdx, %r15) */
cmpq %rdx, %r15
cmova %rdx, %r15
/* olimit = op3 + 5 * r15 */
movq %r15, %rax
leaq (%op3, %rax, 4), %olimit
addq %rax, %olimit
movq 0(%rsp), %rdx
/* If (op3 + 10 > olimit) */
movq %op3, %rax /* rax = op3 */
cmpq %rax, %olimit /* op3 == olimit */
je .L_4X2_exit
/* If (ip1 < ip0) go to exit */
cmpq %ip0, %ip1
jb .L_4X2_exit
/* If (ip2 < ip1) go to exit */
cmpq %ip1, %ip2
jb .L_4X2_exit
/* If (ip3 < ip2) go to exit */
cmpq %ip2, %ip3
jb .L_4X2_exit
#define DECODE(n, idx) \
movq %bits##n, %rax; \
shrq $53, %rax; \
movzwl 0(%dtable,%rax,4),%r8d; \
movzbl 2(%dtable,%rax,4),%r15d; \
movzbl 3(%dtable,%rax,4),%eax; \
movw %r8w, (%op##n); \
shlxq %r15, %bits##n, %bits##n; \
addq %rax, %op##n
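/* Hedged C sketch of one X2 decode step performed by DECODE above; the
 * 4-byte entry layout {U16 sequence; BYTE nbBits; BYTE length} is inferred
 * from the offsets 0/2/3 used here:
 *
 *   entry = dtable[bits >> 53];         // top 11 bits select the entry
 *   memcpy(op, &entry.sequence, 2);     // always store 2 bytes
 *   bits <<= entry.nbBits;              // consume nbBits
 *   op   += entry.length;               // advance by 1 or 2 emitted symbols
 */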
#define RELOAD_BITS(n) \
bsfq %bits##n, %bits##n; \
movq %bits##n, %rax; \
shrq $3, %bits##n; \
andq $7, %rax; \
subq %bits##n, %ip##n; \
movq (%ip##n), %bits##n; \
orq $1, %bits##n; \
shlxq %rax, %bits##n, %bits##n
movq %olimit, 48(%rsp)
.p2align 6
.L_4X2_loop_body:
/* We clobber r8, so store it on the stack */
movq %r8, 0(%rsp)
/* Decode 5 symbols from each of the 4 streams (20 symbols total). */
FOR_EACH_STREAM_WITH_INDEX(DECODE, 0)
FOR_EACH_STREAM_WITH_INDEX(DECODE, 1)
FOR_EACH_STREAM_WITH_INDEX(DECODE, 2)
FOR_EACH_STREAM_WITH_INDEX(DECODE, 3)
FOR_EACH_STREAM_WITH_INDEX(DECODE, 4)
/* Reload r8 */
movq 0(%rsp), %r8
FOR_EACH_STREAM(RELOAD_BITS)
cmp %op3, 48(%rsp)
ja .L_4X2_loop_body
jmp .L_4X2_compute_olimit
#undef DECODE
#undef RELOAD_BITS
.L_4X2_exit:
addq $8, %rsp
/* Restore stack (oend & olimit) */
pop %rax /* oend0 */
pop %rax /* oend1 */
pop %rax /* oend2 */
pop %rax /* oend3 */
pop %rax /* ilowest */
pop %rax /* olimit */
pop %rax /* arg */
/* Save ip / op / bits */
movq %ip0, 0(%rax)
movq %ip1, 8(%rax)
movq %ip2, 16(%rax)
movq %ip3, 24(%rax)
movq %op0, 32(%rax)
movq %op1, 40(%rax)
movq %op2, 48(%rax)
movq %op3, 56(%rax)
movq %bits0, 64(%rax)
movq %bits1, 72(%rax)
movq %bits2, 80(%rax)
movq %bits3, 88(%rax)
/* Restore registers */
pop %r15
pop %r14
pop %r13
pop %r12
pop %r11
pop %r10
pop %r9
pop %r8
pop %rdi
pop %rsi
pop %rbp
pop %rdx
pop %rcx
pop %rbx
pop %rax
ret
#endif
|
adoptium/aqa-tests
| 918
|
functional/MBCS_Tests/i18n/data/aix_ko_KR.UTF-8.txt.s
|
South Korea 한국 韓國
Seoul 서울
Busan 부산 釜山
Chuncheon 춘천 春川
Daegu 대구 大邱
Daejeon 대전 大田
Gwangju 광주 光州
Gyeongju 경주 慶州
Incheon 인천 仁川
Jeonju 전주 全州
Seoul서울
Busan부산釜山
Chuncheon춘천春川
Daegu대구大邱
Daejeon대전大田
Gwangju광주光州
Gyeongju경주慶州
Incheon인천仁川
Jeonju전주全州
CJK_SYMBOLS_AND_PUNCTUATION 、。‥…〃―∥\∼
HALFWIDTH_AND_FULLWIDTH_FORMS !"#$%&'()*
HANGUL_COMPATIBILITY_JAMO ㄱㄲㄳㄴㄵㄶㄷㄸㄹㄺ
NUMBER_FORMS ⅰⅱⅲⅳⅴⅵⅶⅷⅸⅹ
BOX_DRAWING ─│┌┐┘└├┬┤┴
CJK_COMPATIBILITY ㎕㎖㎗ℓ㎘㏄㎣㎤㎥㎦
LATIN_EXTENDED_A ĦIJĿŁŒŦŊ㉠㉡㉢
HIRAGANA ぁあぃいぅうぇえぉお
KATAKANA ァアィイゥウェエォオ
CYRILLIC АБВГДЕЁЖЗИ
HANGUL_SYLLABLES 가각간갇갈갉갊감갑값
CJK_UNIFIED_IDEOGRAPHS 伽佳假價加可呵哥嘉嫁
CJK_SYMBOLS_AND_PUNCTUATION 、。‥…〃―∥\∼
HALFWIDTH_AND_FULLWIDTH_FORMS!"#$%&'()*
HANGUL_COMPATIBILITY_JAMOㄱㄲㄳㄴㄵㄶㄷㄸㄹㄺ
NUMBER_FORMSⅰⅱⅲⅳⅴⅵⅶⅷⅸⅹ
BOX_DRAWING─│┌┐┘└├┬┤┴
CJK_COMPATIBILITY㎕㎖㎗ℓ㎘㏄㎣㎤㎥㎦
LATIN_EXTENDED_AĦIJĿŁŒŦŊ㉠㉡㉢
HIRAGANAぁあぃいぅうぇえぉお
KATAKANAァアィイゥウェエォオ
CYRILLICАБВГДЕЁЖЗИ
HANGUL_SYLLABLES가각간갇갈갉갊감갑값
CJK_UNIFIED_IDEOGRAPHS伽佳假價加可呵哥嘉嫁
|
ADTPro/adtpro
| 13,372
|
src/client/w5100/w5100_udp.s
|
;
; ADTPro - Apple Disk Transfer ProDOS
; Copyright (C) 2014 by David Schmidt
; 1110325+david-schmidt@users.noreply.github.com
;
; This program is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 2 of the License, or (at your
; option) any later version.
;
; This program is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
; for more details.
;
; You should have received a copy of the GNU General Public License along
; with this program; if not, write to the Free Software Foundation, Inc.,
; 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
;
;
; Copyright (c) 2014, Oliver Schmidt
; All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
; modification, are permitted provided that the following conditions are met:
; * Redistributions of source code must retain the above copyright
; notice, this list of conditions and the following disclaimer.
; * Redistributions in binary form must reproduce the above copyright
; notice, this list of conditions and the following disclaimer in the
; documentation and/or other materials provided with the distribution.
; * Neither the name of the <organization> nor the
; names of its contributors may be used to endorse or promote products
; derived from this software without specific prior written permission.
;
; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
; ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
; WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
; DISCLAIMED. IN NO EVENT SHALL OLIVER SCHMIDT BE LIABLE FOR ANY
; DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
; (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
; ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
ptr := $80 ; 2 byte pointer value
tmp := $82 ; 1 byte temporary value
bas := $83 ; 1 byte socket 1 Base Address (hibyte)
sha := $84 ; 2 byte physical addr shadow ($F000-$FFFF)
;len := $86 ; 2 byte frame length
;adv := $88 ; 2 byte pointer register advancement
mode := $C0C4 ; Usage will be self-modified
addr := $C0C5 ; Usage will be self-modified
data := $C0C7 ; Usage will be self-modified
.export w5100_init
.export recv_init, recv_byte, recv_done
.export send_init, send_byte, send_done
;------------------------------------------------------------------------------
w5100_init_error:
sec
rts
w5100_init:
; Set ip_parms pointer
lda #<ip_parms
sta ptr
ldx #>ip_parms
stx ptr+1
stax ptr
jsr w5100_self_modify
; S/W Reset
lda #$80
fix_mode_01:
sta mode
fix_mode_02:
lda mode
bne w5100_init_error
; Indirect Bus I/F mode, Address Auto-Increment
lda #$03
fix_mode_03:
sta mode
; Gateway IP Address Register: IP address of router on local network
ldx #$00 ; Hibyte
ldy #$01 ; Lobyte
jsr set_addr
ldy #3*4 ; ip_parms::cfg_gateway
jsr set_ipv4value
; Subnet Mask Register: Netmask of local network
; -> addr is already set
ldy #2*4 ; ip_parms::cfg_netmask
jsr set_ipv4value
; Source Hardware Address Register: MAC Address
; -> addr is already set
ldx #$00
imac: lda w5100_mac,x
fix_data_01:
sta data
inx
cpx #$06
bcc imac
; Source IP Address Register: IP address of local machine
; -> addr is already set
ldy #1*4 ; ip_parms::cfg_ip
jsr set_ipv4value
; RX Memory Size Register: Assign 4KB each to sockets 0 and 1
ldx #$00 ; Hibyte
ldy #$1A ; Lobyte
jsr set_addr
lda #$0A
fix_data_02:
sta data
; TX Memory Size Register: Assign 4KB each to sockets 0 and 1
; -> addr is already set
; -> A is still $0A
fix_data_03:
sta data
; Socket 1 Destination IP Address Register: Destination IP address
; This has to be the last call to set_ipv4value because it writes
; as a side effect to 'hdr' and it is the destination IP address
; that has to be present in 'hdr' after initialization
ldy #$0C
jsr set_addrsocket1
ldy #0*4 ; ip_parms::serverip
jsr set_ipv4value
; Socket 1 Destination Port Register: 6502
; -> addr is already set
jsr set_data6502
; Socket 1 Source Port Register: 6502
ldy #$04
jsr set_addrsocket1
jsr set_data6502
; Socket 1 Mode Register: UDP
ldy #$00
jsr set_addrsocket1
lda #$02
fix_data_04:
sta data
; Socket 1 Command Register: OPEN
; addr is already set
lda #$01
fix_data_05:
sta data
clc
rts
;------------------------------------------------------------------------------
set_ipv4value:
ldx #$03
simore: lda (ptr),y
iny
fix_data_06:
sta data
sta hdr+2,x
dex
bpl simore
rts
;------------------------------------------------------------------------------
set_data6502:
lda #<6502
ldx #>6502
fix_data_07:
stx data ; Hibyte
fix_data_08:
sta data ; Lobyte
rts
;------------------------------------------------------------------------------
recv_init:
; Socket 1 RX Received Size Register: 0 or volatile ?
lda #$26 ; Socket RX Received Size Register
jsr prolog
bcs :+++
; Socket 1 RX Read Pointer Register
; -> addr already set
; Calculate and set physical address
ldx #>$7000 ; Socket 1 RX Base Address
jsr set_addrphysical
; Compare peer IP addr and peer port with expected values
; in 'hdr' and set C(arry flag) if there's a mismatch
clc
ldx #$05
stx tmp
: jsr recv_byte ; Doesn't trash C
ldx tmp
eor hdr,x ; Doesn't trash C
beq :+
sec
: dec tmp
bpl :--
php ; Save C
; Read data length
jsr recv_byte ; Hibyte
sta len+1
jsr recv_byte ; Lobyte
sta len
; Add 8 byte header to set pointer advancement
clc
adc #<$0008
sta adv
lda len+1
adc #>$0008
sta adv+1
; Skip frame if it doesn't originate from our
; expected communication peer
plp ; Restore C
bcs recv_done
; Return success with data in length
lda len
ldx len+1
clc
: rts
;------------------------------------------------------------------------------
send_init:
; Set pointer advancement
lda #$00
sta adv
sta adv+1
; Socket 1 TX Free Size Register: 0 or volatile ?
lda #$20 ; Socket TX Free Size Register
jsr prolog
bcs :+
; Socket 1 TX Free Size Register: < advancement ?
; cpx adv ; Lobyte
; sbc adv+1 ; Hibyte
; bcc rts_cs
; Socket 1 TX Write Pointer Register
ldy #$24
jsr set_addrsocket1
; Calculate and set physical address
ldx #>$5000 ; Socket 1 TX Base Address
jsr set_addrphysical
; Return success
clc
: rts
;------------------------------------------------------------------------------
prolog:
; Check for completion of previous command
; Socket 1 Command Register: 0 ?
jsr set_addrcmdreg1
fix_data_09:
ldx data
bne rts_cs ; Not completed -> error
; Socket Size Register: not 0 ?
tay ; Select Size Register
jsr get_wordsocket1
stx ptr ; Lobyte
sta ptr+1 ; Hibyte
ora ptr
bne :+
rts_cs: sec ; Error (size == 0)
rts
; Socket Size Register: volatile ?
: jsr get_wordsocket1
cpx ptr ; Lobyte
bne rts_cs ; Volatile size -> error
cmp ptr+1 ; Hibyte
bne rts_cs ; Volatile size -> error
clc ; Success (size != 0)
rts
;------------------------------------------------------------------------------
recv_byte:
; Read byte
fix_data_10:
lda data
; Increment physical addr shadow lobyte
inc sha
beq incsha
clc
rts
;------------------------------------------------------------------------------
send_byte:
; Write byte
fix_data_11:
sta data
inc adv
bne :+
inc adv+1
; Increment physical addr shadow lobyte
: inc sha
beq incsha
rts
; Increment physical addr shadow hibyte
incsha: inc sha+1
bne :+
jsr set_addrbase
: clc
rts
;------------------------------------------------------------------------------
recv_done:
; Set parameters for commit code
lda #$40 ; RECV
ldy #$28 ; Socket RX Read Pointer Register
bne epilog ; Always
;------------------------------------------------------------------------------
send_done:
; Set parameters for commit code
lda #$20 ; SEND
ldy #$24 ; Socket TX Write Pointer Register
; Advance pointer register
epilog: jsr set_addrsocket1
tay ; Save command
clc
lda ptr
adc adv
tax
lda ptr+1
adc adv+1
fix_data_12:
sta data ; Hibyte
fix_data_13:
stx data ; Lobyte
; Set command register
tya ; Restore command
jsr set_addrcmdreg1
fix_data_14:
sta data
sec ; When coming from _recv_init -> error
rts
;------------------------------------------------------------------------------
set_addrphysical:
fix_data_15:
lda data ; Hibyte
fix_data_16:
ldy data ; Lobyte
sty ptr
sta ptr+1
and #>$0FFF ; Socket Mask Address (hibyte)
stx bas ; Socket Base Address (hibyte)
ora bas
tax
ora #>$F000 ; Move sha/sha+1 to $F000-$FFFF
sty sha
sta sha+1
set_addr:
fix_addr_01:
stx addr ; Hibyte
fix_addr_02:
sty addr+1 ; Lobyte
rts
;------------------------------------------------------------------------------
set_addrcmdreg1:
ldy #$01 ; Socket Command Register
set_addrsocket1:
ldx #>$0500 ; Socket 1 register base address
bne set_addr ; Always
;------------------------------------------------------------------------------
set_addrbase:
ldx bas ; Socket Base Address (hibyte)
ldy #<$0000 ; Socket Base Address (lobyte)
beq set_addr ; Always
;------------------------------------------------------------------------------
get_wordsocket1:
jsr set_addrsocket1
fix_data_17:
lda data ; Hibyte
fix_data_18:
ldx data ; Lobyte
rts
; w5100_self_modify - make all entry points variable so we can move the
; hardware addresses around in the Apple
;
w5100_self_modify:
ldy COMMSLOT ; GET SLOT# (0..6)
iny ; NOW 1..7
tya
asl
asl
asl
asl
clc
adc #$84 ; Now $84+S0 ($c0b0)
; The accumulator now contains $84 plus (slot * 16)
; i.e. Slot 1 = $94
; i.e. Slot 2 = $A4
; i.e. Slot 3 = $B4
; i.e. Slot 4 = $C4
; i.e. Slot 5 = $D4
; i.e. Slot 6 = $E4
; i.e. Slot 7 = $F4
; $c0s4 - WIZNET_MODE_REG - save off all references to mode
sta fix_mode_01 + 1
sta fix_mode_02 + 1
sta fix_mode_03 + 1
; $c0s5 - WIZNET_ADDR_HI
adc #$01
sta fix_addr_01 + 1
; $c0s6 - WIZNET_ADDR_LO
adc #$01
sta fix_addr_02 + 1
; $c0s7 - WIZNET_DATA_REG
adc #$01
sta fix_data_01 + 1
sta fix_data_02 + 1
sta fix_data_03 + 1
sta fix_data_04 + 1
sta fix_data_05 + 1
sta fix_data_06 + 1
sta fix_data_07 + 1
sta fix_data_08 + 1
sta fix_data_09 + 1
sta fix_data_10 + 1
sta fix_data_11 + 1
sta fix_data_12 + 1
sta fix_data_13 + 1
sta fix_data_14 + 1
sta fix_data_15 + 1
sta fix_data_16 + 1
sta fix_data_17 + 1
sta fix_data_18 + 1
rts
;------------------------------------------------------------------------------
.rodata
w5100_mac: .byte $00, $08, $DC ; OUI of WIZnet
.byte $11, $11, $11
;------------------------------------------------------------------------------
.data
hdr: .word 6502 ; Destination Port
.res 4 ; Destination IP Address
adv: .res 2
len: .res 2
|
ADTPro/adtpro
| 2,886
|
src/client/dos/ssc.s
|
;
; ADTPro - Apple Disk Transfer ProDOS
; Copyright (C) 2006 - 2010 by David Schmidt
; 1110325+david-schmidt@users.noreply.github.com
;
; This program is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 2 of the License, or (at your
; option) any later version.
;
; This program is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
; for more details.
;
; You should have received a copy of the GNU General Public License along
; with this program; if not, write to the Free Software Foundation, Inc.,
; 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
;
;---------------------------------------------------------
; INITSSC - Initialize the SSC
; Y holds the desired slot number
;---------------------------------------------------------
initssc:
tya
asl
asl
asl
asl ; NOW $S0
adc #$88
tax
lda #$0B ; COMMAND: NO PARITY, RTS ON,
sta $C002,X ; DTR ON, NO INTERRUPTS
ldy pspeed ; CONTROL: 8 DATA BITS, 1 STOP
lda bpsctrl,Y ; BIT, BAUD RATE DEPENDS ON
sta $C003,X ; PSPEED
stx mod0+1 ; SELF-MODS FOR $C088+S0
stx mod2+1 ; IN MAIN LOOP
stx mod4+1 ; AND IN sscget AND sscput
inx
stx mod1+1 ; SELF-MODS FOR $C089+S0
stx mod3+1 ; IN sscget AND sscput
jsr patchssc
rts
;---------------------------------------------------------
; sscput - Send accumulator out the serial line
;---------------------------------------------------------
sscput:
pha ; Push A onto the stack
putc1: lda $C000
cmp #esc ; Escape = abort
beq pabort
mod1: lda $C089 ; Check status bits
mod5: and #$50 ; Mask for DSR (must ignore for Laser 128)
cmp #$10
bne putc1 ; Output register is full, so loop
pla
mod2: sta $C088 ; Put character
rts
pabort: jmp babort
;---------------------------------------------------------
; sscget - Get a character from Super Serial Card (XY unchanged)
;---------------------------------------------------------
sscget:
lda $C000
cmp #esc ; Escape = abort
beq pabort
mod3: lda $C089 ; Check status bits
mod6: and #$68
cmp #$8
bne sscget ; Input register empty, loop
mod4: lda $C088 ; Get character
rts
;---------------------------------------------------------
; resetssc - Clean up SSC
;---------------------------------------------------------
resetssc:
mod0: bit $C088 ; CLEAR SSC INPUT REGISTER
rts
;---------------------------------------------------------
; PATCHSSC - Patch the entry points of SSC processing
;---------------------------------------------------------
patchssc:
lda #<sscput
sta putc+1
lda #>sscput
sta putc+2
lda #<sscget
sta getc+1
lda #>sscget
sta getc+2
lda #<resetssc
sta resetio+1
lda #>resetssc
sta resetio+2
rts
|
ADTPro/adtpro
| 67,902
|
src/client/dos/adt.s
|
.include "applechr.i"
;--------------------------------
; Apple Disk Transfer
; By Paul Guertin
; pg@sff.net
; DISTRIBUTE FREELY
;--------------------------------
.LIST ON
; Overview
; --------
; This program transfers a 16-sector disk
; to a 140K MS-DOS file and back. The file
; format (dos-ordered, .dsk) is compatible
; with most Apple II emulators.
; SSC, IIgs or compatible hardware is required.
; Protocol
; --------
; This program initiates any data exchange with the host, which typically
; runs on a PC or Macintosh computer.
;
; The first byte transmitted is always a command code:
; - R to receive a disk image
; - S to send a disk in sector form
; - N to send a disk in nibble form
; - V to send a disk in nibble form, using half-tracks
; - D to get a directory listing of the host's working directory
;
; The protocol for each of the commands is described below.
;
; Initial protocol negotiation
; ----------------------------
; In the case of the R, S, N, or V command code, ADT sends three bytes
; to negotiate a protocol:
; - High-order byte of a protocol number (range 0x01 to 0x1F)
; - Low-order byte of a protocol number (range 0x01 to 0xFF)
; - A null byte
; The protocol number identifies the exact protocol that will
; be used. It allows the host to decide whether it supports this
; protocol or not. ADT waits for an answer:
; - ACK (0x06) means the host supports this protocol
; - NAK (0x15) means the host does not support this protocol
; - Any other answer means the host is incompatible.
; Currently, the only supported protocol version is 0x0101.
;
; In the case of an ACK response, ADT continues by sending the
; file name of the disk image file the host is supposed to save
; (in the case of the S, N or V command) or read (in the case of the
; R command). The file name is terminated by a 0-byte.
; It is up to the host how to interpret the file name. The host
; probably considers it relative to a working directory, the
; same as used in the D command.
; ADT then waits for an answer from the host, being one byte:
; - 0x00 means OK, file name is accepted
; - 0x1A means the file name is invalid (any command) or the file doesn't
; exist on the host (R command only)
; - 0x1C (S, N or V command only) means the file already exists on the host
; - 0x1E (R command only) means the file exists on the host, but is not a
; valid disk image file
;
; Any response other than 0x00 aborts the operation.
; When the response is 0x00, different things occur for the different
; commands.
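;
; As a purely illustrative trace (the file name used here is an
; arbitrary example, not part of the protocol), a successful
; negotiation for an S transfer under protocol 0x0101 looks like:
;   ADT -> host: 'S' 0x01 0x01 0x00
;   host -> ADT: 0x06 (ACK, protocol accepted)
;   ADT -> host: 'G' 'A' 'M' 'E' 'S' '.' 'D' 'S' 'K' 0x00
;   host -> ADT: 0x00 (file name accepted, transfer may begin)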
;
; The R command
; -------------
; In the case of the R command (after the initial negotiation described
; above), ADT starts by sending ACK to the host, to indicate it is ready
; to receive. ADT then expects to receive the 560 sectors of a diskette,
; starting at track 0 sector 0.
; Each track is sent in increasing order of DOS logical sector numbers.
; The data is compressed with RLE (see below). Each sector is followed by
; a 2-byte checksum (which does not take part in the RLE compression),
; and is acknowledged by ADT. The possible answers are:
; - ACK : checksum matches; please send next sector
; - NAK : checksum does not match; please resend same sector
; After receiving the entire disk (all 560 sectors), ADT sends a final
; byte with the overall result: 0 is success, any non-0 value is an error.
; When the user interrupts the transfer, the host is not notified; ADT
; just stops reading the input. The transfer at the host side must be
; stopped manually in whatever way the host software provides.
;
; The S command
; -------------
; After the initial protocol negotiation described above, ADT starts
; by sending ACK to the host, to indicate it is ready to send. It then
; sends the 560 disk sectors exactly as described for the R command, with
; the roles of ADT and host reversed.
; After sending the entire disk, ADT sends a final byte with the overall
; result: 0 is success, any non-0 value is an error.
; When the user interrupts the transfer, the host is not notified; ADT
; just stops sending bytes. The transfer at the host side must be stopped
; manually in whatever way the host software provides.
;
; The N and V commands
; --------------------
; After the initial protocol negotiation described above, ADT starts by
; sending ACK. It then sends either 35 tracks (N command) or 70 tracks
; (V command). Each track is handled as follows.
; ADT sends 52 blocks of 256 bytes each. The bytes are raw disk nibbles,
; and these 13312 bytes (about 2 full disk rotations) give the host enough
; raw material to analyze the data.
; It is guaranteed that ADT starts each track with a nibble that comes right
; after a gap.
; Each of the 256-byte blocks is sent as follows:
; - 1 byte block number within the track (starting at 0)
; - 1 byte track number (N command) or half-track number (V command)
; - 1 byte with fixed value 0x02
; - 256 data bytes, compressed with RLE (see below)
; - 2 bytes checksum over the 256 data bytes
; The host must either confirm (ACK) or reject (NAK) the block, just as in
; the S command.
; After sending the entire track, ADT waits for a response byte from the host:
; - ACK: analysis was successful; please proceed with the next track
; - NAK: analysis failed; abort the operation
; - CAN (0x18): analysis failed, but please continue with next track
; - ENQ (0x05): analysis was inconclusive; please re-read the
; same track and send the newly read data
; After sending all tracks, ADT sends a final status code, which is always
; 0x00 (success).
; Note that the host plays an important role in controlling the process.
; For example, it is up to the host to decide how many times to request
; a track re-read, and also to decide when continuing the transfer is
; useless.
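;
; For illustration only: block 3 of track 7 in an N transfer starts
; with the header bytes 0x03 0x07 0x02, followed by the 256 data
; bytes in RLE-compressed form and the 2-byte checksum, after which
; ADT waits for the host's ACK or NAK exactly as for a sector in the
; S command.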
;
; The D command
; -------------
; No preliminary protocol negotiation takes place.
; The host starts sending text to be displayed on the Apple II screen. The
; layout is completely up to the host. The text is plain ASCII, not Apple II
; "high" ASCII. It might contain \r end-of-line characters. The host must
; make sure the text does not exceed one screen on the Apple II.
; The last character of the screen text sent is 0x00. After that, the host
; sends a continuation byte:
; - 0x01 to indicate more screens are to follow
; - 0x00 to indicate this was the last screen; this ends the Dir command
; If the continuation character is 0x01, ADT has two options:
; - Send 0x00 to abort the Dir command (this ends the command), or
; - Send any other byte to have the host send the next screen.
; RLE compression
; ---------------
; The 256 byte blocks sent over the line are compressed with a form of
; run length encoding. Refer to the source for details on the exact
; operation.
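; As a worked example (based on the sendsec, recvsec and undiff
; routines further down in this file): every byte is sent as its
; difference from the previous byte. A difference of zero marks the
; start of a run; the sender then skips over the identical bytes and
; transmits the index of the first byte that differs (an index of
; zero means the run reached the end of the block). A block starting
; with the bytes 20 20 20 20 07 (hex) therefore goes over the line as
; 20 00 04 E7 ..., and a block of 256 identical bytes costs only a
; few bytes on the wire.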
; Checksum calculation
; --------------------
; The 16-bit checksum is based on the exclusive or of two items:
; - The data bytes before RLE is applied
; - The bytes from a CRC table, indexed with the data bytes
; Refer to the source code for details.
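;
; The sketch below is purely illustrative and is wrapped in .if 0 so
; it is never assembled: it shows the usual shape of a table-driven
; CRC-16 update of this kind, reusing the crc, crctbll and crctblh
; names defined later in this file (crcsketch is just a placeholder
; label). Which of crc/crc+1 holds the high byte, and the exact update
; performed by the real updcrc/maketbl routines, may differ from what
; is shown here.
;
.if 0 ; illustrative sketch only - never assembled
crcsketch: ; entry: A = next data byte (before RLE)
eor crc+1 ; index = data byte XOR current CRC high byte
tax
lda crc ; new high byte = old low byte XOR high table entry
eor crctblh,x
sta crc+1
lda crctbll,x ; new low byte comes straight from the low table
sta crc
rts
.endif
;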
; Version History:
; ----------------
; Version 2.41 August 2017
; - Leave the final screen up after a disk transfer
; - Set default speed at 115.2k
; Version 2.4 March 2010
; - Fix to allow Laser 128 machines to run at 115.2kbps
; - Fix slot scan for Franklin Ace 500 computers
; Version 2.3 February 2009
; - Add slot scan for Apple /// computers
; Version 2.2 January 2008
; David Schmidt
; - Nibble disk send by Gerard Putter
; - Half track disk send by Eric Neilson
; - Fix slot scan for IIc computers
; Version 1.33 July 2007
; David Schmidt
; - Support Laser 128-style serial port via Pascal
; entry points
; Version 1.32 June 2007
; David Schmidt
; - Scan slots for initial/default comms device
; - Managed interrupt disabling/enabling correctly
; Version 1.31 December 2006
; David Schmidt
; - Added self-save feature, removed BASIC configuration
; - Included bug fixes from Joe Oswald:
; . Mask interrupts
; . Recover from host NAKs
; Version 1.30 November 2006 - Only released with ADTPro
; David Schmidt
; - Added native IIgs (Zilog SCC) support
; Version 1.23 November 2005
; Knut Roll-Lund and Ed Eastman
; - Added 115200b rate for SSC card
; - Added additional baud rates to
; Windows and DOS versions
; - added buffering to DIR handling
; so it works on higher baudrates
; Version 1.22 CHANGES "ABOUT" MSG
; Version 1.21 FILLS UNREADABLE SECTORS WITH ZEROS
; Version 1.20
; - HAS A CONFIGURATION MENU
; - HAS A DIRECTORY FUNCTION
; - ABORTS INSTANTLY IF USER PUSHES ESCAPE
; - FIXES THE "256 RETRIES" BUG
; - HAS MORE EFFICIENT CRC ROUTINES
; Version 1.11 SETS IOBVOL TO 0 BEFORE CALLING RWTS
; Version 1.10 ADDS THESE ENHANCEMENTS:
; - DIFFERENTIAL RLE COMPRESSION TO SPEED UP TRANSFER
; - 16-BIT CRC ERROR DETECTION
; - AUTOMATIC RE-READS OF BAD SECTORS
; Version 1.01 CORRECTS THE FOLLOWING BUGS:
; - INITIALIZATION ROUTINE CRASHED WITH SOME CARDS
; - FULL 8.3 MS-DOS FILENAMES NOW ACCEPTED
; Version 1.00 - FIRST PUBLIC RELEASE
; The version number as a macro. Must not be more than 7 characters.
.define version_no "2.41"
; Protocol number. Note it must be assigned a higher value when the protocol is
; modified, and must never be < $0101 or > $01FF
protono = $0101
; CONSTANTS
esc = $9b ; ESCAPE KEY
ack = $06 ; ACKNOWLEDGE
nak = $15 ; NEGATIVE ACKNOWLEDGE
parmnum = 9 ; NUMBER OF CONFIGURABLE PARMS
nibpages = $34 ; Number of nibble pages to send
enq = $05 ; Request to re-read track
can = $18 ; Track not accepted at host
; ZERO PAGE LOCATIONS (ALL UNUSED BY DOS, BASIC & MONITOR)
msgptr = $6 ; POINTER TO MESSAGE TEXT (2B)
secptr = $8 ; POINTER TO SECTOR DATA (2B)
nibptr = $8 ; Pointer to nibble data (2B)
hlftrk = $18 ; Half track send enabled (1B)
nibpcnt = $1e ; Counts nibble pages (1B)
trkcnt = $1e ; COUNTS SEVEN TRACKS (1B)
synccnt = $eb ; Count bytes during sync (2B)
crc = $eb ; TRACK CRC-16 (2B)
prev = $ed ; PREVIOUS BYTE FOR RLE (1B)
ysave = $ee ; TEMP STORAGE (1B)
xsave = $ef ; Temp storage for X reg (1B)
; BIG FILES
tracks = $2000 ; 7 TRACKS AT 2000-8FFF (28KB)
crctbll = $9000 ; CRC LOW TABLE (256B)
crctblh = $9100 ; CRC HIGH TABLE (256B)
; MONITOR STUFF
ch = $24 ; CURSOR HORIZONTAL POSITION
cv = $25 ; CURSOR VERTICAL POSITION
basl = $28 ; BASE LINE ADDRESS
invflg = $32 ; INVERSE FLAG
clreol = $fc9c ; CLEAR TO END OF LINE
clreop = $fc42 ; CLEAR TO END OF SCREEN
home = $fc58 ; CLEAR WHOLE SCREEN
tabv = $fb5b ; SET BASL FROM A
vtab = $fc22 ; SET BASL FROM CV
rdkey = $fd0c ; CHARACTER INPUT
nxtchar = $fd75 ; LINE INPUT
cout = $fded ; Monitor output
cout1 = $fdf0 ; CHARACTER OUTPUT
crout = $fd8e ; OUTPUT RETURN
; MESSAGES. These numbers are byte offsets into msgtbl
mtitle = 0 ; TITLE SCREEN
mconfig = 2 ; CONFIGURATION TOP OF SCREEN
mconfg2 = 4 ; CONFIGURATION BOTTOM OF SCREEN
mprompt = 6 ; MAIN PROMPT
mdircon = 8 ; CONTINUED DIRECTORY PROMPT
mdirend = 10 ; END OF DIRECTORY PROMPT
mfrecv = 12 ; FILE TO RECEIVE:_
mfsend = 14 ; FILE TO SEND:_
mrecv = 16 ; RECEIVING FILE_ (_ = SPACE)
msend = 18 ; SENDING FILE_
mconfus = 20 ; NONSENSE FROM HOST
mnot16 = 22 ; NOT A 16 SECTOR DISK
merror = 24 ; ERROR: FILE_
mcant = 26 ; |CAN'T BE OPENED. (| = CR)
mexists = 28 ; |ALREADY EXISTS.
mnot140 = 30 ; |IS NOT A 140K IMAGE.
mfull = 32 ; |DOESN'T FIT ON DISK.
manykey = 34 ; __ANY KEY:_
mdont = 36 ; <- DO NOT CHANGE
mabout = 38 ; ABOUT ADT...
mtest = 40 ; TESTING DISK FORMAT
mpcans = 42 ; AWAITING ANSWER FROM HOST
mpause = 44 ; HIT ANY KEY TO CONTINUE...
mdoserr = 46 ; DOS ERROR:_
mdos0a = 48 ; FILE LOCKED
mdos04 = 50 ; WRITE PROTECTED
mdos08 = 52 ; I/O ERROR
mnibsend = 54 ; Sending nibble file_
mnodiskc = 56 ; Slot has no disk card
manalys = 58 ; Host could not analyze the disk
mhlfsend = 60 ; Sending halftrack/nibble file
msendtype = 62 ; Type of send
mproterr = 64 ; Incompatible host
;*********************************************************
.ORG $803
jmp start ; Skip early stuff
; Next few functions contain loops that must be within a
; page for correct timing, so place them at the start.
;---------------------------------------------------------
; calibrat - calibrate the disk arm to track #0
; the code is essentially like in the disk ii card
;---------------------------------------------------------
calibrat:
jsr slot2x ; a = x = slot * 16
sta xsave ; store slot * 16 in memory
lda $c08e,x ; prepare latch for input
lda $c08c,x ; strobe data latch for i/o
lda pdrive ; is 0 for drive 1
beq caldriv1
inx
caldriv1:
lda $c08a,x ; engage drive 1 or 2
lda xsave
tax ; restore x
lda $c089,x ; motor on
ldy #$50 ; number of half-tracks
caldriv3:
lda $c080,x ; stepper motor phase n off
tya
and #$03 ; make phase from count in y
asl ; times 2
ora xsave ; make index for i/o address
tax
lda $c081,x ; stepper motor phase n on
lda #$56 ; param for wait loop
jsr $fca8 ; wait specified time units
dey ; decrement count
bpl caldriv3 ; jump back while y >= 0
rts
;---------------------------------------------------------
; rdnibtr - read track as nibbles into tracks buffer.
; total bytes read is nibpages * 256, or about twice
; the track length.
; the drive has been calibrated, so we know we are in read
; mode, the motor is running, and the correct drive
; number is engaged.
; we wait until we encounter a first nibble after a gap.
; for this purpose, a gap is at least 4 ff nibbles in a
; row. note this is not 100% foolproof; the ff nibble
; can occur as a regular nibble instead of autosync,
; but this matches what apple dos itself does, so it
; is probably ok.
;---------------------------------------------------------
rdnibtr:
jsr slot2x ; a = x = slot * 16
lda #0 ; a = 0
tay ; y = 0 (index)
sta nibptr ; set running ptr (lo) to 0
lda #>tracks ; tracks address high
sta nibptr+1 ; set running ptr (hi)
lda #nibpages
sta nibpcnt ; page counter
; use jmp, not jsr, to perform nibsync. that way we
; have a bit more breathing room, cycle-wise. the
; "function" returns with a jmp to rdnibtr8.
jmp nibsync ; find first post-gap byte
; the read loop must be fast enough to read 1 byte every
; 32 cycles. it appears the interval is 17 cycles within
; one data page, and 29 cycles when crossing a data page.
; these numbers are based on code that does not cross
; a page boundary.
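; as a quick check of those figures against the cycle counts noted
; below: from the lda that captures a byte back to the next poll the
; in-page path costs 4+2+6+2+3 = 17 cycles, and the page-crossing
; path costs 4+2+6+2+2+5+5+3 = 29 cycles, both comfortably inside
; the 32-cycle budget.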
rdnibtr7:
lda $c08c,x ; read (4 cycles)
bpl rdnibtr7 ; until byte complete (2c)
rdnibtr8:
sta (nibptr),y ; store in buffer (6c)
iny ; (2c)
bne rdnibtr7 ; 256 bytes done? (2 / 3c)
inc nibptr+1 ; next page (5c)
dec nibpcnt ; count (5c)
bne rdnibtr7 ; and back (3c)
rts
;---------------------------------------------------------
; seekabs - copy of standard dos seekabs at b9a0.
; by copying it we are independent on the dos version,
; while still avoiding rwts in the nibble copy function.
; on entry, x is slot * 16; a is desired half-track;
; $478 is current half-track
;---------------------------------------------------------
seekabs:
stx $2b
sta $2a
cmp $0478
beq seekabs9
lda #$00
sta $26
seekabs1:
lda $0478
sta $27
sec
sbc $2a
beq seekabs6
bcs seekabs2
eor #$ff
inc $0478
bcc seekabs3
seekabs2:
adc #$fe
dec $0478
seekabs3:
cmp $26
bcc seekabs4
lda $26
seekabs4:
cmp #$0c
bcs seekabs5
tay
seekabs5:
sec
jsr seekabs7
lda delaytb1,y
jsr armdelay
lda $27
clc
jsr seekabs8
lda delaytb2,y
jsr armdelay
inc $26
bne seekabs1
seekabs6:
jsr armdelay
clc
seekabs7:
lda $0478
seekabs8:
and #$03
rol
ora $2b
tax
lda $c080,x
ldx $2b
seekabs9:
rts
;---------------------------------------------------------
; armdelay - copy of standard dos armdelay at $ba00
;---------------------------------------------------------
armdelay:
ldx #$11
armdela1:
dex
bne armdela1
inc $46
bne armdela3
inc $47
armdela3:
sec
sbc #$01
bne armdelay
rts
;---------------------------------------------------------
; next are two tables used in the arm movements. they must
; also lie in one page.
;---------------------------------------------------------
delaytb1:
.byte $01,$30,$28,$24,$20,$1e
.byte $1d,$1c,$1c,$1c,$1c,$1c
delaytb2:
.byte $70,$2c,$26,$22,$1f,$1e
.byte $1d,$1c,$1c,$1c,$1c,$1c
; End of the page-bound stuff.
;---------------------------------------------------------
; START - MAIN PROGRAM
;---------------------------------------------------------
start:
sei ; Turn off interrupts
cld ; Binary mode
jsr $fe84 ; Normal text
jsr $fb2f ; Text mode, full window
jsr $fe89 ; Input from keyboard
lda #$15
jsr cout ; Switch to 40 columns
lda #$00
sta secptr ; secptr is always page-aligned
sta stddos ; Assume standard DOS initially
sta hlftrk ; Init hlftrk global to zero
lda $b92e ; Save contents of DOS
sta dosbyte ; Checksum bytes
cmp #$13
beq dosok1 ; Decrement stddos (making
dec stddos ; it non-zero) if the correct
dosok1: lda $b98a ; bytes aren't there
sta dosbyte+1
cmp #$b7
beq dosok2
dec stddos
dosok2:
jsr maketbl ; MAKE CRC-16 TABLES
jsr parmdft ; RESET PARAMETERS TO DEFAULTS
jsr parmint ; INTERPRET PARAMETERS
redraw: jsr title ; DRAW TITLE SCREEN
mainlup:
lda #0 ; Turn hlftrk flag back off
sta hlftrk
ldy #mprompt ; SHOW MAIN PROMPT
mainl:
resetio:
jsr $0000 ; Pseudo-indirect JSR to reset the IO device
jsr showmsg ; AT BOTTOM OF SCREEN
jsr rdkey ; GET ANSWER
and #$df ; CONVERT TO UPPERCASE
cmp #_'S' ; SEND?
bne krecv ; Nope, try receive
ldy #msendtype
jsr showmsg
sendtype:
jsr rdkey ; GET ANSWER
and #$df ; CONVERT TO UPPERCASE
cmp #_'H' ; Half?
bne :+
lda #1 ; set hlftrk flag on
sta hlftrk
jsr sendhlf ; yes, do halftrack/nib send
jmp redraw ; changed the screen, so restore
:
cmp #_'N' ; Nibble?
bne :+
jsr sendnib ; yes, do send nibble disk
jmp redraw ; changed the screen, so restore
:
cmp #_'S' ; SEND?
bne :+ ; Nope, invalid input
jsr send ; YES, DO SEND ROUTINE
jmp redraw ; changed the screen, so restore
:
cmp #esc
beq mainlup
jmp sendtype
krecv: cmp #_'R' ; RECEIVE?
bne kdir ; NOPE, TRY DIR
jsr receive ; YES, DO RECEIVE ROUTINE
jmp mainlup
kdir: cmp #_'D' ; DIR?
bne kconf ; NOPE, TRY CONFIGURE
jsr dir ; YES, DO DIR ROUTINE
jmp redraw
kconf: cmp #_'C' ; CONFIGURE?
beq :+
cmp #_'G' ; Yeah, so, G is as good as C.
bne kabout ; NOPE, TRY ABOUT
: jsr config ; YES, DO CONFIGURE ROUTINE
jsr parmint ; AND INTERPRET PARAMETERS
jmp redraw
kabout: cmp #$9f ; ABOUT MESSAGE? ("?" KEY)
bne kquit ; NOPE, TRY QUIT
jsr home
ldy #mabout ; YES, SHOW MESSAGE, WAIT
jsr showm1 ; FOR KEY, AND RETURN
jsr rdkey
jmp redraw
kquit: cmp #_'Q' ; QUIT?
beq :+ ; Yes - need to do a bit of jumping
jmp mainlup ; NOPE, WAS A BAD KEY
: lda dosbyte ; YES, RESTORE DOS CHECKSUM CODE
sta $b92e
lda dosbyte+1
sta $b98a
cli ; Restore interrupts
jsr home ; Clear screen
jmp $3d0 ; AND QUIT TO DOS
;---------------------------------------------------------
; DIR - GET DIRECTORY FROM THE HOST AND PRINT IT
; HOST SENDS 0,1 AFTER PAGES 1..N-1, 0,0 AFTER LAST PAGE
;---------------------------------------------------------
dir:
ldy #mpcans
jsr showmsg
lda #_'D' ; SEND DIR COMMAND TO HOST
jsr putc
lda #>tracks ; GET BUFFER POINTER HIGHBYTE
sta secptr+1 ; SET SECTOR BUFFER POINTER
ldy #0 ; COUNTER
dirbuff:
jsr getc ; GET SERIAL CHARACTER
php ; SAVE FLAGS
sta (secptr),y ; STORE BYTE
iny ; BUMP
bne dirnext ; SKIP
inc secptr+1 ; NEXT 256 BYTES
dirnext:
plp ; RESTORE FLAGS
bne dirbuff ; LOOP UNTIL ZERO
jsr getc ; GET CONTINUATION CHARACTER
sta (secptr),y ; STORE CONTINUATION BYTE TOO
jsr home ; CLEAR SCREEN
lda #>tracks ; GET BUFFER POINTER HIGHBYTE
sta secptr+1 ; SET SECTOR BUFFER POINTER
ldy #0 ; COUNTER
dirdisp:
lda (secptr),y ; GET BYTE FROM BUFFER
php ; SAVE FLAGS
iny ; BUMP
bne dirmore ; SKIP
inc secptr+1 ; NEXT 256 BYTES
dirmore:
plp ; RESTORE FLAGS
beq dirpage ; PAGE OR DIR END
ora #$80
jsr cout1 ; DISPLAY
jmp dirdisp ; LOOP
dirpage:
lda (secptr),y ; GET BYTE FROM BUFFER
bne dircont
ldy #mdirend ; NO MORE FILES, WAIT FOR KEY
jsr showmsg ; AND RETURN
jsr rdkey
rts
dircont:
ldy #mdircon ; SPACE TO CONTINUE, ESC TO STOP
jsr showmsg
jsr rdkey
eor #esc ; NOT ESCAPE, CONTINUE NORMALLY
bne dir ; BY SENDING A "D" TO HOST
jmp putc ; ESCAPE, SEND 00 AND RETURN
;---------------------------------------------------------
; FindSlot - Find a comms device
;---------------------------------------------------------
FindSlot:
lda #$00
sta msgptr ; Borrow msgptr
sta TempSlot
sta TempIIgsSlot
ldx #$07 ; Slot number - start high
FindSlotLoop:
clc
txa
adc #$c0
sta msgptr+1
ldy #$05 ; Lookup offset
lda (msgptr),y
cmp #$38 ; Is $Cn05 == $38?
bne FindSlotNext
ldy #$07 ; Lookup offset
lda (msgptr),y
cmp #$18 ; Is $Cn07 == $18?
bne FindSlotNext
ldy #$0b ; Lookup offset
lda (msgptr),y
cmp #$01 ; Is $Cn0B == $01?
bne FindSlotMaybeIII
ldy #$0c ; Lookup offset
lda (msgptr),y
cmp #$31 ; Is $Cn0C == $31?
bne FindSlotNext
; Ok, we have a set of signature bytes for a comms card (or IIc/IIgs, or Laser).
; Remove more specific models/situations first.
ldy #$1b ; Lookup offset
lda (msgptr),y
cmp #$eb ; Do we have a goofy XBA instruction in $C01B?
bne FoundNotIIgs ; If not, it's not an IIgs.
cpx #$02 ; Only bothering to check IIgs Modem slot (2)
bne FindSlotNext
lda #$07 ; We found the IIgs modem port, so store it
sta TempIIgsSlot
jmp FindSlotNext
FoundNotIIgs:
ldy #$00
lda (msgptr),y
cmp #$da ; Is $Cn00 == $DA?
bne NotLaser ; No - it's not a Laser 128
lda #$10 ; Yes - it's a Laser 128. Set SSCPUT to ignore DSR.
sta mod5+1
lda #$08 ; Set SSCGET to ignore DSR and DCD.
sta mod6+1
jmp ProcessIIc ; Now treat it like a IIc.
NotLaser:
ldy #$0a
lda (msgptr),y
cmp #$0e ; Is this a newer IIc - $Cn0a == $0E?
beq ProcessIIc
NotNewIIc:
cmp #$25 ; Is this an older IIc - $Cn0a == $25?
beq ProcessIIc ; Yes - treat it like a IIc.
NotOldIIc:
ldy #$01
lda (msgptr),y
cmp #$a7 ; Is this a Franklin Ace 500 - $Cn01 == $A7?
bne GenericSSC ; No - call it a generic SSC. Yes - treat it like a IIc.
ProcessIIc:
cpx #$02 ; Only bothering to check IIc Modem slot (2)
bne FindSlotNext
stx TempSlot
jmp FindSlotBreak ; Don't check port #1 on an IIc - we don't care
GenericSSC:
stx TempSlot ; Nope, nothing special. Just a Super Serial card.
lda #$50 ; Make sure we can watch for DSR
sta mod5+1
lda #$68 ; Make sure we can watch for DSR and DCD
sta mod6+1
FindSlotNext:
dex
bne FindSlotLoop
; All done now, so clean up
FindSlotBreak:
ldx TempSlot
beq :+
dex ; Subtract 1 to match slot# to parm index
stx pssc
stx default+2 ; Store the slot number discovered as default
rts
: lda TempIIgsSlot
beq FindSlotDone ; Didn't find anything in particular
sta pssc
sta default+2 ; Store the slot number discovered as default
FindSlotDone:
rts
FindSlotMaybeIII:
cmp #$08 ; Is $Cn0B == $08?
bne FindSlotNext
ldy #$0c ; Lookup offset
lda (msgptr),y
cmp #$48 ; Is $Cn0C == $48?
bne FindSlotNext
lda #$10 ; Yes - it's a Laser 128. Set SSCPUT to ignore DSR.
sta mod5+1
lda #$08 ; Set SSCGET to ignore DSR and DCD.
sta mod6+1
jmp FindSlotNext ; It's an Apple /// SSC-like thing.
TempSlot: .byte 0
TempIIgsSlot: .byte 0
;---------------------------------------------------------
; CONFIG - ADT CONFIGURATION
;---------------------------------------------------------
config:
jsr home ; CLEAR SCREEN
; No matter what, we put in the default value for
; 'save' - always turn it off when showing the config screen.
lda #$01 ; Index for 'NO' save
sta psave
ldy #mconfig ; SHOW CONFIGURATION SCREEN
jsr showm1
ldy #mconfg2
jsr showmsg ; IN 2 PARTS BECAUSE >256 CHARS
ldy #parmnum-1 ; SAVE PREVIOUS PARAMETERS
savparm:
lda parms,y ; IN CASE OF ESCAPE
sta oldparm,y
dey
bpl savparm
;--------------- FIRST PART: DISPLAY SCREEN --------------
refresh:
lda pssc
cmp #$08 ; Are we talking about the Laser/Pascal Entry Points?
bmi restore ; No, go on ahead
lda pspeed ; Yes - so check baudrate
cmp #$06 ; Is it too fast?
bne refnext ; No, go on ahead
sta svspeed
lda #$05 ; Yes - so slow it down
sta pspeed
jmp refnext
restore:
lda svspeed ; Did we have speed previously re-set by Laser?
beq refnext ; No, go on ahead
sta pspeed ; Yes - so restore it now
lda #$00
sta svspeed ; Forget about resetting speed until we roll through Laser again
refnext:
lda #3 ; FIRST PARAMETER IS ON LINE 3
jsr tabv
ldx #0 ; PARAMETER NUMBER
ldy #$ff ; OFFSET INTO PARAMETER TEXT
nxtline:
stx linecnt ; SAVE CURRENT LINE
lda #15
sta ch
clc
lda parmsiz,x ; GET CURRENT VALUE (NEGATIVE:
sbc parms,x ; LAST VALUE HAS CURVAL=0)
sta curval
lda parmsiz,x ; X WILL BE EACH POSSIBLE VALUE
tax ; STARTING WITH THE LAST ONE
dex
valloop:
cpx curval ; X EQUAL TO CURRENT VALUE?
beq printit ; YES, PRINT IT
skipchr:
iny ; NO, SKIP IT
lda parmtxt,y
bne skipchr
beq endval
printit:
lda linecnt ; IF WE'RE ON THE ACTIVE LINE,
cmp curparm ; THEN PRINT VALUE IN INVERSE
bne prtval ; ELSE PRINT IT NORMALLY
lda #$3f
sta invflg
prtval: lda #$a0 ; SPACE BEFORE & AFTER VALUE
jsr cout1
prtloop:
iny ; PRINT VALUE
lda parmtxt,y
beq endprt
jsr cout1
jmp prtloop
endprt: lda #$a0
jsr cout1
lda #$ff ; BACK TO NORMAL
sta invflg
endval: dex
bpl valloop ; PRINT REMAINING VALUES
sty ysave ; CLREOL USES Y
jsr clreol ; REMOVE GARBAGE AT EOL
lda #$a0 ; Add an extra space to output
jsr cout ; Without it, the crout will
jsr crout ; sometimes cause the disk to
; spin! Strange but true!
ldy ysave
ldx linecnt ; INCREMENT CURRENT LINE
inx
cpx #parmnum
bcc nxtline ; Loop parmnum times
lda stddos ; IF NON-STANDARD DOS, WRITE
beq getcmd ;"DO NOT CHANGE" ON SCREEN
lda #9 ; NEXT TO THE CHECKSUMS OPTION
jsr tabv
ldy #23
sty ch
ldy #mdont
jsr showm1
;--------------- SECOND PART: CHANGE VALUES --------------
getcmd: lda $c000 ; Wait for next command
bpl getcmd
bit $c010
ldx curparm ; Current parameter in X
cmp #$88
bne notleft
dec parms,x ; Left arrow hit
bpl leftok ; Decrement current value
lda parmsiz,x
sbc #1
sta parms,x
leftok: jmp refresh
notleft:
cmp #$95
bne notrgt
lda parms,x ; Right arrow hit
adc #0 ; Increment current value
cmp parmsiz,x
bcc rightok
lda #0
rightok:
sta parms,x
jmp refresh
notrgt: cmp #$8b
bne notup
dex ; Up arrow hit
bpl upok ; Decrement parameter
ldx #parmnum-1
upok: stx curparm
jmp refresh
notup: cmp #$8a
beq isdown
cmp #$a0
bne notdown
isdown: inx ; Down arrow or space hit
cpx #parmnum ; Increment parameter
bcc downok
ldx #0
downok: stx curparm
jmp refresh
notdown:
cmp #$84
bne notctld
jsr parmdft ; Ctrl-D pushed, restore default
notesc: jmp refresh ; parameters
notctld:
cmp #$8d
beq endcfg ; Return hit, stop configuration
cmp #esc
bne notesc
ldy #parmnum-1 ; Escape pushed; restore old
parmrst:
lda oldparm,y ; parameters and continue
sta parms,y
dey
bpl parmrst
endcfg:
jsr chekslot ; Verify if slot has disk card
bcs getcmd ; Don't accept slot
lda #$01
sta configyet
lda psave ; Did they ask to save parms?
bne nosave
lda #$01 ; Index for 'NO' save
sta psave
ldy #parmnum-1 ; Save previous parameters
savparm2:
lda parms,y
sta default,y
dey
bpl savparm2
lda #$00
sta curparm
jsr bsave
nosave:
rts
linecnt:
.byte $00 ; Current line number
curparm:
.byte $00 ; Active parameter
curval: .byte $00 ; Value of active parameter
default:
.byte 5,0,1,6,1,0,0,0,1 ; DEFAULT PARM VALUES
oldparm:
.res parmnum ; Old parameters saved here
svspeed:
.byte $06 ; Storage for speed setting
;---------------------------------------------------------
; bsave - Save a copy of ADT in memory
;---------------------------------------------------------
bsave:
lda length+1 ; Convert 16-bit length to a hex string
pha
jsr tochrhi ; Hi nybble, hi byte
sta nybble1
pla
jsr tochrlo ; Lo nybble, hi byte
sta nybble2
lda length
pha
jsr tochrhi ; Hi nybble, lo byte
sta nybble3
pla
jsr tochrlo ; Lo nybble, lo byte
sta nybble4
patch: ; Patch in our "error handler:"
lda #$85 ; It saves the DOS error code in $DE
sta $A6D2
lda #$DE ; sta $DE
sta $A6D3
lda #$60 ; rts - return from error
sta $A6D4
ldx #$00
stx $DE
cmdloop: ; Send BSAVE command to DOS
lda command,X
beq :+
jsr cout
inx
jmp cmdloop
:
lda $DE ; Everything cool?
beq bsavedone ; All done
err:
ldy #mdoserr
jsr showm1
cmp #$0a ; File locked?
bne :+
ldy #mdos0a
jmp ermsg
: cmp #$04 ; Write protected?
bne :+
ldy #mdos04
jmp ermsg
: ldy #mdos08 ; Catch-all: I/O error
ermsg: jsr showm1
bsavedone:
jsr pause
rts
;---------------------------------------------------------
; tochrlo/hi:
; Convert a nybble in A to a character representing its
; hex value, returned in A
;---------------------------------------------------------
tochrlo:
and #$0f
jmp tochrgo
tochrhi:
lsr
lsr
lsr
lsr
tochrgo:
clc
cmp #$09
bcc gt9 ; Branch if A is less than 9
adc #$B6
jmp tochrdone
gt9:
ora #$B0
tochrdone:
rts
command:
.byte $8d,$84
asc "BSAVE ADT,A$0803,L$"
nybble1:
.byte $00
nybble2:
.byte $00
nybble3:
.byte $00
nybble4:
.byte $00
.byte $8D,$00
length: .word endasm-start
;---------------------------------------------------------
; PAUSE - print 'PRESS A KEY TO CONTINUE...' and wait
;---------------------------------------------------------
pause:
lda #$00
sta ch
lda #$17
jsr tabv
jsr clreop
ldy #mpause
jsr showmsg
jsr rdkey
cmp #$9B
beq pauseesc
clc
rts
pauseesc:
sec
rts
;---------------------------------------------------------
; checkslot - see if chosen slot has a disk card
; the check is identical to what is in the autoboot rom
;---------------------------------------------------------
chekslot:
ldy pdslot ;get slot# (0..6)
iny ;now 1..7
tya
ora #$c0 ;a now has page address of slot
sta secptr+1 ;abuse this pointer
lda #0
sta secptr ;secptr now points to firmware
ldy #7
checksl1:
lda (secptr),y ;get firmware byte
cmp diskid-1,y ;byte ok?
bne checksl3 ;no: report error
dey
dey
bpl checksl1 ;until 4 bytes checked
bmi checksl7 ;jump always
checksl3:
jsr awbeep
ldy #mnodiskc ;error message number
jsr showmsg ;and show message
checksl5:
jsr rdkey ;get answer
and #$df ;convert to uppercase
cmp #'Y'
beq checksl7 ;user agrees
cmp #'N'
bne checksl5 ;wrong answer
ldy #mconfg2
jsr showmsg ;restore default text
sec ;indicate error
rts
checksl7:
clc ;indicate: no error
checksl9:
rts
; Define diskid ourselves, to be independent of ROM
diskid: .byte $20,$ff,$00,$ff,$03,$ff,$3c
;---------------------------------------------------------
; PARMINT - INTERPRET PARAMETERS
;---------------------------------------------------------
parmint:
ldy pdslot ; GET SLOT# (0..6)
iny ; NOW 1..7
tya
ora #_'0' ; CONVERT TO ASCII AND PUT
sta mtslt ; INTO TITLE SCREEN
tya
asl
asl
asl
asl ; NOW $S0
sta iobslt ; STORE IN IOB
adc #$89 ; NOW $89+S0
sta drvmod+1 ; SELF-MOD FOR "DRIVES ON"
ldy pdrive ; GET DRIVE# (0..1)
iny ; NOW 1..2
sty iobdrv ; STORE IN IOB
tya
ora #_'0' ; CONVERT TO ASCII AND PUT
sta mtdrv ; INTO TITLE SCREEN
ldy pssc ; GET SSC SLOT# (0..6)
iny ; NOW 1..7
tya
ora #_'0' ; CONVERT TO ASCII AND PUT
sta mtssc ; INTO TITLE SCREEN
tya
asl
asl
asl
asl ; NOW $S0
adc #$88
tax
ldy pspeed ; CONTROL: 8 DATA BITS, 1 STOP
tya ; GET SPEED (0..6)
asl
asl
adc pspeed ; 6*SPEED IN Y, NOW COPY
tay ; FIVE CHARACTERS INTO
ldx #4 ; TITLE SCREEN
putspd: lda spdtxt,y
sta mtspd,x
iny
dex
bpl putspd
ldy #1 ; CONVERT RETRIES FROM 0..7
trylup: ldx pretry,y ; TO 0..5,10,128
lda trytbl,x
sta realtry,y
dey
bpl trylup
ldx #0 ; IF PCKSUM IS 'NO', WE PATCH
ldy #0 ; DOS TO IGNORE ADDRESS AND
lda pcksum ; DATA CHECKSUM ERRORS
bne rwtsmod
ldx dosbyte+1
ldy dosbyte
rwtsmod:
stx $b98a ; IS THERE AN APPLE II TODAY
sty $b92e ; THAT DOESN'T HAVE >=48K RAM?
;(YES)
ldy pssc ; GET SLOT# (0..6)
iny ; NOW 1..7
tya
cmp #$08
bpl drivers
jmp initssc ; Y holds slot number
drivers:
cmp #$09
bpl laser
jmp initzgs
laser:
jmp initpas
rts
spdtxt: asc " 003 0021 0042 0084 006900291 K511"
bpsctrl:
.byte $16,$18,$1a,$1c,$1e,$1f,$10
trytbl: .byte 0,1,2,3,4,5,10,99
;---------------------------------------------------------
; GETNAME - GET FILENAME AND SEND TO HOST
; When an acceptable file name has been entered, the function
; sends the command letter to the host.
; This function also does the protocol negotiation, because
; that has to happen after sending the command letter.
; When the host accepts the protocol, the function sends
; the entered file name and waits for the host's response.
;---------------------------------------------------------
getname:
stx directn ; TFR DIRECTION (0=RECV, 1=SEND)
ldy prmptbl,x
jsr showmsg ; ASK FILENAME
ldx #0 ; GET ANSWER AT $200
jsr nxtchar
lda #0 ; NULL-TERMINATE IT
sta $200,x
txa
bne fnameok
jmp abort ; ABORT IF NO FILENAME
fnameok:
ldy #mtest ;"TESTING THE DISK"
jsr showmsg
lda #>tracks ; READ TRACK 1 SECTOR 1
sta iobbuf+1 ; TO SEE IF THERE'S A 16-SECTOR
lda #1 ; DISK IN THE DRIVE
sta iobcmd
sta iobtrk
sta iobsec
lda #>iob
ldy #<iob
jsr $3d9
bcc diskok ; READ SUCCESSFUL
ldy #mnot16 ; NOT 16-SECTOR DISK
jsr showmsg
ldy #manykey ; APPEND PROMPT
jsr showm1
jsr awbeep
jsr rdkey ; WAIT FOR KEY
jmp abort ; AND ABORT
diskok: ldy #mpcans ;"AWAITING ANSWER FROM HOST"
jsr showmsg
lda #_'R' ; LOAD ACC WITH "R" OR "S"
adc directn ; Rather tricky way to change R into S.
jsr putc ; AND SEND TO HOST
jsr initprot
bcc :+ ; Protocol accepted
jmp abort ; Exit via abort
: ldx #0
fnloop: lda $200,x ; SEND FILENAME TO HOST
jsr putc
beq getans ; STOP AT NULL
inx
bne fnloop
getans: jsr getc ; ANSWER FROM HOST SHOULD BE 0
bne pcerror ; THERE'S A PROBLEM
jsr title ; CLEAR STATUS
ldx directn
ldy tfrtbl,x
jsr showmsg ; SHOW TRANSFER MESSAGE
showfn: lda #2 ; AND ADD FILENAME
sta msgptr+1
lda #0
sta msgptr
tay
jmp msgloop ; AND RETURN THROUGH SHOWMSG
pcerror:
pha ; SAVE ERROR NUMBER
ldy #merror ; SHOW "ERROR: FILE "
jsr showmsg ; SHOW FILENAME
jsr showfn
pla
tay
jsr showm1 ; SHOW ERROR MESSAGE
ldy #manykey ; APPEND PROMPT
jsr showm1
jsr awbeep
jsr rdkey ; WAIT FOR KEY
jmp abort ; AND RESTART
directn:
.byte $00
prmptbl:
.byte mfrecv,mfsend
tfrtbl: .byte mrecv,msend
;---------------------------------------------------------
; initprot - Negotiate the protocol. Return carry clear
; if successful, carry set otherwise. Displays an error
; message if the host does not accept the protocol.
;---------------------------------------------------------
initprot:
lda #>protono ; High order byte
jsr putc ; Send to host
lda #<protono ; Low order byte
jsr putc ; Send to host
lda #0
jsr putc ; Delimiter
jsr getc ; Read response from host
cmp #ack
bne :+ ; Not ack, so invalid protocol or host
clc
rts ; Exit with OK status
: ldy #mproterr ; Erroneous protocol negotiation
jsr showmsg ; Display appropriate error message
jsr awbeep ; This error deserves some attention
jsr rdkey ; Wait for key
sec ; Error status
rts
;---------------------------------------------------------
; RECEIVE - MAIN RECEIVE ROUTINE
;---------------------------------------------------------
receive:
ldx #0 ; DIRECTION = HOST-->APPLE
jsr getname ; ASK FOR FILENAME & SEND TO HOST
lda #ack ; 1ST MESSAGE ALWAYS ACK
sta message
lda #0 ; START ON TRACK 0
sta iobtrk
sta errors ; NO DISK ERRORS YET
recvlup:
sta savtrk ; SAVE CURRENT TRACK
ldx #1
jsr sr7trk ; RECEIVE 7 TRACKS FROM HOST
ldx #2
jsr rw7trk ; WRITE 7 TRACKS TO DISK
lda iobtrk
cmp #$23 ; REPEAT UNTIL TRACK $23
bcc recvlup
lda message ; SEND LAST ACK
jsr putc
lda errors
jsr putc ; SEND ERROR FLAG TO HOST
jsr awbeep
jmp pause ; WAIT FOR KEY AND END
;---------------------------------------------------------
; SEND - MAIN SEND ROUTINE
;---------------------------------------------------------
send:
ldx #1 ; DIRECTION = APPLE-->HOST
jsr getname ; ASK FOR FILENAME & SEND TO HOST
lda #ack ; SEND INITIAL ACK
jsr putc
lda #0 ; START ON TRACK 0
sta iobtrk
sta errors ; NO DISK ERRORS YET
sendlup:
sta savtrk ; SAVE CURRENT TRACK
ldx #1
jsr rw7trk ; READ 7 TRACKS FROM DISK
ldx #0
jsr sr7trk ; SEND 7 TRACKS TO HOST
lda iobtrk
cmp #$23 ; REPEAT UNTIL TRACK $23
bcc sendlup
lda errors
jsr putc ; SEND ERROR FLAG TO HOST
jsr awbeep
jmp pause ; WAIT FOR KEY AND END
;---------------------------------------------------------
; SR7TRK - SEND (X=0) OR RECEIVE (X=1) 7 TRACKS
;---------------------------------------------------------
sr7trk: stx what2do ; X=0 FOR SEND, X=1 FOR RECEIVE
lda #7 ; DO 7 TRACKS
sta trkcnt
lda #>tracks ; STARTING HERE
sta secptr+1
jsr homecur ; RESET CURSOR POSITION
s7trk: lda #$f ; COUNT SECTORS FROM F TO 0
sta iobsec
s7sec: ldx what2do ; PRINT STATUS CHARACTER
lda srchar,x
jsr chrover
lda what2do ; EXECUTE SEND OR RECEIVE
bne dorecv ; ROUTINE
;------------------------ SENDING ------------------------
jsr sendsec ; SEND CURRENT SECTOR
lda crc ; FOLLOWED BY CRC
jsr putc
lda crc+1
jsr putc
jsr getc ; GET RESPONSE FROM HOST
cmp #ack ; IS IT ACK?
beq srokay ; YES, ALL RIGHT
cmp #nak ; IS IT NAK?
beq s7sec ; YES, SEND AGAIN
ldy #mconfus ; SOMETHING IS WRONG
jsr showmsg ; TELL BAD NEWS
ldy #manykey ; APPEND PROMPT
jsr showm1
jsr awbeep
jsr rdkey ; WAIT FOR KEY
jmp abort ; AND ABORT
;----------------------- RECEIVING -----------------------
dorecv: ldy #0 ; CLEAR NEW SECTOR
tya
clrloop:
sta (secptr),y
iny
bne clrloop
lda message ; SEND RESULT OF PREV SECTOR
jsr putc
jsr recvsec ; RECEIVE SECTOR
jsr getc
sta pccrc ; AND CRC
jsr getc
sta pccrc+1
jsr undiff ; UNCOMPRESS SECTOR
lda crc ; CHECK RECEIVED CRC VS
cmp pccrc ; CALCULATED CRC
bne recverr
lda crc+1
cmp pccrc+1
beq srokay
recverr:
lda #nak ; CRC ERROR, ASK FOR RESEND
sta message
bne s7sec
;------------------ BACK TO COMMON LOOP ------------------
srokay:
lda #ack ; WAS SUCCESSFUL
sta message ; SEND ACK
jsr chrrest ; RESTORE PREVIOUS STATUS CHAR
inc secptr+1 ; NEXT SECTOR
dec iobsec
bpl s7sec ; TRACK NOT FINISHED
lda trkcnt
cmp #2 ; STARTING LAST TRACK, TURN
bne notone ; DRIVE ON, EXCEPT IN THE LAST
lda savtrk ; BLOCK
cmp #$1c
beq notone
drvmod: bit $c089
notone: dec trkcnt
beq srend
jmp s7trk ; LOOP UNTIL 7 TRACKS DONE
srend: rts
srchar: asc "OI" ; STATUS CHARACTERS: OUT/IN
what2do:
.byte $00
;---------------------------------------------------------
; SENDSEC - SEND CURRENT SECTOR WITH RLE
; CRC IS COMPUTED BUT NOT SENT
;---------------------------------------------------------
sendsec:
ldy #0 ; START AT FIRST BYTE
sty crc ; ZERO CRC
sty crc+1
sty prev ; NO PREVIOUS CHARACTER
ss1: lda (secptr),y ; GET BYTE TO SEND
jsr updcrc ; UPDATE CRC
tax ; KEEP A COPY IN X
sec ; SUBTRACT FROM PREVIOUS
sbc prev
stx prev ; SAVE PREVIOUS BYTE
jsr putc ; SEND DIFFERENCE
beq ss3 ; WAS IT A ZERO?
iny ; NO, DO NEXT BYTE
bne ss1 ; LOOP IF MORE TO DO
rts ; ELSE RETURN
ss2: jsr updcrc
ss3: iny ; ANY MORE BYTES?
beq ss4 ; NO, IT WAS 00 UP TO END
lda (secptr),y ; LOOK AT NEXT BYTE
cmp prev
beq ss2 ; SAME AS BEFORE, CONTINUE
ss4: tya ; DIFFERENCE NOT A ZERO
jsr putc ; SEND NEW ADDRESS
bne ss1 ; AND GO BACK TO MAIN LOOP
rts ; OR RETURN IF NO MORE BYTES
;---------------------------------------------------------
; RECVSEC - RECEIVE SECTOR WITH RLE (NO TIME TO UNDIFF)
;---------------------------------------------------------
recvsec:
ldy #0 ; START AT BEGINNING OF BUFFER
rc1: jsr getc ; GET DIFFERENCE
beq rc2 ; IF ZERO, GET NEW INDEX
sta (secptr),y ; ELSE PUT CHAR IN BUFFER
iny ; AND INCREMENT INDEX
bne rc1 ; LOOP IF NOT AT BUFFER END
rts ; ELSE RETURN
rc2: jsr getc ; GET NEW INDEX
tay ; IN Y REGISTER
bne rc1 ; LOOP IF INDEX <> 0
rts ; ELSE RETURN
;---------------------------------------------------------
; UNDIFF - FINISH RLE DECOMPRESSION AND UPDATE CRC
;---------------------------------------------------------
undiff: ldy #0
sty crc ; CLEAR CRC
sty crc+1
sty prev ; INITIAL BASE IS ZERO
udloop: lda (secptr),y ; GET NEW DIFFERENCE
clc
adc prev ; ADD TO BASE
jsr updcrc ; UPDATE CRC
sta prev ; THIS IS THE NEW BASE
sta (secptr),y ; STORE REAL BYTE
iny
bne udloop ; REPEAT 256 TIMES
rts
;---------------------------------------------------------
; RW7TRK - READ (X=1) OR WRITE (X=2) 7 TRACKS
; USES A,X,Y. IF ESCAPE, CALLS ABORT
;---------------------------------------------------------
rw7trk: stx iobcmd ; X=1 FOR READ, X=2 FOR WRITE
lda #7 ; COUNT 7 TRACKS
sta trkcnt
lda #>tracks ; START AT BEGINNING OF BUFFER
sta iobbuf+1
jsr homecur ; RESET CURSOR POSITION
nexttrk:
lda #$f ; START AT SECTOR F (READ IS
sta iobsec ; FASTER THIS WAY)
nextsec:
ldx iobcmd ; GET MAX RETRIES FROM
lda realtry-1,x ; PARAMETER DATA
sta retries
lda rwchar-1,x ; PRINT STATUS CHARACTER
jsr chrover
rwagain:
lda $c000 ; CHECK KEYBOARD
cmp #esc ; ESCAPE PUSHED?
bne rwcont ; NO, CONTINUE
jmp babort ; YES, ABORT
rwcont: lda #>iob ; GET IOB ADDRESS IN REGISTERS
ldy #<iob
jsr $3d9 ; CALL RWTS THROUGH VECTOR
lda #_'.' ; CARRY CLEAR MEANS NO ERROR
bcc sectok ; NO ERROR: PUT . IN STATUS
dec retries ; ERROR: SOME PATIENCE LEFT?
bpl rwagain ; YES, TRY AGAIN
rol errors ; NO, SET ERRORS TO NONZERO
jsr clrsect ; FILL SECTOR WITH ZEROS
lda #_I'*' ; AND PUT INVERSE * IN STATUS
sectok: jsr chradv ; PRINT SECTOR STATUS & ADVANCE
inc iobbuf+1 ; NEXT PAGE IN BUFFER
dec iobsec ; NEXT SECTOR
bpl nextsec ; LOOP UNTIL END OF TRACK
inc iobtrk ; NEXT TRACK
dec trkcnt ; LOOP UNTIL 7 TRACKS DONE
bne nexttrk
rts
rwchar: asc "RW" ; STATUS CHARACTERS: READ/WRITE
retries:
.byte $00
realtry:
.byte $00,$00 ; REAL NUMBER OF RETRIES
;---------------------------------------------------------
; CLRSECT - CLEAR CURRENT SECTOR
;---------------------------------------------------------
clrsect:
lda iobbuf+1 ; POINT TO CORRECT SECTOR
sta csloop+2
ldy #0 ; AND FILL 256 ZEROS
tya
csloop: sta $ff00,y
iny
bne csloop
rts
;---------------------------------------------------------
; sendhlf - send entire disk as nibbles with halftracking
;
; this routine is essentially the same as sendnib except
; the stepper motor is increased only two phases instead
; of four and there are 70 halftracks instead of the normal
; 35. file format is .v2h
;---------------------------------------------------------
sendhlf:
jsr nibtitle ; adjust screen
jsr initshlf ; ask for filename & send to pc
jsr nibblank ; clear progress to all blanks
jsr calibrat ; calibrate the disk
lda #ack ; send initial ack
jsr putc
lda #0 ; don't actually use rwts...
sta iobtrk ; ...so use this as just memory
shlfloop:
lda #_'R'
jsr nibshow ; show "R" at current track
jsr rdnibtr ; read track as nibbles
jsr snibtrak ; send nibbles to other side
bcs shlfloop ; re-read same track
inc iobtrk ; next trackno
lda iobtrk
cmp #$46 ; repeat while trackno < 70
bcs shlffin ; jump if ready
jsr hlfnextt ; goto next half track
jmp shlfloop
shlffin:
lda #0 ; no errors encountered
jsr putc ; send (no) error flag to pc
jsr motoroff ; we're finished with the drive
jmp awbeep ; beep and end
;---------------------------------------------------------
; initshlf - init send halftrack/nibble disk
; ask for a filename, then send "V" command and filename
; to the other side and await an acknowledgement.
; note we do not check for a valid disk in the drive;
; basically any disk will do. if there is no disk present,
; bad luck (behaves the same as when booting).
;---------------------------------------------------------
initshlf:
ldy #mfsend
jsr showmsg ; Ask for filename
ldx #0 ; Get answer at $200
jsr nxtchar ; Input the line (Apple ROM)
lda #0 ; Null-terminate it
sta $200,x
txa
bne hlfnamok
jmp abort ; Abort if no filename
hlfnamok:
ldy #mpcans ; "awaiting answer from host"
jsr showmsg
lda #_'V' ; Load acc with command code
jsr putc ; ...and send to host
jsr initprot ; Protocol negotiation
bcc :+ ; Protocol accepted
jmp abort ; Exit via abort
: ldx #0
hfloop2:
lda $200,x ; Send filename to host
jsr putc
beq gethans2 ; Stop at null
inx
bne hfloop2
gethans2:
; for test only: activate next and deactivate line after
; lda #0 ; simulate ok
jsr getc ; answer from host should be 0
beq initsh2
jmp pcerror ; error; exit via pcerror
initsh2:
ldy #mhlfsend
jsr showmsg ; show transfer message
jmp showfn ; exit via showfn
;---------------------------------------------------------
; hlfnextt - goto next halftrack. we know there is still room
; to move further. next track is in iobtrk.
; use copy of dos function seekabs.
;---------------------------------------------------------
hlfnextt:
jsr slot2x ; a = x = slot * 16
lda iobtrk ; a = desired halftrack
pha ; save on stack
sec ; prepare subtract
sbc #1 ; a now contains current track
sta $478 ; seekabs expects this
pla ; desired track in a
jsr seekabs ; let dos function do its thing
rts
;---------------------------------------------------------
; sendnib - send entire disk as nibbles
;
; we don't want to depend on any disk formatting, not even
; on the track and sector numbers. so don't use rwts; just
; calibrate the arm to track 0, and send all 35 tracks. we
; do _not_ support half-tracks. each track is read about
; twice its length, to give the other side enough data to
; make the analysis. each track must be acknowledged
; before we proceed with the next track.
;---------------------------------------------------------
sendnib:
jsr nibtitle ; Adjust screen
jsr initsnib ; Ask for filename & send to pc
jsr nibblank ; Clear progress to all blanks
jsr calibrat ; Calibrate the disk
lda #ack ; Send initial ack
jsr putc
lda #0 ; Don't actually use rwts...
sta iobtrk ; ...so use this as just memory
snibloop:
lda #_'R'
jsr nibshow ; Show 'R' at current track
jsr rdnibtr ; Read track as nibbles
jsr snibtrak ; Send nibbles to other side
bcs snibloop ; Re-read same track
inc iobtrk ; Next trackno
lda iobtrk
cmp #$23 ; Repeat while trackno < 35
bcs snibfin ; Jump if ready
jsr nibnextt ; Go to next track
jmp snibloop
snibfin:
lda #0 ; No errors encountered
jsr putc ; Send (no) error flag to pc
jsr motoroff ; We're finished with the drive
jmp awbeep ; Beep and end
;---------------------------------------------------------
; nibblank - clear progress to all blanks
;---------------------------------------------------------
nibblank:
lda cv
pha ; Save current vertical pos
lda #5 ; Fixed vertical position
jsr tabv ; Calculate basl from a
ldy #2 ; Initial horizontal position
lda #_' ' ; The character to display
nibblnk1:
sta (basl),y ; Put on screen
iny ; Next horizontal position
cpy #37 ; At the end?
bcc nibblnk1 ; If not, jump back
pla
jsr tabv ; Restore cv
rts
;---------------------------------------------------------
; nibshow - show character in a at current track
; support for halftracking added
;---------------------------------------------------------
nibshow:
tay ; Character in y
lda cv
pha ; Save cv on stack
tya ; accum now contains char
pha ; Save char on stack
lda #5 ; Fixed vertical position
jsr tabv ; Calculate basl from accum
lda hlftrk ; Check to see if we're in halftrk
cmp #1 ; mode
bne nibnorm ; No hlftrk, continue normally
lda iobtrk
cmp #0 ; Track zero always treated the same
beq nibnorm
lsr ; Is track odd or even?
bcc nibeven ; Track is even, continue normally
lda #6 ; Increment vertical position
jsr tabv ; Calculate basl from accum
nibeven:
lda iobtrk ; Current track
lsr ; Calc horiz pos by
clc ; dividing by two and
adc #2 ; adding 2
jmp nibdisp
nibnorm:
lda iobtrk ; Current track
clc
adc #2 ; Calculate horizontal pos
nibdisp:
tay ; Index value in y
pla ; Restore character to show
sta (basl),y
pla
jsr tabv ; Restore cv
rts
;---------------------------------------------------------
; initsnib - init send nibble disk
; ask for a filename, then send "N" command and filename
; to the other side and await an acknowledgement.
; note we do not check for a valid disk in the drive;
; basically any disk will do. if there is no disk present,
; bad luck (behaves the same as when booting).
;---------------------------------------------------------
initsnib:
ldy #mfsend
jsr showmsg ; Ask filename
ldx #0 ; Get answer at $200
jsr nxtchar ; Input the line (Apple ROM)
lda #0 ; Null-terminate it
sta $200,x
txa
bne nibnamok
jmp abort ; Abort if no filename
nibnamok:
ldy #mpcans ; "awaiting answer from host"
jsr showmsg
lda #_'N' ; Load acc with command code
jsr putc ; and send to pc
jsr initprot ; Protocol negotiation
bcc :+ ; Protocol accepted
jmp abort ; Exit via abort
: ldx #0
fnloop2:
lda $200,x ; Send filename to pc
jsr putc
beq getans2 ; Stop at null
inx
bne fnloop2
getans2:
; for testing only: uncomment the next line and comment out the line after it
; lda #0 ; Simulate ok
jsr getc ; Answer from host should be 0
beq initsn2
jmp pcerror ; Error; exit via pcerror
initsn2:
ldy #mnibsend
jsr showmsg ; Show transfer message
jmp showfn ; Exit via showfn
;---------------------------------------------------------
; snibtrak - send nibble track to the other side
; and wait for acknowledgement. each 256 byte page is
; followed by a 16-bit crc.
; we know the buffer is set up at "tracks", and is
; nibpages * 256 bytes long. tracks is at page boundary.
; when the pc answers ack, clear carry. when it answers
; enq, set carry. when it answers anything else, abort
; the operation with the appropriate error message.
;---------------------------------------------------------
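; status characters shown at the current track position:
;   "O" = sending, "!" = page had to be resent, "." = track done,
;   "U" = host marked the track unreadable (probably a half track).
; host responses, as handled below:
;   per page:  ack = ok, nak = resend page, anything else = abort.
;   per track: ack = ok, can = unreadable but continue, enq = re-read
;              the same track, nak = host could not analyze (abort).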
snibtrak:
lda #0 ; a = 0
sta iobsec ; Reset sector counter
sta nibptr ; Init running ptr
lda #>tracks ; Tracks address high
sta nibptr+1
lda #nibpages
sta nibpcnt ; Page counter
lda #_'O'
jsr nibshow ; Show 'O' at current track
snibtr1:
jsr snibpage
lda crc ; followed by crc
jsr putc
lda crc+1
jsr putc
; for testing only: uncomment the next line and comment out the line after it
; lda #ack ; Simulate response.
jsr getc ; Get response from host
cmp #ack ; Is it ack?
beq snibtr5 ; Yes, all right
pha ; Save on stack
lda #_I'!' ; Error during send
jsr nibshow ; Show status of current track
pla ; Restore response
cmp #nak ; Is it nak?
beq snibtr1 ; Yes, send again
snibtr2:
ldy #mconfus ; Something is wrong
snibtr3:
jsr showmsg ; Tell bad news
jsr motoroff ; Transfer ended in error
ldy #manykey ; Append prompt
jsr showm1
jsr awbeep
jsr rdkey ; Wait for key
jmp abort ; and abort
snibtr5:
lda #_'O'
jsr nibshow ; Show 'O' at current track
inc nibptr+1 ; Next page
inc iobsec ; Increment sector counter
dec nibpcnt ; Count
bne snibtr1 ; and back if more pages
; for testing only: uncomment the next line and comment out the line after it
; lda #ack ; Simulate response
jsr getc ; Get response from pc
cmp #ack ; Is it ack?
beq snibtr7 ; Ok
cmp #can ; Is it can (unreadable trk)?
beq snibtr8 ; Ok
cmp #nak ; Was it nak?
beq snibtr6 ; We will abort
cmp #enq
bne snibtr2 ; Host is confused; abort
sec ; Let caller know what goes on
rts
snibtr6:
ldy #manalys ; Host could not analyze the track
bpl snibtr3 ; Branch always
snibtr7:
lda #_'.' ; Entire track transferred ok
jsr nibshow ; Show status of current track
clc ; Indicate success to caller
rts
snibtr8:
lda #_I'U' ; Entire track was unreadable
jsr nibshow ; Probably a half track
clc ; Indicate success to caller
rts
;---------------------------------------------------------
; snibpage - send one page with nibble data and calculate
; crc. nibptr points to first byte to send.
;---------------------------------------------------------
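; encoding sketch, as read from the code below: prev starts at 0 and
; every data byte goes out as (byte - prev) mod 256. a zero
; difference switches to run mode: the page is scanned while bytes
; keep matching prev, then the index of the first differing byte is
; sent (0 means the run reached the end of the page). the crc covers
; the 256 raw data bytes only, not the differences or the
; sector/track/filler header bytes.
; example: a page starting ff ff ff d5 ... is sent as ff, 00, 03, d6.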
snibpage:
ldy #0 ; Start index
sty crc ; Zero crc
sty crc+1
sty prev ; No previous character
lda iobsec
jsr putc ; Send the sector number
lda iobtrk
jsr putc ; Send the track number
lda #$02
jsr putc ; Send a protocol filler
snibpag1:
lda (nibptr),y ; Load byte to send
jsr updcrc ; Update crc
tax ; Keep a copy in x
sec ; Subtract from previous
sbc prev
stx prev ; Save previous byte
jsr putc ; Send difference
beq snibpag3 ; Was it a zero?
iny ; No, do next byte
bne snibpag1 ; Loop if more in this page
rts
snibpag2:
jsr updcrc ; Run byte still counts toward crc
snibpag3:
iny ; Any more bytes in this page?
beq snibpag4 ; No, run reached end of page
lda (nibptr),y ; Look at next byte
cmp prev
beq snibpag2 ; Same as before, keep scanning run
snibpag4:
tya ; Y = index where the run ends
jsr putc ; Send it (0 = ran to end of page)
bne snibpag1 ; Back to main loop if not at end
rts ; Or return if page is done
;---------------------------------------------------------
; nibnextt - goto next track. we know there is still room
; to move further. next track is in iobtrk.
; use copy of dos function seekabs.
;---------------------------------------------------------
nibnextt:
jsr slot2x ; a = x = slot * 16
lda iobtrk ; a = desired track
asl a ; a now contains half-track
pha ; save on stack
sec ; prepare subtract
sbc #2 ; a now contains current track
sta $478 ; seekabs expects this
pla ; desired track in a
jsr seekabs ; let dos function do its thing
rts
;---------------------------------------------------------
; motoroff - turn disk drive motor off
; preserves y. doesn't hurt if motor is already off.
;---------------------------------------------------------
motoroff:
jsr slot2x ; a = x = slot * 16
lda $c088,x ; turn motor off
rts
;---------------------------------------------------------
; slot2x - sets configured slot * 16 in x and in a
;---------------------------------------------------------
slot2x: ldx pdslot
inx ; now 1..7
txa
asl
asl
asl
asl ; a now contains slot * 16
tax ; store in x
rts
;---------------------------------------------------------
; nibsync - synchronize on first byte after gap
; this function is only used from rdnibtr, but i had to
; make it a separate function to keep other stuff in one
; page (because of instruction timings).
; this function is always fast enough to process the
; nibbles, no matter how it is laid out in memory.
; it always returns the first nibble after a gap, provided
; the track has a gap at all. if we don't find a gap, we are
; probably dealing with an unformatted track. in that
; case, just return any byte as the first, so the process
; can continue.
; on entry, x must contain slot * 16. the disk must spin,
; and we must be in read mode and on the right track.
; on exit, the zero flag is 0, and a contains the byte.
; x and y are preserved.
; note we check the number of bytes read only when
; starting a new sequence; the check takes so long we
; lose any byte sync we might have (> 32 cycles).
;---------------------------------------------------------
nibsync:
tya
pha ; save y on the stack
lda #0
tay ; y=0 (counter)
sta synccnt
sta synccnt+1 ; init number of bytes
nibsync0:
jsr chekscnt
bcs nibsync5 ; accept any byte
nibsync1:
lda $c08c,x ; wait for complete byte
bpl nibsync1
iny ; count byte
cmp #$ff ; is it a gap byte?
bne nibsync0
nibsync2:
lda $c08c,x ; next byte
bpl nibsync2
iny ; count byte
cmp #$ff ; is it a gap byte?
bne nibsync0 ; only 1 gap byte
nibsync3:
lda $c08c,x ; next byte
bpl nibsync3
iny ; count byte
cmp #$ff ; is it a gap byte?
bne nibsync0 ; only 2 gap bytes
nibsync4:
lda $c08c,x ; next byte
bpl nibsync4
iny ; count byte
cmp #$ff ; is it a gap byte?
bne nibsync0 ; only 3 gap bytes
; at this point, we encountered 4 consecutive gap bytes.
; so now wait for the first non-gap byte.
nibsync5:
pla
tay ; restore y
nibsync6:
lda $c08c,x ; next byte
bpl nibsync6
cmp #$ff ; is it a gap byte?
beq nibsync6 ; go read next byte
jmp rdnibtr8 ; avoid rts; save some cycles
;---------------------------------------------------------
; chekscnt - check if we have to continue syncing
; add y to synccnt (16 bit), and reset y to 0. when
; synccnt reaches $3400, return carry set, else clear.
; $3400 is twice the max number of bytes in one track.
;---------------------------------------------------------
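; ($3400 = 13312 = 2 x 6656, twice the assumed maximum number of
; nibbles on one track.)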
chekscnt:
clc ; add y to 16-bit synccnt
tya
adc synccnt ; lo-order part
sta synccnt
lda #0
tay ; reset y to 0
adc synccnt+1 ; high-order part
sta synccnt+1
cmp #$34 ; sets carry when a >= $34
rts
;---------------------------------------------------------
; HOMECUR - RESET CURSOR POSITION TO 1ST SECTOR
; CHRREST - RESTORE PREVIOUS CONTENTS & ADVANCE CURSOR
; CHRADV - WRITE NEW CONTENTS & ADVANCE CURSOR
; ADVANCE - JUST ADVANCE CURSOR
; CHROVER - JUST WRITE NEW CONTENTS
;---------------------------------------------------------
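; THE PROGRESS GRID HAS ONE COLUMN PER TRACK AND 16 ROWS (LINES 5-20)
; FOR THE SECTORS; THE CURSOR WALKS DOWN A COLUMN AND THEN WRAPS TO
; THE TOP OF THE NEXT ONE.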
homecur:
ldy savtrk
iny ; CURSOR ON 0TH COLUMN
sty ch
jsr topnext ; TOP OF 1ST COLUMN
jmp chrsave ; SAVE 1ST CHARACTER
chrrest:
lda savchr ; RESTORE OLD CHARACTER
chradv: jsr chrover ; OVERWRITE STATUS CHAR
jsr advance ; ADVANCE CURSOR
chrsave:
ldy ch
lda (basl),y ; SAVE NEW CHARACTER
sta savchr
rts
advance:
inc cv ; CURSOR DOWN
lda cv
cmp #21 ; STILL IN DISPLAY?
bcc nowrap ; YES, WE'RE DONE
topnext:
inc ch ; NO, GO TO TOP OF NEXT
lda #5 ; COLUMN
nowrap: jmp tabv ; VALIDATE BASL,H
chrover:
ldy ch
sta (basl),y
rts
;---------------------------------------------------------
; UPDCRC - UPDATE CRC WITH CONTENTS OF ACCUMULATOR
;---------------------------------------------------------
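; TABLE-DRIVEN CRC-16 WITH POLYNOMIAL $1021 (X^16+X^12+X^5+1) AND A
; ZERO INITIAL VALUE; MAKETBL BELOW BUILDS THE TWO 256-BYTE LOOKUP
; TABLES (CRCTBLL/CRCTBLH) AT STARTUP.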
updcrc: pha
eor crc+1
tax
lda crc
eor crctblh,x
sta crc+1
lda crctbll,x
sta crc
pla
rts
;---------------------------------------------------------
; MAKETBL - MAKE CRC-16 TABLES
;---------------------------------------------------------
maketbl:
ldx #0
ldy #0
crcbyte:
stx crc ; LOW BYTE = 0
sty crc+1 ; HIGH BYTE = INDEX
ldx #8 ; FOR EACH BIT
crcbit: lda crc
crcbit1:
asl ; SHIFT CRC LEFT
rol crc+1
bcs crcflip
dex ; HIGH BIT WAS CLEAR, DO NOTHING
bne crcbit1
beq crcsave
crcflip:
eor #$21 ; HIGH BIT WAS SET, FLIP BITS
sta crc ;0, 5, AND 12
lda crc+1
eor #$10
sta crc+1
dex
bne crcbit
lda crc ; STORE CRC IN TABLES
crcsave:
sta crctbll,y
lda crc+1
sta crctblh,y
iny
bne crcbyte ; DO NEXT BYTE
rts
;---------------------------------------------------------
; PARMDFT - RESET PARAMETERS TO DEFAULT VALUES (USES AX)
;---------------------------------------------------------
parmdft:
lda configyet ; Already configured manually?
bne warmer ; Yes - keep the user's choices
jsr FindSlot ; No - scan the slots
warmer:
ldx #parmnum-1
dftloop:
lda default,x
sta parms,x
dex
bpl dftloop
rts
;---------------------------------------------------------
; AWBEEP - CUTE TWO-TONE BEEP (USES AXY)
;---------------------------------------------------------
awbeep: lda psound ; IF SOUND OFF, RETURN NOW
bne nobeep
lda #$80 ; STRAIGHT FROM APPLE WRITER ][
jsr beep1 ;(CANNIBALISM IS THE SINCEREST
lda #$a0 ; FORM OF FLATTERY)
beep1: ldy #$80
beep2: tax
beep3: dex
bne beep3
bit $c030 ; WHAP SPEAKER
dey
bne beep2
nobeep: rts
;---------------------------------------------------------
; PUTC - SEND ACC OVER THE SERIAL LINE (AXY UNCHANGED)
;---------------------------------------------------------
putc: jmp $0000 ; Pseudo-indirect JSR - self-modified
;---------------------------------------------------------
; GETC - GET A CHARACTER FROM SERIAL LINE (XY UNCHANGED)
;---------------------------------------------------------
getc: jmp $0000 ; Pseudo-indirect JSR - self-modified
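; (both jmp operands above are patched at init time by the active
; serial driver - patchzgs, PATCHPAS, or the equivalent routine in
; ssc.s - so putc/getc dispatch to the selected hardware.)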
;---------------------------------------------------------
; ABORT - STOP EVERYTHING (CALL babort TO BEEP ALSO)
;---------------------------------------------------------
babort: jsr awbeep ; BEEP
abort: ldx #$ff ; POP GOES THE STACKPTR
txs
jsr motoroff ; Turn potentially active drive off
bit $c010 ; STROBE KEYBOARD
jmp redraw ; AND RESTART
;---------------------------------------------------------
; TITLE - SHOW TITLE SCREEN
;---------------------------------------------------------
title: jsr home ; CLEAR SCREEN
ldy #mtitle
jsr showm1 ; SHOW TOP PART OF TITLE SCREEN
ldx #15 ; SHOW SECTOR NUMBERS
lda #5 ; IN DECREASING ORDER
sta cv ; FROM TOP TO BOTTOM
showsec:
jsr vtab
lda #$20
ldy #38
sta (basl),y
ldy #0
sta (basl),y
lda hexnum,x
iny
sta (basl),y
ldy #37
sta (basl),y
inc cv
dex
bpl showsec
showundm:
lda #_'_' ; SHOW LINE OF UNDERLINES
ldx #38 ; ABOVE INVERSE TEXT
showund:
sta $500,x
dex
bpl showund
rts
;---------------------------------------------------------
; nibtitle - show title screen for nibble disk transfer
;---------------------------------------------------------
nibtitle:
jsr home ; clear screen
ldy #mtitle
jsr showm1 ; show top part of title screen
lda #5 ; show one block left and right
sta cv ; on line 5
jsr vtab
lda #_I' ' ; inverse space char
ldy #38 ; at end of line
sta (basl),y
ldy #0 ; at start of line
sta (basl),y
lda #_I'>' ; inverse character!
iny ; next position in line
sta (basl),y
lda #_I'<' ; inverse character!
ldy #37 ; one-but-last position in line
sta (basl),y
lda hlftrk ; check to see if we need to
cmp #1 ; display halftrack line
bne nibtdone
lda #6 ; move one line down
sta cv
jsr vtab
lda #_I'.' ; put an inverse . on screen
ldy #0 ; at horiz pos 0
sta (basl),y
lda #'5' ; and now put a 5 so we see
ldy #1 ; .5 which means halftrk
sta (basl),y
lda #_I' ' ; put 2 inverse spaces at the end
ldy #37
sta (basl),y
iny
sta (basl),y
nibtdone:
bne showundm ; exit via showundm (branch always)
;---------------------------------------------------------
; SHOWMSG - SHOW NULL-TERMINATED MESSAGE #Y AT BOTTOM OF
; SCREEN. CALL SHOWM1 TO SHOW ANYWHERE WITHOUT ERASING.
; THE MESSAGE CAN HAVE ANY LENGTH THAT FITS THE SCREEN.
;---------------------------------------------------------
showmsg:
sty ysave ; CLREOP USES Y
lda #0
sta ch ; COLUMN 0
lda #22 ; LINE 22
jsr tabv
jsr clreop ; CLEAR MESSAGE AREA
ldy ysave
showm1: lda msgtbl,y ; CALL HERE TO SHOW ANYWHERE
sta msgptr
lda msgtbl+1,y
sta msgptr+1
ldy #0
msgloop:
lda (msgptr),y
beq msgend
jsr cout1
iny
bne msgloop
inc msgptr+1
jmp msgloop
msgend: rts
;------------------------ MESSAGES -----------------------
msgtbl: .addr msg01,msg02,msg03,msg04,msg05,msg06,msg07
.addr msg08,msg09,msg10,msg11,msg12,msg13,msg14
.addr msg15,msg16,msg17,msg18,msg19,msg20,msg21
.addr msg22,msg23,msg24,msg25,msg26,msg27,msg28
.addr msg29,msg30,msg31,msg32,msg33
msg01: asc "COM:S"
mtssc: asc " ,"
mtspd: asc " "
; Define as many space characters as required to fill the line
; while keeping the version number in the middle.
.repeat 5-(.strlen(version_no)/2)
.byte $A0
.endrep
inv " ADT "
inv version_no
inv " "
.repeat 6-((.strlen(version_no)+1)/2)
.byte $A0
.endrep
asc "DISK:S"
mtslt: asc " ,D"
mtdrv: asc " "
.byte $8d,$8d,$8d
invcr " 00000000000000001111111111111111222 "
inv " "
hexnum: inv "0123456789ABCDEF0123456789ABCDEF012 "
.byte $8d,$00
msg02: inv " ADT CONFIGURATION "
.byte $8d,$8d,$8d
asccr "DISK SLOT"
asccr "DISK DRIVE"
asccr "COMMS DEVICE"
asccr "COMMS SPEED"
asccr "READ RETRIES"
asccr "WRITE RETRIES"
asccr "USE CHECKSUMS"
asccr "ENABLE SOUND"
ascz "SAVE CONFIG"
msg03: asccr "USE ARROWS AND SPACE TO CHANGE VALUES,"
ascz "RETURN TO ACCEPT, CTRL-D FOR DEFAULTS."
msg04: inv "S"
asc "END, "
inv "R"
asc "ECEIVE, "
inv "D"
asc "IR, "
inv "C"
asc "ONFIGURE, "
inv "Q"
asc "UIT, "
inv "?"
.byte 00
msg05: ascz "SPACE TO CONTINUE, ESC TO STOP: "
msg06: ascz "END OF DIRECTORY, TYPE SPACE: "
msg07: ascz "FILE TO RECEIVE: "
msg08: ascz "FILE TO SEND: "
msg09: ascz "RECEIVING FILE "
msg10: ascz "SENDING FILE "
msg11: inv "ERROR:"
ascz " NONSENSE FROM HOST."
msg12: inv "ERROR:"
ascz " NOT A 16-SECTOR DISK."
msg13: inv "ERROR:"
ascz " FILE "
msg14: .byte $8d
ascz "CAN'T BE OPENED."
msg15: .byte $8d
ascz "ALREADY EXISTS."
msg16: .byte $8d
ascz "IS NOT A 140K IMAGE."
msg17: .byte $8d
ascz "DOESN'T FIT ON DISK."
msg18: ascz " ANY KEY: "
msg19: ascz "<- DO NOT CHANGE"
msg20: inv "ADT "
invcr version_no
.byte $8d
asccr "ORIGINAL PROGRAM BY PAUL GUERTIN"
.byte $8d
asccr "SEND NIBBLE DISK ADDED BY GERARD PUTTER"
.byte $8d
asccr "HALFTRACK SEND ADDED BY ERIC NEILSON"
.byte $8d
asccr "IIGS,LASER,/// SUPPORT BY DAVID SCHMIDT"
.byte $8d
asc "----------------------------------------"
asccr "SENDS / RECEIVES APPLE II DISK IMAGES"
asccr "VIA A SERIAL CONNECTION."
asccr "REQUIRES A COMPATIBLE COMPANION PROGRAM"
asccr "AT THE HOST SIDE."
.byte $8d
asccr "SSC, IIGS, IIC, LASER & /// COMPATIBLE."
asccr "----------------------------------------"
ascz "PRESS ANY KEY"
msg21: ascz "TESTING DISK FORMAT."
msg22: ascz "AWAITING ANSWER FROM HOST."
msg23: ascz "HIT ANY KEY TO CONTINUE..."
msg24: ascz "DISK ERROR: "
msg25: ascz "FILE LOCKED"
msg26: ascz "WRITE PROTECTED"
msg27: ascz "I/O ERROR"
msg28: ascz "SENDING NIBBLE FILE "
msg29: ascz "NO DISK CARD IN SELECTED SLOT."
.byte $8d
ascz "ARE YOU SURE (Y/N)? "
msg30: inv "ERROR:"
ascz " CANNOT ANALYZE TRACK."
msg31: ascz "SENDING HALFTRACK FILE "
msg32: inv "S"
asc "IMPLE, "
inv "N"
asc "IBBLE, "
inv "H"
ascz "ALF TRACKS ?"
msg33: inv "ERROR:"
ascz " INCOMPATIBLE HOST SOFTWARE"
;----------------------- PARAMETERS ----------------------
configyet:
.byte 0 ; Has the user configged yet?
parmsiz:
.byte 7,2,9,7,8,8,2,2,2 ;#OPTIONS OF EACH PARM
parmtxt:
.byte _'1',0,_'2',0,_'3',0,_'4',0,_'5',0,_'6',0,_'7',0
.byte _'1',0,_'2',0
ascz "SSC SLOT 1"
ascz "SSC SLOT 2"
ascz "SSC SLOT 3"
ascz "SSC SLOT 4"
ascz "SSC SLOT 5"
ascz "SSC SLOT 6"
ascz "SSC SLOT 7"
ascz "IIGS MODEM"
ascz "GENERIC SLOT 2"
ascz "300"
ascz "1200"
ascz "2400"
ascz "4800"
ascz "9600"
ascz "19200"
ascz "115K"
.byte _'0',0,_'1',0,_'2',0,_'3',0,_'4',0,_'5',0,_'1',_'0',0,_'9',_'9',0
.byte _'0',0,_'1',0,_'2',0,_'3',0,_'4',0,_'5',0,_'1',_'0',0,_'9',_'9',0
ascz "YES"
ascz "NO"
ascz "YES"
ascz "NO"
ascz "YES"
ascz "NO"
parms:
pdslot: .byte 5 ; DISK SLOT (6)
pdrive: .byte 0 ; DISK DRIVE (1)
pssc: .byte 1 ; COMMS SLOT (2)
pspeed: .byte 6 ; COMMS SPEED (115k)
pretry: .byte 1,0 ; READ/WRITE MAX RETRIES (1,0)
pcksum: .byte 0 ; USE RWTS CHECKSUMS? (Y)
psound: .byte 0 ; SOUND AT END OF TRANSFER? (Y)
psave: .byte 1 ; SAVE? (N)
;-------------------------- IOB --------------------------
iob: .byte $01 ; IOB TYPE
iobslt: .byte $60 ; SLOT*$10
iobdrv: .byte $01 ; DRIVE
.byte $00 ; VOLUME
iobtrk: .byte $00 ; TRACK
iobsec: .byte $00 ; SECTOR
.addr dct ; DEVICE CHAR TABLE POINTER
iobbuf: .addr tracks ; SECTOR BUFFER POINTER
.byte $00,$00 ; UNUSED
iobcmd: .byte $01 ; COMMAND (1=READ, 2=WRITE)
.byte $00 ; ERROR CODE
.byte $fe ; ACTUAL VOLUME
.byte $60 ; PREVIOUS SLOT
.byte $01 ; PREVIOUS DRIVE
dct: .byte $00,$01,$ef,$d8 ; DEVICE CHARACTERISTICS TABLE
;-------------------------- MISC -------------------------
dosbyte:
.byte $00,$00 ; DOS BYTES CHANGED BY ADT
stddos: .byte $00 ; ZERO IF "STANDARD" DOS
savtrk: .byte $00 ; FIRST TRACK OF SEVEN
savchr: .byte $00 ; CHAR OVERWRITTEN WITH STATUS
message:
.byte $00 ; SECTOR STATUS SENT TO HOST
pccrc: .byte $00,$00 ; CRC RECEIVED FROM HOST
errors: .byte $00 ; NON-0 IF AT LEAST 1 DISK ERROR
; Inline source for IIgs Serial Communications Controller (SCC)
.include "iigsscc.s"
; Inline source for Super Serial Controller (SSC)
.include "ssc.s"
; Inline source for Pascal entry points
.include "pascalep.s"
; End of assembly; used to calculate
endasm: ; length to BSAVE
|
ADTPro/adtpro
| 7,227
|
src/client/dos/iigsscc.s
|
;
; ADTPro - Apple Disk Transfer ProDOS
; Copyright (C) 2006 by David Schmidt
; 1110325+david-schmidt@users.noreply.github.com
;
; This program is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 2 of the License, or (at your
; option) any later version.
;
; This program is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
; for more details.
;
; You should have received a copy of the GNU General Public License along
; with this program; if not, write to the Free Software Foundation, Inc.,
; 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
;
;---------------------------------------------------------
; INITZGS - Set up IIgs SCC chip (kills firmware and GSOS)
;---------------------------------------------------------
initzgs:
sei ; TURN OFF INTERRUPTS
jsr initzscc
jsr patchzgs
rts
;---------------------------------------------------------
; zccp - Send accumulator out the SCC serial port
;---------------------------------------------------------
zccp:
STA tempa
STX tempx
zsend: LDA gscmdb ; rr0
TAX
AND #%00000100 ; test bit 2 (tx buffer empty)
BEQ zsend
TXA
AND #%00100000 ; test bit 5 (CTS - hardware handshake ok?)
BEQ zsend
exit0: LDA tempa ; get char to send
STA gsdatab ; send the character
exit: LDX tempx
LDA tempa
RTS
tempa: .byte 1
tempx: .byte 1
;---------------------------------------------------------
; zccg - Get a character from the SCC serial port (XY unchanged)
;---------------------------------------------------------
zccg:
LDA gscmdb ; DUMMY READ TO RESET 8530 POINTER TO 0
pollscc:
lda $C000
cmp #esc ; escape = abort
bne sccnext
jmp pabort
sccnext:
LDA gscmdb ; READ 8530 READ REGISTER 0
AND #$01 ; BIT 0 MEANS RX CHAR AVAILABLE
cmp #$01
bne pollscc
; THERE'S A CHAR IN THE 8530 RX BUFFER
pullIt:
LDA #$01 ; SET 'POINTER' TO rr1
STA gscmdb
LDA gscmdb ; READ THE 8530 READ REGISTER 1
AND #$20 ; CHECK FOR bit 5=RX OVERRUN
BEQ itsOK
ldx #$30 ; Clear Receive overrun
stx gscmdb
ldx #$00
stx gscmdb
itsOK:
LDA #$08 ; WE WANT TO READ rr8
STA gscmdb ; SET 'POINTER' TO rr8
LDA gscmdb ; READ rr8
rts
;---------------------------------------------------------
; initzscc - initialize the Modem Port
; (Channel B is modem port, A is printer port)
;---------------------------------------------------------
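; rough programming order, following the Z8530 write-register scheme:
; wr9 channel reset, wr3/wr5 character size with rx/tx still disabled,
; wr14 brg source, wr4 clock mode and stop bits, wr11 clock source,
; wr12/wr13 baud rate time constant (TIMECON), then wr3/wr5 again with
; rx/tx enabled and wr15/wr0/wr1 for interrupt behaviour.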
initzscc:
SEI
clc
lda pspeed ; 0 = 300, 1 = 1200, 2 = 2400, 3 = 4800, 4 = 9600, 5=19200, 6=115200
sta baud
inc baud
LDA gscmdb ;hit rr0 once to sync up
LDX #9 ;wr9
LDA #resetb ;load constant to reset Ch B
;for Ch A, use reseta
STX gscmdb
STA gscmdb
NOP ;SCC needs 11 pclck to recover
LDX #3 ;wr3
LDA #%11000000 ;8 data bits, receiver disabled
STX gscmdb ;could be 7 or 6 or 5 data bits
STA gscmdb ;for 8 bits, bits 7,6 = 1,1
LDX #5 ;wr5
LDA #%01100010 ;DTR enabled 0=/HIGH, 8 data bits
STX gscmdb ;no BRK, xmit disabled, no SDLC
STA gscmdb ;RTS ;MUST; be disabled, no crc
LDX #14 ;wr14
LDA #%00000000 ;null cmd, no loopback
STX gscmdb ;no echo, /DTR follows wr5
STA gscmdb ;BRG source is XTAL or RTxC
lda pspeed
cmp #$06
beq gofast
LDX #4 ;wr4
LDA #%01000100 ;X16 clock mode,
STX gscmdb ;1 stop bit, no parity
STA gscmdb ;could be 1.5 or 2 stop bits
;1.5 set bits 3,2 to 1,0
;2 set bits 3,2 to 1,1
LDX #11 ;wr11
LDA #wr11bbrg ;load constant to write
STX gscmdb
STA gscmdb
JSR TIMECON ;set up wr12 and wr13
;to set baud rate.
; Enable the baud rate generator now that the time constant is loaded
ORA #%00000001 ;enable baud rate gen
LDX #14 ;wr14
STX gscmdb
STA gscmdb ;write value
jmp initcommon
gofast:
LDX #4 ;wr4
LDA #%10000100 ;X32 clock mode,
STX gscmdb ;1 stop bit, no parity
STA gscmdb ;could be 1.5 or 2 stop bits
;1.5 set bits 3,2 to 1,0
;2 set bits 3,2 to 1,1
LDX #11 ;wr11
LDA #wr11bxtal ;load constant to write
STX gscmdb
STA gscmdb
initcommon:
LDA #%11000001 ;8 data bits, Rx enable
LDX #3
STX gscmdb
STA gscmdb ;write value
LDA #%01101010 ;DTR enabled; Tx enable
LDX #5
STX gscmdb
STA gscmdb ;write value
; Enable Interrupts
LDX #15 ;wr15
; The CTS/HSKi line below is left commented out. A driver that
; wants interrupts when GPi changes state (i.e. the user on the
; BBS may have hung up) would enable them here. We write a 0
; to this register because we don't need any external
; status interrupts; then the IRQ routine needs no
; handling for overruns, since they won't be
; latched. See the Zilog Tech Ref. for details.
; LDA #%00100000 ;allow ext. int. on CTS/HSKi
LDA #%00000000 ;no external status interrupts
STX gscmdb
STA gscmdb
LDX #0
LDA #%00010000 ;reset ext. stat. ints.
STX gscmdb
STA gscmdb ;write it twice
STX gscmdb
STA gscmdb
LDX #1 ;wr1
LDA #%00000000 ;Wait Request disabled
STX gscmdb ;no Rx or ext. status IRQs
STA gscmdb ;No transmit interrupts (b1)
LDA gscmdb ; READ TO RESET channelB POINTER TO 0
LDA #$09
STA gscmdb ; SET 'POINTER' TO wr9
LDA #$00
STA gscmdb ; Anti BluRry's syndrome medication
CLI
RTS ;we're done!
; TIMECON: Set time constant bytes in wr12 & wr13
; (In other words, set the baud rate.)
TIMECON:
LDY baud
LDA #12
STA gscmdb
LDA baudl-1,y ;load time constant low
STA gscmdb
LDA #13
STA gscmdb
LDA baudh-1,y ;load time constant high
STA gscmdb
RTS
; Table of values for different baud rates. There is
; a low byte and a high byte table.
baudl: .byte 126 ;300 bps (1)
.byte 94 ;1200 (2)
.byte 46 ;2400 (3)
.byte 22 ;4800 (4)
.byte 10 ;9600 (5)
.byte 4 ;19200 (6)
.byte 1 ;38400 (7)
.byte 0 ;57600 (8)
baudh: .byte 1 ;300 bps (1)
.byte 0 ;1200 (2)
.byte 0 ;2400 (3)
.byte 0 ;4800 (4)
.byte 0 ;9600 (5)
.byte 0 ;19200 (6)
.byte 0 ;38400 (7)
.byte 0 ;57600 (8)
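; the time constants appear to assume the IIgs SCC's 3.6864 MHz pclk
; with the x16 clock mode: tc = 3686400 / (2 * 16 * baud) - 2,
; e.g. 9600 baud -> 10 and 19200 baud -> 4.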
;---------------------------------------------------------
; resetzgs - Clean up SCC every time we hit the main loop
;---------------------------------------------------------
resetzgs:
lda gscmdb ; READ TO RESET channelB POINTER TO 0
rts
;---------------------------------------------------------
; PATCHZGS - Patch the entry point of putc and getc over
; to the IIgs versions
;---------------------------------------------------------
patchzgs:
lda #<zccp
sta putc+1
lda #>zccp
sta putc+2
lda #<zccg
sta getc+1
lda #>zccg
sta getc+2
lda #<resetzgs
sta resetio+1
lda #>resetzgs
sta resetio+2
rts
;---------------------------------------------------------
; Default SCC baud rate
;---------------------------------------------------------
baud: .byte 6 ;1=300, 2=1200, 3=2400
;4=4800, 5=9600, 6=19200
;7=38400, 8=57600.
;---------------------------------------------------------
; Apple IIgs SCC Z8530 registers and constants
;---------------------------------------------------------
gscmdb = $C038
gsdatab = $C03A
gscmda = $C039
gsdataa = $C03B
reseta = %11010001 ; constant to reset Channel A
resetb = %01010001 ; constant to reset Channel B
wr11a = %11010000 ; init wr11 in Ch A
wr11bxtal = %00000000 ; init wr11 in Ch B - use external clock
wr11bbrg = %01010000 ; init wr11 in Ch B - use baud rate generator
|
ADTPro/adtpro
| 4,983
|
src/client/dos/pascalep.s
|
;
; ADTPro - Apple Disk Transfer ProDOS
; Copyright (C) 2007 by David Schmidt
; 1110325+david-schmidt@users.noreply.github.com
;
; This program is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 2 of the License, or (at your
; option) any later version.
;
; This program is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
; for more details.
;
; You should have received a copy of the GNU General Public License along
; with this program; if not, write to the Free Software Foundation, Inc.,
; 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
;
;---------------------------------------------------------
; initpas - Do all the Pascal entry point setup stuff
;---------------------------------------------------------
initpas:
jsr SELFMOD
jsr INITSLOT
jsr INITSEND
jsr PATCHPAS
rts
;---------------------------------------------------------
; SELFMOD - Set up all self-modifying addresses
;---------------------------------------------------------
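; Pascal 1.1 firmware protocol: bytes $Cn0D-$Cn10 of a slot's ROM hold
; the one-byte offsets (within page $Cn00) of the init, read, write
; and status entry points; they are copied into the JSR operands
; below. Slot 2 ($C2xx) is assumed throughout. The byte at $C212,
; labelled the control entry point, is read but not stored.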
SELFMOD:
cld
lda $C20D ; PASCAL INIT ENTRY POINT
sta MODINIT+1 ; MOD CODE!!
iny
lda $C20E ; PASCAL READ ENTRY POINT
sta MODREAD+1 ; MOD CODE!!
iny
lda $C20F ; PASCAL WRITE ENTRY POINT
sta MODWRITE+1 ; MOD CODE!!
iny
lda $C210 ; PASCAL STATUS ENTRY POINT
sta MODSTAT1+1 ; MOD CODE!!
sta MODSTAT2+1 ; MOD CODE!!
iny
iny
lda $C212 ; PASCAL CONTROL ENTRY POINT
rts
;---------------------------------------------------------
; INITSLOT - Initialize the slot firmware
;---------------------------------------------------------
INITSLOT:
ldx #$C2 ; $CN, N=SLOT
ldy #$20 ; $N0, N=SLOT
lda #0
MODINIT:
jsr $C245 ; PASCAL INIT ENTRY POINT
rts
;---------------------------------------------------------
; INITSEND - initialization string for serial port
;---------------------------------------------------------
; The serial port initially accepts control-commands
; in its output stream. This means the port is not
; fully 8-bit transparent. We must first send a
; control sequence to prevent the firmware from
; interpreting any of the binary data.
;
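; The ctrl-A sequences below follow the Super Serial Card style
; command set; BaudStrings patches the two digits of the "nn B" baud
; command (for example "15" selects 19200) before the string is sent.
; At the 115k setting no patch is made and the default "15" is kept.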
INITSEND:
lda pspeed
cmp #$06
bpl SILOOP
asl
tay
lda BaudStrings,y
sta BAUDR+1
lda BaudStrings+1,y
sta BAUDR+2
ldy #0
SILOOP:
lda INITSTRING,Y
BEQ SIDONE ; ZERO terminates
jsr putcpas ; preserves Y
iny
bne SILOOP
SIDONE:
rts
INITSTRING:
.byte $01,$d8,$c4 ; ctrl-A X D - disable XON/XOFF
.byte $01,$c3,$c4 ; ctrl-A C D - disable auto CR
.byte $01,$c6,$c4 ; ctrl-A F D - suppress keyboard
.byte $01,$cb ; ctrl-A K - disable auto LF after CR
BAUDR:
.byte $01,$b1,$b5,$c2 ; ctrl-A nn B - set baud rate
.byte $01,$da ; ctrl-A Z - disable firmware control chars
.byte $00 ; terminate string
BaudStrings:
.byte $b0, $b6 ; "06" (300 baud)
.byte $b0, $b8 ; "08" (1200 baud)
.byte $b1, $b0 ; "10" (2400 baud)
.byte $b1, $b2 ; "12" (4800 baud)
.byte $b1, $b4 ; "14" (9600 baud)
.byte $b1, $b5 ; "15" (19200 baud)
;---------------------------------------------------------
; RESETPAS - Clean up every time we hit the main loop
;---------------------------------------------------------
RESETPAS:
rts
;---------------------------------------------------------
; putcpas - Send accumulator out the serial port
;---------------------------------------------------------
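; (The .byte $DA/$5A/$7A/$FA values below are the 65C02 PHX/PHY/PLY/PLX
; opcodes, emitted as raw bytes so the source still assembles as
; plain 6502 code.)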
putcpas:
.byte $DA ; PHX
.byte $5A ; PHY
pha
K8D8:
lda $C000
cmp #esc ; Escape = abort
bne OK8E2
jmp pabort
OK8E2:
ldx #$C2 ; $CN, N=SLOT
ldy #$20 ; $N0
lda #0 ; READY FOR OUTPUT?
MODSTAT1:
jsr $C248 ; PASCAL STATUS ENTRY POINT
bcc K8D8 ; CC MEANS NOT READY
ldx #$C2 ; $CN
ldy #$20 ; $N0
pla ; RETRIEVE CHAR
pha ; MUST SAVE FOR RETURN
MODWRITE:
jsr $C247 ; PASCAL WRITE ENTRY POINT
pla
.byte $7A ; PLY
.byte $FA ; PLX
and #$FF
rts
;---------------------------------------------------------
; getcpas - Get a character from the serial port (XY unchanged)
;---------------------------------------------------------
getcpas:
.byte $DA ; PHX
.byte $5A ; PHY
K902:
lda $C000
cmp #esc ; Escape = abort
bne OK90C
jmp pabort
OK90C:
ldx #$C2 ; $CN, N=SLOT
ldy #$20 ; $N0
lda #1 ; INPUT READY?
MODSTAT2:
jsr $C248 ; PASCAL STATUS ENTRY POINT
bcc K902 ; CC MEANS NO INPUT READY
ldx #$C2 ; $CN
ldy #$20 ; $N0
MODREAD:
jsr $C246 ; PASCAL READ ENTRY POINT
.byte $7A ; PLY
.byte $FA ; PLX
and #$FF
rts
;---------------------------------------------------------
; PATCHPAS - Patch the entry point of putc and getc over
; to the Pascal versions
;---------------------------------------------------------
PATCHPAS:
lda #<putcpas
sta putc+1
lda #>putcpas
sta putc+2
lda #<getcpas
sta getc+1
lda #>getcpas
sta getc+2
lda #<RESETPAS
sta resetio+1
lda #>RESETPAS
sta resetio+2
rts
|
advanced-go-book-src/book-src
| 1,225
|
ch3.x/hello/hello_amd64.s
|
// Copyright © 2017 ChaiShushan <chaishushan{AT}gmail.com>.
// License: https://creativecommons.org/licenses/by-nc-sa/4.0/
#include "textflag.h"
#include "funcdata.h"
// "Hello World!\n"
DATA text<>+0(SB)/8,$"Hello Wo"
DATA text<>+8(SB)/8,$"rld!\n"
GLOBL text<>(SB),NOPTR,$16
// utf8: "你好, 世界!\n"
// hex: e4bda0e5a5bd2c20 e4b896e7958c210a
// len: 16
DATA text_zh<>+0(SB)/8,$"\xe4\xbd\xa0\xe5\xa5\xbd\x2c\x20"
DATA text_zh<>+8(SB)/8,$"\xe4\xb8\x96\xe7\x95\x8c\x21\x0a"
GLOBL text_zh<>(SB),NOPTR,$16
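// All three functions below pass a (data pointer, length) pair on the
// stack to runtime·printstring: PrintHelloWorld_var copies an existing
// Go string header from the ·text package variable, while the other
// two build the header from the file-local DATA symbols and the
// literal length 16.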
// func PrintHelloWorld_var()
TEXT ·PrintHelloWorld_var(SB), $16-0
NO_LOCAL_POINTERS
CALL runtime·printlock(SB)
MOVQ ·text+0(SB), AX
MOVQ AX, (SP)
MOVQ ·text+8(SB), AX
MOVQ AX, 8(SP)
CALL runtime·printstring(SB)
CALL runtime·printunlock(SB)
RET
// func PrintHelloWorld()
TEXT ·PrintHelloWorld(SB), $16-0
NO_LOCAL_POINTERS
CALL runtime·printlock(SB)
MOVQ $text<>+0(SB), AX
MOVQ AX, (SP)
MOVQ $16, 8(SP)
CALL runtime·printstring(SB)
CALL runtime·printunlock(SB)
RET
// func PrintHelloWorld_zh()
TEXT ·PrintHelloWorld_zh(SB), $16-0
NO_LOCAL_POINTERS
CALL runtime·printlock(SB)
MOVQ $text_zh<>+0(SB), AX
MOVQ AX, (SP)
MOVQ $16, 8(SP)
CALL runtime·printstring(SB)
CALL runtime·printunlock(SB)
RET
|
advanced-go-book-src/book-src
| 1,252
|
ch3.x/slice/slice_asm_amd64.s
|
// Copyright © 2017 ChaiShushan <chaishushan{AT}gmail.com>.
// License: https://creativecommons.org/licenses/by-nc-sa/4.0/
#include "textflag.h"
// func AsmSumInt16Slice(v []int16) int16
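// Frame spec $0-26: no local frame, 26 bytes of arguments plus result
// (a 24-byte slice header followed by the int16 result at ret+24).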
TEXT ·AsmSumInt16Slice(SB), NOSPLIT, $0-26
MOVQ v_base+0(FP), R8 // R8 = &v[0]
MOVQ v_len+8(FP), R9 // R9 = len(v)
SHLQ $1, R9 // R9 = len(v)*2 bytes
ADDQ R8, R9 // R9 = &v[len(v)] (end pointer)
MOVQ $0, R10 // sum = 0
loop:
CMPQ R8, R9 // compare p, p_end
JE end // if p == p_end: goto end
ADDW (R8), R10 // sum += *p (16-bit add)
ADDQ $2, R8 // p++ (2 bytes per int16)
JMP loop // goto loop
end:
MOVW R10, ret+24(FP) // return sum
RET
// func AsmSumIntSlice(s []int) int
TEXT ·AsmSumIntSlice(SB), NOSPLIT, $0-32
MOVQ s+0(FP), AX // &s[0]
MOVQ s_len+8(FP), BX // len(s)
MOVQ $0, CX // sum = 0
loop:
CMPQ BX, $0 // compare cnt,0
JLE end // if cnt <= 0: goto end
DECQ BX // cnt--
ADDQ (AX), CX // sum += s[i]
ADDQ $8, AX // i++
JMP loop // goto loop
end:
MOVQ CX, ret+24(FP) // return sum
RET
// func AsmSumIntSliceV2(s []int) int
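// (Same computation as AsmSumIntSlice above; this version replaces the
// explicit counter with an end-pointer comparison.)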
TEXT ·AsmSumIntSliceV2(SB), NOSPLIT, $0-32
MOVQ s+0(FP), AX // p := &s[0]
MOVQ s_len+8(FP), BX
LEAQ 0(AX)(BX*8), BX // p_end := &s[len(s)]
MOVQ $0, CX // sum = 0
loop:
CMPQ AX, BX // compare p,p_end
JGE end // if p >= p_end: goto end
ADDQ (AX), CX // sum += s[i]
ADDQ $8, AX // p++
JMP loop // goto loop
end:
MOVQ CX, ret+24(FP) // return sum
RET
|
Advanced-Effects/Advanced-Effects
| 2,698
|
thirdparty/framework/diagnostics/thirdparty/google_crashpad_client/client/crashpad_info_note.S
|
// Copyright 2018 The Crashpad Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This note section is used on ELF platforms to give ElfImageReader a method
// of finding the instance of CrashpadInfo g_crashpad_info without requiring
// that symbol to be in the dynamic symbol table.
#include "util/misc/elf_note_types.h"
// namespace crashpad {
// CrashpadInfo g_crashpad_info;
// } // namespace crashpad
#define CRASHPAD_INFO_SYMBOL _ZN8crashpad15g_crashpad_infoE
#define NOTE_ALIGN 4
// This section must be "a"llocated so that it appears in the final binary at
// runtime. The reference to CRASHPAD_INFO_SYMBOL uses an offset relative to
// this note to avoid making this note writable, which triggers a bug in GNU
// ld, or adding text relocations which require the target system to allow
// making text segments writable. https://crbug.com/crashpad/260.
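// The layout below is the standard ELF note format: a 12-byte header
// (namesz, descsz, type), the padded name string, then the descriptor,
// which here holds the offset from the descriptor to g_crashpad_info.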
.section .note.crashpad.info,"a",%note
.balign NOTE_ALIGN
CRASHPAD_NOTE:
.long name_end - name // namesz
.long desc_end - desc // descsz
.long CRASHPAD_ELF_NOTE_TYPE_CRASHPAD_INFO // type
name:
.asciz CRASHPAD_ELF_NOTE_NAME
name_end:
.balign NOTE_ALIGN
desc:
#if defined(__LP64__)
.quad CRASHPAD_INFO_SYMBOL - desc
#else
.long CRASHPAD_INFO_SYMBOL - desc
#endif // __LP64__
desc_end:
.size CRASHPAD_NOTE, .-CRASHPAD_NOTE
// CRASHPAD_NOTE can't be referenced directly by GetCrashpadInfo() because the
// relocation used to make the reference may require that the address be
// 8-byte aligned and notes must have 4-byte alignment.
.section .rodata,"a",%progbits
.balign 8
# .globl indicates that it's available to link against other .o files. .hidden
# indicates that it will not appear in the executable's symbol table.
.globl CRASHPAD_NOTE_REFERENCE
.hidden CRASHPAD_NOTE_REFERENCE
.type CRASHPAD_NOTE_REFERENCE, %object
CRASHPAD_NOTE_REFERENCE:
// The value of this quad isn't important. It exists to reference
// CRASHPAD_NOTE, causing the linker to include the note into the binary
// linking Crashpad. The subtraction from |name| is a convenience to allow the
// value to be computed statically.
.quad name - CRASHPAD_NOTE
|
Advanced-Effects/Advanced-Effects
| 5,797
|
thirdparty/framework/diagnostics/thirdparty/google_crashpad_client/util/misc/capture_context_fuchsia.S
|
// Copyright 2018 The Crashpad Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// namespace crashpad {
// void CaptureContext(ucontext_t* context);
// } // namespace crashpad
#define CAPTURECONTEXT_SYMBOL _ZN8crashpad14CaptureContextEP8ucontext
.text
.globl CAPTURECONTEXT_SYMBOL
#if defined(__x86_64__)
.balign 16, 0x90
#elif defined(__aarch64__)
.balign 4, 0x0
#endif
CAPTURECONTEXT_SYMBOL:
#if defined(__x86_64__)
.cfi_startproc
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
// Note that 16-byte stack alignment is not maintained because this function
// does not call out to any other.
// pushfq first, because some instructions (but probably none used here)
// affect %rflags. %rflags will be in -8(%rbp).
pushfq
// General-purpose registers whose values haven’t changed can be captured
// directly.
movq %r8, 0x28(%rdi) // context->uc_mcontext.r8
movq %r9, 0x30(%rdi) // context->uc_mcontext.r9
movq %r10, 0x38(%rdi) // context->uc_mcontext.r10
movq %r11, 0x40(%rdi) // context->uc_mcontext.r11
movq %r12, 0x48(%rdi) // context->uc_mcontext.r12
movq %r13, 0x50(%rdi) // context->uc_mcontext.r13
movq %r14, 0x58(%rdi) // context->uc_mcontext.r14
movq %r15, 0x60(%rdi) // context->uc_mcontext.r15
// Because of the calling convention, there’s no way to recover the value of
// the caller’s %rdi as it existed prior to calling this function. This
// function captures a snapshot of the register state at its return, which
// involves %rdi containing a pointer to its first argument. Callers that
// require the value of %rdi prior to calling this function should obtain it
// separately. For example:
// uint64_t rdi;
// asm("movq %%rdi, %0" : "=m"(rdi));
movq %rdi, 0x68(%rdi) // context->uc_mcontext.rdi
movq %rsi, 0x70(%rdi) // context->uc_mcontext.rsi
// Use %r8 as a scratch register now that it has been saved.
// The original %rbp was saved on the stack in this function’s prologue.
movq (%rbp), %r8
movq %r8, 0x78(%rdi) // context->uc_mcontext.rbp
// Save the remaining general-purpose registers.
movq %rbx, 0x80(%rdi) // context->uc_mcontext.rbx
movq %rdx, 0x88(%rdi) // context->uc_mcontext.rdx
movq %rax, 0x90(%rdi) // context->uc_mcontext.rax
movq %rcx, 0x98(%rdi) // context->uc_mcontext.rcx
// %rsp was saved in %rbp in this function’s prologue, but the caller’s %rsp
// is 16 more than this value: 8 for the original %rbp saved on the stack in
// this function’s prologue, and 8 for the return address saved on the stack
// by the call instruction that reached this function.
leaq 16(%rbp), %r8
movq %r8, 0xa0(%rdi) // context->uc_mcontext.rsp
// The return address saved on the stack used by the call of this function is
// likely more useful than the current RIP here.
movq 8(%rbp), %r8
movq %r8, 0xa8(%rdi) // context->uc_mcontext.rip
// The original %rflags was saved on the stack above.
movq -8(%rbp), %r8
movq %r8, 0xb0(%rdi) // context->uc_mcontext.eflags
// Save the segment registers
movw %cs, 0xb8(%rdi) // context->uc_mcontext.cs
movw %gs, 0xba(%rdi) // context->uc_mcontext.gs
movw %fs, 0xbc(%rdi) // context->uc_mcontext.fs
xorw %ax, %ax
movw %ax, 0xbe(%rdi) // context->uc_mcontext.padding
// Zero out the remainder of the unused pseudo-registers
xorq %r8, %r8
movq %r8, 0xc0(%rdi) // context->uc_mcontext.err
movq %r8, 0xc8(%rdi) // context->uc_mcontext.trapno
movq %r8, 0xd0(%rdi) // context->uc_mcontext.oldmask
movq %r8, 0xd8(%rdi) // context->uc_mcontext.cr2
// Clean up by restoring clobbered registers, even those considered volatile
// by the ABI, so that the captured context represents the state at this
// function’s exit.
movq 0x90(%rdi), %rax
movq 0x28(%rdi), %r8
// TODO(https://crashpad.chromium.org/bug/300): save floating-point registers.
popfq
popq %rbp
ret
.cfi_endproc
#elif defined(__aarch64__)
// Zero out fault_address, which is unused.
str xzr, [x0, #0xb0] // context->uc_mcontext.fault_address
// Save general purpose registers in context->uc_mcontext.regs[i].
// The original x0 can't be recovered.
stp x0, x1, [x0, #0xb8]
stp x2, x3, [x0, #0xc8]
stp x4, x5, [x0, #0xd8]
stp x6, x7, [x0, #0xe8]
stp x8, x9, [x0, #0xf8]
stp x10, x11, [x0, #0x108]
stp x12, x13, [x0, #0x118]
stp x14, x15, [x0, #0x128]
stp x16, x17, [x0, #0x138]
stp x18, x19, [x0, #0x148]
stp x20, x21, [x0, #0x158]
stp x22, x23, [x0, #0x168]
stp x24, x25, [x0, #0x178]
stp x26, x27, [x0, #0x188]
stp x28, x29, [x0, #0x198]
// The original LR can't be recovered.
str LR, [x0, #0x1a8]
// Use x1 as a scratch register.
mov x1, SP
str x1, [x0, #0x1b0] // context->uc_mcontext.sp
// The link register holds the return address for this function.
str LR, [x0, #0x1b8] // context->uc_mcontext.pc
// pstate should hold SPSR but NZCV are the only bits we know about.
mrs x1, NZCV
str x1, [x0, #0x1c0] // context->uc_mcontext.pstate
// Restore x1 from the saved context.
ldr x1, [x0, #0xc0]
// TODO(https://crashpad.chromium.org/bug/300): save floating-point registers.
ret
#endif // __x86_64__
|
Advanced-Effects/Advanced-Effects
| 15,690
|
thirdparty/framework/diagnostics/thirdparty/google_crashpad_client/util/misc/capture_context_linux.S
|
// Copyright 2018 The Crashpad Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// namespace crashpad {
// void CaptureContext(ucontext_t* context);
// } // namespace crashpad
// The type name for a ucontext_t varies by libc implementation and version.
// Bionic and glibc 2.25 typedef ucontext_t from struct ucontext. glibc 2.26+
// typedef ucontext_t from struct ucontext_t. Alias the symbol names to maintain
// compatibility with both possibilities.
#define CAPTURECONTEXT_SYMBOL _ZN8crashpad14CaptureContextEP10ucontext_t
#define CAPTURECONTEXT_SYMBOL2 _ZN8crashpad14CaptureContextEP8ucontext
.text
.globl CAPTURECONTEXT_SYMBOL
.globl CAPTURECONTEXT_SYMBOL2
#if defined(__i386__) || defined(__x86_64__)
.balign 16, 0x90
#elif defined(__arm__) || defined(__aarch64__)
.balign 4, 0x0
.type CAPTURECONTEXT_SYMBOL, %function
.type CAPTURECONTEXT_SYMBOL2, %function
#elif defined(__mips__)
.balign 4, 0x0
#endif
CAPTURECONTEXT_SYMBOL:
CAPTURECONTEXT_SYMBOL2:
#if defined(__i386__)
.cfi_startproc
pushl %ebp
.cfi_def_cfa_offset 8
.cfi_offset %ebp, -8
movl %esp, %ebp
.cfi_def_cfa_register %ebp
// Note that 16-byte stack alignment is not maintained because this function
// does not call out to any other.
// pushfl first, because some instructions (but probably none used here)
// affect %eflags. %eflags will be in -4(%ebp).
pushfl
// Save the original value of %eax, and use %eax to hold the ucontext_t*
// argument. The original value of %eax will be in -8(%ebp).
pushl %eax
movl 8(%ebp), %eax
// Save the original value of %ecx, and use %ecx as a scratch register.
pushl %ecx
// The segment registers are 16 bits wide, but mcontext_t declares them
// as unsigned 32-bit values, so zero the top half.
xorl %ecx, %ecx
movw %gs, %cx
movl %ecx, 0x14(%eax) // context->uc_mcontext.xgs
movw %fs, %cx
movl %ecx, 0x18(%eax) // context->uc_mcontext.xfs
movw %es, %cx
movl %ecx, 0x1c(%eax) // context->uc_mcontext.xes
movw %ds, %cx
movl %ecx, 0x20(%eax) // context->uc_mcontext.xds
// General-purpose registers whose values haven’t changed can be captured
// directly.
movl %edi, 0x24(%eax) // context->uc_mcontext.edi
movl %esi, 0x28(%eax) // context->uc_mcontext.esi
// The original %ebp was saved on the stack in this function’s prologue.
movl (%ebp), %ecx
movl %ecx, 0x2c(%eax) // context->uc_mcontext.ebp
// %esp was saved in %ebp in this function’s prologue, but the caller’s %esp
// is 8 more than this value: 4 for the original %ebp saved on the stack in
// this function’s prologue, and 4 for the return address saved on the stack
// by the call instruction that reached this function.
leal 8(%ebp), %ecx
movl %ecx, 0x30(%eax) // context->uc_mcontext.esp
// More general-purpose registers
movl %ebx, 0x34(%eax) // context->uc_mcontext.ebx
movl %edx, 0x38(%eax) // context->uc_mcontext.edx
// The original %ecx was saved on the stack above.
movl -12(%ebp), %ecx
movl %ecx, 0x3c(%eax) // context->uc_mcontext.ecx
// The original %eax was saved on the stack above.
movl -8(%ebp), %ecx
movl %ecx, 0x40(%eax) // context->uc_mcontext.eax
// trapno and err are unused so zero them out.
xorl %ecx, %ecx
movl %ecx, 0x44(%eax) // context->uc_mcontext.trapno
movl %ecx, 0x48(%eax) // context->uc_mcontext.err
// %eip can’t be accessed directly, but the return address saved on the stack
// by the call instruction that reached this function can be used.
movl 4(%ebp), %ecx
movl %ecx, 0x4c(%eax) // context->uc_mcontext.eip
// More segment registers
xorl %ecx, %ecx
movw %cs, %cx
movl %ecx, 0x50(%eax) // context->uc_mcontext.xcs
// The original %eflags was saved on the stack above.
movl -4(%ebp), %ecx
movl %ecx, 0x54(%eax) // context->uc_mcontext.eflags
// uesp is unused so zero it out.
xorl %ecx, %ecx
movl %ecx, 0x58(%eax) // context->uc_mcontext.uesp
// The last segment register.
movw %ss, %cx
movl %ecx, 0x5c(%eax) // context->uc_mcontext.xss
// TODO(jperaza): save floating-point registers.
xorl %ecx, %ecx
movl %ecx, 0x60(%eax) // context->uc_mcontext.fpregs
// Clean up by restoring clobbered registers, even those considered volatile
// by the ABI, so that the captured context represents the state at this
// function’s exit.
popl %ecx
popl %eax
popfl
popl %ebp
ret
.cfi_endproc
#elif defined(__x86_64__)
.cfi_startproc
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
// Note that 16-byte stack alignment is not maintained because this function
// does not call out to any other.
// pushfq first, because some instructions (but probably none used here)
// affect %rflags. %rflags will be in -8(%rbp).
pushfq
// General-purpose registers whose values haven’t changed can be captured
// directly.
movq %r8, 0x28(%rdi) // context->uc_mcontext.r8
movq %r9, 0x30(%rdi) // context->uc_mcontext.r9
movq %r10, 0x38(%rdi) // context->uc_mcontext.r10
movq %r11, 0x40(%rdi) // context->uc_mcontext.r11
movq %r12, 0x48(%rdi) // context->uc_mcontext.r12
movq %r13, 0x50(%rdi) // context->uc_mcontext.r13
movq %r14, 0x58(%rdi) // context->uc_mcontext.r14
movq %r15, 0x60(%rdi) // context->uc_mcontext.r15
// Because of the calling convention, there’s no way to recover the value of
// the caller’s %rdi as it existed prior to calling this function. This
// function captures a snapshot of the register state at its return, which
// involves %rdi containing a pointer to its first argument. Callers that
// require the value of %rdi prior to calling this function should obtain it
// separately. For example:
// uint64_t rdi;
// asm("movq %%rdi, %0" : "=m"(rdi));
movq %rdi, 0x68(%rdi) // context->uc_mcontext.rdi
movq %rsi, 0x70(%rdi) // context->uc_mcontext.rsi
// Use %r8 as a scratch register now that it has been saved.
// The original %rbp was saved on the stack in this function’s prologue.
movq (%rbp), %r8
movq %r8, 0x78(%rdi) // context->uc_mcontext.rbp
// Save the remaining general-purpose registers.
movq %rbx, 0x80(%rdi) // context->uc_mcontext.rbx
movq %rdx, 0x88(%rdi) // context->uc_mcontext.rdx
movq %rax, 0x90(%rdi) // context->uc_mcontext.rax
movq %rcx, 0x98(%rdi) // context->uc_mcontext.rcx
// %rsp was saved in %rbp in this function’s prologue, but the caller’s %rsp
// is 16 more than this value: 8 for the original %rbp saved on the stack in
// this function’s prologue, and 8 for the return address saved on the stack
// by the call instruction that reached this function.
leaq 16(%rbp), %r8
movq %r8, 0xa0(%rdi) // context->uc_mcontext.rsp
// %rip can’t be accessed directly, but the return address saved on the stack
// by the call instruction that reached this function can be used.
movq 8(%rbp), %r8
movq %r8, 0xa8(%rdi) // context->uc_mcontext.rip
// The original %rflags was saved on the stack above.
movq -8(%rbp), %r8
movq %r8, 0xb0(%rdi) // context->uc_mcontext.eflags
// Save the segment registers
movw %cs, 0xb8(%rdi) // context->uc_mcontext.cs
movw %gs, 0xba(%rdi) // context->uc_mcontext.gs
movw %fs, 0xbc(%rdi) // context->uc_mcontext.fs
xorw %ax, %ax
movw %ax, 0xbe(%rdi) // context->uc_mcontext.padding
// Zero out the remainder of the unused pseudo-registers
xorq %r8, %r8
movq %r8, 0xc0(%rdi) // context->uc_mcontext.err
movq %r8, 0xc8(%rdi) // context->uc_mcontext.trapno
movq %r8, 0xd0(%rdi) // context->uc_mcontext.oldmask
movq %r8, 0xd8(%rdi) // context->uc_mcontext.cr2
// TODO(jperaza): save floating-point registers.
movq %r8, 0xe0(%rdi) // context->uc_mcontext.fpregs
// Clean up by restoring clobbered registers, even those considered volatile
// by the ABI, so that the captured context represents the state at this
// function’s exit.
movq 0x90(%rdi), %rax
movq 0x28(%rdi), %r8
popfq
popq %rbp
ret
.cfi_endproc
#elif defined(__arm__)
// The original r0 can't be recovered.
str r0, [r0, #0x20]
// Now advance r0 to point to the register array.
add r0, r0, #0x24
// Save registers r1-r12 at context->uc_mcontext.regs[i].
stm r0, {r1-r12}
// Restore r0.
sub r0, r0, #0x24
// Save SP/r13.
str SP, [r0, #0x54] // context->uc_mcontext.sp
// The original LR can't be recovered.
str LR, [r0, #0x58] // context->uc_mcontext.lr
// The link register holds the return address for this function.
str LR, [r0, #0x5c] // context->uc_mcontext.pc
// Use r1 as a scratch register.
// CPSR is a deprecated synonym for APSR.
mrs r1, APSR
str r1, [r0, #0x60] // context->uc_mcontext.cpsr
// Zero out unused fields.
mov r1, #0x0
str r1, [r0, #0x14] // context->uc_mcontext.trap_no
str r1, [r0, #0x18] // context->uc_mcontext.error_code
str r1, [r0, #0x1c] // context->uc_mcontext.oldmask
str r1, [r0, #0x64] // context->uc_mcontext.fault_address
// Restore r1.
ldr r1, [r0, #0x24]
// TODO(https://crashpad.chromium.org/bug/300): save floating-point registers.
mov PC, LR
#elif defined(__aarch64__)
// Zero out fault_address, which is unused.
str xzr, [x0, #0xb0] // context->uc_mcontext.fault_address
// Save general purpose registers in context->uc_mcontext.regs[i].
// The original x0 can't be recovered.
stp x0, x1, [x0, #0xb8]
stp x2, x3, [x0, #0xc8]
stp x4, x5, [x0, #0xd8]
stp x6, x7, [x0, #0xe8]
stp x8, x9, [x0, #0xf8]
stp x10, x11, [x0, #0x108]
stp x12, x13, [x0, #0x118]
stp x14, x15, [x0, #0x128]
stp x16, x17, [x0, #0x138]
stp x18, x19, [x0, #0x148]
stp x20, x21, [x0, #0x158]
stp x22, x23, [x0, #0x168]
stp x24, x25, [x0, #0x178]
stp x26, x27, [x0, #0x188]
stp x28, x29, [x0, #0x198]
// The original LR can't be recovered.
str x30, [x0, #0x1a8]
// Use x1 as a scratch register.
mov x1, SP
str x1, [x0, #0x1b0] // context->uc_mcontext.sp
// The link register holds the return address for this function.
str x30, [x0, #0x1b8] // context->uc_mcontext.pc
// pstate should hold SPSR but NZCV are the only bits we know about.
mrs x1, NZCV
str x1, [x0, #0x1c0] // context->uc_mcontext.pstate
// Restore x1 from the saved context.
ldr x1, [x0, #0xc0]
// TODO(https://crashpad.chromium.org/bug/300): save floating-point registers.
ret
#elif defined(__mips__)
.set noat
#if _MIPS_SIM == _ABIO32
#define STORE sw
#define MCONTEXT_FPREG_SIZE 4
#define MCONTEXT_PC_OFFSET 32
#else
#define STORE sd
#define MCONTEXT_FPREG_SIZE 8
#define MCONTEXT_PC_OFFSET 616
#endif
#define MCONTEXT_REG_SIZE 8
#define MCONTEXT_GREGS_OFFSET 40
#define MCONTEXT_FPREGS_OFFSET 296
// Value of register 0 is always 0.
// Registers 26 and 27 are reserved for kernel, and shouldn't be used.
STORE $1, (1 * MCONTEXT_REG_SIZE + MCONTEXT_GREGS_OFFSET)($a0)
STORE $2, (2 * MCONTEXT_REG_SIZE + MCONTEXT_GREGS_OFFSET)($a0)
STORE $3, (3 * MCONTEXT_REG_SIZE + MCONTEXT_GREGS_OFFSET)($a0)
STORE $4, (4 * MCONTEXT_REG_SIZE + MCONTEXT_GREGS_OFFSET)($a0)
STORE $5, (5 * MCONTEXT_REG_SIZE + MCONTEXT_GREGS_OFFSET)($a0)
STORE $6, (6 * MCONTEXT_REG_SIZE + MCONTEXT_GREGS_OFFSET)($a0)
STORE $7, (7 * MCONTEXT_REG_SIZE + MCONTEXT_GREGS_OFFSET)($a0)
STORE $8, (8 * MCONTEXT_REG_SIZE + MCONTEXT_GREGS_OFFSET)($a0)
STORE $9, (9 * MCONTEXT_REG_SIZE + MCONTEXT_GREGS_OFFSET)($a0)
STORE $10, (10 * MCONTEXT_REG_SIZE + MCONTEXT_GREGS_OFFSET)($a0)
STORE $11, (11 * MCONTEXT_REG_SIZE + MCONTEXT_GREGS_OFFSET)($a0)
STORE $12, (12 * MCONTEXT_REG_SIZE + MCONTEXT_GREGS_OFFSET)($a0)
STORE $13, (13 * MCONTEXT_REG_SIZE + MCONTEXT_GREGS_OFFSET)($a0)
STORE $14, (14 * MCONTEXT_REG_SIZE + MCONTEXT_GREGS_OFFSET)($a0)
STORE $15, (15 * MCONTEXT_REG_SIZE + MCONTEXT_GREGS_OFFSET)($a0)
STORE $16, (16 * MCONTEXT_REG_SIZE + MCONTEXT_GREGS_OFFSET)($a0)
STORE $17, (17 * MCONTEXT_REG_SIZE + MCONTEXT_GREGS_OFFSET)($a0)
STORE $18, (18 * MCONTEXT_REG_SIZE + MCONTEXT_GREGS_OFFSET)($a0)
STORE $19, (19 * MCONTEXT_REG_SIZE + MCONTEXT_GREGS_OFFSET)($a0)
STORE $20, (20 * MCONTEXT_REG_SIZE + MCONTEXT_GREGS_OFFSET)($a0)
STORE $21, (21 * MCONTEXT_REG_SIZE + MCONTEXT_GREGS_OFFSET)($a0)
STORE $22, (22 * MCONTEXT_REG_SIZE + MCONTEXT_GREGS_OFFSET)($a0)
STORE $23, (23 * MCONTEXT_REG_SIZE + MCONTEXT_GREGS_OFFSET)($a0)
STORE $24, (24 * MCONTEXT_REG_SIZE + MCONTEXT_GREGS_OFFSET)($a0)
STORE $25, (25 * MCONTEXT_REG_SIZE + MCONTEXT_GREGS_OFFSET)($a0)
STORE $28, (28 * MCONTEXT_REG_SIZE + MCONTEXT_GREGS_OFFSET)($a0)
STORE $29, (29 * MCONTEXT_REG_SIZE + MCONTEXT_GREGS_OFFSET)($a0)
STORE $30, (30 * MCONTEXT_REG_SIZE + MCONTEXT_GREGS_OFFSET)($a0)
STORE $31, (31 * MCONTEXT_REG_SIZE + MCONTEXT_GREGS_OFFSET)($a0)
STORE $31, (MCONTEXT_PC_OFFSET)($a0)
#ifdef __mips_hard_float
s.d $f0, (0 * MCONTEXT_FPREG_SIZE + MCONTEXT_FPREGS_OFFSET)($a0)
s.d $f2, (2 * MCONTEXT_FPREG_SIZE + MCONTEXT_FPREGS_OFFSET)($a0)
s.d $f4, (4 * MCONTEXT_FPREG_SIZE + MCONTEXT_FPREGS_OFFSET)($a0)
s.d $f6, (6 * MCONTEXT_FPREG_SIZE + MCONTEXT_FPREGS_OFFSET)($a0)
s.d $f8, (8 * MCONTEXT_FPREG_SIZE + MCONTEXT_FPREGS_OFFSET)($a0)
s.d $f10, (10 * MCONTEXT_FPREG_SIZE + MCONTEXT_FPREGS_OFFSET)($a0)
s.d $f12, (12 * MCONTEXT_FPREG_SIZE + MCONTEXT_FPREGS_OFFSET)($a0)
s.d $f14, (14 * MCONTEXT_FPREG_SIZE + MCONTEXT_FPREGS_OFFSET)($a0)
s.d $f16, (16 * MCONTEXT_FPREG_SIZE + MCONTEXT_FPREGS_OFFSET)($a0)
s.d $f18, (18 * MCONTEXT_FPREG_SIZE + MCONTEXT_FPREGS_OFFSET)($a0)
s.d $f20, (20 * MCONTEXT_FPREG_SIZE + MCONTEXT_FPREGS_OFFSET)($a0)
s.d $f22, (22 * MCONTEXT_FPREG_SIZE + MCONTEXT_FPREGS_OFFSET)($a0)
s.d $f24, (24 * MCONTEXT_FPREG_SIZE + MCONTEXT_FPREGS_OFFSET)($a0)
s.d $f26, (26 * MCONTEXT_FPREG_SIZE + MCONTEXT_FPREGS_OFFSET)($a0)
s.d $f28, (28 * MCONTEXT_FPREG_SIZE + MCONTEXT_FPREGS_OFFSET)($a0)
s.d $f30, (30 * MCONTEXT_FPREG_SIZE + MCONTEXT_FPREGS_OFFSET)($a0)
#if _MIPS_SIM != _ABIO32
s.d $f1, (1 * MCONTEXT_FPREG_SIZE + MCONTEXT_FPREGS_OFFSET)($a0)
s.d $f3, (3 * MCONTEXT_FPREG_SIZE + MCONTEXT_FPREGS_OFFSET)($a0)
s.d $f5, (5 * MCONTEXT_FPREG_SIZE + MCONTEXT_FPREGS_OFFSET)($a0)
s.d $f7, (7 * MCONTEXT_FPREG_SIZE + MCONTEXT_FPREGS_OFFSET)($a0)
s.d $f9, (9 * MCONTEXT_FPREG_SIZE + MCONTEXT_FPREGS_OFFSET)($a0)
s.d $f11, (11 * MCONTEXT_FPREG_SIZE + MCONTEXT_FPREGS_OFFSET)($a0)
s.d $f13, (13 * MCONTEXT_FPREG_SIZE + MCONTEXT_FPREGS_OFFSET)($a0)
s.d $f15, (15 * MCONTEXT_FPREG_SIZE + MCONTEXT_FPREGS_OFFSET)($a0)
s.d $f17, (17 * MCONTEXT_FPREG_SIZE + MCONTEXT_FPREGS_OFFSET)($a0)
s.d $f19, (19 * MCONTEXT_FPREG_SIZE + MCONTEXT_FPREGS_OFFSET)($a0)
s.d $f21, (21 * MCONTEXT_FPREG_SIZE + MCONTEXT_FPREGS_OFFSET)($a0)
s.d $f23, (23 * MCONTEXT_FPREG_SIZE + MCONTEXT_FPREGS_OFFSET)($a0)
s.d $f25, (25 * MCONTEXT_FPREG_SIZE + MCONTEXT_FPREGS_OFFSET)($a0)
s.d $f27, (27 * MCONTEXT_FPREG_SIZE + MCONTEXT_FPREGS_OFFSET)($a0)
s.d $f29, (29 * MCONTEXT_FPREG_SIZE + MCONTEXT_FPREGS_OFFSET)($a0)
s.d $f31, (31 * MCONTEXT_FPREG_SIZE + MCONTEXT_FPREGS_OFFSET)($a0)
#endif // _MIPS_SIM != _ABIO32
#endif // __mips_hard_float
jr $ra
.set at
#endif // __i386__
|
Advanced-Effects/Advanced-Effects
| 10,584
|
thirdparty/framework/diagnostics/thirdparty/google_crashpad_client/util/misc/capture_context_mac.S
|
// Copyright 2014 The Crashpad Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)
#if defined(__i386__) || defined(__x86_64__)
// namespace crashpad {
// void CaptureContext(x86_thread_state_t* x86_thread_state);
// } // namespace crashpad
#define CAPTURECONTEXT_SYMBOL __ZN8crashpad14CaptureContextEP16x86_thread_state
#elif defined(__aarch64__)
// namespace crashpad {
// void CaptureContext(arm_unified_thread_state_t* arm_unified_thread_state);
// } // namespace crashpad
#define CAPTURECONTEXT_SYMBOL \
__ZN8crashpad14CaptureContextEP24arm_unified_thread_state
#endif
.section __TEXT,__text,regular,pure_instructions
.private_extern CAPTURECONTEXT_SYMBOL
.globl CAPTURECONTEXT_SYMBOL
#if defined(__i386__) || defined(__x86_64__)
.p2align 4, 0x90
#elif defined(__aarch64__)
.p2align 2
#endif
CAPTURECONTEXT_SYMBOL:
#if defined(__i386__)
.cfi_startproc
pushl %ebp
.cfi_def_cfa_offset 8
.cfi_offset %ebp, -8
movl %esp, %ebp
.cfi_def_cfa_register %ebp
// Note that 16-byte stack alignment is not maintained because this function
// does not call out to any other.
// pushfl first, because some instructions (but probably none used here)
// affect %eflags. %eflags will be in -4(%ebp).
pushfl
// Save the original value of %eax, and use %eax to hold the x86_thread_state*
// argument. The original value of %eax will be in -8(%ebp).
pushl %eax
movl 8(%ebp), %eax
// Initialize the header identifying the x86_thread_state_t structure as
// carrying an x86_thread_state32_t (flavor x86_THREAD_STATE32) of size
// x86_THREAD_STATE32_COUNT 32-bit values.
movl $1, (%eax) // x86_thread_state->tsh.flavor
movl $16, 4(%eax) // x86_thread_state->tsh.count
// General-purpose registers whose values haven’t changed can be captured
// directly.
movl %ebx, 12(%eax) // x86_thread_state->uts.ts32.__ebx
movl %ecx, 16(%eax) // x86_thread_state->uts.ts32.__ecx
movl %edx, 20(%eax) // x86_thread_state->uts.ts32.__edx
movl %edi, 24(%eax) // x86_thread_state->uts.ts32.__edi
movl %esi, 28(%eax) // x86_thread_state->uts.ts32.__esi
// Now that the original value of %edx has been saved, it can be repurposed to
// hold other registers’ values.
// The original %eax was saved on the stack above.
movl -8(%ebp), %edx
movl %edx, 8(%eax) // x86_thread_state->uts.ts32.__eax
// The original %ebp was saved on the stack in this function’s prologue.
movl (%ebp), %edx
movl %edx, 32(%eax) // x86_thread_state->uts.ts32.__ebp
// %esp was saved in %ebp in this function’s prologue, but the caller’s %esp
// is 8 more than this value: 4 for the original %ebp saved on the stack in
// this function’s prologue, and 4 for the return address saved on the stack
// by the call instruction that reached this function.
leal 8(%ebp), %edx
movl %edx, 36(%eax) // x86_thread_state->uts.ts32.__esp
// The original %eflags was saved on the stack above.
movl -4(%ebp), %edx
movl %edx, 44(%eax) // x86_thread_state->uts.ts32.__eflags
// %eip can’t be accessed directly, but the return address saved on the stack
// by the call instruction that reached this function can be used.
movl 4(%ebp), %edx
movl %edx, 48(%eax) // x86_thread_state->uts.ts32.__eip
// The segment registers are 16 bits wide, but x86_thread_state declares them
// as unsigned 32-bit values, so zero the top half.
xorl %edx, %edx
movw %ss, %dx
movl %edx, 40(%eax) // x86_thread_state->uts.ts32.__ss
movw %cs, %dx
movl %edx, 52(%eax) // x86_thread_state->uts.ts32.__cs
movw %ds, %dx
movl %edx, 56(%eax) // x86_thread_state->uts.ts32.__ds
movw %es, %dx
movl %edx, 60(%eax) // x86_thread_state->uts.ts32.__es
movw %fs, %dx
movl %edx, 64(%eax) // x86_thread_state->uts.ts32.__fs
movw %gs, %dx
movl %edx, 68(%eax) // x86_thread_state->uts.ts32.__gs
// Clean up by restoring clobbered registers, even those considered volatile
// by the ABI, so that the captured context represents the state at this
// function’s exit.
movl 20(%eax), %edx // x86_thread_state->uts.ts32.__edx
popl %eax
popfl
popl %ebp
ret
.cfi_endproc
#elif defined(__x86_64__)
.cfi_startproc
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
// Note that 16-byte stack alignment is not maintained because this function
// does not call out to any other.
// pushfq first, because some instructions (but probably none used here)
// affect %rflags. %rflags will be in -8(%rbp).
pushfq
// Initialize the header identifying the x86_thread_state_t structure as
// carrying an x86_thread_state64_t (flavor x86_THREAD_STATE64) of size
// x86_THREAD_STATE64_COUNT 32-bit values.
movl $4, (%rdi) // x86_thread_state->tsh.flavor
movl $42, 4(%rdi) // x86_thread_state->tsh.count
// General-purpose registers whose values haven’t changed can be captured
// directly.
movq %rax, 8(%rdi) // x86_thread_state->uts.ts64.__rax
movq %rbx, 16(%rdi) // x86_thread_state->uts.ts64.__rbx
movq %rcx, 24(%rdi) // x86_thread_state->uts.ts64.__rcx
movq %rdx, 32(%rdi) // x86_thread_state->uts.ts64.__rdx
movq %rsi, 48(%rdi) // x86_thread_state->uts.ts64.__rsi
movq %r8, 72(%rdi) // x86_thread_state->uts.ts64.__r8
movq %r9, 80(%rdi) // x86_thread_state->uts.ts64.__r9
movq %r10, 88(%rdi) // x86_thread_state->uts.ts64.__r10
movq %r11, 96(%rdi) // x86_thread_state->uts.ts64.__r11
movq %r12, 104(%rdi) // x86_thread_state->uts.ts64.__r12
movq %r13, 112(%rdi) // x86_thread_state->uts.ts64.__r13
movq %r14, 120(%rdi) // x86_thread_state->uts.ts64.__r14
movq %r15, 128(%rdi) // x86_thread_state->uts.ts64.__r15
// Because of the calling convention, there’s no way to recover the value of
// the caller’s %rdi as it existed prior to calling this function. This
// function captures a snapshot of the register state at its return, which
// involves %rdi containing a pointer to its first argument. Callers that
// require the value of %rdi prior to calling this function should obtain it
// separately. For example:
// uint64_t rdi;
// asm("movq %%rdi, %0" : "=m"(rdi));
movq %rdi, 40(%rdi) // x86_thread_state->uts.ts64.__rdi
// Now that the original value of %rax has been saved, it can be repurposed to
// hold other registers’ values.
// The original %rbp was saved on the stack in this function’s prologue.
movq (%rbp), %rax
movq %rax, 56(%rdi) // x86_thread_state->uts.ts64.__rbp
// %rsp was saved in %rbp in this function’s prologue, but the caller’s %rsp
// is 16 more than this value: 8 for the original %rbp saved on the stack in
// this function’s prologue, and 8 for the return address saved on the stack
// by the call instruction that reached this function.
leaq 16(%rbp), %rax
movq %rax, 64(%rdi) // x86_thread_state->uts.ts64.__rsp
// %rip can’t be accessed directly, but the return address saved on the stack
// by the call instruction that reached this function can be used.
movq 8(%rbp), %rax
movq %rax, 136(%rdi) // x86_thread_state->uts.ts64.__rip
// The original %rflags was saved on the stack above.
movq -8(%rbp), %rax
movq %rax, 144(%rdi) // x86_thread_state->uts.ts64.__rflags
// The segment registers are 16 bits wide, but x86_thread_state declares them
// as unsigned 64-bit values, so zero the top portion.
xorq %rax, %rax
movw %cs, %ax
movq %rax, 152(%rdi) // x86_thread_state->uts.ts64.__cs
movw %fs, %ax
movq %rax, 160(%rdi) // x86_thread_state->uts.ts64.__fs
movw %gs, %ax
movq %rax, 168(%rdi) // x86_thread_state->uts.ts64.__gs
// Clean up by restoring clobbered registers, even those considered volatile
// by the ABI, so that the captured context represents the state at this
// function’s exit.
movq 8(%rdi), %rax
popfq
popq %rbp
ret
.cfi_endproc
#elif defined(__aarch64__)
.cfi_startproc
// Save general-purpose registers in arm_unified_thread_state->ts_64.__x[0].
// The original x0 can't be recovered.
stp x0, x1, [x0, #0x8]
stp x2, x3, [x0, #0x18]
stp x4, x5, [x0, #0x28]
stp x6, x7, [x0, #0x38]
stp x8, x9, [x0, #0x48]
stp x10, x11, [x0, #0x58]
stp x12, x13, [x0, #0x68]
stp x14, x15, [x0, #0x78]
stp x16, x17, [x0, #0x88]
stp x18, x19, [x0, #0x98]
stp x20, x21, [x0, #0xa8]
stp x22, x23, [x0, #0xb8]
stp x24, x25, [x0, #0xc8]
stp x26, x27, [x0, #0xd8]
// Save the last general-purpose register (x28) and the frame pointer (x29).
stp x28, x29, [x0, #0xe8] // __x[28] and __fp
// Save the link register (x30) and the stack pointer (using x1 as a scratch
// register)
mov x1, sp
stp x30, x1, [x0, #0xf8] // __lr and __sp
// The link register (x30) holds the return address for this function.
// __cpsr should hold current program status register, but nzcv are the only
// bits we know about (saved using x1 as a scratch register). The 64-bit x1
// covers both the 32-bit __cpsr (which receives the nzcv bits) and __pad
// (which will be zeroed).
mrs x1, nzcv
stp x30, x1, [x0, #0x108] // __pc and __cpsr and __pad
// Initialize the header identifying the arm_unified_thread_state structure as
// carrying an arm_thread_state64_t (flavor ARM_THREAD_STATE64) of size
// ARM_THREAD_STATE64_COUNT 32-bit values.
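// x1 is assembled as (ARM_THREAD_STATE64_COUNT << 32) | ARM_THREAD_STATE64, so
// the single 64-bit store below writes the 32-bit flavor at offset 0 and the
// 32-bit count at offset 4 (little-endian).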
mov x1, #6
movk x1, #68, lsl #32
str x1, [x0, #0x0] // arm_thread_state->ash.flavor and count
// Restore x1 from the saved context.
ldr x1, [x0, #0x10]
// TODO(justincohen): Consider saving floating-point registers into
// arm_neon_state64_t as a second parameter, or as a second function call
// after all of the general-purpose state is captured, or as a new struct that
// has both arm_unified_state_t and arm_neon_state64_t members. That may be
// better than a second parameter (which occupies another register) and better
// than a second function call.
ret
.cfi_endproc
#endif
.subsections_via_symbols
#endif
|
Advanced-Microelectronics-Group/OpenC910_Modified
| 4,140
|
smart_run/tests/lib/crt0.s
|
/*Copyright 2019-2021 T-Head Semiconductor Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#start from __start,
#(1)initialize all registers
#(2)initialize supervisor mode stack pointer
#(3)set VBR register
#(4)construct PMP
#(5)construct MMU
#(6)enable control reg such as EE and MMU
#(7)jump to the main procedure with jal main
.include "core_init.h"
.text
.global __start
__start:
# enable extension
li x3, 0x400000
csrs mxstatus,x3
# enable fpu
li x3, 0x6000
csrs mstatus,x3
# enable vector
li x3, 0x800000
csrs mstatus,x3
# enable unalign
li x3, 0x8000
csrs mxstatus,x3
# mxstatus
li x3, 0x638000
csrs mxstatus,x3
# msmpr
csrsi msmpr, 0x1
# PART 1: initialize all registers
li x1, 0
li x2, 0
li x3, 0
li x4, 0
li x5, 0
li x6, 0
li x7, 0
li x8, 0
li x9, 0
li x10,0
li x11,0
li x12,0
li x13,0
li x14,0
li x15,0
li x16,0
li x17,0
li x18,0
li x19,0
li x20,0
li x21,0
li x22,0
li x23,0
li x24,0
li x25,0
li x26,0
li x27,0
li x28,0
li x29,0
li x30,0
li x31,0
## PART 2: initialize stack pointer & ppn
csrr x3,mhartid
li x4,1
.global cpu_0_sp
cpu_0_sp:
la x2, __kernel_stack
# PART 3:initialize mtvec value
la x3,__trap_handler
csrw mtvec,x3
# enable mie
li x3, 0x8
csrs mstatus,x3
# invalidate all memory for IBP, BTB, BHT, DCACHE, ICACHE
li x3, 0x70013
csrs 0x7c2,x3 #mcor
# enable ICACHE,DCACHE,BHT,BTB,IBP,RAS,WA
# li x3, 0x10f7
# csrs 0x7c1,x3 #mhcr
li x3, 0x11ff
csrs mhcr,x3 #mhcr
.global after_l2en
after_l2en:
# enable write allocate
# li x3, 0x4
# csrs 0x7c1,x3 #mhcr
li x3, 0x11ff
csrs mhcr,x3 #mhcr
# enable lbuf,way_pred,data_cache_prefetch, amr
# li x3, 0x7e30c
# csrs 0x7c5,x3 #mhint
li x3, 0x6e30c
csrs mhint,x3 #mhint
# mccr2
li x3, 0xe0000009
csrs mccr2,x3
jal main
.global __exit
__exit:
addi x10,x0,0x0
addi x1,x0,0x5a
addi x2,x0,0x6b
addi x3,x0,0x7c
sync
li x3, 0x444333222
add x4,x0,x3
#
.global __fail
__fail:
addi x10,x0,0x0
addi x1,x0,0x2c
addi x2,x0,0x3b
sync
li x3,0x2382348720
.section .text
__trap_handler:
j __synchronous_exception
.align 2
j __asychronous_int
.align 2
nop #reserved
.align 2
j __asychronous_int
.align 2
j __asychronous_int
.align 2
j __asychronous_int
.align 2
nop #reserved
.align 2
j __asychronous_int
.align 2
j __asychronous_int
.align 2
j __asychronous_int
.align 2
nop #reserved
.align 2
j __asychronous_int
j __fail
__synchronous_exception:
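#dispatch: form an index from mcause as (interrupt bit at bit 4) + cause,
#scale it by 8 and fetch the handler address from vector_table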
#push
sw x13,-4(sp)
sw x14,-8(sp)
sw x15,-12(sp)
csrr x14,mcause
andi x15,x14,0x7ff #cause
srli x14,x14,0x3b #int
andi x14,x14,0x10 #mask bit
add x14,x14,x15 #{int,cause}
slli x14,x14,0x3 #offset
la x15,vector_table
add x15,x14,x15 #target pc
lw x14, 0(x15) #get exception addr
lw x13, -4(sp) #recover x13
lw x15, -12(sp) #recover x15
addi x14,x14,-4
jr x14
__asychronous_int:
sw x13,-4(sp)
sw x14,-8(sp)
sw x15,-12(sp)
csrr x14,mcause
andi x15,x14,0x7ff #cause
srli x14,x14,0x3b #int
andi x14,x14,0x10 #mask bit
add x14,x14,x15 #{int,cause}
slli x14,x14,0x3 #offset
la x15,vector_table
add x15,x14,x15 #target pc
lw x14, 0(x15) #get exception addr
lw x13, -4(sp) #recover x13
lw x15, -12(sp) #recover x15
addi x14,x14,-4
jr x14
.global vector_table
.align 10
vector_table: #totally 256 entries
.rept 128
.long __fail
.endr
.global __dummy
__dummy:
.data
nop
nop
nop
|
Advanced-Microelectronics-Group/OpenC910_Modified
| 9,073
|
smart_run/tests/lib/core_lsu.s
|
/*Copyright 2019-2021 T-Head Semiconductor Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#D-cache operation
.macro SUM_EN
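#set mstatus.SUM (bit 18) so S-mode loads/stores may access U-mode pages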
#backup regs
addi x2,x2,-16
sd x9,0x0(x2)
sd x10,0x8(x2)
csrr x9,mstatus
li x10,0x1
slli x10,x10,18
or x9,x9,x10
csrw mstatus,x9
ld x9,0x0(x2)
ld x10,0x8(x2)
addi x2,x2,16
.endm
.macro DATA_CAHE_WB
csrsi mhcr,0x8
.endm
.macro DATA_CACHE_EN
#backup regs
csrsi mhcr, 0x2
# addi x2, x2, -16
# sd x7, 0x0(x2)
# sd x8, 0x8(x2)
# csrr x7, mhcr
# li x8,0x1
# slli x8,x8,0x1
# or x8,x8,x7
# csrw mhcr,x7
# #restore regs
# ld x7, 0x0(x2)
# ld x8, 0x8(x2)
# addi x2, x2, 16
.endm
.macro DATA_CACHE_DIS
#backup regs
csrci mhcr,0x2
# addi x2, x2, -16
# sd x7, 0x0(x2)
# sd x8, 0x8(x2)
# csrr x7, mhcr
# li x8,-3
# and x8,x8,x7
# csrw mhcr,x7
# #restore regs
# ld x7, 0x0(x2)
# ld x8, 0x8(x2)
# addi x2, x2, 16
.endm
.macro DATA_CACHE_EN_NBK
csrsi mhcr,0x2
.endm
.macro DATA_CACHE_EN_NONWA_NBK
csrsi mhcr,0x2
csrci mhcr,0x4
.endm
.macro DATA_CACHE_DIS_NBK
csrci mhcr,0x2
.endm
.macro DATA_CACHE_INV
#backup regs
csrsi mcor,0x12
# dcache.cva x2
# addi x2, x2, -4
# st.w x7, (x2)
# dcache.cva x2
#
# mfcr x7, cr17
# bclri x7, 0
# bseti x7, 1
# bclri x7, 2
# bclri x7, 3
# bseti x7, 4
# mtcr x7, cr17
# #restore regs
# ld.w x7, (x2)
# addi x2, x2, 4
.endm
#.macro DATA_CACHE_INV_ADDR ADDR_REG,IMM_REG
# mtcr \ADDR_REG,cr22
# mfcr \IMM_REG,cr17
#
# bclri \IMM_REG,0
# bseti \IMM_REG,1
# bseti \IMM_REG,4
# bclri \IMM_REG,5
# bseti \IMM_REG,6
# bclri \IMM_REG,7
#
# mtcr \IMM_REG,cr17
#.endm
#
#.macro DATA_CACHE_INV_SETWAY SETWAY_REG,IMM_REG
# mtcr \SETWAY_REG,cr22
# mfcr \IMM_REG,cr17
#
# bclri \IMM_REG,0
# bseti \IMM_REG,1
# bseti \IMM_REG,4
# bclri \IMM_REG,5
# bseti \IMM_REG,6
# bseti \IMM_REG,7
#
# mtcr \IMM_REG,cr17
#.endm
.macro DATA_CACHE_CLR
#backup regs
addi x2, x2, -8
sd x7, 0x0(x2)
li x7,0x22
csrw mcor,x7
# #restore regs
ld x7, 0x0(x2)
addi x2, x2, 8
.endm
.macro DATA_CACHE_CLR_NBK
li x30, 0x22
csrw mcor,x30
.endm
#.macro DATA_CACHE_CLR_ADDR ADDR_REG,IMM_REG
# mtcr \ADDR_REG,cr22
# mfcr \IMM_REG,cr17
#
# bclri \IMM_REG,0
# bseti \IMM_REG,1
# bclri \IMM_REG,4
# bseti \IMM_REG,5
# bseti \IMM_REG,6
# bclri \IMM_REG,7
#
# mtcr \IMM_REG,cr17
#.endm
#
#.macro DATA_CACHE_CLR_SETWAY SETWAY_REG,IMM_REG
# mtcr \SETWAY_REG,cr22
# mfcr \IMM_REG,cr17
#
# bclri \IMM_REG,0
# bseti \IMM_REG,1
# bclri \IMM_REG,4
# bseti \IMM_REG,5
# bseti \IMM_REG,6
# bseti \IMM_REG,7
#
# mtcr \IMM_REG,cr17
#.endm
.macro DATA_CACHE_CLIV
#backup regs
addi x2, x2, -8
sd x7, 0x0(x2)
li x7, 0x32
csrw mcor,x7
#restore regs
ld x7, 0x0(x2)
addi x2, x2, 8
.endm
.macro DATA_CACHE_CLIV_NBK
li x30, 0x32
csrw mcor,x30
.endm
#.macro DATA_CACHE_CLIV_ADDR ADDR_REG,IMM_REG
# mtcr \ADDR_REG,cr22
# mfcr \IMM_REG,cr17
#
# bclri \IMM_REG,0
# bseti \IMM_REG,1
# bseti \IMM_REG,4
# bseti \IMM_REG,5
# bseti \IMM_REG,6
# bclri \IMM_REG,7
#
# mtcr \IMM_REG,cr17
#.endm
#.macro DATA_CACHE_CLIV_SETWAY SETWAY_REG,IMM_REG
# mtcr \SETWAY_REG,cr22
# mfcr \IMM_REG,cr17
#
# bclri \IMM_REG,0
# bseti \IMM_REG,1
# bseti \IMM_REG,4
# bseti \IMM_REG,5
# bseti \IMM_REG,6
# bseti \IMM_REG,7
# mtcr \IMM_REG,cr17
#.endm
#Prefetch operation
.macro DATA_CACHE_L1_PREFETCH_EN DIS
#backup regs
addi x2 , x2, -16
sd x7 , 0x0(x2)
sd x8 , 0x8(x2)
csrr x7 , mhint
li x8,0x1
slli x8,x8,2 #l1 prefetch
or x7,x8,x7
li x8,\DIS
slli x8,x8,13 #l1 dis
or x7,x8,x7
csrw mhint,x7
#restore regs
ld x7 , 0x0(x2)
ld x8 , 0x8(x2)
addi x2 , x2,16
.endm
.macro DATA_CACHE_L2_PREFETCH_EN DIS
#backup regs
addi x2 , x2, -16
sd x7 , 0x0(x2)
sd x8 , 0x8(x2)
csrr x7 , mhint
li x8,0x1
slli x8,x8,15 #l2 prefetch
or x7,x8,x7
li x8,\DIS
slli x8,x8,16 #l2 dis
or x7,x8,x7
csrw mhint,x7
#restore regs
ld x7 , 0x0(x2)
ld x8 , 0x8(x2)
addi x2 , x2,16
.endm
.macro DATA_CACHE_L1_PREFETCH_DIS
#backup regs
csrci mhint,0x4
.endm
.macro DATA_CACHE_L2_PREFETCH_DIS
#backup regs
addi x2 , x2, -16
sd x7 , 0x0(x2)
sd x8 , 0x8(x2)
li x7, 0xffffffffffff7fff
csrr x8,mhint
and x8,x8,x7
csrw mhint,x8
ld x7 , 0x0(x2)
ld x8 , 0x8(x2)
addi x2 , x2,16
.endm
#.macro DATA_CACHE_PREFETCH_DISTANCE_4
# #backup regs
# subi x2 , 4
# st.w x7 , (x2,0)
#
# mfcr x7 , cr31
# bseti x7 , 13
# bseti x7 , 14
# mtcr x7 , cr31
#
# #restore regs
# ld.w x7 , (x2,0)
# addi x2 , 4
#.endm
#other
.macro MM_EN
#backup regs
addi x2 , x2,-16
sd x7 , 0x0(x2)
sd x8 , 0x8(x2)
li x9,0x8000
csrc mxstatus,x9
csrr x9,mxstatus
li x10, 1
slli x10,x10,15
or x9,x9,x10
csrw mxstatus,x9
#restore regs
ld x7 , 0x0(x2)
ld x8 , 0x8(x2)
addi x2 , x2,16
.endm
.macro AMR_EN
#backup regs
csrsi mhint, 0x8
.endm
.macro MM_DIS
#backup regs
addi x2 , x2,-16
sd x7 , 0x0(x2)
sd x8 , 0x8(x2)
csrr x7,mxstatus
li x8,0xffffffffffff7fff #mask to clear mxstatus bit 15
and x7,x8,x7
csrw mxstatus,x7
#restore regs
ld x7 , 0x0(x2)
ld x8 , 0x8(x2)
addi x2,x2,16
.endm
.macro AMR_DIS
csrci mhint,0x8
.endm
.macro L1_ECC_EN
#backup regs
addi x2 , x2,-16
sd x7 , 0x0(x2)
sd x8 , 0x8(x2)
csrr x7, mhint
li x8,0x1
slli x8,x8,19
or x7,x7,x8
csrw mhint,x7
#restore regs
ld x7 , 0x0(x2)
ld x8 , 0x8(x2)
addi x2,x2,16
.endm
.macro L1_ECC_DIS
#backup regs
addi x2 , x2,-16
sd x7 , 0x0(x2)
sd x8 , 0x8(x2)
csrr x7, mhint
li x8,0xfffffffffffbffff
and x7,x7,x8
csrw mhint,x7
#restore regs
ld x7 , 0x0(x2)
ld x8 , 0x8(x2)
addi x2,x2,16
.endm
.macro DCACHE_TAG_1BIT_ERR_INJ XN
li \XN,0x2
slli \XN,\XN,29
ori \XN,\XN,0x1
.endm
.macro DCACHE_DATA_1BIT_ERR_INJ XN
li \XN,0x3
slli \XN,\XN,29
ori \XN,\XN,0x1
.endm
.macro DCACHE_TAG_2BIT_ERR_INJ XN
li \XN,0x2
slli \XN,\XN,29
ori \XN,\XN,0x3
.endm
.macro DCACHE_DATA_2BIT_ERR_INJ XN
li \XN,0x3
slli \XN,\XN,29
ori \XN,\XN,0x3
.endm
.macro MCER_CHECK_ECC_VLD VLD FAIL
addi x2 , x2,-16
sd x7 , 0x0(x2)
sd x8 , 0x8(x2)
csrr x7,mcer
srli x7,x7,31
andi x7,x7,1
li x8, \VLD
bne x7,x8, \FAIL
#restore regs
ld x7 , 0x0(x2)
ld x8 , 0x8(x2)
addi x2,x2,16
.endm
.macro MCER_ECC_VLD_CLEAR
addi x2 , x2,-16
sd x7 , 0x0(x2)
sd x8 , 0x8(x2)
csrr x7,mcer
li x8, 0x7fffffff
and x7,x8,x7
csrw mcer,x7
#restore regs
ld x7 , 0x0(x2)
ld x8 , 0x8(x2)
addi x2,x2,16
.endm
.macro MCER_CHECK_ERR_FATAL FATAL FAIL
addi x2 , x2,-16
sd x7 , 0x0(x2)
sd x8 , 0x8(x2)
csrr x7,mcer
srli x7,x7,30
andi x7,x7,1
li x8, \FATAL
bne x7,x8, \FAIL
#restore regs
ld x7 , 0x0(x2)
ld x8 , 0x8(x2)
addi x2,x2,16
.endm
.macro MCER_CHECK_FIX_CNT FIXCNT FAIL
addi x2 , x2,-16
sd x7 , 0x0(x2)
sd x8 , 0x8(x2)
csrr x7,mcer
srli x7,x7,24
andi x7,x7,0x3f
li x8,\FIXCNT
bne x7,x8,\FAIL
#restore regs
ld x7 , 0x0(x2)
ld x8 , 0x8(x2)
addi x2,x2,16
.endm
.macro MCER_CHECK_RAMID RAMID FAIL
addi x2 , x2,-16
sd x7 , 0x0(x2)
sd x8 , 0x8(x2)
csrr x7,mcer
srli x7,x7,21
andi x7,x7,0x3
li x8,\RAMID
bne x7,x8,\FAIL
#restore regs
ld x7 , 0x0(x2)
ld x8 , 0x8(x2)
addi x2,x2,16
.endm
.macro MCER_CHECK_ERR_WAY ERR_WAY FAIL
addi x2 , x2,-16
sd x7 , 0x0(x2)
sd x8 , 0x8(x2)
csrr x7,mcer
srli x7,x7,17
andi x7,x7,0x1
li x8,\ERR_WAY
bne x7,x8,\FAIL
#restore regs
ld x7 , 0x0(x2)
ld x8 , 0x8(x2)
addi x2,x2,16
.endm
.macro MCER_CHECK_ERR_INDEX ERR_INDEX FAIL
addi x2 , x2,-16
sd x7 , 0x0(x2)
sd x8 , 0x8(x2)
csrr x7,mcer
li x8,0x1ffff
and x7,x7,x8
li x8,\ERR_INDEX
bne x7,x8,\FAIL
#restore regs
ld x7 , 0x0(x2)
ld x8 , 0x8(x2)
addi x2,x2,16
.endm
.macro DATA_CACHE_WA
#backup regs
csrsi mhcr,0x4
.endm
.macro DATA_CACHE_NWA
#backup regs
csrci mhcr,0x4
.endm
#check the word from addr whether is that data
.macro CHK_ADDR_DATA ADDR, DATA,IMM_REG0,IMM_REG1
li \IMM_REG0,\ADDR
lw \IMM_REG0,(\IMM_REG0)
li \IMM_REG1,\DATA
bne \IMM_REG0,\IMM_REG1,__fail
.endm
.macro CHK_REG_VALUE REG,VALUE,IMM_REG0
li \IMM_REG0,\VALUE
bne \REG,\IMM_REG0,__fail
.endm
|
Advanced-Microelectronics-Group/OpenC910_Modified
| 5,605
|
smart_run/tests/cases/MMU/ct_mmu_basic.s
|
/*Copyright 2019-2021 T-Head Semiconductor Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
.text
.align 6
.global main
main:
###########################################################
# MACRO DEFINITION #
###########################################################
.macro MMU_PTW_1G VPN, PPN, FLAG, THEADFLAG
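#build a single Sv39 level-1 (1GiB) leaf PTE: pte = (PPN << 10) | FLAG, with
#the T-Head extended attribute bits (THEADFLAG) placed in pte[63:59]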
#backup regs
addi x2, x2, -56
sd x9, 0(x2)
sd x10, 8(x2)
sd x11, 16(x2)
sd x12, 24(x2)
sd x13, 32(x2)
sd x14, 40(x2)
sd x15, 48(x2)
# get PPN from satp in x9
csrr x9, satp
li x10, 0xfffffffffff
and x9, x9, x10
# get VPN2 in x10
li x10, \VPN
li x11, 0x7fc0000
and x10,x10,x11
srli x10, x10, 18
# cfig first-level page
# level-1 page, entry addr:{ppn,VPN2,3'b0} in x15
slli x14, x9, 12
slli x11, x10, 3
add x15, x14, x11
# write pte into level-1 page
li x11, \PPN
li x12, \THEADFLAG
li x13, \FLAG
slli x11, x11, 10
slli x12, x12, 59
or x11, x11, x12
or x11, x11, x13
sd x11, 0(x15)
#restore regs
ld x9, 0(x2)
ld x10, 8(x2)
ld x11, 16(x2)
ld x12, 24(x2)
ld x13, 32(x2)
ld x14, 40(x2)
ld x15, 48(x2)
addi x2, x2, 56
# fence
fence
.endm
.macro MMU_PTW_2M VPN, PPN, FLAG, THEADFLAG
#backup regs
addi x2, x2, -88
sd x9, 0(x2)
sd x10, 8(x2)
sd x11, 16(x2)
sd x12, 24(x2)
sd x13, 32(x2)
sd x14, 40(x2)
sd x15, 48(x2)
sd x16, 56(x2)
sd x17, 64(x2)
sd x18, 72(x2)
sd x19, 80(x2)
# get PPN from satp in x9
csrr x9, satp
li x10, 0xfffffffffff
and x9, x9, x10
# get VPN2 in x10
li x10, \VPN
li x11, 0x7fc0000
and x10,x10,x11
srli x10, x10, 18
# cfig first-level page
# level-1 page, entry addr:{ppn,VPN2,3'b0} in x15
slli x14, x9, 12
slli x11, x10, 3
add x15, x14, x11
# write pte into level-1 page
# level-2 base addr in x12
addi x12, x10, 1
add x12, x12, x9
slli x14, x12, 10
li x13, 0xc1
or x13, x13, x14
sd x13, 0(x15)
# cfig level-2 page
# get VPN1 in x16
li x11, \VPN
li x13, 0x3fe00
and x16, x11, x13
srli x16, x16, 9
# level-2 page, entry addr:{pte.ppn,VPN1,3'b0} in x17
slli x13, x12, 12
slli x17, x16, 3
add x17, x17, x13
# write pte into level-2 page
li x11, \PPN
li x12, \THEADFLAG
li x13, \FLAG
slli x11, x11, 10
slli x12, x12, 59
or x11, x11, x12
or x11, x11, x13
sd x11, 0(x17)
#restore regs
ld x9, 0(x2)
ld x10, 8(x2)
ld x11, 16(x2)
ld x12, 24(x2)
ld x13, 32(x2)
ld x14, 40(x2)
ld x15, 48(x2)
ld x16, 56(x2)
ld x17, 64(x2)
ld x18, 72(x2)
ld x19, 80(x2)
addi x2, x2, 88
# fence
fence
.endm
.macro MXSTATUS_THEADISAEE IMM
#write cskyisaee
li x9,0x400000
csrc mxstatus,x9
csrr x9,mxstatus
li x10, \IMM
slli x10,x10,22
or x9,x9,x10
csrw mxstatus,x9
.endm
.macro MMU_EN
#backup regs
addi x2, x2, -16
sd x9, 0(x2)
sd x10, 8(x2)
#write MODE=8 to SATP
csrr x9,satp
li x10,0xfffffffffffffff
and x9,x9,x10
li x10,8
slli x10,x10,60
or x9,x9,x10
csrw satp, x9
#restore regs
ld x10, 8(x2)
ld x9, 0(x2)
addi x2, x2, 16
.endm
.macro MMU_SATP_PPN PPN
#backup regs
addi x2, x2, -16
sd x9, 0(x2)
sd x10, 8(x2)
#write PPN
csrr x9, satp
li x10, 0xfffffffff0000000
and x9,x9,x10
li x10, \PPN
or x9,x9,x10
csrw satp,x9
#restore regs
ld x10, 8(x2)
ld x9, 0(x2)
addi x2, x2, 16
.endm
.macro MMU_SATP_ASID ASID
#backup regs
addi x2, x2, -16
sd x9, 0(x2)
sd x10, 8(x2)
#write ASID
csrr x9, satp
li x10, 0xf0000fffffffffff
and x9,x9,x10
li x10, \ASID
slli x10,x10,44
or x9,x9,x10
csrw satp,x9
#restore regs
ld x10, 8(x2)
ld x9, 0(x2)
addi x2, x2, 8
.endm
.macro MMODE_SMODE SROUTINE
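#drop from M-mode to S-mode: mask SIE/MIE, point mepc at SROUTINE, set
#mstatus.MPP to 0b01 (bits 12:11) and mret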
#disable sie/mie
li x29, 0x2
csrc mstatus,x29
li x29, 0x8
csrc mstatus,x29
#write mepc
la x1,\SROUTINE
csrw mepc,x1
li x1,0x800
csrrs x3,mstatus,x1
li x1,0x1000
csrrc x3,mstatus,x1
mret
.endm
.macro MXSTATUS_MAEE IMM
#write maee
li x9,0x200000
csrc mxstatus,x9
csrr x9,mxstatus
li x10, \IMM
slli x10,x10,21
or x9,x9,x10
csrw mxstatus,x9
.endm
###########################################################
# MAIN PROGRAM #
###########################################################
# cfig cskyisaee
MXSTATUS_THEADISAEE 1
# cfig satp
sfence.vma x0,x0
MMU_EN
MMU_SATP_PPN 0x40
MMU_SATP_ASID 0x1
# enable maee
MXSTATUS_MAEE 1
# cfig PMP
# entry0: addr 0x0_0000_0000~0x2_ffff_ffff, r=1 w=1 x=1 l=0
# cfig pmpcfg0
li x1, 0x0f
csrw pmpcfg0, x1
# cfig pmpaddr0
li x1, 0xc0000000
csrw pmpaddr0, x1
# cfig mapping relation
# Instruction + Mapping Area, D=1 A=1 X=1 W=1 R=1
# VPN:0x0 <--> PPN:0x0
MMU_PTW_1G 0x0,0x0,0xcf,0xf
# switch to S mode
MMODE_SMODE TEST1
.global TEST1
TEST1:
# ld/st addr:0x30000
li x1,0x30000
sd x5,0(x1)
ld x4,0(x1)
# EXIT
.global EXIT
EXIT:
la x1, __exit
jr x1
.global FAIL
FAIL:
la x1, __fail
jr x1
|
Advanced-Microelectronics-Group/OpenC910_Modified
| 11,379
|
smart_run/tests/cases/exception/ct_expt_smoke.s
|
/*Copyright 2019-2021 T-Head Semiconductor Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#************************************************************
#* Target file generated by rangen *
#************************************************************
#* *
#************************************************************
#*************Following is the generated instructions*****************
.macro SETMEXP EXP_CODE, HANDLER_BEGIN,HANDLER_END
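#install HANDLER_BEGIN's address into vector_table entry EXP_CODE (8 bytes per
#entry), then jump to HANDLER_END so straight-line execution skips the handler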
sd t0, -24(sp) #address can't be changed
sd t1, -32(sp) #it relates to crt0.s
la t1, vector_table
addi t0,x0,\EXP_CODE
slli t0,t0,0x3
add t1,t1,t0
la t0, \HANDLER_BEGIN
sd t0, 0(t1)
ld t1, -32(sp)
ld t0, -24(sp)
j \HANDLER_END
ld a4, -16(sp)
.endm
.macro MMU_PTW_4K VPN, PPN, FLAG, THEADFLAG
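#build a 3-level Sv39 mapping for one 4KiB page; the level-2 table is placed at
#physical page satp.PPN+VPN2+1 and the level-3 table at
#satp.PPN+0x202+{VPN2,VPN1}, as computed below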
#backup regs
addi x2, x2, -88
sd x9, 0(x2)
sd x10, 8(x2)
sd x11, 16(x2)
sd x12, 24(x2)
sd x13, 32(x2)
sd x14, 40(x2)
sd x15, 48(x2)
sd x16, 56(x2)
sd x17, 64(x2)
sd x18, 72(x2)
sd x19, 80(x2)
# get PPN from satp in x9
csrr x9, satp
li x10, 0xfffffffffff
and x9, x9, x10
# get VPN2 in x10
li x10, \VPN
li x11, 0x7fc0000
and x10,x10,x11
srli x10, x10, 18
# cfig first-level page
# level-1 page, entry addr:{ppn,VPN2,3'b0} in x15
slli x14, x9, 12
slli x11, x10, 3
add x15, x14, x11
# write pte into level-1 page
# level-2 base addr in x12
addi x12, x10, 1
add x12, x12, x9
slli x14, x12, 10
li x13, 0xc1
or x13, x13, x14
sd x13, 0(x15)
# cfig level-2 page
# get VPN1 in x16
li x11, \VPN
li x13, 0x3fe00
and x16, x11, x13
srli x16, x16, 9
# level-2 page, entry addr:{pte.ppn,VPN1,3'b0} in x17
slli x13, x12, 12
slli x17, x16, 3
add x17, x17, x13
# write pte into level-2 page
# level-3 base addr in x18: PPN+2+2^9+{vpn2,vpn1}
li x11, 0x200
addi x11, x11, 2
add x11, x11, x9
li x10, \VPN
srli x10, x10, 9
add x18, x11, x10
slli x19, x18, 10
li x13, 0xc1
or x19, x19, x13
sd x19, 0(x17)
# cfig level-3 page
# get VPN0 in x12
li x11, \VPN
li x12, 0x1ff
and x12, x12, x11
# get level-3 page addr x17
slli x18, x18, 12
slli x12, x12, 3
add x17, x18, x12
# write pte into level-3 page
li x11, \PPN
li x12, \THEADFLAG
li x13, \FLAG
slli x11, x11, 10
slli x12, x12, 59
or x11, x11, x12
or x11, x11, x13
sd x11, 0(x17)
#restore regs
ld x9, 0(x2)
ld x10, 8(x2)
ld x11, 16(x2)
ld x12, 24(x2)
ld x13, 32(x2)
ld x14, 40(x2)
ld x15, 48(x2)
ld x16, 56(x2)
ld x17, 64(x2)
ld x18, 72(x2)
ld x19, 80(x2)
addi x2, x2, 88
# fence
fence
.endm
.text
.align 6
.global main
main:
csrr x10, mhartid
bnez x10, TEST_WFI
nop
.global MMU_CFG
csrr x9,satp
li x10,0xfffffffffffffff
and x9,x9,x10
li x10,8
slli x10,x10,60
or x9,x9,x10
csrw satp, x9
MMU_PTW_4K 0x0,0x0,0xff,0xf
.option norvc
dcache.ciall
.global DATA_CACHE_DIS
DATA_CACHE_DIS:
csrci mhcr,0x2
SETMEXP 2 ILL_BEG ILL_END
.global ILL_BEG
ILL_BEG:
la x3,ILL_EXPT
csrrc x1,mepc,x0
bne x1,x3,TEST_FAIL
li x4,11111111
csrrci x1,mtval,0x0
bne x1,x4,TEST_FAIL
li x3,0x2
csrrc x1,mcause,x0
bne x1,x3,TEST_FAIL
csrrs x1, mepc,x0
addi x1,x1,0x4
csrrw x0, mepc,x1
mret
.global ILL_END
ILL_END:
SETMEXP 3 EBREAK_BEG EBREAK_END
.global EBREAK_BEG
EBREAK_BEG:
la x3,EBREAK_EXPT
csrrc x1,mepc,x0
bne x1,x3,TEST_FAIL
csrrci x1,mtval,0x0
li x3,0x3
csrrc x1,mcause,x0
bne x1,x3,TEST_FAIL
csrrs x1, mepc,x0
addi x1,x1,0x4
csrrw x0, mepc,x1
mret
.global EBREAK_END
EBREAK_END:
SETMEXP 4 MISALIGN_BEG MISALIGN_END
.global MISALIGN_BEG
MISALIGN_BEG:
la x3,MISALIGN_EXPT
csrrc x1,mepc,x0
bne x1,x3,TEST_FAIL
li x3,0x6fff
csrrci x1,mtval,0x0
bne x1,x3,TEST_FAIL
li x3,0x4
csrrc x1,mcause,x0
bne x1,x3,TEST_FAIL
csrrs x1, mepc,x0
addi x1,x1,0x4
csrrw x0, mepc,x1
mret
.global MISALIGN_END
MISALIGN_END:
SETMEXP 5 LOAD_ACCERR_BEG LOAD_ACCERR_END
.global LOAD_ACCERR_BEG
LOAD_ACCERR_BEG:
la x3,LOAD_ACCERR_EXPT
csrrc x1,mepc,x0
bne x1,x3,TEST_FAIL
li x3,0x0
csrrci x1,mtval,0x0
bne x1,x3,TEST_FAIL
li x3,0x5
csrrc x1,mcause,x0
csrrs x1, mepc,x0
addi x1,x1,0x4
csrrw x0, mepc,x1
mret
.global LOAD_ACCERR_END
LOAD_ACCERR_END:
SETMEXP 6 STORE_MISALIGN_BEG STORE_MISALIGN_END
.global STORE_MISALIGN_BEG
STORE_MISALIGN_BEG:
la x3,STORE_MISALIGN_EXPT
csrrc x1,mepc,x0
li x3,0x6
csrrc x1,mcause,x0
bne x1,x3,TEST_FAIL
csrrs x1, mepc,x0
addi x1,x1,0x4
csrrw x0, mepc,x1
mret
.global STORE_MISALIGN_END
STORE_MISALIGN_END:
SETMEXP 7 STORE_ACCERR_BEG STORE_ACCERR_END
.global STORE_ACCERR_BEG
STORE_ACCERR_BEG:
la x3,STORE_ACCERR_EXPT
csrrc x1,mepc,x0
bne x1,x3,TEST_FAIL
li x3,0x7
csrrc x1,mcause,x0
bne x1,x3,TEST_FAIL
csrrs x1, mepc,x0
addi x1,x1,0x4
csrrw x0, mepc,x1
mret
.global STORE_ACCERR_END
STORE_ACCERR_END:
SETMEXP 11 MECALL_BEG MECALL_END
.global MECALL_BEG
MECALL_BEG:
la x3,MECALL_EXPT
csrrc x1,mepc,x0
bne x1,x3,TEST_FAIL
csrrci x1,mtval,0x0
bne x1,x0,TEST_FAIL
li x3,0xb
csrrc x1,mcause,x0
bne x1,x3,TEST_FAIL
csrrs x1, mepc,x0
addi x1,x1,0x4
csrrw x0, mepc,x1
mret
.global MECALL_END
MECALL_END:
SETMEXP 1 INST_SCCERR_BEG INST_SCCERR_END
.global INST_SCCERR_BEG
INST_SCCERR_BEG:
li x3,0x600000010
csrrc x1,mepc,x0
bne x1,x3,TEST_FAIL
li x3,0x600000010
csrrci x1,mtval,0x0
li x3,0x1
csrrc x1,mcause,x0
bne x1,x3,TEST_FAIL
la x1,AFTER_INST_ACCERR_EXPT
csrrw x3,mepc,x1
mret
jalr x0,x1,0x0
.global INST_SCCERR_END
INST_SCCERR_END:
SETMEXP 9 SECALL_BEG SECALL_END
.global SECALL_BEG
SECALL_BEG:
la x3,SECALL_EXPT
csrrc x1,mepc,x0
bne x1,x3,TEST_FAIL
li x3,0x9
csrrc x1,mcause,x0
bne x1,x3,TEST_FAIL
csrrs x1, mepc,x0
addi x1,x1,0x4
csrrw x0, mepc,x1
mret
.global SECALL_END
SECALL_END:
SETMEXP 12 INS_PAGEFAULT_BEG INS_PAGEFAULT_END
.global INS_PAGEFAULT_BEG
INS_PAGEFAULT_BEG:
li x3,0xfff00000
csrrc x1,mepc,x0
bne x1,x3,TEST_FAIL
li x3,0xc
csrrc x1,mcause,x0
bne x1,x3,TEST_FAIL
la x1,AFTER_INST_PAGEFAULT_EXPT
csrrw x0, mepc,x1
mret
.global INS_PAGEFAULT_END
INS_PAGEFAULT_END:
SETMEXP 13 LOAD_PAGEFAULT_BEG LOAD_PAGEFAULT_END
.global LOAD_PAGEFAULT_BEG
LOAD_PAGEFAULT_BEG:
la x3,LOAD_PAGEFAULT_EXPT
csrrc x1,mepc,x0
bne x1,x3,TEST_FAIL
li x3,0xfff00000
csrrci x1,mtval,0x0
bne x1,x3,TEST_FAIL
li x3,0xd
csrrc x1,mcause,x0
bne x1,x3,TEST_FAIL
csrrs x1, mepc,x0
addi x1,x1,0x4
csrrw x0, mepc,x1
mret
.global LOAD_PAGEFAULT_END
LOAD_PAGEFAULT_END:
SETMEXP 15 STORE_PAGEFAULT_BEG STORE_PAGEFAULT_END
.global STORE_PAGEFAULT_BEG
STORE_PAGEFAULT_BEG:
la x3,STORE_PAGEFAULT_EXPT
csrrc x1,mepc,x0
bne x1,x3,TEST_FAIL
li x3,0xfff00000
csrrci x1,mtval,0x0
bne x1,x3,TEST_FAIL
li x3,0xf
csrrc x1,mcause,x0
bne x1,x3,TEST_FAIL
csrrs x1, mepc,x0
addi x1,x1,0x4
csrrw x0, mepc,x1
mret
.global STORE_PAGEFAULT_END
STORE_PAGEFAULT_END:
SETMEXP 8 UECALL_BEG UECALL_END
.global UECALL_BEG
UECALL_BEG:
la x3,UROUTINE
csrrc x1,mepc,x0
bne x1,x3,TEST_FAIL
li x3,0x8
csrrc x1,mcause,x0
bne x1,x3,TEST_FAIL
csrrs x1, mepc,x0
addi x1,x1,0x4
csrrw x0, mepc,x1
mret
.global UECALL_END
UECALL_END:
.global WRITE_DELEG
WRITE_DELEG:
li x1,0x6000
csrrc x0,mstatus,x1
.global MCSR_INITIAL
MCSR_INITIAL:
li x1,0x800
csrrw x0,mstatus,x1
li x1,0x800000000000000f
csrrw x0,mcause,x1
li x1,0xffffffffffffffff
csrrw x0,mepc,x1
li x1,0x800000000000000f
csrrw x0,scause,x1
li x1,0xffffffffffffffff
csrrw x0,sepc,x1
.global ILL_EXPT
ILL_EXPT:
.long 11111111
.global EBREAK
EBREAK:
li x1,0x800
csrrw x0,mstatus,x1
.global EBREAK_EXPT
EBREAK_EXPT:
ebreak
.global MISALIGN
MISALIGN:
li x1,0x800
csrrw x0,mstatus,x1
li x1,0x6fff
.global MISALIGN_EXPT
MISALIGN_EXPT:
lw x3,0x0(x1)
.global LOAD_ACCERR
LOAD_ACCERR:
li x1,0x800
csrrw x0,mstatus,x1
li x1, 0xbfffffff
csrw pmpaddr0, x1
li x1, 0xc007ffff
csrw pmpaddr1, x1
li x1, 0x1000001ff
csrw pmpaddr2, x1
li x1, 0x147ffffff
csrw pmpaddr3, x1
li x1, 0x1800001ff
csrw pmpaddr4, x1
li x1, 0x1807fffff
csrw pmpaddr5, x1
li x1, 0x18981898180f
csrw pmpcfg0, x1
li x1,0x600000010
.global LOAD_ACCERR_EXPT
LOAD_ACCERR_EXPT:
lw x3,0x0(x1)
.global STORE_MISALIGN
STORE_MISALIGN:
li x1,0x800
csrrw x0,mstatus,x1
li x1,0x6fff
.global STORE_MISALIGN_EXPT
STORE_MISALIGN_EXPT:
sw x3,0x0(x1)
.global STORE_ACCERR
STORE_ACCERR:
li x1,0x800
csrrw x0,mstatus,x1
li x1,0x600000010
.global STORE_ACCERR_EXPT
STORE_ACCERR_EXPT:
sw x3,0x0(x1)
li x1,0x800
csrrw x0,mstatus,x1
.global MECALL_EXPT
MECALL_EXPT:
ecall
li x1,0x800
csrrw x0,mstatus,x1
li x1,0x600000010
.global INST_ACCERR_EXPT
INST_ACCERR_EXPT:
jalr x0,x1,0x0
.global AFTER_INST_ACCERR_EXPT
AFTER_INST_ACCERR_EXPT:
nop
la x1,SROUTINE
csrw mepc,x1
li x1,0xc0800
csrrs x0,mstatus,x1
li x1,0x1000
csrrc x0,mstatus,x1
mret
.global SROUTINE
SROUTINE:
.global SECALL_EXPT
SECALL_EXPT:
ecall
.global INST_PAGEFAULT
INST_PAGEFAULT:
li x1,0xfff00000
.global INST_PAGEFAULT_EXPT
INST_PAGEFAULT_EXPT:
jalr x0,x1,0x0
.global AFTER_INST_PAGEFAULT_EXPT
AFTER_INST_PAGEFAULT_EXPT:
nop
.global LOAD_PAGEFAULT
LOAD_PAGEFAULT:
li x1,0xfff00000
.global LOAD_PAGEFAULT_EXPT
LOAD_PAGEFAULT_EXPT:
lw x3,0x0(x1)
.global STORE_PAGEFAULT
STORE_PAGEFAULT:
li x1,0xfff00000
.global STORE_PAGEFAULT_EXPT
STORE_PAGEFAULT_EXPT:
sw x3,0x0(x1)
la x1,UROUTINE
csrw sepc,x1
li x1,0x100
csrrc x3,sstatus,x1
sret
.global UROUTINE
UROUTINE:
ecall
.global TEST_PASS
TEST_PASS:
la x1, __exit
jr x1
.global TEST_FAIL
TEST_FAIL:
la x1, __fail
jr x1
.global TEST_WFI
TEST_WFI:
wfi
|
Advanced-Microelectronics-Group/OpenC910_Modified
| 1,987
|
smart_run/tests/cases/csr/ct_csr_operate.s
|
/*Copyright 2019-2021 T-Head Semiconductor Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#************************************************************
#* Target file generated by rangen *
#************************************************************
#* *
#************************************************************
#*************Following is the generated instructions*****************
.text
.align 6
.global main
main:
.option norvc
.global reg_access
reg_access:
li x1,0xffffffff
#read to x10 & write x1 to csr
csrrw x10,mstatus,x1
#read to x10 & set x1 bit to csr
csrrs x10,mstatus,x1
#read to x10 & clr x1 bit to csr
csrrc x10,mstatus,x1
#read to x10
csrr x10,mstatus
# write x1 to csr
csrw mstatus,x1
#set x1 bit to csr
csrs mstatus,x1
#clr x1 bit to csr
csrc mstatus,x1
.global imm_access
imm_access:
#read to x10 & write imm to csr
csrrwi x10,mstatus,0x3
#read to x10 & set imm bit to csr
csrrsi x10,mstatus,0x3
#read to x10 & clr imm bit to csr
csrrci x10,mstatus,0x3
#write imm to csr
csrwi mstatus,0x1
#set imm bit to csr
csrs mstatus,0x1
#clr imm bit to csr
csrc mstatus,0x1
.global TEST_EXIT
TEST_EXIT:
la x1,__exit
jr x1
.global TEST_FAIL
TEST_FAIL:
la x1,__fail
jr x1
.global TEST_WFI
TEST_WFI:
wfi
.option rvc
#******this region is added by generator******
|
Advanced-Microelectronics-Group/OpenC910_Modified
| 3,759
|
smart_run/tests/cases/cache/idcache_oper/ct_idcache_oper.s
|
/*Copyright 2019-2021 T-Head Semiconductor Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#************************************************************
#* Target file generated by rangen *
#************************************************************
#* *
#************************************************************
#*************Following is the generated instructions*****************
.text
.align 6
.global main
main:
csrr x10, mhartid
bnez x10, WFI
nop
.global ID_SYNC
ID_SYNC:
fence.i
.global ICACHE_DIS
ICACHE_DIS:
csrci mhcr,0x1
.global ICACHE_INV_ALL
ICACHE_INV_ALL:
#sel icache
csrsi mcor,0x1
csrci mcor,0x2
#inv
csrsi mcor,0x10
.global ICACHE_EN
ICACHE_EN:
csrsi mhcr,0x1
.global DCACHE_DIS
DCACHE_DIS:
csrci mhcr,0x2
.global DCACHE_CLR_ALL
DCACHE_CLR_ALL:
#sel dcache
csrsi mcor,0x2
csrci mcor,0x1
#clr
li x10,0x20
csrs mcor,x10
.global DCACHE_INV_ALL
DCACHE_INV_ALL:
#sel dcache
csrsi mcor,0x2
csrci mcor,0x1
#inv
csrsi mcor,0x10
.global DCACHE_EN
DCACHE_EN:
csrsi mhcr,0x2
.global INDEX_OPER
INDEX_OPER:
nop
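#cache RAM read sequence: write the set index plus RAM select (top nibble:
#0=icache tag, 1=icache data, 2=dcache tag, 3=dcache data) to mcindex, write 1
#to mcins to issue the read, then collect the result from mcdata0/mcdata1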
.global READ_ICACHE_TAG
READ_ICACHE_TAG:
#set index 3fc0 id 0 (icache tag)
li x3, 0x3fc0
csrw mcindex,x3
#read icache tag data
li x3, 0x1
csrw mcins,x3
csrr x3,mcdata0
.global READ_ICACHE_DATA
READ_ICACHE_DATA:
#set index 3fc0 id 1 (icache data)
li x3, 0x10003fc0
csrw mcindex,x3
#read icache data
li x3, 0x1
csrw mcins,x3
csrr x3,mcdata0
csrr x4,mcdata1
.global READ_DCACHE_TAG
READ_DCACHE_TAG:
#set index 3fc0 id 2 (dcache tag)
li x3, 0x20003fc0
csrw mcindex,x3
#read dcache tag data
li x3, 0x1
csrw mcins,x3
csrr x3,mcdata0
.global READ_DCACHE_DATA
READ_DCACHE_DATA:
#set index 3fc0 id 3 (dcache data)
li x3, 0x30003fc0
csrw mcindex,x3
li x3, 0x1
#read dcache data
csrw mcins,x3
csrr x3,mcdata0
csrr x4,mcdata1
.global INST_OPER
INST_OPER:
li x10,0x400000
csrs mxstatus,x10
.global icache_ins_all
icache_ins_all:
#inv all
icache.iall
#inv all & share
icache.ialls
.global icache_ins_index
icache_ins_index:
li x10,0x3fc0
#inv va,pa
icache.iva x10
icache.ipa x10
.global dcache_ins
dcache_ins:
#inv all
dcache.iall
#clr all
dcache.call
#inv & clr all
dcache.ciall
.global dcache_ins_index
dcache_ins_index:
li x10, 0x30040000
#inv set /way
dcache.isw x10
#clr set /way
dcache.csw x10
#inv & clr set/way
dcache.cisw x10
#tag
li x10,0x3fc0
#virtual addr inv dcache & l2 cache
dcache.iva x10
#virtual addr clr dcache & l2 cache
dcache.cva x10
#virtual addr clr dcache
dcache.cval1 x10
#virtual addr clr/inv dcache & l2 cache
dcache.civa x10
#physical addr inv
dcache.ipa x10
#physical addr clr dcache &l2 cache
dcache.cpa x10
#physical addr clr dcache
dcache.cpal1 x10
#physical addr clr/inv dcache & l2 cache
dcache.cipa x10
.global EXIT
EXIT:
la x1, __exit
jr x1
.global FAIL
FAIL:
la x1, __fail
jr x1
.global WFI
WFI:
wfi
|
Advanced-Microelectronics-Group/OpenC910_Modified
| 201,607
|
smart_run/tests/cases/smoke/bus_smoke/ct_bus_smoke.s
|
/*Copyright 2019-2021 T-Head Semiconductor Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#************************************************************
#* Target file generated by rangen *
#************************************************************
#* *
#************************************************************
######################################################
########## MACRO DEFINE
.macro MSTATUS_SUM IMM
#write sum
li x9,0x40000
csrc mstatus,x9
#csrr x9,mstatus
li x10, \IMM
slli x10,x10,18
# or x9,x9,x10
csrs mstatus,x10
.endm
.macro MMU_EN
#backup regs
addi x2, x2, -16
sd x9, 0(x2)
sd x10, 8(x2)
#write MODE=8 to SATP
csrr x9,satp
li x10,0xfffffffffffffff
and x9,x9,x10
li x10,8
slli x10,x10,60
or x9,x9,x10
csrw satp, x9
#restore regs
ld x10, 8(x2)
ld x9, 0(x2)
addi x2, x2, 16
.endm
.macro MMU_SATP_PPN PPN
#backup regs
addi x2, x2, -16
sd x9, 0(x2)
sd x10, 8(x2)
#write PPN
csrr x9, satp
li x10, 0xfffffffff0000000
and x9,x9,x10
li x10, \PPN
or x9,x9,x10
csrw satp,x9
#restore regs
ld x10, 8(x2)
ld x9, 0(x2)
addi x2, x2, 16
.endm
.macro MMU_SATP_ASID ASID
#backup regs
addi x2, x2, -16
sd x9, 0(x2)
sd x10, 8(x2)
#write ASID
csrr x9, satp
li x10, 0xf0000fffffffffff
and x9,x9,x10
li x10, \ASID
slli x10,x10,44
or x9,x9,x10
csrw satp,x9
#restore regs
ld x10, 8(x2)
ld x9, 0(x2)
addi x2, x2, 8
.endm
.macro MXSTATUS_MAEE IMM
#write maee
li x9,0x200000
csrc mxstatus,x9
csrr x9,mxstatus
li x10, \IMM
slli x10,x10,21
or x9,x9,x10
csrw mxstatus,x9
.endm
.macro MMU_PTW_4K VPN, PPN, FLAG, THEADFLAG
#backup regs
addi x2, x2, -88
sd x9, 0(x2)
sd x10, 8(x2)
sd x11, 16(x2)
sd x12, 24(x2)
sd x13, 32(x2)
sd x14, 40(x2)
sd x15, 48(x2)
sd x16, 56(x2)
sd x17, 64(x2)
sd x18, 72(x2)
sd x19, 80(x2)
# get PPN from satp in x9
csrr x9, satp
li x10, 0xfffffffffff
and x9, x9, x10
# get VPN2 in x10
li x10, \VPN
li x11, 0x7fc0000
and x10,x10,x11
srli x10, x10, 18
# cfig first-level page
# level-1 page, entry addr:{ppn,VPN2,3'b0} in x15
slli x14, x9, 12
slli x11, x10, 3
add x15, x14, x11
# write pte into level-1 page
# level-2 base addr in x12
addi x12, x10, 1
add x12, x12, x9
slli x14, x12, 10
li x13, 0xc1
or x13, x13, x14
sd x13, 0(x15)
# cfig level-2 page
# get VPN1 in x16
li x11, \VPN
li x13, 0x3fe00
and x16, x11, x13
srli x16, x16, 9
# level-2 page, entry addr:{pte.ppn,VPN1,3'b0} in x17
slli x13, x12, 12
slli x17, x16, 3
add x17, x17, x13
# write pte into level-2 page
# level-3 base addr in x18: PPN+2+2^9+{vpn2,vpn1}
li x11, 0x200
addi x11, x11, 2
add x11, x11, x9
li x10, \VPN
srli x10, x10, 9
add x18, x11, x10
slli x19, x18, 10
li x13, 0xc1
or x19, x19, x13
sd x19, 0(x17)
# cfig level-3 page
# get VPN0 in x12
li x11, \VPN
li x12, 0x1ff
and x12, x12, x11
# get level-3 page addr x17
slli x18, x18, 12
slli x12, x12, 3
add x17, x18, x12
# write pte into level-3 page
li x11, \PPN
li x12, \THEADFLAG
li x13, \FLAG
slli x11, x11, 10
slli x12, x12, 59
or x11, x11, x12
or x11, x11, x13
sd x11, 0(x17)
#restore regs
ld x9, 0(x2)
ld x10, 8(x2)
ld x11, 16(x2)
ld x12, 24(x2)
ld x13, 32(x2)
ld x14, 40(x2)
ld x15, 48(x2)
ld x16, 56(x2)
ld x17, 64(x2)
ld x18, 72(x2)
ld x19, 80(x2)
addi x2, x2, 88
# fence
fence
.endm
.macro MMU_MMODE_SMODE_S SROUTINE
#write sepc
la x1,\SROUTINE
csrw sepc,x1
li x1,0x100
csrrs x0,mstatus,x1
sret
.endm
.macro MMODE_SMODE SROUTINE
#disable sie/mie
li x29, 0x2
csrc mstatus,x29
li x29, 0x8
csrc mstatus,x29
#write mepc
la x1,\SROUTINE
csrw mepc,x1
li x1,0x800
csrrs x3,mstatus,x1
li x1,0x1000
csrrc x3,mstatus,x1
mret
.endm
.text
.align 6
.global main
main:
######################################################
########## Control register define
li x3,0x4
csrs mhcr,x3
li t0,0x8000
csrs mxstatus,t0
# cfig satp
sfence.vma x0,x0
MMU_EN
MMU_SATP_PPN 0x40
MMU_SATP_ASID 0x1
# cfig PMP
# entry0: addr 0x0_0000_0000~0x2_ffff_ffff, r=1 w=1 x=1 l=0
# cfig pmpcfg0
li x1, 0x0f
csrw pmpcfg0, x1
# cfig pmpaddr0
li x1, 0xc0000000
csrw pmpaddr0, x1
MSTATUS_SUM 1
MXSTATUS_MAEE 1
MMU_PTW_4K 0x0,0x0,0xff,0xf
MMU_PTW_4K 0x1,0x1,0xff,0xf
MMU_PTW_4K 0x2,0x2,0xff,0xf
MMU_PTW_4K 0x3,0x3,0xff,0xf
MMU_PTW_4K 0x4,0x4,0xff,0xf
MMU_PTW_4K 0x5,0x5,0xff,0xf
MMU_PTW_4K 0x6,0x6,0xff,0xf
MMU_PTW_4K 0x7,0x7,0xff,0xf
MMU_PTW_4K 0x8,0x8,0xff,0xf
MMU_PTW_4K 0x9,0x9,0xff,0xf
MMU_PTW_4K 0xa,0xa,0xff,0xf
MMU_PTW_4K 0xb,0xb,0xff,0xf
MMU_PTW_4K 0xc,0xc,0xff,0xf
MMU_PTW_4K 0xd,0xd,0xff,0xf
MMU_PTW_4K 0xe,0xe,0xff,0xf
MMU_PTW_4K 0xf,0xf,0xff,0xf
MMU_PTW_4K 0x10,0x10,0xff,0xf
MMU_PTW_4K 0x11,0x11,0xff,0xf
MMU_PTW_4K 0x12,0x12,0xff,0xf
MMU_PTW_4K 0x13,0x13,0xff,0xf
MMU_PTW_4K 0x14,0x14,0xff,0xf
MMU_PTW_4K 0x15,0x15,0xff,0xf
MMU_PTW_4K 0x16,0x16,0xff,0xf
MMU_PTW_4K 0x17,0x17,0xff,0xf
MMU_PTW_4K 0x18,0x18,0xff,0xf
MMU_PTW_4K 0x19,0x19,0xff,0xf
MMU_PTW_4K 0x1a,0x1a,0xff,0xf
MMU_PTW_4K 0x1b,0x1b,0xff,0xf
MMU_PTW_4K 0x1c,0x1c,0xff,0xf
MMU_PTW_4K 0x1d,0x1d,0xff,0xf
MMU_PTW_4K 0x1e,0x1e,0xff,0xf
MMU_PTW_4K 0x1f,0x1f,0xff,0xf
MMU_PTW_4K 0x100,0x100,0xef,0xf
MMU_PTW_4K 0x101,0x101,0xef,0xf
MMU_PTW_4K 0x102,0x102,0xef,0x7
MMU_PTW_4K 0x103,0x103,0xef,0x7
MMU_PTW_4K 0x104,0x104,0xef,0x17
MMU_PTW_4K 0x105,0x105,0xef,0x17
MMU_PTW_4K 0x106,0x106,0xef,0xd
MMU_PTW_4K 0x107,0x107,0xef,0xd
and x26,x9,x8
fence o,iorw
lhu x5,0x000000030(x22)
ld x5,0x000000038(x22)
lwu x5,0x00000003C(x22)
lwu x5,0x000000008(x22)
sd x4,0x000000038(x22)
sh x4,0x00000000E(x22)
sd x4,0x000000038(x22)
sh x4,0x00000001C(x22)
li x8, 0xffffffc0
######################################################
########## 4 core
csrr x3,mhartid
li x4,0
beq x4,x3,CPU0
li x4,1
beq x4,x3,CPU1
li x4,2
beq x4,x3,CPU2
li x4,3
beq x4,x3,CPU3
wfi
.global CPU0
CPU0:
MMODE_SMODE CPU0_S
.global CPU0_S
CPU0_S:
######################################################
########## every core will execute ld/st instructions
# address can be non-cacheable/strongly-ordered/cacheable/shareable
li x22,0x00010604E
and x22,x22,x8
dcache.civa x22
sync.s
li x22,0x0001020BD
and x22,x22,x8
li x4,0xAA22D819CDCF2E8A
lh x5,0x00000003E(x22)
li x22,0x00010015C
and x22,x22,x8
li x4,0xB4F5D18ACFAF1531
ld x5,0x000000040(x22)
li x22,0x000104084
and x22,x22,x8
li x4,0xF2ED3B17ACAB6C88
sb x4,0x000000026(x22)
li x22,0x00010419C
and x22,x22,x8
li x4,0xDCC79DFE631CBC67
sh x4,0x00000002A(x22)
li x22,0x0001000C0
and x22,x22,x8
li x4,0xB37B73E27D0C3DD1
dcache.civa x22
li x22,0x000102076
and x22,x22,x8
li x4,0x9D83336F72A31D0E
sb x4,0x00000002D(x22)
li x22,0x00010000C
and x22,x22,x8
li x4,0xD54EEC9496D77D33
sb x4,0x00000002E(x22)
li x22,0x0001061F2
and x22,x22,x8
li x4,0x904298D6C13980E9
lb x5,0x00000003A(x22)
li x22,0x00010010F
and x22,x22,x8
li x4,0x2A33098201C69D97
lw x5,0x000000034(x22)
li x22,0x0001041D9
and x22,x22,x8
li x4,0x898BEB524FCE44C6
dcache.civa x22
li x22,0x00010016A
and x22,x22,x8
li x4,0xD6F188CCBBA61FBF
sb x4,0x00000001B(x22)
li x22,0x0001020AF
and x22,x22,x8
li x4,0x401702BA660AC540
ld x5,0x000000040(x22)
li x22,0x000106048
and x22,x22,x8
li x4,0xB35DD799AC82EFFF
lbu x5,0x000000019(x22)
li x22,0x0001061F6
and x22,x22,x8
li x4,0x4A53B9A9C1480A88
dcache.cva x22
li x22,0x00010402A
and x22,x22,x8
li x4,0x5DB6B2A18FB1D1F3
dcache.cva x22
li x22,0x0001040C0
and x22,x22,x8
li x4,0xE0EC4B9B70DAEAE0
sb x4,0x000000011(x22)
li x22,0x000106016
and x22,x22,x8
li x4,0x8D0D9495790A20E1
sw x4,0x00000001C(x22)
li x22,0x0001061B3
and x22,x22,x8
li x4,0xDBFEB4F7BC6DC238
lwu x5,0x000000048(x22)
li x22,0x0001041AD
and x22,x22,x8
li x4,0x508282DD795A64F
sd x4,0x000000010(x22)
li x22,0x000102093
and x22,x22,x8
li x4,0xE03F2A2E53AA6074
sw x4,0x000000034(x22)
li x22,0x00010011C
and x22,x22,x8
li x4,0x57F21123902B0A12
lwu x5,0x000000020(x22)
li x22,0x0001000AF
and x22,x22,x8
li x4,0x7C0BFCC3EF649ED7
sd x4,0x000000028(x22)
li x22,0x0001061DB
and x22,x22,x8
li x4,0x76F7F078E548A641
lw x5,0x000000018(x22)
li x22,0x0001021BB
and x22,x22,x8
li x4,0x3DFA74CB37939769
lbu x5,0x000000028(x22)
li x22,0x0001061F5
and x22,x22,x8
li x4,0x6E01757F5341E10
sd x4,0x000000018(x22)
li x22,0x000104142
and x22,x22,x8
li x4,0x2AA7B367B6A727D0
sb x4,0x00000003A(x22)
li x22,0x000104080
and x22,x22,x8
li x4,0xA10F3C70F88B80DA
dcache.civa x22
li x22,0x00010416F
and x22,x22,x8
li x4,0x82F966E96599404A
dcache.civa x22
li x22,0x0001061C3
and x22,x22,x8
li x4,0x983FD369869567AE
sb x4,0x000000033(x22)
li x22,0x0001021CD
and x22,x22,x8
li x4,0xAA1022B978F32BC0
lwu x5,0x00000000C(x22)
li x22,0x00010009B
and x22,x22,x8
li x4,0xCB6086D5DECDB944
lbu x5,0x000000040(x22)
li x22,0x00010405C
and x22,x22,x8
li x4,0x163E10063B75E98B
lw x5,0x000000030(x22)
li x22,0x0001041EF
and x22,x22,x8
li x4,0x985F8274BBD8D6EA
lb x5,0x000000006(x22)
li x22,0x0001020B5
and x22,x22,x8
li x4,0xFB47403184F5973D
sh x4,0x00000004C(x22)
li x22,0x0001041CE
and x22,x22,x8
li x4,0x85F72F68ACE1CC19
sd x4,0x000000008(x22)
li x22,0x0001040ED
and x22,x22,x8
li x4,0xB141C866E32DF1B2
sd x4,0x000000000(x22)
li x22,0x000106100
and x22,x22,x8
li x4,0xDCA5BF0786D830F3
sh x4,0x000000050(x22)
li x22,0x0001001BA
and x22,x22,x8
li x4,0x4A11DC7CBD069413
dcache.civa x22
li x22,0x000102135
and x22,x22,x8
li x4,0xF67C83B1A12BE1AA
lw x5,0x000000014(x22)
li x22,0x0001060F3
and x22,x22,x8
li x4,0x13EE47A0DA872F77
dcache.cva x22
li x22,0x00010404E
and x22,x22,x8
li x4,0x6898FB0F5EFB6D89
lw x5,0x000000004(x22)
li x22,0x000100086
and x22,x22,x8
li x4,0x3362B98A781EFC0C
dcache.civa x22
li x22,0x00010209D
and x22,x22,x8
li x4,0xDFE9682CF4F97DCC
lbu x5,0x00000000A(x22)
li x22,0x000106000
and x22,x22,x8
li x4,0x594DE2F5EFC5F7A2
dcache.cva x22
li x22,0x0001060C0
and x22,x22,x8
li x4,0x18955573366D500A
dcache.cva x22
li x22,0x00010202F
and x22,x22,x8
li x4,0xBFA60A18F3CB6BF
dcache.cva x22
li x22,0x00010604B
and x22,x22,x8
li x4,0x834150F0279877DD
dcache.cva x22
li x22,0x0001060BB
and x22,x22,x8
li x4,0xFFD69518E608C4D9
sw x4,0x000000020(x22)
li x22,0x0001040B0
and x22,x22,x8
li x4,0xB302E19D48C32B3A
lwu x5,0x000000034(x22)
li x22,0x00010014C
and x22,x22,x8
li x4,0x9BB27BEFE8B8A0FE
dcache.civa x22
li x22,0x000104166
and x22,x22,x8
li x4,0xDA3517D041391FBE
sd x4,0x000000008(x22)
li x22,0x0001061C2
and x22,x22,x8
li x4,0xFF16224DC7655B0E
sh x4,0x00000003A(x22)
li x22,0x000102130
and x22,x22,x8
li x4,0xBFE8BE887D1E9A52
sh x4,0x000000034(x22)
li x22,0x000102000
and x22,x22,x8
li x4,0xAE19D62AE7CA1E36
dcache.cva x22
li x22,0x00010612A
and x22,x22,x8
li x4,0x846C17D5731DF837
dcache.cva x22
li x22,0x0001000EE
and x22,x22,x8
li x4,0x30148B4E5828EA4A
lbu x5,0x000000004(x22)
li x22,0x0001040F1
and x22,x22,x8
li x4,0x77CAD44299FD9149
dcache.civa x22
li x22,0x0001020C4
and x22,x22,x8
li x4,0x6A688E27D5554829
dcache.cva x22
li x22,0x000100164
and x22,x22,x8
li x4,0xA0E47D7CEC977BD1
sw x4,0x00000003C(x22)
li x22,0x0001040E0
and x22,x22,x8
li x4,0x9F2A38C4D35C890D
sh x4,0x000000044(x22)
li x22,0x00010600F
and x22,x22,x8
li x4,0x31BDBDE6A4426914
dcache.civa x22
li x22,0x00010619D
and x22,x22,x8
li x4,0xCA7F21FE48567CBD
sh x4,0x000000014(x22)
li x22,0x000106081
and x22,x22,x8
li x4,0xFE1766A462E4CA59
sw x4,0x000000024(x22)
li x22,0x0001021A9
and x22,x22,x8
li x4,0x1A9B5C3E00E68E6F
sd x4,0x000000020(x22)
li x22,0x0001001CB
and x22,x22,x8
li x4,0x2E27325FC325D8FC
ld x5,0x000000028(x22)
li x22,0x00010619A
and x22,x22,x8
li x4,0xF6CD82B9CB696AF8
dcache.civa x22
li x22,0x0001000BE
and x22,x22,x8
li x4,0x72584BEC03CF5109
sd x4,0x000000048(x22)
li x22,0x0001060D0
and x22,x22,x8
li x4,0xFE4BBD50D8E7664D
sd x4,0x000000038(x22)
li x22,0x0001040C0
and x22,x22,x8
li x4,0xE2E556D296D545EF
lb x5,0x000000042(x22)
li x22,0x00010210D
and x22,x22,x8
li x4,0x9613E6A4B5F07A24
sd x4,0x000000040(x22)
li x22,0x000104200
and x22,x22,x8
li x4,0x459616F07EB9B1D9
dcache.cva x22
li x22,0x00010403D
and x22,x22,x8
li x4,0xC252A4BF72C90868
lwu x5,0x000000000(x22)
li x22,0x000106124
and x22,x22,x8
li x4,0xDCCF30CD4261E608
dcache.cva x22
li x22,0x00010418B
and x22,x22,x8
li x4,0x89C19835783061E0
lb x5,0x000000047(x22)
li x22,0x0001040A6
and x22,x22,x8
li x4,0xAD1E48EC8337E8C4
lwu x5,0x000000030(x22)
li x22,0x00010608E
and x22,x22,x8
li x4,0x11B3C880A479863F
dcache.civa x22
li x22,0x000106102
and x22,x22,x8
li x4,0x8FB60E30514228B3
dcache.cva x22
li x22,0x000102160
and x22,x22,x8
li x4,0x931263798DDC60A9
sb x4,0x000000006(x22)
li x22,0x000102004
and x22,x22,x8
li x4,0x279A755FF6DD364
lbu x5,0x00000003E(x22)
li x22,0x00010015E
and x22,x22,x8
li x4,0xE968EC56A7DA3435
ld x5,0x000000040(x22)
li x22,0x000104153
and x22,x22,x8
li x4,0x71AC9CA3534BBA8C
dcache.civa x22
li x22,0x00010006E
and x22,x22,x8
li x4,0xF240E9989B642462
sd x4,0x000000020(x22)
li x22,0x000102062
and x22,x22,x8
li x4,0x4292271B343890CB
dcache.civa x22
li x22,0x0001041FE
and x22,x22,x8
li x4,0x887D8D44FB546DCE
lw x5,0x000000040(x22)
li x22,0x0001040EF
and x22,x22,x8
li x4,0xF835DC5A09513224
dcache.civa x22
li x22,0x000100056
and x22,x22,x8
li x4,0x1440FBEB00FAD65F
sw x4,0x000000004(x22)
li x22,0x00010016D
and x22,x22,x8
li x4,0x59086EA32514FBD0
dcache.civa x22
li x22,0x000104183
and x22,x22,x8
li x4,0x4DBF6C17A0F58E3C
dcache.cva x22
li x22,0x00010200F
and x22,x22,x8
li x4,0x19A69FB4B7836EE
lh x5,0x00000004C(x22)
li x22,0x000102150
and x22,x22,x8
li x4,0xE925A9F4FDA9687F
sb x4,0x000000014(x22)
li x22,0x000102076
and x22,x22,x8
li x4,0xA19D6BC20656C310
lhu x5,0x000000014(x22)
li x22,0x0001020D9
and x22,x22,x8
li x4,0x538C7072FAF314E0
lhu x5,0x000000028(x22)
li x22,0x00010606D
and x22,x22,x8
li x4,0x4C90F945CECFBEFD
sd x4,0x000000010(x22)
li x22,0x0001060F0
and x22,x22,x8
li x4,0xC3762A4A2411DE3E
sb x4,0x000000049(x22)
li x22,0x000102062
and x22,x22,x8
li x4,0xB5447CF55F224C90
lh x5,0x000000044(x22)
li x22,0x0001040FC
and x22,x22,x8
li x4,0xC559C19FC2BAFF1D
dcache.cva x22
li x22,0x00010017D
and x22,x22,x8
li x4,0x6DABE6971E81A4A4
sb x4,0x000000007(x22)
li x22,0x000104044
and x22,x22,x8
li x4,0xB662041CEF7458CC
sh x4,0x000000016(x22)
li x22,0x000100166
and x22,x22,x8
li x4,0x1646E0434DE79C4
sb x4,0x00000003E(x22)
li x22,0x0001020C4
and x22,x22,x8
li x4,0xF7CAC52DC9AB646
dcache.cva x22
li x22,0x0001060AF
and x22,x22,x8
li x4,0xDFF1DFC55CE34226
sd x4,0x000000000(x22)
li x22,0x000102060
and x22,x22,x8
li x4,0x205B4F6C82A8038D
dcache.civa x22
li x22,0x0001000DE
and x22,x22,x8
li x4,0x9D6989DBD7C15FF3
dcache.cva x22
li x22,0x000104094
and x22,x22,x8
li x4,0x917FAB094AA742DD
dcache.civa x22
li x22,0x0001001AF
and x22,x22,x8
li x4,0x4D930B9E7133C27F
ld x5,0x000000008(x22)
li x22,0x00010402E
and x22,x22,x8
li x4,0xB8E2170CB39D8C6D
dcache.civa x22
li x22,0x000104160
and x22,x22,x8
li x4,0xA5F77D5B60EC075C
dcache.cva x22
li x22,0x000100168
and x22,x22,x8
li x4,0xD9BEAD4130440B11
ld x5,0x000000028(x22)
li x22,0x00010006C
and x22,x22,x8
li x4,0xA0F067B889200853
sw x4,0x000000000(x22)
li x22,0x000100142
and x22,x22,x8
li x4,0x2B75200DAAC1852E
sw x4,0x000000014(x22)
li x22,0x0001021E8
and x22,x22,x8
li x4,0x153A4C700EBE8C18
sd x4,0x000000008(x22)
li x22,0x0001061E6
and x22,x22,x8
li x4,0x5ABDF2EFF2EBFFC
lhu x5,0x00000004C(x22)
li x22,0x000102199
and x22,x22,x8
li x4,0x20DFE70FF45EB336
lwu x5,0x000000014(x22)
li x22,0x0001040B4
and x22,x22,x8
li x4,0x7CD5650E82DA056B
lhu x5,0x00000004E(x22)
li x22,0x00010400F
and x22,x22,x8
li x4,0xE2BE1A459E14BD5E
sh x4,0x000000018(x22)
li x22,0x0001041E2
and x22,x22,x8
li x4,0xAE6F69F4C143F42D
lwu x5,0x000000024(x22)
li x22,0x00010618F
and x22,x22,x8
li x4,0x432BD0C49A78477D
sw x4,0x000000008(x22)
li x22,0x0001001DA
and x22,x22,x8
li x4,0xB4B1F759CCAF2458
sb x4,0x000000036(x22)
li x22,0x000100094
and x22,x22,x8
li x4,0x5AD7EF13430DF0BF
dcache.civa x22
li x22,0x0001060C9
and x22,x22,x8
li x4,0xD86260911B4A6CA3
dcache.cva x22
li x22,0x0001060BC
and x22,x22,x8
li x4,0x5D2F0A989518F888
lbu x5,0x00000004F(x22)
li x22,0x00010011A
and x22,x22,x8
li x4,0x4653C3FEA903156F
lhu x5,0x000000014(x22)
li x22,0x000100156
and x22,x22,x8
li x4,0xC4A4E0415411C968
sb x4,0x00000002F(x22)
li x22,0x00010614F
and x22,x22,x8
li x4,0x554E5607D156047F
dcache.civa x22
li x22,0x00010400C
and x22,x22,x8
li x4,0x2D1223C212AF0FC8
sd x4,0x000000018(x22)
li x22,0x0001041C6
and x22,x22,x8
li x4,0x2549AD682F37E551
sb x4,0x00000003B(x22)
li x22,0x0001021AB
and x22,x22,x8
li x4,0x58172B7C6C392DEA
lbu x5,0x000000012(x22)
li x22,0x000106101
and x22,x22,x8
li x4,0xD0AAF519A698476
lw x5,0x000000048(x22)
li x22,0x0001060D5
and x22,x22,x8
li x4,0xCD2926D4D2EC84C3
dcache.cva x22
li x22,0x0001020AC
and x22,x22,x8
li x4,0x802C924E3347C3FA
sh x4,0x000000048(x22)
li x22,0x0001041BD
and x22,x22,x8
li x4,0x4FF51C1962456C5B
sh x4,0x000000026(x22)
li x22,0x00010007C
and x22,x22,x8
li x4,0x71D9C78B3CC48CDB
sh x4,0x00000003C(x22)
li x22,0x0001001C2
and x22,x22,x8
li x4,0x31F949144BE30B76
sw x4,0x000000038(x22)
li x22,0x000100075
and x22,x22,x8
li x4,0x20C426AF4435AFA4
lb x5,0x00000002D(x22)
li x22,0x000106185
and x22,x22,x8
li x4,0x97B8826611F51B2A
lb x5,0x00000002F(x22)
li x22,0x0001061FA
and x22,x22,x8
li x4,0xFCD080DA37D9914F
sb x4,0x00000001A(x22)
li x22,0x0001060EA
and x22,x22,x8
li x4,0xEA2FA5133CB3B73B
sd x4,0x000000028(x22)
li x22,0x00010208A
and x22,x22,x8
li x4,0xE27FCC5BE76AFE94
dcache.civa x22
li x22,0x000106128
and x22,x22,x8
li x4,0xB0A19A055EB4B0F0
lbu x5,0x000000013(x22)
li x22,0x0001041CC
and x22,x22,x8
li x4,0x67022BFEAADD11F2
dcache.civa x22
li x22,0x00010207F
and x22,x22,x8
li x4,0x428118AE8686A51
dcache.civa x22
li x22,0x00010615E
and x22,x22,x8
li x4,0x503A6D89972E4CAD
ld x5,0x000000010(x22)
li x22,0x00010216C
and x22,x22,x8
li x4,0x440C94424938FBCE
dcache.cva x22
li x22,0x00010218D
and x22,x22,x8
li x4,0x8C4F4EC192E3718C
dcache.cva x22
li x22,0x000102104
and x22,x22,x8
li x4,0xA30C97FB88EAE273
dcache.civa x22
li x22,0x0001001F8
and x22,x22,x8
li x4,0x5371E9EF84FAFD55
dcache.civa x22
li x22,0x0001060C5
and x22,x22,x8
li x4,0xC3FB120785C714A
dcache.civa x22
li x22,0x000104044
and x22,x22,x8
li x4,0x6A9C47C97A0AAE8A
sw x4,0x000000000(x22)
li x22,0x0001061BA
and x22,x22,x8
li x4,0x86D5B0F8C911B999
dcache.civa x22
li x22,0x00010411D
and x22,x22,x8
li x4,0x3BC6815AED4649A4
sh x4,0x00000000A(x22)
li x22,0x000100163
and x22,x22,x8
li x4,0x72F5738D665C72A2
sw x4,0x000000048(x22)
li x22,0x000106113
and x22,x22,x8
li x4,0x79DF676FBD4F80E4
sb x4,0x000000007(x22)
li x22,0x00010600C
and x22,x22,x8
li x4,0xC30B491A0FE623A9
sd x4,0x000000038(x22)
li x22,0x0001040BD
and x22,x22,x8
li x4,0xADD97CE8EA1C911A
sd x4,0x000000038(x22)
li x22,0x00010004D
and x22,x22,x8
li x4,0x77710947DA6E2E91
lwu x5,0x000000018(x22)
li x22,0x0001060F3
and x22,x22,x8
li x4,0x78E115937DC57E6A
sb x4,0x00000000A(x22)
li x22,0x00010400F
and x22,x22,x8
li x4,0x9EF62B912E89B9AA
sw x4,0x000000020(x22)
li x22,0x000102122
and x22,x22,x8
li x4,0x60CB0BB768BD6731
ld x5,0x000000020(x22)
li x22,0x000104035
and x22,x22,x8
li x4,0xF15411B65D052869
sw x4,0x000000024(x22)
li x22,0x0001040D8
and x22,x22,x8
li x4,0x34A6D658EF63E03D
sw x4,0x000000040(x22)
li x22,0x0001060DF
and x22,x22,x8
li x4,0xAD10E0A7883A352C
sh x4,0x000000004(x22)
li x22,0x0001001E5
and x22,x22,x8
li x4,0xF88E1C4785D8DF17
lbu x5,0x000000030(x22)
li x22,0x00010215A
and x22,x22,x8
li x4,0x6D0C8285587D5C01
dcache.cva x22
li x22,0x000106017
and x22,x22,x8
li x4,0xD3DA7D8D503342B8
sd x4,0x000000000(x22)
li x22,0x00010400A
and x22,x22,x8
li x4,0x70C2DB58037A8B3B
sw x4,0x000000000(x22)
li x22,0x00010011E
and x22,x22,x8
li x4,0x800F249DC77ED8CA
dcache.cva x22
li x22,0x000102089
and x22,x22,x8
li x4,0x769F939B7EFA424E
lhu x5,0x000000026(x22)
li x22,0x000102170
and x22,x22,x8
li x4,0x5218034902BF5FD0
sd x4,0x000000008(x22)
li x22,0x00010600A
and x22,x22,x8
li x4,0x7C3423EF1C38D568
lhu x5,0x000000004(x22)
li x22,0x0001001DF
and x22,x22,x8
li x4,0x595A4C3CD2FE91DE
dcache.civa x22
li x22,0x0001000CA
and x22,x22,x8
li x4,0x929659F8F4324694
lw x5,0x000000014(x22)
li x22,0x00010400C
and x22,x22,x8
li x4,0x18C10187DF6086BF
dcache.cva x22
li x22,0x0001000E8
and x22,x22,x8
li x4,0x38A235F3E2C08FD9
sh x4,0x00000003E(x22)
li x22,0x0001020BF
and x22,x22,x8
li x4,0x42EC870C1302B150
sh x4,0x00000003C(x22)
li x22,0x00010206C
and x22,x22,x8
li x4,0x98A8DF0029B360AD
lhu x5,0x00000004A(x22)
li x22,0x000100094
and x22,x22,x8
li x4,0x1E92CFEAE41B57EB
sd x4,0x000000030(x22)
li x22,0x0001000B9
and x22,x22,x8
li x4,0xBDC5CF82061F4985
dcache.cva x22
li x22,0x00010009C
and x22,x22,x8
li x4,0x85650BDCB85FC1B2
lwu x5,0x000000044(x22)
li x22,0x000100001
and x22,x22,x8
li x4,0xB40D93486A68C3CE
dcache.cva x22
li x22,0x00010211A
and x22,x22,x8
li x4,0xB8EEA40C8104DD84
sb x4,0x00000002D(x22)
li x22,0x000106022
and x22,x22,x8
li x4,0x3017F2F6928BC2CA
dcache.civa x22
li x22,0x00010204F
and x22,x22,x8
li x4,0x6CDC9FBCD77E9C86
sh x4,0x000000042(x22)
li x22,0x0001040F2
and x22,x22,x8
li x4,0x6E96AC630D3ED9D
dcache.civa x22
li x22,0x0001041B9
and x22,x22,x8
li x4,0xBBB9E83322D8B92C
sd x4,0x000000038(x22)
li x22,0x0001021AA
and x22,x22,x8
li x4,0x3395E2BFDA1AFBBF
sb x4,0x000000041(x22)
li x22,0x0001041FF
and x22,x22,x8
li x4,0x83A942F2C3BE28DA
dcache.civa x22
li x22,0x0001021FA
and x22,x22,x8
li x4,0xE7303239912FDB3F
dcache.civa x22
li x22,0x00010016C
and x22,x22,x8
li x4,0x28F72F7FF1326347
lbu x5,0x000000026(x22)
li x22,0x000106110
and x22,x22,x8
li x4,0xBA95FD10F36DFCC2
dcache.cva x22
li x22,0x0001021DA
and x22,x22,x8
li x4,0xEB20EEFAC035E9AC
sh x4,0x00000004C(x22)
li x22,0x0001000E4
and x22,x22,x8
li x4,0xE93499AD8BDF961
sd x4,0x000000008(x22)
li x22,0x00010200B
and x22,x22,x8
li x4,0x33A52F3DC5BEB551
ld x5,0x000000028(x22)
li x22,0x0001041F1
and x22,x22,x8
li x4,0x2BF8346B1E58F47D
ld x5,0x000000018(x22)
li x22,0x000106166
and x22,x22,x8
li x4,0x5A2E38FA3E537C87
lh x5,0x000000024(x22)
li x22,0x00010613D
and x22,x22,x8
li x4,0xBC2FD60A7B27C036
dcache.cva x22
li x22,0x0001061EA
and x22,x22,x8
li x4,0xA8562F3C3B182863
dcache.civa x22
li x22,0x000106067
and x22,x22,x8
li x4,0x9CFB3B5339D022B9
lhu x5,0x000000012(x22)
li x22,0x0001000EF
and x22,x22,x8
li x4,0xB59177E003C4BB6A
sw x4,0x000000048(x22)
li x22,0x000102102
and x22,x22,x8
li x4,0x7E9B77B45C21A853
lwu x5,0x000000020(x22)
li x22,0x00010606D
and x22,x22,x8
li x4,0x3E6C34C10502F560
sh x4,0x000000030(x22)
li x22,0x0001020A8
and x22,x22,x8
li x4,0x4ADA2D676C169D22
sd x4,0x000000038(x22)
li x22,0x0001000C0
and x22,x22,x8
li x4,0x244094FE5DDFD30A
dcache.cva x22
li x22,0x000102128
and x22,x22,x8
li x4,0x86208B8CA7B51634
sb x4,0x000000000(x22)
li x22,0x0001041EB
and x22,x22,x8
li x4,0x867D93F8B4CE47B4
sd x4,0x000000010(x22)
li x22,0x00010604A
and x22,x22,x8
li x4,0x262A18B3A1EA6479
sw x4,0x000000018(x22)
li x22,0x00010219F
and x22,x22,x8
li x4,0x37B0CEFC3FC16769
lb x5,0x000000034(x22)
li x22,0x0001021F7
and x22,x22,x8
li x4,0x260B58DB2B422DF3
dcache.civa x22
li x22,0x00010601B
and x22,x22,x8
li x4,0x49398AF6817EE30C
dcache.cva x22
li x22,0x0001061EC
and x22,x22,x8
li x4,0x9A1C4554916071D6
lbu x5,0x00000003B(x22)
li x22,0x000100102
and x22,x22,x8
li x4,0xA2AEE20251ED51AA
dcache.civa x22
li x22,0x00010207A
and x22,x22,x8
li x4,0xCEB2C2050161FF82
dcache.cva x22
li x22,0x0001021A7
and x22,x22,x8
li x4,0x425DD2696C3C024E
dcache.cva x22
li x22,0x000102069
and x22,x22,x8
li x4,0x6F8BFF8D80D04837
dcache.civa x22
li x22,0x000104143
and x22,x22,x8
li x4,0x49CDB5F026AADA46
lwu x5,0x000000044(x22)
li x22,0x0001001C9
and x22,x22,x8
li x4,0xA7BDAE13AD734C8E
dcache.cva x22
li x22,0x0001040AC
and x22,x22,x8
li x4,0x2355158A3068D60A
sd x4,0x000000020(x22)
li x22,0x000106000
and x22,x22,x8
li x4,0x9CE85E06667F1E3B
dcache.cva x22
li x22,0x000104144
and x22,x22,x8
li x4,0xB488C96EE211721F
sh x4,0x000000002(x22)
li x22,0x000104054
and x22,x22,x8
li x4,0x54A94BD5A6590FB0
sb x4,0x000000013(x22)
li x22,0x00010003D
and x22,x22,x8
li x4,0x99C59AE66CE01553
ld x5,0x000000028(x22)
li x22,0x0001020AE
and x22,x22,x8
li x4,0x6C307C49D49D0E8E
lwu x5,0x000000008(x22)
li x22,0x000102078
and x22,x22,x8
li x4,0x78BEC09DED4B13D3
lb x5,0x00000001E(x22)
li x22,0x0001000ED
and x22,x22,x8
li x4,0x4BD5B98BB4020D98
dcache.cva x22
li x22,0x00010618E
and x22,x22,x8
li x4,0xDD7B4E554CD2D128
lb x5,0x000000020(x22)
li x22,0x0001041A1
and x22,x22,x8
li x4,0xA5BA8C05F923F521
sw x4,0x000000004(x22)
li x22,0x000102132
and x22,x22,x8
li x4,0x37C049C1595ACB81
sw x4,0x000000040(x22)
li x22,0x000104188
and x22,x22,x8
li x4,0x8541DA4285A470A6
sd x4,0x000000048(x22)
li x22,0x00010608A
and x22,x22,x8
li x4,0x7E7ECEDD1B012FB6
sd x4,0x000000018(x22)
li x22,0x00010214E
and x22,x22,x8
li x4,0x706F961B9E0BCF53
dcache.cva x22
li x22,0x0001061AF
and x22,x22,x8
li x4,0x68A550ECBFD5D725
lb x5,0x000000029(x22)
li x22,0x000100069
and x22,x22,x8
li x4,0x5CA2419939561279
sh x4,0x000000010(x22)
li x22,0x00010600D
and x22,x22,x8
li x4,0xDB55BDA8A5411BA0
dcache.cva x22
li x22,0x000104039
and x22,x22,x8
li x4,0xCA2812313D1133EF
dcache.cva x22
li x22,0x000100042
and x22,x22,x8
li x4,0xCE20F5D4F1A8179F
sb x4,0x000000037(x22)
li x22,0x00010603B
and x22,x22,x8
li x4,0x420BF57DEA3C464F
lwu x5,0x000000030(x22)
li x22,0x000106137
and x22,x22,x8
li x4,0xB87B430B783602C2
lhu x5,0x000000034(x22)
li x22,0x00010418C
and x22,x22,x8
li x4,0xD80F1CD8781D1672
lhu x5,0x000000048(x22)
li x22,0x00010604A
and x22,x22,x8
li x4,0x51407515947F61A0
dcache.cva x22
li x22,0x00010017A
and x22,x22,x8
li x4,0x67E1DA1E5051DA4
dcache.cva x22
li x22,0x00010619F
and x22,x22,x8
li x4,0x81C3A9D5D3F0C678
lhu x5,0x000000030(x22)
li x22,0x0001060F4
and x22,x22,x8
li x4,0xC2CCE4D7F4CC3507
dcache.civa x22
li x22,0x00010204E
and x22,x22,x8
li x4,0x7A8890BD5F74210C
dcache.cva x22
li x22,0x0001020BB
and x22,x22,x8
li x4,0xE335AC74F355FEC
sw x4,0x000000028(x22)
li x22,0x0001061EC
and x22,x22,x8
li x4,0x199A36FBDA84EF9F
lbu x5,0x000000040(x22)
li x22,0x0001061B2
and x22,x22,x8
li x4,0x1BA03900C1951E69
dcache.civa x22
li x22,0x0001001F5
and x22,x22,x8
li x4,0xA1DBC5B2B13573D
dcache.cva x22
li x22,0x0001061DB
and x22,x22,x8
li x4,0x9847B4D914A50EC8
sh x4,0x000000014(x22)
li x22,0x000106098
and x22,x22,x8
li x4,0x589D2E2A52048B73
lhu x5,0x000000018(x22)
li x22,0x0001040EB
and x22,x22,x8
li x4,0x699921AF394CF986
lhu x5,0x00000004E(x22)
li x22,0x000102078
and x22,x22,x8
li x4,0x8C05D6A70F1E397
sw x4,0x000000040(x22)
li x22,0x00010206F
and x22,x22,x8
li x4,0x52069BACA00D310B
sb x4,0x000000029(x22)
li x22,0x000100154
and x22,x22,x8
li x4,0xD8F6FA7E5925C9E2
sb x4,0x00000000D(x22)
li x22,0x0001021FF
and x22,x22,x8
li x4,0x52B2B4741E7CA4B6
lb x5,0x000000036(x22)
li x22,0x00010017E
and x22,x22,x8
li x4,0xD8C96BBC4CF5868F
lb x5,0x000000030(x22)
li x22,0x00010011D
and x22,x22,x8
li x4,0xC4F4D65AD7457759
dcache.civa x22
li x22,0x0001001C8
and x22,x22,x8
li x4,0xAB3F63F98E20BB11
lw x5,0x000000040(x22)
li x22,0x00010418A
and x22,x22,x8
li x4,0xC2ADD4A3C82FCA96
sb x4,0x000000040(x22)
li x22,0x00010019F
and x22,x22,x8
li x4,0xDBC98AAA90D5DD50
lb x5,0x00000002F(x22)
li x22,0x0001020E3
and x22,x22,x8
li x4,0xFE2D65B12EBAE171
lhu x5,0x000000036(x22)
li x22,0x0001060BF
and x22,x22,x8
li x4,0x379F6B9199335FFA
dcache.cva x22
li x22,0x00010019B
and x22,x22,x8
li x4,0x3AF6E0BD28B07CF3
sw x4,0x000000038(x22)
li x22,0x0001040EF
and x22,x22,x8
li x4,0xBCC82B163D42ED40
dcache.cva x22
li x22,0x0001061A9
and x22,x22,x8
li x4,0x6F2C7C1296E94AA5
lwu x5,0x000000048(x22)
li x22,0x000100133
and x22,x22,x8
li x4,0x73949015A2139293
dcache.civa x22
li x22,0x000100119
and x22,x22,x8
li x4,0x9D9A99C0047C1B64
sb x4,0x000000038(x22)
li x22,0x0001020EC
and x22,x22,x8
li x4,0xBF0EC2D816B820A4
lbu x5,0x000000015(x22)
li x22,0x000100021
and x22,x22,x8
li x4,0xCAD4C90032C18910
sd x4,0x000000040(x22)
li x22,0x000104157
and x22,x22,x8
li x4,0xF5887833644BFE88
dcache.cva x22
li x22,0x00010607C
and x22,x22,x8
li x4,0xC0906637311F8CA6
sd x4,0x000000030(x22)
li x22,0x000100027
and x22,x22,x8
li x4,0xCD4C5F1D3308EFC1
dcache.cva x22
li x22,0x00010614A
and x22,x22,x8
li x4,0x695A92E8BDDC8DE1
sw x4,0x000000004(x22)
li x22,0x000106179
and x22,x22,x8
li x4,0xFF66AAB0CF47B836
dcache.cva x22
li x22,0x0001061BA
and x22,x22,x8
li x4,0x4FEF664D7D1F9C38
lw x5,0x000000018(x22)
li x22,0x0001040DB
and x22,x22,x8
li x4,0x8A41D1CE0B5E08E6
lb x5,0x000000012(x22)
li x22,0x0001001F9
and x22,x22,x8
li x4,0xA1C5DD954A3EF088
sw x4,0x000000030(x22)
li x22,0x000102170
and x22,x22,x8
li x4,0xAEB448C1B300FFB4
sb x4,0x000000004(x22)
li x22,0x0001061F8
and x22,x22,x8
li x4,0x670ABBADBD727303
ld x5,0x000000010(x22)
li x22,0x00010001A
and x22,x22,x8
li x4,0x214EB09E40C2A61A
dcache.cva x22
li x22,0x0001021F7
and x22,x22,x8
li x4,0xFE63C16EE74C2E46
dcache.civa x22
li x22,0x00010214A
and x22,x22,x8
li x4,0x2FEB3B0D2DAD4FD7
sd x4,0x000000038(x22)
li x22,0x000106130
and x22,x22,x8
li x4,0xE354BACCD4DFD6D7
sb x4,0x00000001A(x22)
li x22,0x0001020C5
and x22,x22,x8
li x4,0x266B1A6D5519675D
lbu x5,0x00000004B(x22)
li x22,0x00010219C
and x22,x22,x8
li x4,0xA95B30106F828102
sb x4,0x00000000B(x22)
li x22,0x000100045
and x22,x22,x8
li x4,0xC0D3D81B31D06DF0
lbu x5,0x000000018(x22)
li x22,0x00010418A
and x22,x22,x8
li x4,0xAFEB47AE7707DC99
ld x5,0x000000048(x22)
li x22,0x00010214D
and x22,x22,x8
li x4,0x6EB6ADDB5BE2E4F8
dcache.civa x22
li x22,0x000100046
and x22,x22,x8
li x4,0x9CCE6399050175EA
dcache.civa x22
li x22,0x00010418C
and x22,x22,x8
li x4,0xA2C2ECD40D0A5375
lw x5,0x00000003C(x22)
li x22,0x000106098
and x22,x22,x8
li x4,0x74AF6058B71EB0C1
sb x4,0x000000015(x22)
li x22,0x00010017F
and x22,x22,x8
li x4,0x5CE786613B349C9F
ld x5,0x000000020(x22)
li x22,0x00010006A
and x22,x22,x8
li x4,0x7C87FA5A0F413A21
dcache.civa x22
li x22,0x000102018
and x22,x22,x8
li x4,0xFADD8C168E8703CB
sb x4,0x00000001B(x22)
li x22,0x0001060B4
and x22,x22,x8
li x4,0x46BED8C11CACCAE0
lhu x5,0x000000044(x22)
li x22,0x000104038
and x22,x22,x8
li x4,0x13F82D35DF963693
lbu x5,0x000000048(x22)
li x22,0x000102132
and x22,x22,x8
li x4,0xB086DC73FDA054EA
lhu x5,0x00000004A(x22)
li x22,0x000104004
and x22,x22,x8
li x4,0xF1A23D76E3B6D81C
dcache.civa x22
li x22,0x000104097
and x22,x22,x8
li x4,0x838D6D17D2C5DB18
dcache.cva x22
li x22,0x00010602F
and x22,x22,x8
li x4,0x118F74BBF5BDF53C
lw x5,0x00000002C(x22)
li x22,0x000102167
and x22,x22,x8
li x4,0x1BC9B64442FE6C05
lw x5,0x000000010(x22)
li x22,0x000100008
and x22,x22,x8
li x4,0x9617C8D90F551FCA
lb x5,0x00000000E(x22)
li x22,0x0001021EB
and x22,x22,x8
li x4,0x80BE4239A105127B
dcache.civa x22
li x22,0x0001001A2
and x22,x22,x8
li x4,0xECBD342130B2B4B6
dcache.cva x22
li x22,0x0001040F2
and x22,x22,x8
li x4,0x3D4F9437D1053878
lhu x5,0x000000034(x22)
li x22,0x0001001BF
and x22,x22,x8
li x4,0x2DEC623DFEE1FCF5
sb x4,0x000000000(x22)
li x22,0x0001060D3
and x22,x22,x8
li x4,0x10580B8013385FD3
dcache.cva x22
li x22,0x000104020
and x22,x22,x8
li x4,0x9F75EE22B7D570B3
lw x5,0x000000008(x22)
li x22,0x000106143
and x22,x22,x8
li x4,0x671DB42E5DE336BB
dcache.civa x22
li x22,0x0001001B0
and x22,x22,x8
li x4,0x989AF69B146C3786
sd x4,0x000000018(x22)
li x22,0x0001001B1
and x22,x22,x8
li x4,0x8B8A61FB381CC2F3
sh x4,0x00000002E(x22)
li x22,0x000102177
and x22,x22,x8
li x4,0x4E9AA15E2E17AE85
sh x4,0x00000002E(x22)
li x22,0x000100058
and x22,x22,x8
li x4,0xF5C43257F9838239
sw x4,0x000000038(x22)
li x22,0x0001020FB
and x22,x22,x8
li x4,0x6AD1485383CC1D3F
lh x5,0x000000002(x22)
li x22,0x000100199
and x22,x22,x8
li x4,0xF5C940A707D0070C
sh x4,0x00000002A(x22)
li x22,0x000100190
and x22,x22,x8
li x4,0x7874CDF60CBF5079
sb x4,0x00000001F(x22)
li x22,0x000104032
and x22,x22,x8
li x4,0x59D894ED7784216E
lw x5,0x000000030(x22)
li x22,0x0001040E0
and x22,x22,x8
li x4,0x511726BA08DF6C9E
dcache.cva x22
li x22,0x000102168
and x22,x22,x8
li x4,0xC9CCB13256F29F17
lw x5,0x000000040(x22)
li x22,0x000102019
and x22,x22,x8
li x4,0x7A6EF00CCA5F6F7A
dcache.civa x22
li x22,0x00010002A
and x22,x22,x8
li x4,0x2F535F5B9D35A0D1
dcache.civa x22
li x22,0x00010417D
and x22,x22,x8
li x4,0xB730ACBDDD6F5358
lbu x5,0x000000046(x22)
li x22,0x0001061EA
and x22,x22,x8
li x4,0x32724B6EFF698FE2
lhu x5,0x000000004(x22)
li x22,0x000104119
and x22,x22,x8
li x4,0xC449379871FB6620
lh x5,0x000000026(x22)
li x22,0x0001000ED
and x22,x22,x8
li x4,0x92470D90BE66D4AD
lhu x5,0x00000004E(x22)
li x22,0x000102184
and x22,x22,x8
li x4,0x5528C69A2467611B
sd x4,0x000000010(x22)
li x22,0x00010614C
and x22,x22,x8
li x4,0x760ECDFC5503DD01
dcache.civa x22
li x22,0x000106151
and x22,x22,x8
li x4,0x2866704B11975000
dcache.civa x22
li x22,0x000102094
and x22,x22,x8
li x4,0x16B4B1226BBD9244
sb x4,0x00000003F(x22)
li x22,0x000104043
and x22,x22,x8
li x4,0x5CE5C0338D906E07
lwu x5,0x000000024(x22)
li x22,0x00010609D
and x22,x22,x8
li x4,0x91BC2167FF912784
sw x4,0x000000038(x22)
li x22,0x00010214F
and x22,x22,x8
li x4,0xDDF9623485B4B94D
ld x5,0x000000038(x22)
li x22,0x000102069
and x22,x22,x8
li x4,0x6521F6DB7BE3993A
sd x4,0x000000038(x22)
li x22,0x000102049
and x22,x22,x8
li x4,0x98A164A3691C0076
lbu x5,0x000000001(x22)
li x22,0x000102152
and x22,x22,x8
li x4,0x91EE5D3EB9949A85
sh x4,0x000000038(x22)
li x22,0x000104049
and x22,x22,x8
li x4,0x6E839CB34CB83BC3
dcache.civa x22
li x22,0x0001060AE
and x22,x22,x8
li x4,0x9DDAC4B23502F37B
lwu x5,0x000000010(x22)
li x22,0x0001041F7
and x22,x22,x8
li x4,0xB092278F50EF65B2
dcache.cva x22
li x22,0x00010615D
and x22,x22,x8
li x4,0x26269D571AFA1D63
lwu x5,0x000000004(x22)
li x22,0x0001020BD
and x22,x22,x8
li x4,0x17988CADAD48AD53
lw x5,0x000000034(x22)
li x22,0x0001061A2
and x22,x22,x8
li x4,0x4A729CF38B09DCA9
lw x5,0x000000048(x22)
li x22,0x00010007D
and x22,x22,x8
li x4,0xB9129AA17586A98C
lbu x5,0x000000013(x22)
li x22,0x0001000CA
and x22,x22,x8
li x4,0xDEA68AEEF946246
sb x4,0x000000040(x22)
li x22,0x000104072
and x22,x22,x8
li x4,0xF6F43719C5F64F53
dcache.civa x22
li x22,0x0001020AB
and x22,x22,x8
li x4,0x681A125AB3ABB4F6
sb x4,0x000000044(x22)
li x22,0x0001060E6
and x22,x22,x8
li x4,0x597E82D60E85F6EF
dcache.cva x22
li x22,0x000104028
and x22,x22,x8
li x4,0x5461FA9B68226FB
dcache.civa x22
li x22,0x00010211D
and x22,x22,x8
li x4,0x6E64861240C4FBBC
dcache.cva x22
li x22,0x0001001A3
and x22,x22,x8
li x4,0x4A4D972925E1018E
sb x4,0x000000035(x22)
li x22,0x0001040D3
and x22,x22,x8
li x4,0xA3C353EAB824BEA2
ld x5,0x000000020(x22)
li x22,0x00010418C
and x22,x22,x8
li x4,0xEB48DCE568838E29
lhu x5,0x00000001E(x22)
li x22,0x00010401E
and x22,x22,x8
li x4,0x26444761B4E2E11
dcache.cva x22
li x22,0x000106072
and x22,x22,x8
li x4,0x8F9DF1283DC3A333
lbu x5,0x00000004F(x22)
li x22,0x0001041BF
and x22,x22,x8
li x4,0x2E75168AEAB9681E
sh x4,0x000000038(x22)
li x22,0x0001060B9
and x22,x22,x8
li x4,0xD503527F7F0B0E1C
lhu x5,0x000000010(x22)
li x22,0x00010400E
and x22,x22,x8
li x4,0x421A5547EB2DE40B
lhu x5,0x000000004(x22)
li x22,0x00010010E
and x22,x22,x8
li x4,0x9BCEB8EF41DD5B41
sd x4,0x000000010(x22)
li x22,0x0001040AC
and x22,x22,x8
li x4,0x46CFFBFCF9879AE5
dcache.civa x22
li x22,0x000106110
and x22,x22,x8
li x4,0xE671EE0FFD337AB4
sw x4,0x00000000C(x22)
li x22,0x000102045
and x22,x22,x8
li x4,0xCDFEA8FA7C6FD47A
lhu x5,0x00000000A(x22)
li x22,0x000104108
and x22,x22,x8
li x4,0xEE3C71162E13166E
sd x4,0x000000008(x22)
li x22,0x00010417D
and x22,x22,x8
li x4,0x334F21CEB8629F8
lwu x5,0x000000018(x22)
li x22,0x0001040FA
and x22,x22,x8
li x4,0x5E9AF51DBC9375CB
ld x5,0x000000000(x22)
li x22,0x0001001EA
and x22,x22,x8
li x4,0xD31945771FEAF1B2
sw x4,0x000000010(x22)
li x22,0x00010603A
and x22,x22,x8
li x4,0x735AE874B29AA0A8
sw x4,0x000000030(x22)
li x22,0x00010018D
and x22,x22,x8
li x4,0xCF445D43ABECF55C
sb x4,0x000000036(x22)
li x22,0x00010402B
and x22,x22,x8
li x4,0xB2CC43ADA5062ED6
dcache.civa x22
li x22,0x0001020E9
and x22,x22,x8
li x4,0xC7103CFA8855AECC
lbu x5,0x00000003C(x22)
li x22,0x0001000E6
and x22,x22,x8
li x4,0x2AE33202D58C18B7
sw x4,0x000000038(x22)
li x22,0x000100095
and x22,x22,x8
li x4,0xD9D365A0D79D0B65
sh x4,0x000000046(x22)
li x22,0x00010610B
and x22,x22,x8
li x4,0x97F98ED9B2B9CF26
dcache.cva x22
li x22,0x00010200D
and x22,x22,x8
li x4,0xC7343284366FEAF0
dcache.cva x22
li x22,0x00010206B
and x22,x22,x8
li x4,0x99446C12FD32109C
lhu x5,0x000000046(x22)
li x22,0x0001000CB
and x22,x22,x8
li x4,0xEEC7BAE9766C6D72
ld x5,0x000000000(x22)
li x22,0x00010015F
and x22,x22,x8
li x4,0xF200A415149499DC
dcache.cva x22
li x22,0x0001040EC
and x22,x22,x8
li x4,0x31DE23F181218809
dcache.civa x22
li x22,0x000102092
and x22,x22,x8
li x4,0x3ACA8079E8CA2576
sh x4,0x00000000E(x22)
li x22,0x000104048
and x22,x22,x8
li x4,0xEEE52813018E06EF
dcache.civa x22
li x22,0x000106118
and x22,x22,x8
li x4,0x7A70094A75C05B05
dcache.cva x22
li x22,0x0001000CD
and x22,x22,x8
li x4,0x84FAE080F94D60AF
dcache.cva x22
li x22,0x0001001FF
and x22,x22,x8
li x4,0x4A437D4436B3976E
dcache.civa x22
li x22,0x0001020C9
and x22,x22,x8
li x4,0xB1C87BC4DAFDF8C2
sw x4,0x000000014(x22)
li x22,0x00010609D
and x22,x22,x8
li x4,0x7533EB80AF609864
lbu x5,0x00000000B(x22)
li x22,0x000106175
and x22,x22,x8
li x4,0x528F8EE90334D3FA
dcache.civa x22
li x22,0x00010602D
and x22,x22,x8
li x4,0x77BC814F11A34D9
lbu x5,0x00000000D(x22)
li x22,0x0001001DB
and x22,x22,x8
li x4,0xC67839FC19271327
sb x4,0x000000044(x22)
li x22,0x0001001C0
and x22,x22,x8
li x4,0x15278966DF046EAD
sd x4,0x000000008(x22)
li x22,0x000106191
and x22,x22,x8
li x4,0xFFD80F6F0FF84CAD
lhu x5,0x00000003E(x22)
li x22,0x00010202A
and x22,x22,x8
li x4,0xDE7D85C45D8FBFBE
lb x5,0x00000004E(x22)
li x22,0x0001001DD
and x22,x22,x8
li x4,0x2F0F523E48D5213F
lb x5,0x000000023(x22)
li x22,0x0001041CC
and x22,x22,x8
li x4,0x7489F697ACA1DCB2
dcache.civa x22
li x22,0x00010008C
and x22,x22,x8
li x4,0x50F4C8586637128A
lhu x5,0x000000012(x22)
li x22,0x000104052
and x22,x22,x8
li x4,0xE9E67EACA52D173A
sb x4,0x00000000A(x22)
li x22,0x00010010A
and x22,x22,x8
li x4,0x71AC43494048904D
dcache.civa x22
li x22,0x0001001C6
and x22,x22,x8
li x4,0xF68972901C571F2
dcache.cva x22
li x22,0x0001020BE
and x22,x22,x8
li x4,0x2B2626E3DEA5D369
lhu x5,0x000000044(x22)
li x22,0x000106001
and x22,x22,x8
li x4,0xD5106D7996BB8B25
lh x5,0x00000004E(x22)
li x22,0x000102139
and x22,x22,x8
li x4,0x5F320FD45CA6E48E
ld x5,0x000000028(x22)
li x22,0x000102032
and x22,x22,x8
li x4,0xEE9B183346DF00DB
lwu x5,0x000000038(x22)
li x22,0x00010011C
and x22,x22,x8
li x4,0x73944BDC07E78A32
ld x5,0x000000008(x22)
li x22,0x00010206C
and x22,x22,x8
li x4,0x904D59C37DEC3BBC
sb x4,0x00000004B(x22)
li x22,0x00010611D
and x22,x22,x8
li x4,0xA89EB8A2DB5612CF
dcache.civa x22
li x22,0x00010216B
and x22,x22,x8
li x4,0xCC4C695059B7B317
lhu x5,0x00000003E(x22)
li x22,0x0001021D2
and x22,x22,x8
li x4,0xDD202702D50DBC18
dcache.civa x22
li x22,0x000102065
and x22,x22,x8
li x4,0x19F33AC7F22461D1
lwu x5,0x000000028(x22)
li x22,0x000100198
and x22,x22,x8
li x4,0x2DE66D54AF33068A
lw x5,0x00000004C(x22)
li x22,0x0001000AA
and x22,x22,x8
li x4,0x7C08C72ACD272470
sw x4,0x000000020(x22)
li x22,0x000100115
and x22,x22,x8
li x4,0x95859B2609772479
dcache.cva x22
li x22,0x000102002
and x22,x22,x8
li x4,0xDC58739DD9430F15
lh x5,0x000000038(x22)
li x22,0x0001061BB
and x22,x22,x8
li x4,0x176161E65EFEFB1A
lbu x5,0x00000003D(x22)
li x22,0x00010603C
and x22,x22,x8
li x4,0x809DA26A8212AC9C
sw x4,0x000000018(x22)
li x22,0x0001001CA
and x22,x22,x8
li x4,0x9EB01CBBE8AC0E41
sb x4,0x00000002B(x22)
li x22,0x0001021CE
and x22,x22,x8
li x4,0x261E948FC905E118
sh x4,0x00000002A(x22)
li x22,0x00010215C
and x22,x22,x8
li x4,0xC007C051D04E7477
sb x4,0x000000010(x22)
li x22,0x0001000CB
and x22,x22,x8
li x4,0x896D145151CDD94C
sw x4,0x000000034(x22)
li x22,0x0001041D7
and x22,x22,x8
li x4,0x32FB77B7C7A0F2D9
sw x4,0x00000000C(x22)
li x22,0x000100048
and x22,x22,x8
li x4,0x1622CF3E29C2DD48
dcache.civa x22
li x22,0x00010408D
and x22,x22,x8
li x4,0xF241E378269F1677
sw x4,0x00000000C(x22)
li x22,0x0001060AA
and x22,x22,x8
li x4,0x924AF1621C650668
lbu x5,0x000000006(x22)
li x22,0x0001021D3
and x22,x22,x8
li x4,0xD894A1E95A88A58D
lwu x5,0x000000030(x22)
li x22,0x000100198
and x22,x22,x8
li x4,0xC29CEEF7D947FEC5
lw x5,0x00000001C(x22)
li x22,0x00010004B
and x22,x22,x8
li x4,0x94051866E859090B
sw x4,0x00000001C(x22)
li x22,0x000102055
and x22,x22,x8
li x4,0xF64443F511FE7C90
dcache.cva x22
li x22,0x000104189
and x22,x22,x8
li x4,0x34A2B3C0BA15EB44
dcache.civa x22
li x22,0x00010019D
and x22,x22,x8
li x4,0xCBC2B6B9B5B935D9
sd x4,0x000000048(x22)
li x22,0x00010013A
and x22,x22,x8
li x4,0x8EB4AFB7D04B5504
dcache.civa x22
li x22,0x0001060F4
and x22,x22,x8
li x4,0xEF599183B39353F7
sh x4,0x000000028(x22)
li x22,0x000102027
and x22,x22,x8
li x4,0x8A49443A51ED6910
sb x4,0x000000022(x22)
li x22,0x0001060E1
and x22,x22,x8
li x4,0x16FF184E73C062A4
sb x4,0x000000033(x22)
li x22,0x000100047
and x22,x22,x8
li x4,0xB698E2378DDC78B4
lh x5,0x000000022(x22)
li x22,0x000100016
and x22,x22,x8
li x4,0xF6309DEC077716A
sb x4,0x000000036(x22)
li x22,0x000104011
and x22,x22,x8
li x4,0x554EFC4CAF8F5565
dcache.civa x22
li x22,0x000100188
and x22,x22,x8
li x4,0xBA5C822F42AA5B0E
lb x5,0x00000002B(x22)
li x22,0x00010417D
and x22,x22,x8
li x4,0xC31A8B8AD5794986
sw x4,0x000000008(x22)
li x22,0x000102128
and x22,x22,x8
li x4,0x80B0E242CEDC3DC3
dcache.cva x22
li x22,0x000104066
and x22,x22,x8
li x4,0xE0B8870BCE587334
dcache.civa x22
li x22,0x0001060B4
and x22,x22,x8
li x4,0xD31AB6206CA13691
dcache.civa x22
li x22,0x0001060F6
and x22,x22,x8
li x4,0xE8B395E4ADEA482D
lb x5,0x000000005(x22)
li x22,0x0001040FF
and x22,x22,x8
li x4,0xBD667969EFD1F32E
dcache.civa x22
li x22,0x000106036
and x22,x22,x8
li x4,0x61750B242602E84
lw x5,0x000000048(x22)
li x22,0x000104023
and x22,x22,x8
li x4,0x6D86316FB21C1F76
sd x4,0x000000018(x22)
li x22,0x000104192
and x22,x22,x8
li x4,0x438E83A50C075565
lw x5,0x000000038(x22)
li x22,0x000104039
and x22,x22,x8
li x4,0x7AF945BEEC58011C
lw x5,0x000000028(x22)
li x22,0x000104170
and x22,x22,x8
li x4,0x10E41A2BD3AF8B6F
dcache.cva x22
li x22,0x000106107
and x22,x22,x8
li x4,0x654EB669A2E59D49
lw x5,0x00000001C(x22)
li x22,0x0001020AD
and x22,x22,x8
li x4,0xE59A93B7F319FCDF
sb x4,0x000000005(x22)
li x22,0x00010603F
and x22,x22,x8
li x4,0xEAC06885E4058A4D
dcache.cva x22
li x22,0x0001020E4
and x22,x22,x8
li x4,0xB5C0932187E4480F
sw x4,0x000000034(x22)
li x22,0x0001060D7
and x22,x22,x8
li x4,0x2A906F7968647D55
dcache.cva x22
li x22,0x000106133
and x22,x22,x8
li x4,0xBF26C9A610E66AD2
lw x5,0x000000018(x22)
li x22,0x00010601C
and x22,x22,x8
li x4,0x8F2CAABE804C70E3
sw x4,0x000000010(x22)
li x22,0x0001001EE
and x22,x22,x8
li x4,0xA403843EFFBD652F
dcache.cva x22
li x22,0x0001020AC
and x22,x22,x8
li x4,0xC74CDDB1A9467935
dcache.cva x22
li x22,0x00010008A
and x22,x22,x8
li x4,0x5B838EA110CC0106
dcache.civa x22
li x22,0x000102176
and x22,x22,x8
li x4,0x51924128B6AEB4E8
dcache.civa x22
li x22,0x0001041A8
and x22,x22,x8
li x4,0x226360F9F7BF5DC5
sb x4,0x000000011(x22)
li x22,0x0001021B0
and x22,x22,x8
li x4,0xA948814F0A472B80
dcache.cva x22
li x22,0x000104075
and x22,x22,x8
li x4,0x495A6746506497A2
lw x5,0x000000038(x22)
li x22,0x00010400C
and x22,x22,x8
li x4,0x2EFCFEB8F3810969
sw x4,0x000000018(x22)
li x22,0x0001001D3
and x22,x22,x8
li x4,0x52A286B36273FD56
lb x5,0x00000000F(x22)
li x22,0x000102145
and x22,x22,x8
li x4,0xB15CB979A24A4B03
sw x4,0x000000044(x22)
li x22,0x000104054
and x22,x22,x8
li x4,0x47D05FBE961EC6E4
sd x4,0x000000030(x22)
li x22,0x000102093
and x22,x22,x8
li x4,0xA6B5A08DFD9ABC80
lbu x5,0x000000026(x22)
li x22,0x000104132
and x22,x22,x8
li x4,0x9D1BDE58BB8F02A1
sh x4,0x000000048(x22)
li x22,0x00010401A
and x22,x22,x8
li x4,0x7B1C8AB1EF6375E5
sw x4,0x000000040(x22)
li x22,0x0001001A6
and x22,x22,x8
li x4,0x68C0ECDC480BCB6E
dcache.cva x22
li x22,0x000100036
and x22,x22,x8
li x4,0xB6B94C2B48068E35
dcache.cva x22
li x22,0x000100191
and x22,x22,x8
li x4,0x3F20C27FA4A1DE9C
lhu x5,0x000000006(x22)
li x22,0x000100138
and x22,x22,x8
li x4,0x3D7E5BFFCFDCCBF8
ld x5,0x000000020(x22)
li x22,0x0001021E9
and x22,x22,x8
li x4,0x92D8B830CC4B0C46
lhu x5,0x00000003E(x22)
li x22,0x000104070
and x22,x22,x8
li x4,0xCD324E92E28F679A
sw x4,0x000000050(x22)
li x22,0x0001060B1
and x22,x22,x8
li x4,0xFB8E994D3F8BBFA1
lhu x5,0x000000044(x22)
li x22,0x000104057
and x22,x22,x8
li x4,0x64C01BA2D5BE28D1
dcache.cva x22
li x22,0x000100062
and x22,x22,x8
li x4,0x168FB176FCC5DA66
ld x5,0x000000008(x22)
li x22,0x0001040C4
and x22,x22,x8
li x4,0xF7E858BA4316E195
lbu x5,0x000000023(x22)
li x22,0x00010205A
and x22,x22,x8
li x4,0xF0F397BE8C6C195
sb x4,0x00000003E(x22)
li x22,0x00010201C
and x22,x22,x8
li x4,0x946FD84594A2A323
lb x5,0x000000050(x22)
li x22,0x0001021A9
and x22,x22,x8
li x4,0xF4853586CD2443FF
dcache.cva x22
li x22,0x000106045
and x22,x22,x8
li x4,0xFCC9ACCCD8D00AE2
sw x4,0x00000004C(x22)
li x22,0x00010004B
and x22,x22,x8
li x4,0xA16CF8DEBAFD55A
dcache.cva x22
li x22,0x0001061F5
and x22,x22,x8
li x4,0xE762D0A7606AFE90
lwu x5,0x000000048(x22)
li x22,0x0001021A0
and x22,x22,x8
li x4,0x20514A45A84DABC8
lh x5,0x00000004A(x22)
li x22,0x0001061C3
and x22,x22,x8
li x4,0x65DB356E719E3530
dcache.cva x22
li x22,0x0001001C2
and x22,x22,x8
li x4,0xB542FC91EF9BBECB
lbu x5,0x00000002B(x22)
li x22,0x00010001B
and x22,x22,x8
li x4,0xC6E9DCFF2BF7957C
ld x5,0x000000048(x22)
li x22,0x0001061EA
and x22,x22,x8
li x4,0x2D84C8C4838BE234
lwu x5,0x000000018(x22)
li x22,0x000104047
and x22,x22,x8
li x4,0x8CB34991B8F09447
lhu x5,0x00000001C(x22)
li x22,0x0001020E7
and x22,x22,x8
li x4,0x81A86A49F4A4CE4C
lbu x5,0x00000000F(x22)
li x22,0x000100058
and x22,x22,x8
li x4,0x62D32616A40589DD
lwu x5,0x000000004(x22)
li x22,0x000106192
and x22,x22,x8
li x4,0xEFB08F0FF5367455
sw x4,0x000000010(x22)
li x22,0x000100016
and x22,x22,x8
li x4,0x94A51488F183AADA
lhu x5,0x00000001E(x22)
li x22,0x0001020E9
and x22,x22,x8
li x4,0x2EF5995CF72CFC64
sw x4,0x00000004C(x22)
li x22,0x000100194
and x22,x22,x8
li x4,0xAD9803986E26217
lb x5,0x00000002B(x22)
li x22,0x000102049
and x22,x22,x8
li x4,0x47D363FFB66C39EE
lh x5,0x000000000(x22)
li x22,0x0001060B9
and x22,x22,x8
li x4,0xC59F62C8158A11EB
dcache.cva x22
li x22,0x000106033
and x22,x22,x8
li x4,0xCAF47C02A9DEB1D
dcache.civa x22
li x22,0x0001020AA
and x22,x22,x8
li x4,0xC73A2EF966BEF679
sh x4,0x00000004C(x22)
li x22,0x0001040C4
and x22,x22,x8
li x4,0xFB01ED75EFAFFB0A
lhu x5,0x00000004C(x22)
li x22,0x00010406D
and x22,x22,x8
li x4,0x6AF39BC04A6056AF
ld x5,0x000000040(x22)
li x22,0x000102005
and x22,x22,x8
li x4,0xCAB36BE24C33DB6E
sd x4,0x000000048(x22)
li x22,0x00010210B
and x22,x22,x8
li x4,0x6F7973F59EA8C4CA
lb x5,0x000000030(x22)
li x22,0x000100127
and x22,x22,x8
li x4,0x11AA53BDD6F23067
lb x5,0x000000007(x22)
la x1, __exit
jr x1
.global CPU1
CPU1:
MMODE_SMODE CPU1_S
.global CPU1_S
CPU1_S:
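/* CPU1 supervisor-mode test body (generated).  Each step below follows the
   same pattern used throughout this file: load a candidate address into x22,
   mask it into the test window ("and x22,x22,x8" -- x8 is assumed to hold an
   address mask set up elsewhere in this file), load a random data pattern
   into x4, then issue either a random-width load/store at a small offset or a
   cache-maintenance operation on that line.  dcache.cva / dcache.civa are
   taken here to be vendor cache ops (clean / clean+invalidate by virtual
   address).  */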
li x22,0x000102079
and x22,x22,x8
li x22,0x00010403A
and x22,x22,x8
li x4,0xF428178136A29315
sh x4,0x000000002(x22)
li x22,0x00010407E
and x22,x22,x8
li x4,0x44554A914BC46B4D
ld x5,0x000000040(x22)
li x22,0x000100068
and x22,x22,x8
li x4,0x250E4593483D8231
dcache.cva x22
li x22,0x0001020E9
and x22,x22,x8
li x4,0x3128CA3BE079A23B
sd x4,0x000000048(x22)
li x22,0x000106C07
and x22,x22,x8
li x4,0x6AEC585BD334CC4B
dcache.civa x22
li x22,0x00010683F
and x22,x22,x8
li x4,0xD7E1B938DF440DC3
sb x4,0x000000018(x22)
li x22,0x000102012
and x22,x22,x8
li x4,0xA8E3E6AE9BB00F45
lb x5,0x000000033(x22)
li x22,0x000102101
and x22,x22,x8
li x4,0x6C83B96EE5DF4C29
dcache.cva x22
li x22,0x0001001AD
and x22,x22,x8
li x4,0xB3BD4B11952C755A
sd x4,0x000000038(x22)
li x22,0x000104015
and x22,x22,x8
li x4,0x66B8D27032180BCC
dcache.civa x22
li x22,0x000106A27
and x22,x22,x8
li x4,0xDF3F9132D9FE4039
sd x4,0x000000040(x22)
li x22,0x00010685F
and x22,x22,x8
li x4,0xA971E8F3AEB373C8
lw x5,0x000000038(x22)
li x22,0x0001021C0
and x22,x22,x8
li x4,0x3BB1415C396BD963
dcache.civa x22
li x22,0x000106A68
and x22,x22,x8
li x4,0xEB343BF370B79BF8
sb x4,0x000000050(x22)
li x22,0x0001001E2
and x22,x22,x8
li x4,0xADB00B18D2DFFFC7
lhu x5,0x00000004E(x22)
li x22,0x0001021F2
and x22,x22,x8
li x4,0xD1EDF5CD577A964D
lb x5,0x000000041(x22)
li x22,0x000106BE5
and x22,x22,x8
li x4,0x5BAFE37C472843C5
dcache.civa x22
li x22,0x0001001D0
and x22,x22,x8
li x4,0x93C06F7F8B581517
sh x4,0x00000002E(x22)
li x22,0x000106A77
and x22,x22,x8
li x4,0xAC27A031FB62B21
dcache.civa x22
li x22,0x000106ACD
and x22,x22,x8
li x4,0xE2912D877A75DAA6
sh x4,0x00000002E(x22)
li x22,0x000106BAC
and x22,x22,x8
li x4,0x5508C79564D0E031
sb x4,0x000000041(x22)
li x22,0x0001069B6
and x22,x22,x8
li x4,0x715C5309FD14D485
dcache.cva x22
li x22,0x000102135
and x22,x22,x8
li x4,0x258A01EC7AFFD065
ld x5,0x000000040(x22)
li x22,0x0001000C4
and x22,x22,x8
li x4,0xC743C77A90936FCE
sw x4,0x000000040(x22)
li x22,0x0001068B2
and x22,x22,x8
li x4,0x82193D882507D943
ld x5,0x000000020(x22)
li x22,0x000106B5C
and x22,x22,x8
li x4,0xEF2BD595D92580BF
dcache.civa x22
li x22,0x0001001C7
and x22,x22,x8
li x4,0xC4FE6583073892C6
sd x4,0x000000018(x22)
li x22,0x000106ADF
and x22,x22,x8
li x4,0xB2749FB69C28666C
dcache.cva x22
li x22,0x00010672E
and x22,x22,x8
li x4,0x6D1E9F5239ECCA14
sw x4,0x000000000(x22)
li x22,0x000106ACE
and x22,x22,x8
li x4,0xC21D44E1D317AD
ld x5,0x000000010(x22)
li x22,0x0001021E1
and x22,x22,x8
li x4,0x1CD95AB7B51C9E45
sh x4,0x000000036(x22)
li x22,0x000106769
and x22,x22,x8
li x4,0xBFBAC92E40EB5655
sw x4,0x000000024(x22)
li x22,0x0001020BC
and x22,x22,x8
li x4,0xE71AE9D4F8095639
dcache.cva x22
li x22,0x0001068B3
and x22,x22,x8
li x4,0x8C774140C48D0149
sd x4,0x000000048(x22)
li x22,0x0001001D6
and x22,x22,x8
li x4,0xE166951CEF1576BA
dcache.civa x22
li x22,0x000100096
and x22,x22,x8
li x4,0xE2AC4AC468B0F22C
dcache.cva x22
li x22,0x000100145
and x22,x22,x8
li x4,0xFB21CB86C192063C
lbu x5,0x000000014(x22)
li x22,0x000106C74
and x22,x22,x8
li x4,0x89003090E529E139
dcache.cva x22
li x22,0x000100094
and x22,x22,x8
li x4,0xD099548B444298CC
lh x5,0x00000000A(x22)
li x22,0x000102004
and x22,x22,x8
li x4,0xA2645101212B4180
lwu x5,0x000000034(x22)
li x22,0x0001068E6
and x22,x22,x8
li x4,0xBEB83CF473CC293
dcache.cva x22
li x22,0x000104150
and x22,x22,x8
li x4,0x7D6F9F7FED204E3C
ld x5,0x000000038(x22)
li x22,0x000100139
and x22,x22,x8
li x4,0x3DC54531568FD6DD
dcache.cva x22
li x22,0x0001040D5
and x22,x22,x8
li x4,0x6C143E358908D4AE
sw x4,0x000000004(x22)
li x22,0x0001067BD
and x22,x22,x8
li x4,0x633B7C0DBFCF7B10
dcache.civa x22
li x22,0x0001067FE
and x22,x22,x8
li x4,0xC3E144AE170D27F
ld x5,0x000000040(x22)
li x22,0x0001001A4
and x22,x22,x8
li x4,0xB6785E6BC1BFE754
lb x5,0x00000000A(x22)
li x22,0x000104009
and x22,x22,x8
li x4,0xA3993047E83EF6D6
lbu x5,0x00000000F(x22)
li x22,0x0001020C5
and x22,x22,x8
li x4,0x292EFF136EA75DC2
sd x4,0x000000010(x22)
li x22,0x000106BBA
and x22,x22,x8
li x4,0xCCC06BEFEC9286B0
sh x4,0x000000004(x22)
li x22,0x000106AE3
and x22,x22,x8
li x4,0x203C5DC01FA9D21E
dcache.cva x22
li x22,0x000106919
and x22,x22,x8
li x4,0xF97CBFDDC82C4799
sw x4,0x000000034(x22)
li x22,0x0001068FD
and x22,x22,x8
li x4,0xFAB1FFE0580FB16C
lh x5,0x000000012(x22)
li x22,0x000106BE7
and x22,x22,x8
li x4,0xDCAA18CC35AE3DA5
sb x4,0x000000018(x22)
li x22,0x0001000E5
and x22,x22,x8
li x4,0x91AC6982084B6586
dcache.civa x22
li x22,0x0001020E6
and x22,x22,x8
li x4,0xA68E9B9E951C604E
sw x4,0x000000030(x22)
li x22,0x00010203F
and x22,x22,x8
li x4,0xFE68F4900B80061B
sh x4,0x00000001A(x22)
li x22,0x000102166
and x22,x22,x8
li x4,0x4025A59EDD28F78B
sb x4,0x000000037(x22)
li x22,0x000102180
and x22,x22,x8
li x4,0xEC47BFCA2B467BB4
dcache.cva x22
li x22,0x000106C4C
and x22,x22,x8
li x4,0xF2471785DFDBD0CA
ld x5,0x000000040(x22)
li x22,0x000102167
and x22,x22,x8
li x4,0xC87C4339325138CE
lw x5,0x00000004C(x22)
li x22,0x000102144
and x22,x22,x8
li x4,0x27ED15A7875D453C
lh x5,0x000000030(x22)
li x22,0x000106766
and x22,x22,x8
li x4,0xACEC9534CD0B2293
sh x4,0x00000002A(x22)
li x22,0x000106A69
and x22,x22,x8
li x4,0xC63FC598EF5DFBB
dcache.civa x22
li x22,0x0001000D0
and x22,x22,x8
li x4,0x73EB5F735B65425
lh x5,0x00000002C(x22)
li x22,0x0001000C5
and x22,x22,x8
li x4,0xF94F9BB870202C39
lbu x5,0x00000001B(x22)
li x22,0x000104131
and x22,x22,x8
li x4,0xA7AF027DEDA8861B
lbu x5,0x000000015(x22)
li x22,0x000104184
and x22,x22,x8
li x4,0x222213DD5BF496B8
lhu x5,0x00000004A(x22)
li x22,0x0001041C3
and x22,x22,x8
li x4,0x95DD05112264B05E
lbu x5,0x000000000(x22)
li x22,0x0001040AB
and x22,x22,x8
li x4,0xA356F31FE53E194F
dcache.civa x22
li x22,0x000104158
and x22,x22,x8
li x4,0x945A8BEE55118481
dcache.cva x22
li x22,0x000102028
and x22,x22,x8
li x4,0xD06B6A11DFEA298
lb x5,0x000000018(x22)
li x22,0x000106982
and x22,x22,x8
li x4,0x4E177E29BA1B569B
dcache.cva x22
li x22,0x000100078
and x22,x22,x8
li x4,0x17B6122062D0EFC
dcache.cva x22
li x22,0x0001041CA
and x22,x22,x8
li x4,0xFCB5D407D3F62A47
sh x4,0x00000003E(x22)
li x22,0x0001020C1
and x22,x22,x8
li x4,0x3B93C7D4EBA4EF60
lhu x5,0x000000038(x22)
li x22,0x000106768
and x22,x22,x8
li x4,0x80824888889F6C08
sh x4,0x000000000(x22)
li x22,0x0001068E2
and x22,x22,x8
li x4,0xF75FEFD67FD58D67
dcache.cva x22
li x22,0x000106A40
and x22,x22,x8
li x4,0x7EBA4A7C2F6524A3
lb x5,0x000000046(x22)
li x22,0x0001001A0
and x22,x22,x8
li x4,0x5863141DAECB700
dcache.civa x22
li x22,0x000102064
and x22,x22,x8
li x4,0x3E8C72A5F6F059E3
dcache.civa x22
li x22,0x0001041CC
and x22,x22,x8
li x4,0x76276785BE1A0248
lw x5,0x000000024(x22)
li x22,0x00010677D
and x22,x22,x8
li x4,0x523616808B811E02
lbu x5,0x00000000C(x22)
li x22,0x0001000C6
and x22,x22,x8
li x4,0x2FD3E516E93BC624
lbu x5,0x00000002F(x22)
li x22,0x000106C49
and x22,x22,x8
li x4,0x94017F641494F79D
dcache.cva x22
li x22,0x000106798
and x22,x22,x8
li x4,0x1A96157E14712B5D
sd x4,0x000000018(x22)
li x22,0x0001040A0
and x22,x22,x8
li x4,0x430A738976441A06
lbu x5,0x00000001A(x22)
li x22,0x00010215F
and x22,x22,x8
li x4,0x3D29411C7F2BAE5F
lb x5,0x00000000E(x22)
li x22,0x000102007
and x22,x22,x8
li x4,0xEF43A59D6422B4A8
sb x4,0x000000010(x22)
li x22,0x0001021BF
and x22,x22,x8
li x4,0xA330789AFC90824D
sd x4,0x000000008(x22)
li x22,0x0001069C0
and x22,x22,x8
li x4,0x39DB4ED359D1AEDD
lwu x5,0x000000000(x22)
li x22,0x000106BD4
and x22,x22,x8
li x4,0xFEBC079F56E908AC
dcache.cva x22
li x22,0x00010694A
and x22,x22,x8
li x4,0x6F7AF80DE4984FE8
sh x4,0x00000003C(x22)
li x22,0x0001069A7
and x22,x22,x8
li x4,0x8D06B1288D4F8F14
sh x4,0x00000000A(x22)
li x22,0x000104054
and x22,x22,x8
li x4,0x455FD614DFEAAFBF
lb x5,0x000000022(x22)
li x22,0x000106BD9
and x22,x22,x8
li x4,0xD6AC684BD513AAB
sd x4,0x000000038(x22)
li x22,0x0001001C5
and x22,x22,x8
li x4,0x16B75D46806C9FE3
sd x4,0x000000040(x22)
li x22,0x00010407F
and x22,x22,x8
li x4,0x5237284BD1026E4F
sd x4,0x000000010(x22)
li x22,0x000106822
and x22,x22,x8
li x4,0x62E72F77DF3D3AE
dcache.civa x22
li x22,0x000104008
and x22,x22,x8
li x4,0x9BFB994C2E8338F
sh x4,0x000000028(x22)
li x22,0x000106749
and x22,x22,x8
li x4,0xD3FFAB12B5CA484
lb x5,0x000000016(x22)
li x22,0x000104027
and x22,x22,x8
li x4,0x404B2446E7999232
lbu x5,0x00000004D(x22)
li x22,0x000104020
and x22,x22,x8
li x4,0x6C1DDF3536D9CB38
ld x5,0x000000018(x22)
li x22,0x00010212F
and x22,x22,x8
li x4,0xA0CA67AA7E6EFA5F
sw x4,0x000000004(x22)
li x22,0x000106C9B
and x22,x22,x8
li x4,0xFB3F8053DD43FC55
dcache.cva x22
li x22,0x0001021BC
and x22,x22,x8
li x4,0xF277CD57FB5B9F6E
dcache.civa x22
li x22,0x000106C33
and x22,x22,x8
li x4,0xFEC663FAE0171745
sh x4,0x000000018(x22)
li x22,0x0001067D8
and x22,x22,x8
li x4,0xB19A1D5E8C29A0AD
sd x4,0x000000038(x22)
li x22,0x000106C98
and x22,x22,x8
li x4,0x841B6CA979A510CC
sb x4,0x000000008(x22)
li x22,0x000106A90
and x22,x22,x8
li x4,0x5798477D9D107B4E
dcache.civa x22
li x22,0x000106ACB
and x22,x22,x8
li x4,0x7837BB5C4CBB75AB
lb x5,0x00000000B(x22)
li x22,0x000106902
and x22,x22,x8
li x4,0x825E8932D3C692B6
lw x5,0x000000038(x22)
li x22,0x0001041F3
and x22,x22,x8
li x4,0x5A77DA2BC768A7AA
dcache.civa x22
li x22,0x00010672B
and x22,x22,x8
li x4,0xC0B335FA459A0966
sw x4,0x000000030(x22)
li x22,0x0001040E9
and x22,x22,x8
li x4,0xDC559093F40AEAAB
sd x4,0x000000040(x22)
li x22,0x000106CAC
and x22,x22,x8
li x4,0xC71BCF18C5C2F911
sb x4,0x000000048(x22)
li x22,0x0001067BC
and x22,x22,x8
li x4,0xC8DF4C20F047C412
lwu x5,0x000000020(x22)
li x22,0x000106B1C
and x22,x22,x8
li x4,0x13D2CA2D79FA62F9
dcache.civa x22
li x22,0x00010205D
and x22,x22,x8
li x4,0xF97595687EE8A193
lbu x5,0x000000005(x22)
li x22,0x000106BD1
and x22,x22,x8
li x4,0x1EACBB2F180E7FAC
lh x5,0x00000001E(x22)
li x22,0x000104044
and x22,x22,x8
li x4,0xAEC8C161315EFA32
lw x5,0x000000014(x22)
li x22,0x000102104
and x22,x22,x8
li x4,0xA615AE1713F46356
dcache.civa x22
li x22,0x000106753
and x22,x22,x8
li x4,0x4530E1073ADC7AC
dcache.cva x22
li x22,0x000104082
and x22,x22,x8
li x4,0x3DE8016C6C11BA0D
dcache.cva x22
li x22,0x00010019E
and x22,x22,x8
li x4,0x7B39D23CAFDEAC3D
lw x5,0x000000008(x22)
li x22,0x000106CDB
and x22,x22,x8
li x4,0x6E38A2DA88FC8B0E
dcache.cva x22
li x22,0x000104103
and x22,x22,x8
li x4,0x8B9ED4CEC207DEB4
dcache.civa x22
li x22,0x0001041C1
and x22,x22,x8
li x4,0x81821B2B9A38F23A
lwu x5,0x00000000C(x22)
li x22,0x0001041C4
and x22,x22,x8
li x4,0x8F97BCBB0CB0E384
dcache.civa x22
li x22,0x000100165
and x22,x22,x8
li x4,0x59B753AA2AFEFD9C
sh x4,0x000000044(x22)
li x22,0x000106BF4
and x22,x22,x8
li x4,0x268D834BACBA99BB
dcache.civa x22
li x22,0x0001068CC
and x22,x22,x8
li x4,0x776FAD6739B38257
lwu x5,0x00000004C(x22)
li x22,0x00010415C
and x22,x22,x8
li x4,0xE19AAA6276F15C7C
sd x4,0x000000010(x22)
li x22,0x000106AB3
and x22,x22,x8
li x4,0x852F4646F86E616
sb x4,0x000000047(x22)
li x22,0x00010407A
and x22,x22,x8
li x4,0x9EEE0F57C69E79AA
sw x4,0x000000038(x22)
li x22,0x000104190
and x22,x22,x8
li x4,0xB41259103E7FC6D3
lwu x5,0x000000028(x22)
li x22,0x000100038
and x22,x22,x8
li x4,0xFDAFFBE667BBB081
dcache.civa x22
li x22,0x000106C0B
and x22,x22,x8
li x4,0x6AB88082656D8ACC
dcache.civa x22
li x22,0x00010678D
and x22,x22,x8
li x4,0x87A933BFAFAE92B2
dcache.civa x22
li x22,0x0001068E7
and x22,x22,x8
li x4,0xB908CFD1CCA2DE53
dcache.civa x22
li x22,0x000104132
and x22,x22,x8
li x4,0xE1D674F912D796E6
sd x4,0x000000000(x22)
li x22,0x00010687B
and x22,x22,x8
li x4,0x3708066C964F9F86
dcache.civa x22
li x22,0x000106A29
and x22,x22,x8
li x4,0x758657CFE28C3B41
lw x5,0x000000038(x22)
li x22,0x0001040FF
and x22,x22,x8
li x4,0xA423FBF353EDA9D6
dcache.cva x22
li x22,0x0001000DA
and x22,x22,x8
li x4,0x5706372748BB8B74
dcache.cva x22
li x22,0x000106B70
and x22,x22,x8
li x4,0x8B0FC96F82A5C67B
sh x4,0x00000002E(x22)
li x22,0x0001041EE
and x22,x22,x8
li x4,0x1EAC193B09744DF5
sh x4,0x00000004E(x22)
li x22,0x000106BD2
and x22,x22,x8
li x4,0x244C86EFE32E3FBD
lh x5,0x000000002(x22)
li x22,0x000102155
and x22,x22,x8
li x4,0x6AC84FFCBCCB119C
lb x5,0x00000000A(x22)
li x22,0x0001040B2
and x22,x22,x8
li x4,0x47F5C9E594F10A8D
lwu x5,0x00000000C(x22)
li x22,0x000100029
and x22,x22,x8
li x4,0x3AA0568603C5B869
dcache.cva x22
li x22,0x0001021D8
and x22,x22,x8
li x4,0xD1A48130EA092626
dcache.cva x22
li x22,0x000106807
and x22,x22,x8
li x4,0x46AFFD3112D38933
dcache.cva x22
li x22,0x0001001BE
and x22,x22,x8
li x4,0x2CA992CBB340BA80
dcache.civa x22
li x22,0x0001041C0
and x22,x22,x8
li x4,0xBCE67F366E434A64
dcache.cva x22
li x22,0x000100024
and x22,x22,x8
li x4,0x8E50131CFD4B82BE
ld x5,0x000000038(x22)
li x22,0x000104092
and x22,x22,x8
li x4,0xF994324BFC3AA845
sd x4,0x000000000(x22)
li x22,0x000100167
and x22,x22,x8
li x4,0xD6AD9DAB1DC1AFE2
dcache.cva x22
li x22,0x000104148
and x22,x22,x8
li x4,0xE7E3247B3084FA8A
lwu x5,0x000000020(x22)
li x22,0x00010410D
and x22,x22,x8
li x4,0x94F8B9B3BB011C42
lh x5,0x000000014(x22)
li x22,0x00010019C
and x22,x22,x8
li x4,0xF8B5F75421C88C4E
lhu x5,0x00000001C(x22)
li x22,0x000106874
and x22,x22,x8
li x4,0xD0BD45DCC2AB15EF
lbu x5,0x000000037(x22)
li x22,0x000106A70
and x22,x22,x8
li x4,0x95FB2481F69042D0
lhu x5,0x00000004C(x22)
li x22,0x0001020A6
and x22,x22,x8
li x4,0xBD2F400909048774
lh x5,0x000000042(x22)
li x22,0x000106C3C
and x22,x22,x8
li x4,0x84C77C21CD34C48D
sw x4,0x000000024(x22)
li x22,0x00010681A
and x22,x22,x8
li x4,0x58637CBEADEAF679
sb x4,0x000000017(x22)
li x22,0x000100052
and x22,x22,x8
li x4,0x57F9570DB2A7E489
sw x4,0x000000044(x22)
li x22,0x0001021CD
and x22,x22,x8
li x4,0x67228117E867F272
lwu x5,0x00000000C(x22)
li x22,0x000102047
and x22,x22,x8
li x4,0x8DAB076D192802A5
dcache.civa x22
li x22,0x000100075
and x22,x22,x8
li x4,0x9F756CE2E92A6CB0
sh x4,0x000000036(x22)
li x22,0x0001067FE
and x22,x22,x8
li x4,0xC42E5453AF87342F
dcache.civa x22
li x22,0x000106CEB
and x22,x22,x8
li x4,0x68AFD769A5E82297
sb x4,0x00000002B(x22)
li x22,0x000100162
and x22,x22,x8
li x4,0x745C18A6CF3E98CD
lbu x5,0x000000024(x22)
li x22,0x000106C95
and x22,x22,x8
li x4,0xFF539C7BE173EF77
sw x4,0x000000020(x22)
li x22,0x00010213A
and x22,x22,x8
li x4,0x8AD4459E061411B6
sb x4,0x00000003D(x22)
li x22,0x000100113
and x22,x22,x8
li x4,0x9F2BED560868D255
sb x4,0x00000003B(x22)
li x22,0x000104180
and x22,x22,x8
li x4,0xC73B6D111D63F3E3
sd x4,0x000000008(x22)
li x22,0x0001021A6
and x22,x22,x8
li x4,0xBD611C207BA2FEF5
lhu x5,0x00000002A(x22)
li x22,0x000106C95
and x22,x22,x8
li x4,0x5C01905C70846D5D
lhu x5,0x000000008(x22)
li x22,0x000104060
and x22,x22,x8
li x4,0x366637616F4E90D1
sw x4,0x000000028(x22)
li x22,0x000104118
and x22,x22,x8
li x4,0xE2F49ADE957A4290
dcache.cva x22
li x22,0x00010001D
and x22,x22,x8
li x4,0x7BE35DB6D992DEDC
sw x4,0x000000018(x22)
li x22,0x000106A1C
and x22,x22,x8
li x4,0x57662BB923773E48
sb x4,0x000000029(x22)
li x22,0x000106751
and x22,x22,x8
li x4,0xBCF2227BD28E2AAA
lb x5,0x00000001C(x22)
li x22,0x000102091
and x22,x22,x8
li x4,0x1B0FE3367ECC853C
dcache.cva x22
li x22,0x000104117
and x22,x22,x8
li x4,0xA6A16A9A583883AB
lh x5,0x00000002A(x22)
li x22,0x000106CBE
and x22,x22,x8
li x4,0x4418DDEBFAE87D0C
sd x4,0x000000038(x22)
li x22,0x000104088
and x22,x22,x8
li x4,0xB69D326950962522
lh x5,0x000000008(x22)
li x22,0x000106A5E
and x22,x22,x8
li x4,0xCDA530490F2E9DBD
ld x5,0x000000030(x22)
li x22,0x00010402F
and x22,x22,x8
li x4,0xC253E786490D500
lh x5,0x00000001E(x22)
li x22,0x0001069CF
and x22,x22,x8
li x4,0xFC7E08421DC3D06
dcache.cva x22
li x22,0x000100038
and x22,x22,x8
li x4,0xDF1395E36FF66793
lw x5,0x000000038(x22)
li x22,0x000104005
and x22,x22,x8
li x4,0x380014371AD77D2
dcache.cva x22
li x22,0x000106A37
and x22,x22,x8
li x4,0x5CB24ED7DC288AC3
sb x4,0x00000001E(x22)
li x22,0x000106AEC
and x22,x22,x8
li x4,0xE939A3A40EB57B1
lw x5,0x000000040(x22)
li x22,0x0001001EF
and x22,x22,x8
li x4,0x30275DE2DEA88979
dcache.civa x22
li x22,0x000106C91
and x22,x22,x8
li x4,0x10E7C29F59F50090
dcache.cva x22
li x22,0x000106A27
and x22,x22,x8
li x4,0x82FB1661964A9AC9
lh x5,0x000000048(x22)
li x22,0x000106B88
and x22,x22,x8
li x4,0xDA0717AB4D4FE18C
dcache.civa x22
li x22,0x00010670C
and x22,x22,x8
li x4,0xA135E3690B125AD5
sw x4,0x000000008(x22)
li x22,0x0001041BF
and x22,x22,x8
li x4,0xE7C6204AAD9D4E60
dcache.cva x22
li x22,0x0001000B4
and x22,x22,x8
li x4,0xDA024B05072DCECD
sw x4,0x00000003C(x22)
li x22,0x000106946
and x22,x22,x8
li x4,0xCB90F1015B7368CA
sb x4,0x00000000F(x22)
li x22,0x000106A50
and x22,x22,x8
li x4,0xEF0C1DEC07564E59
sw x4,0x000000010(x22)
li x22,0x000106A2F
and x22,x22,x8
li x4,0x6E338A59573BA756
dcache.cva x22
li x22,0x000106852
and x22,x22,x8
li x4,0xBC0C925B062ADC3C
dcache.civa x22
li x22,0x00010019B
and x22,x22,x8
li x4,0x1C6E1F923AA3D38B
lh x5,0x000000026(x22)
li x22,0x0001067D0
and x22,x22,x8
li x4,0x2270799E2A992BC2
dcache.civa x22
li x22,0x000100140
and x22,x22,x8
li x4,0xDBD8C19BE29D5A7E
sh x4,0x00000002E(x22)
li x22,0x0001001C2
and x22,x22,x8
li x4,0x22EFAE16EE29815E
sw x4,0x000000004(x22)
li x22,0x000106BA0
and x22,x22,x8
li x4,0xAC1EFE13F8B1AC35
sw x4,0x000000050(x22)
li x22,0x000106743
and x22,x22,x8
li x4,0x69D7A84DD186D31E
dcache.civa x22
li x22,0x0001021B8
and x22,x22,x8
li x4,0x41B9603EF6405560
dcache.civa x22
li x22,0x000106CFE
and x22,x22,x8
li x4,0x7507208D53B6C28A
sh x4,0x00000001E(x22)
li x22,0x00010412E
and x22,x22,x8
li x4,0xA5514BAB037447D9
sb x4,0x00000002B(x22)
li x22,0x0001020C8
and x22,x22,x8
li x4,0x583DC1E20C103EBB
lwu x5,0x000000028(x22)
li x22,0x000106CB1
and x22,x22,x8
li x4,0x2E4A4E03E14AC975
sh x4,0x00000004C(x22)
li x22,0x000104118
and x22,x22,x8
li x4,0x1A0FE2087FBE0901
lw x5,0x00000004C(x22)
li x22,0x000106CA1
and x22,x22,x8
li x4,0xAD993C70AC053AF2
lbu x5,0x00000001C(x22)
li x22,0x00010678B
and x22,x22,x8
li x4,0x8604724466C0620C
ld x5,0x000000018(x22)
li x22,0x00010412F
and x22,x22,x8
li x4,0xDE3CD3A897F1168F
lw x5,0x00000003C(x22)
li x22,0x00010412D
and x22,x22,x8
li x4,0xF3D445C08C8B0D48
sd x4,0x000000048(x22)
li x22,0x000106991
and x22,x22,x8
li x4,0xC16A8E7F46730D57
lw x5,0x00000002C(x22)
li x22,0x0001040F2
and x22,x22,x8
li x4,0x8B226DCA8CC88363
sd x4,0x000000028(x22)
li x22,0x000104194
and x22,x22,x8
li x4,0xC31D858089A3371A
sb x4,0x000000042(x22)
li x22,0x000106A8E
and x22,x22,x8
li x4,0xC1D030392FD66246
ld x5,0x000000020(x22)
li x22,0x000106C1B
and x22,x22,x8
li x4,0x98C15633CC57421A
dcache.civa x22
li x22,0x000106C98
and x22,x22,x8
li x4,0x98F502E55CA50F2F
sb x4,0x000000026(x22)
li x22,0x000100127
and x22,x22,x8
li x4,0x7F3E5D5CB870C0E
dcache.cva x22
li x22,0x00010671B
and x22,x22,x8
li x4,0x4D2B7936495F1F1F
lw x5,0x000000024(x22)
li x22,0x000102039
and x22,x22,x8
li x4,0x83D19194661738A9
dcache.civa x22
li x22,0x000106787
and x22,x22,x8
li x4,0x4E1EDEC3071328FF
sw x4,0x000000008(x22)
li x22,0x00010695E
and x22,x22,x8
li x4,0xC1BFBBAC65EECDB6
sw x4,0x000000008(x22)
li x22,0x0001069F9
and x22,x22,x8
li x4,0x519C5A655594CDC9
dcache.civa x22
li x22,0x0001041B1
and x22,x22,x8
li x4,0x76DB70A7AC59D442
dcache.cva x22
li x22,0x000106C7A
and x22,x22,x8
li x4,0x8E95A3ECB6CB7F6E
dcache.civa x22
li x22,0x0001041AD
and x22,x22,x8
li x4,0x214189606FFD3260
lbu x5,0x000000026(x22)
li x22,0x000106AAD
and x22,x22,x8
li x4,0xE19A3A828151C28
dcache.cva x22
li x22,0x000104150
and x22,x22,x8
li x4,0xA7DDCA4707B2B502
dcache.cva x22
li x22,0x0001000B4
and x22,x22,x8
li x4,0x406EDB986FCBE152
dcache.civa x22
li x22,0x0001068FB
and x22,x22,x8
li x4,0x48CEED245F17A2C8
lhu x5,0x00000004E(x22)
li x22,0x0001041E1
and x22,x22,x8
li x4,0xE796298586C932E2
dcache.civa x22
li x22,0x000106703
and x22,x22,x8
li x4,0x24231829682E9809
dcache.civa x22
li x22,0x000100176
and x22,x22,x8
li x4,0x8544AA52767D77B8
sb x4,0x000000038(x22)
li x22,0x000106875
and x22,x22,x8
li x4,0x65619344388B83E5
sb x4,0x00000003A(x22)
li x22,0x0001040B4
and x22,x22,x8
li x4,0xE46EF64885DD2D7A
lh x5,0x000000016(x22)
li x22,0x000102092
and x22,x22,x8
li x4,0xD4BEB2862463E79
dcache.civa x22
li x22,0x00010002B
and x22,x22,x8
li x4,0xABD8FE77DD7FD9FF
sb x4,0x00000000B(x22)
li x22,0x0001001B6
and x22,x22,x8
li x4,0xAC6E1798B6EE3522
lb x5,0x00000000C(x22)
li x22,0x000106970
and x22,x22,x8
li x4,0x2B529FAF9F5FF331
lb x5,0x000000019(x22)
li x22,0x0001068E0
and x22,x22,x8
li x4,0x688F1076B5075A33
lw x5,0x000000010(x22)
li x22,0x0001067FA
and x22,x22,x8
li x4,0xB8391FAE8CD1B80E
sw x4,0x000000030(x22)
li x22,0x0001000A5
and x22,x22,x8
li x4,0x77E76B069B52FC7
lw x5,0x000000044(x22)
li x22,0x00010699E
and x22,x22,x8
li x4,0x3DA4C729A73DDD5F
sb x4,0x000000047(x22)
li x22,0x0001040D1
and x22,x22,x8
li x4,0xF17CBCD5FB67C976
dcache.civa x22
li x22,0x00010002C
and x22,x22,x8
li x4,0xF69EF621C4BA62CF
lbu x5,0x00000004B(x22)
li x22,0x00010013B
and x22,x22,x8
li x4,0xBE1A5AF62B610D73
dcache.cva x22
li x22,0x0001000A5
and x22,x22,x8
li x4,0xE1FBCAC68D2969EB
lhu x5,0x000000036(x22)
li x22,0x0001068ED
and x22,x22,x8
li x4,0x792F40EA2C8CDA10
lh x5,0x000000036(x22)
li x22,0x00010673D
and x22,x22,x8
li x4,0x89F58417A3651B78
dcache.civa x22
li x22,0x00010003A
and x22,x22,x8
li x4,0x19E7EC38FD719D15
sh x4,0x000000014(x22)
li x22,0x000102157
and x22,x22,x8
li x4,0x6E22B34DAC40680A
dcache.civa x22
li x22,0x000104035
and x22,x22,x8
li x4,0x6F6CF7C9382ED0B5
sb x4,0x00000002E(x22)
li x22,0x000104121
and x22,x22,x8
li x4,0xAD14FD2778A8765F
sd x4,0x000000020(x22)
li x22,0x0001041A5
and x22,x22,x8
li x4,0xE770C60C3A94441C
dcache.civa x22
li x22,0x00010414D
and x22,x22,x8
li x4,0x9460C6B32E8D7580
lh x5,0x000000022(x22)
li x22,0x0001021D7
and x22,x22,x8
li x4,0x4E377D79A93FE0C6
lh x5,0x000000022(x22)
li x22,0x000100056
and x22,x22,x8
li x4,0xB17E1A5A003DB301
sw x4,0x00000002C(x22)
li x22,0x000100149
and x22,x22,x8
li x4,0xD1B1914C3D69242D
dcache.civa x22
li x22,0x000104056
and x22,x22,x8
li x4,0xFC6B63B4F4C296B1
lbu x5,0x00000000E(x22)
li x22,0x000106B8C
and x22,x22,x8
li x4,0xC23FADDBB17FF4C5
lhu x5,0x000000036(x22)
li x22,0x00010418C
and x22,x22,x8
li x4,0xF255CDC6496A904C
lb x5,0x00000002F(x22)
li x22,0x000106BC7
and x22,x22,x8
li x4,0x405386019F1F1B69
sb x4,0x000000032(x22)
li x22,0x0001001F9
and x22,x22,x8
li x4,0x7B246A8EA3FBE8F
sd x4,0x000000008(x22)
li x22,0x0001069DC
and x22,x22,x8
li x4,0x6778A4FAA5A59063
lh x5,0x00000000A(x22)
li x22,0x0001021F0
and x22,x22,x8
li x4,0x1459CE85F14C8E35
lh x5,0x000000010(x22)
li x22,0x000106B3C
and x22,x22,x8
li x4,0x4AD1375DDC5DD641
lh x5,0x000000018(x22)
li x22,0x000104198
and x22,x22,x8
li x4,0x76C21D6281F3A308
dcache.cva x22
li x22,0x000106B1F
and x22,x22,x8
li x4,0x69E2C857A68EFF1A
dcache.civa x22
li x22,0x0001020C9
and x22,x22,x8
li x4,0xC97399D3AAFD206E
sd x4,0x000000030(x22)
li x22,0x0001000A0
and x22,x22,x8
li x4,0xB81CE41CE0192224
sd x4,0x000000020(x22)
li x22,0x00010412F
and x22,x22,x8
li x4,0xF76E0F53A4496096
lb x5,0x000000012(x22)
li x22,0x000104034
and x22,x22,x8
li x4,0x16ECFFC4F140679F
sb x4,0x000000000(x22)
li x22,0x00010210B
and x22,x22,x8
li x4,0x62A738034267DE88
sd x4,0x000000018(x22)
li x22,0x000106720
and x22,x22,x8
li x4,0x3A6D9584E2827C71
dcache.cva x22
li x22,0x000106826
and x22,x22,x8
li x4,0x3BCD26551FFB7DC3
lwu x5,0x00000002C(x22)
li x22,0x000106C3E
and x22,x22,x8
li x4,0xB9E90C48C244A10F
lb x5,0x000000033(x22)
li x22,0x000102127
and x22,x22,x8
li x4,0xEA32B79D3B94488B
dcache.civa x22
li x22,0x00010002F
and x22,x22,x8
li x4,0x20906CD0FB523D2C
ld x5,0x000000008(x22)
li x22,0x000102177
and x22,x22,x8
li x4,0x2EF1E59EC30EC18F
lw x5,0x00000003C(x22)
li x22,0x000106A0A
and x22,x22,x8
li x4,0x403A7B52CD681F8F
dcache.cva x22
li x22,0x00010401B
and x22,x22,x8
li x4,0xB82DD624F978382F
sb x4,0x000000013(x22)
li x22,0x000104055
and x22,x22,x8
li x4,0xE7D2E061D9337FEB
dcache.civa x22
li x22,0x0001069DF
and x22,x22,x8
li x4,0x9D7D1F5CF45DB761
sb x4,0x000000027(x22)
li x22,0x0001001FA
and x22,x22,x8
li x4,0xD74BD6C2BA9F3BEC
sh x4,0x000000020(x22)
li x22,0x0001021D0
and x22,x22,x8
li x4,0x33B43423503A767F
lh x5,0x00000003A(x22)
li x22,0x0001000CB
and x22,x22,x8
li x4,0x638A45726DBD769C
sb x4,0x000000006(x22)
li x22,0x000102130
and x22,x22,x8
li x4,0xB4700EFA452FC76
sb x4,0x00000000B(x22)
li x22,0x000106C0F
and x22,x22,x8
li x4,0x3597E01E6D9A98E2
lhu x5,0x000000006(x22)
li x22,0x00010690C
and x22,x22,x8
li x4,0x2BB0DC864D6CB8E6
sh x4,0x000000040(x22)
wfi
.global CPU2
CPU2:
MMODE_SMODE CPU2_S
.global CPU2_S
CPU2_S:
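# Descriptive note (inferred from the generated stream, not part of the generator spec):
# each group below loads a candidate address into x22, masks it with x8 (assumed to hold
# an address mask prepared earlier in the test), loads a pseudo-random 64-bit value into
# x4, and then issues either a load/store at a small offset from x22 or a dcache.cva /
# dcache.civa cache-maintenance operation on that line. The hart parks with wfi when its
# stream ends.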
li x22,0x00010711F
and x22,x22,x8
li x22,0x000107037
and x22,x22,x8
li x4,0x16D57D713A39F242
sb x4,0x000000036(x22)
li x22,0x00010706F
and x22,x22,x8
li x4,0x96B87F71430EA184
sd x4,0x000000018(x22)
li x22,0x00010211D
and x22,x22,x8
li x4,0xDD6812F359B6D720
lhu x5,0x00000001A(x22)
li x22,0x00010003D
and x22,x22,x8
li x4,0xF3CF278BD8B54716
lbu x5,0x000000018(x22)
li x22,0x0001020FC
and x22,x22,x8
li x4,0xEB163FB95311149E
sb x4,0x00000002C(x22)
li x22,0x000100170
and x22,x22,x8
li x4,0x79EA3F0B8D38540D
lbu x5,0x000000013(x22)
li x22,0x0001070C5
and x22,x22,x8
li x4,0x62ED9662561499E4
lwu x5,0x000000020(x22)
li x22,0x0001070F9
and x22,x22,x8
li x4,0xAB262D01F812EC68
lwu x5,0x000000000(x22)
li x22,0x000102080
and x22,x22,x8
li x4,0xCA4747A6195CDC90
lw x5,0x00000003C(x22)
li x22,0x000107002
and x22,x22,x8
li x4,0xECA8E7F51215F12E
dcache.civa x22
li x22,0x00010201A
and x22,x22,x8
li x4,0xF6D3B803032B36B
lhu x5,0x000000044(x22)
li x22,0x00010004C
and x22,x22,x8
li x4,0xF812459E486D85F3
sd x4,0x000000038(x22)
li x22,0x0001000C1
and x22,x22,x8
li x4,0x1D0F048356470D5C
dcache.cva x22
li x22,0x000100066
and x22,x22,x8
li x4,0x40BAD8B6AC7B7245
sh x4,0x00000001A(x22)
li x22,0x000104118
and x22,x22,x8
li x4,0xFA98DC1532E89833
dcache.cva x22
li x22,0x000104152
and x22,x22,x8
li x4,0xE5C1C185A9460364
dcache.civa x22
li x22,0x00010211B
and x22,x22,x8
li x4,0xB0106C162E1B5693
dcache.civa x22
li x22,0x0001021BC
and x22,x22,x8
li x4,0x50AF4DF130DBC97C
sh x4,0x000000046(x22)
li x22,0x00010719A
and x22,x22,x8
li x4,0x5C385CAA8DBD29F3
sw x4,0x00000003C(x22)
li x22,0x0001001E0
and x22,x22,x8
li x4,0xCCD7EF71431E8520
lw x5,0x00000002C(x22)
li x22,0x000100047
and x22,x22,x8
li x4,0xC0B821AFD9A91366
lw x5,0x000000018(x22)
li x22,0x00010718C
and x22,x22,x8
li x4,0xC42139B95334F776
dcache.cva x22
li x22,0x000107012
and x22,x22,x8
li x4,0xE2AEB016175EFCAF
sh x4,0x000000002(x22)
li x22,0x00010705F
and x22,x22,x8
li x4,0xD75A7DBB7482EA10
lwu x5,0x000000034(x22)
li x22,0x000100165
and x22,x22,x8
li x4,0x1D3C43FA6087BC13
dcache.cva x22
li x22,0x000104070
and x22,x22,x8
li x4,0xB8962A6BE1D83C5D
lwu x5,0x000000034(x22)
li x22,0x000102183
and x22,x22,x8
li x4,0x4C8C30409E48E615
sd x4,0x000000048(x22)
li x22,0x000104068
and x22,x22,x8
li x4,0x61C5E71EC3FC03F6
ld x5,0x000000040(x22)
li x22,0x00010704E
and x22,x22,x8
li x4,0x5567F1EE0F3E0B10
sh x4,0x00000004E(x22)
li x22,0x00010712E
and x22,x22,x8
li x4,0xC9BEFED9C4459566
dcache.cva x22
li x22,0x0001001D6
and x22,x22,x8
li x4,0x81B50EFDAF0463E9
sd x4,0x000000038(x22)
li x22,0x0001040F9
and x22,x22,x8
li x4,0x7B90589AA2CAE548
dcache.civa x22
li x22,0x000102184
and x22,x22,x8
li x4,0x12C32CB3B5E8B7BD
dcache.civa x22
li x22,0x0001070B1
and x22,x22,x8
li x4,0xC155A130A813B5CE
lwu x5,0x000000044(x22)
li x22,0x00010200B
and x22,x22,x8
li x4,0x522B765FE72449FC
sw x4,0x00000001C(x22)
li x22,0x0001021DF
and x22,x22,x8
li x4,0x29647B34D5BD5B48
lh x5,0x000000048(x22)
li x22,0x0001040F7
and x22,x22,x8
li x4,0x28888ACB6A04C29D
ld x5,0x000000048(x22)
li x22,0x000104189
and x22,x22,x8
li x4,0xE9A1212BCB0DC0AF
dcache.cva x22
li x22,0x000107146
and x22,x22,x8
li x4,0xE706287D520A51C8
sh x4,0x00000004A(x22)
li x22,0x00010013A
and x22,x22,x8
li x4,0xBA92A067BEA1A6F7
dcache.civa x22
li x22,0x0001070F4
and x22,x22,x8
li x4,0xFB9EE832CE8ED5BD
lw x5,0x000000024(x22)
li x22,0x000100157
and x22,x22,x8
li x4,0xF5ADF6162402204B
dcache.cva x22
li x22,0x000100199
and x22,x22,x8
li x4,0xFAA593FA649958FC
sh x4,0x000000036(x22)
li x22,0x00010012B
and x22,x22,x8
li x4,0x2DB5073CE08B1F43
dcache.civa x22
li x22,0x0001001B8
and x22,x22,x8
li x4,0xF732791B66758A8A
dcache.cva x22
li x22,0x000107061
and x22,x22,x8
li x4,0xF06C4BE5538108DA
dcache.civa x22
li x22,0x0001071C1
and x22,x22,x8
li x4,0x3DE525950FD15379
lhu x5,0x00000003C(x22)
li x22,0x00010403C
and x22,x22,x8
li x4,0x6D17BB1F4351D7E
sd x4,0x000000008(x22)
li x22,0x0001001D6
and x22,x22,x8
li x4,0x9FF534ED08FD6829
lw x5,0x000000040(x22)
li x22,0x00010405F
and x22,x22,x8
li x4,0x216FF965A43ED484
lbu x5,0x000000027(x22)
li x22,0x0001020D2
and x22,x22,x8
li x4,0x75D085DF6D7E1F90
sb x4,0x000000021(x22)
li x22,0x0001000C3
and x22,x22,x8
li x4,0x859DF9F20E12A908
dcache.cva x22
li x22,0x0001020A3
and x22,x22,x8
li x4,0x60BAF4B4A728AA1
sw x4,0x000000038(x22)
li x22,0x00010005D
and x22,x22,x8
li x4,0x365D4DD3FA9889CC
sb x4,0x000000042(x22)
li x22,0x0001020F9
and x22,x22,x8
li x4,0xFB299476470F0F8D
sd x4,0x000000030(x22)
li x22,0x00010709B
and x22,x22,x8
li x4,0x5B55D47853A52864
dcache.cva x22
li x22,0x0001001F9
and x22,x22,x8
li x4,0x8C3F55C88E1C664B
dcache.civa x22
li x22,0x000102156
and x22,x22,x8
li x4,0xBB49D078605BB636
dcache.civa x22
li x22,0x000100120
and x22,x22,x8
li x4,0x7DFB6DDC5289EC6A
sd x4,0x000000020(x22)
li x22,0x00010212A
and x22,x22,x8
li x4,0xD5B1570366A62879
sh x4,0x00000000E(x22)
li x22,0x000102139
and x22,x22,x8
li x4,0xDD518CA4F8400D2A
dcache.cva x22
li x22,0x0001021D5
and x22,x22,x8
li x4,0xCC1DBB1944143F0A
sd x4,0x000000000(x22)
li x22,0x00010402F
and x22,x22,x8
li x4,0xBDCA76E727B59912
sh x4,0x000000002(x22)
li x22,0x00010005B
and x22,x22,x8
li x4,0xB110D1BCF35054B3
sw x4,0x000000040(x22)
li x22,0x0001001FA
and x22,x22,x8
li x4,0xAED7BBC004BFF3DE
ld x5,0x000000048(x22)
li x22,0x00010203A
and x22,x22,x8
li x4,0x933156AFD5AB7F5C
sh x4,0x00000004A(x22)
li x22,0x0001071D6
and x22,x22,x8
li x4,0xCF1B5F1A863ADA7F
sh x4,0x000000022(x22)
li x22,0x000100127
and x22,x22,x8
li x4,0xD11F560EC3AB1046
dcache.civa x22
li x22,0x00010701D
and x22,x22,x8
li x4,0x89A2C7621B2EE3BD
sb x4,0x00000001A(x22)
li x22,0x00010013B
and x22,x22,x8
li x4,0xAF1934260433C03D
lhu x5,0x000000022(x22)
li x22,0x000107199
and x22,x22,x8
li x4,0x38301F4F4836C65A
sw x4,0x000000024(x22)
li x22,0x00010209E
and x22,x22,x8
li x4,0x8FD08A4174F0FCC0
lbu x5,0x000000032(x22)
li x22,0x00010700A
and x22,x22,x8
li x4,0x7D304E95B065F5E2
dcache.cva x22
li x22,0x00010007C
and x22,x22,x8
li x4,0x707420FCABB81C52
lbu x5,0x000000013(x22)
li x22,0x0001070B6
and x22,x22,x8
li x4,0x88E43A8395BF4290
lhu x5,0x000000036(x22)
li x22,0x0001021BD
and x22,x22,x8
li x4,0x88686632C64BFD65
sw x4,0x000000038(x22)
li x22,0x000102103
and x22,x22,x8
li x4,0x5207F9AB360FD895
dcache.civa x22
li x22,0x00010219E
and x22,x22,x8
li x4,0x2A5600E63A4FCAA5
dcache.civa x22
li x22,0x00010200E
and x22,x22,x8
li x4,0x85C074B3782AA2CE
dcache.cva x22
li x22,0x0001021D1
and x22,x22,x8
li x4,0x56473DED93C18A0D
lw x5,0x000000018(x22)
li x22,0x0001040ED
and x22,x22,x8
li x4,0xF4BA3BB3CEA8435B
sb x4,0x00000002E(x22)
li x22,0x0001041D5
and x22,x22,x8
li x4,0xBD3524095937C8B6
dcache.civa x22
li x22,0x000104146
and x22,x22,x8
li x4,0xA0E1C1681197E0D1
lwu x5,0x00000001C(x22)
li x22,0x000107189
and x22,x22,x8
li x4,0xA57536F11A02052F
dcache.civa x22
li x22,0x00010006E
and x22,x22,x8
li x4,0x24172FAD06FBF83A
sw x4,0x000000010(x22)
li x22,0x000102097
and x22,x22,x8
li x4,0x902C5CEFD1680AB2
lbu x5,0x000000038(x22)
li x22,0x00010704A
and x22,x22,x8
li x4,0xA5B669D4F3505839
dcache.civa x22
li x22,0x0001000BF
and x22,x22,x8
li x4,0x9A42CEFC149965F3
sh x4,0x000000022(x22)
li x22,0x000100022
and x22,x22,x8
li x4,0xA3A7EFFD400A7BE5
lw x5,0x000000020(x22)
li x22,0x000104147
and x22,x22,x8
li x4,0x5E30C1FEDF76A3F7
lwu x5,0x000000038(x22)
li x22,0x0001000FB
and x22,x22,x8
li x4,0x60676610292FA7B9
sw x4,0x000000038(x22)
li x22,0x000100029
and x22,x22,x8
li x4,0xC7BD3C824F928282
sw x4,0x000000028(x22)
li x22,0x000100157
and x22,x22,x8
li x4,0x4F5399D29F363704
lw x5,0x00000003C(x22)
li x22,0x0001021B7
and x22,x22,x8
li x4,0xF4482C0C57EC6B29
lw x5,0x000000044(x22)
li x22,0x000107009
and x22,x22,x8
li x4,0xF5638AF3CE8CC3F9
lb x5,0x000000005(x22)
li x22,0x0001040FB
and x22,x22,x8
li x4,0xC4D0D3A91B65EADC
lwu x5,0x00000001C(x22)
li x22,0x000102199
and x22,x22,x8
li x4,0xB3454D6ECF2DD3E7
ld x5,0x000000018(x22)
li x22,0x000100004
and x22,x22,x8
li x4,0x6C6DB7CE79AFF161
sh x4,0x000000048(x22)
li x22,0x0001041B9
and x22,x22,x8
li x4,0xB4A5D14D0FEEF5B6
ld x5,0x000000030(x22)
li x22,0x00010201D
and x22,x22,x8
li x4,0x991D927520F6FE21
dcache.civa x22
li x22,0x000104063
and x22,x22,x8
li x4,0x7F732CB95255F0E1
dcache.cva x22
li x22,0x000100104
and x22,x22,x8
li x4,0x3A792E8C9FAD35E1
sh x4,0x00000002E(x22)
li x22,0x000107186
and x22,x22,x8
li x4,0x4F019FDB729ACEDE
lb x5,0x000000033(x22)
li x22,0x000104096
and x22,x22,x8
li x4,0xBEA5981C7FAC1886
sw x4,0x000000014(x22)
li x22,0x00010707C
and x22,x22,x8
li x4,0xC6AADC74369B2510
sd x4,0x000000020(x22)
li x22,0x00010415F
and x22,x22,x8
li x4,0x91900C551E581691
lhu x5,0x00000002C(x22)
li x22,0x0001040A7
and x22,x22,x8
li x4,0x8635CFC5F6A03595
lwu x5,0x00000002C(x22)
li x22,0x00010411D
and x22,x22,x8
li x4,0x7438E85F0E5058D7
sh x4,0x000000010(x22)
li x22,0x0001040C9
and x22,x22,x8
li x4,0x3732A654BE45425E
lwu x5,0x000000028(x22)
li x22,0x0001040B4
and x22,x22,x8
li x4,0x9EB2F10D1E8DE439
lw x5,0x000000034(x22)
li x22,0x00010703A
and x22,x22,x8
li x4,0xDC2AA5A136A0D19C
dcache.civa x22
li x22,0x000107147
and x22,x22,x8
li x4,0x51B51255A764C4BD
sw x4,0x000000010(x22)
li x22,0x000102019
and x22,x22,x8
li x4,0x90DDA5706393026C
dcache.cva x22
li x22,0x0001071B8
and x22,x22,x8
li x4,0x23F9658987AA1496
ld x5,0x000000018(x22)
li x22,0x0001020FB
and x22,x22,x8
li x4,0x91656F48F2ADE617
sd x4,0x000000050(x22)
li x22,0x0001041D8
and x22,x22,x8
li x4,0xBB87E2AC184CEC81
lb x5,0x000000001(x22)
li x22,0x000104040
and x22,x22,x8
li x4,0x3FABA0B6F7E5FC01
ld x5,0x000000020(x22)
li x22,0x0001000CB
and x22,x22,x8
li x4,0x2D5561A16A4474F1
sw x4,0x000000004(x22)
li x22,0x000104136
and x22,x22,x8
li x4,0x1369BB1C98175801
lw x5,0x00000000C(x22)
li x22,0x0001021E0
and x22,x22,x8
li x4,0x62EC0D5FE729A809
sh x4,0x00000000A(x22)
li x22,0x000102137
and x22,x22,x8
li x4,0x99F727C092CF9BAF
sw x4,0x000000004(x22)
li x22,0x0001071AC
and x22,x22,x8
li x4,0x3DA0DB7605EF9637
sh x4,0x00000004A(x22)
li x22,0x0001021FF
and x22,x22,x8
li x4,0x3F9E721471FEEE54
lbu x5,0x00000000B(x22)
li x22,0x0001021FB
and x22,x22,x8
li x4,0xB424E7726CB987B2
lb x5,0x000000002(x22)
li x22,0x00010717A
and x22,x22,x8
li x4,0xB5C9775420C484E2
sw x4,0x00000002C(x22)
li x22,0x0001000D5
and x22,x22,x8
li x4,0x11DEC754D7767E9
dcache.civa x22
li x22,0x000104018
and x22,x22,x8
li x4,0x8854D74559B8538F
lwu x5,0x000000004(x22)
li x22,0x000107012
and x22,x22,x8
li x4,0xE1D8CD2BBF39436A
sh x4,0x000000028(x22)
li x22,0x0001001C8
and x22,x22,x8
li x4,0x8C6125C377A3D108
lbu x5,0x000000041(x22)
li x22,0x0001000D5
and x22,x22,x8
li x4,0xAD09B2D5856B4871
sw x4,0x000000048(x22)
li x22,0x00010200C
and x22,x22,x8
li x4,0xFC5DE6F6481BCB24
dcache.cva x22
li x22,0x000102126
and x22,x22,x8
li x4,0x8909C000087F8
dcache.cva x22
li x22,0x0001000E6
and x22,x22,x8
li x4,0x72CC6A6F8C83D8A5
dcache.cva x22
li x22,0x000104097
and x22,x22,x8
li x4,0xA6D3E960A622962C
dcache.cva x22
li x22,0x000107143
and x22,x22,x8
li x4,0x5D9EEE29BB9308FC
lw x5,0x000000044(x22)
li x22,0x0001041CC
and x22,x22,x8
li x4,0xA3C38A7DD28B96AD
dcache.civa x22
li x22,0x000100050
and x22,x22,x8
li x4,0xDE2AC16CD201E74
sd x4,0x000000028(x22)
li x22,0x0001020C3
and x22,x22,x8
li x4,0x7B4E2BAA3353CE9E
dcache.cva x22
li x22,0x000104103
and x22,x22,x8
li x4,0xB8110589821783D5
dcache.civa x22
li x22,0x00010403F
and x22,x22,x8
li x4,0x6FC838AA48CA90B8
dcache.cva x22
li x22,0x000102153
and x22,x22,x8
li x4,0xB7653395636FC760
dcache.civa x22
li x22,0x000100162
and x22,x22,x8
li x4,0x27DF4058F2C80DCD
sb x4,0x000000017(x22)
li x22,0x0001001A0
and x22,x22,x8
li x4,0xFE7E51E6441D4777
lw x5,0x000000018(x22)
li x22,0x00010702A
and x22,x22,x8
li x4,0x40164A659C082A11
dcache.civa x22
li x22,0x00010204A
and x22,x22,x8
li x4,0xFF5CFB1A8224C502
lb x5,0x000000011(x22)
li x22,0x00010017E
and x22,x22,x8
li x4,0x1A8582C15281B14
dcache.cva x22
li x22,0x000100138
and x22,x22,x8
li x4,0xA1A274C6506A3BC8
sb x4,0x00000002E(x22)
li x22,0x000107106
and x22,x22,x8
li x4,0x3BEB5A38AB49B5C7
dcache.civa x22
li x22,0x0001040CF
and x22,x22,x8
li x4,0x34190B986CA8D76B
sw x4,0x00000001C(x22)
li x22,0x0001071E4
and x22,x22,x8
li x4,0x1AA00A3B8A5B4D86
lhu x5,0x00000002C(x22)
li x22,0x000102144
and x22,x22,x8
li x4,0x79DE19B004D48B2D
dcache.cva x22
li x22,0x00010202F
and x22,x22,x8
li x4,0xAFBC213A182B105E
lwu x5,0x000000014(x22)
li x22,0x000102048
and x22,x22,x8
li x4,0xB7C132F6CD6933FF
lhu x5,0x000000034(x22)
li x22,0x0001020FF
and x22,x22,x8
li x4,0x9FDE79FE90C305E2
sd x4,0x000000010(x22)
li x22,0x00010716B
and x22,x22,x8
li x4,0xA7B196C231AA5CBB
lb x5,0x000000000(x22)
li x22,0x000102005
and x22,x22,x8
li x4,0x904ABEC91B218B88
sb x4,0x000000006(x22)
li x22,0x000100158
and x22,x22,x8
li x4,0x60DDE16BD0B28E5
sh x4,0x00000003C(x22)
li x22,0x00010411E
and x22,x22,x8
li x4,0xC05A0F32D40421CD
lw x5,0x000000014(x22)
li x22,0x00010218F
and x22,x22,x8
li x4,0x48FA0EB573A43DCB
dcache.civa x22
li x22,0x0001071F0
and x22,x22,x8
li x4,0xC32A64C159F936AD
dcache.civa x22
li x22,0x00010701A
and x22,x22,x8
li x4,0xB95278E51F7E7EFF
lb x5,0x00000004C(x22)
li x22,0x0001020FB
and x22,x22,x8
li x4,0x7FE0855B65541D40
dcache.cva x22
li x22,0x00010707C
and x22,x22,x8
li x4,0xDD23F70A1D819E03
sb x4,0x00000004D(x22)
li x22,0x00010005F
and x22,x22,x8
li x4,0x84DE0AB52FC4B5BF
dcache.cva x22
li x22,0x0001001F8
and x22,x22,x8
li x4,0x824C72738B3F2014
lbu x5,0x000000043(x22)
li x22,0x000102150
and x22,x22,x8
li x4,0x1BE2BA1563ACC635
dcache.cva x22
li x22,0x000102033
and x22,x22,x8
li x4,0x35FE6F9BFC063EA4
dcache.civa x22
li x22,0x0001070E2
and x22,x22,x8
li x4,0x4F36EC3865106D20
sw x4,0x000000014(x22)
li x22,0x000102086
and x22,x22,x8
li x4,0x66E5A0BC2C4A7FD2
sw x4,0x00000001C(x22)
li x22,0x0001020E8
and x22,x22,x8
li x4,0x8793C6F7974627C8
sw x4,0x00000000C(x22)
li x22,0x0001020BD
and x22,x22,x8
li x4,0x4C8976F30600AFAD
dcache.civa x22
li x22,0x00010216D
and x22,x22,x8
li x4,0x83DC0D36F4F18D49
dcache.civa x22
li x22,0x00010018F
and x22,x22,x8
li x4,0xDF1A91BFD0E7855B
sh x4,0x00000004E(x22)
li x22,0x0001020DD
and x22,x22,x8
li x4,0xB91B8E5FF0111E8
lbu x5,0x000000010(x22)
li x22,0x000107107
and x22,x22,x8
li x4,0x78F9201A87696326
ld x5,0x000000008(x22)
li x22,0x000104078
and x22,x22,x8
li x4,0x1FC45B31E8515A29
dcache.civa x22
li x22,0x0001021B0
and x22,x22,x8
li x4,0xD1D61330D1B4196B
sd x4,0x000000010(x22)
li x22,0x0001001E6
and x22,x22,x8
li x4,0x1DE355A704E39071
lb x5,0x000000020(x22)
li x22,0x00010403E
and x22,x22,x8
li x4,0x132C75A0AA6B2AB4
dcache.civa x22
li x22,0x000104144
and x22,x22,x8
li x4,0xBD488E48C92AF627
dcache.cva x22
li x22,0x0001020C5
and x22,x22,x8
li x4,0xF2D9B6644857F095
dcache.cva x22
li x22,0x000100014
and x22,x22,x8
li x4,0x8698DB86908E4BCA
dcache.cva x22
li x22,0x0001041B1
and x22,x22,x8
li x4,0x509D696802B8D26B
dcache.civa x22
li x22,0x000107104
and x22,x22,x8
li x4,0x94AB770E8CCC8E04
dcache.civa x22
li x22,0x00010207C
and x22,x22,x8
li x4,0x236E945101B2B6E0
lb x5,0x000000037(x22)
li x22,0x000100169
and x22,x22,x8
li x4,0x914C4709604CF6CA
sw x4,0x000000014(x22)
li x22,0x00010405A
and x22,x22,x8
li x4,0x2A77139611BC2149
sd x4,0x000000040(x22)
li x22,0x0001040EA
and x22,x22,x8
li x4,0x9CD27EBE72C01CC7
dcache.civa x22
li x22,0x0001021A9
and x22,x22,x8
li x4,0x80E8B4168BD46C37
ld x5,0x000000030(x22)
li x22,0x0001071A9
and x22,x22,x8
li x4,0x36E4E925A6A5DD2A
dcache.cva x22
li x22,0x000104050
and x22,x22,x8
li x4,0xDE5E6E2202ACBB92
lwu x5,0x000000018(x22)
li x22,0x0001040C9
and x22,x22,x8
li x4,0x27F00BAC4ED948A
dcache.civa x22
li x22,0x000104139
and x22,x22,x8
li x4,0xF4AF08FE53BB8720
sw x4,0x00000000C(x22)
li x22,0x00010711E
and x22,x22,x8
li x4,0xE99B27AF2A8487B2
ld x5,0x000000018(x22)
li x22,0x000104187
and x22,x22,x8
li x4,0x9419AE609F7D4709
lbu x5,0x00000003A(x22)
li x22,0x0001040EA
and x22,x22,x8
li x4,0x7D04283BE28AD734
ld x5,0x000000038(x22)
li x22,0x0001020DD
and x22,x22,x8
li x4,0x68EFA7FC8FAE4765
lh x5,0x000000014(x22)
li x22,0x00010017A
and x22,x22,x8
li x4,0xC6062601531CB4EE
lhu x5,0x000000018(x22)
li x22,0x00010001E
and x22,x22,x8
li x4,0x2F762CDF7B14CE26
dcache.cva x22
li x22,0x000102162
and x22,x22,x8
li x4,0x6FE6666C5F5B81AE
lwu x5,0x000000024(x22)
li x22,0x000104065
and x22,x22,x8
li x4,0xBAD29018ECBF9AA1
lb x5,0x000000021(x22)
li x22,0x000107041
and x22,x22,x8
li x4,0xB704F3EFF0782BBC
sb x4,0x000000027(x22)
li x22,0x000102173
and x22,x22,x8
li x4,0x3F4B1ADE88EFFB91
lhu x5,0x000000020(x22)
li x22,0x0001040FE
and x22,x22,x8
li x4,0xB218681E7AC02F17
dcache.cva x22
li x22,0x000104026
and x22,x22,x8
li x4,0xA23A83B49E73151B
sh x4,0x000000008(x22)
li x22,0x0001020D1
and x22,x22,x8
li x4,0x3243A80B699748F0
sh x4,0x00000000E(x22)
li x22,0x000104133
and x22,x22,x8
li x4,0x1761CE135D39FCAD
sb x4,0x000000048(x22)
li x22,0x00010702C
and x22,x22,x8
li x4,0xFA5632993EB492F9
sw x4,0x000000034(x22)
li x22,0x0001041C8
and x22,x22,x8
li x4,0x11D16EFCD15D8049
lh x5,0x00000002C(x22)
li x22,0x000100040
and x22,x22,x8
li x4,0xACB18048B8BD7173
dcache.cva x22
li x22,0x0001021AB
and x22,x22,x8
li x4,0xD606C6661A23CAA1
dcache.cva x22
li x22,0x000100031
and x22,x22,x8
li x4,0xFC01D49B0E8E862D
dcache.civa x22
li x22,0x000107060
and x22,x22,x8
li x4,0x5E5BA76C8A7E69DD
sb x4,0x000000022(x22)
li x22,0x000102162
and x22,x22,x8
li x4,0x3E8A5B427FFD31FF
dcache.cva x22
li x22,0x0001070F7
and x22,x22,x8
li x4,0x4EBB6E01C8465AD6
dcache.civa x22
li x22,0x00010710B
and x22,x22,x8
li x4,0x49AAC1146913EB53
lh x5,0x000000046(x22)
li x22,0x000100176
and x22,x22,x8
li x4,0x1870A4B10F0D5662
sd x4,0x000000000(x22)
li x22,0x0001020C6
and x22,x22,x8
li x4,0x47833462A798BD82
sw x4,0x000000044(x22)
li x22,0x000104017
and x22,x22,x8
li x4,0x494C9ED8E70B49F1
lw x5,0x000000020(x22)
li x22,0x000107136
and x22,x22,x8
li x4,0x6512ABDB61009B65
sw x4,0x00000000C(x22)
li x22,0x0001020CB
and x22,x22,x8
li x4,0x3D49387C41A3B707
sd x4,0x000000010(x22)
li x22,0x000107152
and x22,x22,x8
li x4,0xE669634FCB9A82FB
lbu x5,0x00000002E(x22)
li x22,0x00010707E
and x22,x22,x8
li x4,0xCA7ED3C122A42F6F
dcache.civa x22
li x22,0x00010705B
and x22,x22,x8
li x4,0xD1A08A7531D43516
lhu x5,0x00000000E(x22)
li x22,0x000102088
and x22,x22,x8
li x4,0x9EE241BBE40FE38D
lbu x5,0x00000002D(x22)
li x22,0x000107167
and x22,x22,x8
li x4,0x7453C391C8347A06
sh x4,0x000000026(x22)
li x22,0x000107058
and x22,x22,x8
li x4,0x4A3617D526833259
sb x4,0x000000028(x22)
li x22,0x00010703A
and x22,x22,x8
li x4,0x59E53F91A8E5BFAB
lwu x5,0x000000018(x22)
li x22,0x00010403F
and x22,x22,x8
li x4,0xE7DE8F4C1D0E73D0
lwu x5,0x000000034(x22)
li x22,0x000104166
and x22,x22,x8
li x4,0x24257CCAE368ABC1
dcache.cva x22
li x22,0x00010203A
and x22,x22,x8
li x4,0xE518239EA57F2C10
sb x4,0x000000010(x22)
li x22,0x0001040A4
and x22,x22,x8
li x4,0xC0F5F01960F132F1
dcache.civa x22
li x22,0x0001020BD
and x22,x22,x8
li x4,0xB4CCA5BB8C82604D
lh x5,0x00000000E(x22)
li x22,0x0001021A9
and x22,x22,x8
li x4,0x72848A34CE5C1043
dcache.civa x22
li x22,0x00010009C
and x22,x22,x8
li x4,0xCC3270DFD0044EF9
lbu x5,0x00000002A(x22)
li x22,0x000104020
and x22,x22,x8
li x4,0xB845F2056BB5A62C
lw x5,0x000000018(x22)
li x22,0x0001041C1
and x22,x22,x8
li x4,0x1ADAB71607A0E48C
lbu x5,0x00000000B(x22)
li x22,0x000107138
and x22,x22,x8
li x4,0x9B29AD2B86D46F18
sd x4,0x000000038(x22)
li x22,0x00010702F
and x22,x22,x8
li x4,0x28B1D0404A038746
dcache.civa x22
li x22,0x0001071FA
and x22,x22,x8
li x4,0x44D8A19A5E65E2D4
sw x4,0x00000004C(x22)
li x22,0x000102003
and x22,x22,x8
li x4,0x1974C874B1B26232
dcache.cva x22
li x22,0x0001021CB
and x22,x22,x8
li x4,0x6DB9D4C427957D62
lhu x5,0x00000003C(x22)
li x22,0x000102125
and x22,x22,x8
li x4,0xEC9EA89CA39548EE
dcache.cva x22
li x22,0x00010214C
and x22,x22,x8
li x4,0xABFB21E3E52014C
lbu x5,0x00000004D(x22)
li x22,0x0001020E5
and x22,x22,x8
li x4,0x37D1F0FE65238419
dcache.civa x22
li x22,0x000104200
and x22,x22,x8
li x4,0x4C8A5370ECE3A000
dcache.cva x22
li x22,0x000107014
and x22,x22,x8
li x4,0x2F19BAD9C63C5F30
sb x4,0x00000001F(x22)
li x22,0x000104084
and x22,x22,x8
li x4,0x3891B9C54E510E7A
ld x5,0x000000018(x22)
li x22,0x00010018C
and x22,x22,x8
li x4,0xB39EDF7901B2EBC1
dcache.cva x22
li x22,0x00010701D
and x22,x22,x8
li x4,0xBBCD1BACCE5246F2
sh x4,0x000000040(x22)
li x22,0x000104197
and x22,x22,x8
li x4,0xAEFD8BB22FBD23B5
dcache.civa x22
li x22,0x0001001FE
and x22,x22,x8
li x4,0x9627E4155F454250
sb x4,0x000000030(x22)
li x22,0x0001021DB
and x22,x22,x8
li x4,0xBFF12588A062E13C
lh x5,0x000000020(x22)
li x22,0x000102009
and x22,x22,x8
li x4,0xBB54EF2F6D83EEBC
dcache.civa x22
li x22,0x0001001DB
and x22,x22,x8
li x4,0xB58474A683C777EB
lw x5,0x000000004(x22)
li x22,0x000107029
and x22,x22,x8
li x4,0x9641CA349C5A4462
dcache.civa x22
li x22,0x000100003
and x22,x22,x8
li x4,0x2494545C700CF6D
dcache.cva x22
li x22,0x00010000C
and x22,x22,x8
li x4,0xF7D07008CF6598C5
dcache.civa x22
li x22,0x000107049
and x22,x22,x8
li x4,0xA6482248B625ED0F
dcache.civa x22
li x22,0x00010406B
and x22,x22,x8
li x4,0x28B78DCE47FE1C1
sb x4,0x000000010(x22)
li x22,0x000100131
and x22,x22,x8
li x4,0x507D84165FD6BB69
dcache.civa x22
li x22,0x0001021D6
and x22,x22,x8
li x4,0x8F5131B700319FB2
sb x4,0x000000011(x22)
li x22,0x000104026
and x22,x22,x8
li x4,0x34757B339F5B4A6E
sw x4,0x000000038(x22)
li x22,0x0001071C2
and x22,x22,x8
li x4,0x312131FF2865B41C
sd x4,0x000000028(x22)
li x22,0x00010012F
and x22,x22,x8
li x4,0x705E14E9062522A0
sw x4,0x00000003C(x22)
li x22,0x0001021CC
and x22,x22,x8
li x4,0xAD76FCCC69CB18D4
lwu x5,0x000000028(x22)
li x22,0x00010711A
and x22,x22,x8
li x4,0x47B7CCF3D566A359
lwu x5,0x000000020(x22)
li x22,0x000100159
and x22,x22,x8
li x4,0x140F80BD4253241A
dcache.cva x22
li x22,0x0001041F4
and x22,x22,x8
li x4,0x2EA44BB2C6B6FA18
lhu x5,0x00000004E(x22)
li x22,0x00010705B
and x22,x22,x8
li x4,0xE038BF16D8997E69
dcache.civa x22
li x22,0x000107091
and x22,x22,x8
li x4,0xAB13E2506FA62064
sw x4,0x00000000C(x22)
li x22,0x0001020B6
and x22,x22,x8
li x4,0xE73DBCFED967A73B
dcache.civa x22
li x22,0x000104066
and x22,x22,x8
li x4,0x4A8757EB3FC91A03
dcache.cva x22
li x22,0x0001021D9
and x22,x22,x8
li x4,0xF6B57625E939F0F2
lbu x5,0x000000045(x22)
li x22,0x000104081
and x22,x22,x8
li x4,0xB6973A897172D047
sb x4,0x00000002A(x22)
li x22,0x000107067
and x22,x22,x8
li x4,0x75CA910D5DB0E570
dcache.civa x22
li x22,0x000100129
and x22,x22,x8
li x4,0xFE6C040E799DF353
lw x5,0x000000010(x22)
li x22,0x0001071DF
and x22,x22,x8
li x4,0xB8B77542A1BF13BE
dcache.cva x22
li x22,0x0001021E2
and x22,x22,x8
li x4,0xE93352B3C3449CAD
lh x5,0x000000004(x22)
li x22,0x000100069
and x22,x22,x8
li x4,0xF34A77B3D378955D
lb x5,0x00000001C(x22)
li x22,0x0001040ED
and x22,x22,x8
li x4,0x2A96565DBC3BF7BB
dcache.civa x22
li x22,0x000107160
and x22,x22,x8
li x4,0x60CAAABBFF5E61D9
dcache.civa x22
li x22,0x0001040F6
and x22,x22,x8
li x4,0xF41F782C5392893
sb x4,0x00000001D(x22)
li x22,0x0001040E5
and x22,x22,x8
li x4,0xCE0E2A9D53FEFCF9
sh x4,0x00000003E(x22)
li x22,0x00010200C
and x22,x22,x8
li x4,0x80C1A94AC6BB2D4D
sb x4,0x00000003A(x22)
li x22,0x00010214C
and x22,x22,x8
li x4,0xE3DD31BEB347C624
dcache.civa x22
li x22,0x000100142
and x22,x22,x8
li x4,0x5924AE07187E757A
lw x5,0x000000028(x22)
li x22,0x0001020FF
and x22,x22,x8
li x4,0x5244054875D04790
lhu x5,0x000000028(x22)
li x22,0x000102105
and x22,x22,x8
li x4,0x9D15822E7AA29CCA
lh x5,0x00000003A(x22)
li x22,0x00010202B
and x22,x22,x8
li x4,0x61E52FB81F216CE2
lhu x5,0x00000000C(x22)
li x22,0x00010013E
and x22,x22,x8
li x4,0xA4DC462027C75172
sd x4,0x000000030(x22)
li x22,0x00010410C
and x22,x22,x8
li x4,0xC9268D95385AB628
lb x5,0x000000004(x22)
li x22,0x000100170
and x22,x22,x8
li x4,0x5F1DBE418B7A555D
lbu x5,0x000000018(x22)
li x22,0x0001021C2
and x22,x22,x8
li x4,0x63445766E0650C40
dcache.civa x22
li x22,0x0001071A8
and x22,x22,x8
li x4,0xFE84BB3CDC8D3BE
lhu x5,0x000000046(x22)
li x22,0x0001071EA
and x22,x22,x8
li x4,0x216A5097AD2EFFC7
sd x4,0x000000048(x22)
li x22,0x000102091
and x22,x22,x8
li x4,0x11524735D66D30F5
lb x5,0x00000000D(x22)
li x22,0x000102094
and x22,x22,x8
li x4,0x8DA3FCE9C6D0C1E6
dcache.civa x22
li x22,0x0001071E6
and x22,x22,x8
li x4,0xDF689EEAD061D0F8
dcache.cva x22
li x22,0x000104161
and x22,x22,x8
li x4,0xDC2CE95F93E7A7EC
lwu x5,0x000000028(x22)
wfi
.global CPU3
CPU3:
MMODE_SMODE CPU3_S
.global CPU3_S
CPU3_S:
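# Descriptive note (inferred from the stream below): CPU3's generated sequence follows the
# same address-setup / memory-or-cache-op pattern as the other harts, but additionally
# interleaves random integer ALU, shift, and multiply/divide instructions between groups,
# presumably as filler to add pipeline and hazard pressure; their results are not used by
# the surrounding memory accesses.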
li x22,0x000107BED
and x22,x22,x8
li x22,0x000107AF2
and x22,x22,x8
li x4,0x56BCF2DDE8B1262C
lwu x5,0x000000020(x22)
lui x31,0xC5A1B
or x31,x13,x5
li x22,0x000104125
and x22,x22,x8
li x4,0x449569B18E7689C4
dcache.civa x22
mulhu x9,x7,x11
sltu x15,x27,x18
xor x16,x17,x28
li x22,0x000107C79
and x22,x22,x8
li x4,0x9C63CC3CD599486B
dcache.cva x22
divu x11,x22,x5
andi x30,x21,0xFFFFFFFFFFFFFAB3
li x22,0x000100028
and x22,x22,x8
li x4,0x156D601628FF5109
dcache.civa x22
or x9,x15,x5
slt x29,x25,x31
and x29,x17,x23
li x22,0x00010201A
and x22,x22,x8
li x4,0x62CC9D9590B745CE
sb x4,0x000000038(x22)
slti x13,x23,0x000000458
addi x28,x3,0xFFFFFFFFFFFFFC09
auipc x20,0x42C6D
li x22,0x0001077AF
and x22,x22,x8
li x4,0xE5F8754B91371094
sh x4,0x000000046(x22)
mulh x31,x7,x29
ori x27,x15,0xFFFFFFFFFFFFFA35
li x22,0x000107A63
and x22,x22,x8
li x4,0xFDF240E06B60F3D8
lb x5,0x000000045(x22)
mulhsu x11,x10,x17
xor x13,x7,x4
or x21,x1,x1
li x22,0x00010211D
and x22,x22,x8
li x4,0xDAE14B00BCAA7AA6
sh x4,0x00000002C(x22)
auipc x11,0xC67D2
div x15,x7,x27
li x22,0x0001041B1
and x22,x22,x8
li x4,0x46066DE105D7BD6
ld x5,0x000000040(x22)
remu x12,x8,x14
addi x17,x25,0xFFFFFFFFFFFFF825
li x22,0x000102071
and x22,x22,x8
li x4,0x6BC46DD97A36D0EE
lw x5,0x000000020(x22)
mulhsu x16,x26,x20
xor x13,x31,x14
sltiu x13,x27,0x0000006CF
li x22,0x000104185
and x22,x22,x8
li x4,0xFB29C15DAFB02BA1
lh x5,0x000000030(x22)
mulh x14,x23,x9
and x10,x29,x26
auipc x18,0x5AFE8
li x22,0x0001021A2
and x22,x22,x8
li x4,0x8905420C4F79EBC8
sw x4,0x000000018(x22)
xor x15,x22,x20
andi x9,x19,0xFFFFFFFFFFFFFB5E
rem x20,x25,x14
li x22,0x000102173
and x22,x22,x8
li x4,0x8CDC0463DDBF7832
sw x4,0x00000004C(x22)
andi x11,x0,0xFFFFFFFFFFFFFBFC
andi x23,x31,0xFFFFFFFFFFFFFEFC
mulh x25,x31,x9
li x22,0x000107CE3
and x22,x22,x8
li x4,0x9F44602801693606
dcache.cva x22
sll x24,x6,x23
sra x24,x5,x16
mulhu x11,x20,x12
li x22,0x0001078BF
and x22,x22,x8
li x4,0xE5FACE934DEAB8C7
dcache.cva x22
andi x13,x13,0x00000051D
srai x19,x21,0x08
remu x27,x18,x2
li x22,0x000107B66
and x22,x22,x8
li x4,0xAD14F93844401E2D
dcache.civa x22
or x28,x25,x8
sltu x24,x21,x27
and x9,x4,x9
li x22,0x00010019B
and x22,x22,x8
li x4,0x2E5CFC76782B0AA4
sb x4,0x000000036(x22)
srl x9,x18,x1
lui x9,0xD64B8
ori x10,x13,0xFFFFFFFFFFFFF9BA
li x22,0x000107A80
and x22,x22,x8
li x4,0x772F74409C1217D1
ld x5,0x000000040(x22)
mulhu x23,x5,x17
rem x31,x30,x19
sll x19,x24,x5
li x22,0x000107B2D
and x22,x22,x8
li x4,0xB81A62DA521C5FE7
sh x4,0x000000032(x22)
auipc x27,0x36E3D
mulhu x14,x15,x5
li x22,0x000107C2A
and x22,x22,x8
li x4,0x86F654379324C39E
lw x5,0x000000038(x22)
div x9,x2,x8
remu x21,x29,x22
li x22,0x00010771D
and x22,x22,x8
li x4,0x4890DF998A72CC46
lh x5,0x00000002E(x22)
ori x9,x18,0x00000037D
auipc x30,0x54199
li x22,0x0001077EA
and x22,x22,x8
li x4,0x74AAAF863BC8361D
sw x4,0x000000028(x22)
and x26,x16,x11
or x27,x19,x17
li x22,0x000107B2D
and x22,x22,x8
li x4,0xD5EFDD94866B220F
lhu x5,0x00000004E(x22)
sltiu x14,x15,0xFFFFFFFFFFFFF945
srl x19,x12,x28
srl x6,x19,x9
li x22,0x000107AB7
and x22,x22,x8
li x4,0x942D5ADD4185E5BF
dcache.civa x22
xor x25,x14,x31
add x10,x26,x23
li x22,0x000107A46
and x22,x22,x8
li x4,0xC28D4DB92D09C8DD
dcache.civa x22
sll x27,x5,x9
mul x15,x9,x6
xori x30,x9,0x0000005BA
li x22,0x000107707
and x22,x22,x8
li x4,0xCDC75870846FB9FA
lb x5,0x000000006(x22)
slti x27,x7,0x000000550
srli x20,x3,0x09
li x22,0x000107CD3
and x22,x22,x8
li x4,0xA2FD50A964E94B4B
dcache.cva x22
mulh x20,x30,x30
mulhu x27,x24,x19
andi x29,x30,0x000000092
li x22,0x00010790E
and x22,x22,x8
li x4,0xF94D1BB2E0E8C60
sb x4,0x000000018(x22)
sll x21,x27,x24
sra x29,x24,x31
li x22,0x000100051
and x22,x22,x8
li x4,0xD618AC13950B432E
sb x4,0x000000003(x22)
div x19,x25,x17
srl x13,x15,x30
slti x21,x27,0xFFFFFFFFFFFFF9E1
li x22,0x00010016D
and x22,x22,x8
li x4,0x9116764038EE7211
dcache.cva x22
slt x24,x0,x25
mulhsu x14,x17,x27
mul x30,x29,x1
li x22,0x000107AAB
and x22,x22,x8
li x4,0x486E11D5E23B4845
dcache.cva x22
rem x27,x30,x24
slt x29,x6,x4
auipc x26,0xA501D
li x22,0x000102069
and x22,x22,x8
li x4,0xCE1B3AD9443A2CFD
sb x4,0x000000006(x22)
slt x29,x31,x26
rem x6,x23,x26
li x22,0x000107A02
and x22,x22,x8
li x4,0xC354A347FCEA48F6
lwu x5,0x000000000(x22)
lui x16,0x8CBF0
mulhsu x10,x5,x3
li x22,0x00010004E
and x22,x22,x8
li x4,0xBA908FD7090C396A
ld x5,0x000000008(x22)
divu x9,x14,x23
sll x16,x21,x22
xori x27,x12,0x0000005AF
li x22,0x0001079CF
and x22,x22,x8
li x4,0x922A36B4BCE536A7
lw x5,0x00000004C(x22)
xor x21,x11,x6
lui x28,0x20BF0
li x22,0x00010790D
and x22,x22,x8
li x4,0x182DD953BFC26D51
dcache.civa x22
remu x16,x10,x22
slli x11,x7,0x018
li x22,0x000100096
and x22,x22,x8
li x4,0x8B8A0FDE02C58FB4
dcache.cva x22
addi x21,x24,0x0000004F8
rem x28,x7,x18
li x22,0x00010771B
and x22,x22,x8
li x4,0x9BB4666A072825E
sh x4,0x00000004E(x22)
srl x21,x14,x9
ori x17,x13,0x0000001C6
li x22,0x000107CDC
and x22,x22,x8
li x4,0x15042791EC2BB4E4
lhu x5,0x00000004C(x22)
slti x20,x28,0xFFFFFFFFFFFFFAD9
mulh x23,x15,x25
li x22,0x0001021A2
and x22,x22,x8
li x4,0xA601F2DA1A81C0B
dcache.cva x22
sub x12,x4,x30
div x31,x9,x22
sll x29,x8,x31
li x22,0x000107BB4
and x22,x22,x8
li x4,0xC0C5C384B68850C
dcache.civa x22
slli x27,x30,0x0F
auipc x11,0x82639
add x31,x19,x20
li x22,0x0001021EF
and x22,x22,x8
li x4,0x13870DB2883B1E01
lh x5,0x000000048(x22)
add x11,x10,x11
addi x26,x27,0x000000714
ori x12,x23,0xFFFFFFFFFFFFFBE1
li x22,0x000100194
and x22,x22,x8
li x4,0x16F6EAF17338C0F7
lbu x5,0x000000015(x22)
addi x23,x21,0x0000004E6
rem x9,x20,x31
li x22,0x000100130
and x22,x22,x8
li x4,0x65233483656EA649
ld x5,0x000000010(x22)
add x13,x18,x10
div x16,x1,x18
and x19,x25,x25
li x22,0x0001040D2
and x22,x22,x8
li x4,0xABAD7CA73850D4EA
sd x4,0x000000028(x22)
mulh x27,x20,x16
srl x29,x30,x3
li x22,0x0001078AA
and x22,x22,x8
li x4,0x58DD146924985576
dcache.civa x22
auipc x10,0x89D94
slt x18,x8,x1
div x14,x26,x18
li x22,0x00010798E
and x22,x22,x8
li x4,0x94EFED27BA98167B
dcache.cva x22
lui x12,0x3E62B
rem x27,x0,x16
li x22,0x00010212A
and x22,x22,x8
li x4,0x669CB997F74CBA18
sh x4,0x000000036(x22)
divu x7,x29,x21
sub x25,x19,x20
li x22,0x00010779E
and x22,x22,x8
li x4,0x1317020ECA6E18A9
dcache.cva x22
and x26,x22,x0
addi x18,x30,0xFFFFFFFFFFFFFEA6
li x22,0x00010203F
and x22,x22,x8
li x4,0xAAB876623E578C84
dcache.cva x22
mulhsu x6,x13,x22
sltu x12,x10,x29
slli x15,x27,0x015
li x22,0x00010401F
and x22,x22,x8
li x4,0x684E94DE512DE9F
dcache.civa x22
slti x20,x28,0x00000051E
remu x29,x20,x29
mulh x23,x1,x11
li x22,0x000102029
and x22,x22,x8
li x4,0x175324EAB987FC14
sb x4,0x000000024(x22)
mulhu x21,x8,x20
sub x10,x2,x11
li x22,0x000102044
and x22,x22,x8
li x4,0x7AD77CF68B59E279
dcache.civa x22
sub x15,x12,x25
sltiu x15,x12,0xFFFFFFFFFFFFF83D
li x22,0x000107CB1
and x22,x22,x8
li x4,0xC7782B9F387F2676
sh x4,0x00000001A(x22)
ori x20,x23,0xFFFFFFFFFFFFF99A
auipc x26,0x184C5
srai x19,x19,0x01F
li x22,0x000107A73
and x22,x22,x8
li x4,0xC36DE3C083D78673
ld x5,0x000000040(x22)
addi x7,x26,0xFFFFFFFFFFFFFDD4
sub x12,x25,x9
li x22,0x0001000EF
and x22,x22,x8
li x4,0xD1C6FC0927B461A0
ld x5,0x000000038(x22)
sll x11,x12,x29
sltiu x7,x17,0x00000047B
or x31,x17,x13
li x22,0x000102169
and x22,x22,x8
li x4,0x184AC488DB668073
dcache.cva x22
and x6,x16,x3
lui x7,0xD24BD
li x22,0x0001041CA
and x22,x22,x8
li x4,0x1FD9A3CCA5E92C1A
dcache.cva x22
slti x9,x30,0xFFFFFFFFFFFFFE15
xori x31,x30,0xFFFFFFFFFFFFFF4D
sll x14,x6,x19
li x22,0x000107BFD
and x22,x22,x8
li x4,0x5353E9FEC4CE372A
lwu x5,0x00000002C(x22)
divu x25,x28,x22
lui x7,0xE579
addi x26,x8,0xFFFFFFFFFFFFFB3F
li x22,0x000107C76
and x22,x22,x8
li x4,0xF9B59A514B23603F
sb x4,0x000000040(x22)
add x16,x20,x20
slt x25,x19,x30
mulhu x17,x25,x14
li x22,0x000100081
and x22,x22,x8
li x4,0x91DD65CBD835DB1B
lb x5,0x000000014(x22)
rem x16,x3,x27
auipc x21,0x9B372
li x22,0x000100152
and x22,x22,x8
li x4,0xC149A3592ED9F222
sb x4,0x000000018(x22)
mulhsu x28,x24,x15
srl x30,x11,x8
li x22,0x000107A45
and x22,x22,x8
li x4,0xB105D5C5E25C1A40
dcache.cva x22
div x31,x10,x28
rem x13,x25,x26
li x22,0x000104107
and x22,x22,x8
li x4,0x32942EB050E6553B
dcache.civa x22
sra x20,x5,x25
ori x26,x21,0x00000059F
li x22,0x0001020F4
and x22,x22,x8
li x4,0x1BB7260FA6B801B9
sh x4,0x00000002C(x22)
slli x31,x15,0x01F
sltu x15,x24,x9
li x22,0x000104120
and x22,x22,x8
li x4,0x87D2F37E34F519E
lwu x5,0x000000044(x22)
div x10,x0,x29
slti x12,x15,0x000000005
li x22,0x000104098
and x22,x22,x8
li x4,0x262EDD904FC163E0
lhu x5,0x000000046(x22)
slli x7,x10,0x012
addi x29,x0,0xFFFFFFFFFFFFF979
and x29,x18,x4
li x22,0x00010792E
and x22,x22,x8
li x4,0x5D540770790090B1
dcache.civa x22
ori x30,x16,0x00000000B
ori x28,x23,0x0000001D5
li x22,0x0001079EF
and x22,x22,x8
li x4,0x188087895ABB2BF
lhu x5,0x00000000A(x22)
sub x31,x7,x29
addi x9,x0,0xFFFFFFFFFFFFFE0D
div x14,x27,x11
li x22,0x000107CFD
and x22,x22,x8
li x4,0x4D4D528FB5E6F419
sh x4,0x000000044(x22)
div x23,x14,x21
auipc x29,0x46AFF
li x22,0x00010003F
and x22,x22,x8
li x4,0xB8DF0B4670F995FE
sw x4,0x00000004C(x22)
sltiu x14,x1,0xFFFFFFFFFFFFFE08
sltu x25,x0,x26
mul x27,x7,x14
li x22,0x0001079A8
and x22,x22,x8
li x4,0xF6E8BF046318A0D9
dcache.civa x22
srai x28,x30,0x0B
sltiu x28,x31,0xFFFFFFFFFFFFF86F
li x22,0x0001020A3
and x22,x22,x8
li x4,0x3639830D2A1FDD0
dcache.civa x22
srli x14,x8,0x01E
sltu x25,x4,x26
li x22,0x000107A63
and x22,x22,x8
li x4,0x71A3DB1FCADCB7F6
ld x5,0x000000040(x22)
auipc x31,0xE51C3
remu x29,x10,x14
li x22,0x0001000E5
and x22,x22,x8
li x4,0xA1203471C5C5BC98
lw x5,0x000000020(x22)
srai x31,x9,0x01C
and x12,x3,x24
li x22,0x0001021EE
and x22,x22,x8
li x4,0xD673552596925C45
dcache.cva x22
sltiu x31,x27,0xFFFFFFFFFFFFFFDA
srli x16,x0,0x01E
srl x24,x10,x10
li x22,0x000107C2A
and x22,x22,x8
li x4,0x2A2B187E758EE7E0
sd x4,0x000000010(x22)
ori x14,x24,0xFFFFFFFFFFFFFA34
ori x26,x3,0xFFFFFFFFFFFFFE26
li x22,0x00010202B
and x22,x22,x8
li x4,0x2A821E756C7D92E0
lh x5,0x000000018(x22)
srl x9,x20,x23
slli x24,x16,0x01D
and x26,x31,x17
li x22,0x000107B14
and x22,x22,x8
li x4,0xC5ABBF7445001009
lwu x5,0x000000024(x22)
mulhsu x7,x16,x13
sll x26,x16,x1
xori x20,x15,0xFFFFFFFFFFFFFBAE
li x22,0x00010212A
and x22,x22,x8
li x4,0xB9466239A80E1283
lbu x5,0x00000001E(x22)
slti x12,x5,0x000000559
rem x7,x9,x17
li x22,0x000107951
and x22,x22,x8
li x4,0x5E3869CB40AD5FDB
dcache.civa x22
slt x7,x29,x29
mulh x20,x1,x8
andi x23,x14,0x0000005A2
li x22,0x0001079A0
and x22,x22,x8
li x4,0x54792D7F9238B8D6
lwu x5,0x000000040(x22)
or x20,x15,x9
ori x28,x18,0xFFFFFFFFFFFFFF54
ori x9,x14,0x00000070F
li x22,0x000100179
and x22,x22,x8
li x4,0x901EABABF2A96384
dcache.civa x22
add x26,x21,x23
mulh x14,x19,x24
mulhsu x29,x28,x5
li x22,0x0001001C7
and x22,x22,x8
li x4,0x3271770704580E8E
lw x5,0x000000024(x22)
slt x26,x12,x31
srai x24,x15,0x01B
srai x23,x22,0x014
li x22,0x000102034
and x22,x22,x8
li x4,0xFBC7E7FEC06B355E
dcache.cva x22
sll x13,x7,x17
andi x11,x24,0xFFFFFFFFFFFFFE40
li x22,0x0001041F5
and x22,x22,x8
li x4,0x19B1E1F3F17EC758
ld x5,0x000000030(x22)
auipc x24,0x43A6B
mulhu x24,x30,x4
li x22,0x000107CE4
and x22,x22,x8
li x4,0x35EE47E70E62242D
sb x4,0x000000028(x22)
slt x13,x11,x7
add x26,x29,x1
li x22,0x000104098
and x22,x22,x8
li x4,0x50489CD6601F6630
sb x4,0x000000006(x22)
or x31,x4,x10
auipc x25,0xD3D1C
li x22,0x00010405C
and x22,x22,x8
li x4,0xCC7545A6966D4B15
dcache.civa x22
rem x10,x26,x8
sll x10,x9,x29
li x22,0x000107AEC
and x22,x22,x8
li x4,0xF8DB28128D9410DC
dcache.civa x22
xori x15,x17,0x00000038D
auipc x28,0xF196E
remu x15,x3,x12
li x22,0x0001040C8
and x22,x22,x8
li x4,0x46A39FAB7912B0A1
dcache.civa x22
andi x13,x2,0xFFFFFFFFFFFFF842
sll x27,x28,x13
mulhu x14,x10,x20
li x22,0x0001041F6
and x22,x22,x8
li x4,0x3DD74CAFDD3BED39
lb x5,0x00000000A(x22)
sltu x14,x30,x31
ori x11,x11,0xFFFFFFFFFFFFFB41
srai x11,x8,0x0E
li x22,0x0001020BD
and x22,x22,x8
li x4,0x504271052F544673
lh x5,0x000000012(x22)
remu x17,x19,x13
mulhsu x6,x26,x5
li x22,0x00010786C
and x22,x22,x8
li x4,0x9745A307552E9D2D
sw x4,0x000000044(x22)
div x16,x14,x23
and x15,x12,x24
li x22,0x000100074
and x22,x22,x8
li x4,0xF55D9EB88E2FEB5F
dcache.civa x22
addi x20,x6,0xFFFFFFFFFFFFFE52
sra x9,x15,x21
li x22,0x00010008F
and x22,x22,x8
li x4,0x65066FBC5003BBED
dcache.cva x22
mulh x15,x0,x30
srli x30,x11,0x01E
li x22,0x000102096
and x22,x22,x8
li x4,0xD21259E3F666DE1A
sb x4,0x000000000(x22)
mulh x10,x23,x20
slt x28,x9,x23
addi x25,x0,0x0000003AD
li x22,0x000107C6F
and x22,x22,x8
li x4,0xC123EF3E303161C
ld x5,0x000000038(x22)
lui x10,0x5FD0
sra x19,x17,x25
li x22,0x000107B41
and x22,x22,x8
li x4,0xDE28DDB37A7E9F43
sw x4,0x000000044(x22)
slt x9,x13,x28
xor x18,x3,x27
slli x17,x4,0x017
li x22,0x000107CC4
and x22,x22,x8
li x4,0x77D04074BD0CF6FE
lw x5,0x00000001C(x22)
slt x10,x0,x7
divu x31,x6,x1
auipc x29,0xD2EE9
li x22,0x00010407E
and x22,x22,x8
li x4,0xDC2BFAB45DFBA3C2
lh x5,0x000000036(x22)
slti x30,x26,0x000000764
srai x19,x4,0x016
li x22,0x000107B69
and x22,x22,x8
li x4,0x34AE39C3002FF54F
dcache.cva x22
andi x7,x17,0xFFFFFFFFFFFFFD5F
sltu x10,x28,x21
ori x25,x27,0x000000337
li x22,0x00010777A
and x22,x22,x8
li x4,0xAE6FE5010EB2B5A4
lh x5,0x000000048(x22)
sll x25,x0,x24
sltiu x15,x5,0xFFFFFFFFFFFFF980
li x22,0x000107BD0
and x22,x22,x8
li x4,0x1DB260ADF1DCA1FA
dcache.civa x22
xori x31,x23,0xFFFFFFFFFFFFFB51
sll x27,x0,x16
srl x26,x16,x10
li x22,0x0001040AA
and x22,x22,x8
li x4,0xEB89A68C1EC586F0
sh x4,0x000000042(x22)
mulh x7,x13,x13
slli x7,x5,0x01C
srl x25,x6,x19
li x22,0x000107C81
and x22,x22,x8
li x4,0xC06651BC0793D089
lhu x5,0x000000018(x22)
auipc x20,0x45878
mulhu x26,x15,x21
li x22,0x0001077B1
and x22,x22,x8
li x4,0xF4CB0841BD20D2A6
ld x5,0x000000030(x22)
remu x10,x0,x29
sltu x12,x11,x8
sll x28,x24,x12
li x22,0x000104050
and x22,x22,x8
li x4,0xB1896D9B18ABE7E6
sb x4,0x000000049(x22)
mulhsu x30,x27,x2
andi x14,x15,0x00000079C
li x22,0x000100122
and x22,x22,x8
li x4,0xD1C51805315C1D09
lwu x5,0x00000002C(x22)
srl x31,x13,x19
slli x23,x14,0x00
addi x21,x28,0x00000053E
li x22,0x000102132
and x22,x22,x8
li x4,0x74F0ED3A9589C706
lb x5,0x000000027(x22)
slti x24,x1,0xFFFFFFFFFFFFFA10
slli x17,x10,0x018
li x22,0x000107826
and x22,x22,x8
li x4,0xBE3C485D3BDAA744
sb x4,0x00000003A(x22)
sra x9,x11,x10
sra x7,x8,x14
li x22,0x000107947
and x22,x22,x8
li x4,0xA875B9094A0D0918
dcache.cva x22
sra x30,x11,x2
mulhsu x13,x4,x8
li x22,0x000107CD6
and x22,x22,x8
li x4,0x8D36BC3524DB78DF
lhu x5,0x000000038(x22)
slt x27,x8,x11
rem x24,x17,x10
slli x26,x30,0x011
li x22,0x00010777D
and x22,x22,x8
li x4,0x2780A59176C3E9FC
dcache.civa x22
srl x20,x2,x8
andi x11,x23,0x00000029B
srai x20,x18,0x0B
li x22,0x000107A3C
and x22,x22,x8
li x4,0xB1F9AE50A5DBD6E7
sd x4,0x000000018(x22)
slt x11,x23,x19
srli x26,x11,0x01E
li x22,0x000100059
and x22,x22,x8
li x4,0x50F91654DB2AD8CA
sh x4,0x00000004E(x22)
lui x13,0xF0A7
rem x17,x5,x8
xor x21,x18,x3
li x22,0x000102114
and x22,x22,x8
li x4,0x9E26EECCF0EA171D
lhu x5,0x00000004E(x22)
or x21,x24,x9
divu x19,x16,x19
li x22,0x000107C61
and x22,x22,x8
li x4,0xF7CFA983FC00CA6C
lbu x5,0x000000026(x22)
srl x9,x23,x29
mulh x14,x11,x23
xor x21,x22,x12
li x22,0x00010010D
and x22,x22,x8
li x4,0x99AF18443A406D83
sh x4,0x00000001A(x22)
mulhsu x28,x22,x2
rem x7,x16,x13
li x22,0x000107C4D
and x22,x22,x8
li x4,0x8A735B3BE5D32646
sd x4,0x000000018(x22)
slt x24,x10,x31
auipc x19,0x628B6
mul x29,x5,x22
li x22,0x0001077E1
and x22,x22,x8
li x4,0xAEDC51EBAC18BD0F
sd x4,0x000000040(x22)
slli x9,x0,0x0C
sltu x23,x28,x30
li x22,0x000102107
and x22,x22,x8
li x4,0x9A107FD053BE07D
sw x4,0x000000024(x22)
andi x19,x12,0xFFFFFFFFFFFFFA7C
mulhu x24,x28,x2
li x22,0x000100083
and x22,x22,x8
li x4,0xCE6477302DFC7A7
dcache.civa x22
mul x14,x15,x7
slli x27,x23,0x04
auipc x11,0x3BD43
li x22,0x000107B45
and x22,x22,x8
li x4,0xE2B91F3CE9858A2E
lw x5,0x000000000(x22)
sltu x28,x30,x17
and x16,x0,x9
li x22,0x0001021A6
and x22,x22,x8
li x4,0x8FE061D4A47A2A8D
dcache.cva x22
srl x10,x31,x28
rem x14,x25,x24
rem x28,x21,x14
li x22,0x000107931
and x22,x22,x8
li x4,0xBDD6662CE8EE4699
sb x4,0x000000032(x22)
sltiu x7,x23,0x000000597
mulhsu x18,x17,x21
li x22,0x0001078BC
and x22,x22,x8
li x4,0x7611CB4FCD0FFA07
lh x5,0x000000034(x22)
divu x24,x9,x1
sll x29,x3,x7
add x30,x0,x13
li x22,0x0001078B9
and x22,x22,x8
li x4,0xD3E987C8610B8251
lhu x5,0x00000004C(x22)
auipc x13,0x2FADD
sra x7,x6,x7
lui x13,0xCDE83
li x22,0x00010203D
and x22,x22,x8
li x4,0x20867D399BB0AAF9
sb x4,0x00000003A(x22)
xori x9,x29,0xFFFFFFFFFFFFFF9C
sra x9,x25,x31
slli x21,x8,0x0B
li x22,0x0001078D5
and x22,x22,x8
li x4,0x96B401EFFE8C8C98
lwu x5,0x000000004(x22)
ori x23,x1,0xFFFFFFFFFFFFFC07
mul x14,x24,x5
mulh x23,x26,x24
li x22,0x000104095
and x22,x22,x8
li x4,0xDB82B68AD85EAD56
lbu x5,0x000000038(x22)
mulh x25,x20,x7
or x9,x3,x8
xori x25,x5,0xFFFFFFFFFFFFF887
li x22,0x0001040F4
and x22,x22,x8
li x4,0xC901BE33C03CA0E4
ld x5,0x000000030(x22)
or x25,x27,x22
or x7,x21,x17
slt x26,x29,x19
li x22,0x0001020E2
and x22,x22,x8
li x4,0xB0E9D9F818296CA5
dcache.civa x22
sltiu x11,x5,0x00000042D
add x12,x27,x18
mul x7,x0,x18
li x22,0x000107779
and x22,x22,x8
li x4,0x7211DFAC596F9553
lhu x5,0x000000020(x22)
slti x20,x10,0x0000001B0
xor x9,x15,x11
li x22,0x0001078DE
and x22,x22,x8
li x4,0xC8D88B9DF2D5991
lwu x5,0x00000000C(x22)
slt x26,x21,x26
mulhu x19,x15,x1
xori x30,x24,0x00000045B
li x22,0x00010207B
and x22,x22,x8
li x4,0x5616EA776A2D0AEC
lw x5,0x000000018(x22)
xor x6,x21,x15
remu x13,x7,x23
mulh x24,x14,x25
li x22,0x00010408F
and x22,x22,x8
li x4,0x75A06AF8FD883894
dcache.cva x22
divu x11,x5,x17
ori x13,x17,0x000000260
add x9,x24,x25
li x22,0x000107AB2
and x22,x22,x8
li x4,0x2D347D8A883EE3BE
dcache.civa x22
andi x9,x24,0x000000653
andi x14,x8,0x000000452
sub x10,x27,x1
li x22,0x0001079C7
and x22,x22,x8
li x4,0xC5761287CE36E4DE
ld x5,0x000000008(x22)
sub x14,x19,x9
srai x25,x22,0x07
srl x7,x15,x27
li x22,0x0001077A6
and x22,x22,x8
li x4,0xFE9199090A52FFE
dcache.cva x22
xori x31,x25,0xFFFFFFFFFFFFF95E
div x13,x7,x7
and x9,x7,x30
li x22,0x000107743
and x22,x22,x8
li x4,0x60A23AD1701B34D0
sw x4,0x000000040(x22)
mulhsu x7,x17,x2
sll x14,x18,x2
slti x28,x14,0xFFFFFFFFFFFFFEBD
li x22,0x000107A78
and x22,x22,x8
li x4,0x9A50B3C686B06B37
dcache.civa x22
srli x28,x29,0x08
rem x31,x29,x7
sra x29,x14,x6
li x22,0x000107A98
and x22,x22,x8
li x4,0xE7C48746DDFB5AFC
dcache.civa x22
sll x23,x17,x18
and x6,x23,x2
mulhu x14,x6,x13
li x22,0x0001000E1
and x22,x22,x8
li x4,0x28074229F450CF44
sb x4,0x00000002B(x22)
sub x11,x26,x9
srai x17,x17,0x0E
li x22,0x000102027
and x22,x22,x8
li x4,0xD805A110F182CF4E
dcache.civa x22
sltiu x31,x3,0x000000096
or x20,x29,x7
li x22,0x0001041DB
and x22,x22,x8
li x4,0x863CA0772EA77AB1
dcache.cva x22
ori x29,x31,0x000000728
slli x26,x14,0x06
lui x26,0x1444D
li x22,0x000107A4F
and x22,x22,x8
li x4,0x495283F286965865
lh x5,0x00000000A(x22)
ori x11,x3,0xFFFFFFFFFFFFFACE
div x7,x17,x23
addi x14,x20,0x0000000D1
li x22,0x000107782
and x22,x22,x8
li x4,0xFC2FD91C2BC93288
lbu x5,0x000000048(x22)
sltu x25,x20,x4
remu x24,x24,x10
li x22,0x000107C63
and x22,x22,x8
li x4,0x2526D862298EE75F
lh x5,0x00000000E(x22)
divu x10,x22,x27
divu x26,x12,x6
div x23,x20,x22
li x22,0x000107770
and x22,x22,x8
li x4,0x9DED3CE71619F7F3
sd x4,0x000000028(x22)
ori x24,x13,0xFFFFFFFFFFFFF9BA
xor x17,x23,x26
mulhsu x16,x29,x13
li x22,0x000107C8F
and x22,x22,x8
li x4,0x1FFDDF526C117486
dcache.cva x22
mulh x25,x7,x20
div x26,x25,x26
sltu x10,x17,x0
li x22,0x0001021B9
and x22,x22,x8
li x4,0xA048D6887724AF48
dcache.civa x22
sra x12,x30,x15
mulhsu x16,x7,x3
ori x23,x5,0xFFFFFFFFFFFFFB06
li x22,0x000102062
and x22,x22,x8
li x4,0x2D65D02B1B41444F
dcache.civa x22
sll x12,x20,x7
srli x25,x31,0x01F
ori x23,x8,0x0000004FE
li x22,0x00010214C
and x22,x22,x8
li x4,0x95CB25A42BC36FDD
dcache.civa x22
rem x24,x5,x2
remu x14,x22,x25
sltu x27,x10,x9
li x22,0x000100174
and x22,x22,x8
li x4,0xCB65E1792D3DC079
lbu x5,0x000000047(x22)
mulhsu x23,x25,x20
auipc x6,0x41310
li x22,0x0001079BE
and x22,x22,x8
li x4,0x2C2030192A756C7A
lh x5,0x00000000A(x22)
xor x28,x24,x31
and x23,x4,x5
li x22,0x000107BC7
and x22,x22,x8
li x4,0x19C1E9A3E3FFCF3F
dcache.civa x22
srai x17,x16,0x01B
auipc x26,0x997AA
rem x12,x13,x3
li x22,0x000107836
and x22,x22,x8
li x4,0x54489083F4917B5A
lh x5,0x000000022(x22)
ori x12,x9,0x000000766
andi x19,x7,0xFFFFFFFFFFFFFC0C
xor x19,x17,x30
li x22,0x0001077E1
and x22,x22,x8
li x4,0xE4A7ED246EAAD966
dcache.cva x22
sll x24,x10,x7
addi x23,x4,0x0000006E8
sltiu x28,x16,0x000000691
li x22,0x0001000C5
and x22,x22,x8
li x4,0x3EFFA396C08C965C
dcache.civa x22
divu x13,x10,x26
slti x23,x27,0xFFFFFFFFFFFFFF3C
slt x15,x26,x14
li x22,0x000100050
and x22,x22,x8
li x4,0x652BD9043E3C32BD
dcache.cva x22
slti x27,x21,0xFFFFFFFFFFFFF8F3
lui x30,0x971EF
xor x16,x12,x13
li x22,0x000104003
and x22,x22,x8
li x4,0xE029C24A1026E666
sw x4,0x000000020(x22)
mul x29,x13,x6
sltu x26,x3,x26
li x22,0x00010219E
and x22,x22,x8
li x4,0x15B69D3FFA669667
sb x4,0x000000046(x22)
lui x27,0xFF636
srli x10,x9,0x01F
li x22,0x000100063
and x22,x22,x8
li x4,0x846983A3D26849F3
lwu x5,0x00000000C(x22)
add x30,x19,x26
mulh x20,x25,x2
slli x29,x8,0x05
li x22,0x0001077A6
and x22,x22,x8
li x4,0xC38371171F7CB763
lw x5,0x000000010(x22)
sll x18,x28,x7
sra x13,x1,x27
sub x24,x13,x29
li x22,0x0001079D0
and x22,x22,x8
li x4,0xA38D5432B89EBB67
lbu x5,0x00000000C(x22)
remu x28,x29,x14
slt x6,x22,x5
li x22,0x00010797B
and x22,x22,x8
li x4,0xE42327686A0EAC51
sb x4,0x00000001A(x22)
slli x12,x18,0x02
lui x10,0x830D2
li x22,0x0001021E4
and x22,x22,x8
li x4,0x8A736558EE74B029
lbu x5,0x000000014(x22)
srli x20,x1,0x01
slt x9,x3,x18
slt x14,x16,x17
li x22,0x000104188
and x22,x22,x8
li x4,0x3B7AE75A73BC307E
dcache.cva x22
divu x27,x23,x11
sll x12,x29,x13
li x22,0x0001021F7
and x22,x22,x8
li x4,0xC832756A6844D82F
sw x4,0x000000034(x22)
mulhu x10,x31,x12
mulhu x10,x7,x11
sll x9,x22,x14
li x22,0x00010406B
and x22,x22,x8
li x4,0xC4F3F1F3A749F4B2
sd x4,0x000000030(x22)
mul x23,x14,x2
srai x23,x15,0x016
mulh x18,x7,x25
li x22,0x0001000E7
and x22,x22,x8
li x4,0x9DE991B56F6B85FB
sb x4,0x00000000D(x22)
divu x28,x27,x14
srl x16,x25,x22
li x22,0x0001000A8
and x22,x22,x8
li x4,0xB3F5D9C72DA4D9E6
dcache.civa x22
sltiu x30,x0,0xFFFFFFFFFFFFFCCF
mulhu x6,x28,x26
lui x17,0x4BF35
li x22,0x000107ABA
and x22,x22,x8
li x4,0x17E549001015D5DB
lbu x5,0x000000026(x22)
sub x20,x28,x11
slli x12,x18,0x016
li x22,0x000107C7A
and x22,x22,x8
li x4,0xB500F18B2D0D5671
sb x4,0x000000023(x22)
addi x6,x27,0xFFFFFFFFFFFFF94A
sll x12,x21,x5
li x22,0x00010210A
and x22,x22,x8
li x4,0xCFDCAD1892AE8CA2
lhu x5,0x000000016(x22)
lui x25,0xF2CFF
srai x29,x7,0x07
remu x30,x14,x12
li x22,0x000100163
and x22,x22,x8
li x4,0x74A7670B6916CB70
dcache.civa x22
xor x16,x10,x16
xor x11,x7,x7
li x22,0x000107C1C
and x22,x22,x8
li x4,0xB95F03CE50B05BB6
dcache.civa x22
mul x12,x11,x19
remu x10,x14,x6
sll x27,x4,x13
li x22,0x000107ACC
and x22,x22,x8
li x4,0x8F0B3EAF53D6703E
lwu x5,0x000000010(x22)
sra x15,x17,x6
sltu x7,x3,x5
slli x9,x29,0x04
li x22,0x000107A10
and x22,x22,x8
li x4,0x16B8741FDD292F02
sb x4,0x00000003A(x22)
mulh x30,x22,x9
andi x21,x29,0x00000030B
addi x13,x24,0xFFFFFFFFFFFFFAD4
li x22,0x000100182
and x22,x22,x8
li x4,0x2C6ACA57FBF0F192
lh x5,0x000000032(x22)
mulh x9,x28,x1
auipc x27,0x18CBE
li x22,0x000107826
and x22,x22,x8
li x4,0x9F6555414F6E0E11
lbu x5,0x00000003C(x22)
slli x29,x22,0x06
srli x12,x10,0x01B
andi x10,x23,0x000000093
li x22,0x000104076
and x22,x22,x8
li x4,0xAD27B7E6216480AE
sw x4,0x000000050(x22)
sltiu x9,x12,0xFFFFFFFFFFFFFEF7
remu x7,x13,x14
li x22,0x0001020C7
and x22,x22,x8
li x4,0x8D573CB5F624F474
lw x5,0x000000004(x22)
divu x26,x14,x7
sltiu x12,x11,0x0000000B5
slli x20,x5,0x06
li x22,0x000104184
and x22,x22,x8
li x4,0xCD8B18286FB094B
dcache.cva x22
slti x28,x9,0xFFFFFFFFFFFFFA7A
slti x11,x8,0xFFFFFFFFFFFFFD57
srl x9,x10,x18
li x22,0x000107795
and x22,x22,x8
li x4,0x796B574EC9053A38
dcache.cva x22
sll x14,x15,x8
xori x13,x25,0x00000029D
xori x23,x9,0x000000751
li x22,0x000102172
and x22,x22,x8
li x4,0xC9D49A48C49DCE0B
lw x5,0x000000018(x22)
slli x13,x4,0x0F
slli x25,x26,0x014
xor x19,x17,x10
li x22,0x000107A37
and x22,x22,x8
li x4,0x8ED821D1DFF3C0B4
dcache.cva x22
slt x14,x8,x9
mul x20,x5,x3
li x22,0x0001079B4
and x22,x22,x8
li x4,0xA32B1B2824876374
dcache.cva x22
rem x23,x2,x10
sll x9,x6,x26
li x22,0x000104134
and x22,x22,x8
li x4,0x6DB478604D5759CE
lh x5,0x000000028(x22)
divu x27,x19,x20
sll x10,x0,x31
li x22,0x000107CB4
and x22,x22,x8
li x4,0x96D5E805BFE110F1
lb x5,0x000000017(x22)
ori x27,x24,0xFFFFFFFFFFFFFECA
mulh x7,x18,x22
li x22,0x000107C9F
and x22,x22,x8
li x4,0xB03D40EDB819D7F4
sh x4,0x000000016(x22)
andi x18,x11,0x00000037E
sltu x20,x28,x5
slti x20,x15,0xFFFFFFFFFFFFFE7C
li x22,0x000102048
and x22,x22,x8
li x4,0x6473D014649D5BD1
dcache.civa x22
srai x31,x8,0x010
div x23,x5,x10
li x22,0x000107C32
and x22,x22,x8
li x4,0xC2923F9733111792
dcache.cva x22
rem x14,x13,x1
add x21,x31,x6
li x22,0x0001077F2
and x22,x22,x8
li x4,0x9F725D3D8D5270DF
lhu x5,0x00000003E(x22)
add x20,x27,x12
auipc x6,0x9569E
add x20,x2,x4
li x22,0x000104101
and x22,x22,x8
li x4,0x35457660C0AD007A
lw x5,0x00000003C(x22)
mul x24,x7,x15
srl x21,x18,x7
div x11,x3,x4
li x22,0x000100154
and x22,x22,x8
li x4,0x520A1AE32212F23A
sd x4,0x000000038(x22)
slti x21,x14,0x00000076F
andi x23,x19,0x000000435
and x26,x26,x24
li x22,0x00010416C
and x22,x22,x8
li x4,0xE62A4F2CC57798AA
dcache.civa x22
mulhsu x14,x21,x15
mulhu x9,x1,x11
slt x17,x30,x6
li x22,0x00010009C
and x22,x22,x8
li x4,0xBB89862FB9EC9561
sw x4,0x000000004(x22)
sll x12,x20,x15
sra x17,x10,x20
li x22,0x000104074
and x22,x22,x8
li x4,0xE14A8AA069613B8B
dcache.cva x22
div x6,x3,x23
remu x11,x30,x24
li x22,0x000107B80
and x22,x22,x8
li x4,0xB346AB510FFD9E68
sb x4,0x000000016(x22)
andi x28,x18,0x000000134
sub x11,x12,x31
li x22,0x000107946
and x22,x22,x8
li x4,0xB9EA643E31E389B6
dcache.civa x22
mulhu x13,x18,x13
mul x30,x5,x26
li x22,0x000107981
and x22,x22,x8
li x4,0x7264957D70A0A1D3
lhu x5,0x000000048(x22)
srli x11,x11,0x01F
mulhu x26,x28,x13
li x22,0x000100193
and x22,x22,x8
li x4,0x5E6E6E5EDA33A45B
dcache.civa x22
mulhsu x20,x23,x19
and x14,x28,x5
li x22,0x000100015
and x22,x22,x8
li x4,0xD3FEA6F04F219BDB
sw x4,0x000000040(x22)
remu x15,x23,x20
remu x15,x1,x11
add x7,x9,x18
li x22,0x000107812
and x22,x22,x8
li x4,0xE7F70E6357B12971
dcache.cva x22
xori x11,x11,0x00000070C
auipc x21,0x5DEF5
auipc x23,0xC812A
li x22,0x00010214C
and x22,x22,x8
li x4,0x2BA7B160B9019B58
sw x4,0x000000048(x22)
srli x26,x30,0x0C
add x6,x9,x17
li x22,0x0001040D6
and x22,x22,x8
li x4,0x39437B2F4538464F
sd x4,0x000000008(x22)
ori x27,x30,0xFFFFFFFFFFFFFF13
andi x29,x25,0xFFFFFFFFFFFFF9D5
slli x11,x31,0x01A
li x22,0x000104118
and x22,x22,x8
li x4,0x7698730912A19B26
dcache.civa x22
srai x18,x2,0x0F
or x25,x5,x12
li x22,0x00010781A
and x22,x22,x8
li x4,0xD57FA8378AA4379E
sh x4,0x00000004C(x22)
slli x15,x24,0x013
and x10,x5,x30
addi x11,x19,0x0000003B2
li x22,0x000104178
and x22,x22,x8
li x4,0xE520E904F851AC8A
dcache.civa x22
xori x29,x1,0x000000230
ori x15,x0,0x000000356
and x18,x16,x15
li x22,0x0001040DA
and x22,x22,x8
li x4,0xC8EE1A4D8E1D513F
sd x4,0x000000040(x22)
sra x16,x24,x2
sltiu x21,x18,0xFFFFFFFFFFFFF933
ori x13,x19,0xFFFFFFFFFFFFFDCA
li x22,0x000102057
and x22,x22,x8
li x4,0x201733E7C846D9F1
ld x5,0x000000020(x22)
mulhsu x12,x24,x12
remu x19,x31,x9
sll x31,x20,x7
li x22,0x0001077DA
and x22,x22,x8
li x4,0xAC8CD3E444C69078
dcache.civa x22
sll x18,x3,x31
mul x10,x24,x1
li x22,0x000107CDB
and x22,x22,x8
li x4,0x58AAB4B647D38A35
ld x5,0x000000010(x22)
sltu x7,x22,x11
addi x14,x10,0xFFFFFFFFFFFFFF51
slli x11,x28,0x0C
li x22,0x000107BE0
and x22,x22,x8
li x4,0xD76A718BEEF54296
dcache.civa x22
div x6,x7,x19
xor x27,x6,x30
li x22,0x000100123
and x22,x22,x8
li x4,0x4A6704A1859CB45B
sw x4,0x000000034(x22)
add x25,x15,x1
mulhsu x6,x17,x9
li x22,0x000107AC0
and x22,x22,x8
li x4,0x84BE176C6648EB83
dcache.cva x22
slt x12,x16,x21
sltiu x6,x12,0xFFFFFFFFFFFFFBC9
li x22,0x000107766
and x22,x22,x8
li x4,0xEF4322A850571319
dcache.cva x22
auipc x12,0x699F8
or x11,x17,x23
divu x20,x0,x19
li x22,0x0001079E6
and x22,x22,x8
li x4,0x381DB073AC58A9C5
dcache.cva x22
srli x29,x16,0x01C
sub x29,x19,x28
auipc x28,0xEC6C
li x22,0x0001000CB
and x22,x22,x8
li x4,0xA3B814F10F943920
lhu x5,0x000000018(x22)
sra x23,x20,x9
div x10,x25,x27
add x6,x12,x17
li x22,0x000107B7C
and x22,x22,x8
li x4,0x646E05D52EE37EA2
dcache.civa x22
remu x7,x14,x10
remu x14,x14,x7
li x22,0x00010780B
and x22,x22,x8
li x4,0xFE2A85AC6EDDFE3D
sd x4,0x000000000(x22)
andi x14,x19,0xFFFFFFFFFFFFF994
mulhsu x7,x28,x28
li x22,0x000104004
and x22,x22,x8
li x4,0x64989996BBE87E0B
dcache.civa x22
slli x17,x7,0x01E
sll x23,x21,x19
or x24,x25,x24
li x22,0x000107CFE
and x22,x22,x8
li x4,0x74FE977368D49C84
lhu x5,0x000000040(x22)
slli x12,x1,0x016
mul x20,x27,x30
li x22,0x00010782B
and x22,x22,x8
li x4,0x1BDC3BB4204F5A96
dcache.cva x22
and x9,x24,x15
slti x28,x11,0x0000001F1
mul x18,x19,x21
li x22,0x000107824
and x22,x22,x8
li x4,0x1627A216B2C628F7
dcache.cva x22
lui x27,0xF8B4D
sltiu x17,x31,0xFFFFFFFFFFFFFFA2
li x22,0x00010782C
and x22,x22,x8
li x4,0xE1BE142D2772CA4E
lbu x5,0x000000017(x22)
slli x26,x17,0x07
and x27,x14,x29
xori x26,x17,0x000000658
li x22,0x00010208C
and x22,x22,x8
li x4,0xDF586D0391B2EB29
dcache.cva x22
addi x14,x10,0x00000079C
slt x31,x11,x7
div x29,x24,x25
li x22,0x000102149
and x22,x22,x8
li x4,0x6CFBB956C3068D60
lhu x5,0x00000003C(x22)
rem x15,x17,x24
srai x10,x3,0x02
li x22,0x000107721
and x22,x22,x8
li x4,0xC0B4DCDE019ECB40
lw x5,0x00000001C(x22)
addi x17,x18,0xFFFFFFFFFFFFFF89
sll x12,x17,x24
li x22,0x00010211D
and x22,x22,x8
li x4,0xA71E310DCE119ED4
dcache.cva x22
and x6,x2,x14
sub x6,x4,x2
slti x31,x26,0x0000004E8
li x22,0x000107B17
and x22,x22,x8
li x4,0x96BBB5943B80DD50
dcache.civa x22
sltu x30,x12,x15
xori x9,x11,0x0000001E8
sra x17,x13,x0
li x22,0x00010214D
and x22,x22,x8
li x4,0x3F0ED0D82B031221
sb x4,0x00000002A(x22)
xor x12,x3,x11
srl x12,x17,x12
auipc x23,0xBF7F1
li x22,0x0001079D7
and x22,x22,x8
li x4,0xAD1C63789565EB36
dcache.cva x22
sll x28,x7,x31
sltu x17,x5,x5
srl x19,x14,x0
li x22,0x000107B2A
and x22,x22,x8
li x4,0x80EA50C80845E7DB
ld x5,0x000000038(x22)
srai x12,x7,0x0E
srl x9,x1,x29
ori x19,x24,0xFFFFFFFFFFFFFF1F
li x22,0x0001040FB
and x22,x22,x8
li x4,0x5D9FA8065EA6C306
lb x5,0x000000047(x22)
addi x18,x18,0xFFFFFFFFFFFFFD21
ori x29,x17,0x0000001CD
li x22,0x000107726
and x22,x22,x8
li x4,0xC6C618908FE66FE9
sw x4,0x000000044(x22)
div x30,x24,x31
lui x30,0xB2DBB
sub x28,x15,x15
li x22,0x00010413E
and x22,x22,x8
li x4,0x33F5FEADAF3A1F1B
lbu x5,0x000000015(x22)
or x14,x30,x1
slli x9,x4,0x011
xori x24,x27,0x00000056B
li x22,0x000107955
and x22,x22,x8
li x4,0x82F18F86251A1FBB
dcache.civa x22
and x25,x14,x20
divu x11,x13,x19
mulh x23,x5,x10
li x22,0x0001001C6
and x22,x22,x8
li x4,0x611493BBDA6713A5
lw x5,0x000000048(x22)
mulhu x31,x10,x15
slti x26,x15,0x000000475
slli x18,x2,0x07
li x22,0x00010408B
and x22,x22,x8
li x4,0xDCDC4B5B0F2E12A4
sd x4,0x000000038(x22)
sltu x28,x21,x20
mulh x17,x10,x9
li x22,0x000104041
and x22,x22,x8
li x4,0xCDEC7A9A6C7C3F0C
dcache.cva x22
mul x30,x5,x22
addi x6,x5,0x0000007C7
lui x15,0xBB116
li x22,0x000107890
and x22,x22,x8
li x4,0x27C508E8FB7C6929
dcache.civa x22
lui x10,0x74F2B
sll x26,x17,x5
li x22,0x000107B12
and x22,x22,x8
li x4,0xD1F2112B167CF58E
dcache.civa x22
andi x13,x28,0xFFFFFFFFFFFFFD69
slt x13,x4,x6
li x22,0x00010788C
and x22,x22,x8
li x4,0xB8705B52D8150D6
lh x5,0x00000002E(x22)
divu x31,x27,x7
xori x17,x28,0x0000005A3
li x22,0x000107A3C
and x22,x22,x8
li x4,0x172FAEDC406D30E1
dcache.cva x22
mulhu x13,x7,x8
srl x7,x16,x12
li x22,0x0001021FD
and x22,x22,x8
li x4,0x48337468ABF617FF
lw x5,0x000000044(x22)
sub x27,x15,x26
div x29,x6,x4
sra x9,x27,x23
li x22,0x000107B89
and x22,x22,x8
li x4,0x43EDB31D023737F9
dcache.cva x22
xor x15,x30,x18
div x13,x1,x2
divu x26,x5,x20
li x22,0x000107CEA
and x22,x22,x8
li x4,0xFD6D53CF79B25055
dcache.civa x22
addi x23,x4,0xFFFFFFFFFFFFF960
addi x24,x0,0xFFFFFFFFFFFFFCE2
or x19,x11,x6
li x22,0x0001020CA
and x22,x22,x8
li x4,0xA06B8DFFF5218BC2
lwu x5,0x000000034(x22)
lui x30,0xFE6A0
sltiu x10,x8,0xFFFFFFFFFFFFF82C
add x13,x12,x5
li x22,0x000102045
and x22,x22,x8
li x4,0xBA216D205DF7D615
lhu x5,0x000000026(x22)
div x16,x21,x17
sra x17,x20,x1
li x22,0x000107755
and x22,x22,x8
li x4,0x2D481B04F31856E2
dcache.cva x22
slli x11,x11,0x01
and x19,x0,x6
li x22,0x0001078AD
and x22,x22,x8
li x4,0xDA76E38E96781643
sb x4,0x000000032(x22)
sltiu x6,x19,0xFFFFFFFFFFFFFADF
sra x18,x14,x20
li x22,0x000107CDD
and x22,x22,x8
li x4,0xBC4F74A80F077F0D
dcache.civa x22
andi x24,x31,0xFFFFFFFFFFFFFE92
sra x31,x12,x8
li x22,0x000102164
and x22,x22,x8
li x4,0xF8CD272B06A0CD67
dcache.civa x22
divu x10,x30,x31
srli x15,x14,0x01
li x22,0x0001021C6
and x22,x22,x8
li x4,0xCA41B932CAC77251
sb x4,0x00000000A(x22)
sub x9,x3,x11
divu x7,x19,x26
li x22,0x0001077EA
and x22,x22,x8
li x4,0x958AA5E1CA01CD79
lhu x5,0x000000024(x22)
srl x13,x14,x19
addi x16,x0,0x000000247
mul x7,x6,x23
li x22,0x00010780A
and x22,x22,x8
li x4,0x15CF89767708BDCE
sh x4,0x000000030(x22)
addi x15,x6,0x0000007DE
or x24,x11,x4
li x22,0x000107A77
and x22,x22,x8
li x4,0x58443FFCEAA16326
dcache.civa x22
srl x16,x8,x22
sltu x17,x0,x22
li x22,0x000100131
and x22,x22,x8
li x4,0xC10099570DDE2C83
dcache.cva x22
sra x7,x23,x18
slti x23,x29,0x0000002F8
li x22,0x00010010B
and x22,x22,x8
li x4,0xAEB3802A3F1299D9
sd x4,0x000000038(x22)
mulhsu x28,x6,x19
mulhsu x26,x1,x24
li x22,0x000107C5F
and x22,x22,x8
li x4,0xBE792159B956663E
sd x4,0x000000000(x22)
xori x7,x4,0x000000372
xor x14,x10,x0
mulhu x10,x12,x23
li x22,0x0001040D6
and x22,x22,x8
li x4,0x53D705EA7C06FFC5
sb x4,0x000000036(x22)
xori x30,x31,0x00000040A
xori x13,x30,0x0000000C0
li x22,0x0001000A3
and x22,x22,x8
li x4,0x635F9E7E1266B8D2
sh x4,0x000000008(x22)
andi x31,x1,0xFFFFFFFFFFFFFC3E
sll x30,x22,x26
li x22,0x000107A3A
and x22,x22,x8
li x4,0xCBCD09750FFC7663
dcache.civa x22
xori x7,x22,0xFFFFFFFFFFFFFD51
sll x6,x15,x22
li x22,0x000107C01
and x22,x22,x8
li x4,0x426374F4EF2961F5
dcache.civa x22
srai x15,x23,0x0D
or x17,x24,x0
andi x24,x4,0xFFFFFFFFFFFFF8B9
li x22,0x000107901
and x22,x22,x8
li x4,0xF9CEC82592679F63
lh x5,0x000000016(x22)
andi x26,x8,0xFFFFFFFFFFFFFBD4
xori x23,x3,0xFFFFFFFFFFFFFC9B
li x22,0x000107706
and x22,x22,x8
li x4,0xB9CE1F10D8245270
dcache.civa x22
divu x30,x27,x17
mul x14,x23,x12
li x22,0x0001040C3
and x22,x22,x8
li x4,0xF0D1E21B2C5E9779
lbu x5,0x000000034(x22)
addi x28,x13,0xFFFFFFFFFFFFF993
sub x11,x18,x4
li x22,0x000104177
and x22,x22,x8
li x4,0x12747D4713DF99E6
dcache.cva x22
sltiu x11,x26,0xFFFFFFFFFFFFFE54
add x31,x24,x24
sltiu x15,x27,0xFFFFFFFFFFFFFC5C
li x22,0x0001020C6
and x22,x22,x8
li x4,0x12846021A79DC841
sh x4,0x000000008(x22)
mulh x31,x22,x19
slli x14,x12,0x01B
li x22,0x0001000BC
and x22,x22,x8
li x4,0x301131BA43A7A06E
sh x4,0x000000024(x22)
div x14,x20,x9
xori x14,x2,0xFFFFFFFFFFFFFCDC
li x22,0x000107B30
and x22,x22,x8
li x4,0x705A219070FD5FA
ld x5,0x000000048(x22)
sra x20,x0,x16
div x6,x12,x25
li x22,0x000100192
and x22,x22,x8
li x4,0x64B11D553DC2EE57
lh x5,0x00000004C(x22)
divu x26,x12,x4
sltiu x31,x8,0xFFFFFFFFFFFFFDA2
mulhu x23,x12,x21
li x22,0x000100016
and x22,x22,x8
li x4,0x5EB2F10BAC9D4995
dcache.cva x22
auipc x30,0x1C43D
divu x21,x2,x3
li x22,0x000107B40
and x22,x22,x8
li x4,0x7343302BECDB876D
lb x5,0x00000004D(x22)
and x31,x29,x16
sltiu x11,x26,0xFFFFFFFFFFFFFAE3
sra x25,x27,x12
li x22,0x000104135
and x22,x22,x8
li x4,0xD8546D90A68DF229
ld x5,0x000000038(x22)
ori x10,x8,0xFFFFFFFFFFFFFCD5
mulhu x21,x14,x2
or x29,x25,x7
li x22,0x000107B84
and x22,x22,x8
li x4,0xCDDB185B6A4D8A24
sw x4,0x000000004(x22)
xor x26,x25,x7
xori x9,x2,0xFFFFFFFFFFFFFE48
li x22,0x000100080
and x22,x22,x8
li x4,0xF0FC20617C9E69A2
sd x4,0x000000010(x22)
or x27,x14,x4
sll x6,x2,x15
lui x13,0x6E68B
li x22,0x0001021ED
and x22,x22,x8
li x4,0x84C0C81E7765BA4
dcache.civa x22
rem x25,x20,x14
ori x17,x27,0x00000002B
li x22,0x000107ADF
and x22,x22,x8
li x4,0x6C82E60C8756BF1
sb x4,0x00000003E(x22)
ori x6,x23,0x0000002DB
sra x27,x9,x3
li x22,0x0001040BF
and x22,x22,x8
li x4,0xEA4AB402CA5FAF36
lbu x5,0x000000038(x22)
srl x11,x12,x31
div x9,x12,x31
li x22,0x0001000E3
and x22,x22,x8
li x4,0xF54A9A26F59FCA1F
dcache.civa x22
divu x31,x29,x4
addi x27,x23,0xFFFFFFFFFFFFFFF1
and x14,x13,x24
li x22,0x000100086
and x22,x22,x8
li x4,0xC40B9EB9384D0CA2
dcache.cva x22
slt x12,x13,x5
mul x26,x22,x27
mulhu x7,x5,x17
li x22,0x00010006A
and x22,x22,x8
li x4,0x88C153E0D23AC19B
sw x4,0x00000002C(x22)
sltiu x27,x11,0x000000342
div x27,x31,x11
li x22,0x000104128
and x22,x22,x8
li x4,0xE4DB2087AEAD3798
ld x5,0x000000040(x22)
sltiu x28,x17,0xFFFFFFFFFFFFFFEA
srli x16,x4,0x010
andi x9,x13,0xFFFFFFFFFFFFFACF
li x22,0x0001040A6
and x22,x22,x8
li x4,0xB7287F6B01DF099
sw x4,0x000000004(x22)
xor x7,x25,x20
sub x25,x5,x5
li x22,0x000100127
and x22,x22,x8
li x4,0x49586545AF410958
dcache.civa x22
mulhu x13,x2,x4
sub x11,x10,x29
auipc x23,0x5058B
li x22,0x0001021C8
and x22,x22,x8
li x4,0x777C9444F4047E73
sw x4,0x000000014(x22)
auipc x31,0xE8A6E
sll x14,x14,x8
li x22,0x000107ACA
and x22,x22,x8
li x4,0xC06E30B431201310
sw x4,0x000000020(x22)
addi x24,x22,0xFFFFFFFFFFFFF874
sltiu x12,x13,0x000000726
li x22,0x0001021B7
and x22,x22,x8
li x4,0xC8A6F4B6C2FAC7BB
lhu x5,0x000000008(x22)
srl x24,x28,x31
mulh x31,x11,x23
li x22,0x00010404E
and x22,x22,x8
li x4,0x4D354ED09E1A9979
sw x4,0x000000040(x22)
slti x20,x21,0xFFFFFFFFFFFFFA0A
mulhsu x30,x7,x16
or x15,x9,x7
li x22,0x000107A9B
and x22,x22,x8
li x4,0x7EB83D41C948969B
dcache.cva x22
srli x21,x2,0x06
mul x28,x4,x3
srai x19,x9,0x02
li x22,0x000104010
and x22,x22,x8
li x4,0x6A6B0C3EB963F4F7
sh x4,0x000000000(x22)
sra x25,x25,x0
xor x24,x0,x20
li x22,0x000102016
and x22,x22,x8
li x4,0x96E83B342B52D145
sb x4,0x000000019(x22)
add x29,x6,x29
div x25,x1,x18
li x22,0x0001020C8
and x22,x22,x8
li x4,0x45242151CD970C9C
lw x5,0x000000020(x22)
sub x21,x26,x22
slti x7,x2,0x000000057
andi x11,x13,0x00000015E
li x22,0x0001077FD
and x22,x22,x8
li x4,0x41B2C81B5D63824D
lb x5,0x00000001A(x22)
add x28,x3,x23
divu x9,x30,x15
li x22,0x00010799A
and x22,x22,x8
li x4,0xF557845548CF4F88
dcache.cva x22
sltiu x9,x22,0x0000001EA
mul x29,x27,x11
ori x20,x15,0x00000021B
li x22,0x000107A6D
and x22,x22,x8
li x4,0x43926FACE2335C74
lh x5,0x00000000E(x22)
divu x23,x10,x9
xor x30,x17,x29
wfi
#******this region is added by generator******
Advanced-Microelectronics-Group/OpenC910_Modified | 104,776 | smart_run/tests/cases/ISA/ISA_AMO/ct_lsu_amo_basic_smart.s |
/*Copyright 2019-2021 T-Head Semiconductor Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#************************************************************
#* Target file generated by rangen *
#************************************************************
#* *
#************************************************************
#*************Following is the generated instructions*****************
.text
.align 6
.global main
main:
.include "core_init.h"
DATA_CACHE_CLIV
MM_EN
DATA_CACHE_EN
li x2,0x00090000
li x3,0x000a0000
li x11,0x00090000
li x12,0x000a0008
li x28,0
li x29,3
.global start
start: # first round: cache enabled; second round: cache disabled
addi x28,x28,1
bge x28,x29,success
la x20,fail
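# x20 holds the address of the common fail handler; every failing check
# below branches to a local inner_fail* label and then does "jr x20".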
.global check_amoswap
check_amoswap:
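# Each block in this section follows the same pattern: build a pseudo-random
# doubleword-aligned address, store a known value, issue amoswap.d (plain,
# .aq, .rl or .aqrl), then verify that rd returned the old value and that
# memory now holds the swapped-in value. A filler load or store generated at
# random sits between consecutive blocks.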
ori x11,x11,0x00000061D # generate one address
andi x11,x11,-8 # align it to 8 bytes
li x15,0x78AF40619DEE61FC
sd x15,0x000000000(x11) # store the initial data
li x9,0xC2052BDC086A5C50
amoswap.d x13,x9, (x11) # swap in the new data
bne x15,x13, inner_fail_random_loop1_9 # rd must return the old data
ld x16,(x11) # read back the stored data
bne x16,x9,inner_fail_random_loop1_9 # memory must now hold the new data
j pass_random_loop1_9
.global inner_fail_random_loop1_9
inner_fail_random_loop1_9:
jr x20
.global pass_random_loop1_9
pass_random_loop1_9:
c.sw x14,0x00(x11)
ori x11,x11,0x0000002A1
andi x11,x11,-8
li x15,0xC5A3C9C3D8452FE5
sd x15,0x000000000(x11)
li x9,0x703AD3C65D5F5DD
amoswap.d.rl x13,x9, (x11)
bne x15,x13, inner_fail_random_loop1_8
ld x16,(x11)
bne x16,x9,inner_fail_random_loop1_8
j pass_random_loop1_8
.global inner_fail_random_loop1_8
inner_fail_random_loop1_8:
jr x20
.global pass_random_loop1_8
pass_random_loop1_8:
c.sw x13,0x00(x11)
ori x11,x11,0x000000278
andi x11,x11,-8
li x15,0x4D5CB0AEE2F1A939
sd x15,0x000000000(x11)
li x9,0x28107386258F3519
amoswap.d.aqrl x13,x9, (x11)
bne x15,x13, inner_fail_random_loop1_7
ld x16,(x11)
bne x16,x9,inner_fail_random_loop1_7
j pass_random_loop1_7
.global inner_fail_random_loop1_7
inner_fail_random_loop1_7:
jr x20
.global pass_random_loop1_7
pass_random_loop1_7:
lw x9,0x000000000(x11)
ori x11,x11,0x0000001F5
andi x11,x11,-8
li x15,0xADDA85EF299B4373
sd x15,0x000000000(x11)
li x9,0x4A0E404744C68E0E
amoswap.d.aq x13,x9, (x11)
bne x15,x13, inner_fail_random_loop1_6
ld x16,(x11)
bne x16,x9,inner_fail_random_loop1_6
j pass_random_loop1_6
.global inner_fail_random_loop1_6
inner_fail_random_loop1_6:
jr x20
.global pass_random_loop1_6
pass_random_loop1_6:
sd x14,0x000000000(x11)
ori x11,x11,0x00000034C
andi x11,x11,-8
li x15,0x5A8F5135EF3F81CC
sd x15,0x000000000(x11)
li x9,0x3C85981913C58F08
amoswap.d x13,x9, (x11)
bne x15,x13, inner_fail_random_loop1_5
ld x16,(x11)
bne x16,x9,inner_fail_random_loop1_5
j pass_random_loop1_5
.global inner_fail_random_loop1_5
inner_fail_random_loop1_5:
jr x20
.global pass_random_loop1_5
pass_random_loop1_5:
c.lw x8,0x00(x11)
ori x11,x11,0x00000005F
andi x11,x11,-8
li x15,0xB89F96341F4AD6E
sd x15,0x000000000(x11)
li x9,0xD60E8444FE0AD4C1
amoswap.d.rl x13,x9, (x11)
bne x15,x13, inner_fail_random_loop1_4
ld x16,(x11)
bne x16,x9,inner_fail_random_loop1_4
j pass_random_loop1_4
.global inner_fail_random_loop1_4
inner_fail_random_loop1_4:
jr x20
.global pass_random_loop1_4
pass_random_loop1_4:
lb x10,0x000000000(x11)
ori x11,x11,0x000000628
andi x11,x11,-8
li x15,0x3C3F0749C6387073
sd x15,0x000000000(x11)
li x9,0xD6267670592AE0F4
amoswap.d.rl x13,x9, (x11)
bne x15,x13, inner_fail_random_loop1_3
ld x16,(x11)
bne x16,x9,inner_fail_random_loop1_3
j pass_random_loop1_3
.global inner_fail_random_loop1_3
inner_fail_random_loop1_3:
jr x20
.global pass_random_loop1_3
pass_random_loop1_3:
c.lw x10,0x00(x11)
ori x11,x11,0x0000006E5
andi x11,x11,-8
li x15,0x338CF320664A871B
sd x15,0x000000000(x11)
li x9,0x912806A0AFCA5733
amoswap.d.aq x13,x9, (x11)
bne x15,x13, inner_fail_random_loop1_2
ld x16,(x11)
bne x16,x9,inner_fail_random_loop1_2
j pass_random_loop1_2
.global inner_fail_random_loop1_2
inner_fail_random_loop1_2:
jr x20
.global pass_random_loop1_2
pass_random_loop1_2:
lbu x9,0x000000000(x11)
ori x11,x11,0x000000459
andi x11,x11,-8
li x15,0x6AFD77EC04225BD3
sd x15,0x000000000(x11)
li x9,0xE7C6ECF08325ECA3
amoswap.d.aqrl x13,x9, (x11)
bne x15,x13, inner_fail_random_loop1_1
ld x16,(x11)
bne x16,x9,inner_fail_random_loop1_1
j pass_random_loop1_1
.global inner_fail_random_loop1_1
inner_fail_random_loop1_1:
jr x20
.global pass_random_loop1_1
pass_random_loop1_1:
sd x15,0x000000000(x11)
ori x11,x11,0x0000002AE
andi x11,x11,-8
li x15,0xBB067D0A550FFCA0
sd x15,0x000000000(x11)
li x9,0xE5A06833AC03479B
amoswap.d.rl x13,x9, (x11)
bne x15,x13, inner_fail
ld x16,(x11)
bne x16,x9,inner_fail
j pass
.global inner_fail
inner_fail:
jr x20
.global pass
pass:
ld x10,0x000000000(x11)
.global check_amoadd
check_amoadd:
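# amoadd.d checks: rd must return the old memory value; the expected new
# memory contents are recomputed here with a plain add and compared against
# the ld read-back.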
ori x11,x11,0x00000053F
andi x11,x11,-8
li x15,0xC6BF45091D2D45D4
sd x15,0x000000000(x11)
li x9,0x2EC1B86F76416156
amoadd.d.rl x13,x9, (x11)
bne x15,x13, inner_fail2_random_loop2_9
ld x16,(x11)
add x9,x9,x15
bne x16,x9,inner_fail2_random_loop2_9
j pass2_random_loop2_9
.global inner_fail2_random_loop2_9
inner_fail2_random_loop2_9:
jr x20
.global pass2_random_loop2_9
pass2_random_loop2_9:
lwu x10,0x000000000(x11)
ori x11,x11,0x00000056E
andi x11,x11,-8
li x15,0x29A0A31A3297C7BB
sd x15,0x000000000(x11)
li x9,0xE9422290AA7FE9CA
amoadd.d.aqrl x13,x9, (x11)
bne x15,x13, inner_fail2_random_loop2_8
ld x16,(x11)
add x9,x9,x15
bne x16,x9,inner_fail2_random_loop2_8
j pass2_random_loop2_8
.global inner_fail2_random_loop2_8
inner_fail2_random_loop2_8:
jr x20
.global pass2_random_loop2_8
pass2_random_loop2_8:
lwu x9,0x000000000(x11)
ori x11,x11,0x000000690
andi x11,x11,-8
li x15,0x4B28DDF2389A830B
sd x15,0x000000000(x11)
li x9,0xFEE47A17E34EF60B
amoadd.d.rl x13,x9, (x11)
bne x15,x13, inner_fail2_random_loop2_7
ld x16,(x11)
add x9,x9,x15
bne x16,x9,inner_fail2_random_loop2_7
j pass2_random_loop2_7
.global inner_fail2_random_loop2_7
inner_fail2_random_loop2_7:
jr x20
.global pass2_random_loop2_7
pass2_random_loop2_7:
lb x9,0x000000000(x11)
ori x11,x11,0x0000006C7
andi x11,x11,-8
li x15,0x3A5B7E0B5ED9F554
sd x15,0x000000000(x11)
li x9,0x8AC7EA4C84F5DEFD
amoadd.d x13,x9, (x11)
bne x15,x13, inner_fail2_random_loop2_6
ld x16,(x11)
add x9,x9,x15
bne x16,x9,inner_fail2_random_loop2_6
j pass2_random_loop2_6
.global inner_fail2_random_loop2_6
inner_fail2_random_loop2_6:
jr x20
.global pass2_random_loop2_6
pass2_random_loop2_6:
c.lw x10,0x00(x11)
ori x11,x11,0x000000722
andi x11,x11,-8
li x15,0xF969C3F8B44CF0BA
sd x15,0x000000000(x11)
li x9,0x1D5F037079E9E742
amoadd.d x13,x9, (x11)
bne x15,x13, inner_fail2_random_loop2_5
ld x16,(x11)
add x9,x9,x15
bne x16,x9,inner_fail2_random_loop2_5
j pass2_random_loop2_5
.global inner_fail2_random_loop2_5
inner_fail2_random_loop2_5:
jr x20
.global pass2_random_loop2_5
pass2_random_loop2_5:
c.sw x13,0x00(x11)
ori x11,x11,0x00000078C
andi x11,x11,-8
li x15,0x79003DD03FE621EA
sd x15,0x000000000(x11)
li x9,0x8DA64176ED26D1C9
amoadd.d x13,x9, (x11)
bne x15,x13, inner_fail2_random_loop2_4
ld x16,(x11)
add x9,x9,x15
bne x16,x9,inner_fail2_random_loop2_4
j pass2_random_loop2_4
.global inner_fail2_random_loop2_4
inner_fail2_random_loop2_4:
jr x20
.global pass2_random_loop2_4
pass2_random_loop2_4:
sd x14,0x000000000(x11)
ori x11,x11,0x0000003CD
andi x11,x11,-8
li x15,0x670A8FAD36BA826D
sd x15,0x000000000(x11)
li x9,0xE1D5BDB3B377A115
amoadd.d.rl x13,x9, (x11)
bne x15,x13, inner_fail2_random_loop2_3
ld x16,(x11)
add x9,x9,x15
bne x16,x9,inner_fail2_random_loop2_3
j pass2_random_loop2_3
.global inner_fail2_random_loop2_3
inner_fail2_random_loop2_3:
jr x20
.global pass2_random_loop2_3
pass2_random_loop2_3:
lw x9,0x000000000(x11)
ori x11,x11,0x0000005F3
andi x11,x11,-8
li x15,0x916276CD872D9445
sd x15,0x000000000(x11)
li x9,0x590DD2403A47272
amoadd.d.aqrl x13,x9, (x11)
bne x15,x13, inner_fail2_random_loop2_2
ld x16,(x11)
add x9,x9,x15
bne x16,x9,inner_fail2_random_loop2_2
j pass2_random_loop2_2
.global inner_fail2_random_loop2_2
inner_fail2_random_loop2_2:
jr x20
.global pass2_random_loop2_2
pass2_random_loop2_2:
c.sw x13,0x00(x11)
ori x11,x11,0x0000006A1
andi x11,x11,-8
li x15,0x9D3B2628383BAA5F
sd x15,0x000000000(x11)
li x9,0x451067B9F3F11606
amoadd.d x13,x9, (x11)
bne x15,x13, inner_fail2_random_loop2_1
ld x16,(x11)
add x9,x9,x15
bne x16,x9,inner_fail2_random_loop2_1
j pass2_random_loop2_1
.global inner_fail2_random_loop2_1
inner_fail2_random_loop2_1:
jr x20
.global pass2_random_loop2_1
pass2_random_loop2_1:
ld x10,0x000000000(x11)
ori x11,x11,0x0000003DC
andi x11,x11,-8
li x15,0xC429929DC5529C3B
sd x15,0x000000000(x11)
li x9,0xF8F998C135A55771
amoadd.d.aq x13,x9, (x11)
bne x15,x13, inner_fail2
ld x16,(x11)
add x9,x9,x15
bne x16,x9,inner_fail2
j pass2
.global inner_fail2
inner_fail2:
jr x20
.global pass2
pass2:
ld x9,0x000000000(x11)
.global check_amoor
check_amoor:
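# amoor.d checks: expected memory value is old | operand, recomputed with or.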
ori x11,x11,0x000000766
andi x11,x11,-8
li x15,0xCB6C1E8C04B5D28B
sd x15,0x000000000(x11)
li x9,0x2ED4511BD61624CF
amoor.d.aqrl x13,x9, (x11)
bne x15,x13, inner_fail3_random_loop3_9
ld x16,(x11)
or x9,x9,x15
bne x16,x9,inner_fail3_random_loop3_9
j pass3_random_loop3_9
.global inner_fail3_random_loop3_9
inner_fail3_random_loop3_9:
jr x20
.global pass3_random_loop3_9
pass3_random_loop3_9:
lwu x9,0x000000000(x11)
ori x11,x11,0x000000388
andi x11,x11,-8
li x15,0x4244275623B017A7
sd x15,0x000000000(x11)
li x9,0xF38D2178CDC065A4
amoor.d.aq x13,x9, (x11)
bne x15,x13, inner_fail3_random_loop3_8
ld x16,(x11)
or x9,x9,x15
bne x16,x9,inner_fail3_random_loop3_8
j pass3_random_loop3_8
.global inner_fail3_random_loop3_8
inner_fail3_random_loop3_8:
jr x20
.global pass3_random_loop3_8
pass3_random_loop3_8:
lw x8,0x000000000(x11)
ori x11,x11,0x0000004F5
andi x11,x11,-8
li x15,0x1E17CD0E52016701
sd x15,0x000000000(x11)
li x9,0xD824689BF6A9C1B1
amoor.d.aqrl x13,x9, (x11)
bne x15,x13, inner_fail3_random_loop3_7
ld x16,(x11)
or x9,x9,x15
bne x16,x9,inner_fail3_random_loop3_7
j pass3_random_loop3_7
.global inner_fail3_random_loop3_7
inner_fail3_random_loop3_7:
jr x20
.global pass3_random_loop3_7
pass3_random_loop3_7:
sw x14,0x000000000(x11)
ori x11,x11,0x0000001E9
andi x11,x11,-8
li x15,0x1A715ADF612AB8FE
sd x15,0x000000000(x11)
li x9,0xBA09000513328E47
amoor.d.aq x13,x9, (x11)
bne x15,x13, inner_fail3_random_loop3_6
ld x16,(x11)
or x9,x9,x15
bne x16,x9,inner_fail3_random_loop3_6
j pass3_random_loop3_6
.global inner_fail3_random_loop3_6
inner_fail3_random_loop3_6:
jr x20
.global pass3_random_loop3_6
pass3_random_loop3_6:
lbu x9,0x000000000(x11)
ori x11,x11,0x000000576
andi x11,x11,-8
li x15,0x89F1B514830521C0
sd x15,0x000000000(x11)
li x9,0xC58C2C2AAB1D4FD2
amoor.d x13,x9, (x11)
bne x15,x13, inner_fail3_random_loop3_5
ld x16,(x11)
or x9,x9,x15
bne x16,x9,inner_fail3_random_loop3_5
j pass3_random_loop3_5
.global inner_fail3_random_loop3_5
inner_fail3_random_loop3_5:
jr x20
.global pass3_random_loop3_5
pass3_random_loop3_5:
lb x10,0x000000000(x11)
ori x11,x11,0x00000026B
andi x11,x11,-8
li x15,0xA2A49509BDB598A4
sd x15,0x000000000(x11)
li x9,0x6B65BFFF85A94B1A
amoor.d.aq x13,x9, (x11)
bne x15,x13, inner_fail3_random_loop3_4
ld x16,(x11)
or x9,x9,x15
bne x16,x9,inner_fail3_random_loop3_4
j pass3_random_loop3_4
.global inner_fail3_random_loop3_4
inner_fail3_random_loop3_4:
jr x20
.global pass3_random_loop3_4
pass3_random_loop3_4:
lwu x9,0x000000000(x11)
ori x11,x11,0x00000030F
andi x11,x11,-8
li x15,0x3162F6255F987E81
sd x15,0x000000000(x11)
li x9,0x65383C30468CC7AC
amoor.d.aqrl x13,x9, (x11)
bne x15,x13, inner_fail3_random_loop3_3
ld x16,(x11)
or x9,x9,x15
bne x16,x9,inner_fail3_random_loop3_3
j pass3_random_loop3_3
.global inner_fail3_random_loop3_3
inner_fail3_random_loop3_3:
jr x20
.global pass3_random_loop3_3
pass3_random_loop3_3:
lwu x9,0x000000000(x11)
ori x11,x11,0x000000596
andi x11,x11,-8
li x15,0xAA0074C795944BE2
sd x15,0x000000000(x11)
li x9,0x14D9328DBE280A62
amoor.d.aq x13,x9, (x11)
bne x15,x13, inner_fail3_random_loop3_2
ld x16,(x11)
or x9,x9,x15
bne x16,x9,inner_fail3_random_loop3_2
j pass3_random_loop3_2
.global inner_fail3_random_loop3_2
inner_fail3_random_loop3_2:
jr x20
.global pass3_random_loop3_2
pass3_random_loop3_2:
lb x10,0x000000000(x11)
ori x11,x11,0x00000035D
andi x11,x11,-8
li x15,0xE932B4155BE2E318
sd x15,0x000000000(x11)
li x9,0x741E2C5F13E7C25
amoor.d x13,x9, (x11)
bne x15,x13, inner_fail3_random_loop3_1
ld x16,(x11)
or x9,x9,x15
bne x16,x9,inner_fail3_random_loop3_1
j pass3_random_loop3_1
.global inner_fail3_random_loop3_1
inner_fail3_random_loop3_1:
jr x20
.global pass3_random_loop3_1
pass3_random_loop3_1:
ld x8,0x000000000(x11)
ori x11,x11,0x0000005DF
andi x11,x11,-8
li x15,0x3E9E72802E40028B
sd x15,0x000000000(x11)
li x9,0xEC72E0E7E007599D
amoor.d x13,x9, (x11)
bne x15,x13, inner_fail3
ld x16,(x11)
or x9,x9,x15
bne x16,x9,inner_fail3
j pass3
.global inner_fail3
inner_fail3:
jr x20
.global pass3
pass3:
lhu x10,0x000000000(x11)
.global check_amoand
check_amoand:
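# amoand.d checks: expected memory value is old & operand, recomputed with and.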
ori x11,x11,0x000000601
andi x11,x11,-8
li x15,0xAE33BC078D1E425F
sd x15,0x000000000(x11)
li x9,0x182D2549966E579B
amoand.d.rl x13,x9, (x11)
bne x15,x13, inner_fail4_random_loop4_9
ld x16,(x11)
and x9,x9,x15
bne x16,x9,inner_fail4_random_loop4_9
j pass4_random_loop4_9
.global inner_fail4_random_loop4_9
inner_fail4_random_loop4_9:
jr x20
.global pass4_random_loop4_9
pass4_random_loop4_9:
c.sw x14,0x00(x11)
ori x11,x11,0x000000713
andi x11,x11,-8
li x15,0xC246D56064563360
sd x15,0x000000000(x11)
li x9,0xA5FAB1965399AFD2
amoand.d.aq x13,x9, (x11)
bne x15,x13, inner_fail4_random_loop4_8
ld x16,(x11)
and x9,x9,x15
bne x16,x9,inner_fail4_random_loop4_8
j pass4_random_loop4_8
.global inner_fail4_random_loop4_8
inner_fail4_random_loop4_8:
jr x20
.global pass4_random_loop4_8
pass4_random_loop4_8:
c.sw x14,0x00(x11)
ori x11,x11,0x000000080
andi x11,x11,-8
li x15,0x62D9B0AFE55753E9
sd x15,0x000000000(x11)
li x9,0xEF4ED2AE11DD58FB
amoand.d.rl x13,x9, (x11)
bne x15,x13, inner_fail4_random_loop4_7
ld x16,(x11)
and x9,x9,x15
bne x16,x9,inner_fail4_random_loop4_7
j pass4_random_loop4_7
.global inner_fail4_random_loop4_7
inner_fail4_random_loop4_7:
jr x20
.global pass4_random_loop4_7
pass4_random_loop4_7:
sw x14,0x000000000(x11)
ori x11,x11,0x0000002E3
andi x11,x11,-8
li x15,0x450BF1B13F1D3C8A
sd x15,0x000000000(x11)
li x9,0xF8B9D4789C0FDA40
amoand.d.rl x13,x9, (x11)
bne x15,x13, inner_fail4_random_loop4_6
ld x16,(x11)
and x9,x9,x15
bne x16,x9,inner_fail4_random_loop4_6
j pass4_random_loop4_6
.global inner_fail4_random_loop4_6
inner_fail4_random_loop4_6:
jr x20
.global pass4_random_loop4_6
pass4_random_loop4_6:
lh x10,0x000000000(x11)
ori x11,x11,0x0000002B8
andi x11,x11,-8
li x15,0xDCBB2A46E3ECBF8D
sd x15,0x000000000(x11)
li x9,0xBBF7B79D703225F8
amoand.d x13,x9, (x11)
bne x15,x13, inner_fail4_random_loop4_5
ld x16,(x11)
and x9,x9,x15
bne x16,x9,inner_fail4_random_loop4_5
j pass4_random_loop4_5
.global inner_fail4_random_loop4_5
inner_fail4_random_loop4_5:
jr x20
.global pass4_random_loop4_5
pass4_random_loop4_5:
lbu x10,0x000000000(x11)
ori x11,x11,0x0000004C3
andi x11,x11,-8
li x15,0x38A4FD28FFFE18D5
sd x15,0x000000000(x11)
li x9,0x31D49B9AACB93D8
amoand.d.aq x13,x9, (x11)
bne x15,x13, inner_fail4_random_loop4_4
ld x16,(x11)
and x9,x9,x15
bne x16,x9,inner_fail4_random_loop4_4
j pass4_random_loop4_4
.global inner_fail4_random_loop4_4
inner_fail4_random_loop4_4:
jr x20
.global pass4_random_loop4_4
pass4_random_loop4_4:
c.lw x10,0x00(x11)
ori x11,x11,0x000000515
andi x11,x11,-8
li x15,0xC43E60C8423165B5
sd x15,0x000000000(x11)
li x9,0x6D928223690AC806
amoand.d x13,x9, (x11)
bne x15,x13, inner_fail4_random_loop4_3
ld x16,(x11)
and x9,x9,x15
bne x16,x9,inner_fail4_random_loop4_3
j pass4_random_loop4_3
.global inner_fail4_random_loop4_3
inner_fail4_random_loop4_3:
jr x20
.global pass4_random_loop4_3
pass4_random_loop4_3:
lw x10,0x000000000(x11)
ori x11,x11,0x00000045B
andi x11,x11,-8
li x15,0x51A4077B35A7DB4D
sd x15,0x000000000(x11)
li x9,0xE36DAEE180FD42E6
amoand.d.rl x13,x9, (x11)
bne x15,x13, inner_fail4_random_loop4_2
ld x16,(x11)
and x9,x9,x15
bne x16,x9,inner_fail4_random_loop4_2
j pass4_random_loop4_2
.global inner_fail4_random_loop4_2
inner_fail4_random_loop4_2:
jr x20
.global pass4_random_loop4_2
pass4_random_loop4_2:
c.sw x13,0x00(x11)
ori x11,x11,0x000000483
andi x11,x11,-8
li x15,0x406C73CFC12B98BD
sd x15,0x000000000(x11)
li x9,0x6AA62963FF909E8A
amoand.d x13,x9, (x11)
bne x15,x13, inner_fail4_random_loop4_1
ld x16,(x11)
and x9,x9,x15
bne x16,x9,inner_fail4_random_loop4_1
j pass4_random_loop4_1
.global inner_fail4_random_loop4_1
inner_fail4_random_loop4_1:
jr x20
.global pass4_random_loop4_1
pass4_random_loop4_1:
lwu x8,0x000000000(x11)
ori x11,x11,0x00000042D
andi x11,x11,-8
li x15,0x1FA9B06FE3028067
sd x15,0x000000000(x11)
li x9,0xB59520CD9849EC8D
amoand.d x13,x9, (x11)
bne x15,x13, inner_fail4
ld x16,(x11)
and x9,x9,x15
bne x16,x9,inner_fail4
j pass4
.global inner_fail4
inner_fail4:
jr x20
.global pass4
pass4:
lbu x10,0x000000000(x11)
.global check_amoxor
check_amoxor:
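# amoxor.d checks: expected memory value is old ^ operand, recomputed with xor.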
ori x11,x11,0x00000055A
andi x11,x11,-8
li x15,0xC7873E15DD6F2295
sd x15,0x000000000(x11)
li x9,0x7D16B9D308CA816E
amoxor.d x13,x9, (x11)
bne x15,x13, inner_fail5_random_loop5_9
ld x16,(x11)
xor x9,x9,x15
bne x16,x9,inner_fail5_random_loop5_9
j pass5_random_loop5_9
.global inner_fail5_random_loop5_9
inner_fail5_random_loop5_9:
jr x20
.global pass5_random_loop5_9
pass5_random_loop5_9:
ld x10,0x000000000(x11)
ori x11,x11,0x00000026E
andi x11,x11,-8
li x15,0xDB7B5DF7845DA128
sd x15,0x000000000(x11)
li x9,0x7C00BCCA7882BB7
amoxor.d.rl x13,x9, (x11)
bne x15,x13, inner_fail5_random_loop5_8
ld x16,(x11)
xor x9,x9,x15
bne x16,x9,inner_fail5_random_loop5_8
j pass5_random_loop5_8
.global inner_fail5_random_loop5_8
inner_fail5_random_loop5_8:
jr x20
.global pass5_random_loop5_8
pass5_random_loop5_8:
lwu x8,0x000000000(x11)
ori x11,x11,0x00000002F
andi x11,x11,-8
li x15,0xD412C71E1A3167FE
sd x15,0x000000000(x11)
li x9,0x26690CE1885E5962
amoxor.d.rl x13,x9, (x11)
bne x15,x13, inner_fail5_random_loop5_7
ld x16,(x11)
xor x9,x9,x15
bne x16,x9,inner_fail5_random_loop5_7
j pass5_random_loop5_7
.global inner_fail5_random_loop5_7
inner_fail5_random_loop5_7:
jr x20
.global pass5_random_loop5_7
pass5_random_loop5_7:
lbu x9,0x000000000(x11)
ori x11,x11,0x000000387
andi x11,x11,-8
li x15,0xFF76C217B467F829
sd x15,0x000000000(x11)
li x9,0x76171DCE3A9B9094
amoxor.d.aq x13,x9, (x11)
bne x15,x13, inner_fail5_random_loop5_6
ld x16,(x11)
xor x9,x9,x15
bne x16,x9,inner_fail5_random_loop5_6
j pass5_random_loop5_6
.global inner_fail5_random_loop5_6
inner_fail5_random_loop5_6:
jr x20
.global pass5_random_loop5_6
pass5_random_loop5_6:
c.lw x9,0x00(x11)
ori x11,x11,0x000000163
andi x11,x11,-8
li x15,0x84BE5B443F5D8E78
sd x15,0x000000000(x11)
li x9,0xC078CCC3A78C517E
amoxor.d x13,x9, (x11)
bne x15,x13, inner_fail5_random_loop5_5
ld x16,(x11)
xor x9,x9,x15
bne x16,x9,inner_fail5_random_loop5_5
j pass5_random_loop5_5
.global inner_fail5_random_loop5_5
inner_fail5_random_loop5_5:
jr x20
.global pass5_random_loop5_5
pass5_random_loop5_5:
lh x9,0x000000000(x11)
ori x11,x11,0x000000128
andi x11,x11,-8
li x15,0xFF7E6565A5C1C225
sd x15,0x000000000(x11)
li x9,0xA4404923D9C162A8
amoxor.d.aqrl x13,x9, (x11)
bne x15,x13, inner_fail5_random_loop5_4
ld x16,(x11)
xor x9,x9,x15
bne x16,x9,inner_fail5_random_loop5_4
j pass5_random_loop5_4
.global inner_fail5_random_loop5_4
inner_fail5_random_loop5_4:
jr x20
.global pass5_random_loop5_4
pass5_random_loop5_4:
lbu x8,0x000000000(x11)
ori x11,x11,0x000000039
andi x11,x11,-8
li x15,0x40127DB3C48C2F23
sd x15,0x000000000(x11)
li x9,0xFD343D2C0B5F30A
amoxor.d.rl x13,x9, (x11)
bne x15,x13, inner_fail5_random_loop5_3
ld x16,(x11)
xor x9,x9,x15
bne x16,x9,inner_fail5_random_loop5_3
j pass5_random_loop5_3
.global inner_fail5_random_loop5_3
inner_fail5_random_loop5_3:
jr x20
.global pass5_random_loop5_3
pass5_random_loop5_3:
lwu x8,0x000000000(x11)
ori x11,x11,0x0000000AA
andi x11,x11,-8
li x15,0xFF8AB19835DC3364
sd x15,0x000000000(x11)
li x9,0xB8DA905408D87974
amoxor.d x13,x9, (x11)
bne x15,x13, inner_fail5_random_loop5_2
ld x16,(x11)
xor x9,x9,x15
bne x16,x9,inner_fail5_random_loop5_2
j pass5_random_loop5_2
.global inner_fail5_random_loop5_2
inner_fail5_random_loop5_2:
jr x20
.global pass5_random_loop5_2
pass5_random_loop5_2:
c.sw x13,0x00(x11)
ori x11,x11,0x000000678
andi x11,x11,-8
li x15,0xC8C65EDC9E016CC4
sd x15,0x000000000(x11)
li x9,0xCD79D888EEC60C40
amoxor.d.rl x13,x9, (x11)
bne x15,x13, inner_fail5_random_loop5_1
ld x16,(x11)
xor x9,x9,x15
bne x16,x9,inner_fail5_random_loop5_1
j pass5_random_loop5_1
.global inner_fail5_random_loop5_1
inner_fail5_random_loop5_1:
jr x20
.global pass5_random_loop5_1
pass5_random_loop5_1:
lwu x10,0x000000000(x11)
ori x11,x11,0x0000006D3
andi x11,x11,-8
li x15,0xE9C9F332E42CD424
sd x15,0x000000000(x11)
li x9,0xE0F307071C988575
amoxor.d.aqrl x13,x9, (x11)
bne x15,x13, inner_fail5
ld x16,(x11)
xor x9,x9,x15
bne x16,x9,inner_fail5
j pass5
.global inner_fail5
inner_fail5:
jr x20
.global pass5
pass5:
sw x13,0x000000000(x11)
.global check_amomin
check_amomin:
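# amomin.d checks: rd must return the old value; the expected memory value is
# the signed minimum of the old contents and the operand, recomputed below
# with a blt/mv sequence.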
ori x11,x11,0x0000002A4
andi x11,x11,-8
li x15,0x41DC8F62892370EB
sd x15,0x000000000(x11)
li x9,0x4A79AFB3F9CC6497
amomin.d.aqrl x13,x9, (x11)
bne x15,x13, inner_fail6_random_loop6_9
ld x16,(x11)
blt x9,x13,mem_val_random_loop6_9
mv x9,x13
.global mem_val_random_loop6_9
mem_val_random_loop6_9:
mv x9,x9
bne x16,x9,inner_fail6_random_loop6_9
j pass6_random_loop6_9
.global inner_fail6_random_loop6_9
inner_fail6_random_loop6_9:
jr x20
.global pass6_random_loop6_9
pass6_random_loop6_9:
ld x9,0x000000000(x11)
ori x11,x11,0x0000001C3
andi x11,x11,-8
li x15,0x3B45DED6BEDAF9EF
sd x15,0x000000000(x11)
li x9,0xA5679FADEF32B65A
amomin.d x13,x9, (x11)
bne x15,x13, inner_fail6_random_loop6_8
ld x16,(x11)
blt x9,x13,mem_val_random_loop6_8
mv x9,x13
.global mem_val_random_loop6_8
mem_val_random_loop6_8:
mv x9,x9
bne x16,x9,inner_fail6_random_loop6_8
j pass6_random_loop6_8
.global inner_fail6_random_loop6_8
inner_fail6_random_loop6_8:
jr x20
.global pass6_random_loop6_8
pass6_random_loop6_8:
ld x10,0x000000000(x11)
ori x11,x11,0x00000047C
andi x11,x11,-8
li x15,0x24C2FBDE9AB67C65
sd x15,0x000000000(x11)
li x9,0xEC8DFB1144879031
amomin.d.aqrl x13,x9, (x11)
bne x15,x13, inner_fail6_random_loop6_7
ld x16,(x11)
blt x9,x13,mem_val_random_loop6_7
mv x9,x13
.global mem_val_random_loop6_7
mem_val_random_loop6_7:
mv x9,x9
bne x16,x9,inner_fail6_random_loop6_7
j pass6_random_loop6_7
.global inner_fail6_random_loop6_7
inner_fail6_random_loop6_7:
jr x20
.global pass6_random_loop6_7
pass6_random_loop6_7:
lh x8,0x000000000(x11)
ori x11,x11,0x000000225
andi x11,x11,-8
li x15,0x658FD6F0F0BFA5C8
sd x15,0x000000000(x11)
li x9,0x38E8FAC67DF62EC8
amomin.d.aqrl x13,x9, (x11)
bne x15,x13, inner_fail6_random_loop6_6
ld x16,(x11)
blt x9,x13,mem_val_random_loop6_6
mv x9,x13
.global mem_val_random_loop6_6
mem_val_random_loop6_6:
mv x9,x9
bne x16,x9,inner_fail6_random_loop6_6
j pass6_random_loop6_6
.global inner_fail6_random_loop6_6
inner_fail6_random_loop6_6:
jr x20
.global pass6_random_loop6_6
pass6_random_loop6_6:
c.sw x15,0x00(x11)
ori x11,x11,0x000000545
andi x11,x11,-8
li x15,0xFD367B90EA64CB7B
sd x15,0x000000000(x11)
li x9,0xFC753793B1B7D0CF
amomin.d x13,x9, (x11)
bne x15,x13, inner_fail6_random_loop6_5
ld x16,(x11)
blt x9,x13,mem_val_random_loop6_5
mv x9,x13
.global mem_val_random_loop6_5
mem_val_random_loop6_5:
mv x9,x9
bne x16,x9,inner_fail6_random_loop6_5
j pass6_random_loop6_5
.global inner_fail6_random_loop6_5
inner_fail6_random_loop6_5:
jr x20
.global pass6_random_loop6_5
pass6_random_loop6_5:
c.sw x13,0x00(x11)
ori x11,x11,0x000000018
andi x11,x11,-8
li x15,0x3D6582457FCDBED9
sd x15,0x000000000(x11)
li x9,0x617C5B8B8AD1C3AD
amomin.d.aq x13,x9, (x11)
bne x15,x13, inner_fail6_random_loop6_4
ld x16,(x11)
blt x9,x13,mem_val_random_loop6_4
mv x9,x13
.global mem_val_random_loop6_4
mem_val_random_loop6_4:
mv x9,x9
bne x16,x9,inner_fail6_random_loop6_4
j pass6_random_loop6_4
.global inner_fail6_random_loop6_4
inner_fail6_random_loop6_4:
jr x20
.global pass6_random_loop6_4
pass6_random_loop6_4:
sw x13,0x000000000(x11)
ori x11,x11,0x0000001FE
andi x11,x11,-8
li x15,0xF10FC7A4F0A7BCFF
sd x15,0x000000000(x11)
li x9,0x6BA4EBE03D3E3C43
amomin.d.aq x13,x9, (x11)
bne x15,x13, inner_fail6_random_loop6_3
ld x16,(x11)
blt x9,x13,mem_val_random_loop6_3
mv x9,x13
.global mem_val_random_loop6_3
mem_val_random_loop6_3:
mv x9,x9
bne x16,x9,inner_fail6_random_loop6_3
j pass6_random_loop6_3
.global inner_fail6_random_loop6_3
inner_fail6_random_loop6_3:
jr x20
.global pass6_random_loop6_3
pass6_random_loop6_3:
lb x10,0x000000000(x11)
ori x11,x11,0x000000799
andi x11,x11,-8
li x15,0x3310F34CC9638523
sd x15,0x000000000(x11)
li x9,0x8123D02C3668211D
amomin.d.aqrl x13,x9, (x11)
bne x15,x13, inner_fail6_random_loop6_2
ld x16,(x11)
blt x9,x13,mem_val_random_loop6_2
mv x9,x13
.global mem_val_random_loop6_2
mem_val_random_loop6_2:
mv x9,x9
bne x16,x9,inner_fail6_random_loop6_2
j pass6_random_loop6_2
.global inner_fail6_random_loop6_2
inner_fail6_random_loop6_2:
jr x20
.global pass6_random_loop6_2
pass6_random_loop6_2:
lw x9,0x000000000(x11)
ori x11,x11,0x0000006AE
andi x11,x11,-8
li x15,0x842E00C281B5A884
sd x15,0x000000000(x11)
li x9,0x19328E6AAB310324
amomin.d.aq x13,x9, (x11)
bne x15,x13, inner_fail6_random_loop6_1
ld x16,(x11)
blt x9,x13,mem_val_random_loop6_1
mv x9,x13
.global mem_val_random_loop6_1
mem_val_random_loop6_1:
mv x9,x9
bne x16,x9,inner_fail6_random_loop6_1
j pass6_random_loop6_1
.global inner_fail6_random_loop6_1
inner_fail6_random_loop6_1:
jr x20
.global pass6_random_loop6_1
pass6_random_loop6_1:
c.sw x13,0x00(x11)
ori x11,x11,0x0000001AD
andi x11,x11,-8
li x15,0xB1C7FFCA6C467682
sd x15,0x000000000(x11)
li x9,0xE1172D0DD428A2CF
amomin.d.aq x13,x9, (x11)
bne x15,x13, inner_fail6
ld x16,(x11)
blt x9,x13,mem_val
mv x9,x13
.global mem_val
mem_val:
mv x9,x9
bne x16,x9,inner_fail6
j pass6
.global inner_fail6
inner_fail6:
jr x20
.global pass6
pass6:
sw x15,0x000000000(x11)
.global check_amominu
check_amominu:
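# amominu.d checks: same pattern as amomin.d, but the expected value is the
# unsigned minimum (bltu instead of blt).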
ori x11,x11,0x00000018C
andi x11,x11,-8
li x15,0x8E54C34924A80DF
sd x15,0x000000000(x11)
li x9,0xF5D02C2FF442F8F
amominu.d.rl x13,x9, (x11)
bne x15,x13, inner_fail7_random_loop7_9
ld x16,(x11)
bltu x9,x13,mem_val2_random_loop7_9
mv x9,x13
.global mem_val2_random_loop7_9
mem_val2_random_loop7_9:
mv x9,x9
bne x16,x9,inner_fail7_random_loop7_9
j pass7_random_loop7_9
.global inner_fail7_random_loop7_9
inner_fail7_random_loop7_9:
jr x20
.global pass7_random_loop7_9
pass7_random_loop7_9:
lw x8,0x000000000(x11)
ori x11,x11,0x000000339
andi x11,x11,-8
li x15,0xE8420FD2A09CE07C
sd x15,0x000000000(x11)
li x9,0x2E472959501FE808
amominu.d.aq x13,x9, (x11)
bne x15,x13, inner_fail7_random_loop7_8
ld x16,(x11)
bltu x9,x13,mem_val2_random_loop7_8
mv x9,x13
.global mem_val2_random_loop7_8
mem_val2_random_loop7_8:
mv x9,x9
bne x16,x9,inner_fail7_random_loop7_8
j pass7_random_loop7_8
.global inner_fail7_random_loop7_8
inner_fail7_random_loop7_8:
jr x20
.global pass7_random_loop7_8
pass7_random_loop7_8:
c.sw x14,0x00(x11)
ori x11,x11,0x000000584
andi x11,x11,-8
li x15,0xCF2EF74C4822FD92
sd x15,0x000000000(x11)
li x9,0x78BAD7D29EEB6B51
amominu.d x13,x9, (x11)
bne x15,x13, inner_fail7_random_loop7_7
ld x16,(x11)
bltu x9,x13,mem_val2_random_loop7_7
mv x9,x13
.global mem_val2_random_loop7_7
mem_val2_random_loop7_7:
mv x9,x9
bne x16,x9,inner_fail7_random_loop7_7
j pass7_random_loop7_7
.global inner_fail7_random_loop7_7
inner_fail7_random_loop7_7:
jr x20
.global pass7_random_loop7_7
pass7_random_loop7_7:
lwu x9,0x000000000(x11)
ori x11,x11,0x00000051F
andi x11,x11,-8
li x15,0x7B415528583EAB55
sd x15,0x000000000(x11)
li x9,0xEE69A7DBD2D04768
amominu.d.rl x13,x9, (x11)
bne x15,x13, inner_fail7_random_loop7_6
ld x16,(x11)
bltu x9,x13,mem_val2_random_loop7_6
mv x9,x13
.global mem_val2_random_loop7_6
mem_val2_random_loop7_6:
mv x9,x9
bne x16,x9,inner_fail7_random_loop7_6
j pass7_random_loop7_6
.global inner_fail7_random_loop7_6
inner_fail7_random_loop7_6:
jr x20
.global pass7_random_loop7_6
pass7_random_loop7_6:
lh x9,0x000000000(x11)
ori x11,x11,0x00000025F
andi x11,x11,-8
li x15,0xE388268E8BE11D98
sd x15,0x000000000(x11)
li x9,0xFAEC565940E38C0
amominu.d.aq x13,x9, (x11)
bne x15,x13, inner_fail7_random_loop7_5
ld x16,(x11)
bltu x9,x13,mem_val2_random_loop7_5
mv x9,x13
.global mem_val2_random_loop7_5
mem_val2_random_loop7_5:
mv x9,x9
bne x16,x9,inner_fail7_random_loop7_5
j pass7_random_loop7_5
.global inner_fail7_random_loop7_5
inner_fail7_random_loop7_5:
jr x20
.global pass7_random_loop7_5
pass7_random_loop7_5:
lw x8,0x000000000(x11)
ori x11,x11,0x00000026E
andi x11,x11,-8
li x15,0x1554EA4B3E9B2124
sd x15,0x000000000(x11)
li x9,0x7B9BEFBBF0F0D1EC
amominu.d x13,x9, (x11)
bne x15,x13, inner_fail7_random_loop7_4
ld x16,(x11)
bltu x9,x13,mem_val2_random_loop7_4
mv x9,x13
.global mem_val2_random_loop7_4
mem_val2_random_loop7_4:
mv x9,x9
bne x16,x9,inner_fail7_random_loop7_4
j pass7_random_loop7_4
.global inner_fail7_random_loop7_4
inner_fail7_random_loop7_4:
jr x20
.global pass7_random_loop7_4
pass7_random_loop7_4:
sw x15,0x000000000(x11)
ori x11,x11,0x0000005CF
andi x11,x11,-8
li x15,0xF5BEC1E8FA8A3A08
sd x15,0x000000000(x11)
li x9,0xB977979410A498A1
amominu.d x13,x9, (x11)
bne x15,x13, inner_fail7_random_loop7_3
ld x16,(x11)
bltu x9,x13,mem_val2_random_loop7_3
mv x9,x13
.global mem_val2_random_loop7_3
mem_val2_random_loop7_3:
mv x9,x9
bne x16,x9,inner_fail7_random_loop7_3
j pass7_random_loop7_3
.global inner_fail7_random_loop7_3
inner_fail7_random_loop7_3:
jr x20
.global pass7_random_loop7_3
pass7_random_loop7_3:
c.sw x15,0x00(x11)
ori x11,x11,0x0000005F9
andi x11,x11,-8
li x15,0x672342E8F84626D1
sd x15,0x000000000(x11)
li x9,0xA54C78A7083D2391
amominu.d x13,x9, (x11)
bne x15,x13, inner_fail7_random_loop7_2
ld x16,(x11)
bltu x9,x13,mem_val2_random_loop7_2
mv x9,x13
.global mem_val2_random_loop7_2
mem_val2_random_loop7_2:
mv x9,x9
bne x16,x9,inner_fail7_random_loop7_2
j pass7_random_loop7_2
.global inner_fail7_random_loop7_2
inner_fail7_random_loop7_2:
jr x20
.global pass7_random_loop7_2
pass7_random_loop7_2:
c.lw x9,0x00(x11)
ori x11,x11,0x0000001FC
andi x11,x11,-8
li x15,0xE4A19E187258EE6
sd x15,0x000000000(x11)
li x9,0x762F8F38322185F8
amominu.d x13,x9, (x11)
bne x15,x13, inner_fail7_random_loop7_1
ld x16,(x11)
bltu x9,x13,mem_val2_random_loop7_1
mv x9,x13
.global mem_val2_random_loop7_1
mem_val2_random_loop7_1:
mv x9,x9
bne x16,x9,inner_fail7_random_loop7_1
j pass7_random_loop7_1
.global inner_fail7_random_loop7_1
inner_fail7_random_loop7_1:
jr x20
.global pass7_random_loop7_1
pass7_random_loop7_1:
lwu x9,0x000000000(x11)
ori x11,x11,0x00000025F
andi x11,x11,-8
li x15,0x5E2BD68694EC2339
sd x15,0x000000000(x11)
li x9,0x1BCA8635DBF29CEB
amominu.d.aqrl x13,x9, (x11)
bne x15,x13, inner_fail7
ld x16,(x11)
bltu x9,x13,mem_val2
mv x9,x13
.global mem_val2
mem_val2:
mv x9,x9
bne x16,x9,inner_fail7
j pass7
.global inner_fail7
inner_fail7:
jr x20
.global pass7
pass7:
lw x8,0x000000000(x11)
.global check_amomax
check_amomax:
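# amomax.d checks: the expected memory value is the signed maximum,
# recomputed with a bge/mv sequence.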
ori x11,x11,0x0000003B5
andi x11,x11,-8
li x15,0xECE9517564D0AD53
sd x15,0x000000000(x11)
li x9,0x1D1912810ACE2772
amomax.d.aqrl x13,x9, (x11)
bne x15,x13, inner_fail8_random_loop8_9
ld x16,(x11)
bge x9,x13,mem_val3_random_loop8_9
mv x9,x13
.global mem_val3_random_loop8_9
mem_val3_random_loop8_9:
mv x9,x9
bne x16,x9,inner_fail8_random_loop8_9
j pass8_random_loop8_9
.global inner_fail8_random_loop8_9
inner_fail8_random_loop8_9:
jr x20
.global pass8_random_loop8_9
pass8_random_loop8_9:
lb x10,0x000000000(x11)
ori x11,x11,0x000000346
andi x11,x11,-8
li x15,0x1EA88ACF2C9773EC
sd x15,0x000000000(x11)
li x9,0xAF3E8094312716A6
amomax.d.aqrl x13,x9, (x11)
bne x15,x13, inner_fail8_random_loop8_8
ld x16,(x11)
bge x9,x13,mem_val3_random_loop8_8
mv x9,x13
.global mem_val3_random_loop8_8
mem_val3_random_loop8_8:
mv x9,x9
bne x16,x9,inner_fail8_random_loop8_8
j pass8_random_loop8_8
.global inner_fail8_random_loop8_8
inner_fail8_random_loop8_8:
jr x20
.global pass8_random_loop8_8
pass8_random_loop8_8:
ld x10,0x000000000(x11)
ori x11,x11,0x00000032E
andi x11,x11,-8
li x15,0xF92E3F31D95389E3
sd x15,0x000000000(x11)
li x9,0x9BB8D35059FC3B6F
amomax.d.aq x13,x9, (x11)
bne x15,x13, inner_fail8_random_loop8_7
ld x16,(x11)
bge x9,x13,mem_val3_random_loop8_7
mv x9,x13
.global mem_val3_random_loop8_7
mem_val3_random_loop8_7:
mv x9,x9
bne x16,x9,inner_fail8_random_loop8_7
j pass8_random_loop8_7
.global inner_fail8_random_loop8_7
inner_fail8_random_loop8_7:
jr x20
.global pass8_random_loop8_7
pass8_random_loop8_7:
sw x15,0x000000000(x11)
ori x11,x11,0x000000458
andi x11,x11,-8
li x15,0xF2BE7049FDB4F4F7
sd x15,0x000000000(x11)
li x9,0x47523C3DB71391D1
amomax.d.rl x13,x9, (x11)
bne x15,x13, inner_fail8_random_loop8_6
ld x16,(x11)
bge x9,x13,mem_val3_random_loop8_6
mv x9,x13
.global mem_val3_random_loop8_6
mem_val3_random_loop8_6:
mv x9,x9
bne x16,x9,inner_fail8_random_loop8_6
j pass8_random_loop8_6
.global inner_fail8_random_loop8_6
inner_fail8_random_loop8_6:
jr x20
.global pass8_random_loop8_6
pass8_random_loop8_6:
lw x10,0x000000000(x11)
ori x11,x11,0x0000006F1
andi x11,x11,-8
li x15,0xC7DD6A52FA34D712
sd x15,0x000000000(x11)
li x9,0xD0597A7A07E48625
amomax.d.aqrl x13,x9, (x11)
bne x15,x13, inner_fail8_random_loop8_5
ld x16,(x11)
bge x9,x13,mem_val3_random_loop8_5
mv x9,x13
.global mem_val3_random_loop8_5
mem_val3_random_loop8_5:
mv x9,x9
bne x16,x9,inner_fail8_random_loop8_5
j pass8_random_loop8_5
.global inner_fail8_random_loop8_5
inner_fail8_random_loop8_5:
jr x20
.global pass8_random_loop8_5
pass8_random_loop8_5:
lwu x8,0x000000000(x11)
ori x11,x11,0x00000034A
andi x11,x11,-8
li x15,0x43E4070B0E8C03C9
sd x15,0x000000000(x11)
li x9,0x8E3A0EE7E6F0C8D
amomax.d.aq x13,x9, (x11)
bne x15,x13, inner_fail8_random_loop8_4
ld x16,(x11)
bge x9,x13,mem_val3_random_loop8_4
mv x9,x13
.global mem_val3_random_loop8_4
mem_val3_random_loop8_4:
mv x9,x9
bne x16,x9,inner_fail8_random_loop8_4
j pass8_random_loop8_4
.global inner_fail8_random_loop8_4
inner_fail8_random_loop8_4:
jr x20
.global pass8_random_loop8_4
pass8_random_loop8_4:
sw x14,0x000000000(x11)
ori x11,x11,0x000000496
andi x11,x11,-8
li x15,0x51EDE30E25274B59
sd x15,0x000000000(x11)
li x9,0xB382A2ECC82A4760
amomax.d.aqrl x13,x9, (x11)
bne x15,x13, inner_fail8_random_loop8_3
ld x16,(x11)
bge x9,x13,mem_val3_random_loop8_3
mv x9,x13
.global mem_val3_random_loop8_3
mem_val3_random_loop8_3:
mv x9,x9
bne x16,x9,inner_fail8_random_loop8_3
j pass8_random_loop8_3
.global inner_fail8_random_loop8_3
inner_fail8_random_loop8_3:
jr x20
.global pass8_random_loop8_3
pass8_random_loop8_3:
lw x10,0x000000000(x11)
ori x11,x11,0x00000016F
andi x11,x11,-8
li x15,0x81157F1D3C574640
sd x15,0x000000000(x11)
li x9,0x86ABE0348FD148F5
amomax.d.rl x13,x9, (x11)
bne x15,x13, inner_fail8_random_loop8_2
ld x16,(x11)
bge x9,x13,mem_val3_random_loop8_2
mv x9,x13
.global mem_val3_random_loop8_2
mem_val3_random_loop8_2:
mv x9,x9
bne x16,x9,inner_fail8_random_loop8_2
j pass8_random_loop8_2
.global inner_fail8_random_loop8_2
inner_fail8_random_loop8_2:
jr x20
.global pass8_random_loop8_2
pass8_random_loop8_2:
c.sw x14,0x00(x11)
ori x11,x11,0x000000475
andi x11,x11,-8
li x15,0xBE45B8C663AFA28B
sd x15,0x000000000(x11)
li x9,0x78A2DBFEEE24799E
amomax.d.aqrl x13,x9, (x11)
bne x15,x13, inner_fail8_random_loop8_1
ld x16,(x11)
bge x9,x13,mem_val3_random_loop8_1
mv x9,x13
.global mem_val3_random_loop8_1
mem_val3_random_loop8_1:
mv x9,x9
bne x16,x9,inner_fail8_random_loop8_1
j pass8_random_loop8_1
.global inner_fail8_random_loop8_1
inner_fail8_random_loop8_1:
jr x20
.global pass8_random_loop8_1
pass8_random_loop8_1:
lw x9,0x000000000(x11)
ori x11,x11,0x000000275
andi x11,x11,-8
li x15,0x19D5B44E17EA70D2
sd x15,0x000000000(x11)
li x9,0x7AF6CD676CD23D3C
amomax.d.aqrl x13,x9, (x11)
bne x15,x13, inner_fail8
ld x16,(x11)
bge x9,x13,mem_val3
mv x9,x13
.global mem_val3
mem_val3:
mv x9,x9
bne x16,x9,inner_fail8
j pass8
.global inner_fail8
inner_fail8:
jr x20
.global pass8
pass8:
c.lw x8,0x00(x11)
.global check_amomaxu
check_amomaxu:
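# amomaxu.d checks: the expected memory value is the unsigned maximum
# (bgeu instead of bge).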
ori x11,x11,0x00000054D
andi x11,x11,-8
li x15,0xFE73C8B6BDC44277
sd x15,0x000000000(x11)
li x9,0x99561A29CCFDFBBF
amomaxu.d.aq x13,x9, (x11)
bne x15,x13, inner_fail9_random_loop9_9
ld x16,(x11)
bgeu x9,x13,mem_val4_random_loop9_9
mv x9,x13
.global mem_val4_random_loop9_9
mem_val4_random_loop9_9:
mv x9,x9
bne x16,x9,inner_fail9_random_loop9_9
j pass9_random_loop9_9
.global inner_fail9_random_loop9_9
inner_fail9_random_loop9_9:
jr x20
.global pass9_random_loop9_9
pass9_random_loop9_9:
lb x9,0x000000000(x11)
ori x11,x11,0x000000175
andi x11,x11,-8
li x15,0xD9357BC07DD66378
sd x15,0x000000000(x11)
li x9,0x918B84DBE313543
amomaxu.d.rl x13,x9, (x11)
bne x15,x13, inner_fail9_random_loop9_8
ld x16,(x11)
bgeu x9,x13,mem_val4_random_loop9_8
mv x9,x13
.global mem_val4_random_loop9_8
mem_val4_random_loop9_8:
mv x9,x9
bne x16,x9,inner_fail9_random_loop9_8
j pass9_random_loop9_8
.global inner_fail9_random_loop9_8
inner_fail9_random_loop9_8:
jr x20
.global pass9_random_loop9_8
pass9_random_loop9_8:
c.sw x13,0x00(x11)
ori x11,x11,0x000000437
andi x11,x11,-8
li x15,0xFE222955001FB01
sd x15,0x000000000(x11)
li x9,0x5CE77B9E81984A1
amomaxu.d x13,x9, (x11)
bne x15,x13, inner_fail9_random_loop9_7
ld x16,(x11)
bgeu x9,x13,mem_val4_random_loop9_7
mv x9,x13
.global mem_val4_random_loop9_7
mem_val4_random_loop9_7:
mv x9,x9
bne x16,x9,inner_fail9_random_loop9_7
j pass9_random_loop9_7
.global inner_fail9_random_loop9_7
inner_fail9_random_loop9_7:
jr x20
.global pass9_random_loop9_7
pass9_random_loop9_7:
sw x15,0x000000000(x11)
ori x11,x11,0x0000002BB
andi x11,x11,-8
li x15,0x319F1D336938FE1E
sd x15,0x000000000(x11)
li x9,0x53A4FD6CB4DE1F2C
amomaxu.d.aq x13,x9, (x11)
bne x15,x13, inner_fail9_random_loop9_6
ld x16,(x11)
bgeu x9,x13,mem_val4_random_loop9_6
mv x9,x13
.global mem_val4_random_loop9_6
mem_val4_random_loop9_6:
mv x9,x9
bne x16,x9,inner_fail9_random_loop9_6
j pass9_random_loop9_6
.global inner_fail9_random_loop9_6
inner_fail9_random_loop9_6:
jr x20
.global pass9_random_loop9_6
pass9_random_loop9_6:
sw x13,0x000000000(x11)
ori x11,x11,0x000000127
andi x11,x11,-8
li x15,0xC3A56CEA431CC030
sd x15,0x000000000(x11)
li x9,0x8E9DA7B994AB09A5
amomaxu.d.aq x13,x9, (x11)
bne x15,x13, inner_fail9_random_loop9_5
ld x16,(x11)
bgeu x9,x13,mem_val4_random_loop9_5
mv x9,x13
.global mem_val4_random_loop9_5
mem_val4_random_loop9_5:
mv x9,x9
bne x16,x9,inner_fail9_random_loop9_5
j pass9_random_loop9_5
.global inner_fail9_random_loop9_5
inner_fail9_random_loop9_5:
jr x20
.global pass9_random_loop9_5
pass9_random_loop9_5:
sd x14,0x000000000(x11)
ori x11,x11,0x0000005F1
andi x11,x11,-8
li x15,0xBAD4F9869F773A8A
sd x15,0x000000000(x11)
li x9,0xC01B26D1C3C9E18B
amomaxu.d.rl x13,x9, (x11)
bne x15,x13, inner_fail9_random_loop9_4
ld x16,(x11)
bgeu x9,x13,mem_val4_random_loop9_4
mv x9,x13
.global mem_val4_random_loop9_4
mem_val4_random_loop9_4:
mv x9,x9
bne x16,x9,inner_fail9_random_loop9_4
j pass9_random_loop9_4
.global inner_fail9_random_loop9_4
inner_fail9_random_loop9_4:
jr x20
.global pass9_random_loop9_4
pass9_random_loop9_4:
lbu x9,0x000000000(x11)
ori x11,x11,0x00000079D
andi x11,x11,-8
li x15,0xC62C17F1F9FBC837
sd x15,0x000000000(x11)
li x9,0x85F6E14BBEA4D125
amomaxu.d.aq x13,x9, (x11)
bne x15,x13, inner_fail9_random_loop9_3
ld x16,(x11)
bgeu x9,x13,mem_val4_random_loop9_3
mv x9,x13
.global mem_val4_random_loop9_3
mem_val4_random_loop9_3:
mv x9,x9
bne x16,x9,inner_fail9_random_loop9_3
j pass9_random_loop9_3
.global inner_fail9_random_loop9_3
inner_fail9_random_loop9_3:
jr x20
.global pass9_random_loop9_3
pass9_random_loop9_3:
sd x15,0x000000000(x11)
ori x11,x11,0x0000002CD
andi x11,x11,-8
li x15,0x1C2090C7A2343D6C
sd x15,0x000000000(x11)
li x9,0xDC64187AFD7ED5D
amomaxu.d.aq x13,x9, (x11)
bne x15,x13, inner_fail9_random_loop9_2
ld x16,(x11)
bgeu x9,x13,mem_val4_random_loop9_2
mv x9,x13
.global mem_val4_random_loop9_2
mem_val4_random_loop9_2:
mv x9,x9
bne x16,x9,inner_fail9_random_loop9_2
j pass9_random_loop9_2
.global inner_fail9_random_loop9_2
inner_fail9_random_loop9_2:
jr x20
.global pass9_random_loop9_2
pass9_random_loop9_2:
lw x10,0x000000000(x11)
ori x11,x11,0x000000345
andi x11,x11,-8
li x15,0x2A25F4AAC79FBB6D
sd x15,0x000000000(x11)
li x9,0x7373616D2A9E82A7
amomaxu.d.aqrl x13,x9, (x11)
bne x15,x13, inner_fail9_random_loop9_1
ld x16,(x11)
bgeu x9,x13,mem_val4_random_loop9_1
mv x9,x13
.global mem_val4_random_loop9_1
mem_val4_random_loop9_1:
mv x9,x9
bne x16,x9,inner_fail9_random_loop9_1
j pass9_random_loop9_1
.global inner_fail9_random_loop9_1
inner_fail9_random_loop9_1:
jr x20
.global pass9_random_loop9_1
pass9_random_loop9_1:
ld x9,0x000000000(x11)
ori x11,x11,0x0000000AB
andi x11,x11,-8
li x15,0x5F8305A3EB779CC5
sd x15,0x000000000(x11)
li x9,0x8FD39D9AD1F41505
amomaxu.d.aqrl x13,x9, (x11)
bne x15,x13, inner_fail9
ld x16,(x11)
bgeu x9,x13,mem_val4
mv x9,x13
.global mem_val4
mem_val4:
mv x9,x9
bne x16,x9,inner_fail9
j pass9
.global inner_fail9
inner_fail9:
jr x20
.global pass9
pass9:
c.lw x9,0x00(x11)
li x9,0
li x15,0
.global check_amoswap_w
check_amoswap_w:
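# Word-sized variants start here: x9/x15 were cleared above, a 32-bit value
# is stored with sw, swapped with amoswap.w, and the new memory contents are
# read back with lwu for the comparison.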
ori x11,x11,0x00000040D
andi x11,x11,-8
li x15,0x05DD77631
sw x15,0x000000000(x11)
li x9,0x07C41DFE1
amoswap.w.rl x13,x9, (x11)
bne x15,x13, inner_fail_w_random_loop1_w_9
lwu x16,(x11)
bne x16,x9,inner_fail_w_random_loop1_w_9
j pass_w_random_loop1_w_9
.global inner_fail_w_random_loop1_w_9
inner_fail_w_random_loop1_w_9:
jr x20
.global pass_w_random_loop1_w_9
pass_w_random_loop1_w_9:
lw x10,0x000000000(x11)
ori x11,x11,0x000000364
andi x11,x11,-8
li x15,0x03D55AD19
sw x15,0x000000000(x11)
li x9,0x06DF18EE2
amoswap.w.aqrl x13,x9, (x11)
bne x15,x13, inner_fail_w_random_loop1_w_8
lwu x16,(x11)
bne x16,x9,inner_fail_w_random_loop1_w_8
j pass_w_random_loop1_w_8
.global inner_fail_w_random_loop1_w_8
inner_fail_w_random_loop1_w_8:
jr x20
.global pass_w_random_loop1_w_8
pass_w_random_loop1_w_8:
c.lw x9,0x00(x11)
ori x11,x11,0x0000007E4
andi x11,x11,-8
li x15,0x0435E4DAD
sw x15,0x000000000(x11)
li x9,0x04C1542AE
amoswap.w x13,x9, (x11)
bne x15,x13, inner_fail_w_random_loop1_w_7
lwu x16,(x11)
bne x16,x9,inner_fail_w_random_loop1_w_7
j pass_w_random_loop1_w_7
.global inner_fail_w_random_loop1_w_7
inner_fail_w_random_loop1_w_7:
jr x20
.global pass_w_random_loop1_w_7
pass_w_random_loop1_w_7:
ld x9,0x000000000(x11)
ori x11,x11,0x0000006D9
andi x11,x11,-8
li x15,0x047B0DA16
sw x15,0x000000000(x11)
li x9,0x07A3ABA7D
amoswap.w.aqrl x13,x9, (x11)
bne x15,x13, inner_fail_w_random_loop1_w_6
lwu x16,(x11)
bne x16,x9,inner_fail_w_random_loop1_w_6
j pass_w_random_loop1_w_6
.global inner_fail_w_random_loop1_w_6
inner_fail_w_random_loop1_w_6:
jr x20
.global pass_w_random_loop1_w_6
pass_w_random_loop1_w_6:
c.lw x10,0x00(x11)
ori x11,x11,0x0000001D2
andi x11,x11,-8
li x15,0x04A28F939
sw x15,0x000000000(x11)
li x9,0x0204C474F
amoswap.w.rl x13,x9, (x11)
bne x15,x13, inner_fail_w_random_loop1_w_5
lwu x16,(x11)
bne x16,x9,inner_fail_w_random_loop1_w_5
j pass_w_random_loop1_w_5
.global inner_fail_w_random_loop1_w_5
inner_fail_w_random_loop1_w_5:
jr x20
.global pass_w_random_loop1_w_5
pass_w_random_loop1_w_5:
lh x9,0x000000000(x11)
ori x11,x11,0x000000097
andi x11,x11,-8
li x15,0x02C100A15
sw x15,0x000000000(x11)
li x9,0x0650DA174
amoswap.w.rl x13,x9, (x11)
bne x15,x13, inner_fail_w_random_loop1_w_4
lwu x16,(x11)
bne x16,x9,inner_fail_w_random_loop1_w_4
j pass_w_random_loop1_w_4
.global inner_fail_w_random_loop1_w_4
inner_fail_w_random_loop1_w_4:
jr x20
.global pass_w_random_loop1_w_4
pass_w_random_loop1_w_4:
c.sw x15,0x00(x11)
ori x11,x11,0x0000002D3
andi x11,x11,-8
li x15,0x01B3C5284
sw x15,0x000000000(x11)
li x9,0x055339AE7
amoswap.w.rl x13,x9, (x11)
bne x15,x13, inner_fail_w_random_loop1_w_3
lwu x16,(x11)
bne x16,x9,inner_fail_w_random_loop1_w_3
j pass_w_random_loop1_w_3
.global inner_fail_w_random_loop1_w_3
inner_fail_w_random_loop1_w_3:
jr x20
.global pass_w_random_loop1_w_3
pass_w_random_loop1_w_3:
c.lw x8,0x00(x11)
ori x11,x11,0x000000412
andi x11,x11,-8
li x15,0x02CE9C59D
sw x15,0x000000000(x11)
li x9,0x032A1B99A
amoswap.w.rl x13,x9, (x11)
bne x15,x13, inner_fail_w_random_loop1_w_2
lwu x16,(x11)
bne x16,x9,inner_fail_w_random_loop1_w_2
j pass_w_random_loop1_w_2
.global inner_fail_w_random_loop1_w_2
inner_fail_w_random_loop1_w_2:
jr x20
.global pass_w_random_loop1_w_2
pass_w_random_loop1_w_2:
lbu x8,0x000000000(x11)
ori x11,x11,0x00000040E
andi x11,x11,-8
li x15,0x02148F8D1
sw x15,0x000000000(x11)
li x9,0x06C3ADF23
amoswap.w x13,x9, (x11)
bne x15,x13, inner_fail_w_random_loop1_w_1
lwu x16,(x11)
bne x16,x9,inner_fail_w_random_loop1_w_1
j pass_w_random_loop1_w_1
.global inner_fail_w_random_loop1_w_1
inner_fail_w_random_loop1_w_1:
jr x20
.global pass_w_random_loop1_w_1
pass_w_random_loop1_w_1:
ld x9,0x000000000(x11)
ori x11,x11,0x0000005BA
andi x11,x11,-8
li x15,0x055F79C53
sw x15,0x000000000(x11)
li x9,0x026223BB9
amoswap.w x13,x9, (x11)
bne x15,x13, inner_fail_w
lwu x16,(x11)
bne x16,x9,inner_fail_w
j pass_w
.global inner_fail_w
inner_fail_w:
jr x20
.global pass_w
pass_w:
sd x15,0x000000000(x11)
.global check_amoadd_w
check_amoadd_w:
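# amoadd.w checks: 32-bit add in memory; the expected value is recomputed
# with add and compared against the lwu read-back.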
ori x11,x11,0x000000159
andi x11,x11,-8
li x15,0x0410CCAC8
sw x15,0x000000000(x11)
li x9,0x0608B5072
amoadd.w.aq x13,x9, (x11)
bne x15,x13, inner_fail2_w_random_loop2_w_9
lwu x16,(x11)
add x9,x9,x15
bne x16,x9,inner_fail2_w_random_loop2_w_9
j pass2_w_random_loop2_w_9
.global inner_fail2_w_random_loop2_w_9
inner_fail2_w_random_loop2_w_9:
jr x20
.global pass2_w_random_loop2_w_9
pass2_w_random_loop2_w_9:
lw x9,0x000000000(x11)
ori x11,x11,0x00000006B
andi x11,x11,-8
li x15,0x0274A0131
sw x15,0x000000000(x11)
li x9,0x07B66B903
amoadd.w.aq x13,x9, (x11)
bne x15,x13, inner_fail2_w_random_loop2_w_8
lwu x16,(x11)
add x9,x9,x15
bne x16,x9,inner_fail2_w_random_loop2_w_8
j pass2_w_random_loop2_w_8
.global inner_fail2_w_random_loop2_w_8
inner_fail2_w_random_loop2_w_8:
jr x20
.global pass2_w_random_loop2_w_8
pass2_w_random_loop2_w_8:
lbu x9,0x000000000(x11)
ori x11,x11,0x00000026B
andi x11,x11,-8
li x15,0x05CB8B451
sw x15,0x000000000(x11)
li x9,0x077208601
amoadd.w x13,x9, (x11)
bne x15,x13, inner_fail2_w_random_loop2_w_7
lwu x16,(x11)
add x9,x9,x15
bne x16,x9,inner_fail2_w_random_loop2_w_7
j pass2_w_random_loop2_w_7
.global inner_fail2_w_random_loop2_w_7
inner_fail2_w_random_loop2_w_7:
jr x20
.global pass2_w_random_loop2_w_7
pass2_w_random_loop2_w_7:
sw x13,0x000000000(x11)
ori x11,x11,0x0000006FB
andi x11,x11,-8
li x15,0x051A633C4
sw x15,0x000000000(x11)
li x9,0x01899B1F5
amoadd.w x13,x9, (x11)
bne x15,x13, inner_fail2_w_random_loop2_w_6
lwu x16,(x11)
add x9,x9,x15
bne x16,x9,inner_fail2_w_random_loop2_w_6
j pass2_w_random_loop2_w_6
.global inner_fail2_w_random_loop2_w_6
inner_fail2_w_random_loop2_w_6:
jr x20
.global pass2_w_random_loop2_w_6
pass2_w_random_loop2_w_6:
lbu x9,0x000000000(x11)
ori x11,x11,0x000000154
andi x11,x11,-8
li x15,0x07AA3EACE
sw x15,0x000000000(x11)
li x9,0x07467B123
amoadd.w.aq x13,x9, (x11)
bne x15,x13, inner_fail2_w_random_loop2_w_5
lwu x16,(x11)
add x9,x9,x15
bne x16,x9,inner_fail2_w_random_loop2_w_5
j pass2_w_random_loop2_w_5
.global inner_fail2_w_random_loop2_w_5
inner_fail2_w_random_loop2_w_5:
jr x20
.global pass2_w_random_loop2_w_5
pass2_w_random_loop2_w_5:
lhu x8,0x000000000(x11)
ori x11,x11,0x00000040F
andi x11,x11,-8
li x15,0x00FACB5A2
sw x15,0x000000000(x11)
li x9,0x03813E8D1
amoadd.w.aqrl x13,x9, (x11)
bne x15,x13, inner_fail2_w_random_loop2_w_4
lwu x16,(x11)
add x9,x9,x15
bne x16,x9,inner_fail2_w_random_loop2_w_4
j pass2_w_random_loop2_w_4
.global inner_fail2_w_random_loop2_w_4
inner_fail2_w_random_loop2_w_4:
jr x20
.global pass2_w_random_loop2_w_4
pass2_w_random_loop2_w_4:
lwu x8,0x000000000(x11)
ori x11,x11,0x00000031D
andi x11,x11,-8
li x15,0x00C8E00D5
sw x15,0x000000000(x11)
li x9,0x07BD6A13B
amoadd.w.aqrl x13,x9, (x11)
bne x15,x13, inner_fail2_w_random_loop2_w_3
lwu x16,(x11)
add x9,x9,x15
bne x16,x9,inner_fail2_w_random_loop2_w_3
j pass2_w_random_loop2_w_3
.global inner_fail2_w_random_loop2_w_3
inner_fail2_w_random_loop2_w_3:
jr x20
.global pass2_w_random_loop2_w_3
pass2_w_random_loop2_w_3:
c.sw x15,0x00(x11)
ori x11,x11,0x00000037D
andi x11,x11,-8
li x15,0x07E8AC226
sw x15,0x000000000(x11)
li x9,0x00D61F6CC
amoadd.w.aq x13,x9, (x11)
bne x15,x13, inner_fail2_w_random_loop2_w_2
lwu x16,(x11)
add x9,x9,x15
bne x16,x9,inner_fail2_w_random_loop2_w_2
j pass2_w_random_loop2_w_2
.global inner_fail2_w_random_loop2_w_2
inner_fail2_w_random_loop2_w_2:
jr x20
.global pass2_w_random_loop2_w_2
pass2_w_random_loop2_w_2:
c.lw x8,0x00(x11)
ori x11,x11,0x000000620
andi x11,x11,-8
li x15,0x0379BFBC6
sw x15,0x000000000(x11)
li x9,0x0603E529E
amoadd.w.aqrl x13,x9, (x11)
bne x15,x13, inner_fail2_w_random_loop2_w_1
lwu x16,(x11)
add x9,x9,x15
bne x16,x9,inner_fail2_w_random_loop2_w_1
j pass2_w_random_loop2_w_1
.global inner_fail2_w_random_loop2_w_1
inner_fail2_w_random_loop2_w_1:
jr x20
.global pass2_w_random_loop2_w_1
pass2_w_random_loop2_w_1:
ld x10,0x000000000(x11)
ori x11,x11,0x0000005A0
andi x11,x11,-8
li x15,0x00CAA04A6
sw x15,0x000000000(x11)
li x9,0x017B9CE23
amoadd.w.aq x13,x9, (x11)
bne x15,x13, inner_fail2_w
lwu x16,(x11)
add x9,x9,x15
bne x16,x9,inner_fail2_w
j pass2_w
.global inner_fail2_w
inner_fail2_w:
jr x20
.global pass2_w
pass2_w:
lwu x9,0x000000000(x11)
.global check_amoor_w
check_amoor_w:
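# amoor.w checks: expected 32-bit memory value is old | operand.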
ori x11,x11,0x000000409
andi x11,x11,-8
li x15,0x05CC457D9
sw x15,0x000000000(x11)
li x9,0x02A93D703
amoor.w x13,x9, (x11)
bne x15,x13, inner_fail3_w_random_loop3_w_9
lwu x16,(x11)
or x9,x9,x15
bne x16,x9,inner_fail3_w_random_loop3_w_9
j pass3_w_random_loop3_w_9
.global inner_fail3_w_random_loop3_w_9
inner_fail3_w_random_loop3_w_9:
jr x20
.global pass3_w_random_loop3_w_9
pass3_w_random_loop3_w_9:
lbu x9,0x000000000(x11)
ori x11,x11,0x000000181
andi x11,x11,-8
li x15,0x052D2C763
sw x15,0x000000000(x11)
li x9,0x010D9E7CB
amoor.w x13,x9, (x11)
bne x15,x13, inner_fail3_w_random_loop3_w_8
lwu x16,(x11)
or x9,x9,x15
bne x16,x9,inner_fail3_w_random_loop3_w_8
j pass3_w_random_loop3_w_8
.global inner_fail3_w_random_loop3_w_8
inner_fail3_w_random_loop3_w_8:
jr x20
.global pass3_w_random_loop3_w_8
pass3_w_random_loop3_w_8:
lw x8,0x000000000(x11)
ori x11,x11,0x0000007B2
andi x11,x11,-8
li x15,0x064FABACD
sw x15,0x000000000(x11)
li x9,0x00176F643
amoor.w x13,x9, (x11)
bne x15,x13, inner_fail3_w_random_loop3_w_7
lwu x16,(x11)
or x9,x9,x15
bne x16,x9,inner_fail3_w_random_loop3_w_7
j pass3_w_random_loop3_w_7
.global inner_fail3_w_random_loop3_w_7
inner_fail3_w_random_loop3_w_7:
jr x20
.global pass3_w_random_loop3_w_7
pass3_w_random_loop3_w_7:
c.sw x14,0x00(x11)
ori x11,x11,0x000000234
andi x11,x11,-8
li x15,0x06D24D61A
sw x15,0x000000000(x11)
li x9,0x00F248141
amoor.w.aq x13,x9, (x11)
bne x15,x13, inner_fail3_w_random_loop3_w_6
lwu x16,(x11)
or x9,x9,x15
bne x16,x9,inner_fail3_w_random_loop3_w_6
j pass3_w_random_loop3_w_6
.global inner_fail3_w_random_loop3_w_6
inner_fail3_w_random_loop3_w_6:
jr x20
.global pass3_w_random_loop3_w_6
pass3_w_random_loop3_w_6:
c.lw x9,0x00(x11)
ori x11,x11,0x00000075B
andi x11,x11,-8
li x15,0x020CC0E3B
sw x15,0x000000000(x11)
li x9,0x04CE2C7DC
amoor.w.aq x13,x9, (x11)
bne x15,x13, inner_fail3_w_random_loop3_w_5
lwu x16,(x11)
or x9,x9,x15
bne x16,x9,inner_fail3_w_random_loop3_w_5
j pass3_w_random_loop3_w_5
.global inner_fail3_w_random_loop3_w_5
inner_fail3_w_random_loop3_w_5:
jr x20
.global pass3_w_random_loop3_w_5
pass3_w_random_loop3_w_5:
c.lw x8,0x00(x11)
ori x11,x11,0x0000007F4
andi x11,x11,-8
li x15,0x039E6A33B
sw x15,0x000000000(x11)
li x9,0x0558D2785
amoor.w x13,x9, (x11)
bne x15,x13, inner_fail3_w_random_loop3_w_4
lwu x16,(x11)
or x9,x9,x15
bne x16,x9,inner_fail3_w_random_loop3_w_4
j pass3_w_random_loop3_w_4
.global inner_fail3_w_random_loop3_w_4
inner_fail3_w_random_loop3_w_4:
jr x20
.global pass3_w_random_loop3_w_4
pass3_w_random_loop3_w_4:
lw x9,0x000000000(x11)
ori x11,x11,0x000000510
andi x11,x11,-8
li x15,0x035219EBB
sw x15,0x000000000(x11)
li x9,0x07339ADD3
amoor.w x13,x9, (x11)
bne x15,x13, inner_fail3_w_random_loop3_w_3
lwu x16,(x11)
or x9,x9,x15
bne x16,x9,inner_fail3_w_random_loop3_w_3
j pass3_w_random_loop3_w_3
.global inner_fail3_w_random_loop3_w_3
inner_fail3_w_random_loop3_w_3:
jr x20
.global pass3_w_random_loop3_w_3
pass3_w_random_loop3_w_3:
lh x9,0x000000000(x11)
ori x11,x11,0x00000000F
andi x11,x11,-8
li x15,0x078DB8D0C
sw x15,0x000000000(x11)
li x9,0x04726F7E5
amoor.w x13,x9, (x11)
bne x15,x13, inner_fail3_w_random_loop3_w_2
lwu x16,(x11)
or x9,x9,x15
bne x16,x9,inner_fail3_w_random_loop3_w_2
j pass3_w_random_loop3_w_2
.global inner_fail3_w_random_loop3_w_2
inner_fail3_w_random_loop3_w_2:
jr x20
.global pass3_w_random_loop3_w_2
pass3_w_random_loop3_w_2:
lb x9,0x000000000(x11)
ori x11,x11,0x0000006CD
andi x11,x11,-8
li x15,0x05A6AFD69
sw x15,0x000000000(x11)
li x9,0x02B3A1DC1
amoor.w x13,x9, (x11)
bne x15,x13, inner_fail3_w_random_loop3_w_1
lwu x16,(x11)
or x9,x9,x15
bne x16,x9,inner_fail3_w_random_loop3_w_1
j pass3_w_random_loop3_w_1
.global inner_fail3_w_random_loop3_w_1
inner_fail3_w_random_loop3_w_1:
jr x20
.global pass3_w_random_loop3_w_1
pass3_w_random_loop3_w_1:
ld x8,0x000000000(x11)
ori x11,x11,0x000000561
andi x11,x11,-8
li x15,0x069F9782A
sw x15,0x000000000(x11)
li x9,0x00702AFBB
amoor.w.rl x13,x9, (x11)
bne x15,x13, inner_fail3_w
lwu x16,(x11)
or x9,x9,x15
bne x16,x9,inner_fail3_w
j pass3_w
.global inner_fail3_w
inner_fail3_w:
jr x20
.global pass3_w
pass3_w:
c.lw x9,0x00(x11)
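# check_amoand_w: same seed/operate/verify pattern as above, with the expected
# memory value recomputed as x15 AND x9 before the final compare.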
.global check_amoand_w
check_amoand_w:
ori x11,x11,0x0000001A3
andi x11,x11,-8
li x15,0x00AFC055A
sw x15,0x000000000(x11)
li x9,0x0569E4AF9
amoand.w x13,x9, (x11)
bne x15,x13, inner_fail4_w_random_loop4_w_9
lwu x16,(x11)
and x9,x9,x15
bne x16,x9,inner_fail4_w_random_loop4_w_9
j pass4_w_random_loop4_w_9
.global inner_fail4_w_random_loop4_w_9
inner_fail4_w_random_loop4_w_9:
jr x20
.global pass4_w_random_loop4_w_9
pass4_w_random_loop4_w_9:
sw x13,0x000000000(x11)
ori x11,x11,0x0000006E1
andi x11,x11,-8
li x15,0x037B4943E
sw x15,0x000000000(x11)
li x9,0x077A0B022
amoand.w x13,x9, (x11)
bne x15,x13, inner_fail4_w_random_loop4_w_8
lwu x16,(x11)
and x9,x9,x15
bne x16,x9,inner_fail4_w_random_loop4_w_8
j pass4_w_random_loop4_w_8
.global inner_fail4_w_random_loop4_w_8
inner_fail4_w_random_loop4_w_8:
jr x20
.global pass4_w_random_loop4_w_8
pass4_w_random_loop4_w_8:
lwu x9,0x000000000(x11)
ori x11,x11,0x00000022C
andi x11,x11,-8
li x15,0x0085CB8EE
sw x15,0x000000000(x11)
li x9,0x044D23402
amoand.w.aq x13,x9, (x11)
bne x15,x13, inner_fail4_w_random_loop4_w_7
lwu x16,(x11)
and x9,x9,x15
bne x16,x9,inner_fail4_w_random_loop4_w_7
j pass4_w_random_loop4_w_7
.global inner_fail4_w_random_loop4_w_7
inner_fail4_w_random_loop4_w_7:
jr x20
.global pass4_w_random_loop4_w_7
pass4_w_random_loop4_w_7:
lh x9,0x000000000(x11)
ori x11,x11,0x00000047C
andi x11,x11,-8
li x15,0x020A856FD
sw x15,0x000000000(x11)
li x9,0x035621740
amoand.w x13,x9, (x11)
bne x15,x13, inner_fail4_w_random_loop4_w_6
lwu x16,(x11)
and x9,x9,x15
bne x16,x9,inner_fail4_w_random_loop4_w_6
j pass4_w_random_loop4_w_6
.global inner_fail4_w_random_loop4_w_6
inner_fail4_w_random_loop4_w_6:
jr x20
.global pass4_w_random_loop4_w_6
pass4_w_random_loop4_w_6:
lhu x9,0x000000000(x11)
ori x11,x11,0x00000034C
andi x11,x11,-8
li x15,0x01C42DBA5
sw x15,0x000000000(x11)
li x9,0x022060E21
amoand.w x13,x9, (x11)
bne x15,x13, inner_fail4_w_random_loop4_w_5
lwu x16,(x11)
and x9,x9,x15
bne x16,x9,inner_fail4_w_random_loop4_w_5
j pass4_w_random_loop4_w_5
.global inner_fail4_w_random_loop4_w_5
inner_fail4_w_random_loop4_w_5:
jr x20
.global pass4_w_random_loop4_w_5
pass4_w_random_loop4_w_5:
ld x8,0x000000000(x11)
ori x11,x11,0x0000001AA
andi x11,x11,-8
li x15,0x0746015E4
sw x15,0x000000000(x11)
li x9,0x01D426F29
amoand.w x13,x9, (x11)
bne x15,x13, inner_fail4_w_random_loop4_w_4
lwu x16,(x11)
and x9,x9,x15
bne x16,x9,inner_fail4_w_random_loop4_w_4
j pass4_w_random_loop4_w_4
.global inner_fail4_w_random_loop4_w_4
inner_fail4_w_random_loop4_w_4:
jr x20
.global pass4_w_random_loop4_w_4
pass4_w_random_loop4_w_4:
sd x14,0x000000000(x11)
ori x11,x11,0x00000064C
andi x11,x11,-8
li x15,0x00C4F0517
sw x15,0x000000000(x11)
li x9,0x05265E0C2
amoand.w x13,x9, (x11)
bne x15,x13, inner_fail4_w_random_loop4_w_3
lwu x16,(x11)
and x9,x9,x15
bne x16,x9,inner_fail4_w_random_loop4_w_3
j pass4_w_random_loop4_w_3
.global inner_fail4_w_random_loop4_w_3
inner_fail4_w_random_loop4_w_3:
jr x20
.global pass4_w_random_loop4_w_3
pass4_w_random_loop4_w_3:
ld x8,0x000000000(x11)
ori x11,x11,0x000000556
andi x11,x11,-8
li x15,0x022E2DF91
sw x15,0x000000000(x11)
li x9,0x000DCFCE9
amoand.w x13,x9, (x11)
bne x15,x13, inner_fail4_w_random_loop4_w_2
lwu x16,(x11)
and x9,x9,x15
bne x16,x9,inner_fail4_w_random_loop4_w_2
j pass4_w_random_loop4_w_2
.global inner_fail4_w_random_loop4_w_2
inner_fail4_w_random_loop4_w_2:
jr x20
.global pass4_w_random_loop4_w_2
pass4_w_random_loop4_w_2:
lbu x9,0x000000000(x11)
ori x11,x11,0x00000021A
andi x11,x11,-8
li x15,0x062BE9AF8
sw x15,0x000000000(x11)
li x9,0x01DF67B89
amoand.w.aq x13,x9, (x11)
bne x15,x13, inner_fail4_w_random_loop4_w_1
lwu x16,(x11)
and x9,x9,x15
bne x16,x9,inner_fail4_w_random_loop4_w_1
j pass4_w_random_loop4_w_1
.global inner_fail4_w_random_loop4_w_1
inner_fail4_w_random_loop4_w_1:
jr x20
.global pass4_w_random_loop4_w_1
pass4_w_random_loop4_w_1:
sw x15,0x000000000(x11)
ori x11,x11,0x000000502
andi x11,x11,-8
li x15,0x0073C9A76
sw x15,0x000000000(x11)
li x9,0x071127D9C
amoand.w.rl x13,x9, (x11)
bne x15,x13, inner_fail4_w
lwu x16,(x11)
and x9,x9,x15
bne x16,x9,inner_fail4_w
j pass4_w
.global inner_fail4_w
inner_fail4_w:
jr x20
.global pass4_w
pass4_w:
lh x10,0x000000000(x11)
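# check_amoxor_w: seed with x15, apply amoxor.w with operand x9, then verify
# the returned old value and the software-computed x15 XOR x9 against memory.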
.global check_amoxor_w
check_amoxor_w:
ori x11,x11,0x0000004DB
andi x11,x11,-8
li x15,0x003357387
sw x15,0x000000000(x11)
li x9,0x0105A0E8B
amoxor.w.aqrl x13,x9, (x11)
bne x15,x13, inner_fail5_w_random_loop5_w_9
lwu x16,(x11)
xor x9,x9,x15
bne x16,x9,inner_fail5_w_random_loop5_w_9
j pass5_w_random_loop5_w_9
.global inner_fail5_w_random_loop5_w_9
inner_fail5_w_random_loop5_w_9:
jr x20
.global pass5_w_random_loop5_w_9
pass5_w_random_loop5_w_9:
lw x8,0x000000000(x11)
ori x11,x11,0x000000642
andi x11,x11,-8
li x15,0x024100F66
sw x15,0x000000000(x11)
li x9,0x06E2F03A3
amoxor.w.rl x13,x9, (x11)
bne x15,x13, inner_fail5_w_random_loop5_w_8
lwu x16,(x11)
xor x9,x9,x15
bne x16,x9,inner_fail5_w_random_loop5_w_8
j pass5_w_random_loop5_w_8
.global inner_fail5_w_random_loop5_w_8
inner_fail5_w_random_loop5_w_8:
jr x20
.global pass5_w_random_loop5_w_8
pass5_w_random_loop5_w_8:
lwu x8,0x000000000(x11)
ori x11,x11,0x000000545
andi x11,x11,-8
li x15,0x0174B12AC
sw x15,0x000000000(x11)
li x9,0x003A06B1A
amoxor.w.aq x13,x9, (x11)
bne x15,x13, inner_fail5_w_random_loop5_w_7
lwu x16,(x11)
xor x9,x9,x15
bne x16,x9,inner_fail5_w_random_loop5_w_7
j pass5_w_random_loop5_w_7
.global inner_fail5_w_random_loop5_w_7
inner_fail5_w_random_loop5_w_7:
jr x20
.global pass5_w_random_loop5_w_7
pass5_w_random_loop5_w_7:
lbu x9,0x000000000(x11)
ori x11,x11,0x000000786
andi x11,x11,-8
li x15,0x028B931AC
sw x15,0x000000000(x11)
li x9,0x0229E7E2A
amoxor.w x13,x9, (x11)
bne x15,x13, inner_fail5_w_random_loop5_w_6
lwu x16,(x11)
xor x9,x9,x15
bne x16,x9,inner_fail5_w_random_loop5_w_6
j pass5_w_random_loop5_w_6
.global inner_fail5_w_random_loop5_w_6
inner_fail5_w_random_loop5_w_6:
jr x20
.global pass5_w_random_loop5_w_6
pass5_w_random_loop5_w_6:
lh x8,0x000000000(x11)
ori x11,x11,0x00000005C
andi x11,x11,-8
li x15,0x00F5EB420
sw x15,0x000000000(x11)
li x9,0x02D8C483F
amoxor.w.rl x13,x9, (x11)
bne x15,x13, inner_fail5_w_random_loop5_w_5
lwu x16,(x11)
xor x9,x9,x15
bne x16,x9,inner_fail5_w_random_loop5_w_5
j pass5_w_random_loop5_w_5
.global inner_fail5_w_random_loop5_w_5
inner_fail5_w_random_loop5_w_5:
jr x20
.global pass5_w_random_loop5_w_5
pass5_w_random_loop5_w_5:
c.sw x13,0x00(x11)
ori x11,x11,0x0000001C5
andi x11,x11,-8
li x15,0x0560E3346
sw x15,0x000000000(x11)
li x9,0x007998AE9
amoxor.w.aq x13,x9, (x11)
bne x15,x13, inner_fail5_w_random_loop5_w_4
lwu x16,(x11)
xor x9,x9,x15
bne x16,x9,inner_fail5_w_random_loop5_w_4
j pass5_w_random_loop5_w_4
.global inner_fail5_w_random_loop5_w_4
inner_fail5_w_random_loop5_w_4:
jr x20
.global pass5_w_random_loop5_w_4
pass5_w_random_loop5_w_4:
sd x14,0x000000000(x11)
ori x11,x11,0x0000000BF
andi x11,x11,-8
li x15,0x04BBBD770
sw x15,0x000000000(x11)
li x9,0x042C2AE55
amoxor.w.aq x13,x9, (x11)
bne x15,x13, inner_fail5_w_random_loop5_w_3
lwu x16,(x11)
xor x9,x9,x15
bne x16,x9,inner_fail5_w_random_loop5_w_3
j pass5_w_random_loop5_w_3
.global inner_fail5_w_random_loop5_w_3
inner_fail5_w_random_loop5_w_3:
jr x20
.global pass5_w_random_loop5_w_3
pass5_w_random_loop5_w_3:
ld x10,0x000000000(x11)
ori x11,x11,0x0000007F7
andi x11,x11,-8
li x15,0x0310EA9D7
sw x15,0x000000000(x11)
li x9,0x01686FACE
amoxor.w x13,x9, (x11)
bne x15,x13, inner_fail5_w_random_loop5_w_2
lwu x16,(x11)
xor x9,x9,x15
bne x16,x9,inner_fail5_w_random_loop5_w_2
j pass5_w_random_loop5_w_2
.global inner_fail5_w_random_loop5_w_2
inner_fail5_w_random_loop5_w_2:
jr x20
.global pass5_w_random_loop5_w_2
pass5_w_random_loop5_w_2:
c.sw x15,0x00(x11)
ori x11,x11,0x00000063A
andi x11,x11,-8
li x15,0x05EC53460
sw x15,0x000000000(x11)
li x9,0x062912E75
amoxor.w.aqrl x13,x9, (x11)
bne x15,x13, inner_fail5_w_random_loop5_w_1
lwu x16,(x11)
xor x9,x9,x15
bne x16,x9,inner_fail5_w_random_loop5_w_1
j pass5_w_random_loop5_w_1
.global inner_fail5_w_random_loop5_w_1
inner_fail5_w_random_loop5_w_1:
jr x20
.global pass5_w_random_loop5_w_1
pass5_w_random_loop5_w_1:
sw x13,0x000000000(x11)
ori x11,x11,0x0000005F9
andi x11,x11,-8
li x15,0x0737C14E7
sw x15,0x000000000(x11)
li x9,0x07D128168
amoxor.w.rl x13,x9, (x11)
bne x15,x13, inner_fail5_w
lwu x16,(x11)
xor x9,x9,x15
bne x16,x9,inner_fail5_w
j pass5_w
.global inner_fail5_w
inner_fail5_w:
jr x20
.global pass5_w
pass5_w:
ld x8,0x000000000(x11)
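# check_amomin_w: the expected memory value is the signed minimum, selected in
# software by the blt/mv pair (keep x9 if it is below the returned old value
# x13, otherwise take x13); the "mv x9,x9" at each mem_val_* label is a
# generator placeholder (no-op).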
.global check_amomin_w
check_amomin_w:
ori x11,x11,0x00000005B
andi x11,x11,-8
li x15,0x03E85843A
sw x15,0x000000000(x11)
li x9,0x05FAB3A4F
amomin.w.aq x13,x9, (x11)
bne x15,x13, inner_fail6_w_random_loop6_w_9
lwu x16,(x11)
blt x9,x13,mem_val_w_random_loop6_w_9
mv x9,x13
.global mem_val_w_random_loop6_w_9
mem_val_w_random_loop6_w_9:
mv x9,x9
bne x16,x9,inner_fail6_w_random_loop6_w_9
j pass6_w_random_loop6_w_9
.global inner_fail6_w_random_loop6_w_9
inner_fail6_w_random_loop6_w_9:
jr x20
.global pass6_w_random_loop6_w_9
pass6_w_random_loop6_w_9:
lwu x10,0x000000000(x11)
ori x11,x11,0x000000512
andi x11,x11,-8
li x15,0x060647F0F
sw x15,0x000000000(x11)
li x9,0x01ACA60E8
amomin.w.rl x13,x9, (x11)
bne x15,x13, inner_fail6_w_random_loop6_w_8
lwu x16,(x11)
blt x9,x13,mem_val_w_random_loop6_w_8
mv x9,x13
.global mem_val_w_random_loop6_w_8
mem_val_w_random_loop6_w_8:
mv x9,x9
bne x16,x9,inner_fail6_w_random_loop6_w_8
j pass6_w_random_loop6_w_8
.global inner_fail6_w_random_loop6_w_8
inner_fail6_w_random_loop6_w_8:
jr x20
.global pass6_w_random_loop6_w_8
pass6_w_random_loop6_w_8:
sd x14,0x000000000(x11)
ori x11,x11,0x000000446
andi x11,x11,-8
li x15,0x05B62C3B1
sw x15,0x000000000(x11)
li x9,0x07E6A66DD
amomin.w.aqrl x13,x9, (x11)
bne x15,x13, inner_fail6_w_random_loop6_w_7
lwu x16,(x11)
blt x9,x13,mem_val_w_random_loop6_w_7
mv x9,x13
.global mem_val_w_random_loop6_w_7
mem_val_w_random_loop6_w_7:
mv x9,x9
bne x16,x9,inner_fail6_w_random_loop6_w_7
j pass6_w_random_loop6_w_7
.global inner_fail6_w_random_loop6_w_7
inner_fail6_w_random_loop6_w_7:
jr x20
.global pass6_w_random_loop6_w_7
pass6_w_random_loop6_w_7:
ld x10,0x000000000(x11)
ori x11,x11,0x000000792
andi x11,x11,-8
li x15,0x0297369BE
sw x15,0x000000000(x11)
li x9,0x00B7BFCA5
amomin.w.aq x13,x9, (x11)
bne x15,x13, inner_fail6_w_random_loop6_w_6
lwu x16,(x11)
blt x9,x13,mem_val_w_random_loop6_w_6
mv x9,x13
.global mem_val_w_random_loop6_w_6
mem_val_w_random_loop6_w_6:
mv x9,x9
bne x16,x9,inner_fail6_w_random_loop6_w_6
j pass6_w_random_loop6_w_6
.global inner_fail6_w_random_loop6_w_6
inner_fail6_w_random_loop6_w_6:
jr x20
.global pass6_w_random_loop6_w_6
pass6_w_random_loop6_w_6:
lw x9,0x000000000(x11)
ori x11,x11,0x000000448
andi x11,x11,-8
li x15,0x0408642D5
sw x15,0x000000000(x11)
li x9,0x022B08D26
amomin.w x13,x9, (x11)
bne x15,x13, inner_fail6_w_random_loop6_w_5
lwu x16,(x11)
blt x9,x13,mem_val_w_random_loop6_w_5
mv x9,x13
.global mem_val_w_random_loop6_w_5
mem_val_w_random_loop6_w_5:
mv x9,x9
bne x16,x9,inner_fail6_w_random_loop6_w_5
j pass6_w_random_loop6_w_5
.global inner_fail6_w_random_loop6_w_5
inner_fail6_w_random_loop6_w_5:
jr x20
.global pass6_w_random_loop6_w_5
pass6_w_random_loop6_w_5:
ld x9,0x000000000(x11)
ori x11,x11,0x00000024B
andi x11,x11,-8
li x15,0x00381BACB
sw x15,0x000000000(x11)
li x9,0x0794E7F25
amomin.w.aq x13,x9, (x11)
bne x15,x13, inner_fail6_w_random_loop6_w_4
lwu x16,(x11)
blt x9,x13,mem_val_w_random_loop6_w_4
mv x9,x13
.global mem_val_w_random_loop6_w_4
mem_val_w_random_loop6_w_4:
mv x9,x9
bne x16,x9,inner_fail6_w_random_loop6_w_4
j pass6_w_random_loop6_w_4
.global inner_fail6_w_random_loop6_w_4
inner_fail6_w_random_loop6_w_4:
jr x20
.global pass6_w_random_loop6_w_4
pass6_w_random_loop6_w_4:
sw x13,0x000000000(x11)
ori x11,x11,0x000000237
andi x11,x11,-8
li x15,0x01194218D
sw x15,0x000000000(x11)
li x9,0x04936820F
amomin.w.rl x13,x9, (x11)
bne x15,x13, inner_fail6_w_random_loop6_w_3
lwu x16,(x11)
blt x9,x13,mem_val_w_random_loop6_w_3
mv x9,x13
.global mem_val_w_random_loop6_w_3
mem_val_w_random_loop6_w_3:
mv x9,x9
bne x16,x9,inner_fail6_w_random_loop6_w_3
j pass6_w_random_loop6_w_3
.global inner_fail6_w_random_loop6_w_3
inner_fail6_w_random_loop6_w_3:
jr x20
.global pass6_w_random_loop6_w_3
pass6_w_random_loop6_w_3:
ld x10,0x000000000(x11)
ori x11,x11,0x000000363
andi x11,x11,-8
li x15,0x07227D3AD
sw x15,0x000000000(x11)
li x9,0x05F78CA66
amomin.w.rl x13,x9, (x11)
bne x15,x13, inner_fail6_w_random_loop6_w_2
lwu x16,(x11)
blt x9,x13,mem_val_w_random_loop6_w_2
mv x9,x13
.global mem_val_w_random_loop6_w_2
mem_val_w_random_loop6_w_2:
mv x9,x9
bne x16,x9,inner_fail6_w_random_loop6_w_2
j pass6_w_random_loop6_w_2
.global inner_fail6_w_random_loop6_w_2
inner_fail6_w_random_loop6_w_2:
jr x20
.global pass6_w_random_loop6_w_2
pass6_w_random_loop6_w_2:
lbu x10,0x000000000(x11)
ori x11,x11,0x00000058E
andi x11,x11,-8
li x15,0x00C270D93
sw x15,0x000000000(x11)
li x9,0x002A6C5F1
amomin.w x13,x9, (x11)
bne x15,x13, inner_fail6_w_random_loop6_w_1
lwu x16,(x11)
blt x9,x13,mem_val_w_random_loop6_w_1
mv x9,x13
.global mem_val_w_random_loop6_w_1
mem_val_w_random_loop6_w_1:
mv x9,x9
bne x16,x9,inner_fail6_w_random_loop6_w_1
j pass6_w_random_loop6_w_1
.global inner_fail6_w_random_loop6_w_1
inner_fail6_w_random_loop6_w_1:
jr x20
.global pass6_w_random_loop6_w_1
pass6_w_random_loop6_w_1:
lw x9,0x000000000(x11)
ori x11,x11,0x000000098
andi x11,x11,-8
li x15,0x05C781D40
sw x15,0x000000000(x11)
li x9,0x034AC137F
amomin.w.rl x13,x9, (x11)
bne x15,x13, inner_fail6_w
lwu x16,(x11)
blt x9,x13,mem_val_w
mv x9,x13
.global mem_val_w
mem_val_w:
mv x9,x9
bne x16,x9,inner_fail6_w
j pass6_w
.global inner_fail6_w
inner_fail6_w:
jr x20
.global pass6_w
pass6_w:
ld x9,0x000000000(x11)
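# check_amominu_w: as above, but bltu selects the unsigned minimum as the
# expected memory value.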
.global check_amominu_w
check_amominu_w:
ori x11,x11,0x000000389
andi x11,x11,-8
li x15,0x05E4D63E2
sw x15,0x000000000(x11)
li x9,0x02D512224
amominu.w.aqrl x13,x9, (x11)
bne x15,x13, inner_fail7_w_random_loop7_w_9
lwu x16,(x11)
bltu x9,x13,mem_val2_w_random_loop7_w_9
mv x9,x13
.global mem_val2_w_random_loop7_w_9
mem_val2_w_random_loop7_w_9:
mv x9,x9
bne x16,x9,inner_fail7_w_random_loop7_w_9
j pass7_w_random_loop7_w_9
.global inner_fail7_w_random_loop7_w_9
inner_fail7_w_random_loop7_w_9:
jr x20
.global pass7_w_random_loop7_w_9
pass7_w_random_loop7_w_9:
lwu x10,0x000000000(x11)
ori x11,x11,0x0000003FF
andi x11,x11,-8
li x15,0x010D6E9AC
sw x15,0x000000000(x11)
li x9,0x0175E181C
amominu.w x13,x9, (x11)
bne x15,x13, inner_fail7_w_random_loop7_w_8
lwu x16,(x11)
bltu x9,x13,mem_val2_w_random_loop7_w_8
mv x9,x13
.global mem_val2_w_random_loop7_w_8
mem_val2_w_random_loop7_w_8:
mv x9,x9
bne x16,x9,inner_fail7_w_random_loop7_w_8
j pass7_w_random_loop7_w_8
.global inner_fail7_w_random_loop7_w_8
inner_fail7_w_random_loop7_w_8:
jr x20
.global pass7_w_random_loop7_w_8
pass7_w_random_loop7_w_8:
lb x8,0x000000000(x11)
ori x11,x11,0x0000003AC
andi x11,x11,-8
li x15,0x034B1B094
sw x15,0x000000000(x11)
li x9,0x01B3473D0
amominu.w x13,x9, (x11)
bne x15,x13, inner_fail7_w_random_loop7_w_7
lwu x16,(x11)
bltu x9,x13,mem_val2_w_random_loop7_w_7
mv x9,x13
.global mem_val2_w_random_loop7_w_7
mem_val2_w_random_loop7_w_7:
mv x9,x9
bne x16,x9,inner_fail7_w_random_loop7_w_7
j pass7_w_random_loop7_w_7
.global inner_fail7_w_random_loop7_w_7
inner_fail7_w_random_loop7_w_7:
jr x20
.global pass7_w_random_loop7_w_7
pass7_w_random_loop7_w_7:
lwu x10,0x000000000(x11)
ori x11,x11,0x000000481
andi x11,x11,-8
li x15,0x03BBBA594
sw x15,0x000000000(x11)
li x9,0x036F4E0BF
amominu.w.rl x13,x9, (x11)
bne x15,x13, inner_fail7_w_random_loop7_w_6
lwu x16,(x11)
bltu x9,x13,mem_val2_w_random_loop7_w_6
mv x9,x13
.global mem_val2_w_random_loop7_w_6
mem_val2_w_random_loop7_w_6:
mv x9,x9
bne x16,x9,inner_fail7_w_random_loop7_w_6
j pass7_w_random_loop7_w_6
.global inner_fail7_w_random_loop7_w_6
inner_fail7_w_random_loop7_w_6:
jr x20
.global pass7_w_random_loop7_w_6
pass7_w_random_loop7_w_6:
c.sw x14,0x00(x11)
ori x11,x11,0x000000762
andi x11,x11,-8
li x15,0x06885DD89
sw x15,0x000000000(x11)
li x9,0x07284B345
amominu.w.rl x13,x9, (x11)
bne x15,x13, inner_fail7_w_random_loop7_w_5
lwu x16,(x11)
bltu x9,x13,mem_val2_w_random_loop7_w_5
mv x9,x13
.global mem_val2_w_random_loop7_w_5
mem_val2_w_random_loop7_w_5:
mv x9,x9
bne x16,x9,inner_fail7_w_random_loop7_w_5
j pass7_w_random_loop7_w_5
.global inner_fail7_w_random_loop7_w_5
inner_fail7_w_random_loop7_w_5:
jr x20
.global pass7_w_random_loop7_w_5
pass7_w_random_loop7_w_5:
lwu x8,0x000000000(x11)
ori x11,x11,0x0000005FC
andi x11,x11,-8
li x15,0x00716551C
sw x15,0x000000000(x11)
li x9,0x068F18322
amominu.w x13,x9, (x11)
bne x15,x13, inner_fail7_w_random_loop7_w_4
lwu x16,(x11)
bltu x9,x13,mem_val2_w_random_loop7_w_4
mv x9,x13
.global mem_val2_w_random_loop7_w_4
mem_val2_w_random_loop7_w_4:
mv x9,x9
bne x16,x9,inner_fail7_w_random_loop7_w_4
j pass7_w_random_loop7_w_4
.global inner_fail7_w_random_loop7_w_4
inner_fail7_w_random_loop7_w_4:
jr x20
.global pass7_w_random_loop7_w_4
pass7_w_random_loop7_w_4:
lh x9,0x000000000(x11)
ori x11,x11,0x000000649
andi x11,x11,-8
li x15,0x005F7CEAB
sw x15,0x000000000(x11)
li x9,0x064863584
amominu.w x13,x9, (x11)
bne x15,x13, inner_fail7_w_random_loop7_w_3
lwu x16,(x11)
bltu x9,x13,mem_val2_w_random_loop7_w_3
mv x9,x13
.global mem_val2_w_random_loop7_w_3
mem_val2_w_random_loop7_w_3:
mv x9,x9
bne x16,x9,inner_fail7_w_random_loop7_w_3
j pass7_w_random_loop7_w_3
.global inner_fail7_w_random_loop7_w_3
inner_fail7_w_random_loop7_w_3:
jr x20
.global pass7_w_random_loop7_w_3
pass7_w_random_loop7_w_3:
lh x9,0x000000000(x11)
ori x11,x11,0x0000002BF
andi x11,x11,-8
li x15,0x0007AFAB9
sw x15,0x000000000(x11)
li x9,0x060444D42
amominu.w.aqrl x13,x9, (x11)
bne x15,x13, inner_fail7_w_random_loop7_w_2
lwu x16,(x11)
bltu x9,x13,mem_val2_w_random_loop7_w_2
mv x9,x13
.global mem_val2_w_random_loop7_w_2
mem_val2_w_random_loop7_w_2:
mv x9,x9
bne x16,x9,inner_fail7_w_random_loop7_w_2
j pass7_w_random_loop7_w_2
.global inner_fail7_w_random_loop7_w_2
inner_fail7_w_random_loop7_w_2:
jr x20
.global pass7_w_random_loop7_w_2
pass7_w_random_loop7_w_2:
c.sw x14,0x00(x11)
ori x11,x11,0x000000746
andi x11,x11,-8
li x15,0x015DD171F
sw x15,0x000000000(x11)
li x9,0x062C80FA1
amominu.w.aqrl x13,x9, (x11)
bne x15,x13, inner_fail7_w_random_loop7_w_1
lwu x16,(x11)
bltu x9,x13,mem_val2_w_random_loop7_w_1
mv x9,x13
.global mem_val2_w_random_loop7_w_1
mem_val2_w_random_loop7_w_1:
mv x9,x9
bne x16,x9,inner_fail7_w_random_loop7_w_1
j pass7_w_random_loop7_w_1
.global inner_fail7_w_random_loop7_w_1
inner_fail7_w_random_loop7_w_1:
jr x20
.global pass7_w_random_loop7_w_1
pass7_w_random_loop7_w_1:
lwu x8,0x000000000(x11)
ori x11,x11,0x000000798
andi x11,x11,-8
li x15,0x062C82089
sw x15,0x000000000(x11)
li x9,0x01E9309E4
amominu.w.aq x13,x9, (x11)
bne x15,x13, inner_fail7_w
lwu x16,(x11)
bltu x9,x13,mem_val2_w
mv x9,x13
.global mem_val2_w
mem_val2_w:
mv x9,x9
bne x16,x9,inner_fail7_w
j pass7_w
.global inner_fail7_w
inner_fail7_w:
jr x20
.global pass7_w
pass7_w:
sw x15,0x000000000(x11)
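# check_amomax_w: bge selects the signed maximum (keep x9 if it is not below
# the returned old value, otherwise take x13) as the expected memory value.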
.global check_amomax_w
check_amomax_w:
ori x11,x11,0x0000002F9
andi x11,x11,-8
li x15,0x060A8AA2E
sw x15,0x000000000(x11)
li x9,0x04A99DF10
amomax.w.rl x13,x9, (x11)
bne x15,x13, inner_fail8_w_random_loop8_w_9
lwu x16,(x11)
bge x9,x13,mem_val3_w_random_loop8_w_9
mv x9,x13
.global mem_val3_w_random_loop8_w_9
mem_val3_w_random_loop8_w_9:
mv x9,x9
bne x16,x9,inner_fail8_w_random_loop8_w_9
j pass8_w_random_loop8_w_9
.global inner_fail8_w_random_loop8_w_9
inner_fail8_w_random_loop8_w_9:
jr x20
.global pass8_w_random_loop8_w_9
pass8_w_random_loop8_w_9:
ld x10,0x000000000(x11)
ori x11,x11,0x000000732
andi x11,x11,-8
li x15,0x04CDD7CEC
sw x15,0x000000000(x11)
li x9,0x06A329A02
amomax.w x13,x9, (x11)
bne x15,x13, inner_fail8_w_random_loop8_w_8
lwu x16,(x11)
bge x9,x13,mem_val3_w_random_loop8_w_8
mv x9,x13
.global mem_val3_w_random_loop8_w_8
mem_val3_w_random_loop8_w_8:
mv x9,x9
bne x16,x9,inner_fail8_w_random_loop8_w_8
j pass8_w_random_loop8_w_8
.global inner_fail8_w_random_loop8_w_8
inner_fail8_w_random_loop8_w_8:
jr x20
.global pass8_w_random_loop8_w_8
pass8_w_random_loop8_w_8:
lb x8,0x000000000(x11)
ori x11,x11,0x0000000FE
andi x11,x11,-8
li x15,0x047CB7274
sw x15,0x000000000(x11)
li x9,0x01F4CA5F0
amomax.w.rl x13,x9, (x11)
bne x15,x13, inner_fail8_w_random_loop8_w_7
lwu x16,(x11)
bge x9,x13,mem_val3_w_random_loop8_w_7
mv x9,x13
.global mem_val3_w_random_loop8_w_7
mem_val3_w_random_loop8_w_7:
mv x9,x9
bne x16,x9,inner_fail8_w_random_loop8_w_7
j pass8_w_random_loop8_w_7
.global inner_fail8_w_random_loop8_w_7
inner_fail8_w_random_loop8_w_7:
jr x20
.global pass8_w_random_loop8_w_7
pass8_w_random_loop8_w_7:
c.lw x10,0x00(x11)
ori x11,x11,0x00000017B
andi x11,x11,-8
li x15,0x06907A3BF
sw x15,0x000000000(x11)
li x9,0x04EE0E3D6
amomax.w x13,x9, (x11)
bne x15,x13, inner_fail8_w_random_loop8_w_6
lwu x16,(x11)
bge x9,x13,mem_val3_w_random_loop8_w_6
mv x9,x13
.global mem_val3_w_random_loop8_w_6
mem_val3_w_random_loop8_w_6:
mv x9,x9
bne x16,x9,inner_fail8_w_random_loop8_w_6
j pass8_w_random_loop8_w_6
.global inner_fail8_w_random_loop8_w_6
inner_fail8_w_random_loop8_w_6:
jr x20
.global pass8_w_random_loop8_w_6
pass8_w_random_loop8_w_6:
ld x9,0x000000000(x11)
ori x11,x11,0x0000004B5
andi x11,x11,-8
li x15,0x064D0405A
sw x15,0x000000000(x11)
li x9,0x055181ACC
amomax.w x13,x9, (x11)
bne x15,x13, inner_fail8_w_random_loop8_w_5
lwu x16,(x11)
bge x9,x13,mem_val3_w_random_loop8_w_5
mv x9,x13
.global mem_val3_w_random_loop8_w_5
mem_val3_w_random_loop8_w_5:
mv x9,x9
bne x16,x9,inner_fail8_w_random_loop8_w_5
j pass8_w_random_loop8_w_5
.global inner_fail8_w_random_loop8_w_5
inner_fail8_w_random_loop8_w_5:
jr x20
.global pass8_w_random_loop8_w_5
pass8_w_random_loop8_w_5:
lh x9,0x000000000(x11)
ori x11,x11,0x000000435
andi x11,x11,-8
li x15,0x06717F289
sw x15,0x000000000(x11)
li x9,0x06EFAB4E7
amomax.w.rl x13,x9, (x11)
bne x15,x13, inner_fail8_w_random_loop8_w_4
lwu x16,(x11)
bge x9,x13,mem_val3_w_random_loop8_w_4
mv x9,x13
.global mem_val3_w_random_loop8_w_4
mem_val3_w_random_loop8_w_4:
mv x9,x9
bne x16,x9,inner_fail8_w_random_loop8_w_4
j pass8_w_random_loop8_w_4
.global inner_fail8_w_random_loop8_w_4
inner_fail8_w_random_loop8_w_4:
jr x20
.global pass8_w_random_loop8_w_4
pass8_w_random_loop8_w_4:
lhu x8,0x000000000(x11)
ori x11,x11,0x0000003B3
andi x11,x11,-8
li x15,0x078E44D9B
sw x15,0x000000000(x11)
li x9,0x030811859
amomax.w x13,x9, (x11)
bne x15,x13, inner_fail8_w_random_loop8_w_3
lwu x16,(x11)
bge x9,x13,mem_val3_w_random_loop8_w_3
mv x9,x13
.global mem_val3_w_random_loop8_w_3
mem_val3_w_random_loop8_w_3:
mv x9,x9
bne x16,x9,inner_fail8_w_random_loop8_w_3
j pass8_w_random_loop8_w_3
.global inner_fail8_w_random_loop8_w_3
inner_fail8_w_random_loop8_w_3:
jr x20
.global pass8_w_random_loop8_w_3
pass8_w_random_loop8_w_3:
lbu x10,0x000000000(x11)
ori x11,x11,0x0000002E5
andi x11,x11,-8
li x15,0x00F24F357
sw x15,0x000000000(x11)
li x9,0x041914D6D
amomax.w.aq x13,x9, (x11)
bne x15,x13, inner_fail8_w_random_loop8_w_2
lwu x16,(x11)
bge x9,x13,mem_val3_w_random_loop8_w_2
mv x9,x13
.global mem_val3_w_random_loop8_w_2
mem_val3_w_random_loop8_w_2:
mv x9,x9
bne x16,x9,inner_fail8_w_random_loop8_w_2
j pass8_w_random_loop8_w_2
.global inner_fail8_w_random_loop8_w_2
inner_fail8_w_random_loop8_w_2:
jr x20
.global pass8_w_random_loop8_w_2
pass8_w_random_loop8_w_2:
lw x9,0x000000000(x11)
ori x11,x11,0x0000006F7
andi x11,x11,-8
li x15,0x07368E155
sw x15,0x000000000(x11)
li x9,0x034EBC21E
amomax.w.rl x13,x9, (x11)
bne x15,x13, inner_fail8_w_random_loop8_w_1
lwu x16,(x11)
bge x9,x13,mem_val3_w_random_loop8_w_1
mv x9,x13
.global mem_val3_w_random_loop8_w_1
mem_val3_w_random_loop8_w_1:
mv x9,x9
bne x16,x9,inner_fail8_w_random_loop8_w_1
j pass8_w_random_loop8_w_1
.global inner_fail8_w_random_loop8_w_1
inner_fail8_w_random_loop8_w_1:
jr x20
.global pass8_w_random_loop8_w_1
pass8_w_random_loop8_w_1:
c.lw x9,0x00(x11)
ori x11,x11,0x000000111
andi x11,x11,-8
li x15,0x04E348410
sw x15,0x000000000(x11)
li x9,0x02F408D8A
amomax.w x13,x9, (x11)
bne x15,x13, inner_fail8_w
lwu x16,(x11)
bge x9,x13,mem_val3_w
mv x9,x13
.global mem_val3_w
mem_val3_w:
mv x9,x9
bne x16,x9,inner_fail8_w
j pass8_w
.global inner_fail8_w
inner_fail8_w:
jr x20
.global pass8_w
pass8_w:
ld x10,0x000000000(x11)
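# check_amomaxu_w: bgeu selects the unsigned maximum as the expected memory
# value before the final compare against the reloaded word.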
.global check_amomaxu_w
check_amomaxu_w:
ori x11,x11,0x00000079C
andi x11,x11,-8
li x15,0x0645C07A0
sw x15,0x000000000(x11)
li x9,0x04AD79D18
amomaxu.w.aq x13,x9, (x11)
bne x15,x13, inner_fail9_w_random_loop9_w_9
lwu x16,(x11)
bgeu x9,x13,mem_val4_w_random_loop9_w_9
mv x9,x13
.global mem_val4_w_random_loop9_w_9
mem_val4_w_random_loop9_w_9:
mv x9,x9
bne x16,x9,inner_fail9_w_random_loop9_w_9
j pass9_w_random_loop9_w_9
.global inner_fail9_w_random_loop9_w_9
inner_fail9_w_random_loop9_w_9:
jr x20
.global pass9_w_random_loop9_w_9
pass9_w_random_loop9_w_9:
lwu x10,0x000000000(x11)
ori x11,x11,0x000000543
andi x11,x11,-8
li x15,0x00068BE2F
sw x15,0x000000000(x11)
li x9,0x07E422D19
amomaxu.w.aqrl x13,x9, (x11)
bne x15,x13, inner_fail9_w_random_loop9_w_8
lwu x16,(x11)
bgeu x9,x13,mem_val4_w_random_loop9_w_8
mv x9,x13
.global mem_val4_w_random_loop9_w_8
mem_val4_w_random_loop9_w_8:
mv x9,x9
bne x16,x9,inner_fail9_w_random_loop9_w_8
j pass9_w_random_loop9_w_8
.global inner_fail9_w_random_loop9_w_8
inner_fail9_w_random_loop9_w_8:
jr x20
.global pass9_w_random_loop9_w_8
pass9_w_random_loop9_w_8:
c.sw x14,0x00(x11)
ori x11,x11,0x0000007D5
andi x11,x11,-8
li x15,0x01EE34CEB
sw x15,0x000000000(x11)
li x9,0x026F10D3C
amomaxu.w.rl x13,x9, (x11)
bne x15,x13, inner_fail9_w_random_loop9_w_7
lwu x16,(x11)
bgeu x9,x13,mem_val4_w_random_loop9_w_7
mv x9,x13
.global mem_val4_w_random_loop9_w_7
mem_val4_w_random_loop9_w_7:
mv x9,x9
bne x16,x9,inner_fail9_w_random_loop9_w_7
j pass9_w_random_loop9_w_7
.global inner_fail9_w_random_loop9_w_7
inner_fail9_w_random_loop9_w_7:
jr x20
.global pass9_w_random_loop9_w_7
pass9_w_random_loop9_w_7:
ld x9,0x000000000(x11)
ori x11,x11,0x00000066C
andi x11,x11,-8
li x15,0x02A1BABC2
sw x15,0x000000000(x11)
li x9,0x07401252D
amomaxu.w x13,x9, (x11)
bne x15,x13, inner_fail9_w_random_loop9_w_6
lwu x16,(x11)
bgeu x9,x13,mem_val4_w_random_loop9_w_6
mv x9,x13
.global mem_val4_w_random_loop9_w_6
mem_val4_w_random_loop9_w_6:
mv x9,x9
bne x16,x9,inner_fail9_w_random_loop9_w_6
j pass9_w_random_loop9_w_6
.global inner_fail9_w_random_loop9_w_6
inner_fail9_w_random_loop9_w_6:
jr x20
.global pass9_w_random_loop9_w_6
pass9_w_random_loop9_w_6:
ld x9,0x000000000(x11)
ori x11,x11,0x00000020F
andi x11,x11,-8
li x15,0x03B5F8A96
sw x15,0x000000000(x11)
li x9,0x02CC0ABBE
amomaxu.w.aqrl x13,x9, (x11)
bne x15,x13, inner_fail9_w_random_loop9_w_5
lwu x16,(x11)
bgeu x9,x13,mem_val4_w_random_loop9_w_5
mv x9,x13
.global mem_val4_w_random_loop9_w_5
mem_val4_w_random_loop9_w_5:
mv x9,x9
bne x16,x9,inner_fail9_w_random_loop9_w_5
j pass9_w_random_loop9_w_5
.global inner_fail9_w_random_loop9_w_5
inner_fail9_w_random_loop9_w_5:
jr x20
.global pass9_w_random_loop9_w_5
pass9_w_random_loop9_w_5:
lh x9,0x000000000(x11)
ori x11,x11,0x000000749
andi x11,x11,-8
li x15,0x00635D3A5
sw x15,0x000000000(x11)
li x9,0x003225469
amomaxu.w.aqrl x13,x9, (x11)
bne x15,x13, inner_fail9_w_random_loop9_w_4
lwu x16,(x11)
bgeu x9,x13,mem_val4_w_random_loop9_w_4
mv x9,x13
.global mem_val4_w_random_loop9_w_4
mem_val4_w_random_loop9_w_4:
mv x9,x9
bne x16,x9,inner_fail9_w_random_loop9_w_4
j pass9_w_random_loop9_w_4
.global inner_fail9_w_random_loop9_w_4
inner_fail9_w_random_loop9_w_4:
jr x20
.global pass9_w_random_loop9_w_4
pass9_w_random_loop9_w_4:
lbu x8,0x000000000(x11)
ori x11,x11,0x000000427
andi x11,x11,-8
li x15,0x01ED36E1D
sw x15,0x000000000(x11)
li x9,0x01E43EC62
amomaxu.w x13,x9, (x11)
bne x15,x13, inner_fail9_w_random_loop9_w_3
lwu x16,(x11)
bgeu x9,x13,mem_val4_w_random_loop9_w_3
mv x9,x13
.global mem_val4_w_random_loop9_w_3
mem_val4_w_random_loop9_w_3:
mv x9,x9
bne x16,x9,inner_fail9_w_random_loop9_w_3
j pass9_w_random_loop9_w_3
.global inner_fail9_w_random_loop9_w_3
inner_fail9_w_random_loop9_w_3:
jr x20
.global pass9_w_random_loop9_w_3
pass9_w_random_loop9_w_3:
lw x10,0x000000000(x11)
ori x11,x11,0x0000000A3
andi x11,x11,-8
li x15,0x009EC8ECB
sw x15,0x000000000(x11)
li x9,0x00BA8EB32
amomaxu.w.aqrl x13,x9, (x11)
bne x15,x13, inner_fail9_w_random_loop9_w_2
lwu x16,(x11)
bgeu x9,x13,mem_val4_w_random_loop9_w_2
mv x9,x13
.global mem_val4_w_random_loop9_w_2
mem_val4_w_random_loop9_w_2:
mv x9,x9
bne x16,x9,inner_fail9_w_random_loop9_w_2
j pass9_w_random_loop9_w_2
.global inner_fail9_w_random_loop9_w_2
inner_fail9_w_random_loop9_w_2:
jr x20
.global pass9_w_random_loop9_w_2
pass9_w_random_loop9_w_2:
ld x10,0x000000000(x11)
ori x11,x11,0x00000079E
andi x11,x11,-8
li x15,0x068C6DEF7
sw x15,0x000000000(x11)
li x9,0x04A08CB88
amomaxu.w.rl x13,x9, (x11)
bne x15,x13, inner_fail9_w_random_loop9_w_1
lwu x16,(x11)
bgeu x9,x13,mem_val4_w_random_loop9_w_1
mv x9,x13
.global mem_val4_w_random_loop9_w_1
mem_val4_w_random_loop9_w_1:
mv x9,x9
bne x16,x9,inner_fail9_w_random_loop9_w_1
j pass9_w_random_loop9_w_1
.global inner_fail9_w_random_loop9_w_1
inner_fail9_w_random_loop9_w_1:
jr x20
.global pass9_w_random_loop9_w_1
pass9_w_random_loop9_w_1:
lw x8,0x000000000(x11)
ori x11,x11,0x0000001CF
andi x11,x11,-8
li x15,0x075D9C419
sw x15,0x000000000(x11)
li x9,0x012F2221F
amomaxu.w.rl x13,x9, (x11)
bne x15,x13, inner_fail9_w
lwu x16,(x11)
bgeu x9,x13,mem_val4_w
mv x9,x13
.global mem_val4_w
mem_val4_w:
mv x9,x9
bne x16,x9,inner_fail9_w
j pass9_w
.global inner_fail9_w
inner_fail9_w:
jr x20
.global pass9_w
pass9_w:
sd x13,0x000000000(x11)
DATA_CACHE_CLIV
DATA_CACHE_DIS
j start
.global success
success:
EXIT
.global fail
fail:
FAIL
#******this region is added by generator******
#****** source: Advanced-Microelectronics-Group/OpenC910_Modified — smart_run/tests/cases/ISA/ISA_FP/ct_fpu_smoke.s ******
/*Copyright 2019-2021 T-Head Semiconductor Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
.text
.align 6
.global main
main:
##############################################################
# NUMBER and MACRO DEFINITION
##############################################################
.set HpsNaN, 0x7c01
.set HnsNaN, 0xfc01
.set HpqNaN, 0x7e00
.set HnqNaN, 0xfe00
.set HpInf, 0x7c00
.set HnInf, 0xfc00
.set HpNorm, 0x3800 #//0.5
.set HnNorm, 0xb800 #//-0.5
.set HpDeNorm, 0x0001
.set HnDeNorm, 0x8001
.set HpZero, 0x0000
.set HnZero, 0x8000
.set HpLFN, 0x7bff
.set HnLFN, 0xfbff
.set HpMIN, 0x0400
.set HnMIN, 0x8400
.set HpSPECIAL_1, 0x0002 # // denorm + denorm
.set HnSPECIAL_1, 0x8002 # // -denorm + -denorm
.set HpOne, 0x3c00
.set HnOne, 0xbc00
.set HpONE, 0x3c00
.set HnONE, 0xbc00
.set Hp1p1, 0x3e00
.set Hn1p1, 0xbe00
.set HpDeNormMost, 0x03ff
.set HnDeNormMost, 0x83ff
#//==========================================================
#// single format
#//==========================================================
.set SpsNaN, 0x7f800001 #//0_111'1111'1_000'0000'0000'0000'0000'0001
.set SnsNaN, 0xff800001 #//1_111'1111'1_000'0000'0000'0000'0000'0001
.set SpqNaN, 0x7fc00000 #//0_111'1111'1_100'0000'0000'0000'0000'0000
.set SnqNaN, 0xffc00000 #//1_111'1111'1_100'0000'0000'0000'0000'0000
.set SpInf, 0x7f800000 #//0_111'1111'1_000'0000'0000'0000'0000'0000
.set SnInf, 0xff800000 #//1_111'1111'1_000'0000'0000'0000'0000'0000
.set SpNorm, 0x3f000000 #//0_011'1111'0_000'0000'0000'0000'0000'0000 (+0.5)
.set SnNorm, 0xbf000000 #//1_011'1111'0_000'0000'0000'0000'0000'0000 (-0.5)
.set SpDeNorm, 0x00000001 #//0_000'0000'0_000'0000'0000'0000'0000'0001
.set SnDeNorm, 0x80000001 #//1_000'0000'0_000'0000'0000'0000'0000'0001
.set SpZero, 0x00000000 #//0_000'0000'0_000'0000'0000'0000'0000'0000
.set SnZero, 0x80000000 #//1_000'0000'0_000'0000'0000'0000'0000'0000
.set SpLFN, 0x7f7fffff #//0_111'1111'0_111'1111'1111'1111'1111'1111
.set SnLFN, 0xff7fffff #//1_111'1111'0_111'1111'1111'1111'1111'1111
.set SpMIN, 0x00800000 #//0_000'0000'1_000'0000'0000'0000'0000'0000
.set SnMIN, 0x80800000 #//1_000'0000'1_000'0000'0000'0000'0000'0000
.set SpSPECIAL_1, 0x00000002
.set SnSPECIAL_1, 0x80000002
.set SpOne, 0x3f800000
.set SnOne, 0xbf800000
.set SpONE, 0x3f800000
.set SnONE, 0xbf800000
.set Sp1p1, 0x3fc00000
.set Sn1p1, 0xbfc00000
.set SpDeNormMost, 0x007fffff #//0_000'0000'0111'_ffff...ffff
.set SnDeNormMost, 0x807fffff #//1_000'0000'0111'_ffff...ffff
#//==========================================================
#// double format
#//==========================================================
.set DpsNaN, 0x7ff0000000000001 #//0_111'1111'1111'_0000'0...0'0001
.set DpsNaNH, 0x7ff00000
.set DpsNaNL, 0x00000001
.set DnsNaN, 0xfff0000000000001 #//1_111'1111'1111'_0000'0...0'0001
.set DnsNaNH, 0xfff00000
.set DnsNaNL, 0x00000001
.set DpqNaN, 0x7ff8000000000000 #//0_111'1111'1111'_1000'0...0'0000
.set DpqNaNH, 0x7ff80000
.set DpqNaNL, 0x00000000
.set DnqNaN, 0xfff8000000000000 #//1_111'1111'1111'_1000'0...0'0000
.set DnqNaNH, 0xfff80000
.set DnqNaNL, 0x00000000
.set DpInf, 0x7ff0000000000000 #//0_111'1111'1111'_0000'0...0'0000
.set DpInfH, 0x7ff00000
.set DpInfL, 0x00000000
.set DnInf, 0xfff0000000000000 #//1_111'1111'1111'_0000'0...0'0000
.set DnInfH, 0xfff00000
.set DnInfL, 0x00000000
.set DpNorm, 0x3fe0000000000000 #//0_011'1111'1110'_0000'0...0'0000 (+0.5)
.set DpNormH, 0x3fe00000
.set DpNormL, 0x00000000
.set DnNorm, 0xbfe0000000000000 #//1_011'1111'1110'_0000'0...0'0000 (-0.5)
.set DnNormH, 0xbfe00000
.set DnNormL, 0x00000000
.set DpDeNorm, 0x0000000000000001 #//0_000'0000'0000'_0000'0...0'0001
.set DpDeNormH, 0x00000000
.set DpDeNormL, 0x00000001
.set DnDeNorm, 0x8000000000000001 #//1_000'0000'0000'_0000'0...0'0001
.set DnDeNormH, 0x80000000
.set DnDeNormL, 0x00000001
.set DpZero, 0x0000000000000000 #//0_000'0000'0000'_0000'0...0'0000
.set DpZeroH, 0x00000000
.set DpZeroL, 0x00000000
.set DnZero, 0x8000000000000000 #//1_000'0000'0000'_0000'0...0'0000
.set DnZeroH, 0x80000000
.set DnZeroL, 0x00000000
.set DpLFN, 0x7fefffffffffffff #//0_111'1111'1110'_1111'1...1'1111
.set DpLFNH, 0x7fefffff
.set DpLFNL, 0xffffffff
.set DnLFN, 0xffefffffffffffff #//1_111'1111'1110'_1111'1...1'1111
.set DnLFNH, 0xffefffff
.set DnLFNL, 0xffffffff
.set DpMIN, 0x0010000000000000 #//0_000'0000'0001'_0000'0...0'0000
.set DpMINH, 0x00100000
.set DpMINL, 0x00000000
.set DnMIN, 0x8010000000000000
.set DnMINH, 0x80100000
.set DnMINL, 0x00000000
.set DpSPECIAL_1, 0x0000000000000002
.set DnSPECIAL_1, 0x8000000000000002
.set DpOne, 0x3ff0000000000000
.set DnOne, 0xbff0000000000000
.set DpONE, 0x3ff0000000000000
.set DnONE, 0xbff0000000000000
.set Dp1p1, 0x3ff8000000000000
.set Dn1p1, 0xbff8000000000000
#//xuwj modified, add extra SNaN/QNaN patterns
.set DpqNaN2, 0x7ff800000000000f #//0_111'1111'1111'_1000'0000...0'1111
.set DpqNaN3, 0x7ff80000000000f0 #//0_111'1111'1111'_1000'0000...'1111'0000
.set DnqNaN2, 0xfff8000000000f00 #//1_111'1111'1111'_1000'0000...0'1111'0000'0000
.set DnqNaN3, 0xfff800000000f000 #//1_111'1111'1111'_1000'0000...'1111'0000'0000'0000
.set DpsNaN2, 0x7ff00000000f0000 #//0_111'1111'1111'_0000'0...0'0001'0000'0000'0000'0000
.set DpsNaN3, 0x7ff0000000f00000 #//0_111'1111'1111'_0000'0...0'0001'0000'0000'0000'0000'0000
.set DnsNaN2, 0x7ff000000f000000 #//0_111'1111'1111'_0000'0...0'0001'0000'0000'0000'0000'0000'0000
.set DnsNaN3, 0x7ff00000f0000000 #//0_111'1111'1111'_0000'0...0'0001'0000'0000'0000'0000'0000'0000'0000
.set DpDeNormMost, 0x000fffffffffffff #//0_000'0000'0000'_ffff...ffff
.set DnDeNormMost, 0x800fffffffffffff #//1_000'0000'0000'_ffff...ffff
#//==========================================================
#// deformal
#//==========================================================
#// Single
.set SpNmMAX, 0x7f7fffff #//0_111'1111'0_111'1111'1111'1111'1111'1111
.set SpNmMIN, 0x00800000 #//0_000'0000'1_000'0000'0000'0000'0000'0000
.set SnNmMAX, 0xff7fffff #//1_111'1111'0_111'1111'1111'1111'1111'1111
.set SnNmMIN, 0x80800000 #//1_000'0000'1_000'0000'0000'0000'0000'0000
.set SpDmMAX, 0x007fffff #//0_000'0000'0_111'1111'1111'1111'1111'1111
.set SpDmMIN, 0x00000001 #//0_000'0000'0_000'0000'0000'0000'0000'0001
.set SnDmMAX, 0x807fffff #//1_000'0000'0_111'1111'1111'1111'1111'1111
.set SnDmMIN, 0x80000001 #//1_000'0000'0_000'0000'0000'0000'0000'0001
.set SpOne, 0x3f800000
.set SnOne, 0xbf800000
.set SpqNaN1, 0x7fc00001 #//0_111'1111'1_100'0000'0000'0000'0000'0000
.set SnqNaN1, 0xffc00001 #//1_111'1111'1_100'0000'0000'0000'0000'0000
.set SpOne, 0x3f800000
.set SnOne, 0xbf800000
#// Double
.set DpNmMAX, 0x7fefffffffffffff #//0_111'1111'1110'_1111'1...1'1111
.set DpNmMIN, 0x0010000000000000 #//0_000'0000'0001'_0000'0...0'0000
.set DnNmMAX, 0xffefffffffffffff #//1_111'1111'1110'_1111'1...1'1111
.set DnNmMIN, 0x8010000000000000
.set DpDmMAX, 0x000fffffffffffff #//0_000'0000'0000'_1111'1...1'1111
.set DpDmMIN, 0x0000000000000001 #//0_000'0000'0000'_0000'0...0'0001
.set DnDmMAX, 0x800fffffffffffff #//1_000'0000'0000'_1111'1...1'1111
.set DnDmMIN, 0x8000000000000001 #//1_000'0000'0000'_0000'0...0'0001
.set DpOne, 0x3ff0000000000000
.set DnOne, 0xbff0000000000000
.set DpqNaN1, 0x7ff8000000000001 #//0_111'1111'1111'_1000'0...0'0001
.set DnqNaN1, 0xfff8000000000001 #//1_111'1111'1111'_1000'0...0'0001
.set DpOne, 0x3ff0000000000000
.set DnOne, 0xbff0000000000000
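#//==========================================================
#// helper macros
#//==========================================================
#// FPUMOVH/FPUMOVS/FPUMOVD load an immediate into the given temp register and
#// move it into an FPR with fmv.w.x / fmv.d.x; FPUMOVH first ORs in
#// 0xffff0000 so the 16-bit half value is NaN-boxed in the 32-bit pattern.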
.macro FPUMOVH FDESREG, IMME16, IMM_REG
li \IMM_REG, \IMME16
li x3, 0xffff0000
or \IMM_REG,\IMM_REG,x3
fmv.w.x \FDESREG, \IMM_REG
.endm
.macro FPUMOVS FDESREG, IMME32, IMM_REG
li \IMM_REG, \IMME32
fmv.w.x \FDESREG, \IMM_REG
.endm
.macro FPUMOVD FDESREG,IMME64,IMM_REG
li \IMM_REG, \IMME64
fmv.d.x \FDESREG, \IMM_REG
.endm
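#// The *CHECK* macros read back the raw FPR bits with fmv.x.d and build the
#// expected NaN-boxed pattern (upper bits forced to 1) in integer temps.
#// FPUSCHECKS/FPUSCHECKD leave the final bne to the caller; the H variants
#// branch to the fail label themselves.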
.macro FPUSCHECKS FDESREG, EXPNUM, IMM_REG0, IMM_REG1
li \IMM_REG0,0xffffffff00000000
li \IMM_REG1, \EXPNUM
or \IMM_REG0,\IMM_REG1,\IMM_REG0
fmv.x.d \IMM_REG1,\FDESREG
.endm
.macro FPUHCHECKH FDESREG, EXPNUM, IMM_REG0, IMM_REG1
li \IMM_REG0,0xffffffffffff0000
li \IMM_REG1, \EXPNUM
or \IMM_REG0,\IMM_REG1,\IMM_REG0
fmv.x.d \IMM_REG1,\FDESREG
bne \IMM_REG0, \IMM_REG1, RESULTFAIL
.endm
.macro FPUSCHECKD FDESREG,EXPNUML, IMM_REG0, IMM_REG1
fmv.x.d \IMM_REG0, \FDESREG
li \IMM_REG1, \EXPNUML
.endm
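#// FPUEXCHK/FPUEXCHECK read the accrued exception flags from the fxcr CSR,
#// clear them with csrrc, and mask out the flag field; FPUEXCHK leaves the
#// compare to the caller, FPUEXCHECK branches to EXCEPTION_FAIL itself.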
.macro FPUEXCHK EXPNUM32, IMM_REG0, IMM_REG1
li \IMM_REG0, 0x7f
csrrs \IMM_REG1,fxcr,x0
csrrc x0,fxcr, \IMM_REG0 #//clear flag bits
and \IMM_REG0, \IMM_REG0,\IMM_REG1
li \IMM_REG1, \EXPNUM32
.endm
.macro FPUSCHECKH FDESREG, EXPNUM, EXP_FLAG
li x3, 0xffffffffffff0000
li x4, \EXPNUM
or x3, x4,x3
fmv.x.d x4,\FDESREG
bne x3, x4, RESULTFAIL
FPUEXCHECK \EXP_FLAG
.endm
.macro FPUEXCHECK EXPNUM32 #//xuwj modified, add bne instruction in macro,use x3x4 default
#//to see if fccee is enabled
li x3, 0x3f
csrrs x4,fxcr,x0
csrrc x0,fxcr, x3 #//clear flag bits
and x3, x3,x4
li x4, \EXPNUM32
bne x3, x4,EXCEPTION_FAIL
.endm
.macro FPUSCHECKH_RESULT FDESREG, EXPNUM
li x3, 0xffffffffffff0000
li x4, \EXPNUM
or x3, x4,x3
fmv.x.d x4,\FDESREG
bne x3, x4, RESULTFAIL
.endm
.macro FPURMCH IMME2 , IMM_REG0
li x18, 0xf8ffffff
csrrs \IMM_REG0, fxcr, x0
and \IMM_REG0, \IMM_REG0,x18
li x18,\IMME2
slli x18, x18,24
or x18, x18,\IMM_REG0
csrrw x0, fxcr,x18
.endm
.macro FPUQNANCH IMME2, IMM_REG0 #//imm_reg0 is a tmp reg
csrrs x18, fxcr,x0
li \IMM_REG0, 0xff7fffff
and x18,\IMM_REG0,x18
li \IMM_REG0, \IMME2
slli \IMM_REG0,\IMM_REG0, 23 #//left shift 23 bits
or \IMM_REG0, \IMM_REG0,x18 #//set the current QNAN
csrrw x0, fxcr,\IMM_REG0 #//put in relative vcr
.endm
.macro ADDS_H FDESREG0, FDESREG1, RESULT, EXPNUM32
FPUMOVH f12, HpZero , x3 #//PUT CASE TOP
fadd.h f12, \FDESREG0 , \FDESREG1
FPUSCHECKH f12, \RESULT, \EXPNUM32
.endm
.macro ADDS_VR FDESREG0, FDESREG1, RESULT, EXPNUM32
FPUMOVS f12, SpZero , x3 #//PUT CASE TOP
fadd.s f12, \FDESREG0 , \FDESREG1
#//CHECK
FPUSCHECKS f12, \RESULT, x3,x4
bne x3, x4, RESULTFAIL
FPUEXCHK \EXPNUM32 , x3, x4 #//close self check
bne x3,x4, FLAGCFAIL
.endm
.macro SUBS_SC IMM1, IMM2, IMM3, EXPNUM32
FPUMOVS f1 , \IMM1, x3
FPUMOVS f2 , \IMM2, x3
fsub.s f12, f1 , f2
#//CHECK
FPUSCHECKS f12,\IMM3,x3,x4
bne x3,x4, RESULTFAIL
FPUEXCHK \EXPNUM32 , x4, x3
bne x3,x4, FLAGCFAIL
.endm
.macro SUBD_SC SOURCE1, SOURCE2, RESULT,EXPNUM32
FPUMOVD f1 , \SOURCE1 ,x3
FPUMOVD f2 , \SOURCE2, x3
fsub.d f12, f1 , f2
FPUSCHECKD f12, \RESULT, x3, x4
bne x3,x4, RESULTFAIL
FPUEXCHK \EXPNUM32 , x3, x4
bne x3,x4, FLAGCFAIL
.endm
#.include "./fp_macro_num_def.h"
####################################################################
# MAIN PROGRAM #
####################################################################
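# Each block below sweeps the half-precision op over the five rounding modes:
# FPURMCH n writes mode n into the rounding-mode field of fxcr, the op runs,
# and FPUSCHECKH checks the NaN-boxed result together with the accrued flag
# bits. The *_STATIC_* labels repeat the sweep with the rounding mode encoded
# in the instruction, and the *_OF_* / *_IX_* cases drive the overflow and
# invalid/NaN flag paths.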
.global FADD
FADD:
FPURMCH 0 , x4
FPUMOVH f4 , 0x43ff, x4
FPUMOVH f7 , 0x5000, x4
ADDS_H f4, f7, 0x5080, 0x0021
FPURMCH 1 , x4
ADDS_H f4, f7, 0x507f, 0x0021
FPURMCH 2 , x4
ADDS_H f4, f7, 0x507f, 0x0021
FPURMCH 3 , x4
ADDS_H f4, f7, 0x5080, 0x0021
FPURMCH 4 , x4
ADDS_H f4, f7, 0x5080, 0x0021
FPUMOVH f1, 0x43ff,x4
FPUMOVH f2, 0x5000,x4
.global FADDH_STATIC_1
FADDH_STATIC_1:
fadd.h f3,f1,f2,rne
FPUSCHECKH f3, 0x5080, 0x0021
.global FADDH_STATIC_2
FADDH_STATIC_2:
fadd.h f3,f1,f2,rtz
FPUSCHECKH f3, 0x507f, 0x0021
.global FADDH_STATIC_3
FADDH_STATIC_3:
fadd.h f3,f1,f2,rdn
FPUSCHECKH f3, 0x507f, 0x0021
.global FADDH_STATIC_4
FADDH_STATIC_4:
fadd.h f3,f1,f2,rup
FPUSCHECKH f3, 0x5080, 0x0021
.global FADDH_STATIC_5
FADDH_STATIC_5:
fadd.h f3,f1,f2,rmm
FPUSCHECKH f3, 0x5080, 0x0021
.global FADDH_OF_1
FADDH_OF_1:
FPURMCH 0 , x4
FPUMOVH f4 , HpLFN, x4
FPUMOVH f7 , HpLFN, x4
ADDS_H f4, f7, HpInf, 0x0025
.global FADDH_IX_1
FADDH_IX_1:
FPURMCH 0 , x4
FPUMOVH f4 , HpsNaN, x4
FPUMOVH f7 , HpLFN, x4
ADDS_H f4, f7, HpqNaN, 0x0030
.global FSUB
FSUB:
FPUMOVH f4 , 0xc3ff, x4
FPUMOVH f7 , 0x5000, x4
FPURMCH 0 , x4
fsub.h f3,f7,f4
FPUSCHECKH f3, 0x5080, 0x0021
FPURMCH 1 , x4
fsub.h f3,f7,f4
FPUSCHECKH f3, 0x507f, 0x0021
FPURMCH 2 , x4
fsub.h f3,f7,f4
FPUSCHECKH f3, 0x507f, 0x0021
FPURMCH 3 , x4
fsub.h f3,f7,f4
FPUSCHECKH f3, 0x5080, 0x0021
FPURMCH 4 , x4
fsub.h f3,f7,f4
FPUSCHECKH f3, 0x5080, 0x0021
FPUMOVH f1, 0xc3ff,x4
FPUMOVH f2, 0x5000,x4
.global FSUBH_STATIC_1
FSUBH_STATIC_1:
fsub.h f3,f2,f1,rne
FPUSCHECKH f3, 0x5080, 0x0021
.global FSUBH_STATIC_2
FSUBH_STATIC_2:
fsub.h f3,f2,f1,rtz
FPUSCHECKH f3, 0x507f, 0x0021
.global FSUBH_STATIC_3
FSUBH_STATIC_3:
fsub.h f3,f2,f1,rdn
FPUSCHECKH f3, 0x507f, 0x0021
.global FSUBH_STATIC_4
FSUBH_STATIC_4:
fsub.h f3,f2,f1,rup
FPUSCHECKH f3, 0x5080, 0x0021
.global FSUBH_STATIC_5
FSUBH_STATIC_5:
fsub.h f3,f2,f1,rmm
FPUSCHECKH f3, 0x5080, 0x0021
.global FSUBH_OF_1
FSUBH_OF_1:
FPURMCH 0 , x4
FPUMOVH f4 , HnLFN, x4
FPUMOVH f7 , HpLFN, x4
fsub.h f3,f7,f4
FPUSCHECKH f3, HpInf, 0x0025
.global FSUBH_IX_1
FSUBH_IX_1:
FPURMCH 0 , x4
FPUMOVH f4 , HpsNaN, x4
FPUMOVH f7 , HpLFN, x4
fsub.h f3,f7,f4
FPUSCHECKH f3, HpqNaN, 0x0030
.global FMUL
FMUL:
FPUMOVH f4 , 0x3400, x4
FPUMOVH f7 , 0x000f, x4
FPURMCH 0 , x4
fmul.h f3,f7,f4
FPUSCHECKH f3, 0x0004, 0x0023
FPURMCH 1 , x4
fmul.h f3,f7,f4
FPUSCHECKH f3, 0x0003, 0x0023
FPURMCH 2 , x4
fmul.h f3,f7,f4
FPUSCHECKH f3, 0x0003, 0x0023
FPURMCH 3 , x4
fmul.h f3,f7,f4
FPUSCHECKH f3, 0x0004, 0x0023
FPURMCH 4 , x4
fmul.h f3,f7,f4
FPUSCHECKH f3, 0x0004, 0x0023
FPUMOVH f1, 0x3400,x4
FPUMOVH f2, 0x000f,x4
.global FMULH_STATIC_1
FMULH_STATIC_1:
fmul.h f3,f2,f1,rne
FPUSCHECKH f3, 0x0004, 0x0023
.global FMULH_STATIC_2
FMULH_STATIC_2:
fmul.h f3,f2,f1,rtz
FPUSCHECKH f3, 0x0003, 0x0023
.global FMULH_STATIC_3
FMULH_STATIC_3:
fmul.h f3,f2,f1,rdn
FPUSCHECKH f3, 0x0003, 0x0023
.global FMULH_STATIC_4
FMULH_STATIC_4:
fmul.h f3,f2,f1,rup
FPUSCHECKH f3, 0x0004, 0x0023
.global FMULH_STATIC_5
FMULH_STATIC_5:
fmul.h f3,f2,f1,rmm
FPUSCHECKH f3, 0x0004, 0x0023
.global FMULH_OF_1
FMULH_OF_1:
FPURMCH 0 , x4
FPUMOVH f4 , HpLFN, x4
FPUMOVH f7 , HpLFN, x4
fmul.h f3,f7,f4
FPUSCHECKH f3, HpInf, 0x0025
.global FMULH_IX_1
FMULH_IX_1:
FPURMCH 0 , x4
FPUMOVH f4 , HpsNaN, x4
FPUMOVH f7 , HpLFN, x4
fmul.h f3,f7,f4
FPUSCHECKH f3, HpqNaN, 0x0030
.global fmadd
fmadd:
FPUMOVH f4 , 0x3c00, x4
FPUMOVH f7 , 0x43ff, x4
FPUMOVH f3 , 0x5000, x4
FPURMCH 0 , x4
fmadd.h f5,f4,f7,f3
FPUSCHECKH f5, 0x5080, 0x0021
FPURMCH 1 , x4
FPUMOVH f3 , 0x5000, x4
fmadd.h f5,f4,f7,f3
FPUSCHECKH f5, 0x507f, 0x0021
FPURMCH 2 , x4
FPUMOVH f3 , 0x5000, x4
fmadd.h f5,f4,f7,f3
FPUSCHECKH f5, 0x507f, 0x0021
FPURMCH 3 , x4
FPUMOVH f3 , 0x5000, x4
fmadd.h f5,f4,f7,f3
FPUSCHECKH f5, 0x5080, 0x0021
FPURMCH 4 , x4
FPUMOVH f3 , 0x5000, x4
fmadd.h f5,f4,f7,f3
FPUSCHECKH f5, 0x5080, 0x0021
FPUMOVH f1, 0x3c00,x4
FPUMOVH f2, 0x43ff,x4
.global FMADD_STATIC_1
FMADD_STATIC_1:
FPUMOVH f3 , 0x5000, x4
fmadd.h f5,f1,f2,f3,rne
FPUSCHECKH f5, 0x5080, 0x0021
.global FMADD_STATIC_2
FMADD_STATIC_2:
FPUMOVH f3 , 0x5000, x4
fmadd.h f5,f1,f2,f3,rtz
FPUSCHECKH f5, 0x507f, 0x0021
.global FMADD_STATIC_3
FMADD_STATIC_3:
FPUMOVH f3 , 0x5000, x4
fmadd.h f5,f1,f2,f3,rdn
FPUSCHECKH f5, 0x507f, 0x0021
.global FMADD_STATIC_4
FMADD_STATIC_4:
FPUMOVH f3 , 0x5000, x4
fmadd.h f5,f1,f2,f3,rup
FPUSCHECKH f5, 0x5080, 0x0021
.global FMADD_STATIC_5
FMADD_STATIC_5:
FPUMOVH f3 , 0x5000, x4
fmadd.h f5,f1,f2,f3,rmm
FPUSCHECKH f5, 0x5080, 0x0021
.global FMADD_OF_1
FMADD_OF_1:
FPURMCH 0 , x4
FPUMOVH f4 , HpLFN, x4
FPUMOVH f7 , HpLFN, x4
FPUMOVH f3 , HpLFN, x4
fmadd.h f5,f4,f7,f3
FPUSCHECKH f5, HpInf, 0x0025
.global FMADD_IX_1
FMADD_IX_1:
FPURMCH 0 , x4
FPUMOVH f4 , HpsNaN, x4
FPUMOVH f7 , HpLFN, x4
FPUMOVH f3 , HpLFN, x4
fmadd.h f5,f4,f7,f3
FPUSCHECKH f5, HpqNaN, 0x0030
.global FNMADD
FNMADD:
FPUMOVH f4 , 0xbc00, x4
FPUMOVH f7 , 0x43ff, x4
FPUMOVH f3 , 0xd000, x4
FPURMCH 0 , x4
fnmadd.h f5,f4,f7,f3
FPUSCHECKH f5, 0x5080, 0x0021
FPURMCH 1 , x4
FPUMOVH f3 , 0xd000, x4
fnmadd.h f5,f4,f7,f3
FPUSCHECKH f5, 0x507f, 0x0021
FPURMCH 2 , x4
FPUMOVH f3 , 0xd000, x4
fnmadd.h f5,f4,f7,f3
FPUSCHECKH f5, 0x507f, 0x0021
FPURMCH 3 , x4
FPUMOVH f3 , 0xd000, x4
fnmadd.h f5,f4,f7,f3
FPUSCHECKH f5, 0x5080, 0x0021
FPURMCH 4 , x4
FPUMOVH f3 , 0xd000, x4
fnmadd.h f5,f4,f7,f3
FPUSCHECKH f5, 0x5080, 0x0021
FPUMOVH f1, 0xbc00,x4
FPUMOVH f2, 0x43ff,x4
.global FNMADD_STATIC_1
FNMADD_STATIC_1:
FPUMOVH f3 , 0xd000, x4
fnmadd.h f5,f1,f2,f3,rne
FPUSCHECKH f5, 0x5080, 0x0021
.global FNMADD_STATIC_2
FNMADD_STATIC_2:
FPUMOVH f3 , 0xd000, x4
fnmadd.h f5,f1,f2,f3,rtz
FPUSCHECKH f5, 0x507f, 0x0021
.global FNMADD_STATIC_3
FNMADD_STATIC_3:
FPUMOVH f3 , 0xd000, x4
fnmadd.h f5,f1,f2,f3,rdn
FPUSCHECKH f5, 0x507f, 0x0021
.global FNMADD_STATIC_4
FNMADD_STATIC_4:
FPUMOVH f3 , 0xd000, x4
fnmadd.h f5,f1,f2,f3,rup
FPUSCHECKH f5, 0x5080, 0x0021
.global FNMADD_STATIC_5
FNMADD_STATIC_5:
FPUMOVH f3 , 0xd000, x4
fnmadd.h f5,f1,f2,f3,rmm
FPUSCHECKH f5, 0x5080, 0x0021
.global FNMADD_OF_1
FNMADD_OF_1:
FPURMCH 0 , x4
FPUMOVH f4 , HpLFN, x4
FPUMOVH f7 , HnLFN, x4
FPUMOVH f3 , HnLFN, x4
fnmadd.h f5,f4,f7,f3
FPUSCHECKH f5, HpInf, 0x0025
.global FNMADD_IX_1
FNMADD_IX_1:
FPURMCH 0 , x4
FPUMOVH f4 , HpsNaN, x4
FPUMOVH f7 , HpLFN, x4
FPUMOVH f3 , HpLFN, x4
fnmadd.h f5,f4,f7,f3
FPUSCHECKH f5, HpqNaN, 0x0030
.global FMSUB
FMSUB:
FPUMOVH f4 , 0x3c00, x4
FPUMOVH f7 , 0x43ff, x4
FPUMOVH f3 , 0xd000, x4
FPURMCH 0 , x4
fmsub.h f5,f4,f7,f3
FPUSCHECKH f5, 0x5080, 0x0021
FPURMCH 1 , x4
FPUMOVH f3 , 0xd000, x4
fmsub.h f5,f4,f7,f3
FPUSCHECKH f5, 0x507f, 0x0021
FPURMCH 2 , x4
FPUMOVH f3 , 0xd000, x4
fmsub.h f5,f4,f7,f3
FPUSCHECKH f5, 0x507f, 0x0021
FPURMCH 3 , x4
FPUMOVH f3 , 0xd000, x4
fmsub.h f5,f4,f7,f3
FPUSCHECKH f5, 0x5080, 0x0021
FPURMCH 4 , x4
FPUMOVH f3 , 0xd000, x4
fmsub.h f5,f4,f7,f3
FPUSCHECKH f5, 0x5080, 0x0021
FPUMOVH f1, 0x3c00,x4
FPUMOVH f2, 0x43ff,x4
.global FMSUB_STATIC_1
FMSUB_STATIC_1:
FPUMOVH f3 , 0xd000, x4
fmsub.h f5,f1,f2,f3,rne
FPUSCHECKH f5, 0x5080, 0x0021
.global FMSUB_STATIC_2
FMSUB_STATIC_2:
FPUMOVH f3 , 0xd000, x4
fmsub.h f5,f1,f2,f3,rtz
FPUSCHECKH f5, 0x507f, 0x0021
.global FMSUB_STATIC_3
FMSUB_STATIC_3:
FPUMOVH f3 , 0xd000, x4
fmsub.h f5,f1,f2,f3,rdn
FPUSCHECKH f5, 0x507f, 0x0021
.global FMSUB_STATIC_4
FMSUB_STATIC_4:
FPUMOVH f3 , 0xd000, x4
fmsub.h f5,f1,f2,f3,rup
FPUSCHECKH f5, 0x5080, 0x0021
.global FMSUB_STATIC_5
FMSUB_STATIC_5:
FPUMOVH f3 , 0xd000, x4
fmsub.h f5,f1,f2,f3,rmm
FPUSCHECKH f5, 0x5080, 0x0021
.global FMSUB_OF_1
FMSUB_OF_1:
FPURMCH 0 , x4
FPUMOVH f4 , HpLFN, x4
FPUMOVH f7 , HpLFN, x4
FPUMOVH f3 , HnLFN, x4
fmsub.h f5,f4,f7,f3
FPUSCHECKH f5, HpInf, 0x0025
.global FMSUB_IX_1
FMSUB_IX_1:
FPURMCH 0 , x4
FPUMOVH f4 , HpsNaN, x4
FPUMOVH f7 , HpLFN, x4
FPUMOVH f3 , HpLFN, x4
fmsub.h f5,f4,f7,f3
FPUSCHECKH f5, HpqNaN, 0x0030
.global FNMSUB
FNMSUB:
FPUMOVH f4 , 0xbc00, x4
FPUMOVH f7 , 0x43ff, x4
FPUMOVH f3 , 0x5000, x4
FPURMCH 0 , x4
fnmsub.h f5,f4,f7,f3
FPUSCHECKH f5, 0x5080, 0x0021
FPURMCH 1 , x4
FPUMOVH f3 , 0x5000, x4
fnmsub.h f5,f4,f7,f3
FPUSCHECKH f5, 0x507f, 0x0021
FPURMCH 2 , x4
FPUMOVH f3 , 0x5000, x4
fnmsub.h f5,f4,f7,f3
FPUSCHECKH f5, 0x507f, 0x0021
FPURMCH 3 , x4
FPUMOVH f3 , 0x5000, x4
fnmsub.h f5,f4,f7,f3
FPUSCHECKH f5, 0x5080, 0x0021
FPURMCH 4 , x4
FPUMOVH f3 , 0x5000, x4
fnmsub.h f5,f4,f7,f3
FPUSCHECKH f5, 0x5080, 0x0021
FPUMOVH f1, 0xbc00,x4
FPUMOVH f2, 0x43ff,x4
.global FNMSUB_STATIC_1
FNMSUB_STATIC_1:
FPUMOVH f3 , 0x5000, x4
fnmsub.h f5,f1,f2,f3,rne
FPUSCHECKH f5, 0x5080, 0x0021
.global FNMSUB_STATIC_2
FNMSUB_STATIC_2:
FPUMOVH f3 , 0x5000, x4
fnmsub.h f5,f1,f2,f3,rtz
FPUSCHECKH f5, 0x507f, 0x0021
.global FNMSUB_STATIC_3
FNMSUB_STATIC_3:
FPUMOVH f3 , 0x5000, x4
fnmsub.h f5,f1,f2,f3,rdn
FPUSCHECKH f5, 0x507f, 0x0021
.global FNMSUB_STATIC_4
FNMSUB_STATIC_4:
FPUMOVH f3 , 0x5000, x4
fnmsub.h f5,f1,f2,f3,rup
FPUSCHECKH f5, 0x5080, 0x0021
.global FNMSUB_STATIC_5
FNMSUB_STATIC_5:
FPUMOVH f3 , 0x5000, x4
fnmsub.h f5,f1,f2,f3,rmm
FPUSCHECKH f5, 0x5080, 0x0021
.global FNMSUB_OF_1
FNMSUB_OF_1:
FPURMCH 0 , x4
FPUMOVH f4 , HnLFN, x4
FPUMOVH f7 , HpLFN, x4
FPUMOVH f3 , HpLFN, x4
fnmsub.h f5,f4,f7,f3
FPUSCHECKH f5, HpInf, 0x0025
.global FNMSUB_IX_1
FNMSUB_IX_1:
FPURMCH 0 , x4
FPUMOVH f4 , HpsNaN, x4
FPUMOVH f7 , HpLFN, x4
FPUMOVH f3 , HpLFN, x4
fnmsub.h f5,f4,f7,f3
FPUSCHECKH f5, HpqNaN, 0x0030
.global FDIV
FDIV:
FPUMOVH f4 , 0x07ff, x4
FPUMOVH f7 , 0x4800, x4
FPURMCH 0 , x4
fdiv.h f3,f4,f7
FPUSCHECKH f3, 0x0100, 0x0023
FPURMCH 1 , x4
fdiv.h f3,f4,f7
FPUSCHECKH f3, 0x00ff, 0x0023
FPURMCH 2 , x4
fdiv.h f3,f4,f7
FPUSCHECKH f3, 0x00ff, 0x0023
FPURMCH 3 , x4
fdiv.h f3,f4,f7
FPUSCHECKH f3, 0x0100, 0x0023
FPURMCH 4 , x4
fdiv.h f3,f4,f7
FPUSCHECKH f3, 0x0100, 0x0023
FPUMOVH f1, 0x4800,x4
FPUMOVH f2, 0x07ff,x4
.global FDIV_STATIC_1
FDIV_STATIC_1:
fdiv.h f3,f2,f1,rne
FPUSCHECKH f3, 0x0100, 0x0023
.global FDIV_STATIC_2
FDIV_STATIC_2:
fdiv.h f3,f2,f1,rtz
FPUSCHECKH f3, 0x00ff, 0x0023
.global FDIV_STATIC_3
FDIV_STATIC_3:
fdiv.h f3,f2,f1,rdn
FPUSCHECKH f3, 0x00ff, 0x0023
.global FDIV_STATIC_4
FDIV_STATIC_4:
fdiv.h f3,f2,f1,rup
FPUSCHECKH f3, 0x0100, 0x0023
.global FDIV_STATIC_5
FDIV_STATIC_5:
fdiv.h f3,f2,f1,rmm
FPUSCHECKH f3, 0x0100, 0x0023
.global FDIV_IO
FDIV_IO:
FPUMOVH f4 , HpsNaN, x4
FPUMOVH f7 , HpNorm, x4
FPURMCH 0 , x4
fdiv.h f3,f4,f7
FPUSCHECKH f3, HpqNaN, 0x0030
.global FDIV_OF
FDIV_OF:
FPUMOVH f4 , HpLFN, x4
FPUMOVH f7 , HpNorm, x4
FPURMCH 0 , x4
fdiv.h f3,f4,f7
FPUSCHECKH f3, HpInf, 0x0025
.global FDIV_DZ
FDIV_DZ:
FPUMOVH f4 , HpNorm, x4
FPUMOVH f7 , HpZero, x4
FPURMCH 0 , x4
fdiv.h f3,f4,f7
FPUSCHECKH f3, HpInf, 0x0028
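# fsqrt.h and fmin.h/fmax.h special values: a signalling NaN (or a negative
# input to fsqrt) must return the canonical quiet NaN with the invalid flag
# raised (the 0x0030 flag pattern checked below), a quiet NaN propagates
# silently, and min/max with one quiet NaN operand return the other operand.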
.global FSQRT
FSQRT:
FPUMOVH f4, HpsNaN, x4
FPURMCH 0 , x4
fsqrt.h f3,f4
FPUSCHECKH f3, HpqNaN, 0x0030
FPUMOVH f4, HpqNaN, x4
FPURMCH 0 , x4
fsqrt.h f3,f4
FPUSCHECKH f3, HpqNaN, 0x0000
FPUMOVH f4, HpONE, x4
FPURMCH 0 , x4
fsqrt.h f3,f4
FPUSCHECKH f3, HpONE, 0x0000
FPUMOVH f4, HnDeNorm, x4
FPURMCH 0 , x4
fsqrt.h f3,f4
FPUSCHECKH f3, HpqNaN, 0x0030
FPUMOVH f4 , HpsNaN, x4
FPUMOVH f7 , HpsNaN, x4
fmin.h f3,f4,f7
FPUSCHECKH f3, HpqNaN, 0x0030
FPUMOVH f4 , HpqNaN, x4
FPUMOVH f7 , HpInf, x4
fmin.h f3,f4,f7
FPUSCHECKH f3, HpInf, 0x0000
FPUMOVH f4 , HpNorm, x4
FPUMOVH f7 , HnDeNorm, x4
fmin.h f3,f4,f7
FPUSCHECKH f3, HnDeNorm, 0x0000
FPUMOVH f4 , HpsNaN, x4
FPUMOVH f7 , HpsNaN, x4
fmax.h f3,f4,f7
FPUSCHECKH f3, HpqNaN, 0x0030
FPUMOVH f4 , HpqNaN, x4
FPUMOVH f7 , HpInf, x4
fmin.h f3,f4,f7
FPUSCHECKH f3, HpInf, 0x0000
FPUMOVH f4 , HpNorm, x4
FPUMOVH f7 , HnDeNorm, x4
fmin.h f3,f4,f7
FPUSCHECKH f3, HnDeNorm, 0x0000
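# Move and compare coverage: fmv.h.x / fmv.x.h round-trip a half value
# through memory, fclass.h classifies -inf and a quiet NaN, feq/fle/flt are
# checked with signalling and quiet NaN operands, and fsgnj/fsgnjn/fsgnjx
# exercise sign injection.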
.global FMVHX
FMVHX:
li x1,0x0fff
li x4,0x0fff
li x3, 0x2000
fmv.h.x f1,x1
fsh f1,0(x3)
lh x5,0(x3)
bne x5,x4,RESULTFAIL
.global FMVXH
FMVXH:
li x1,0xfbc0
li x3,0x2000
sd x1,0(x3)
flh f1,0(x3)
fmv.x.h x4,f1
li x1,0xfffffffffffffbc0
bne x1,x4,RESULTFAIL
.global FCLASSS_1
FCLASSS_1:
FPUMOVH f1,HnInf,x4
.global INST44H
INST44H:
fclass.h x1,f1
li x3,0x1
bne x1,x3,RESULTFAIL
FPUMOVH f1,HnqNaN,x4
.global FCLASS_2
FCLASS_2:
fclass.h x1,f1
li x3,0x200
bne x1,x3,RESULTFAIL
.global FEQSTESTH
FEQSTESTH:
FPUMOVH f0 , HpsNaN, x4
FPUMOVH f6 , HpNorm, x4
feq.h x1, f0, f6
bne x1,x0, FEQSFAIL
FPUEXCHK 0x30,x3,x4
bne x3,x4,FLAGCFAIL
FPUMOVH f2 , HpqNaN, x4
FPUMOVH f4 , HpInf, x4
.global INST61H
INST61H:
feq.h x1, f2, f4
bne x1,x0, FEQSFAIL
FPUEXCHK 0x0,x3,x4
bne x3,x4,FLAGCFAIL
FPUMOVH f7 , HnNorm, x4
.global INST62H
INST62H:
feq.h x1, f7,f7
li x3,0x1
bne x1,x3, FEQSFAIL
FPUEXCHK 0x0,x3,x4
bne x3,x4,FLAGCFAIL
.global FLESTESTH
FLESTESTH:
.global INST63H
INST63H:
FPUMOVH f0 , HpsNaN, x4
FPUMOVH f6 , HpNorm, x4
fle.h x1, f0, f6
bne x1,x0, FLESFAIL
FPUEXCHK 0x30,x3,x4
bne x3,x4,FLAGCFAIL
FPUMOVH f2 , HpqNaN, x4
FPUMOVH f4 , HpInf, x4
.global INST64H
INST64H:
fle.h x1, f2, f4
bne x1,x0, FLESFAIL
FPUEXCHK 0x30,x3,x4
bne x3,x4,FLAGCFAIL
FPUMOVH f7 , HnNorm, x4
FPUMOVH f6 , HpNorm, x4
.global INST65H
INST65H:
fle.h x1, f7,f6
li x3,0x1
bne x1,x3, FLESFAIL
FPUEXCHK 0x0,x3,x4
bne x3,x4,FLAGCFAIL
FPUMOVH f0 , HpsNaN, x4
FPUMOVH f6 , HpNorm, x4
.global FLTSTESTH
FLTSTESTH:
flt.h x1, f0, f6
bne x1,x0, FLTSFAIL
FPUEXCHK 0x30,x3,x4
bne x3,x4,FLAGCFAIL
FPUMOVH f2 , HpqNaN, x4
FPUMOVH f4 , HpInf, x4
.global INST67H
INST67H:
flt.h x1, f2, f4
bne x1,x0, FLTSFAIL
FPUEXCHK 0x30,x3,x4
bne x3,x4,FLAGCFAIL
FPUMOVH f7 , HnNorm, x4
FPUMOVH f6 , HpNorm, x4
.global INST68H
INST68H:
flt.h x1, f7,f6
li x3,0x1
bne x1,x3, FLTSFAIL
FPUEXCHK 0x0,x3,x4
bne x3,x4,FLAGCFAIL
.global FSGNJSH
FSGNJSH:
FPUMOVH f1,HpZero,x4
FPUMOVH f2,HnInf,x4
FPUMOVH f15,HpsNaN,x4
.global INST38H
INST38H:
fsgnj.h f15,f1,f2
FPUSCHECKH f15,HnZero,0x0000
.global FSGNJNSH
FSGNJNSH:
FPUMOVH f1,HpZero,x4
FPUMOVH f2,HpInf,x4
FPUMOVH f15,HpsNaN,x4
.global INST40H
INST40H:
fsgnjn.h f15,f1,f2
FPUSCHECKH f15,HnZero, 0x0000
.global FSGNJXSH
FSGNJXSH:
FPUMOVH f1,HnZero,x4
FPUMOVH f2,HpInf,x4
FPUMOVH f15,HpsNaN,x4
.global INST42H
INST42H:
fsgnjx.h f15,f1,f2
FPUSCHECKH f15,HnZero, 0x0000
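# Conversion coverage: fcvt.w.h / fcvt.wu.h / fcvt.l.h / fcvt.lu.h and the
# reverse fcvt.h.l / fcvt.h.lu, each swept over the rounding modes with the
# integer (or NaN-boxed half) result checked against the expected value.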
FPURMCH 0 , x4
FPUMOVH f0 , 0xbbff, x4
.global INST15_H
INST15_H:
fcvt.w.h x15, f0
li x1, 0xffffffffffffffff
bne x1,x15, RESULTFAIL
FPUEXCHK 0x00000021 , x4, x3
bne x3,x4, FLAGCFAIL
FPUMOVH f0 , 0xbc00, x4
.global INST15_1_H
INST15_1_H:
fcvt.w.h x15, f0, rne
li x1, 0xffffffffffffffff
bne x1,x15, RESULTFAIL
.global INST15_2_H
INST15_2_H:
fcvt.w.h x15, f0, rtz
li x1, 0xffffffffffffffff
bne x1,x15, RESULTFAIL
.global INST15_3_H
INST15_3_H:
fcvt.w.h x15, f0, rdn
li x1, 0xffffffffffffffff
bne x1,x15, RESULTFAIL
.global INST15_4_H
INST15_4_H:
fcvt.w.h x15, f0, rup
li x1, 0xffffffffffffffff
bne x1,x15, RESULTFAIL
.global INST15_5_H
INST15_5_H:
fcvt.w.h x15, f0, rmm
li x1, 0xffffffffffffffff
bne x1,x15, RESULTFAIL
FPUMOVH f0 , 0x3e00, x4
.global INST16_H
INST16_H:
fcvt.wu.h x15, f0
li x1, 0x00000002
bne x1,x15, RESULTFAIL
FPUEXCHK 0x00000021 , x4, x3
bne x3,x4, FLAGCFAIL
.global INST16_1_H
INST16_1_H:
fcvt.wu.h x15, f0, rne
li x1, 0x00000002
bne x1,x15, RESULTFAIL
.global INST16_2_H
INST16_2_H:
fcvt.wu.h x15, f0, rtz
li x1, 0x00000001
bne x1,x15, RESULTFAIL
.global INST16_3_H
INST16_3_H:
fcvt.wu.h x15, f0, rdn
li x1, 0x00000001
bne x1,x15, RESULTFAIL
.global INST16_4_H
INST16_4_H:
fcvt.wu.h x15, f0, rup
li x1, 0x00000002
bne x1,x15, RESULTFAIL
.global INST16_5_H
INST16_5_H:
fcvt.wu.h x15, f0, rmm
li x1, 0x00000002
bne x1,x15, RESULTFAIL
FPUMOVH f0 , 0xbbff, x4
.global INST17_H
INST17_H:
fcvt.l.h x15, f0
li x1, 0xffffffffffffffff
bne x1,x15, RESULTFAIL
FPUEXCHK 0x00000021 , x4, x3
bne x3,x4, FLAGCFAIL
FPUMOVH f0 , 0xbc00, x4
.global INST17_1_H
INST17_1_H:
fcvt.l.h x15, f0, rne
bne x1,x15, RESULTFAIL
FPUMOVH f0 , 0xbc00, x4
.global INST17_2_H
INST17_2_H:
fcvt.l.h x15, f0, rtz
bne x1,x15, RESULTFAIL
FPUMOVH f0 , 0xbc00, x4
.global INST17_3_H
INST17_3_H:
fcvt.l.h x15, f0, rdn
li x1,0xffffffffffffffff
bne x1,x15, RESULTFAIL
FPUMOVH f0 , 0xbc00, x4
.global INST17_4_H
INST17_4_H:
fcvt.l.h x15, f0, rup
bne x1,x15, RESULTFAIL
FPUMOVH f0 , 0xbc00, x4
.global INST17_5_H
INST17_5_H:
fcvt.l.h x15, f0, rmm
li x1,0xffffffffffffffff
bne x1,x15, RESULTFAIL
FPUMOVH f0 , 0x3e00, x4
.global FCVT_LUH_1
FCVT_LUH_1:
fcvt.lu.h x15, f0
li x1, 0x00000002
bne x1,x15, RESULTFAIL
FPUEXCHK 0x00000021 , x4, x3
bne x3,x4, FLAGCFAIL
.global FCVT_LUH_2
FCVT_LUH_2:
fcvt.lu.h x15, f0, rne
li x1, 0x00000002
bne x1,x15, RESULTFAIL
.global FCVT_LUH_3
FCVT_LUH_3:
fcvt.lu.h x15, f0, rtz
li x1, 0x00000001
bne x1,x15, RESULTFAIL
.global FCVT_LUH_4
FCVT_LUH_4:
fcvt.lu.h x15, f0, rdn
li x1, 0x00000001
bne x1,x15, RESULTFAIL
.global FCVT_LUH_5
FCVT_LUH_5:
fcvt.lu.h x15, f0, rup
li x1, 0x00000002
bne x1,x15, RESULTFAIL
.global FCVT_LUH_6
FCVT_LUH_6:
fcvt.lu.h x15, f0, rmm
li x1, 0x00000002
bne x1,x15, RESULTFAIL
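# Integer-to-half conversions: fcvt.h.l, fcvt.h.lu, fcvt.h.w and fcvt.h.wu under each static rounding mode.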
li x1,0xffffffffffffffff
.global FCVTHL
FCVTHL:
fcvt.h.l f15, x1
FPUSCHECKH_RESULT f15, 0xbc00
FPUMOVH f15, SpqNaN, x4
.global FCVTHL_1
FCVTHL_1:
fcvt.h.l f15, x1, rne
FPUSCHECKH_RESULT f15, 0xbc00
FPUMOVH f15, SpqNaN, x4
.global FCVTHL_2
FCVTHL_2:
fcvt.h.l f15, x1, rtz
FPUSCHECKH_RESULT f15, 0xbc00
FPUMOVH f15, SpqNaN, x4
.global FCVTHL_3
FCVTHL_3:
fcvt.h.l f15, x1, rdn
FPUSCHECKH_RESULT f15, 0xbc00
FPUMOVH f15, SpqNaN, x4
.global FCVTHL_4
FCVTHL_4:
fcvt.h.l f15, x1, rup
FPUSCHECKH_RESULT f15, 0xbc00
FPUMOVH f15, SpqNaN, x4
.global FCVTHL_5
FCVTHL_5:
fcvt.h.l f15, x1, rmm
FPUSCHECKH_RESULT f15, 0xbc00
li x1 , 0x0000000000000001
.global FCVTHLU
FCVTHLU:
fcvt.h.lu f15, x1
FPUSCHECKH_RESULT f15, 0x3c00
FPUMOVS f15, SpqNaN, x3
.global FCVTHLU_1
FCVTHLU_1:
fcvt.h.lu f15, x1, rne
FPUSCHECKH_RESULT f15, 0x3c00
FPUMOVS f15, SpqNaN, x3
.global FCVTHLU_2
FCVTHLU_2:
fcvt.h.lu f15, x1, rtz
FPUSCHECKH_RESULT f15, 0x3c00
FPUMOVS f15, SpqNaN, x3
.global FCVTHLU_3
FCVTHLU_3:
fcvt.h.lu f15, x1, rdn
FPUSCHECKH_RESULT f15, 0x3c00
FPUMOVS f15, SpqNaN, x3
.global FCVTHLU_4
FCVTHLU_4:
fcvt.h.lu f15, x1, rup
FPUSCHECKH_RESULT f15, 0x3c00
FPUMOVS f15, SpqNaN, x3
.global FCVTHLU_5
FCVTHLU_5:
fcvt.h.lu f15, x1, rmm
FPUSCHECKH_RESULT f15, 0x3c00
li x1 , 0xffffffff
.global INST_FCVTHW
INST_FCVTHW:
fcvt.h.w f15, x1
FPUSCHECKH_RESULT f15, 0xbc00
FPUMOVS f15, SpqNaN, x3
.global INST_FCVTHW_1
INST_FCVTHW_1:
fcvt.h.w f15, x1, rne
FPUSCHECKH_RESULT f15, 0xbc00
FPUMOVS f15, SpqNaN, x3
.global INST_FCVTHW_2
INST_FCVTHW_2:
fcvt.h.w f15, x1, rtz
FPUSCHECKH_RESULT f15, 0xbc00
FPUMOVS f15, SpqNaN, x3
.global INST_FCVTHW_3
INST_FCVTHW_3:
fcvt.h.w f15, x1, rdn
FPUSCHECKH_RESULT f15, 0xbc00
FPUMOVS f15, SpqNaN, x3
.global INST_FCVTHW_4
INST_FCVTHW_4:
fcvt.h.w f15, x1, rup
FPUSCHECKH_RESULT f15, 0xbc00
FPUMOVS f15, SpqNaN, x3
.global INST_FCVTHW_5
INST_FCVTHW_5:
fcvt.h.w f15, x1, rmm
FPUSCHECKH_RESULT f15, 0xbc00
li x1 , 0x00000001
.global INST_FCVTHWU
INST_FCVTHWU:
fcvt.h.wu f15, x1
FPUSCHECKH_RESULT f15, 0x3c00
FPUMOVS f15, SpqNaN, x3
.global INST_FCVTHWU_1
INST_FCVTHWU_1:
fcvt.h.wu f15, x1, rne
FPUSCHECKH_RESULT f15, 0x3c00
FPUMOVS f15, SpqNaN, x3
.global INST_FCVTHWU_2
INST_FCVTHWU_2:
fcvt.h.wu f15, x1, rtz
FPUSCHECKH_RESULT f15, 0x3c00
FPUMOVS f15, SpqNaN, x3
.global INST_FCVTHWU_3
INST_FCVTHWU_3:
fcvt.h.wu f15, x1, rdn
FPUSCHECKH_RESULT f15, 0x3c00
FPUMOVS f15, SpqNaN, x3
.global INST_FCVTHWU_4
INST_FCVTHWU_4:
fcvt.h.wu f15, x1, rup
FPUSCHECKH_RESULT f15, 0x3c00
FPUMOVS f15, SpqNaN, x3
.global INST_FCVTHWU_5
INST_FCVTHWU_5:
fcvt.h.wu f15, x1, rmm
FPUSCHECKH_RESULT f15, 0x3c00
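# Half/single/double conversions: the widening fcvt.s.h and fcvt.d.h must be exact, while the narrowing fcvt.h.d and fcvt.h.s are checked for overflow and under each static rounding mode.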
csrw fxcr,x0
FPUMOVH f12, HpLFN , x4
.global FCVTSH
FCVTSH:
fcvt.s.h f15, f12
FPUSCHECKS f15, 0x477fe000, x4, x3
bne x3,x4, RESULTFAIL
FPUEXCHK 0x00000000 , x4, x3
bne x3,x4, FLAGCFAIL
FPUMOVH f12, HpLFN , x4
.global FCVTDH
FCVTDH:
fcvt.d.h f15, f12
FPUSCHECKD f15, 0x40effc0000000000, x4, x3
bne x3,x4, RESULTFAIL
FPUEXCHK 0x00000000 , x4, x3
bne x3,x4, FLAGCFAIL
FPUMOVD f0 , DpLFN, x4
FPUMOVH f15, HpZero, x4
.global FCVTHD
FCVTHD:
fcvt.h.d f15, f0
FPUSCHECKH f15, HpInf, 0x0025
FPUMOVD f1, 0x3ff0000000000000, x3
.global FCVTHD_1
FCVTHD_1:
fcvt.h.d f15, f1, rne
FPUSCHECKH_RESULT f15, 0x3c00
FPUMOVH f15, HpZero, x4
.global FCVTHD_2
FCVTHD_2:
fcvt.h.d f15, f1, rtz
FPUSCHECKH_RESULT f15, 0x3c00
FPUMOVH f15, HpZero, x4
.global FCVTHD_3
FCVTHD_3:
fcvt.h.d f15, f1, rdn
FPUSCHECKH_RESULT f15, 0x3c00
FPUMOVH f15, HpZero, x4
.global FCVTHD_4
FCVTHD_4:
fcvt.h.d f15, f1, rup
FPUSCHECKH_RESULT f15, 0x3c00
FPUMOVH f15, HpZero, x4
.global FCVTHD_5
FCVTHD_5:
fcvt.h.d f15, f1, rmm
FPUSCHECKH_RESULT f15, 0x3c00
FPUMOVS f0 , SpLFN, x4
FPUMOVH f15, HpZero, x4
.global FCVTHS
FCVTHS:
fcvt.h.s f15, f0
FPUSCHECKH f15, HpInf, 0x0025
FPUMOVS f1, 0x3f800000, x3
.global FCVTHS_1
FCVTHS_1:
fcvt.h.s f15, f1, rne
FPUSCHECKH_RESULT f15, 0x3c00
FPUMOVH f15, HpZero, x4
.global FCVTHS_2
FCVTHS_2:
fcvt.h.s f15, f1, rtz
FPUSCHECKH_RESULT f15, 0x3c00
FPUMOVH f15, HpZero, x4
.global FCVTHS_3
FCVTHS_3:
fcvt.h.s f15, f1, rdn
FPUSCHECKH_RESULT f15, 0x3c00
FPUMOVH f15, HpZero, x4
.global FCVTHS_4
FCVTHS_4:
fcvt.h.s f15, f1, rup
FPUSCHECKH_RESULT f15, 0x3c00
FPUMOVH f15, HpZero, x4
.global FCVTHS_5
FCVTHS_5:
fcvt.h.s f15, f1, rmm
FPUSCHECKH_RESULT f15, 0x3c00
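# Single-precision add: ADDS_VR cases under each dynamic rounding mode, then fadd.s with each static rounding mode.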
FPURMCH 0 , x4
FPUMOVS f4 , 0x00800001, x4
FPUMOVS f7 , 0x80800000, x4
ADDS_VR f4, f7, 0x00000001, 0x0000
FPURMCH 1 , x4
FPUMOVS f0 , SpLFN, x4
FPUMOVS f2 , 0x73000000, x4
ADDS_VR f0, f2, SpLFN, 0x00000021
FPURMCH 3 , x4
FPUMOVS f0 , SpLFN, x4
FPUMOVS f2 , 0x73000000, x4
ADDS_VR f0, f2, SpInf, 0x00000025
FPURMCH 2 , x4
FPUMOVS f5 , 0x80800001, x4
FPUMOVS f6 , 0x00800000, x4
ADDS_VR f5, f6, 0x80000001, 0x0000
FPURMCH 4 , x4
FPUMOVS f4 , 0x00800001, x4
FPUMOVS f7 , 0x80800000, x4
ADDS_VR f4, f7, 0x00000001, 0x0
FPUMOVS f1, 0x3f800000,x3
FPUMOVS f2, 0x3f000000,x3
.global FADDS_STATIC_1
FADDS_STATIC_1:
fadd.s f3,f1,f2,rne
FPUSCHECKS f3, 0x3fc00000, x3,x4
bne x3,x4, RESULTFAIL
.global FADDS_STATIC_2
FADDS_STATIC_2:
fadd.s f3,f1,f2,rtz
FPUSCHECKS f3, 0x3fc00000, x3,x4
bne x3,x4, RESULTFAIL
.global FADDS_STATIC_3
FADDS_STATIC_3:
fadd.s f3,f1,f2,rdn
FPUSCHECKS f3, 0x3fc00000, x3,x4
bne x3,x4, RESULTFAIL
.global FADDS_STATIC_4
FADDS_STATIC_4:
fadd.s f3,f1,f2,rup
FPUSCHECKS f3, 0x3fc00000, x3,x4
bne x3,x4, RESULTFAIL
.global FADDS_STATIC_5
FADDS_STATIC_5:
fadd.s f3,f1,f2,rmm
FPUSCHECKS f3, 0x3fc00000, x3,x4
bne x3,x4, RESULTFAIL
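# Double-precision add: an overflowing sum under each dynamic rounding mode (infinity or the largest finite value depending on the mode), then static rounding-mode variants.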
FPURMCH 0 , x4
FPUMOVD f0 , DpLFN , x4
FPUMOVD f2 , 0x7c90000000000000 , x4
.global INST0
INST0:
fadd.d f12, f0 , f2
FPUSCHECKD f12, DpInf, x4, x3
bne x3,x4, RESULTFAIL
FPUEXCHK 0x00000025 , x4, x3
bne x3,x4, FLAGCFAIL
FPURMCH 1 , x4
FPUMOVD f0 , DpLFN , x4
FPUMOVD f2 , 0x7c90000000000000 , x4
.global INST1
INST1:
fadd.d f12, f0 , f2
FPUSCHECKD f12, DpLFN, x4, x3
bne x3,x4, RESULTFAIL
FPUEXCHK 0x00000021 , x4, x3
bne x3,x4, FLAGCFAIL
FPURMCH 3 , x4
FPUMOVD f0 , DpLFN , x4
FPUMOVD f2 , 0x7c90000000000000 , x4
.global INST2
INST2:
fadd.d f12, f0 , f2
FPUSCHECKD f12, DpInf, x4, x3
bne x3,x4, RESULTFAIL
FPUEXCHK 0x00000025 , x4, x3
bne x3,x4, FLAGCFAIL
FPURMCH 2 , x4
FPUMOVD f0 , DpLFN , x4
FPUMOVD f2 , 0x7c90000000000000 , x4
.global INST3
INST3:
fadd.d f12, f0 , f2
FPUSCHECKD f12, DpLFN, x4, x3
bne x3,x4, RESULTFAIL
FPUEXCHK 0x00000021 , x4, x3
bne x3,x4, FLAGCFAIL
FPURMCH 4 , x4
FPUMOVD f0 , DpLFN , x4
FPUMOVD f2 , 0x7c90000000000000 , x4
.global INST4
INST4:
fadd.d f12, f0 , f2
FPUSCHECKD f12, DpInf, x4, x3
bne x3,x4, RESULTFAIL
FPUEXCHK 0x00000025 , x4, x3
bne x3,x4, FLAGCFAIL
FPUMOVD f1, 0x3ff0000000000000,x3
FPUMOVD f2, 0x3fe0000000000000,x3
.global FADDD_STATIC_1
FADDD_STATIC_1:
fadd.d f3,f1,f2, rne
FPUSCHECKD f3, 0x3ff8000000000000,x4,x3
bne x3,x4,RESULTFAIL
.global FADDD_STATIC_2
FADDD_STATIC_2:
fadd.d f3,f1,f2, rtz
FPUSCHECKD f3, 0x3ff8000000000000,x4,x3
bne x3,x4,RESULTFAIL
.global FADDD_STATIC_3
FADDD_STATIC_3:
fadd.d f3,f1,f2, rdn
FPUSCHECKD f3, 0x3ff8000000000000,x4,x3
bne x3,x4,RESULTFAIL
.global FADDD_STATIC_4
FADDD_STATIC_4:
fadd.d f3,f1,f2, rup
FPUSCHECKD f3, 0x3ff8000000000000,x4,x3
bne x3,x4,RESULTFAIL
.global FADDD_STATIC_5
FADDD_STATIC_5:
fadd.d f3,f1,f2, rmm
FPUSCHECKD f3, 0x3ff8000000000000,x4,x3
bne x3,x4,RESULTFAIL
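# Single- and double-precision subtract: SUBS_SC/SUBD_SC cases under each dynamic rounding mode, then fsub.s/fsub.d with static rounding modes.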
FPURMCH 0 , x4
SUBS_SC 0x4dffffff, 0x40800000, 0x4dffffff, 0x00000021
FPURMCH 1 , x4
SUBS_SC SpLFN, 0xf3000000, SpLFN, 0x00000021
FPURMCH 3 , x4
SUBS_SC SpLFN, 0xf3000000, SpInf, 0x00000025
FPURMCH 2 , x4
SUBS_SC 0x80800001, 0x80800000, 0x80000001, 0x00000000
FPURMCH 4 , x4
SUBS_SC 0x4dffffff, 0x40800000, 0x4dffffff, 0x00000021
FPUMOVS f1, 0x3f800000,x3
FPUMOVS f2, 0x3f000000,x3
.global SUBS_STATIC_1
SUBS_STATIC_1:
fsub.s f3,f1,f2,rne
FPUSCHECKS f3, 0x3f000000,x3,x4
bne x3,x4,RESULTFAIL
.global SUBS_STATIC_2
SUBS_STATIC_2:
fsub.s f3,f1,f2,rtz
FPUSCHECKS f3, 0x3f000000,x3,x4
bne x3,x4,RESULTFAIL
.global SUBS_STATIC_3
SUBS_STATIC_3:
fsub.s f3,f1,f2,rdn
FPUSCHECKS f3, 0x3f000000,x3,x4
bne x3,x4,RESULTFAIL
.global SUBS_STATIC_4
SUBS_STATIC_4:
fsub.s f3,f1,f2,rup
FPUSCHECKS f3, 0x3f000000,x3,x4
bne x3,x4,RESULTFAIL
.global SUBS_STATIC_5
SUBS_STATIC_5:
fsub.s f3,f1,f2,rmm
FPUSCHECKS f3, 0x3f000000,x3,x4
bne x3,x4,RESULTFAIL
.global B1
B1:
FPURMCH 0 , x4
SUBD_SC 0x7fefffffffffffff, 0xfc90000000000000, 0x7ff0000000000000, 0x00000025
.global B2
B2:
FPURMCH 1 , x4
SUBD_SC 0x7fefffffffffffff, 0xfc90000000000000, 0x7fefffffffffffff, 0x00000021
.global B3
B3:
FPURMCH 3 , x4
SUBD_SC 0x7fefffffffffffff, 0xfc90000000000000, 0x7ff0000000000000, 0x00000025
.global B4
B4:
FPURMCH 2 , x4
SUBD_SC 0x7fefffffffffffff, 0xfc90000000000000, 0x7fefffffffffffff, 0x00000021
.global B5
B5:
FPURMCH 4 , x4
SUBD_SC 0x7fefffffffffffff, 0xfc90000000000000, 0x7ff0000000000000, 0x00000025
FPUMOVD f1, 0x3ff0000000000000, x3
FPUMOVD f2, 0x3fe0000000000000, x3
.global SUBD_STATIC_1
SUBD_STATIC_1:
fsub.d f3,f1,f2,rne
FPUSCHECKD f3, 0x3fe0000000000000, x3,x4
bne x3,x4,RESULTFAIL
.global SUBD_STATIC_2
SUBD_STATIC_2:
fsub.d f3,f1,f2,rtz
FPUSCHECKD f3, 0x3fe0000000000000, x3,x4
bne x3,x4,RESULTFAIL
.global SUBD_STATIC_3
SUBD_STATIC_3:
fsub.d f3,f1,f2,rdn
FPUSCHECKD f3, 0x3fe0000000000000, x3,x4
bne x3,x4,RESULTFAIL
.global SUBD_STATIC_4
SUBD_STATIC_4:
fsub.d f3,f1,f2,rup
FPUSCHECKD f3, 0x3fe0000000000000, x3,x4
bne x3,x4,RESULTFAIL
.global SUBD_STATIC_5
SUBD_STATIC_5:
fsub.d f3,f1,f2,rmm
FPUSCHECKD f3, 0x3fe0000000000000, x3,x4
bne x3,x4,RESULTFAIL
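# fabs/fneg: sign manipulation on denormal and signalling-NaN inputs for both single and double precision.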
FPURMCH 0 , x4
FPUMOVS f9 , SnDeNorm, x4
.global INST5
INST5:
fabs.s f12, f9
FPUSCHECKS f12, SpDeNorm, x4, x3
bne x3,x4, RESULTFAIL
FPUMOVD f9 , DnDeNorm, x4
.global INST6
INST6:
fabs.d f12, f9
FPUSCHECKD f12, DpDeNorm, x4, x3
bne x3,x4, RESULTFAIL
FPUMOVS f0 , SpsNaN , x4
.global INST7
INST7:
fneg.s f12, f0
FPUSCHECKS f12, SnsNaN, x4, x3
bne x3,x4, RESULTFAIL
FPUMOVD f0 , DpsNaN , x4
.global INST8
INST8:
fneg.d f12, f0
FPUSCHECKD f12, DnsNaN, x4, x3
bne x3,x4, RESULTFAIL
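# Conversion tests: fcvt.d.s/fcvt.s.d, FP-to-integer conversions (fcvt.w[u].d, fcvt.l[u].d, fcvt.w[u].s, fcvt.l[u].s) and integer-to-FP conversions, each exercised across rounding modes.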
FPUMOVS f12, SpLFN , x4
.global INST9
INST9:
fcvt.d.s f15, f12
FPUSCHECKD f15, 0x47efffffe0000000, x4, x3
bne x3,x4, RESULTFAIL
FPUEXCHK 0x00000000 , x4, x3
bne x3,x4, FLAGCFAIL
FPUMOVD f0 , DpLFN, x4
FPUMOVS f15, SpZero, x4
.global INST10
INST10:
fcvt.s.d f15, f0
FPUSCHECKS f15, SpInf, x4, x3
bne x3,x4, RESULTFAIL
FPUEXCHK 0x00000025 , x4, x3
bne x3,x4, FLAGCFAIL
FPUMOVD f1, 0x3ff0000000000000, x3
.global INST10_1
INST10_1:
fcvt.s.d f15, f1, rne
FPUSCHECKS f15, 0x3f800000, x4, x3
bne x3,x4, RESULTFAIL
.global INST10_2
INST10_2:
fcvt.s.d f15, f1, rtz
FPUSCHECKS f15, 0x3f800000, x4, x3
bne x3,x4, RESULTFAIL
.global INST10_3
INST10_3:
fcvt.s.d f15, f1, rdn
FPUSCHECKS f15, 0x3f800000, x4, x3
bne x3,x4, RESULTFAIL
.global INST10_4
INST10_4:
fcvt.s.d f15, f1, rup
FPUSCHECKS f15, 0x3f800000, x4, x3
bne x3,x4, RESULTFAIL
.global INST10_5
INST10_5:
fcvt.s.d f15, f1, rmm
FPUSCHECKS f15, 0x3f800000, x4, x3
bne x3,x4, RESULTFAIL
FPUMOVD f8 , 0x416fe00000000000, x4
.global INST11
INST11:
fcvt.wu.d x15, f8
li x1,0xff0000
bne x15,x1, RESULTFAIL
FPUEXCHK 0x00000000 , x4, x3
bne x3,x4, FLAGCFAIL
.global INST11_1
INST11_1:
fcvt.wu.d x15, f8, rne
li x1,0xff0000
bne x15,x1, RESULTFAIL
.global INST11_2
INST11_2:
fcvt.wu.d x15, f8, rtz
li x1,0xff0000
bne x15,x1, RESULTFAIL
.global INST11_3
INST11_3:
fcvt.wu.d x15, f8, rdn
li x1,0xff0000
bne x15,x1, RESULTFAIL
.global INST11_4
INST11_4:
fcvt.wu.d x15, f8, rup
li x1,0xff0000
bne x15,x1, RESULTFAIL
.global INST11_5
INST11_5:
fcvt.wu.d x15, f8, rmm
li x1,0xff0000
bne x15,x1, RESULTFAIL
FPUMOVD f0 , 0xbfefffffffffffff, x4
.global INST12
INST12:
fcvt.w.d x15, f0
li x1,0xffffffffffffffff
bne x1,x15, RESULTFAIL
FPUEXCHK 0x00000021 , x4, x3
bne x3,x4, FLAGCFAIL
FPUMOVD f0, 0x403f000000000000,x3
.global INST12_1
INST12_1:
fcvt.w.d x15, f0, rne
li x1,0x1f
bne x1,x15, RESULTFAIL
.global INST12_2
INST12_2:
fcvt.w.d x15, f0, rtz
li x1,0x1f
bne x1,x15, RESULTFAIL
.global INST12_3
INST12_3:
fcvt.w.d x15, f0, rdn
li x1,0x1f
bne x1,x15, RESULTFAIL
.global INST12_4
INST12_4:
fcvt.w.d x15, f0, rup
li x1,0x1f
bne x1,x15, RESULTFAIL
.global INST12_5
INST12_5:
fcvt.w.d x15, f0, rmm
li x1,0x1f
bne x1,x15, RESULTFAIL
FPUMOVD f8 , 0x432ffe0000000000, x4
.global INST13
INST13:
fcvt.lu.d x15, f8
li x1,0xfff0000000000
bne x15,x1, RESULTFAIL
FPUEXCHK 0x00000000 , x4, x3
bne x3,x4, FLAGCFAIL
.global INST13_1
INST13_1:
fcvt.lu.d x15, f8, rne
li x1,0xfff0000000000
bne x15,x1, RESULTFAIL
.global INST13_2
INST13_2:
fcvt.lu.d x15, f8, rtz
li x1,0xfff0000000000
bne x15,x1, RESULTFAIL
.global INST13_3
INST13_3:
fcvt.lu.d x15, f8, rdn
li x1,0xfff0000000000
bne x15,x1, RESULTFAIL
.global INST13_4
INST13_4:
fcvt.lu.d x15, f8, rup
li x1,0xfff0000000000
bne x15,x1, RESULTFAIL
.global INST13_5
INST13_5:
fcvt.lu.d x15, f8, rmm
li x1,0xfff0000000000
bne x15,x1, RESULTFAIL
FPUMOVD f0 , 0xbfefffffffffffff, x4
.global INST14
INST14:
fcvt.l.d x15, f0
li x1,0xffffffffffffffff
bne x1,x15, RESULTFAIL
FPUEXCHK 0x00000021 , x4, x3
bne x3,x4, FLAGCFAIL
FPUMOVD f0, 0x3ff2000000000000, x3
.global INST14_1
INST14_1:
fcvt.l.d x15, f0, rne
li x1,0x1
bne x1,x15, RESULTFAIL
.global INST14_2
INST14_2:
fcvt.l.d x15, f0, rtz
li x1,0x1
bne x1,x15, RESULTFAIL
.global INST14_3
INST14_3:
fcvt.l.d x15, f0, rdn
li x1,0x1
bne x1,x15, RESULTFAIL
.global INST14_4
INST14_4:
fcvt.l.d x15, f0, rup
li x1,0x2
bne x1,x15, RESULTFAIL
.global INST14_5
INST14_5:
fcvt.l.d x15, f0, rmm
li x1,0x1
bne x1,x15, RESULTFAIL
FPUMOVS f0 , 0xbf7fffff, x4
.global INST15
INST15:
fcvt.w.s x15, f0
li x1, 0xffffffffffffffff
bne x1,x15, RESULTFAIL
FPUEXCHK 0x00000021 , x4, x3
bne x3,x4, FLAGCFAIL
FPUMOVS f0 , 0xbf800000, x4
.global INST15_1
INST15_1:
fcvt.w.s x15, f0, rne
li x1, 0xffffffffffffffff
bne x1,x15, RESULTFAIL
.global INST15_2
INST15_2:
fcvt.w.s x15, f0, rtz
li x1, 0xffffffffffffffff
bne x1,x15, RESULTFAIL
.global INST15_3
INST15_3:
fcvt.w.s x15, f0, rdn
li x1, 0xffffffffffffffff
bne x1,x15, RESULTFAIL
.global INST15_4
INST15_4:
fcvt.w.s x15, f0, rup
li x1, 0xffffffffffffffff
bne x1,x15, RESULTFAIL
.global INST15_5
INST15_5:
fcvt.w.s x15, f0, rmm
li x1, 0xffffffffffffffff
bne x1,x15, RESULTFAIL
FPUMOVS f0 , 0x3fc00000, x4
.global INST16
INST16:
fcvt.wu.s x15, f0
li x1, 0x00000002
bne x1,x15, RESULTFAIL
FPUEXCHK 0x00000021 , x4, x3
bne x3,x4, FLAGCFAIL
.global INST16_1
INST16_1:
fcvt.wu.s x15, f0, rne
li x1, 0x00000002
bne x1,x15, RESULTFAIL
.global INST16_2
INST16_2:
fcvt.wu.s x15, f0, rtz
li x1, 0x00000001
bne x1,x15, RESULTFAIL
.global INST16_3
INST16_3:
fcvt.wu.s x15, f0, rdn
li x1, 0x00000001
bne x1,x15, RESULTFAIL
.global INST16_4
INST16_4:
fcvt.wu.s x15, f0, rup
li x1, 0x00000002
bne x1,x15, RESULTFAIL
.global INST16_5
INST16_5:
fcvt.wu.s x15, f0, rmm
li x1, 0x00000002
bne x1,x15, RESULTFAIL
FPUMOVS f0 , 0xbf7fffff, x4
.global INST17
INST17:
fcvt.l.s x15, f0
li x1, 0xffffffffffffffff
bne x1,x15, RESULTFAIL
FPUEXCHK 0x00000021 , x4, x3
bne x3,x4, FLAGCFAIL
FPUMOVS f0 , 0xbf000000, x4
.global INST17_1
INST17_1:
fcvt.l.s x15, f0, rne
bne x0,x15, RESULTFAIL
FPUMOVS f0 , 0xbf000000, x4
.global INST17_2
INST17_2:
fcvt.l.s x15, f0, rtz
bne x0,x15, RESULTFAIL
FPUMOVS f0 , 0xbf000000, x4
.global INST17_3
INST17_3:
fcvt.l.s x15, f0, rdn
li x1,0xffffffffffffffff
bne x1,x15, RESULTFAIL
FPUMOVS f0 , 0xbf000000, x4
.global INST17_4
INST17_4:
fcvt.l.s x15, f0, rup
bne x0,x15, RESULTFAIL
FPUMOVS f0 , 0xbf000000, x4
.global INST17_5
INST17_5:
fcvt.l.s x15, f0, rmm
li x1,0xffffffffffffffff
bne x1,x15, RESULTFAIL
FPUMOVS f0 , 0x3fc00000, x4
.global INST18
INST18:
fcvt.lu.s x15, f0
li x1, 0x00000002
bne x1,x15, RESULTFAIL
FPUEXCHK 0x00000021 , x4, x3
bne x3,x4, FLAGCFAIL
.global INST18_1
INST18_1:
fcvt.lu.s x15, f0, rne
li x1, 0x00000002
bne x1,x15, RESULTFAIL
.global INST18_2
INST18_2:
fcvt.lu.s x15, f0, rtz
li x1, 0x00000001
bne x1,x15, RESULTFAIL
.global INST18_3
INST18_3:
fcvt.lu.s x15, f0, rdn
li x1, 0x00000001
bne x1,x15, RESULTFAIL
.global INST18_4
INST18_4:
fcvt.lu.s x15, f0, rup
li x1, 0x00000002
bne x1,x15, RESULTFAIL
.global INST18_5
INST18_5:
fcvt.lu.s x15, f0, rmm
li x1, 0x00000002
bne x1,x15, RESULTFAIL
li x1,0x7fffffff
.global INST19
INST19:
fcvt.d.w f15, x1
FPUSCHECKD f15, 0x41dfffffffc00000, x4, x3
bne x3,x4, RESULTFAIL
li x1 , 0xffffffff
.global INST20
INST20:
fcvt.d.wu f15, x1
FPUSCHECKD f15, 0x41efffffffe00000, x4, x3
bne x3,x4, RESULTFAIL
li x1,0xffffffff
.global INST21
INST21:
fcvt.d.l f15, x1
FPUSCHECKD f15, 0x41efffffffe00000, x4, x3
bne x3,x4, RESULTFAIL
li x1,0xffffff0000000001
FPUMOVS f15, SpqNaN,x3
.global INST21_1
INST21_1:
fcvt.d.l f15, x1, rne
FPUSCHECKD f15, 0xc26fffffffffe000, x4, x3
bne x3,x4, RESULTFAIL
FPUMOVS f15, SpqNaN,x3
.global INST21_2
INST21_2:
fcvt.d.l f15, x1, rtz
FPUSCHECKD f15, 0xc26fffffffffe000, x4, x3
bne x3,x4, RESULTFAIL
FPUMOVS f15, SpqNaN,x3
.global INST21_3
INST21_3:
fcvt.d.l f15, x1, rdn
FPUSCHECKD f15, 0xc26fffffffffe000, x4, x3
bne x3,x4, RESULTFAIL
FPUMOVS f15, SpqNaN,x3
.global INST21_4
INST21_4:
fcvt.d.l f15, x1, rup
FPUSCHECKD f15, 0xc26fffffffffe000, x4, x3
bne x3,x4, RESULTFAIL
FPUMOVS f15, SpqNaN,x3
.global INST21_5
INST21_5:
fcvt.d.l f15, x1, rmm
FPUSCHECKD f15, 0xc26fffffffffe000, x4, x3
bne x3,x4, RESULTFAIL
li x1 , 0xffffffffff
.global INST22
INST22:
fcvt.d.lu f15, x1
FPUSCHECKD f15, 0x426fffffffffe000, x4, x3
bne x3,x4, RESULTFAIL
FPUMOVD f15, DpqNaN,x3
.global INST22_1
INST22_1:
fcvt.d.lu f15, x1, rne
FPUSCHECKD f15, 0x426fffffffffe000, x4, x3
bne x3,x4, RESULTFAIL
FPUMOVD f15, DpqNaN,x3
.global INST22_2
INST22_2:
fcvt.d.lu f15, x1, rtz
FPUSCHECKD f15, 0x426fffffffffe000, x4, x3
bne x3,x4, RESULTFAIL
FPUMOVD f15, DpqNaN,x3
.global INST22_3
INST22_3:
fcvt.d.lu f15, x1, rdn
FPUSCHECKD f15, 0x426fffffffffe000, x4, x3
bne x3,x4, RESULTFAIL
FPUMOVD f15, DpqNaN,x3
.global INST22_4
INST22_4:
fcvt.d.lu f15, x1, rup
FPUSCHECKD f15, 0x426fffffffffe000, x4, x3
bne x3,x4, RESULTFAIL
FPUMOVD f15, DpqNaN,x3
.global INST22_5
INST22_5:
fcvt.d.lu f15, x1, rmm
FPUSCHECKD f15, 0x426fffffffffe000, x4, x3
bne x3,x4, RESULTFAIL
li x1,0x7fffffff
.global INST23
INST23:
fcvt.s.w f15, x1
FPUSCHECKS f15, 0x4f000000, x4, x3
bne x3,x4, RESULTFAIL
FPUEXCHK 0x21,x3,x4
bne x3,x4,FLAGCFAIL
li x1,0xfffff001
FPUMOVS f15, SpqNaN,x3
.global INST23_1
INST23_1:
fcvt.s.w f15, x1, rne
FPUSCHECKS f15, 0xc57ff000, x4, x3
bne x3,x4, RESULTFAIL
FPUMOVS f15, SpqNaN,x3
.global INST23_2
INST23_2:
fcvt.s.w f15, x1, rtz
FPUSCHECKS f15, 0xc57ff000, x4, x3
bne x3,x4, RESULTFAIL
FPUMOVS f15, SpqNaN,x3
.global INST23_3
INST23_3:
fcvt.s.w f15, x1, rdn
FPUSCHECKS f15, 0xc57ff000, x4, x3
bne x3,x4, RESULTFAIL
FPUMOVS f15, SpqNaN,x3
.global INST23_4
INST23_4:
fcvt.s.w f15, x1, rup
FPUSCHECKS f15, 0xc57ff000, x4, x3
bne x3,x4, RESULTFAIL
FPUMOVS f15, SpqNaN,x3
.global INST23_5
INST23_5:
fcvt.s.w f15, x1, rmm
FPUSCHECKS f15, 0xc57ff000, x4, x3
bne x3,x4, RESULTFAIL
li x1 , 0xffffffff
.global INST24
INST24:
fcvt.s.wu f15, x1
FPUSCHECKS f15, 0x4f800000, x4, x3
bne x3,x4, RESULTFAIL
FPUEXCHK 0x21,x3,x4
bne x3,x4,FLAGCFAIL
li x1, 0xfff
FPUMOVS f15, SpqNaN,x3
.global INST24_1
INST24_1:
fcvt.s.wu f15, x1,rne
FPUSCHECKS f15, 0x457ff000, x4, x3
bne x3,x4, RESULTFAIL
FPUMOVS f15, SpqNaN,x3
.global INST24_2
INST24_2:
fcvt.s.wu f15, x1, rtz
FPUSCHECKS f15, 0x457ff000, x4, x3
bne x3,x4, RESULTFAIL
FPUMOVS f15, SpqNaN,x3
.global INST24_3
INST24_3:
fcvt.s.wu f15, x1, rdn
FPUSCHECKS f15, 0x457ff000, x4, x3
bne x3,x4, RESULTFAIL
FPUMOVS f15, SpqNaN,x3
.global INST24_4
INST24_4:
fcvt.s.wu f15, x1, rup
FPUSCHECKS f15, 0x457ff000, x4, x3
bne x3,x4, RESULTFAIL
FPUMOVS f15, SpqNaN,x3
.global INST24_5
INST24_5:
fcvt.s.wu f15, x1, rmm
FPUSCHECKS f15, 0x457ff000, x4, x3
bne x3,x4, RESULTFAIL
li x1,0xffffffffffffffff
.global INST25
INST25:
fcvt.s.l f15, x1
FPUSCHECKS f15, 0xbf800000, x4, x3
bne x3,x4, RESULTFAIL
FPUMOVS f15, SpqNaN, x3
.global INST25_1
INST25_1:
fcvt.s.l f15, x1, rne
FPUSCHECKS f15, 0xbf800000, x4, x3
bne x3,x4, RESULTFAIL
FPUMOVS f15, SpqNaN, x3
.global INST25_2
INST25_2:
fcvt.s.l f15, x1, rtz
FPUSCHECKS f15, 0xbf800000, x4, x3
bne x3,x4, RESULTFAIL
FPUMOVS f15, SpqNaN, x3
.global INST25_3
INST25_3:
fcvt.s.l f15, x1, rdn
FPUSCHECKS f15, 0xbf800000, x4, x3
bne x3,x4, RESULTFAIL
FPUMOVS f15, SpqNaN, x3
.global INST25_4
INST25_4:
fcvt.s.l f15, x1, rup
FPUSCHECKS f15, 0xbf800000, x4, x3
bne x3,x4, RESULTFAIL
FPUMOVS f15, SpqNaN, x3
.global INST25_5
INST25_5:
fcvt.s.l f15, x1, rmm
FPUSCHECKS f15, 0xbf800000, x4, x3
bne x3,x4, RESULTFAIL
li x1 , 0xffffff0000
.global INST26
INST26:
fcvt.s.lu f15, x1
FPUSCHECKS f15, 0x537fffff, x4, x3
bne x3,x4, RESULTFAIL
FPUMOVS f15, SpqNaN, x3
.global INST26_1
INST26_1:
fcvt.s.lu f15, x1, rne
FPUSCHECKS f15, 0x537fffff, x4, x3
bne x3,x4, RESULTFAIL
FPUMOVS f15, SpqNaN, x3
.global INST26_2
INST26_2:
fcvt.s.lu f15, x1, rtz
FPUSCHECKS f15, 0x537fffff, x4, x3
bne x3,x4, RESULTFAIL
FPUMOVS f15, SpqNaN, x3
.global INST26_3
INST26_3:
fcvt.s.lu f15, x1, rdn
FPUSCHECKS f15, 0x537fffff, x4, x3
bne x3,x4, RESULTFAIL
FPUMOVS f15, SpqNaN, x3
.global INST26_4
INST26_4:
fcvt.s.lu f15, x1, rup
FPUSCHECKS f15, 0x537fffff, x4, x3
bne x3,x4, RESULTFAIL
FPUMOVS f15, SpqNaN, x3
.global INST26_5
INST26_5:
fcvt.s.lu f15, x1, rmm
FPUSCHECKS f15, 0x537fffff, x4, x3
bne x3,x4, RESULTFAIL
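# Integer/FP move tests: fmv.d.x, fmv.x.d, fmv.w.x (upper bits NaN-boxed), fmv.x.w (sign-extended) and register-to-register fmv.s/fmv.d.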
.global A1
A1:
li x1,0x1234567887654321
# li x3, 0x2000000
li x3, 0x50000
sd x1,0(x3)
fmv.d.x f1,x1
fld f2,0(x3)
feq.d x1,f1,f2
beq x1,x0,RESULTFAIL
.global AA2
AA2:
li x1,0x1111122222222
sd x1,0(x3)
fld f1,0(x3)
fmv.x.d x4,f1
bne x1,x4,RESULTFAIL
.global MAJW1
MAJW1:
li x1,0xfffffff
li x4,0xffffffff0fffffff
# li x3, 0x2000000
li x3, 0x50000
fmv.w.x f1,x1
fsd f1,0(x3)
ld x5,0(x3)
bne x5,x4,RESULTFAIL
.global A2
A2:
li x1,0xfbc00000
# li x3,0x2000000
li x3, 0x50000
sd x1,0(x3)
fld f1,0(x3)
fmv.x.w x4,f1
li x1,0xfffffffffbc00000
bne x1,x4,RESULTFAIL
.global A7
A7:
FPUMOVS f15 , 0x01020304 , x4
FPUMOVS f14 , 0x00000000 , x4
fmv.s f14, f15
FPUSCHECKS f14, 0x01020304, x4, x3
bne x3,x4, RESULTFAIL
.global A8
A8:
FPUMOVD f15 , 0x0102030405060708 , x4
FPUMOVD f14 , 0x0 , x4
fmv.d f14, f15
FPUSCHECKD f14,0x0102030405060708, x4, x3
bne x3,x4, RESULTFAIL
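# Single- and double-precision sign injection and classification: fsgnj/fsgnjn/fsgnjx variants, then fclass.s/fclass.d on infinity and quiet NaN.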
.global FSGNJS
FSGNJS:
FPUMOVS f1,SpZero,x3
FPUMOVS f2,SnInf,x3
FPUMOVS f15,SpsNaN,x3
.global INST38
INST38:
fsgnj.s f15,f1,f2
FPUSCHECKS f15,SnZero,x3,x4
bne x3,x4,RESULTFAIL
.global FSGNJD
FSGNJD:
FPUMOVD f1,DpZero,x3
FPUMOVD f2,DnInf,x3
FPUMOVD f15,DpsNaN,x3
.global INST39
INST39:
fsgnj.d f15,f1,f2
FPUSCHECKD f15,DnZero,x3,x4
bne x3,x4,RESULTFAIL
.global FSGNJNS
FSGNJNS:
FPUMOVS f1,SpZero,x3
FPUMOVS f2,SpInf,x3
FPUMOVS f15,SpsNaN,x3
.global INST40
INST40:
fsgnjn.s f15,f1,f2
FPUSCHECKS f15,SnZero,x3,x4
bne x3,x4,RESULTFAIL
.global FSGNJND
FSGNJND:
FPUMOVD f1,DpZero,x3
FPUMOVD f2,DpInf,x3
FPUMOVD f15,DpsNaN,x3
.global INST41
INST41:
fsgnjn.d f15,f1,f2
FPUSCHECKD f15,DnZero,x3,x4
bne x3,x4,RESULTFAIL
.global FSGNJXS
FSGNJXS:
FPUMOVS f1,SnZero,x3
FPUMOVS f2,SpInf,x3
FPUMOVS f15,SpsNaN,x3
.global INST42
INST42:
fsgnjx.s f15,f1,f2
FPUSCHECKS f15,SnZero,x3,x4
bne x3,x4,RESULTFAIL
.global FSGNJXD
FSGNJXD:
FPUMOVD f1,DnZero,x3
FPUMOVD f2,DpInf,x3
FPUMOVD f15,DpsNaN,x3
.global INST43
INST43:
fsgnjx.d f15,f1,f2
FPUSCHECKD f15,DnZero,x3,x4
bne x3,x4,RESULTFAIL
.global FCLASSS
FCLASSS:
FPUMOVS f1,SnInf,x3
.global INST44
INST44:
fclass.s x1,f1
li x3,0x1
bne x1,x3,RESULTFAIL
FPUMOVS f1,SnqNaN,x3
.global INST45
INST45:
fclass.s x1,f1
li x3,0x200
bne x1,x3,RESULTFAIL
.global FCLASSD
FCLASSD:
FPUMOVD f1,DnInf,x3
.global INST46
INST46:
fclass.d x1,f1
li x3,0x1
bne x1,x3,RESULTFAIL
FPUMOVD f1,DnqNaN,x3
.global INST47
INST47:
fclass.d x1,f1
li x3,0x200
bne x1,x3,RESULTFAIL
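# Single-precision fmax/fmin and feq/fle/flt: NaN operands, exception-flag checks and NaN-payload cases under FPUQNANCH.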
FPUMOVS f0 , 0x7f801111 , x4
FPUMOVS f1 , 0x7f800111 , x4
FPUMOVS f2 , 0x7fc00011 , x4
FPUMOVS f3 , 0x7fc00010 , x4
FPUMOVS f4 , SpInf , x4
FPUMOVS f5 , SnInf , x4
FPUMOVS f6 , SpNorm , x4
FPUMOVS f7 , SnNorm , x4
FPUMOVS f8 , SpDeNorm, x4
FPUMOVS f9 , SnDeNorm, x4
FPUMOVS f10, SpZero , x4
FPUMOVS f11, SnZero , x4
.global FMAXSTEST
FMAXSTEST:
.global INST48
INST48:
fmax.s f15 , f0, f4
FPUSCHECKS f15, SpqNaN , x4, x3
bne x3,x4, FMAXSFAIL
FPUEXCHK 0x30,x3,x4
bne x3,x4,FLAGCFAIL
.global INST49
INST49:
fmax.s f15 , f2, f6
FPUSCHECKS f15, SpNorm , x4, x3
bne x3,x4, FMAXSFAIL
FPUEXCHK 0x0,x3,x4
bne x3,x4,FLAGCFAIL
.global INST50
INST50:
fmax.s f15 , f6, f7
FPUSCHECKS f15, SpNorm , x4, x3
bne x3,x4, FMAXSFAIL
FPUEXCHK 0x0,x3,x4
bne x3,x4,FLAGCFAIL
FPUQNANCH 1, x4
.global INST51
INST51:
fmax.s f15 , f0, f0
FPUSCHECKS f15, 0x7fc01111 , x4, x3
bne x3,x4, FMAXSFAIL
.global INST52
INST52:
fmax.s f15 , f2, f1
FPUSCHECKS f15, 0x7fc00111 , x4, x3
bne x3,x4, FMAXSFAIL
.global INST53
INST53:
fmax.s f15 , f2, f3
FPUSCHECKS f15, 0x7fc00011 , x4, x3
bne x3,x4, FMAXSFAIL
FPUQNANCH 0, x4
.global FMINSTEST
FMINSTEST:
fmin.s f15 , f0, f4
FPUSCHECKS f15, SpqNaN , x4, x3
bne x3,x4, FMINSFAIL
FPUEXCHK 0x30,x3,x4
bne x3,x4,FLAGCFAIL
.global INST55
INST55:
fmin.s f15 , f2, f6
FPUSCHECKS f15, SpNorm , x4, x3
bne x3,x4, FMINSFAIL
FPUEXCHK 0x0,x3,x4
bne x3,x4,FLAGCFAIL
.global INST56
INST56:
fmin.s f15 , f6, f7
FPUSCHECKS f15, SnNorm , x4, x3
bne x3,x4, FMINSFAIL
FPUEXCHK 0x0,x3,x4
bne x3,x4,FLAGCFAIL
FPUQNANCH 1, x4
.global INST57
INST57:
fmin.s f15 , f0, f0
FPUSCHECKS f15, 0x7fc01111 , x4, x3
bne x3,x4, FMINSFAIL
.global INST58
INST58:
fmin.s f15 , f2, f1
FPUSCHECKS f15, 0x7fc00111 , x4, x3
bne x3,x4, FMINSFAIL
.global INST59
INST59:
fmin.s f15 , f2, f2
FPUSCHECKS f15, 0x7fc00011 , x4, x3
bne x3,x4, FMINSFAIL
FPUQNANCH 0, x4
.global FEQSTEST
FEQSTEST:
feq.s x1, f0, f6
bne x1,x0, FEQSFAIL
FPUEXCHK 0x30,x3,x4
bne x3,x4,FLAGCFAIL
.global INST61
INST61:
feq.s x1, f2, f4
bne x1,x0, FEQSFAIL
FPUEXCHK 0x0,x3,x4
bne x3,x4,FLAGCFAIL
.global INST62
INST62:
feq.s x1, f7,f7
li x3,0x1
bne x1,x3, FEQSFAIL
FPUEXCHK 0x0,x3,x4
bne x3,x4,FLAGCFAIL
.global FLESTEST
FLESTEST:
.global INST63
INST63:
fle.s x1, f0, f6
bne x1,x0, FLESFAIL
FPUEXCHK 0x30,x3,x4
bne x3,x4,FLAGCFAIL
.global INST64
INST64:
fle.s x1, f2, f4
bne x1,x0, FLESFAIL
FPUEXCHK 0x30,x3,x4
bne x3,x4,FLAGCFAIL
.global INST65
INST65:
fle.s x1, f7,f6
li x3,0x1
bne x1,x3, FLESFAIL
FPUEXCHK 0x0,x3,x4
bne x3,x4,FLAGCFAIL
.global FLTSTEST
FLTSTEST:
flt.s x1, f0, f6
bne x1,x0, FLTSFAIL
FPUEXCHK 0x30,x3,x4
bne x3,x4,FLAGCFAIL
.global INST67
INST67:
flt.s x1, f2, f4
bne x1,x0, FLTSFAIL
FPUEXCHK 0x30,x3,x4
bne x3,x4,FLAGCFAIL
.global INST68
INST68:
flt.s x1, f7,f6
li x3,0x1
bne x1,x3, FLTSFAIL
FPUEXCHK 0x0,x3,x4
bne x3,x4,FLAGCFAIL
FPUMOVD f0 , 0x7ff0000000001111, x4
FPUMOVD f1 , 0x7ff0000000000111 , x4
FPUMOVD f2 , 0x7ff8000000000011 , x4
FPUMOVD f3 , 0x7ff8000000000010 , x4
FPUMOVD f4 , DpInf, x4
FPUMOVD f5 , DnInf, x4
FPUMOVD f6 , DpNorm, x4
FPUMOVD f7 , DnNorm, x4
FPUMOVD f8 , DpDeNorm, x4
FPUMOVD f9 , DnDeNorm, x4
FPUMOVD f10, DpZero, x4
FPUMOVD f11, DnZero, x4
.global FMAXDTEST
FMAXDTEST:
fmax.d f15 , f0, f4
FPUSCHECKD f15, DpqNaN , x4, x3
bne x3,x4, FMAXDFAIL
FPUEXCHK 0x30,x3,x4
bne x3,x4,FLAGCFAIL
.global INST70
INST70:
fmax.d f15 , f2, f6
FPUSCHECKD f15, DpNorm , x4, x3
bne x3,x4, FMAXDFAIL
FPUEXCHK 0x0,x3,x4
bne x3,x4,FLAGCFAIL
.global INST71
INST71:
fmax.d f15 , f6, f7
FPUSCHECKD f15, DpNorm , x4, x3
bne x3,x4, FMAXDFAIL
FPUEXCHK 0x0,x3,x4
bne x3,x4,FLAGCFAIL
FPUQNANCH 1, x4
.global INST72
INST72:
fmax.d f15 , f0, f0
FPUSCHECKD f15, 0x7ff8000000001111 , x4, x3
bne x3,x4, FMAXDFAIL
.global INST73
INST73:
fmax.d f15 , f2, f1
FPUSCHECKD f15, 0x7ff8000000000111 , x4, x3
bne x3,x4, FMAXDFAIL
.global INST74
INST74:
fmax.d f15 , f2, f3
FPUSCHECKD f15, 0x7ff8000000000011 , x4, x3
bne x3,x4, FMAXDFAIL
FPUQNANCH 0, x4
.global FMINDTEST
FMINDTEST:
fmin.d f15 , f0, f4
FPUSCHECKD f15, DpqNaN , x4, x3
bne x3,x4, FMINDFAIL
FPUEXCHK 0x30,x3,x4
bne x3,x4,FLAGCFAIL
.global INST76
INST76:
fmin.d f15 , f2, f6
FPUSCHECKD f15, DpNorm , x4, x3
bne x3,x4, FMINDFAIL
FPUEXCHK 0x0,x3,x4
bne x3,x4,FLAGCFAIL
.global INST77
INST77:
fmin.d f15 , f6, f7
FPUSCHECKD f15, DnNorm , x4, x3
bne x3,x4, FMINDFAIL
FPUEXCHK 0x0,x3,x4
bne x3,x4,FLAGCFAIL
FPUQNANCH 1, x4
.global INST78
INST78:
fmin.d f15 , f0, f0
FPUSCHECKD f15, 0x7ff8000000001111 , x4, x3
bne x3,x4, FMINDFAIL
.global INST79
INST79:
fmin.d f15 , f2, f1
FPUSCHECKD f15, 0x7ff8000000000111 , x4, x3
bne x3,x4, FMINDFAIL
.global INST80
INST80:
fmin.d f15 , f2, f2
FPUSCHECKD f15, 0x7ff8000000000011 , x4, x3
bne x3,x4, FMINDFAIL
FPUQNANCH 0, x4
.global FEQDTEST
FEQDTEST:
feq.d x1, f0, f6
bne x1,x0, FEQDFAIL
FPUEXCHK 0x30,x3,x4
bne x3,x4,FLAGCFAIL
.global INST82
INST82:
feq.d x1, f2, f4
bne x1,x0, FEQDFAIL
FPUEXCHK 0x0,x3,x4
bne x3,x4,FLAGCFAIL
.global INST83
INST83:
feq.d x1, f7,f7
li x3,0x1
bne x1,x3, FEQDFAIL
FPUEXCHK 0x0,x3,x4
bne x3,x4,FLAGCFAIL
.global FLEDTEST
FLEDTEST:
.global INST84
INST84:
fle.d x1, f0, f6
bne x1,x0, FLEDFAIL
FPUEXCHK 0x30,x3,x4
bne x3,x4,FLAGCFAIL
.global INST85
INST85:
fle.d x1, f2, f4
bne x1,x0, FLEDFAIL
FPUEXCHK 0x30,x3,x4
bne x3,x4,FLAGCFAIL
.global INST86
INST86:
fle.d x1, f7,f6
li x3,0x1
bne x1,x3, FLEDFAIL
FPUEXCHK 0x0,x3,x4
bne x3,x4,FLAGCFAIL
.global FLTDTEST
FLTDTEST:
flt.d x1, f0, f6
bne x1,x0, FLTDFAIL
FPUEXCHK 0x30,x3,x4
bne x3,x4,FLAGCFAIL
.global INST88
INST88:
flt.d x1, f2, f4
bne x1,x0, FLTDFAIL
FPUEXCHK 0x30,x3,x4
bne x3,x4,FLAGCFAIL
.global INST89
INST89:
flt.d x1, f7,f6
li x3,0x1
bne x1,x3, FLTDFAIL
FPUEXCHK 0x0,x3,x4
bne x3,x4,FLAGCFAIL
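# Single-precision divide and square root: NaN operands, Inf/Inf and 0/0 cases, rounding-mode variants and NaN-payload cases under FPUQNANCH.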
FPUMOVS f0 , 0x7f801111 , x4
FPUMOVS f1 , 0x7f800111 , x4
FPUMOVS f2 , 0x7fc00011 , x4
FPUMOVS f3 , 0x7fc00010 , x4
FPUMOVS f4 , SpInf , x4
FPUMOVS f5 , SnInf , x4
FPUMOVS f6 , 0x40827900 , x4
FPUMOVS f7 , 0xc0838000 , x4
FPUMOVS f8 , SpDeNorm, x4
FPUMOVS f9 , SnDeNorm, x4
FPUMOVS f10, SpZero , x4
FPUMOVS f11, SnZero , x4
.global FDIVS
FDIVS:
.global INST90
INST90:
fdiv.s f15, f0, f1
FPUSCHECKS f15, SpqNaN, x4, x3
bne x3,x4, FDIVSFAIL
FPUEXCHK 0x30,x3,x4
bne x3,x4,FLAGCFAIL
.global INST91
INST91:
fdiv.s f15, f2, f6
FPUSCHECKS f15, SpqNaN, x4, x3
bne x3,x4, FDIVSFAIL
FPUEXCHK 0x0,x3,x4
bne x3,x4,FLAGCFAIL
.global INST92
INST92:
fdiv.s f15, f6, f7
FPUSCHECKS f15, 0xbf7e0000, x4, x3
bne x3,x4, FDIVSFAIL
FPUEXCHK 0x0,x3,x4
bne x3,x4,FLAGCFAIL
.global INST93
INST93:
fdiv.s f15, f4, f5
FPUSCHECKS f15, SpqNaN, x4, x3
bne x3,x4, FDIVSFAIL
FPUEXCHK 0x30,x3,x4
bne x3,x4,FLAGCFAIL
.global INST94
INST94:
fdiv.s f15, f10, f11
FPUSCHECKS f15, SpqNaN, x4, x3
bne x3,x4, FDIVSFAIL
FPUEXCHK 0x30,x3,x4
bne x3,x4,FLAGCFAIL
FPUMOVS f20, 0x71f00000, x3
FPUMOVS f21, 0x58c00000, x3
.global INST94_1
INST94_1:
fdiv.s f15, f20, f21, rne
FPUSCHECKS f15, 0x58a00000, x4, x3
bne x3,x4, FDIVSFAIL
FPUEXCHK 0x0,x3,x4
bne x3,x4,FLAGCFAIL
.global INST94_2
INST94_2:
fdiv.s f15, f20, f21, rtz
FPUSCHECKS f15, 0x58a00000, x4, x3
bne x3,x4, FDIVSFAIL
FPUEXCHK 0x0,x3,x4
bne x3,x4,FLAGCFAIL
.global INST94_3
INST94_3:
fdiv.s f15, f20, f21, rdn
FPUSCHECKS f15, 0x58a00000, x4, x3
bne x3,x4, FDIVSFAIL
FPUEXCHK 0x0,x3,x4
bne x3,x4,FLAGCFAIL
.global INST94_4
INST94_4:
fdiv.s f15, f20, f21, rup
FPUSCHECKS f15, 0x58a00000, x4, x3
bne x3,x4, FDIVSFAIL
FPUEXCHK 0x0,x3,x4
bne x3,x4,FLAGCFAIL
.global INST94_5
INST94_5:
fdiv.s f15, f20, f21, rmm
FPUSCHECKS f15, 0x58a00000, x4, x3
bne x3,x4, FDIVSFAIL
FPUEXCHK 0x0,x3,x4
bne x3,x4,FLAGCFAIL
FPUQNANCH 1, x4
.global INST95
INST95:
fdiv.s f15, f0, f1
FPUSCHECKS f15, 0x7fc01111, x4, x3
bne x3,x4, FDIVSFAIL
.global INST96
INST96:
fdiv.s f15, f2, f1
FPUSCHECKS f15, 0x7fc00111, x4, x3
bne x3,x4, FDIVSFAIL
.global INST97
INST97:
fdiv.s f15, f4, f4
FPUSCHECKS f15, SpqNaN, x4, x3
bne x3,x4, FDIVSFAIL
.global INST98
INST98:
fdiv.s f15, f10, f11
FPUSCHECKS f15, SpqNaN, x4, x3
bne x3,x4, FDIVSFAIL
FPUQNANCH 0, x4
.global FSQRTS
FSQRTS:
fsqrt.s f15, f0
FPUSCHECKS f15, SpqNaN, x4, x3
bne x3,x4, FSQRTSFAIL
FPUEXCHK 0x30, x3,x4
bne x3,x4,FLAGCFAIL
.global INST100
INST100:
fsqrt.s f15, f3
FPUSCHECKS f15, SpqNaN, x4, x3
bne x3,x4, FSQRTSFAIL
FPUEXCHK 0x0, x3,x4
bne x3,x4,FLAGCFAIL
.global INST101
INST101:
fsqrt.s f15, f6
FPUSCHECKS f15, 0x40013afc, x4, x3
bne x3,x4, FSQRTSFAIL
FPUEXCHK 0x21, x3,x4
bne x3,x4,FLAGCFAIL
FPUMOVS f20, 0x71a20000,x3
.global INST101_1
INST101_1:
fsqrt.s f15, f20, rne
FPUSCHECKS f15, 0x58900000, x4, x3
bne x3,x4, FSQRTSFAIL
FPUEXCHK 0x0, x3,x4
bne x3,x4,FLAGCFAIL
FPUMOVS f15, SpqNaN, x3
.global INST101_2
INST101_2:
fsqrt.s f15, f20, rtz
FPUSCHECKS f15, 0x58900000, x4, x3
bne x3,x4, FSQRTSFAIL
FPUEXCHK 0x0, x3,x4
bne x3,x4,FLAGCFAIL
FPUMOVS f15, SpqNaN, x3
.global INST101_3
INST101_3:
fsqrt.s f15, f20, rdn
FPUSCHECKS f15, 0x58900000, x4, x3
bne x3,x4, FSQRTSFAIL
FPUEXCHK 0x0, x3,x4
bne x3,x4,FLAGCFAIL
FPUMOVS f15, SpqNaN, x3
.global INST101_4
INST101_4:
fsqrt.s f15, f20, rup
FPUSCHECKS f15, 0x58900000, x4, x3
bne x3,x4, FSQRTSFAIL
FPUEXCHK 0x0, x3,x4
bne x3,x4,FLAGCFAIL
FPUMOVS f15, SpqNaN, x3
.global INST101_5
INST101_5:
fsqrt.s f15, f20, rmm
FPUSCHECKS f15, 0x58900000, x4, x3
bne x3,x4, FSQRTSFAIL
FPUEXCHK 0x0, x3,x4
bne x3,x4,FLAGCFAIL
FPUQNANCH 1, x4
.global INST102
INST102:
fsqrt.s f15, f0
FPUSCHECKS f15, 0x7fc01111, x4, x3
bne x3,x4, FSQRTSFAIL
.global INST103
INST103:
fsqrt.s f15, f2
FPUSCHECKS f15, 0x7fc00011, x4, x3
bne x3,x4, FSQRTSFAIL
.global INST104
INST104:
fsqrt.s f15, f5
FPUSCHECKS f15, SpqNaN, x4, x3
bne x3,x4, FSQRTSFAIL
FPUQNANCH 0, x4
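# Double-precision divide and square root: same pattern as the single-precision block above.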
.global FDIVD
FDIVD:
FPUMOVD f0 , 0x7ff0000000001111 , x4
FPUMOVD f1 , 0x7ff0000000000111 , x4
FPUMOVD f2 , 0x7ff8000000000011 , x4
FPUMOVD f3 , 0x7ff8000000000010 , x4
FPUMOVD f4 , DpInf , x4
FPUMOVD f5 , DnInf , x4
FPUMOVD f6 , 0x1021c71b38e39000 , x4
FPUMOVD f7 , 0x8ff5555400000000 , x4
FPUMOVD f8 , DpDeNorm, x4
FPUMOVD f9 , DnDeNorm, x4
FPUMOVD f10, DpZero , x4
FPUMOVD f11, DnZero , x4
.global INST105
INST105:
fdiv.d f15, f0, f1
FPUSCHECKD f15, DpqNaN, x4, x3
bne x3,x4, FDIVDFAIL
FPUEXCHK 0x30,x3,x4
bne x3,x4,FLAGCFAIL
.global INST106
INST106:
fdiv.d f15, f2, f6
FPUSCHECKD f15, DpqNaN, x4, x3
bne x3,x4, FDIVDFAIL
FPUEXCHK 0x0,x3,x4
bne x3,x4,FLAGCFAIL
.global INST107
INST107:
fdiv.d f15, f6, f7
FPUSCHECKD f15, 0xc01aaaaa80000000, x4, x3
bne x3,x4, FDIVDFAIL
FPUEXCHK 0x0,x3,x4
bne x3,x4,FLAGCFAIL
.global INST108
INST108:
fdiv.d f15, f4, f5
FPUSCHECKD f15, DpqNaN, x4, x3
bne x3,x4, FDIVDFAIL
FPUEXCHK 0x30,x3,x4
bne x3,x4,FLAGCFAIL
.global INST109
INST109:
fdiv.d f15, f10, f11
FPUSCHECKD f15, DpqNaN, x4, x3
bne x3,x4, FDIVDFAIL
FPUEXCHK 0x30,x3,x4
bne x3,x4,FLAGCFAIL
FPUMOVD f20, 0xc035000000000000,x3
FPUMOVD f21, 0x4018000000000000,x3
.global INST109_1
INST109_1:
fdiv.d f15, f20, f21, rne
FPUSCHECKD f15, 0xc00c000000000000, x4, x3
bne x3,x4, FDIVDFAIL
FPUEXCHK 0x0,x3,x4
bne x3,x4,FLAGCFAIL
FPUMOVD f15,DpqNaN,x3
.global INST109_2
INST109_2:
fdiv.d f15, f20, f21, rtz
FPUSCHECKD f15, 0xc00c000000000000, x4, x3
bne x3,x4, FDIVDFAIL
FPUEXCHK 0x0,x3,x4
bne x3,x4,FLAGCFAIL
FPUMOVD f15,DpqNaN,x3
.global INST109_3
INST109_3:
fdiv.d f15, f20, f21, rdn
FPUSCHECKD f15, 0xc00c000000000000, x4, x3
bne x3,x4, FDIVDFAIL
FPUEXCHK 0x0,x3,x4
bne x3,x4,FLAGCFAIL
FPUMOVD f15,DpqNaN,x3
.global INST109_4
INST109_4:
fdiv.d f15, f20, f21, rup
FPUSCHECKD f15, 0xc00c000000000000, x4, x3
bne x3,x4, FDIVDFAIL
FPUEXCHK 0x0,x3,x4
bne x3,x4,FLAGCFAIL
FPUMOVD f15,DpqNaN,x3
.global INST109_5
INST109_5:
fdiv.d f15, f20, f21, rmm
FPUSCHECKD f15, 0xc00c000000000000, x4, x3
bne x3,x4, FDIVDFAIL
FPUEXCHK 0x0,x3,x4
bne x3,x4,FLAGCFAIL
FPUQNANCH 1, x4
.global INST110
INST110:
fdiv.d f15, f0, f1
FPUSCHECKD f15, 0x7ff8000000001111, x4, x3
bne x3,x4, FDIVDFAIL
.global INST111
INST111:
fdiv.d f15, f2, f1
FPUSCHECKD f15, 0x7ff8000000000111, x4, x3
bne x3,x4, FDIVDFAIL
.global INST112
INST112:
fdiv.d f15, f4, f4
FPUSCHECKD f15, DpqNaN, x4, x3
bne x3,x4, FDIVDFAIL
.global INST113
INST113:
fdiv.d f15, f10, f11
FPUSCHECKD f15, DpqNaN, x4, x3
bne x3,x4, FDIVDFAIL
FPUQNANCH 0, x4
.global FSQRTD
FSQRTD:
fsqrt.d f15, f0
FPUSCHECKD f15, DpqNaN, x4, x3
bne x3,x4, FSQRTDFAIL
FPUEXCHK 0x30, x3,x4
bne x3,x4,FLAGCFAIL
.global INST115
INST115:
fsqrt.d f15, f3
FPUSCHECKD f15, DpqNaN, x4, x3
bne x3,x4, FSQRTDFAIL
FPUEXCHK 0x0, x3,x4
bne x3,x4,FLAGCFAIL
.global INST116
INST116:
fsqrt.d f15, f6
FPUSCHECKD f15, 0x2807d9f3fd90d976, x4, x3
bne x3,x4, FSQRTDFAIL
FPUEXCHK 0x21, x3,x4
bne x3,x4,FLAGCFAIL
FPUMOVD f20, 0x40efc02000000000, x3
.global INST116_1
INST116_1:
fsqrt.d f15, f20, rne
FPUSCHECKD f15, 0x406fe00000000000, x4, x3
bne x3,x4, FSQRTDFAIL
FPUEXCHK 0x0, x3,x4
bne x3,x4,FLAGCFAIL
FPUMOVD f15, DpqNaN, x3
.global INST116_2
INST116_2:
fsqrt.d f15, f20, rtz
FPUSCHECKD f15, 0x406fe00000000000, x4, x3
bne x3,x4, FSQRTDFAIL
FPUEXCHK 0x0, x3,x4
bne x3,x4,FLAGCFAIL
FPUMOVD f15, DpqNaN, x3
.global INST116_3
INST116_3:
fsqrt.d f15, f20, rdn
FPUSCHECKD f15, 0x406fe00000000000, x4, x3
bne x3,x4, FSQRTDFAIL
FPUEXCHK 0x0, x3,x4
bne x3,x4,FLAGCFAIL
FPUMOVD f15, DpqNaN, x3
.global INST116_4
INST116_4:
fsqrt.d f15, f20, rup
FPUSCHECKD f15, 0x406fe00000000000, x4, x3
bne x3,x4, FSQRTDFAIL
FPUEXCHK 0x0, x3,x4
bne x3,x4,FLAGCFAIL
FPUMOVD f15, DpqNaN, x3
.global INST116_5
INST116_5:
fsqrt.d f15, f20, rmm
FPUSCHECKD f15, 0x406fe00000000000, x4, x3
bne x3,x4, FSQRTDFAIL
FPUEXCHK 0x0, x3,x4
bne x3,x4,FLAGCFAIL
FPUQNANCH 1, x4
.global INST117
INST117:
fsqrt.d f15, f0
FPUSCHECKD f15, 0x7ff8000000001111, x4, x3
bne x3,x4, FSQRTDFAIL
.global INST118
INST118:
fsqrt.d f15, f2
FPUSCHECKD f15, 0x7ff8000000000011, x4, x3
bne x3,x4, FSQRTDFAIL
.global INST119
INST119:
fsqrt.d f15, f5
FPUSCHECKD f15, DpqNaN, x4, x3
bne x3,x4, FSQRTDFAIL
FPUEXCHK 0x30,x3,x4
bne x3,x4, FLAGCFAIL
FPUQNANCH 0, x4
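# Single- and double-precision multiply: an exact product, then an overflowing product checked under each dynamic and static rounding mode.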
.global FMULS
FMULS:
FPUMOVS f0, 0x42d10000, x4
FPUMOVS f1, 0x436d8000, x4
.global INST120
INST120:
fmul.s f2, f0, f1
FPUSCHECKS f2, 0x46c1e580, x4, x3
bne x3,x4, FMULSFAIL
FPUEXCHK 0x0, x3,x4
bne x3,x4,FLAGCFAIL
FPUMOVS f0, SpLFN,x4
FPUMOVS f1, 0x3fc00000,x4
.global INST121
INST121:
fmul.s f2,f0,f1
FPUSCHECKS f2, SpInf,x3,x4
bne x3,x4, FMULSFAIL
FPUEXCHK 0x25,x3,x4
bne x3,x4,FLAGCFAIL
FPURMCH 1,x3
.global INST122
INST122:
fmul.s f2,f0,f1
FPUSCHECKS f2, SpLFN,x3,x4
bne x3,x4, FMULSFAIL
FPUEXCHK 0x25,x3,x4
bne x3,x4,FLAGCFAIL
FPURMCH 2,x3
.global INST123
INST123:
fmul.s f2,f0,f1
FPUSCHECKS f2, SpLFN,x3,x4
bne x3,x4, FMULSFAIL
FPUEXCHK 0x25,x3,x4
bne x3,x4,FLAGCFAIL
FPURMCH 3,x3
.global INST124
INST124:
fmul.s f2,f0,f1
FPUSCHECKS f2, SpInf,x3,x4
bne x3,x4, FMULSFAIL
FPUEXCHK 0x25,x3,x4
bne x3,x4,FLAGCFAIL
FPURMCH 4,x3
.global INST125
INST125:
fmul.s f2,f0,f1
FPUSCHECKS f2, SpInf,x3,x4
bne x3,x4, FMULSFAIL
FPUEXCHK 0x25,x3,x4
bne x3,x4,FLAGCFAIL
FPURMCH 5,x3
.global INST126
INST126:
fmul.s f2,f0,f1,rne
FPUSCHECKS f2, SpInf,x3,x4
bne x3,x4, FMULSFAIL
FPUEXCHK 0x25,x3,x4
bne x3,x4,FLAGCFAIL
.global INST127
INST127:
fmul.s f2,f0,f1,rtz
FPUSCHECKS f2, SpLFN,x3,x4
bne x3,x4, FMULSFAIL
FPUEXCHK 0x25,x3,x4
bne x3,x4,FLAGCFAIL
.global INST128
INST128:
fmul.s f2,f0,f1, rdn
FPUSCHECKS f2, SpLFN,x3,x4
bne x3,x4, FMULSFAIL
FPUEXCHK 0x25,x3,x4
bne x3,x4,FLAGCFAIL
.global INST129
INST129:
fmul.s f2,f0,f1, rup
FPUSCHECKS f2, SpInf,x3,x4
bne x3,x4, FMULSFAIL
FPUEXCHK 0x25,x3,x4
bne x3,x4,FLAGCFAIL
.global INST130
INST130:
fmul.s f2,f0,f1, rmm
FPUSCHECKS f2, SpInf,x3,x4
bne x3,x4, FMULSFAIL
FPUEXCHK 0x25,x3,x4
bne x3,x4,FLAGCFAIL
FPURMCH 0, x3
FPUMOVD f0, 0x4037400000000000 ,x3
FPUMOVD f1, 0x4011000000000000 ,x3
.global FMULD
FMULD:
.global INST131
INST131:
fmul.d f2, f0, f1
FPUSCHECKD f2, 0x4058b40000000000, x3,x4
bne x3,x4, FMULDFAIL
FPUEXCHK 0x0, x3,x4
bne x3,x4,FLAGCFAIL
FPUMOVD f0, DnLFN, x4
FPUMOVD f1, 0x3ff8000000000000, x4
.global INST132
INST132:
fmul.d f2, f0, f1
FPUSCHECKD f2, DnInf, x4, x3
bne x3,x4, FMULDFAIL
FPUEXCHK 0x25, x3,x4
bne x3,x4,FLAGCFAIL
FPURMCH 1,x3
.global INST133
INST133:
fmul.d f2, f0, f1
FPUSCHECKD f2, DnLFN, x4, x3
bne x3,x4, FMULDFAIL
FPUEXCHK 0x25, x3,x4
bne x3,x4,FLAGCFAIL
FPURMCH 2,x3
.global INST134
INST134:
fmul.d f2, f0, f1
FPUSCHECKD f2, DnInf, x4, x3
bne x3,x4, FMULDFAIL
FPUEXCHK 0x25, x3,x4
bne x3,x4,FLAGCFAIL
FPURMCH 3,x3
.global INST135
INST135:
fmul.d f2, f0, f1
FPUSCHECKD f2, DnLFN, x4, x3
bne x3,x4, FMULDFAIL
FPUEXCHK 0x25, x3,x4
bne x3,x4,FLAGCFAIL
FPURMCH 4,x3
.global INST136
INST136:
fmul.d f2, f0, f1
FPUSCHECKD f2, DnInf, x4, x3
bne x3,x4, FMULDFAIL
FPUEXCHK 0x25, x3,x4
bne x3,x4,FLAGCFAIL
FPURMCH 6,x3
.global INST137
INST137:
fmul.d f2, f0, f1,rne
FPUSCHECKD f2, DnInf, x4, x3
bne x3,x4, FMULDFAIL
FPUEXCHK 0x25, x3,x4
bne x3,x4,FLAGCFAIL
.global INST138
INST138:
fmul.d f2, f0, f1,rtz
FPUSCHECKD f2, DnLFN, x4, x3
bne x3,x4, FMULDFAIL
FPUEXCHK 0x25, x3,x4
bne x3,x4,FLAGCFAIL
.global INST139
INST139:
fmul.d f2, f0, f1,rdn
FPUSCHECKD f2, DnInf, x4, x3
bne x3,x4, FMULDFAIL
FPUEXCHK 0x25, x3,x4
bne x3,x4,FLAGCFAIL
.global INST140
INST140:
fmul.d f2, f0, f1,rup
FPUSCHECKD f2, DnLFN, x4, x3
bne x3,x4, FMULDFAIL
FPUEXCHK 0x25, x3,x4
bne x3,x4,FLAGCFAIL
.global INST141
INST141:
fmul.d f2, f0, f1,rmm
FPUSCHECKD f2, DnInf, x4, x3
bne x3,x4, FMULDFAIL
FPUEXCHK 0x25, x3,x4
bne x3,x4,FLAGCFAIL
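# Fused multiply-add (fmadd.s/fmadd.d): an inexact near-maximum sum checked under each dynamic and static rounding mode.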
FPUMOVS f0, 0x7f7ffffe, x1
FPUMOVS f1, 0x3f800000, x1
FPUMOVS f2, 0x73000000, x1
FPURMCH 0,x3
.global FMADDS
FMADDS:
.global INST142
INST142:
fmadd.s f3, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f3,0x7f7ffffe, x3, x4
bne x3,x4, FMADDSFAIL
FPURMCH 1,x3
.global INST143
INST143:
fmadd.s f3, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f3,0x7f7ffffe, x3, x4
bne x3,x4, FMADDSFAIL
FPURMCH 2 ,x3
.global INST144
INST144:
fmadd.s f3, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f3,0x7f7ffffe, x3, x4
bne x3,x4, FMADDSFAIL
FPURMCH 3,x3
.global INST145
INST145:
fmadd.s f3, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f3,0x7f7fffff, x3, x4
bne x3,x4, FMADDSFAIL
FPURMCH 4,x3
.global INST146
INST146:
fmadd.s f3, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f3,0x7f7fffff, x3, x4
bne x3,x4, FMADDSFAIL
FPURMCH 7, x3
.global INST147
INST147:
fmadd.s f3, f1, f0, f2,rne
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f3,0x7f7ffffe, x3, x4
bne x3,x4, FMADDSFAIL
.global INST148
INST148:
fmadd.s f3, f1, f0, f2,rtz
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f3,0x7f7ffffe, x3, x4
bne x3,x4, FMADDSFAIL
.global INST149
INST149:
fmadd.s f3, f1, f0, f2,rdn
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f3,0x7f7ffffe, x3, x4
bne x3,x4, FMADDSFAIL
.global INST150
INST150:
fmadd.s f3, f1, f0, f2,rup
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f3,0x7f7fffff, x3, x4
bne x3,x4, FMADDSFAIL
.global INST151
INST151:
fmadd.s f3, f1, f0, f2,rmm
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f3,0x7f7fffff, x3, x4
bne x3,x4, FMADDSFAIL
FPUMOVD f0, 0x7feffffffffffffe ,x3
FPUMOVD f1, 0x3ff0000000000000 ,x3
FPUMOVD f2, 0x7c90000000000000 ,x3
FPURMCH 0,x3
.global FMADDD
FMADDD:
.global INST152
INST152:
fmadd.d f3, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f3,0x7feffffffffffffe, x3, x4
bne x3,x4, FMADDDFAIL
FPURMCH 1,x3
.global INST153
INST153:
fmadd.d f3, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f3,0x7feffffffffffffe, x3, x4
bne x3,x4, FMADDDFAIL
FPURMCH 2 ,x3
.global INST154
INST154:
fmadd.d f3, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f3,0x7feffffffffffffe, x3, x4
bne x3,x4, FMADDDFAIL
FPURMCH 3,x3
.global INST155
INST155:
fmadd.d f3, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f3,0x7fefffffffffffff, x3, x4
bne x3,x4, FMADDDFAIL
FPURMCH 4,x3
.global INST156
INST156:
fmadd.d f3, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f3,0x7fefffffffffffff, x3, x4
bne x3,x4, FMADDDFAIL
FPURMCH 6, x3
.global INST157
INST157:
fmadd.d f3, f1, f0, f2,rne
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f3,0x7feffffffffffffe, x3, x4
bne x3,x4, FMADDDFAIL
.global INST158
INST158:
fmadd.d f3, f1, f0, f2,rtz
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f3,0x7feffffffffffffe, x3, x4
bne x3,x4, FMADDDFAIL
.global INST159
INST159:
fmadd.d f3, f1, f0, f2,rdn
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f3,0x7feffffffffffffe, x3, x4
bne x3,x4, FMADDDFAIL
.global INST160
INST160:
fmadd.d f3, f1, f0, f2,rup
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f3,0x7fefffffffffffff, x3, x4
bne x3,x4, FMADDDFAIL
.global INST161
INST161:
fmadd.d f3, f1, f0, f2,rmm
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f3,0x7fefffffffffffff, x3, x4
bne x3,x4, FMADDDFAIL
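# Fused multiply-subtract (fmsub.s/fmsub.d) checked under each dynamic and static rounding mode.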
.global FMSUBS
FMSUBS:
FPUMOVS f0, 0x7f7ffffe, x1
FPUMOVS f1, 0x3f800000, x1
FPUMOVS f2, 0xf3000000, x1
FPURMCH 0,x3
.global INST162
INST162:
fmsub.s f3, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f3,0x7f7ffffe, x3, x4
bne x3,x4, FMSUBSFAIL
FPURMCH 1,x3
.global INST163
INST163:
fmsub.s f3, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f3,0x7f7ffffe, x3, x4
bne x3,x4, FMSUBSFAIL
FPURMCH 2 ,x3
.global INST164
INST164:
fmsub.s f3, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f3,0x7f7ffffe, x3, x4
bne x3,x4, FMSUBSFAIL
FPURMCH 3,x3
.global INST165
INST165:
fmsub.s f3, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f3,0x7f7fffff, x3, x4
bne x3,x4, FMSUBSFAIL
FPURMCH 4,x3
.global INST166
INST166:
fmsub.s f3, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f3,0x7f7fffff, x3, x4
bne x3,x4, FMSUBSFAIL
FPURMCH 0,x3
.global INST167
INST167:
fmsub.s f3, f1, f0, f2, rne
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f3,0x7f7ffffe, x3, x4
bne x3,x4, FMSUBSFAIL
.global INST168
INST168:
fmsub.s f3, f1, f0, f2, rtz
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f3,0x7f7ffffe, x3, x4
bne x3,x4, FMSUBSFAIL
.global INST169
INST169:
fmsub.s f3, f1, f0, f2, rdn
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f3,0x7f7ffffe, x3, x4
bne x3,x4, FMSUBSFAIL
.global INST170
INST170:
fmsub.s f3, f1, f0, f2, rup
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f3,0x7f7fffff, x3, x4
bne x3,x4, FMSUBSFAIL
fmsub.s f3, f1, f0, f2, rmm
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f3,0x7f7fffff, x3, x4
bne x3,x4, FMSUBSFAIL
FPURMCH 0, x3
.global FMSUBD
FMSUBD:
FPUMOVD f0, 0x7feffffffffffffe ,x3
FPUMOVD f1, 0x3ff0000000000000 ,x3
FPUMOVD f2, 0xfc90000000000000 ,x3
.global INST171
INST171:
fmsub.d f3, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f3,0x7feffffffffffffe, x3, x4
bne x3,x4, FMSUBDFAIL
FPURMCH 1, x3
.global INST172
INST172:
fmsub.d f3, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f3,0x7feffffffffffffe, x3, x4
bne x3,x4, FMSUBDFAIL
FPURMCH 2, x3
.global INST173
INST173:
fmsub.d f3, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f3,0x7feffffffffffffe, x3, x4
bne x3,x4, FMSUBDFAIL
FPURMCH 3, x3
.global INST174
INST174:
fmsub.d f3, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f3,0x7fefffffffffffff, x3, x4
bne x3,x4, FMSUBDFAIL
FPURMCH 4, x3
.global INST175
INST175:
fmsub.d f3, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f3,0x7fefffffffffffff, x3, x4
bne x3,x4, FMSUBDFAIL
FPURMCH 5,x3
.global INST176
INST176:
fmsub.d f3, f1, f0, f2,rne
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f3,0x7feffffffffffffe, x3, x4
bne x3,x4, FMSUBDFAIL
.global INST177
INST177:
fmsub.d f3, f1, f0, f2,rtz
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f3,0x7feffffffffffffe, x3, x4
bne x3,x4, FMSUBDFAIL
.global INST178
INST178:
fmsub.d f3, f1, f0, f2,rdn
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f3,0x7feffffffffffffe, x3, x4
bne x3,x4, FMSUBDFAIL
.global INST179
INST179:
fmsub.d f3, f1, f0, f2,rup
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f3,0x7fefffffffffffff, x3, x4
bne x3,x4, FMSUBDFAIL
.global INST180
INST180:
fmsub.d f3, f1, f0, f2,rmm
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f3,0x7fefffffffffffff, x3, x4
bne x3,x4, FMSUBDFAIL
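# Negated fused multiply-add (fnmadd.s/fnmadd.d) checked under each dynamic and static rounding mode.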
.global FNMADDS
FNMADDS:
FPUMOVS f0, 0x7f7ffffe, x1
FPUMOVS f1, 0x3f800000, x1
FPUMOVS f2, 0x73000000, x1
FPURMCH 0,x3
.global INST181
INST181:
fnmadd.s f4, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f4,0xff7ffffe, x3, x4
bne x3,x4, FNMADDSFAIL
FPURMCH 1, x3
.global INST182
INST182:
fnmadd.s f4, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f4,0xff7ffffe, x3, x4
bne x3,x4, FNMADDSFAIL
FPURMCH 2, x3
.global INST183
INST183:
fnmadd.s f4, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f4,0xff7fffff, x3, x4
bne x3,x4, FNMADDSFAIL
FPURMCH 3,x3
.global INST184
INST184:
fnmadd.s f4, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f4,0xff7ffffe, x3, x4
bne x3,x4, FNMADDSFAIL
FPURMCH 4,x3
.global INST185
INST185:
fnmadd.s f4, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f4,0xff7fffff, x3, x4
bne x3,x4, FNMADDSFAIL
FPURMCH 6,x3
.global INST186
INST186:
fnmadd.s f4, f1, f0, f2,rne
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f4,0xff7ffffe, x3, x4
bne x3,x4, FNMADDSFAIL
.global INST187
INST187:
fnmadd.s f4, f1, f0, f2,rtz
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f4,0xff7ffffe, x3, x4
bne x3,x4, FNMADDSFAIL
.global INST188
INST188:
fnmadd.s f4, f1, f0, f2,rdn
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f4,0xff7fffff, x3, x4
bne x3,x4, FNMADDSFAIL
.global INST189
INST189:
fnmadd.s f4, f1, f0, f2,rup
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f4,0xff7ffffe, x3, x4
bne x3,x4, FNMADDSFAIL
.global INST190
INST190:
fnmadd.s f4, f1, f0, f2,rmm
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f4,0xff7fffff, x3, x4
bne x3,x4, FNMADDSFAIL
FPURMCH 0, x3
FPUMOVD f0, 0x7feffffffffffffe ,x3
FPUMOVD f1, 0x3ff0000000000000 ,x3
FPUMOVD f2, 0x7c90000000000000 ,x3
.global FNMADDD
FNMADDD:
.global INST191
INST191:
fnmadd.d f4, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f4, 0xffeffffffffffffe, x3,x4
bne x3,x4, FNMADDDFAIL
FPURMCH 1,x3
.global INST192
INST192:
fnmadd.d f4, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f4, 0xffeffffffffffffe, x3,x4
bne x3,x4, FNMADDDFAIL
FPURMCH 2,x3
.global INST193
INST193:
fnmadd.d f4, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f4, 0xffefffffffffffff, x3,x4
bne x3,x4, FNMADDDFAIL
FPURMCH 3,x3
.global INST194
INST194:
fnmadd.d f4, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f4, 0xffeffffffffffffe, x3,x4
bne x3,x4, FNMADDDFAIL
FPURMCH 4,x3
.global INST195
INST195:
fnmadd.d f4, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f4, 0xffefffffffffffff, x3,x4
bne x3,x4, FNMADDDFAIL
FPURMCH 7,x3
.global INST196
INST196:
fnmadd.d f4, f1, f0, f2, rne
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f4, 0xffeffffffffffffe, x3,x4
bne x3,x4, FNMADDDFAIL
.global INST197
INST197:
fnmadd.d f4, f1, f0, f2, rtz
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f4, 0xffeffffffffffffe, x3,x4
bne x3,x4, FNMADDDFAIL
.global INST198
INST198:
fnmadd.d f4, f1, f0, f2, rdn
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f4, 0xffefffffffffffff, x3,x4
bne x3,x4, FNMADDDFAIL
.global INST199
INST199:
fnmadd.d f4, f1, f0, f2, rup
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f4, 0xffeffffffffffffe, x3,x4
bne x3,x4, FNMADDDFAIL
.global INST200
INST200:
fnmadd.d f4, f1, f0, f2, rmm
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f4, 0xffefffffffffffff, x3,x4
bne x3,x4, FNMADDDFAIL
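# Negated fused multiply-subtract (fnmsub.s/fnmsub.d) checked under each dynamic and static rounding mode.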
FPURMCH 0,x1
FPUMOVS f0, 0x7f7ffffe, x1
FPUMOVS f1, 0x3f800000, x1
FPUMOVS f2, 0xf3000000, x1
.global FNMSUBS
FNMSUBS:
.global INST201
INST201:
fnmsub.s f4, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f4,0xff7ffffe, x3, x4
bne x3,x4, FNMSUBSFAIL
FPURMCH 1,x3
.global INST202
INST202:
fnmsub.s f4, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f4,0xff7ffffe, x3, x4
bne x3,x4, FNMSUBSFAIL
FPURMCH 2,x3
.global INST203
INST203:
fnmsub.s f4, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f4, 0xff7fffff, x3, x4
bne x3,x4, FNMSUBSFAIL
FPURMCH 3,x3
.global INST204
INST204:
fnmsub.s f4, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f4,0xff7ffffe, x3, x4
bne x3,x4, FNMSUBSFAIL
FPURMCH 4, x3
.global INST205
INST205:
fnmsub.s f4, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f4,0xff7fffff, x3, x4
bne x3,x4, FNMSUBSFAIL
FPURMCH 5, x3
.global INST206
INST206:
fnmsub.s f4, f1, f0, f2,rne
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f4,0xff7ffffe, x3, x4
bne x3,x4, FNMSUBSFAIL
.global INST207
INST207:
fnmsub.s f4, f1, f0, f2, rtz
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f4,0xff7ffffe, x3, x4
bne x3,x4, FNMSUBSFAIL
.global INST208
INST208:
fnmsub.s f4, f1, f0, f2, rdn
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f4,0xff7fffff, x3, x4
bne x3,x4, FNMSUBSFAIL
.global INST209
INST209:
fnmsub.s f4, f1, f0, f2, rup
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f4,0xff7ffffe, x3, x4
bne x3,x4, FNMSUBSFAIL
.global INST210
INST210:
fnmsub.s f4, f1, f0, f2,rmm
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKS f4,0xff7fffff, x3, x4
bne x3,x4, FNMSUBSFAIL
FPURMCH 0 ,x3
FPUMOVD f0, 0x7feffffffffffffe ,x3
FPUMOVD f1, 0xbff0000000000000 ,x3
FPUMOVD f2, 0x7c90000000000000 ,x3
.global INST211
INST211:
fnmsub.d f4, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f4, 0x7feffffffffffffe, x3,x4
bne x3,x4, FNMSUBDFAIL
FPURMCH 1,x3
.global INST212
INST212:
fnmsub.d f4, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f4, 0x7feffffffffffffe, x3,x4
bne x3,x4, FNMSUBDFAIL
FPURMCH 2,x3
.global INST213
INST213:
fnmsub.d f4, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f4, 0x7feffffffffffffe, x3,x4
bne x3,x4, FNMSUBDFAIL
FPURMCH 3,x3
.global INST214
INST214:
fnmsub.d f4, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f4, 0x7fefffffffffffff, x3,x4
bne x3,x4, FNMSUBDFAIL
FPURMCH 4,x3
.global INST215
INST215:
fnmsub.d f4, f1, f0, f2
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f4, 0x7fefffffffffffff, x3,x4
bne x3,x4, FNMSUBDFAIL
FPURMCH 7,x3
.global INST216
INST216:
fnmsub.d f4, f1, f0, f2,rne
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f4, 0x7feffffffffffffe, x3,x4
bne x3,x4, FNMSUBDFAIL
.global INST217
INST217:
fnmsub.d f4, f1, f0, f2, rtz
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f4, 0x7feffffffffffffe, x3,x4
bne x3,x4, FNMSUBDFAIL
.global INST218
INST218:
fnmsub.d f4, f1, f0, f2, rdn
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f4, 0x7feffffffffffffe, x3,x4
bne x3,x4, FNMSUBDFAIL
.global INST219
INST219:
fnmsub.d f4, f1, f0, f2, rup
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f4, 0x7fefffffffffffff, x3,x4
bne x3,x4, FNMSUBDFAIL
.global INST220
INST220:
fnmsub.d f4, f1, f0, f2, rmm
FPUEXCHK 0x00000021, x3,x4
bne x3,x4, FLAGCFAIL
FPUSCHECKD f4, 0x7fefffffffffffff, x3,x4
bne x3,x4, FNMSUBDFAIL
j EXIT
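# All failure labels below funnel into the common FAIL handler.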
.global EXCEPTION_FAIL
EXCEPTION_FAIL:
j FAIL
.global FMULSFAIL
FMULSFAIL:
j FAIL
.global FMADDSFAIL
FMADDSFAIL:
j FAIL
.global FMSUBSFAIL
FMSUBSFAIL:
j FAIL
.global FNMADDSFAIL
FNMADDSFAIL:
j FAIL
.global FNMSUBSFAIL
FNMSUBSFAIL:
j FAIL
.global FMULDFAIL
FMULDFAIL:
j FAIL
.global FMADDDFAIL
FMADDDFAIL:
j FAIL
.global FMSUBDFAIL
FMSUBDFAIL:
j FAIL
.global FNMADDDFAIL
FNMADDDFAIL:
j FAIL
.global FNMSUBDFAIL
FNMSUBDFAIL:
j FAIL
.global RESULTFAIL
RESULTFAIL:
j FAIL
.global FLAGCFAIL
FLAGCFAIL:
j FAIL
.global FDIVSFAIL
FDIVSFAIL:
j FAIL
.global FRECIPSFAIL
FRECIPSFAIL:
j FAIL
.global FSQRTSFAIL
FSQRTSFAIL:
j FAIL
.global FDIVDFAIL
FDIVDFAIL:
j FAIL
.global FRECIPDFAIL
FRECIPDFAIL:
j FAIL
.global FSQRTDFAIL
FSQRTDFAIL:
j FAIL
.global FMAXSFAIL
FMAXSFAIL:
j FAIL
.global FMINSFAIL
FMINSFAIL:
j FAIL
.global FEQSFAIL
FEQSFAIL:
j FAIL
.global FLESFAIL
FLESFAIL:
j FAIL
.global FLTSFAIL
FLTSFAIL:
j FAIL
.global FMAXDFAIL
FMAXDFAIL:
j FAIL
.global FMINDFAIL
FMINDFAIL:
j FAIL
.global FEQDFAIL
FEQDFAIL:
j FAIL
.global FLEDFAIL
FLEDFAIL:
j FAIL
.global FLTDFAIL
FLTDFAIL:
j FAIL
.global EXIT
EXIT:
la x1, __exit
jr x1
.global FAIL
FAIL:
la x1, __fail
jr x1
#******this region is added by generator******
#************************************************************
#* File: smart_run/tests/cases/ISA/ISA_THEAD/isa_thead_smoke.s
#* Repo: Advanced-Microelectronics-Group/OpenC910_Modified
#************************************************************
/*Copyright 2019-2021 T-Head Semiconductor Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#************************************************************
#* Target file generated by rangen *
#************************************************************
#* *
#************************************************************
#*************Following is the generated instructions*****************
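# FPUMOVD: load the 64-bit immediate IMME64 into FP register FDESREG,
# using IMM_REG as an integer scratch register.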
.macro FPUMOVD FDESREG,IMME64,IMM_REG
li \IMM_REG, \IMME64
fmv.d.x \FDESREG, \IMM_REG
.endm
.text
.align 6
.global main
main:
csrr x10, mhartid
bnez x10, TEST_WFI
nop
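# Enable the T-Head vendor extensions by setting mxstatus bit 22
# (assumed: THEADISAEE, the extended-instruction enable) before executing
# any of the non-standard instructions below.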
.global ENABLE_THEADEE
ENABLE_THEADEE:
li x9,0x400000
csrs mxstatus,x9
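# rev_test: rev reverses the byte order of the 64-bit source
# (0x123456789abcdef0 -> 0xf0debc9a78563412).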
.global rev_test
rev_test:
li x6, 0x123456789abcdef0
rev x7, x6
li x6, 0xf0debc9a78563412
bne x6, x7, TEST_FAIL
li x6, 0x0
rev x7, x6
li x6, 0x0
bne x6, x7, TEST_FAIL
li x6, 0xffffffff
rev x7, x6
li x6, 0xffffffff00000000
bne x6, x7, TEST_FAIL
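# ff0_test: ff0 returns the position, counted from the most-significant bit,
# of the first 0 bit; 64 when the source is all ones.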
.global ff0_test
ff0_test:
li x6, 0xffffffffffffffff
ff0 x7, x6
li x6, 64
bne x6, x7, TEST_FAIL
li x6, 0x7fffffffffffffff
ff0 x7, x6
li x6, 0
bne x6, x7, TEST_FAIL
li x6, 0xfffffffffffffffe
ff0 x7, x6
li x6, 63
bne x6, x7, TEST_FAIL
li x6, 0xfffeff6ff8ff8fff
ff0 x7, x6
li x6, 15
bne x6, x7, TEST_FAIL
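# ff1_test: ff1 returns the position, counted from the most-significant bit,
# of the first 1 bit; 64 when the source is zero.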
.global ff1_test
ff1_test:
li x6, 0x0
ff1 x7, x6
li x6, 64
bne x6, x7, TEST_FAIL
li x6, 0xffffffffffffffff
ff1 x7, x6
li x6, 0x0
bne x6, x7, TEST_FAIL
li x6, 0x1
ff1 x7, x6
li x6, 63
bne x6, x7, TEST_FAIL
li x6, 0x0001ff6ff8ff8fff
ff1 x7, x6
li x6, 15
bne x6, x7, TEST_FAIL
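# ext/extu rd, rs1, msb, lsb extract the bit field rs1[msb:lsb];
# ext sign-extends the field, extu zero-extends it.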
.global ext_instruction
ext_instruction:
nop
.global ext_1_bit
ext_1_bit:
li x12, 0xff
ext x14, x12, 1, 1
li x15, 0xffffffffffffffff
bne x15, x14, TEST_FAIL
li x12, 0x80000000
ext x14, x12, 1, 1
li x15, 0x0
bne x15, x14,TEST_FAIL
.global ext_64_bit
ext_64_bit:
li x12, 0xabcdabcdabcdabcd
ext x14, x12, 63, 0
bne x14, x12, TEST_FAIL
.global ext_sign_bit1
ext_sign_bit1:
li x12, 0xabcdabcd
ext x14, x12, 2, 1
li x15, 0xfffffffffffffffe
bne x15, x14,TEST_FAIL
.global ext_sign_bit0
ext_sign_bit0:
li x12, 0xabcdabcd
ext x14, x12, 5, 3
li x15, 0x1
bne x15, x14,TEST_FAIL
.global extu_instruction
extu_instruction:
nop
.global extu_1_bit
extu_1_bit:
li x12, 0xff
extu x14, x12, 1, 1
li x15, 0x1
bne x15, x14, TEST_FAIL
li x12, 0x80000000
extu x14, x12, 1, 1
li x15, 0x0
bne x15, x14,TEST_FAIL
.global extu_64_bit
extu_64_bit:
li x12, 0xabcdabcdabcdabcd
extu x14, x12, 63, 0
bne x14, x12, TEST_FAIL
.global extu_sign_bit1
extu_sign_bit1:
li x12, 0xabcdabcd
extu x14, x12, 2, 1
li x15, 0x2
bne x15, x14,TEST_FAIL
.global extu_sign_bit0
extu_sign_bit0:
li x12, 0xabcdabcd
extu x14, x12, 5, 3
li x15, 0x1
bne x15, x14,TEST_FAIL
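# mveqz rd, rs1, rs2 copies rs1 into rd when rs2 == 0 and leaves rd unchanged
# otherwise; mvnez is the complementary move for rs2 != 0.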
.global mveqz_instruction
mveqz_instruction:
li x3,0x0
li x4,0xabcd
li x6,0xaaaaaaaaaaaaaaaa
li x7,0xffffffffffffff
li x8,0xffffffffffffff
mveqz x7,x6,x3
bne x7,x6, TEST_FAIL
mveqz x7,x8,x4
bne x7,x6, TEST_FAIL
mveqz x7,x8,x0
bne x7,x8, TEST_FAIL
.global mvnez_instruction
mvnez_instruction:
li x3,0x0
li x4,0xabcd
li x6,0xaaaaaaaaaaaaaaaa
li x7,0x5555555555
li x8,0x5555555555
mvnez x7,x6,x3
bne x7,x8, TEST_FAIL
mvnez x7,x6,x4
bne x7,x6, TEST_FAIL
mvnez x7,x8,x0
bne x7,x6, TEST_FAIL
.option norvc
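# STORE1-STORE8: register-indexed loads lr{b,h,w,d}[u] rd, rs1, rs2, imm2 read
# from rs1 + (rs2 << imm2); the lur* forms index with the zero-extended low
# 32 bits of rs2, as exercised by the x19 values below.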
.global STORE1
STORE1:
li x3,0xffffffffffffffff
li x8,0xaaaaaaaaaaaaaaaa
li x4,0x000000000000a000
li x18,0xfffffffffffffff8
sd x8,0xffffffffffffffe0(x4)
lrd x5,x4,x18,2
bne x8,x5,TEST_FAIL
li x18,0x7f8
li x19,0xffffffff000007f8
sd x8,0x7f8(x4)
lrd x5, x4, x18, 0
lurd x9, x4, x19, 0
bne x8,x5,TEST_FAIL
bne x8,x9,TEST_FAIL
li x18,0
li x19,0xffffffff00000000
sd x3,0x0(x4)
lrd x5,x4,x18,1
lurd x9,x4,x19,1
bne x3,x5,TEST_FAIL
bne x9,x5,TEST_FAIL
li x18,0x10
li x19,0xffffffff00000010
sd x8,0x80(x4)
lrd x5,x4,x18,3
lurd x9,x4,x19,3
bne x8,x5,TEST_FAIL
bne x9,x5,TEST_FAIL
.global STORE2
STORE2:
li x6,0xffffffff80000000
li x8,0x0000000080000000
li x9,0xaaaaaaaaaaaaaaaa
li x10,0xaaaaaaaa80000000
li x18,0
li x19,0x0000ffff00000000
sd x9,0x0(x4)
sw x6,0x0(x4)
lrwu x7,x4,x18,0
lurwu x20,x4,x18,0
lrw x5,x4,x18,0
lurw x21,x4,x18,0
lrd x11,x4,x18,0
bne x7,x8,TEST_FAIL
bne x20,x8,TEST_FAIL
bne x6,x5,TEST_FAIL
bne x6,x21,TEST_FAIL
bne x11,x10,TEST_FAIL
li x18,0xffffffffffffff00
li x19,0x0000ffff00000000
sd x9,-256(x4)
sw x6,-256(x4)
lrwu x7,x4,x18,0
lrw x5,x4,x18,0
lrd x11,x4,x18,0
bne x7,x8,TEST_FAIL
bne x6,x5,TEST_FAIL
bne x11,x10,TEST_FAIL
li x18,0x3fc
li x19,0x0000ffff000003fc
sd x9,0x7f8(x4)
sw x6,0x7f8(x4)
lrwu x7,x4,x18,1
lurwu x20,x4,x18,1
lrw x5,x4,x18,1
lurw x21,x4,x18,1
lrd x11,x4,x18,1
bne x7,x8,TEST_FAIL
bne x20,x8,TEST_FAIL
bne x6,x5,TEST_FAIL
bne x6,x21,TEST_FAIL
bne x11,x10,TEST_FAIL
li x18,0x7e
li x19,0x0000ffff0000007e
sd x9,0x1f8(x4)
sw x6,0x1f8(x4)
lrwu x7,x4,x18,2
lurwu x20,x4,x18,2
lrw x5,x4,x18,2
lurw x21,x4,x18,2
lrd x11,x4,x18,2
bne x7,x8,TEST_FAIL
bne x20,x8,TEST_FAIL
bne x6,x5,TEST_FAIL
bne x6,x21,TEST_FAIL
bne x11,x10,TEST_FAIL
li x18,0xFE
li x19,0x0000ffff0000007e
sd x9,0x7f0(x4)
sw x6,0x7f0(x4)
lrwu x7,x4,x18,3
lurwu x20,x4,x18,3
lrw x5,x4,x18,3
lurw x21,x4,x18,3
lrd x11,x4,x18,3
bne x7,x8,TEST_FAIL
bne x20,x8,TEST_FAIL
bne x6,x5,TEST_FAIL
bne x6,x21,TEST_FAIL
bne x11,x10,TEST_FAIL
.global STORE3
STORE3:
sw x6,0xfffffffffffffffc(x4)
lwu x7,0xfffffffffffffffc(x4)
lw x5,0xfffffffffffffffc(x4)
bne x7,x8,TEST_FAIL
bne x6,x5,TEST_FAIL
li x18,0x7fc
sw x6,0x7fc(x4)
lrwu x7,x4,x18,0
lrw x5,x4,x18,0
bne x7,x8,TEST_FAIL
bne x6,x5,TEST_FAIL
.global STORE4
STORE4:
li x6,0xffffffffffff8000
li x8,0x0000000000008000
li x9,0xaaaaaaaaaaaaaaaa
li x10,0xaaaaaaaaaaaa8000
li x31,0xffffffffaaaa8000
li x18,0
li x19,0xffff000000000000
sd x9,0x0(x4)
sh x6,0x0(x4)
lrhu x7,x4,x18,0
lurhu x20,x4,x18,0
lrh x5, x4,x18,0
lurh x21, x4,x19,0
lrw x11,x4,x18,0
bne x7,x8,TEST_FAIL
bne x20,x8,TEST_FAIL
bne x6,x5,TEST_FAIL
bne x6,x21,TEST_FAIL
bne x11,x31,TEST_FAIL
li x18,0xfffffffffffffff0
li x19,0xffff000000000000
sd x9,-16(x4)
sh x6,-16(x4)
lrhu x7,x4,x18,0
lrh x5, x4,x18,0
lrw x11,x4,x18,0
bne x7,x8,TEST_FAIL
bne x6,x5,TEST_FAIL
bne x11,x31,TEST_FAIL
li x18,0x3f8
li x19,0xffff0000000003f8
sd x9,0x7f0(x4)
sh x6,0x7f0(x4)
lrhu x7,x4,x18,1
lurhu x20,x4,x18,1
lrh x5, x4,x18,1
lurh x21, x4,x19,1
lrw x11,x4,x18,1
bne x7,x8,TEST_FAIL
bne x20,x8,TEST_FAIL
bne x6,x5,TEST_FAIL
bne x6,x21,TEST_FAIL
bne x11,x31,TEST_FAIL
li x18,0x7c
li x19,0xffff00000000007c
sd x9,0x1f0(x4)
sh x6,0x1f0(x4)
lrhu x7,x4,x18,2
lurhu x20,x4,x18,2
lrh x5, x4,x18,2
lurh x21, x4,x19,2
lrw x11,x4,x18,2
bne x7,x8,TEST_FAIL
bne x20,x8,TEST_FAIL
bne x6,x5,TEST_FAIL
bne x6,x21,TEST_FAIL
bne x11,x31,TEST_FAIL
li x18,0xFC
li x19,0xffff0000000000Fc
sd x9,0x7e0(x4)
sh x6,0x7e0(x4)
lrhu x7,x4,x18,3
lurhu x20,x4,x18,3
lrh x5, x4,x18,3
lurh x21, x4,x19,3
lrw x11,x4,x18,3
bne x7,x8,TEST_FAIL
bne x20,x8,TEST_FAIL
bne x6,x5,TEST_FAIL
bne x6,x21,TEST_FAIL
bne x11,x31,TEST_FAIL
.global STORE5
STORE5:
li x18,0xfffffffffffffffe
sh x6,0xfffffffffffffffe(x4)
lrhu x7,x4,x18,0
lrh x5,x4,x18,0
bne x7,x8,TEST_FAIL
bne x6,x5,TEST_FAIL
.global STORE6
STORE6:
li x18,0x7fe
li x19,0xffffff00000007fe
sh x6,0x7fe(x4)
lrhu x7,x4,x18,0
lurhu x20,x4,x19,0
lrh x5,x4,x18,0
lurh x21,x4,x19,0
bne x7,x8,TEST_FAIL
bne x20,x8,TEST_FAIL
bne x6,x5,TEST_FAIL
bne x6,x21,TEST_FAIL
.global STORE7
STORE7:
li x3,0xffffffffffffffff
li x8,0x00000000000000ff
li x9,0xaaaaaaaaaaaaaaaa
li x10,0xaaaaaaaaaaaaaaff
li x31,0xffffffffaaaaaaff
li x18,0x0
li x19,0xffefff0f00000000
sd x9,0x0(x4)
sb x3,0x0(x4)
lrbu x7,x4,x18,0
lurbu x20,x4,x19,0
lrb x5,x4,x18,0
lurb x21,x4,x19,0
lrw x11,x4,x18,0
bne x7,x8,TEST_FAIL
bne x20,x8,TEST_FAIL
bne x3,x5,TEST_FAIL
bne x3,x21,TEST_FAIL
bne x31,x11,TEST_FAIL
li x18,0x3f0
li x19,0xffefff0f000003f0
sd x9,0x7e0(x4)
sb x3,0x7e0(x4)
lrbu x7,x4,x18,1
lurbu x20,x4,x19,1
lrb x5,x4,x18,1
lurb x21,x4,x19,1
lrw x11,x4,x18,1
bne x7,x8,TEST_FAIL
bne x20,x8,TEST_FAIL
bne x3,x5,TEST_FAIL
bne x3,x21,TEST_FAIL
bne x31,x11,TEST_FAIL
li x18,0x78
li x19,0xffeffff000000078
sd x9,0x1e0(x4)
sb x3,0x1e0(x4)
lrbu x7,x4,x18,2
lurbu x20,x4,x19,2
lrb x5,x4,x18,2
lurb x21,x4,x19,2
lrw x11,x4,x18,2
bne x7,x8,TEST_FAIL
bne x20,x8,TEST_FAIL
bne x3,x5,TEST_FAIL
bne x3,x21,TEST_FAIL
bne x31,x11,TEST_FAIL
li x18,0xF8
li x19,0xffeffff0000000F8
sd x9,0x7c0(x4)
sb x3,0x7c0(x4)
lrbu x7,x4,x18,3
lurbu x20,x4,x19,3
lrb x5,x4,x18,3
lurb x21,x4,x19,3
lrw x11,x4,x18,3
bne x7,x8,TEST_FAIL
bne x20,x8,TEST_FAIL
bne x3,x5,TEST_FAIL
bne x3,x21,TEST_FAIL
bne x31,x11,TEST_FAIL
.global STORE8
STORE8:
sb x3,0xffffffffffffffff(x4)
li x18,0xffffffffffffffff
lrbu x7,x4,x18,0
lrb x5,x4,x18,0
bne x7,x8,TEST_FAIL
bne x3,x5,TEST_FAIL
li x18,0x7ff
sb x3,0x7ff(x4)
lrbu x7,x4,x18,0
lrb x5,x4,x18,0
bne x7,x8,TEST_FAIL
bne x3,x5,TEST_FAIL
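# ldib/ldia (and the sdib/sdia plus w/h/b variants below) fold an address
# update of rs1 by imm5 << imm2 into the access: the 'ib' forms update the
# base before the access, the 'ia' forms after it.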
.global ld_sdib
ld_sdib:
li x8,0xaaaaaaaaaaaaaaaa
li x3,0xffffffffffffffaa
li x4,0x000000000000a000
li x18,0xfffffffffffffff8
li x19,0x9fe0
sd x8,0xffffffffffffffe0(x4)
ldib x5,(x4),-8,2
bne x8,x5,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0x9fef
li x18,0xf
sd x8,0xf(x4)
ldib x5, (x4), 0xf, 0
bne x8,x5,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0x9fef
li x18,0
sd x8,0x0(x4)
ldib x5,(x4),0x0,1
bne x8,x5,TEST_FAIL
bne x4,x19, TEST_FAIL
li x19,0xa02f
li x18,0x8
sd x8,0x40(x4)
ldib x5,(x4),0x8,3
bne x8,x5,TEST_FAIL
li x8,0xaaaaaaaaaaaaaaaa
li x3,0xffffffffffffffaa
li x4,0x000000000000a000
li x18,0xfffffffffffffff8
li x19,0x9fe0
sdib x5,(x4),-8,2
ld x8,0x0(x4)
bne x8,x5,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0x9fef
li x18,0xf
sdib x5, (x4), 0xf, 0
ld x8,0(x4)
bne x8,x5,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0x9fef
li x18,0
sdib x5,(x4),0x0,1
ld x8,0x0(x4)
bne x8,x5,TEST_FAIL
bne x4,x19, TEST_FAIL
li x19,0xa02f
li x18,0x8
sdib x5,(x4),0x8,3
ld x8,0x0(x4)
bne x8,x5,TEST_FAIL
.global ld_sdia
ld_sdia:
li x3,0xffffffffffffffbb
li x8,0xbbbbbbbbbbbbbbbb
li x4,0x000000000000a000
li x18,0xfffffffffffffff8
li x19,0x9fe0
sd x8,0x0(x4)
ldia x5,(x4),-8,2
bne x8,x5,TEST_FAIL
bne x4,x19, TEST_FAIL
li x19,0x9fef
li x18,0xf
sd x8,0x0(x4)
ldia x5, (x4), 0xf, 0
bne x8,x5,TEST_FAIL
bne x4,x19, TEST_FAIL
li x19,0x9fef
li x18,0
sd x8,0x0(x4)
ldia x5,(x4),0x0,1
bne x8,x5,TEST_FAIL
bne x4,x19, TEST_FAIL
li x19,0xa02f
li x18,0x8
sd x8,0x0(x4)
ldia x5,(x4),0x8,3
bne x8,x5,TEST_FAIL
bne x4,x19, TEST_FAIL
li x3,0xffffffffffffffbb
li x8,0xbbbbbbbbbbbbbbbb
li x4,0x000000000000a000
li x18,0xfffffffffffffff8
li x19,0x9fe0
sdia x5,(x4),-8,2
sd x8,0xffffffffffffffe0(x4)
bne x8,x5,TEST_FAIL
bne x4,x19, TEST_FAIL
li x19,0x9fef
li x18,0xf
sdia x5, (x4), 0xf, 0
ld x8,0xfffffffffffffff1(x4)
bne x8,x5,TEST_FAIL
bne x4,x19, TEST_FAIL
li x19,0x9fef
li x18,0
sdia x5,(x4),0x0,1
ld x8,0x0(x4)
bne x8,x5,TEST_FAIL
bne x4,x19, TEST_FAIL
li x19,0xa02f
li x18,0x8
sdia x5,(x4),0x8,3
ld x8,0xffffffffffffffc0(x4)
bne x8,x5,TEST_FAIL
bne x4,x19, TEST_FAIL
.global ld_swib
ld_swib:
li x6,0xffffffff80000000
li x8,0x0000000080000000
li x9,0xaaaaaaaaaaaaaaaa
li x10,0xaaaaaaaa80000000
li x4,0xa000
li x19,0xa000
sd x9,0x0(x4)
sw x6,0x0(x4)
lwib x7,(x4),0x0,0
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa008
sd x9,0x8(x4)
sw x6,0x8(x4)
lwib x7,(x4),0x4,1
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0x9fc8
sd x9,-64(x4)
sw x6,-64(x4)
lwib x7,(x4),-16,2
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x6,0xffffffff80000000
li x8,0x0000000080000000
li x9,0xaaaaaaaaaaaaaaaa
li x10,0xaaaaaaaa80000000
li x4,0xa000
li x19,0xa000
sd x9,0x0(x4)
sw x6,0x0(x4)
lwuib x20,(x4),0x0,0
bne x20,x8,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa008
sd x9,0x8(x4)
sw x6,0x8(x4)
lwuib x20,(x4),0x4,1
bne x20,x8,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0x9fc8
sd x9,-64(x4)
sw x6,-64(x4)
lwuib x20,(x4),-16,2
bne x20,x8,TEST_FAIL
bne x19,x4,TEST_FAIL
li x6,0xffffffff80000000
li x8,0x0000000080000000
li x9,0xaaaaaaaaaaaaaaaa
li x10,0xaaaaaaaa80000000
li x4,0xa000
li x19,0xa000
sd x9,0x0(x4)
swib x6,(x4),0x0,0
lw x7,0x0(x4)
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa008
sd x9,0x8(x4)
swib x6,(x4),0x4,1
lw x7,0x0(x4)
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0x9fc8
sd x9,-64(x4)
swib x6,(x4),-16,2
lw x7,0(x4)
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
.global ld_swia
ld_swia:
li x6,0xffffffff80000000
li x8,0x0000000080000000
li x9,0xaaaaaaaaaaaaaaaa
li x10,0xaaaaaaaa80000000
li x4,0xa000
li x18,0
li x19,0xa000
sd x9,0x0(x4)
sw x6,0x0(x4)
lwia x7,(x4),0x0,0
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa008
sd x9,0x0(x4)
sw x6,0x0(x4)
lwia x7,(x4),0x4,1
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0x9fc8
sd x9,0(x4)
sw x6,0(x4)
lwia x7,(x4),-16,2
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x6,0xffffffff80000000
li x8,0x0000000080000000
li x9,0xaaaaaaaaaaaaaaaa
li x10,0xaaaaaaaa80000000
li x4,0xa000
li x18,0
li x19,0xa000
sd x9,0x0(x4)
sw x6,0x0(x4)
lwuia x20,(x4),0x0,0
bne x20,x8,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa008
sd x9,0x0(x4)
sw x6,0x0(x4)
lwuia x20,(x4),0x4,1
bne x20,x8,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0x9fc8
sd x9,0(x4)
sw x6,0(x4)
lwuia x20,(x4),-16,2
bne x20,x8,TEST_FAIL
bne x19,x4,TEST_FAIL
li x6,0xffffffff80000000
li x8,0x0000000080000000
li x9,0xaaaaaaaaaaaaaaaa
li x10,0xaaaaaaaa80000000
li x4,0xa000
li x18,0
li x19,0xa000
sd x9,0x0(x4)
swia x6,(x4),0x0,0
lw x7,0x0(x4)
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa008
sd x9,0x0(x4)
swia x6,(x4),0x4,1
lw x7,-8(x4)
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0x9fc8
sd x9,0(x4)
swia x6,(x4),-16,2
lw x7,0x40(x4)
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
.global ld_shib
ld_shib:
li x6,0xffffffffffff8000
li x8,0x0000000000008000
li x9,0xaaaaaaaaaaaaaaaa
li x10,0xaaaaaaaaaaaa8000
li x4,0xa000
li x19,0xa000
sd x9,0x0(x4)
sh x6,0x0(x4)
lhib x7,(x4),0x0,0
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa010
sd x9,0x10(x4)
sh x6,0x10(x4)
lhib x7,(x4),0x8,1
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa040
sd x9,0x30(x4)
sh x6,0x30(x4)
lhib x7,(x4),0xc,2
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa000
sd x9,-64(x4)
sh x6,-64(x4)
lhib x7,(x4),-8,3
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x6,0xffffffffffff8000
li x8,0x0000000000008000
li x9,0xaaaaaaaaaaaaaaaa
li x10,0xaaaaaaaaaaaa8000
li x4,0xa000
li x19,0xa000
sd x9,0x0(x4)
sh x6,0x0(x4)
lhuib x20,(x4),0x0,0
bne x20,x8,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa010
sd x9,0x10(x4)
sh x6,0x10(x4)
lhuib x20,(x4),0x8,1
bne x20,x8,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa040
sd x9,0x30(x4)
sh x6,0x30(x4)
lhuib x20,(x4),0xc,2
bne x20,x8,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa000
sd x9,-64(x4)
sh x6,-64(x4)
lhuib x20,(x4),-8,3
bne x20,x8,TEST_FAIL
bne x19,x4,TEST_FAIL
li x6,0xffffffffffff8000
li x8,0x0000000000008000
li x9,0xaaaaaaaaaaaaaaaa
li x10,0xaaaaaaaaaaaa8000
li x4,0xa000
li x19,0xa000
sd x9,0x0(x4)
shib x6,(x4),0x0,0
lh x7,0x0(x4)
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa010
sd x9,0x10(x4)
shib x6,(x4),0x8,1
lh x7,0x0(x4)
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa040
sd x9,0x30(x4)
shib x6,(x4),0xc,2
lh x7,0x0(x4)
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa000
sd x9,-64(x4)
shib x6,(x4),-8,3
lh x7,0x0(x4)
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
.global ld_shia
ld_shia:
li x6,0xffffffffffff8000
li x8,0x0000000000008000
li x9,0xaaaaaaaaaaaaaaaa
li x10,0xaaaaaaaaaaaa8000
li x4,0xa000
li x19,0xa000
sd x9,0x0(x4)
sh x6,0x0(x4)
lhia x7,(x4),0x0,0
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa010
sd x9,0x0(x4)
sh x6,0x0(x4)
lhia x7,(x4),0x8,1
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa040
sd x9,0x0(x4)
sh x6,0x0(x4)
lhia x7,(x4),0xc,2
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa000
sd x9,0(x4)
sh x6,0(x4)
lhia x7,(x4),-8,3
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x6,0xffffffffffff8000
li x8,0x0000000000008000
li x9,0xaaaaaaaaaaaaaaaa
li x10,0xaaaaaaaaaaaa8000
li x4,0xa000
li x19,0xa000
sd x9,0x0(x4)
sh x6,0x0(x4)
lhuia x20,(x4),0x0,0
bne x20,x8,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa010
sd x9,0x0(x4)
sh x6,0x0(x4)
lhuia x20,(x4),0x8,1
bne x20,x8,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa040
sd x9,0x0(x4)
sh x6,0x0(x4)
lhuia x20,(x4),0xc,2
bne x20,x8,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa000
sd x9,0(x4)
sh x6,0(x4)
lhuia x20,(x4),-8,3
bne x20,x8,TEST_FAIL
bne x19,x4,TEST_FAIL
li x6,0xffffffffffff8000
li x8,0x0000000000008000
li x9,0xaaaaaaaaaaaaaaaa
li x10,0xaaaaaaaaaaaa8000
li x4,0xa000
li x19,0xa000
sd x9,0x0(x4)
shia x6,(x4),0x0,0
lh x7,0x0(x4)
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa010
sd x9,0x0(x4)
shia x7,(x4),0x8,1
lh x6,-16(x4)
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa040
sd x9,0x0(x4)
shia x6,(x4),0xc,2
lh x7,-48(x4)
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa000
sd x9,0(x4)
shia x6,(x4),-8,3
lh x7,0x40(x4)
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
.global ld_sbib
ld_sbib:
li x6,0xfffffffffffffff0
li x8,0x00000000000000f0
li x9,0xaaaaaaaaaaaaaaaa
li x10,0xaaaaaaaaaaaa8000
li x4,0xa000
li x19,0xa000
sd x9,0x0(x4)
sb x6,0x0(x4)
lbib x7,(x4),0x0,0
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa010
sd x9,0x10(x4)
sb x6,0x10(x4)
lbib x7,(x4),0x8,1
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa040
sd x9,0x30(x4)
sb x6,0x30(x4)
lbib x7,(x4),0xc,2
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa000
sd x9,-64(x4)
sb x6,-64(x4)
lbib x7,(x4),-8,3
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x6,0xffffffffffffff80
li x8,0x0000000000000080
li x9,0xaaaaaaaaaaaaaaaa
li x10,0xaaaaaaaaaaaa8000
li x4,0xa000
li x19,0xa000
sd x9,0x0(x4)
sb x6,0x0(x4)
lbuib x20,(x4),0x0,0
bne x20,x8,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa010
sd x9,0x10(x4)
sb x6,0x10(x4)
lbuib x20,(x4),0x8,1
bne x20,x8,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa040
sd x9,0x30(x4)
sb x6,0x30(x4)
lbuib x20,(x4),0xc,2
bne x20,x8,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa000
sd x9,-64(x4)
sb x6,-64(x4)
lbuib x20,(x4),-8,3
bne x20,x8,TEST_FAIL
bne x19,x4,TEST_FAIL
li x6,0xffffffffffffff80
li x8,0x0000000000000080
li x9,0xaaaaaaaaaaaaaaaa
li x10,0xaaaaaaaaaaaa8000
li x4,0xa000
li x19,0xa000
sd x9,0x0(x4)
sbib x6,(x4),0x0,0
lb x7,0x0(x4)
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa010
sd x9,0x10(x4)
sbib x6,(x4),0x8,1
lb x7,0x0(x4)
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa040
sd x9,0x30(x4)
sbib x6,(x4),0xc,2
lb x7,0x0(x4)
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa000
sd x9,-64(x4)
sbib x7,(x4),-8,3
lb x6,0(x4)
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
.global ld_sbia
ld_sbia:
li x6,0xffffffffffffff80
li x8,0x0000000000000080
li x9,0xaaaaaaaaaaaaaaaa
li x10,0xaaaaaaaaaaaa8000
li x4,0xa000
li x19,0xa000
sd x9,0x0(x4)
sb x6,0x0(x4)
lbia x7,(x4),0x0,0
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa010
sd x9,0x0(x4)
sb x6,0x0(x4)
lbia x7,(x4),0x8,1
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa040
sd x9,0x0(x4)
sb x6,0x0(x4)
lbia x7,(x4),0xc,2
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa000
sd x9,0(x4)
sb x6,0(x4)
lbia x7,(x4),-8,3
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x6,0xffffffffffffff80
li x8,0x0000000000000080
li x9,0xaaaaaaaaaaaaaaaa
li x10,0xaaaaaaaaaaaa8000
li x4,0xa000
li x19,0xa000
sd x9,0x0(x4)
sb x6,0x0(x4)
lbuia x20,(x4),0x0,0
bne x20,x8,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa010
sd x9,0x0(x4)
sb x6,0x0(x4)
lbuia x20,(x4),0x8,1
bne x20,x8,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa040
sd x9,0x0(x4)
sb x6,0x0(x4)
lbuia x20,(x4),0xc,2
bne x20,x8,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa000
sd x9,0(x4)
sb x6,0(x4)
lbuia x20,(x4),-8,3
bne x20,x8,TEST_FAIL
bne x19,x4,TEST_FAIL
li x6,0xffffffffffffff80
li x8,0x0000000000000080
li x9,0xaaaaaaaaaaaaaaaa
li x10,0xaaaaaaaaaaaa8000
li x4,0xa000
li x19,0xa000
sd x9,0x0(x4)
sbia x6,(x4),0x0,0
lb x7,0x0(x4)
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa010
sd x9,0x0(x4)
sbia x6,(x4),0x8,1
lb x7,-16(x4)
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa040
sd x9,0x0(x4)
sbia x6,(x4),0xc,2
lb x7,-48(x4)
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
li x19,0xa000
sd x9,0(x4)
sbia x6,(x4),-8,3
lb x7,0x40(x4)
bne x7,x6,TEST_FAIL
bne x19,x4,TEST_FAIL
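# ldd/sdd transfer a pair of doublewords at rs1 + (imm2 << 4);
# lwd/lwud/swd transfer a pair of words at rs1 + (imm2 << 3).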
.global ld_sdd
ld_sdd:
li x3,0xffffffffffffffff
li x8,0xaaaaaaaaaaaaaaaa
li x9,0x5555555555555555
li x4,0x000000000000a000
sd x8,0x30(x4)
sd x9,0x38(x4)
ldd x5,x6,(x4),3,4
bne x8,x5,TEST_FAIL
bne x9,x6,TEST_FAIL
sd x8,0x20(x4)
sd x9,0x28(x4)
ldd x5,x6,(x4),2,4
bne x8,x5,TEST_FAIL
bne x9,x6,TEST_FAIL
sd x8,0(x4)
sd x9,8(x4)
ldd x5,x6,(x4),0,4
bne x8,x5,TEST_FAIL
bne x9,x6,TEST_FAIL
sd x8,0x10(x4)
sd x9,0x18(x4)
ldd x5,x6,(x4),1,4
bne x8,x5,TEST_FAIL
bne x9,x6,TEST_FAIL
addi x4,x4,0x80
sdd x8,x9,(x4),2,4
ld x5,0x20(x4)
ld x6,0x28(x4)
bne x8,x5,TEST_FAIL
bne x9,x6,TEST_FAIL
sdd x8,x9,(x4),3,4
ld x5,0x30(x4)
ld x6,0x38(x4)
bne x8,x5,TEST_FAIL
bne x9,x6,TEST_FAIL
sdd x8,x9,(x4),0,4
ld x5,0x0(x4)
ld x6,0x8(x4)
bne x8,x5,TEST_FAIL
bne x9,x6,TEST_FAIL
sdd x8,x9,(x4),1,4
ld x5,0x10(x4)
ld x6,0x18(x4)
bne x8,x5,TEST_FAIL
bne x9,x6,TEST_FAIL
.global lwd_swd_test_init
lwd_swd_test_init:
li x3,0xffffffffffffffff
li x8,0xffffffffaaaaaaaa
li x9,0x0000000055555555
li x10,0x00000000aaaaaaaa
li x11,0x0000000055555555
li x4,0x000000000000a000
.global lwd_lwud_test
lwd_lwud_test:
sw x8,8(x4)
sw x9,0xc(x4)
lwd x5,x6,(x4),1,3
lwud x12,x13,(x4),1,3
bne x8,x5,TEST_FAIL
bne x9,x6,TEST_FAIL
bne x10,x12,TEST_FAIL
bne x11,x13,TEST_FAIL
sw x8,0(x4)
sw x9,4(x4)
lwd x5,x6,(x4),0,3
lwud x12,x13,(x4),0,3
bne x8,x5,TEST_FAIL
bne x9,x6,TEST_FAIL
bne x10,x12,TEST_FAIL
bne x11,x13,TEST_FAIL
sw x8,0x18(x4)
sw x9,0x1c(x4)
lwd x5,x6,(x4),3,3
lwud x12,x13,(x4),3,3
bne x8,x5,TEST_FAIL
bne x9,x6,TEST_FAIL
bne x10,x12,TEST_FAIL
bne x11,x13,TEST_FAIL
sw x8,0x10(x4)
sw x9,0x14(x4)
lwd x5,x6,(x4),2,3
lwud x12,x13,(x4),2,3
bne x8,x5,TEST_FAIL
bne x9,x6,TEST_FAIL
bne x10,x12,TEST_FAIL
bne x11,x13,TEST_FAIL
.global swd_test
swd_test:
addi x4,x4,0x80
swd x8,x9,(x4),2,3
lw x5,0x10(x4)
lw x6,0x14(x4)
bne x8,x5,TEST_FAIL
bne x9,x6,TEST_FAIL
swd x8,x9,(x4),3,3
lw x5,0x18(x4)
lw x6,0x1c(x4)
bne x8,x5,TEST_FAIL
bne x9,x6,TEST_FAIL
swd x8,x9,(x4),0,3
lw x5,0(x4)
lw x6,4(x4)
bne x8,x5,TEST_FAIL
bne x9,x6,TEST_FAIL
swd x8,x9,(x4),1,3
lw x5,8(x4)
lw x6,0xc(x4)
bne x8,x5,TEST_FAIL
bne x9,x6,TEST_FAIL
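# FP indexed accesses: flrw/flrd (and the flur*/fsr*/fsur* forms) use the same
# rs1 + (index << imm2) addressing as the integer lr*/lur* tests; single-precision
# values read back through fmv.x.d come out NaN-boxed (upper 32 bits set),
# as the expected x11 values show.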
li x3, 0x20000
li x10, 0xf12345678
sd x10, 0x0(x3)
li x10, 0x87654321
sd x10, 0x8(x3)
li x10, 0xffffffff
sd x10, 0x10(x3)
li x11,0xffffffff12345678
.global FLRW1
FLRW1:
flrw f15, x3,x0,0
fmv.x.d x15, f15
bne x15,x11, TEST_FAIL
li x3, 0x20008
li x11, 0xffffffff87654321
.global FLRW2
FLRW2:
flrw f15, x3, x0,3
fmv.x.d x15,f15
bne x15,x11, TEST_FAIL
li x3, 0x20018
li x4, 0xffffffffffffffff
li x11, 0xffffffffffffffff
.global FLRW3
FLRW3:
flrw f15, x3,x4,3
fmv.x.d x15,f15
bne x15,x11, TEST_FAIL
li x3, 0x20000
li x4, 0xffffffff00000000
li x11,0xffffffff12345678
.global FLURW1
FLURW1:
flurw f15, x3,x4,0
fmv.x.d x15, f15
bne x15,x11, TEST_FAIL
li x3, 0x20000
li x11, 0xffffffff87654321
li x4, 0xff00000001
.global FLURW2
FLURW2:
flurw f15, x3, x4,3
fmv.x.d x15,f15
bne x15,x11, TEST_FAIL
li x3, 0x20000
li x4, 0xffffffff00000002
li x11, 0xffffffffffffffff
.global FLURW3
FLURW3:
flurw f15, x3,x4,3
fmv.x.d x15,f15
bne x15,x11, TEST_FAIL
li x3, 0x20000
li x11,0xf12345678
.global FLRD1
FLRD1:
flrd f15, x3,x0,0
fmv.x.d x15, f15
bne x15,x11, TEST_FAIL
li x11, 0x87654321
li x4, 0x2
.global FLRD2
FLRD2:
flrd f15, x3, x4,2
fmv.x.d x15,f15
bne x15,x11, TEST_FAIL
li x3, 0x20018
li x4, 0xffffffffffffffff
li x11, 0xffffffff
.global FLRD3
FLRD3:
flrd f15, x3,x4,3
fmv.x.d x15,f15
bne x15,x11, TEST_FAIL
li x3, 0x20000
li x4, 0xffffffff00000000
li x11,0xf12345678
.global FLURD1
FLURD1:
flurd f15, x3,x4,0
fmv.x.d x15, f15
bne x15,x11, TEST_FAIL
li x3, 0x20000
li x4, 0xffff00000004
li x11, 0x87654321
.global FLURD2
FLURD2:
flurd f15, x3, x4,1
fmv.x.d x15,f15
bne x15,x11, TEST_FAIL
li x3, 0x20000
li x4, 0x2
li x11, 0xffffffff
.global FLURD3
FLURD3:
flurd f15, x3,x4,3
fmv.x.d x15,f15
bne x15,x11, TEST_FAIL
li x3, 0x20000
li x11,0xeeeeeeee80000000
FPUMOVD f10, 0x80000000, x10
li x10, 0xeeeeeeeeffffffff
sd x10, 0(x3)
.global FSRW1
FSRW1:
fsrw f10, x3,x0,0
ld x15, 0(x3)
bne x15,x11, TEST_FAIL
li x3, 0x20010
li x10, 0x1234567887654321
sd x10, 0(x3)
li x3, 0x20018
li x4, 0xffffffffffffffff
FPUMOVD f10, 0x12345678,x10
li x11, 0x1234567812345678
.global FSRW2
FSRW2:
fsrw f10, x3,x4,3
li x3,0x20010
ld x15, 0(x3)
bne x15,x11,TEST_FAIL
li x3, 0x20000
li x4, 0xffffffff00000000
li x11,0xeeeeeeee80000000
FPUMOVD f10, 0x80000000, x10
li x10, 0xeeeeeeeeffffffff
sd x10, 0(x3)
.global FSURW1
FSURW1:
fsurw f10, x3,x4,0
ld x15, 0(x3)
bne x15,x11, TEST_FAIL
li x3, 0x20010
li x10, 0x1234567887654321
sd x10, 0(x3)
li x3, 0x20000
li x4, 0xffffffff00000002
FPUMOVD f10, 0x12345678,x10
li x11, 0x1234567812345678
.global FSURW2
FSURW2:
fsurw f10, x3,x4,3
li x3,0x20010
ld x15, 0(x3)
bne x15,x11,TEST_FAIL
li x3, 0x20000
li x4, 0xffffffffffffffff
sd x4, 0(x3)
FPUMOVD f10, 0x0, x10
.global FSRD1
FSRD1:
fsrd f10, x3,x0,0
ld x10, 0(x3)
bne x10, x0 ,TEST_FAIL
li x3, 0x20010
li x4, 0xffffffffffffffff
sd x4, 0(x3)
li x3,0x20018
li x4,0xffffffffffffffff
FPUMOVD f10, 0x0, x10
.global FSRD2
FSRD2:
fsrd f10, x3,x4,3
ld x10, 0(x3)
bne x10, x0 ,TEST_FAIL
li x3, 0x20000
li x4, 0xffffffffffffffff
sd x4, 0(x3)
FPUMOVD f10, 0x0, x10
li x4,0xfff00000000
.global FSURD1
FSURD1:
fsurd f10, x3,x4,0
ld x10, 0(x3)
bne x10, x0 ,TEST_FAIL
li x3, 0x20010
li x4, 0xffffffffffffffff
sd x4, 0(x3)
li x3,0x20000
li x4,0xffff00000008
FPUMOVD f10, 0x0, x10
.global FSURD2
FSURD2:
fsurd f10, x3,x4,1
li x3, 0x20010
ld x10, 0(x3)
bne x10, x0 ,TEST_FAIL
.global TEST_EXIT
TEST_EXIT:
la x1, __exit
jr x1
.global TEST_FAIL
TEST_FAIL:
la x1, __fail
jr x1
.global TEST_WFI
TEST_WFI:
wfi
#******this region is added by generator******
#************************************************************
#* File: smart_run/tests/cases/ISA/ISA_BARRIER/ct_lsu_barrier_smart.s
#* Repo: Advanced-Microelectronics-Group/OpenC910_Modified
#************************************************************
/*Copyright 2019-2021 T-Head Semiconductor Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#************************************************************
#* Target file generated by rangen *
#************************************************************
#* *
#************************************************************
#*************Following is the generated instructions*****************
.text
.align 6
.global main
main:
.include "core_init.h"
# invalidate whole dcache
DATA_CACHE_CLIV
MM_EN
DATA_CACHE_L1_PREFETCH_DIS
DATA_CACHE_L2_PREFETCH_DIS
li x12, 0x00090000
li x15, 0x1
li x14, 0x0
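# The remainder is rangen output: pseudo-random byte/half/word loads and stores
# into a 2 KiB window based at 0x90000 (x12), interleaved with fence instructions
# of varying predecessor/successor sets. The div x8,x9,x15 sequences (x15 = 1)
# are slow copies of x9 into x8, used only to delay address availability.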
#data gen
li x13,0x04CEEB7B5
ori x13,x13,0x000000043
ori x9,x12,0x0000005DC
#addr gen
li x11,0x000000000
sh x13,0x000000000(x9)
#addr delay
div x8,x9,x15
lwu x11,0x000000000(x12)
lwu x10,0x000000000(x12)
lwu x11,0x000000000(x12)
div x8,x12,x15
.global random_fence
random_fence:
li x13,0x0EC9C349D
ori x13,x13,0x00000032E
div x8,x9,x15
ori x9,x12,0x0000006CA
fence iow,iorw
ori x9,x12,0x00000050D
ori x9,x12,0x0000005CA
ori x9,x12,0x0000007CE
lh x11,0x000000000(x8)
sb x13,0x000000000(x9)
ori x9,x12,0x00000067B
sh x13,0x000000000(x8)
ori x9,x12,0x0000006FA
lh x10,0x000000000(x8)
li x13,0x078D91EE7
ori x13,x13,0x00000012C
div x8,x9,x15
sh x13,0x000000000(x8)
ori x9,x12,0x0000003D4
ori x9,x12,0x0000001C6
lbu x11,0x000000000(x9)
ori x9,x12,0x00000020C
li x13,0x016542B08
ori x13,x13,0x000000226
div x8,x9,x15
sb x13,0x000000000(x9)
lw x10,0x000000000(x8)
c.lw x10,0x030(x8)
c.lw x11,0x00(x9)
ori x9,x12,0x000000111
lh x10,0x000000000(x9)
sh x13,0x000000000(x8)
ori x9,x12,0x000000579
fence or,ir
li x13,0x0DCE9CDB0
ori x13,x13,0x00000009E
div x8,x9,x15
c.sw x13,0x04(x8)
lh x11,0x000000000(x9)
ori x9,x12,0x00000064D
ori x9,x12,0x0000006BE
fence or,rw
li x13,0x0BB57CC1D
ori x13,x13,0x0000007F0
div x8,x9,x15
ori x9,x12,0x000000115
ori x9,x12,0x000000190
ori x9,x12,0x0000001D9
c.sw x13,0x060(x9)
ori x9,x12,0x0000006A8
lw x10,0x000000000(x8)
ori x9,x12,0x000000790
c.lw x10,0x03C(x9)
lhu x10,0x000000000(x9)
ori x9,x12,0x0000004C1
li x13,0x01C8C8CC8
ori x13,x13,0x0000002C9
div x8,x9,x15
ori x9,x12,0x00000046A
sb x13,0x000000000(x9)
lwu x11,0x000000000(x8)
lh x11,0x000000000(x8)
c.lw x10,0x020(x9)
c.lw x11,0x07C(x9)
lb x11,0x000000000(x8)
c.sw x13,0x044(x8)
lbu x11,0x000000000(x8)
fence io,iw
li x13,0x052124DDB
ori x13,x13,0x00000057E
div x8,x9,x15
sh x13,0x000000000(x9)
ori x9,x12,0x0000002AE
sw x13,0x000000000(x9)
ori x9,x12,0x000000213
ori x9,x12,0x00000076E
lw x11,0x000000000(x8)
lwu x11,0x000000000(x8)
li x13,0x0E7237AE3
ori x13,x13,0x000000419
div x8,x9,x15
sw x13,0x000000000(x9)
ori x9,x12,0x000000005
ori x9,x12,0x000000265
ori x9,x12,0x0000005B2
lhu x10,0x000000000(x9)
fence ior,irw
c.lw x10,0x030(x8)
ori x9,x12,0x0000006F5
li x13,0x07BE794B6
ori x13,x13,0x000000560
div x8,x9,x15
lwu x11,0x000000000(x9)
ori x9,x12,0x0000003E5
c.sw x13,0x03C(x8)
ori x9,x12,0x000000141
c.lw x10,0x02C(x8)
lwu x11,0x000000000(x9)
ori x9,x12,0x0000004B1
sh x13,0x000000000(x9)
lw x10,0x000000000(x8)
ori x9,x12,0x000000297
ori x9,x12,0x000000163
lbu x11,0x000000000(x9)
c.lw x11,0x0C(x9)
sh x13,0x000000000(x9)
c.lw x10,0x040(x9)
ori x9,x12,0x00000004B
sw x13,0x000000000(x9)
ori x9,x12,0x00000036D
c.sw x13,0x02C(x8)
li x13,0x048F922FA
ori x13,x13,0x0000003B2
div x8,x9,x15
lw x11,0x000000000(x9)
lw x10,0x000000000(x8)
ori x9,x12,0x00000023D
fence o,iorw
fence iw,ow
sw x13,0x000000000(x9)
ori x9,x12,0x0000007A1
lh x11,0x000000000(x8)
ori x9,x12,0x00000029B
lhu x10,0x000000000(x9)
li x13,0x0361E0F0B
ori x13,x13,0x000000553
div x8,x9,x15
ori x9,x12,0x00000012A
fence o,irw
sb x13,0x000000000(x9)
lh x10,0x000000000(x9)
ori x9,x12,0x0000005BD
ori x9,x12,0x000000368
fence iow,ow
c.lw x11,0x04(x8)
lb x10,0x000000000(x9)
ori x9,x12,0x000000096
ori x9,x12,0x00000030E
ori x9,x12,0x00000020D
ori x9,x12,0x000000107
lh x10,0x000000000(x8)
sw x13,0x000000000(x9)
fence iw,orw
ori x9,x12,0x000000174
sh x13,0x000000000(x9)
li x13,0x0CDED21AD
ori x13,x13,0x00000071A
div x8,x9,x15
lwu x11,0x000000000(x8)
ori x9,x12,0x0000007B3
lhu x11,0x000000000(x8)
lh x10,0x000000000(x8)
ori x9,x12,0x000000429
c.sw x13,0x074(x8)
ori x9,x12,0x000000596
ori x9,x12,0x00000004D
ori x9,x12,0x000000110
ori x9,x12,0x00000017E
sb x13,0x000000000(x8)
lhu x11,0x000000000(x9)
c.lw x10,0x068(x9)
sw x13,0x000000000(x8)
ori x9,x12,0x000000632
lh x11,0x000000000(x8)
ori x9,x12,0x000000372
li x13,0x0B38816BB
ori x13,x13,0x000000427
div x8,x9,x15
fence io,ior
ori x9,x12,0x00000020C
lhu x11,0x000000000(x9)
ori x9,x12,0x0000002D4
sh x13,0x000000000(x9)
lh x11,0x000000000(x8)
ori x9,x12,0x000000585
fence ow,rw
ori x9,x12,0x00000078A
sw x13,0x000000000(x8)
sw x13,0x000000000(x9)
sb x13,0x000000000(x9)
ori x9,x12,0x00000068E
lwu x11,0x000000000(x9)
ori x9,x12,0x000000167
sb x13,0x000000000(x8)
ori x9,x12,0x000000396
lhu x11,0x000000000(x9)
lw x11,0x000000000(x8)
li x13,0x003350689
ori x13,x13,0x0000003CF
div x8,x9,x15
sh x13,0x000000000(x8)
lh x10,0x000000000(x8)
sh x13,0x000000000(x9)
ori x9,x12,0x000000443
ori x9,x12,0x0000000D8
c.sw x13,0x060(x8)
sh x13,0x000000000(x9)
lw x10,0x000000000(x9)
fence rw,or
ori x9,x12,0x0000002CA
sh x13,0x000000000(x8)
lwu x11,0x000000000(x8)
c.sw x13,0x00(x8)
c.sw x13,0x020(x9)
li x13,0x09B3E1F5D
ori x13,x13,0x000000769
div x8,x9,x15
lhu x11,0x000000000(x8)
lbu x11,0x000000000(x9)
sb x13,0x000000000(x9)
ori x9,x12,0x000000438
lwu x10,0x000000000(x8)
ori x9,x12,0x0000000CF
ori x9,x12,0x000000582
ori x9,x12,0x000000324
lhu x10,0x000000000(x9)
c.lw x11,0x02C(x8)
ori x9,x12,0x0000005AE
fence orw,o
ori x9,x12,0x000000658
ori x9,x12,0x00000074C
sh x13,0x000000000(x8)
c.sw x13,0x03C(x8)
c.lw x11,0x07C(x9)
lbu x10,0x000000000(x9)
li x13,0x0B111C5D9
ori x13,x13,0x00000059B
div x8,x9,x15
ori x9,x12,0x0000003D3
lhu x10,0x000000000(x9)
ori x9,x12,0x00000042D
ori x9,x12,0x0000007AB
fence o,orw
ori x9,x12,0x0000002D2
sh x13,0x000000000(x9)
ori x9,x12,0x00000031F
ori x9,x12,0x000000624
li x13,0x058225841
ori x13,x13,0x00000001D
div x8,x9,x15
ori x9,x12,0x0000004B2
ori x9,x12,0x0000003F2
sh x13,0x000000000(x8)
ori x9,x12,0x0000003F1
lhu x11,0x000000000(x8)
lb x10,0x000000000(x9)
ori x9,x12,0x0000003D8
ori x9,x12,0x0000007C1
lb x10,0x000000000(x8)
c.sw x13,0x00(x8)
sh x13,0x000000000(x9)
sb x13,0x000000000(x8)
c.lw x11,0x08(x9)
lb x11,0x000000000(x8)
lbu x10,0x000000000(x9)
ori x9,x12,0x00000022C
lw x11,0x000000000(x8)
c.sw x13,0x04(x9)
c.sw x13,0x064(x9)
li x13,0x08C946993
ori x13,x13,0x00000079F
div x8,x9,x15
lwu x10,0x000000000(x8)
sw x13,0x000000000(x9)
lw x10,0x000000000(x8)
lhu x10,0x000000000(x8)
c.lw x11,0x08(x9)
ori x9,x12,0x00000013A
ori x9,x12,0x000000285
lwu x10,0x000000000(x8)
ori x9,x12,0x0000001ED
ori x9,x12,0x000000705
li x13,0x0B4A0ED57
ori x13,x13,0x0000001AD
div x8,x9,x15
ori x9,x12,0x000000770
ori x9,x12,0x00000057B
lw x11,0x000000000(x9)
fence iorw,orw
c.sw x13,0x050(x9)
li x13,0x0B2350D32
ori x13,x13,0x0000006B0
div x8,x9,x15
ori x9,x12,0x000000557
ori x9,x12,0x00000036A
ori x9,x12,0x00000035E
c.sw x13,0x050(x9)
fence ir,ir
ori x9,x12,0x00000017A
sw x13,0x000000000(x8)
ori x9,x12,0x00000057B
lw x11,0x000000000(x9)
ori x9,x12,0x0000004A5
ori x9,x12,0x000000106
c.lw x10,0x024(x9)
c.sw x13,0x028(x9)
c.sw x13,0x04C(x9)
lh x11,0x000000000(x9)
lw x10,0x000000000(x9)
sw x13,0x000000000(x9)
li x13,0x08DC43888
ori x13,x13,0x00000066E
div x8,x9,x15
c.sw x13,0x040(x9)
lwu x10,0x000000000(x8)
lh x11,0x000000000(x9)
ori x9,x12,0x0000001A1
ori x9,x12,0x000000287
li x13,0x0244924B0
ori x13,x13,0x0000007B5
div x8,x9,x15
c.sw x13,0x010(x8)
c.lw x10,0x05C(x9)
lb x11,0x000000000(x8)
c.lw x11,0x07C(x9)
li x13,0x06337860A
ori x13,x13,0x000000099
div x8,x9,x15
ori x9,x12,0x0000001DF
ori x9,x12,0x00000049B
fence i,o
c.lw x10,0x050(x9)
ori x9,x12,0x00000051D
ori x9,x12,0x0000000CB
lb x11,0x000000000(x9)
lw x11,0x000000000(x8)
fence iorw,irw
c.lw x11,0x040(x9)
sw x13,0x000000000(x9)
li x13,0x0862B7C2A
ori x13,x13,0x000000558
div x8,x9,x15
ori x9,x12,0x00000060C
lw x10,0x000000000(x9)
lh x10,0x000000000(x8)
sh x13,0x000000000(x8)
c.sw x13,0x068(x8)
ori x9,x12,0x0000005CE
ori x9,x12,0x00000009A
li x13,0x0CFB03D44
ori x13,x13,0x00000042D
div x8,x9,x15
lw x11,0x000000000(x8)
ori x9,x12,0x0000007F9
lhu x10,0x000000000(x8)
lwu x11,0x000000000(x9)
lhu x10,0x000000000(x8)
sh x13,0x000000000(x9)
ori x9,x12,0x0000007E7
c.sw x13,0x02C(x8)
c.lw x10,0x050(x8)
fence o,io
c.sw x13,0x07C(x9)
lw x11,0x000000000(x9)
lb x11,0x000000000(x9)
lbu x10,0x000000000(x8)
lhu x11,0x000000000(x8)
li x13,0x0EA9C0176
ori x13,x13,0x000000234
div x8,x9,x15
ori x9,x12,0x000000508
c.sw x13,0x038(x9)
ori x9,x12,0x00000040A
lhu x10,0x000000000(x9)
ori x9,x12,0x000000203
li x13,0x04966495B
ori x13,x13,0x0000005F1
div x8,x9,x15
sb x13,0x000000000(x8)
lhu x10,0x000000000(x9)
sw x13,0x000000000(x9)
lhu x11,0x000000000(x8)
c.sw x13,0x07C(x9)
ori x9,x12,0x0000004B0
ori x9,x12,0x000000489
lhu x11,0x000000000(x8)
lwu x11,0x000000000(x9)
ori x9,x12,0x0000003E9
ori x9,x12,0x000000543
ori x9,x12,0x0000004DB
li x13,0x0C9764D28
ori x13,x13,0x000000789
div x8,x9,x15
lh x10,0x000000000(x9)
ori x9,x12,0x00000028D
ori x9,x12,0x0000007EB
c.sw x13,0x05C(x9)
lhu x11,0x000000000(x8)
lwu x11,0x000000000(x8)
ori x9,x12,0x000000222
c.sw x13,0x01C(x8)
lwu x11,0x000000000(x8)
lw x11,0x000000000(x8)
li x13,0x02E524E9F
ori x13,x13,0x000000700
div x8,x9,x15
ori x9,x12,0x00000043E
sw x13,0x000000000(x8)
lwu x10,0x000000000(x8)
lbu x11,0x000000000(x8)
lb x10,0x000000000(x9)
ori x9,x12,0x0000003D6
ori x9,x12,0x0000001DE
ori x9,x12,0x0000000FC
ori x9,x12,0x000000076
sw x13,0x000000000(x8)
ori x9,x12,0x000000304
lh x10,0x000000000(x8)
sw x13,0x000000000(x9)
fence iorw,ir
lb x11,0x000000000(x8)
ori x9,x12,0x000000063
li x13,0x03EE5860E
ori x13,x13,0x0000004B3
div x8,x9,x15
c.lw x10,0x028(x8)
c.lw x10,0x040(x9)
ori x9,x12,0x000000655
lb x11,0x000000000(x8)
lbu x11,0x000000000(x8)
sw x13,0x000000000(x9)
ori x9,x12,0x00000079D
lhu x10,0x000000000(x8)
ori x9,x12,0x0000003E7
lh x11,0x000000000(x9)
li x13,0x043CB7A53
ori x13,x13,0x00000029C
div x8,x9,x15
lbu x11,0x000000000(x9)
lb x10,0x000000000(x8)
ori x9,x12,0x000000190
lbu x10,0x000000000(x9)
sw x13,0x000000000(x9)
sb x13,0x000000000(x9)
ori x9,x12,0x0000002ED
li x13,0x0DABEBB3E
ori x13,x13,0x00000008E
div x8,x9,x15
lhu x10,0x000000000(x9)
lw x11,0x000000000(x9)
lhu x11,0x000000000(x8)
ori x9,x12,0x0000001D0
sw x13,0x000000000(x9)
c.lw x11,0x014(x8)
ori x9,x12,0x000000526
lh x10,0x000000000(x9)
fence i,w
ori x9,x12,0x0000000F6
ori x9,x12,0x00000077D
lbu x10,0x000000000(x9)
li x13,0x0766B950F
ori x13,x13,0x0000004E0
div x8,x9,x15
sb x13,0x000000000(x8)
lh x11,0x000000000(x8)
ori x9,x12,0x000000797
lw x10,0x000000000(x8)
ori x9,x12,0x0000005E3
c.lw x11,0x03C(x8)
ori x9,x12,0x0000003C7
lw x11,0x000000000(x9)
lbu x10,0x000000000(x9)
sh x13,0x000000000(x8)
ori x9,x12,0x0000002F8
ori x9,x12,0x000000737
ori x9,x12,0x0000003AC
lwu x10,0x000000000(x9)
ori x9,x12,0x000000375
ori x9,x12,0x000000165
lbu x10,0x000000000(x9)
lb x10,0x000000000(x8)
li x13,0x0856C1089
ori x13,x13,0x000000381
div x8,x9,x15
ori x9,x12,0x00000070E
c.sw x13,0x02C(x8)
lwu x11,0x000000000(x9)
c.lw x11,0x024(x8)
sh x13,0x000000000(x9)
lw x11,0x000000000(x8)
lbu x10,0x000000000(x8)
ori x9,x12,0x000000574
ori x9,x12,0x0000002D2
li x13,0x0EE0A9CFC
ori x13,x13,0x000000556
div x8,x9,x15
lb x11,0x000000000(x9)
lhu x11,0x000000000(x9)
ori x9,x12,0x0000007D1
lw x10,0x000000000(x8)
ori x9,x12,0x0000007F5
li x13,0x0D83000FF
ori x13,x13,0x000000348
div x8,x9,x15
ori x9,x12,0x0000000A2
c.sw x13,0x06C(x9)
ori x9,x12,0x000000210
sh x13,0x000000000(x9)
sh x13,0x000000000(x8)
lwu x11,0x000000000(x9)
lwu x10,0x000000000(x9)
lhu x10,0x000000000(x9)
ori x9,x12,0x0000003B9
li x13,0x01CAEBC8D
ori x13,x13,0x000000407
div x8,x9,x15
sh x13,0x000000000(x8)
ori x9,x12,0x000000766
lh x10,0x000000000(x8)
ori x9,x12,0x0000006C6
lbu x10,0x000000000(x8)
c.sw x13,0x06C(x8)
lb x11,0x000000000(x8)
li x13,0x086B215A6
ori x13,x13,0x0000007FC
div x8,x9,x15
c.lw x10,0x08(x8)
ori x9,x12,0x0000007D3
ori x9,x12,0x00000036B
lw x10,0x000000000(x8)
sb x13,0x000000000(x9)
ori x9,x12,0x0000001D5
fence iow,iorw
ori x9,x12,0x000000482
lb x11,0x000000000(x8)
sw x13,0x000000000(x8)
c.sw x13,0x04C(x8)
lw x11,0x000000000(x9)
ori x9,x12,0x000000029
c.sw x13,0x058(x9)
ori x9,x12,0x0000004ED
ori x9,x12,0x000000501
lwu x10,0x000000000(x9)
lbu x10,0x000000000(x8)
sh x13,0x000000000(x8)
c.sw x13,0x06C(x8)
li x13,0x0DE0E25BD
ori x13,x13,0x0000004C2
div x8,x9,x15
ori x9,x12,0x0000000F1
ori x9,x12,0x000000034
ori x9,x12,0x00000020E
ori x9,x12,0x0000002F9
sb x13,0x000000000(x8)
lwu x10,0x000000000(x9)
li x13,0x0BF51D589
ori x13,x13,0x000000046
div x8,x9,x15
ori x9,x12,0x00000051C
ori x9,x12,0x000000449
lhu x11,0x000000000(x9)
c.sw x13,0x040(x8)
lbu x11,0x000000000(x9)
lwu x11,0x000000000(x8)
ori x9,x12,0x000000693
ori x9,x12,0x0000006C5
lbu x11,0x000000000(x8)
ori x9,x12,0x000000332
ori x9,x12,0x000000701
lh x11,0x000000000(x8)
li x13,0x0A2D29DD2
ori x13,x13,0x00000053C
div x8,x9,x15
ori x9,x12,0x000000786
sb x13,0x000000000(x8)
ori x9,x12,0x00000004C
lwu x11,0x000000000(x8)
c.sw x13,0x06C(x8)
ori x9,x12,0x0000001B1
lw x10,0x000000000(x8)
lwu x10,0x000000000(x8)
ori x9,x12,0x0000002B3
ori x9,x12,0x0000007F7
lh x11,0x000000000(x9)
ori x9,x12,0x000000362
lbu x10,0x000000000(x8)
sh x13,0x000000000(x9)
li x13,0x05669EA6C
ori x13,x13,0x000000442
div x8,x9,x15
ori x9,x12,0x0000007FB
ori x9,x12,0x00000066E
fence o,io
lw x10,0x000000000(x8)
c.lw x10,0x01C(x8)
ori x9,x12,0x00000077C
sh x13,0x000000000(x8)
ori x9,x12,0x0000004F9
lhu x11,0x000000000(x8)
lhu x11,0x000000000(x8)
ori x9,x12,0x000000770
ori x9,x12,0x0000007E8
lwu x10,0x000000000(x8)
ori x9,x12,0x000000566
lhu x10,0x000000000(x8)
sb x13,0x000000000(x8)
li x13,0x0123198F5
ori x13,x13,0x0000001D4
div x8,x9,x15
lwu x10,0x000000000(x9)
ori x9,x12,0x00000048F
lhu x11,0x000000000(x9)
lhu x10,0x000000000(x8)
sb x13,0x000000000(x9)
ori x9,x12,0x000000772
fence iow,or
ori x9,x12,0x000000256
ori x9,x12,0x0000006A2
lhu x10,0x000000000(x9)
c.sw x13,0x010(x9)
li x13,0x0C791959D
ori x13,x13,0x00000053C
div x8,x9,x15
lbu x10,0x000000000(x8)
ori x9,x12,0x0000002D7
fence i,ow
sb x13,0x000000000(x8)
lh x11,0x000000000(x9)
li x13,0x0785CD7E1
ori x13,x13,0x0000006CE
div x8,x9,x15
ori x9,x12,0x0000003A5
lb x10,0x000000000(x9)
ori x9,x12,0x000000645
ori x9,x12,0x00000078B
c.lw x10,0x058(x8)
lwu x11,0x000000000(x9)
ori x9,x12,0x00000067F
lwu x11,0x000000000(x9)
sh x13,0x000000000(x9)
ori x9,x12,0x0000007CD
sw x13,0x000000000(x9)
lb x11,0x000000000(x9)
ori x9,x12,0x00000069D
fence w,iorw
ori x9,x12,0x0000004DF
li x13,0x085E77D09
ori x13,x13,0x000000605
div x8,x9,x15
ori x9,x12,0x0000003ED
c.lw x10,0x048(x8)
ori x9,x12,0x00000048D
sw x13,0x000000000(x9)
sb x13,0x000000000(x8)
li x13,0x0E711CD98
ori x13,x13,0x000000046
div x8,x9,x15
lwu x10,0x000000000(x9)
c.sw x13,0x074(x9)
ori x9,x12,0x000000110
lh x10,0x000000000(x8)
sb x13,0x000000000(x9)
ori x9,x12,0x0000002A5
lbu x11,0x000000000(x9)
li x13,0x027BD19CB
ori x13,x13,0x000000776
div x8,x9,x15
lw x10,0x000000000(x8)
sh x13,0x000000000(x8)
lb x10,0x000000000(x9)
lwu x11,0x000000000(x8)
ori x9,x12,0x000000467
ori x9,x12,0x00000005B
ori x9,x12,0x0000002E8
lbu x10,0x000000000(x9)
lh x10,0x000000000(x9)
ori x9,x12,0x0000005A8
sb x13,0x000000000(x8)
sh x13,0x000000000(x8)
ori x9,x12,0x0000005B8
sb x13,0x000000000(x8)
sb x13,0x000000000(x9)
lwu x11,0x000000000(x9)
ori x9,x12,0x0000005F8
ori x9,x12,0x000000736
li x13,0x07EED9DD9
ori x13,x13,0x0000000E4
div x8,x9,x15
ori x9,x12,0x000000230
lh x11,0x000000000(x8)
ori x9,x12,0x000000621
lb x11,0x000000000(x8)
sb x13,0x000000000(x9)
fence iw,ir
lw x11,0x000000000(x8)
lh x10,0x000000000(x8)
lwu x10,0x000000000(x8)
c.sw x13,0x030(x8)
ori x9,x12,0x0000006AE
sb x13,0x000000000(x8)
ori x9,x12,0x00000008A
ori x9,x12,0x00000052A
li x13,0x0687F6CDF
ori x13,x13,0x0000007CE
div x8,x9,x15
ori x9,x12,0x0000003C9
lw x11,0x000000000(x8)
lhu x10,0x000000000(x9)
lbu x10,0x000000000(x8)
sb x13,0x000000000(x9)
sw x13,0x000000000(x8)
sb x13,0x000000000(x8)
ori x9,x12,0x0000004E5
ori x9,x12,0x0000007C0
sw x13,0x000000000(x8)
lh x11,0x000000000(x9)
sb x13,0x000000000(x9)
ori x9,x12,0x000000686
ori x9,x12,0x0000002D9
li x13,0x0EA814508
ori x13,x13,0x000000779
div x8,x9,x15
sh x13,0x000000000(x8)
sh x13,0x000000000(x8)
lw x10,0x000000000(x8)
ori x9,x12,0x0000001D9
c.lw x10,0x050(x9)
sb x13,0x000000000(x8)
ori x9,x12,0x0000002C0
ori x9,x12,0x0000003BC
ori x9,x12,0x00000056E
ori x9,x12,0x0000004DC
fence irw,orw
c.lw x11,0x010(x9)
sw x13,0x000000000(x9)
ori x9,x12,0x0000004C4
c.sw x13,0x06C(x8)
lwu x11,0x000000000(x9)
ori x9,x12,0x00000067E
ori x9,x12,0x0000002E5
li x13,0x00F027361
ori x13,x13,0x000000224
div x8,x9,x15
lb x11,0x000000000(x9)
lhu x10,0x000000000(x8)
sh x13,0x000000000(x9)
ori x9,x12,0x00000067E
ori x9,x12,0x000000592
sh x13,0x000000000(x9)
c.sw x13,0x0C(x8)
ori x9,x12,0x0000005D3
ori x9,x12,0x0000001AE
sb x13,0x000000000(x9)
sb x13,0x000000000(x8)
ori x9,x12,0x000000694
lwu x11,0x000000000(x9)
lb x10,0x000000000(x9)
ori x9,x12,0x0000002A3
ori x9,x12,0x00000047D
li x13,0x090675D26
ori x13,x13,0x0000003EF
div x8,x9,x15
ori x9,x12,0x00000054E
ori x9,x12,0x0000002FC
ori x9,x12,0x000000029
ori x9,x12,0x00000032C
ori x9,x12,0x00000059B
sh x13,0x000000000(x8)
ori x9,x12,0x000000686
sw x13,0x000000000(x8)
sb x13,0x000000000(x8)
lb x11,0x000000000(x8)
ori x9,x12,0x00000021D
lw x11,0x000000000(x8)
lbu x10,0x000000000(x8)
fence w,o
lw x11,0x000000000(x8)
li x13,0x00A501433
ori x13,x13,0x00000058D
div x8,x9,x15
lh x10,0x000000000(x9)
ori x9,x12,0x0000004FC
ori x9,x12,0x000000203
lbu x11,0x000000000(x9)
sh x13,0x000000000(x9)
li x13,0x0120B76E4
ori x13,x13,0x000000456
div x8,x9,x15
lh x11,0x000000000(x9)
ori x9,x12,0x0000006C1
ori x9,x12,0x0000000E7
lwu x11,0x000000000(x8)
ori x9,x12,0x0000000BB
lw x10,0x000000000(x9)
ori x9,x12,0x00000014B
ori x9,x12,0x0000002BA
ori x9,x12,0x00000034F
ori x9,x12,0x0000006C8
sb x13,0x000000000(x9)
ori x9,x12,0x000000253
ori x9,x12,0x000000790
lhu x10,0x000000000(x9)
ori x9,x12,0x00000068B
ori x9,x12,0x0000002A2
ori x9,x12,0x000000248
li x13,0x0CCB08D7D
ori x13,x13,0x0000001E9
div x8,x9,x15
lb x11,0x000000000(x8)
lwu x11,0x000000000(x9)
lwu x11,0x000000000(x9)
ori x9,x12,0x0000004BD
lw x10,0x000000000(x8)
ori x9,x12,0x000000780
li x13,0x08E16A7D5
ori x13,x13,0x0000001B3
div x8,x9,x15
lw x10,0x000000000(x8)
ori x9,x12,0x00000060D
sw x13,0x000000000(x9)
fence ow,io
lh x11,0x000000000(x9)
lh x11,0x000000000(x9)
sw x13,0x000000000(x9)
sb x13,0x000000000(x8)
sh x13,0x000000000(x8)
fence ior,rw
ori x9,x12,0x0000004EB
lwu x11,0x000000000(x8)
lhu x11,0x000000000(x9)
lwu x11,0x000000000(x8)
lh x10,0x000000000(x8)
ori x9,x12,0x0000007D6
ori x9,x12,0x0000007F9
sb x13,0x000000000(x9)
ori x9,x12,0x00000069E
li x13,0x065902F08
ori x13,x13,0x000000559
div x8,x9,x15
lh x11,0x000000000(x9)
lbu x10,0x000000000(x9)
ori x9,x12,0x00000077A
c.lw x10,0x020(x8)
sh x13,0x000000000(x8)
lw x11,0x000000000(x9)
sb x13,0x000000000(x9)
sb x13,0x000000000(x9)
ori x9,x12,0x000000673
fence ow,ow
c.lw x11,0x018(x9)
fence irw,o
lhu x11,0x000000000(x9)
li x13,0x0240BFB95
ori x13,x13,0x0000006A1
div x8,x9,x15
ori x9,x12,0x00000062B
c.sw x13,0x044(x8)
sb x13,0x000000000(x9)
ori x9,x12,0x0000007A8
lwu x10,0x000000000(x8)
ori x9,x12,0x0000002B2
lwu x11,0x000000000(x9)
ori x9,x12,0x0000001AE
lh x11,0x000000000(x9)
li x13,0x02F9694FA
ori x13,x13,0x0000007FA
div x8,x9,x15
sh x13,0x000000000(x8)
lh x10,0x000000000(x8)
ori x9,x12,0x000000287
lbu x10,0x000000000(x8)
lh x10,0x000000000(x9)
ori x9,x12,0x000000771
lb x11,0x000000000(x8)
c.lw x11,0x058(x8)
lb x11,0x000000000(x9)
c.lw x10,0x034(x8)
sh x13,0x000000000(x9)
lbu x10,0x000000000(x9)
ori x9,x12,0x000000697
ori x9,x12,0x000000327
ori x9,x12,0x000000522
ori x9,x12,0x000000620
c.lw x11,0x04C(x8)
ori x9,x12,0x0000000F9
lhu x11,0x000000000(x9)
lh x11,0x000000000(x8)
li x13,0x0FB998A25
ori x13,x13,0x000000280
div x8,x9,x15
fence io,irw
ori x9,x12,0x0000001D6
ori x9,x12,0x00000046E
ori x9,x12,0x0000004CE
lb x10,0x000000000(x8)
sh x13,0x000000000(x8)
ori x9,x12,0x00000050B
ori x9,x12,0x0000001F4
sh x13,0x000000000(x8)
sw x13,0x000000000(x8)
ori x9,x12,0x0000003D8
ori x9,x12,0x000000213
lwu x10,0x000000000(x9)
li x13,0x061D0D29B
ori x13,x13,0x0000004F7
div x8,x9,x15
ori x9,x12,0x0000005C8
sb x13,0x000000000(x8)
ori x9,x12,0x000000635
ori x9,x12,0x0000002DF
fence r,ior
lh x11,0x000000000(x9)
c.sw x13,0x014(x9)
lb x11,0x000000000(x8)
ori x9,x12,0x00000048A
ori x9,x12,0x0000006AB
sh x13,0x000000000(x8)
li x13,0x01F6E4466
ori x13,x13,0x000000696
div x8,x9,x15
ori x9,x12,0x0000003F8
lhu x10,0x000000000(x9)
c.lw x11,0x054(x9)
fence r,or
c.lw x11,0x07C(x8)
ori x9,x12,0x0000003A1
lhu x10,0x000000000(x8)
ori x9,x12,0x0000004A1
sw x13,0x000000000(x9)
lhu x11,0x000000000(x9)
ori x9,x12,0x0000002C4
sb x13,0x000000000(x8)
lwu x10,0x000000000(x8)
li x13,0x01297CF5F
ori x13,x13,0x0000004A0
div x8,x9,x15
ori x9,x12,0x0000002EB
ori x9,x12,0x0000007FD
c.lw x10,0x078(x9)
ori x9,x12,0x0000003B8
ori x9,x12,0x0000004C0
ori x9,x12,0x0000000AE
ori x9,x12,0x00000043E
c.lw x10,0x054(x8)
lh x10,0x000000000(x9)
lh x10,0x000000000(x9)
ori x9,x12,0x0000000B7
ori x9,x12,0x0000001BF
sh x13,0x000000000(x8)
c.sw x13,0x01C(x8)
ori x9,x12,0x0000001A9
lbu x10,0x000000000(x8)
sb x13,0x000000000(x8)
ori x9,x12,0x000000539
li x13,0x01EE76BA0
ori x13,x13,0x000000787
div x8,x9,x15
lhu x11,0x000000000(x8)
ori x9,x12,0x000000003
lh x10,0x000000000(x9)
ori x9,x12,0x000000492
sw x13,0x000000000(x8)
ori x9,x12,0x000000280
ori x9,x12,0x000000463
ori x9,x12,0x000000464
ori x9,x12,0x0000002B6
lwu x11,0x000000000(x8)
ori x9,x12,0x0000006D5
lhu x11,0x000000000(x8)
lhu x11,0x000000000(x8)
li x13,0x0E55E2886
ori x13,x13,0x000000085
div x8,x9,x15
ori x9,x12,0x0000002AA
sb x13,0x000000000(x9)
c.lw x10,0x030(x9)
ori x9,x12,0x00000011E
ori x9,x12,0x0000005A8
ori x9,x12,0x000000503
lwu x11,0x000000000(x8)
li x13,0x03417C95B
ori x13,x13,0x000000041
div x8,x9,x15
lbu x10,0x000000000(x8)
ori x9,x12,0x0000007F9
ori x9,x12,0x0000001D0
fence io,ow
lb x10,0x000000000(x8)
lh x11,0x000000000(x8)
lhu x11,0x000000000(x9)
li x13,0x08239C841
ori x13,x13,0x000000191
div x8,x9,x15
c.lw x11,0x01C(x8)
c.lw x11,0x01C(x8)
lhu x10,0x000000000(x8)
fence r,iw
li x13,0x0FE8E01F9
ori x13,x13,0x0000006DF
div x8,x9,x15
c.sw x13,0x05C(x8)
c.sw x13,0x054(x9)
ori x9,x12,0x0000003D7
lwu x11,0x000000000(x8)
c.lw x10,0x070(x8)
li x13,0x0E3924CAB
ori x13,x13,0x000000056
div x8,x9,x15
lh x11,0x000000000(x9)
c.sw x13,0x054(x8)
fence o,r
ori x9,x12,0x000000746
ori x9,x12,0x00000037B
sb x13,0x000000000(x9)
ori x9,x12,0x0000005A8
lw x10,0x000000000(x8)
sb x13,0x000000000(x9)
c.sw x13,0x014(x9)
ori x9,x12,0x0000004DF
lh x10,0x000000000(x9)
sb x13,0x000000000(x9)
ori x9,x12,0x000000186
ori x9,x12,0x00000037C
li x13,0x01CD3BF05
ori x13,x13,0x000000189
div x8,x9,x15
lhu x11,0x000000000(x9)
lhu x11,0x000000000(x8)
fence o,ow
lwu x11,0x000000000(x8)
c.sw x13,0x058(x8)
sb x13,0x000000000(x9)
lwu x11,0x000000000(x9)
sh x13,0x000000000(x9)
lw x11,0x000000000(x9)
ori x9,x12,0x0000007C2
c.sw x13,0x068(x9)
ori x9,x12,0x0000007BC
ori x9,x12,0x000000293
ori x9,x12,0x0000006AE
fence rw,io
li x13,0x060C20D98
ori x13,x13,0x0000002CF
div x8,x9,x15
sh x13,0x000000000(x8)
lw x10,0x000000000(x9)
ori x9,x12,0x00000023B
ori x9,x12,0x0000002A2
ori x9,x12,0x00000021E
ori x9,x12,0x0000002B2
ori x9,x12,0x000000605
li x13,0x0C8F3C458
ori x13,x13,0x0000005B8
div x8,x9,x15
ori x9,x12,0x000000721
sh x13,0x000000000(x9)
ori x9,x12,0x0000006C4
ori x9,x12,0x0000006AA
ori x9,x12,0x0000006D2
sw x13,0x000000000(x9)
ori x9,x12,0x0000000FF
lw x11,0x000000000(x8)
c.lw x11,0x034(x9)
sw x13,0x000000000(x8)
ori x9,x12,0x00000061D
lhu x11,0x000000000(x8)
ori x9,x12,0x0000005C8
li x13,0x0FDDAF11B
ori x13,x13,0x00000003F
div x8,x9,x15
ori x9,x12,0x0000005BE
sb x13,0x000000000(x9)
lbu x11,0x000000000(x8)
ori x9,x12,0x000000113
ori x9,x12,0x00000048C
ori x9,x12,0x0000003CF
ori x9,x12,0x0000002C0
ori x9,x12,0x000000495
sb x13,0x000000000(x9)
ori x9,x12,0x00000079C
ori x9,x12,0x0000002FA
ori x9,x12,0x0000001D3
li x13,0x058739BE8
ori x13,x13,0x0000003F3
div x8,x9,x15
sb x13,0x000000000(x9)
lwu x10,0x000000000(x9)
ori x9,x12,0x0000007ED
lh x11,0x000000000(x8)
lh x10,0x000000000(x8)
ori x9,x12,0x0000002C0
lh x11,0x000000000(x8)
ori x9,x12,0x000000492
lwu x10,0x000000000(x8)
li x13,0x0327DB7BA
ori x13,x13,0x000000641
div x8,x9,x15
fence i,orw
fence irw,ior
ori x9,x12,0x00000008C
lh x10,0x000000000(x9)
ori x9,x12,0x00000029F
sw x13,0x000000000(x9)
lh x11,0x000000000(x9)
lw x10,0x000000000(x8)
ori x9,x12,0x0000006FE
lw x10,0x000000000(x9)
ori x9,x12,0x0000001E2
c.lw x11,0x020(x9)
fence r,or
fence ior,iorw
li x13,0x0EC280431
ori x13,x13,0x00000047E
div x8,x9,x15
sh x13,0x000000000(x9)
lh x11,0x000000000(x8)
ori x9,x12,0x000000719
lw x10,0x000000000(x9)
lh x10,0x000000000(x9)
li x13,0x04B401A82
ori x13,x13,0x000000771
div x8,x9,x15
ori x9,x12,0x00000023D
lw x10,0x000000000(x9)
lwu x10,0x000000000(x9)
fence io,iorw
ori x9,x12,0x000000668
c.sw x13,0x040(x8)
lwu x11,0x000000000(x9)
lhu x10,0x000000000(x9)
lbu x11,0x000000000(x8)
ori x9,x12,0x00000025E
li x13,0x01EDF1063
ori x13,x13,0x000000366
div x8,x9,x15
ori x9,x12,0x0000001F1
lbu x10,0x000000000(x9)
sb x13,0x000000000(x8)
c.lw x10,0x04C(x9)
sw x13,0x000000000(x8)
ori x9,x12,0x0000003D4
lwu x11,0x000000000(x9)
lb x10,0x000000000(x9)
lb x10,0x000000000(x9)
ori x9,x12,0x000000688
c.lw x11,0x028(x8)
ori x9,x12,0x000000445
ori x9,x12,0x0000004B0
ori x9,x12,0x000000180
li x13,0x09A1A96EF
ori x13,x13,0x000000667
div x8,x9,x15
sw x13,0x000000000(x8)
fence w,o
lwu x10,0x000000000(x9)
ori x9,x12,0x0000006A3
ori x9,x12,0x000000728
sb x13,0x000000000(x8)
lb x10,0x000000000(x9)
ori x9,x12,0x0000005A8
lwu x10,0x000000000(x9)
c.sw x13,0x064(x9)
sb x13,0x000000000(x8)
sb x13,0x000000000(x8)
li x13,0x074E20B09
ori x13,x13,0x0000005F1
div x8,x9,x15
ori x9,x12,0x000000542
ori x9,x12,0x000000065
lb x10,0x000000000(x8)
ori x9,x12,0x0000007F1
sw x13,0x000000000(x8)
c.sw x13,0x06C(x8)
c.lw x10,0x06C(x9)
ori x9,x12,0x0000007BD
lwu x10,0x000000000(x9)
ori x9,x12,0x00000019A
c.lw x10,0x050(x9)
sh x13,0x000000000(x8)
fence ior,ir
lwu x10,0x000000000(x8)
sw x13,0x000000000(x8)
lhu x10,0x000000000(x9)
li x13,0x0EBDD1DF9
ori x13,x13,0x000000740
div x8,x9,x15
ori x9,x12,0x000000696
ori x9,x12,0x0000004DA
lw x10,0x000000000(x9)
ori x9,x12,0x000000532
sb x13,0x000000000(x9)
lwu x10,0x000000000(x9)
c.sw x13,0x038(x9)
ori x9,x12,0x0000002E6
ori x9,x12,0x0000006CB
sw x13,0x000000000(x9)
li x13,0x02622E3CC
ori x13,x13,0x0000005A6
div x8,x9,x15
ori x9,x12,0x000000606
ori x9,x12,0x0000004BD
lb x10,0x000000000(x8)
lw x10,0x000000000(x8)
ori x9,x12,0x0000002EE
lbu x10,0x000000000(x9)
ori x9,x12,0x0000006D6
sw x13,0x000000000(x9)
ori x9,x12,0x00000030F
ori x9,x12,0x00000046B
ori x9,x12,0x0000003C0
lbu x10,0x000000000(x9)
ori x9,x12,0x0000006DB
li x13,0x033F17685
ori x13,x13,0x0000004A5
div x8,x9,x15
lwu x11,0x000000000(x9)
ori x9,x12,0x000000673
sb x13,0x000000000(x9)
sw x13,0x000000000(x8)
sb x13,0x000000000(x9)
lhu x11,0x000000000(x9)
lbu x10,0x000000000(x9)
ori x9,x12,0x000000061
lbu x10,0x000000000(x8)
sh x13,0x000000000(x9)
sh x13,0x000000000(x8)
lw x10,0x000000000(x9)
sh x13,0x000000000(x9)
sw x13,0x000000000(x9)
li x13,0x0E97DC0AA
ori x13,x13,0x00000061E
div x8,x9,x15
ori x9,x12,0x00000039E
lh x10,0x000000000(x9)
c.lw x11,0x04(x9)
sb x13,0x000000000(x8)
lb x10,0x000000000(x9)
lb x10,0x000000000(x9)
ori x9,x12,0x00000072C
ori x9,x12,0x000000598
lw x10,0x000000000(x9)
lw x10,0x000000000(x9)
lhu x10,0x000000000(x8)
ori x9,x12,0x000000578
sw x13,0x000000000(x9)
li x13,0x08A401AEA
ori x13,x13,0x0000005F5
div x8,x9,x15
sb x13,0x000000000(x9)
lb x10,0x000000000(x9)
ori x9,x12,0x0000005EF
lhu x10,0x000000000(x8)
lwu x10,0x000000000(x9)
sw x13,0x000000000(x9)
lb x10,0x000000000(x8)
ori x9,x12,0x0000003FE
lbu x10,0x000000000(x8)
sw x13,0x000000000(x8)
sw x13,0x000000000(x9)
lh x10,0x000000000(x8)
lh x10,0x000000000(x9)
sb x13,0x000000000(x8)
ori x9,x12,0x0000003E9
lbu x11,0x000000000(x8)
li x13,0x0A3DFD04B
ori x13,x13,0x00000057E
div x8,x9,x15
lwu x11,0x000000000(x9)
lbu x10,0x000000000(x9)
ori x9,x12,0x000000722
c.lw x11,0x064(x9)
c.sw x13,0x08(x8)
sw x13,0x000000000(x8)
ori x9,x12,0x000000174
lhu x10,0x000000000(x9)
sh x13,0x000000000(x8)
ori x9,x12,0x0000007B0
li x13,0x06BB5E444
ori x13,x13,0x000000154
div x8,x9,x15
ori x9,x12,0x000000420
lw x11,0x000000000(x9)
ori x9,x12,0x00000017C
sb x13,0x000000000(x8)
ori x9,x12,0x000000402
lbu x10,0x000000000(x8)
lwu x11,0x000000000(x8)
lb x11,0x000000000(x8)
sh x13,0x000000000(x8)
sw x13,0x000000000(x9)
ori x9,x12,0x000000210
lh x11,0x000000000(x8)
sh x13,0x000000000(x8)
sh x13,0x000000000(x9)
ori x9,x12,0x0000000D0
c.sw x13,0x030(x9)
lwu x11,0x000000000(x8)
li x13,0x00843F24A
ori x13,x13,0x00000068F
div x8,x9,x15
ori x9,x12,0x000000455
ori x9,x12,0x000000430
ori x9,x12,0x0000005EC
ori x9,x12,0x000000069
c.sw x13,0x02C(x9)
sb x13,0x000000000(x8)
lbu x10,0x000000000(x9)
fence ir,rw
li x13,0x0B92F147C
ori x13,x13,0x0000002F7
div x8,x9,x15
c.sw x13,0x03C(x9)
lb x11,0x000000000(x9)
ori x9,x12,0x0000004E5
sb x13,0x000000000(x9)
fence irw,rw
fence rw,iw
fence rw,irw
sb x13,0x000000000(x8)
lbu x11,0x000000000(x9)
sb x13,0x000000000(x8)
lw x11,0x000000000(x8)
ori x9,x12,0x0000007E5
ori x9,x12,0x000000677
sb x13,0x000000000(x9)
sb x13,0x000000000(x9)
lh x11,0x000000000(x9)
ori x9,x12,0x00000013A
lhu x11,0x000000000(x9)
li x13,0x05196BA1B
ori x13,x13,0x000000028
div x8,x9,x15
ori x9,x12,0x000000681
ori x9,x12,0x0000004AA
ori x9,x12,0x000000672
sb x13,0x000000000(x8)
lhu x11,0x000000000(x8)
sw x13,0x000000000(x8)
lh x10,0x000000000(x9)
fence iorw,iorw
ori x9,x12,0x000000501
ori x9,x12,0x0000002EE
ori x9,x12,0x0000005A2
ori x9,x12,0x00000000D
li x13,0x0673DB99F
ori x13,x13,0x0000000E5
div x8,x9,x15
sw x13,0x000000000(x8)
sh x13,0x000000000(x9)
sh x13,0x000000000(x9)
ori x9,x12,0x00000041C
lw x10,0x000000000(x9)
li x13,0x02EC61E16
ori x13,x13,0x000000337
div x8,x9,x15
ori x9,x12,0x00000012E
ori x9,x12,0x000000429
lwu x10,0x000000000(x8)
ori x9,x12,0x000000360
li x13,0x0E3ED24E4
ori x13,x13,0x0000003C8
div x8,x9,x15
ori x9,x12,0x000000580
sb x13,0x000000000(x9)
ori x9,x12,0x0000005D8
lh x10,0x000000000(x8)
lh x11,0x000000000(x9)
ori x9,x12,0x000000172
ori x9,x12,0x000000331
lh x11,0x000000000(x8)
ori x9,x12,0x0000001A2
fence io,or
ori x9,x12,0x0000007BE
c.lw x11,0x030(x8)
c.lw x11,0x064(x9)
c.sw x13,0x034(x9)
fence rw,io
lhu x11,0x000000000(x9)
ori x9,x12,0x0000002B6
lwu x11,0x000000000(x9)
lbu x11,0x000000000(x9)
li x13,0x006DB8EA3
ori x13,x13,0x00000000A
div x8,x9,x15
ori x9,x12,0x0000001AA
ori x9,x12,0x000000162
ori x9,x12,0x0000006A3
fence ior,w
ori x9,x12,0x0000001DE
ori x9,x12,0x0000001A7
c.lw x11,0x05C(x8)
lb x11,0x000000000(x9)
li x13,0x08FFAB203
ori x13,x13,0x000000390
div x8,x9,x15
sh x13,0x000000000(x9)
lw x10,0x000000000(x8)
sh x13,0x000000000(x9)
lwu x10,0x000000000(x8)
ori x9,x12,0x000000434
ori x9,x12,0x0000003C4
ori x9,x12,0x000000654
sw x13,0x000000000(x8)
fence r,i
lhu x10,0x000000000(x9)
ori x9,x12,0x000000088
ori x9,x12,0x00000031F
ori x9,x12,0x00000018E
li x13,0x01E691BBF
ori x13,x13,0x0000003B8
div x8,x9,x15
ori x9,x12,0x0000007D4
sw x13,0x000000000(x8)
ori x9,x12,0x0000001B2
lw x10,0x000000000(x9)
ori x9,x12,0x000000121
sh x13,0x000000000(x8)
ori x9,x12,0x000000460
ori x9,x12,0x000000680
ori x9,x12,0x0000006BD
sh x13,0x000000000(x9)
ori x9,x12,0x0000005CC
ori x9,x12,0x000000393
ori x9,x12,0x0000004AE
lh x11,0x000000000(x9)
ori x9,x12,0x00000030C
c.lw x10,0x078(x9)
ori x9,x12,0x00000076D
lb x11,0x000000000(x8)
li x13,0x09D35AF9E
ori x13,x13,0x0000005E3
div x8,x9,x15
c.lw x10,0x078(x8)
fence orw,ow
lwu x11,0x000000000(x9)
ori x9,x12,0x0000002D7
c.sw x13,0x078(x8)
sw x13,0x000000000(x9)
lb x11,0x000000000(x8)
lw x10,0x000000000(x8)
lw x11,0x000000000(x9)
lwu x11,0x000000000(x8)
lh x11,0x000000000(x9)
lbu x10,0x000000000(x8)
ori x9,x12,0x00000040A
lh x10,0x000000000(x9)
li x13,0x0C4B1C00E
ori x13,x13,0x000000564
div x8,x9,x15
c.sw x13,0x01C(x8)
ori x9,x12,0x0000005C8
fence r,ior
ori x9,x12,0x000000153
lw x11,0x000000000(x8)
lh x11,0x000000000(x8)
ori x9,x12,0x00000065F
sb x13,0x000000000(x8)
ori x9,x12,0x000000269
sh x13,0x000000000(x9)
ori x9,x12,0x00000010B
lwu x11,0x000000000(x8)
sb x13,0x000000000(x8)
ori x9,x12,0x0000002F3
ori x9,x12,0x00000048D
fence rw,irw
li x13,0x073260D1B
ori x13,x13,0x000000520
div x8,x9,x15
sh x13,0x000000000(x9)
fence ow,i
lbu x11,0x000000000(x8)
c.sw x13,0x040(x8)
c.sw x13,0x068(x9)
ori x9,x12,0x000000578
sh x13,0x000000000(x9)
sw x13,0x000000000(x8)
lwu x10,0x000000000(x9)
c.lw x10,0x03C(x8)
lwu x11,0x000000000(x9)
c.sw x13,0x054(x9)
sh x13,0x000000000(x8)
lh x11,0x000000000(x8)
lw x11,0x000000000(x9)
c.sw x13,0x070(x9)
ori x9,x12,0x00000011A
ori x9,x12,0x0000007EB
ori x9,x12,0x000000583
ori x9,x12,0x0000003BE
li x13,0x048777990
ori x13,x13,0x00000069F
div x8,x9,x15
sb x13,0x000000000(x8)
lhu x10,0x000000000(x9)
fence ow,ow
ori x9,x12,0x0000007C5
lb x10,0x000000000(x8)
sw x13,0x000000000(x8)
c.lw x11,0x070(x9)
li x13,0x06E0020D3
ori x13,x13,0x0000002F1
div x8,x9,x15
c.lw x11,0x01C(x8)
ori x9,x12,0x00000069C
ori x9,x12,0x000000628
ori x9,x12,0x00000073E
sw x13,0x000000000(x8)
li x13,0x062B14426
ori x13,x13,0x000000125
div x8,x9,x15
lbu x11,0x000000000(x8)
c.lw x10,0x078(x9)
lw x10,0x000000000(x8)
sw x13,0x000000000(x9)
c.sw x13,0x020(x8)
sh x13,0x000000000(x8)
lw x11,0x000000000(x8)
sh x13,0x000000000(x9)
lh x11,0x000000000(x8)
sw x13,0x000000000(x9)
lb x11,0x000000000(x9)
lwu x10,0x000000000(x8)
lwu x10,0x000000000(x9)
lwu x11,0x000000000(x8)
lb x10,0x000000000(x8)
ori x9,x12,0x0000000E0
sb x13,0x000000000(x9)
sh x13,0x000000000(x9)
c.sw x13,0x04(x9)
lh x10,0x000000000(x9)
li x13,0x0E19AAC1A
ori x13,x13,0x000000707
div x8,x9,x15
ori x9,x12,0x0000005D9
lhu x11,0x000000000(x8)
ori x9,x12,0x0000001E4
ori x9,x12,0x00000008B
ori x9,x12,0x0000004B1
c.lw x10,0x028(x9)
ori x9,x12,0x000000032
lh x10,0x000000000(x9)
sw x13,0x000000000(x8)
ori x9,x12,0x000000483
c.lw x10,0x038(x8)
ori x9,x12,0x0000004A0
lb x10,0x000000000(x9)
c.sw x13,0x068(x8)
sw x13,0x000000000(x9)
lh x11,0x000000000(x9)
c.sw x13,0x050(x8)
lwu x11,0x000000000(x9)
sh x13,0x000000000(x9)
ori x9,x12,0x000000547
li x13,0x0C223970E
ori x13,x13,0x0000001D5
div x8,x9,x15
lb x10,0x000000000(x9)
lh x11,0x000000000(x9)
c.lw x10,0x040(x9)
c.sw x13,0x060(x8)
ori x9,x12,0x0000002E6
sb x13,0x000000000(x8)
ori x9,x12,0x000000174
ori x9,x12,0x0000003D6
li x13,0x0C281F20A
ori x13,x13,0x000000369
div x8,x9,x15
ori x9,x12,0x000000171
sh x13,0x000000000(x8)
fence orw,orw
sb x13,0x000000000(x8)
c.sw x13,0x054(x9)
c.lw x11,0x064(x8)
sh x13,0x000000000(x9)
li x13,0x0781371C7
ori x13,x13,0x00000070F
div x8,x9,x15
ori x9,x12,0x000000689
lhu x11,0x000000000(x8)
lhu x10,0x000000000(x9)
ori x9,x12,0x0000007AC
lb x11,0x000000000(x8)
lbu x11,0x000000000(x8)
lbu x11,0x000000000(x8)
c.sw x13,0x020(x9)
c.sw x13,0x04C(x8)
lwu x11,0x000000000(x8)
li x13,0x069823F3C
ori x13,x13,0x00000026B
div x8,x9,x15
sb x13,0x000000000(x8)
sh x13,0x000000000(x9)
ori x9,x12,0x000000290
lw x11,0x000000000(x8)
sh x13,0x000000000(x8)
lwu x11,0x000000000(x9)
c.lw x11,0x044(x9)
ori x9,x12,0x0000006BC
lwu x10,0x000000000(x9)
sb x13,0x000000000(x9)
lb x11,0x000000000(x8)
ori x9,x12,0x0000003AF
lwu x10,0x000000000(x8)
ori x9,x12,0x0000007B7
ori x9,x12,0x0000002E9
ori x9,x12,0x0000002A5
lw x11,0x000000000(x8)
lhu x10,0x000000000(x8)
li x13,0x062439367
ori x13,x13,0x000000032
div x8,x9,x15
ori x9,x12,0x000000467
lwu x10,0x000000000(x9)
lwu x11,0x000000000(x8)
lhu x11,0x000000000(x8)
ori x9,x12,0x000000433
lb x10,0x000000000(x8)
ori x9,x12,0x000000338
ori x9,x12,0x00000004F
sw x13,0x000000000(x8)
ori x9,x12,0x0000004E1
sw x13,0x000000000(x9)
lw x10,0x000000000(x8)
ori x9,x12,0x000000406
sh x13,0x000000000(x9)
li x13,0x0C06C2AA7
ori x13,x13,0x000000004
div x8,x9,x15
lbu x10,0x000000000(x9)
lw x10,0x000000000(x9)
lwu x10,0x000000000(x8)
sw x13,0x000000000(x8)
ori x9,x12,0x0000007F0
ori x9,x12,0x0000002E7
lw x11,0x000000000(x8)
ori x9,x12,0x00000014A
sb x13,0x000000000(x9)
ori x9,x12,0x00000072D
lh x10,0x000000000(x9)
sw x13,0x000000000(x9)
lhu x10,0x000000000(x9)
ori x9,x12,0x0000002FD
lwu x10,0x000000000(x8)
lb x11,0x000000000(x8)
c.sw x13,0x04(x9)
lbu x11,0x000000000(x8)
ori x9,x12,0x000000682
li x13,0x0D36AD60C
ori x13,x13,0x00000048E
div x8,x9,x15
sh x13,0x000000000(x8)
lw x10,0x000000000(x9)
ori x9,x12,0x0000003C1
c.sw x13,0x028(x8)
ori x9,x12,0x00000037E
ori x9,x12,0x000000658
lb x11,0x000000000(x8)
lh x10,0x000000000(x8)
sh x13,0x000000000(x9)
li x13,0x0FF6FF6B5
ori x13,x13,0x000000685
div x8,x9,x15
ori x9,x12,0x000000598
sb x13,0x000000000(x8)
fence ir,iw
fence ior,iw
ori x9,x12,0x000000368
sb x13,0x000000000(x9)
ori x9,x12,0x00000033E
sw x13,0x000000000(x8)
fence i,ior
ori x9,x12,0x0000001A7
lh x10,0x000000000(x9)
lbu x11,0x000000000(x8)
lhu x11,0x000000000(x9)
lbu x10,0x000000000(x8)
c.lw x11,0x028(x8)
lb x11,0x000000000(x8)
ori x9,x12,0x000000528
sh x13,0x000000000(x8)
ori x9,x12,0x000000049
li x13,0x0DD4D7508
ori x13,x13,0x00000066C
div x8,x9,x15
ori x9,x12,0x000000049
fence w,rw
ori x9,x12,0x000000311
ori x9,x12,0x00000001A
lwu x10,0x000000000(x8)
ori x9,x12,0x000000455
ori x9,x12,0x0000001AC
lbu x10,0x000000000(x8)
ori x9,x12,0x00000027A
lb x11,0x000000000(x9)
lh x10,0x000000000(x9)
ori x9,x12,0x00000006B
lhu x10,0x000000000(x8)
sw x13,0x000000000(x9)
ori x9,x12,0x00000066F
lhu x11,0x000000000(x8)
lb x10,0x000000000(x9)
c.sw x13,0x044(x8)
li x13,0x0336338AB
ori x13,x13,0x00000046E
div x8,x9,x15
fence orw,w
lb x10,0x000000000(x8)
ori x9,x12,0x000000020
sw x13,0x000000000(x9)
sh x13,0x000000000(x8)
li x13,0x03F0A6C4D
ori x13,x13,0x000000454
div x8,x9,x15
lh x11,0x000000000(x9)
ori x9,x12,0x000000685
lh x10,0x000000000(x9)
ori x9,x12,0x000000380
lhu x11,0x000000000(x8)
ori x9,x12,0x000000210
li x13,0x0D652EF23
ori x13,x13,0x000000242
div x8,x9,x15
ori x9,x12,0x000000569
ori x9,x12,0x0000004EC
ori x9,x12,0x000000292
sb x13,0x000000000(x9)
li x13,0x00EAC39AD
ori x13,x13,0x000000321
div x8,x9,x15
sh x13,0x000000000(x8)
ori x9,x12,0x0000003FF
ori x9,x12,0x00000071C
ori x9,x12,0x000000524
lh x10,0x000000000(x8)
lbu x10,0x000000000(x9)
lw x10,0x000000000(x9)
lhu x10,0x000000000(x8)
lhu x10,0x000000000(x9)
ori x9,x12,0x00000015E
lbu x10,0x000000000(x9)
lbu x11,0x000000000(x8)
ori x9,x12,0x000000116
ori x9,x12,0x0000003EE
li x13,0x0A6B2C263
ori x13,x13,0x00000070E
div x8,x9,x15
ori x9,x12,0x0000002D2
lwu x11,0x000000000(x8)
ori x9,x12,0x000000291
ori x9,x12,0x0000000A5
lhu x11,0x000000000(x8)
fence irw,i
c.sw x13,0x044(x9)
sh x13,0x000000000(x8)
ori x9,x12,0x00000019D
fence orw,iow
li x13,0x0F1B96F8C
ori x13,x13,0x00000076E
div x8,x9,x15
lbu x11,0x000000000(x8)
sh x13,0x000000000(x9)
ori x9,x12,0x000000191
lw x11,0x000000000(x8)
ori x9,x12,0x00000040E
fence rw,ir
ori x9,x12,0x00000045C
lh x11,0x000000000(x9)
lhu x11,0x000000000(x9)
ori x9,x12,0x0000006F9
sh x13,0x000000000(x8)
li x13,0x03013A5AA
ori x13,x13,0x00000055A
div x8,x9,x15
lbu x10,0x000000000(x8)
c.lw x11,0x03C(x9)
sw x13,0x000000000(x8)
lh x11,0x000000000(x9)
li x13,0x0FBC583F3
ori x13,x13,0x0000006EF
div x8,x9,x15
ori x9,x12,0x000000137
sb x13,0x000000000(x8)
c.lw x11,0x058(x9)
ori x9,x12,0x0000003E6
fence i,i
sb x13,0x000000000(x8)
ori x9,x12,0x0000006F1
sh x13,0x000000000(x8)
ori x9,x12,0x0000003F8
lbu x11,0x000000000(x8)
ori x9,x12,0x0000002E4
ori x9,x12,0x00000010C
lw x10,0x000000000(x8)
ori x9,x12,0x000000387
sw x13,0x000000000(x9)
ori x9,x12,0x0000006F7
li x13,0x03BCBFE0B
ori x13,x13,0x00000066F
div x8,x9,x15
sw x13,0x000000000(x8)
lh x10,0x000000000(x9)
lw x10,0x000000000(x9)
lbu x11,0x000000000(x9)
ori x9,x12,0x0000001D8
lhu x11,0x000000000(x9)
sh x13,0x000000000(x8)
sb x13,0x000000000(x9)
lh x10,0x000000000(x9)
lb x11,0x000000000(x9)
lb x11,0x000000000(x8)
sw x13,0x000000000(x9)
ori x9,x12,0x0000005D2
lwu x10,0x000000000(x8)
li x13,0x02FF93D55
ori x13,x13,0x000000015
div x8,x9,x15
sb x13,0x000000000(x8)
c.sw x13,0x03C(x9)
sh x13,0x000000000(x8)
sh x13,0x000000000(x9)
lwu x10,0x000000000(x8)
lw x10,0x000000000(x9)
lwu x10,0x000000000(x8)
ori x9,x12,0x0000007A2
sb x13,0x000000000(x9)
ori x9,x12,0x000000376
lw x11,0x000000000(x8)
lhu x11,0x000000000(x9)
ori x9,x12,0x0000000FD
li x13,0x097F9AA11
ori x13,x13,0x00000033C
div x8,x9,x15
sw x13,0x000000000(x9)
ori x9,x12,0x0000004C0
ori x9,x12,0x000000445
ori x9,x12,0x000000190
sw x13,0x000000000(x8)
ori x9,x12,0x0000003BE
li x13,0x044C151B8
ori x13,x13,0x000000723
div x8,x9,x15
sw x13,0x000000000(x8)
c.lw x10,0x024(x9)
sb x13,0x000000000(x8)
lw x11,0x000000000(x9)
c.lw x10,0x020(x8)
lwu x11,0x000000000(x9)
ori x9,x12,0x0000005C0
lwu x10,0x000000000(x8)
ori x9,x12,0x0000002EE
fence r,ior
lb x11,0x000000000(x8)
sw x13,0x000000000(x9)
ori x9,x12,0x00000043F
c.sw x13,0x024(x9)
lh x11,0x000000000(x8)
sh x13,0x000000000(x9)
fence iw,iow
sh x13,0x000000000(x8)
fence or,ior
li x13,0x0675F239F
ori x13,x13,0x00000057B
div x8,x9,x15
ori x9,x12,0x00000034D
c.lw x10,0x040(x8)
fence i,ir
ori x9,x12,0x0000003B5
fence w,iw
lwu x10,0x000000000(x9)
sw x13,0x000000000(x9)
ori x9,x12,0x000000080
ori x9,x12,0x0000003AB
sb x13,0x000000000(x8)
lwu x10,0x000000000(x9)
li x13,0x033332D97
ori x13,x13,0x000000346
div x8,x9,x15
ori x9,x12,0x000000274
fence or,ior
lhu x10,0x000000000(x9)
fence iorw,io
sb x13,0x000000000(x9)
ori x9,x12,0x000000403
lw x10,0x000000000(x8)
ori x9,x12,0x0000001A8
ori x9,x12,0x000000480
lwu x11,0x000000000(x9)
sw x13,0x000000000(x9)
ori x9,x12,0x00000035E
sh x13,0x000000000(x9)
ori x9,x12,0x00000059A
li x13,0x060513215
ori x13,x13,0x0000001AD
div x8,x9,x15
sb x13,0x000000000(x8)
lhu x11,0x000000000(x8)
lh x11,0x000000000(x8)
lhu x11,0x000000000(x8)
ori x9,x12,0x000000536
ori x9,x12,0x00000049B
lb x10,0x000000000(x8)
ori x9,x12,0x00000040C
lbu x10,0x000000000(x8)
ori x9,x12,0x000000309
lb x11,0x000000000(x9)
ori x9,x12,0x0000005DF
fence iorw,o
lwu x10,0x000000000(x8)
lwu x11,0x000000000(x9)
lwu x11,0x000000000(x9)
sb x13,0x000000000(x8)
ori x9,x12,0x0000003BD
c.sw x13,0x014(x9)
li x13,0x0E823D177
ori x13,x13,0x00000054C
div x8,x9,x15
sh x13,0x000000000(x9)
ori x9,x12,0x0000003E0
lbu x10,0x000000000(x9)
ori x9,x12,0x000000387
sb x13,0x000000000(x9)
ori x9,x12,0x000000421
li x13,0x0B879128A
ori x13,x13,0x0000001C9
div x8,x9,x15
c.lw x10,0x058(x9)
lb x11,0x000000000(x9)
ori x9,x12,0x00000018F
fence r,irw
sb x13,0x000000000(x9)
sh x13,0x000000000(x9)
sb x13,0x000000000(x9)
ori x9,x12,0x000000252
ori x9,x12,0x0000004E7
ori x9,x12,0x0000003D7
sh x13,0x000000000(x9)
ori x9,x12,0x0000003D8
fence o,iorw
c.sw x13,0x048(x9)
ori x9,x12,0x00000064A
ori x9,x12,0x0000002D9
fence rw,ow
sh x13,0x000000000(x8)
li x13,0x0BBC4BC5F
ori x13,x13,0x000000792
div x8,x9,x15
lbu x11,0x000000000(x9)
lbu x10,0x000000000(x9)
lhu x11,0x000000000(x8)
lbu x11,0x000000000(x9)
sb x13,0x000000000(x8)
sw x13,0x000000000(x8)
ori x9,x12,0x0000001CC
ori x9,x12,0x00000070B
lbu x11,0x000000000(x8)
ori x9,x12,0x0000000FB
ori x9,x12,0x000000013
c.sw x13,0x020(x8)
lbu x10,0x000000000(x8)
ori x9,x12,0x00000010D
lw x11,0x000000000(x8)
lbu x11,0x000000000(x9)
lb x10,0x000000000(x8)
ori x9,x12,0x000000128
li x13,0x0191470F3
ori x13,x13,0x000000308
div x8,x9,x15
sh x13,0x000000000(x9)
ori x9,x12,0x000000458
c.sw x13,0x00(x8)
sb x13,0x000000000(x8)
ori x9,x12,0x0000000AC
ori x9,x12,0x00000045B
ori x9,x12,0x000000269
lwu x10,0x000000000(x9)
ori x9,x12,0x0000006C3
sw x13,0x000000000(x8)
ori x9,x12,0x000000193
lwu x10,0x000000000(x8)
lh x11,0x000000000(x9)
li x13,0x0B03D92E8
ori x13,x13,0x0000002DA
div x8,x9,x15
ori x9,x12,0x0000003AD
ori x9,x12,0x000000025
lw x10,0x000000000(x9)
lbu x11,0x000000000(x9)
sb x13,0x000000000(x8)
ori x9,x12,0x000000531
ori x9,x12,0x00000028B
c.sw x13,0x03C(x8)
li x13,0x0DF291DA1
ori x13,x13,0x0000004E1
div x8,x9,x15
lw x11,0x000000000(x9)
lb x11,0x000000000(x9)
sw x13,0x000000000(x9)
fence or,orw
sb x13,0x000000000(x8)
li x13,0x01E0389F6
ori x13,x13,0x000000041
div x8,x9,x15
lhu x10,0x000000000(x8)
ori x9,x12,0x00000047D
ori x9,x12,0x0000005EA
lh x10,0x000000000(x9)
ori x9,x12,0x00000032F
lwu x10,0x000000000(x9)
ori x9,x12,0x0000001B4
ori x9,x12,0x0000005A8
ori x9,x12,0x0000003C4
ori x9,x12,0x00000061F
ori x9,x12,0x000000075
c.sw x13,0x04C(x9)
li x13,0x00274E8BA
ori x13,x13,0x000000390
div x8,x9,x15
c.sw x13,0x078(x9)
sw x13,0x000000000(x8)
sb x13,0x000000000(x8)
ori x9,x12,0x00000036B
lhu x10,0x000000000(x8)
ori x9,x12,0x0000005A3
sb x13,0x000000000(x8)
sw x13,0x000000000(x8)
lh x10,0x000000000(x9)
ori x9,x12,0x00000068E
ori x9,x12,0x0000004CF
ori x9,x12,0x000000577
ori x9,x12,0x000000243
lwu x10,0x000000000(x8)
lw x11,0x000000000(x9)
ori x9,x12,0x0000006B9
lw x10,0x000000000(x9)
ori x9,x12,0x000000699
lw x11,0x000000000(x9)
sh x13,0x000000000(x8)
li x13,0x0D0D99076
ori x13,x13,0x00000061C
div x8,x9,x15
lb x11,0x000000000(x9)
c.lw x11,0x068(x8)
c.lw x11,0x04(x9)
sb x13,0x000000000(x9)
li x13,0x099AF1CA5
ori x13,x13,0x00000054E
div x8,x9,x15
ori x9,x12,0x00000042E
lb x10,0x000000000(x8)
ori x9,x12,0x000000726
fence orw,iw
ori x9,x12,0x0000007F9
ori x9,x12,0x00000073D
ori x9,x12,0x000000461
li x13,0x054A505D5
ori x13,x13,0x0000005A5
div x8,x9,x15
lw x10,0x000000000(x8)
ori x9,x12,0x000000591
c.sw x13,0x06C(x8)
lb x11,0x000000000(x9)
lbu x10,0x000000000(x8)
lw x11,0x000000000(x8)
sb x13,0x000000000(x8)
ori x9,x12,0x000000382
lh x10,0x000000000(x8)
ori x9,x12,0x000000716
sw x13,0x000000000(x8)
ori x9,x12,0x000000132
ori x9,x12,0x000000699
lh x10,0x000000000(x9)
li x13,0x0915CDBC0
ori x13,x13,0x000000245
div x8,x9,x15
c.lw x11,0x05C(x8)
ori x9,x12,0x00000025A
ori x9,x12,0x00000014D
ori x9,x12,0x0000001BE
lb x10,0x000000000(x8)
lwu x11,0x000000000(x8)
ori x9,x12,0x000000796
ori x9,x12,0x00000036F
li x13,0x096993125
ori x13,x13,0x0000000F3
div x8,x9,x15
ori x9,x12,0x000000791
sh x13,0x000000000(x9)
ori x9,x12,0x000000467
c.sw x13,0x03C(x8)
ori x9,x12,0x000000729
fence irw,r
sb x13,0x000000000(x9)
ori x9,x12,0x00000010E
lwu x11,0x000000000(x8)
fence r,ior
ori x9,x12,0x0000005C7
ori x9,x12,0x0000000FF
lhu x11,0x000000000(x8)
sb x13,0x000000000(x8)
lh x11,0x000000000(x9)
li x13,0x0646B7D68
ori x13,x13,0x0000000DB
div x8,x9,x15
lw x10,0x000000000(x8)
lbu x10,0x000000000(x8)
ori x9,x12,0x0000004FC
sb x13,0x000000000(x9)
fence orw,w
li x13,0x051F9E984
ori x13,x13,0x0000001EA
div x8,x9,x15
lwu x11,0x000000000(x9)
ori x9,x12,0x000000773
sb x13,0x000000000(x8)
lbu x10,0x000000000(x9)
ori x9,x12,0x000000179
li x13,0x034FF2C31
ori x13,x13,0x00000015C
div x8,x9,x15
lh x10,0x000000000(x9)
lhu x10,0x000000000(x9)
lw x10,0x000000000(x9)
lb x10,0x000000000(x9)
lw x11,0x000000000(x8)
lwu x11,0x000000000(x9)
sh x13,0x000000000(x8)
c.sw x13,0x014(x9)
lwu x10,0x000000000(x8)
lhu x11,0x000000000(x9)
sb x13,0x000000000(x8)
ori x9,x12,0x000000463
lh x10,0x000000000(x9)
sw x13,0x000000000(x9)
fence rw,orw
sh x13,0x000000000(x8)
c.sw x13,0x07C(x9)
lhu x11,0x000000000(x8)
ori x9,x12,0x0000004C2
c.sw x13,0x020(x9)
li x13,0x044DD3C64
ori x13,x13,0x0000002B4
div x8,x9,x15
fence ior,irw
sw x13,0x000000000(x9)
c.lw x11,0x058(x8)
lb x11,0x000000000(x8)
lh x11,0x000000000(x8)
c.sw x13,0x050(x9)
lh x10,0x000000000(x9)
c.sw x13,0x028(x8)
sh x13,0x000000000(x9)
c.lw x11,0x044(x9)
li x13,0x07EC51058
ori x13,x13,0x000000417
div x8,x9,x15
lw x11,0x000000000(x9)
sb x13,0x000000000(x8)
lb x10,0x000000000(x8)
ori x9,x12,0x0000002C9
lh x11,0x000000000(x8)
sb x13,0x000000000(x9)
li x13,0x02CC1FE0B
ori x13,x13,0x0000003CF
div x8,x9,x15
c.sw x13,0x024(x9)
ori x9,x12,0x000000774
sb x13,0x000000000(x9)
ori x9,x12,0x00000043C
ori x9,x12,0x000000770
c.lw x11,0x034(x8)
ori x9,x12,0x000000078
ori x9,x12,0x0000004AF
ori x9,x12,0x00000036A
sw x13,0x000000000(x8)
ori x9,x12,0x00000028A
ori x9,x12,0x0000006CD
lb x11,0x000000000(x9)
lbu x11,0x000000000(x9)
lh x11,0x000000000(x9)
sw x13,0x000000000(x8)
li x13,0x0F183BA84
ori x13,x13,0x000000191
div x8,x9,x15
c.lw x11,0x024(x9)
lhu x11,0x000000000(x8)
ori x9,x12,0x000000081
ori x9,x12,0x000000600
lhu x11,0x000000000(x9)
li x13,0x09EC32447
ori x13,x13,0x00000072F
div x8,x9,x15
c.sw x13,0x040(x8)
c.lw x10,0x08(x8)
fence r,r
ori x9,x12,0x000000608
sb x13,0x000000000(x8)
ori x9,x12,0x000000354
lwu x11,0x000000000(x9)
sh x13,0x000000000(x8)
lw x11,0x000000000(x9)
sb x13,0x000000000(x8)
ori x9,x12,0x0000005A3
ori x9,x12,0x00000008C
ori x9,x12,0x000000305
lhu x11,0x000000000(x8)
sh x13,0x000000000(x9)
lb x10,0x000000000(x9)
ori x9,x12,0x000000354
lhu x11,0x000000000(x9)
ori x9,x12,0x000000177
c.lw x10,0x01C(x8)
li x13,0x020DE953A
ori x13,x13,0x0000001B4
div x8,x9,x15
lbu x11,0x000000000(x8)
ori x9,x12,0x0000006B2
sw x13,0x000000000(x9)
ori x9,x12,0x000000441
ori x9,x12,0x00000019F
sb x13,0x000000000(x9)
c.sw x13,0x044(x9)
c.lw x11,0x044(x8)
ori x9,x12,0x0000005BF
ori x9,x12,0x0000007E7
sw x13,0x000000000(x8)
lb x10,0x000000000(x8)
ori x9,x12,0x000000465
li x13,0x01F23F81A
ori x13,x13,0x0000005D3
div x8,x9,x15
sw x13,0x000000000(x9)
ori x9,x12,0x0000000FF
lwu x11,0x000000000(x9)
ori x9,x12,0x000000432
ori x9,x12,0x000000157
lw x10,0x000000000(x9)
lbu x11,0x000000000(x9)
ori x9,x12,0x00000001E
c.lw x11,0x03C(x8)
lbu x11,0x000000000(x8)
lwu x11,0x000000000(x9)
sw x13,0x000000000(x8)
lb x11,0x000000000(x9)
li x13,0x0DB3468E1
ori x13,x13,0x00000039F
div x8,x9,x15
lw x10,0x000000000(x9)
sw x13,0x000000000(x8)
c.sw x13,0x00(x8)
sh x13,0x000000000(x9)
ori x9,x12,0x00000006F
lbu x10,0x000000000(x8)
li x13,0x03306D527
ori x13,x13,0x00000071A
div x8,x9,x15
lbu x10,0x000000000(x8)
ori x9,x12,0x0000000A7
fence rw,ior
c.sw x13,0x04(x8)
ori x9,x12,0x000000456
ori x9,x12,0x0000002B6
sh x13,0x000000000(x8)
ori x9,x12,0x000000256
lwu x10,0x000000000(x9)
ori x9,x12,0x00000028B
lb x11,0x000000000(x8)
sw x13,0x000000000(x9)
sh x13,0x000000000(x8)
ori x9,x12,0x0000001EC
ori x9,x12,0x00000020D
lh x11,0x000000000(x9)
lb x10,0x000000000(x8)
li x13,0x03EF8D6C1
ori x13,x13,0x000000617
div x8,x9,x15
lhu x10,0x000000000(x9)
ori x9,x12,0x0000005C8
fence iow,ior
ori x9,x12,0x0000006CE
sh x13,0x000000000(x9)
lbu x10,0x000000000(x9)
lb x11,0x000000000(x9)
li x13,0x0DF0A8E7B
ori x13,x13,0x00000005D
div x8,x9,x15
lw x11,0x000000000(x8)
lb x10,0x000000000(x8)
ori x9,x12,0x0000002AB
c.lw x11,0x00(x9)
ori x9,x12,0x0000002DB
ori x9,x12,0x000000067
lh x11,0x000000000(x8)
ori x9,x12,0x0000003CD
lbu x11,0x000000000(x9)
lb x11,0x000000000(x8)
sh x13,0x000000000(x8)
li x13,0x0C44F5D39
ori x13,x13,0x0000007F7
div x8,x9,x15
ori x9,x12,0x00000012F
ori x9,x12,0x0000000C0
lwu x10,0x000000000(x9)
c.lw x11,0x060(x9)
ori x9,x12,0x0000003C1
lw x10,0x000000000(x8)
ori x9,x12,0x000000274
ori x9,x12,0x00000042D
lb x10,0x000000000(x8)
lwu x10,0x000000000(x9)
lhu x10,0x000000000(x8)
ori x9,x12,0x0000005CE
lhu x10,0x000000000(x8)
li x13,0x072EA0D68
ori x13,x13,0x0000002A8
div x8,x9,x15
ori x9,x12,0x0000004FD
lb x11,0x000000000(x8)
sb x13,0x000000000(x9)
ori x9,x12,0x000000694
ori x9,x12,0x000000698
ori x9,x12,0x0000002DF
ori x9,x12,0x000000395
lb x10,0x000000000(x8)
ori x9,x12,0x000000011
ori x9,x12,0x0000002AC
lhu x11,0x000000000(x9)
c.lw x10,0x040(x8)
sb x13,0x000000000(x9)
c.sw x13,0x038(x8)
lb x11,0x000000000(x8)
lb x11,0x000000000(x9)
ori x9,x12,0x000000133
lh x11,0x000000000(x8)
ori x9,x12,0x0000005AC
li x13,0x09539B545
ori x13,x13,0x00000004B
div x8,x9,x15
ori x9,x12,0x000000046
ori x9,x12,0x0000004F5
ori x9,x12,0x00000004F
sh x13,0x000000000(x8)
ori x9,x12,0x000000465
c.sw x13,0x034(x8)
lh x10,0x000000000(x8)
lb x11,0x000000000(x8)
ori x9,x12,0x0000000E3
lhu x10,0x000000000(x9)
lwu x10,0x000000000(x8)
lhu x10,0x000000000(x8)
fence or,i
ori x9,x12,0x000000205
ori x9,x12,0x00000070C
lhu x10,0x000000000(x9)
li x13,0x0A08D6806
ori x13,x13,0x0000001B2
div x8,x9,x15
c.lw x10,0x0C(x9)
lh x10,0x000000000(x8)
sb x13,0x000000000(x9)
lhu x11,0x000000000(x9)
ori x9,x12,0x00000038B
fence rw,i
li x13,0x041A85C5A
ori x13,x13,0x00000052F
div x8,x9,x15
sh x13,0x000000000(x9)
ori x9,x12,0x00000041D
lw x11,0x000000000(x8)
lb x10,0x000000000(x8)
lwu x10,0x000000000(x9)
lbu x11,0x000000000(x8)
li x13,0x04DB9BB7D
ori x13,x13,0x000000251
div x8,x9,x15
ori x9,x12,0x000000322
lh x11,0x000000000(x8)
lw x10,0x000000000(x8)
lb x10,0x000000000(x8)
sb x13,0x000000000(x9)
c.lw x11,0x034(x8)
lw x11,0x000000000(x9)
ori x9,x12,0x000000286
sb x13,0x000000000(x8)
ori x9,x12,0x00000048F
lh x11,0x000000000(x8)
ori x9,x12,0x000000734
lwu x11,0x000000000(x9)
li x13,0x042057491
ori x13,x13,0x00000041C
div x8,x9,x15
c.lw x10,0x024(x8)
ori x9,x12,0x0000007C1
ori x9,x12,0x000000705
lh x11,0x000000000(x9)
lh x11,0x000000000(x8)
sh x13,0x000000000(x8)
li x13,0x0F84FCE88
ori x13,x13,0x000000745
div x8,x9,x15
lh x11,0x000000000(x8)
lb x11,0x000000000(x9)
lb x10,0x000000000(x9)
ori x9,x12,0x000000342
sw x13,0x000000000(x9)
li x13,0x0677B9B0C
ori x13,x13,0x000000625
div x8,x9,x15
lh x11,0x000000000(x8)
ori x9,x12,0x000000755
lwu x10,0x000000000(x8)
lh x10,0x000000000(x8)
lwu x11,0x000000000(x9)
fence iorw,iorw
lhu x11,0x000000000(x8)
c.sw x13,0x048(x8)
lh x10,0x000000000(x9)
lh x10,0x000000000(x9)
ori x9,x12,0x0000005F8
sb x13,0x000000000(x8)
ori x9,x12,0x0000004E6
li x13,0x01092521C
ori x13,x13,0x000000196
div x8,x9,x15
lbu x11,0x000000000(x8)
c.lw x10,0x070(x8)
ori x9,x12,0x0000001A0
ori x9,x12,0x0000004DE
li x13,0x085287062
ori x13,x13,0x00000049E
div x8,x9,x15
lwu x10,0x000000000(x8)
ori x9,x12,0x0000007FE
ori x9,x12,0x0000002E6
ori x9,x12,0x000000338
ori x9,x12,0x000000604
lw x10,0x000000000(x8)
lwu x11,0x000000000(x8)
c.lw x11,0x020(x9)
sb x13,0x000000000(x9)
c.lw x11,0x08(x8)
c.lw x10,0x054(x8)
fence orw,o
ori x9,x12,0x0000007A1
lh x11,0x000000000(x9)
lh x11,0x000000000(x8)
sb x13,0x000000000(x9)
sh x13,0x000000000(x8)
li x13,0x099C4103F
ori x13,x13,0x00000006F
div x8,x9,x15
lb x11,0x000000000(x9)
lwu x10,0x000000000(x9)
ori x9,x12,0x000000778
sb x13,0x000000000(x9)
sw x13,0x000000000(x8)
sw x13,0x000000000(x9)
ori x9,x12,0x0000007A7
fence ir,ir
lbu x11,0x000000000(x9)
ori x9,x12,0x000000415
sh x13,0x000000000(x8)
li x13,0x0EC05F62A
ori x13,x13,0x00000059D
div x8,x9,x15
lh x10,0x000000000(x8)
ori x9,x12,0x00000014A
lhu x11,0x000000000(x9)
sh x13,0x000000000(x9)
sw x13,0x000000000(x8)
li x13,0x061748A4D
ori x13,x13,0x00000061A
div x8,x9,x15
lbu x11,0x000000000(x8)
ori x9,x12,0x000000717
ori x9,x12,0x0000007DE
c.lw x10,0x078(x9)
ori x9,x12,0x000000019
c.lw x10,0x00(x9)
ori x9,x12,0x0000006DD
c.lw x10,0x030(x9)
li x13,0x0D095A430
ori x13,x13,0x000000295
div x8,x9,x15
ori x9,x12,0x0000003A9
lhu x10,0x000000000(x9)
ori x9,x12,0x000000065
lb x11,0x000000000(x9)
lbu x10,0x000000000(x9)
ori x9,x12,0x00000003C
ori x9,x12,0x0000002DB
ori x9,x12,0x0000003C7
lhu x10,0x000000000(x8)
lh x11,0x000000000(x8)
lbu x11,0x000000000(x9)
ori x9,x12,0x000000101
sw x13,0x000000000(x8)
lhu x10,0x000000000(x8)
ori x9,x12,0x0000007C4
li x13,0x0890A41CA
ori x13,x13,0x0000001F9
div x8,x9,x15
lh x11,0x000000000(x9)
c.lw x11,0x04(x9)
fence orw,iorw
ori x9,x12,0x00000026B
ori x9,x12,0x000000599
fence o,or
ori x9,x12,0x0000003F5
sw x13,0x000000000(x9)
sb x13,0x000000000(x9)
lw x11,0x000000000(x9)
sw x13,0x000000000(x9)
fence iorw,ow
lwu x11,0x000000000(x8)
sb x13,0x000000000(x8)
sb x13,0x000000000(x9)
ori x9,x12,0x0000006BD
lbu x11,0x000000000(x9)
fence ow,iow
c.lw x10,0x060(x9)
li x13,0x09CDC03E9
ori x13,x13,0x000000277
div x8,x9,x15
sb x13,0x000000000(x8)
ori x9,x12,0x0000002BA
lbu x11,0x000000000(x9)
lw x11,0x000000000(x9)
ori x9,x12,0x00000074F
ori x9,x12,0x0000005B3
lb x10,0x000000000(x9)
li x13,0x027398FD3
ori x13,x13,0x00000026B
div x8,x9,x15
lbu x10,0x000000000(x9)
ori x9,x12,0x000000173
sw x13,0x000000000(x9)
lw x10,0x000000000(x8)
fence ior,or
ori x9,x12,0x0000003FF
c.lw x11,0x064(x9)
ori x9,x12,0x0000007D8
sh x13,0x000000000(x8)
sb x13,0x000000000(x9)
c.lw x10,0x020(x9)
ori x9,x12,0x0000006F2
lb x11,0x000000000(x8)
li x13,0x02DBF1C9A
ori x13,x13,0x0000004B1
div x8,x9,x15
lbu x11,0x000000000(x8)
lh x11,0x000000000(x8)
lb x10,0x000000000(x9)
ori x9,x12,0x000000534
ori x9,x12,0x0000007D2
c.sw x13,0x030(x8)
lw x11,0x000000000(x9)
ori x9,x12,0x0000000F6
ori x9,x12,0x0000001CC
lwu x10,0x000000000(x9)
lw x11,0x000000000(x8)
lh x11,0x000000000(x9)
lw x11,0x000000000(x9)
lbu x10,0x000000000(x8)
ori x9,x12,0x00000077A
ori x9,x12,0x00000014F
ori x9,x12,0x0000001DB
fence iow,w
lwu x11,0x000000000(x8)
sw x13,0x000000000(x9)
li x13,0x071D4009C
ori x13,x13,0x00000062A
div x8,x9,x15
lhu x11,0x000000000(x9)
lw x11,0x000000000(x8)
ori x9,x12,0x0000005B1
ori x9,x12,0x000000750
sh x13,0x000000000(x8)
ori x9,x12,0x00000004E
sb x13,0x000000000(x9)
lw x10,0x000000000(x8)
lbu x10,0x000000000(x9)
lw x10,0x000000000(x9)
ori x9,x12,0x0000007CF
lhu x11,0x000000000(x9)
li x13,0x0EFEC2B37
ori x13,x13,0x00000043D
div x8,x9,x15
lb x11,0x000000000(x9)
fence ior,io
ori x9,x12,0x0000003C8
ori x9,x12,0x0000003CA
sb x13,0x000000000(x8)
lh x11,0x000000000(x9)
lwu x11,0x000000000(x9)
ori x9,x12,0x00000000B
lh x11,0x000000000(x8)
c.sw x13,0x020(x9)
li x13,0x07FB7D8DF
ori x13,x13,0x00000063D
div x8,x9,x15
sh x13,0x000000000(x8)
c.lw x11,0x02C(x8)
sb x13,0x000000000(x9)
sh x13,0x000000000(x9)
fence iw,ir
sh x13,0x000000000(x9)
lhu x11,0x000000000(x8)
c.lw x11,0x07C(x8)
ori x9,x12,0x0000002C2
lwu x11,0x000000000(x8)
li x13,0x0FD98FC3F
ori x13,x13,0x000000053
div x8,x9,x15
sh x13,0x000000000(x8)
ori x9,x12,0x0000002F1
ori x9,x12,0x0000007E8
ori x9,x12,0x000000501
ori x9,x12,0x000000314
ori x9,x12,0x0000000A6
ori x9,x12,0x0000004DA
ori x9,x12,0x00000059A
ori x9,x12,0x000000652
ori x9,x12,0x000000430
ori x9,x12,0x0000006BD
ori x9,x12,0x000000663
ori x9,x12,0x0000006F0
lw x10,0x000000000(x9)
c.lw x11,0x064(x8)
li x13,0x075E1FEDE
ori x13,x13,0x0000006E1
div x8,x9,x15
lwu x10,0x000000000(x9)
c.sw x13,0x014(x9)
ori x9,x12,0x0000002F9
ori x9,x12,0x0000003B7
lb x11,0x000000000(x8)
c.lw x10,0x010(x9)
sh x13,0x000000000(x8)
ori x9,x12,0x000000706
sw x13,0x000000000(x9)
lhu x11,0x000000000(x9)
lbu x11,0x000000000(x8)
li x13,0x0D1985207
ori x13,x13,0x000000779
div x8,x9,x15
sh x13,0x000000000(x8)
lb x10,0x000000000(x9)
c.lw x11,0x018(x8)
fence io,i
c.lw x10,0x05C(x8)
sb x13,0x000000000(x8)
lwu x10,0x000000000(x8)
fence w,w
lhu x10,0x000000000(x9)
lwu x11,0x000000000(x8)
c.lw x11,0x050(x9)
sh x13,0x000000000(x8)
sb x13,0x000000000(x8)
lb x11,0x000000000(x8)
ori x9,x12,0x000000261
ori x9,x12,0x000000266
li x13,0x0AA28F9C0
ori x13,x13,0x000000035
div x8,x9,x15
lwu x11,0x000000000(x8)
lh x11,0x000000000(x8)
sh x13,0x000000000(x9)
ori x9,x12,0x00000000B
ori x9,x12,0x0000003F5
ori x9,x12,0x000000311
c.sw x13,0x03C(x9)
li x13,0x0D56C0538
ori x13,x13,0x0000000F8
div x8,x9,x15
ori x9,x12,0x0000007C2
lbu x11,0x000000000(x8)
ori x9,x12,0x0000004EC
c.sw x13,0x068(x9)
lhu x10,0x000000000(x8)
sh x13,0x000000000(x8)
lwu x11,0x000000000(x8)
lb x11,0x000000000(x9)
ori x9,x12,0x00000061E
ori x9,x12,0x0000002AB
sb x13,0x000000000(x9)
ori x9,x12,0x0000005A0
lh x11,0x000000000(x8)
sw x13,0x000000000(x9)
lhu x10,0x000000000(x8)
sb x13,0x000000000(x8)
c.sw x13,0x014(x9)
lh x11,0x000000000(x9)
li x13,0x064699FE9
ori x13,x13,0x0000003AB
div x8,x9,x15
ori x9,x12,0x0000006C3
ori x9,x12,0x0000004A0
c.sw x13,0x034(x8)
ori x9,x12,0x000000577
sh x13,0x000000000(x9)
c.sw x13,0x00(x8)
lh x10,0x000000000(x8)
lh x11,0x000000000(x8)
lwu x11,0x000000000(x8)
ori x9,x12,0x00000020B
lh x11,0x000000000(x9)
ori x9,x12,0x000000766
ori x9,x12,0x00000059E
ori x9,x12,0x000000451
ori x9,x12,0x00000057E
c.lw x11,0x074(x8)
c.lw x11,0x03C(x9)
li x13,0x0941C2BC4
ori x13,x13,0x00000038D
div x8,x9,x15
lw x10,0x000000000(x9)
lhu x11,0x000000000(x8)
ori x9,x12,0x0000007D0
ori x9,x12,0x00000002E
ori x9,x12,0x0000004DC
fence o,w
c.lw x10,0x064(x8)
ori x9,x12,0x000000713
fence o,iw
ori x9,x12,0x0000004EF
lhu x10,0x000000000(x8)
li x13,0x07BF984ED
ori x13,x13,0x000000432
div x8,x9,x15
lhu x10,0x000000000(x8)
lb x10,0x000000000(x9)
sh x13,0x000000000(x8)
lbu x11,0x000000000(x8)
lhu x11,0x000000000(x9)
sb x13,0x000000000(x9)
ori x9,x12,0x0000003E0
ori x9,x12,0x00000045D
li x13,0x011B3BAFA
ori x13,x13,0x000000044
div x8,x9,x15
ori x9,x12,0x0000005A0
ori x9,x12,0x0000004B3
lh x11,0x000000000(x8)
ori x9,x12,0x000000188
sb x13,0x000000000(x8)
fence ir,iw
fence o,io
ori x9,x12,0x0000006B4
lhu x11,0x000000000(x9)
ori x9,x12,0x000000422
lhu x10,0x000000000(x9)
ori x9,x12,0x000000650
fence ow,iow
ori x9,x12,0x0000002C3
fence ior,ow
ori x9,x12,0x000000491
ori x9,x12,0x000000549
li x13,0x083AC87E6
ori x13,x13,0x000000078
div x8,x9,x15
lbu x11,0x000000000(x8)
ori x9,x12,0x000000355
ori x9,x12,0x0000001D9
ori x9,x12,0x000000530
c.sw x13,0x018(x9)
lwu x11,0x000000000(x9)
lhu x11,0x000000000(x8)
lb x10,0x000000000(x9)
lb x10,0x000000000(x8)
lw x10,0x000000000(x9)
c.sw x13,0x068(x9)
c.sw x13,0x054(x8)
lh x10,0x000000000(x8)
fence r,r
lh x10,0x000000000(x9)
c.lw x11,0x01C(x8)
c.sw x13,0x010(x8)
lw x11,0x000000000(x8)
ori x9,x12,0x0000000CD
li x13,0x0387B0B1E
ori x13,x13,0x000000349
div x8,x9,x15
ori x9,x12,0x000000169
lhu x10,0x000000000(x8)
lw x10,0x000000000(x9)
sb x13,0x000000000(x9)
lw x10,0x000000000(x9)
lw x11,0x000000000(x8)
ori x9,x12,0x00000074C
sw x13,0x000000000(x8)
sb x13,0x000000000(x9)
c.lw x10,0x07C(x9)
ori x9,x12,0x00000021F
ori x9,x12,0x000000099
c.lw x10,0x024(x9)
lh x11,0x000000000(x9)
li x13,0x0B7DFC442
ori x13,x13,0x000000462
div x8,x9,x15
lb x11,0x000000000(x8)
ori x9,x12,0x000000457
sh x13,0x000000000(x9)
ori x9,x12,0x000000740
ori x9,x12,0x00000064A
ori x9,x12,0x00000042C
ori x9,x12,0x00000026A
sh x13,0x000000000(x9)
ori x9,x12,0x000000296
ori x9,x12,0x0000006ED
lhu x11,0x000000000(x8)
ori x9,x12,0x0000007A8
ori x9,x12,0x00000046B
lw x11,0x000000000(x9)
ori x9,x12,0x000000745
li x13,0x0508D5716
ori x13,x13,0x0000001E9
div x8,x9,x15
lb x10,0x000000000(x9)
ori x9,x12,0x000000125
sh x13,0x000000000(x8)
lh x10,0x000000000(x9)
ori x9,x12,0x0000004C1
li x13,0x0EDE0B4F9
ori x13,x13,0x0000002D6
div x8,x9,x15
ori x9,x12,0x0000007F7
lh x11,0x000000000(x9)
lw x10,0x000000000(x8)
lb x11,0x000000000(x8)
ori x9,x12,0x000000090
lwu x10,0x000000000(x9)
sh x13,0x000000000(x9)
ori x9,x12,0x00000056C
ori x9,x12,0x0000005EB
fence rw,iorw
li x13,0x068D0984E
ori x13,x13,0x00000049A
div x8,x9,x15
ori x9,x12,0x000000342
lb x10,0x000000000(x8)
ori x9,x12,0x000000715
lb x10,0x000000000(x9)
lwu x10,0x000000000(x9)
sh x13,0x000000000(x8)
ori x9,x12,0x0000003D0
li x13,0x09640B798
ori x13,x13,0x0000003BF
div x8,x9,x15
ori x9,x12,0x0000006D2
ori x9,x12,0x000000030
ori x9,x12,0x0000000CB
lh x11,0x000000000(x8)
ori x9,x12,0x00000035C
sw x13,0x000000000(x9)
c.sw x13,0x054(x8)
c.sw x13,0x05C(x8)
c.lw x10,0x010(x9)
li x13,0x0C33D23AC
ori x13,x13,0x000000120
div x8,x9,x15
lh x11,0x000000000(x9)
lbu x10,0x000000000(x9)
ori x9,x12,0x00000016D
sb x13,0x000000000(x9)
sb x13,0x000000000(x8)
fence iorw,iw
li x13,0x0F3409A3D
ori x13,x13,0x0000005B8
div x8,x9,x15
sh x13,0x000000000(x9)
lwu x11,0x000000000(x9)
c.lw x10,0x050(x8)
ori x9,x12,0x000000041
li x13,0x0410C843C
ori x13,x13,0x000000620
div x8,x9,x15
lh x11,0x000000000(x9)
ori x9,x12,0x0000001FE
fence iw,ir
sh x13,0x000000000(x9)
lb x11,0x000000000(x8)
ori x9,x12,0x00000034E
lh x10,0x000000000(x8)
sw x13,0x000000000(x8)
ori x9,x12,0x00000064C
ori x9,x12,0x0000002CA
ori x9,x12,0x0000006F0
ori x9,x12,0x000000526
lh x10,0x000000000(x8)
lh x11,0x000000000(x9)
li x13,0x0D9132FB2
ori x13,x13,0x00000015E
div x8,x9,x15
sh x13,0x000000000(x8)
ori x9,x12,0x00000054D
lbu x10,0x000000000(x9)
ori x9,x12,0x000000298
ori x9,x12,0x000000293
ori x9,x12,0x0000006C0
ori x9,x12,0x000000056
ori x9,x12,0x0000006AA
ori x9,x12,0x000000355
fence rw,or
ori x9,x12,0x000000637
ori x9,x12,0x0000000A8
lb x10,0x000000000(x9)
li x13,0x0DE943395
ori x13,x13,0x0000000CE
div x8,x9,x15
lb x10,0x000000000(x8)
ori x9,x12,0x00000046D
lw x10,0x000000000(x9)
c.lw x10,0x038(x9)
c.lw x10,0x070(x8)
ori x9,x12,0x00000077C
lw x11,0x000000000(x9)
lhu x11,0x000000000(x8)
ori x9,x12,0x00000010F
c.sw x13,0x024(x9)
lhu x11,0x000000000(x8)
fence irw,or
lh x10,0x000000000(x9)
lbu x11,0x000000000(x9)
li x13,0x0A1993564
ori x13,x13,0x00000071E
div x8,x9,x15
ori x9,x12,0x000000689
lbu x10,0x000000000(x9)
ori x9,x12,0x0000002FB
lhu x10,0x000000000(x9)
ori x9,x12,0x000000048
lh x11,0x000000000(x8)
sw x13,0x000000000(x9)
c.sw x13,0x068(x9)
li x13,0x02921A970
ori x13,x13,0x0000000B9
div x8,x9,x15
ori x9,x12,0x0000006B6
lh x11,0x000000000(x8)
ori x9,x12,0x0000003C1
ori x9,x12,0x00000052A
sw x13,0x000000000(x8)
lbu x10,0x000000000(x8)
lbu x11,0x000000000(x8)
sb x13,0x000000000(x8)
ori x9,x12,0x00000067A
ori x9,x12,0x000000320
c.lw x10,0x014(x9)
sb x13,0x000000000(x8)
lh x11,0x000000000(x8)
lwu x11,0x000000000(x8)
lwu x11,0x000000000(x8)
ori x9,x12,0x00000012C
sh x13,0x000000000(x8)
ori x9,x12,0x0000001A7
mv x1,x1
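# End of the generator-produced random region: each li/ori/div group above
# refreshes the store data in x13 and re-derives the x8 address register
# from x9 (itself built as x12 | offset) before issuing mixed byte/half/word
# loads, stores and fences. The directed sequences that follow (seq_rw,
# seq_r, seq_w) appear to walk the same x12-based window with a fixed 0x10
# stride; seq_rw interleaves lwu loads and sw stores separated by full
# "fence rw,rw" barriers. These register roles are inferred from the code
# itself, not from the generator's documentation.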
.global seq_rw
seq_rw:
fence
li x13,0x0DCFC4A3F
ori x13,x13,0x0000000E6
div x8,x12,x15
lwu x11,0x000000010(x8)
lwu x10,0x000000020(x8)
sw x13,0x000000030(x12)
sw x13,0x000000040(x12)
sw x13,0x000000050(x8)
sw x13,0x000000060(x8)
lwu x10,0x000000070(x8)
sw x13,0x000000080(x12)
sw x13,0x000000090(x8)
fence rw,rw
li x13,0x0408BA264
ori x13,x13,0x00000041C
div x8,x12,x15
lwu x10,0x0000000A0(x12)
lwu x11,0x0000000B0(x8)
sw x13,0x0000000C0(x8)
lwu x11,0x0000000D0(x12)
sw x13,0x0000000E0(x12)
sw x13,0x0000000F0(x12)
lwu x11,0x000000100(x12)
lwu x11,0x000000110(x12)
fence rw,rw
sw x13,0x000000120(x12)
li x13,0x01ECF1585
ori x13,x13,0x000000525
div x8,x12,x15
lwu x10,0x000000130(x12)
lwu x11,0x000000140(x8)
lwu x10,0x000000150(x8)
sw x13,0x000000160(x8)
sw x13,0x000000170(x12)
fence rw,rw
lwu x11,0x000000180(x8)
lwu x11,0x000000190(x12)
sw x13,0x0000001A0(x12)
lwu x11,0x0000001B0(x12)
li x13,0x0EB6DB7D3
ori x13,x13,0x00000008B
div x8,x12,x15
lwu x10,0x0000001C0(x12)
sw x13,0x0000001D0(x12)
sw x13,0x0000001E0(x8)
sw x13,0x0000001F0(x8)
lwu x10,0x000000200(x12)
lwu x11,0x000000210(x12)
sw x13,0x000000220(x8)
lwu x11,0x000000230(x12)
lwu x11,0x000000240(x12)
lwu x11,0x000000250(x12)
li x13,0x003FFC437
ori x13,x13,0x000000496
div x8,x12,x15
sw x13,0x000000260(x8)
lwu x10,0x000000270(x8)
sw x13,0x000000280(x12)
lwu x11,0x000000290(x12)
fence rw,rw
sw x13,0x0000002A0(x8)
lwu x10,0x0000002B0(x8)
lwu x10,0x0000002C0(x12)
lwu x11,0x0000002D0(x12)
sw x13,0x0000002E0(x8)
li x13,0x047A33973
ori x13,x13,0x00000050A
div x8,x12,x15
lwu x11,0x0000002F0(x12)
sw x13,0x000000300(x12)
sw x13,0x000000310(x12)
sw x13,0x000000320(x8)
lwu x11,0x000000330(x12)
sw x13,0x000000340(x12)
lwu x11,0x000000350(x8)
sw x13,0x000000360(x8)
lwu x11,0x000000370(x12)
sw x13,0x000000380(x8)
li x13,0x0C59858CC
ori x13,x13,0x00000033A
div x8,x12,x15
lwu x10,0x000000390(x12)
lwu x10,0x0000003A0(x8)
lwu x11,0x0000003B0(x8)
sw x13,0x0000003C0(x12)
sw x13,0x0000003D0(x12)
sw x13,0x0000003E0(x12)
fence rw,rw
fence rw,rw
lwu x11,0x0000003F0(x12)
lwu x11,0x000000400(x8)
li x13,0x00454D0BD
ori x13,x13,0x000000026
div x8,x12,x15
lwu x11,0x000000410(x8)
lwu x11,0x000000420(x12)
sw x13,0x000000430(x8)
lwu x11,0x000000440(x12)
sw x13,0x000000450(x12)
lwu x11,0x000000460(x8)
lwu x11,0x000000470(x12)
sw x13,0x000000480(x12)
lwu x11,0x000000490(x12)
sw x13,0x0000004A0(x12)
li x13,0x0D894A059
ori x13,x13,0x000000037
div x8,x12,x15
lwu x11,0x0000004B0(x12)
lwu x11,0x0000004C0(x12)
sw x13,0x0000004D0(x12)
fence rw,rw
lwu x10,0x0000004E0(x8)
lwu x11,0x0000004F0(x8)
sw x13,0x000000500(x8)
fence rw,rw
lwu x10,0x000000510(x12)
sw x13,0x000000520(x8)
li x13,0x060B219DA
ori x13,x13,0x000000443
div x8,x12,x15
sw x13,0x000000530(x8)
lwu x11,0x000000540(x12)
lwu x11,0x000000550(x12)
lwu x10,0x000000560(x8)
sw x13,0x000000570(x8)
sw x13,0x000000580(x12)
sw x13,0x000000590(x8)
sw x13,0x0000005A0(x8)
fence rw,rw
sw x13,0x0000005B0(x8)
li x13,0x0E198D90D
ori x13,x13,0x000000649
div x8,x12,x15
lwu x11,0x0000005C0(x8)
lwu x11,0x0000005D0(x12)
fence rw,rw
sw x13,0x0000005E0(x12)
sw x13,0x0000005F0(x8)
sw x13,0x000000600(x12)
sw x13,0x000000610(x8)
sw x13,0x000000620(x12)
sw x13,0x000000630(x8)
sw x13,0x000000640(x8)
li x13,0x07613D086
ori x13,x13,0x00000023A
div x8,x12,x15
lwu x11,0x000000650(x12)
sw x13,0x000000660(x8)
sw x13,0x000000670(x8)
fence rw,rw
lwu x10,0x000000680(x12)
fence rw,rw
lwu x10,0x000000690(x8)
fence rw,rw
fence rw,rw
lwu x10,0x0000006A0(x12)
li x13,0x02FB6868A
ori x13,x13,0x00000020D
div x8,x12,x15
lwu x10,0x0000006B0(x12)
sw x13,0x0000006C0(x8)
sw x13,0x0000006D0(x8)
lwu x10,0x0000006E0(x8)
lwu x11,0x0000006F0(x8)
lwu x10,0x000000700(x12)
lwu x11,0x000000710(x8)
sw x13,0x000000720(x12)
lwu x11,0x000000730(x12)
lwu x11,0x000000740(x12)
li x13,0x04BFC5CBA
ori x13,x13,0x0000007B4
div x8,x12,x15
sw x13,0x000000750(x12)
sw x13,0x000000760(x8)
sw x13,0x000000770(x8)
lwu x10,0x000000780(x12)
lwu x10,0x000000790(x12)
lwu x11,0x0000007A0(x8)
sw x13,0x0000007B0(x8)
lwu x11,0x0000007C0(x12)
sw x13,0x0000007D0(x12)
sw x13,0x0000007E0(x12)
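# mcor is a T-Head C9xx custom machine-mode cache-operation CSR; writing it
# here presumably flushes/invalidates the caches so the next directed
# sequence starts from a clean cache state. The meaning of the value 0x32
# is an assumption and is not taken from the vendor manual.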
li x11, 0x32
csrw mcor,x11
mv x1,x1
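# seq_r: the same sequential lwu/sw walk over the x12/x8 window as seq_rw,
# but the ordering points use "fence r,r" (load-to-load ordering only)
# instead of full "fence rw,rw" barriers.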
.global seq_r
seq_r:
fence
li x13,0x079797181
ori x13,x13,0x0000003AB
div x8,x12,x15
lwu x11,0x000000010(x12)
fence r,r
lwu x10,0x000000020(x12)
fence r,r
lwu x10,0x000000030(x8)
fence r,r
sw x13,0x000000040(x8)
lwu x11,0x000000050(x8)
sw x13,0x000000060(x8)
sw x13,0x000000070(x12)
li x13,0x08666D2DF
ori x13,x13,0x0000000D4
div x8,x12,x15
lwu x10,0x000000080(x12)
sw x13,0x000000090(x8)
sw x13,0x0000000A0(x12)
lwu x10,0x0000000B0(x8)
lwu x11,0x0000000C0(x8)
fence r,r
fence r,r
lwu x11,0x0000000D0(x8)
lwu x10,0x0000000E0(x8)
lwu x10,0x0000000F0(x12)
li x13,0x0000F2C04
ori x13,x13,0x000000218
div x8,x12,x15
sw x13,0x000000100(x12)
sw x13,0x000000110(x8)
sw x13,0x000000120(x8)
lwu x11,0x000000130(x12)
sw x13,0x000000140(x8)
lwu x10,0x000000150(x8)
lwu x10,0x000000160(x12)
fence r,r
lwu x11,0x000000170(x12)
sw x13,0x000000180(x12)
li x13,0x08814EC40
ori x13,x13,0x0000003E8
div x8,x12,x15
sw x13,0x000000190(x8)
lwu x10,0x0000001A0(x12)
lwu x11,0x0000001B0(x8)
lwu x10,0x0000001C0(x12)
sw x13,0x0000001D0(x12)
lwu x11,0x0000001E0(x12)
fence r,r
lwu x10,0x0000001F0(x8)
lwu x10,0x000000200(x12)
sw x13,0x000000210(x8)
li x13,0x057C6A1EA
ori x13,x13,0x0000000E4
div x8,x12,x15
lwu x11,0x000000220(x12)
lwu x11,0x000000230(x8)
lwu x11,0x000000240(x8)
lwu x11,0x000000250(x12)
lwu x10,0x000000260(x12)
sw x13,0x000000270(x12)
lwu x11,0x000000280(x8)
fence r,r
lwu x10,0x000000290(x12)
sw x13,0x0000002A0(x8)
li x13,0x071AA8037
ori x13,x13,0x000000446
div x8,x12,x15
sw x13,0x0000002B0(x8)
sw x13,0x0000002C0(x8)
sw x13,0x0000002D0(x8)
sw x13,0x0000002E0(x12)
fence r,r
lwu x11,0x0000002F0(x12)
lwu x11,0x000000300(x12)
fence r,r
sw x13,0x000000310(x8)
lwu x10,0x000000320(x12)
li x13,0x00D639346
ori x13,x13,0x00000024E
div x8,x12,x15
sw x13,0x000000330(x8)
sw x13,0x000000340(x8)
sw x13,0x000000350(x8)
lwu x11,0x000000360(x8)
fence r,r
sw x13,0x000000370(x8)
lwu x11,0x000000380(x12)
sw x13,0x000000390(x8)
sw x13,0x0000003A0(x8)
lwu x10,0x0000003B0(x8)
li x13,0x067486B72
ori x13,x13,0x0000005FC
div x8,x12,x15
sw x13,0x0000003C0(x8)
fence r,r
lwu x11,0x0000003D0(x8)
lwu x10,0x0000003E0(x8)
lwu x10,0x0000003F0(x12)
sw x13,0x000000400(x8)
lwu x10,0x000000410(x8)
sw x13,0x000000420(x8)
lwu x11,0x000000430(x12)
fence r,r
li x13,0x0CB061C1F
ori x13,x13,0x0000002C1
div x8,x12,x15
lwu x11,0x000000440(x8)
lwu x10,0x000000450(x12)
sw x13,0x000000460(x8)
fence r,r
sw x13,0x000000470(x12)
lwu x10,0x000000480(x8)
lwu x11,0x000000490(x12)
fence r,r
fence r,r
lwu x10,0x0000004A0(x8)
li x13,0x0AECBA22D
ori x13,x13,0x00000007E
div x8,x12,x15
fence r,r
lwu x10,0x0000004B0(x12)
lwu x11,0x0000004C0(x8)
fence r,r
sw x13,0x0000004D0(x8)
lwu x11,0x0000004E0(x8)
fence r,r
sw x13,0x0000004F0(x8)
fence r,r
sw x13,0x000000500(x8)
li x13,0x0836003E2
ori x13,x13,0x0000005DC
div x8,x12,x15
sw x13,0x000000510(x12)
sw x13,0x000000520(x12)
sw x13,0x000000530(x12)
sw x13,0x000000540(x12)
lwu x10,0x000000550(x8)
fence r,r
sw x13,0x000000560(x8)
lwu x11,0x000000570(x12)
lwu x10,0x000000580(x8)
lwu x11,0x000000590(x8)
li x13,0x0667B51E7
ori x13,x13,0x0000003BE
div x8,x12,x15
fence r,r
lwu x11,0x0000005A0(x12)
sw x13,0x0000005B0(x8)
sw x13,0x0000005C0(x8)
lwu x11,0x0000005D0(x8)
sw x13,0x0000005E0(x8)
sw x13,0x0000005F0(x12)
fence r,r
lwu x10,0x000000600(x12)
lwu x10,0x000000610(x8)
li x13,0x078E9046A
ori x13,x13,0x0000003E8
div x8,x12,x15
sw x13,0x000000620(x8)
sw x13,0x000000630(x12)
sw x13,0x000000640(x8)
lwu x11,0x000000650(x12)
sw x13,0x000000660(x8)
lwu x10,0x000000670(x12)
sw x13,0x000000680(x12)
sw x13,0x000000690(x12)
fence r,r
fence r,r
li x13,0x035EDF6A6
ori x13,x13,0x0000004C7
div x8,x12,x15
lwu x11,0x0000006A0(x12)
sw x13,0x0000006B0(x12)
sw x13,0x0000006C0(x12)
sw x13,0x0000006D0(x12)
lwu x11,0x0000006E0(x12)
sw x13,0x0000006F0(x12)
sw x13,0x000000700(x12)
lwu x11,0x000000710(x8)
lwu x10,0x000000720(x12)
sw x13,0x000000730(x8)
li x11, 0x32
csrw mcor,x11
mv x1,x1
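# seq_w: sequential walk ordered with "fence w,w" (store-to-store only);
# the interleaved "li x10/x11, 0" instructions merely clear the load
# destination registers between accesses.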
.global seq_w
seq_w:
fence
li x13,0x086D219E6
ori x13,x13,0x000000383
div x8,x12,x15
sw x13,0x000000010(x8)
lwu x11,0x000000020(x12)
fence w,w
sw x13,0x000000030(x8)
lwu x11,0x000000040(x8)
fence w,w
sw x13,0x000000050(x12)
li x10,0x000000000
fence w,w
sw x13,0x000000060(x8)
li x13,0x04068C3CC
ori x13,x13,0x0000005FF
div x8,x12,x15
fence w,w
sw x13,0x000000070(x8)
sw x13,0x000000080(x12)
sw x13,0x000000090(x8)
lwu x10,0x0000000A0(x12)
lwu x11,0x0000000B0(x12)
lwu x11,0x0000000C0(x12)
fence w,w
li x10,0x000000000
lwu x10,0x0000000D0(x12)
li x13,0x09092717E
ori x13,x13,0x000000637
div x8,x12,x15
sw x13,0x0000000E0(x8)
fence w,w
fence w,w
lwu x10,0x0000000F0(x8)
lwu x11,0x000000100(x8)
sw x13,0x000000110(x12)
fence w,w
sw x13,0x000000120(x12)
sw x13,0x000000130(x12)
lwu x10,0x000000140(x8)
li x13,0x06B2D6A25
ori x13,x13,0x0000000B6
div x8,x12,x15
fence w,w
sw x13,0x000000150(x12)
li x10,0x000000000
lwu x11,0x000000160(x12)
lwu x10,0x000000170(x12)
lwu x11,0x000000180(x12)
li x11,0x000000000
lwu x11,0x000000190(x8)
li x11,0x000000000
li x10,0x000000000
li x13,0x0929048CF
ori x13,x13,0x00000050C
div x8,x12,x15
lwu x11,0x0000001A0(x12)
li x10,0x000000000
sw x13,0x0000001B0(x12)
lwu x11,0x0000001C0(x8)
sw x13,0x0000001D0(x8)
li x11,0x000000000
sw x13,0x0000001E0(x8)
sw x13,0x0000001F0(x12)
fence w,w
lwu x11,0x000000200(x12)
li x13,0x086C4F3EE
ori x13,x13,0x0000005C4
div x8,x12,x15
sw x13,0x000000210(x12)
lwu x11,0x000000220(x12)
sw x13,0x000000230(x12)
sw x13,0x000000240(x8)
lwu x10,0x000000250(x8)
sw x13,0x000000260(x12)
fence w,w
li x11,0x000000000
sw x13,0x000000270(x8)
sw x13,0x000000280(x12)
li x13,0x0D082C21E
ori x13,x13,0x0000007C0
div x8,x12,x15
lwu x11,0x000000290(x12)
li x11,0x000000000
lwu x11,0x0000002A0(x8)
lwu x11,0x0000002B0(x12)
sw x13,0x0000002C0(x8)
sw x13,0x0000002D0(x8)
lwu x11,0x0000002E0(x12)
sw x13,0x0000002F0(x8)
sw x13,0x000000300(x8)
lwu x11,0x000000310(x12)
li x13,0x0DFDBE35F
ori x13,x13,0x0000000FB
div x8,x12,x15
fence w,w
fence w,w
sw x13,0x000000320(x8)
sw x13,0x000000330(x12)
lwu x10,0x000000340(x8)
sw x13,0x000000350(x8)
lwu x10,0x000000360(x8)
sw x13,0x000000370(x8)
fence w,w
lwu x10,0x000000380(x8)
li x13,0x0891515DF
ori x13,x13,0x0000006E7
div x8,x12,x15
sw x13,0x000000390(x8)
li x10,0x000000000
fence w,w
fence w,w
sw x13,0x0000003A0(x12)
sw x13,0x0000003B0(x12)
lwu x10,0x0000003C0(x8)
fence w,w
li x10,0x000000000
sw x13,0x0000003D0(x8)
li x13,0x0AEE64B8C
ori x13,x13,0x0000006B4
div x8,x12,x15
lwu x11,0x0000003E0(x12)
li x11,0x000000000
fence w,w
sw x13,0x0000003F0(x8)
sw x13,0x000000400(x12)
sw x13,0x000000410(x8)
lwu x11,0x000000420(x12)
sw x13,0x000000430(x12)
fence w,w
sw x13,0x000000440(x8)
li x13,0x0BA351ED4
ori x13,x13,0x00000030F
div x8,x12,x15
li x10,0x000000000
lwu x10,0x000000450(x8)
lwu x10,0x000000460(x12)
sw x13,0x000000470(x12)
li x11,0x000000000
li x10,0x000000000
fence w,w
sw x13,0x000000480(x12)
li x11,0x000000000
lwu x11,0x000000490(x12)
li x13,0x085730CCA
ori x13,x13,0x000000018
div x8,x12,x15
li x10,0x000000000
lwu x10,0x0000004A0(x8)
lwu x11,0x0000004B0(x12)
lwu x11,0x0000004C0(x12)
li x11,0x000000000
lwu x11,0x0000004D0(x8)
li x10,0x000000000
lwu x11,0x0000004E0(x12)
fence w,w
lwu x10,0x0000004F0(x12)
li x13,0x0D4F06B8E
ori x13,x13,0x00000070A
div x8,x12,x15
fence w,w
lwu x11,0x000000500(x8)
sw x13,0x000000510(x8)
lwu x11,0x000000520(x8)
lwu x10,0x000000530(x8)
lwu x10,0x000000540(x12)
sw x13,0x000000550(x12)
lwu x11,0x000000560(x12)
li x10,0x000000000
lwu x10,0x000000570(x12)
li x13,0x0C40DEDC4
ori x13,x13,0x0000005CB
div x8,x12,x15
li x10,0x000000000
sw x13,0x000000580(x12)
sw x13,0x000000590(x8)
sw x13,0x0000005A0(x12)
li x10,0x000000000
sw x13,0x0000005B0(x8)
fence w,w
sw x13,0x0000005C0(x12)
lwu x10,0x0000005D0(x8)
lwu x11,0x0000005E0(x12)
j PASS_POINT
.global PASS_POINT
PASS_POINT:
mv x8, x8
EXIT
#******this region is added by generator******
| Advanced-Microelectronics-Group/OpenC910_Modified | 49,525 | smart_run/tests/cases/ISA/ISA_IMAC/ct_imac_smoke.s |
/*Copyright 2019-2021 T-Head Semiconductor Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#************************************************************
#*           Target file generated by rangen                *
#************************************************************
#*                                                          *
#************************************************************
#*************Following is the generated instructions*****************
.text
.align 6
.global main
main:
csrr x10, mhartid
bnez x10, TEST_WFI
# .include "core_init.h"
.option norvc
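# Each ISA test below follows the same pattern: load the expected result
# with li, execute the instruction under test, and branch to TEST_FAIL on
# a mismatch. Hart 0 runs the tests; secondary harts took the TEST_WFI
# branch above. TEST_FAIL and TEST_WFI are defined elsewhere in this file.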
.global addi.w
addi.w:
li x5, 0x4
li x4, 0x5
addiw x4, x4, 0xffffffffffffffff
bne x4, x5, TEST_FAIL
li x5, 0xffe
li x4, 0x7ff
addiw x4, x4, 0x7ff
bne x4, x5, TEST_FAIL
li x5, 0xfffffffffffff000
li x4, 0xfffffffffffff800
addiw x4, x4, 0xfffffffffffff800
bne x4, x5, TEST_FAIL
addiw x4, x4, 0x123
addiw x5, x4, 0x0
bne x4, x5, TEST_FAIL
li x5, 0x0
addiw x5, x5, 0x0
addiw x0, x0, 0x0
addiw x0, x0, 0x5
bne x5, x0, TEST_FAIL
.global ADDI
ADDI:
li x5, 0x4
li x4, 0x5
addi x4, x4, 0xffffffffffffffff
bne x4, x5, TEST_FAIL
li x5, 0xffe
li x4, 0x7ff
addi x4, x4, 0x7ff
bne x4, x5, TEST_FAIL
li x5, 0xfffffffffffff000
li x4, 0xfffffffffffff800
addi x4, x4, 0xfffffffffffff800
bne x4, x5, TEST_FAIL
addi x4, x4, 0x123
addi x5, x4, 0x0
bne x4, x5, TEST_FAIL
li x5, 0x0
addi x5, x5, 0x0
addi x0, x0, 0x0
addi x0, x0, 0x5
bne x5, x0, TEST_FAIL
.global SLTI
SLTI:
li x4, 0x1
li x5, 0xfff
slti x5, x5, 0xffffffffffffffff
bne x5, x0, TEST_FAIL
li x5, 0xfff
slti x5, x5, 0x1
bne x5, x0, TEST_FAIL
li x5, 0xffffffffffffffff
slti x5, x5, 0x1
bne x5, x4, TEST_FAIL
li x5, 0x1
slti x5, x5, 0x1
bne x5, x0, TEST_FAIL
li x5, 0xffffffffffffffff
slti x5, x5, 0xfffffffffffffffe
bne x5, x0, TEST_FAIL
li x5, 0xffffffffffffffff
slti x5, x5, 0xffffffffffffffff
bne x5, x0, TEST_FAIL
.global SLTIU
SLTIU:
li x4, 0x1
li x5, 0xffffffffffffffff
sltiu x5, x5, 0x7ff
bne x5, x0, TEST_FAIL
li x5, 0xfff
sltiu x5, x5, 0x1
bne x5, x0, TEST_FAIL
li x5, 0x1
sltiu x5, x5, 0x7ff
bne x5, x4, TEST_FAIL
li x5, 0x7ff
sltiu x5, x5, 0x7ff
bne x5, x0, TEST_FAIL
.global ANDI
ANDI:
li x4, 0xaaaaaaaaaaaaaaaa
li x5, 0xaaaaaaaaaaaaaaaa
andi x5, x5, 0xffffffffffffffff
bne x4, x5, TEST_FAIL
li x4, 0x2aa
li x5, 0xaaaaaaaaaaaaaaaa
andi x5, x5, 0x7ff
bne x4, x5, TEST_FAIL
li x4, 0x0
li x5, 0xaaaaaaaaaaaaaaaa
andi x5, x5, 0x0
bne x4, x5, TEST_FAIL
.global ORI
ORI:
li x4, 0xffffffffffffffff
li x5, 0xaaaaaaaaaaaaaaaa
ori x5, x5, 0xffffffffffffffff
bne x4, x5, TEST_FAIL
li x4, 0xaaaaaaaaaaaaafff
li x5, 0xaaaaaaaaaaaaaaaa
ori x5, x5, 0x7ff
bne x4, x5, TEST_FAIL
li x4, 0xaaaaaaaaaaaaaaaa
li x5, 0xaaaaaaaaaaaaaaaa
ori x5, x5, 0x0
bne x4, x5, TEST_FAIL
.global XORI
XORI:
li x4, 0xabcdabcdabcdabcd
xori x5, x4, 0xffffffffffffffff
not x6, x4
bne x6, x5, TEST_FAIL
li x4, 0xabcdabcdabcdffff
li x6, 0xabcdabcdabcdfff0
xori x5, x4, 0xf
bne x6, x5, TEST_FAIL
li x4, 0xabcdabcdabcdabcd
li x6, 0xabcdabcdabcdabcd
xori x5, x4, 0x0
bne x6, x5, TEST_FAIL
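# The shift-immediate tests exercise each variant with shift amounts of
# zero, a mid-range value and the maximum (63 for the 64-bit forms, 31 for
# the *W forms), and check that the *W forms sign-extend their 32-bit
# results.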
.global SLLI
SLLI:
li x4, 0xabcd1234abcd1234
slli x2, x4, 0
bne x2, x4, TEST_FAIL
li x4, 0xabcd1234abcd1234
slli x2, x4, 63
bne x2, x0, TEST_FAIL
li x4, 0xaaaaaaaaaaaaaaaa
slli x2, x4, 36
li x3, 0xaaaaaaa000000000
bne x2, x3, TEST_FAIL
.global SLLIW
SLLIW:
li x4, 0xabcd1234abcd1234
slliw x2, x4, 0
li x3, 0xffffffffabcd1234
bne x2, x3, TEST_FAIL
li x4, 0xabcd1234abcd1234
slliw x2, x4, 31
bne x2, x0, TEST_FAIL
li x4, 0xaaaaaaaaaaaaaaaa
slliw x2, x4, 5
li x3, 0x0000000055555540
bne x2, x3, TEST_FAIL
.global SRLI
SRLI:
li x2, 0xabcd1234abcd1234
srli x3, x2, 0
bne x2, x3, TEST_FAIL
li x2, 0xaaaaaaaaaaaaaaaa
srli x3, x2, 63
li x4, 0x1
bne x3, x4, TEST_FAIL
li x2, 0xaaaaaaaaaaaaaaaa
srli x2, x2, 1
li x3, 0x5555555555555555
bne x2, x3, TEST_FAIL
.global SRLIW
SRLIW:
li x4, 0xabcd1234abcd1234
srliw x2, x4, 0
li x3, 0xffffffffabcd1234
bne x2, x3, TEST_FAIL
li x2, 0xaaaaaaaaaaaaaaaa
srliw x3, x2, 31
li x4, 0x1
bne x3, x4, TEST_FAIL
li x2, 0xaaaaaaaaaaaaaaaa
srliw x2, x2, 1
li x3, 0x55555555
bne x2, x3, TEST_FAIL
.global SRAI
SRAI:
li x2, 0xaaaaaaaaaaaaaaaa
srai x2, x2, 63
li x3, 0xffffffffffffffff
bne x2, x3, TEST_FAIL
li x2, 0x5555555555555555
srai x2, x2, 63
bne x2, x0, TEST_FAIL
li x2, 0x5555555555555555
srai x2, x2, 1
li x3, 0x2aaaaaaaaaaaaaaa
bne x2, x3, TEST_FAIL
li x2, 0xabcd1234abcd1234
srai x2, x2, 0
li x3, 0xabcd1234abcd1234
bne x2, x3, TEST_FAIL
.global SRAIW
SRAIW:
li x2, 0xaaaaaaaaaaaaaaaa
sraiw x2, x2, 31
li x3, 0xffffffffffffffff
bne x2, x3, TEST_FAIL
li x2, 0x5555555555555555
sraiw x2, x2, 31
li x3, 0
bne x2, x3, TEST_FAIL
li x2, 0x5555555555555555
sraiw x2, x2, 1
li x3, 0x2aaaaaaa
bne x2, x3, TEST_FAIL
li x2, 0xabcd1234abcd1234
sraiw x2, x2, 0
li x3, 0xffffffffabcd1234
bne x2, x3, TEST_FAIL
.global LUI
LUI:
lui x2, 0x7bcd1
li x3, 0x7bcd1000
bne x2, x3, TEST_FAIL
lui x2, 0x89abc
li x3, 0xffffffff89abc000
bne x2, x3, TEST_FAIL
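# Compressed instructions are enabled only around the AUIPC tests (the
# first one uses c.add); the low 12 bits of the added constants (0xe and
# 0x14) encode the code-byte distance between each pair of auipc
# instructions, so the .option rvc/norvc pair must stay where it is.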
.option rvc
.global AUIPC
AUIPC:
auipc x2, 0x0
li x3, 0xa00e
c.add x2, x3
auipc x4, 0xa
bne x2, x4, TEST_FAIL
auipc x2, 0x0
li x3, 0x80000014
addw x2, x3, x2
auipc x4, 0x80000
bne x2, x4, TEST_FAIL
.option norvc
.global ADD
ADD:
li x3, 0xffffffffffffffff
li x4, 0xffffffffffffffff
add x5, x3, x4
li x6, 0xfffffffffffffffe
bne x5, x6, TEST_FAIL
li x3, 0x8000000000000000
li x4, 0x8000000000000000
add x5, x3, x4
li x6, 0x0
bne x5, x6, TEST_FAIL
li x3, 0x7fffffffffffffff
li x4, 0x7fffffffffffffff
add x5, x3, x4
li x6, 0xfffffffffffffffe
bne x5, x6, TEST_FAIL
.global ADDW
ADDW:
li x3, 0xabcdabcdffffffff
li x4, 0xabcdabcdffffffff
addw x5, x3, x4
li x6, 0xfffffffffffffffe
bne x5, x6, TEST_FAIL
li x3, 0x80000000
li x4, 0x80000000
addw x5, x3, x4
li x6, 0x0
bne x5, x6, TEST_FAIL
li x3, 0xabcdabcd7fffffff
li x4, 0xabcdabcd7fffffff
addw x5, x3, x4
li x6, 0xfffffffffffffffe
bne x5, x6, TEST_FAIL
.global SUB
SUB:
li x3, 0x8000000000000000
li x4, 0x7fffffffffffffff
sub x5, x3, x4
li x6, 0x1
bne x5, x6, TEST_FAIL
li x3, 0x7fffffffffffffff
li x4, 0x8000000000000000
sub x5, x3, x4
li x6, 0xffffffffffffffff
bne x5, x6, TEST_FAIL
li x3, 0xf
li x4, 0x2
sub x5, x3, x4
li x6, 0xd
bne x5, x6, TEST_FAIL
.global SUBW
SUBW:
li x3, 0xabcdabcd80000000
li x4, 0xabcdabcd7fffffff
subw x5, x3, x4
li x6, 0x1
bne x5, x6, TEST_FAIL
li x3, 0xabcdabcd7fffffff
li x4, 0xabcdabcd80000000
subw x5, x3, x4
li x6, 0xffffffffffffffff
bne x5, x6, TEST_FAIL
li x3, 0xf
li x4, 0x2
subw x5, x3, x4
li x6, 0xd
bne x5, x6, TEST_FAIL
.global SLT
SLT:
li x3, 0x1
li x4, 0x0
li x5, 0x8000000000000000
li x6, 0xffffffffffffffff
li x8, 0xfffffffffffffffe
slt x7, x3, x3
bne x7, x4, TEST_FAIL
slt x7, x3, x4
bne x7, x4, TEST_FAIL
slt x7, x4, x3
bne x7, x3, TEST_FAIL
slt x7, x5, x3
bne x7, x3, TEST_FAIL
slt x7, x6, x8
bne x7, x4, TEST_FAIL
.global SLTU
SLTU:
li x3, 0x1
li x4, 0x0
li x5, 0x8000000000000000
li x6, 0xffffffffffffffff
sltu x7, x3, x3
bne x7, x4, TEST_FAIL
sltu x7, x3, x4
bne x7, x4, TEST_FAIL
sltu x7, x4, x3
bne x7, x3, TEST_FAIL
sltu x7, x5, x3
bne x7, x4, TEST_FAIL
.global AND
AND:
li x3, 0x0
li x4, 0xffffffffffffffff
li x5, 0xabcdabcdabcdabcd
and x6, x5, x4
bne x6, x5, TEST_FAIL
li x5, 0xabcdabcdabcdabcd
and x6, x5, x3
bne x6, x3, TEST_FAIL
.global OR
OR:
li x3, 0x0
li x4, 0xffffffffffffffff
li x5, 0xabcdabcdabcdabcd
or x6, x5, x4
bne x6, x4, TEST_FAIL
li x5, 0xabcdabcdabcdabcd
or x6, x5, x3
bne x6, x5, TEST_FAIL
.global XOR
XOR:
li x3, 0x0
li x4, 0xffffffffffffffff
li x5, 0xabcdabcdabcdabcd
xor x6, x5, x4
not x5, x5
bne x6, x5, TEST_FAIL
li x5, 0xabcdabcdabcdabcd
xor x6, x5, x3
bne x6, x5, TEST_FAIL
.global SLL
SLL:
li x4, 0xabcd1234abcd1234
sll x2, x4, x0
bne x2, x4, TEST_FAIL
li x4, 0xabcd1234abcd1234
li x5, 63
sll x2, x4, x5
li x3, 0x0
bne x2, x3, TEST_FAIL
li x4, 0xaaaaaaaaaaaaaaaa
li x5, 0x8
sll x2, x4, x5
li x3, 0xaaaaaaaaaaaaaa00
bne x2, x3, TEST_FAIL
li x4, 0xaaaaaaaaaaaaaaaa
li x5, 0x41
sll x2, x4, x5
li x3, 0x5555555555555554
bne x2, x3, TEST_FAIL
.global SLLW
SLLW:
li x4, 0xabcd1234
li x3, 0xffffffffabcd1234
sllw x2, x4, x0
bne x2, x3, TEST_FAIL
li x4, 0xabcd1234abcd1234
li x5, 31
sllw x2, x4, x5
li x3, 0x0
bne x2, x3, TEST_FAIL
li x4, 0xaaaaaaaaaaaaaaaa
li x5, 0x8
sllw x2, x4, x5
li x3, 0xffffffffaaaaaa00
bne x2, x3, TEST_FAIL
li x4, 0xaaaaaaaaaaaaaaaa
li x5, 0x21
sllw x2, x4, x5
li x3, 0x55555554
bne x2, x3, TEST_FAIL
.global SRL
SRL:
li x2, 0xabcd1234abcd1234
srl x3, x2, x0
bne x2, x3, TEST_FAIL
li x2, 0xaaaaaaaaaaaaaaaa
li x5, 63
srl x3, x2, x5
li x4, 0x1
bne x3, x4, TEST_FAIL
li x2, 0xaaaaaaaaaaaaaaaa
li x5, 0x1
srl x2, x2, x5
li x3, 0x5555555555555555
bne x2, x3, TEST_FAIL
li x2, 0xaaaaaaaaaaaaaaaa
li x5, 0x41
srl x2, x2, x5
li x3, 0x5555555555555555
bne x2, x3, TEST_FAIL
.global SRLW
SRLW:
li x2, 0xabcd1234
li x4, 0xffffffffabcd1234
srlw x3, x2, x0
bne x4, x3, TEST_FAIL
li x2, 0xaaaaaaaaaaaaaaaa
li x5, 31
srlw x3, x2, x5
li x4, 0x1
bne x3, x4, TEST_FAIL
li x2, 0xaaaaaaaaaaaaaaaa
li x5, 0x1
srlw x2, x2, x5
li x3, 0x55555555
bne x2, x3, TEST_FAIL
li x2, 0xaaaaaaaaaaaaaaaa
li x5, 0x21
srlw x2, x2, x5
li x3, 0x55555555
bne x2, x3, TEST_FAIL
.global SRA
SRA:
li x2, 0xaaaaaaaaaaaaaaaa
li x5, 0x3f
sra x2, x2, x5
li x3, 0xffffffffffffffff
bne x2, x3, TEST_FAIL
li x2, 0x5555555555555555
li x5, 0x3f
sra x2, x2, x5
li x3, 0
bne x2, x3, TEST_FAIL
li x2, 0x5555555555555555
li x5, 0x1
sra x2, x2, x5
li x3, 0x2aaaaaaaaaaaaaaa
bne x2, x3, TEST_FAIL
li x2, 0x5555555555555555
li x5, 0x41
sra x2, x2, x5
li x3, 0x2aaaaaaaaaaaaaaa
bne x2, x3, TEST_FAIL
li x2, 0xabcd1234abcd1234
sra x2, x2, x0
li x3, 0xabcd1234abcd1234
bne x2, x3, TEST_FAIL
.global SRAW
SRAW:
li x2, 0xaaaaaaaaaaaaaaaa
li x5, 0x1f
sraw x2, x2, x5
li x3, 0xffffffffffffffff
bne x2, x3, TEST_FAIL
li x2, 0x5555555555555555
li x5, 0x1f
sraw x2, x2, x5
li x3, 0
bne x2, x3, TEST_FAIL
li x2, 0x5555555555555555
li x5, 0x1
sraw x2, x2, x5
li x3, 0x2aaaaaaa
bne x2, x3, TEST_FAIL
li x2, 0x5555555555555555
li x5, 0x21
sraw x2, x2, x5
li x3, 0x2aaaaaaa
bne x2, x3, TEST_FAIL
li x2, 0xabcd1234abcd1234
sraw x2, x2, x0
li x3, 0xffffffffabcd1234
bne x2, x3, TEST_FAIL
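# The sections below move on to the M extension (MUL/MULH*/DIV* and their
# W variants). Operands are chosen to hit corner cases: INT64_MIN and
# INT32_MIN, all-ones, zero and mixed-sign values, with the W variants
# checked for correct 32-bit truncation and sign extension of the result.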
.global MUL
MUL:
li x3, 0x8000000000000000
li x4, 0x7fffffffffffffff
li x10,0xabcdeabcde
mul x5, x0, x3
bne x5, x0, TEST_FAIL
mul x5, x0, x0
bne x5, x0, TEST_FAIL
mul x5, x0, x4
bne x5, x0, TEST_FAIL
mul x5, x4, x4
li x6, 0x1
bne x5, x6, TEST_FAIL
mul x5, x4, x0
bne x5, x0, TEST_FAIL
mul x5, x4, x3
li x6, 0x8000000000000000
bne x5, x6, TEST_FAIL
mul x5, x4, x10
li x6, 0xffffff5432154322
bne x5, x6, TEST_FAIL
mul x5, x3, x3
li x6, 0x0
bne x5, x6, TEST_FAIL
mul x5, x3, x0
bne x5, x0, TEST_FAIL
mul x5, x3, x4
li x6, 0x8000000000000000
bne x6, x5, TEST_FAIL
mul x5, x3, x10
li x6, 0x0
bne x6, x5, TEST_FAIL
mul x5, x10, x10
li x6, 0xbd3a1a4d5d2ed084
bne x6, x5, TEST_FAIL
.global MULW
MULW:
li x3, 0xabcdabcd80000000
li x4, 0x7fffffff
li x10,0xabcde
mulw x5, x0, x3
bne x5, x0, TEST_FAIL
mulw x5, x0, x0
bne x5, x0, TEST_FAIL
mulw x5, x0, x4
bne x5, x0, TEST_FAIL
mulw x5, x4, x4
li x6, 0x1
bne x5, x6, TEST_FAIL
mulw x5, x4, x0
bne x5, x0, TEST_FAIL
mulw x5, x4, x3
li x6, 0xffffffff80000000
bne x5, x6, TEST_FAIL
mulw x5, x4, x10
li x6, 0xfffffffffff54322
bne x5, x6, TEST_FAIL
mulw x5, x3, x3
li x6, 0x0
bne x5, x6, TEST_FAIL
mulw x5, x3, x0
bne x5, x0, TEST_FAIL
mulw x5, x3, x4
li x6, 0xffffffff80000000
bne x6, x5, TEST_FAIL
mulw x5, x3, x10
li x6, 0x0
bne x6, x5, TEST_FAIL
mulw x5, x10, x10
li x6, 0x4caed084
bne x6, x5, TEST_FAIL
.global MULH
MULH:
li x3, 0x7fffffffffffffff
li x4, 0x8000000000000000
li x10,0xabcde
li x11,0xabcdeabcde
mulh x5, x0, x3
bne x5, x0, TEST_FAIL
mulh x5, x0, x0
bne x5, x0, TEST_FAIL
mulh x5, x0, x4
bne x5, x0, TEST_FAIL
mulh x5, x4, x3
li x6, 0xc000000000000000
bne x5, x6, TEST_FAIL
mulh x5, x4, x0
bne x5, x0, TEST_FAIL
mulh x5, x4, x4
li x6, 0x4000000000000000
bne x5, x6, TEST_FAIL
mulh x5, x4, x10
li x6, 0xfffffffffffaa191
bne x5, x6, TEST_FAIL
mulh x5, x11, x11
li x6, 0x734c
bne x6, x5, TEST_FAIL
.global MULHU
MULHU:
li x3, 0xffffffffffffffff
li x4, 0x23456
li x10, 0xabcde
mulhu x5, x0, x3
bne x5, x0, TEST_FAIL
mulhu x5, x0, x0
bne x5, x0, TEST_FAIL
mulhu x5, x0, x4
bne x5, x0, TEST_FAIL
mulhu x5, x4, x3
li x6, 0x23455
bne x5, x6, TEST_FAIL
mulhu x5, x4, x4
li x6,0x0
bne x5, x6, TEST_FAIL
mulhu x5, x3, x3
li x6, 0xfffffffffffffffe
bne x5, x6, TEST_FAIL
mulhu x5, x3, x4
li x6, 0x23455
bne x6, x5, TEST_FAIL
.global MULHSU
MULHSU:
li x3, 0x8000000000000000
li x4, 0x7fffffffffffffff
li x10,0x23456
mulhsu x5, x0, x3
bne x5, x0, TEST_FAIL
mulhsu x5, x0, x0
bne x5, x0, TEST_FAIL
mulhsu x5, x0, x4
bne x5, x0, TEST_FAIL
mulhsu x5, x0, x10
bne x5, x0, TEST_FAIL
mulhsu x5, x4, x3
li x6, 0x3fffffffffffffff
bne x5, x6, TEST_FAIL
mulhsu x5, x4, x4
li x6, 0x3fffffffffffffff
bne x5, x6, TEST_FAIL
mulhsu x5, x4, x10
li x6, 0x11a2a
bne x5, x6, TEST_FAIL
mulhsu x5, x3, x3
li x6, 0xc000000000000000
bne x5, x6, TEST_FAIL
mulhsu x5, x3, x4
li x6, 0xc000000000000000
bne x6, x5, TEST_FAIL
mulhsu x5, x3, x10
li x6, 0xfffffffffffee5d5
bne x6, x5, TEST_FAIL
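# Reference for the high-multiply forms checked above, as a C sketch using
# the compiler's 128-bit type:
#   mulh(a, b)   = (int64_t)(((__int128)(int64_t)a * (int64_t)b) >> 64);
#   mulhu(a, b)  = (uint64_t)(((unsigned __int128)a * b) >> 64);
#   mulhsu(a, b) = (int64_t)(((__int128)(int64_t)a * (uint64_t)b) >> 64);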
.global DIV
DIV:
li x2, 0x8000000000000000
li x3, 0xabcdabcdabcdabcd
li x4, 0x789a789a789a789a
li x5, 0xffffffffffffffff
div x6, x0, x4
li x7, 0x0
bne x6, x7, TEST_FAIL
div x6, x3, x4
li x7, 0x0
bne x6, x7, TEST_FAIL
div x6, x4, x3
li x7, 0xffffffffffffffff
bne x6, x7, TEST_FAIL
div x6, x3, x0
mv x7, x5
bne x6, x7, TEST_FAIL
div x6, x2, x5
mv x7, x2
bne x6, x7, TEST_FAIL
.global DIVW
DIVW:
li x2, 0xabcdabcd80000000
li x3, 0xabcdabcdabcdabcd
li x4, 0x789a789a789a789a
li x5, 0xffffffffffffffff
divw x6, x0, x4
li x7, 0x0
bne x6, x7, TEST_FAIL
divw x6, x3, x4
li x7, 0x0
bne x6, x7, TEST_FAIL
divw x6, x4, x3
li x7, 0xffffffffffffffff
bne x6, x7, TEST_FAIL
divw x6, x3, x0
mv x7, x5
bne x6, x7, TEST_FAIL
divw x6, x2, x5
li x7, 0xffffffff80000000
bne x6, x7, TEST_FAIL
.global DIVU
DIVU:
li x2, 0x8000000000000000
li x3, 0xabcdabcdabcdabcd
li x4, 0x789a789a789a789a
li x5, 0xffffffffffffffff
divu x6, x0, x4
li x7, 0x0
bne x6, x7, TEST_FAIL
divu x6, x3, x4
li x7, 0x1
bne x6, x7, TEST_FAIL
divu x6, x4, x3
li x7, 0x0
bne x6, x7, TEST_FAIL
divu x6, x3, x0
mv x7, x5
bne x6, x7, TEST_FAIL
divu x6, x2, x5
mv x7, x0
bne x6, x7, TEST_FAIL
.global DIVUW
DIVUW:
li x2, 0xabcdabcd80000000
li x3, 0xabcdabcdabcdabcd
li x4, 0x789a789a789a789a
li x5, 0xffffffffffffffff
divuw x6, x0, x4
li x7, 0x0
bne x6, x7, TEST_FAIL
divuw x6, x3, x4
li x7, 0x1
bne x6, x7, TEST_FAIL
divuw x6, x4, x3
li x7, 0x0
bne x6, x7, TEST_FAIL
divuw x6, x3, x0
mv x7, x5
bne x6, x7, TEST_FAIL
divuw x6, x2, x5
mv x7, x0
bne x6, x7, TEST_FAIL
.global REM
REM:
li x2, 0x8000000000000000
li x3, 0xabcdabcdabcdabcd
li x4, 0x789a789a789a789a
li x5, 0xffffffffffffffff
rem x6, x0, x4
li x7, 0x0
bne x6, x7, TEST_FAIL
rem x6, x3, x4
li x7, 0xabcdabcdabcdabcd
bne x6, x7, TEST_FAIL
rem x6, x4, x3
li x7, 0x2468246824682467
bne x6, x7, TEST_FAIL
rem x6, x3, x0
mv x7, x3
bne x6, x7, TEST_FAIL
rem x6, x2, x5
mv x7, x0
bne x6, x7, TEST_FAIL
.global REMW
REMW:
li x2, 0xabcdabcd80000000
li x3, 0xabcdabcdabcdabcd
li x4, 0x789a789a789a789a
li x5, 0xffffffffffffffff
remw x6, x0, x4
li x7, 0x0
bne x6, x7, TEST_FAIL
remw x6, x3, x4
li x7, 0xffffffffabcdabcd
bne x6, x7, TEST_FAIL
remw x6, x4, x3
li x7, 0x24682467
bne x6, x7, TEST_FAIL
remw x6, x3, x0
li x31, 0xffffffffabcdabcd
mv x7, x31
bne x6, x7, TEST_FAIL
remw x6, x2, x5
mv x7, x0
bne x6, x7, TEST_FAIL
.global REMU
REMU:
li x2, 0x8000000000000000
li x3, 0xabcdabcdabcdabcd
li x4, 0x789a789a789a789a
li x5, 0xffffffffffffffff
remu x6, x0, x4
li x7, 0x0
bne x6, x7, TEST_FAIL
remu x6, x3, x4
li x7, 0x3333333333333333
bne x6, x7, TEST_FAIL
remu x6, x4, x3
li x7, 0x789a789a789a789a
bne x6, x7, TEST_FAIL
remu x6, x3, x0
mv x7, x3
bne x6, x7, TEST_FAIL
remu x6, x2, x5
mv x7, x2
bne x6, x7, TEST_FAIL
.global REMUW
REMUW:
li x2, 0xabcdabcd80000000
li x3, 0xabcdabcdabcdabcd
li x4, 0x789a789a789a789a
li x5, 0xffffffffffffffff
remuw x6, x0, x4
li x7, 0x0
bne x6, x7, TEST_FAIL
remuw x6, x3, x4
li x7, 0x33333333
bne x6, x7, TEST_FAIL
remuw x6, x4, x3
li x7, 0x789a789a
bne x6, x7, TEST_FAIL
remuw x6, x3, x0
li x7, 0xffffffffabcdabcd
bne x6, x7, TEST_FAIL
remuw x6, x2, x5
li x7, 0xffffffff80000000
bne x6, x7, TEST_FAIL
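# The DIV*/REM* expectations above follow the RV64M corner-case rules:
# division by zero returns all ones as the quotient and the dividend as the
# remainder, and signed overflow (INT64_MIN / -1) returns the dividend with
# remainder 0. A C reference sketch:
#   int64_t rv_div(int64_t a, int64_t b) {
#       if (b == 0) return -1;
#       if (a == INT64_MIN && b == -1) return a;
#       return a / b;
#   }
#   int64_t rv_rem(int64_t a, int64_t b) {
#       if (b == 0) return a;
#       if (a == INT64_MIN && b == -1) return 0;
#       return a % b;
#   }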
.option rvc
.global CLI
CLI:
c.li x4, 0x1f
li x5, 0x1f
bne x4, x5, TEST_FAIL
c.li x4, -32
li x5, -32
bne x4, x5, TEST_FAIL
.global CLUI
CLUI:
c.lui x4, 0xfffe0
li x5, 0xfffffffffffe0000
bne x4, x5, TEST_FAIL
c.lui x4, 0x1f
li x5, 0x1f000
bne x4, x5, TEST_FAIL
.global CADDI
CADDI:
li x4, 0x8000000000000000
li x5, 0x7fffffffffffffe0
c.addi x4, -32
bne x4, x5, TEST_FAIL
li x4, 0x7fffffffffffffff
li x5, 0x800000000000001e
c.addi x4, 31
bne x4, x5, TEST_FAIL
.global CADDIW
CADDIW:
li x4, 0xabcdabcd80000000
li x5, 0x7fffffe0
c.addiw x4, -32
bne x4, x5, TEST_FAIL
li x4, 0x7fffffff
li x5, 0xffffffff8000001e
c.addiw x4, 31
bne x4, x5, TEST_FAIL
.global CADDI16SP
CADDI16SP:
li x2, 0x0
li x3, -512
c.addi16sp x2, -512
bne x2, x3, TEST_FAIL
li x2, 0xffffffffffffff20
li x3, 0xfffffffffffffee0
c.addi16sp x2, -64
bne x2, x3, TEST_FAIL
li x2, 0x0
li x3, 0x1f0
c.addi16sp x2, 0x1f0
bne x2, x3, TEST_FAIL
.global CADDI4SPN
CADDI4SPN:
li x2, 0x0
li x3, 0x3fc
c.addi4spn x8, x2, 0x3fc
bne x8, x3, TEST_FAIL
li x2, 0x0
li x3, 124
c.addi4spn x8, x2,0x7c
bne x8, x3, TEST_FAIL
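# Encoding notes for the compressed immediates used in these tests: c.li and
# c.andi take a sign-extended 6-bit immediate (-32..31), c.addi16sp adds a
# non-zero multiple of 16 in -512..496 to the stack pointer, and c.addi4spn
# writes sp plus a non-zero, zero-extended multiple of 4 (up to 1020) into
# one of x8..x15.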
.global CSLLI
CSLLI:
li x3, 0xaaaaaaaaaaaaaaaa
li x4, 0x0
c.slli x3, 0x3f
bne x3, x4, TEST_FAIL
li x3, 0xaaaa
li x4, 0x55550000
c.slli x3, 0xf
bne x3, x4, TEST_FAIL
li x8, 0xaaaaaaaaaaaaaaaa
li x4, 0x1
c.srli x8, 0x3f
bne x8, x4, TEST_FAIL
li x8, 0x5555555555555555
li x4, 0x555555555555555
c.srli x8, 0x4
bne x8, x4, TEST_FAIL
li x8, 0xffffffffffffffff
li x4, 0xfffffffffffffff
c.srli x8, 0x4
bne x8, x4, TEST_FAIL
.global CSRAI
CSRAI:
li x8, 0xaaaaaaaaaaaaaaaa
li x4, 0xffffffffffffffff
c.srai x8, 0x3f
bne x8, x4, TEST_FAIL
li x8, 0x5555555555555555
li x4, 0x555555555555555
c.srai x8, 0x4
bne x8, x4, TEST_FAIL
.global CANDI
CANDI:
li x8, 0xaaaaaaaaaaaaaaaa
li x4, 0xaaaaaaaaaaaaaaaa
c.andi x8, -1
bne x8, x4, TEST_FAIL
li x8, 0xaaaaaaaaaaaaaaaa
li x4, 0xa
c.andi x8, 0x1f
bne x8, x4, TEST_FAIL
li x8, 0xaaaaaaaa
li x4, 0xaaaaaaa0
c.andi x8, -32
bne x8, x4, TEST_FAIL
.global CMV
CMV:
li x4, 0xabcdabcdabcdabcd
li x5, 0x0
c.mv x5, x4
bne x4, x5, TEST_FAIL
.global CADD
CADD:
li x3, 0xffffffffffffffff
li x4, 0xffffffffffffffff
c.add x3, x4
li x5, 0xfffffffffffffffe
bne x3, x5, TEST_FAIL
li x3, 0x8000000000000000
li x4, 0x8000000000000000
c.add x3, x4
li x5, 0x0
bne x3, x5, TEST_FAIL
li x3, 0x7fffffffffffffff
li x4, 0x7fffffffffffffff
c.add x3, x4
li x5, 0xfffffffffffffffe
bne x3, x5, TEST_FAIL
.global CADDW
CADDW:
li x8, 0xabcdabcdffffffff
li x9, 0xabcdabcdffffffff
c.addw x8, x9
li x10, 0xfffffffffffffffe
bne x8, x10, TEST_FAIL
li x8, 0xabcdabcd80000000
li x9, 0xabcdabcd80000000
c.addw x8, x9
li x10, 0x0
bne x8, x10, TEST_FAIL
li x8, 0xabcdabcd7fffffff
li x9, 0xabcdabcd7fffffff
c.addw x8, x9
li x10, 0xfffffffffffffffe
bne x8, x10, TEST_FAIL
.global CAND
CAND:
li x8, 0x0
li x9, 0xffffffffffffffff
li x5, 0xabcdabcdabcdabcd
li x10, 0xabcdabcdabcdabcd
c.and x10, x9
bne x10, x5, TEST_FAIL
li x10, 0xabcdabcdabcdabcd
c.and x10, x8
bne x10, x8, TEST_FAIL
.global COR
COR:
li x8, 0x0
li x9, 0xffffffffffffffff
li x5, 0xabcdabcdabcdabcd
li x10, 0xabcdabcdabcdabcd
c.or x10, x9
bne x10, x9, TEST_FAIL
li x5, 0xabcdabcdabcdabcd
li x10, 0xabcdabcdabcdabcd
c.or x10, x8
bne x10, x5, TEST_FAIL
.global CXOR
CXOR:
li x8, 0x0
li x9, 0xffffffffffffffff
li x10, 0xabcdabcdabcdabcd
not x5, x10
c.xor x10, x9
bne x10, x5, TEST_FAIL
li x5, 0xabcdabcdabcdabcd
li x10, 0xabcdabcdabcdabcd
c.xor x10, x8
bne x10, x5, TEST_FAIL
.global CSUB
CSUB:
li x8, 0x800000000000000
li x9, 0x7ffffffffffffff
c.sub x8, x9
li x5, 0x1
bne x8, x5, TEST_FAIL
li x8, 0x7fffffffffffffff
li x9, 0x8000000000000000
c.sub x8, x9
li x5, 0xffffffffffffffff
bne x8, x5, TEST_FAIL
li x8, 0xf
li x9, 0x2
c.sub x8, x9
li x5, 0xd
bne x8, x5, TEST_FAIL
.global CSUBW
CSUBW:
li x8, 0xabcdabcd80000000
li x9, 0xabcdabcd7fffffff
c.subw x8, x9
li x5, 0x1
bne x8, x5, TEST_FAIL
li x8, 0xabcdabcd7fffffff
li x9, 0xabcdabcd80000000
c.subw x8, x9
li x5, 0xffffffffffffffff
bne x8, x5, TEST_FAIL
li x8, 0xf
li x9, 0x2
c.subw x8, x9
li x5, 0xd
bne x8, x5, TEST_FAIL
.option norvc
.global STORE1
STORE1:
li x3,0xffffffffffffffff
li x8,0xaaaaaaaaaaaaaaaa
li x4,0x000000000000a000
sd x8,0xfffffffffffffff8(x4)
ld x5,0xfffffffffffffff8(x4)
bne x8,x5,TEST_FAIL
sd x8,0x7f8(x4)
ld x5,0x7f8(x4)
bne x8,x5,TEST_FAIL
sd x3,0x0(x4)
ld x5,0x0(x4)
bne x3,x5,TEST_FAIL
.global STORE2
STORE2:
li x6,0xffffffff80000000
li x8,0x0000000080000000
li x9,0xaaaaaaaaaaaaaaaa
li x10,0xaaaaaaaa80000000
sd x9,0x0(x4)
sw x6,0x0(x4)
lwu x7,0x0(x4)
lw x5,0x0(x4)
ld x11,0x0(x4)
bne x7,x8,TEST_FAIL
bne x6,x5,TEST_FAIL
bne x11,x10,TEST_FAIL
.global STORE3
STORE3:
sw x6,0xfffffffffffffffc(x4)
lwu x7,0xfffffffffffffffc(x4)
lw x5,0xfffffffffffffffc(x4)
bne x7,x8,TEST_FAIL
bne x6,x5,TEST_FAIL
sw x6,0x7fc(x4)
lwu x7,0x7fc(x4)
lw x5,0x7fc(x4)
bne x7,x8,TEST_FAIL
bne x6,x5,TEST_FAIL
.global STORE4
STORE4:
li x6,0xffffffffffff8000
li x8,0x0000000000008000
li x9,0xaaaaaaaaaaaaaaaa
li x10,0xaaaaaaaaaaaa8000
li x31,0xffffffffaaaa8000
sd x9,0x0(x4)
sh x6,0x0(x4)
lhu x7,0x0(x4)
lh x5,0x0(x4)
lw x11,0x0(x4)
bne x7,x8,TEST_FAIL
bne x6,x5,TEST_FAIL
bne x11,x31,TEST_FAIL
.global STORE5
STORE5:
sh x6,0xfffffffffffffffe(x4)
lhu x7,0xfffffffffffffffe(x4)
lh x5,0xfffffffffffffffe(x4)
bne x7,x8,TEST_FAIL
bne x6,x5,TEST_FAIL
.global STORE6
STORE6:
sh x6,0x7fe(x4)
lhu x7,0x7fe(x4)
lh x5,0x7fe(x4)
bne x7,x8,TEST_FAIL
bne x6,x5,TEST_FAIL
.global STORE7
STORE7:
li x3,0xffffffffffffffff
li x8,0x00000000000000ff
li x9,0xaaaaaaaaaaaaaaaa
li x10,0xaaaaaaaaaaaaaaff
li x31,0xffffffffaaaaaaff
sd x9,0x0(x4)
sb x3,0x0(x4)
lbu x7,0x0(x4)
lb x5,0x0(x4)
lw x11,0x0(x4)
bne x7,x8,TEST_FAIL
bne x3,x5,TEST_FAIL
bne x31,x11,TEST_FAIL
.global STORE8
STORE8:
sb x3,0xffffffffffffffff(x4)
lbu x7,0xffffffffffffffff(x4)
lb x5,0xffffffffffffffff(x4)
bne x7,x8,TEST_FAIL
bne x3,x5,TEST_FAIL
sb x3,0x7ff(x4)
lbu x7,0x7ff(x4)
lb x5,0x7ff(x4)
bne x7,x8,TEST_FAIL
bne x3,x5,TEST_FAIL
li x8,0xaaaaaaaaaaaaaaaa
li x4,0x000000000010000
li x5,0x000000000010000
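# The srb/srh/srw/srd (and surb/surh/surw/surd) forms below are not base
# RV64GC instructions; they appear to be vendor indexed stores (T-Head
# XTheadMemIdx style), storing rd at rs1 + (rs2 << imm2). With
# rs1 = rs2 = 0x10000 the four shift values land on 0x20000, 0x30000,
# 0x50000 and 0x90000, which is why x29 reads back from those addresses.
# Rough C sketch of the assumed byte-store semantics:
#   *(uint8_t *)(rs1 + (rs2 << imm2)) = (uint8_t)rd;   /* srb */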
.global strb0
strb0:
li x29,0x000000000020000
sd x0,0x0(x29)
srb x8,x4,x5,0
ld x31,0x0(x29)
li x30,0xaa
bne x30,x31,TEST_FAIL
.global strb1
strb1:
li x29,0x000000000030000
sd x0,0x0(x29)
srb x8,x4,x5,1
ld x31,0x0(x29)
li x30,0xaa
bne x30,x31,TEST_FAIL
.global strb2
strb2:
li x29,0x000000000050000
sd x0,0x0(x29)
srb x8,x4,x5,2
ld x31,0x0(x29)
li x30,0xaa
bne x30,x31,TEST_FAIL
.global strb3
strb3:
li x29,0x000000000090000
sd x0,0x0(x29)
srb x8,x4,x5,3
ld x31,0x0(x29)
li x30,0xaa
bne x30,x31,TEST_FAIL
.global strh0
strh0:
li x29,0x000000000020000
sd x0,0x0(x29)
srh x8,x4,x5,0
ld x31,0x0(x29)
li x30,0xaaaa
bne x30,x31,TEST_FAIL
.global strh1
strh1:
li x29,0x000000000030000
sd x0,0x0(x29)
srh x8,x4,x5,1
ld x31,0x0(x29)
li x30,0xaaaa
bne x30,x31,TEST_FAIL
.global strh2
strh2:
li x29,0x000000000050000
sd x0,0x0(x29)
srh x8,x4,x5,2
ld x31,0x0(x29)
li x30,0xaaaa
bne x30,x31,TEST_FAIL
.global strh3
strh3:
li x29,0x000000000090000
sd x0,0x0(x29)
srh x8,x4,x5,3
ld x31,0x0(x29)
li x30,0xaaaa
bne x30,x31,TEST_FAIL
.global strw0
strw0:
li x29,0x000000000020000
sd x0,0x0(x29)
srw x8,x4,x5,0
ld x31,0x0(x29)
li x30,0xaaaaaaaa
bne x30,x31,TEST_FAIL
.global strw1
strw1:
li x29,0x000000000030000
sd x0,0x0(x29)
srw x8,x4,x5,1
ld x31,0x0(x29)
li x30,0xaaaaaaaa
bne x30,x31,TEST_FAIL
.global strw2
strw2:
li x29,0x000000000050000
sd x0,0x0(x29)
srw x8,x4,x5,2
ld x31,0x0(x29)
li x30,0xaaaaaaaa
bne x30,x31,TEST_FAIL
.global strw3
strw3:
li x29,0x000000000090000
sd x0,0x0(x29)
srw x8,x4,x5,3
ld x31,0x0(x29)
li x30,0xaaaaaaaa
bne x30,x31,TEST_FAIL
.global strd0
strd0:
li x29,0x000000000020000
sd x0,0x0(x29)
srd x8,x4,x5,0
ld x31,0x0(x29)
li x30,0xaaaaaaaaaaaaaaaa
bne x30,x31,TEST_FAIL
.global strd1
strd1:
li x29,0x000000000030000
sd x0,0x0(x29)
srd x8,x4,x5,1
ld x31,0x0(x29)
li x30,0xaaaaaaaaaaaaaaaa
bne x30,x31,TEST_FAIL
.global strd2
strd2:
li x29,0x000000000050000
sd x0,0x0(x29)
srd x8,x4,x5,2
ld x31,0x0(x29)
li x30,0xaaaaaaaaaaaaaaaa
bne x30,x31,TEST_FAIL
.global strd3
strd3:
li x29,0x000000000090000
sd x0,0x0(x29)
srd x8,x4,x5,3
ld x31,0x0(x29)
li x30,0xaaaaaaaaaaaaaaaa
bne x30,x31,TEST_FAIL
li x8,0xaaaaaaaaaaaaaaaa
li x4,0x000000000010000
li x5,0x000000000010000
.global sutrb0
sutrb0:
li x29,0x000000000020000
sd x0,0x0(x29)
surb x8,x4,x5,0
ld x31,0x0(x29)
li x30,0xaa
bne x30,x31,TEST_FAIL
.global sturb1
sturb1:
li x29,0x000000000030000
sd x0,0x0(x29)
surb x8,x4,x5,1
ld x31,0x0(x29)
li x30,0xaa
bne x30,x31,TEST_FAIL
.global sturb2
sturb2:
li x29,0x000000000050000
sd x0,0x0(x29)
surb x8,x4,x5,2
ld x31,0x0(x29)
li x30,0xaa
bne x30,x31,TEST_FAIL
.global sturb3
sturb3:
li x29,0x000000000090000
sd x0,0x0(x29)
surb x8,x4,x5,3
ld x31,0x0(x29)
li x30,0xaa
bne x30,x31,TEST_FAIL
.global sturh0
sturh0:
li x29,0x000000000020000
sd x0,0x0(x29)
surh x8,x4,x5,0
ld x31,0x0(x29)
li x30,0xaaaa
bne x30,x31,TEST_FAIL
.global sturh1
sturh1:
li x29,0x000000000030000
sd x0,0x0(x29)
surh x8,x4,x5,1
ld x31,0x0(x29)
li x30,0xaaaa
bne x30,x31,TEST_FAIL
.global sturh2
sturh2:
li x29,0x000000000050000
sd x0,0x0(x29)
surh x8,x4,x5,2
ld x31,0x0(x29)
li x30,0xaaaa
bne x30,x31,TEST_FAIL
.global sturh3
sturh3:
li x29,0x000000000090000
sd x0,0x0(x29)
surh x8,x4,x5,3
ld x31,0x0(x29)
li x30,0xaaaa
bne x30,x31,TEST_FAIL
.global sturw0
sturw0:
li x29,0x000000000020000
sd x0,0x0(x29)
surw x8,x4,x5,0
ld x31,0x0(x29)
li x30,0xaaaaaaaa
bne x30,x31,TEST_FAIL
.global sturw1
sturw1:
li x29,0x000000000030000
sd x0,0x0(x29)
surw x8,x4,x5,1
ld x31,0x0(x29)
li x30,0xaaaaaaaa
bne x30,x31,TEST_FAIL
.global sturw2
sturw2:
li x29,0x000000000050000
sd x0,0x0(x29)
surw x8,x4,x5,2
ld x31,0x0(x29)
li x30,0xaaaaaaaa
bne x30,x31,TEST_FAIL
.global sturw3
sturw3:
li x29,0x000000000090000
sd x0,0x0(x29)
surw x8,x4,x5,3
ld x31,0x0(x29)
li x30,0xaaaaaaaa
bne x30,x31,TEST_FAIL
.global sturd0
sturd0:
li x29,0x000000000020000
sd x0,0x0(x29)
surd x8,x4,x5,0
ld x31,0x0(x29)
li x30,0xaaaaaaaaaaaaaaaa
bne x30,x31,TEST_FAIL
.global sturd1
sturd1:
li x29,0x000000000030000
sd x0,0x0(x29)
surd x8,x4,x5,1
ld x31,0x0(x29)
li x30,0xaaaaaaaaaaaaaaaa
bne x30,x31,TEST_FAIL
.global sturd2
sturd2:
li x29,0x000000000050000
sd x0,0x0(x29)
surd x8,x4,x5,2
ld x31,0x0(x29)
li x30,0xaaaaaaaaaaaaaaaa
bne x30,x31,TEST_FAIL
.global sturd3
sturd3:
li x29,0x000000000090000
sd x0,0x0(x29)
surd x8,x4,x5,3
ld x31,0x0(x29)
li x30,0xaaaaaaaaaaaaaaaa
bne x30,x31,TEST_FAIL
.global double_ld_st_1
double_ld_st_1:
li x12,0x786755d775fe6756
fmv.d.x f12,x12
fsd f12,-2048(x4)
fld f19,-2048(x4)
fmv.x.d x19,f19
bne x12,x19, TEST_FAIL
.global double_ld_st_2
double_ld_st_2:
li x12,0x786755d775fe6756
fmv.d.x f12,x12
fsd f12,0x7f8(x4)
fld f19,0x7f8(x4)
fmv.x.d x19,f19
bne x12,x19, TEST_FAIL
.global float_ld_st_1
float_ld_st_1:
li x14,0x786755d775fe6756
li x13,0xffffffff75fe6756
fmv.d.x f14,x14
fsw f14,-2048(x4)
flw f20,-2048(x4)
fmv.x.d x20,f20
bne x13,x20, TEST_FAIL
.global float_ld_st_2
float_ld_st_2:
li x14,0x786755d775fe6756
li x13,0xffffffff75fe6756
fmv.d.x f14,x14
fsw f14,0x7fc(x4)
flw f20,0x7fc(x4)
fmv.x.d x20,f20
bne x13,x20, TEST_FAIL
jal x1,BRANCH_LABEL
.global A
A:
jal x0,JREG_LABEL
.global BRANCH_LABEL
BRANCH_LABEL:
li x4,0x8000000000000000
li x3,0xffffffffffffffff
beq x0,x4,TEST_FAIL
beq x4,x4,BNE_LABEL
beq x0,x0,TEST_FAIL
.global BNE_LABEL
BNE_LABEL:
bne x4,x4,TEST_FAIL
bne x0,x4,BLT_LABEL
beq x0,x0,TEST_FAIL
.global BLT_LABEL
BLT_LABEL:
blt x0,x4,TEST_FAIL
blt x4,x4,TEST_FAIL
blt x4,x0,BGE_LABEL
beq x0,x0,TEST_FAIL
.global BGE_LABEL
BGE_LABEL:
bge x4,x3,TEST_FAIL
bge x3,x4,BGEE_LABEL
beq x0,x0,TEST_FAIL
.global BGEE_LABEL
BGEE_LABEL:
bge x3,x3,BGEU_LABEL
beq x0,x0,TEST_FAIL
.global BGEU_LABEL
BGEU_LABEL:
bgeu x4,x3,TEST_FAIL
bgeu x3,x4,BGEUE_LABEL
beq x0,x0,TEST_FAIL
.global BGEUE_LABEL
BGEUE_LABEL:
bgeu x3,x3,BLTU_LABEL
beq x0,x0,TEST_FAIL
.global BLTU_LABEL
BLTU_LABEL:
bltu x4,x0,TEST_FAIL
bltu x4,x4,TEST_FAIL
bltu x0,x4,J_LABEL
beq x0,x0,TEST_FAIL
.global J_LABEL
J_LABEL:
jalr x1,x1,0x0
.global B
B:
jal x0,C_LABEL
.global JREG_LABEL
JREG_LABEL:
jalr x0,x1,0x0
.global C_LABEL
C_LABEL:
.option rvc
li x8,0xffffffffffffffff
li x9,0xaaaaaaaaaaaaaaaa
.global A2
A2:
li x10,0x0000b000
c.sd x8,0x0(x10)
ld x11,0x0(x10)
bne x8,x11,TEST_FAIL
c.sd x8,0xf8(x10)
ld x11,0xf8(x10)
bne x8,x11,TEST_FAIL
c.sd x8,0x38(x10)
ld x11,0x38(x10)
bne x8,x11,TEST_FAIL
c.sd x8,0x80(x10)
ld x11,0x80(x10)
bne x8,x11,TEST_FAIL
.global A1
A1:
sd x9,0x80(x10)
c.ld x11,0x80(x10)
bne x9,x11,TEST_FAIL
sd x9,0x38(x10)
c.ld x11,0x38(x10)
bne x9,x11,TEST_FAIL
sd x9,0xf8(x10)
c.ld x11,0xf8(x10)
bne x9,x11,TEST_FAIL
sd x9,0x0(x10)
c.ld x11,0x0(x10)
bne x9,x11,TEST_FAIL
li x8,0xffffffffffffffff
li x9,0xaaaaaaaaaaaaaaaa
.global A3
A3:
li x10,0x0000b000
c.sw x8,0x0(x10)
lw x11,0x0(x10)
bne x8,x11,TEST_FAIL
c.sw x8,0x7c(x10)
lw x11,0x7c(x10)
bne x8,x11,TEST_FAIL
c.sw x8,0x3c(x10)
lw x11,0x3c(x10)
bne x8,x11,TEST_FAIL
c.sw x8,0x40(x10)
lw x11,0x40(x10)
bne x8,x11,TEST_FAIL
.global A9
A9:
sw x9,0x40(x10)
c.lw x11,0x40(x10)
li x31,0xffffffffaaaaaaaa
bne x31,x11,TEST_FAIL
sw x9,0x3c(x10)
c.lw x11,0x3c(x10)
li x31,0xffffffffaaaaaaaa
bne x31,x11,TEST_FAIL
sw x9,0x7c(x10)
c.lw x11,0x7c(x10)
li x31,0xffffffffaaaaaaaa
bne x31,x11,TEST_FAIL
sw x9,0x0(x10)
c.lw x11,0x0(x10)
li x31,0xffffffffaaaaaaaa
bne x31,x11,TEST_FAIL
.global A5
A5:
li x8,0x5555555555555555
li x9,0xaaaaaaaaaaaaaaaa
li x2,110000
mv x7,x2
c.sdsp x8,0x0(sp)
ld x12,0x0(x7)
bne x8,x12,TEST_FAIL
c.sdsp x9,0x1f8(sp)
ld x12,0x1f8(x7)
bne x9,x12,TEST_FAIL
c.sdsp x8,0x80(sp)
ld x12,0x80(x7)
bne x8,x12,TEST_FAIL
c.sdsp x8,0x78(sp)
ld x12,0x78(x7)
bne x8,x12,TEST_FAIL
.global A6
A6:
sd x7,0x0(x7)
c.ldsp x12,0x0(sp)
bne x7,x12,TEST_FAIL
sd x7,0x80(x7)
c.ldsp x12,0x80(sp)
bne x7,x12,TEST_FAIL
sd x7,0x1f8(x7)
c.ldsp x12,0x1f8(sp)
bne x7,x12,TEST_FAIL
sd x7,0x78(x7)
c.ldsp x12,0x78(sp)
bne x7,x12,TEST_FAIL
c.sdsp x9,0x1f8(sp)
c.ldsp x12,0x1f8(sp)
bne x9,x12,TEST_FAIL
.global A7
A7:
li x8,0x5555555555555555
li x9,0xaaaaaaaaaaaaaaaa
li x2,110000
mv x7,x2
c.swsp x8,0x0(sp)
lw x12,0x0(x7)
li x31,0x55555555
bne x31,x12,TEST_FAIL
c.swsp x9,0xfc(sp)
lw x12,0xfc(x7)
li x30,0xffffffffaaaaaaaa
bne x30,x12,TEST_FAIL
c.swsp x8,0x80(sp)
lw x12,0x80(x7)
li x31,0x55555555
bne x31,x12,TEST_FAIL
c.swsp x8,0x7c(sp)
lw x12,0x7c(x7)
li x31,0x55555555
bne x31,x12,TEST_FAIL
.global A8
A8:
sw x7,0x0(x7)
c.lwsp x12,0x0(sp)
bne x7,x12,TEST_FAIL
sw x7,0x80(x7)
c.lwsp x12,0x80(sp)
bne x7,x12,TEST_FAIL
sw x7,0xfc(x7)
c.lwsp x12,0xfc(sp)
bne x7,x12,TEST_FAIL
sw x7,0x7c(x7)
c.lwsp x12,0x7c(sp)
bne x7,x12,TEST_FAIL
c.swsp x9,0xfc(sp)
c.lwsp x12,0xfc(sp)
li x31, 0xffffffffaaaaaaaa
bne x31,x12,TEST_FAIL
.global c_double_ld_st
c_double_ld_st:
li x12,0x98778fffff907864
fmv.d.x f12,x12
c.fsd f12,0xf8(x10)
c.fld f13,0xf8(x10)
fmv.x.d x19,f13
bne x12,x19, TEST_FAIL
.global c_double_ld_st_2
c_double_ld_st_2:
li x14,0x786755d775fe6756
fmv.d.x f14,x14
c.fsd f14,0x0(x10)
c.fld f13,0x0(x10)
fmv.x.d x20,f13
bne x14,x20, TEST_FAIL
.global c_double_ld_st_sp
c_double_ld_st_sp:
li x12,0x894231678564fda2
fmv.d.x f12,x12
c.fsdsp f12,0x1f8(x2)
c.fldsp f19,0x1f8(x2)
fmv.x.d x19,f19
bne x12,x19, TEST_FAIL
.global c_double_ld_st_sp_2
c_double_ld_st_sp_2:
li x12,0xfdad34455482da13
fmv.d.x f12,x12
c.fsdsp f12,0x0(x2)
c.fldsp f19,0x0(x2)
fmv.x.d x19,f19
bne x12,x19, TEST_FAIL
jal x2,JR_LABEL1
.global C
C:
c.jr x1
.global JR_LABEL1
JR_LABEL1:
c.j BRANCH_LABEL_16
beq x8,x8,TEST_FAIL
.global BRANCH_LABEL_16
BRANCH_LABEL_16:
li x10,0x8000000000000000
li x9,0x0000000000000000
li x8,0xffffffffffffffff
c.beqz x8,TEST_FAIL
c.beqz x10,TEST_FAIL
c.beqz x9,BNEZ_LABEL
beq x0,x0,TEST_FAIL
.global BNEZ_LABEL
BNEZ_LABEL:
c.bnez x9,TEST_FAIL
c.bnez x10,C_J
.global C_J
C_J:
c.jalr x2
.global D
D:
jal x1,FENCE_LABEL
beq x0,x0,TEST_FAIL
.global FENCE_LABEL
FENCE_LABEL:
li x8,0x0
li x9,0x0
li x10,0xffffffffffffffff
li x11,0xaaa
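# The mie tests below exercise the standard Zicsr access forms: csrrw swaps
# the CSR with rs1, csrrs reads it and sets the bits in rs1, csrrc reads it
# and clears those bits, and the *i variants use a 5-bit zero-extended
# immediate instead of rs1; for the set/clear forms, rs1 = x0 (or imm = 0)
# leaves the CSR untouched. Roughly:
#   old = csr; csr = set ? (csr | src) : clear ? (csr & ~src) : src; rd = old;
# Read values are masked with 0xfff before comparing, since mie bits above
# the standard interrupt-enable field may be implementation-defined.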
.global MIE_CSRRWL
MIE_CSRRWL:
csrrw x1,mie,x10
csrrw x1,mie,x0
li x2,0xfff
and x1,x1,x2
bne x1,x11,TEST_FAIL
csrrw x1,mie,x11
bne x1,x9,TEST_FAIL
csrrw x0,mie,x9
bne x0,x8,TEST_FAIL
.global MIE_CSRRWI
MIE_CSRRWI:
csrrwi x1,mie,0x1f
bne x1,x9,TEST_FAIL
csrrwi x1,mie,0x0
li x12,0xa
bne x1,x12,TEST_FAIL
csrrwi x1,mie,0x1f
bne x1,x9,TEST_FAIL
csrrwi x0,mie,0x1f
bne x0,x8,TEST_FAIL
csrrw x0,mie,x9
.global MIE_CSRRS
MIE_CSRRS:
csrrs x1,mie,x10
bne x1,x9,TEST_FAIL
csrrs x1,mie,x0
li x2,0xfff
and x1,x1,x2
bne x1,x11,TEST_FAIL
csrrs x1,mie,x8
li x2,0xfff
and x1,x1,x2
bne x1,x11,TEST_FAIL
csrrs x1,mie,x0
li x2,0xfff
and x1,x1,x2
bne x1,x11,TEST_FAIL
csrrs x0,mie,x0
bne x0,x8,TEST_FAIL
csrrw x0,mie,x11
.global MIE_CSRRC
MIE_CSRRC:
csrrc x1,mie,x8
bne x1,x11,TEST_FAIL
csrrc x1,mie,x0
bne x1,x11,TEST_FAIL
csrrc x1,mie,x10
bne x1,x11,TEST_FAIL
csrrc x1,mie,x0
bne x1,x9,TEST_FAIL
csrrw x0,mie,x11
csrrc x0,mie,x0
bne x0,x8,TEST_FAIL
csrrw x0,mie,x9
.global MIE_CSRRSI
MIE_CSRRSI:
csrrsi x1,mie,0x0
bne x1,x9,TEST_FAIL
csrrsi x1,mie,0x1f
bne x1,x9,TEST_FAIL
csrrsi x1,mie,0x0
li x12,0xa
bne x1,x12,TEST_FAIL
csrrsi x0,mie,0x0
bne x0,x8,TEST_FAIL
csrrw x0,mie,x10
.global MIE_CSRRCI
MIE_CSRRCI:
csrrci x1,mie,0x0
li x2,0xfff
and x1,x1,x2
bne x1,x11,TEST_FAIL
csrrci x1,mie,0x0
li x2,0xfff
and x1,x1,x2
bne x1,x11,TEST_FAIL
csrrci x0,mie,0x1f
bne x0,x8,TEST_FAIL
csrrci x1,mie,0x0
li x2,0xfff
and x1,x1,x2
li x12,0xaa0
bne x1,x12,TEST_FAIL
csrrw x0,mie,x9
.global a_inst
a_inst:
li x2,0x10000
li x3,0x20000
li x11,0x10000
li x12,0x10008
.global check_lr
check_lr:
sw x11,0x0(x2)
lr.w x13,0x0(x2)
lw x14,0x0(x2)
bne x13,x14,TEST_FAIL
.global check_sc
check_sc:
sw x11,0x0(x2)
lr.w x0,0x0(x3)
sc.w x0,x11,(x3)
lw x13,0x0(x2)
lw x14,0x0(x3)
bne x13,x14,TEST_FAIL
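# lr.w loads a word and registers a reservation on the address; sc.w writes
# only if the reservation is still valid and returns 0 on success (non-zero
# on failure). The directed check above discards the sc.w result (rd = x0)
# and simply compares the two memory words. A typical retry loop looks like:
#   retry:
#       lr.w    t0, (a0)
#       add     t1, t0, a1
#       sc.w    t2, t1, (a0)
#       bnez    t2, retry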
.global check_amoswap
check_amoswap:
ori x11,x11,0x00000024A
andi x11,x11,-8
li x15,0xD777CA25BB980856
sd x15,0x000000000(x11)
li x9,0xE3434812C6482D68
amoswap.d x13,x9, (x11)
bne x15,x13,TEST_FAIL
ld x16,(x11)
bne x16,x9,TEST_FAIL
.global check_amoadd
check_amoadd:
ori x11,x11,0x0000002AC
andi x11,x11,-8
li x15,0x3C76E8FBD95888C1
sd x15,0x000000000(x11)
li x9,0xDB483507CBC52F0F
amoadd.d x13,x9, (x11)
bne x15,x13, TEST_FAIL
ld x16,(x11)
add x9,x9,x15
bne x16,x9,TEST_FAIL
.global check_amoor
check_amoor:
ori x11,x11,0x00000042A
andi x11,x11,-8
li x15,0x15E0C8ABA1ADBAB6
sd x15,0x000000000(x11)
li x9,0xC0C293DF16F60603
amoor.d x13,x9, (x11)
bne x15,x13, TEST_FAIL
ld x16,(x11)
or x9,x9,x15
bne x16,x9,TEST_FAIL
.global check_amoand
check_amoand:
ori x11,x11,0x0000005B1
andi x11,x11,-8
li x15,0xDB633D4EB949799B
sd x15,0x000000000(x11)
li x9,0x1D8923A570E63AF6
amoand.d x13,x9, (x11)
bne x15,x13, TEST_FAIL
ld x16,(x11)
and x9,x9,x15
bne x16,x9,TEST_FAIL
.global check_amoxor
check_amoxor:
ori x11,x11,0x00000067B
andi x11,x11,-8
li x15,0xDCAB6DBBF9C65CC2
sd x15,0x000000000(x11)
li x9,0xBE6DBF4C4CCE1B93
amoxor.d x13,x9, (x11)
bne x15,x13, TEST_FAIL
ld x16,(x11)
xor x9,x9,x15
bne x16,x9,TEST_FAIL
.global check_amomin
check_amomin:
ori x11,x11,0x00000073C
andi x11,x11,-8
li x15,0x914AA80CF74A1F3
sd x15,0x000000000(x11)
li x9,0x4B3A91CD3EB0E100
amomin.d x13,x9, (x11)
bne x15,x13, TEST_FAIL
ld x16,(x11)
blt x9,x13,mem_val
mv x9,x13
.global mem_val
mem_val:
mv x9,x9
bne x16,x9,TEST_FAIL
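# The branch/mv pair above computes the expected memory value in software:
# after amomin.d, x13 holds the old memory word, so x9 is replaced with the
# signed minimum of x9 and x13 before being compared against the value read
# back. The same pattern is repeated for amominu/amomax/amomaxu with the
# matching unsigned/maximum comparison.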
.global check_amominu
check_amominu:
ori x11,x11,0x000000281
andi x11,x11,-8
li x15,0x47E5C00BDF5001B2
sd x15,0x000000000(x11)
li x9,0xC2A4F93901AA970E
amominu.d x13,x9, (x11)
bne x15,x13, TEST_FAIL
ld x16,(x11)
bltu x9,x13,mem_val2
mv x9,x13
.global mem_val2
mem_val2:
mv x9,x9
bne x16,x9,TEST_FAIL
.global check_amomax
check_amomax:
ori x11,x11,0x00000035F
andi x11,x11,-8
li x15,0x422A83AD7A959902
sd x15,0x000000000(x11)
li x9,0x6ED2A57104419E8D
amomax.d x13,x9, (x11)
bne x15,x13, TEST_FAIL
ld x16,(x11)
bge x9,x13,mem_val3
mv x9,x13
.global mem_val3
mem_val3:
mv x9,x9
bne x16,x9,TEST_FAIL
.global check_amomaxu
check_amomaxu:
ori x11,x11,0x00000023D
andi x11,x11,-8
li x15,0xDFCF4ACD38C9FBE9
sd x15,0x000000000(x11)
li x9,0x4BD4607A94336FE5
amomaxu.d x13,x9, (x11)
bne x15,x13, TEST_FAIL
ld x16,(x11)
bgeu x9,x13,mem_val4
mv x9,x13
.global mem_val4
mem_val4:
mv x9,x9
bne x16,x9,TEST_FAIL
li x9,0
li x15,0
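# The word-sized AMO tests below also carry the ordering suffixes: .aq adds
# acquire semantics, .rl adds release semantics, and .aqrl adds both. The
# functional (value) behaviour checked here is the same as for the
# unannotated forms.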
.global check_amoswap_w
check_amoswap_w:
ori x11,x11,0x0000001D8
andi x11,x11,-8
li x15,0x026775BDA
sw x15,0x000000000(x11)
li x9,0x00DD7B0FD
amoswap.w.aqrl x13,x9, (x11)
bne x15,x13, TEST_FAIL
lwu x16,(x11)
bne x16,x9,TEST_FAIL
.global check_amoadd_w
check_amoadd_w:
ori x11,x11,0x00000047C
andi x11,x11,-8
li x15,0x0764E1A4E
sw x15,0x000000000(x11)
li x9,0x036571F5E
amoadd.w.rl x13,x9, (x11)
bne x15,x13, TEST_FAIL
lwu x16,(x11)
add x9,x9,x15
bne x16,x9,TEST_FAIL
.global check_amoor_w
check_amoor_w:
ori x11,x11,0x0000001A0
andi x11,x11,-8
li x15,0x03C6C8F73
sw x15,0x000000000(x11)
li x9,0x06E7A8DC1
amoor.w x13,x9, (x11)
bne x15,x13, TEST_FAIL
lwu x16,(x11)
or x9,x9,x15
bne x16,x9,TEST_FAIL
.global check_amoand_w
check_amoand_w:
ori x11,x11,0x000000133
andi x11,x11,-8
li x15,0x00B2A1584
sw x15,0x000000000(x11)
li x9,0x06A7FC67D
amoand.w.rl x13,x9, (x11)
bne x15,x13, TEST_FAIL
lwu x16,(x11)
and x9,x9,x15
bne x16,x9,TEST_FAIL
.global check_amoxor_w
check_amoxor_w:
ori x11,x11,0x0000001E1
andi x11,x11,-8
li x15,0x06E74E6D8
sw x15,0x000000000(x11)
li x9,0x0377E61C5
amoxor.w.aq x13,x9, (x11)
bne x15,x13, TEST_FAIL
lwu x16,(x11)
xor x9,x9,x15
bne x16,x9,TEST_FAIL
.global check_amomin_w
check_amomin_w:
ori x11,x11,0x0000005AA
andi x11,x11,-8
li x15,0x05DABE60E
sw x15,0x000000000(x11)
li x9,0x04C2CFBB3
amomin.w.aqrl x13,x9, (x11)
bne x15,x13, TEST_FAIL
lwu x16,(x11)
blt x9,x13,mem_val_w
mv x9,x13
.global mem_val_w
mem_val_w:
mv x9,x9
bne x16,x9,TEST_FAIL
.global check_amominu_w
check_amominu_w:
ori x11,x11,0x00000076E
andi x11,x11,-8
li x15,0x00E464664
sw x15,0x000000000(x11)
li x9,0x065FC3D17
amominu.w.aqrl x13,x9, (x11)
bne x15,x13, TEST_FAIL
lwu x16,(x11)
bltu x9,x13,mem_val2_w
mv x9,x13
.global mem_val2_w
mem_val2_w:
mv x9,x9
bne x16,x9,TEST_FAIL
.global check_amomax_w
check_amomax_w:
ori x11,x11,0x00000007D
andi x11,x11,-8
li x15,0x0290926ED
sw x15,0x000000000(x11)
li x9,0x058C998F5
amomax.w x13,x9, (x11)
bne x15,x13, TEST_FAIL
lwu x16,(x11)
bge x9,x13,mem_val3_w
mv x9,x13
.global mem_val3_w
mem_val3_w:
mv x9,x9
bne x16,x9,TEST_FAIL
.global check_amomaxu_w
check_amomaxu_w:
ori x11,x11,0x0000002F5
andi x11,x11,-8
li x15,0x07391BD71
sw x15,0x000000000(x11)
li x9,0x048ECBBF1
amomaxu.w.rl x13,x9, (x11)
bne x15,x13, TEST_FAIL
lwu x16,(x11)
bgeu x9,x13,mem_val4_w
mv x9,x13
.global mem_val4_w
mem_val4_w:
mv x9,x9
bne x16,x9,TEST_FAIL
.global TEST_EXIT
TEST_EXIT:
la x1, __exit
jr x1
.global TEST_FAIL
TEST_FAIL:
la x1, __fail
jr x1
.global TEST_WFI
TEST_WFI:
wfi
|
Advanced-Microelectronics-Group/OpenC910_Modified
| 3,558
|
smart_run/tests/cases/interrupt/int_hw_smoke/ct_plic_int_smoke_hw.s
|
/*Copyright 2019-2021 T-Head Semiconductor Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#************************************************************
#* Target file generated by rangen *
#************************************************************
#*************Following is the generated instructions*****************
.text
.align 6
.global main
main:
.set PLICBASE ,0xffffffc000000000
#.set PLICBASE_M ,APB_BASE_ADDR
.set PLICBASE_M ,0x4000000000
.set INTPRIO ,0x0
.set INTPEND ,0x1000
.set INTIE ,0x2000
.set INTIE_HART ,0x80
.set INTTH ,0x200000
.set INTCLAIM ,0x200004
.set INTHC_HART ,0x1000
.macro SETINT EXP_CODE, HANDLER_BEGIN,HANDLER_END
sd t0, -24(sp) #address can't be changed
sd t1, -32(sp) #it relates to crt0.s
la t1, vector_table + 128 #interrupt start address
addi t0,x0,\EXP_CODE
slli t0,t0,0x3
add t1,t1,t0
la t0, \HANDLER_BEGIN
sd t0, 0(t1)
ld t1, -32(sp)
ld t0, -24(sp)
j \HANDLER_END
ld a4, -16(sp)
.endm
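# The SETINT macro installs HANDLER_BEGIN into the vector table laid out by
# crt0.s: the slot address is vector_table + 128 + 8*EXP_CODE, roughly
#   ((void **)(vector_table + 128))[EXP_CODE] = handler_begin;
# and it then jumps to HANDLER_END so the handler body is skipped at set-up.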
#//-------------------//
#// INT HANDLER //
#//-------------------//
setint 11 int_begin,int_end
.global int_begin
int_begin:
.global stack_push
stack_push:
addi x2,x2,-24
sd x3,0x0(x2)
csrr x3,mepc
sd x3,0x8(x2)
csrr x3,mstatus
sd x3,0x16(x2)
#//int claim
#//ld id from claim_reg
.global int_claim
int_claim:
li x5, PLICBASE_M
li x6, INTCLAIM
add x7, x5,x6
lw x8, 0x0(x7)
.global handler
handler:
nop
nop
nop
nop
#//int cmplt
#//st id to claim_reg
.global int_cmplt
int_cmplt:
sw x8, 0x0(x7)
.global stack_pop
stack_pop:
ld x3,0x16(x2)
csrw mstatus,x3
ld x3,0x8(x2)
csrw mepc,x3
ld x3,0x0(x2)
addi x2,x2,16
mret
.global int_end
int_end:
#//-------------------//
#// INIT BEGIN //
##//-------------------//
#//enable cpu int ie
li x10,0xa
csrs mstatus,x10
#//enable meie
li x10,0x800
csrs mie,x10
#//set threshold highest to mask all
.global set_mthreshold_mask
set_mthreshold_mask:
li x5, PLICBASE_M
li x6, INTTH
add x7, x5,x6
li x8, 0x1f
sw x8, 0x0(x7)
##//set id 16 ip
#.global init_ip
#init_ip:
# li x5, PLICBASE_M
# li x6, INTPEND
# add x7, x5,x6
# li x8, 0x2
# sw x8, 0x0(x7)
#//set id 16 init prio lv:a
.global init_prio
init_prio:
li x5, PLICBASE_M
li x6, INTPRIO
add x7, x5,x6
li x8, 0xa
sw x8, 0x40(x7)
#//enable id 16 mie
.global init_ie
init_ie:
li x5, PLICBASE_M
li x6, INTIE
add x7, x5,x6
li x8, 0x10000
sw x8, 0x0(x7)
#//clear mthreshold
.global set_mthreshold_off
set_mthreshold_off:
li x5, PLICBASE_M
li x6, INTTH
add x7, x5,x6
li x8, 0x0
sw x8, 0x0(x7)
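#//PLIC programming summary (offsets from PLICBASE_M per the .set constants
#//above): priority registers at INTPRIO + 4*ID, enable bits at INTIE (one
#//bit per ID), the hart threshold at INTTH, and claim/complete at INTCLAIM.
#//The handler claims by reading INTCLAIM and completes by writing the same
#//ID back, roughly:
#//  id = *(volatile uint32_t *)(PLICBASE_M + INTCLAIM);
#//  /* service the source */
#//  *(volatile uint32_t *)(PLICBASE_M + INTCLAIM) = id;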
#//-------------------//
#// WAIT FOR INT //
#//-------------------//
#//wait int
wfi
.global EXIT
EXIT:
la x1, __exit
jr x1
.global FAIL
FAIL:
la x1, __fail
jr x1
#******this region is added by generator******
|
Advanced-Microelectronics-Group/OpenC910_Modified
| 3,909
|
smart_run/tests/cases/interrupt/int_smoke/ct_plic_int_smoke.s
|
/*Copyright 2019-2021 T-Head Semiconductor Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#************************************************************
#* Target file generated by rangen *
#************************************************************
#* *
#************************************************************
#*************Following is the generated instructions*****************
.text
.align 6
.global main
main:
.set PLICBASE ,0xffffffc000000000
# .set PLICBASE_M ,APB_BASE_ADDR
# .set PLICBASE_M ,0x4000000000
.set PLICBASE_M ,0xb0000000
.set INTPRIO ,0x0
.set INTPRIO_16 ,0x40
.set INTPEND ,0x1000
.set INTIE ,0x2000
.set INTIE_HART ,0x80
.set INTTH ,0x200000
.set INTCLAIM ,0x200004
.set INTHC_HART ,0x1000
.macro SETINT EXP_CODE, HANDLER_BEGIN,HANDLER_END
sd t0, -24(sp) #address can't be changed
sd t1, -32(sp) #it relates to crt0.s
la t1, vector_table + 128 #interrupt start address
addi t0,x0,\EXP_CODE
slli t0,t0,0x3
add t1,t1,t0
la t0, \HANDLER_BEGIN
sd t0, 0(t1)
ld t1, -32(sp)
ld t0, -24(sp)
j \HANDLER_END
ld a4, -16(sp)
.endm
#//-------------------//
#// INT HANDLER //
#//-------------------//
setint 11 int_begin,int_end
.global int_begin
int_begin:
.global stack_push
stack_push:
addi x2,x2,-24
sd x3,0x0(x2)
csrr x3,mepc
sd x3,0x8(x2)
csrr x3,mstatus
sd x3,0x16(x2)
#//int claim
#//ld id from claim_reg
.global int_claim
int_claim:
li x5, PLICBASE_M
li x6, INTCLAIM
add x7, x5,x6
lw x8, 0x0(x7)
.global handler
handler:
nop
nop
nop
nop
#//int cmplt
#//st id to claim_reg
.global int_cmplt
int_cmplt:
sw x8, 0x0(x7)
.global stack_pop
stack_pop:
ld x3,0x16(x2)
csrw mstatus,x3
ld x3,0x8(x2)
csrw mepc,x3
ld x3,0x0(x2)
addi x2,x2,16
mret
.global int_end
int_end:
#//-------------------//
#// INIT BEGIN //
##//-------------------//
#//enable cpu int ie
li x10,0xa
csrs mstatus,x10
#//enable meie
li x10,0x800
csrs mie,x10
#//set threshold highest to mask all
.global set_mthreshold_mask
set_mthreshold_mask:
li x5, PLICBASE_M
li x6, INTTH
add x7, x5,x6
li x8, 0x1f
sw x8, 0x0(x7)
#//set id 1 ip
# .global init_ip
# init_ip:
# li x5, PLICBASE_M
# li x6, INTPEND
# add x7, x5,x6
# li x8, 0x2
# sw x8, 0x0(x7)
#//set id 16 init prio lv:a
.global init_prio
init_prio:
li x5, PLICBASE_M
# li x6, INTPRIO
li x6, INTPRIO_16
add x7, x5,x6
li x8, 0xa
# sw x8, 0x4(x7)
sw x8, (x7)
#//enable id 16 mie
.global init_ie
init_ie:
li x5, PLICBASE_M
li x6, INTIE
add x7, x5,x6
# li x8, 0x2
li x8, 0x10000 # Enable PLIC_INT ID 16 (External Interrupt ID 0)
sw x8, 0x0(x7)
sw x8, 0x80(x7)
#//clear mthreshold
.global set_mthreshold_off
set_mthreshold_off:
li x5, PLICBASE_M
li x6, INTTH
add x7, x5,x6
li x8, 0x0
sw x8, 0x0(x7)
#//-------------------//
#// WAIT FOR INT //
#//-------------------//
#//wait int
fence
fence.i
wfi
.global EXIT
EXIT:
la x1, __exit
jr x1
.global FAIL
FAIL:
la x1, __fail
jr x1
#******this region is added by generator******
|
Advanced-Effects/Advanced-Effects
| 20,117
|
thirdparty/framework/audio/thirdparty/opus/opus-1.4/celt/arm/celt_pitch_xcorr_arm.s
|
; Copyright (c) 2007-2008 CSIRO
; Copyright (c) 2007-2009 Xiph.Org Foundation
; Copyright (c) 2013 Parrot
; Written by Aurélien Zanelli
;
; Redistribution and use in source and binary forms, with or without
; modification, are permitted provided that the following conditions
; are met:
;
; - Redistributions of source code must retain the above copyright
; notice, this list of conditions and the following disclaimer.
;
; - Redistributions in binary form must reproduce the above copyright
; notice, this list of conditions and the following disclaimer in the
; documentation and/or other materials provided with the distribution.
;
; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
; ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
; OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
AREA |.text|, CODE, READONLY
GET celt/arm/armopts.s
IF OPUS_ARM_MAY_HAVE_EDSP
EXPORT celt_pitch_xcorr_edsp
ENDIF
IF OPUS_ARM_MAY_HAVE_NEON
EXPORT celt_pitch_xcorr_neon
ENDIF
IF OPUS_ARM_MAY_HAVE_NEON
; Compute sum[k]=sum(x[j]*y[j+k],j=0...len-1), k=0...3
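; A C reference sketch of the same accumulation:
;   for (j = 0; j < len; j++)
;     for (k = 0; k < 4; k++)
;       sum[k] += (opus_val32)x[j] * y[j + k];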
xcorr_kernel_neon PROC
xcorr_kernel_neon_start
; input:
; r3 = int len
; r4 = opus_val16 *x
; r5 = opus_val16 *y
; q0 = opus_val32 sum[4]
; output:
; q0 = opus_val32 sum[4]
; preserved: r0-r3, r6-r11, d2, q4-q7, q9-q15
; internal usage:
; r12 = int j
; d3 = y_3|y_2|y_1|y_0
; q2 = y_B|y_A|y_9|y_8|y_7|y_6|y_5|y_4
; q3 = x_7|x_6|x_5|x_4|x_3|x_2|x_1|x_0
; q8 = scratch
;
; Load y[0...3]
; This requires len>0 to always be valid (which we assert in the C code).
VLD1.16 {d5}, [r5]!
SUBS r12, r3, #8
BLE xcorr_kernel_neon_process4
; Process 8 samples at a time.
; This loop loads one y value more than we actually need. Therefore we have to
; stop as soon as there are 8 or fewer samples left (instead of 7), to avoid
; reading past the end of the array.
xcorr_kernel_neon_process8
; This loop has 19 total instructions (10 cycles to issue, minimum), with
; - 2 cycles of ARM instructions,
; - 10 cycles of load/store/byte permute instructions, and
; - 9 cycles of data processing instructions.
; On a Cortex A8, we dual-issue the maximum amount (9 cycles) between the
; latter two categories, meaning the whole loop should run in 10 cycles per
; iteration, barring cache misses.
;
; Load x[0...7]
VLD1.16 {d6, d7}, [r4]!
; Unlike VMOV, VAND is a data processing instruction (and doesn't get
; assembled to VMOV, like VORR would), so it dual-issues with the prior VLD1.
VAND d3, d5, d5
SUBS r12, r12, #8
; Load y[4...11]
VLD1.16 {d4, d5}, [r5]!
VMLAL.S16 q0, d3, d6[0]
VEXT.16 d16, d3, d4, #1
VMLAL.S16 q0, d4, d7[0]
VEXT.16 d17, d4, d5, #1
VMLAL.S16 q0, d16, d6[1]
VEXT.16 d16, d3, d4, #2
VMLAL.S16 q0, d17, d7[1]
VEXT.16 d17, d4, d5, #2
VMLAL.S16 q0, d16, d6[2]
VEXT.16 d16, d3, d4, #3
VMLAL.S16 q0, d17, d7[2]
VEXT.16 d17, d4, d5, #3
VMLAL.S16 q0, d16, d6[3]
VMLAL.S16 q0, d17, d7[3]
BGT xcorr_kernel_neon_process8
; Process 4 samples here if we have > 4 left (still reading one extra y value).
xcorr_kernel_neon_process4
ADDS r12, r12, #4
BLE xcorr_kernel_neon_process2
; Load x[0...3]
VLD1.16 d6, [r4]!
; Use VAND since it's a data processing instruction again.
VAND d4, d5, d5
SUB r12, r12, #4
; Load y[4...7]
VLD1.16 d5, [r5]!
VMLAL.S16 q0, d4, d6[0]
VEXT.16 d16, d4, d5, #1
VMLAL.S16 q0, d16, d6[1]
VEXT.16 d16, d4, d5, #2
VMLAL.S16 q0, d16, d6[2]
VEXT.16 d16, d4, d5, #3
VMLAL.S16 q0, d16, d6[3]
; Process 2 samples here if we have > 2 left (still reading one extra y value).
xcorr_kernel_neon_process2
ADDS r12, r12, #2
BLE xcorr_kernel_neon_process1
; Load x[0...1]
VLD2.16 {d6[],d7[]}, [r4]!
; Use VAND since it's a data processing instruction again.
VAND d4, d5, d5
SUB r12, r12, #2
; Load y[4...5]
VLD1.32 {d5[]}, [r5]!
VMLAL.S16 q0, d4, d6
VEXT.16 d16, d4, d5, #1
; Replace bottom copy of {y5,y4} in d5 with {y3,y2} from d4, using VSRI
; instead of VEXT, since it's a data-processing instruction.
VSRI.64 d5, d4, #32
VMLAL.S16 q0, d16, d7
; Process 1 sample using the extra y value we loaded above.
xcorr_kernel_neon_process1
; Load next *x
VLD1.16 {d6[]}, [r4]!
ADDS r12, r12, #1
; y[0...3] are left in d5 from prior iteration(s) (if any)
VMLAL.S16 q0, d5, d6
MOVLE pc, lr
; Now process 1 last sample, not reading ahead.
; Load last *y
VLD1.16 {d4[]}, [r5]!
VSRI.64 d4, d5, #16
; Load last *x
VLD1.16 {d6[]}, [r4]!
VMLAL.S16 q0, d4, d6
MOV pc, lr
ENDP
; opus_val32 celt_pitch_xcorr_neon(opus_val16 *_x, opus_val16 *_y,
; opus_val32 *xcorr, int len, int max_pitch, int arch)
celt_pitch_xcorr_neon PROC
; input:
; r0 = opus_val16 *_x
; r1 = opus_val16 *_y
; r2 = opus_val32 *xcorr
; r3 = int len
; output:
; r0 = int maxcorr
; internal usage:
; r4 = opus_val16 *x (for xcorr_kernel_neon())
; r5 = opus_val16 *y (for xcorr_kernel_neon())
; r6 = int max_pitch
; r12 = int j
; q15 = int maxcorr[4] (q15 is not used by xcorr_kernel_neon())
; ignored:
; int arch
STMFD sp!, {r4-r6, lr}
LDR r6, [sp, #16]
VMOV.S32 q15, #1
; if (max_pitch < 4) goto celt_pitch_xcorr_neon_process4_done
SUBS r6, r6, #4
BLT celt_pitch_xcorr_neon_process4_done
celt_pitch_xcorr_neon_process4
; xcorr_kernel_neon parameters:
; r3 = len, r4 = _x, r5 = _y, q0 = {0, 0, 0, 0}
MOV r4, r0
MOV r5, r1
VEOR q0, q0, q0
; xcorr_kernel_neon only modifies r4, r5, r12, and q0...q3.
; So we don't save/restore any other registers.
BL xcorr_kernel_neon_start
SUBS r6, r6, #4
VST1.32 {q0}, [r2]!
; _y += 4
ADD r1, r1, #8
VMAX.S32 q15, q15, q0
; if (max_pitch < 4) goto celt_pitch_xcorr_neon_process4_done
BGE celt_pitch_xcorr_neon_process4
; We have less than 4 sums left to compute.
celt_pitch_xcorr_neon_process4_done
ADDS r6, r6, #4
; Reduce maxcorr to a single value
VMAX.S32 d30, d30, d31
VPMAX.S32 d30, d30, d30
; if (max_pitch <= 0) goto celt_pitch_xcorr_neon_done
BLE celt_pitch_xcorr_neon_done
; Now compute each remaining sum one at a time.
celt_pitch_xcorr_neon_process_remaining
MOV r4, r0
MOV r5, r1
VMOV.I32 q0, #0
SUBS r12, r3, #8
BLT celt_pitch_xcorr_neon_process_remaining4
; Sum terms 8 at a time.
celt_pitch_xcorr_neon_process_remaining_loop8
; Load x[0...7]
VLD1.16 {q1}, [r4]!
; Load y[0...7]
VLD1.16 {q2}, [r5]!
SUBS r12, r12, #8
VMLAL.S16 q0, d4, d2
VMLAL.S16 q0, d5, d3
BGE celt_pitch_xcorr_neon_process_remaining_loop8
; Sum terms 4 at a time.
celt_pitch_xcorr_neon_process_remaining4
ADDS r12, r12, #4
BLT celt_pitch_xcorr_neon_process_remaining4_done
; Load x[0...3]
VLD1.16 {d2}, [r4]!
; Load y[0...3]
VLD1.16 {d3}, [r5]!
SUB r12, r12, #4
VMLAL.S16 q0, d3, d2
celt_pitch_xcorr_neon_process_remaining4_done
; Reduce the sum to a single value.
VADD.S32 d0, d0, d1
VPADDL.S32 d0, d0
ADDS r12, r12, #4
BLE celt_pitch_xcorr_neon_process_remaining_loop_done
; Sum terms 1 at a time.
celt_pitch_xcorr_neon_process_remaining_loop1
VLD1.16 {d2[]}, [r4]!
VLD1.16 {d3[]}, [r5]!
SUBS r12, r12, #1
VMLAL.S16 q0, d2, d3
BGT celt_pitch_xcorr_neon_process_remaining_loop1
celt_pitch_xcorr_neon_process_remaining_loop_done
VST1.32 {d0[0]}, [r2]!
VMAX.S32 d30, d30, d0
SUBS r6, r6, #1
; _y++
ADD r1, r1, #2
; if (--max_pitch > 0) goto celt_pitch_xcorr_neon_process_remaining
BGT celt_pitch_xcorr_neon_process_remaining
celt_pitch_xcorr_neon_done
VMOV.32 r0, d30[0]
LDMFD sp!, {r4-r6, pc}
ENDP
ENDIF
IF OPUS_ARM_MAY_HAVE_EDSP
; This will get used on ARMv7 devices without NEON, so it has been optimized
; to take advantage of dual-issuing where possible.
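; Reminder on the EDSP multiply-accumulate mnemonics used below: the two
; suffix letters pick the Bottom or Top 16-bit half of the first and second
; source registers respectively, e.g.
;   SMLABT rd, rn, rm, ra  ; rd = ra + (int16_t)rn * (int16_t)(rm >> 16)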
xcorr_kernel_edsp PROC
xcorr_kernel_edsp_start
; input:
; r3 = int len
; r4 = opus_val16 *_x (must be 32-bit aligned)
; r5 = opus_val16 *_y (must be 32-bit aligned)
; r6...r9 = opus_val32 sum[4]
; output:
; r6...r9 = opus_val32 sum[4]
; preserved: r0-r5
; internal usage
; r2 = int j
; r12,r14 = opus_val16 x[4]
; r10,r11 = opus_val16 y[4]
STMFD sp!, {r2,r4,r5,lr}
LDR r10, [r5], #4 ; Load y[0...1]
SUBS r2, r3, #4 ; j = len-4
LDR r11, [r5], #4 ; Load y[2...3]
BLE xcorr_kernel_edsp_process4_done
LDR r12, [r4], #4 ; Load x[0...1]
; Stall
xcorr_kernel_edsp_process4
; The multiplies must issue from pipeline 0, and can't dual-issue with each
; other. Every other instruction here dual-issues with a multiply, and is
; thus "free". There should be no stalls in the body of the loop.
SMLABB r6, r12, r10, r6 ; sum[0] = MAC16_16(sum[0],x_0,y_0)
LDR r14, [r4], #4 ; Load x[2...3]
SMLABT r7, r12, r10, r7 ; sum[1] = MAC16_16(sum[1],x_0,y_1)
SUBS r2, r2, #4 ; j-=4
SMLABB r8, r12, r11, r8 ; sum[2] = MAC16_16(sum[2],x_0,y_2)
SMLABT r9, r12, r11, r9 ; sum[3] = MAC16_16(sum[3],x_0,y_3)
SMLATT r6, r12, r10, r6 ; sum[0] = MAC16_16(sum[0],x_1,y_1)
LDR r10, [r5], #4 ; Load y[4...5]
SMLATB r7, r12, r11, r7 ; sum[1] = MAC16_16(sum[1],x_1,y_2)
SMLATT r8, r12, r11, r8 ; sum[2] = MAC16_16(sum[2],x_1,y_3)
SMLATB r9, r12, r10, r9 ; sum[3] = MAC16_16(sum[3],x_1,y_4)
LDRGT r12, [r4], #4 ; Load x[0...1]
SMLABB r6, r14, r11, r6 ; sum[0] = MAC16_16(sum[0],x_2,y_2)
SMLABT r7, r14, r11, r7 ; sum[1] = MAC16_16(sum[1],x_2,y_3)
SMLABB r8, r14, r10, r8 ; sum[2] = MAC16_16(sum[2],x_2,y_4)
SMLABT r9, r14, r10, r9 ; sum[3] = MAC16_16(sum[3],x_2,y_5)
SMLATT r6, r14, r11, r6 ; sum[0] = MAC16_16(sum[0],x_3,y_3)
LDR r11, [r5], #4 ; Load y[6...7]
SMLATB r7, r14, r10, r7 ; sum[1] = MAC16_16(sum[1],x_3,y_4)
SMLATT r8, r14, r10, r8 ; sum[2] = MAC16_16(sum[2],x_3,y_5)
SMLATB r9, r14, r11, r9 ; sum[3] = MAC16_16(sum[3],x_3,y_6)
BGT xcorr_kernel_edsp_process4
xcorr_kernel_edsp_process4_done
ADDS r2, r2, #4
BLE xcorr_kernel_edsp_done
LDRH r12, [r4], #2 ; r12 = *x++
SUBS r2, r2, #1 ; j--
; Stall
SMLABB r6, r12, r10, r6 ; sum[0] = MAC16_16(sum[0],x,y_0)
LDRHGT r14, [r4], #2 ; r14 = *x++
SMLABT r7, r12, r10, r7 ; sum[1] = MAC16_16(sum[1],x,y_1)
SMLABB r8, r12, r11, r8 ; sum[2] = MAC16_16(sum[2],x,y_2)
SMLABT r9, r12, r11, r9 ; sum[3] = MAC16_16(sum[3],x,y_3)
BLE xcorr_kernel_edsp_done
SMLABT r6, r14, r10, r6 ; sum[0] = MAC16_16(sum[0],x,y_1)
SUBS r2, r2, #1 ; j--
SMLABB r7, r14, r11, r7 ; sum[1] = MAC16_16(sum[1],x,y_2)
LDRH r10, [r5], #2 ; r10 = y_4 = *y++
SMLABT r8, r14, r11, r8 ; sum[2] = MAC16_16(sum[2],x,y_3)
LDRHGT r12, [r4], #2 ; r12 = *x++
SMLABB r9, r14, r10, r9 ; sum[3] = MAC16_16(sum[3],x,y_4)
BLE xcorr_kernel_edsp_done
SMLABB r6, r12, r11, r6 ; sum[0] = MAC16_16(sum[0],tmp,y_2)
CMP r2, #1 ; j--
SMLABT r7, r12, r11, r7 ; sum[1] = MAC16_16(sum[1],tmp,y_3)
LDRH r2, [r5], #2 ; r2 = y_5 = *y++
SMLABB r8, r12, r10, r8 ; sum[2] = MAC16_16(sum[2],tmp,y_4)
LDRHGT r14, [r4] ; r14 = *x
SMLABB r9, r12, r2, r9 ; sum[3] = MAC16_16(sum[3],tmp,y_5)
BLE xcorr_kernel_edsp_done
SMLABT r6, r14, r11, r6 ; sum[0] = MAC16_16(sum[0],tmp,y_3)
LDRH r11, [r5] ; r11 = y_6 = *y
SMLABB r7, r14, r10, r7 ; sum[1] = MAC16_16(sum[1],tmp,y_4)
SMLABB r8, r14, r2, r8 ; sum[2] = MAC16_16(sum[2],tmp,y_5)
SMLABB r9, r14, r11, r9 ; sum[3] = MAC16_16(sum[3],tmp,y_6)
xcorr_kernel_edsp_done
LDMFD sp!, {r2,r4,r5,pc}
ENDP
celt_pitch_xcorr_edsp PROC
; input:
; r0 = opus_val16 *_x (must be 32-bit aligned)
; r1 = opus_val16 *_y (only needs to be 16-bit aligned)
; r2 = opus_val32 *xcorr
; r3 = int len
; output:
; r0 = maxcorr
; internal usage
; r4 = opus_val16 *x
; r5 = opus_val16 *y
; r6 = opus_val32 sum0
; r7 = opus_val32 sum1
; r8 = opus_val32 sum2
; r9 = opus_val32 sum3
; r1 = int max_pitch
; r12 = int j
; ignored:
; int arch
STMFD sp!, {r4-r11, lr}
MOV r5, r1
LDR r1, [sp, #36]
MOV r4, r0
TST r5, #3
; maxcorr = 1
MOV r0, #1
BEQ celt_pitch_xcorr_edsp_process1u_done
; Compute one sum at the start to make y 32-bit aligned.
SUBS r12, r3, #4
; r14 = sum = 0
MOV r14, #0
LDRH r8, [r5], #2
BLE celt_pitch_xcorr_edsp_process1u_loop4_done
LDR r6, [r4], #4
MOV r8, r8, LSL #16
celt_pitch_xcorr_edsp_process1u_loop4
LDR r9, [r5], #4
SMLABT r14, r6, r8, r14 ; sum = MAC16_16(sum, x_0, y_0)
LDR r7, [r4], #4
SMLATB r14, r6, r9, r14 ; sum = MAC16_16(sum, x_1, y_1)
LDR r8, [r5], #4
SMLABT r14, r7, r9, r14 ; sum = MAC16_16(sum, x_2, y_2)
SUBS r12, r12, #4 ; j-=4
SMLATB r14, r7, r8, r14 ; sum = MAC16_16(sum, x_3, y_3)
LDRGT r6, [r4], #4
BGT celt_pitch_xcorr_edsp_process1u_loop4
MOV r8, r8, LSR #16
celt_pitch_xcorr_edsp_process1u_loop4_done
ADDS r12, r12, #4
celt_pitch_xcorr_edsp_process1u_loop1
LDRHGE r6, [r4], #2
; Stall
SMLABBGE r14, r6, r8, r14 ; sum = MAC16_16(sum, *x, *y)
SUBSGE r12, r12, #1
LDRHGT r8, [r5], #2
BGT celt_pitch_xcorr_edsp_process1u_loop1
; Restore _x
SUB r4, r4, r3, LSL #1
; Restore and advance _y
SUB r5, r5, r3, LSL #1
; maxcorr = max(maxcorr, sum)
CMP r0, r14
ADD r5, r5, #2
MOVLT r0, r14
SUBS r1, r1, #1
; xcorr[i] = sum
STR r14, [r2], #4
BLE celt_pitch_xcorr_edsp_done
celt_pitch_xcorr_edsp_process1u_done
; if (max_pitch < 4) goto celt_pitch_xcorr_edsp_process2
SUBS r1, r1, #4
BLT celt_pitch_xcorr_edsp_process2
celt_pitch_xcorr_edsp_process4
; xcorr_kernel_edsp parameters:
; r3 = len, r4 = _x, r5 = _y, r6...r9 = sum[4] = {0, 0, 0, 0}
MOV r6, #0
MOV r7, #0
MOV r8, #0
MOV r9, #0
BL xcorr_kernel_edsp_start ; xcorr_kernel_edsp(_x, _y+i, xcorr+i, len)
; maxcorr = max(maxcorr, sum0, sum1, sum2, sum3)
CMP r0, r6
; _y+=4
ADD r5, r5, #8
MOVLT r0, r6
CMP r0, r7
MOVLT r0, r7
CMP r0, r8
MOVLT r0, r8
CMP r0, r9
MOVLT r0, r9
STMIA r2!, {r6-r9}
SUBS r1, r1, #4
BGE celt_pitch_xcorr_edsp_process4
celt_pitch_xcorr_edsp_process2
ADDS r1, r1, #2
BLT celt_pitch_xcorr_edsp_process1a
SUBS r12, r3, #4
; {r10, r11} = {sum0, sum1} = {0, 0}
MOV r10, #0
MOV r11, #0
LDR r8, [r5], #4
BLE celt_pitch_xcorr_edsp_process2_loop_done
LDR r6, [r4], #4
LDR r9, [r5], #4
celt_pitch_xcorr_edsp_process2_loop4
SMLABB r10, r6, r8, r10 ; sum0 = MAC16_16(sum0, x_0, y_0)
LDR r7, [r4], #4
SMLABT r11, r6, r8, r11 ; sum1 = MAC16_16(sum1, x_0, y_1)
SUBS r12, r12, #4 ; j-=4
SMLATT r10, r6, r8, r10 ; sum0 = MAC16_16(sum0, x_1, y_1)
LDR r8, [r5], #4
SMLATB r11, r6, r9, r11 ; sum1 = MAC16_16(sum1, x_1, y_2)
LDRGT r6, [r4], #4
SMLABB r10, r7, r9, r10 ; sum0 = MAC16_16(sum0, x_2, y_2)
SMLABT r11, r7, r9, r11 ; sum1 = MAC16_16(sum1, x_2, y_3)
SMLATT r10, r7, r9, r10 ; sum0 = MAC16_16(sum0, x_3, y_3)
LDRGT r9, [r5], #4
SMLATB r11, r7, r8, r11 ; sum1 = MAC16_16(sum1, x_3, y_4)
BGT celt_pitch_xcorr_edsp_process2_loop4
celt_pitch_xcorr_edsp_process2_loop_done
ADDS r12, r12, #2
BLE celt_pitch_xcorr_edsp_process2_1
LDR r6, [r4], #4
; Stall
SMLABB r10, r6, r8, r10 ; sum0 = MAC16_16(sum0, x_0, y_0)
LDR r9, [r5], #4
SMLABT r11, r6, r8, r11 ; sum1 = MAC16_16(sum1, x_0, y_1)
SUB r12, r12, #2
SMLATT r10, r6, r8, r10 ; sum0 = MAC16_16(sum0, x_1, y_1)
MOV r8, r9
SMLATB r11, r6, r9, r11 ; sum1 = MAC16_16(sum1, x_1, y_2)
celt_pitch_xcorr_edsp_process2_1
LDRH r6, [r4], #2
ADDS r12, r12, #1
; Stall
SMLABB r10, r6, r8, r10 ; sum0 = MAC16_16(sum0, x_0, y_0)
LDRHGT r7, [r4], #2
SMLABT r11, r6, r8, r11 ; sum1 = MAC16_16(sum1, x_0, y_1)
BLE celt_pitch_xcorr_edsp_process2_done
LDRH r9, [r5], #2
SMLABT r10, r7, r8, r10 ; sum0 = MAC16_16(sum0, x_0, y_1)
SMLABB r11, r7, r9, r11 ; sum1 = MAC16_16(sum1, x_0, y_2)
celt_pitch_xcorr_edsp_process2_done
; Restore _x
SUB r4, r4, r3, LSL #1
; Restore and advance _y
SUB r5, r5, r3, LSL #1
; maxcorr = max(maxcorr, sum0)
CMP r0, r10
ADD r5, r5, #2
MOVLT r0, r10
SUB r1, r1, #2
; maxcorr = max(maxcorr, sum1)
CMP r0, r11
; xcorr[i] = sum
STR r10, [r2], #4
MOVLT r0, r11
STR r11, [r2], #4
celt_pitch_xcorr_edsp_process1a
ADDS r1, r1, #1
BLT celt_pitch_xcorr_edsp_done
SUBS r12, r3, #4
; r14 = sum = 0
MOV r14, #0
BLT celt_pitch_xcorr_edsp_process1a_loop_done
LDR r6, [r4], #4
LDR r8, [r5], #4
LDR r7, [r4], #4
LDR r9, [r5], #4
celt_pitch_xcorr_edsp_process1a_loop4
SMLABB r14, r6, r8, r14 ; sum = MAC16_16(sum, x_0, y_0)
SUBS r12, r12, #4 ; j-=4
SMLATT r14, r6, r8, r14 ; sum = MAC16_16(sum, x_1, y_1)
LDRGE r6, [r4], #4
SMLABB r14, r7, r9, r14 ; sum = MAC16_16(sum, x_2, y_2)
LDRGE r8, [r5], #4
SMLATT r14, r7, r9, r14 ; sum = MAC16_16(sum, x_3, y_3)
LDRGE r7, [r4], #4
LDRGE r9, [r5], #4
BGE celt_pitch_xcorr_edsp_process1a_loop4
celt_pitch_xcorr_edsp_process1a_loop_done
ADDS r12, r12, #2
LDRGE r6, [r4], #4
LDRGE r8, [r5], #4
; Stall
SMLABBGE r14, r6, r8, r14 ; sum = MAC16_16(sum, x_0, y_0)
SUBGE r12, r12, #2
SMLATTGE r14, r6, r8, r14 ; sum = MAC16_16(sum, x_1, y_1)
ADDS r12, r12, #1
LDRHGE r6, [r4], #2
LDRHGE r8, [r5], #2
; Stall
SMLABBGE r14, r6, r8, r14 ; sum = MAC16_16(sum, *x, *y)
; maxcorr = max(maxcorr, sum)
CMP r0, r14
; xcorr[i] = sum
STR r14, [r2], #4
MOVLT r0, r14
celt_pitch_xcorr_edsp_done
LDMFD sp!, {r4-r11, pc}
ENDP
ENDIF
END
|
Advanced-Effects/Advanced-Effects
| 20,657
|
thirdparty/framework/audio/thirdparty/opus/opus-1.4/celt/arm/celt_pitch_xcorr_arm-gnu.S
|
.syntax unified
@ Copyright (c) 2007-2008 CSIRO
@ Copyright (c) 2007-2009 Xiph.Org Foundation
@ Copyright (c) 2013 Parrot
@ Written by Aurélien Zanelli
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions
@ are met:
@
@ - Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@
@ - Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
@ OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
@ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
@ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
@ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
@ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.text; .p2align 2; .arch armv7-a
.fpu neon
.object_arch armv4t
.include "celt/arm/armopts-gnu.S"
.if OPUS_ARM_MAY_HAVE_EDSP
.global celt_pitch_xcorr_edsp
.endif
.if OPUS_ARM_MAY_HAVE_NEON
.global celt_pitch_xcorr_neon
.endif
.if OPUS_ARM_MAY_HAVE_NEON
@ Compute sum[k]=sum(x[j]*y[j+k],j=0...len-1), k=0...3
.type xcorr_kernel_neon, %function; xcorr_kernel_neon: @ PROC
xcorr_kernel_neon_start:
@ input:
@ r3 = int len
@ r4 = opus_val16 *x
@ r5 = opus_val16 *y
@ q0 = opus_val32 sum[4]
@ output:
@ q0 = opus_val32 sum[4]
@ preserved: r0-r3, r6-r11, d2, q4-q7, q9-q15
@ internal usage:
@ r12 = int j
@ d3 = y_3|y_2|y_1|y_0
@ q2 = y_B|y_A|y_9|y_8|y_7|y_6|y_5|y_4
@ q3 = x_7|x_6|x_5|x_4|x_3|x_2|x_1|x_0
@ q8 = scratch
@
@ Load y[0...3]
@ This requires len>0 to always be valid (which we assert in the C code).
VLD1.16 {d5}, [r5]!
SUBS r12, r3, #8
BLE xcorr_kernel_neon_process4
@ Process 8 samples at a time.
@ This loop loads one y value more than we actually need. Therefore we have to
@ stop as soon as there are 8 or fewer samples left (instead of 7), to avoid
@ reading past the end of the array.
xcorr_kernel_neon_process8:
@ This loop has 19 total instructions (10 cycles to issue, minimum), with
@ - 2 cycles of ARM instructions,
@ - 10 cycles of load/store/byte permute instructions, and
@ - 9 cycles of data processing instructions.
@ On a Cortex A8, we dual-issue the maximum amount (9 cycles) between the
@ latter two categories, meaning the whole loop should run in 10 cycles per
@ iteration, barring cache misses.
@
@ Load x[0...7]
VLD1.16 {d6, d7}, [r4]!
@ Unlike VMOV, VAND is a data processing instruction (and doesn't get
@ assembled to VMOV, like VORR would), so it dual-issues with the prior VLD1.
VAND d3, d5, d5
SUBS r12, r12, #8
@ Load y[4...11]
VLD1.16 {d4, d5}, [r5]!
VMLAL.S16 q0, d3, d6[0]
VEXT.16 d16, d3, d4, #1
VMLAL.S16 q0, d4, d7[0]
VEXT.16 d17, d4, d5, #1
VMLAL.S16 q0, d16, d6[1]
VEXT.16 d16, d3, d4, #2
VMLAL.S16 q0, d17, d7[1]
VEXT.16 d17, d4, d5, #2
VMLAL.S16 q0, d16, d6[2]
VEXT.16 d16, d3, d4, #3
VMLAL.S16 q0, d17, d7[2]
VEXT.16 d17, d4, d5, #3
VMLAL.S16 q0, d16, d6[3]
VMLAL.S16 q0, d17, d7[3]
BGT xcorr_kernel_neon_process8
@ Process 4 samples here if we have > 4 left (still reading one extra y value).
xcorr_kernel_neon_process4:
ADDS r12, r12, #4
BLE xcorr_kernel_neon_process2
@ Load x[0...3]
VLD1.16 d6, [r4]!
@ Use VAND since it's a data processing instruction again.
VAND d4, d5, d5
SUB r12, r12, #4
@ Load y[4...7]
VLD1.16 d5, [r5]!
VMLAL.S16 q0, d4, d6[0]
VEXT.16 d16, d4, d5, #1
VMLAL.S16 q0, d16, d6[1]
VEXT.16 d16, d4, d5, #2
VMLAL.S16 q0, d16, d6[2]
VEXT.16 d16, d4, d5, #3
VMLAL.S16 q0, d16, d6[3]
@ Process 2 samples here if we have > 2 left (still reading one extra y value).
xcorr_kernel_neon_process2:
ADDS r12, r12, #2
BLE xcorr_kernel_neon_process1
@ Load x[0...1]
VLD2.16 {d6[],d7[]}, [r4]!
@ Use VAND since it's a data processing instruction again.
VAND d4, d5, d5
SUB r12, r12, #2
@ Load y[4...5]
VLD1.32 {d5[]}, [r5]!
VMLAL.S16 q0, d4, d6
VEXT.16 d16, d4, d5, #1
@ Replace bottom copy of {y5,y4} in d5 with {y3,y2} from d4, using VSRI
@ instead of VEXT, since it's a data-processing instruction.
VSRI.64 d5, d4, #32
VMLAL.S16 q0, d16, d7
@ Process 1 sample using the extra y value we loaded above.
xcorr_kernel_neon_process1:
@ Load next *x
VLD1.16 {d6[]}, [r4]!
ADDS r12, r12, #1
@ y[0...3] are left in d5 from prior iteration(s) (if any)
VMLAL.S16 q0, d5, d6
MOVLE pc, lr
@ Now process 1 last sample, not reading ahead.
@ Load last *y
VLD1.16 {d4[]}, [r5]!
VSRI.64 d4, d5, #16
@ Load last *x
VLD1.16 {d6[]}, [r4]!
VMLAL.S16 q0, d4, d6
MOV pc, lr
.size xcorr_kernel_neon, .-xcorr_kernel_neon @ ENDP
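@
@ For reference, a minimal scalar C sketch of what the kernel above computes
@ (the function name is illustrative and not part of the Opus sources; it just
@ restates the "sum[k]=sum(x[j]*y[j+k])" comment at the top of the kernel):
@
@   #include <stdint.h>
@   /* Accumulate sum[k] += x[j]*y[j+k] for k = 0..3, j = 0..len-1 (len > 0). */
@   static void xcorr_kernel_ref(const int16_t *x, const int16_t *y,
@                                int32_t sum[4], int len)
@   {
@       for (int j = 0; j < len; j++)
@           for (int k = 0; k < 4; k++)
@               sum[k] += (int32_t)x[j] * y[j + k];
@   }
@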
@ opus_val32 celt_pitch_xcorr_neon(opus_val16 *_x, opus_val16 *_y,
@ opus_val32 *xcorr, int len, int max_pitch, int arch)
.type celt_pitch_xcorr_neon, %function; celt_pitch_xcorr_neon: @ PROC
@ input:
@ r0 = opus_val16 *_x
@ r1 = opus_val16 *_y
@ r2 = opus_val32 *xcorr
@ r3 = int len
@ output:
@ r0 = int maxcorr
@ internal usage:
@ r4 = opus_val16 *x (for xcorr_kernel_neon())
@ r5 = opus_val16 *y (for xcorr_kernel_neon())
@ r6 = int max_pitch
@ r12 = int j
@ q15 = int maxcorr[4] (q15 is not used by xcorr_kernel_neon())
@ ignored:
@ int arch
STMFD sp!, {r4-r6, lr}
LDR r6, [sp, #16]
VMOV.S32 q15, #1
@ if (max_pitch < 4) goto celt_pitch_xcorr_neon_process4_done
SUBS r6, r6, #4
BLT celt_pitch_xcorr_neon_process4_done
celt_pitch_xcorr_neon_process4:
@ xcorr_kernel_neon parameters:
@ r3 = len, r4 = _x, r5 = _y, q0 = {0, 0, 0, 0}
MOV r4, r0
MOV r5, r1
VEOR q0, q0, q0
@ xcorr_kernel_neon only modifies r4, r5, r12, and q0...q3.
@ So we don't save/restore any other registers.
BL xcorr_kernel_neon_start
SUBS r6, r6, #4
VST1.32 {q0}, [r2]!
@ _y += 4
ADD r1, r1, #8
VMAX.S32 q15, q15, q0
@ if (max_pitch < 4) goto celt_pitch_xcorr_neon_process4_done
BGE celt_pitch_xcorr_neon_process4
@ We have less than 4 sums left to compute.
celt_pitch_xcorr_neon_process4_done:
ADDS r6, r6, #4
@ Reduce maxcorr to a single value
VMAX.S32 d30, d30, d31
VPMAX.S32 d30, d30, d30
@ if (max_pitch <= 0) goto celt_pitch_xcorr_neon_done
BLE celt_pitch_xcorr_neon_done
@ Now compute each remaining sum one at a time.
celt_pitch_xcorr_neon_process_remaining:
MOV r4, r0
MOV r5, r1
VMOV.I32 q0, #0
SUBS r12, r3, #8
BLT celt_pitch_xcorr_neon_process_remaining4
@ Sum terms 8 at a time.
celt_pitch_xcorr_neon_process_remaining_loop8:
@ Load x[0...7]
VLD1.16 {q1}, [r4]!
@ Load y[0...7]
VLD1.16 {q2}, [r5]!
SUBS r12, r12, #8
VMLAL.S16 q0, d4, d2
VMLAL.S16 q0, d5, d3
BGE celt_pitch_xcorr_neon_process_remaining_loop8
@ Sum terms 4 at a time.
celt_pitch_xcorr_neon_process_remaining4:
ADDS r12, r12, #4
BLT celt_pitch_xcorr_neon_process_remaining4_done
@ Load x[0...3]
VLD1.16 {d2}, [r4]!
@ Load y[0...3]
VLD1.16 {d3}, [r5]!
SUB r12, r12, #4
VMLAL.S16 q0, d3, d2
celt_pitch_xcorr_neon_process_remaining4_done:
@ Reduce the sum to a single value.
VADD.S32 d0, d0, d1
VPADDL.S32 d0, d0
ADDS r12, r12, #4
BLE celt_pitch_xcorr_neon_process_remaining_loop_done
@ Sum terms 1 at a time.
celt_pitch_xcorr_neon_process_remaining_loop1:
VLD1.16 {d2[]}, [r4]!
VLD1.16 {d3[]}, [r5]!
SUBS r12, r12, #1
VMLAL.S16 q0, d2, d3
BGT celt_pitch_xcorr_neon_process_remaining_loop1
celt_pitch_xcorr_neon_process_remaining_loop_done:
VST1.32 {d0[0]}, [r2]!
VMAX.S32 d30, d30, d0
SUBS r6, r6, #1
@ _y++
ADD r1, r1, #2
@ if (--max_pitch > 0) goto celt_pitch_xcorr_neon_process_remaining
BGT celt_pitch_xcorr_neon_process_remaining
celt_pitch_xcorr_neon_done:
VMOV.32 r0, d30[0]
LDMFD sp!, {r4-r6, pc}
.size celt_pitch_xcorr_neon, .-celt_pitch_xcorr_neon @ ENDP
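@
@ A hedged C sketch of the driver loop implemented by celt_pitch_xcorr_neon
@ above (illustrative only; maxcorr starts at 1 to mirror VMOV.S32 q15, #1):
@
@   #include <stdint.h>
@   static int32_t celt_pitch_xcorr_ref(const int16_t *x, const int16_t *y,
@                                       int32_t *xcorr, int len, int max_pitch)
@   {
@       int32_t maxcorr = 1;
@       for (int i = 0; i < max_pitch; i++) {
@           int32_t sum = 0;
@           for (int j = 0; j < len; j++)
@               sum += (int32_t)x[j] * y[j + i];
@           xcorr[i] = sum;
@           if (sum > maxcorr)
@               maxcorr = sum;
@       }
@       return maxcorr;
@   }
@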
.endif
.if OPUS_ARM_MAY_HAVE_EDSP
@ This will get used on ARMv7 devices without NEON, so it has been optimized
@ to take advantage of dual-issuing where possible.
.type xcorr_kernel_edsp, %function; xcorr_kernel_edsp: @ PROC
xcorr_kernel_edsp_start:
@ input:
@ r3 = int len
@ r4 = opus_val16 *_x (must be 32-bit aligned)
@ r5 = opus_val16 *_y (must be 32-bit aligned)
@ r6...r9 = opus_val32 sum[4]
@ output:
@ r6...r9 = opus_val32 sum[4]
@ preserved: r0-r5
@ internal usage
@ r2 = int j
@ r12,r14 = opus_val16 x[4]
@ r10,r11 = opus_val16 y[4]
STMFD sp!, {r2,r4,r5,lr}
LDR r10, [r5], #4 @ Load y[0...1]
SUBS r2, r3, #4 @ j = len-4
LDR r11, [r5], #4 @ Load y[2...3]
BLE xcorr_kernel_edsp_process4_done
LDR r12, [r4], #4 @ Load x[0...1]
@ Stall
xcorr_kernel_edsp_process4:
@ The multiplies must issue from pipeline 0, and can't dual-issue with each
@ other. Every other instruction here dual-issues with a multiply, and is
@ thus "free". There should be no stalls in the body of the loop.
SMLABB r6, r12, r10, r6 @ sum[0] = MAC16_16(sum[0],x_0,y_0)
LDR r14, [r4], #4 @ Load x[2...3]
SMLABT r7, r12, r10, r7 @ sum[1] = MAC16_16(sum[1],x_0,y_1)
SUBS r2, r2, #4 @ j-=4
SMLABB r8, r12, r11, r8 @ sum[2] = MAC16_16(sum[2],x_0,y_2)
SMLABT r9, r12, r11, r9 @ sum[3] = MAC16_16(sum[3],x_0,y_3)
SMLATT r6, r12, r10, r6 @ sum[0] = MAC16_16(sum[0],x_1,y_1)
LDR r10, [r5], #4 @ Load y[4...5]
SMLATB r7, r12, r11, r7 @ sum[1] = MAC16_16(sum[1],x_1,y_2)
SMLATT r8, r12, r11, r8 @ sum[2] = MAC16_16(sum[2],x_1,y_3)
SMLATB r9, r12, r10, r9 @ sum[3] = MAC16_16(sum[3],x_1,y_4)
LDRGT r12, [r4], #4 @ Load x[0...1]
SMLABB r6, r14, r11, r6 @ sum[0] = MAC16_16(sum[0],x_2,y_2)
SMLABT r7, r14, r11, r7 @ sum[1] = MAC16_16(sum[1],x_2,y_3)
SMLABB r8, r14, r10, r8 @ sum[2] = MAC16_16(sum[2],x_2,y_4)
SMLABT r9, r14, r10, r9 @ sum[3] = MAC16_16(sum[3],x_2,y_5)
SMLATT r6, r14, r11, r6 @ sum[0] = MAC16_16(sum[0],x_3,y_3)
LDR r11, [r5], #4 @ Load y[6...7]
SMLATB r7, r14, r10, r7 @ sum[1] = MAC16_16(sum[1],x_3,y_4)
SMLATT r8, r14, r10, r8 @ sum[2] = MAC16_16(sum[2],x_3,y_5)
SMLATB r9, r14, r11, r9 @ sum[3] = MAC16_16(sum[3],x_3,y_6)
BGT xcorr_kernel_edsp_process4
xcorr_kernel_edsp_process4_done:
ADDS r2, r2, #4
BLE xcorr_kernel_edsp_done
LDRH r12, [r4], #2 @ r12 = *x++
SUBS r2, r2, #1 @ j--
@ Stall
SMLABB r6, r12, r10, r6 @ sum[0] = MAC16_16(sum[0],x,y_0)
LDRHGT r14, [r4], #2 @ r14 = *x++
SMLABT r7, r12, r10, r7 @ sum[1] = MAC16_16(sum[1],x,y_1)
SMLABB r8, r12, r11, r8 @ sum[2] = MAC16_16(sum[2],x,y_2)
SMLABT r9, r12, r11, r9 @ sum[3] = MAC16_16(sum[3],x,y_3)
BLE xcorr_kernel_edsp_done
SMLABT r6, r14, r10, r6 @ sum[0] = MAC16_16(sum[0],x,y_1)
SUBS r2, r2, #1 @ j--
SMLABB r7, r14, r11, r7 @ sum[1] = MAC16_16(sum[1],x,y_2)
LDRH r10, [r5], #2 @ r10 = y_4 = *y++
SMLABT r8, r14, r11, r8 @ sum[2] = MAC16_16(sum[2],x,y_3)
LDRHGT r12, [r4], #2 @ r12 = *x++
SMLABB r9, r14, r10, r9 @ sum[3] = MAC16_16(sum[3],x,y_4)
BLE xcorr_kernel_edsp_done
SMLABB r6, r12, r11, r6 @ sum[0] = MAC16_16(sum[0],tmp,y_2)
CMP r2, #1 @ j--
SMLABT r7, r12, r11, r7 @ sum[1] = MAC16_16(sum[1],tmp,y_3)
LDRH r2, [r5], #2 @ r2 = y_5 = *y++
SMLABB r8, r12, r10, r8 @ sum[2] = MAC16_16(sum[2],tmp,y_4)
LDRHGT r14, [r4] @ r14 = *x
SMLABB r9, r12, r2, r9 @ sum[3] = MAC16_16(sum[3],tmp,y_5)
BLE xcorr_kernel_edsp_done
SMLABT r6, r14, r11, r6 @ sum[0] = MAC16_16(sum[0],tmp,y_3)
LDRH r11, [r5] @ r11 = y_6 = *y
SMLABB r7, r14, r10, r7 @ sum[1] = MAC16_16(sum[1],tmp,y_4)
SMLABB r8, r14, r2, r8 @ sum[2] = MAC16_16(sum[2],tmp,y_5)
SMLABB r9, r14, r11, r9 @ sum[3] = MAC16_16(sum[3],tmp,y_6)
xcorr_kernel_edsp_done:
LDMFD sp!, {r2,r4,r5,pc}
.size xcorr_kernel_edsp, .-xcorr_kernel_edsp @ ENDP
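@
@ Each SMLA<x><y> above multiplies one signed 16-bit half of two packed words
@ and accumulates into a 32-bit sum, which is what the MAC16_16() comments
@ refer to. A small C sketch of that pattern (helper names are illustrative):
@
@   #include <stdint.h>
@   static inline int16_t lo16(uint32_t w) { return (int16_t)(w & 0xffff); }
@   static inline int16_t hi16(uint32_t w) { return (int16_t)(w >> 16); }
@   /* e.g. SMLABT sum, xw, yw, sum  ~  sum += lo16(xw) * hi16(yw) */
@   static inline int32_t mac16_16_bt(int32_t sum, uint32_t xw, uint32_t yw)
@   {
@       return sum + (int32_t)lo16(xw) * hi16(yw);
@   }
@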
.type celt_pitch_xcorr_edsp, %function; celt_pitch_xcorr_edsp: @ PROC
@ input:
@ r0 = opus_val16 *_x (must be 32-bit aligned)
@ r1 = opus_val16 *_y (only needs to be 16-bit aligned)
@ r2 = opus_val32 *xcorr
@ r3 = int len
@ output:
@ r0 = maxcorr
@ internal usage
@ r4 = opus_val16 *x
@ r5 = opus_val16 *y
@ r6 = opus_val32 sum0
@ r7 = opus_val32 sum1
@ r8 = opus_val32 sum2
@ r9 = opus_val32 sum3
@ r1 = int max_pitch
@ r12 = int j
@ ignored:
@ int arch
STMFD sp!, {r4-r11, lr}
MOV r5, r1
LDR r1, [sp, #36]
MOV r4, r0
TST r5, #3
@ maxcorr = 1
MOV r0, #1
BEQ celt_pitch_xcorr_edsp_process1u_done
@ Compute one sum at the start to make y 32-bit aligned.
SUBS r12, r3, #4
@ r14 = sum = 0
MOV r14, #0
LDRH r8, [r5], #2
BLE celt_pitch_xcorr_edsp_process1u_loop4_done
LDR r6, [r4], #4
MOV r8, r8, LSL #16
celt_pitch_xcorr_edsp_process1u_loop4:
LDR r9, [r5], #4
SMLABT r14, r6, r8, r14 @ sum = MAC16_16(sum, x_0, y_0)
LDR r7, [r4], #4
SMLATB r14, r6, r9, r14 @ sum = MAC16_16(sum, x_1, y_1)
LDR r8, [r5], #4
SMLABT r14, r7, r9, r14 @ sum = MAC16_16(sum, x_2, y_2)
SUBS r12, r12, #4 @ j-=4
SMLATB r14, r7, r8, r14 @ sum = MAC16_16(sum, x_3, y_3)
LDRGT r6, [r4], #4
BGT celt_pitch_xcorr_edsp_process1u_loop4
MOV r8, r8, LSR #16
celt_pitch_xcorr_edsp_process1u_loop4_done:
ADDS r12, r12, #4
celt_pitch_xcorr_edsp_process1u_loop1:
LDRHGE r6, [r4], #2
@ Stall
SMLABBGE r14, r6, r8, r14 @ sum = MAC16_16(sum, *x, *y)
SUBSGE r12, r12, #1
LDRHGT r8, [r5], #2
BGT celt_pitch_xcorr_edsp_process1u_loop1
@ Restore _x
SUB r4, r4, r3, LSL #1
@ Restore and advance _y
SUB r5, r5, r3, LSL #1
@ maxcorr = max(maxcorr, sum)
CMP r0, r14
ADD r5, r5, #2
MOVLT r0, r14
SUBS r1, r1, #1
@ xcorr[i] = sum
STR r14, [r2], #4
BLE celt_pitch_xcorr_edsp_done
celt_pitch_xcorr_edsp_process1u_done:
@ if (max_pitch < 4) goto celt_pitch_xcorr_edsp_process2
SUBS r1, r1, #4
BLT celt_pitch_xcorr_edsp_process2
celt_pitch_xcorr_edsp_process4:
@ xcorr_kernel_edsp parameters:
@ r3 = len, r4 = _x, r5 = _y, r6...r9 = sum[4] = {0, 0, 0, 0}
MOV r6, #0
MOV r7, #0
MOV r8, #0
MOV r9, #0
BL xcorr_kernel_edsp_start @ xcorr_kernel_edsp(_x, _y+i, xcorr+i, len)
@ maxcorr = max(maxcorr, sum0, sum1, sum2, sum3)
CMP r0, r6
@ _y+=4
ADD r5, r5, #8
MOVLT r0, r6
CMP r0, r7
MOVLT r0, r7
CMP r0, r8
MOVLT r0, r8
CMP r0, r9
MOVLT r0, r9
STMIA r2!, {r6-r9}
SUBS r1, r1, #4
BGE celt_pitch_xcorr_edsp_process4
celt_pitch_xcorr_edsp_process2:
ADDS r1, r1, #2
BLT celt_pitch_xcorr_edsp_process1a
SUBS r12, r3, #4
@ {r10, r11} = {sum0, sum1} = {0, 0}
MOV r10, #0
MOV r11, #0
LDR r8, [r5], #4
BLE celt_pitch_xcorr_edsp_process2_loop_done
LDR r6, [r4], #4
LDR r9, [r5], #4
celt_pitch_xcorr_edsp_process2_loop4:
SMLABB r10, r6, r8, r10 @ sum0 = MAC16_16(sum0, x_0, y_0)
LDR r7, [r4], #4
SMLABT r11, r6, r8, r11 @ sum1 = MAC16_16(sum1, x_0, y_1)
SUBS r12, r12, #4 @ j-=4
SMLATT r10, r6, r8, r10 @ sum0 = MAC16_16(sum0, x_1, y_1)
LDR r8, [r5], #4
SMLATB r11, r6, r9, r11 @ sum1 = MAC16_16(sum1, x_1, y_2)
LDRGT r6, [r4], #4
SMLABB r10, r7, r9, r10 @ sum0 = MAC16_16(sum0, x_2, y_2)
SMLABT r11, r7, r9, r11 @ sum1 = MAC16_16(sum1, x_2, y_3)
SMLATT r10, r7, r9, r10 @ sum0 = MAC16_16(sum0, x_3, y_3)
LDRGT r9, [r5], #4
SMLATB r11, r7, r8, r11 @ sum1 = MAC16_16(sum1, x_3, y_4)
BGT celt_pitch_xcorr_edsp_process2_loop4
celt_pitch_xcorr_edsp_process2_loop_done:
ADDS r12, r12, #2
BLE celt_pitch_xcorr_edsp_process2_1
LDR r6, [r4], #4
@ Stall
SMLABB r10, r6, r8, r10 @ sum0 = MAC16_16(sum0, x_0, y_0)
LDR r9, [r5], #4
SMLABT r11, r6, r8, r11 @ sum1 = MAC16_16(sum1, x_0, y_1)
SUB r12, r12, #2
SMLATT r10, r6, r8, r10 @ sum0 = MAC16_16(sum0, x_1, y_1)
MOV r8, r9
SMLATB r11, r6, r9, r11 @ sum1 = MAC16_16(sum1, x_1, y_2)
celt_pitch_xcorr_edsp_process2_1:
LDRH r6, [r4], #2
ADDS r12, r12, #1
@ Stall
SMLABB r10, r6, r8, r10 @ sum0 = MAC16_16(sum0, x_0, y_0)
LDRHGT r7, [r4], #2
SMLABT r11, r6, r8, r11 @ sum1 = MAC16_16(sum1, x_0, y_1)
BLE celt_pitch_xcorr_edsp_process2_done
LDRH r9, [r5], #2
SMLABT r10, r7, r8, r10 @ sum0 = MAC16_16(sum0, x_0, y_1)
SMLABB r11, r7, r9, r11 @ sum1 = MAC16_16(sum1, x_0, y_2)
celt_pitch_xcorr_edsp_process2_done:
@ Restore _x
SUB r4, r4, r3, LSL #1
@ Restore and advance _y
SUB r5, r5, r3, LSL #1
@ maxcorr = max(maxcorr, sum0)
CMP r0, r10
ADD r5, r5, #2
MOVLT r0, r10
SUB r1, r1, #2
@ maxcorr = max(maxcorr, sum1)
CMP r0, r11
@ xcorr[i] = sum
STR r10, [r2], #4
MOVLT r0, r11
STR r11, [r2], #4
celt_pitch_xcorr_edsp_process1a:
ADDS r1, r1, #1
BLT celt_pitch_xcorr_edsp_done
SUBS r12, r3, #4
@ r14 = sum = 0
MOV r14, #0
BLT celt_pitch_xcorr_edsp_process1a_loop_done
LDR r6, [r4], #4
LDR r8, [r5], #4
LDR r7, [r4], #4
LDR r9, [r5], #4
celt_pitch_xcorr_edsp_process1a_loop4:
SMLABB r14, r6, r8, r14 @ sum = MAC16_16(sum, x_0, y_0)
SUBS r12, r12, #4 @ j-=4
SMLATT r14, r6, r8, r14 @ sum = MAC16_16(sum, x_1, y_1)
LDRGE r6, [r4], #4
SMLABB r14, r7, r9, r14 @ sum = MAC16_16(sum, x_2, y_2)
LDRGE r8, [r5], #4
SMLATT r14, r7, r9, r14 @ sum = MAC16_16(sum, x_3, y_3)
LDRGE r7, [r4], #4
LDRGE r9, [r5], #4
BGE celt_pitch_xcorr_edsp_process1a_loop4
celt_pitch_xcorr_edsp_process1a_loop_done:
ADDS r12, r12, #2
LDRGE r6, [r4], #4
LDRGE r8, [r5], #4
@ Stall
SMLABBGE r14, r6, r8, r14 @ sum = MAC16_16(sum, x_0, y_0)
SUBGE r12, r12, #2
SMLATTGE r14, r6, r8, r14 @ sum = MAC16_16(sum, x_1, y_1)
ADDS r12, r12, #1
LDRHGE r6, [r4], #2
LDRHGE r8, [r5], #2
@ Stall
SMLABBGE r14, r6, r8, r14 @ sum = MAC16_16(sum, *x, *y)
@ maxcorr = max(maxcorr, sum)
CMP r0, r14
@ xcorr[i] = sum
STR r14, [r2], #4
MOVLT r0, r14
celt_pitch_xcorr_edsp_done:
LDMFD sp!, {r4-r11, pc}
.size celt_pitch_xcorr_edsp, .-celt_pitch_xcorr_edsp @ ENDP
.endif
@ END:
.section .note.GNU-stack,"",%progbits
|
adventuregamestudio/ags
| 6,546
|
PSP/exception/prx/exception_asm.S
|
#include "as_reg_compat.h"
.set noreorder
.set noat
#define BadVAddr $8 // Address for the most recent address-related exception
#define Status $12 // Processor status and control
#define Cause $13 // Cause of last general exception
#define EPC $14 // Program counter at last exception
#define PRId $15 // Processor identification and revision
#define FSR $31
#define FIR $0
#define REG_GPR_0 (6*4)
#define REG_GPR_1 (REG_GPR_0 + 4)
#define REG_GPR_2 (REG_GPR_1 + 4)
#define REG_GPR_3 (REG_GPR_2 + 4)
#define REG_GPR_4 (REG_GPR_3 + 4)
#define REG_GPR_5 (REG_GPR_4 + 4)
#define REG_GPR_6 (REG_GPR_5 + 4)
#define REG_GPR_7 (REG_GPR_6 + 4)
#define REG_GPR_8 (REG_GPR_7 + 4)
#define REG_GPR_9 (REG_GPR_8 + 4)
#define REG_GPR_10 (REG_GPR_9 + 4)
#define REG_GPR_11 (REG_GPR_10 + 4)
#define REG_GPR_12 (REG_GPR_11 + 4)
#define REG_GPR_13 (REG_GPR_12 + 4)
#define REG_GPR_14 (REG_GPR_13 + 4)
#define REG_GPR_15 (REG_GPR_14 + 4)
#define REG_GPR_16 (REG_GPR_15 + 4)
#define REG_GPR_17 (REG_GPR_16 + 4)
#define REG_GPR_18 (REG_GPR_17 + 4)
#define REG_GPR_19 (REG_GPR_18 + 4)
#define REG_GPR_20 (REG_GPR_19 + 4)
#define REG_GPR_21 (REG_GPR_20 + 4)
#define REG_GPR_22 (REG_GPR_21 + 4)
#define REG_GPR_23 (REG_GPR_22 + 4)
#define REG_GPR_24 (REG_GPR_23 + 4)
#define REG_GPR_25 (REG_GPR_24 + 4)
#define REG_GPR_26 (REG_GPR_25 + 4)
#define REG_GPR_27 (REG_GPR_26 + 4)
#define REG_GPR_28 (REG_GPR_27 + 4)
#define REG_GPR_29 (REG_GPR_28 + 4)
#define REG_GPR_30 (REG_GPR_29 + 4)
#define REG_GPR_31 (REG_GPR_30 + 4)
#define REG_STATUS (REG_GPR_31 + 4)
#define REG_LO (REG_STATUS + 4)
#define REG_HI (REG_LO + 4)
#define REG_BADVADDR (REG_HI + 4)
#define REG_CAUSE (REG_BADVADDR + 4)
#define REG_EPC (REG_CAUSE + 4)
#define REG_FPR_0 (REG_EPC + 4)
#define REG_FPR_1 (REG_FPR_0 + 4)
#define REG_FPR_2 (REG_FPR_1 + 4)
#define REG_FPR_3 (REG_FPR_2 + 4)
#define REG_FPR_4 (REG_FPR_3 + 4)
#define REG_FPR_5 (REG_FPR_4 + 4)
#define REG_FPR_6 (REG_FPR_5 + 4)
#define REG_FPR_7 (REG_FPR_6 + 4)
#define REG_FPR_8 (REG_FPR_7 + 4)
#define REG_FPR_9 (REG_FPR_8 + 4)
#define REG_FPR_10 (REG_FPR_9 + 4)
#define REG_FPR_11 (REG_FPR_10 + 4)
#define REG_FPR_12 (REG_FPR_11 + 4)
#define REG_FPR_13 (REG_FPR_12 + 4)
#define REG_FPR_14 (REG_FPR_13 + 4)
#define REG_FPR_15 (REG_FPR_14 + 4)
#define REG_FPR_16 (REG_FPR_15 + 4)
#define REG_FPR_17 (REG_FPR_16 + 4)
#define REG_FPR_18 (REG_FPR_17 + 4)
#define REG_FPR_19 (REG_FPR_18 + 4)
#define REG_FPR_20 (REG_FPR_19 + 4)
#define REG_FPR_21 (REG_FPR_20 + 4)
#define REG_FPR_22 (REG_FPR_21 + 4)
#define REG_FPR_23 (REG_FPR_22 + 4)
#define REG_FPR_24 (REG_FPR_23 + 4)
#define REG_FPR_25 (REG_FPR_24 + 4)
#define REG_FPR_26 (REG_FPR_25 + 4)
#define REG_FPR_27 (REG_FPR_26 + 4)
#define REG_FPR_28 (REG_FPR_27 + 4)
#define REG_FPR_29 (REG_FPR_28 + 4)
#define REG_FPR_30 (REG_FPR_29 + 4)
#define REG_FPR_31 (REG_FPR_30 + 4)
#define REG_FSR (REG_FPR_31 + 4)
#define REG_FIR (REG_FSR + 4)
#define REG_FP (REG_FIR + 4)
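/*
 * A C view of the save area implied by the offsets above. Field names are
 * illustrative (only the offsets come from the #defines); the 24 bytes before
 * REG_GPR_0 are not written by this handler.
 *
 *   #include <stdint.h>
 *   struct exception_reg_block {
 *       uint32_t header[6];   // 0x00..0x17, unused here
 *       uint32_t gpr[32];     // REG_GPR_0 .. REG_GPR_31
 *       uint32_t status;      // REG_STATUS
 *       uint32_t lo, hi;      // REG_LO, REG_HI
 *       uint32_t badvaddr;    // REG_BADVADDR
 *       uint32_t cause;       // REG_CAUSE
 *       uint32_t epc;         // REG_EPC
 *       uint32_t fpr[32];     // REG_FPR_0 .. REG_FPR_31
 *       uint32_t fsr, fir;    // REG_FSR, REG_FIR
 *       uint32_t fp;          // REG_FP
 *   };
 */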
.extern exception_regs
.extern curr_handler
.global _pspDebugExceptionHandler
.ent _pspDebugExceptionHandler
_pspDebugExceptionHandler:
nop
nop
lw $v0, exception_regs
sw $0, REG_GPR_0($v0)
sw $1, REG_GPR_1($v0)
cfc0 $1, $4 # Get original v0
sw $1, REG_GPR_2($v0)
cfc0 $1, $5 # Get original v1
sw $1, REG_GPR_3($v0)
sw $4, REG_GPR_4($v0)
sw $5, REG_GPR_5($v0)
sw $6, REG_GPR_6($v0)
sw $7, REG_GPR_7($v0)
sw $8, REG_GPR_8($v0)
sw $9, REG_GPR_9($v0)
sw $10, REG_GPR_10($v0)
sw $11, REG_GPR_11($v0)
sw $12, REG_GPR_12($v0)
sw $13, REG_GPR_13($v0)
sw $14, REG_GPR_14($v0)
sw $15, REG_GPR_15($v0)
sw $16, REG_GPR_16($v0)
sw $17, REG_GPR_17($v0)
sw $18, REG_GPR_18($v0)
sw $19, REG_GPR_19($v0)
sw $20, REG_GPR_20($v0)
sw $21, REG_GPR_21($v0)
sw $22, REG_GPR_22($v0)
sw $23, REG_GPR_23($v0)
sw $24, REG_GPR_24($v0)
sw $25, REG_GPR_25($v0)
sw $26, REG_GPR_26($v0)
sw $27, REG_GPR_27($v0)
sw $28, REG_GPR_28($v0)
sw $29, REG_GPR_29($v0)
sw $30, REG_GPR_30($v0)
sw $31, REG_GPR_31($v0)
mflo $v1
sw $v1, REG_LO($v0)
mfhi $v1
sw $v1, REG_HI($v0)
mfc0 $v1, BadVAddr
sw $v1, REG_BADVADDR($v0)
mfc0 $v1, Cause
sw $v1, REG_CAUSE($v0)
mfc0 $v1, EPC
sw $v1, REG_EPC($v0)
mfc0 $v1, Status
sw $v1, REG_STATUS($v0)
# Check if cop1 is enabled and skip saving the FPU state if not
lui $a0, 0x2000
and $a0, $a0, $v1
beq $a0, $0, 1f
nop
swc1 $0, REG_FPR_0($v0)
swc1 $1, REG_FPR_1($v0)
swc1 $2, REG_FPR_2($v0)
swc1 $3, REG_FPR_3($v0)
swc1 $4, REG_FPR_4($v0)
swc1 $5, REG_FPR_5($v0)
swc1 $6, REG_FPR_6($v0)
swc1 $7, REG_FPR_7($v0)
swc1 $8, REG_FPR_8($v0)
swc1 $9, REG_FPR_9($v0)
swc1 $10, REG_FPR_10($v0)
swc1 $11, REG_FPR_11($v0)
swc1 $12, REG_FPR_12($v0)
swc1 $13, REG_FPR_13($v0)
swc1 $14, REG_FPR_14($v0)
swc1 $15, REG_FPR_15($v0)
swc1 $16, REG_FPR_16($v0)
swc1 $17, REG_FPR_17($v0)
swc1 $18, REG_FPR_18($v0)
swc1 $19, REG_FPR_19($v0)
swc1 $20, REG_FPR_20($v0)
swc1 $21, REG_FPR_21($v0)
swc1 $22, REG_FPR_22($v0)
swc1 $23, REG_FPR_23($v0)
swc1 $24, REG_FPR_24($v0)
swc1 $25, REG_FPR_25($v0)
swc1 $26, REG_FPR_26($v0)
swc1 $27, REG_FPR_27($v0)
swc1 $28, REG_FPR_28($v0)
swc1 $29, REG_FPR_29($v0)
swc1 $30, REG_FPR_30($v0)
swc1 $31, REG_FPR_31($v0)
cfc1 $t0, FSR
sw $t0, REG_FSR($v0)
cfc1 $t0, FIR
sw $t0, REG_FIR($v0)
ctc1 $0, FSR # Clear any cause flags
# Jump target for ignore cop1
1:
sw $sp, REG_FP($v0)
move $a0, $v0 # Pass the saved register block to the handler
lw $2, curr_handler
mtc0 $2, EPC # eret below resumes execution in the registered handler
nop
nop
eret
nop
nop
.end _pspDebugExceptionHandler
#include "pspimport.s"
IMPORT_START "ExceptionManagerForKernel",0x00010011
IMPORT_FUNC "ExceptionManagerForKernel",0x565C0B0E,sceKernelRegisterDefaultExceptionHandler371
|
aead/chacha20
| 3,531
|
chacha/chacha_386.s
|
// Copyright (c) 2016 Andreas Auernhammer. All rights reserved.
// Use of this source code is governed by a license that can be
// found in the LICENSE file.
// +build 386,!gccgo,!appengine,!nacl
#include "const.s"
#include "macro.s"
// FINALIZE xors len bytes from src and block using
// the temp. registers t0 and t1 and writes the result
// to dst.
#define FINALIZE(dst, src, block, len, t0, t1) \
XORL t0, t0; \
XORL t1, t1; \
FINALIZE_LOOP:; \
MOVB 0(src), t0; \
MOVB 0(block), t1; \
XORL t0, t1; \
MOVB t1, 0(dst); \
INCL src; \
INCL block; \
INCL dst; \
DECL len; \
JG FINALIZE_LOOP \
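// A minimal C sketch of what FINALIZE does (illustrative only): xor the `len`
// trailing bytes of src with the buffered keystream block and store into dst.
//
//   #include <stddef.h>
//   #include <stdint.h>
//   static void finalize_ref(uint8_t *dst, const uint8_t *src,
//                            const uint8_t *block, size_t len)
//   {
//       for (size_t i = 0; i < len; i++)
//           dst[i] = src[i] ^ block[i];
//   }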
#define Dst DI
#define Nonce AX
#define Key BX
#define Rounds DX
// func hChaCha20SSE2(out *[32]byte, nonce *[16]byte, key *[32]byte)
TEXT ·hChaCha20SSE2(SB), 4, $0-12
MOVL out+0(FP), Dst
MOVL nonce+4(FP), Nonce
MOVL key+8(FP), Key
MOVOU ·sigma<>(SB), X0
MOVOU 0*16(Key), X1
MOVOU 1*16(Key), X2
MOVOU 0*16(Nonce), X3
MOVL $20, Rounds
chacha_loop:
CHACHA_QROUND_SSE2(X0, X1, X2, X3, X4)
CHACHA_SHUFFLE_SSE(X1, X2, X3)
CHACHA_QROUND_SSE2(X0, X1, X2, X3, X4)
CHACHA_SHUFFLE_SSE(X3, X2, X1)
SUBL $2, Rounds
JNZ chacha_loop
MOVOU X0, 0*16(Dst)
MOVOU X3, 1*16(Dst)
RET
// func hChaCha20SSSE3(out *[32]byte, nonce *[16]byte, key *[32]byte)
TEXT ·hChaCha20SSSE3(SB), 4, $0-12
MOVL out+0(FP), Dst
MOVL nonce+4(FP), Nonce
MOVL key+8(FP), Key
MOVOU ·sigma<>(SB), X0
MOVOU 0*16(Key), X1
MOVOU 1*16(Key), X2
MOVOU 0*16(Nonce), X3
MOVL $20, Rounds
MOVOU ·rol16<>(SB), X5
MOVOU ·rol8<>(SB), X6
chacha_loop:
CHACHA_QROUND_SSSE3(X0, X1, X2, X3, X4, X5, X6)
CHACHA_SHUFFLE_SSE(X1, X2, X3)
CHACHA_QROUND_SSSE3(X0, X1, X2, X3, X4, X5, X6)
CHACHA_SHUFFLE_SSE(X3, X2, X1)
SUBL $2, Rounds
JNZ chacha_loop
MOVOU X0, 0*16(Dst)
MOVOU X3, 1*16(Dst)
RET
#undef Dst
#undef Nonce
#undef Key
#undef Rounds
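// The CHACHA_QROUND_* and CHACHA_SHUFFLE_SSE macros are defined in macro.s
// (not shown here). For reference, a C sketch of the standard ChaCha
// quarter-round that the QROUND macros vectorize across four columns or
// diagonals at once:
//
//   #include <stdint.h>
//   #define ROTL32(v, n) (((v) << (n)) | ((v) >> (32 - (n))))
//   static void quarter_round(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
//   {
//       *a += *b; *d ^= *a; *d = ROTL32(*d, 16);
//       *c += *d; *b ^= *c; *b = ROTL32(*b, 12);
//       *a += *b; *d ^= *a; *d = ROTL32(*d, 8);
//       *c += *d; *b ^= *c; *b = ROTL32(*b, 7);
//   }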
#define State AX
#define Dst DI
#define Src SI
#define Len DX
#define Tmp0 BX
#define Tmp1 BP
// func xorKeyStreamSSE2(dst, src []byte, block, state *[64]byte, rounds int) int
TEXT ·xorKeyStreamSSE2(SB), 4, $0-40
MOVL dst_base+0(FP), Dst
MOVL src_base+12(FP), Src
MOVL state+28(FP), State
MOVL src_len+16(FP), Len
MOVL $0, ret+36(FP) // Number of bytes written to the keystream buffer - 0 iff len mod 64 == 0
MOVOU 0*16(State), X0
MOVOU 1*16(State), X1
MOVOU 2*16(State), X2
MOVOU 3*16(State), X3
TESTL Len, Len
JZ DONE
GENERATE_KEYSTREAM:
MOVO X0, X4
MOVO X1, X5
MOVO X2, X6
MOVO X3, X7
MOVL rounds+32(FP), Tmp0
CHACHA_LOOP:
CHACHA_QROUND_SSE2(X4, X5, X6, X7, X0)
CHACHA_SHUFFLE_SSE(X5, X6, X7)
CHACHA_QROUND_SSE2(X4, X5, X6, X7, X0)
CHACHA_SHUFFLE_SSE(X7, X6, X5)
SUBL $2, Tmp0
JA CHACHA_LOOP
MOVOU 0*16(State), X0 // Restore X0 from state
PADDL X0, X4
PADDL X1, X5
PADDL X2, X6
PADDL X3, X7
MOVOU ·one<>(SB), X0
PADDQ X0, X3
CMPL Len, $64
JL BUFFER_KEYSTREAM
XOR_SSE(Dst, Src, 0, X4, X5, X6, X7, X0)
MOVOU 0*16(State), X0 // Restore X0 from state
ADDL $64, Src
ADDL $64, Dst
SUBL $64, Len
JZ DONE
JMP GENERATE_KEYSTREAM // There is at least one more plaintext byte
BUFFER_KEYSTREAM:
MOVL block+24(FP), State
MOVOU X4, 0(State)
MOVOU X5, 16(State)
MOVOU X6, 32(State)
MOVOU X7, 48(State)
MOVL Len, ret+36(FP) // Number of bytes written to the keystream buffer - 0 < Len < 64
FINALIZE(Dst, Src, State, Len, Tmp0, Tmp1)
DONE:
MOVL state+28(FP), State
MOVOU X3, 3*16(State)
RET
#undef State
#undef Dst
#undef Src
#undef Len
#undef Tmp0
#undef Tmp1
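// A hedged C sketch of the control flow of xorKeyStreamSSE2 above (the
// chacha_block helper is a stand-in for the SSE2 block generation and is not
// part of this package; the real code keeps full blocks in registers): whole
// 64-byte blocks are xored directly, a trailing partial block is buffered in
// `block` and its length is returned.
//
//   #include <stddef.h>
//   #include <stdint.h>
//   typedef void (*chacha_block_fn)(uint8_t out[64], uint32_t state[16]);
//   static size_t xor_key_stream_ref(uint8_t *dst, const uint8_t *src,
//                                    size_t len, uint8_t block[64],
//                                    uint32_t state[16], chacha_block_fn chacha_block)
//   {
//       while (len > 0) {
//           chacha_block(block, state);       /* also advances the counter */
//           size_t n = len < 64 ? len : 64;
//           for (size_t i = 0; i < n; i++)
//               dst[i] = src[i] ^ block[i];
//           if (n < 64)
//               return n;                     /* bytes left in the buffer */
//           dst += 64; src += 64; len -= 64;
//       }
//       return 0;
//   }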
|