Dataset schema: repo_id (string, 5-115 chars) · size (int64, 590-5.01M) · file_path (string, 4-212 chars) · content (string, 590-5.01M). Each row below is shown as repo_id · size · file_path, followed by the file's content.
aixcc-public/challenge-001-exemplar-source · 9,813 bytes · arch/powerpc/lib/copy_32.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Memory copy functions for 32-bit PowerPC. * * Copyright (C) 1996-2005 Paul Mackerras. */ #include <asm/processor.h> #include <asm/cache.h> #include <asm/errno.h> #include <asm/ppc_asm.h> #include <asm/export.h> #include <asm/code-patching-asm.h> #include <asm/kasan.h> #define COPY_16_BYTES \ lwz r7,4(r4); \ lwz r8,8(r4); \ lwz r9,12(r4); \ lwzu r10,16(r4); \ stw r7,4(r6); \ stw r8,8(r6); \ stw r9,12(r6); \ stwu r10,16(r6) #define COPY_16_BYTES_WITHEX(n) \ 8 ## n ## 0: \ lwz r7,4(r4); \ 8 ## n ## 1: \ lwz r8,8(r4); \ 8 ## n ## 2: \ lwz r9,12(r4); \ 8 ## n ## 3: \ lwzu r10,16(r4); \ 8 ## n ## 4: \ stw r7,4(r6); \ 8 ## n ## 5: \ stw r8,8(r6); \ 8 ## n ## 6: \ stw r9,12(r6); \ 8 ## n ## 7: \ stwu r10,16(r6) #define COPY_16_BYTES_EXCODE(n) \ 9 ## n ## 0: \ addi r5,r5,-(16 * n); \ b 104f; \ 9 ## n ## 1: \ addi r5,r5,-(16 * n); \ b 105f; \ EX_TABLE(8 ## n ## 0b,9 ## n ## 0b); \ EX_TABLE(8 ## n ## 1b,9 ## n ## 0b); \ EX_TABLE(8 ## n ## 2b,9 ## n ## 0b); \ EX_TABLE(8 ## n ## 3b,9 ## n ## 0b); \ EX_TABLE(8 ## n ## 4b,9 ## n ## 1b); \ EX_TABLE(8 ## n ## 5b,9 ## n ## 1b); \ EX_TABLE(8 ## n ## 6b,9 ## n ## 1b); \ EX_TABLE(8 ## n ## 7b,9 ## n ## 1b) .text CACHELINE_BYTES = L1_CACHE_BYTES LG_CACHELINE_BYTES = L1_CACHE_SHIFT CACHELINE_MASK = (L1_CACHE_BYTES-1) #ifndef CONFIG_KASAN _GLOBAL(memset16) rlwinm. r0 ,r5, 31, 1, 31 addi r6, r3, -4 beq- 2f rlwimi r4 ,r4 ,16 ,0 ,15 mtctr r0 1: stwu r4, 4(r6) bdnz 1b 2: andi. r0, r5, 1 beqlr sth r4, 4(r6) blr EXPORT_SYMBOL(memset16) #endif /* * Use dcbz on the complete cache lines in the destination * to set them to zero. This requires that the destination * area is cacheable. -- paulus * * During early init, cache might not be active yet, so dcbz cannot be used. * We therefore skip the optimised bloc that uses dcbz. This jump is * replaced by a nop once cache is active. This is done in machine_init() */ _GLOBAL_KASAN(memset) cmplwi 0,r5,4 blt 7f rlwimi r4,r4,8,16,23 rlwimi r4,r4,16,0,15 stw r4,0(r3) beqlr andi. r0,r3,3 add r5,r0,r5 subf r6,r0,r3 cmplwi 0,r4,0 /* * Skip optimised bloc until cache is enabled. Will be replaced * by 'bne' during boot to use normal procedure if r4 is not zero */ 5: b 2f patch_site 5b, patch__memset_nocache clrlwi r7,r6,32-LG_CACHELINE_BYTES add r8,r7,r5 srwi r9,r8,LG_CACHELINE_BYTES addic. r9,r9,-1 /* total number of complete cachelines */ ble 2f xori r0,r7,CACHELINE_MASK & ~3 srwi. r0,r0,2 beq 3f mtctr r0 4: stwu r4,4(r6) bdnz 4b 3: mtctr r9 li r7,4 10: dcbz r7,r6 addi r6,r6,CACHELINE_BYTES bdnz 10b clrlwi r5,r8,32-LG_CACHELINE_BYTES addi r5,r5,4 2: srwi r0,r5,2 mtctr r0 bdz 6f 1: stwu r4,4(r6) bdnz 1b 6: andi. r5,r5,3 beqlr mtctr r5 addi r6,r6,3 8: stbu r4,1(r6) bdnz 8b blr 7: cmpwi 0,r5,0 beqlr mtctr r5 addi r6,r3,-1 9: stbu r4,1(r6) bdnz 9b blr EXPORT_SYMBOL(memset) EXPORT_SYMBOL_KASAN(memset) /* * This version uses dcbz on the complete cache lines in the * destination area to reduce memory traffic. This requires that * the destination area is cacheable. * We only use this version if the source and dest don't overlap. * -- paulus. * * During early init, cache might not be active yet, so dcbz cannot be used. * We therefore jump to generic_memcpy which doesn't use dcbz. This jump is * replaced by a nop once cache is active. 
This is done in machine_init() */ _GLOBAL_KASAN(memmove) cmplw 0,r3,r4 bgt backwards_memcpy /* fall through */ _GLOBAL_KASAN(memcpy) 1: b generic_memcpy patch_site 1b, patch__memcpy_nocache add r7,r3,r5 /* test if the src & dst overlap */ add r8,r4,r5 cmplw 0,r4,r7 cmplw 1,r3,r8 crand 0,0,4 /* cr0.lt &= cr1.lt */ blt generic_memcpy /* if regions overlap */ addi r4,r4,-4 addi r6,r3,-4 neg r0,r3 andi. r0,r0,CACHELINE_MASK /* # bytes to start of cache line */ beq 58f cmplw 0,r5,r0 /* is this more than total to do? */ blt 63f /* if not much to do */ andi. r8,r0,3 /* get it word-aligned first */ subf r5,r0,r5 mtctr r8 beq+ 61f 70: lbz r9,4(r4) /* do some bytes */ addi r4,r4,1 addi r6,r6,1 stb r9,3(r6) bdnz 70b 61: srwi. r0,r0,2 mtctr r0 beq 58f 72: lwzu r9,4(r4) /* do some words */ stwu r9,4(r6) bdnz 72b 58: srwi. r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */ clrlwi r5,r5,32-LG_CACHELINE_BYTES li r11,4 mtctr r0 beq 63f 53: dcbz r11,r6 COPY_16_BYTES #if L1_CACHE_BYTES >= 32 COPY_16_BYTES #if L1_CACHE_BYTES >= 64 COPY_16_BYTES COPY_16_BYTES #if L1_CACHE_BYTES >= 128 COPY_16_BYTES COPY_16_BYTES COPY_16_BYTES COPY_16_BYTES #endif #endif #endif bdnz 53b 63: srwi. r0,r5,2 mtctr r0 beq 64f 30: lwzu r0,4(r4) stwu r0,4(r6) bdnz 30b 64: andi. r0,r5,3 mtctr r0 beq+ 65f addi r4,r4,3 addi r6,r6,3 40: lbzu r0,1(r4) stbu r0,1(r6) bdnz 40b 65: blr EXPORT_SYMBOL(memcpy) EXPORT_SYMBOL(memmove) EXPORT_SYMBOL_KASAN(memcpy) EXPORT_SYMBOL_KASAN(memmove) generic_memcpy: srwi. r7,r5,3 addi r6,r3,-4 addi r4,r4,-4 beq 2f /* if less than 8 bytes to do */ andi. r0,r6,3 /* get dest word aligned */ mtctr r7 bne 5f 1: lwz r7,4(r4) lwzu r8,8(r4) stw r7,4(r6) stwu r8,8(r6) bdnz 1b andi. r5,r5,7 2: cmplwi 0,r5,4 blt 3f lwzu r0,4(r4) addi r5,r5,-4 stwu r0,4(r6) 3: cmpwi 0,r5,0 beqlr mtctr r5 addi r4,r4,3 addi r6,r6,3 4: lbzu r0,1(r4) stbu r0,1(r6) bdnz 4b blr 5: subfic r0,r0,4 mtctr r0 6: lbz r7,4(r4) addi r4,r4,1 stb r7,4(r6) addi r6,r6,1 bdnz 6b subf r5,r0,r5 rlwinm. r7,r5,32-3,3,31 beq 2b mtctr r7 b 1b _GLOBAL(backwards_memcpy) rlwinm. r7,r5,32-3,3,31 /* r0 = r5 >> 3 */ add r6,r3,r5 add r4,r4,r5 beq 2f andi. r0,r6,3 mtctr r7 bne 5f 1: lwz r7,-4(r4) lwzu r8,-8(r4) stw r7,-4(r6) stwu r8,-8(r6) bdnz 1b andi. r5,r5,7 2: cmplwi 0,r5,4 blt 3f lwzu r0,-4(r4) subi r5,r5,4 stwu r0,-4(r6) 3: cmpwi 0,r5,0 beqlr mtctr r5 4: lbzu r0,-1(r4) stbu r0,-1(r6) bdnz 4b blr 5: mtctr r0 6: lbzu r7,-1(r4) stbu r7,-1(r6) bdnz 6b subf r5,r0,r5 rlwinm. r7,r5,32-3,3,31 beq 2b mtctr r7 b 1b _GLOBAL(__copy_tofrom_user) addi r4,r4,-4 addi r6,r3,-4 neg r0,r3 andi. r0,r0,CACHELINE_MASK /* # bytes to start of cache line */ beq 58f cmplw 0,r5,r0 /* is this more than total to do? */ blt 63f /* if not much to do */ andi. r8,r0,3 /* get it word-aligned first */ mtctr r8 beq+ 61f 70: lbz r9,4(r4) /* do some bytes */ 71: stb r9,4(r6) addi r4,r4,1 addi r6,r6,1 bdnz 70b 61: subf r5,r0,r5 srwi. r0,r0,2 mtctr r0 beq 58f 72: lwzu r9,4(r4) /* do some words */ 73: stwu r9,4(r6) bdnz 72b EX_TABLE(70b,100f) EX_TABLE(71b,101f) EX_TABLE(72b,102f) EX_TABLE(73b,103f) 58: srwi. r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */ clrlwi r5,r5,32-LG_CACHELINE_BYTES li r11,4 beq 63f /* Here we decide how far ahead to prefetch the source */ li r3,4 cmpwi r0,1 li r7,0 ble 114f li r7,1 #if MAX_COPY_PREFETCH > 1 /* Heuristically, for large transfers we prefetch MAX_COPY_PREFETCH cachelines ahead. For small transfers we prefetch 1 cacheline ahead. 
*/ cmpwi r0,MAX_COPY_PREFETCH ble 112f li r7,MAX_COPY_PREFETCH 112: mtctr r7 111: dcbt r3,r4 addi r3,r3,CACHELINE_BYTES bdnz 111b #else dcbt r3,r4 addi r3,r3,CACHELINE_BYTES #endif /* MAX_COPY_PREFETCH > 1 */ 114: subf r8,r7,r0 mr r0,r7 mtctr r8 53: dcbt r3,r4 54: dcbz r11,r6 EX_TABLE(54b,105f) /* the main body of the cacheline loop */ COPY_16_BYTES_WITHEX(0) #if L1_CACHE_BYTES >= 32 COPY_16_BYTES_WITHEX(1) #if L1_CACHE_BYTES >= 64 COPY_16_BYTES_WITHEX(2) COPY_16_BYTES_WITHEX(3) #if L1_CACHE_BYTES >= 128 COPY_16_BYTES_WITHEX(4) COPY_16_BYTES_WITHEX(5) COPY_16_BYTES_WITHEX(6) COPY_16_BYTES_WITHEX(7) #endif #endif #endif bdnz 53b cmpwi r0,0 li r3,4 li r7,0 bne 114b 63: srwi. r0,r5,2 mtctr r0 beq 64f 30: lwzu r0,4(r4) 31: stwu r0,4(r6) bdnz 30b 64: andi. r0,r5,3 mtctr r0 beq+ 65f 40: lbz r0,4(r4) 41: stb r0,4(r6) addi r4,r4,1 addi r6,r6,1 bdnz 40b 65: li r3,0 blr /* read fault, initial single-byte copy */ 100: li r9,0 b 90f /* write fault, initial single-byte copy */ 101: li r9,1 90: subf r5,r8,r5 li r3,0 b 99f /* read fault, initial word copy */ 102: li r9,0 b 91f /* write fault, initial word copy */ 103: li r9,1 91: li r3,2 b 99f /* * this stuff handles faults in the cacheline loop and branches to either * 104f (if in read part) or 105f (if in write part), after updating r5 */ COPY_16_BYTES_EXCODE(0) #if L1_CACHE_BYTES >= 32 COPY_16_BYTES_EXCODE(1) #if L1_CACHE_BYTES >= 64 COPY_16_BYTES_EXCODE(2) COPY_16_BYTES_EXCODE(3) #if L1_CACHE_BYTES >= 128 COPY_16_BYTES_EXCODE(4) COPY_16_BYTES_EXCODE(5) COPY_16_BYTES_EXCODE(6) COPY_16_BYTES_EXCODE(7) #endif #endif #endif /* read fault in cacheline loop */ 104: li r9,0 b 92f /* fault on dcbz (effectively a write fault) */ /* or write fault in cacheline loop */ 105: li r9,1 92: li r3,LG_CACHELINE_BYTES mfctr r8 add r0,r0,r8 b 106f /* read fault in final word loop */ 108: li r9,0 b 93f /* write fault in final word loop */ 109: li r9,1 93: andi. r5,r5,3 li r3,2 b 99f /* read fault in final byte loop */ 110: li r9,0 b 94f /* write fault in final byte loop */ 111: li r9,1 94: li r5,0 li r3,0 /* * At this stage the number of bytes not copied is * r5 + (ctr << r3), and r9 is 0 for read or 1 for write. */ 99: mfctr r0 106: slw r3,r0,r3 add. r3,r3,r5 beq 120f /* shouldn't happen */ cmpwi 0,r9,0 bne 120f /* for a read fault, first try to continue the copy one byte at a time */ mtctr r3 130: lbz r0,4(r4) 131: stb r0,4(r6) addi r4,r4,1 addi r6,r6,1 bdnz 130b /* then clear out the destination: r3 bytes starting at 4(r6) */ 132: mfctr r3 120: blr EX_TABLE(30b,108b) EX_TABLE(31b,109b) EX_TABLE(40b,110b) EX_TABLE(41b,111b) EX_TABLE(130b,132b) EX_TABLE(131b,120b) EXPORT_SYMBOL(__copy_tofrom_user)
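The generic_memcpy path above is the dcbz-free fallback used before the cache is enabled: byte-copy until the destination is word aligned, move pairs of 32-bit words in the main loop, then mop up the tail. Below is a minimal C sketch of that structure; the helper name is hypothetical, and it collapses the asm's separate 4-byte and 1-byte tail steps into a single byte loop.

```c
#include <stddef.h>
#include <stdint.h>

/* Hypothetical C model of generic_memcpy's structure. */
static void *sketch_memcpy(void *dst, const void *src, size_t n)
{
    unsigned char *d = dst;
    const unsigned char *s = src;

    /* Byte-copy until the destination is 4-byte aligned (labels 5:/6:). */
    while (n && ((uintptr_t)d & 3)) {
        *d++ = *s++;
        n--;
    }

    /* Main loop: two 32-bit words per iteration (the lwz/lwzu pair). */
    while (n >= 8) {
        uint32_t a, b;
        __builtin_memcpy(&a, s, 4);       /* source may stay unaligned */
        __builtin_memcpy(&b, s + 4, 4);
        __builtin_memcpy(d, &a, 4);
        __builtin_memcpy(d + 4, &b, 4);
        d += 8; s += 8; n -= 8;
    }

    /* Tail bytes (the asm also moves one 4-byte word first when it can). */
    while (n--)
        *d++ = *s++;
    return dst;
}
```

The real routine additionally branches to backwards_memcpy when the regions overlap with the destination above the source, which a C model would express as memmove.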
aixcc-public/challenge-001-exemplar-source · 8,149 bytes · arch/powerpc/lib/checksum_64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * This file contains assembly-language implementations * of IP-style 1's complement checksum routines. * * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * Severely hacked about by Paul Mackerras (paulus@cs.anu.edu.au). */ #include <linux/sys.h> #include <asm/processor.h> #include <asm/errno.h> #include <asm/ppc_asm.h> #include <asm/export.h> /* * Computes the checksum of a memory block at buff, length len, * and adds in "sum" (32-bit). * * __csum_partial(r3=buff, r4=len, r5=sum) */ _GLOBAL(__csum_partial) addic r0,r5,0 /* clear carry */ srdi. r6,r4,3 /* less than 8 bytes? */ beq .Lcsum_tail_word /* * If only halfword aligned, align to a double word. Since odd * aligned addresses should be rare and they would require more * work to calculate the correct checksum, we ignore that case * and take the potential slowdown of unaligned loads. */ rldicl. r6,r3,64-1,64-2 /* r6 = (r3 >> 1) & 0x3 */ beq .Lcsum_aligned li r7,4 sub r6,r7,r6 mtctr r6 1: lhz r6,0(r3) /* align to doubleword */ subi r4,r4,2 addi r3,r3,2 adde r0,r0,r6 bdnz 1b .Lcsum_aligned: /* * We unroll the loop such that each iteration is 64 bytes with an * entry and exit limb of 64 bytes, meaning a minimum size of * 128 bytes. */ srdi. r6,r4,7 beq .Lcsum_tail_doublewords /* len < 128 */ srdi r6,r4,6 subi r6,r6,1 mtctr r6 stdu r1,-STACKFRAMESIZE(r1) std r14,STK_REG(R14)(r1) std r15,STK_REG(R15)(r1) std r16,STK_REG(R16)(r1) ld r6,0(r3) ld r9,8(r3) ld r10,16(r3) ld r11,24(r3) /* * On POWER6 and POWER7 back to back adde instructions take 2 cycles * because of the XER dependency. This means the fastest this loop can * go is 16 cycles per iteration. The scheduling of the loop below has * been shown to hit this on both POWER6 and POWER7. */ .align 5 2: adde r0,r0,r6 ld r12,32(r3) ld r14,40(r3) adde r0,r0,r9 ld r15,48(r3) ld r16,56(r3) addi r3,r3,64 adde r0,r0,r10 adde r0,r0,r11 adde r0,r0,r12 adde r0,r0,r14 adde r0,r0,r15 ld r6,0(r3) ld r9,8(r3) adde r0,r0,r16 ld r10,16(r3) ld r11,24(r3) bdnz 2b adde r0,r0,r6 ld r12,32(r3) ld r14,40(r3) adde r0,r0,r9 ld r15,48(r3) ld r16,56(r3) addi r3,r3,64 adde r0,r0,r10 adde r0,r0,r11 adde r0,r0,r12 adde r0,r0,r14 adde r0,r0,r15 adde r0,r0,r16 ld r14,STK_REG(R14)(r1) ld r15,STK_REG(R15)(r1) ld r16,STK_REG(R16)(r1) addi r1,r1,STACKFRAMESIZE andi. r4,r4,63 .Lcsum_tail_doublewords: /* Up to 127 bytes to go */ srdi. r6,r4,3 beq .Lcsum_tail_word mtctr r6 3: ld r6,0(r3) addi r3,r3,8 adde r0,r0,r6 bdnz 3b andi. r4,r4,7 .Lcsum_tail_word: /* Up to 7 bytes to go */ srdi. r6,r4,2 beq .Lcsum_tail_halfword lwz r6,0(r3) addi r3,r3,4 adde r0,r0,r6 subi r4,r4,4 .Lcsum_tail_halfword: /* Up to 3 bytes to go */ srdi. r6,r4,1 beq .Lcsum_tail_byte lhz r6,0(r3) addi r3,r3,2 adde r0,r0,r6 subi r4,r4,2 .Lcsum_tail_byte: /* Up to 1 byte to go */ andi. r6,r4,1 beq .Lcsum_finish lbz r6,0(r3) #ifdef __BIG_ENDIAN__ sldi r9,r6,8 /* Pad the byte out to 16 bits */ adde r0,r0,r9 #else adde r0,r0,r6 #endif .Lcsum_finish: addze r0,r0 /* add in final carry */ rldicl r4,r0,32,0 /* fold two 32 bit halves together */ add r3,r4,r0 srdi r3,r3,32 blr EXPORT_SYMBOL(__csum_partial) .macro srcnr 100: EX_TABLE(100b,.Lerror_nr) .endm .macro source 150: EX_TABLE(150b,.Lerror) .endm .macro dstnr 200: EX_TABLE(200b,.Lerror_nr) .endm .macro dest 250: EX_TABLE(250b,.Lerror) .endm /* * Computes the checksum of a memory block at src, length len, * and adds in 0xffffffff (32-bit), while copying the block to dst. * If an access exception occurs, it returns 0. 
* * csum_partial_copy_generic(r3=src, r4=dst, r5=len) */ _GLOBAL(csum_partial_copy_generic) li r6,-1 addic r0,r6,0 /* clear carry */ srdi. r6,r5,3 /* less than 8 bytes? */ beq .Lcopy_tail_word /* * If only halfword aligned, align to a double word. Since odd * aligned addresses should be rare and they would require more * work to calculate the correct checksum, we ignore that case * and take the potential slowdown of unaligned loads. * * If the source and destination are relatively unaligned we only * align the source. This keeps things simple. */ rldicl. r6,r3,64-1,64-2 /* r6 = (r3 >> 1) & 0x3 */ beq .Lcopy_aligned li r9,4 sub r6,r9,r6 mtctr r6 1: srcnr; lhz r6,0(r3) /* align to doubleword */ subi r5,r5,2 addi r3,r3,2 adde r0,r0,r6 dstnr; sth r6,0(r4) addi r4,r4,2 bdnz 1b .Lcopy_aligned: /* * We unroll the loop such that each iteration is 64 bytes with an * entry and exit limb of 64 bytes, meaning a minimum size of * 128 bytes. */ srdi. r6,r5,7 beq .Lcopy_tail_doublewords /* len < 128 */ srdi r6,r5,6 subi r6,r6,1 mtctr r6 stdu r1,-STACKFRAMESIZE(r1) std r14,STK_REG(R14)(r1) std r15,STK_REG(R15)(r1) std r16,STK_REG(R16)(r1) source; ld r6,0(r3) source; ld r9,8(r3) source; ld r10,16(r3) source; ld r11,24(r3) /* * On POWER6 and POWER7 back to back adde instructions take 2 cycles * because of the XER dependency. This means the fastest this loop can * go is 16 cycles per iteration. The scheduling of the loop below has * been shown to hit this on both POWER6 and POWER7. */ .align 5 2: adde r0,r0,r6 source; ld r12,32(r3) source; ld r14,40(r3) adde r0,r0,r9 source; ld r15,48(r3) source; ld r16,56(r3) addi r3,r3,64 adde r0,r0,r10 dest; std r6,0(r4) dest; std r9,8(r4) adde r0,r0,r11 dest; std r10,16(r4) dest; std r11,24(r4) adde r0,r0,r12 dest; std r12,32(r4) dest; std r14,40(r4) adde r0,r0,r14 dest; std r15,48(r4) dest; std r16,56(r4) addi r4,r4,64 adde r0,r0,r15 source; ld r6,0(r3) source; ld r9,8(r3) adde r0,r0,r16 source; ld r10,16(r3) source; ld r11,24(r3) bdnz 2b adde r0,r0,r6 source; ld r12,32(r3) source; ld r14,40(r3) adde r0,r0,r9 source; ld r15,48(r3) source; ld r16,56(r3) addi r3,r3,64 adde r0,r0,r10 dest; std r6,0(r4) dest; std r9,8(r4) adde r0,r0,r11 dest; std r10,16(r4) dest; std r11,24(r4) adde r0,r0,r12 dest; std r12,32(r4) dest; std r14,40(r4) adde r0,r0,r14 dest; std r15,48(r4) dest; std r16,56(r4) addi r4,r4,64 adde r0,r0,r15 adde r0,r0,r16 ld r14,STK_REG(R14)(r1) ld r15,STK_REG(R15)(r1) ld r16,STK_REG(R16)(r1) addi r1,r1,STACKFRAMESIZE andi. r5,r5,63 .Lcopy_tail_doublewords: /* Up to 127 bytes to go */ srdi. r6,r5,3 beq .Lcopy_tail_word mtctr r6 3: srcnr; ld r6,0(r3) addi r3,r3,8 adde r0,r0,r6 dstnr; std r6,0(r4) addi r4,r4,8 bdnz 3b andi. r5,r5,7 .Lcopy_tail_word: /* Up to 7 bytes to go */ srdi. r6,r5,2 beq .Lcopy_tail_halfword srcnr; lwz r6,0(r3) addi r3,r3,4 adde r0,r0,r6 dstnr; stw r6,0(r4) addi r4,r4,4 subi r5,r5,4 .Lcopy_tail_halfword: /* Up to 3 bytes to go */ srdi. r6,r5,1 beq .Lcopy_tail_byte srcnr; lhz r6,0(r3) addi r3,r3,2 adde r0,r0,r6 dstnr; sth r6,0(r4) addi r4,r4,2 subi r5,r5,2 .Lcopy_tail_byte: /* Up to 1 byte to go */ andi. 
r6,r5,1 beq .Lcopy_finish srcnr; lbz r6,0(r3) #ifdef __BIG_ENDIAN__ sldi r9,r6,8 /* Pad the byte out to 16 bits */ adde r0,r0,r9 #else adde r0,r0,r6 #endif dstnr; stb r6,0(r4) .Lcopy_finish: addze r0,r0 /* add in final carry */ rldicl r4,r0,32,0 /* fold two 32 bit halves together */ add r3,r4,r0 srdi r3,r3,32 blr .Lerror: ld r14,STK_REG(R14)(r1) ld r15,STK_REG(R15)(r1) ld r16,STK_REG(R16)(r1) addi r1,r1,STACKFRAMESIZE .Lerror_nr: li r3,0 blr EXPORT_SYMBOL(csum_partial_copy_generic) /* * __sum16 csum_ipv6_magic(const struct in6_addr *saddr, * const struct in6_addr *daddr, * __u32 len, __u8 proto, __wsum sum) */ _GLOBAL(csum_ipv6_magic) ld r8, 0(r3) ld r9, 8(r3) add r5, r5, r6 addc r0, r8, r9 ld r10, 0(r4) ld r11, 8(r4) #ifdef CONFIG_CPU_LITTLE_ENDIAN rotldi r5, r5, 8 #endif adde r0, r0, r10 add r5, r5, r7 adde r0, r0, r11 adde r0, r0, r5 addze r0, r0 rotldi r3, r0, 32 /* fold two 32 bit halves together */ add r3, r0, r3 srdi r0, r3, 32 rotlwi r3, r0, 16 /* fold two 16 bit halves together */ add r3, r0, r3 not r3, r3 rlwinm r3, r3, 16, 16, 31 blr EXPORT_SYMBOL(csum_ipv6_magic)
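Both __csum_partial and csum_ipv6_magic end with the same fold: rotate the 64-bit accumulator so its halves line up, add, and keep narrowing until 16 bits remain, letting each addition's carry wrap back into the sum. Here is a C model of that fold with a hypothetical helper name; note that __csum_partial itself returns the 32-bit fold without the final complement, while the `~` matches csum_ipv6_magic's closing `not`.

```c
#include <stdint.h>

/* Hypothetical helper: fold a 64-bit one's-complement sum to 16 bits. */
static uint16_t sketch_csum_fold(uint64_t sum)
{
    /* Fold the two 32-bit halves together (rldicl/add/srdi above). */
    sum = (sum >> 32) + (sum & 0xffffffffu);
    sum = (sum >> 32) + (sum & 0xffffffffu);   /* absorb the carry-out */

    /* Fold the two 16-bit halves together (rotlwi/add above). */
    sum = (sum >> 16) + (sum & 0xffffu);
    sum = (sum >> 16) + (sum & 0xffffu);

    return (uint16_t)~sum;   /* final complement, as in csum_ipv6_magic */
}
```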
aixcc-public/challenge-001-exemplar-source · 3,651 bytes · arch/powerpc/purgatory/trampoline_64.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * kexec trampoline * * Based on code taken from kexec-tools and kexec-lite. * * Copyright (C) 2004 - 2005, Milton D Miller II, IBM Corporation * Copyright (C) 2006, Mohan Kumar M, IBM Corporation * Copyright (C) 2013, Anton Blanchard, IBM Corporation */ #include <asm/asm-compat.h> #include <asm/crashdump-ppc64.h> .balign 256 .globl purgatory_start purgatory_start: b master /* ABI: possible run_at_load flag at 0x5c */ .org purgatory_start + 0x5c .globl run_at_load run_at_load: .long 0 .size run_at_load, . - run_at_load /* ABI: slaves start at 60 with r3=phys */ .org purgatory_start + 0x60 slave: b . /* ABI: end of copied region */ .org purgatory_start + 0x100 .size purgatory_start, . - purgatory_start /* * The above 0x100 bytes at purgatory_start are replaced with the * code from the kernel (or next stage) by setup_purgatory(). */ master: or %r1,%r1,%r1 /* low priority to let other threads catchup */ isync mr %r17,%r3 /* save cpu id to r17 */ mr %r15,%r4 /* save physical address in reg15 */ /* Work out where we're running */ bcl 20, 31, 0f 0: mflr %r18 /* * Copy BACKUP_SRC_SIZE bytes from BACKUP_SRC_START to * backup_start 8 bytes at a time. * * Use r3 = dest, r4 = src, r5 = size, r6 = count */ ld %r3, (backup_start - 0b)(%r18) cmpdi %cr0, %r3, 0 beq .Lskip_copy /* skip if there is no backup region */ lis %r5, BACKUP_SRC_SIZE@h ori %r5, %r5, BACKUP_SRC_SIZE@l cmpdi %cr0, %r5, 0 beq .Lskip_copy /* skip if copy size is zero */ lis %r4, BACKUP_SRC_START@h ori %r4, %r4, BACKUP_SRC_START@l li %r6, 0 .Lcopy_loop: ldx %r0, %r6, %r4 stdx %r0, %r6, %r3 addi %r6, %r6, 8 cmpld %cr0, %r6, %r5 blt .Lcopy_loop .Lskip_copy: or %r3,%r3,%r3 /* ok now to high priority, lets boot */ lis %r6,0x1 mtctr %r6 /* delay a bit for slaves to catch up */ bdnz . /* before we overwrite 0-100 again */ /* load device-tree address */ ld %r3, (dt_offset - 0b)(%r18) mr %r16,%r3 /* save dt address in reg16 */ li %r4,20 LWZX_BE %r6,%r3,%r4 /* fetch __be32 version number at byte 20 */ cmpwi %cr0,%r6,2 /* v2 or later? */ blt 1f li %r4,28 STWX_BE %r17,%r3,%r4 /* Store my cpu as __be32 at byte 28 */ 1: /* Load opal base and entry values in r8 & r9 respectively */ ld %r8,(opal_base - 0b)(%r18) ld %r9,(opal_entry - 0b)(%r18) /* load the kernel address */ ld %r4,(kernel - 0b)(%r18) /* load the run_at_load flag */ /* possibly patched by kexec */ ld %r6,(run_at_load - 0b)(%r18) /* and patch it into the kernel */ stw %r6,(0x5c)(%r4) mr %r3,%r16 /* restore dt address */ li %r5,0 /* r5 will be 0 for kernel */ mfmsr %r11 andi. %r10,%r11,1 /* test MSR_LE */ bne .Little_endian mtctr %r4 /* prepare branch to */ bctr /* start kernel */ .Little_endian: mtsrr0 %r4 /* prepare branch to */ clrrdi %r11,%r11,1 /* clear MSR_LE */ mtsrr1 %r11 rfid /* update MSR and start kernel */ .balign 8 .globl kernel kernel: .8byte 0x0 .size kernel, . - kernel .balign 8 .globl dt_offset dt_offset: .8byte 0x0 .size dt_offset, . - dt_offset .balign 8 .globl backup_start backup_start: .8byte 0x0 .size backup_start, . - backup_start .balign 8 .globl opal_base opal_base: .8byte 0x0 .size opal_base, . - opal_base .balign 8 .globl opal_entry opal_entry: .8byte 0x0 .size opal_entry, . - opal_entry .data .balign 8 .globl purgatory_sha256_digest purgatory_sha256_digest: .skip 32 .size purgatory_sha256_digest, . - purgatory_sha256_digest .balign 8 .globl purgatory_sha_regions purgatory_sha_regions: .skip 8 * 2 * 16 .size purgatory_sha_regions, . - purgatory_sha_regions
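The .Lcopy_loop above moves the crash-kernel backup region with ldx/stdx pairs, eight bytes per iteration, after two guard checks. A C model follows (illustrative helper name; it assumes the size is a multiple of 8, as the asm's blt-terminated loop also does, and that a zero destination means "no backup region", matching the patched backup_start slot):

```c
#include <stddef.h>
#include <stdint.h>

/* Hypothetical model of .Lcopy_loop: 8 bytes per iteration. */
static void sketch_backup_copy(uint64_t *dest, const uint64_t *src, size_t bytes)
{
    if (dest == NULL || bytes == 0)
        return;                        /* the two beq .Lskip_copy guards */

    for (size_t i = 0; i < bytes / 8; i++)
        dest[i] = src[i];              /* the ldx/stdx pair */
}
```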
aixcc-public/challenge-001-exemplar-source · 19,692 bytes · arch/powerpc/kvm/bookehv_interrupts.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. * * Author: Varun Sethi <varun.sethi@freescale.com> * Author: Scott Wood <scotwood@freescale.com> * Author: Mihai Caraman <mihai.caraman@freescale.com> * * This file is derived from arch/powerpc/kvm/booke_interrupts.S */ #include <asm/ppc_asm.h> #include <asm/kvm_asm.h> #include <asm/reg.h> #include <asm/page.h> #include <asm/asm-compat.h> #include <asm/asm-offsets.h> #include <asm/bitsperlong.h> #ifdef CONFIG_64BIT #include <asm/exception-64e.h> #include <asm/hw_irq.h> #include <asm/irqflags.h> #else #include "../kernel/head_booke.h" /* for THREAD_NORMSAVE() */ #endif #define LONGBYTES (BITS_PER_LONG / 8) #define VCPU_GUEST_SPRG(n) (VCPU_GUEST_SPRGS + (n * LONGBYTES)) /* The host stack layout: */ #define HOST_R1 0 /* Implied by stwu. */ #define HOST_CALLEE_LR PPC_LR_STKOFF #define HOST_RUN (HOST_CALLEE_LR + LONGBYTES) /* * r2 is special: it holds 'current', and it made nonvolatile in the * kernel with the -ffixed-r2 gcc option. */ #define HOST_R2 (HOST_RUN + LONGBYTES) #define HOST_CR (HOST_R2 + LONGBYTES) #define HOST_NV_GPRS (HOST_CR + LONGBYTES) #define __HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * LONGBYTES)) #define HOST_NV_GPR(n) __HOST_NV_GPR(__REG_##n) #define HOST_MIN_STACK_SIZE (HOST_NV_GPR(R31) + LONGBYTES) #define HOST_STACK_SIZE ((HOST_MIN_STACK_SIZE + 15) & ~15) /* Align. */ /* LR in caller stack frame. */ #define HOST_STACK_LR (HOST_STACK_SIZE + PPC_LR_STKOFF) #define NEED_EMU 0x00000001 /* emulation -- save nv regs */ #define NEED_DEAR 0x00000002 /* save faulting DEAR */ #define NEED_ESR 0x00000004 /* save faulting ESR */ /* * On entry: * r4 = vcpu, r5 = srr0, r6 = srr1 * saved in vcpu: cr, ctr, r3-r13 */ .macro kvm_handler_common intno, srr0, flags /* Restore host stack pointer */ PPC_STL r1, VCPU_GPR(R1)(r4) PPC_STL r2, VCPU_GPR(R2)(r4) PPC_LL r1, VCPU_HOST_STACK(r4) PPC_LL r2, HOST_R2(r1) START_BTB_FLUSH_SECTION BTB_FLUSH(r10) END_BTB_FLUSH_SECTION mfspr r10, SPRN_PID lwz r8, VCPU_HOST_PID(r4) PPC_LL r11, VCPU_SHARED(r4) PPC_STL r14, VCPU_GPR(R14)(r4) /* We need a non-volatile GPR. */ li r14, \intno stw r10, VCPU_GUEST_PID(r4) mtspr SPRN_PID, r8 #ifdef CONFIG_KVM_EXIT_TIMING /* save exit time */ 1: mfspr r7, SPRN_TBRU mfspr r8, SPRN_TBRL mfspr r9, SPRN_TBRU cmpw r9, r7 stw r8, VCPU_TIMING_EXIT_TBL(r4) bne- 1b stw r9, VCPU_TIMING_EXIT_TBU(r4) #endif oris r8, r6, MSR_CE@h PPC_STD(r6, VCPU_SHARED_MSR, r11) ori r8, r8, MSR_ME | MSR_RI PPC_STL r5, VCPU_PC(r4) /* * Make sure CE/ME/RI are set (if appropriate for exception type) * whether or not the guest had it set. Since mfmsr/mtmsr are * somewhat expensive, skip in the common case where the guest * had all these bits set (and thus they're still set if * appropriate for the exception type). 
*/ cmpw r6, r8 beq 1f mfmsr r7 .if \srr0 != SPRN_MCSRR0 && \srr0 != SPRN_CSRR0 oris r7, r7, MSR_CE@h .endif .if \srr0 != SPRN_MCSRR0 ori r7, r7, MSR_ME | MSR_RI .endif mtmsr r7 1: .if \flags & NEED_EMU PPC_STL r15, VCPU_GPR(R15)(r4) PPC_STL r16, VCPU_GPR(R16)(r4) PPC_STL r17, VCPU_GPR(R17)(r4) PPC_STL r18, VCPU_GPR(R18)(r4) PPC_STL r19, VCPU_GPR(R19)(r4) PPC_STL r20, VCPU_GPR(R20)(r4) PPC_STL r21, VCPU_GPR(R21)(r4) PPC_STL r22, VCPU_GPR(R22)(r4) PPC_STL r23, VCPU_GPR(R23)(r4) PPC_STL r24, VCPU_GPR(R24)(r4) PPC_STL r25, VCPU_GPR(R25)(r4) PPC_STL r26, VCPU_GPR(R26)(r4) PPC_STL r27, VCPU_GPR(R27)(r4) PPC_STL r28, VCPU_GPR(R28)(r4) PPC_STL r29, VCPU_GPR(R29)(r4) PPC_STL r30, VCPU_GPR(R30)(r4) PPC_STL r31, VCPU_GPR(R31)(r4) /* * We don't use external PID support. lwepx faults would need to be * handled by KVM and this implies aditional code in DO_KVM (for * DTB_MISS, DSI and LRAT) to check ESR[EPID] and EPLC[EGS] which * is too intrusive for the host. Get last instuction in * kvmppc_get_last_inst(). */ li r9, KVM_INST_FETCH_FAILED stw r9, VCPU_LAST_INST(r4) .endif .if \flags & NEED_ESR mfspr r8, SPRN_ESR PPC_STL r8, VCPU_FAULT_ESR(r4) .endif .if \flags & NEED_DEAR mfspr r9, SPRN_DEAR PPC_STL r9, VCPU_FAULT_DEAR(r4) .endif b kvmppc_resume_host .endm #ifdef CONFIG_64BIT /* Exception types */ #define EX_GEN 1 #define EX_GDBELL 2 #define EX_DBG 3 #define EX_MC 4 #define EX_CRIT 5 #define EX_TLB 6 /* * For input register values, see arch/powerpc/include/asm/kvm_booke_hv_asm.h */ .macro kvm_handler intno type scratch, paca_ex, ex_r10, ex_r11, srr0, srr1, flags _GLOBAL(kvmppc_handler_\intno\()_\srr1) mr r11, r4 /* * Get vcpu from Paca: paca->__current.thread->kvm_vcpu */ PPC_LL r4, PACACURRENT(r13) PPC_LL r4, (THREAD + THREAD_KVM_VCPU)(r4) PPC_STL r10, VCPU_CR(r4) PPC_STL r11, VCPU_GPR(R4)(r4) PPC_STL r5, VCPU_GPR(R5)(r4) PPC_STL r6, VCPU_GPR(R6)(r4) PPC_STL r8, VCPU_GPR(R8)(r4) PPC_STL r9, VCPU_GPR(R9)(r4) .if \type == EX_TLB PPC_LL r5, EX_TLB_R13(r12) PPC_LL r6, EX_TLB_R10(r12) PPC_LL r8, EX_TLB_R11(r12) mfspr r12, \scratch .else mfspr r5, \scratch PPC_LL r6, (\paca_ex + \ex_r10)(r13) PPC_LL r8, (\paca_ex + \ex_r11)(r13) .endif PPC_STL r5, VCPU_GPR(R13)(r4) PPC_STL r3, VCPU_GPR(R3)(r4) PPC_STL r7, VCPU_GPR(R7)(r4) PPC_STL r12, VCPU_GPR(R12)(r4) PPC_STL r6, VCPU_GPR(R10)(r4) PPC_STL r8, VCPU_GPR(R11)(r4) mfctr r5 PPC_STL r5, VCPU_CTR(r4) mfspr r5, \srr0 mfspr r6, \srr1 kvm_handler_common \intno, \srr0, \flags .endm #define EX_PARAMS(type) \ EX_##type, \ SPRN_SPRG_##type##_SCRATCH, \ PACA_EX##type, \ EX_R10, \ EX_R11 #define EX_PARAMS_TLB \ EX_TLB, \ SPRN_SPRG_GEN_SCRATCH, \ PACA_EXTLB, \ EX_TLB_R10, \ EX_TLB_R11 kvm_handler BOOKE_INTERRUPT_CRITICAL, EX_PARAMS(CRIT), \ SPRN_CSRR0, SPRN_CSRR1, 0 kvm_handler BOOKE_INTERRUPT_MACHINE_CHECK, EX_PARAMS(MC), \ SPRN_MCSRR0, SPRN_MCSRR1, 0 kvm_handler BOOKE_INTERRUPT_DATA_STORAGE, EX_PARAMS(GEN), \ SPRN_SRR0, SPRN_SRR1,(NEED_EMU | NEED_DEAR | NEED_ESR) kvm_handler BOOKE_INTERRUPT_INST_STORAGE, EX_PARAMS(GEN), \ SPRN_SRR0, SPRN_SRR1, NEED_ESR kvm_handler BOOKE_INTERRUPT_EXTERNAL, EX_PARAMS(GEN), \ SPRN_SRR0, SPRN_SRR1, 0 kvm_handler BOOKE_INTERRUPT_ALIGNMENT, EX_PARAMS(GEN), \ SPRN_SRR0, SPRN_SRR1,(NEED_DEAR | NEED_ESR) kvm_handler BOOKE_INTERRUPT_PROGRAM, EX_PARAMS(GEN), \ SPRN_SRR0, SPRN_SRR1, (NEED_ESR | NEED_EMU) kvm_handler BOOKE_INTERRUPT_FP_UNAVAIL, EX_PARAMS(GEN), \ SPRN_SRR0, SPRN_SRR1, 0 kvm_handler BOOKE_INTERRUPT_AP_UNAVAIL, EX_PARAMS(GEN), \ SPRN_SRR0, SPRN_SRR1, 0 kvm_handler BOOKE_INTERRUPT_DECREMENTER, EX_PARAMS(GEN), \ SPRN_SRR0, SPRN_SRR1, 0 
kvm_handler BOOKE_INTERRUPT_FIT, EX_PARAMS(GEN), \ SPRN_SRR0, SPRN_SRR1, 0 kvm_handler BOOKE_INTERRUPT_WATCHDOG, EX_PARAMS(CRIT),\ SPRN_CSRR0, SPRN_CSRR1, 0 /* * Only bolted TLB miss exception handlers are supported for now */ kvm_handler BOOKE_INTERRUPT_DTLB_MISS, EX_PARAMS_TLB, \ SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR) kvm_handler BOOKE_INTERRUPT_ITLB_MISS, EX_PARAMS_TLB, \ SPRN_SRR0, SPRN_SRR1, 0 kvm_handler BOOKE_INTERRUPT_ALTIVEC_UNAVAIL, EX_PARAMS(GEN), \ SPRN_SRR0, SPRN_SRR1, 0 kvm_handler BOOKE_INTERRUPT_ALTIVEC_ASSIST, EX_PARAMS(GEN), \ SPRN_SRR0, SPRN_SRR1, 0 kvm_handler BOOKE_INTERRUPT_PERFORMANCE_MONITOR, EX_PARAMS(GEN), \ SPRN_SRR0, SPRN_SRR1, 0 kvm_handler BOOKE_INTERRUPT_DOORBELL, EX_PARAMS(GEN), \ SPRN_SRR0, SPRN_SRR1, 0 kvm_handler BOOKE_INTERRUPT_DOORBELL_CRITICAL, EX_PARAMS(CRIT), \ SPRN_CSRR0, SPRN_CSRR1, 0 kvm_handler BOOKE_INTERRUPT_HV_PRIV, EX_PARAMS(GEN), \ SPRN_SRR0, SPRN_SRR1, NEED_EMU kvm_handler BOOKE_INTERRUPT_HV_SYSCALL, EX_PARAMS(GEN), \ SPRN_SRR0, SPRN_SRR1, 0 kvm_handler BOOKE_INTERRUPT_GUEST_DBELL, EX_PARAMS(GDBELL), \ SPRN_GSRR0, SPRN_GSRR1, 0 kvm_handler BOOKE_INTERRUPT_GUEST_DBELL_CRIT, EX_PARAMS(CRIT), \ SPRN_CSRR0, SPRN_CSRR1, 0 kvm_handler BOOKE_INTERRUPT_DEBUG, EX_PARAMS(DBG), \ SPRN_DSRR0, SPRN_DSRR1, 0 kvm_handler BOOKE_INTERRUPT_DEBUG, EX_PARAMS(CRIT), \ SPRN_CSRR0, SPRN_CSRR1, 0 kvm_handler BOOKE_INTERRUPT_LRAT_ERROR, EX_PARAMS(GEN), \ SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR) #else /* * For input register values, see arch/powerpc/include/asm/kvm_booke_hv_asm.h */ .macro kvm_handler intno srr0, srr1, flags _GLOBAL(kvmppc_handler_\intno\()_\srr1) PPC_LL r11, THREAD_KVM_VCPU(r10) PPC_STL r3, VCPU_GPR(R3)(r11) mfspr r3, SPRN_SPRG_RSCRATCH0 PPC_STL r4, VCPU_GPR(R4)(r11) PPC_LL r4, THREAD_NORMSAVE(0)(r10) PPC_STL r5, VCPU_GPR(R5)(r11) PPC_STL r13, VCPU_CR(r11) mfspr r5, \srr0 PPC_STL r3, VCPU_GPR(R10)(r11) PPC_LL r3, THREAD_NORMSAVE(2)(r10) PPC_STL r6, VCPU_GPR(R6)(r11) PPC_STL r4, VCPU_GPR(R11)(r11) mfspr r6, \srr1 PPC_STL r7, VCPU_GPR(R7)(r11) PPC_STL r8, VCPU_GPR(R8)(r11) PPC_STL r9, VCPU_GPR(R9)(r11) PPC_STL r3, VCPU_GPR(R13)(r11) mfctr r7 PPC_STL r12, VCPU_GPR(R12)(r11) PPC_STL r7, VCPU_CTR(r11) mr r4, r11 kvm_handler_common \intno, \srr0, \flags .endm .macro kvm_lvl_handler intno scratch srr0, srr1, flags _GLOBAL(kvmppc_handler_\intno\()_\srr1) mfspr r10, SPRN_SPRG_THREAD PPC_LL r11, THREAD_KVM_VCPU(r10) PPC_STL r3, VCPU_GPR(R3)(r11) mfspr r3, \scratch PPC_STL r4, VCPU_GPR(R4)(r11) PPC_LL r4, GPR9(r8) PPC_STL r5, VCPU_GPR(R5)(r11) PPC_STL r9, VCPU_CR(r11) mfspr r5, \srr0 PPC_STL r3, VCPU_GPR(R8)(r11) PPC_LL r3, GPR10(r8) PPC_STL r6, VCPU_GPR(R6)(r11) PPC_STL r4, VCPU_GPR(R9)(r11) mfspr r6, \srr1 PPC_LL r4, GPR11(r8) PPC_STL r7, VCPU_GPR(R7)(r11) PPC_STL r3, VCPU_GPR(R10)(r11) mfctr r7 PPC_STL r12, VCPU_GPR(R12)(r11) PPC_STL r13, VCPU_GPR(R13)(r11) PPC_STL r4, VCPU_GPR(R11)(r11) PPC_STL r7, VCPU_CTR(r11) mr r4, r11 kvm_handler_common \intno, \srr0, \flags .endm kvm_lvl_handler BOOKE_INTERRUPT_CRITICAL, \ SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0 kvm_lvl_handler BOOKE_INTERRUPT_MACHINE_CHECK, \ SPRN_SPRG_RSCRATCH_MC, SPRN_MCSRR0, SPRN_MCSRR1, 0 kvm_handler BOOKE_INTERRUPT_DATA_STORAGE, \ SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR) kvm_handler BOOKE_INTERRUPT_INST_STORAGE, SPRN_SRR0, SPRN_SRR1, NEED_ESR kvm_handler BOOKE_INTERRUPT_EXTERNAL, SPRN_SRR0, SPRN_SRR1, 0 kvm_handler BOOKE_INTERRUPT_ALIGNMENT, \ SPRN_SRR0, SPRN_SRR1, (NEED_DEAR | NEED_ESR) kvm_handler BOOKE_INTERRUPT_PROGRAM, SPRN_SRR0, 
SPRN_SRR1, (NEED_ESR | NEED_EMU) kvm_handler BOOKE_INTERRUPT_FP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0 kvm_handler BOOKE_INTERRUPT_SYSCALL, SPRN_SRR0, SPRN_SRR1, 0 kvm_handler BOOKE_INTERRUPT_AP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0 kvm_handler BOOKE_INTERRUPT_DECREMENTER, SPRN_SRR0, SPRN_SRR1, 0 kvm_handler BOOKE_INTERRUPT_FIT, SPRN_SRR0, SPRN_SRR1, 0 kvm_lvl_handler BOOKE_INTERRUPT_WATCHDOG, \ SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0 kvm_handler BOOKE_INTERRUPT_DTLB_MISS, \ SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR) kvm_handler BOOKE_INTERRUPT_ITLB_MISS, SPRN_SRR0, SPRN_SRR1, 0 kvm_handler BOOKE_INTERRUPT_PERFORMANCE_MONITOR, SPRN_SRR0, SPRN_SRR1, 0 kvm_handler BOOKE_INTERRUPT_DOORBELL, SPRN_SRR0, SPRN_SRR1, 0 kvm_lvl_handler BOOKE_INTERRUPT_DOORBELL_CRITICAL, \ SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0 kvm_handler BOOKE_INTERRUPT_HV_PRIV, SPRN_SRR0, SPRN_SRR1, NEED_EMU kvm_handler BOOKE_INTERRUPT_HV_SYSCALL, SPRN_SRR0, SPRN_SRR1, 0 kvm_handler BOOKE_INTERRUPT_GUEST_DBELL, SPRN_GSRR0, SPRN_GSRR1, 0 kvm_lvl_handler BOOKE_INTERRUPT_GUEST_DBELL_CRIT, \ SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0 kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \ SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0 kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \ SPRN_SPRG_RSCRATCH_DBG, SPRN_DSRR0, SPRN_DSRR1, 0 #endif /* Registers: * SPRG_SCRATCH0: guest r10 * r4: vcpu pointer * r11: vcpu->arch.shared * r14: KVM exit number */ _GLOBAL(kvmppc_resume_host) /* Save remaining volatile guest register state to vcpu. */ mfspr r3, SPRN_VRSAVE PPC_STL r0, VCPU_GPR(R0)(r4) mflr r5 mfspr r6, SPRN_SPRG4 PPC_STL r5, VCPU_LR(r4) mfspr r7, SPRN_SPRG5 stw r3, VCPU_VRSAVE(r4) #ifdef CONFIG_64BIT PPC_LL r3, PACA_SPRG_VDSO(r13) #endif mfspr r5, SPRN_SPRG9 PPC_STD(r6, VCPU_SHARED_SPRG4, r11) mfspr r8, SPRN_SPRG6 PPC_STD(r7, VCPU_SHARED_SPRG5, r11) mfspr r9, SPRN_SPRG7 #ifdef CONFIG_64BIT mtspr SPRN_SPRG_VDSO_WRITE, r3 #endif PPC_STD(r5, VCPU_SPRG9, r4) PPC_STD(r8, VCPU_SHARED_SPRG6, r11) mfxer r3 PPC_STD(r9, VCPU_SHARED_SPRG7, r11) /* save guest MAS registers and restore host mas4 & mas6 */ mfspr r5, SPRN_MAS0 PPC_STL r3, VCPU_XER(r4) mfspr r6, SPRN_MAS1 stw r5, VCPU_SHARED_MAS0(r11) mfspr r7, SPRN_MAS2 stw r6, VCPU_SHARED_MAS1(r11) PPC_STD(r7, VCPU_SHARED_MAS2, r11) mfspr r5, SPRN_MAS3 mfspr r6, SPRN_MAS4 stw r5, VCPU_SHARED_MAS7_3+4(r11) mfspr r7, SPRN_MAS6 stw r6, VCPU_SHARED_MAS4(r11) mfspr r5, SPRN_MAS7 lwz r6, VCPU_HOST_MAS4(r4) stw r7, VCPU_SHARED_MAS6(r11) lwz r8, VCPU_HOST_MAS6(r4) mtspr SPRN_MAS4, r6 stw r5, VCPU_SHARED_MAS7_3+0(r11) mtspr SPRN_MAS6, r8 /* Enable MAS register updates via exception */ mfspr r3, SPRN_EPCR rlwinm r3, r3, 0, ~SPRN_EPCR_DMIUH mtspr SPRN_EPCR, r3 isync #ifdef CONFIG_64BIT /* * We enter with interrupts disabled in hardware, but * we need to call RECONCILE_IRQ_STATE to ensure * that the software state is kept in sync. */ RECONCILE_IRQ_STATE(r3,r5) #endif /* Switch to kernel stack and jump to handler. */ mr r3, r4 mr r5, r14 /* intno */ mr r14, r4 /* Save vcpu pointer. */ mr r4, r5 bl kvmppc_handle_exit /* Restore vcpu pointer and the nonvolatiles we used. */ mr r4, r14 PPC_LL r14, VCPU_GPR(R14)(r4) andi. 
r5, r3, RESUME_FLAG_NV beq skip_nv_load PPC_LL r15, VCPU_GPR(R15)(r4) PPC_LL r16, VCPU_GPR(R16)(r4) PPC_LL r17, VCPU_GPR(R17)(r4) PPC_LL r18, VCPU_GPR(R18)(r4) PPC_LL r19, VCPU_GPR(R19)(r4) PPC_LL r20, VCPU_GPR(R20)(r4) PPC_LL r21, VCPU_GPR(R21)(r4) PPC_LL r22, VCPU_GPR(R22)(r4) PPC_LL r23, VCPU_GPR(R23)(r4) PPC_LL r24, VCPU_GPR(R24)(r4) PPC_LL r25, VCPU_GPR(R25)(r4) PPC_LL r26, VCPU_GPR(R26)(r4) PPC_LL r27, VCPU_GPR(R27)(r4) PPC_LL r28, VCPU_GPR(R28)(r4) PPC_LL r29, VCPU_GPR(R29)(r4) PPC_LL r30, VCPU_GPR(R30)(r4) PPC_LL r31, VCPU_GPR(R31)(r4) skip_nv_load: /* Should we return to the guest? */ andi. r5, r3, RESUME_FLAG_HOST beq lightweight_exit srawi r3, r3, 2 /* Shift -ERR back down. */ heavyweight_exit: /* Not returning to guest. */ PPC_LL r5, HOST_STACK_LR(r1) lwz r6, HOST_CR(r1) /* * We already saved guest volatile register state; now save the * non-volatiles. */ PPC_STL r15, VCPU_GPR(R15)(r4) PPC_STL r16, VCPU_GPR(R16)(r4) PPC_STL r17, VCPU_GPR(R17)(r4) PPC_STL r18, VCPU_GPR(R18)(r4) PPC_STL r19, VCPU_GPR(R19)(r4) PPC_STL r20, VCPU_GPR(R20)(r4) PPC_STL r21, VCPU_GPR(R21)(r4) PPC_STL r22, VCPU_GPR(R22)(r4) PPC_STL r23, VCPU_GPR(R23)(r4) PPC_STL r24, VCPU_GPR(R24)(r4) PPC_STL r25, VCPU_GPR(R25)(r4) PPC_STL r26, VCPU_GPR(R26)(r4) PPC_STL r27, VCPU_GPR(R27)(r4) PPC_STL r28, VCPU_GPR(R28)(r4) PPC_STL r29, VCPU_GPR(R29)(r4) PPC_STL r30, VCPU_GPR(R30)(r4) PPC_STL r31, VCPU_GPR(R31)(r4) /* Load host non-volatile register state from host stack. */ PPC_LL r14, HOST_NV_GPR(R14)(r1) PPC_LL r15, HOST_NV_GPR(R15)(r1) PPC_LL r16, HOST_NV_GPR(R16)(r1) PPC_LL r17, HOST_NV_GPR(R17)(r1) PPC_LL r18, HOST_NV_GPR(R18)(r1) PPC_LL r19, HOST_NV_GPR(R19)(r1) PPC_LL r20, HOST_NV_GPR(R20)(r1) PPC_LL r21, HOST_NV_GPR(R21)(r1) PPC_LL r22, HOST_NV_GPR(R22)(r1) PPC_LL r23, HOST_NV_GPR(R23)(r1) PPC_LL r24, HOST_NV_GPR(R24)(r1) PPC_LL r25, HOST_NV_GPR(R25)(r1) PPC_LL r26, HOST_NV_GPR(R26)(r1) PPC_LL r27, HOST_NV_GPR(R27)(r1) PPC_LL r28, HOST_NV_GPR(R28)(r1) PPC_LL r29, HOST_NV_GPR(R29)(r1) PPC_LL r30, HOST_NV_GPR(R30)(r1) PPC_LL r31, HOST_NV_GPR(R31)(r1) /* Return to kvm_vcpu_run(). */ mtlr r5 mtcr r6 addi r1, r1, HOST_STACK_SIZE /* r3 still contains the return code from kvmppc_handle_exit(). */ blr /* Registers: * r3: vcpu pointer */ _GLOBAL(__kvmppc_vcpu_run) stwu r1, -HOST_STACK_SIZE(r1) PPC_STL r1, VCPU_HOST_STACK(r3) /* Save stack pointer to vcpu. */ /* Save host state to stack. */ mr r4, r3 mflr r3 mfcr r5 PPC_STL r3, HOST_STACK_LR(r1) stw r5, HOST_CR(r1) /* Save host non-volatile register state to stack. */ PPC_STL r14, HOST_NV_GPR(R14)(r1) PPC_STL r15, HOST_NV_GPR(R15)(r1) PPC_STL r16, HOST_NV_GPR(R16)(r1) PPC_STL r17, HOST_NV_GPR(R17)(r1) PPC_STL r18, HOST_NV_GPR(R18)(r1) PPC_STL r19, HOST_NV_GPR(R19)(r1) PPC_STL r20, HOST_NV_GPR(R20)(r1) PPC_STL r21, HOST_NV_GPR(R21)(r1) PPC_STL r22, HOST_NV_GPR(R22)(r1) PPC_STL r23, HOST_NV_GPR(R23)(r1) PPC_STL r24, HOST_NV_GPR(R24)(r1) PPC_STL r25, HOST_NV_GPR(R25)(r1) PPC_STL r26, HOST_NV_GPR(R26)(r1) PPC_STL r27, HOST_NV_GPR(R27)(r1) PPC_STL r28, HOST_NV_GPR(R28)(r1) PPC_STL r29, HOST_NV_GPR(R29)(r1) PPC_STL r30, HOST_NV_GPR(R30)(r1) PPC_STL r31, HOST_NV_GPR(R31)(r1) /* Load guest non-volatiles. 
*/ PPC_LL r14, VCPU_GPR(R14)(r4) PPC_LL r15, VCPU_GPR(R15)(r4) PPC_LL r16, VCPU_GPR(R16)(r4) PPC_LL r17, VCPU_GPR(R17)(r4) PPC_LL r18, VCPU_GPR(R18)(r4) PPC_LL r19, VCPU_GPR(R19)(r4) PPC_LL r20, VCPU_GPR(R20)(r4) PPC_LL r21, VCPU_GPR(R21)(r4) PPC_LL r22, VCPU_GPR(R22)(r4) PPC_LL r23, VCPU_GPR(R23)(r4) PPC_LL r24, VCPU_GPR(R24)(r4) PPC_LL r25, VCPU_GPR(R25)(r4) PPC_LL r26, VCPU_GPR(R26)(r4) PPC_LL r27, VCPU_GPR(R27)(r4) PPC_LL r28, VCPU_GPR(R28)(r4) PPC_LL r29, VCPU_GPR(R29)(r4) PPC_LL r30, VCPU_GPR(R30)(r4) PPC_LL r31, VCPU_GPR(R31)(r4) lightweight_exit: PPC_STL r2, HOST_R2(r1) mfspr r3, SPRN_PID stw r3, VCPU_HOST_PID(r4) lwz r3, VCPU_GUEST_PID(r4) mtspr SPRN_PID, r3 PPC_LL r11, VCPU_SHARED(r4) /* Disable MAS register updates via exception */ mfspr r3, SPRN_EPCR oris r3, r3, SPRN_EPCR_DMIUH@h mtspr SPRN_EPCR, r3 isync /* Save host mas4 and mas6 and load guest MAS registers */ mfspr r3, SPRN_MAS4 stw r3, VCPU_HOST_MAS4(r4) mfspr r3, SPRN_MAS6 stw r3, VCPU_HOST_MAS6(r4) lwz r3, VCPU_SHARED_MAS0(r11) lwz r5, VCPU_SHARED_MAS1(r11) PPC_LD(r6, VCPU_SHARED_MAS2, r11) lwz r7, VCPU_SHARED_MAS7_3+4(r11) lwz r8, VCPU_SHARED_MAS4(r11) mtspr SPRN_MAS0, r3 mtspr SPRN_MAS1, r5 mtspr SPRN_MAS2, r6 mtspr SPRN_MAS3, r7 mtspr SPRN_MAS4, r8 lwz r3, VCPU_SHARED_MAS6(r11) lwz r5, VCPU_SHARED_MAS7_3+0(r11) mtspr SPRN_MAS6, r3 mtspr SPRN_MAS7, r5 /* * Host interrupt handlers may have clobbered these guest-readable * SPRGs, so we need to reload them here with the guest's values. */ lwz r3, VCPU_VRSAVE(r4) PPC_LD(r5, VCPU_SHARED_SPRG4, r11) mtspr SPRN_VRSAVE, r3 PPC_LD(r6, VCPU_SHARED_SPRG5, r11) mtspr SPRN_SPRG4W, r5 PPC_LD(r7, VCPU_SHARED_SPRG6, r11) mtspr SPRN_SPRG5W, r6 PPC_LD(r8, VCPU_SHARED_SPRG7, r11) mtspr SPRN_SPRG6W, r7 PPC_LD(r5, VCPU_SPRG9, r4) mtspr SPRN_SPRG7W, r8 mtspr SPRN_SPRG9, r5 /* Load some guest volatiles. */ PPC_LL r3, VCPU_LR(r4) PPC_LL r5, VCPU_XER(r4) PPC_LL r6, VCPU_CTR(r4) PPC_LL r7, VCPU_CR(r4) PPC_LL r8, VCPU_PC(r4) PPC_LD(r9, VCPU_SHARED_MSR, r11) PPC_LL r0, VCPU_GPR(R0)(r4) PPC_LL r1, VCPU_GPR(R1)(r4) PPC_LL r2, VCPU_GPR(R2)(r4) PPC_LL r10, VCPU_GPR(R10)(r4) PPC_LL r11, VCPU_GPR(R11)(r4) PPC_LL r12, VCPU_GPR(R12)(r4) PPC_LL r13, VCPU_GPR(R13)(r4) mtlr r3 mtxer r5 mtctr r6 mtsrr0 r8 mtsrr1 r9 #ifdef CONFIG_KVM_EXIT_TIMING /* save enter time */ 1: mfspr r6, SPRN_TBRU mfspr r9, SPRN_TBRL mfspr r8, SPRN_TBRU cmpw r8, r6 stw r9, VCPU_TIMING_LAST_ENTER_TBL(r4) bne 1b stw r8, VCPU_TIMING_LAST_ENTER_TBU(r4) #endif /* * Don't execute any instruction which can change CR after * below instruction. */ mtcr r7 /* Finish loading guest volatiles and jump to guest. */ PPC_LL r5, VCPU_GPR(R5)(r4) PPC_LL r6, VCPU_GPR(R6)(r4) PPC_LL r7, VCPU_GPR(R7)(r4) PPC_LL r8, VCPU_GPR(R8)(r4) PPC_LL r9, VCPU_GPR(R9)(r4) PPC_LL r3, VCPU_GPR(R3)(r4) PPC_LL r4, VCPU_GPR(R4)(r4) rfi
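The CONFIG_KVM_EXIT_TIMING blocks above sample the 64-bit timebase through two 32-bit SPRs: read TBU, then TBL, then TBU again, and retry if the upper half changed, which catches TBL wrapping between the two reads. A C model with assumed accessor functions (read_tbu/read_tbl stand in for mfspr SPRN_TBRU/SPRN_TBRL; they are not real kernel APIs):

```c
#include <stdint.h>

extern uint32_t read_tbu(void);   /* assumed stand-in for mfspr SPRN_TBRU */
extern uint32_t read_tbl(void);   /* assumed stand-in for mfspr SPRN_TBRL */

/* Hypothetical model of the exit-time snapshot loop. */
static uint64_t sketch_read_timebase(void)
{
    uint32_t hi, lo, hi2;

    do {
        hi  = read_tbu();
        lo  = read_tbl();
        hi2 = read_tbu();         /* re-read the upper half */
    } while (hi != hi2);          /* TBL wrapped mid-read: retry (cmpw/bne-) */

    return ((uint64_t)hi << 32) | lo;
}
```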
aixcc-public/challenge-001-exemplar-source · 3,477 bytes · arch/powerpc/kvm/book3s_32_sr.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * * Copyright SUSE Linux Products GmbH 2009 * * Authors: Alexander Graf <agraf@suse.de> */ /****************************************************************************** * * * Entry code * * * *****************************************************************************/ .macro LOAD_GUEST_SEGMENTS /* Required state: * * MSR = ~IR|DR * R1 = host R1 * R2 = host R2 * R3 = shadow vcpu * all other volatile GPRS = free except R4, R6 * SVCPU[CR] = guest CR * SVCPU[XER] = guest XER * SVCPU[CTR] = guest CTR * SVCPU[LR] = guest LR */ #define XCHG_SR(n) lwz r9, (SVCPU_SR+(n*4))(r3); \ mtsr n, r9 XCHG_SR(0) XCHG_SR(1) XCHG_SR(2) XCHG_SR(3) XCHG_SR(4) XCHG_SR(5) XCHG_SR(6) XCHG_SR(7) XCHG_SR(8) XCHG_SR(9) XCHG_SR(10) XCHG_SR(11) XCHG_SR(12) XCHG_SR(13) XCHG_SR(14) XCHG_SR(15) /* Clear BATs. */ #define KVM_KILL_BAT(n, reg) \ mtspr SPRN_IBAT##n##U,reg; \ mtspr SPRN_IBAT##n##L,reg; \ mtspr SPRN_DBAT##n##U,reg; \ mtspr SPRN_DBAT##n##L,reg; \ li r9, 0 KVM_KILL_BAT(0, r9) KVM_KILL_BAT(1, r9) KVM_KILL_BAT(2, r9) KVM_KILL_BAT(3, r9) .endm /****************************************************************************** * * * Exit code * * * *****************************************************************************/ .macro LOAD_HOST_SEGMENTS /* Register usage at this point: * * R1 = host R1 * R2 = host R2 * R12 = exit handler id * R13 = shadow vcpu - SHADOW_VCPU_OFF * SVCPU.* = guest * * SVCPU[CR] = guest CR * SVCPU[XER] = guest XER * SVCPU[CTR] = guest CTR * SVCPU[LR] = guest LR * */ /* Restore BATs */ /* We only overwrite the upper part, so we only restoree the upper part. */ #define KVM_LOAD_BAT(n, reg, RA, RB) \ lwz RA,(n*16)+0(reg); \ lwz RB,(n*16)+4(reg); \ mtspr SPRN_IBAT##n##U,RA; \ mtspr SPRN_IBAT##n##L,RB; \ lwz RA,(n*16)+8(reg); \ lwz RB,(n*16)+12(reg); \ mtspr SPRN_DBAT##n##U,RA; \ mtspr SPRN_DBAT##n##L,RB; \ lis r9, BATS@ha addi r9, r9, BATS@l tophys(r9, r9) KVM_LOAD_BAT(0, r9, r10, r11) KVM_LOAD_BAT(1, r9, r10, r11) KVM_LOAD_BAT(2, r9, r10, r11) KVM_LOAD_BAT(3, r9, r10, r11) /* Restore Segment Registers */ /* 0xc - 0xf */ li r0, 4 mtctr r0 LOAD_REG_IMMEDIATE(r3, 0x20000000 | (0x111 * 0xc)) lis r4, 0xc000 3: mtsrin r3, r4 addi r3, r3, 0x111 /* increment VSID */ addis r4, r4, 0x1000 /* address of next segment */ bdnz 3b /* 0x0 - 0xb */ /* switch_mmu_context() needs paging, let's enable it */ mfmsr r9 ori r11, r9, MSR_DR mtmsr r11 sync /* switch_mmu_context() clobbers r12, rescue it */ SAVE_GPR(12, r1) /* Calling switch_mmu_context(<inv>, current->mm, <inv>); */ lwz r4, MM(r2) bl switch_mmu_context /* restore r12 */ REST_GPR(12, r1) /* Disable paging again */ mfmsr r9 li r6, MSR_DR andc r9, r9, r6 mtmsr r9 sync .endm
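The mtsrin loop above installs the four kernel segment registers for effective segments 0xc-0xf: each value carries the 0x20000000 key bit plus a VSID of 0x111 times the segment number, so the addi/addis pair steps the VSID and the effective address in lockstep. A C sketch of the value computation (hypothetical helper; it models only the arithmetic, not the privileged mtsrin itself):

```c
#include <stdint.h>

/* Hypothetical model of the segment-register values for 0xc-0xf. */
static void sketch_kernel_srs(uint32_t sr[4])
{
    /* LOAD_REG_IMMEDIATE(r3, 0x20000000 | (0x111 * 0xc)) */
    uint32_t val = 0x20000000u | (0x111u * 0xc);

    for (int i = 0; i < 4; i++) {
        sr[i] = val;              /* what mtsrin installs for segment 0xc + i */
        val += 0x111;             /* addi r3, r3, 0x111: next VSID */
    }
}
```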
aixcc-public/challenge-001-exemplar-source · 14,587 bytes · arch/powerpc/kvm/booke_interrupts.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * * Copyright IBM Corp. 2007 * Copyright 2011 Freescale Semiconductor, Inc. * * Authors: Hollis Blanchard <hollisb@us.ibm.com> */ #include <asm/ppc_asm.h> #include <asm/kvm_asm.h> #include <asm/reg.h> #include <asm/page.h> #include <asm/asm-offsets.h> /* The host stack layout: */ #define HOST_R1 0 /* Implied by stwu. */ #define HOST_CALLEE_LR 4 #define HOST_RUN 8 /* r2 is special: it holds 'current', and it made nonvolatile in the * kernel with the -ffixed-r2 gcc option. */ #define HOST_R2 12 #define HOST_CR 16 #define HOST_NV_GPRS 20 #define __HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * 4)) #define HOST_NV_GPR(n) __HOST_NV_GPR(__REG_##n) #define HOST_MIN_STACK_SIZE (HOST_NV_GPR(R31) + 4) #define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */ #define HOST_STACK_LR (HOST_STACK_SIZE + 4) /* In caller stack frame. */ #define NEED_INST_MASK ((1<<BOOKE_INTERRUPT_PROGRAM) | \ (1<<BOOKE_INTERRUPT_DTLB_MISS) | \ (1<<BOOKE_INTERRUPT_DEBUG)) #define NEED_DEAR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \ (1<<BOOKE_INTERRUPT_DTLB_MISS) | \ (1<<BOOKE_INTERRUPT_ALIGNMENT)) #define NEED_ESR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \ (1<<BOOKE_INTERRUPT_INST_STORAGE) | \ (1<<BOOKE_INTERRUPT_PROGRAM) | \ (1<<BOOKE_INTERRUPT_DTLB_MISS) | \ (1<<BOOKE_INTERRUPT_ALIGNMENT)) .macro __KVM_HANDLER ivor_nr scratch srr0 /* Get pointer to vcpu and record exit number. */ mtspr \scratch , r4 mfspr r4, SPRN_SPRG_THREAD lwz r4, THREAD_KVM_VCPU(r4) stw r3, VCPU_GPR(R3)(r4) stw r5, VCPU_GPR(R5)(r4) stw r6, VCPU_GPR(R6)(r4) mfspr r3, \scratch mfctr r5 stw r3, VCPU_GPR(R4)(r4) stw r5, VCPU_CTR(r4) mfspr r3, \srr0 lis r6, kvmppc_resume_host@h stw r3, VCPU_PC(r4) li r5, \ivor_nr ori r6, r6, kvmppc_resume_host@l mtctr r6 bctr .endm .macro KVM_HANDLER ivor_nr scratch srr0 _GLOBAL(kvmppc_handler_\ivor_nr) __KVM_HANDLER \ivor_nr \scratch \srr0 .endm .macro KVM_DBG_HANDLER ivor_nr scratch srr0 _GLOBAL(kvmppc_handler_\ivor_nr) mtspr \scratch, r4 mfspr r4, SPRN_SPRG_THREAD lwz r4, THREAD_KVM_VCPU(r4) stw r3, VCPU_CRIT_SAVE(r4) mfcr r3 mfspr r4, SPRN_CSRR1 andi. 
r4, r4, MSR_PR bne 1f /* debug interrupt happened in enter/exit path */ mfspr r4, SPRN_CSRR1 rlwinm r4, r4, 0, ~MSR_DE mtspr SPRN_CSRR1, r4 lis r4, 0xffff ori r4, r4, 0xffff mtspr SPRN_DBSR, r4 mfspr r4, SPRN_SPRG_THREAD lwz r4, THREAD_KVM_VCPU(r4) mtcr r3 lwz r3, VCPU_CRIT_SAVE(r4) mfspr r4, \scratch rfci 1: /* debug interrupt happened in guest */ mtcr r3 mfspr r4, SPRN_SPRG_THREAD lwz r4, THREAD_KVM_VCPU(r4) lwz r3, VCPU_CRIT_SAVE(r4) mfspr r4, \scratch __KVM_HANDLER \ivor_nr \scratch \srr0 .endm .macro KVM_HANDLER_ADDR ivor_nr .long kvmppc_handler_\ivor_nr .endm .macro KVM_HANDLER_END .long kvmppc_handlers_end .endm _GLOBAL(kvmppc_handlers_start) KVM_HANDLER BOOKE_INTERRUPT_CRITICAL SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0 KVM_HANDLER BOOKE_INTERRUPT_MACHINE_CHECK SPRN_SPRG_RSCRATCH_MC SPRN_MCSRR0 KVM_HANDLER BOOKE_INTERRUPT_DATA_STORAGE SPRN_SPRG_RSCRATCH0 SPRN_SRR0 KVM_HANDLER BOOKE_INTERRUPT_INST_STORAGE SPRN_SPRG_RSCRATCH0 SPRN_SRR0 KVM_HANDLER BOOKE_INTERRUPT_EXTERNAL SPRN_SPRG_RSCRATCH0 SPRN_SRR0 KVM_HANDLER BOOKE_INTERRUPT_ALIGNMENT SPRN_SPRG_RSCRATCH0 SPRN_SRR0 KVM_HANDLER BOOKE_INTERRUPT_PROGRAM SPRN_SPRG_RSCRATCH0 SPRN_SRR0 KVM_HANDLER BOOKE_INTERRUPT_FP_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0 KVM_HANDLER BOOKE_INTERRUPT_SYSCALL SPRN_SPRG_RSCRATCH0 SPRN_SRR0 KVM_HANDLER BOOKE_INTERRUPT_AP_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0 KVM_HANDLER BOOKE_INTERRUPT_DECREMENTER SPRN_SPRG_RSCRATCH0 SPRN_SRR0 KVM_HANDLER BOOKE_INTERRUPT_FIT SPRN_SPRG_RSCRATCH0 SPRN_SRR0 KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0 KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS SPRN_SPRG_RSCRATCH0 SPRN_SRR0 KVM_HANDLER BOOKE_INTERRUPT_ITLB_MISS SPRN_SPRG_RSCRATCH0 SPRN_SRR0 KVM_DBG_HANDLER BOOKE_INTERRUPT_DEBUG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0 KVM_HANDLER BOOKE_INTERRUPT_SPE_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0 KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_DATA SPRN_SPRG_RSCRATCH0 SPRN_SRR0 KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_ROUND SPRN_SPRG_RSCRATCH0 SPRN_SRR0 _GLOBAL(kvmppc_handlers_end) /* Registers: * SPRG_SCRATCH0: guest r4 * r4: vcpu pointer * r5: KVM exit number */ _GLOBAL(kvmppc_resume_host) mfcr r3 stw r3, VCPU_CR(r4) stw r7, VCPU_GPR(R7)(r4) stw r8, VCPU_GPR(R8)(r4) stw r9, VCPU_GPR(R9)(r4) li r6, 1 slw r6, r6, r5 #ifdef CONFIG_KVM_EXIT_TIMING /* save exit time */ 1: mfspr r7, SPRN_TBRU mfspr r8, SPRN_TBRL mfspr r9, SPRN_TBRU cmpw r9, r7 bne 1b stw r8, VCPU_TIMING_EXIT_TBL(r4) stw r9, VCPU_TIMING_EXIT_TBU(r4) #endif /* Save the faulting instruction and all GPRs for emulation. */ andi. r7, r6, NEED_INST_MASK beq ..skip_inst_copy mfspr r9, SPRN_SRR0 mfmsr r8 ori r7, r8, MSR_DS mtmsr r7 isync lwz r9, 0(r9) mtmsr r8 isync stw r9, VCPU_LAST_INST(r4) stw r15, VCPU_GPR(R15)(r4) stw r16, VCPU_GPR(R16)(r4) stw r17, VCPU_GPR(R17)(r4) stw r18, VCPU_GPR(R18)(r4) stw r19, VCPU_GPR(R19)(r4) stw r20, VCPU_GPR(R20)(r4) stw r21, VCPU_GPR(R21)(r4) stw r22, VCPU_GPR(R22)(r4) stw r23, VCPU_GPR(R23)(r4) stw r24, VCPU_GPR(R24)(r4) stw r25, VCPU_GPR(R25)(r4) stw r26, VCPU_GPR(R26)(r4) stw r27, VCPU_GPR(R27)(r4) stw r28, VCPU_GPR(R28)(r4) stw r29, VCPU_GPR(R29)(r4) stw r30, VCPU_GPR(R30)(r4) stw r31, VCPU_GPR(R31)(r4) ..skip_inst_copy: /* Also grab DEAR and ESR before the host can clobber them. */ andi. r7, r6, NEED_DEAR_MASK beq ..skip_dear mfspr r9, SPRN_DEAR stw r9, VCPU_FAULT_DEAR(r4) ..skip_dear: andi. r7, r6, NEED_ESR_MASK beq ..skip_esr mfspr r9, SPRN_ESR stw r9, VCPU_FAULT_ESR(r4) ..skip_esr: /* Save remaining volatile guest register state to vcpu. 
*/ stw r0, VCPU_GPR(R0)(r4) stw r1, VCPU_GPR(R1)(r4) stw r2, VCPU_GPR(R2)(r4) stw r10, VCPU_GPR(R10)(r4) stw r11, VCPU_GPR(R11)(r4) stw r12, VCPU_GPR(R12)(r4) stw r13, VCPU_GPR(R13)(r4) stw r14, VCPU_GPR(R14)(r4) /* We need a NV GPR below. */ mflr r3 stw r3, VCPU_LR(r4) mfxer r3 stw r3, VCPU_XER(r4) /* Restore host stack pointer and PID before IVPR, since the host * exception handlers use them. */ lwz r1, VCPU_HOST_STACK(r4) lwz r3, VCPU_HOST_PID(r4) mtspr SPRN_PID, r3 #ifdef CONFIG_PPC_85xx /* we cheat and know that Linux doesn't use PID1 which is always 0 */ lis r3, 0 mtspr SPRN_PID1, r3 #endif /* Restore host IVPR before re-enabling interrupts. We cheat and know * that Linux IVPR is always 0xc0000000. */ lis r3, 0xc000 mtspr SPRN_IVPR, r3 /* Switch to kernel stack and jump to handler. */ LOAD_REG_ADDR(r3, kvmppc_handle_exit) mtctr r3 mr r3, r4 lwz r2, HOST_R2(r1) mr r14, r4 /* Save vcpu pointer. */ bctrl /* kvmppc_handle_exit() */ /* Restore vcpu pointer and the nonvolatiles we used. */ mr r4, r14 lwz r14, VCPU_GPR(R14)(r4) /* Sometimes instruction emulation must restore complete GPR state. */ andi. r5, r3, RESUME_FLAG_NV beq ..skip_nv_load lwz r15, VCPU_GPR(R15)(r4) lwz r16, VCPU_GPR(R16)(r4) lwz r17, VCPU_GPR(R17)(r4) lwz r18, VCPU_GPR(R18)(r4) lwz r19, VCPU_GPR(R19)(r4) lwz r20, VCPU_GPR(R20)(r4) lwz r21, VCPU_GPR(R21)(r4) lwz r22, VCPU_GPR(R22)(r4) lwz r23, VCPU_GPR(R23)(r4) lwz r24, VCPU_GPR(R24)(r4) lwz r25, VCPU_GPR(R25)(r4) lwz r26, VCPU_GPR(R26)(r4) lwz r27, VCPU_GPR(R27)(r4) lwz r28, VCPU_GPR(R28)(r4) lwz r29, VCPU_GPR(R29)(r4) lwz r30, VCPU_GPR(R30)(r4) lwz r31, VCPU_GPR(R31)(r4) ..skip_nv_load: /* Should we return to the guest? */ andi. r5, r3, RESUME_FLAG_HOST beq lightweight_exit srawi r3, r3, 2 /* Shift -ERR back down. */ heavyweight_exit: /* Not returning to guest. */ #ifdef CONFIG_SPE /* save guest SPEFSCR and load host SPEFSCR */ mfspr r9, SPRN_SPEFSCR stw r9, VCPU_SPEFSCR(r4) lwz r9, VCPU_HOST_SPEFSCR(r4) mtspr SPRN_SPEFSCR, r9 #endif /* We already saved guest volatile register state; now save the * non-volatiles. */ stw r15, VCPU_GPR(R15)(r4) stw r16, VCPU_GPR(R16)(r4) stw r17, VCPU_GPR(R17)(r4) stw r18, VCPU_GPR(R18)(r4) stw r19, VCPU_GPR(R19)(r4) stw r20, VCPU_GPR(R20)(r4) stw r21, VCPU_GPR(R21)(r4) stw r22, VCPU_GPR(R22)(r4) stw r23, VCPU_GPR(R23)(r4) stw r24, VCPU_GPR(R24)(r4) stw r25, VCPU_GPR(R25)(r4) stw r26, VCPU_GPR(R26)(r4) stw r27, VCPU_GPR(R27)(r4) stw r28, VCPU_GPR(R28)(r4) stw r29, VCPU_GPR(R29)(r4) stw r30, VCPU_GPR(R30)(r4) stw r31, VCPU_GPR(R31)(r4) /* Load host non-volatile register state from host stack. */ lwz r14, HOST_NV_GPR(R14)(r1) lwz r15, HOST_NV_GPR(R15)(r1) lwz r16, HOST_NV_GPR(R16)(r1) lwz r17, HOST_NV_GPR(R17)(r1) lwz r18, HOST_NV_GPR(R18)(r1) lwz r19, HOST_NV_GPR(R19)(r1) lwz r20, HOST_NV_GPR(R20)(r1) lwz r21, HOST_NV_GPR(R21)(r1) lwz r22, HOST_NV_GPR(R22)(r1) lwz r23, HOST_NV_GPR(R23)(r1) lwz r24, HOST_NV_GPR(R24)(r1) lwz r25, HOST_NV_GPR(R25)(r1) lwz r26, HOST_NV_GPR(R26)(r1) lwz r27, HOST_NV_GPR(R27)(r1) lwz r28, HOST_NV_GPR(R28)(r1) lwz r29, HOST_NV_GPR(R29)(r1) lwz r30, HOST_NV_GPR(R30)(r1) lwz r31, HOST_NV_GPR(R31)(r1) /* Return to kvm_vcpu_run(). */ lwz r4, HOST_STACK_LR(r1) lwz r5, HOST_CR(r1) addi r1, r1, HOST_STACK_SIZE mtlr r4 mtcr r5 /* r3 still contains the return code from kvmppc_handle_exit(). */ blr /* Registers: * r3: vcpu pointer */ _GLOBAL(__kvmppc_vcpu_run) stwu r1, -HOST_STACK_SIZE(r1) stw r1, VCPU_HOST_STACK(r3) /* Save stack pointer to vcpu. */ /* Save host state to stack. 
*/ mr r4, r3 mflr r3 stw r3, HOST_STACK_LR(r1) mfcr r5 stw r5, HOST_CR(r1) /* Save host non-volatile register state to stack. */ stw r14, HOST_NV_GPR(R14)(r1) stw r15, HOST_NV_GPR(R15)(r1) stw r16, HOST_NV_GPR(R16)(r1) stw r17, HOST_NV_GPR(R17)(r1) stw r18, HOST_NV_GPR(R18)(r1) stw r19, HOST_NV_GPR(R19)(r1) stw r20, HOST_NV_GPR(R20)(r1) stw r21, HOST_NV_GPR(R21)(r1) stw r22, HOST_NV_GPR(R22)(r1) stw r23, HOST_NV_GPR(R23)(r1) stw r24, HOST_NV_GPR(R24)(r1) stw r25, HOST_NV_GPR(R25)(r1) stw r26, HOST_NV_GPR(R26)(r1) stw r27, HOST_NV_GPR(R27)(r1) stw r28, HOST_NV_GPR(R28)(r1) stw r29, HOST_NV_GPR(R29)(r1) stw r30, HOST_NV_GPR(R30)(r1) stw r31, HOST_NV_GPR(R31)(r1) /* Load guest non-volatiles. */ lwz r14, VCPU_GPR(R14)(r4) lwz r15, VCPU_GPR(R15)(r4) lwz r16, VCPU_GPR(R16)(r4) lwz r17, VCPU_GPR(R17)(r4) lwz r18, VCPU_GPR(R18)(r4) lwz r19, VCPU_GPR(R19)(r4) lwz r20, VCPU_GPR(R20)(r4) lwz r21, VCPU_GPR(R21)(r4) lwz r22, VCPU_GPR(R22)(r4) lwz r23, VCPU_GPR(R23)(r4) lwz r24, VCPU_GPR(R24)(r4) lwz r25, VCPU_GPR(R25)(r4) lwz r26, VCPU_GPR(R26)(r4) lwz r27, VCPU_GPR(R27)(r4) lwz r28, VCPU_GPR(R28)(r4) lwz r29, VCPU_GPR(R29)(r4) lwz r30, VCPU_GPR(R30)(r4) lwz r31, VCPU_GPR(R31)(r4) #ifdef CONFIG_SPE /* save host SPEFSCR and load guest SPEFSCR */ mfspr r3, SPRN_SPEFSCR stw r3, VCPU_HOST_SPEFSCR(r4) lwz r3, VCPU_SPEFSCR(r4) mtspr SPRN_SPEFSCR, r3 #endif lightweight_exit: stw r2, HOST_R2(r1) mfspr r3, SPRN_PID stw r3, VCPU_HOST_PID(r4) lwz r3, VCPU_SHADOW_PID(r4) mtspr SPRN_PID, r3 #ifdef CONFIG_PPC_85xx lwz r3, VCPU_SHADOW_PID1(r4) mtspr SPRN_PID1, r3 #endif /* Load some guest volatiles. */ lwz r0, VCPU_GPR(R0)(r4) lwz r2, VCPU_GPR(R2)(r4) lwz r9, VCPU_GPR(R9)(r4) lwz r10, VCPU_GPR(R10)(r4) lwz r11, VCPU_GPR(R11)(r4) lwz r12, VCPU_GPR(R12)(r4) lwz r13, VCPU_GPR(R13)(r4) lwz r3, VCPU_LR(r4) mtlr r3 lwz r3, VCPU_XER(r4) mtxer r3 /* Switch the IVPR. XXX If we take a TLB miss after this we're screwed, * so how do we make sure vcpu won't fault? */ lis r8, kvmppc_booke_handlers@ha lwz r8, kvmppc_booke_handlers@l(r8) mtspr SPRN_IVPR, r8 lwz r5, VCPU_SHARED(r4) /* Can't switch the stack pointer until after IVPR is switched, * because host interrupt handlers would get confused. */ lwz r1, VCPU_GPR(R1)(r4) /* * Host interrupt handlers may have clobbered these * guest-readable SPRGs, or the guest kernel may have * written directly to the shared area, so we * need to reload them here with the guest's values. */ PPC_LD(r3, VCPU_SHARED_SPRG4, r5) mtspr SPRN_SPRG4W, r3 PPC_LD(r3, VCPU_SHARED_SPRG5, r5) mtspr SPRN_SPRG5W, r3 PPC_LD(r3, VCPU_SHARED_SPRG6, r5) mtspr SPRN_SPRG6W, r3 PPC_LD(r3, VCPU_SHARED_SPRG7, r5) mtspr SPRN_SPRG7W, r3 #ifdef CONFIG_KVM_EXIT_TIMING /* save enter time */ 1: mfspr r6, SPRN_TBRU mfspr r7, SPRN_TBRL mfspr r8, SPRN_TBRU cmpw r8, r6 bne 1b stw r7, VCPU_TIMING_LAST_ENTER_TBL(r4) stw r8, VCPU_TIMING_LAST_ENTER_TBU(r4) #endif /* Finish loading guest volatiles and jump to guest. */ lwz r3, VCPU_CTR(r4) lwz r5, VCPU_CR(r4) lwz r6, VCPU_PC(r4) lwz r7, VCPU_SHADOW_MSR(r4) mtctr r3 mtcr r5 mtsrr0 r6 mtsrr1 r7 lwz r5, VCPU_GPR(R5)(r4) lwz r6, VCPU_GPR(R6)(r4) lwz r7, VCPU_GPR(R7)(r4) lwz r8, VCPU_GPR(R8)(r4) /* Clear any debug events which occurred since we disabled MSR[DE]. * XXX This gives us a 3-instruction window in which a breakpoint * intended for guest context could fire in the host instead. 
*/ lis r3, 0xffff ori r3, r3, 0xffff mtspr SPRN_DBSR, r3 lwz r3, VCPU_GPR(R3)(r4) lwz r4, VCPU_GPR(R4)(r4) rfi .data .align 4 .globl kvmppc_booke_handler_addr kvmppc_booke_handler_addr: KVM_HANDLER_ADDR BOOKE_INTERRUPT_CRITICAL KVM_HANDLER_ADDR BOOKE_INTERRUPT_MACHINE_CHECK KVM_HANDLER_ADDR BOOKE_INTERRUPT_DATA_STORAGE KVM_HANDLER_ADDR BOOKE_INTERRUPT_INST_STORAGE KVM_HANDLER_ADDR BOOKE_INTERRUPT_EXTERNAL KVM_HANDLER_ADDR BOOKE_INTERRUPT_ALIGNMENT KVM_HANDLER_ADDR BOOKE_INTERRUPT_PROGRAM KVM_HANDLER_ADDR BOOKE_INTERRUPT_FP_UNAVAIL KVM_HANDLER_ADDR BOOKE_INTERRUPT_SYSCALL KVM_HANDLER_ADDR BOOKE_INTERRUPT_AP_UNAVAIL KVM_HANDLER_ADDR BOOKE_INTERRUPT_DECREMENTER KVM_HANDLER_ADDR BOOKE_INTERRUPT_FIT KVM_HANDLER_ADDR BOOKE_INTERRUPT_WATCHDOG KVM_HANDLER_ADDR BOOKE_INTERRUPT_DTLB_MISS KVM_HANDLER_ADDR BOOKE_INTERRUPT_ITLB_MISS KVM_HANDLER_ADDR BOOKE_INTERRUPT_DEBUG KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_UNAVAIL KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_FP_DATA KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_FP_ROUND KVM_HANDLER_END /*Always keep this in end*/ #ifdef CONFIG_SPE _GLOBAL(kvmppc_save_guest_spe) cmpi 0,r3,0 beqlr- SAVE_32EVRS(0, r4, r3, VCPU_EVR) evxor evr6, evr6, evr6 evmwumiaa evr6, evr6, evr6 li r4,VCPU_ACC evstddx evr6, r4, r3 /* save acc */ blr _GLOBAL(kvmppc_load_guest_spe) cmpi 0,r3,0 beqlr- li r4,VCPU_ACC evlddx evr6,r4,r3 evmra evr6,evr6 /* load acc */ REST_32EVRS(0, r4, r3, VCPU_EVR) blr #endif
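kvmppc_resume_host above turns the exit number into a one-hot bit (li r6,1; slw r6,r6,r5) and tests it against the NEED_*_MASK constants, so DEAR, ESR, and the faulting instruction are saved only for the exits that need them. A C model of that dispatch; the interrupt numbers here are placeholders rather than the real asm/kvm_asm.h values, and exit numbers are assumed to be below 32 as the masks require:

```c
#include <stdbool.h>

/* Placeholder exit numbers; the real ones live in asm/kvm_asm.h. */
enum {
    INT_DATA_STORAGE = 2,
    INT_ALIGNMENT    = 5,
    INT_DTLB_MISS    = 13,
};

#define NEED_DEAR_MASK ((1u << INT_DATA_STORAGE) | \
                        (1u << INT_DTLB_MISS)    | \
                        (1u << INT_ALIGNMENT))

/* Hypothetical model of the li/slw/andi. test in kvmppc_resume_host. */
static bool sketch_need_dear(unsigned int exit_nr)
{
    return ((1u << exit_nr) & NEED_DEAR_MASK) != 0;  /* exit_nr < 32 assumed */
}
```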
aixcc-public/challenge-001-exemplar-source · 4,186 bytes · arch/powerpc/kvm/book3s_rmhandlers.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * * Copyright SUSE Linux Products GmbH 2009 * * Authors: Alexander Graf <agraf@suse.de> */ #include <asm/ppc_asm.h> #include <asm/kvm_asm.h> #include <asm/reg.h> #include <asm/mmu.h> #include <asm/page.h> #include <asm/asm-offsets.h> #include <asm/asm-compat.h> #ifdef CONFIG_PPC_BOOK3S_64 #include <asm/exception-64s.h> #endif /***************************************************************************** * * * Real Mode handlers that need to be in low physical memory * * * ****************************************************************************/ #if defined(CONFIG_PPC_BOOK3S_64) #ifdef CONFIG_PPC64_ELF_ABI_V2 #define FUNC(name) name #else #define FUNC(name) GLUE(.,name) #endif #elif defined(CONFIG_PPC_BOOK3S_32) #define FUNC(name) name #define RFI_TO_KERNEL rfi #define RFI_TO_GUEST rfi .macro INTERRUPT_TRAMPOLINE intno .global kvmppc_trampoline_\intno kvmppc_trampoline_\intno: mtspr SPRN_SPRG_SCRATCH0, r13 /* Save r13 */ /* * First thing to do is to find out if we're coming * from a KVM guest or a Linux process. * * To distinguish, we check a magic byte in the PACA/current */ mfspr r13, SPRN_SPRG_THREAD lwz r13, THREAD_KVM_SVCPU(r13) /* PPC32 can have a NULL pointer - let's check for that */ mtspr SPRN_SPRG_SCRATCH1, r12 /* Save r12 */ mfcr r12 cmpwi r13, 0 bne 1f 2: mtcr r12 mfspr r12, SPRN_SPRG_SCRATCH1 mfspr r13, SPRN_SPRG_SCRATCH0 /* r13 = original r13 */ b kvmppc_resume_\intno /* Get back original handler */ 1: tophys(r13, r13) stw r12, HSTATE_SCRATCH1(r13) mfspr r12, SPRN_SPRG_SCRATCH1 stw r12, HSTATE_SCRATCH0(r13) lbz r12, HSTATE_IN_GUEST(r13) cmpwi r12, KVM_GUEST_MODE_NONE bne ..kvmppc_handler_hasmagic_\intno /* No KVM guest? Then jump back to the Linux handler! */ lwz r12, HSTATE_SCRATCH1(r13) b 2b /* Now we know we're handling a KVM guest */ ..kvmppc_handler_hasmagic_\intno: /* Should we just skip the faulting instruction? */ cmpwi r12, KVM_GUEST_MODE_SKIP beq kvmppc_handler_skip_ins /* Let's store which interrupt we're handling */ li r12, \intno /* Jump into the SLB exit code that goes to the highmem handler */ b kvmppc_handler_trampoline_exit .endm INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_SYSTEM_RESET INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_MACHINE_CHECK INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_STORAGE INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_STORAGE INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_EXTERNAL INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALIGNMENT INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PROGRAM INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_FP_UNAVAIL INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DECREMENTER INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_SYSCALL INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_TRACE INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PERFMON INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALTIVEC /* * Bring us back to the faulting code, but skip the * faulting instruction. * * This is a generic exit path from the interrupt * trampolines above. 
* * Input Registers: * * R12 = free * R13 = Shadow VCPU (PACA) * HSTATE.SCRATCH0 = guest R12 * HSTATE.SCRATCH1 = guest CR * SPRG_SCRATCH0 = guest R13 * */ kvmppc_handler_skip_ins: /* Patch the IP to the next instruction */ mfsrr0 r12 addi r12, r12, 4 mtsrr0 r12 /* Clean up all state */ lwz r12, HSTATE_SCRATCH1(r13) mtcr r12 PPC_LL r12, HSTATE_SCRATCH0(r13) GET_SCRATCH0(r13) /* And get back into the code */ RFI_TO_KERNEL #endif /* * Call kvmppc_handler_trampoline_enter in real mode * * On entry, r4 contains the guest shadow MSR * MSR.EE has to be 0 when calling this function */ _GLOBAL_TOC(kvmppc_entry_trampoline) mfmsr r5 LOAD_REG_ADDR(r7, kvmppc_handler_trampoline_enter) toreal(r7) li r6, MSR_IR | MSR_DR andc r6, r5, r6 /* Clear DR and IR in MSR value */ /* * Set EE in HOST_MSR so that it's enabled when we get into our * C exit handler function. */ ori r5, r5, MSR_EE mtsrr0 r7 mtsrr1 r6 RFI_TO_KERNEL #include "book3s_segment.S"
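kvmppc_entry_trampoline above derives two MSR images from the current MSR: the rfid target with IR and DR cleared, so the trampoline runs in real mode, and the saved host MSR with EE set, so interrupts come back on once the C exit handler runs. A sketch of that bit arithmetic; the MSR_* values below are the conventional asm/reg.h ones and should be treated as assumptions here:

#include <stdint.h>
#include <stdio.h>

#define MSR_EE 0x8000ULL   /* external interrupt enable */
#define MSR_IR 0x0020ULL   /* instruction relocation */
#define MSR_DR 0x0010ULL   /* data relocation */

int main(void)
{
    uint64_t msr = 0x9000000000009033ULL;   /* example host MSR value */

    /* "li r6, MSR_IR | MSR_DR; andc r6, r5, r6": translation off */
    uint64_t real_mode_msr = msr & ~(MSR_IR | MSR_DR);

    /* "ori r5, r5, MSR_EE": interrupts enabled in the saved host MSR */
    uint64_t saved_host_msr = msr | MSR_EE;

    printf("real mode: %016llx\nhost save: %016llx\n",
           (unsigned long long)real_mode_msr,
           (unsigned long long)saved_host_msr);
    return 0;
}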
aixcc-public/challenge-001-exemplar-source
9,129
arch/powerpc/kvm/tm.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * * Derived from book3s_hv_rmhandlers.S, which is: * * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> */ #include <asm/reg.h> #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> #include <asm/export.h> #include <asm/tm.h> #include <asm/cputable.h> #ifdef CONFIG_PPC_TRANSACTIONAL_MEM #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM) /* * Save transactional state and TM-related registers. * Called with: * - r3 pointing to the vcpu struct * - r4 containing the MSR with current TS bits: * (For HV KVM, it is VCPU_MSR ; For PR KVM, it is host MSR). * - r5 containing a flag indicating that non-volatile registers * must be preserved. * If r5 == 0, this can modify all checkpointed registers, but * restores r1, r2 before exit. If r5 != 0, this restores the * MSR TM/FP/VEC/VSX bits to their state on entry. */ _GLOBAL(__kvmppc_save_tm) mflr r0 std r0, PPC_LR_STKOFF(r1) stdu r1, -SWITCH_FRAME_SIZE(r1) mr r9, r3 cmpdi cr7, r5, 0 /* Turn on TM. */ mfmsr r8 mr r10, r8 li r0, 1 rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG ori r8, r8, MSR_FP oris r8, r8, (MSR_VEC | MSR_VSX)@h mtmsrd r8 rldicl. r4, r4, 64 - MSR_TS_S_LG, 62 beq 1f /* TM not active in guest. */ std r1, HSTATE_SCRATCH2(r13) std r3, HSTATE_SCRATCH1(r13) /* Save CR on the stack - even if r5 == 0 we need to get cr7 back. */ mfcr r6 SAVE_GPR(6, r1) /* Save DSCR so we can restore it to avoid running with user value */ mfspr r7, SPRN_DSCR SAVE_GPR(7, r1) /* * We are going to do treclaim., which will modify all checkpointed * registers. Save the non-volatile registers on the stack if * preservation of non-volatile state has been requested. */ beq cr7, 3f SAVE_NVGPRS(r1) /* MSR[TS] will be 0 (non-transactional) once we do treclaim. */ li r0, 0 rldimi r10, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG SAVE_GPR(10, r1) /* final MSR value */ 3: #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE BEGIN_FTR_SECTION /* Emulation of the treclaim instruction needs TEXASR before treclaim */ mfspr r6, SPRN_TEXASR std r6, VCPU_ORIG_TEXASR(r3) END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) #endif /* Clear the MSR RI since r1, r13 are all going to be foobar. */ li r5, 0 mtmsrd r5, 1 li r3, TM_CAUSE_KVM_RESCHED /* All GPRs are volatile at this point. */ TRECLAIM(R3) /* Temporarily store r13 and r9 so we have some regs to play with */ SET_SCRATCH0(r13) GET_PACA(r13) std r9, PACATMSCRATCH(r13) ld r9, HSTATE_SCRATCH1(r13) /* Save away PPR soon so we don't run with user value. */ std r0, VCPU_GPRS_TM(0)(r9) mfspr r0, SPRN_PPR HMT_MEDIUM /* Reload stack pointer. */ std r1, VCPU_GPRS_TM(1)(r9) ld r1, HSTATE_SCRATCH2(r13) /* Set MSR RI now we have r1 and r13 back. */ std r2, VCPU_GPRS_TM(2)(r9) li r2, MSR_RI mtmsrd r2, 1 /* Reload TOC pointer. */ LOAD_PACA_TOC() /* Save all but r0-r2, r9 & r13 */ reg = 3 .rept 29 .if (reg != 9) && (reg != 13) std reg, VCPU_GPRS_TM(reg)(r9) .endif reg = reg + 1 .endr /* ... now save r13 */ GET_SCRATCH0(r4) std r4, VCPU_GPRS_TM(13)(r9) /* ... and save r9 */ ld r4, PACATMSCRATCH(r13) std r4, VCPU_GPRS_TM(9)(r9) /* Restore host DSCR and CR values, after saving guest values */ mfcr r6 mfspr r7, SPRN_DSCR stw r6, VCPU_CR_TM(r9) std r7, VCPU_DSCR_TM(r9) REST_GPR(6, r1) REST_GPR(7, r1) mtcr r6 mtspr SPRN_DSCR, r7 /* Save away checkpointed SPRs. */ std r0, VCPU_PPR_TM(r9) mflr r5 mfctr r7 mfspr r8, SPRN_AMR mfspr r10, SPRN_TAR mfxer r11 std r5, VCPU_LR_TM(r9) std r7, VCPU_CTR_TM(r9) std r8, VCPU_AMR_TM(r9) std r10, VCPU_TAR_TM(r9) std r11, VCPU_XER_TM(r9) /* Save FP/VSX. 
*/ addi r3, r9, VCPU_FPRS_TM bl store_fp_state addi r3, r9, VCPU_VRS_TM bl store_vr_state mfspr r6, SPRN_VRSAVE stw r6, VCPU_VRSAVE_TM(r9) /* Restore non-volatile registers if requested to */ beq cr7, 1f REST_NVGPRS(r1) REST_GPR(10, r1) 1: /* * We need to save these SPRs after the treclaim so that the software * error code is recorded correctly in the TEXASR. Also the user may * change these outside of a transaction, so they must always be * context switched. */ mfspr r7, SPRN_TEXASR std r7, VCPU_TEXASR(r9) mfspr r5, SPRN_TFHAR mfspr r6, SPRN_TFIAR std r5, VCPU_TFHAR(r9) std r6, VCPU_TFIAR(r9) /* Restore MSR state if requested */ beq cr7, 2f mtmsrd r10, 0 2: addi r1, r1, SWITCH_FRAME_SIZE ld r0, PPC_LR_STKOFF(r1) mtlr r0 blr /* * _kvmppc_save_tm_pr() is a wrapper around __kvmppc_save_tm(), so that it can * be invoked from C function by PR KVM only. */ _GLOBAL(_kvmppc_save_tm_pr) mflr r0 std r0, PPC_LR_STKOFF(r1) stdu r1, -PPC_MIN_STKFRM(r1) mfspr r8, SPRN_TAR std r8, PPC_MIN_STKFRM-8(r1) li r5, 1 /* preserve non-volatile registers */ bl __kvmppc_save_tm ld r8, PPC_MIN_STKFRM-8(r1) mtspr SPRN_TAR, r8 addi r1, r1, PPC_MIN_STKFRM ld r0, PPC_LR_STKOFF(r1) mtlr r0 blr EXPORT_SYMBOL_GPL(_kvmppc_save_tm_pr); /* * Restore transactional state and TM-related registers. * Called with: * - r3 pointing to the vcpu struct. * - r4 is the guest MSR with desired TS bits: * For HV KVM, it is VCPU_MSR * For PR KVM, it is provided by caller * - r5 containing a flag indicating that non-volatile registers * must be preserved. * If r5 == 0, this potentially modifies all checkpointed registers, but * restores r1, r2 from the PACA before exit. * If r5 != 0, this restores the MSR TM/FP/VEC/VSX bits to their state on entry. */ _GLOBAL(__kvmppc_restore_tm) mflr r0 std r0, PPC_LR_STKOFF(r1) cmpdi cr7, r5, 0 /* Turn on TM/FP/VSX/VMX so we can restore them. */ mfmsr r5 mr r10, r5 li r6, MSR_TM >> 32 sldi r6, r6, 32 or r5, r5, r6 ori r5, r5, MSR_FP oris r5, r5, (MSR_VEC | MSR_VSX)@h mtmsrd r5 /* * The user may change these outside of a transaction, so they must * always be context switched. */ ld r5, VCPU_TFHAR(r3) ld r6, VCPU_TFIAR(r3) ld r7, VCPU_TEXASR(r3) mtspr SPRN_TFHAR, r5 mtspr SPRN_TFIAR, r6 mtspr SPRN_TEXASR, r7 mr r5, r4 rldicl. r5, r5, 64 - MSR_TS_S_LG, 62 beq 9f /* TM not active in guest */ /* Make sure the failure summary is set, otherwise we'll program check * when we trechkpt. It's possible that this might have been not set * on a kvmppc_set_one_reg() call but we shouldn't let this crash the * host. */ oris r7, r7, (TEXASR_FS)@h mtspr SPRN_TEXASR, r7 /* * Make a stack frame and save non-volatile registers if requested. */ stdu r1, -SWITCH_FRAME_SIZE(r1) std r1, HSTATE_SCRATCH2(r13) mfcr r6 mfspr r7, SPRN_DSCR SAVE_GPR(2, r1) SAVE_GPR(6, r1) SAVE_GPR(7, r1) beq cr7, 4f SAVE_NVGPRS(r1) /* MSR[TS] will be 1 (suspended) once we do trechkpt */ li r0, 1 rldimi r10, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG SAVE_GPR(10, r1) /* final MSR value */ 4: /* * We need to load up the checkpointed state for the guest. * We need to do this early as it will blow away any GPRs, VSRs and * some SPRs. 
*/ mr r31, r3 addi r3, r31, VCPU_FPRS_TM bl load_fp_state addi r3, r31, VCPU_VRS_TM bl load_vr_state mr r3, r31 lwz r7, VCPU_VRSAVE_TM(r3) mtspr SPRN_VRSAVE, r7 ld r5, VCPU_LR_TM(r3) lwz r6, VCPU_CR_TM(r3) ld r7, VCPU_CTR_TM(r3) ld r8, VCPU_AMR_TM(r3) ld r9, VCPU_TAR_TM(r3) ld r10, VCPU_XER_TM(r3) mtlr r5 mtcr r6 mtctr r7 mtspr SPRN_AMR, r8 mtspr SPRN_TAR, r9 mtxer r10 /* * Load up PPR and DSCR values but don't put them in the actual SPRs * till the last moment to avoid running with userspace PPR and DSCR for * too long. */ ld r29, VCPU_DSCR_TM(r3) ld r30, VCPU_PPR_TM(r3) /* Clear the MSR RI since r1, r13 are all going to be foobar. */ li r5, 0 mtmsrd r5, 1 /* Load GPRs r0-r28 */ reg = 0 .rept 29 ld reg, VCPU_GPRS_TM(reg)(r31) reg = reg + 1 .endr mtspr SPRN_DSCR, r29 mtspr SPRN_PPR, r30 /* Load final GPRs */ ld 29, VCPU_GPRS_TM(29)(r31) ld 30, VCPU_GPRS_TM(30)(r31) ld 31, VCPU_GPRS_TM(31)(r31) /* TM checkpointed state is now setup. All GPRs are now volatile. */ TRECHKPT /* Now let's get back the state we need. */ HMT_MEDIUM GET_PACA(r13) ld r1, HSTATE_SCRATCH2(r13) REST_GPR(7, r1) mtspr SPRN_DSCR, r7 /* Set the MSR RI since we have our registers back. */ li r5, MSR_RI mtmsrd r5, 1 /* Restore TOC pointer and CR */ REST_GPR(2, r1) REST_GPR(6, r1) mtcr r6 /* Restore non-volatile registers if requested to. */ beq cr7, 5f REST_GPR(10, r1) REST_NVGPRS(r1) 5: addi r1, r1, SWITCH_FRAME_SIZE ld r0, PPC_LR_STKOFF(r1) mtlr r0 9: /* Restore MSR bits if requested */ beqlr cr7 mtmsrd r10, 0 blr /* * _kvmppc_restore_tm_pr() is a wrapper around __kvmppc_restore_tm(), so that it * can be invoked from C function by PR KVM only. */ _GLOBAL(_kvmppc_restore_tm_pr) mflr r0 std r0, PPC_LR_STKOFF(r1) stdu r1, -PPC_MIN_STKFRM(r1) /* save TAR so that it can be recovered later */ mfspr r8, SPRN_TAR std r8, PPC_MIN_STKFRM-8(r1) li r5, 1 bl __kvmppc_restore_tm ld r8, PPC_MIN_STKFRM-8(r1) mtspr SPRN_TAR, r8 addi r1, r1, PPC_MIN_STKFRM ld r0, PPC_LR_STKOFF(r1) mtlr r0 blr EXPORT_SYMBOL_GPL(_kvmppc_restore_tm_pr); #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
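Two idioms from tm.S above, modelled in C: the VCPU_GPRS_TM(reg) offset computation used by the .rept save/restore loops, and the rldicl test that treats a non-zero MSR[TS] field as "transaction active in the guest". VCPU_GPR_TM is a hypothetical base offset (the real value comes from asm-offsets), and MSR_TS_S_LG = 33 is assumed from asm/reg.h:

#include <stdint.h>
#include <stdio.h>

#define ULONG_SIZE   8       /* sizeof(unsigned long) on ppc64 */
#define VCPU_GPR_TM  0x100   /* hypothetical base offset of the checkpointed GPR array */
#define MSR_TS_S_LG  33      /* assumed bit position of the MSR[TS] field */

/* Byte offset of checkpointed GPR "reg", as VCPU_GPRS_TM(reg) computes. */
static unsigned long gprs_tm_offset(int reg)
{
    return (unsigned long)reg * ULONG_SIZE + VCPU_GPR_TM;
}

/* The "rldicl. r4, r4, 64 - MSR_TS_S_LG, 62" test: extract the two-bit
 * transaction-state field; non-zero means suspended or transactional. */
static int tm_active(uint64_t msr)
{
    return (int)((msr >> MSR_TS_S_LG) & 3);
}

int main(void)
{
    printf("GPR14 slot at %#lx, TS field = %d\n",
           gprs_tm_offset(14), tm_active(1ULL << MSR_TS_S_LG));
    return 0;
}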
aixcc-public/challenge-001-exemplar-source
5,664
arch/powerpc/kvm/fpu.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * FPU helper code to use FPU operations from inside the kernel * * Copyright (C) 2010 Alexander Graf (agraf@suse.de) */ #include <linux/pgtable.h> #include <asm/reg.h> #include <asm/page.h> #include <asm/mmu.h> #include <asm/cputable.h> #include <asm/cache.h> #include <asm/thread_info.h> #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> /* Instructions operating on single parameters */ /* * Single operation with one input operand * * R3 = (double*)&fpscr * R4 = (short*)&result * R5 = (short*)&param1 */ #define FPS_ONE_IN(name) \ _GLOBAL(fps_ ## name); \ lfd 0,0(r3); /* load up fpscr value */ \ MTFSF_L(0); \ lfs 0,0(r5); \ \ name 0,0; \ \ stfs 0,0(r4); \ mffs 0; \ stfd 0,0(r3); /* save new fpscr value */ \ blr /* * Single operation with two input operands * * R3 = (double*)&fpscr * R4 = (short*)&result * R5 = (short*)&param1 * R6 = (short*)&param2 */ #define FPS_TWO_IN(name) \ _GLOBAL(fps_ ## name); \ lfd 0,0(r3); /* load up fpscr value */ \ MTFSF_L(0); \ lfs 0,0(r5); \ lfs 1,0(r6); \ \ name 0,0,1; \ \ stfs 0,0(r4); \ mffs 0; \ stfd 0,0(r3); /* save new fpscr value */ \ blr /* * Single operation with three input operands * * R3 = (double*)&fpscr * R4 = (short*)&result * R5 = (short*)&param1 * R6 = (short*)&param2 * R7 = (short*)&param3 */ #define FPS_THREE_IN(name) \ _GLOBAL(fps_ ## name); \ lfd 0,0(r3); /* load up fpscr value */ \ MTFSF_L(0); \ lfs 0,0(r5); \ lfs 1,0(r6); \ lfs 2,0(r7); \ \ name 0,0,1,2; \ \ stfs 0,0(r4); \ mffs 0; \ stfd 0,0(r3); /* save new fpscr value */ \ blr FPS_ONE_IN(fres) FPS_ONE_IN(frsqrte) FPS_ONE_IN(fsqrts) FPS_TWO_IN(fadds) FPS_TWO_IN(fdivs) FPS_TWO_IN(fmuls) FPS_TWO_IN(fsubs) FPS_THREE_IN(fmadds) FPS_THREE_IN(fmsubs) FPS_THREE_IN(fnmadds) FPS_THREE_IN(fnmsubs) FPS_THREE_IN(fsel) /* Instructions operating on double parameters */ /* * Beginning of double instruction processing * * R3 = (double*)&fpscr * R4 = (u32*)&cr * R5 = (double*)&result * R6 = (double*)&param1 * R7 = (double*)&param2 [load_two] * R8 = (double*)&param3 [load_three] * LR = instruction call function */ fpd_load_three: lfd 2,0(r8) /* load param3 */ fpd_load_two: lfd 1,0(r7) /* load param2 */ fpd_load_one: lfd 0,0(r6) /* load param1 */ fpd_load_none: lfd 3,0(r3) /* load up fpscr value */ MTFSF_L(3) lwz r6, 0(r4) /* load cr */ mtcr r6 blr /* * End of double instruction processing * * R3 = (double*)&fpscr * R4 = (u32*)&cr * R5 = (double*)&result * LR = caller of instruction call function */ fpd_return: mfcr r6 stfd 0,0(r5) /* save result */ mffs 0 stfd 0,0(r3) /* save new fpscr value */ stw r6,0(r4) /* save new cr value */ blr /* * Double operation with no input operand * * R3 = (double*)&fpscr * R4 = (u32*)&cr * R5 = (double*)&result */ #define FPD_NONE_IN(name) \ _GLOBAL(fpd_ ## name); \ mflr r12; \ bl fpd_load_none; \ mtlr r12; \ \ name. 0; /* call instruction */ \ b fpd_return /* * Double operation with one input operand * * R3 = (double*)&fpscr * R4 = (u32*)&cr * R5 = (double*)&result * R6 = (double*)&param1 */ #define FPD_ONE_IN(name) \ _GLOBAL(fpd_ ## name); \ mflr r12; \ bl fpd_load_one; \ mtlr r12; \ \ name. 0,0; /* call instruction */ \ b fpd_return /* * Double operation with two input operands * * R3 = (double*)&fpscr * R4 = (u32*)&cr * R5 = (double*)&result * R6 = (double*)&param1 * R7 = (double*)&param2 * R8 = (double*)&param3 */ #define FPD_TWO_IN(name) \ _GLOBAL(fpd_ ## name); \ mflr r12; \ bl fpd_load_two; \ mtlr r12; \ \ name. 
0,0,1; /* call instruction */ \ b fpd_return /* * CR Double operation with two input operands * * R3 = (double*)&fpscr * R4 = (u32*)&cr * R5 = (double*)&param1 * R6 = (double*)&param2 */ #define FPD_TWO_IN_CR(name) \ _GLOBAL(fpd_ ## name); \ lfd 1,0(r6); /* load param2 */ \ lfd 0,0(r5); /* load param1 */ \ lfd 3,0(r3); /* load up fpscr value */ \ MTFSF_L(3); \ lwz r6, 0(r4); /* load cr */ \ mtcr r6; \ \ name 0,0,1; /* call instruction */ \ mfcr r6; \ mffs 0; \ stfd 0,0(r3); /* save new fpscr value */ \ stw r6,0(r4); /* save new cr value */ \ blr /* * Double operation with three input operands * * R3 = (double*)&fpscr * R4 = (u32*)&cr * R5 = (double*)&result * R6 = (double*)&param1 * R7 = (double*)&param2 * R8 = (double*)&param3 */ #define FPD_THREE_IN(name) \ _GLOBAL(fpd_ ## name); \ mflr r12; \ bl fpd_load_three; \ mtlr r12; \ \ name. 0,0,1,2; /* call instruction */ \ b fpd_return FPD_ONE_IN(fsqrts) FPD_ONE_IN(frsqrtes) FPD_ONE_IN(fres) FPD_ONE_IN(frsp) FPD_ONE_IN(fctiw) FPD_ONE_IN(fctiwz) FPD_ONE_IN(fsqrt) FPD_ONE_IN(fre) FPD_ONE_IN(frsqrte) FPD_ONE_IN(fneg) FPD_ONE_IN(fabs) FPD_TWO_IN(fadds) FPD_TWO_IN(fsubs) FPD_TWO_IN(fdivs) FPD_TWO_IN(fmuls) FPD_TWO_IN_CR(fcmpu) FPD_TWO_IN(fcpsgn) FPD_TWO_IN(fdiv) FPD_TWO_IN(fadd) FPD_TWO_IN(fmul) FPD_TWO_IN_CR(fcmpo) FPD_TWO_IN(fsub) FPD_THREE_IN(fmsubs) FPD_THREE_IN(fmadds) FPD_THREE_IN(fnmsubs) FPD_THREE_IN(fnmadds) FPD_THREE_IN(fsel) FPD_THREE_IN(fmsub) FPD_THREE_IN(fmadd) FPD_THREE_IN(fnmsub) FPD_THREE_IN(fnmadd) _GLOBAL(kvm_cvt_fd) lfs 0,0(r3) stfd 0,0(r4) blr _GLOBAL(kvm_cvt_df) lfd 0,0(r3) stfs 0,0(r4) blr
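Every fps_* helper above follows the same pointer convention (r3 = &fpscr, r4 = &result, r5 onward = operands), so callers hand the guest FPSCR and raw single-precision bit patterns through memory rather than through FP registers. A pure-C stand-in with the same shape, shown only to illustrate the calling convention; it omits the real helper's round-trip through the hardware FPSCR:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same shape as the asm fps_fadds: fpscr in/out, operands and result
 * passed as pointers to raw FP32 bit patterns. */
static void fps_fadds_sim(uint64_t *fpscr, uint32_t *result,
                          const uint32_t *p1, const uint32_t *p2)
{
    float a, b, r;

    memcpy(&a, p1, sizeof(a));     /* operands arrive as raw bits */
    memcpy(&b, p2, sizeof(b));
    r = a + b;                     /* the asm does this with a real fadds */
    memcpy(result, &r, sizeof(r));
    (void)fpscr;                   /* real helper loads/stores FPSCR here */
}

int main(void)
{
    uint64_t guest_fpscr = 0;
    float a = 1.5f, b = 2.25f, r;
    uint32_t ra, rb, out;

    memcpy(&ra, &a, sizeof(ra));
    memcpy(&rb, &b, sizeof(rb));
    fps_fadds_sim(&guest_fpscr, &out, &ra, &rb);
    memcpy(&r, &out, sizeof(r));
    printf("%g\n", r);
    return 0;
}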
aixcc-public/challenge-001-exemplar-source
10,875
arch/powerpc/kvm/book3s_64_entry.S
/* SPDX-License-Identifier: GPL-2.0-only */ #include <asm/asm-offsets.h> #include <asm/cache.h> #include <asm/code-patching-asm.h> #include <asm/exception-64s.h> #include <asm/export.h> #include <asm/kvm_asm.h> #include <asm/kvm_book3s_asm.h> #include <asm/mmu.h> #include <asm/ppc_asm.h> #include <asm/ptrace.h> #include <asm/reg.h> #include <asm/ultravisor-api.h> /* * These are branched to from interrupt handlers in exception-64s.S which set * IKVM_REAL or IKVM_VIRT, if HSTATE_IN_GUEST was found to be non-zero. */ /* * This is a hcall, so register convention is as * Documentation/powerpc/papr_hcalls.rst. * * This may also be a syscall from PR-KVM userspace that is to be * reflected to the PR guest kernel, so registers may be set up for * a system call rather than hcall. We don't currently clobber * anything here, but the 0xc00 handler has already clobbered CTR * and CR0, so PR-KVM can not support a guest kernel that preserves * those registers across its system calls. * * The state of registers is as kvmppc_interrupt, except CFAR is not * saved, R13 is not in SCRATCH0, and R10 does not contain the trap. */ .global kvmppc_hcall .balign IFETCH_ALIGN_BYTES kvmppc_hcall: #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE lbz r10,HSTATE_IN_GUEST(r13) cmpwi r10,KVM_GUEST_MODE_HV_P9 beq kvmppc_p9_exit_hcall #endif ld r10,PACA_EXGEN+EX_R13(r13) SET_SCRATCH0(r10) li r10,0xc00 /* Now we look like kvmppc_interrupt */ li r11,PACA_EXGEN b .Lgot_save_area /* * KVM interrupt entry occurs after GEN_INT_ENTRY runs, and follows that * call convention: * * guest R9-R13, CTR, CFAR, PPR saved in PACA EX_xxx save area * guest (H)DAR, (H)DSISR are also in the save area for relevant interrupts * guest R13 also saved in SCRATCH0 * R13 = PACA * R11 = (H)SRR0 * R12 = (H)SRR1 * R9 = guest CR * PPR is set to medium * * With the addition for KVM: * R10 = trap vector */ .global kvmppc_interrupt .balign IFETCH_ALIGN_BYTES kvmppc_interrupt: #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE std r10,HSTATE_SCRATCH0(r13) lbz r10,HSTATE_IN_GUEST(r13) cmpwi r10,KVM_GUEST_MODE_HV_P9 beq kvmppc_p9_exit_interrupt ld r10,HSTATE_SCRATCH0(r13) #endif li r11,PACA_EXGEN cmpdi r10,0x200 bgt+ .Lgot_save_area li r11,PACA_EXMC beq .Lgot_save_area li r11,PACA_EXNMI .Lgot_save_area: add r11,r11,r13 BEGIN_FTR_SECTION ld r12,EX_CFAR(r11) std r12,HSTATE_CFAR(r13) END_FTR_SECTION_IFSET(CPU_FTR_CFAR) ld r12,EX_CTR(r11) mtctr r12 BEGIN_FTR_SECTION ld r12,EX_PPR(r11) std r12,HSTATE_PPR(r13) END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) ld r12,EX_R12(r11) std r12,HSTATE_SCRATCH0(r13) sldi r12,r9,32 or r12,r12,r10 ld r9,EX_R9(r11) ld r10,EX_R10(r11) ld r11,EX_R11(r11) /* * Hcalls and other interrupts come here after normalising register * contents and save locations: * * R12 = (guest CR << 32) | interrupt vector * R13 = PACA * guest R12 saved in shadow HSTATE_SCRATCH0 * guest R13 saved in SPRN_SCRATCH0 */ std r9,HSTATE_SCRATCH2(r13) lbz r9,HSTATE_IN_GUEST(r13) cmpwi r9,KVM_GUEST_MODE_SKIP beq- .Lmaybe_skip .Lno_skip: #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE cmpwi r9,KVM_GUEST_MODE_GUEST beq kvmppc_interrupt_pr #endif b kvmppc_interrupt_hv #else b kvmppc_interrupt_pr #endif /* * "Skip" interrupts are part of a trick KVM uses a with hash guests to load * the faulting instruction in guest memory from the hypervisor without * walking page tables. * * When the guest takes a fault that requires the hypervisor to load the * instruction (e.g., MMIO emulation), KVM is running in real-mode with HV=1 * and the guest MMU context loaded. 
It sets KVM_GUEST_MODE_SKIP, and sets * MSR[DR]=1 while leaving MSR[IR]=0, so it continues to fetch HV instructions * but loads and stores will access the guest context. This is used to load * the faulting instruction using the faulting guest effective address. * * However the guest context may not be able to translate, or it may cause a * machine check or other issue, which results in a fault in the host * (even with KVM-HV). * * These faults come here because KVM_GUEST_MODE_SKIP was set, so if they * are (or are likely) caused by that load, the instruction is skipped by * just returning with the PC advanced +4, where it is noticed the load did * not execute and it goes to the slow path which walks the page tables to * read guest memory. */ .Lmaybe_skip: cmpwi r12,BOOK3S_INTERRUPT_MACHINE_CHECK beq 1f cmpwi r12,BOOK3S_INTERRUPT_DATA_STORAGE beq 1f cmpwi r12,BOOK3S_INTERRUPT_DATA_SEGMENT beq 1f #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE /* HSRR interrupts get 2 added to interrupt number */ cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE | 0x2 beq 2f #endif b .Lno_skip 1: mfspr r9,SPRN_SRR0 addi r9,r9,4 mtspr SPRN_SRR0,r9 ld r12,HSTATE_SCRATCH0(r13) ld r9,HSTATE_SCRATCH2(r13) GET_SCRATCH0(r13) RFI_TO_KERNEL #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 2: mfspr r9,SPRN_HSRR0 addi r9,r9,4 mtspr SPRN_HSRR0,r9 ld r12,HSTATE_SCRATCH0(r13) ld r9,HSTATE_SCRATCH2(r13) GET_SCRATCH0(r13) HRFI_TO_KERNEL #endif #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE /* Stack frame offsets for kvmppc_p9_enter_guest */ #define SFS (144 + STACK_FRAME_MIN_SIZE) #define STACK_SLOT_NVGPRS (SFS - 144) /* 18 gprs */ /* * void kvmppc_p9_enter_guest(struct vcpu *vcpu); * * Enter the guest on a ISAv3.0 or later system. */ .balign IFETCH_ALIGN_BYTES _GLOBAL(kvmppc_p9_enter_guest) EXPORT_SYMBOL_GPL(kvmppc_p9_enter_guest) mflr r0 std r0,PPC_LR_STKOFF(r1) stdu r1,-SFS(r1) std r1,HSTATE_HOST_R1(r13) mfcr r4 stw r4,SFS+8(r1) reg = 14 .rept 18 std reg,STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1) reg = reg + 1 .endr ld r4,VCPU_LR(r3) mtlr r4 ld r4,VCPU_CTR(r3) mtctr r4 ld r4,VCPU_XER(r3) mtspr SPRN_XER,r4 ld r1,VCPU_CR(r3) BEGIN_FTR_SECTION ld r4,VCPU_CFAR(r3) mtspr SPRN_CFAR,r4 END_FTR_SECTION_IFSET(CPU_FTR_CFAR) BEGIN_FTR_SECTION ld r4,VCPU_PPR(r3) mtspr SPRN_PPR,r4 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) reg = 4 .rept 28 ld reg,__VCPU_GPR(reg)(r3) reg = reg + 1 .endr ld r4,VCPU_KVM(r3) lbz r4,KVM_SECURE_GUEST(r4) cmpdi r4,0 ld r4,VCPU_GPR(R4)(r3) bne .Lret_to_ultra mtcr r1 ld r0,VCPU_GPR(R0)(r3) ld r1,VCPU_GPR(R1)(r3) ld r2,VCPU_GPR(R2)(r3) ld r3,VCPU_GPR(R3)(r3) HRFI_TO_GUEST b . /* * Use UV_RETURN ultracall to return control back to the Ultravisor * after processing an hypercall or interrupt that was forwarded * (a.k.a. reflected) to the Hypervisor. * * All registers have already been reloaded except the ucall requires: * R0 = hcall result * R2 = SRR1, so UV can detect a synthesized interrupt (if any) * R3 = UV_RETURN */ .Lret_to_ultra: mtcr r1 ld r1,VCPU_GPR(R1)(r3) ld r0,VCPU_GPR(R3)(r3) mfspr r2,SPRN_SRR1 LOAD_REG_IMMEDIATE(r3, UV_RETURN) sc 2 /* * kvmppc_p9_exit_hcall and kvmppc_p9_exit_interrupt are branched to from * above if the interrupt was taken for a guest that was entered via * kvmppc_p9_enter_guest(). * * The exit code recovers the host stack and vcpu pointer, saves all guest GPRs * and CR, LR, XER as well as guest MSR and NIA into the VCPU, then re- * establishes the host stack and registers to return from the * kvmppc_p9_enter_guest() function, which saves CTR and other guest registers * (SPRs and FP, VEC, etc). 
*/ .balign IFETCH_ALIGN_BYTES kvmppc_p9_exit_hcall: mfspr r11,SPRN_SRR0 mfspr r12,SPRN_SRR1 li r10,0xc00 std r10,HSTATE_SCRATCH0(r13) .balign IFETCH_ALIGN_BYTES kvmppc_p9_exit_interrupt: /* * If set to KVM_GUEST_MODE_HV_P9 but we're still in the * hypervisor, that means we can't return from the entry stack. */ rldicl. r10,r12,64-MSR_HV_LG,63 bne- kvmppc_p9_bad_interrupt std r1,HSTATE_SCRATCH1(r13) std r3,HSTATE_SCRATCH2(r13) ld r1,HSTATE_HOST_R1(r13) ld r3,HSTATE_KVM_VCPU(r13) std r9,VCPU_CR(r3) 1: std r11,VCPU_PC(r3) std r12,VCPU_MSR(r3) reg = 14 .rept 18 std reg,__VCPU_GPR(reg)(r3) reg = reg + 1 .endr /* r1, r3, r9-r13 are saved to vcpu by C code */ std r0,VCPU_GPR(R0)(r3) std r2,VCPU_GPR(R2)(r3) reg = 4 .rept 5 std reg,__VCPU_GPR(reg)(r3) reg = reg + 1 .endr LOAD_PACA_TOC() mflr r4 std r4,VCPU_LR(r3) mfspr r4,SPRN_XER std r4,VCPU_XER(r3) reg = 14 .rept 18 ld reg,STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1) reg = reg + 1 .endr lwz r4,SFS+8(r1) mtcr r4 /* * Flush the link stack here, before executing the first blr on the * way out of the guest. * * The link stack won't match coming out of the guest anyway so the * only cost is the flush itself. The call clobbers r0. */ 1: nop patch_site 1b patch__call_kvm_flush_link_stack_p9 addi r1,r1,SFS ld r0,PPC_LR_STKOFF(r1) mtlr r0 blr /* * Took an interrupt somewhere right before HRFID to guest, so registers are * in a bad way. Return things hopefully enough to run host virtual code and * run the Linux interrupt handler (SRESET or MCE) to print something useful. * * We could be really clever and save all host registers in known locations * before setting HSTATE_IN_GUEST, then restoring them all here, and setting * return address to a fixup that sets them up again. But that's a lot of * effort for a small bit of code. Lots of other things to do first. */ kvmppc_p9_bad_interrupt: BEGIN_MMU_FTR_SECTION /* * Hash host doesn't try to recover MMU (requires host SLB reload) */ b . END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX) /* * Clean up guest registers to give host a chance to run. */ li r10,0 mtspr SPRN_AMR,r10 mtspr SPRN_IAMR,r10 mtspr SPRN_CIABR,r10 mtspr SPRN_DAWRX0,r10 BEGIN_FTR_SECTION mtspr SPRN_DAWRX1,r10 END_FTR_SECTION_IFSET(CPU_FTR_DAWR1) /* * Switch to host MMU mode (don't have the real host PID but we aren't * going back to userspace). */ hwsync isync mtspr SPRN_PID,r10 ld r10, HSTATE_KVM_VCPU(r13) ld r10, VCPU_KVM(r10) lwz r10, KVM_HOST_LPID(r10) mtspr SPRN_LPID,r10 ld r10, HSTATE_KVM_VCPU(r13) ld r10, VCPU_KVM(r10) ld r10, KVM_HOST_LPCR(r10) mtspr SPRN_LPCR,r10 isync /* * Set GUEST_MODE_NONE so the handler won't branch to KVM, and clear * MSR_RI in r12 ([H]SRR1) so the handler won't try to return. */ li r10,KVM_GUEST_MODE_NONE stb r10,HSTATE_IN_GUEST(r13) li r10,MSR_RI andc r12,r12,r10 /* * Go back to interrupt handler. MCE and SRESET have their specific * PACA save area so they should be used directly. They set up their * own stack. The other handlers all use EXGEN. They will use the * guest r1 if it looks like a kernel stack, so just load the * emergency stack and go to program check for all other interrupts. */ ld r10,HSTATE_SCRATCH0(r13) cmpwi r10,BOOK3S_INTERRUPT_MACHINE_CHECK beq .Lcall_machine_check_common cmpwi r10,BOOK3S_INTERRUPT_SYSTEM_RESET beq .Lcall_system_reset_common b . .Lcall_machine_check_common: b machine_check_common .Lcall_system_reset_common: b system_reset_common #endif
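The normalised exit convention in book3s_64_entry.S above packs the guest CR and the interrupt vector into one register, r12 = (guest CR << 32) | vector, via "sldi r12,r9,32; or r12,r12,r10". The pack and unpack, expressed in C:

#include <stdint.h>
#include <stdio.h>

static uint64_t pack_exit(uint32_t cr, uint32_t vec)
{
    return ((uint64_t)cr << 32) | vec;   /* sldi + or */
}

int main(void)
{
    uint64_t r12 = pack_exit(0x28000000, 0xc00);   /* e.g. the hcall vector */
    uint32_t cr  = (uint32_t)(r12 >> 32);          /* recover guest CR */
    uint32_t vec = (uint32_t)r12;                  /* recover trap vector */

    printf("cr=%08x vec=%03x\n", cr, vec);
    return 0;
}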
aixcc-public/challenge-001-exemplar-source
10,207
arch/powerpc/kvm/book3s_segment.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * * Copyright SUSE Linux Products GmbH 2010 * * Authors: Alexander Graf <agraf@suse.de> */ /* Real mode helpers */ #include <asm/asm-compat.h> #include <asm/feature-fixups.h> #if defined(CONFIG_PPC_BOOK3S_64) #define GET_SHADOW_VCPU(reg) \ mr reg, r13 #elif defined(CONFIG_PPC_BOOK3S_32) #define GET_SHADOW_VCPU(reg) \ tophys(reg, r2); \ lwz reg, (THREAD + THREAD_KVM_SVCPU)(reg); \ tophys(reg, reg) #endif /* Disable for nested KVM */ #define USE_QUICK_LAST_INST /* Get helper functions for subarch specific functionality */ #if defined(CONFIG_PPC_BOOK3S_64) #include "book3s_64_slb.S" #elif defined(CONFIG_PPC_BOOK3S_32) #include "book3s_32_sr.S" #endif /****************************************************************************** * * * Entry code * * * *****************************************************************************/ .global kvmppc_handler_trampoline_enter kvmppc_handler_trampoline_enter: /* Required state: * * MSR = ~IR|DR * R1 = host R1 * R2 = host R2 * R4 = guest shadow MSR * R5 = normal host MSR * R6 = current host MSR (EE, IR, DR off) * LR = highmem guest exit code * all other volatile GPRS = free * SVCPU[CR] = guest CR * SVCPU[XER] = guest XER * SVCPU[CTR] = guest CTR * SVCPU[LR] = guest LR */ /* r3 = shadow vcpu */ GET_SHADOW_VCPU(r3) /* Save guest exit handler address and MSR */ mflr r0 PPC_STL r0, HSTATE_VMHANDLER(r3) PPC_STL r5, HSTATE_HOST_MSR(r3) /* Save R1/R2 in the PACA (64-bit) or shadow_vcpu (32-bit) */ PPC_STL r1, HSTATE_HOST_R1(r3) PPC_STL r2, HSTATE_HOST_R2(r3) /* Activate guest mode, so faults get handled by KVM */ li r11, KVM_GUEST_MODE_GUEST stb r11, HSTATE_IN_GUEST(r3) /* Switch to guest segment. This is subarch specific. */ LOAD_GUEST_SEGMENTS #ifdef CONFIG_PPC_BOOK3S_64 BEGIN_FTR_SECTION /* Save host FSCR */ mfspr r8, SPRN_FSCR std r8, HSTATE_HOST_FSCR(r13) /* Set FSCR during guest execution */ ld r9, SVCPU_SHADOW_FSCR(r13) mtspr SPRN_FSCR, r9 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) /* Some guests may need to have dcbz set to 32 byte length. * * Usually we ensure that by patching the guest's instructions * to trap on dcbz and emulate it in the hypervisor. * * If we can, we should tell the CPU to use 32 byte dcbz though, * because that's a lot faster. */ lbz r0, HSTATE_RESTORE_HID5(r3) cmpwi r0, 0 beq no_dcbz32_on mfspr r0,SPRN_HID5 ori r0, r0, 0x80 /* XXX HID5_dcbz32 = 0x80 */ mtspr SPRN_HID5,r0 no_dcbz32_on: #endif /* CONFIG_PPC_BOOK3S_64 */ /* Enter guest */ PPC_LL r8, SVCPU_CTR(r3) PPC_LL r9, SVCPU_LR(r3) lwz r10, SVCPU_CR(r3) PPC_LL r11, SVCPU_XER(r3) mtctr r8 mtlr r9 mtcr r10 mtxer r11 /* Move SRR0 and SRR1 into the respective regs */ PPC_LL r9, SVCPU_PC(r3) /* First clear RI in our current MSR value */ li r0, MSR_RI andc r6, r6, r0 PPC_LL r0, SVCPU_R0(r3) PPC_LL r1, SVCPU_R1(r3) PPC_LL r2, SVCPU_R2(r3) PPC_LL r5, SVCPU_R5(r3) PPC_LL r7, SVCPU_R7(r3) PPC_LL r8, SVCPU_R8(r3) PPC_LL r10, SVCPU_R10(r3) PPC_LL r11, SVCPU_R11(r3) PPC_LL r12, SVCPU_R12(r3) PPC_LL r13, SVCPU_R13(r3) MTMSR_EERI(r6) mtsrr0 r9 mtsrr1 r4 PPC_LL r4, SVCPU_R4(r3) PPC_LL r6, SVCPU_R6(r3) PPC_LL r9, SVCPU_R9(r3) PPC_LL r3, (SVCPU_R3)(r3) RFI_TO_GUEST kvmppc_handler_trampoline_enter_end: /****************************************************************************** * * * Exit code * * * *****************************************************************************/ .global kvmppc_interrupt_pr kvmppc_interrupt_pr: /* 64-bit entry. 
Register usage at this point: * * SPRG_SCRATCH0 = guest R13 * R9 = HSTATE_IN_GUEST * R12 = (guest CR << 32) | exit handler id * R13 = PACA * HSTATE.SCRATCH0 = guest R12 * HSTATE.SCRATCH2 = guest R9 */ #ifdef CONFIG_PPC64 /* Match 32-bit entry */ ld r9,HSTATE_SCRATCH2(r13) rotldi r12, r12, 32 /* Flip R12 halves for stw */ stw r12, HSTATE_SCRATCH1(r13) /* CR is now in the low half */ srdi r12, r12, 32 /* shift trap into low half */ #endif .global kvmppc_handler_trampoline_exit kvmppc_handler_trampoline_exit: /* Register usage at this point: * * SPRG_SCRATCH0 = guest R13 * R12 = exit handler id * R13 = shadow vcpu (32-bit) or PACA (64-bit) * HSTATE.SCRATCH0 = guest R12 * HSTATE.SCRATCH1 = guest CR */ /* Save registers */ PPC_STL r0, SVCPU_R0(r13) PPC_STL r1, SVCPU_R1(r13) PPC_STL r2, SVCPU_R2(r13) PPC_STL r3, SVCPU_R3(r13) PPC_STL r4, SVCPU_R4(r13) PPC_STL r5, SVCPU_R5(r13) PPC_STL r6, SVCPU_R6(r13) PPC_STL r7, SVCPU_R7(r13) PPC_STL r8, SVCPU_R8(r13) PPC_STL r9, SVCPU_R9(r13) PPC_STL r10, SVCPU_R10(r13) PPC_STL r11, SVCPU_R11(r13) /* Restore R1/R2 so we can handle faults */ PPC_LL r1, HSTATE_HOST_R1(r13) PPC_LL r2, HSTATE_HOST_R2(r13) /* Save guest PC and MSR */ #ifdef CONFIG_PPC64 BEGIN_FTR_SECTION andi. r0, r12, 0x2 cmpwi cr1, r0, 0 beq 1f mfspr r3,SPRN_HSRR0 mfspr r4,SPRN_HSRR1 andi. r12,r12,0x3ffd b 2f END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) #endif 1: mfsrr0 r3 mfsrr1 r4 2: PPC_STL r3, SVCPU_PC(r13) PPC_STL r4, SVCPU_SHADOW_SRR1(r13) /* Get scratch'ed off registers */ GET_SCRATCH0(r9) PPC_LL r8, HSTATE_SCRATCH0(r13) lwz r7, HSTATE_SCRATCH1(r13) PPC_STL r9, SVCPU_R13(r13) PPC_STL r8, SVCPU_R12(r13) stw r7, SVCPU_CR(r13) /* Save more register state */ mfxer r5 mfdar r6 mfdsisr r7 mfctr r8 mflr r9 PPC_STL r5, SVCPU_XER(r13) PPC_STL r6, SVCPU_FAULT_DAR(r13) stw r7, SVCPU_FAULT_DSISR(r13) PPC_STL r8, SVCPU_CTR(r13) PPC_STL r9, SVCPU_LR(r13) /* * In order for us to easily get the last instruction, * we got the #vmexit at, we exploit the fact that the * virtual layout is still the same here, so we can just * ld from the guest's PC address */ /* We only load the last instruction when it's safe */ cmpwi r12, BOOK3S_INTERRUPT_DATA_STORAGE beq ld_last_inst cmpwi r12, BOOK3S_INTERRUPT_PROGRAM beq ld_last_inst cmpwi r12, BOOK3S_INTERRUPT_SYSCALL beq ld_last_prev_inst cmpwi r12, BOOK3S_INTERRUPT_ALIGNMENT beq- ld_last_inst #ifdef CONFIG_PPC64 BEGIN_FTR_SECTION cmpwi r12, BOOK3S_INTERRUPT_H_EMUL_ASSIST beq- ld_last_inst END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) BEGIN_FTR_SECTION cmpwi r12, BOOK3S_INTERRUPT_FAC_UNAVAIL beq- ld_last_inst END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) #endif b no_ld_last_inst ld_last_prev_inst: addi r3, r3, -4 ld_last_inst: /* Save off the guest instruction we're at */ /* In case lwz faults */ li r0, KVM_INST_FETCH_FAILED #ifdef USE_QUICK_LAST_INST /* Set guest mode to 'jump over instruction' so if lwz faults * we'll just continue at the next IP. 
*/ li r9, KVM_GUEST_MODE_SKIP stb r9, HSTATE_IN_GUEST(r13) /* 1) enable paging for data */ mfmsr r9 ori r11, r9, MSR_DR /* Enable paging for data */ mtmsr r11 sync /* 2) fetch the instruction */ lwz r0, 0(r3) /* 3) disable paging again */ mtmsr r9 sync #endif stw r0, SVCPU_LAST_INST(r13) no_ld_last_inst: /* Unset guest mode */ li r9, KVM_GUEST_MODE_NONE stb r9, HSTATE_IN_GUEST(r13) /* Switch back to host MMU */ LOAD_HOST_SEGMENTS #ifdef CONFIG_PPC_BOOK3S_64 lbz r5, HSTATE_RESTORE_HID5(r13) cmpwi r5, 0 beq no_dcbz32_off li r4, 0 mfspr r5,SPRN_HID5 rldimi r5,r4,6,56 mtspr SPRN_HID5,r5 no_dcbz32_off: BEGIN_FTR_SECTION /* Save guest FSCR on a FAC_UNAVAIL interrupt */ cmpwi r12, BOOK3S_INTERRUPT_FAC_UNAVAIL bne+ no_fscr_save mfspr r7, SPRN_FSCR std r7, SVCPU_SHADOW_FSCR(r13) no_fscr_save: /* Restore host FSCR */ ld r8, HSTATE_HOST_FSCR(r13) mtspr SPRN_FSCR, r8 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) #endif /* CONFIG_PPC_BOOK3S_64 */ /* * For some interrupts, we need to call the real Linux * handler, so it can do work for us. This has to happen * as if the interrupt arrived from the kernel though, * so let's fake it here where most state is restored. * * Having set up SRR0/1 with the address where we want * to continue with relocation on (potentially in module * space), we either just go straight there with rfi[d], * or we jump to an interrupt handler if there is an * interrupt to be handled first. In the latter case, * the rfi[d] at the end of the interrupt handler will * get us back to where we want to continue. */ /* Register usage at this point: * * R1 = host R1 * R2 = host R2 * R10 = raw exit handler id * R12 = exit handler id * R13 = shadow vcpu (32-bit) or PACA (64-bit) * SVCPU.* = guest * * */ PPC_LL r6, HSTATE_HOST_MSR(r13) #ifdef CONFIG_PPC_TRANSACTIONAL_MEM /* * We don't want to change MSR[TS] bits via rfi here. * The actual TM handling logic will be in host with * recovered DR/IR bits after HSTATE_VMHANDLER. * And MSR_TM can be enabled in HOST_MSR so rfid may * not suppress this change and can lead to exception. * Manually set MSR to prevent TS state change here. */ mfmsr r7 rldicl r7, r7, 64 - MSR_TS_S_LG, 62 rldimi r6, r7, MSR_TS_S_LG, 63 - MSR_TS_T_LG #endif PPC_LL r8, HSTATE_VMHANDLER(r13) #ifdef CONFIG_PPC64 BEGIN_FTR_SECTION beq cr1, 1f mtspr SPRN_HSRR1, r6 mtspr SPRN_HSRR0, r8 END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) #endif 1: /* Restore host msr -> SRR1 */ mtsrr1 r6 /* Load highmem handler address */ mtsrr0 r8 /* RFI into the highmem handler, or jump to interrupt handler */ cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL beqa BOOK3S_INTERRUPT_EXTERNAL cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER beqa BOOK3S_INTERRUPT_DECREMENTER cmpwi r12, BOOK3S_INTERRUPT_PERFMON beqa BOOK3S_INTERRUPT_PERFMON cmpwi r12, BOOK3S_INTERRUPT_DOORBELL beqa BOOK3S_INTERRUPT_DOORBELL RFI_TO_KERNEL kvmppc_handler_trampoline_exit_end:
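The USE_QUICK_LAST_INST path above preloads a sentinel, marks the vcpu as KVM_GUEST_MODE_SKIP, turns on MSR[DR], and tries to lwz the instruction at the guest PC; if that load faults, the skip logic just advances the PC past the load and the sentinel survives. A C model of that control flow, a sketch only: the value of KVM_INST_FETCH_FAILED and the guest_load32 helper (standing in for the MSR[DR] toggle plus lwz) are assumptions:

#include <stdint.h>
#include <stdio.h>

#define KVM_INST_FETCH_FAILED 0xffffffffu   /* assumed sentinel value */

/* Stand-in for "enable data relocation, lwz from the guest PC, disable
 * again"; here it always simulates a faulting guest translation. */
static int guest_load32(uint64_t pc, uint32_t *out)
{
    (void)pc;
    (void)out;
    return -1;
}

static uint32_t fetch_last_inst(uint64_t guest_pc)
{
    uint32_t inst = KVM_INST_FETCH_FAILED;  /* "li r0, KVM_INST_FETCH_FAILED" */

    /* HSTATE_IN_GUEST = KVM_GUEST_MODE_SKIP is set first, so a faulting
     * load is skipped by the handler and inst keeps the sentinel; on
     * success the load would have overwritten it. */
    guest_load32(guest_pc, &inst);
    return inst;
}

int main(void)
{
    printf("inst = %#x\n", fetch_last_inst(0x1000));
    return 0;
}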
aixcc-public/challenge-001-exemplar-source
5,677
arch/powerpc/kvm/book3s_interrupts.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * * Copyright SUSE Linux Products GmbH 2009 * * Authors: Alexander Graf <agraf@suse.de> */ #include <asm/ppc_asm.h> #include <asm/kvm_asm.h> #include <asm/reg.h> #include <asm/page.h> #include <asm/asm-offsets.h> #include <asm/exception-64s.h> #include <asm/asm-compat.h> #if defined(CONFIG_PPC_BOOK3S_64) #ifdef CONFIG_PPC64_ELF_ABI_V2 #define FUNC(name) name #else #define FUNC(name) GLUE(.,name) #endif #define GET_SHADOW_VCPU(reg) addi reg, r13, PACA_SVCPU #elif defined(CONFIG_PPC_BOOK3S_32) #define FUNC(name) name #define GET_SHADOW_VCPU(reg) lwz reg, (THREAD + THREAD_KVM_SVCPU)(r2) #endif /* CONFIG_PPC_BOOK3S_64 */ #define VCPU_LOAD_NVGPRS(vcpu) \ PPC_LL r14, VCPU_GPR(R14)(vcpu); \ PPC_LL r15, VCPU_GPR(R15)(vcpu); \ PPC_LL r16, VCPU_GPR(R16)(vcpu); \ PPC_LL r17, VCPU_GPR(R17)(vcpu); \ PPC_LL r18, VCPU_GPR(R18)(vcpu); \ PPC_LL r19, VCPU_GPR(R19)(vcpu); \ PPC_LL r20, VCPU_GPR(R20)(vcpu); \ PPC_LL r21, VCPU_GPR(R21)(vcpu); \ PPC_LL r22, VCPU_GPR(R22)(vcpu); \ PPC_LL r23, VCPU_GPR(R23)(vcpu); \ PPC_LL r24, VCPU_GPR(R24)(vcpu); \ PPC_LL r25, VCPU_GPR(R25)(vcpu); \ PPC_LL r26, VCPU_GPR(R26)(vcpu); \ PPC_LL r27, VCPU_GPR(R27)(vcpu); \ PPC_LL r28, VCPU_GPR(R28)(vcpu); \ PPC_LL r29, VCPU_GPR(R29)(vcpu); \ PPC_LL r30, VCPU_GPR(R30)(vcpu); \ PPC_LL r31, VCPU_GPR(R31)(vcpu); \ /***************************************************************************** * * * Guest entry / exit code that is in kernel module memory (highmem) * * * ****************************************************************************/ /* Registers: * r3: vcpu pointer */ _GLOBAL(__kvmppc_vcpu_run) kvm_start_entry: /* Write correct stack frame */ mflr r0 PPC_STL r0,PPC_LR_STKOFF(r1) /* Save host state to the stack */ PPC_STLU r1, -SWITCH_FRAME_SIZE(r1) /* Save r3 (vcpu) */ SAVE_GPR(3, r1) /* Save non-volatile registers (r14 - r31) */ SAVE_NVGPRS(r1) /* Save CR */ mfcr r14 stw r14, _CCR(r1) /* Save LR */ PPC_STL r0, _LINK(r1) /* Load non-volatile guest state from the vcpu */ VCPU_LOAD_NVGPRS(r3) kvm_start_lightweight: /* Copy registers into shadow vcpu so we can access them in real mode */ bl FUNC(kvmppc_copy_to_svcpu) nop REST_GPR(3, r1) #ifdef CONFIG_PPC_BOOK3S_64 /* Get the dcbz32 flag */ PPC_LL r0, VCPU_HFLAGS(r3) rldicl r0, r0, 0, 63 /* r3 &= 1 */ stb r0, HSTATE_RESTORE_HID5(r13) /* Load up guest SPRG3 value, since it's user readable */ lbz r4, VCPU_SHAREDBE(r3) cmpwi r4, 0 ld r5, VCPU_SHARED(r3) beq sprg3_little_endian sprg3_big_endian: #ifdef __BIG_ENDIAN__ ld r4, VCPU_SHARED_SPRG3(r5) #else addi r5, r5, VCPU_SHARED_SPRG3 ldbrx r4, 0, r5 #endif b after_sprg3_load sprg3_little_endian: #ifdef __LITTLE_ENDIAN__ ld r4, VCPU_SHARED_SPRG3(r5) #else addi r5, r5, VCPU_SHARED_SPRG3 ldbrx r4, 0, r5 #endif after_sprg3_load: mtspr SPRN_SPRG3, r4 #endif /* CONFIG_PPC_BOOK3S_64 */ PPC_LL r4, VCPU_SHADOW_MSR(r3) /* get shadow_msr */ /* Jump to segment patching handler and into our guest */ bl FUNC(kvmppc_entry_trampoline) nop /* * This is the handler in module memory. It gets jumped at from the * lowmem trampoline code, so it's basically the guest exit code. * */ /* * Register usage at this point: * * R1 = host R1 * R2 = host R2 * R12 = exit handler id * R13 = PACA * SVCPU.* = guest * * MSR.EE = 1 * */ PPC_LL r3, GPR3(r1) /* vcpu pointer */ /* * kvmppc_copy_from_svcpu can clobber volatile registers, save * the exit handler id to the vcpu and restore it from there later. 
*/ stw r12, VCPU_TRAP(r3) /* Transfer reg values from shadow vcpu back to vcpu struct */ bl FUNC(kvmppc_copy_from_svcpu) nop #ifdef CONFIG_PPC_BOOK3S_64 /* * Reload kernel SPRG3 value. * No need to save guest value as usermode can't modify SPRG3. */ ld r3, PACA_SPRG_VDSO(r13) mtspr SPRN_SPRG_VDSO_WRITE, r3 #endif /* CONFIG_PPC_BOOK3S_64 */ /* R7 = vcpu */ PPC_LL r7, GPR3(r1) PPC_STL r14, VCPU_GPR(R14)(r7) PPC_STL r15, VCPU_GPR(R15)(r7) PPC_STL r16, VCPU_GPR(R16)(r7) PPC_STL r17, VCPU_GPR(R17)(r7) PPC_STL r18, VCPU_GPR(R18)(r7) PPC_STL r19, VCPU_GPR(R19)(r7) PPC_STL r20, VCPU_GPR(R20)(r7) PPC_STL r21, VCPU_GPR(R21)(r7) PPC_STL r22, VCPU_GPR(R22)(r7) PPC_STL r23, VCPU_GPR(R23)(r7) PPC_STL r24, VCPU_GPR(R24)(r7) PPC_STL r25, VCPU_GPR(R25)(r7) PPC_STL r26, VCPU_GPR(R26)(r7) PPC_STL r27, VCPU_GPR(R27)(r7) PPC_STL r28, VCPU_GPR(R28)(r7) PPC_STL r29, VCPU_GPR(R29)(r7) PPC_STL r30, VCPU_GPR(R30)(r7) PPC_STL r31, VCPU_GPR(R31)(r7) /* Pass the exit number as 2nd argument to kvmppc_handle_exit */ lwz r4, VCPU_TRAP(r7) /* Restore r3 (vcpu) */ REST_GPR(3, r1) bl FUNC(kvmppc_handle_exit_pr) /* If RESUME_GUEST, get back in the loop */ cmpwi r3, RESUME_GUEST beq kvm_loop_lightweight cmpwi r3, RESUME_GUEST_NV beq kvm_loop_heavyweight kvm_exit_loop: PPC_LL r4, _LINK(r1) mtlr r4 lwz r14, _CCR(r1) mtcr r14 /* Restore non-volatile host registers (r14 - r31) */ REST_NVGPRS(r1) addi r1, r1, SWITCH_FRAME_SIZE blr kvm_loop_heavyweight: PPC_LL r4, _LINK(r1) PPC_STL r4, (PPC_LR_STKOFF + SWITCH_FRAME_SIZE)(r1) /* Load vcpu */ REST_GPR(3, r1) /* Load non-volatile guest state from the vcpu */ VCPU_LOAD_NVGPRS(r3) /* Jump back into the beginning of this function */ b kvm_start_lightweight kvm_loop_lightweight: /* We'll need the vcpu pointer */ REST_GPR(3, r1) /* Jump back into the beginning of this function */ b kvm_start_lightweight
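Stripped of the register shuffling, __kvmppc_vcpu_run above is a loop: enter the guest, let the C exit handler decide, and re-enter while it returns a resume code (the heavyweight variant additionally reloads the non-volatile guest GPRs before re-entering). A control-flow sketch; the RESUME_* values and the stub bodies are illustrative stand-ins, not the kernel's definitions:

#include <stdio.h>

#define RESUME_GUEST    0
#define RESUME_GUEST_NV 1
#define RESUME_HOST     2

struct vcpu { int trap; };

static int enter_guest(struct vcpu *v)
{
    (void)v;
    return 0x500;   /* pretend we exited on an external interrupt */
}

static int kvmppc_handle_exit_sim(struct vcpu *v, int trap)
{
    static int exits;
    (void)v;
    (void)trap;
    return ++exits < 3 ? RESUME_GUEST : RESUME_HOST;   /* stop after 3 loops */
}

static int vcpu_run(struct vcpu *v)
{
    int r;

    do {
        v->trap = enter_guest(v);                /* trampoline + RFI into the guest */
        r = kvmppc_handle_exit_sim(v, v->trap);  /* C exit handler */
    } while (r == RESUME_GUEST || r == RESUME_GUEST_NV);
    return r;
}

int main(void)
{
    struct vcpu v = { 0 };
    printf("final resume code: %d\n", vcpu_run(&v));
    return 0;
}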
aixcc-public/challenge-001-exemplar-source
3,134
arch/powerpc/kvm/book3s_64_slb.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * * Copyright SUSE Linux Products GmbH 2009 * * Authors: Alexander Graf <agraf@suse.de> */ #include <asm/asm-compat.h> #include <asm/feature-fixups.h> #define SHADOW_SLB_ENTRY_LEN 0x10 #define OFFSET_ESID(x) (SHADOW_SLB_ENTRY_LEN * x) #define OFFSET_VSID(x) ((SHADOW_SLB_ENTRY_LEN * x) + 8) /****************************************************************************** * * * Entry code * * * *****************************************************************************/ .macro LOAD_GUEST_SEGMENTS /* Required state: * * MSR = ~IR|DR * R13 = PACA * R1 = host R1 * R2 = host R2 * R3 = shadow vcpu * all other volatile GPRS = free except R4, R6 * SVCPU[CR] = guest CR * SVCPU[XER] = guest XER * SVCPU[CTR] = guest CTR * SVCPU[LR] = guest LR */ BEGIN_FW_FTR_SECTION /* Declare SLB shadow as 0 entries big */ ld r11, PACA_SLBSHADOWPTR(r13) li r8, 0 stb r8, 3(r11) END_FW_FTR_SECTION_IFSET(FW_FEATURE_LPAR) /* Flush SLB */ li r10, 0 slbmte r10, r10 slbia /* Fill SLB with our shadow */ lbz r12, SVCPU_SLB_MAX(r3) mulli r12, r12, 16 addi r12, r12, SVCPU_SLB add r12, r12, r3 /* for (r11 = kvm_slb; r11 < kvm_slb + kvm_slb_size; r11+=slb_entry) */ li r11, SVCPU_SLB add r11, r11, r3 slb_loop_enter: ld r10, 0(r11) andis. r9, r10, SLB_ESID_V@h beq slb_loop_enter_skip ld r9, 8(r11) slbmte r9, r10 slb_loop_enter_skip: addi r11, r11, 16 cmpd cr0, r11, r12 blt slb_loop_enter slb_do_enter: .endm /****************************************************************************** * * * Exit code * * * *****************************************************************************/ .macro LOAD_HOST_SEGMENTS /* Register usage at this point: * * R1 = host R1 * R2 = host R2 * R12 = exit handler id * R13 = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64] * SVCPU.* = guest * * SVCPU[CR] = guest CR * SVCPU[XER] = guest XER * SVCPU[CTR] = guest CTR * SVCPU[LR] = guest LR * */ /* Remove all SLB entries that are in use. */ li r0, 0 slbmte r0, r0 slbia /* Restore bolted entries from the shadow */ ld r11, PACA_SLBSHADOWPTR(r13) BEGIN_FW_FTR_SECTION /* Declare SLB shadow as SLB_NUM_BOLTED entries big */ li r8, SLB_NUM_BOLTED stb r8, 3(r11) END_FW_FTR_SECTION_IFSET(FW_FEATURE_LPAR) /* Manually load all entries from shadow SLB */ li r8, SLBSHADOW_SAVEAREA li r7, SLBSHADOW_SAVEAREA + 8 .rept SLB_NUM_BOLTED LDX_BE r10, r11, r8 cmpdi r10, 0 beq 1f LDX_BE r9, r11, r7 slbmte r9, r10 1: addi r7, r7, SHADOW_SLB_ENTRY_LEN addi r8, r8, SHADOW_SLB_ENTRY_LEN .endr isync sync slb_do_exit: .endm
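slb_loop_enter above walks an array of 16-byte {esid, vsid} pairs (SHADOW_SLB_ENTRY_LEN) and installs only the entries whose valid bit is set. The same loop in C; slbmte is a stand-in for the privileged instruction and SLB_ESID_V's value is an assumption taken from the usual mmu-hash definition:

#include <stdint.h>
#include <stdio.h>

#define SLB_ESID_V (1ULL << 27)   /* assumed valid bit, tested by "andis." above */

struct slb_entry { uint64_t esid, vsid; };   /* SHADOW_SLB_ENTRY_LEN = 0x10 */

/* Stand-in for the privileged slbmte instruction. */
static void slbmte(uint64_t vsid, uint64_t esid)
{
    printf("slbmte vsid=%#llx esid=%#llx\n",
           (unsigned long long)vsid, (unsigned long long)esid);
}

static void load_guest_slb(const struct slb_entry *slb, int slb_max)
{
    for (int i = 0; i < slb_max; i++) {
        if (!(slb[i].esid & SLB_ESID_V))   /* invalid entry: skip */
            continue;
        slbmte(slb[i].vsid, slb[i].esid);
    }
}

int main(void)
{
    struct slb_entry slb[2] = {
        { 0x1000 | SLB_ESID_V, 0x2000 },
        { 0x3000,              0x4000 },   /* invalid: skipped */
    };
    load_guest_slb(slb, 2);
    return 0;
}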
aixcc-public/challenge-001-exemplar-source
3,744
arch/powerpc/kvm/book3s_hv_interrupts.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> * * Derived from book3s_interrupts.S, which is: * Copyright SUSE Linux Products GmbH 2009 * * Authors: Alexander Graf <agraf@suse.de> */ #include <asm/ppc_asm.h> #include <asm/kvm_asm.h> #include <asm/reg.h> #include <asm/page.h> #include <asm/asm-offsets.h> #include <asm/exception-64s.h> #include <asm/ppc-opcode.h> #include <asm/asm-compat.h> #include <asm/feature-fixups.h> /***************************************************************************** * * * Guest entry / exit code that is in kernel module memory (vmalloc) * * * ****************************************************************************/ /* Registers: * none */ _GLOBAL(__kvmppc_vcore_entry) /* Write correct stack frame */ mflr r0 std r0,PPC_LR_STKOFF(r1) /* Save host state to the stack */ stdu r1, -SWITCH_FRAME_SIZE(r1) /* Save non-volatile registers (r14 - r31) and CR */ SAVE_NVGPRS(r1) mfcr r3 std r3, _CCR(r1) /* Save host DSCR */ mfspr r3, SPRN_DSCR std r3, HSTATE_DSCR(r13) BEGIN_FTR_SECTION /* Save host DABR */ mfspr r3, SPRN_DABR std r3, HSTATE_DABR(r13) END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) /* Save host PMU registers */ bl kvmhv_save_host_pmu /* * Put whatever is in the decrementer into the * hypervisor decrementer. * Because of a hardware deviation in P8, * we need to set LPCR[HDICE] before writing HDEC. */ ld r5, HSTATE_KVM_VCORE(r13) ld r6, VCORE_KVM(r5) ld r9, KVM_HOST_LPCR(r6) ori r8, r9, LPCR_HDICE mtspr SPRN_LPCR, r8 isync mfspr r8,SPRN_DEC mftb r7 extsw r8,r8 mtspr SPRN_HDEC,r8 add r8,r8,r7 std r8,HSTATE_DECEXP(r13) /* Jump to partition switch code */ bl kvmppc_hv_entry_trampoline nop /* * We return here in virtual mode after the guest exits * with something that we can't handle in real mode. * Interrupts are still hard-disabled. */ /* * Register usage at this point: * * R1 = host R1 * R2 = host R2 * R3 = trap number on this thread * R12 = exit handler id * R13 = PACA */ /* Restore non-volatile host registers (r14 - r31) and CR */ REST_NVGPRS(r1) ld r4, _CCR(r1) mtcr r4 addi r1, r1, SWITCH_FRAME_SIZE ld r0, PPC_LR_STKOFF(r1) mtlr r0 blr /* * void kvmhv_save_host_pmu(void) */ kvmhv_save_host_pmu: BEGIN_FTR_SECTION /* Work around P8 PMAE bug */ li r3, -1 clrrdi r3, r3, 10 mfspr r8, SPRN_MMCR2 mtspr SPRN_MMCR2, r3 /* freeze all counters using MMCR2 */ isync END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) li r3, 1 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ mfspr r7, SPRN_MMCR0 /* save MMCR0 */ mtspr SPRN_MMCR0, r3 /* freeze all counters, disable interrupts */ mfspr r6, SPRN_MMCRA /* Clear MMCRA in order to disable SDAR updates */ li r5, 0 mtspr SPRN_MMCRA, r5 isync lbz r5, PACA_PMCINUSE(r13) /* is the host using the PMU? */ cmpwi r5, 0 beq 31f /* skip if not */ mfspr r5, SPRN_MMCR1 mfspr r9, SPRN_SIAR mfspr r10, SPRN_SDAR std r7, HSTATE_MMCR0(r13) std r5, HSTATE_MMCR1(r13) std r6, HSTATE_MMCRA(r13) std r9, HSTATE_SIAR(r13) std r10, HSTATE_SDAR(r13) BEGIN_FTR_SECTION mfspr r9, SPRN_SIER std r8, HSTATE_MMCR2(r13) std r9, HSTATE_SIER(r13) END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) mfspr r3, SPRN_PMC1 mfspr r5, SPRN_PMC2 mfspr r6, SPRN_PMC3 mfspr r7, SPRN_PMC4 mfspr r8, SPRN_PMC5 mfspr r9, SPRN_PMC6 stw r3, HSTATE_PMC1(r13) stw r5, HSTATE_PMC2(r13) stw r6, HSTATE_PMC3(r13) stw r7, HSTATE_PMC4(r13) stw r8, HSTATE_PMC5(r13) stw r9, HSTATE_PMC6(r13) 31: blr
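The decrementer hand-off above sign-extends the 32-bit DEC ("extsw r8,r8") before copying it into HDEC and adding it to the timebase to form the saved expiry (HSTATE_DECEXP). The sign extension is the point: a nearly expired DEC is a negative number and must pull the expiry backwards, not push it ~2^32 ticks into the future. A sketch with simulated SPR reads standing in for mfspr/mftb:

#include <stdint.h>
#include <stdio.h>

static uint32_t mfspr_dec(void) { return 0xfffffff0u; }   /* nearly expired */
static uint64_t mftb(void)      { return 0x123456789ULL; }

int main(void)
{
    int64_t dec = (int32_t)mfspr_dec();   /* "extsw r8,r8": -16, not 4294967280 */
    uint64_t expiry = mftb() + dec;       /* "add r8,r8,r7": expiry in the past */

    printf("dec=%lld expiry=%#llx\n",
           (long long)dec, (unsigned long long)expiry);
    return 0;
}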
aixcc-public/challenge-001-exemplar-source
73,024
arch/powerpc/kvm/book3s_hv_rmhandlers.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> * * Derived from book3s_rmhandlers.S and other files, which are: * * Copyright SUSE Linux Products GmbH 2009 * * Authors: Alexander Graf <agraf@suse.de> */ #include <asm/ppc_asm.h> #include <asm/code-patching-asm.h> #include <asm/kvm_asm.h> #include <asm/reg.h> #include <asm/mmu.h> #include <asm/page.h> #include <asm/ptrace.h> #include <asm/hvcall.h> #include <asm/asm-offsets.h> #include <asm/exception-64s.h> #include <asm/kvm_book3s_asm.h> #include <asm/book3s/64/mmu-hash.h> #include <asm/export.h> #include <asm/tm.h> #include <asm/opal.h> #include <asm/thread_info.h> #include <asm/asm-compat.h> #include <asm/feature-fixups.h> #include <asm/cpuidle.h> /* Values in HSTATE_NAPPING(r13) */ #define NAPPING_CEDE 1 #define NAPPING_NOVCPU 2 #define NAPPING_UNSPLIT 3 /* Stack frame offsets for kvmppc_hv_entry */ #define SFS 160 #define STACK_SLOT_TRAP (SFS-4) #define STACK_SLOT_TID (SFS-16) #define STACK_SLOT_PSSCR (SFS-24) #define STACK_SLOT_PID (SFS-32) #define STACK_SLOT_IAMR (SFS-40) #define STACK_SLOT_CIABR (SFS-48) #define STACK_SLOT_DAWR0 (SFS-56) #define STACK_SLOT_DAWRX0 (SFS-64) #define STACK_SLOT_HFSCR (SFS-72) #define STACK_SLOT_AMR (SFS-80) #define STACK_SLOT_UAMOR (SFS-88) #define STACK_SLOT_FSCR (SFS-96) /* * Use the last LPID (all implemented LPID bits = 1) for partition switching. * This is reserved in the LPID allocator. POWER7 only implements 0x3ff, but * we write 0xfff into the LPID SPR anyway, which seems to work and just * ignores the top bits. */ #define LPID_RSVD 0xfff /* * Call kvmppc_hv_entry in real mode. * Must be called with interrupts hard-disabled. * * Input Registers: * * LR = return address to continue at after eventually re-enabling MMU */ _GLOBAL_TOC(kvmppc_hv_entry_trampoline) mflr r0 std r0, PPC_LR_STKOFF(r1) stdu r1, -112(r1) mfmsr r10 std r10, HSTATE_HOST_MSR(r13) LOAD_REG_ADDR(r5, kvmppc_call_hv_entry) li r0,MSR_RI andc r0,r10,r0 li r6,MSR_IR | MSR_DR andc r6,r10,r6 mtmsrd r0,1 /* clear RI in MSR */ mtsrr0 r5 mtsrr1 r6 RFI_TO_KERNEL kvmppc_call_hv_entry: ld r4, HSTATE_KVM_VCPU(r13) bl kvmppc_hv_entry /* Back from guest - restore host state and return to caller */ BEGIN_FTR_SECTION /* Restore host DABR and DABRX */ ld r5,HSTATE_DABR(r13) li r6,7 mtspr SPRN_DABR,r5 mtspr SPRN_DABRX,r6 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) /* Restore SPRG3 */ ld r3,PACA_SPRG_VDSO(r13) mtspr SPRN_SPRG_VDSO_WRITE,r3 /* Reload the host's PMU registers */ bl kvmhv_load_host_pmu /* * Reload DEC. HDEC interrupts were disabled when * we reloaded the host's LPCR value. */ ld r3, HSTATE_DECEXP(r13) mftb r4 subf r4, r4, r3 mtspr SPRN_DEC, r4 /* hwthread_req may have got set by cede or no vcpu, so clear it */ li r0, 0 stb r0, HSTATE_HWTHREAD_REQ(r13) /* * For external interrupts we need to call the Linux * handler to process the interrupt. We do that by jumping * to absolute address 0x500 for external interrupts. * The [h]rfid at the end of the handler will return to * the book3s_hv_interrupts.S code. For other interrupts * we do the rfid to get back to the book3s_hv_interrupts.S * code here. 
*/ ld r8, 112+PPC_LR_STKOFF(r1) addi r1, r1, 112 ld r7, HSTATE_HOST_MSR(r13) /* Return the trap number on this thread as the return value */ mr r3, r12 /* RFI into the highmem handler */ mfmsr r6 li r0, MSR_RI andc r6, r6, r0 mtmsrd r6, 1 /* Clear RI in MSR */ mtsrr0 r8 mtsrr1 r7 RFI_TO_KERNEL kvmppc_primary_no_guest: /* We handle this much like a ceded vcpu */ /* put the HDEC into the DEC, since HDEC interrupts don't wake us */ /* HDEC may be larger than DEC for arch >= v3.00, but since the */ /* HDEC value came from DEC in the first place, it will fit */ mfspr r3, SPRN_HDEC mtspr SPRN_DEC, r3 /* * Make sure the primary has finished the MMU switch. * We should never get here on a secondary thread, but * check it for robustness' sake. */ ld r5, HSTATE_KVM_VCORE(r13) 65: lbz r0, VCORE_IN_GUEST(r5) cmpwi r0, 0 beq 65b /* Set LPCR. */ ld r8,VCORE_LPCR(r5) mtspr SPRN_LPCR,r8 isync /* set our bit in napping_threads */ ld r5, HSTATE_KVM_VCORE(r13) lbz r7, HSTATE_PTID(r13) li r0, 1 sld r0, r0, r7 addi r6, r5, VCORE_NAPPING_THREADS 1: lwarx r3, 0, r6 or r3, r3, r0 stwcx. r3, 0, r6 bne 1b /* order napping_threads update vs testing entry_exit_map */ isync li r12, 0 lwz r7, VCORE_ENTRY_EXIT(r5) cmpwi r7, 0x100 bge kvm_novcpu_exit /* another thread already exiting */ li r3, NAPPING_NOVCPU stb r3, HSTATE_NAPPING(r13) li r3, 0 /* Don't wake on privileged (OS) doorbell */ b kvm_do_nap /* * kvm_novcpu_wakeup * Entered from kvm_start_guest if kvm_hstate.napping is set * to NAPPING_NOVCPU * r2 = kernel TOC * r13 = paca */ kvm_novcpu_wakeup: ld r1, HSTATE_HOST_R1(r13) ld r5, HSTATE_KVM_VCORE(r13) li r0, 0 stb r0, HSTATE_NAPPING(r13) /* check the wake reason */ bl kvmppc_check_wake_reason /* * Restore volatile registers since we could have called * a C routine in kvmppc_check_wake_reason. * r5 = VCORE */ ld r5, HSTATE_KVM_VCORE(r13) /* see if any other thread is already exiting */ lwz r0, VCORE_ENTRY_EXIT(r5) cmpwi r0, 0x100 bge kvm_novcpu_exit /* clear our bit in napping_threads */ lbz r7, HSTATE_PTID(r13) li r0, 1 sld r0, r0, r7 addi r6, r5, VCORE_NAPPING_THREADS 4: lwarx r7, 0, r6 andc r7, r7, r0 stwcx. r7, 0, r6 bne 4b /* See if the wake reason means we need to exit */ cmpdi r3, 0 bge kvm_novcpu_exit /* See if our timeslice has expired (HDEC is negative) */ mfspr r0, SPRN_HDEC extsw r0, r0 li r12, BOOK3S_INTERRUPT_HV_DECREMENTER cmpdi r0, 0 blt kvm_novcpu_exit /* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */ ld r4, HSTATE_KVM_VCPU(r13) cmpdi r4, 0 beq kvmppc_primary_no_guest #ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING addi r3, r4, VCPU_TB_RMENTRY bl kvmhv_start_timing #endif b kvmppc_got_guest kvm_novcpu_exit: #ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING ld r4, HSTATE_KVM_VCPU(r13) cmpdi r4, 0 beq 13f addi r3, r4, VCPU_TB_RMEXIT bl kvmhv_accumulate_time #endif 13: mr r3, r12 stw r12, STACK_SLOT_TRAP(r1) bl kvmhv_commence_exit nop b kvmhv_switch_to_host /* * We come in here when wakened from Linux offline idle code. * Relocation is off * r3 contains the SRR1 wakeup value, SRR1 is trashed. */ _GLOBAL(idle_kvm_start_guest) mfcr r5 mflr r0 std r5, 8(r1) // Save CR in caller's frame std r0, 16(r1) // Save LR in caller's frame // Create frame on emergency stack ld r4, PACAEMERGSP(r13) stdu r1, -SWITCH_FRAME_SIZE(r4) // Switch to new frame on emergency stack mr r1, r4 std r3, 32(r1) // Save SRR1 wakeup value SAVE_NVGPRS(r1) /* * Could avoid this and pass it through in r3. For now, * code expects it to be in SRR1. 
*/ mtspr SPRN_SRR1,r3 li r0,0 stb r0,PACA_FTRACE_ENABLED(r13) li r0,KVM_HWTHREAD_IN_KVM stb r0,HSTATE_HWTHREAD_STATE(r13) /* kvm cede / napping does not come through here */ lbz r0,HSTATE_NAPPING(r13) twnei r0,0 b 1f kvm_unsplit_wakeup: li r0, 0 stb r0, HSTATE_NAPPING(r13) 1: /* * We weren't napping due to cede, so this must be a secondary * thread being woken up to run a guest, or being woken up due * to a stray IPI. (Or due to some machine check or hypervisor * maintenance interrupt while the core is in KVM.) */ /* Check the wake reason in SRR1 to see why we got here */ bl kvmppc_check_wake_reason /* * kvmppc_check_wake_reason could invoke a C routine, but we * have no volatile registers to restore when we return. */ cmpdi r3, 0 bge kvm_no_guest /* get vcore pointer, NULL if we have nothing to run */ ld r5,HSTATE_KVM_VCORE(r13) cmpdi r5,0 /* if we have no vcore to run, go back to sleep */ beq kvm_no_guest kvm_secondary_got_guest: // About to go to guest, clear saved SRR1 li r0, 0 std r0, 32(r1) /* Set HSTATE_DSCR(r13) to something sensible */ ld r6, PACA_DSCR_DEFAULT(r13) std r6, HSTATE_DSCR(r13) /* On thread 0 of a subcore, set HDEC to max */ lbz r4, HSTATE_PTID(r13) cmpwi r4, 0 bne 63f lis r6,0x7fff /* MAX_INT@h */ mtspr SPRN_HDEC, r6 /* and set per-LPAR registers, if doing dynamic micro-threading */ ld r6, HSTATE_SPLIT_MODE(r13) cmpdi r6, 0 beq 63f ld r0, KVM_SPLIT_RPR(r6) mtspr SPRN_RPR, r0 ld r0, KVM_SPLIT_PMMAR(r6) mtspr SPRN_PMMAR, r0 ld r0, KVM_SPLIT_LDBAR(r6) mtspr SPRN_LDBAR, r0 isync 63: /* Order load of vcpu after load of vcore */ lwsync ld r4, HSTATE_KVM_VCPU(r13) bl kvmppc_hv_entry /* Back from the guest, go back to nap */ /* Clear our vcpu and vcore pointers so we don't come back in early */ li r0, 0 std r0, HSTATE_KVM_VCPU(r13) /* * Once we clear HSTATE_KVM_VCORE(r13), the code in * kvmppc_run_core() is going to assume that all our vcpu * state is visible in memory. This lwsync makes sure * that that is true. */ lwsync std r0, HSTATE_KVM_VCORE(r13) /* * All secondaries exiting guest will fall through this path. * Before proceeding, just check for HMI interrupt and * invoke opal hmi handler. By now we are sure that the * primary thread on this core/subcore has already made partition * switch/TB resync and we are good to call opal hmi handler. */ cmpwi r12, BOOK3S_INTERRUPT_HMI bne kvm_no_guest li r3,0 /* NULL argument */ bl hmi_exception_realmode /* * At this point we have finished executing in the guest. * We need to wait for hwthread_req to become zero, since * we may not turn on the MMU while hwthread_req is non-zero. * While waiting we also need to check if we get given a vcpu to run. */ kvm_no_guest: lbz r3, HSTATE_HWTHREAD_REQ(r13) cmpwi r3, 0 bne 53f HMT_MEDIUM li r0, KVM_HWTHREAD_IN_KERNEL stb r0, HSTATE_HWTHREAD_STATE(r13) /* need to recheck hwthread_req after a barrier, to avoid race */ sync lbz r3, HSTATE_HWTHREAD_REQ(r13) cmpwi r3, 0 bne 54f /* * Jump to idle_return_gpr_loss, which returns to the * idle_kvm_start_guest caller. 
*/ li r3, LPCR_PECE0 mfspr r4, SPRN_LPCR rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1 mtspr SPRN_LPCR, r4 // Return SRR1 wakeup value, or 0 if we went into the guest ld r3, 32(r1) REST_NVGPRS(r1) ld r1, 0(r1) // Switch back to caller stack ld r0, 16(r1) // Reload LR ld r5, 8(r1) // Reload CR mtlr r0 mtcr r5 blr 53: HMT_LOW ld r5, HSTATE_KVM_VCORE(r13) cmpdi r5, 0 bne 60f ld r3, HSTATE_SPLIT_MODE(r13) cmpdi r3, 0 beq kvm_no_guest lbz r0, KVM_SPLIT_DO_NAP(r3) cmpwi r0, 0 beq kvm_no_guest HMT_MEDIUM b kvm_unsplit_nap 60: HMT_MEDIUM b kvm_secondary_got_guest 54: li r0, KVM_HWTHREAD_IN_KVM stb r0, HSTATE_HWTHREAD_STATE(r13) b kvm_no_guest /* * Here the primary thread is trying to return the core to * whole-core mode, so we need to nap. */ kvm_unsplit_nap: /* * When secondaries are napping in kvm_unsplit_nap() with * hwthread_req = 1, HMI goes ignored even though subcores are * already exited the guest. Hence HMI keeps waking up secondaries * from nap in a loop and secondaries always go back to nap since * no vcore is assigned to them. This makes impossible for primary * thread to get hold of secondary threads resulting into a soft * lockup in KVM path. * * Let us check if HMI is pending and handle it before we go to nap. */ cmpwi r12, BOOK3S_INTERRUPT_HMI bne 55f li r3, 0 /* NULL argument */ bl hmi_exception_realmode 55: /* * Ensure that secondary doesn't nap when it has * its vcore pointer set. */ sync /* matches smp_mb() before setting split_info.do_nap */ ld r0, HSTATE_KVM_VCORE(r13) cmpdi r0, 0 bne kvm_no_guest /* clear any pending message */ BEGIN_FTR_SECTION lis r6, (PPC_DBELL_SERVER << (63-36))@h PPC_MSGCLR(6) END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) /* Set kvm_split_mode.napped[tid] = 1 */ ld r3, HSTATE_SPLIT_MODE(r13) li r0, 1 lhz r4, PACAPACAINDEX(r13) clrldi r4, r4, 61 /* micro-threading => P8 => 8 threads/core */ addi r4, r4, KVM_SPLIT_NAPPED stbx r0, r3, r4 /* Check the do_nap flag again after setting napped[] */ sync lbz r0, KVM_SPLIT_DO_NAP(r3) cmpwi r0, 0 beq 57f li r3, NAPPING_UNSPLIT stb r3, HSTATE_NAPPING(r13) li r3, (LPCR_PECEDH | LPCR_PECE0) >> 4 mfspr r5, SPRN_LPCR rlwimi r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1) b kvm_nap_sequence 57: li r0, 0 stbx r0, r3, r4 b kvm_no_guest /****************************************************************************** * * * Entry code * * * *****************************************************************************/ .global kvmppc_hv_entry kvmppc_hv_entry: /* Required state: * * R4 = vcpu pointer (or NULL) * MSR = ~IR|DR * R13 = PACA * R1 = host R1 * R2 = TOC * all other volatile GPRS = free * Does not preserve non-volatile GPRs or CR fields */ mflr r0 std r0, PPC_LR_STKOFF(r1) stdu r1, -SFS(r1) /* Save R1 in the PACA */ std r1, HSTATE_HOST_R1(r13) li r6, KVM_GUEST_MODE_HOST_HV stb r6, HSTATE_IN_GUEST(r13) #ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING /* Store initial timestamp */ cmpdi r4, 0 beq 1f addi r3, r4, VCPU_TB_RMENTRY bl kvmhv_start_timing 1: #endif ld r5, HSTATE_KVM_VCORE(r13) ld r9, VCORE_KVM(r5) /* pointer to struct kvm */ /* * POWER7/POWER8 host -> guest partition switch code. * We don't have to lock against concurrent tlbies, * but we do have to coordinate across hardware threads. */ /* Set bit in entry map iff exit map is zero. */ li r7, 1 lbz r6, HSTATE_PTID(r13) sld r7, r7, r6 addi r8, r5, VCORE_ENTRY_EXIT 21: lwarx r3, 0, r8 cmpwi r3, 0x100 /* any threads starting to exit? */ bge secondary_too_late /* if so we're too late to the party */ or r3, r3, r7 stwcx. 
r3, 0, r8 bne 21b /* Primary thread switches to guest partition. */ cmpwi r6,0 bne 10f lwz r7,KVM_LPID(r9) ld r6,KVM_SDR1(r9) li r0,LPID_RSVD /* switch to reserved LPID */ mtspr SPRN_LPID,r0 ptesync mtspr SPRN_SDR1,r6 /* switch to partition page table */ mtspr SPRN_LPID,r7 isync /* See if we need to flush the TLB. */ mr r3, r9 /* kvm pointer */ lhz r4, PACAPACAINDEX(r13) /* physical cpu number */ li r5, 0 /* nested vcpu pointer */ bl kvmppc_check_need_tlb_flush nop ld r5, HSTATE_KVM_VCORE(r13) /* Add timebase offset onto timebase */ 22: ld r8,VCORE_TB_OFFSET(r5) cmpdi r8,0 beq 37f std r8, VCORE_TB_OFFSET_APPL(r5) mftb r6 /* current host timebase */ add r8,r8,r6 mtspr SPRN_TBU40,r8 /* update upper 40 bits */ mftb r7 /* check if lower 24 bits overflowed */ clrldi r6,r6,40 clrldi r7,r7,40 cmpld r7,r6 bge 37f addis r8,r8,0x100 /* if so, increment upper 40 bits */ mtspr SPRN_TBU40,r8 /* Load guest PCR value to select appropriate compat mode */ 37: ld r7, VCORE_PCR(r5) LOAD_REG_IMMEDIATE(r6, PCR_MASK) cmpld r7, r6 beq 38f or r7, r7, r6 mtspr SPRN_PCR, r7 38: BEGIN_FTR_SECTION /* DPDES and VTB are shared between threads */ ld r8, VCORE_DPDES(r5) ld r7, VCORE_VTB(r5) mtspr SPRN_DPDES, r8 mtspr SPRN_VTB, r7 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) /* Mark the subcore state as inside guest */ bl kvmppc_subcore_enter_guest nop ld r5, HSTATE_KVM_VCORE(r13) ld r4, HSTATE_KVM_VCPU(r13) li r0,1 stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */ /* Do we have a guest vcpu to run? */ 10: cmpdi r4, 0 beq kvmppc_primary_no_guest kvmppc_got_guest: /* Increment yield count if they have a VPA */ ld r3, VCPU_VPA(r4) cmpdi r3, 0 beq 25f li r6, LPPACA_YIELDCOUNT LWZX_BE r5, r3, r6 addi r5, r5, 1 STWX_BE r5, r3, r6 li r6, 1 stb r6, VCPU_VPA_DIRTY(r4) 25: /* Save purr/spurr */ mfspr r5,SPRN_PURR mfspr r6,SPRN_SPURR std r5,HSTATE_PURR(r13) std r6,HSTATE_SPURR(r13) ld r7,VCPU_PURR(r4) ld r8,VCPU_SPURR(r4) mtspr SPRN_PURR,r7 mtspr SPRN_SPURR,r8 /* Save host values of some registers */ BEGIN_FTR_SECTION mfspr r5, SPRN_CIABR mfspr r6, SPRN_DAWR0 mfspr r7, SPRN_DAWRX0 mfspr r8, SPRN_IAMR std r5, STACK_SLOT_CIABR(r1) std r6, STACK_SLOT_DAWR0(r1) std r7, STACK_SLOT_DAWRX0(r1) std r8, STACK_SLOT_IAMR(r1) mfspr r5, SPRN_FSCR std r5, STACK_SLOT_FSCR(r1) END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) mfspr r5, SPRN_AMR std r5, STACK_SLOT_AMR(r1) mfspr r6, SPRN_UAMOR std r6, STACK_SLOT_UAMOR(r1) BEGIN_FTR_SECTION /* Set partition DABR */ /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */ lwz r5,VCPU_DABRX(r4) ld r6,VCPU_DABR(r4) mtspr SPRN_DABRX,r5 mtspr SPRN_DABR,r6 isync END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) #ifdef CONFIG_PPC_TRANSACTIONAL_MEM BEGIN_FTR_SECTION b 91f END_FTR_SECTION_IFCLR(CPU_FTR_TM) /* * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR) */ mr r3, r4 ld r4, VCPU_MSR(r3) li r5, 0 /* don't preserve non-vol regs */ bl kvmppc_restore_tm_hv nop ld r4, HSTATE_KVM_VCPU(r13) 91: #endif /* Load guest PMU registers; r4 = vcpu pointer here */ mr r3, r4 bl kvmhv_load_guest_pmu /* Load up FP, VMX and VSX registers */ ld r4, HSTATE_KVM_VCPU(r13) bl kvmppc_load_fp ld r14, VCPU_GPR(R14)(r4) ld r15, VCPU_GPR(R15)(r4) ld r16, VCPU_GPR(R16)(r4) ld r17, VCPU_GPR(R17)(r4) ld r18, VCPU_GPR(R18)(r4) ld r19, VCPU_GPR(R19)(r4) ld r20, VCPU_GPR(R20)(r4) ld r21, VCPU_GPR(R21)(r4) ld r22, VCPU_GPR(R22)(r4) ld r23, VCPU_GPR(R23)(r4) ld r24, VCPU_GPR(R24)(r4) ld r25, VCPU_GPR(R25)(r4) ld r26, VCPU_GPR(R26)(r4) ld r27, VCPU_GPR(R27)(r4) ld r28, VCPU_GPR(R28)(r4) ld r29, VCPU_GPR(R29)(r4) ld r30, 
VCPU_GPR(R30)(r4) ld r31, VCPU_GPR(R31)(r4) /* Switch DSCR to guest value */ ld r5, VCPU_DSCR(r4) mtspr SPRN_DSCR, r5 BEGIN_FTR_SECTION /* Skip next section on POWER7 */ b 8f END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) /* Load up POWER8-specific registers */ ld r5, VCPU_IAMR(r4) lwz r6, VCPU_PSPB(r4) ld r7, VCPU_FSCR(r4) mtspr SPRN_IAMR, r5 mtspr SPRN_PSPB, r6 mtspr SPRN_FSCR, r7 /* * Handle broken DAWR case by not writing it. This means we * can still store the DAWR register for migration. */ LOAD_REG_ADDR(r5, dawr_force_enable) lbz r5, 0(r5) cmpdi r5, 0 beq 1f ld r5, VCPU_DAWR0(r4) ld r6, VCPU_DAWRX0(r4) mtspr SPRN_DAWR0, r5 mtspr SPRN_DAWRX0, r6 1: ld r7, VCPU_CIABR(r4) ld r8, VCPU_TAR(r4) mtspr SPRN_CIABR, r7 mtspr SPRN_TAR, r8 ld r5, VCPU_IC(r4) ld r8, VCPU_EBBHR(r4) mtspr SPRN_IC, r5 mtspr SPRN_EBBHR, r8 ld r5, VCPU_EBBRR(r4) ld r6, VCPU_BESCR(r4) lwz r7, VCPU_GUEST_PID(r4) ld r8, VCPU_WORT(r4) mtspr SPRN_EBBRR, r5 mtspr SPRN_BESCR, r6 mtspr SPRN_PID, r7 mtspr SPRN_WORT, r8 /* POWER8-only registers */ ld r5, VCPU_TCSCR(r4) ld r6, VCPU_ACOP(r4) ld r7, VCPU_CSIGR(r4) ld r8, VCPU_TACR(r4) mtspr SPRN_TCSCR, r5 mtspr SPRN_ACOP, r6 mtspr SPRN_CSIGR, r7 mtspr SPRN_TACR, r8 nop 8: ld r5, VCPU_SPRG0(r4) ld r6, VCPU_SPRG1(r4) ld r7, VCPU_SPRG2(r4) ld r8, VCPU_SPRG3(r4) mtspr SPRN_SPRG0, r5 mtspr SPRN_SPRG1, r6 mtspr SPRN_SPRG2, r7 mtspr SPRN_SPRG3, r8 /* Load up DAR and DSISR */ ld r5, VCPU_DAR(r4) lwz r6, VCPU_DSISR(r4) mtspr SPRN_DAR, r5 mtspr SPRN_DSISR, r6 /* Restore AMR and UAMOR, set AMOR to all 1s */ ld r5,VCPU_AMR(r4) ld r6,VCPU_UAMOR(r4) mtspr SPRN_AMR,r5 mtspr SPRN_UAMOR,r6 /* Restore state of CTRL run bit; the host currently has it set to 1 */ lwz r5,VCPU_CTRL(r4) andi. r5,r5,1 bne 4f li r6,0 mtspr SPRN_CTRLT,r6 4: /* Secondary threads wait for primary to have done partition switch */ ld r5, HSTATE_KVM_VCORE(r13) lbz r6, HSTATE_PTID(r13) cmpwi r6, 0 beq 21f lbz r0, VCORE_IN_GUEST(r5) cmpwi r0, 0 bne 21f HMT_LOW 20: lwz r3, VCORE_ENTRY_EXIT(r5) cmpwi r3, 0x100 bge no_switch_exit lbz r0, VCORE_IN_GUEST(r5) cmpwi r0, 0 beq 20b HMT_MEDIUM 21: /* Set LPCR. */ ld r8,VCORE_LPCR(r5) mtspr SPRN_LPCR,r8 isync /* * Set the decrementer to the guest decrementer. */ ld r8,VCPU_DEC_EXPIRES(r4) mftb r7 subf r3,r7,r8 mtspr SPRN_DEC,r3 /* Check if HDEC expires soon */ mfspr r3, SPRN_HDEC extsw r3, r3 cmpdi r3, 512 /* 1 microsecond */ blt hdec_soon /* Clear out and reload the SLB */ li r6, 0 slbmte r6, r6 PPC_SLBIA(6) ptesync /* Load up guest SLB entries (N.B. 
slb_max will be 0 for radix) */ lwz r5,VCPU_SLB_MAX(r4) cmpwi r5,0 beq 9f mtctr r5 addi r6,r4,VCPU_SLB 1: ld r8,VCPU_SLB_E(r6) ld r9,VCPU_SLB_V(r6) slbmte r9,r8 addi r6,r6,VCPU_SLB_SIZE bdnz 1b 9: deliver_guest_interrupt: /* r4 = vcpu, r13 = paca */ /* Check if we can deliver an external or decrementer interrupt now */ ld r0, VCPU_PENDING_EXC(r4) cmpdi r0, 0 beq 71f mr r3, r4 bl kvmppc_guest_entry_inject_int ld r4, HSTATE_KVM_VCPU(r13) 71: ld r6, VCPU_SRR0(r4) ld r7, VCPU_SRR1(r4) mtspr SPRN_SRR0, r6 mtspr SPRN_SRR1, r7 ld r10, VCPU_PC(r4) ld r11, VCPU_MSR(r4) /* r11 = vcpu->arch.msr & ~MSR_HV */ rldicl r11, r11, 63 - MSR_HV_LG, 1 rotldi r11, r11, 1 + MSR_HV_LG ori r11, r11, MSR_ME ld r6, VCPU_CTR(r4) ld r7, VCPU_XER(r4) mtctr r6 mtxer r7 /* * Required state: * R4 = vcpu * R10: value for HSRR0 * R11: value for HSRR1 * R13 = PACA */ fast_guest_return: li r0,0 stb r0,VCPU_CEDED(r4) /* cancel cede */ mtspr SPRN_HSRR0,r10 mtspr SPRN_HSRR1,r11 /* Activate guest mode, so faults get handled by KVM */ li r9, KVM_GUEST_MODE_GUEST_HV stb r9, HSTATE_IN_GUEST(r13) #ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING /* Accumulate timing */ addi r3, r4, VCPU_TB_GUEST bl kvmhv_accumulate_time #endif /* Enter guest */ BEGIN_FTR_SECTION ld r5, VCPU_CFAR(r4) mtspr SPRN_CFAR, r5 END_FTR_SECTION_IFSET(CPU_FTR_CFAR) BEGIN_FTR_SECTION ld r0, VCPU_PPR(r4) END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) ld r5, VCPU_LR(r4) mtlr r5 ld r1, VCPU_GPR(R1)(r4) ld r5, VCPU_GPR(R5)(r4) ld r8, VCPU_GPR(R8)(r4) ld r9, VCPU_GPR(R9)(r4) ld r10, VCPU_GPR(R10)(r4) ld r11, VCPU_GPR(R11)(r4) ld r12, VCPU_GPR(R12)(r4) ld r13, VCPU_GPR(R13)(r4) BEGIN_FTR_SECTION mtspr SPRN_PPR, r0 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) ld r6, VCPU_GPR(R6)(r4) ld r7, VCPU_GPR(R7)(r4) ld r0, VCPU_CR(r4) mtcr r0 ld r0, VCPU_GPR(R0)(r4) ld r2, VCPU_GPR(R2)(r4) ld r3, VCPU_GPR(R3)(r4) ld r4, VCPU_GPR(R4)(r4) HRFI_TO_GUEST b . secondary_too_late: li r12, 0 stw r12, STACK_SLOT_TRAP(r1) cmpdi r4, 0 beq 11f stw r12, VCPU_TRAP(r4) #ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING addi r3, r4, VCPU_TB_RMEXIT bl kvmhv_accumulate_time #endif 11: b kvmhv_switch_to_host no_switch_exit: HMT_MEDIUM li r12, 0 b 12f hdec_soon: li r12, BOOK3S_INTERRUPT_HV_DECREMENTER 12: stw r12, VCPU_TRAP(r4) mr r9, r4 #ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING addi r3, r4, VCPU_TB_RMEXIT bl kvmhv_accumulate_time #endif b guest_bypass /****************************************************************************** * * * Exit code * * * *****************************************************************************/ /* * We come here from the first-level interrupt handlers. 
*/ .globl kvmppc_interrupt_hv kvmppc_interrupt_hv: /* * Register contents: * R9 = HSTATE_IN_GUEST * R12 = (guest CR << 32) | interrupt vector * R13 = PACA * guest R12 saved in shadow VCPU SCRATCH0 * guest R13 saved in SPRN_SCRATCH0 * guest R9 saved in HSTATE_SCRATCH2 */ /* We're now back in the host but in guest MMU context */ cmpwi r9,KVM_GUEST_MODE_HOST_HV beq kvmppc_bad_host_intr li r9, KVM_GUEST_MODE_HOST_HV stb r9, HSTATE_IN_GUEST(r13) ld r9, HSTATE_KVM_VCPU(r13) /* Save registers */ std r0, VCPU_GPR(R0)(r9) std r1, VCPU_GPR(R1)(r9) std r2, VCPU_GPR(R2)(r9) std r3, VCPU_GPR(R3)(r9) std r4, VCPU_GPR(R4)(r9) std r5, VCPU_GPR(R5)(r9) std r6, VCPU_GPR(R6)(r9) std r7, VCPU_GPR(R7)(r9) std r8, VCPU_GPR(R8)(r9) ld r0, HSTATE_SCRATCH2(r13) std r0, VCPU_GPR(R9)(r9) std r10, VCPU_GPR(R10)(r9) std r11, VCPU_GPR(R11)(r9) ld r3, HSTATE_SCRATCH0(r13) std r3, VCPU_GPR(R12)(r9) /* CR is in the high half of r12 */ srdi r4, r12, 32 std r4, VCPU_CR(r9) BEGIN_FTR_SECTION ld r3, HSTATE_CFAR(r13) std r3, VCPU_CFAR(r9) END_FTR_SECTION_IFSET(CPU_FTR_CFAR) BEGIN_FTR_SECTION ld r4, HSTATE_PPR(r13) std r4, VCPU_PPR(r9) END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) /* Restore R1/R2 so we can handle faults */ ld r1, HSTATE_HOST_R1(r13) LOAD_PACA_TOC() mfspr r10, SPRN_SRR0 mfspr r11, SPRN_SRR1 std r10, VCPU_SRR0(r9) std r11, VCPU_SRR1(r9) /* trap is in the low half of r12, clear CR from the high half */ clrldi r12, r12, 32 andi. r0, r12, 2 /* need to read HSRR0/1? */ beq 1f mfspr r10, SPRN_HSRR0 mfspr r11, SPRN_HSRR1 clrrdi r12, r12, 2 1: std r10, VCPU_PC(r9) std r11, VCPU_MSR(r9) GET_SCRATCH0(r3) mflr r4 std r3, VCPU_GPR(R13)(r9) std r4, VCPU_LR(r9) stw r12,VCPU_TRAP(r9) /* * Now that we have saved away SRR0/1 and HSRR0/1, * interrupts are recoverable in principle, so set MSR_RI. * This becomes important for relocation-on interrupts from * the guest, which we can get in radix mode on POWER9. */ li r0, MSR_RI mtmsrd r0, 1 #ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING addi r3, r9, VCPU_TB_RMINTR mr r4, r9 bl kvmhv_accumulate_time ld r5, VCPU_GPR(R5)(r9) ld r6, VCPU_GPR(R6)(r9) ld r7, VCPU_GPR(R7)(r9) ld r8, VCPU_GPR(R8)(r9) #endif /* Save HEIR (HV emulation assist reg) in emul_inst if this is an HEI (HV emulation interrupt, e40) */ li r3,KVM_INST_FETCH_FAILED stw r3,VCPU_LAST_INST(r9) cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST bne 11f mfspr r3,SPRN_HEIR 11: stw r3,VCPU_HEIR(r9) /* these are volatile across C function calls */ mfctr r3 mfxer r4 std r3, VCPU_CTR(r9) std r4, VCPU_XER(r9) /* Save more register state */ mfdar r3 mfdsisr r4 std r3, VCPU_DAR(r9) stw r4, VCPU_DSISR(r9) /* If this is a page table miss then see if it's theirs or ours */ cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE beq kvmppc_hdsi std r3, VCPU_FAULT_DAR(r9) stw r4, VCPU_FAULT_DSISR(r9) cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE beq kvmppc_hisi /* See if this is a leftover HDEC interrupt */ cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER bne 2f mfspr r3,SPRN_HDEC extsw r3, r3 cmpdi r3,0 mr r4,r9 bge fast_guest_return 2: /* See if this is an hcall we can handle in real mode */ cmpwi r12,BOOK3S_INTERRUPT_SYSCALL beq hcall_try_real_mode /* Hypervisor doorbell - exit only if host IPI flag set */ cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL bne 3f lbz r0, HSTATE_HOST_IPI(r13) cmpwi r0, 0 beq maybe_reenter_guest b guest_exit_cont 3: /* If it's a hypervisor facility unavailable interrupt, save HFSCR */ cmpwi r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL bne 14f mfspr r3, SPRN_HFSCR std r3, VCPU_HFSCR(r9) b guest_exit_cont 14: /* External interrupt ? 
*/ cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL beq kvmppc_guest_external /* See if it is a machine check */ cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK beq machine_check_realmode /* Or a hypervisor maintenance interrupt */ cmpwi r12, BOOK3S_INTERRUPT_HMI beq hmi_realmode guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ #ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING addi r3, r9, VCPU_TB_RMEXIT mr r4, r9 bl kvmhv_accumulate_time #endif /* * Possibly flush the link stack here, before we do a blr in * kvmhv_switch_to_host. */ 1: nop patch_site 1b patch__call_kvm_flush_link_stack /* For hash guest, read the guest SLB and save it away */ li r5, 0 lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */ mtctr r0 li r6,0 addi r7,r9,VCPU_SLB 1: slbmfee r8,r6 andis. r0,r8,SLB_ESID_V@h beq 2f add r8,r8,r6 /* put index in */ slbmfev r3,r6 std r8,VCPU_SLB_E(r7) std r3,VCPU_SLB_V(r7) addi r7,r7,VCPU_SLB_SIZE addi r5,r5,1 2: addi r6,r6,1 bdnz 1b /* Finally clear out the SLB */ li r0,0 slbmte r0,r0 PPC_SLBIA(6) ptesync stw r5,VCPU_SLB_MAX(r9) /* load host SLB entries */ ld r8,PACA_SLBSHADOWPTR(r13) .rept SLB_NUM_BOLTED li r3, SLBSHADOW_SAVEAREA LDX_BE r5, r8, r3 addi r3, r3, 8 LDX_BE r6, r8, r3 andis. r7,r5,SLB_ESID_V@h beq 1f slbmte r6,r5 1: addi r8,r8,16 .endr guest_bypass: stw r12, STACK_SLOT_TRAP(r1) /* Save DEC */ /* Do this before kvmhv_commence_exit so we know TB is guest TB */ ld r3, HSTATE_KVM_VCORE(r13) mfspr r5,SPRN_DEC mftb r6 extsw r5,r5 16: add r5,r5,r6 std r5,VCPU_DEC_EXPIRES(r9) /* Increment exit count, poke other threads to exit */ mr r3, r12 bl kvmhv_commence_exit nop ld r9, HSTATE_KVM_VCPU(r13) /* Stop others sending VCPU interrupts to this physical CPU */ li r0, -1 stw r0, VCPU_CPU(r9) stw r0, VCPU_THREAD_CPU(r9) /* Save guest CTRL register, set runlatch to 1 if it was clear */ mfspr r6,SPRN_CTRLF stw r6,VCPU_CTRL(r9) andi. r0,r6,1 bne 4f li r6,1 mtspr SPRN_CTRLT,r6 4: /* * Save the guest PURR/SPURR */ mfspr r5,SPRN_PURR mfspr r6,SPRN_SPURR ld r7,VCPU_PURR(r9) ld r8,VCPU_SPURR(r9) std r5,VCPU_PURR(r9) std r6,VCPU_SPURR(r9) subf r5,r7,r5 subf r6,r8,r6 /* * Restore host PURR/SPURR and add guest times * so that the time in the guest gets accounted. */ ld r3,HSTATE_PURR(r13) ld r4,HSTATE_SPURR(r13) add r3,r3,r5 add r4,r4,r6 mtspr SPRN_PURR,r3 mtspr SPRN_SPURR,r4 BEGIN_FTR_SECTION b 8f END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) /* Save POWER8-specific registers */ mfspr r5, SPRN_IAMR mfspr r6, SPRN_PSPB mfspr r7, SPRN_FSCR std r5, VCPU_IAMR(r9) stw r6, VCPU_PSPB(r9) std r7, VCPU_FSCR(r9) mfspr r5, SPRN_IC mfspr r7, SPRN_TAR std r5, VCPU_IC(r9) std r7, VCPU_TAR(r9) mfspr r8, SPRN_EBBHR std r8, VCPU_EBBHR(r9) mfspr r5, SPRN_EBBRR mfspr r6, SPRN_BESCR mfspr r7, SPRN_PID mfspr r8, SPRN_WORT std r5, VCPU_EBBRR(r9) std r6, VCPU_BESCR(r9) stw r7, VCPU_GUEST_PID(r9) std r8, VCPU_WORT(r9) mfspr r5, SPRN_TCSCR mfspr r6, SPRN_ACOP mfspr r7, SPRN_CSIGR mfspr r8, SPRN_TACR std r5, VCPU_TCSCR(r9) std r6, VCPU_ACOP(r9) std r7, VCPU_CSIGR(r9) std r8, VCPU_TACR(r9) BEGIN_FTR_SECTION ld r5, STACK_SLOT_FSCR(r1) mtspr SPRN_FSCR, r5 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) /* * Restore various registers to 0, where non-zero values * set by the guest could disrupt the host. 
*/ li r0, 0 mtspr SPRN_PSPB, r0 mtspr SPRN_WORT, r0 mtspr SPRN_TCSCR, r0 /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */ li r0, 1 sldi r0, r0, 31 mtspr SPRN_MMCRS, r0 /* Save and restore AMR, IAMR and UAMOR before turning on the MMU */ ld r8, STACK_SLOT_IAMR(r1) mtspr SPRN_IAMR, r8 8: /* Power7 jumps back in here */ mfspr r5,SPRN_AMR mfspr r6,SPRN_UAMOR std r5,VCPU_AMR(r9) std r6,VCPU_UAMOR(r9) ld r5,STACK_SLOT_AMR(r1) ld r6,STACK_SLOT_UAMOR(r1) mtspr SPRN_AMR, r5 mtspr SPRN_UAMOR, r6 /* Switch DSCR back to host value */ mfspr r8, SPRN_DSCR ld r7, HSTATE_DSCR(r13) std r8, VCPU_DSCR(r9) mtspr SPRN_DSCR, r7 /* Save non-volatile GPRs */ std r14, VCPU_GPR(R14)(r9) std r15, VCPU_GPR(R15)(r9) std r16, VCPU_GPR(R16)(r9) std r17, VCPU_GPR(R17)(r9) std r18, VCPU_GPR(R18)(r9) std r19, VCPU_GPR(R19)(r9) std r20, VCPU_GPR(R20)(r9) std r21, VCPU_GPR(R21)(r9) std r22, VCPU_GPR(R22)(r9) std r23, VCPU_GPR(R23)(r9) std r24, VCPU_GPR(R24)(r9) std r25, VCPU_GPR(R25)(r9) std r26, VCPU_GPR(R26)(r9) std r27, VCPU_GPR(R27)(r9) std r28, VCPU_GPR(R28)(r9) std r29, VCPU_GPR(R29)(r9) std r30, VCPU_GPR(R30)(r9) std r31, VCPU_GPR(R31)(r9) /* Save SPRGs */ mfspr r3, SPRN_SPRG0 mfspr r4, SPRN_SPRG1 mfspr r5, SPRN_SPRG2 mfspr r6, SPRN_SPRG3 std r3, VCPU_SPRG0(r9) std r4, VCPU_SPRG1(r9) std r5, VCPU_SPRG2(r9) std r6, VCPU_SPRG3(r9) /* save FP state */ mr r3, r9 bl kvmppc_save_fp #ifdef CONFIG_PPC_TRANSACTIONAL_MEM BEGIN_FTR_SECTION b 91f END_FTR_SECTION_IFCLR(CPU_FTR_TM) /* * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR) */ mr r3, r9 ld r4, VCPU_MSR(r3) li r5, 0 /* don't preserve non-vol regs */ bl kvmppc_save_tm_hv nop ld r9, HSTATE_KVM_VCPU(r13) 91: #endif /* Increment yield count if they have a VPA */ ld r8, VCPU_VPA(r9) /* do they have a VPA? */ cmpdi r8, 0 beq 25f li r4, LPPACA_YIELDCOUNT LWZX_BE r3, r8, r4 addi r3, r3, 1 STWX_BE r3, r8, r4 li r3, 1 stb r3, VCPU_VPA_DIRTY(r9) 25: /* Save PMU registers if requested */ /* r8 and cr0.eq are live here */ mr r3, r9 li r4, 1 beq 21f /* if no VPA, save PMU stuff anyway */ lbz r4, LPPACA_PMCINUSE(r8) 21: bl kvmhv_save_guest_pmu ld r9, HSTATE_KVM_VCPU(r13) /* Restore host values of some registers */ BEGIN_FTR_SECTION ld r5, STACK_SLOT_CIABR(r1) ld r6, STACK_SLOT_DAWR0(r1) ld r7, STACK_SLOT_DAWRX0(r1) mtspr SPRN_CIABR, r5 /* * If the DAWR doesn't work, it's ok to write these here as * this value should always be zero */ mtspr SPRN_DAWR0, r6 mtspr SPRN_DAWRX0, r7 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) /* * POWER7/POWER8 guest -> host partition switch code. * We don't have to lock against tlbies but we do * have to coordinate the hardware threads. * Here STACK_SLOT_TRAP(r1) contains the trap number. */ kvmhv_switch_to_host: /* Secondary threads wait for primary to do partition switch */ ld r5,HSTATE_KVM_VCORE(r13) ld r4,VCORE_KVM(r5) /* pointer to struct kvm */ lbz r3,HSTATE_PTID(r13) cmpwi r3,0 beq 15f HMT_LOW 13: lbz r3,VCORE_IN_GUEST(r5) cmpwi r3,0 bne 13b HMT_MEDIUM b 16f /* Primary thread waits for all the secondaries to exit guest */ 15: lwz r3,VCORE_ENTRY_EXIT(r5) rlwinm r0,r3,32-8,0xff clrldi r3,r3,56 cmpw r3,r0 bne 15b isync /* Did we actually switch to the guest at all? 
*/ lbz r6, VCORE_IN_GUEST(r5) cmpwi r6, 0 beq 19f /* Primary thread switches back to host partition */ lwz r7,KVM_HOST_LPID(r4) ld r6,KVM_HOST_SDR1(r4) li r8,LPID_RSVD /* switch to reserved LPID */ mtspr SPRN_LPID,r8 ptesync mtspr SPRN_SDR1,r6 /* switch to host page table */ mtspr SPRN_LPID,r7 isync BEGIN_FTR_SECTION /* DPDES and VTB are shared between threads */ mfspr r7, SPRN_DPDES mfspr r8, SPRN_VTB std r7, VCORE_DPDES(r5) std r8, VCORE_VTB(r5) /* clear DPDES so we don't get guest doorbells in the host */ li r8, 0 mtspr SPRN_DPDES, r8 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) /* Subtract timebase offset from timebase */ ld r8, VCORE_TB_OFFSET_APPL(r5) cmpdi r8,0 beq 17f li r0, 0 std r0, VCORE_TB_OFFSET_APPL(r5) mftb r6 /* current guest timebase */ subf r8,r8,r6 mtspr SPRN_TBU40,r8 /* update upper 40 bits */ mftb r7 /* check if lower 24 bits overflowed */ clrldi r6,r6,40 clrldi r7,r7,40 cmpld r7,r6 bge 17f addis r8,r8,0x100 /* if so, increment upper 40 bits */ mtspr SPRN_TBU40,r8 17: /* * If this is an HMI, we called kvmppc_realmode_hmi_handler * above, which may or may not have already called * kvmppc_subcore_exit_guest. Fortunately, all that * kvmppc_subcore_exit_guest does is clear a flag, so calling * it again here is benign even if kvmppc_realmode_hmi_handler * has already called it. */ bl kvmppc_subcore_exit_guest nop 30: ld r5,HSTATE_KVM_VCORE(r13) ld r4,VCORE_KVM(r5) /* pointer to struct kvm */ /* Reset PCR */ ld r0, VCORE_PCR(r5) LOAD_REG_IMMEDIATE(r6, PCR_MASK) cmpld r0, r6 beq 18f mtspr SPRN_PCR, r6 18: /* Signal secondary CPUs to continue */ li r0, 0 stb r0,VCORE_IN_GUEST(r5) 19: lis r8,0x7fff /* MAX_INT@h */ mtspr SPRN_HDEC,r8 16: ld r8,KVM_HOST_LPCR(r4) mtspr SPRN_LPCR,r8 isync #ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING /* Finish timing, if we have a vcpu */ ld r4, HSTATE_KVM_VCPU(r13) cmpdi r4, 0 li r3, 0 beq 2f bl kvmhv_accumulate_time 2: #endif /* Unset guest mode */ li r0, KVM_GUEST_MODE_NONE stb r0, HSTATE_IN_GUEST(r13) lwz r12, STACK_SLOT_TRAP(r1) /* return trap # in r12 */ ld r0, SFS+PPC_LR_STKOFF(r1) addi r1, r1, SFS mtlr r0 blr .balign 32 .global kvm_flush_link_stack kvm_flush_link_stack: /* Save LR into r0 */ mflr r0 /* Flush the link stack. On Power8 it's up to 32 entries in size. */ .rept 32 bl .+4 .endr /* And on Power9 it's up to 64. */ BEGIN_FTR_SECTION .rept 32 bl .+4 .endr END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) /* Restore LR */ mtlr r0 blr kvmppc_guest_external: /* External interrupt, first check for host_ipi. If this is * set, we know the host wants us out so let's do it now */ bl kvmppc_read_intr /* * Restore the active volatile registers after returning from * a C function. */ ld r9, HSTATE_KVM_VCPU(r13) li r12, BOOK3S_INTERRUPT_EXTERNAL /* * kvmppc_read_intr return codes: * * Exit to host (r3 > 0) * 1 An interrupt is pending that needs to be handled by the host * Exit guest and return to host by branching to guest_exit_cont * * 2 Passthrough that needs completion in the host * Exit guest and return to host by branching to guest_exit_cont * However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD * to indicate to the host to complete handling the interrupt * * Before returning to guest, we check if any CPU is heading out * to the host and if so, we head out also. If no CPUs are heading * check return values <= 0. * * Return to guest (r3 <= 0) * 0 No external interrupt is pending * -1 A guest wakeup IPI (which has now been cleared) * In either case, we return to guest to deliver any pending * guest interrupts. 
* * -2 A PCI passthrough external interrupt was handled * (interrupt was delivered directly to guest) * Return to guest to deliver any pending guest interrupts. */ cmpdi r3, 1 ble 1f /* Return code = 2 */ li r12, BOOK3S_INTERRUPT_HV_RM_HARD stw r12, VCPU_TRAP(r9) b guest_exit_cont 1: /* Return code <= 1 */ cmpdi r3, 0 bgt guest_exit_cont /* Return code <= 0 */ maybe_reenter_guest: ld r5, HSTATE_KVM_VCORE(r13) lwz r0, VCORE_ENTRY_EXIT(r5) cmpwi r0, 0x100 mr r4, r9 blt deliver_guest_interrupt b guest_exit_cont /* * Check whether an HDSI is an HPTE not found fault or something else. * If it is an HPTE not found fault that is due to the guest accessing * a page that they have mapped but which we have paged out, then * we continue on with the guest exit path. In all other cases, * reflect the HDSI to the guest as a DSI. */ kvmppc_hdsi: mfspr r4, SPRN_HDAR mfspr r6, SPRN_HDSISR /* HPTE not found fault or protection fault? */ andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h beq 1f /* if not, send it to the guest */ andi. r0, r11, MSR_DR /* data relocation enabled? */ beq 3f clrrdi r0, r4, 28 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */ li r0, BOOK3S_INTERRUPT_DATA_SEGMENT bne 7f /* if no SLB entry found */ 4: std r4, VCPU_FAULT_DAR(r9) stw r6, VCPU_FAULT_DSISR(r9) /* Search the hash table. */ mr r3, r9 /* vcpu pointer */ li r7, 1 /* data fault */ bl kvmppc_hpte_hv_fault ld r9, HSTATE_KVM_VCPU(r13) ld r10, VCPU_PC(r9) ld r11, VCPU_MSR(r9) li r12, BOOK3S_INTERRUPT_H_DATA_STORAGE cmpdi r3, 0 /* retry the instruction */ beq 6f cmpdi r3, -1 /* handle in kernel mode */ beq guest_exit_cont cmpdi r3, -2 /* MMIO emulation; need instr word */ beq 2f /* Synthesize a DSI (or DSegI) for the guest */ ld r4, VCPU_FAULT_DAR(r9) mr r6, r3 1: li r0, BOOK3S_INTERRUPT_DATA_STORAGE mtspr SPRN_DSISR, r6 7: mtspr SPRN_DAR, r4 mtspr SPRN_SRR0, r10 mtspr SPRN_SRR1, r11 mr r10, r0 bl kvmppc_msr_interrupt fast_interrupt_c_return: 6: ld r7, VCPU_CTR(r9) ld r8, VCPU_XER(r9) mtctr r7 mtxer r8 mr r4, r9 b fast_guest_return 3: ld r5, VCPU_KVM(r9) /* not relocated, use VRMA */ ld r5, KVM_VRMA_SLB_V(r5) b 4b /* If this is for emulated MMIO, load the instruction word */ 2: li r8, KVM_INST_FETCH_FAILED /* In case lwz faults */ /* Set guest mode to 'jump over instruction' so if lwz faults * we'll just continue at the next IP. */ li r0, KVM_GUEST_MODE_SKIP stb r0, HSTATE_IN_GUEST(r13) /* Do the access with MSR:DR enabled */ mfmsr r3 ori r4, r3, MSR_DR /* Enable paging for data */ mtmsrd r4 lwz r8, 0(r10) mtmsrd r3 /* Store the result */ stw r8, VCPU_LAST_INST(r9) /* Unset guest mode. */ li r0, KVM_GUEST_MODE_HOST_HV stb r0, HSTATE_IN_GUEST(r13) b guest_exit_cont /* * Similarly for an HISI, reflect it to the guest as an ISI unless * it is an HPTE not found fault for a page that we have paged out. */ kvmppc_hisi: andis. r0, r11, SRR1_ISI_NOPT@h beq 1f andi. r0, r11, MSR_IR /* instruction relocation enabled? */ beq 3f clrrdi r0, r10, 28 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */ li r0, BOOK3S_INTERRUPT_INST_SEGMENT bne 7f /* if no SLB entry found */ 4: /* Search the hash table. 
*/ mr r3, r9 /* vcpu pointer */ mr r4, r10 mr r6, r11 li r7, 0 /* instruction fault */ bl kvmppc_hpte_hv_fault ld r9, HSTATE_KVM_VCPU(r13) ld r10, VCPU_PC(r9) ld r11, VCPU_MSR(r9) li r12, BOOK3S_INTERRUPT_H_INST_STORAGE cmpdi r3, 0 /* retry the instruction */ beq fast_interrupt_c_return cmpdi r3, -1 /* handle in kernel mode */ beq guest_exit_cont /* Synthesize an ISI (or ISegI) for the guest */ mr r11, r3 1: li r0, BOOK3S_INTERRUPT_INST_STORAGE 7: mtspr SPRN_SRR0, r10 mtspr SPRN_SRR1, r11 mr r10, r0 bl kvmppc_msr_interrupt b fast_interrupt_c_return 3: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */ ld r5, KVM_VRMA_SLB_V(r6) b 4b /* * Try to handle an hcall in real mode. * Returns to the guest if we handle it, or continues on up to * the kernel if we can't (i.e. if we don't have a handler for * it, or if the handler returns H_TOO_HARD). * * r5 - r8 contain hcall args, * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca */ hcall_try_real_mode: ld r3,VCPU_GPR(R3)(r9) andi. r0,r11,MSR_PR /* sc 1 from userspace - reflect to guest syscall */ bne sc_1_fast_return clrrdi r3,r3,2 cmpldi r3,hcall_real_table_end - hcall_real_table bge guest_exit_cont /* See if this hcall is enabled for in-kernel handling */ ld r4, VCPU_KVM(r9) srdi r0, r3, 8 /* r0 = (r3 / 4) >> 6 */ sldi r0, r0, 3 /* index into kvm->arch.enabled_hcalls[] */ add r4, r4, r0 ld r0, KVM_ENABLED_HCALLS(r4) rlwinm r4, r3, 32-2, 0x3f /* r4 = (r3 / 4) & 0x3f */ srd r0, r0, r4 andi. r0, r0, 1 beq guest_exit_cont /* Get pointer to handler, if any, and call it */ LOAD_REG_ADDR(r4, hcall_real_table) lwax r3,r3,r4 cmpwi r3,0 beq guest_exit_cont add r12,r3,r4 mtctr r12 mr r3,r9 /* get vcpu pointer */ ld r4,VCPU_GPR(R4)(r9) bctrl cmpdi r3,H_TOO_HARD beq hcall_real_fallback ld r4,HSTATE_KVM_VCPU(r13) std r3,VCPU_GPR(R3)(r4) ld r10,VCPU_PC(r4) ld r11,VCPU_MSR(r4) b fast_guest_return sc_1_fast_return: mtspr SPRN_SRR0,r10 mtspr SPRN_SRR1,r11 li r10, BOOK3S_INTERRUPT_SYSCALL bl kvmppc_msr_interrupt mr r4,r9 b fast_guest_return /* We've attempted a real mode hcall, but it's punted it back * to userspace. 
We need to restore some clobbered volatiles * before resuming the pass-it-to-qemu path */ hcall_real_fallback: li r12,BOOK3S_INTERRUPT_SYSCALL ld r9, HSTATE_KVM_VCPU(r13) b guest_exit_cont .globl hcall_real_table hcall_real_table: .long 0 /* 0 - unused */ .long DOTSYM(kvmppc_h_remove) - hcall_real_table .long DOTSYM(kvmppc_h_enter) - hcall_real_table .long DOTSYM(kvmppc_h_read) - hcall_real_table .long DOTSYM(kvmppc_h_clear_mod) - hcall_real_table .long DOTSYM(kvmppc_h_clear_ref) - hcall_real_table .long DOTSYM(kvmppc_h_protect) - hcall_real_table .long 0 /* 0x1c */ .long 0 /* 0x20 */ .long 0 /* 0x24 - H_SET_SPRG0 */ .long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table .long DOTSYM(kvmppc_rm_h_page_init) - hcall_real_table .long 0 /* 0x30 */ .long 0 /* 0x34 */ .long 0 /* 0x38 */ .long 0 /* 0x3c */ .long 0 /* 0x40 */ .long 0 /* 0x44 */ .long 0 /* 0x48 */ .long 0 /* 0x4c */ .long 0 /* 0x50 */ .long 0 /* 0x54 */ .long 0 /* 0x58 */ .long 0 /* 0x5c */ .long 0 /* 0x60 */ #ifdef CONFIG_KVM_XICS .long DOTSYM(xics_rm_h_eoi) - hcall_real_table .long DOTSYM(xics_rm_h_cppr) - hcall_real_table .long DOTSYM(xics_rm_h_ipi) - hcall_real_table .long 0 /* 0x70 - H_IPOLL */ .long DOTSYM(xics_rm_h_xirr) - hcall_real_table #else .long 0 /* 0x64 - H_EOI */ .long 0 /* 0x68 - H_CPPR */ .long 0 /* 0x6c - H_IPI */ .long 0 /* 0x70 - H_IPOLL */ .long 0 /* 0x74 - H_XIRR */ #endif .long 0 /* 0x78 */ .long 0 /* 0x7c */ .long 0 /* 0x80 */ .long 0 /* 0x84 */ .long 0 /* 0x88 */ .long 0 /* 0x8c */ .long 0 /* 0x90 */ .long 0 /* 0x94 */ .long 0 /* 0x98 */ .long 0 /* 0x9c */ .long 0 /* 0xa0 */ .long 0 /* 0xa4 */ .long 0 /* 0xa8 */ .long 0 /* 0xac */ .long 0 /* 0xb0 */ .long 0 /* 0xb4 */ .long 0 /* 0xb8 */ .long 0 /* 0xbc */ .long 0 /* 0xc0 */ .long 0 /* 0xc4 */ .long 0 /* 0xc8 */ .long 0 /* 0xcc */ .long 0 /* 0xd0 */ .long 0 /* 0xd4 */ .long 0 /* 0xd8 */ .long 0 /* 0xdc */ .long DOTSYM(kvmppc_h_cede) - hcall_real_table .long DOTSYM(kvmppc_rm_h_confer) - hcall_real_table .long 0 /* 0xe8 */ .long 0 /* 0xec */ .long 0 /* 0xf0 */ .long 0 /* 0xf4 */ .long 0 /* 0xf8 */ .long 0 /* 0xfc */ .long 0 /* 0x100 */ .long 0 /* 0x104 */ .long 0 /* 0x108 */ .long 0 /* 0x10c */ .long 0 /* 0x110 */ .long 0 /* 0x114 */ .long 0 /* 0x118 */ .long 0 /* 0x11c */ .long 0 /* 0x120 */ .long DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table .long 0 /* 0x128 */ .long 0 /* 0x12c */ .long 0 /* 0x130 */ .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table .long 0 /* 0x138 */ .long 0 /* 0x13c */ .long 0 /* 0x140 */ .long 0 /* 0x144 */ .long 0 /* 0x148 */ .long 0 /* 0x14c */ .long 0 /* 0x150 */ .long 0 /* 0x154 */ .long 0 /* 0x158 */ .long 0 /* 0x15c */ .long 0 /* 0x160 */ .long 0 /* 0x164 */ .long 0 /* 0x168 */ .long 0 /* 0x16c */ .long 0 /* 0x170 */ .long 0 /* 0x174 */ .long 0 /* 0x178 */ .long 0 /* 0x17c */ .long 0 /* 0x180 */ .long 0 /* 0x184 */ .long 0 /* 0x188 */ .long 0 /* 0x18c */ .long 0 /* 0x190 */ .long 0 /* 0x194 */ .long 0 /* 0x198 */ .long 0 /* 0x19c */ .long 0 /* 0x1a0 */ .long 0 /* 0x1a4 */ .long 0 /* 0x1a8 */ .long 0 /* 0x1ac */ .long 0 /* 0x1b0 */ .long 0 /* 0x1b4 */ .long 0 /* 0x1b8 */ .long 0 /* 0x1bc */ .long 0 /* 0x1c0 */ .long 0 /* 0x1c4 */ .long 0 /* 0x1c8 */ .long 0 /* 0x1cc */ .long 0 /* 0x1d0 */ .long 0 /* 0x1d4 */ .long 0 /* 0x1d8 */ .long 0 /* 0x1dc */ .long 0 /* 0x1e0 */ .long 0 /* 0x1e4 */ .long 0 /* 0x1e8 */ .long 0 /* 0x1ec */ .long 0 /* 0x1f0 */ .long 0 /* 0x1f4 */ .long 0 /* 0x1f8 */ .long 0 /* 0x1fc */ .long 0 /* 0x200 */ .long 0 /* 0x204 */ .long 0 /* 0x208 */ .long 0 /* 0x20c */ .long 0 /* 0x210 */ .long 0 /* 0x214 */ .long 0 
/* 0x218 */ .long 0 /* 0x21c */ .long 0 /* 0x220 */ .long 0 /* 0x224 */ .long 0 /* 0x228 */ .long 0 /* 0x22c */ .long 0 /* 0x230 */ .long 0 /* 0x234 */ .long 0 /* 0x238 */ .long 0 /* 0x23c */ .long 0 /* 0x240 */ .long 0 /* 0x244 */ .long 0 /* 0x248 */ .long 0 /* 0x24c */ .long 0 /* 0x250 */ .long 0 /* 0x254 */ .long 0 /* 0x258 */ .long 0 /* 0x25c */ .long 0 /* 0x260 */ .long 0 /* 0x264 */ .long 0 /* 0x268 */ .long 0 /* 0x26c */ .long 0 /* 0x270 */ .long 0 /* 0x274 */ .long 0 /* 0x278 */ .long 0 /* 0x27c */ .long 0 /* 0x280 */ .long 0 /* 0x284 */ .long 0 /* 0x288 */ .long 0 /* 0x28c */ .long 0 /* 0x290 */ .long 0 /* 0x294 */ .long 0 /* 0x298 */ .long 0 /* 0x29c */ .long 0 /* 0x2a0 */ .long 0 /* 0x2a4 */ .long 0 /* 0x2a8 */ .long 0 /* 0x2ac */ .long 0 /* 0x2b0 */ .long 0 /* 0x2b4 */ .long 0 /* 0x2b8 */ .long 0 /* 0x2bc */ .long 0 /* 0x2c0 */ .long 0 /* 0x2c4 */ .long 0 /* 0x2c8 */ .long 0 /* 0x2cc */ .long 0 /* 0x2d0 */ .long 0 /* 0x2d4 */ .long 0 /* 0x2d8 */ .long 0 /* 0x2dc */ .long 0 /* 0x2e0 */ .long 0 /* 0x2e4 */ .long 0 /* 0x2e8 */ .long 0 /* 0x2ec */ .long 0 /* 0x2f0 */ .long 0 /* 0x2f4 */ .long 0 /* 0x2f8 */ #ifdef CONFIG_KVM_XICS .long DOTSYM(xics_rm_h_xirr_x) - hcall_real_table #else .long 0 /* 0x2fc - H_XIRR_X*/ #endif .long DOTSYM(kvmppc_rm_h_random) - hcall_real_table .globl hcall_real_table_end hcall_real_table_end: _GLOBAL_TOC(kvmppc_h_set_xdabr) EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr) andi. r0, r5, DABRX_USER | DABRX_KERNEL beq 6f li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI andc. r0, r5, r0 beq 3f 6: li r3, H_PARAMETER blr _GLOBAL_TOC(kvmppc_h_set_dabr) EXPORT_SYMBOL_GPL(kvmppc_h_set_dabr) li r5, DABRX_USER | DABRX_KERNEL 3: BEGIN_FTR_SECTION b 2f END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) std r4,VCPU_DABR(r3) stw r5, VCPU_DABRX(r3) mtspr SPRN_DABRX, r5 /* Work around P7 bug where DABR can get corrupted on mtspr */ 1: mtspr SPRN_DABR,r4 mfspr r5, SPRN_DABR cmpd r4, r5 bne 1b isync li r3,0 blr 2: LOAD_REG_ADDR(r11, dawr_force_enable) lbz r11, 0(r11) cmpdi r11, 0 bne 3f li r3, H_HARDWARE blr 3: /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */ rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW rlwimi r5, r4, 2, DAWRX_WT clrrdi r4, r4, 3 std r4, VCPU_DAWR0(r3) std r5, VCPU_DAWRX0(r3) /* * If came in through the real mode hcall handler then it is necessary * to write the registers since the return path won't. Otherwise it is * sufficient to store then in the vcpu struct as they will be loaded * next time the vcpu is run. */ mfmsr r6 andi. r6, r6, MSR_DR /* in real mode? */ bne 4f mtspr SPRN_DAWR0, r4 mtspr SPRN_DAWRX0, r5 4: li r3, 0 blr _GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */ ori r11,r11,MSR_EE std r11,VCPU_MSR(r3) li r0,1 stb r0,VCPU_CEDED(r3) sync /* order setting ceded vs. testing prodded */ lbz r5,VCPU_PRODDED(r3) cmpwi r5,0 bne kvm_cede_prodded li r12,0 /* set trap to 0 to say hcall is handled */ stw r12,VCPU_TRAP(r3) li r0,H_SUCCESS std r0,VCPU_GPR(R3)(r3) /* * Set our bit in the bitmask of napping threads unless all the * other threads are already napping, in which case we send this * up to the host. */ ld r5,HSTATE_KVM_VCORE(r13) lbz r6,HSTATE_PTID(r13) lwz r8,VCORE_ENTRY_EXIT(r5) clrldi r8,r8,56 li r0,1 sld r0,r0,r6 addi r6,r5,VCORE_NAPPING_THREADS 31: lwarx r4,0,r6 or r4,r4,r0 cmpw r4,r8 beq kvm_cede_exit stwcx. 
r4,0,r6 bne 31b /* order napping_threads update vs testing entry_exit_map */ isync li r0,NAPPING_CEDE stb r0,HSTATE_NAPPING(r13) lwz r7,VCORE_ENTRY_EXIT(r5) cmpwi r7,0x100 bge 33f /* another thread already exiting */ /* * Although not specifically required by the architecture, POWER7 * preserves the following registers in nap mode, even if an SMT mode * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3, * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR. */ /* Save non-volatile GPRs */ std r14, VCPU_GPR(R14)(r3) std r15, VCPU_GPR(R15)(r3) std r16, VCPU_GPR(R16)(r3) std r17, VCPU_GPR(R17)(r3) std r18, VCPU_GPR(R18)(r3) std r19, VCPU_GPR(R19)(r3) std r20, VCPU_GPR(R20)(r3) std r21, VCPU_GPR(R21)(r3) std r22, VCPU_GPR(R22)(r3) std r23, VCPU_GPR(R23)(r3) std r24, VCPU_GPR(R24)(r3) std r25, VCPU_GPR(R25)(r3) std r26, VCPU_GPR(R26)(r3) std r27, VCPU_GPR(R27)(r3) std r28, VCPU_GPR(R28)(r3) std r29, VCPU_GPR(R29)(r3) std r30, VCPU_GPR(R30)(r3) std r31, VCPU_GPR(R31)(r3) /* save FP state */ bl kvmppc_save_fp #ifdef CONFIG_PPC_TRANSACTIONAL_MEM BEGIN_FTR_SECTION b 91f END_FTR_SECTION_IFCLR(CPU_FTR_TM) /* * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR) */ ld r3, HSTATE_KVM_VCPU(r13) ld r4, VCPU_MSR(r3) li r5, 0 /* don't preserve non-vol regs */ bl kvmppc_save_tm_hv nop 91: #endif /* * Set DEC to the smaller of DEC and HDEC, so that we wake * no later than the end of our timeslice (HDEC interrupts * don't wake us from nap). */ mfspr r3, SPRN_DEC mfspr r4, SPRN_HDEC mftb r5 extsw r3, r3 extsw r4, r4 cmpd r3, r4 ble 67f mtspr SPRN_DEC, r4 67: /* save expiry time of guest decrementer */ add r3, r3, r5 ld r4, HSTATE_KVM_VCPU(r13) std r3, VCPU_DEC_EXPIRES(r4) #ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING ld r4, HSTATE_KVM_VCPU(r13) addi r3, r4, VCPU_TB_CEDE bl kvmhv_accumulate_time #endif lis r3, LPCR_PECEDP@h /* Do wake on privileged doorbell */ /* Go back to host stack */ ld r1, HSTATE_HOST_R1(r13) /* * Take a nap until a decrementer or external or doobell interrupt * occurs, with PECE1 and PECE0 set in LPCR. * On POWER8, set PECEDH, and if we are ceding, also set PECEDP. * Also clear the runlatch bit before napping. 
*/ kvm_do_nap: li r0,0 mtspr SPRN_CTRLT, r0 li r0,1 stb r0,HSTATE_HWTHREAD_REQ(r13) mfspr r5,SPRN_LPCR ori r5,r5,LPCR_PECE0 | LPCR_PECE1 BEGIN_FTR_SECTION ori r5, r5, LPCR_PECEDH rlwimi r5, r3, 0, LPCR_PECEDP END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) kvm_nap_sequence: /* desired LPCR value in r5 */ li r3, PNV_THREAD_NAP mtspr SPRN_LPCR,r5 isync bl isa206_idle_insn_mayloss li r0,1 mtspr SPRN_CTRLT, r0 mtspr SPRN_SRR1, r3 li r0, 0 stb r0, PACA_FTRACE_ENABLED(r13) li r0, KVM_HWTHREAD_IN_KVM stb r0, HSTATE_HWTHREAD_STATE(r13) lbz r0, HSTATE_NAPPING(r13) cmpwi r0, NAPPING_CEDE beq kvm_end_cede cmpwi r0, NAPPING_NOVCPU beq kvm_novcpu_wakeup cmpwi r0, NAPPING_UNSPLIT beq kvm_unsplit_wakeup twi 31,0,0 /* Nap state must not be zero */ 33: mr r4, r3 li r3, 0 li r12, 0 b 34f kvm_end_cede: /* Woken by external or decrementer interrupt */ /* get vcpu pointer */ ld r4, HSTATE_KVM_VCPU(r13) #ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING addi r3, r4, VCPU_TB_RMINTR bl kvmhv_accumulate_time #endif #ifdef CONFIG_PPC_TRANSACTIONAL_MEM BEGIN_FTR_SECTION b 91f END_FTR_SECTION_IFCLR(CPU_FTR_TM) /* * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR) */ mr r3, r4 ld r4, VCPU_MSR(r3) li r5, 0 /* don't preserve non-vol regs */ bl kvmppc_restore_tm_hv nop ld r4, HSTATE_KVM_VCPU(r13) 91: #endif /* load up FP state */ bl kvmppc_load_fp /* Restore guest decrementer */ ld r3, VCPU_DEC_EXPIRES(r4) mftb r7 subf r3, r7, r3 mtspr SPRN_DEC, r3 /* Load NV GPRS */ ld r14, VCPU_GPR(R14)(r4) ld r15, VCPU_GPR(R15)(r4) ld r16, VCPU_GPR(R16)(r4) ld r17, VCPU_GPR(R17)(r4) ld r18, VCPU_GPR(R18)(r4) ld r19, VCPU_GPR(R19)(r4) ld r20, VCPU_GPR(R20)(r4) ld r21, VCPU_GPR(R21)(r4) ld r22, VCPU_GPR(R22)(r4) ld r23, VCPU_GPR(R23)(r4) ld r24, VCPU_GPR(R24)(r4) ld r25, VCPU_GPR(R25)(r4) ld r26, VCPU_GPR(R26)(r4) ld r27, VCPU_GPR(R27)(r4) ld r28, VCPU_GPR(R28)(r4) ld r29, VCPU_GPR(R29)(r4) ld r30, VCPU_GPR(R30)(r4) ld r31, VCPU_GPR(R31)(r4) /* Check the wake reason in SRR1 to see why we got here */ bl kvmppc_check_wake_reason /* * Restore volatile registers since we could have called a * C routine in kvmppc_check_wake_reason * r4 = VCPU * r3 tells us whether we need to return to host or not * WARNING: it gets checked further down: * should not modify r3 until this check is done. */ ld r4, HSTATE_KVM_VCPU(r13) /* clear our bit in vcore->napping_threads */ 34: ld r5,HSTATE_KVM_VCORE(r13) lbz r7,HSTATE_PTID(r13) li r0,1 sld r0,r0,r7 addi r6,r5,VCORE_NAPPING_THREADS 32: lwarx r7,0,r6 andc r7,r7,r0 stwcx. r7,0,r6 bne 32b li r0,0 stb r0,HSTATE_NAPPING(r13) /* See if the wake reason saved in r3 means we need to exit */ stw r12, VCPU_TRAP(r4) mr r9, r4 cmpdi r3, 0 bgt guest_exit_cont b maybe_reenter_guest /* cede when already previously prodded case */ kvm_cede_prodded: li r0,0 stb r0,VCPU_PRODDED(r3) sync /* order testing prodded vs. clearing ceded */ stb r0,VCPU_CEDED(r3) li r3,H_SUCCESS blr /* we've ceded but we want to give control to the host */ kvm_cede_exit: ld r9, HSTATE_KVM_VCPU(r13) b guest_exit_cont /* Try to do machine check recovery in real mode */ machine_check_realmode: mr r3, r9 /* get vcpu pointer */ bl kvmppc_realmode_machine_check nop /* all machine checks go to virtual mode for further handling */ ld r9, HSTATE_KVM_VCPU(r13) li r12, BOOK3S_INTERRUPT_MACHINE_CHECK b guest_exit_cont /* * Call C code to handle a HMI in real mode. * Only the primary thread does the call, secondary threads are handled * by calling hmi_exception_realmode() after kvmppc_hv_entry returns. 
* r9 points to the vcpu on entry */ hmi_realmode: lbz r0, HSTATE_PTID(r13) cmpwi r0, 0 bne guest_exit_cont bl kvmppc_realmode_hmi_handler ld r9, HSTATE_KVM_VCPU(r13) li r12, BOOK3S_INTERRUPT_HMI b guest_exit_cont /* * Check the reason we woke from nap, and take appropriate action. * Returns (in r3): * 0 if nothing needs to be done * 1 if something happened that needs to be handled by the host * -1 if there was a guest wakeup (IPI or msgsnd) * -2 if we handled a PCI passthrough interrupt (returned by * kvmppc_read_intr only) * * Also sets r12 to the interrupt vector for any interrupt that needs * to be handled now by the host (0x500 for external interrupt), or zero. * Modifies all volatile registers (since it may call a C function). * This routine calls kvmppc_read_intr, a C function, if an external * interrupt is pending. */ kvmppc_check_wake_reason: mfspr r6, SPRN_SRR1 BEGIN_FTR_SECTION rlwinm r6, r6, 45-31, 0xf /* extract wake reason field (P8) */ FTR_SECTION_ELSE rlwinm r6, r6, 45-31, 0xe /* P7 wake reason field is 3 bits */ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S) cmpwi r6, 8 /* was it an external interrupt? */ beq 7f /* if so, see what it was */ li r3, 0 li r12, 0 cmpwi r6, 6 /* was it the decrementer? */ beq 0f BEGIN_FTR_SECTION cmpwi r6, 5 /* privileged doorbell? */ beq 0f cmpwi r6, 3 /* hypervisor doorbell? */ beq 3f END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) cmpwi r6, 0xa /* Hypervisor maintenance ? */ beq 4f li r3, 1 /* anything else, return 1 */ 0: blr /* hypervisor doorbell */ 3: li r12, BOOK3S_INTERRUPT_H_DOORBELL /* * Clear the doorbell as we will invoke the handler * explicitly in the guest exit path. */ lis r6, (PPC_DBELL_SERVER << (63-36))@h PPC_MSGCLR(6) /* see if it's a host IPI */ li r3, 1 lbz r0, HSTATE_HOST_IPI(r13) cmpwi r0, 0 bnelr /* if not, return -1 */ li r3, -1 blr /* Woken up due to Hypervisor maintenance interrupt */ 4: li r12, BOOK3S_INTERRUPT_HMI li r3, 1 blr /* external interrupt - create a stack frame so we can call C */ 7: mflr r0 std r0, PPC_LR_STKOFF(r1) stdu r1, -PPC_MIN_STKFRM(r1) bl kvmppc_read_intr nop li r12, BOOK3S_INTERRUPT_EXTERNAL cmpdi r3, 1 ble 1f /* * Return code of 2 means PCI passthrough interrupt, but * we need to return back to host to complete handling the * interrupt. Trap reason is expected in r12 by guest * exit code. */ li r12, BOOK3S_INTERRUPT_HV_RM_HARD 1: ld r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1) addi r1, r1, PPC_MIN_STKFRM mtlr r0 blr /* * Save away FP, VMX and VSX registers. * r3 = vcpu pointer * N.B. r30 and r31 are volatile across this function, * thus it is not callable from C. */ kvmppc_save_fp: mflr r30 mr r31,r3 mfmsr r5 ori r8,r5,MSR_FP #ifdef CONFIG_ALTIVEC BEGIN_FTR_SECTION oris r8,r8,MSR_VEC@h END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif #ifdef CONFIG_VSX BEGIN_FTR_SECTION oris r8,r8,MSR_VSX@h END_FTR_SECTION_IFSET(CPU_FTR_VSX) #endif mtmsrd r8 addi r3,r3,VCPU_FPRS bl store_fp_state #ifdef CONFIG_ALTIVEC BEGIN_FTR_SECTION addi r3,r31,VCPU_VRS bl store_vr_state END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif mfspr r6,SPRN_VRSAVE stw r6,VCPU_VRSAVE(r31) mtlr r30 blr /* * Load up FP, VMX and VSX registers * r4 = vcpu pointer * N.B. r30 and r31 are volatile across this function, * thus it is not callable from C. 
*/ kvmppc_load_fp: mflr r30 mr r31,r4 mfmsr r9 ori r8,r9,MSR_FP #ifdef CONFIG_ALTIVEC BEGIN_FTR_SECTION oris r8,r8,MSR_VEC@h END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif #ifdef CONFIG_VSX BEGIN_FTR_SECTION oris r8,r8,MSR_VSX@h END_FTR_SECTION_IFSET(CPU_FTR_VSX) #endif mtmsrd r8 addi r3,r4,VCPU_FPRS bl load_fp_state #ifdef CONFIG_ALTIVEC BEGIN_FTR_SECTION addi r3,r31,VCPU_VRS bl load_vr_state END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif lwz r7,VCPU_VRSAVE(r31) mtspr SPRN_VRSAVE,r7 mtlr r30 mr r4,r31 blr #ifdef CONFIG_PPC_TRANSACTIONAL_MEM /* * Save transactional state and TM-related registers. * Called with r3 pointing to the vcpu struct and r4 containing * the guest MSR value. * r5 is non-zero iff non-volatile register state needs to be maintained. * If r5 == 0, this can modify all checkpointed registers, but * restores r1 and r2 before exit. */ _GLOBAL_TOC(kvmppc_save_tm_hv) EXPORT_SYMBOL_GPL(kvmppc_save_tm_hv) /* See if we need to handle fake suspend mode */ BEGIN_FTR_SECTION b __kvmppc_save_tm END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST) lbz r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */ cmpwi r0, 0 beq __kvmppc_save_tm /* The following code handles the fake_suspend = 1 case */ mflr r0 std r0, PPC_LR_STKOFF(r1) stdu r1, -TM_FRAME_SIZE(r1) /* Turn on TM. */ mfmsr r8 li r0, 1 rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG mtmsrd r8 rldicl. r8, r8, 64 - MSR_TS_S_LG, 62 /* Did we actually hrfid? */ beq 4f BEGIN_FTR_SECTION bl pnv_power9_force_smt4_catch END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG) nop /* * It's possible that treclaim. may modify registers, if we have lost * track of fake-suspend state in the guest due to it using rfscv. * Save and restore registers in case this occurs. */ mfspr r3, SPRN_DSCR mfspr r4, SPRN_XER mfspr r5, SPRN_AMR /* SPRN_TAR would need to be saved here if the kernel ever used it */ mfcr r12 SAVE_NVGPRS(r1) SAVE_GPR(2, r1) SAVE_GPR(3, r1) SAVE_GPR(4, r1) SAVE_GPR(5, r1) stw r12, 8(r1) std r1, HSTATE_HOST_R1(r13) /* We have to treclaim here because that's the only way to do S->N */ li r3, TM_CAUSE_KVM_RESCHED TRECLAIM(R3) GET_PACA(r13) ld r1, HSTATE_HOST_R1(r13) REST_GPR(2, r1) REST_GPR(3, r1) REST_GPR(4, r1) REST_GPR(5, r1) lwz r12, 8(r1) REST_NVGPRS(r1) mtspr SPRN_DSCR, r3 mtspr SPRN_XER, r4 mtspr SPRN_AMR, r5 mtcr r12 HMT_MEDIUM /* * We were in fake suspend, so we are not going to save the * register state as the guest checkpointed state (since * we already have it), therefore we can now use any volatile GPR. * In fact treclaim in fake suspend state doesn't modify * any registers. */ BEGIN_FTR_SECTION bl pnv_power9_force_smt4_release END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG) nop 4: mfspr r3, SPRN_PSSCR /* PSSCR_FAKE_SUSPEND is a write-only bit, but clear it anyway */ li r0, PSSCR_FAKE_SUSPEND andc r3, r3, r0 mtspr SPRN_PSSCR, r3 /* Don't save TEXASR, use value from last exit in real suspend state */ ld r9, HSTATE_KVM_VCPU(r13) mfspr r5, SPRN_TFHAR mfspr r6, SPRN_TFIAR std r5, VCPU_TFHAR(r9) std r6, VCPU_TFIAR(r9) addi r1, r1, TM_FRAME_SIZE ld r0, PPC_LR_STKOFF(r1) mtlr r0 blr /* * Restore transactional state and TM-related registers. * Called with r3 pointing to the vcpu struct * and r4 containing the guest MSR value. * r5 is non-zero iff non-volatile register state needs to be maintained. * This potentially modifies all checkpointed registers. * It restores r1 and r2 from the PACA. 
*/ _GLOBAL_TOC(kvmppc_restore_tm_hv) EXPORT_SYMBOL_GPL(kvmppc_restore_tm_hv) /* * If we are doing TM emulation for the guest on a POWER9 DD2, * then we don't actually do a trechkpt -- we either set up * fake-suspend mode, or emulate a TM rollback. */ BEGIN_FTR_SECTION b __kvmppc_restore_tm END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST) mflr r0 std r0, PPC_LR_STKOFF(r1) li r0, 0 stb r0, HSTATE_FAKE_SUSPEND(r13) /* Turn on TM so we can restore TM SPRs */ mfmsr r5 li r0, 1 rldimi r5, r0, MSR_TM_LG, 63-MSR_TM_LG mtmsrd r5 /* * The user may change these outside of a transaction, so they must * always be context switched. */ ld r5, VCPU_TFHAR(r3) ld r6, VCPU_TFIAR(r3) ld r7, VCPU_TEXASR(r3) mtspr SPRN_TFHAR, r5 mtspr SPRN_TFIAR, r6 mtspr SPRN_TEXASR, r7 rldicl. r5, r4, 64 - MSR_TS_S_LG, 62 beqlr /* TM not active in guest */ /* Make sure the failure summary is set */ oris r7, r7, (TEXASR_FS)@h mtspr SPRN_TEXASR, r7 cmpwi r5, 1 /* check for suspended state */ bgt 10f stb r5, HSTATE_FAKE_SUSPEND(r13) b 9f /* and return */ 10: stdu r1, -PPC_MIN_STKFRM(r1) /* guest is in transactional state, so simulate rollback */ bl kvmhv_emulate_tm_rollback nop addi r1, r1, PPC_MIN_STKFRM 9: ld r0, PPC_LR_STKOFF(r1) mtlr r0 blr #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ /* * We come here if we get any exception or interrupt while we are * executing host real mode code while in guest MMU context. * r12 is (CR << 32) | vector * r13 points to our PACA * r12 is saved in HSTATE_SCRATCH0(r13) * r9 is saved in HSTATE_SCRATCH2(r13) * r13 is saved in HSPRG1 * cfar is saved in HSTATE_CFAR(r13) * ppr is saved in HSTATE_PPR(r13) */ kvmppc_bad_host_intr: /* * Switch to the emergency stack, but start half-way down in * case we were already on it. */ mr r9, r1 std r1, PACAR1(r13) ld r1, PACAEMERGSP(r13) subi r1, r1, THREAD_SIZE/2 + INT_FRAME_SIZE std r9, 0(r1) std r0, GPR0(r1) std r9, GPR1(r1) std r2, GPR2(r1) SAVE_GPRS(3, 8, r1) srdi r0, r12, 32 clrldi r12, r12, 32 std r0, _CCR(r1) std r12, _TRAP(r1) andi. r0, r12, 2 beq 1f mfspr r3, SPRN_HSRR0 mfspr r4, SPRN_HSRR1 mfspr r5, SPRN_HDAR mfspr r6, SPRN_HDSISR b 2f 1: mfspr r3, SPRN_SRR0 mfspr r4, SPRN_SRR1 mfspr r5, SPRN_DAR mfspr r6, SPRN_DSISR 2: std r3, _NIP(r1) std r4, _MSR(r1) std r5, _DAR(r1) std r6, _DSISR(r1) ld r9, HSTATE_SCRATCH2(r13) ld r12, HSTATE_SCRATCH0(r13) GET_SCRATCH0(r0) SAVE_GPRS(9, 12, r1) std r0, GPR13(r1) SAVE_NVGPRS(r1) ld r5, HSTATE_CFAR(r13) std r5, ORIG_GPR3(r1) mflr r3 mfctr r4 mfxer r5 lbz r6, PACAIRQSOFTMASK(r13) std r3, _LINK(r1) std r4, _CTR(r1) std r5, _XER(r1) std r6, SOFTE(r1) LOAD_PACA_TOC() LOAD_REG_IMMEDIATE(3, STACK_FRAME_REGS_MARKER) std r3, STACK_FRAME_OVERHEAD-16(r1) /* * XXX On POWER7 and POWER8, we just spin here since we don't * know what the other threads are doing (and we don't want to * coordinate with them) - but at least we now have register state * in memory that we might be able to look at from another CPU. */ b . /* * This mimics the MSR transition on IRQ delivery. The new guest MSR is taken * from VCPU_INTR_MSR and is modified based on the required TM state changes. * r11 has the guest MSR value (in/out) * r9 has a vcpu pointer (in) * r0 is used as a scratch register */ kvmppc_msr_interrupt: rldicl r0, r11, 64 - MSR_TS_S_LG, 62 cmpwi r0, 2 /* Check if we are in transactional state.. */ ld r11, VCPU_INTR_MSR(r9) bne 1f /* ... if transactional, change to suspended */ li r0, 1 1: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG blr /* * void kvmhv_load_guest_pmu(struct kvm_vcpu *vcpu) * * Load up guest PMU state. 
R3 points to the vcpu struct. */ kvmhv_load_guest_pmu: mr r4, r3 mflr r0 li r3, 1 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ isync BEGIN_FTR_SECTION ld r3, VCPU_MMCR(r4) andi. r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO cmpwi r5, MMCR0_PMAO beql kvmppc_fix_pmao END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG) lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */ lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */ lwz r6, VCPU_PMC + 8(r4) lwz r7, VCPU_PMC + 12(r4) lwz r8, VCPU_PMC + 16(r4) lwz r9, VCPU_PMC + 20(r4) mtspr SPRN_PMC1, r3 mtspr SPRN_PMC2, r5 mtspr SPRN_PMC3, r6 mtspr SPRN_PMC4, r7 mtspr SPRN_PMC5, r8 mtspr SPRN_PMC6, r9 ld r3, VCPU_MMCR(r4) ld r5, VCPU_MMCR + 8(r4) ld r6, VCPU_MMCRA(r4) ld r7, VCPU_SIAR(r4) ld r8, VCPU_SDAR(r4) mtspr SPRN_MMCR1, r5 mtspr SPRN_MMCRA, r6 mtspr SPRN_SIAR, r7 mtspr SPRN_SDAR, r8 BEGIN_FTR_SECTION ld r5, VCPU_MMCR + 16(r4) ld r6, VCPU_SIER(r4) mtspr SPRN_MMCR2, r5 mtspr SPRN_SIER, r6 lwz r7, VCPU_PMC + 24(r4) lwz r8, VCPU_PMC + 28(r4) ld r9, VCPU_MMCRS(r4) mtspr SPRN_SPMC1, r7 mtspr SPRN_SPMC2, r8 mtspr SPRN_MMCRS, r9 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) mtspr SPRN_MMCR0, r3 isync mtlr r0 blr /* * void kvmhv_load_host_pmu(void) * * Reload host PMU state saved in the PACA by kvmhv_save_host_pmu. */ kvmhv_load_host_pmu: mflr r0 lbz r4, PACA_PMCINUSE(r13) /* is the host using the PMU? */ cmpwi r4, 0 beq 23f /* skip if not */ BEGIN_FTR_SECTION ld r3, HSTATE_MMCR0(r13) andi. r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO cmpwi r4, MMCR0_PMAO beql kvmppc_fix_pmao END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG) lwz r3, HSTATE_PMC1(r13) lwz r4, HSTATE_PMC2(r13) lwz r5, HSTATE_PMC3(r13) lwz r6, HSTATE_PMC4(r13) lwz r8, HSTATE_PMC5(r13) lwz r9, HSTATE_PMC6(r13) mtspr SPRN_PMC1, r3 mtspr SPRN_PMC2, r4 mtspr SPRN_PMC3, r5 mtspr SPRN_PMC4, r6 mtspr SPRN_PMC5, r8 mtspr SPRN_PMC6, r9 ld r3, HSTATE_MMCR0(r13) ld r4, HSTATE_MMCR1(r13) ld r5, HSTATE_MMCRA(r13) ld r6, HSTATE_SIAR(r13) ld r7, HSTATE_SDAR(r13) mtspr SPRN_MMCR1, r4 mtspr SPRN_MMCRA, r5 mtspr SPRN_SIAR, r6 mtspr SPRN_SDAR, r7 BEGIN_FTR_SECTION ld r8, HSTATE_MMCR2(r13) ld r9, HSTATE_SIER(r13) mtspr SPRN_MMCR2, r8 mtspr SPRN_SIER, r9 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) mtspr SPRN_MMCR0, r3 isync mtlr r0 23: blr /* * void kvmhv_save_guest_pmu(struct kvm_vcpu *vcpu, bool pmu_in_use) * * Save guest PMU state into the vcpu struct. * r3 = vcpu, r4 = full save flag (PMU in use flag set in VPA) */ kvmhv_save_guest_pmu: mr r9, r3 mr r8, r4 BEGIN_FTR_SECTION /* * POWER8 seems to have a hardware bug where setting * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE] * when some counters are already negative doesn't seem * to cause a performance monitor alert (and hence interrupt). * The effect of this is that when saving the PMU state, * if there is no PMU alert pending when we read MMCR0 * before freezing the counters, but one becomes pending * before we read the counters, we lose it. * To work around this, we need a way to freeze the counters * before reading MMCR0. Normally, freezing the counters * is done by writing MMCR0 (to set MMCR0[FC]) which * unavoidably writes MMCR0[PMA0] as well. On POWER8, * we can also freeze the counters using MMCR2, by writing * 1s to all the counter freeze condition bits (there are * 9 bits each for 6 counters). 
*/ li r3, -1 /* set all freeze bits */ clrrdi r3, r3, 10 mfspr r10, SPRN_MMCR2 mtspr SPRN_MMCR2, r3 isync END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) li r3, 1 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ mfspr r4, SPRN_MMCR0 /* save MMCR0 */ mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ mfspr r6, SPRN_MMCRA /* Clear MMCRA in order to disable SDAR updates */ li r7, 0 mtspr SPRN_MMCRA, r7 isync cmpwi r8, 0 /* did they ask for PMU stuff to be saved? */ bne 21f std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */ b 22f 21: mfspr r5, SPRN_MMCR1 mfspr r7, SPRN_SIAR mfspr r8, SPRN_SDAR std r4, VCPU_MMCR(r9) std r5, VCPU_MMCR + 8(r9) std r6, VCPU_MMCRA(r9) BEGIN_FTR_SECTION std r10, VCPU_MMCR + 16(r9) END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) std r7, VCPU_SIAR(r9) std r8, VCPU_SDAR(r9) mfspr r3, SPRN_PMC1 mfspr r4, SPRN_PMC2 mfspr r5, SPRN_PMC3 mfspr r6, SPRN_PMC4 mfspr r7, SPRN_PMC5 mfspr r8, SPRN_PMC6 stw r3, VCPU_PMC(r9) stw r4, VCPU_PMC + 4(r9) stw r5, VCPU_PMC + 8(r9) stw r6, VCPU_PMC + 12(r9) stw r7, VCPU_PMC + 16(r9) stw r8, VCPU_PMC + 20(r9) BEGIN_FTR_SECTION mfspr r5, SPRN_SIER std r5, VCPU_SIER(r9) mfspr r6, SPRN_SPMC1 mfspr r7, SPRN_SPMC2 mfspr r8, SPRN_MMCRS stw r6, VCPU_PMC + 24(r9) stw r7, VCPU_PMC + 28(r9) std r8, VCPU_MMCRS(r9) lis r4, 0x8000 mtspr SPRN_MMCRS, r4 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 22: blr /* * This works around a hardware bug on POWER8E processors, where * writing a 1 to the MMCR0[PMAO] bit doesn't generate a * performance monitor interrupt. Instead, when we need to have * an interrupt pending, we have to arrange for a counter to overflow. */ kvmppc_fix_pmao: li r3, 0 mtspr SPRN_MMCR2, r3 lis r3, (MMCR0_PMXE | MMCR0_FCECE)@h ori r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN mtspr SPRN_MMCR0, r3 lis r3, 0x7fff ori r3, r3, 0xffff mtspr SPRN_PMC6, r3 isync blr #ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING /* * Start timing an activity * r3 = pointer to time accumulation struct, r4 = vcpu */ kvmhv_start_timing: ld r5, HSTATE_KVM_VCORE(r13) ld r6, VCORE_TB_OFFSET_APPL(r5) mftb r5 subf r5, r6, r5 /* subtract current timebase offset */ std r3, VCPU_CUR_ACTIVITY(r4) std r5, VCPU_ACTIVITY_START(r4) blr /* * Accumulate time to one activity and start another. * r3 = pointer to new time accumulation struct, r4 = vcpu */ kvmhv_accumulate_time: ld r5, HSTATE_KVM_VCORE(r13) ld r8, VCORE_TB_OFFSET_APPL(r5) ld r5, VCPU_CUR_ACTIVITY(r4) ld r6, VCPU_ACTIVITY_START(r4) std r3, VCPU_CUR_ACTIVITY(r4) mftb r7 subf r7, r8, r7 /* subtract current timebase offset */ std r7, VCPU_ACTIVITY_START(r4) cmpdi r5, 0 beqlr subf r3, r6, r7 ld r8, TAS_SEQCOUNT(r5) cmpdi r8, 0 addi r8, r8, 1 std r8, TAS_SEQCOUNT(r5) lwsync ld r7, TAS_TOTAL(r5) add r7, r7, r3 std r7, TAS_TOTAL(r5) ld r6, TAS_MIN(r5) ld r7, TAS_MAX(r5) beq 3f cmpd r3, r6 bge 1f 3: std r3, TAS_MIN(r5) 1: cmpd r3, r7 ble 2f std r3, TAS_MAX(r5) 2: lwsync addi r8, r8, 1 std r8, TAS_SEQCOUNT(r5) blr #endif
aixcc-public/challenge-001-exemplar-source
3,131
arch/powerpc/boot/div64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Divide a 64-bit unsigned number by a 32-bit unsigned number. * This routine assumes that the top 32 bits of the dividend are * non-zero to start with. * On entry, r3 points to the dividend, which gets overwritten with * the 64-bit quotient, and r4 contains the divisor. * On exit, r3 contains the remainder. * * Copyright (C) 2002 Paul Mackerras, IBM Corp. */ #include "ppc_asm.h" .globl __div64_32 __div64_32: lwz r5,0(r3) # get the dividend into r5/r6 lwz r6,4(r3) cmplw r5,r4 li r7,0 li r8,0 blt 1f divwu r7,r5,r4 # if dividend.hi >= divisor, mullw r0,r7,r4 # quotient.hi = dividend.hi / divisor subf. r5,r0,r5 # dividend.hi %= divisor beq 3f 1: mr r11,r5 # here dividend.hi != 0 andis. r0,r5,0xc000 bne 2f cntlzw r0,r5 # we are shifting the dividend right li r10,-1 # to make it < 2^32, and shifting srw r10,r10,r0 # the divisor right the same amount, addc r9,r4,r10 # rounding up (so the estimate cannot andc r11,r6,r10 # ever be too large, only too small) andc r9,r9,r10 addze r9,r9 or r11,r5,r11 rotlw r9,r9,r0 rotlw r11,r11,r0 divwu r11,r11,r9 # then we divide the shifted quantities 2: mullw r10,r11,r4 # to get an estimate of the quotient, mulhwu r9,r11,r4 # multiply the estimate by the divisor, subfc r6,r10,r6 # subtract the product from the dividend, add r8,r8,r11 # and add the estimate to the accumulated subfe. r5,r9,r5 # quotient bne 1b 3: cmplw r6,r4 blt 4f divwu r0,r6,r4 # perform the remaining 32-bit division mullw r10,r0,r4 # and get the remainder add r8,r8,r0 subf r6,r10,r6 4: stw r7,0(r3) # return the quotient in *r3 stw r8,4(r3) mr r3,r6 # return the remainder in r3 blr /* * Extended precision shifts. * * Updated to be valid for shift counts from 0 to 63 inclusive. * -- Gabriel * * R3/R4 has 64 bit value * R5 has shift count * result in R3/R4 * * ashrdi3: arithmetic right shift (sign propagation) * lshrdi3: logical right shift * ashldi3: left shift */ .globl __ashrdi3 __ashrdi3: subfic r6,r5,32 srw r4,r4,r5 # LSW = count > 31 ? 0 : LSW >> count addi r7,r5,32 # could be xori, or addi with -32 slw r6,r3,r6 # t1 = count > 31 ? 0 : MSW << (32-count) rlwinm r8,r7,0,32 # t3 = (count < 32) ? 32 : 0 sraw r7,r3,r7 # t2 = MSW >> (count-32) or r4,r4,r6 # LSW |= t1 slw r7,r7,r8 # t2 = (count < 32) ? 0 : t2 sraw r3,r3,r5 # MSW = MSW >> count or r4,r4,r7 # LSW |= t2 blr .globl __ashldi3 __ashldi3: subfic r6,r5,32 slw r3,r3,r5 # MSW = count > 31 ? 0 : MSW << count addi r7,r5,32 # could be xori, or addi with -32 srw r6,r4,r6 # t1 = count > 31 ? 0 : LSW >> (32-count) slw r7,r4,r7 # t2 = count < 32 ? 0 : LSW << (count-32) or r3,r3,r6 # MSW |= t1 slw r4,r4,r5 # LSW = LSW << count or r3,r3,r7 # MSW |= t2 blr .globl __lshrdi3 __lshrdi3: subfic r6,r5,32 srw r4,r4,r5 # LSW = count > 31 ? 0 : LSW >> count addi r7,r5,32 # could be xori, or addi with -32 slw r6,r3,r6 # t1 = count > 31 ? 0 : MSW << (32-count) srw r7,r3,r7 # t2 = count < 32 ? 0 : MSW >> (count-32) or r4,r4,r6 # LSW |= t1 srw r3,r3,r5 # MSW = MSW >> count or r4,r4,r7 # LSW |= t2 blr
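Two C sketches of what div64.S computes, under the assumption that plain 64-bit arithmetic is available (which is exactly what the assembly cannot assume). The first documents the __div64_32 calling contract; the second is a branchy model of __ashrdi3, whereas the assembly is branch-free because srw/slw yield zero for shift counts of 32..63. Names are illustrative.

    #include <stdint.h>

    /* Contract of __div64_32: *np holds the dividend on entry and the
     * 64-bit quotient on exit; the 32-bit remainder is returned. */
    static uint32_t div64_32(uint64_t *np, uint32_t divisor)
    {
        uint32_t rem = (uint32_t)(*np % divisor);
        *np /= divisor;
        return rem;
    }

    /* Model of __ashrdi3 for counts 0..63; assumes arithmetic right
     * shift of signed values, as on PowerPC. */
    static int64_t ashrdi3(int64_t v, unsigned int count)
    {
        int32_t hi = (int32_t)((uint64_t)v >> 32);
        uint32_t lo = (uint32_t)v, rhi, rlo;

        if (count == 0)
            return v;
        if (count < 32) {
            rhi = (uint32_t)(hi >> count);
            rlo = (lo >> count) | ((uint32_t)hi << (32 - count));
        } else {
            rhi = (uint32_t)(hi >> 31);              /* sign fill */
            rlo = (uint32_t)(hi >> (count - 32));
        }
        return (int64_t)(((uint64_t)rhi << 32) | rlo);
    }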
aixcc-public/challenge-001-exemplar-source
1,475
arch/powerpc/boot/ps3-head.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * PS3 bootwrapper entry. * * Copyright (C) 2007 Sony Computer Entertainment Inc. * Copyright 2007 Sony Corp. */ #include "ppc_asm.h" .machine "ppc64" .text /* * __system_reset_overlay - The PS3 first stage entry. * * The bootwrapper build script copies the 512 bytes at symbol * __system_reset_overlay to offset 0x100 of the rom image. This symbol * must occupy 512 bytes or less. * * The PS3 has a single processor with two threads. */ .globl __system_reset_overlay __system_reset_overlay: /* Switch to 32-bit mode. */ mfmsr r9 clrldi r9,r9,1 mtmsrd r9 nop /* Get thread number in r3 and branch. */ mfspr r3, 0x88 cntlzw. r3, r3 beq 1f /* Secondary goes to __secondary_hold in kernel. */ li r4, 0x60 mtctr r4 bctr 1: /* Primary delays then goes to _zimage_start in wrapper. */ or 31, 31, 31 /* db16cyc */ or 31, 31, 31 /* db16cyc */ lis r4, _zimage_start@ha addi r4, r4, _zimage_start@l mtctr r4 bctr . = __system_reset_overlay + 512 /* * __system_reset_kernel - Placeholder for the kernel reset vector. * * The bootwrapper build script copies 512 bytes from offset 0x100 * of the rom image to the symbol __system_reset_kernel. At runtime * the bootwrapper program copies the 512 bytes at __system_reset_kernel * to ram address 0x100. This symbol must occupy 512 bytes. */ .globl __system_reset_kernel __system_reset_kernel: . = __system_reset_kernel + 512
aixcc-public/challenge-001-exemplar-source
1,416
arch/powerpc/boot/zImage.lds.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <asm-generic/vmlinux.lds.h> #ifdef CONFIG_PPC64_BOOT_WRAPPER OUTPUT_ARCH(powerpc:common64) #else OUTPUT_ARCH(powerpc:common) #endif ENTRY(_zimage_start) EXTERN(_zimage_start) SECTIONS { .text : { _start = .; *(.text) *(.fixup) _etext = .; } . = ALIGN(4096); .data : { *(.rodata*) *(.data*) *(.sdata*) #ifndef CONFIG_PPC64_BOOT_WRAPPER *(.got2) #endif } .dynsym : { *(.dynsym) } .dynstr : { *(.dynstr) } .dynamic : { __dynamic_start = .; *(.dynamic) } #ifdef CONFIG_PPC64_BOOT_WRAPPER .got : ALIGN(256) { *(.got .toc) } #endif .hash : { *(.hash) } .interp : { *(.interp) } .rela.dyn : { #ifdef CONFIG_PPC64_BOOT_WRAPPER __rela_dyn_start = .; #endif *(.rela*) } . = ALIGN(8); .kernel:dtb : { _dtb_start = .; *(.kernel:dtb) _dtb_end = .; } . = ALIGN(4096); .kernel:vmlinux.strip : { _vmlinux_start = .; *(.kernel:vmlinux.strip) _vmlinux_end = .; } . = ALIGN(4096); .kernel:initrd : { _initrd_start = .; *(.kernel:initrd) _initrd_end = .; } . = ALIGN(4096); .kernel:esm_blob : { _esm_blob_start = .; *(.kernel:esm_blob) _esm_blob_end = .; } . = ALIGN(4096); .bss : { _edata = .; __bss_start = .; *(.sbss) *(.bss) *(COMMON) _end = . ; } }
aixcc-public/challenge-001-exemplar-source
2,365
arch/powerpc/boot/ps3-hvcall.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * PS3 bootwrapper hvcalls. * * Copyright (C) 2007 Sony Computer Entertainment Inc. * Copyright 2007 Sony Corp. */ #include "ppc_asm.h" .machine "ppc64" /* * The PS3 hypervisor uses a 64 bit "C" language calling convention. * The routines here marshal arguments between the 32 bit wrapper * program and the 64 bit hvcalls. * * wrapper lv1 * 32-bit (h,l) 64-bit * * 1: r3,r4 <-> r3 * 2: r5,r6 <-> r4 * 3: r7,r8 <-> r5 * 4: r9,r10 <-> r6 * 5: 8(r1),12(r1) <-> r7 * 6: 16(r1),20(r1) <-> r8 * 7: 24(r1),28(r1) <-> r9 * 8: 32(r1),36(r1) <-> r10 * */ .macro GLOBAL name .section ".text" .balign 4 .globl \name \name: .endm .macro NO_SUPPORT name GLOBAL \name b ps3_no_support .endm .macro HVCALL num li r11, \num .long 0x44000022 extsw r3, r3 .endm .macro SAVE_LR offset=4 mflr r0 stw r0, \offset(r1) .endm .macro LOAD_LR offset=4 lwz r0, \offset(r1) mtlr r0 .endm .macro LOAD_64_REG target,high,low sldi r11, \high, 32 or \target, r11, \low .endm .macro LOAD_64_STACK target,offset ld \target, \offset(r1) .endm .macro LOAD_R3 LOAD_64_REG r3,r3,r4 .endm .macro LOAD_R4 LOAD_64_REG r4,r5,r6 .endm .macro LOAD_R5 LOAD_64_REG r5,r7,r8 .endm .macro LOAD_R6 LOAD_64_REG r6,r9,r10 .endm .macro LOAD_R7 LOAD_64_STACK r7,8 .endm .macro LOAD_R8 LOAD_64_STACK r8,16 .endm .macro LOAD_R9 LOAD_64_STACK r9,24 .endm .macro LOAD_R10 LOAD_64_STACK r10,32 .endm .macro LOAD_REGS_0 stwu 1,-16(1) stw 3, 8(1) .endm .macro LOAD_REGS_5 LOAD_R3 LOAD_R4 LOAD_R5 LOAD_R6 LOAD_R7 .endm .macro LOAD_REGS_6 LOAD_REGS_5 LOAD_R8 .endm .macro LOAD_REGS_8 LOAD_REGS_6 LOAD_R9 LOAD_R10 .endm .macro STORE_REGS_0_1 lwz r11, 8(r1) std r4, 0(r11) mr r4, r3 li r3, 0 addi r1,r1,16 .endm .macro STORE_REGS_5_2 lwz r11, 16(r1) std r4, 0(r11) lwz r11, 20(r1) std r5, 0(r11) .endm .macro STORE_REGS_6_1 lwz r11, 24(r1) std r4, 0(r11) .endm GLOBAL lv1_get_logical_ppe_id SAVE_LR LOAD_REGS_0 HVCALL 69 STORE_REGS_0_1 LOAD_LR blr GLOBAL lv1_get_logical_partition_id SAVE_LR LOAD_REGS_0 HVCALL 74 STORE_REGS_0_1 LOAD_LR blr GLOBAL lv1_get_repository_node_value SAVE_LR LOAD_REGS_5 HVCALL 91 STORE_REGS_5_2 LOAD_LR blr GLOBAL lv1_panic SAVE_LR LOAD_REGS_8 HVCALL 255 LOAD_LR blr
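The marshalling convention documented in the comment above reduces to one operation per 64-bit argument: recombine the (high, low) register pair, which is exactly what the LOAD_64_REG macro's sldi/or pair does. A one-line C equivalent, with an illustrative name:

    #include <stdint.h>

    static uint64_t load_64_reg(uint32_t high, uint32_t low)
    {
        return ((uint64_t)high << 32) | low;   /* sldi r11,high,32; or */
    }

So a five-argument hvcall such as lv1_get_repository_node_value consumes r3..r10 as four pairs plus one pair spilled to the caller's stack, which is what LOAD_REGS_5 assembles.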
aixcc-public/challenge-001-exemplar-source
1,682
arch/powerpc/boot/util.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copied from <file:arch/powerpc/kernel/misc_32.S> * * This file contains miscellaneous low-level functions. * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * Largely rewritten by Cort Dougan (cort@cs.nmt.edu) * and Paul Mackerras. * * kexec bits: * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com> * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz */ #include "ppc_asm.h" #define SPRN_PVR 0x11F /* Processor Version Register */ .text /* udelay needs to know the period of the * timebase in nanoseconds. This used to be hardcoded to be 60ns * (period of 66MHz/4). Now a variable is used that is initialized to * 60 for backward compatibility, but it can be overridden as necessary * with code something like this: * extern unsigned long timebase_period_ns; * timebase_period_ns = 1000000000 / bd->bi_tbfreq; */ .data .globl timebase_period_ns timebase_period_ns: .long 60 .text /* * Delay for a number of microseconds */ .globl udelay udelay: mulli r4,r3,1000 /* nanoseconds */ /* Change r4 to be the number of ticks using: * (nanoseconds + (timebase_period_ns - 1 )) / timebase_period_ns * timebase_period_ns defaults to 60 (16.6MHz) */ mflr r5 bcl 20,31,0f 0: mflr r6 mtlr r5 addis r5,r6,(timebase_period_ns-0b)@ha lwz r5,(timebase_period_ns-0b)@l(r5) add r4,r4,r5 addi r4,r4,-1 divw r4,r4,r5 /* BUS ticks */ 1: MFTBU(r5) MFTBL(r6) MFTBU(r7) cmpw 0,r5,r7 bne 1b /* Get [synced] base time */ addc r9,r6,r4 /* Compute end time */ addze r8,r5 2: MFTBU(r5) cmpw 0,r5,r8 blt 2b bgt 3f MFTBL(r6) cmpw 0,r6,r9 blt 2b 3: blr
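The tick conversion in udelay above is a round-up division: microseconds become nanoseconds, then timebase ticks, padded by timebase_period_ns - 1 so the delay can never come up short. A hedged C restatement (the function name is illustrative):

    #include <stdint.h>

    static uint32_t udelay_ticks(uint32_t usecs, uint32_t period_ns)
    {
        uint32_t ns = usecs * 1000;              /* mulli r4,r3,1000 */
        return (ns + period_ns - 1) / period_ns; /* add, addi -1, divw */
    }
    /* With the default 60 ns period, udelay_ticks(1, 60) == 17 ticks. */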
aixcc-public/challenge-001-exemplar-source
1,203
arch/powerpc/boot/opal-calls.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) 2016 IBM Corporation. */ #include "ppc_asm.h" #include "../include/asm/opal-api.h" .text .globl opal_kentry opal_kentry: /* r3 is the fdt ptr */ mtctr r4 li r4, 0 li r5, 0 li r6, 0 li r7, 0 LOAD_REG_ADDR(r11, opal) ld r8,0(r11) ld r9,8(r11) bctr #define OPAL_CALL(name, token) \ .globl name; \ name: \ li r0, token; \ b opal_call; opal_call: mflr r11 std r11,16(r1) mfcr r12 stw r12,8(r1) mr r13,r2 /* Set opal return address */ LOAD_REG_ADDR(r11, opal_return) mtlr r11 mfmsr r12 /* switch to BE when we enter OPAL */ li r11,MSR_LE andc r12,r12,r11 mtspr SPRN_HSRR1,r12 /* load the opal call entry point and base */ LOAD_REG_ADDR(r11, opal) ld r12,8(r11) ld r2,0(r11) mtspr SPRN_HSRR0,r12 hrfid opal_return: FIXUP_ENDIAN mr r2,r13; lwz r11,8(r1); ld r12,16(r1) mtcr r11; mtlr r12 blr OPAL_CALL(opal_console_write, OPAL_CONSOLE_WRITE); OPAL_CALL(opal_console_read, OPAL_CONSOLE_READ); OPAL_CALL(opal_console_write_buffer_space, OPAL_CONSOLE_WRITE_BUFFER_SPACE); OPAL_CALL(opal_poll_events, OPAL_POLL_EVENTS); OPAL_CALL(opal_console_flush, OPAL_CONSOLE_FLUSH);
aixcc-public/challenge-001-exemplar-source
3,603
arch/powerpc/boot/string.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) Paul Mackerras 1997. * * NOTE: this code runs in 32 bit mode and is packaged as ELF32. */ #include "ppc_asm.h" .text .globl strcpy strcpy: addi r5,r3,-1 addi r4,r4,-1 1: lbzu r0,1(r4) cmpwi 0,r0,0 stbu r0,1(r5) bne 1b blr .globl strncpy strncpy: cmpwi 0,r5,0 beqlr mtctr r5 addi r6,r3,-1 addi r4,r4,-1 1: lbzu r0,1(r4) cmpwi 0,r0,0 stbu r0,1(r6) bdnzf 2,1b /* dec ctr, branch if ctr != 0 && !cr0.eq */ blr .globl strcat strcat: addi r5,r3,-1 addi r4,r4,-1 1: lbzu r0,1(r5) cmpwi 0,r0,0 bne 1b addi r5,r5,-1 1: lbzu r0,1(r4) cmpwi 0,r0,0 stbu r0,1(r5) bne 1b blr .globl strchr strchr: addi r3,r3,-1 1: lbzu r0,1(r3) cmpw 0,r0,r4 beqlr cmpwi 0,r0,0 bne 1b li r3,0 blr .globl strcmp strcmp: addi r5,r3,-1 addi r4,r4,-1 1: lbzu r3,1(r5) cmpwi 1,r3,0 lbzu r0,1(r4) subf. r3,r0,r3 beqlr 1 beq 1b blr .globl strncmp strncmp: mtctr r5 addi r5,r3,-1 addi r4,r4,-1 1: lbzu r3,1(r5) cmpwi 1,r3,0 lbzu r0,1(r4) subf. r3,r0,r3 beqlr 1 bdnzt eq,1b blr .globl strlen strlen: addi r4,r3,-1 1: lbzu r0,1(r4) cmpwi 0,r0,0 bne 1b subf r3,r3,r4 blr .globl memset memset: rlwimi r4,r4,8,16,23 rlwimi r4,r4,16,0,15 addi r6,r3,-4 cmplwi 0,r5,4 blt 7f stwu r4,4(r6) beqlr andi. r0,r6,3 add r5,r0,r5 subf r6,r0,r6 rlwinm r0,r5,32-2,2,31 mtctr r0 bdz 6f 1: stwu r4,4(r6) bdnz 1b 6: andi. r5,r5,3 7: cmpwi 0,r5,0 beqlr mtctr r5 addi r6,r6,3 8: stbu r4,1(r6) bdnz 8b blr .globl memmove memmove: cmplw 0,r3,r4 bgt backwards_memcpy /* fall through */ .globl memcpy memcpy: rlwinm. r7,r5,32-3,3,31 /* r7 = r5 >> 3 */ addi r6,r3,-4 addi r4,r4,-4 beq 3f /* if less than 8 bytes to do */ andi. r0,r6,3 /* get dest word aligned */ mtctr r7 bne 5f andi. r0,r4,3 /* check src word aligned too */ bne 3f 1: lwz r7,4(r4) lwzu r8,8(r4) stw r7,4(r6) stwu r8,8(r6) bdnz 1b andi. r5,r5,7 2: cmplwi 0,r5,4 blt 3f lwzu r0,4(r4) addi r5,r5,-4 stwu r0,4(r6) 3: cmpwi 0,r5,0 beqlr mtctr r5 addi r4,r4,3 addi r6,r6,3 4: lbzu r0,1(r4) stbu r0,1(r6) bdnz 4b blr 5: subfic r0,r0,4 cmpw cr1,r0,r5 add r7,r0,r4 andi. r7,r7,3 /* will source be word-aligned too? */ ble cr1,3b bne 3b /* do byte-by-byte if not */ mtctr r0 6: lbz r7,4(r4) addi r4,r4,1 stb r7,4(r6) addi r6,r6,1 bdnz 6b subf r5,r0,r5 rlwinm. r7,r5,32-3,3,31 beq 2b mtctr r7 b 1b .globl backwards_memcpy backwards_memcpy: rlwinm. r7,r5,32-3,3,31 /* r7 = r5 >> 3 */ add r6,r3,r5 add r4,r4,r5 beq 3f andi. r0,r6,3 mtctr r7 bne 5f andi. r0,r4,3 bne 3f 1: lwz r7,-4(r4) lwzu r8,-8(r4) stw r7,-4(r6) stwu r8,-8(r6) bdnz 1b andi. r5,r5,7 2: cmplwi 0,r5,4 blt 3f lwzu r0,-4(r4) subi r5,r5,4 stwu r0,-4(r6) 3: cmpwi 0,r5,0 beqlr mtctr r5 4: lbzu r0,-1(r4) stbu r0,-1(r6) bdnz 4b blr 5: cmpw cr1,r0,r5 subf r7,r0,r4 andi. r7,r7,3 ble cr1,3b bne 3b mtctr r0 6: lbzu r7,-1(r4) stbu r7,-1(r6) bdnz 6b subf r5,r0,r5 rlwinm. r7,r5,32-3,3,31 beq 2b mtctr r7 b 1b .globl memchr memchr: cmpwi 0,r5,0 blelr mtctr r5 addi r3,r3,-1 1: lbzu r0,1(r3) cmpw r0,r4 beqlr bdnz 1b li r3,0 blr .globl memcmp memcmp: cmpwi 0,r5,0 ble 2f mtctr r5 addi r6,r3,-1 addi r4,r4,-1 1: lbzu r3,1(r6) lbzu r0,1(r4) subf. r3,r0,r3 bdnzt 2,1b blr 2: li r3,0 blr /* * Flush the dcache and invalidate the icache for a range of addresses. * * flush_cache(addr, len) */ .global flush_cache flush_cache: addi 4,4,0x1f /* len = (len + 0x1f) / 0x20 */ rlwinm. 4,4,27,5,31 mtctr 4 beqlr 1: dcbf 0,3 icbi 0,3 addi 3,3,0x20 bdnz 1b sync isync blr
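The memcpy above commits to the two-words-per-iteration loop only after a short byte prologue aligns the destination, and only if that prologue leaves the source word-aligned too; otherwise it degrades to a byte loop. A simplified C model of that decision, assuming nothing about the kernel's real helpers:

    #include <stddef.h>
    #include <stdint.h>

    static void *memcpy_model(void *dst, const void *src, size_t n)
    {
        unsigned char *d = dst;
        const unsigned char *s = src;

        /* Byte prologue: align the destination, but only when the
         * source will be word-aligned afterwards too (the "will
         * source be word-aligned too?" test above). */
        if (n >= 8 && ((uintptr_t)d & 3)) {
            size_t head = 4 - ((uintptr_t)d & 3);      /* subfic r0,r0,4 */
            if ((((uintptr_t)s + head) & 3) == 0) {
                n -= head;
                while (head--)
                    *d++ = *s++;
            }
            /* else fall through: the byte loop below handles it all */
        }
        /* Word loop (illustrative type-punning; the asm uses lwzu/stwu). */
        while (n >= 4 && !(((uintptr_t)d | (uintptr_t)s) & 3)) {
            *(uint32_t *)d = *(const uint32_t *)s;
            d += 4; s += 4; n -= 4;
        }
        while (n) {                                    /* byte epilogue */
            *d++ = *s++;
            n--;
        }
        return dst;
    }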
aixcc-public/challenge-001-exemplar-source
6,798
arch/powerpc/boot/crt0.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) Paul Mackerras 1997. * * Adapted for 64 bit LE PowerPC by Andrew Tauferner */ #include "ppc_asm.h" RELA = 7 RELASZ = 8 RELAENT = 9 .data /* A procedure descriptor used when booting this as a COFF file. * When making COFF, this comes first in the link and we're * linked at 0x500000. */ .globl _zimage_start_opd _zimage_start_opd: .long 0x500000, 0, 0, 0 .text b _zimage_start #ifdef __powerpc64__ .balign 8 p_start: .8byte _start p_etext: .8byte _etext p_bss_start: .8byte __bss_start p_end: .8byte _end p_toc: .8byte .TOC. - p_base p_dyn: .8byte __dynamic_start - p_base p_rela: .8byte __rela_dyn_start - p_base p_prom: .8byte 0 .weak _platform_stack_top p_pstack: .8byte _platform_stack_top #else p_start: .long _start p_etext: .long _etext p_bss_start: .long __bss_start p_end: .long _end .weak _platform_stack_top p_pstack: .long _platform_stack_top #endif .weak _zimage_start _zimage_start: .globl _zimage_start_lib _zimage_start_lib: /* Work out the offset between the address we were linked at and the address where we're running. */ bl .+4 p_base: mflr r10 /* r10 now points to runtime addr of p_base */ #ifndef __powerpc64__ /* grab the link address of the dynamic section in r11 */ addis r11,r10,(_GLOBAL_OFFSET_TABLE_-p_base)@ha lwz r11,(_GLOBAL_OFFSET_TABLE_-p_base)@l(r11) cmpwi r11,0 beq 3f /* if not linked -pie */ /* get the runtime address of the dynamic section in r12 */ .weak __dynamic_start addis r12,r10,(__dynamic_start-p_base)@ha addi r12,r12,(__dynamic_start-p_base)@l subf r11,r11,r12 /* runtime - linktime offset */ /* The dynamic section contains a series of tagged entries. * We need the RELA and RELACOUNT entries. */ li r9,0 li r0,0 9: lwz r8,0(r12) /* get tag */ cmpwi r8,0 beq 10f /* end of list */ cmpwi r8,RELA bne 11f lwz r9,4(r12) /* get RELA pointer in r9 */ b 12f 11: cmpwi r8,RELASZ bne .Lcheck_for_relaent lwz r0,4(r12) /* get RELASZ value in r0 */ b 12f .Lcheck_for_relaent: cmpwi r8,RELAENT bne 12f lwz r14,4(r12) /* get RELAENT value in r14 */ 12: addi r12,r12,8 b 9b /* The relocation section contains a list of relocations. * We now do the R_PPC_RELATIVE ones, which point to words * which need to be initialized with addend + offset */ 10: /* skip relocation if we don't have both */ cmpwi r0,0 beq 3f cmpwi r9,0 beq 3f cmpwi r14,0 beq 3f add r9,r9,r11 /* Relocate RELA pointer */ divwu r0,r0,r14 /* RELASZ / RELAENT */ mtctr r0 2: lbz r0,4+3(r9) /* ELF32_R_INFO(reloc->r_info) */ cmpwi r0,22 /* R_PPC_RELATIVE */ bne .Lnext lwz r12,0(r9) /* reloc->r_offset */ lwz r0,8(r9) /* reloc->r_addend */ add r0,r0,r11 stwx r0,r11,r12 .Lnext: add r9,r9,r14 bdnz 2b /* Do a cache flush for our text, in case the loader didn't */ 3: lwz r9,p_start-p_base(r10) /* note: these are relocated now */ lwz r8,p_etext-p_base(r10) 4: dcbf r0,r9 icbi r0,r9 addi r9,r9,0x20 cmplw cr0,r9,r8 blt 4b sync isync /* Clear the BSS */ lwz r9,p_bss_start-p_base(r10) lwz r8,p_end-p_base(r10) li r0,0 5: stw r0,0(r9) addi r9,r9,4 cmplw cr0,r9,r8 blt 5b /* Possibly set up a custom stack */ lwz r8,p_pstack-p_base(r10) cmpwi r8,0 beq 6f lwz r1,0(r8) li r0,0 stwu r0,-16(r1) /* establish a stack frame */ 6: #else /* __powerpc64__ */ /* Save the prom pointer at p_prom. */ std r5,(p_prom-p_base)(r10) /* Set r2 to the TOC. */ ld r2,(p_toc-p_base)(r10) add r2,r2,r10 /* Grab the link address of the dynamic section in r11. 
*/ ld r11,-32768(r2) cmpwi r11,0 beq 3f /* if not linked -pie then no dynamic section */ ld r11,(p_dyn-p_base)(r10) add r11,r11,r10 ld r9,(p_rela-p_base)(r10) add r9,r9,r10 li r13,0 li r8,0 9: ld r12,0(r11) /* get tag */ cmpdi r12,0 beq 12f /* end of list */ cmpdi r12,RELA bne 10f ld r13,8(r11) /* get RELA pointer in r13 */ b 11f 10: cmpwi r12,RELASZ bne .Lcheck_for_relaent lwz r8,8(r11) /* get RELASZ value in r8 */ b 11f .Lcheck_for_relaent: cmpwi r12,RELAENT bne 11f lwz r14,8(r11) /* get RELAENT value in r14 */ 11: addi r11,r11,16 b 9b 12: cmpdi r13,0 /* check we have RELA, RELASZ and RELAENT */ cmpdi cr1,r8,0 beq 3f beq cr1,3f cmpdi r14,0 beq 3f /* Calculate the runtime offset. */ subf r13,r13,r9 /* Run through the list of relocations and process the * R_PPC64_RELATIVE ones. */ divdu r8,r8,r14 /* RELASZ / RELAENT */ mtctr r8 13: ld r0,8(r9) /* ELF64_R_TYPE(reloc->r_info) */ cmpdi r0,22 /* R_PPC64_RELATIVE */ bne .Lnext ld r12,0(r9) /* reloc->r_offset */ ld r0,16(r9) /* reloc->r_addend */ add r0,r0,r13 stdx r0,r13,r12 .Lnext: add r9,r9,r14 bdnz 13b /* Do a cache flush for our text, in case the loader didn't */ 3: ld r9,p_start-p_base(r10) /* note: these are relocated now */ ld r8,p_etext-p_base(r10) 4: dcbf r0,r9 icbi r0,r9 addi r9,r9,0x20 cmpld cr0,r9,r8 blt 4b sync isync /* Clear the BSS */ ld r9,p_bss_start-p_base(r10) ld r8,p_end-p_base(r10) li r0,0 5: std r0,0(r9) addi r9,r9,8 cmpld cr0,r9,r8 blt 5b /* Possibly set up a custom stack */ ld r8,p_pstack-p_base(r10) cmpdi r8,0 beq 6f ld r1,0(r8) li r0,0 stdu r0,-112(r1) /* establish a stack frame */ 6: #endif /* __powerpc64__ */ /* Call platform_init() */ bl platform_init /* Call start */ b start #ifdef __powerpc64__ #define PROM_FRAME_SIZE 512 .macro OP_REGS op, width, start, end, base, offset .Lreg=\start .rept (\end - \start + 1) \op .Lreg,\offset+\width*.Lreg(\base) .Lreg=.Lreg+1 .endr .endm #define SAVE_GPRS(start, end, base) OP_REGS std, 8, start, end, base, 0 #define REST_GPRS(start, end, base) OP_REGS ld, 8, start, end, base, 0 #define SAVE_GPR(n, base) SAVE_GPRS(n, n, base) #define REST_GPR(n, base) REST_GPRS(n, n, base) /* prom handles the jump into and return from firmware. The prom args pointer is loaded in r3. */ .globl prom prom: mflr r0 std r0,16(r1) stdu r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */ SAVE_GPR(2, r1) SAVE_GPRS(13, 31, r1) mfcr r10 std r10,8*32(r1) mfmsr r10 std r10,8*33(r1) /* remove MSR_LE from msr but keep MSR_SF */ mfmsr r10 rldicr r10,r10,0,62 mtsrr1 r10 /* Load FW address, set LR to label 1, and jump to FW */ bl 0f 0: mflr r10 addi r11,r10,(1f-0b) mtlr r11 ld r10,(p_prom-0b)(r10) mtsrr0 r10 rfid 1: /* Return from OF */ FIXUP_ENDIAN /* Restore registers and return. */ rldicl r1,r1,0,32 /* Restore the MSR (back to 64 bits) */ ld r10,8*(33)(r1) mtmsr r10 isync /* Restore other registers */ REST_GPR(2, r1) REST_GPRS(13, 31, r1) ld r10,8*32(r1) mtcr r10 addi r1,r1,PROM_FRAME_SIZE ld r0,16(r1) mtlr r0 blr #endif
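The 64-bit relocation loop above (labels 9 through 13) is compact enough that a C restatement may help: walk the RELASZ / RELAENT entries of the .rela.dyn table and patch every R_PPC64_RELATIVE (type 22) target with the run-time offset plus the addend. This sketch assumes the standard ELF64 Rela layout; the function name is illustrative.

    #include <stdint.h>

    struct elf64_rela {
        uint64_t r_offset;   /* ld r12,0(r9) */
        uint64_t r_info;     /* ld r0,8(r9): low 32 bits hold the type */
        int64_t  r_addend;   /* ld r0,16(r9) */
    };

    #define R_PPC64_RELATIVE 22

    static void apply_relative_relocs(struct elf64_rela *rela,
                                      uint64_t relasz, uint64_t relaent,
                                      uint64_t runtime_offset)
    {
        /* relaent must be non-zero; the assembly bails out earlier if not */
        for (uint64_t i = 0; i < relasz / relaent; i++) {
            if ((uint32_t)rela[i].r_info == R_PPC64_RELATIVE)
                *(uint64_t *)(runtime_offset + rela[i].r_offset) =
                    runtime_offset + rela[i].r_addend;   /* stdx r0,r13,r12 */
        }
    }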
aixcc-public/challenge-001-exemplar-source
6,125
arch/powerpc/boot/crtsavres.S
/* * Special support for eabi and SVR4 * * Copyright (C) 1995, 1996, 1998, 2000, 2001 Free Software Foundation, Inc. * Copyright 2008 Freescale Semiconductor, Inc. * Written By Michael Meissner * * Based on gcc/config/rs6000/crtsavres.asm from gcc * * This file is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * In addition to the permissions in the GNU General Public License, the * Free Software Foundation gives you unlimited permission to link the * compiled version of this file with other programs, and to distribute * those programs without any restriction coming from the use of this * file. (The General Public License restrictions do apply in other * respects; for example, they cover modification of the file, and * distribution when not linked into another program.) * * This file is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 51 Franklin Street, Fifth Floor, * Boston, MA 02110-1301, USA. * * As a special exception, if you link this library with files * compiled with GCC to produce an executable, this does not cause * the resulting executable to be covered by the GNU General Public License. * This exception does not however invalidate any other reasons why * the executable file might be covered by the GNU General Public License. */ #ifdef __powerpc64__ #error "On PPC64, FPR save/restore functions are provided by the linker." #endif .file "crtsavres.S" .section ".text" #define _GLOBAL(name) \ .type name,@function; \ .globl name; \ name: /* Routines for saving integer registers, called by the compiler. */ /* Called with r11 pointing to the stack header word of the caller of the */ /* function, just beyond the end of the integer save area. */ _GLOBAL(_savegpr_14) _GLOBAL(_save32gpr_14) stw 14,-72(11) /* save gp registers */ _GLOBAL(_savegpr_15) _GLOBAL(_save32gpr_15) stw 15,-68(11) _GLOBAL(_savegpr_16) _GLOBAL(_save32gpr_16) stw 16,-64(11) _GLOBAL(_savegpr_17) _GLOBAL(_save32gpr_17) stw 17,-60(11) _GLOBAL(_savegpr_18) _GLOBAL(_save32gpr_18) stw 18,-56(11) _GLOBAL(_savegpr_19) _GLOBAL(_save32gpr_19) stw 19,-52(11) _GLOBAL(_savegpr_20) _GLOBAL(_save32gpr_20) stw 20,-48(11) _GLOBAL(_savegpr_21) _GLOBAL(_save32gpr_21) stw 21,-44(11) _GLOBAL(_savegpr_22) _GLOBAL(_save32gpr_22) stw 22,-40(11) _GLOBAL(_savegpr_23) _GLOBAL(_save32gpr_23) stw 23,-36(11) _GLOBAL(_savegpr_24) _GLOBAL(_save32gpr_24) stw 24,-32(11) _GLOBAL(_savegpr_25) _GLOBAL(_save32gpr_25) stw 25,-28(11) _GLOBAL(_savegpr_26) _GLOBAL(_save32gpr_26) stw 26,-24(11) _GLOBAL(_savegpr_27) _GLOBAL(_save32gpr_27) stw 27,-20(11) _GLOBAL(_savegpr_28) _GLOBAL(_save32gpr_28) stw 28,-16(11) _GLOBAL(_savegpr_29) _GLOBAL(_save32gpr_29) stw 29,-12(11) _GLOBAL(_savegpr_30) _GLOBAL(_save32gpr_30) stw 30,-8(11) _GLOBAL(_savegpr_31) _GLOBAL(_save32gpr_31) stw 31,-4(11) blr /* Routines for restoring integer registers, called by the compiler. */ /* Called with r11 pointing to the stack header word of the caller of the */ /* function, just beyond the end of the integer restore area. 
*/ _GLOBAL(_restgpr_14) _GLOBAL(_rest32gpr_14) lwz 14,-72(11) /* restore gp registers */ _GLOBAL(_restgpr_15) _GLOBAL(_rest32gpr_15) lwz 15,-68(11) _GLOBAL(_restgpr_16) _GLOBAL(_rest32gpr_16) lwz 16,-64(11) _GLOBAL(_restgpr_17) _GLOBAL(_rest32gpr_17) lwz 17,-60(11) _GLOBAL(_restgpr_18) _GLOBAL(_rest32gpr_18) lwz 18,-56(11) _GLOBAL(_restgpr_19) _GLOBAL(_rest32gpr_19) lwz 19,-52(11) _GLOBAL(_restgpr_20) _GLOBAL(_rest32gpr_20) lwz 20,-48(11) _GLOBAL(_restgpr_21) _GLOBAL(_rest32gpr_21) lwz 21,-44(11) _GLOBAL(_restgpr_22) _GLOBAL(_rest32gpr_22) lwz 22,-40(11) _GLOBAL(_restgpr_23) _GLOBAL(_rest32gpr_23) lwz 23,-36(11) _GLOBAL(_restgpr_24) _GLOBAL(_rest32gpr_24) lwz 24,-32(11) _GLOBAL(_restgpr_25) _GLOBAL(_rest32gpr_25) lwz 25,-28(11) _GLOBAL(_restgpr_26) _GLOBAL(_rest32gpr_26) lwz 26,-24(11) _GLOBAL(_restgpr_27) _GLOBAL(_rest32gpr_27) lwz 27,-20(11) _GLOBAL(_restgpr_28) _GLOBAL(_rest32gpr_28) lwz 28,-16(11) _GLOBAL(_restgpr_29) _GLOBAL(_rest32gpr_29) lwz 29,-12(11) _GLOBAL(_restgpr_30) _GLOBAL(_rest32gpr_30) lwz 30,-8(11) _GLOBAL(_restgpr_31) _GLOBAL(_rest32gpr_31) lwz 31,-4(11) blr /* Routines for restoring integer registers, called by the compiler. */ /* Called with r11 pointing to the stack header word of the caller of the */ /* function, just beyond the end of the integer restore area. */ _GLOBAL(_restgpr_14_x) _GLOBAL(_rest32gpr_14_x) lwz 14,-72(11) /* restore gp registers */ _GLOBAL(_restgpr_15_x) _GLOBAL(_rest32gpr_15_x) lwz 15,-68(11) _GLOBAL(_restgpr_16_x) _GLOBAL(_rest32gpr_16_x) lwz 16,-64(11) _GLOBAL(_restgpr_17_x) _GLOBAL(_rest32gpr_17_x) lwz 17,-60(11) _GLOBAL(_restgpr_18_x) _GLOBAL(_rest32gpr_18_x) lwz 18,-56(11) _GLOBAL(_restgpr_19_x) _GLOBAL(_rest32gpr_19_x) lwz 19,-52(11) _GLOBAL(_restgpr_20_x) _GLOBAL(_rest32gpr_20_x) lwz 20,-48(11) _GLOBAL(_restgpr_21_x) _GLOBAL(_rest32gpr_21_x) lwz 21,-44(11) _GLOBAL(_restgpr_22_x) _GLOBAL(_rest32gpr_22_x) lwz 22,-40(11) _GLOBAL(_restgpr_23_x) _GLOBAL(_rest32gpr_23_x) lwz 23,-36(11) _GLOBAL(_restgpr_24_x) _GLOBAL(_rest32gpr_24_x) lwz 24,-32(11) _GLOBAL(_restgpr_25_x) _GLOBAL(_rest32gpr_25_x) lwz 25,-28(11) _GLOBAL(_restgpr_26_x) _GLOBAL(_rest32gpr_26_x) lwz 26,-24(11) _GLOBAL(_restgpr_27_x) _GLOBAL(_rest32gpr_27_x) lwz 27,-20(11) _GLOBAL(_restgpr_28_x) _GLOBAL(_rest32gpr_28_x) lwz 28,-16(11) _GLOBAL(_restgpr_29_x) _GLOBAL(_rest32gpr_29_x) lwz 29,-12(11) _GLOBAL(_restgpr_30_x) _GLOBAL(_rest32gpr_30_x) lwz 30,-8(11) _GLOBAL(_restgpr_31_x) _GLOBAL(_rest32gpr_31_x) lwz 0,4(11) lwz 31,-4(11) mtlr 0 mr 1,11 blr
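The save and restore stubs above all index one fixed layout: register rN lives at (N - 32) * 4 bytes below r11, so r14 sits at -72 and r31 at -4, and each entry point simply falls through to the next. A one-line helper capturing the formula (hypothetical, for illustration only):

    static int gpr_save_offset(int n)   /* n in 14..31 */
    {
        return (n - 32) * 4;            /* e.g. stw 14,-72(11) */
    }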
aixcc-public/challenge-001-exemplar-source
2,970
arch/powerpc/boot/wii-head.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * arch/powerpc/boot/wii-head.S * * Nintendo Wii bootwrapper entry. * Copyright (C) 2008-2009 The GameCube Linux Team * Copyright (C) 2008,2009 Albert Herranz */ #include "ppc_asm.h" /* * The entry code makes no assumptions about: * - whether the data and instruction caches are enabled * - whether the MMU is enabled * - whether the high BATs are enabled * * We enable the high BATs, enable the caches if not already enabled, * enable the MMU with an identity mapping scheme and jump to the start code. */ .text .globl _zimage_start _zimage_start: /* turn the MMU off */ mfmsr 9 rlwinm 9, 9, 0, ~((1<<4)|(1<<5)) /* MSR_DR|MSR_IR */ bcl 20, 31, 1f 1: mflr 8 clrlwi 8, 8, 3 /* convert to a real address */ addi 8, 8, _mmu_off - 1b mtsrr0 8 mtsrr1 9 rfi _mmu_off: /* MMU disabled */ /* setup BATs */ isync li 8, 0 mtspr 0x210, 8 /* IBAT0U */ mtspr 0x212, 8 /* IBAT1U */ mtspr 0x214, 8 /* IBAT2U */ mtspr 0x216, 8 /* IBAT3U */ mtspr 0x218, 8 /* DBAT0U */ mtspr 0x21a, 8 /* DBAT1U */ mtspr 0x21c, 8 /* DBAT2U */ mtspr 0x21e, 8 /* DBAT3U */ mtspr 0x230, 8 /* IBAT4U */ mtspr 0x232, 8 /* IBAT5U */ mtspr 0x234, 8 /* IBAT6U */ mtspr 0x236, 8 /* IBAT7U */ mtspr 0x238, 8 /* DBAT4U */ mtspr 0x23a, 8 /* DBAT5U */ mtspr 0x23c, 8 /* DBAT6U */ mtspr 0x23e, 8 /* DBAT7U */ li 8, 0x01ff /* first 16MiB */ li 9, 0x0002 /* rw */ mtspr 0x211, 9 /* IBAT0L */ mtspr 0x210, 8 /* IBAT0U */ mtspr 0x219, 9 /* DBAT0L */ mtspr 0x218, 8 /* DBAT0U */ lis 8, 0x0c00 /* I/O mem */ ori 8, 8, 0x3ff /* 32MiB */ lis 9, 0x0c00 ori 9, 9, 0x002a /* uncached, guarded, rw */ mtspr 0x21b, 9 /* DBAT1L */ mtspr 0x21a, 8 /* DBAT1U */ lis 8, 0x0100 /* next 8MiB */ ori 8, 8, 0x00ff /* 8MiB */ lis 9, 0x0100 ori 9, 9, 0x0002 /* rw */ mtspr 0x215, 9 /* IBAT2L */ mtspr 0x214, 8 /* IBAT2U */ mtspr 0x21d, 9 /* DBAT2L */ mtspr 0x21c, 8 /* DBAT2U */ lis 8, 0x1000 /* MEM2 */ ori 8, 8, 0x07ff /* 64MiB */ lis 9, 0x1000 ori 9, 9, 0x0002 /* rw */ mtspr 0x216, 8 /* IBAT3U */ mtspr 0x217, 9 /* IBAT3L */ mtspr 0x21e, 8 /* DBAT3U */ mtspr 0x21f, 9 /* DBAT3L */ /* enable the high BATs */ mfspr 8, 0x3f3 /* HID4 */ oris 8, 8, 0x0200 mtspr 0x3f3, 8 /* HID4 */ /* enable and invalidate the caches if not already enabled */ mfspr 8, 0x3f0 /* HID0 */ andi. 0, 8, (1<<15) /* HID0_ICE */ bne 1f ori 8, 8, (1<<15)|(1<<11) /* HID0_ICE|HID0_ICFI*/ 1: andi. 0, 8, (1<<14) /* HID0_DCE */ bne 1f ori 8, 8, (1<<14)|(1<<10) /* HID0_DCE|HID0_DCFI*/ 1: mtspr 0x3f0, 8 /* HID0 */ isync /* initialize arguments */ li 3, 0 li 4, 0 li 5, 0 /* turn the MMU on */ bcl 20, 31, 1f 1: mflr 8 addi 8, 8, _mmu_on - 1b mfmsr 9 ori 9, 9, (1<<4)|(1<<5) /* MSR_DR|MSR_IR */ mtsrr0 8 mtsrr1 9 sync rfi _mmu_on: /* turn on the front blue led (aka: yay! we got here!) */ lis 8, 0x0d00 ori 8, 8, 0x00c0 lwz 9, 0(8) ori 9, 9, 0x20 stw 9, 0(8) b _zimage_start_lib
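The magic constants in the BAT setup above follow directly from the 750CL BAT encoding: the upper register is BEPI | BL << 2 | Vs | Vp, where a block-length field BL maps (BL + 1) * 128 KiB, and the lower register is BRPN | WIMG << 3 | PP (so 0x002a is I and G set plus PP = rw). A small C sketch reproducing the upper-register values; the helper name is illustrative:

    #include <stdint.h>

    static uint32_t batu(uint32_t bepi, uint32_t size_bytes)
    {
        uint32_t bl = size_bytes / (128 * 1024) - 1;  /* block length field */
        return bepi | (bl << 2) | 0x3;                /* Vs | Vp */
    }
    /* batu(0, 16 << 20) == 0x01ff, matching "li 8, 0x01ff" above, and
     * batu(0x10000000, 64 << 20) == 0x100007ff for the MEM2 BAT3 pair. */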
aixcc-public/challenge-001-exemplar-source
2,232
arch/powerpc/boot/gamecube-head.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * arch/powerpc/boot/gamecube-head.S * * Nintendo GameCube bootwrapper entry. * Copyright (C) 2004-2009 The GameCube Linux Team * Copyright (C) 2008,2009 Albert Herranz */ #include "ppc_asm.h" /* * The entry code makes no assumptions about: * - whether the data and instruction caches are enabled * - whether the MMU is enabled * * We enable the caches if not already enabled, enable the MMU with an * identity mapping scheme and jump to the start code. */ .text .globl _zimage_start _zimage_start: /* turn the MMU off */ mfmsr 9 rlwinm 9, 9, 0, ~((1<<4)|(1<<5)) /* MSR_DR|MSR_IR */ bcl 20, 31, 1f 1: mflr 8 clrlwi 8, 8, 3 /* convert to a real address */ addi 8, 8, _mmu_off - 1b mtsrr0 8 mtsrr1 9 rfi _mmu_off: /* MMU disabled */ /* setup BATs */ isync li 8, 0 mtspr 0x210, 8 /* IBAT0U */ mtspr 0x212, 8 /* IBAT1U */ mtspr 0x214, 8 /* IBAT2U */ mtspr 0x216, 8 /* IBAT3U */ mtspr 0x218, 8 /* DBAT0U */ mtspr 0x21a, 8 /* DBAT1U */ mtspr 0x21c, 8 /* DBAT2U */ mtspr 0x21e, 8 /* DBAT3U */ li 8, 0x01ff /* first 16MiB */ li 9, 0x0002 /* rw */ mtspr 0x211, 9 /* IBAT0L */ mtspr 0x210, 8 /* IBAT0U */ mtspr 0x219, 9 /* DBAT0L */ mtspr 0x218, 8 /* DBAT0U */ lis 8, 0x0c00 /* I/O mem */ ori 8, 8, 0x3ff /* 32MiB */ lis 9, 0x0c00 ori 9, 9, 0x002a /* uncached, guarded, rw */ mtspr 0x21b, 9 /* DBAT1L */ mtspr 0x21a, 8 /* DBAT1U */ lis 8, 0x0100 /* next 8MiB */ ori 8, 8, 0x00ff /* 8MiB */ lis 9, 0x0100 ori 9, 9, 0x0002 /* rw */ mtspr 0x215, 9 /* IBAT2L */ mtspr 0x214, 8 /* IBAT2U */ mtspr 0x21d, 9 /* DBAT2L */ mtspr 0x21c, 8 /* DBAT2U */ /* enable and invalidate the caches if not already enabled */ mfspr 8, 0x3f0 /* HID0 */ andi. 0, 8, (1<<15) /* HID0_ICE */ bne 1f ori 8, 8, (1<<15)|(1<<11) /* HID0_ICE|HID0_ICFI*/ 1: andi. 0, 8, (1<<14) /* HID0_DCE */ bne 1f ori 8, 8, (1<<14)|(1<<10) /* HID0_DCE|HID0_DCFI*/ 1: mtspr 0x3f0, 8 /* HID0 */ isync /* initialize arguments */ li 3, 0 li 4, 0 li 5, 0 /* turn the MMU on */ bcl 20, 31, 1f 1: mflr 8 addi 8, 8, _mmu_on - 1b mfmsr 9 ori 9, 9, (1<<4)|(1<<5) /* MSR_DR|MSR_IR */ mtsrr0 8 mtsrr1 9 sync rfi _mmu_on: b _zimage_start_lib
aixcc-public/challenge-001-exemplar-source
10,478
arch/powerpc/kernel/vdso/sigtramp64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Signal trampoline for 64 bits processes in a ppc64 kernel for * use in the vDSO * * Copyright (C) 2004 Benjamin Herrenschmuidt (benh@kernel.crashing.org), IBM Corp. * Copyright (C) 2004 Alan Modra (amodra@au.ibm.com)), IBM Corp. */ #include <asm/cache.h> /* IFETCH_ALIGN_BYTES */ #include <asm/processor.h> #include <asm/ppc_asm.h> #include <asm/unistd.h> #include <asm/vdso.h> #include <asm/ptrace.h> /* XXX for __SIGNAL_FRAMESIZE */ .text /* * __kernel_start_sigtramp_rt64 and __kernel_sigtramp_rt64 together * are one function split in two parts. The kernel jumps to the former * and the signal handler indirectly (by blr) returns to the latter. * __kernel_sigtramp_rt64 needs to point to the return address so * glibc can correctly identify the trampoline stack frame. */ .balign 8 .balign IFETCH_ALIGN_BYTES V_FUNCTION_BEGIN(__kernel_start_sigtramp_rt64) .Lsigrt_start: bctrl /* call the handler */ V_FUNCTION_END(__kernel_start_sigtramp_rt64) V_FUNCTION_BEGIN(__kernel_sigtramp_rt64) addi r1, r1, __SIGNAL_FRAMESIZE li r0,__NR_rt_sigreturn sc .Lsigrt_end: V_FUNCTION_END(__kernel_sigtramp_rt64) /* The .balign 8 above and the following zeros mimic the old stack trampoline layout. The last magic value is the ucontext pointer, chosen in such a way that older libgcc unwind code returns a zero for a sigcontext pointer. */ .long 0,0,0 .quad 0,-21*8 /* Register r1 can be found at offset 8 of a pt_regs structure. A pointer to the pt_regs is stored in memory at the old sp plus PTREGS. */ #define cfa_save \ .byte 0x0f; /* DW_CFA_def_cfa_expression */ \ .uleb128 9f - 1f; /* length */ \ 1: \ .byte 0x71; .sleb128 PTREGS; /* DW_OP_breg1 */ \ .byte 0x06; /* DW_OP_deref */ \ .byte 0x23; .uleb128 RSIZE; /* DW_OP_plus_uconst */ \ .byte 0x06; /* DW_OP_deref */ \ 9: /* Register REGNO can be found at offset OFS of a pt_regs structure. A pointer to the pt_regs is stored in memory at the old sp plus PTREGS. */ #define rsave(regno, ofs) \ .byte 0x10; /* DW_CFA_expression */ \ .uleb128 regno; /* regno */ \ .uleb128 9f - 1f; /* length */ \ 1: \ .byte 0x71; .sleb128 PTREGS; /* DW_OP_breg1 */ \ .byte 0x06; /* DW_OP_deref */ \ .ifne ofs; \ .byte 0x23; .uleb128 ofs; /* DW_OP_plus_uconst */ \ .endif; \ 9: /* If msr bit 1<<25 is set, then VMX register REGNO is at offset REGNO*16 of the VMX reg struct. A pointer to the VMX reg struct is at VREGS in the pt_regs struct. This macro is for REGNO == 0, and contains 'subroutines' that the other macros jump to. */ #define vsave_msr0(regno) \ .byte 0x10; /* DW_CFA_expression */ \ .uleb128 regno + 77; /* regno */ \ .uleb128 9f - 1f; /* length */ \ 1: \ .byte 0x30 + regno; /* DW_OP_lit0 */ \ 2: \ .byte 0x40; /* DW_OP_lit16 */ \ .byte 0x1e; /* DW_OP_mul */ \ 3: \ .byte 0x71; .sleb128 PTREGS; /* DW_OP_breg1 */ \ .byte 0x06; /* DW_OP_deref */ \ .byte 0x12; /* DW_OP_dup */ \ .byte 0x23; /* DW_OP_plus_uconst */ \ .uleb128 33*RSIZE; /* msr offset */ \ .byte 0x06; /* DW_OP_deref */ \ .byte 0x0c; .long 1 << 25; /* DW_OP_const4u */ \ .byte 0x1a; /* DW_OP_and */ \ .byte 0x12; /* DW_OP_dup, ret 0 if bra taken */ \ .byte 0x30; /* DW_OP_lit0 */ \ .byte 0x29; /* DW_OP_eq */ \ .byte 0x28; .short 0x7fff; /* DW_OP_bra to end */ \ .byte 0x13; /* DW_OP_drop, pop the 0 */ \ .byte 0x23; .uleb128 VREGS; /* DW_OP_plus_uconst */ \ .byte 0x06; /* DW_OP_deref */ \ .byte 0x22; /* DW_OP_plus */ \ .byte 0x2f; .short 0x7fff; /* DW_OP_skip to end */ \ 9: /* If msr bit 1<<25 is set, then VMX register REGNO is at offset REGNO*16 of the VMX reg struct. REGNO is 1 thru 31. 
*/ #define vsave_msr1(regno) \ .byte 0x10; /* DW_CFA_expression */ \ .uleb128 regno + 77; /* regno */ \ .uleb128 9f - 1f; /* length */ \ 1: \ .byte 0x30 + regno; /* DW_OP_lit n */ \ .byte 0x2f; .short 2b - 9f; /* DW_OP_skip */ \ 9: /* If msr bit 1<<25 is set, then VMX register REGNO is at offset OFS of the VMX save block. */ #define vsave_msr2(regno, ofs) \ .byte 0x10; /* DW_CFA_expression */ \ .uleb128 regno + 77; /* regno */ \ .uleb128 9f - 1f; /* length */ \ 1: \ .byte 0x0a; .short ofs; /* DW_OP_const2u */ \ .byte 0x2f; .short 3b - 9f; /* DW_OP_skip */ \ 9: /* VMX register REGNO is at offset OFS of the VMX save area. */ #define vsave(regno, ofs) \ .byte 0x10; /* DW_CFA_expression */ \ .uleb128 regno + 77; /* regno */ \ .uleb128 9f - 1f; /* length */ \ 1: \ .byte 0x71; .sleb128 PTREGS; /* DW_OP_breg1 */ \ .byte 0x06; /* DW_OP_deref */ \ .byte 0x23; .uleb128 VREGS; /* DW_OP_plus_uconst */ \ .byte 0x06; /* DW_OP_deref */ \ .byte 0x23; .uleb128 ofs; /* DW_OP_plus_uconst */ \ 9: /* This is where the pt_regs pointer can be found on the stack. */ #define PTREGS 128+168+56 /* Size of regs. */ #define RSIZE 8 /* Size of CR reg in DWARF unwind info. */ #define CRSIZE 4 /* Offset of CR reg within a full word. */ #ifdef __LITTLE_ENDIAN__ #define CROFF 0 #else #define CROFF (RSIZE - CRSIZE) #endif /* This is the offset of the VMX reg pointer. */ #define VREGS 48*RSIZE+33*8 /* Describe where general purpose regs are saved. */ #define EH_FRAME_GEN \ cfa_save; \ rsave ( 0, 0*RSIZE); \ rsave ( 2, 2*RSIZE); \ rsave ( 3, 3*RSIZE); \ rsave ( 4, 4*RSIZE); \ rsave ( 5, 5*RSIZE); \ rsave ( 6, 6*RSIZE); \ rsave ( 7, 7*RSIZE); \ rsave ( 8, 8*RSIZE); \ rsave ( 9, 9*RSIZE); \ rsave (10, 10*RSIZE); \ rsave (11, 11*RSIZE); \ rsave (12, 12*RSIZE); \ rsave (13, 13*RSIZE); \ rsave (14, 14*RSIZE); \ rsave (15, 15*RSIZE); \ rsave (16, 16*RSIZE); \ rsave (17, 17*RSIZE); \ rsave (18, 18*RSIZE); \ rsave (19, 19*RSIZE); \ rsave (20, 20*RSIZE); \ rsave (21, 21*RSIZE); \ rsave (22, 22*RSIZE); \ rsave (23, 23*RSIZE); \ rsave (24, 24*RSIZE); \ rsave (25, 25*RSIZE); \ rsave (26, 26*RSIZE); \ rsave (27, 27*RSIZE); \ rsave (28, 28*RSIZE); \ rsave (29, 29*RSIZE); \ rsave (30, 30*RSIZE); \ rsave (31, 31*RSIZE); \ rsave (67, 32*RSIZE); /* ap, used as temp for nip */ \ rsave (65, 36*RSIZE); /* lr */ \ rsave (68, 38*RSIZE + CROFF); /* cr fields */ \ rsave (69, 38*RSIZE + CROFF); \ rsave (70, 38*RSIZE + CROFF); \ rsave (71, 38*RSIZE + CROFF); \ rsave (72, 38*RSIZE + CROFF); \ rsave (73, 38*RSIZE + CROFF); \ rsave (74, 38*RSIZE + CROFF); \ rsave (75, 38*RSIZE + CROFF) /* Describe where the FP regs are saved. 
*/ #define EH_FRAME_FP \ rsave (32, 48*RSIZE + 0*8); \ rsave (33, 48*RSIZE + 1*8); \ rsave (34, 48*RSIZE + 2*8); \ rsave (35, 48*RSIZE + 3*8); \ rsave (36, 48*RSIZE + 4*8); \ rsave (37, 48*RSIZE + 5*8); \ rsave (38, 48*RSIZE + 6*8); \ rsave (39, 48*RSIZE + 7*8); \ rsave (40, 48*RSIZE + 8*8); \ rsave (41, 48*RSIZE + 9*8); \ rsave (42, 48*RSIZE + 10*8); \ rsave (43, 48*RSIZE + 11*8); \ rsave (44, 48*RSIZE + 12*8); \ rsave (45, 48*RSIZE + 13*8); \ rsave (46, 48*RSIZE + 14*8); \ rsave (47, 48*RSIZE + 15*8); \ rsave (48, 48*RSIZE + 16*8); \ rsave (49, 48*RSIZE + 17*8); \ rsave (50, 48*RSIZE + 18*8); \ rsave (51, 48*RSIZE + 19*8); \ rsave (52, 48*RSIZE + 20*8); \ rsave (53, 48*RSIZE + 21*8); \ rsave (54, 48*RSIZE + 22*8); \ rsave (55, 48*RSIZE + 23*8); \ rsave (56, 48*RSIZE + 24*8); \ rsave (57, 48*RSIZE + 25*8); \ rsave (58, 48*RSIZE + 26*8); \ rsave (59, 48*RSIZE + 27*8); \ rsave (60, 48*RSIZE + 28*8); \ rsave (61, 48*RSIZE + 29*8); \ rsave (62, 48*RSIZE + 30*8); \ rsave (63, 48*RSIZE + 31*8) /* Describe where the VMX regs are saved. */ #ifdef CONFIG_ALTIVEC #define EH_FRAME_VMX \ vsave_msr0 ( 0); \ vsave_msr1 ( 1); \ vsave_msr1 ( 2); \ vsave_msr1 ( 3); \ vsave_msr1 ( 4); \ vsave_msr1 ( 5); \ vsave_msr1 ( 6); \ vsave_msr1 ( 7); \ vsave_msr1 ( 8); \ vsave_msr1 ( 9); \ vsave_msr1 (10); \ vsave_msr1 (11); \ vsave_msr1 (12); \ vsave_msr1 (13); \ vsave_msr1 (14); \ vsave_msr1 (15); \ vsave_msr1 (16); \ vsave_msr1 (17); \ vsave_msr1 (18); \ vsave_msr1 (19); \ vsave_msr1 (20); \ vsave_msr1 (21); \ vsave_msr1 (22); \ vsave_msr1 (23); \ vsave_msr1 (24); \ vsave_msr1 (25); \ vsave_msr1 (26); \ vsave_msr1 (27); \ vsave_msr1 (28); \ vsave_msr1 (29); \ vsave_msr1 (30); \ vsave_msr1 (31); \ vsave_msr2 (33, 32*16+12); \ vsave (32, 33*16) #else #define EH_FRAME_VMX #endif .section .eh_frame,"a",@progbits .Lcie: .long .Lcie_end - .Lcie_start .Lcie_start: .long 0 /* CIE ID */ .byte 1 /* Version number */ .string "zRS" /* NUL-terminated augmentation string */ .uleb128 4 /* Code alignment factor */ .sleb128 -8 /* Data alignment factor */ .byte 67 /* Return address register column, ap */ .uleb128 1 /* Augmentation value length */ .byte 0x14 /* DW_EH_PE_pcrel | DW_EH_PE_udata8. */ .byte 0x0c,1,0 /* DW_CFA_def_cfa: r1 ofs 0 */ .balign 8 .Lcie_end: .long .Lfde0_end - .Lfde0_start .Lfde0_start: .long .Lfde0_start - .Lcie /* CIE pointer. */ .quad .Lsigrt_start - . /* PC start, length */ .quad .Lsigrt_end - .Lsigrt_start .uleb128 0 /* Augmentation */ EH_FRAME_GEN EH_FRAME_FP EH_FRAME_VMX # Do we really need to describe the frame at this point? ie. will # we ever have some call chain that returns somewhere past the addi? # I don't think so, since gcc doesn't support async signals. # .byte 0x41 /* DW_CFA_advance_loc 1*4 */ #undef PTREGS #define PTREGS 168+56 # EH_FRAME_GEN # EH_FRAME_FP # EH_FRAME_VMX .balign 8 .Lfde0_end:
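Each rsave() entry in the file above compiles to a tiny DWARF expression program that tells an unwinder where a register was saved: take the old stack pointer, load the pt_regs pointer stored PTREGS bytes above it, then read at the register's offset (DW_OP_breg1, DW_OP_deref, DW_OP_plus_uconst, DW_OP_deref). In C, an unwinder evaluating that program would effectively do the following; the helper is hypothetical and assumes the 64-bit layout:

    #include <stdint.h>

    static uint64_t unwind_rsave(uint64_t sp, uint64_t ptregs_ofs,
                                 uint64_t reg_ofs)
    {
        uint64_t pt_regs = *(uint64_t *)(sp + ptregs_ofs); /* breg1 + deref */
        return *(uint64_t *)(pt_regs + reg_ofs);           /* +ofs, deref */
    }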
aixcc-public/challenge-001-exemplar-source
2,889
arch/powerpc/kernel/vdso/vdso64.lds.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * This is the infamous ld script for the 64 bits vdso * library */ #include <asm/vdso.h> #include <asm/page.h> #include <asm-generic/vmlinux.lds.h> #ifdef __LITTLE_ENDIAN__ OUTPUT_FORMAT("elf64-powerpcle", "elf64-powerpcle", "elf64-powerpcle") #else OUTPUT_FORMAT("elf64-powerpc", "elf64-powerpc", "elf64-powerpc") #endif OUTPUT_ARCH(powerpc:common64) SECTIONS { PROVIDE(_vdso_datapage = . - 2 * PAGE_SIZE); . = SIZEOF_HEADERS; .hash : { *(.hash) } :text .gnu.hash : { *(.gnu.hash) } .dynsym : { *(.dynsym) } .dynstr : { *(.dynstr) } .gnu.version : { *(.gnu.version) } .gnu.version_d : { *(.gnu.version_d) } .gnu.version_r : { *(.gnu.version_r) } .note : { *(.note.*) } :text :note . = ALIGN(16); .text : { *(.text .stub .text.* .gnu.linkonce.t.* __ftr_alt_*) *(.sfpr .glink) } :text PROVIDE(__etext = .); PROVIDE(_etext = .); PROVIDE(etext = .); . = ALIGN(8); VDSO_ftr_fixup_start = .; __ftr_fixup : { *(__ftr_fixup) } VDSO_ftr_fixup_end = .; . = ALIGN(8); VDSO_mmu_ftr_fixup_start = .; __mmu_ftr_fixup : { *(__mmu_ftr_fixup) } VDSO_mmu_ftr_fixup_end = .; . = ALIGN(8); VDSO_lwsync_fixup_start = .; __lwsync_fixup : { *(__lwsync_fixup) } VDSO_lwsync_fixup_end = .; . = ALIGN(8); VDSO_fw_ftr_fixup_start = .; __fw_ftr_fixup : { *(__fw_ftr_fixup) } VDSO_fw_ftr_fixup_end = .; /* * Other stuff is appended to the text segment: */ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } .rodata1 : { *(.rodata1) } .dynamic : { *(.dynamic) } :text :dynamic .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr .eh_frame : { KEEP (*(.eh_frame)) } :text .gcc_except_table : { *(.gcc_except_table) } .rela.dyn ALIGN(8) : { *(.rela.dyn) } .got ALIGN(8) : { *(.got .toc) } _end = .; PROVIDE(end = .); DWARF_DEBUG ELF_DETAILS /DISCARD/ : { *(.note.GNU-stack) *(.branch_lt) *(.data .data.* .gnu.linkonce.d.* .sdata*) *(.bss .sbss .dynbss .dynsbss) *(.opd) } } /* * Very old versions of ld do not recognize this name token; use the constant. */ #define PT_GNU_EH_FRAME 0x6474e550 /* * We must supply the ELF program headers explicitly to get just one * PT_LOAD segment, and set the flags explicitly to make segments read-only. */ PHDRS { text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ note PT_NOTE FLAGS(4); /* PF_R */ eh_frame_hdr PT_GNU_EH_FRAME; } /* * This controls what symbols we export from the DSO. */ VERSION { VDSO_VERSION_STRING { global: __kernel_get_syscall_map; __kernel_gettimeofday; __kernel_clock_gettime; __kernel_clock_getres; __kernel_get_tbfreq; __kernel_sync_dicache; __kernel_sigtramp_rt64; __kernel_getcpu; __kernel_time; local: *; }; } /* * Make the sigreturn code visible to the kernel. */ VDSO_sigtramp_rt64 = __kernel_start_sigtramp_rt64;
aixcc-public/challenge-001-exemplar-source
3,082
arch/powerpc/kernel/vdso/vdso32.lds.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * This is the infamous ld script for the 32 bits vdso * library */ #include <asm/vdso.h> #include <asm/page.h> #include <asm-generic/vmlinux.lds.h> #ifdef __LITTLE_ENDIAN__ OUTPUT_FORMAT("elf32-powerpcle", "elf32-powerpcle", "elf32-powerpcle") #else OUTPUT_FORMAT("elf32-powerpc", "elf32-powerpc", "elf32-powerpc") #endif OUTPUT_ARCH(powerpc:common) SECTIONS { PROVIDE(_vdso_datapage = . - 2 * PAGE_SIZE); . = SIZEOF_HEADERS; .hash : { *(.hash) } :text .gnu.hash : { *(.gnu.hash) } .dynsym : { *(.dynsym) } .dynstr : { *(.dynstr) } .gnu.version : { *(.gnu.version) } .gnu.version_d : { *(.gnu.version_d) } .gnu.version_r : { *(.gnu.version_r) } .note : { *(.note.*) } :text :note . = ALIGN(16); .text : { *(.text .stub .text.* .gnu.linkonce.t.* __ftr_alt_*) } :text PROVIDE(__etext = .); PROVIDE(_etext = .); PROVIDE(etext = .); . = ALIGN(8); VDSO_ftr_fixup_start = .; __ftr_fixup : { *(__ftr_fixup) } VDSO_ftr_fixup_end = .; . = ALIGN(8); VDSO_mmu_ftr_fixup_start = .; __mmu_ftr_fixup : { *(__mmu_ftr_fixup) } VDSO_mmu_ftr_fixup_end = .; . = ALIGN(8); VDSO_lwsync_fixup_start = .; __lwsync_fixup : { *(__lwsync_fixup) } VDSO_lwsync_fixup_end = .; #ifdef CONFIG_PPC64 . = ALIGN(8); VDSO_fw_ftr_fixup_start = .; __fw_ftr_fixup : { *(__fw_ftr_fixup) } VDSO_fw_ftr_fixup_end = .; #endif /* * Other stuff is appended to the text segment: */ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } .rodata1 : { *(.rodata1) } .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr .eh_frame : { KEEP (*(.eh_frame)) } :text .gcc_except_table : { *(.gcc_except_table) } .fixup : { *(.fixup) } .dynamic : { *(.dynamic) } :text :dynamic .got : { *(.got) } :text .plt : { *(.plt) } _end = .; __end = .; PROVIDE(end = .); DWARF_DEBUG ELF_DETAILS /DISCARD/ : { *(.note.GNU-stack) *(.data .data.* .gnu.linkonce.d.* .sdata*) *(.bss .sbss .dynbss .dynsbss) *(.got1) } } /* * Very old versions of ld do not recognize this name token; use the constant. */ #define PT_GNU_EH_FRAME 0x6474e550 /* * We must supply the ELF program headers explicitly to get just one * PT_LOAD segment, and set the flags explicitly to make segments read-only. */ PHDRS { text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ note PT_NOTE FLAGS(4); /* PF_R */ eh_frame_hdr PT_GNU_EH_FRAME; } /* * This controls what symbols we export from the DSO. */ VERSION { VDSO_VERSION_STRING { global: __kernel_get_syscall_map; __kernel_gettimeofday; __kernel_clock_gettime; __kernel_clock_gettime64; __kernel_clock_getres; __kernel_time; __kernel_get_tbfreq; __kernel_sync_dicache; __kernel_sigtramp32; __kernel_sigtramp_rt32; #if defined(CONFIG_PPC64) || !defined(CONFIG_SMP) __kernel_getcpu; #endif local: *; }; } /* * Make the sigreturn code visible to the kernel. */ VDSO_sigtramp32 = __kernel_sigtramp32; VDSO_sigtramp_rt32 = __kernel_sigtramp_rt32;
aixcc-public/challenge-001-exemplar-source
1,473
arch/powerpc/kernel/vdso/datapage.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Access to the shared data page by the vDSO & syscall map * * Copyright (C) 2004 Benjamin Herrenschmuidt (benh@kernel.crashing.org), IBM Corp. */ #include <asm/processor.h> #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> #include <asm/unistd.h> #include <asm/vdso.h> #include <asm/vdso_datapage.h> .text /* * void *__kernel_get_syscall_map(unsigned int *syscall_count); * * Returns a pointer to the syscall map. The map is agnostic to the * size of "long"; unlike kernel bitops, it stores bits from top to * bottom so that memory actually contains a linear bitmap. Check for * syscall N by testing bit (0x80000000 >> (N & 0x1f)) of the 32-bit * word at N >> 5. */ V_FUNCTION_BEGIN(__kernel_get_syscall_map) .cfi_startproc mflr r12 .cfi_register lr,r12 mr. r4,r3 get_datapage r3 mtlr r12 #ifdef __powerpc64__ addi r3,r3,CFG_SYSCALL_MAP64 #else addi r3,r3,CFG_SYSCALL_MAP32 #endif crclr cr0*4+so beqlr li r0,NR_syscalls stw r0,0(r4) blr .cfi_endproc V_FUNCTION_END(__kernel_get_syscall_map) /* * unsigned long long __kernel_get_tbfreq(void); * * Returns the timebase frequency in Hz. */ V_FUNCTION_BEGIN(__kernel_get_tbfreq) .cfi_startproc mflr r12 .cfi_register lr,r12 get_datapage r3 #ifndef __powerpc64__ lwz r4,(CFG_TB_TICKS_PER_SEC + 4)(r3) #endif PPC_LL r3,CFG_TB_TICKS_PER_SEC(r3) mtlr r12 crclr cr0*4+so blr .cfi_endproc V_FUNCTION_END(__kernel_get_tbfreq)
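The bit layout documented for __kernel_get_syscall_map above can be exercised with a few lines of C: syscall N is present when bit (0x80000000 >> (N & 0x1f)) is set in 32-bit word N >> 5, i.e. bits are filled from the top of each word. The function name is illustrative:

    #include <stdint.h>

    static int syscall_in_map(const uint32_t *map, unsigned int n)
    {
        return (map[n >> 5] & (0x80000000u >> (n & 0x1f))) != 0;
    }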
aixcc-public/challenge-001-exemplar-source
1,981
arch/powerpc/kernel/vdso/cacheflush.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * vDSO provided cache flush routines * * Copyright (C) 2004 Benjamin Herrenschmuidt (benh@kernel.crashing.org), * IBM Corp. */ #include <asm/processor.h> #include <asm/ppc_asm.h> #include <asm/vdso.h> #include <asm/vdso_datapage.h> #include <asm/asm-offsets.h> #include <asm/cache.h> .text /* * Default "generic" version of __kernel_sync_dicache. * * void __kernel_sync_dicache(unsigned long start, unsigned long end) * * Flushes the data cache & invalidates the instruction cache for the * provided range [start, end) */ V_FUNCTION_BEGIN(__kernel_sync_dicache) .cfi_startproc BEGIN_FTR_SECTION b 3f END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) #ifdef CONFIG_PPC64 mflr r12 .cfi_register lr,r12 get_datapage r10 mtlr r12 .cfi_restore lr #endif #ifdef CONFIG_PPC64 lwz r7,CFG_DCACHE_BLOCKSZ(r10) addi r5,r7,-1 #else li r5, L1_CACHE_BYTES - 1 #endif andc r6,r3,r5 /* round low to line bdy */ subf r8,r6,r4 /* compute length */ add r8,r8,r5 /* ensure we get enough */ #ifdef CONFIG_PPC64 lwz r9,CFG_DCACHE_LOGBLOCKSZ(r10) PPC_SRL. r8,r8,r9 /* compute line count */ #else srwi. r8, r8, L1_CACHE_SHIFT mr r7, r6 #endif crclr cr0*4+so beqlr /* nothing to do? */ mtctr r8 1: dcbst 0,r6 #ifdef CONFIG_PPC64 add r6,r6,r7 #else addi r6, r6, L1_CACHE_BYTES #endif bdnz 1b sync /* Now invalidate the instruction cache */ #ifdef CONFIG_PPC64 lwz r7,CFG_ICACHE_BLOCKSZ(r10) addi r5,r7,-1 andc r6,r3,r5 /* round low to line bdy */ subf r8,r6,r4 /* compute length */ add r8,r8,r5 lwz r9,CFG_ICACHE_LOGBLOCKSZ(r10) PPC_SRL. r8,r8,r9 /* compute line count */ crclr cr0*4+so beqlr /* nothing to do? */ #endif mtctr r8 #ifdef CONFIG_PPC64 2: icbi 0,r6 add r6,r6,r7 #else 2: icbi 0, r7 addi r7, r7, L1_CACHE_BYTES #endif bdnz 2b isync li r3,0 blr 3: crclr cr0*4+so sync icbi 0,r1 isync li r3,0 blr .cfi_endproc V_FUNCTION_END(__kernel_sync_dicache)
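The range arithmetic in __kernel_sync_dicache above rounds the start down to a line boundary and pads the length before the shift, so every cache line touching [start, end) gets flushed. A hedged C restatement of just that arithmetic, with illustrative names:

    #include <stdint.h>

    static void line_range(uintptr_t start, uintptr_t end,
                           uintptr_t line, unsigned int shift,
                           uintptr_t *first, uintptr_t *count)
    {
        *first = start & ~(line - 1);                 /* andc r6,r3,r5 */
        *count = (end - *first + line - 1) >> shift;  /* subf, add, PPC_SRL */
    }
    /* e.g. line_range(0x1005, 0x1043, 32, 5, ...) gives first = 0x1000 and
     * count = 3 lines (0x1000, 0x1020, 0x1040). */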
aixcc-public/challenge-001-exemplar-source
3,085
arch/powerpc/kernel/vdso/gettimeofday.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Userland implementation of gettimeofday() for processes * for use in the vDSO * * Copyright (C) 2004 Benjamin Herrenschmuidt (benh@kernel.crashing.org), * IBM Corp. */ #include <asm/processor.h> #include <asm/ppc_asm.h> #include <asm/vdso.h> #include <asm/vdso_datapage.h> #include <asm/asm-offsets.h> #include <asm/unistd.h> /* * The macro sets up two stack frames, one for the caller and one for the * callee, because there is no requirement for the caller to set up a stack * frame when calling the VDSO, so it may have omitted to do so, especially * on PPC64 */ .macro cvdso_call funct call_time=0 .cfi_startproc PPC_STLU r1, -PPC_MIN_STKFRM(r1) .cfi_adjust_cfa_offset PPC_MIN_STKFRM mflr r0 PPC_STLU r1, -PPC_MIN_STKFRM(r1) .cfi_adjust_cfa_offset PPC_MIN_STKFRM PPC_STL r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1) .cfi_rel_offset lr, PPC_MIN_STKFRM + PPC_LR_STKOFF #ifdef __powerpc64__ PPC_STL r2, PPC_MIN_STKFRM + STK_GOT(r1) .cfi_rel_offset r2, PPC_MIN_STKFRM + STK_GOT #endif get_datapage r5 .ifeq \call_time addi r5, r5, VDSO_DATA_OFFSET .else addi r4, r5, VDSO_DATA_OFFSET .endif bl DOTSYM(\funct) PPC_LL r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1) #ifdef __powerpc64__ PPC_LL r2, PPC_MIN_STKFRM + STK_GOT(r1) .cfi_restore r2 #endif .ifeq \call_time cmpwi r3, 0 .endif mtlr r0 addi r1, r1, 2 * PPC_MIN_STKFRM .cfi_restore lr .cfi_def_cfa_offset 0 crclr so .ifeq \call_time beqlr+ crset so neg r3, r3 .endif blr .cfi_endproc .endm .text /* * Exact prototype of gettimeofday * * int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz); * */ V_FUNCTION_BEGIN(__kernel_gettimeofday) cvdso_call __c_kernel_gettimeofday V_FUNCTION_END(__kernel_gettimeofday) /* * Exact prototype of clock_gettime() * * int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp); * */ V_FUNCTION_BEGIN(__kernel_clock_gettime) cvdso_call __c_kernel_clock_gettime V_FUNCTION_END(__kernel_clock_gettime) /* * Exact prototype of clock_gettime64() * * int __kernel_clock_gettime64(clockid_t clock_id, struct __timespec64 *ts); * */ #ifndef __powerpc64__ V_FUNCTION_BEGIN(__kernel_clock_gettime64) cvdso_call __c_kernel_clock_gettime64 V_FUNCTION_END(__kernel_clock_gettime64) #endif /* * Exact prototype of clock_getres() * * int __kernel_clock_getres(clockid_t clock_id, struct timespec *res); * */ V_FUNCTION_BEGIN(__kernel_clock_getres) cvdso_call __c_kernel_clock_getres V_FUNCTION_END(__kernel_clock_getres) /* * Exact prototype of time() * * time_t time(time_t *t); * */ V_FUNCTION_BEGIN(__kernel_time) cvdso_call __c_kernel_time call_time=1 V_FUNCTION_END(__kernel_time) /* Routines for restoring integer registers, called by the compiler. */ /* Called with r11 pointing to the stack header word of the caller of the */ /* function, just beyond the end of the integer restore area. */ #ifndef __powerpc64__ _GLOBAL(_restgpr_31_x) _GLOBAL(_rest32gpr_31_x) lwz r0,4(r11) lwz r31,-4(r11) mtlr r0 mr r1,r11 blr #endif
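The tail of cvdso_call above implements the PowerPC syscall-style error convention for the fallible vDSO entry points: a zero return from the C helper leaves cr0.SO clear and returns 0, while a -errno return sets SO and is negated to a positive errno. The condition-register bit has no direct C expression, but the value transformation is just this (sketch; the function name is illustrative):

    /* rc is the C helper's result: 0 on success, -errno on failure. */
    static long cvdso_result(long rc)
    {
        if (rc == 0)
            return 0;       /* crclr so; beqlr+ */
        return -rc;         /* crset so; neg r3, r3 */
    }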
aixcc-public/challenge-001-exemplar-source
9,496
arch/powerpc/kernel/vdso/sigtramp32.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Signal trampolines for 32 bits processes in a ppc64 kernel for * use in the vDSO * * Copyright (C) 2004 Benjamin Herrenschmuidt (benh@kernel.crashing.org), IBM Corp. * Copyright (C) 2004 Alan Modra (amodra@au.ibm.com)), IBM Corp. */ #include <asm/processor.h> #include <asm/ppc_asm.h> #include <asm/unistd.h> #include <asm/vdso.h> .text /* The nop here is a hack. The dwarf2 unwind routines subtract 1 from the return address to get an address in the middle of the presumed call instruction. Since we don't have a call here, we artificially extend the range covered by the unwind info by adding a nop before the real start. */ nop V_FUNCTION_BEGIN(__kernel_sigtramp32) .Lsig_start = . - 4 li r0,__NR_sigreturn sc .Lsig_end: V_FUNCTION_END(__kernel_sigtramp32) .Lsigrt_start: nop V_FUNCTION_BEGIN(__kernel_sigtramp_rt32) li r0,__NR_rt_sigreturn sc .Lsigrt_end: V_FUNCTION_END(__kernel_sigtramp_rt32) .section .eh_frame,"a",@progbits /* Register r1 can be found at offset 4 of a pt_regs structure. A pointer to the pt_regs is stored in memory at the old sp plus PTREGS. */ #define cfa_save \ .byte 0x0f; /* DW_CFA_def_cfa_expression */ \ .uleb128 9f - 1f; /* length */ \ 1: \ .byte 0x71; .sleb128 PTREGS; /* DW_OP_breg1 */ \ .byte 0x06; /* DW_OP_deref */ \ .byte 0x23; .uleb128 RSIZE; /* DW_OP_plus_uconst */ \ .byte 0x06; /* DW_OP_deref */ \ 9: /* Register REGNO can be found at offset OFS of a pt_regs structure. A pointer to the pt_regs is stored in memory at the old sp plus PTREGS. */ #define rsave(regno, ofs) \ .byte 0x10; /* DW_CFA_expression */ \ .uleb128 regno; /* regno */ \ .uleb128 9f - 1f; /* length */ \ 1: \ .byte 0x71; .sleb128 PTREGS; /* DW_OP_breg1 */ \ .byte 0x06; /* DW_OP_deref */ \ .ifne ofs; \ .byte 0x23; .uleb128 ofs; /* DW_OP_plus_uconst */ \ .endif; \ 9: /* If msr bit 1<<25 is set, then VMX register REGNO is at offset REGNO*16 of the VMX reg struct. The VMX reg struct is at offset VREGS of the pt_regs struct. This macro is for REGNO == 0, and contains 'subroutines' that the other macros jump to. */ #define vsave_msr0(regno) \ .byte 0x10; /* DW_CFA_expression */ \ .uleb128 regno + 77; /* regno */ \ .uleb128 9f - 1f; /* length */ \ 1: \ .byte 0x30 + regno; /* DW_OP_lit0 */ \ 2: \ .byte 0x40; /* DW_OP_lit16 */ \ .byte 0x1e; /* DW_OP_mul */ \ 3: \ .byte 0x71; .sleb128 PTREGS; /* DW_OP_breg1 */ \ .byte 0x06; /* DW_OP_deref */ \ .byte 0x12; /* DW_OP_dup */ \ .byte 0x23; /* DW_OP_plus_uconst */ \ .uleb128 33*RSIZE; /* msr offset */ \ .byte 0x06; /* DW_OP_deref */ \ .byte 0x0c; .long 1 << 25; /* DW_OP_const4u */ \ .byte 0x1a; /* DW_OP_and */ \ .byte 0x12; /* DW_OP_dup, ret 0 if bra taken */ \ .byte 0x30; /* DW_OP_lit0 */ \ .byte 0x29; /* DW_OP_eq */ \ .byte 0x28; .short 0x7fff; /* DW_OP_bra to end */ \ .byte 0x13; /* DW_OP_drop, pop the 0 */ \ .byte 0x23; .uleb128 VREGS; /* DW_OP_plus_uconst */ \ .byte 0x22; /* DW_OP_plus */ \ .byte 0x2f; .short 0x7fff; /* DW_OP_skip to end */ \ 9: /* If msr bit 1<<25 is set, then VMX register REGNO is at offset REGNO*16 of the VMX reg struct. REGNO is 1 thru 31. */ #define vsave_msr1(regno) \ .byte 0x10; /* DW_CFA_expression */ \ .uleb128 regno + 77; /* regno */ \ .uleb128 9f - 1f; /* length */ \ 1: \ .byte 0x30 + regno; /* DW_OP_lit n */ \ .byte 0x2f; .short 2b - 9f; /* DW_OP_skip */ \ 9: /* If msr bit 1<<25 is set, then VMX register REGNO is at offset OFS of the VMX save block. 
*/ #define vsave_msr2(regno, ofs) \ .byte 0x10; /* DW_CFA_expression */ \ .uleb128 regno + 77; /* regno */ \ .uleb128 9f - 1f; /* length */ \ 1: \ .byte 0x0a; .short ofs; /* DW_OP_const2u */ \ .byte 0x2f; .short 3b - 9f; /* DW_OP_skip */ \ 9: /* VMX register REGNO is at offset OFS of the VMX save area. */ #define vsave(regno, ofs) \ .byte 0x10; /* DW_CFA_expression */ \ .uleb128 regno + 77; /* regno */ \ .uleb128 9f - 1f; /* length */ \ 1: \ .byte 0x71; .sleb128 PTREGS; /* DW_OP_breg1 */ \ .byte 0x06; /* DW_OP_deref */ \ .byte 0x23; .uleb128 VREGS; /* DW_OP_plus_uconst */ \ .byte 0x23; .uleb128 ofs; /* DW_OP_plus_uconst */ \ 9: /* This is where the pt_regs pointer can be found on the stack. */ #define PTREGS 64+28 /* Size of regs. */ #define RSIZE 4 /* This is the offset of the VMX regs. */ #define VREGS 48*RSIZE+34*8 /* Describe where general purpose regs are saved. */ #define EH_FRAME_GEN \ cfa_save; \ rsave ( 0, 0*RSIZE); \ rsave ( 2, 2*RSIZE); \ rsave ( 3, 3*RSIZE); \ rsave ( 4, 4*RSIZE); \ rsave ( 5, 5*RSIZE); \ rsave ( 6, 6*RSIZE); \ rsave ( 7, 7*RSIZE); \ rsave ( 8, 8*RSIZE); \ rsave ( 9, 9*RSIZE); \ rsave (10, 10*RSIZE); \ rsave (11, 11*RSIZE); \ rsave (12, 12*RSIZE); \ rsave (13, 13*RSIZE); \ rsave (14, 14*RSIZE); \ rsave (15, 15*RSIZE); \ rsave (16, 16*RSIZE); \ rsave (17, 17*RSIZE); \ rsave (18, 18*RSIZE); \ rsave (19, 19*RSIZE); \ rsave (20, 20*RSIZE); \ rsave (21, 21*RSIZE); \ rsave (22, 22*RSIZE); \ rsave (23, 23*RSIZE); \ rsave (24, 24*RSIZE); \ rsave (25, 25*RSIZE); \ rsave (26, 26*RSIZE); \ rsave (27, 27*RSIZE); \ rsave (28, 28*RSIZE); \ rsave (29, 29*RSIZE); \ rsave (30, 30*RSIZE); \ rsave (31, 31*RSIZE); \ rsave (67, 32*RSIZE); /* ap, used as temp for nip */ \ rsave (65, 36*RSIZE); /* lr */ \ rsave (70, 38*RSIZE) /* cr */ /* Describe where the FP regs are saved. */ #define EH_FRAME_FP \ rsave (32, 48*RSIZE + 0*8); \ rsave (33, 48*RSIZE + 1*8); \ rsave (34, 48*RSIZE + 2*8); \ rsave (35, 48*RSIZE + 3*8); \ rsave (36, 48*RSIZE + 4*8); \ rsave (37, 48*RSIZE + 5*8); \ rsave (38, 48*RSIZE + 6*8); \ rsave (39, 48*RSIZE + 7*8); \ rsave (40, 48*RSIZE + 8*8); \ rsave (41, 48*RSIZE + 9*8); \ rsave (42, 48*RSIZE + 10*8); \ rsave (43, 48*RSIZE + 11*8); \ rsave (44, 48*RSIZE + 12*8); \ rsave (45, 48*RSIZE + 13*8); \ rsave (46, 48*RSIZE + 14*8); \ rsave (47, 48*RSIZE + 15*8); \ rsave (48, 48*RSIZE + 16*8); \ rsave (49, 48*RSIZE + 17*8); \ rsave (50, 48*RSIZE + 18*8); \ rsave (51, 48*RSIZE + 19*8); \ rsave (52, 48*RSIZE + 20*8); \ rsave (53, 48*RSIZE + 21*8); \ rsave (54, 48*RSIZE + 22*8); \ rsave (55, 48*RSIZE + 23*8); \ rsave (56, 48*RSIZE + 24*8); \ rsave (57, 48*RSIZE + 25*8); \ rsave (58, 48*RSIZE + 26*8); \ rsave (59, 48*RSIZE + 27*8); \ rsave (60, 48*RSIZE + 28*8); \ rsave (61, 48*RSIZE + 29*8); \ rsave (62, 48*RSIZE + 30*8); \ rsave (63, 48*RSIZE + 31*8) /* Describe where the VMX regs are saved. 
*/ #ifdef CONFIG_ALTIVEC #define EH_FRAME_VMX \ vsave_msr0 ( 0); \ vsave_msr1 ( 1); \ vsave_msr1 ( 2); \ vsave_msr1 ( 3); \ vsave_msr1 ( 4); \ vsave_msr1 ( 5); \ vsave_msr1 ( 6); \ vsave_msr1 ( 7); \ vsave_msr1 ( 8); \ vsave_msr1 ( 9); \ vsave_msr1 (10); \ vsave_msr1 (11); \ vsave_msr1 (12); \ vsave_msr1 (13); \ vsave_msr1 (14); \ vsave_msr1 (15); \ vsave_msr1 (16); \ vsave_msr1 (17); \ vsave_msr1 (18); \ vsave_msr1 (19); \ vsave_msr1 (20); \ vsave_msr1 (21); \ vsave_msr1 (22); \ vsave_msr1 (23); \ vsave_msr1 (24); \ vsave_msr1 (25); \ vsave_msr1 (26); \ vsave_msr1 (27); \ vsave_msr1 (28); \ vsave_msr1 (29); \ vsave_msr1 (30); \ vsave_msr1 (31); \ vsave_msr2 (33, 32*16+12); \ vsave (32, 32*16) #else #define EH_FRAME_VMX #endif .Lcie: .long .Lcie_end - .Lcie_start .Lcie_start: .long 0 /* CIE ID */ .byte 1 /* Version number */ .string "zRS" /* NUL-terminated augmentation string */ .uleb128 4 /* Code alignment factor */ .sleb128 -4 /* Data alignment factor */ .byte 67 /* Return address register column, ap */ .uleb128 1 /* Augmentation value length */ .byte 0x1b /* DW_EH_PE_pcrel | DW_EH_PE_sdata4. */ .byte 0x0c,1,0 /* DW_CFA_def_cfa: r1 ofs 0 */ .balign 4 .Lcie_end: .long .Lfde0_end - .Lfde0_start .Lfde0_start: .long .Lfde0_start - .Lcie /* CIE pointer. */ .long .Lsig_start - . /* PC start, length */ .long .Lsig_end - .Lsig_start .uleb128 0 /* Augmentation */ EH_FRAME_GEN EH_FRAME_FP EH_FRAME_VMX .balign 4 .Lfde0_end: /* We have a different stack layout for rt_sigreturn. */ #undef PTREGS #define PTREGS 64+16+128+20+28 .long .Lfde1_end - .Lfde1_start .Lfde1_start: .long .Lfde1_start - .Lcie /* CIE pointer. */ .long .Lsigrt_start - . /* PC start, length */ .long .Lsigrt_end - .Lsigrt_start .uleb128 0 /* Augmentation */ EH_FRAME_GEN EH_FRAME_FP EH_FRAME_VMX .balign 4 .Lfde1_end:
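The cfa_save expression above computes CFA = *(*(r1 + PTREGS) + RSIZE) in DWARF stack-machine ops. A toy C evaluation of that same expression over fabricated memory, assuming nothing about the real signal frame beyond the two dereferences:

#include <stdint.h>
#include <stdio.h>

/* Constants taken from the trampoline's sigreturn layout. */
#define PTREGS	(64 + 28)
#define RSIZE	4

/* Fake 1KB of word-addressable memory, purely for illustration. */
static uint32_t mem[256];

static uint32_t deref(uint32_t addr)	/* DW_OP_deref */
{
	return mem[addr / 4];
}

int main(void)
{
	uint32_t r1 = 0;		/* old stack pointer */
	uint32_t pt_regs = 128;		/* pretend pt_regs lives here */

	mem[(r1 + PTREGS) / 4] = pt_regs;	/* pointer stored on the old stack */
	mem[(pt_regs + RSIZE) / 4] = 0xdead;	/* saved r1 inside pt_regs */

	/* DW_OP_breg1 PTREGS; DW_OP_deref; DW_OP_plus_uconst RSIZE; DW_OP_deref */
	uint32_t cfa = deref(deref(r1 + PTREGS) + RSIZE);

	printf("CFA = 0x%x\n", cfa);	/* prints 0xdead */
	return 0;
}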
aixcc-public/challenge-001-exemplar-source
5,905
arch/powerpc/kernel/trace/ftrace_mprofile.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Split from ftrace_64.S */ #include <linux/magic.h> #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> #include <asm/ftrace.h> #include <asm/ppc-opcode.h> #include <asm/export.h> #include <asm/thread_info.h> #include <asm/bug.h> #include <asm/ptrace.h> /* * * ftrace_caller()/ftrace_regs_caller() are the functions that replace _mcount() * when ftrace is active. * * We arrive here after a function A calls function B, and we are the trace * function for B. When we enter r1 points to A's stack frame, B has not * had a chance to allocate one yet. * * Additionally r2 may point either to the TOC for A, or B, depending on * whether B did a TOC setup sequence before calling us. * * On entry the LR points back to the _mcount() call site, and r0 holds the * saved LR as it was on entry to B, i.e. the original return address at the * call site in A. * * Our job is to save the register state into a struct pt_regs (on the stack) * and then arrange for the ftrace function to be called. */ .macro ftrace_regs_entry allregs /* Create a minimal stack frame for representing B */ PPC_STLU r1, -STACK_FRAME_MIN_SIZE(r1) /* Create our stack frame + pt_regs */ PPC_STLU r1,-SWITCH_FRAME_SIZE(r1) /* Save all gprs to pt_regs */ SAVE_GPR(0, r1) SAVE_GPRS(3, 10, r1) #ifdef CONFIG_PPC64 /* Save the original return address in A's stack frame */ std r0, LRSAVE+SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE(r1) /* Ok to continue? */ lbz r3, PACA_FTRACE_ENABLED(r13) cmpdi r3, 0 beq ftrace_no_trace #endif .if \allregs == 1 SAVE_GPR(2, r1) SAVE_GPRS(11, 31, r1) .else #ifdef CONFIG_LIVEPATCH_64 SAVE_GPR(14, r1) #endif .endif /* Save previous stack pointer (r1) */ addi r8, r1, SWITCH_FRAME_SIZE PPC_STL r8, GPR1(r1) .if \allregs == 1 /* Load special regs for save below */ mfmsr r8 mfctr r9 mfxer r10 mfcr r11 .else /* Clear MSR to flag as ftrace_caller versus ftrace_regs_caller */ li r8, 0 .endif /* Get the _mcount() call site out of LR */ mflr r7 /* Save it as pt_regs->nip */ PPC_STL r7, _NIP(r1) /* Also save it in B's stackframe header for proper unwind */ PPC_STL r7, LRSAVE+SWITCH_FRAME_SIZE(r1) /* Save the read LR in pt_regs->link */ PPC_STL r0, _LINK(r1) #ifdef CONFIG_PPC64 /* Save callee's TOC in the ABI compliant location */ std r2, STK_GOT(r1) LOAD_PACA_TOC() /* get kernel TOC in r2 */ LOAD_REG_ADDR(r3, function_trace_op) ld r5,0(r3) #else lis r3,function_trace_op@ha lwz r5,function_trace_op@l(r3) #endif #ifdef CONFIG_LIVEPATCH_64 mr r14, r7 /* remember old NIP */ #endif /* Calculate ip from nip-4 into r3 for call below */ subi r3, r7, MCOUNT_INSN_SIZE /* Put the original return address in r4 as parent_ip */ mr r4, r0 /* Save special regs */ PPC_STL r8, _MSR(r1) .if \allregs == 1 PPC_STL r9, _CTR(r1) PPC_STL r10, _XER(r1) PPC_STL r11, _CCR(r1) .endif /* Load &pt_regs in r6 for call below */ addi r6, r1, STACK_FRAME_OVERHEAD .endm .macro ftrace_regs_exit allregs /* Load ctr with the possibly modified NIP */ PPC_LL r3, _NIP(r1) mtctr r3 #ifdef CONFIG_LIVEPATCH_64 cmpd r14, r3 /* has NIP been altered?
*/ #endif /* Restore gprs */ .if \allregs == 1 REST_GPRS(2, 31, r1) .else REST_GPRS(3, 10, r1) #ifdef CONFIG_LIVEPATCH_64 REST_GPR(14, r1) #endif .endif /* Restore possibly modified LR */ PPC_LL r0, _LINK(r1) mtlr r0 #ifdef CONFIG_PPC64 /* Restore callee's TOC */ ld r2, STK_GOT(r1) #endif /* Pop our stack frame */ addi r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE #ifdef CONFIG_LIVEPATCH_64 /* Based on the cmpd above, if the NIP was altered handle livepatch */ bne- livepatch_handler #endif bctr /* jump after _mcount site */ .endm _GLOBAL(ftrace_regs_caller) ftrace_regs_entry 1 /* ftrace_call(r3, r4, r5, r6) */ .globl ftrace_regs_call ftrace_regs_call: bl ftrace_stub nop ftrace_regs_exit 1 _GLOBAL(ftrace_caller) ftrace_regs_entry 0 /* ftrace_call(r3, r4, r5, r6) */ .globl ftrace_call ftrace_call: bl ftrace_stub nop ftrace_regs_exit 0 _GLOBAL(ftrace_stub) blr #ifdef CONFIG_PPC64 ftrace_no_trace: mflr r3 mtctr r3 REST_GPR(3, r1) addi r1, r1, SWITCH_FRAME_SIZE mtlr r0 bctr #endif #ifdef CONFIG_LIVEPATCH_64 /* * This function runs in the mcount context, between two functions. As * such it can only clobber registers which are volatile and used in * function linkage. * * We get here when a function A, calls another function B, but B has * been live patched with a new function C. * * On entry: * - we have no stack frame and can not allocate one * - LR points back to the original caller (in A) * - CTR holds the new NIP in C * - r0, r11 & r12 are free */ livepatch_handler: ld r12, PACA_THREAD_INFO(r13) /* Allocate 3 x 8 bytes */ ld r11, TI_livepatch_sp(r12) addi r11, r11, 24 std r11, TI_livepatch_sp(r12) /* Save toc & real LR on livepatch stack */ std r2, -24(r11) mflr r12 std r12, -16(r11) /* Store stack end marker */ lis r12, STACK_END_MAGIC@h ori r12, r12, STACK_END_MAGIC@l std r12, -8(r11) /* Put ctr in r12 for global entry and branch there */ mfctr r12 bctrl /* * Now we are returning from the patched function to the original * caller A. We are free to use r11, r12 and we can use r2 until we * restore it. */ ld r12, PACA_THREAD_INFO(r13) ld r11, TI_livepatch_sp(r12) /* Check stack marker hasn't been trashed */ lis r2, STACK_END_MAGIC@h ori r2, r2, STACK_END_MAGIC@l ld r12, -8(r11) 1: tdne r12, r2 EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0 /* Restore LR & toc from livepatch stack */ ld r12, -16(r11) mtlr r12 ld r2, -24(r11) /* Pop livepatch stack frame */ ld r12, PACA_THREAD_INFO(r13) subi r11, r11, 24 std r11, TI_livepatch_sp(r12) /* Return to original caller of live patched function */ blr #endif /* CONFIG_LIVEPATCH */
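The stub hands the tracer four arguments: r3 = call-site ip (nip minus one instruction), r4 = parent ip, r5 = ftrace_ops, r6 = pt_regs. A hedged C sketch of that argument preparation; trace_cb and the addresses are fabricated, and the pointer types are simplified relative to the kernel's real callback type:

#include <stdint.h>
#include <stdio.h>

#define MCOUNT_INSN_SIZE 4	/* one ppc instruction */

/* Illustrative callback with the layout the stub builds in r3..r6. */
static void trace_cb(uint64_t ip, uint64_t parent_ip, void *op, void *regs)
{
	printf("traced 0x%llx called from 0x%llx\n",
	       (unsigned long long)ip, (unsigned long long)parent_ip);
	(void)op;
	(void)regs;
}

int main(void)
{
	uint64_t lr_at_entry = 0xc000000000001234ULL; /* fabricated: _mcount site + 4 */
	uint64_t saved_r0 = 0xc000000000005678ULL;    /* fabricated: return addr in A */

	/* subi r3, r7, MCOUNT_INSN_SIZE; mr r4, r0 */
	trace_cb(lr_at_entry - MCOUNT_INSN_SIZE, saved_r0, NULL, NULL);
	return 0;
}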
aixcc-public/challenge-001-exemplar-source
1,268
arch/powerpc/kernel/trace/ftrace_64_pg.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Split from ftrace_64.S */ #include <linux/magic.h> #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> #include <asm/ftrace.h> #include <asm/ppc-opcode.h> #include <asm/export.h> _GLOBAL_TOC(ftrace_caller) lbz r3, PACA_FTRACE_ENABLED(r13) cmpdi r3, 0 beqlr /* Taken from output of objdump from lib64/glibc */ mflr r3 ld r11, 0(r1) stdu r1, -112(r1) std r3, 128(r1) ld r4, 16(r11) subi r3, r3, MCOUNT_INSN_SIZE .globl ftrace_call ftrace_call: bl ftrace_stub nop #ifdef CONFIG_FUNCTION_GRAPH_TRACER .globl ftrace_graph_call ftrace_graph_call: b ftrace_graph_stub _GLOBAL(ftrace_graph_stub) #endif ld r0, 128(r1) mtlr r0 addi r1, r1, 112 _GLOBAL(ftrace_stub) blr #ifdef CONFIG_FUNCTION_GRAPH_TRACER _GLOBAL(ftrace_graph_caller) addi r5, r1, 112 /* load r4 with local address */ ld r4, 128(r1) subi r4, r4, MCOUNT_INSN_SIZE /* Grab the LR out of the caller stack frame */ ld r11, 112(r1) ld r3, 16(r11) bl prepare_ftrace_return nop /* * prepare_ftrace_return gives us the address we divert to. * Change the LR in the callers stack frame to this. */ ld r11, 112(r1) std r3, 16(r11) ld r0, 128(r1) mtlr r0 addi r1, r1, 112 blr #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
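In the graph caller above, prepare_ftrace_return effectively swaps the return address stored in the caller's frame for a trampoline and remembers the original so the trampoline can get back. A simplified C model of that swap; my_prepare_ftrace_return and its two-argument shape are illustrative only, not the kernel's actual prototype:

#include <stdint.h>
#include <stdio.h>

static uint64_t saved_parent;	/* where the hook remembers the real return */

static uint64_t my_prepare_ftrace_return(uint64_t parent, uint64_t trampoline)
{
	saved_parent = parent;	/* keep the original return address */
	return trampoline;	/* what gets written back at 16(r11) */
}

int main(void)
{
	uint64_t caller_frame_lr = 0xc000000000002000ULL;	/* fabricated */
	uint64_t trampoline = 0xc000000000009000ULL;		/* fabricated */

	/* ld r3,16(r11); bl prepare_ftrace_return; std r3,16(r11) */
	caller_frame_lr = my_prepare_ftrace_return(caller_frame_lr, trampoline);
	printf("frame LR now 0x%llx, original 0x%llx\n",
	       (unsigned long long)caller_frame_lr,
	       (unsigned long long)saved_parent);
	return 0;
}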
aixcc-public/challenge-001-exemplar-source
1,348
arch/powerpc/kernel/trace/ftrace_low.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Split from entry_64.S */ #include <linux/magic.h> #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> #include <asm/ftrace.h> #include <asm/ppc-opcode.h> #include <asm/export.h> #ifdef CONFIG_PPC64 .pushsection ".tramp.ftrace.text","aw",@progbits; .globl ftrace_tramp_text ftrace_tramp_text: .space 64 .popsection .pushsection ".tramp.ftrace.init","aw",@progbits; .globl ftrace_tramp_init ftrace_tramp_init: .space 64 .popsection #endif _GLOBAL(mcount) _GLOBAL(_mcount) EXPORT_SYMBOL(_mcount) mflr r12 mtctr r12 mtlr r0 bctr #ifdef CONFIG_FUNCTION_GRAPH_TRACER _GLOBAL(return_to_handler) /* need to save return values */ #ifdef CONFIG_PPC64 std r4, -32(r1) std r3, -24(r1) /* save TOC */ std r2, -16(r1) std r31, -8(r1) mr r31, r1 stdu r1, -112(r1) /* * We might be called from a module. * Switch to our TOC to run inside the core kernel. */ LOAD_PACA_TOC() #else stwu r1, -16(r1) stw r3, 8(r1) stw r4, 12(r1) #endif bl ftrace_return_to_handler nop /* return value has real return address */ mtlr r3 #ifdef CONFIG_PPC64 ld r1, 0(r1) ld r4, -32(r1) ld r3, -24(r1) ld r2, -16(r1) ld r31, -8(r1) #else lwz r3, 8(r1) lwz r4, 12(r1) addi r1, r1, 16 #endif /* Jump back to real return address */ blr #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
aixcc-public/challenge-001-exemplar-source
1,432
arch/powerpc/platforms/powernv/opal-wrappers.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * PowerNV OPAL API wrappers * * Copyright 2011 IBM Corp. */ #include <linux/jump_label.h> #include <asm/ppc_asm.h> #include <asm/hvcall.h> #include <asm/asm-offsets.h> #include <asm/opal.h> #include <asm/asm-compat.h> #include <asm/feature-fixups.h> .section ".text" /* * r3-r10 - OPAL call arguments * STK_PARAM(R11) - OPAL opcode * STK_PARAM(R12) - MSR to restore */ _GLOBAL_TOC(__opal_call) mflr r0 std r0,PPC_LR_STKOFF(r1) ld r12,STK_PARAM(R12)(r1) li r0,MSR_IR|MSR_DR|MSR_LE andc r12,r12,r0 LOAD_REG_ADDR(r11, opal_return) mtlr r11 LOAD_REG_ADDR(r11, opal) ld r2,0(r11) ld r11,8(r11) mtspr SPRN_HSRR0,r11 mtspr SPRN_HSRR1,r12 /* set token to r0 */ ld r0,STK_PARAM(R11)(r1) hrfid opal_return: /* * Restore MSR on OPAL return. The MSR is set to big-endian. */ #ifdef __BIG_ENDIAN__ ld r11,STK_PARAM(R12)(r1) mtmsrd r11 #else /* Endian can only be switched with rfi, must byte reverse MSR load */ .short 0x4039 /* li r10,STK_PARAM(R12) */ .byte (STK_PARAM(R12) >> 8) & 0xff .byte STK_PARAM(R12) & 0xff .long 0x280c6a7d /* ldbrx r11,r10,r1 */ .long 0x05009f42 /* bcl 20,31,$+4 */ .long 0xa602487d /* mflr r10 */ .long 0x14004a39 /* addi r10,r10,20 */ .long 0xa64b5a7d /* mthsrr0 r10 */ .long 0xa64b7b7d /* mthsrr1 r11 */ .long 0x2402004c /* hrfid */ #endif LOAD_PACA_TOC() ld r0,PPC_LR_STKOFF(r1) mtlr r0 blr
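The wrapper reads a two-dword descriptor at the `opal` symbol (r2 from offset 0, the entry point from offset 8) and, on little-endian kernels, reloads the saved MSR byte-reversed because the CPU is still in big-endian mode when OPAL returns. A sketch under those assumptions; field names and values are fabricated, and __builtin_bswap64 (a gcc/clang builtin) stands in for ldbrx:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the two dwords loaded via ld r2,0(r11) / ld r11,8(r11). */
struct opal_desc {
	uint64_t base;		/* becomes r2 for firmware */
	uint64_t entry;		/* becomes HSRR0 */
};

int main(void)
{
	struct opal_desc d = { 0x3000000ULL, 0x3000010ULL };	/* fabricated */
	uint64_t msr_image = 0x9000000000001033ULL;		/* fabricated */

	printf("r2=0x%llx hsrr0=0x%llx\n",
	       (unsigned long long)d.base, (unsigned long long)d.entry);
	/* ldbrx r11,r10,r1 ~ byte-reversed load of the stacked MSR */
	printf("ldbrx view: 0x%llx\n",
	       (unsigned long long)__builtin_bswap64(msr_image));
	return 0;
}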
aixcc-public/challenge-001-exemplar-source
1,806
arch/powerpc/platforms/powernv/subcore-asm.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright 2013, Michael (Ellerman|Neuling), IBM Corporation. */ #include <asm/asm-offsets.h> #include <asm/ppc_asm.h> #include <asm/reg.h> #include "subcore.h" _GLOBAL(split_core_secondary_loop) /* * r3 = u8 *state, used throughout the routine * r4 = temp * r5 = temp * .. * r12 = MSR */ mfmsr r12 /* Disable interrupts so SRR0/1 don't get trashed */ li r4,0 ori r4,r4,MSR_EE|MSR_SE|MSR_BE|MSR_RI andc r4,r12,r4 sync mtmsrd r4 /* Switch to real mode and leave interrupts off */ li r5, MSR_IR|MSR_DR andc r5, r4, r5 LOAD_REG_ADDR(r4, real_mode) mtspr SPRN_SRR0,r4 mtspr SPRN_SRR1,r5 rfid b . /* prevent speculative execution */ real_mode: /* Grab values from unsplit SPRs */ mfspr r6, SPRN_LDBAR mfspr r7, SPRN_PMMAR mfspr r8, SPRN_PMCR mfspr r9, SPRN_RPR mfspr r10, SPRN_SDR1 /* Order reading the SPRs vs telling the primary we are ready to split */ sync /* Tell thread 0 we are in real mode */ li r4, SYNC_STEP_REAL_MODE stb r4, 0(r3) li r5, (HID0_POWER8_4LPARMODE | HID0_POWER8_2LPARMODE)@highest sldi r5, r5, 48 /* Loop until we see the split happen in HID0 */ 1: mfspr r4, SPRN_HID0 and. r4, r4, r5 beq 1b /* * We only need to initialise the below regs once for each subcore, * but it's simpler and harmless to do it on each thread. */ /* Make sure various SPRS have sane values */ li r4, 0 mtspr SPRN_LPID, r4 mtspr SPRN_PCR, r4 mtspr SPRN_HDEC, r4 /* Restore SPR values now we are split */ mtspr SPRN_LDBAR, r6 mtspr SPRN_PMMAR, r7 mtspr SPRN_PMCR, r8 mtspr SPRN_RPR, r9 mtspr SPRN_SDR1, r10 LOAD_REG_ADDR(r5, virtual_mode) /* Get out of real mode */ mtspr SPRN_SRR0,r5 mtspr SPRN_SRR1,r12 rfid b . /* prevent speculative execution */ virtual_mode: blr
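The split-detect mask is assembled with li ...@highest followed by sldi 48: @highest extracts bits 48..63 of a 64-bit constant, and the shift puts them back in place. A small C demonstration; the HID0 bit positions below are assumed for the sketch, not quoted from the real POWER8 definitions:

#include <stdint.h>
#include <stdio.h>

#define HID0_POWER8_4LPARMODE	(1ULL << 61)	/* assumed for the sketch */
#define HID0_POWER8_2LPARMODE	(1ULL << 57)	/* assumed for the sketch */

static uint64_t highest(uint64_t c)
{
	return (c >> 48) & 0xffff;	/* what the @highest operator yields */
}

int main(void)
{
	uint64_t c = HID0_POWER8_4LPARMODE | HID0_POWER8_2LPARMODE;
	uint64_t r5 = highest(c) << 48;	/* li r5,c@highest; sldi r5,r5,48 */

	printf("mask 0x%016llx rebuilt 0x%016llx\n",
	       (unsigned long long)c, (unsigned long long)r5);
	return 0;	/* both prints match: the round trip is lossless here */
}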
aixcc-public/challenge-001-exemplar-source
7,896
arch/powerpc/platforms/52xx/lite5200_sleep.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <asm/reg.h> #include <asm/ppc_asm.h> #include <asm/processor.h> #include <asm/cache.h> #define SDRAM_CTRL 0x104 #define SC_MODE_EN (1<<31) #define SC_CKE (1<<30) #define SC_REF_EN (1<<28) #define SC_SOFT_PRE (1<<1) #define GPIOW_GPIOE 0xc00 #define GPIOW_DDR 0xc08 #define GPIOW_DVO 0xc0c #define CDM_CE 0x214 #define CDM_SDRAM (1<<3) /* helpers... beware: r10 and r4 are overwritten */ #define SAVE_SPRN(reg, addr) \ mfspr r10, SPRN_##reg; \ stw r10, ((addr)*4)(r4); #define LOAD_SPRN(reg, addr) \ lwz r10, ((addr)*4)(r4); \ mtspr SPRN_##reg, r10; \ sync; \ isync; .data registers: .space 0x5c*4 .text /* ---------------------------------------------------------------------- */ /* low-power mode with help of M68HLC908QT1 */ .globl lite5200_low_power lite5200_low_power: mr r7, r3 /* save SRAM va */ mr r8, r4 /* save MBAR va */ /* setup wakeup address for u-boot at physical location 0x0 */ lis r3, CONFIG_KERNEL_START@h lis r4, lite5200_wakeup@h ori r4, r4, lite5200_wakeup@l sub r4, r4, r3 stw r4, 0(r3) /* * save stuff BDI overwrites * 0xf0 (0xe0->0x100 gets overwritten when BDI connected; * even when CONFIG_BDI_SWITCH is disabled and MMU XLAT commented; heisenbug?)) * WARNING: self-refresh doesn't seem to work when BDI2000 is connected, * possibly because BDI sets SDRAM registers before wakeup code does */ lis r4, registers@h ori r4, r4, registers@l lwz r10, 0xf0(r3) stw r10, (0x1d*4)(r4) /* save registers to r4 [destroys r10] */ SAVE_SPRN(LR, 0x1c) bl save_regs /* flush caches [destroys r3, r4] */ bl flush_data_cache /* copy code to sram */ mr r4, r7 li r3, (sram_code_end - sram_code)/4 mtctr r3 lis r3, sram_code@h ori r3, r3, sram_code@l 1: lwz r5, 0(r3) stw r5, 0(r4) addi r3, r3, 4 addi r4, r4, 4 bdnz 1b /* get tb_ticks_per_usec */ lis r3, tb_ticks_per_usec@h lwz r11, tb_ticks_per_usec@l(r3) /* disable I and D caches */ mfspr r3, SPRN_HID0 ori r3, r3, HID0_ICE | HID0_DCE xori r3, r3, HID0_ICE | HID0_DCE sync; isync; mtspr SPRN_HID0, r3 sync; isync; /* jump to sram */ mtlr r7 blrl /* doesn't return */ sram_code: /* self refresh */ lwz r4, SDRAM_CTRL(r8) /* send NOP (precharge) */ oris r4, r4, SC_MODE_EN@h /* mode_en */ stw r4, SDRAM_CTRL(r8) sync ori r4, r4, SC_SOFT_PRE /* soft_pre */ stw r4, SDRAM_CTRL(r8) sync xori r4, r4, SC_SOFT_PRE xoris r4, r4, SC_MODE_EN@h /* !mode_en */ stw r4, SDRAM_CTRL(r8) sync /* delay (for NOP to finish) */ li r12, 1 bl udelay /* * mode_en must not be set when enabling self-refresh * send AR with CKE low (self-refresh) */ oris r4, r4, (SC_REF_EN | SC_CKE)@h xoris r4, r4, (SC_CKE)@h /* ref_en !cke */ stw r4, SDRAM_CTRL(r8) sync /* delay (after !CKE there should be two cycles) */ li r12, 1 bl udelay /* disable clock */ lwz r4, CDM_CE(r8) ori r4, r4, CDM_SDRAM xori r4, r4, CDM_SDRAM stw r4, CDM_CE(r8) sync /* delay a bit */ li r12, 1 bl udelay /* turn off with QT chip */ li r4, 0x02 stb r4, GPIOW_GPIOE(r8) /* enable gpio_wkup1 */ sync stb r4, GPIOW_DVO(r8) /* "output" high */ sync stb r4, GPIOW_DDR(r8) /* output */ sync stb r4, GPIOW_DVO(r8) /* output high */ sync /* 10uS delay */ li r12, 10 bl udelay /* turn off */ li r4, 0 stb r4, GPIOW_DVO(r8) /* output low */ sync /* wait until we're offline */ 1: b 1b /* local udelay in sram is needed */ udelay: /* r11 - tb_ticks_per_usec, r12 - usecs, overwrites r13 */ mullw r12, r12, r11 mftb r13 /* start */ add r12, r13, r12 /* end */ 1: mftb r13 /* current */ cmp cr0, r13, r12 blt 1b blr sram_code_end: /* uboot jumps here on resume */ lite5200_wakeup: bl restore_regs /* HIDs, MSR 
*/ LOAD_SPRN(HID1, 0x19) LOAD_SPRN(HID2, 0x1a) /* address translation is tricky (see turn_on_mmu) */ mfmsr r10 ori r10, r10, MSR_DR | MSR_IR mtspr SPRN_SRR1, r10 lis r10, mmu_on@h ori r10, r10, mmu_on@l mtspr SPRN_SRR0, r10 sync rfi mmu_on: /* kernel offset (r4 is still set from restore_registers) */ addis r4, r4, CONFIG_KERNEL_START@h /* restore MSR */ lwz r10, (4*0x1b)(r4) mtmsr r10 sync; isync; /* invalidate caches */ mfspr r10, SPRN_HID0 ori r5, r10, HID0_ICFI | HID0_DCI mtspr SPRN_HID0, r5 /* invalidate caches */ sync; isync; mtspr SPRN_HID0, r10 sync; isync; /* enable caches */ lwz r10, (4*0x18)(r4) mtspr SPRN_HID0, r10 /* restore (enable caches, DPM) */ /* ^ this has to be after address translation set in MSR */ sync isync /* restore 0xf0 (BDI2000) */ lis r3, CONFIG_KERNEL_START@h lwz r10, (0x1d*4)(r4) stw r10, 0xf0(r3) LOAD_SPRN(LR, 0x1c) blr _ASM_NOKPROBE_SYMBOL(lite5200_wakeup) /* ---------------------------------------------------------------------- */ /* boring code: helpers */ /* save registers */ #define SAVE_BAT(n, addr) \ SAVE_SPRN(DBAT##n##L, addr); \ SAVE_SPRN(DBAT##n##U, addr+1); \ SAVE_SPRN(IBAT##n##L, addr+2); \ SAVE_SPRN(IBAT##n##U, addr+3); #define SAVE_SR(n, addr) \ mfsr r10, n; \ stw r10, ((addr)*4)(r4); #define SAVE_4SR(n, addr) \ SAVE_SR(n, addr); \ SAVE_SR(n+1, addr+1); \ SAVE_SR(n+2, addr+2); \ SAVE_SR(n+3, addr+3); save_regs: stw r0, 0(r4) stw r1, 0x4(r4) stw r2, 0x8(r4) stmw r11, 0xc(r4) /* 0xc -> 0x5f, (0x18*4-1) */ SAVE_SPRN(HID0, 0x18) SAVE_SPRN(HID1, 0x19) SAVE_SPRN(HID2, 0x1a) mfmsr r10 stw r10, (4*0x1b)(r4) /*SAVE_SPRN(LR, 0x1c) have to save it before the call */ /* 0x1d reserved by 0xf0 */ SAVE_SPRN(RPA, 0x1e) SAVE_SPRN(SDR1, 0x1f) /* save MMU regs */ SAVE_BAT(0, 0x20) SAVE_BAT(1, 0x24) SAVE_BAT(2, 0x28) SAVE_BAT(3, 0x2c) SAVE_BAT(4, 0x30) SAVE_BAT(5, 0x34) SAVE_BAT(6, 0x38) SAVE_BAT(7, 0x3c) SAVE_4SR(0, 0x40) SAVE_4SR(4, 0x44) SAVE_4SR(8, 0x48) SAVE_4SR(12, 0x4c) SAVE_SPRN(SPRG0, 0x50) SAVE_SPRN(SPRG1, 0x51) SAVE_SPRN(SPRG2, 0x52) SAVE_SPRN(SPRG3, 0x53) SAVE_SPRN(SPRG4, 0x54) SAVE_SPRN(SPRG5, 0x55) SAVE_SPRN(SPRG6, 0x56) SAVE_SPRN(SPRG7, 0x57) SAVE_SPRN(IABR, 0x58) SAVE_SPRN(DABR, 0x59) SAVE_SPRN(TBRL, 0x5a) SAVE_SPRN(TBRU, 0x5b) blr /* restore registers */ #define LOAD_BAT(n, addr) \ LOAD_SPRN(DBAT##n##L, addr); \ LOAD_SPRN(DBAT##n##U, addr+1); \ LOAD_SPRN(IBAT##n##L, addr+2); \ LOAD_SPRN(IBAT##n##U, addr+3); #define LOAD_SR(n, addr) \ lwz r10, ((addr)*4)(r4); \ mtsr n, r10; #define LOAD_4SR(n, addr) \ LOAD_SR(n, addr); \ LOAD_SR(n+1, addr+1); \ LOAD_SR(n+2, addr+2); \ LOAD_SR(n+3, addr+3); restore_regs: lis r4, registers@h ori r4, r4, registers@l /* MMU is not up yet */ subis r4, r4, CONFIG_KERNEL_START@h lwz r0, 0(r4) lwz r1, 0x4(r4) lwz r2, 0x8(r4) lmw r11, 0xc(r4) /* * these are a bit tricky * * 0x18 - HID0 * 0x19 - HID1 * 0x1a - HID2 * 0x1b - MSR * 0x1c - LR * 0x1d - reserved by 0xf0 (BDI2000) */ LOAD_SPRN(RPA, 0x1e); LOAD_SPRN(SDR1, 0x1f); /* restore MMU regs */ LOAD_BAT(0, 0x20) LOAD_BAT(1, 0x24) LOAD_BAT(2, 0x28) LOAD_BAT(3, 0x2c) LOAD_BAT(4, 0x30) LOAD_BAT(5, 0x34) LOAD_BAT(6, 0x38) LOAD_BAT(7, 0x3c) LOAD_4SR(0, 0x40) LOAD_4SR(4, 0x44) LOAD_4SR(8, 0x48) LOAD_4SR(12, 0x4c) /* rest of regs */ LOAD_SPRN(SPRG0, 0x50); LOAD_SPRN(SPRG1, 0x51); LOAD_SPRN(SPRG2, 0x52); LOAD_SPRN(SPRG3, 0x53); LOAD_SPRN(SPRG4, 0x54); LOAD_SPRN(SPRG5, 0x55); LOAD_SPRN(SPRG6, 0x56); LOAD_SPRN(SPRG7, 0x57); LOAD_SPRN(IABR, 0x58); LOAD_SPRN(DABR, 0x59); LOAD_SPRN(TBWL, 0x5a); /* these two have separate R/W regs */ LOAD_SPRN(TBWU, 0x5b); blr 
_ASM_NOKPROBE_SYMBOL(restore_regs) /* cache flushing code. copied from arch/ppc/boot/util.S */ #define NUM_CACHE_LINES (128*8) /* * Flush data cache * Do this by just reading lots of stuff into the cache. */ flush_data_cache: lis r3,CONFIG_KERNEL_START@h ori r3,r3,CONFIG_KERNEL_START@l li r4,NUM_CACHE_LINES mtctr r4 1: lwz r4,0(r3) addi r3,r3,L1_CACHE_BYTES /* Next line, please */ bdnz 1b blr
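The local udelay in sram_code is plain timebase arithmetic: deadline = mftb() + usecs * tb_ticks_per_usec, then spin until the timebase passes it. A C rendering of the same loop, with a fake timebase reader so the sketch runs anywhere:

#include <stdint.h>
#include <stdio.h>

static uint64_t fake_tb;

static uint64_t mftb(void)
{
	return fake_tb += 7;	/* pretend the timebase ticks between reads */
}

static void sram_udelay(uint64_t usecs, uint64_t tb_ticks_per_usec)
{
	uint64_t end = mftb() + usecs * tb_ticks_per_usec;	/* mullw + add */

	while (mftb() < end)	/* 1: mftb r13; cmp r13,r12; blt 1b */
		;
}

int main(void)
{
	sram_udelay(10, 33);	/* e.g. a 33 ticks/us timebase */
	printf("waited, tb now %llu\n", (unsigned long long)fake_tb);
	return 0;
}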
aixcc-public/challenge-001-exemplar-source
2,634
arch/powerpc/platforms/52xx/mpc52xx_sleep.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <asm/reg.h> #include <asm/ppc_asm.h> #include <asm/processor.h> .text _GLOBAL(mpc52xx_deep_sleep) mpc52xx_deep_sleep: /* args r3-r6: SRAM, SDRAM regs, CDM regs, INTR regs */ /* enable interrupts */ mfmsr r7 ori r7, r7, 0x8000 /* EE */ mtmsr r7 sync; isync; li r10, 0 /* flag that irq handler sets */ /* enable tmr7 (or any other) interrupt */ lwz r8, 0x14(r6) /* intr->main_mask */ ori r8, r8, 0x1 xori r8, r8, 0x1 stw r8, 0x14(r6) sync /* emulate tmr7 interrupt */ li r8, 0x1 stw r8, 0x40(r6) /* intr->main_emulate */ sync /* wait for it to happen */ 1: cmpi cr0, r10, 1 bne cr0, 1b /* lock icache */ mfspr r10, SPRN_HID0 ori r10, r10, 0x2000 sync; isync; mtspr SPRN_HID0, r10 sync; isync; mflr r9 /* save LR */ /* jump to sram */ mtlr r3 blrl mtlr r9 /* restore LR */ /* unlock icache */ mfspr r10, SPRN_HID0 ori r10, r10, 0x2000 xori r10, r10, 0x2000 sync; isync; mtspr SPRN_HID0, r10 sync; isync; /* return to C code */ blr _GLOBAL(mpc52xx_ds_sram) mpc52xx_ds_sram: /* put SDRAM into self-refresh */ lwz r8, 0x4(r4) /* sdram->ctrl */ oris r8, r8, 0x8000 /* mode_en */ stw r8, 0x4(r4) sync ori r8, r8, 0x0002 /* soft_pre */ stw r8, 0x4(r4) sync xori r8, r8, 0x0002 xoris r8, r8, 0x8000 /* !mode_en */ stw r8, 0x4(r4) sync oris r8, r8, 0x5000 xoris r8, r8, 0x4000 /* ref_en !cke */ stw r8, 0x4(r4) sync /* disable SDRAM clock */ lwz r8, 0x14(r5) /* cdm->clkenable */ ori r8, r8, 0x0008 xori r8, r8, 0x0008 stw r8, 0x14(r5) sync /* put mpc5200 to sleep */ mfmsr r10 oris r10, r10, 0x0004 /* POW = 1 */ sync; isync; mtmsr r10 sync; isync; /* enable clock */ lwz r8, 0x14(r5) ori r8, r8, 0x0008 stw r8, 0x14(r5) sync /* get ram out of self-refresh */ lwz r8, 0x4(r4) oris r8, r8, 0x5000 /* cke ref_en */ stw r8, 0x4(r4) sync blr _GLOBAL(mpc52xx_ds_sram_size) mpc52xx_ds_sram_size: .long $-mpc52xx_ds_sram /* ### interrupt handler for wakeup from deep-sleep ### */ _GLOBAL(mpc52xx_ds_cached) mpc52xx_ds_cached: mtspr SPRN_SPRG0, r7 mtspr SPRN_SPRG1, r8 /* disable emulated interrupt */ mfspr r7, 311 /* MBAR */ addi r7, r7, 0x540 /* intr->main_emul */ li r8, 0 stw r8, 0(r7) sync dcbf 0, r7 /* acknowledge wakeup, so CCS releases power down */ mfspr r7, 311 /* MBAR */ addi r7, r7, 0x524 /* intr->enc_status */ lwz r8, 0(r7) ori r8, r8, 0x0400 stw r8, 0(r7) sync dcbf 0, r7 /* flag - we handled the interrupt */ li r10, 1 mfspr r8, SPRN_SPRG1 mfspr r7, SPRN_SPRG0 rfi _GLOBAL(mpc52xx_ds_cached_size) mpc52xx_ds_cached_size: .long $-mpc52xx_ds_cached
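Several places above clear bits with the pair ori/xori rather than an and-with-complement: OR forces the bits to 1, then XOR with the same mask flips exactly those bits back to 0 (useful when the mask fits an immediate but its complement does not). A one-function C demonstration:

#include <stdint.h>
#include <stdio.h>

/* Same effect as "ori r8,r8,MASK; xori r8,r8,MASK". */
static uint32_t clear_bits(uint32_t v, uint32_t mask)
{
	v |= mask;	/* ori: force the bits set */
	v ^= mask;	/* xori: flip exactly those bits off */
	return v;
}

int main(void)
{
	printf("0x%x\n", clear_bits(0x1235, 0x0001));	/* prints 0x1234 */
	return 0;
}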
aixcc-public/challenge-001-exemplar-source
14,560
arch/powerpc/platforms/ps3/hvcall.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * PS3 hvcall interface. * * Copyright (C) 2006 Sony Computer Entertainment Inc. * Copyright 2006 Sony Corp. * Copyright 2003, 2004 (c) MontaVista Software, Inc. */ #include <asm/processor.h> #include <asm/ppc_asm.h> #define lv1call .long 0x44000022; extsw r3, r3 #define LV1_N_IN_0_OUT(API_NAME, API_NUMBER) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ std r0, 16(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ ld r0, 16(r1); \ mtlr r0; \ blr #define LV1_0_IN_0_OUT LV1_N_IN_0_OUT #define LV1_1_IN_0_OUT LV1_N_IN_0_OUT #define LV1_2_IN_0_OUT LV1_N_IN_0_OUT #define LV1_3_IN_0_OUT LV1_N_IN_0_OUT #define LV1_4_IN_0_OUT LV1_N_IN_0_OUT #define LV1_5_IN_0_OUT LV1_N_IN_0_OUT #define LV1_6_IN_0_OUT LV1_N_IN_0_OUT #define LV1_7_IN_0_OUT LV1_N_IN_0_OUT #define LV1_0_IN_1_OUT(API_NAME, API_NUMBER) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ std r0, 16(r1); \ \ stdu r3, -8(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ addi r1, r1, 8; \ ld r11, -8(r1); \ std r4, 0(r11); \ \ ld r0, 16(r1); \ mtlr r0; \ blr #define LV1_0_IN_2_OUT(API_NAME, API_NUMBER) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ std r0, 16(r1); \ \ std r3, -8(r1); \ stdu r4, -16(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ addi r1, r1, 16; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ std r5, 0(r11); \ \ ld r0, 16(r1); \ mtlr r0; \ blr #define LV1_0_IN_3_OUT(API_NAME, API_NUMBER) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ std r0, 16(r1); \ \ std r3, -8(r1); \ std r4, -16(r1); \ stdu r5, -24(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ addi r1, r1, 24; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ std r5, 0(r11); \ ld r11, -24(r1); \ std r6, 0(r11); \ \ ld r0, 16(r1); \ mtlr r0; \ blr #define LV1_0_IN_7_OUT(API_NAME, API_NUMBER) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ std r0, 16(r1); \ \ std r3, -8(r1); \ std r4, -16(r1); \ std r5, -24(r1); \ std r6, -32(r1); \ std r7, -40(r1); \ std r8, -48(r1); \ stdu r9, -56(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ addi r1, r1, 56; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ std r5, 0(r11); \ ld r11, -24(r1); \ std r6, 0(r11); \ ld r11, -32(r1); \ std r7, 0(r11); \ ld r11, -40(r1); \ std r8, 0(r11); \ ld r11, -48(r1); \ std r9, 0(r11); \ ld r11, -56(r1); \ std r10, 0(r11); \ \ ld r0, 16(r1); \ mtlr r0; \ blr #define LV1_1_IN_1_OUT(API_NAME, API_NUMBER) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ std r0, 16(r1); \ \ stdu r4, -8(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ addi r1, r1, 8; \ ld r11, -8(r1); \ std r4, 0(r11); \ \ ld r0, 16(r1); \ mtlr r0; \ blr #define LV1_1_IN_2_OUT(API_NAME, API_NUMBER) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ std r0, 16(r1); \ \ std r4, -8(r1); \ stdu r5, -16(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ addi r1, r1, 16; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ std r5, 0(r11); \ \ ld r0, 16(r1); \ mtlr r0; \ blr #define LV1_1_IN_3_OUT(API_NAME, API_NUMBER) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ std r0, 16(r1); \ \ std r4, -8(r1); \ std r5, -16(r1); \ stdu r6, -24(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ addi r1, r1, 24; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ std r5, 0(r11); \ ld r11, -24(r1); \ std r6, 0(r11); \ \ ld r0, 16(r1); \ mtlr r0; \ blr #define LV1_1_IN_4_OUT(API_NAME, API_NUMBER) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ std r0, 16(r1); \ \ std r4, -8(r1); \ std r5, -16(r1); \ std r6, -24(r1); \ stdu r7, -32(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ addi r1, r1, 32; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ std r5, 0(r11); \ ld r11, -24(r1); \ std r6, 0(r11); \ ld r11, -32(r1); \ 
std r7, 0(r11); \ \ ld r0, 16(r1); \ mtlr r0; \ blr #define LV1_1_IN_5_OUT(API_NAME, API_NUMBER) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ std r0, 16(r1); \ \ std r4, -8(r1); \ std r5, -16(r1); \ std r6, -24(r1); \ std r7, -32(r1); \ stdu r8, -40(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ addi r1, r1, 40; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ std r5, 0(r11); \ ld r11, -24(r1); \ std r6, 0(r11); \ ld r11, -32(r1); \ std r7, 0(r11); \ ld r11, -40(r1); \ std r8, 0(r11); \ \ ld r0, 16(r1); \ mtlr r0; \ blr #define LV1_1_IN_6_OUT(API_NAME, API_NUMBER) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ std r0, 16(r1); \ \ std r4, -8(r1); \ std r5, -16(r1); \ std r6, -24(r1); \ std r7, -32(r1); \ std r8, -40(r1); \ stdu r9, -48(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ addi r1, r1, 48; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ std r5, 0(r11); \ ld r11, -24(r1); \ std r6, 0(r11); \ ld r11, -32(r1); \ std r7, 0(r11); \ ld r11, -40(r1); \ std r8, 0(r11); \ ld r11, -48(r1); \ std r9, 0(r11); \ \ ld r0, 16(r1); \ mtlr r0; \ blr #define LV1_1_IN_7_OUT(API_NAME, API_NUMBER) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ std r0, 16(r1); \ \ std r4, -8(r1); \ std r5, -16(r1); \ std r6, -24(r1); \ std r7, -32(r1); \ std r8, -40(r1); \ std r9, -48(r1); \ stdu r10, -56(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ addi r1, r1, 56; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ std r5, 0(r11); \ ld r11, -24(r1); \ std r6, 0(r11); \ ld r11, -32(r1); \ std r7, 0(r11); \ ld r11, -40(r1); \ std r8, 0(r11); \ ld r11, -48(r1); \ std r9, 0(r11); \ ld r11, -56(r1); \ std r10, 0(r11); \ \ ld r0, 16(r1); \ mtlr r0; \ blr #define LV1_2_IN_1_OUT(API_NAME, API_NUMBER) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ std r0, 16(r1); \ \ stdu r5, -8(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ addi r1, r1, 8; \ ld r11, -8(r1); \ std r4, 0(r11); \ \ ld r0, 16(r1); \ mtlr r0; \ blr #define LV1_2_IN_2_OUT(API_NAME, API_NUMBER) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ std r0, 16(r1); \ \ std r5, -8(r1); \ stdu r6, -16(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ addi r1, r1, 16; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ std r5, 0(r11); \ \ ld r0, 16(r1); \ mtlr r0; \ blr #define LV1_2_IN_3_OUT(API_NAME, API_NUMBER) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ std r0, 16(r1); \ \ std r5, -8(r1); \ std r6, -16(r1); \ stdu r7, -24(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ addi r1, r1, 24; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ std r5, 0(r11); \ ld r11, -24(r1); \ std r6, 0(r11); \ \ ld r0, 16(r1); \ mtlr r0; \ blr #define LV1_2_IN_4_OUT(API_NAME, API_NUMBER) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ std r0, 16(r1); \ \ std r5, -8(r1); \ std r6, -16(r1); \ std r7, -24(r1); \ stdu r8, -32(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ addi r1, r1, 32; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ std r5, 0(r11); \ ld r11, -24(r1); \ std r6, 0(r11); \ ld r11, -32(r1); \ std r7, 0(r11); \ \ ld r0, 16(r1); \ mtlr r0; \ blr #define LV1_2_IN_5_OUT(API_NAME, API_NUMBER) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ std r0, 16(r1); \ \ std r5, -8(r1); \ std r6, -16(r1); \ std r7, -24(r1); \ std r8, -32(r1); \ stdu r9, -40(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ addi r1, r1, 40; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ std r5, 0(r11); \ ld r11, -24(r1); \ std r6, 0(r11); \ ld r11, -32(r1); \ std r7, 0(r11); \ ld r11, -40(r1); \ std r8, 0(r11); \ \ ld r0, 16(r1); \ mtlr r0; \ blr #define LV1_3_IN_1_OUT(API_NAME, API_NUMBER) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ std r0, 16(r1); \ \ stdu r6, 
-8(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ addi r1, r1, 8; \ ld r11, -8(r1); \ std r4, 0(r11); \ \ ld r0, 16(r1); \ mtlr r0; \ blr #define LV1_3_IN_2_OUT(API_NAME, API_NUMBER) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ std r0, 16(r1); \ \ std r6, -8(r1); \ stdu r7, -16(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ addi r1, r1, 16; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ std r5, 0(r11); \ \ ld r0, 16(r1); \ mtlr r0; \ blr #define LV1_3_IN_3_OUT(API_NAME, API_NUMBER) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ std r0, 16(r1); \ \ std r6, -8(r1); \ std r7, -16(r1); \ stdu r8, -24(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ addi r1, r1, 24; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ std r5, 0(r11); \ ld r11, -24(r1); \ std r6, 0(r11); \ \ ld r0, 16(r1); \ mtlr r0; \ blr #define LV1_4_IN_1_OUT(API_NAME, API_NUMBER) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ std r0, 16(r1); \ \ stdu r7, -8(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ addi r1, r1, 8; \ ld r11, -8(r1); \ std r4, 0(r11); \ \ ld r0, 16(r1); \ mtlr r0; \ blr #define LV1_4_IN_2_OUT(API_NAME, API_NUMBER) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ std r0, 16(r1); \ \ std r7, -8(r1); \ stdu r8, -16(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ addi r1, r1, 16; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ std r5, 0(r11); \ \ ld r0, 16(r1); \ mtlr r0; \ blr #define LV1_4_IN_3_OUT(API_NAME, API_NUMBER) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ std r0, 16(r1); \ \ std r7, -8(r1); \ std r8, -16(r1); \ stdu r9, -24(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ addi r1, r1, 24; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ std r5, 0(r11); \ ld r11, -24(r1); \ std r6, 0(r11); \ \ ld r0, 16(r1); \ mtlr r0; \ blr #define LV1_5_IN_1_OUT(API_NAME, API_NUMBER) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ std r0, 16(r1); \ \ stdu r8, -8(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ addi r1, r1, 8; \ ld r11, -8(r1); \ std r4, 0(r11); \ \ ld r0, 16(r1); \ mtlr r0; \ blr #define LV1_5_IN_2_OUT(API_NAME, API_NUMBER) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ std r0, 16(r1); \ \ std r8, -8(r1); \ stdu r9, -16(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ addi r1, r1, 16; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ std r5, 0(r11); \ \ ld r0, 16(r1); \ mtlr r0; \ blr #define LV1_5_IN_3_OUT(API_NAME, API_NUMBER) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ std r0, 16(r1); \ \ std r8, -8(r1); \ std r9, -16(r1); \ stdu r10, -24(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ addi r1, r1, 24; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ std r5, 0(r11); \ ld r11, -24(r1); \ std r6, 0(r11); \ \ ld r0, 16(r1); \ mtlr r0; \ blr #define LV1_6_IN_1_OUT(API_NAME, API_NUMBER) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ std r0, 16(r1); \ \ stdu r9, -8(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ addi r1, r1, 8; \ ld r11, -8(r1); \ std r4, 0(r11); \ \ ld r0, 16(r1); \ mtlr r0; \ blr #define LV1_6_IN_2_OUT(API_NAME, API_NUMBER) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ std r0, 16(r1); \ \ std r9, -8(r1); \ stdu r10, -16(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ addi r1, r1, 16; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ std r5, 0(r11); \ \ ld r0, 16(r1); \ mtlr r0; \ blr #define LV1_6_IN_3_OUT(API_NAME, API_NUMBER) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ std r0, 16(r1); \ \ std r9, -8(r1); \ stdu r10, -16(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ addi r1, r1, 16; \ ld r11, -8(r1); \ std r4, 0(r11); \ ld r11, -16(r1); \ std r5, 0(r11); \ ld r11, 48+8*8(r1); \ std r6, 0(r11); \ \ ld r0, 16(r1); \ mtlr r0; \ blr #define LV1_7_IN_1_OUT(API_NAME, 
API_NUMBER) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ std r0, 16(r1); \ \ stdu r10, -8(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ addi r1, r1, 8; \ ld r11, -8(r1); \ std r4, 0(r11); \ \ ld r0, 16(r1); \ mtlr r0; \ blr #define LV1_7_IN_6_OUT(API_NAME, API_NUMBER) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ std r0, 16(r1); \ \ std r10, 48+8*7(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ ld r11, 48+8*7(r1); \ std r4, 0(r11); \ ld r11, 48+8*8(r1); \ std r5, 0(r11); \ ld r11, 48+8*9(r1); \ std r6, 0(r11); \ ld r11, 48+8*10(r1); \ std r7, 0(r11); \ ld r11, 48+8*11(r1); \ std r8, 0(r11); \ ld r11, 48+8*12(r1); \ std r9, 0(r11); \ \ ld r0, 16(r1); \ mtlr r0; \ blr #define LV1_8_IN_1_OUT(API_NAME, API_NUMBER) \ _GLOBAL(_##API_NAME) \ \ mflr r0; \ std r0, 16(r1); \ \ li r11, API_NUMBER; \ lv1call; \ \ ld r11, 48+8*8(r1); \ std r4, 0(r11); \ \ ld r0, 16(r1); \ mtlr r0; \ blr .text /* the lv1 underscored call definitions expand here */ #define LV1_CALL(name, in, out, num) LV1_##in##_IN_##out##_OUT(lv1_##name, num) #include <asm/lv1call.h>
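Each LV1_*_OUT stub stashes the caller's output pointers on the stack, makes the hypercall, then stores the result registers (r4, r5, ...) through those pointers and returns the status from r3. A C-shaped sketch of that contract; the "hypervisor" below is faked so the example is self-contained:

#include <stdio.h>

/* Fake lv1call: status in r3, results in r4/r5 (modelled as pointers). */
static long fake_lv1call(unsigned long in1, unsigned long *r4, unsigned long *r5)
{
	*r4 = in1 + 1;
	*r5 = in1 + 2;
	return 0;	/* status, as returned in r3 */
}

/* Shape of an LV1_1_IN_2_OUT wrapper. */
static long lv1_demo_call(unsigned long in1,
			  unsigned long *out1, unsigned long *out2)
{
	unsigned long r4, r5;
	long status = fake_lv1call(in1, &r4, &r5);

	*out1 = r4;	/* ld r11,-8(r1);  std r4,0(r11) */
	*out2 = r5;	/* ld r11,-16(r1); std r5,0(r11) */
	return status;
}

int main(void)
{
	unsigned long a, b;

	printf("status=%ld a=%lu b=%lu\n", lv1_demo_call(41, &a, &b), a, b);
	return 0;
}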
aixcc-public/challenge-001-exemplar-source
6,925
arch/powerpc/platforms/pseries/hvCall.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * This file contains the generic code to perform a call to the * pSeries LPAR hypervisor. */ #include <linux/jump_label.h> #include <asm/hvcall.h> #include <asm/processor.h> #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> #include <asm/ptrace.h> #include <asm/feature-fixups.h> .section ".text" #ifdef CONFIG_TRACEPOINTS #ifndef CONFIG_JUMP_LABEL .data .globl hcall_tracepoint_refcount hcall_tracepoint_refcount: .8byte 0 .section ".text" #endif /* * precall must preserve all registers. use unused STK_PARAM() * areas to save snapshots and opcode. */ #define HCALL_INST_PRECALL(FIRST_REG) \ mflr r0; \ std r3,STK_PARAM(R3)(r1); \ std r4,STK_PARAM(R4)(r1); \ std r5,STK_PARAM(R5)(r1); \ std r6,STK_PARAM(R6)(r1); \ std r7,STK_PARAM(R7)(r1); \ std r8,STK_PARAM(R8)(r1); \ std r9,STK_PARAM(R9)(r1); \ std r10,STK_PARAM(R10)(r1); \ std r0,16(r1); \ addi r4,r1,STK_PARAM(FIRST_REG); \ stdu r1,-STACK_FRAME_OVERHEAD(r1); \ bl __trace_hcall_entry; \ ld r3,STACK_FRAME_OVERHEAD+STK_PARAM(R3)(r1); \ ld r4,STACK_FRAME_OVERHEAD+STK_PARAM(R4)(r1); \ ld r5,STACK_FRAME_OVERHEAD+STK_PARAM(R5)(r1); \ ld r6,STACK_FRAME_OVERHEAD+STK_PARAM(R6)(r1); \ ld r7,STACK_FRAME_OVERHEAD+STK_PARAM(R7)(r1); \ ld r8,STACK_FRAME_OVERHEAD+STK_PARAM(R8)(r1); \ ld r9,STACK_FRAME_OVERHEAD+STK_PARAM(R9)(r1); \ ld r10,STACK_FRAME_OVERHEAD+STK_PARAM(R10)(r1) /* * postcall is performed immediately before function return which * allows liberal use of volatile registers. */ #define __HCALL_INST_POSTCALL \ ld r0,STACK_FRAME_OVERHEAD+STK_PARAM(R3)(r1); \ std r3,STACK_FRAME_OVERHEAD+STK_PARAM(R3)(r1); \ mr r4,r3; \ mr r3,r0; \ bl __trace_hcall_exit; \ ld r0,STACK_FRAME_OVERHEAD+16(r1); \ addi r1,r1,STACK_FRAME_OVERHEAD; \ ld r3,STK_PARAM(R3)(r1); \ mtlr r0 #define HCALL_INST_POSTCALL_NORETS \ li r5,0; \ __HCALL_INST_POSTCALL #define HCALL_INST_POSTCALL(BUFREG) \ mr r5,BUFREG; \ __HCALL_INST_POSTCALL #ifdef CONFIG_JUMP_LABEL #define HCALL_BRANCH(LABEL) \ ARCH_STATIC_BRANCH(LABEL, hcall_tracepoint_key) #else /* * We branch around this in early init (eg when populating the MMU * hashtable) by using an unconditional cpu feature. 
*/ #define HCALL_BRANCH(LABEL) \ BEGIN_FTR_SECTION; \ b 1f; \ END_FTR_SECTION(0, 1); \ LOAD_REG_ADDR(r12, hcall_tracepoint_refcount) ; \ ld r12,0(r12); \ std r12,32(r1); \ cmpdi r12,0; \ bne- LABEL; \ 1: #endif #else #define HCALL_INST_PRECALL(FIRST_ARG) #define HCALL_INST_POSTCALL_NORETS #define HCALL_INST_POSTCALL(BUFREG) #define HCALL_BRANCH(LABEL) #endif _GLOBAL_TOC(plpar_hcall_norets_notrace) HMT_MEDIUM mfcr r0 stw r0,8(r1) HVSC /* invoke the hypervisor */ li r4,0 stb r4,PACASRR_VALID(r13) lwz r0,8(r1) mtcrf 0xff,r0 blr /* return r3 = status */ _GLOBAL_TOC(plpar_hcall_norets) HMT_MEDIUM mfcr r0 stw r0,8(r1) HCALL_BRANCH(plpar_hcall_norets_trace) HVSC /* invoke the hypervisor */ li r4,0 stb r4,PACASRR_VALID(r13) lwz r0,8(r1) mtcrf 0xff,r0 blr /* return r3 = status */ #ifdef CONFIG_TRACEPOINTS plpar_hcall_norets_trace: HCALL_INST_PRECALL(R4) HVSC HCALL_INST_POSTCALL_NORETS li r4,0 stb r4,PACASRR_VALID(r13) lwz r0,8(r1) mtcrf 0xff,r0 blr #endif _GLOBAL_TOC(plpar_hcall) HMT_MEDIUM mfcr r0 stw r0,8(r1) HCALL_BRANCH(plpar_hcall_trace) std r4,STK_PARAM(R4)(r1) /* Save ret buffer */ mr r4,r5 mr r5,r6 mr r6,r7 mr r7,r8 mr r8,r9 mr r9,r10 HVSC /* invoke the hypervisor */ ld r12,STK_PARAM(R4)(r1) std r4, 0(r12) std r5, 8(r12) std r6, 16(r12) std r7, 24(r12) li r4,0 stb r4,PACASRR_VALID(r13) lwz r0,8(r1) mtcrf 0xff,r0 blr /* return r3 = status */ #ifdef CONFIG_TRACEPOINTS plpar_hcall_trace: HCALL_INST_PRECALL(R5) std r4,STK_PARAM(R4)(r1) mr r0,r4 mr r4,r5 mr r5,r6 mr r6,r7 mr r7,r8 mr r8,r9 mr r9,r10 HVSC ld r12,STK_PARAM(R4)(r1) std r4,0(r12) std r5,8(r12) std r6,16(r12) std r7,24(r12) HCALL_INST_POSTCALL(r12) li r4,0 stb r4,PACASRR_VALID(r13) lwz r0,8(r1) mtcrf 0xff,r0 blr #endif /* * plpar_hcall_raw can be called in real mode. kexec/kdump need some * hypervisor calls to be executed in real mode. So plpar_hcall_raw * does not access the per cpu hypervisor call statistics variables, * since these variables may not be present in the RMO region. 
*/ _GLOBAL(plpar_hcall_raw) HMT_MEDIUM mfcr r0 stw r0,8(r1) std r4,STK_PARAM(R4)(r1) /* Save ret buffer */ mr r4,r5 mr r5,r6 mr r6,r7 mr r7,r8 mr r8,r9 mr r9,r10 HVSC /* invoke the hypervisor */ ld r12,STK_PARAM(R4)(r1) std r4, 0(r12) std r5, 8(r12) std r6, 16(r12) std r7, 24(r12) li r4,0 stb r4,PACASRR_VALID(r13) lwz r0,8(r1) mtcrf 0xff,r0 blr /* return r3 = status */ _GLOBAL_TOC(plpar_hcall9) HMT_MEDIUM mfcr r0 stw r0,8(r1) HCALL_BRANCH(plpar_hcall9_trace) std r4,STK_PARAM(R4)(r1) /* Save ret buffer */ mr r4,r5 mr r5,r6 mr r6,r7 mr r7,r8 mr r8,r9 mr r9,r10 ld r10,STK_PARAM(R11)(r1) /* put arg7 in R10 */ ld r11,STK_PARAM(R12)(r1) /* put arg8 in R11 */ ld r12,STK_PARAM(R13)(r1) /* put arg9 in R12 */ HVSC /* invoke the hypervisor */ mr r0,r12 ld r12,STK_PARAM(R4)(r1) std r4, 0(r12) std r5, 8(r12) std r6, 16(r12) std r7, 24(r12) std r8, 32(r12) std r9, 40(r12) std r10,48(r12) std r11,56(r12) std r0, 64(r12) li r4,0 stb r4,PACASRR_VALID(r13) lwz r0,8(r1) mtcrf 0xff,r0 blr /* return r3 = status */ #ifdef CONFIG_TRACEPOINTS plpar_hcall9_trace: HCALL_INST_PRECALL(R5) std r4,STK_PARAM(R4)(r1) mr r0,r4 mr r4,r5 mr r5,r6 mr r6,r7 mr r7,r8 mr r8,r9 mr r9,r10 ld r10,STACK_FRAME_OVERHEAD+STK_PARAM(R11)(r1) ld r11,STACK_FRAME_OVERHEAD+STK_PARAM(R12)(r1) ld r12,STACK_FRAME_OVERHEAD+STK_PARAM(R13)(r1) HVSC mr r0,r12 ld r12,STACK_FRAME_OVERHEAD+STK_PARAM(R4)(r1) std r4,0(r12) std r5,8(r12) std r6,16(r12) std r7,24(r12) std r8,32(r12) std r9,40(r12) std r10,48(r12) std r11,56(r12) std r0,64(r12) HCALL_INST_POSTCALL(r12) li r4,0 stb r4,PACASRR_VALID(r13) lwz r0,8(r1) mtcrf 0xff,r0 blr #endif /* See plpar_hcall_raw to see why this is needed */ _GLOBAL(plpar_hcall9_raw) HMT_MEDIUM mfcr r0 stw r0,8(r1) std r4,STK_PARAM(R4)(r1) /* Save ret buffer */ mr r4,r5 mr r5,r6 mr r6,r7 mr r7,r8 mr r8,r9 mr r9,r10 ld r10,STK_PARAM(R11)(r1) /* put arg7 in R10 */ ld r11,STK_PARAM(R12)(r1) /* put arg8 in R11 */ ld r12,STK_PARAM(R13)(r1) /* put arg9 in R12 */ HVSC /* invoke the hypervisor */ mr r0,r12 ld r12,STK_PARAM(R4)(r1) std r4, 0(r12) std r5, 8(r12) std r6, 16(r12) std r7, 24(r12) std r8, 32(r12) std r9, 40(r12) std r10,48(r12) std r11,56(r12) std r0, 64(r12) li r4,0 stb r4,PACASRR_VALID(r13) lwz r0,8(r1) mtcrf 0xff,r0 blr /* return r3 = status */
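From C, the plpar_hcall-style wrappers take an opcode plus a return buffer that the assembly fills from r4..r7 after HVSC (nine slots for the hcall9 variants). A self-contained sketch of that contract with a faked hypervisor; the opcode value is fabricated:

#include <stdio.h>

#define PLPAR_HCALL_BUFSIZE	4	/* slots filled by plpar_hcall */

static long fake_plpar_hcall(unsigned long opcode, unsigned long *retbuf,
			     unsigned long arg1)
{
	(void)opcode;
	for (int i = 0; i < PLPAR_HCALL_BUFSIZE; i++)
		retbuf[i] = arg1 + i;	/* std r4..r7, 0..24(r12) */
	return 0;			/* status in r3 */
}

int main(void)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = { 0 };
	long rc = fake_plpar_hcall(0x04 /* fabricated opcode */, retbuf, 100);

	printf("rc=%ld r4=%lu r7=%lu\n", rc, retbuf[0], retbuf[3]);
	return 0;
}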
aixcc-public/challenge-001-exemplar-source
7,369
arch/powerpc/platforms/powermac/cache.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * This file contains low-level cache management functions * used for sleep and CPU speed changes on Apple machines. * (In fact the only thing that is Apple-specific is that we assume * that we can read from ROM at physical address 0xfff00000.) * * Copyright (C) 2004 Paul Mackerras (paulus@samba.org) and * Benjamin Herrenschmidt (benh@kernel.crashing.org) */ #include <asm/processor.h> #include <asm/ppc_asm.h> #include <asm/cputable.h> #include <asm/feature-fixups.h> /* * Flush and disable all data caches (dL1, L2, L3). This is used * when going to sleep, when doing a PMU based cpufreq transition, * or when "offlining" a CPU on SMP machines. This code is over * paranoid, but I've had enough issues with various CPU revs and * bugs that I decided it was worth being over cautious */ _GLOBAL(flush_disable_caches) #ifndef CONFIG_PPC_BOOK3S_32 blr #else BEGIN_FTR_SECTION b flush_disable_745x END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450) BEGIN_FTR_SECTION b flush_disable_75x END_FTR_SECTION_IFSET(CPU_FTR_L2CR) b __flush_disable_L1 /* This is the code for G3 and 74[01]0 */ flush_disable_75x: mflr r10 /* Turn off EE and DR in MSR */ mfmsr r11 rlwinm r0,r11,0,~MSR_EE rlwinm r0,r0,0,~MSR_DR sync mtmsr r0 isync /* Stop DST streams */ BEGIN_FTR_SECTION PPC_DSSALL sync END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) /* Stop DPM */ mfspr r8,SPRN_HID0 /* Save SPRN_HID0 in r8 */ rlwinm r4,r8,0,12,10 /* Turn off HID0[DPM] */ sync mtspr SPRN_HID0,r4 /* Disable DPM */ sync /* Disp-flush L1. We have a weird problem here that I never * totally figured out. On 750FX, using the ROM for the flush * results in a non-working flush. We use that workaround for * now until I finally understand what's going on. --BenH */ /* ROM base by default */ lis r4,0xfff0 mfpvr r3 srwi r3,r3,16 cmplwi cr0,r3,0x7000 bne+ 1f /* RAM base on 750FX */ li r4,0 1: li r3,0x4000 mtctr r3 1: lwz r0,0(r4) addi r4,r4,32 bdnz 1b sync isync /* Disable / invalidate / enable L1 data */ mfspr r3,SPRN_HID0 rlwinm r3,r3,0,~(HID0_DCE | HID0_ICE) mtspr SPRN_HID0,r3 sync isync ori r3,r3,(HID0_DCE|HID0_DCI|HID0_ICE|HID0_ICFI) sync isync mtspr SPRN_HID0,r3 xori r3,r3,(HID0_DCI|HID0_ICFI) mtspr SPRN_HID0,r3 sync /* Get the current enable bit of the L2CR into r5 */ mfspr r5,SPRN_L2CR /* Set to data-only (pre-745x bit) */ oris r3,r5,L2CR_L2DO@h b 2f /* When disabling L2, code must be in L1 */ .balign 32 1: mtspr SPRN_L2CR,r3 3: sync isync b 1f 2: b 3f 3: sync isync b 1b 1: /* disp-flush L2. The interesting thing here is that the L2 can be * up to 2Mb ... so using the ROM, we'll end up wrapping back to memory * but that is probably fine. We disp-flush over 4Mb to be safe */ lis r4,2 mtctr r4 lis r4,0xfff0 1: lwz r0,0(r4) addi r4,r4,32 bdnz 1b sync isync lis r4,2 mtctr r4 lis r4,0xfff0 1: dcbf 0,r4 addi r4,r4,32 bdnz 1b sync isync /* now disable L2 */ rlwinm r5,r5,0,~L2CR_L2E b 2f /* When disabling L2, code must be in L1 */ .balign 32 1: mtspr SPRN_L2CR,r5 3: sync isync b 1f 2: b 3f 3: sync isync b 1b 1: sync isync /* Invalidate L2. This is pre-745x, we clear the L2I bit ourselves */ oris r4,r5,L2CR_L2I@h mtspr SPRN_L2CR,r4 sync isync /* Wait for the invalidation to complete */ 1: mfspr r3,SPRN_L2CR rlwinm.
r0,r3,0,31,31 bne 1b /* Clear L2I */ xoris r4,r4,L2CR_L2I@h sync mtspr SPRN_L2CR,r4 sync /* now disable the L1 data cache */ mfspr r0,SPRN_HID0 rlwinm r0,r0,0,~(HID0_DCE|HID0_ICE) mtspr SPRN_HID0,r0 sync isync /* Restore HID0[DPM] to whatever it was before */ sync mfspr r0,SPRN_HID0 rlwimi r0,r8,0,11,11 /* Turn back HID0[DPM] */ mtspr SPRN_HID0,r0 sync /* restore DR and EE */ sync mtmsr r11 isync mtlr r10 blr _ASM_NOKPROBE_SYMBOL(flush_disable_75x) /* This code is for 745x processors */ flush_disable_745x: /* Turn off EE and DR in MSR */ mfmsr r11 rlwinm r0,r11,0,~MSR_EE rlwinm r0,r0,0,~MSR_DR sync mtmsr r0 isync /* Stop prefetch streams */ PPC_DSSALL sync /* Disable L2 prefetching */ mfspr r0,SPRN_MSSCR0 rlwinm r0,r0,0,0,29 mtspr SPRN_MSSCR0,r0 sync isync lis r4,0 dcbf 0,r4 dcbf 0,r4 dcbf 0,r4 dcbf 0,r4 dcbf 0,r4 dcbf 0,r4 dcbf 0,r4 dcbf 0,r4 /* Due to a bug with the HW flush on some CPU revs, we occasionally * experience data corruption. I'm adding a displacement flush along * with a dcbf loop over a few Mb to "help". The problem isn't totally * fixed by this in theory, but at least, in practice, I couldn't reproduce * it even with a big hammer... */ lis r4,0x0002 mtctr r4 li r4,0 1: lwz r0,0(r4) addi r4,r4,32 /* Go to start of next cache line */ bdnz 1b isync /* Now, flush the first 4MB of memory */ lis r4,0x0002 mtctr r4 li r4,0 sync 1: dcbf 0,r4 addi r4,r4,32 /* Go to start of next cache line */ bdnz 1b /* Flush and disable the L1 data cache */ mfspr r6,SPRN_LDSTCR lis r3,0xfff0 /* read from ROM for displacement flush */ li r4,0xfe /* start with only way 0 unlocked */ li r5,128 /* 128 lines in each way */ 1: mtctr r5 rlwimi r6,r4,0,24,31 mtspr SPRN_LDSTCR,r6 sync isync 2: lwz r0,0(r3) /* touch each cache line */ addi r3,r3,32 bdnz 2b rlwinm r4,r4,1,24,30 /* move on to the next way */ ori r4,r4,1 cmpwi r4,0xff /* all done? */ bne 1b /* now unlock the L1 data cache */ li r4,0 rlwimi r6,r4,0,24,31 sync mtspr SPRN_LDSTCR,r6 sync isync /* Flush the L2 cache using the hardware assist */ mfspr r3,SPRN_L2CR cmpwi r3,0 /* check if it is enabled first */ bge 4f oris r0,r3,(L2CR_L2IO_745x|L2CR_L2DO_745x)@h b 2f /* When disabling/locking L2, code must be in L1 */ .balign 32 1: mtspr SPRN_L2CR,r0 /* lock the L2 cache */ 3: sync isync b 1f 2: b 3f 3: sync isync b 1b 1: sync isync ori r0,r3,L2CR_L2HWF_745x sync mtspr SPRN_L2CR,r0 /* set the hardware flush bit */ 3: mfspr r0,SPRN_L2CR /* wait for it to go to 0 */ andi. r0,r0,L2CR_L2HWF_745x bne 3b sync rlwinm r3,r3,0,~L2CR_L2E b 2f /* When disabling L2, code must be in L1 */ .balign 32 1: mtspr SPRN_L2CR,r3 /* disable the L2 cache */ 3: sync isync b 1f 2: b 3f 3: sync isync b 1b 1: sync isync oris r4,r3,L2CR_L2I@h mtspr SPRN_L2CR,r4 sync isync 1: mfspr r4,SPRN_L2CR andis. r0,r4,L2CR_L2I@h bne 1b sync BEGIN_FTR_SECTION /* Flush the L3 cache using the hardware assist */ 4: mfspr r3,SPRN_L3CR cmpwi r3,0 /* check if it is enabled */ bge 6f oris r0,r3,L3CR_L3IO@h ori r0,r0,L3CR_L3DO sync mtspr SPRN_L3CR,r0 /* lock the L3 cache */ sync isync ori r0,r0,L3CR_L3HWF sync mtspr SPRN_L3CR,r0 /* set the hardware flush bit */ 5: mfspr r0,SPRN_L3CR /* wait for it to go to zero */ andi. r0,r0,L3CR_L3HWF bne 5b rlwinm r3,r3,0,~L3CR_L3E sync mtspr SPRN_L3CR,r3 /* disable the L3 cache */ sync ori r4,r3,L3CR_L3I mtspr SPRN_L3CR,r4 1: mfspr r4,SPRN_L3CR andi. 
r0,r4,L3CR_L3I bne 1b sync END_FTR_SECTION_IFSET(CPU_FTR_L3CR) 6: mfspr r0,SPRN_HID0 /* now disable the L1 data cache */ rlwinm r0,r0,0,~HID0_DCE mtspr SPRN_HID0,r0 sync isync mtmsr r11 /* restore DR and EE */ isync blr _ASM_NOKPROBE_SYMBOL(flush_disable_745x) #endif /* CONFIG_PPC_BOOK3S_32 */
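The 745x L1 flush above walks the eight data-cache ways by moving a single zero bit through the low byte written to LDSTCR: 0xfe leaves only way 0 unlocked, and the rlwinm/ori pair advances the hole one way per pass until the value reaches 0xff. The loop arithmetic in C:

#include <stdio.h>

int main(void)
{
	unsigned int r4 = 0xfe;		/* start with only way 0 unlocked */

	do {
		printf("LDSTCR low byte: 0x%02x\n", r4);
		/* rlwinm r4,r4,1,24,30; ori r4,r4,1 */
		r4 = ((r4 << 1) & 0xfe) | 1;
	} while (r4 != 0xff);		/* cmpwi r4,0xff; bne 1b */
	return 0;	/* prints 0xfe, 0xfd, 0xfb, ... 0x7f: one pass per way */
}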
aixcc-public/challenge-001-exemplar-source
9,186
arch/powerpc/platforms/powermac/sleep.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * This file contains sleep low-level functions for PowerBook G3. * Copyright (C) 1999 Benjamin Herrenschmidt (benh@kernel.crashing.org) * and Paul Mackerras (paulus@samba.org). */ #include <asm/processor.h> #include <asm/page.h> #include <asm/ppc_asm.h> #include <asm/cputable.h> #include <asm/cache.h> #include <asm/thread_info.h> #include <asm/asm-offsets.h> #include <asm/mmu.h> #include <asm/feature-fixups.h> #define MAGIC 0x4c617273 /* 'Lars' */ /* * Structure for storing CPU registers on the stack. */ #define SL_SP 0 #define SL_PC 4 #define SL_MSR 8 #define SL_SDR1 0xc #define SL_SPRG0 0x10 /* 4 sprg's */ #define SL_DBAT0 0x20 #define SL_IBAT0 0x28 #define SL_DBAT1 0x30 #define SL_IBAT1 0x38 #define SL_DBAT2 0x40 #define SL_IBAT2 0x48 #define SL_DBAT3 0x50 #define SL_IBAT3 0x58 #define SL_DBAT4 0x60 #define SL_IBAT4 0x68 #define SL_DBAT5 0x70 #define SL_IBAT5 0x78 #define SL_DBAT6 0x80 #define SL_IBAT6 0x88 #define SL_DBAT7 0x90 #define SL_IBAT7 0x98 #define SL_TB 0xa0 #define SL_R2 0xa8 #define SL_CR 0xac #define SL_LR 0xb0 #define SL_R12 0xb4 /* r12 to r31 */ #define SL_SIZE (SL_R12 + 80) .section .text .align 5 #if defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ_PMAC) || \ (defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32)) /* This gets called by via-pmu.c late during the sleep process. * The PMU was already sent the sleep command and will shut us down * soon. We need to save all that is needed and set up the wakeup * vector that will be called by the ROM on wakeup */ _GLOBAL(low_sleep_handler) #ifndef CONFIG_PPC_BOOK3S_32 blr #else mflr r0 lis r11,sleep_storage@ha addi r11,r11,sleep_storage@l stw r0,SL_LR(r11) mfcr r0 stw r0,SL_CR(r11) stw r1,SL_SP(r11) stw r2,SL_R2(r11) stmw r12,SL_R12(r11) /* Save MSR & SDR1 */ mfmsr r4 stw r4,SL_MSR(r11) mfsdr1 r4 stw r4,SL_SDR1(r11) /* Get a stable timebase and save it */ 1: mftbu r4 stw r4,SL_TB(r11) mftb r5 stw r5,SL_TB+4(r11) mftbu r3 cmpw r3,r4 bne 1b /* Save SPRGs */ mfsprg r4,0 stw r4,SL_SPRG0(r11) mfsprg r4,1 stw r4,SL_SPRG0+4(r11) mfsprg r4,2 stw r4,SL_SPRG0+8(r11) mfsprg r4,3 stw r4,SL_SPRG0+12(r11) /* Save BATs */ mfdbatu r4,0 stw r4,SL_DBAT0(r11) mfdbatl r4,0 stw r4,SL_DBAT0+4(r11) mfdbatu r4,1 stw r4,SL_DBAT1(r11) mfdbatl r4,1 stw r4,SL_DBAT1+4(r11) mfdbatu r4,2 stw r4,SL_DBAT2(r11) mfdbatl r4,2 stw r4,SL_DBAT2+4(r11) mfdbatu r4,3 stw r4,SL_DBAT3(r11) mfdbatl r4,3 stw r4,SL_DBAT3+4(r11) mfibatu r4,0 stw r4,SL_IBAT0(r11) mfibatl r4,0 stw r4,SL_IBAT0+4(r11) mfibatu r4,1 stw r4,SL_IBAT1(r11) mfibatl r4,1 stw r4,SL_IBAT1+4(r11) mfibatu r4,2 stw r4,SL_IBAT2(r11) mfibatl r4,2 stw r4,SL_IBAT2+4(r11) mfibatu r4,3 stw r4,SL_IBAT3(r11) mfibatl r4,3 stw r4,SL_IBAT3+4(r11) BEGIN_MMU_FTR_SECTION mfspr r4,SPRN_DBAT4U stw r4,SL_DBAT4(r11) mfspr r4,SPRN_DBAT4L stw r4,SL_DBAT4+4(r11) mfspr r4,SPRN_DBAT5U stw r4,SL_DBAT5(r11) mfspr r4,SPRN_DBAT5L stw r4,SL_DBAT5+4(r11) mfspr r4,SPRN_DBAT6U stw r4,SL_DBAT6(r11) mfspr r4,SPRN_DBAT6L stw r4,SL_DBAT6+4(r11) mfspr r4,SPRN_DBAT7U stw r4,SL_DBAT7(r11) mfspr r4,SPRN_DBAT7L stw r4,SL_DBAT7+4(r11) mfspr r4,SPRN_IBAT4U stw r4,SL_IBAT4(r11) mfspr r4,SPRN_IBAT4L stw r4,SL_IBAT4+4(r11) mfspr r4,SPRN_IBAT5U stw r4,SL_IBAT5(r11) mfspr r4,SPRN_IBAT5L stw r4,SL_IBAT5+4(r11) mfspr r4,SPRN_IBAT6U stw r4,SL_IBAT6(r11) mfspr r4,SPRN_IBAT6L stw r4,SL_IBAT6+4(r11) mfspr r4,SPRN_IBAT7U stw r4,SL_IBAT7(r11) mfspr r4,SPRN_IBAT7L stw r4,SL_IBAT7+4(r11) END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) /* Backup various CPU config stuffs */ bl __save_cpu_setup /* The ROM can wake
us up via 2 different vectors: * - On wallstreet & lombard, we must write a magic * value 'Lars' at address 4 and a pointer to a * memory location containing the PC to resume from * at address 0. * - On Core99, we must store the wakeup vector at * address 0x80 and possibly its parameters * at address 0x84. I've had some trouble with those * parameters, however, and I no longer use them. */ lis r5,grackle_wake_up@ha addi r5,r5,grackle_wake_up@l tophys(r5,r5) stw r5,SL_PC(r11) lis r4,KERNELBASE@h tophys(r5,r11) addi r5,r5,SL_PC lis r6,MAGIC@ha addi r6,r6,MAGIC@l stw r5,0(r4) stw r6,4(r4) /* Setup stuffs at 0x80-0x84 for Core99 */ lis r3,core99_wake_up@ha addi r3,r3,core99_wake_up@l tophys(r3,r3) stw r3,0x80(r4) stw r5,0x84(r4) .globl low_cpu_offline_self low_cpu_offline_self: /* Flush & disable all caches */ bl flush_disable_caches /* Turn off data relocation. */ mfmsr r3 /* Save MSR in r7 */ rlwinm r3,r3,0,28,26 /* Turn off DR bit */ sync mtmsr r3 isync BEGIN_FTR_SECTION /* Flush any pending L2 data prefetches to work around HW bug */ sync lis r3,0xfff0 lwz r0,0(r3) /* perform cache-inhibited load to ROM */ sync /* (caches are disabled at this point) */ END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450) /* * Set the HID0 and MSR for sleep. */ mfspr r2,SPRN_HID0 rlwinm r2,r2,0,10,7 /* clear doze, nap */ oris r2,r2,HID0_SLEEP@h sync isync mtspr SPRN_HID0,r2 sync /* This loop puts us back to sleep in case we have a spurious * wakeup so that the host bridge properly stays asleep. The * CPU will be turned off, either after a known time (about 1 * second) on wallstreet & lombard, or as soon as the CPU enters * SLEEP mode on core99 */ mfmsr r2 oris r2,r2,MSR_POW@h 1: sync mtmsr r2 isync b 1b _ASM_NOKPROBE_SYMBOL(low_cpu_offline_self) /* * Here is the resume code. */ /* * Core99 machines resume here * r4 has the physical address of SL_PC(sp) (unused) */ _GLOBAL(core99_wake_up) /* Make sure HID0 no longer contains any sleep bit and that data cache * is disabled */ mfspr r3,SPRN_HID0 rlwinm r3,r3,0,11,7 /* clear SLEEP, NAP, DOZE bits */ rlwinm r3,r3,0,18,15 /* clear DCE, ICE */ mtspr SPRN_HID0,r3 sync isync /* sanitize MSR */ mfmsr r3 ori r3,r3,MSR_EE|MSR_IP xori r3,r3,MSR_EE|MSR_IP sync isync mtmsr r3 sync isync /* Recover sleep storage */ lis r3,sleep_storage@ha addi r3,r3,sleep_storage@l tophys(r3,r3) addi r1,r3,SL_PC /* Pass thru to older resume code ... */ _ASM_NOKPROBE_SYMBOL(core99_wake_up) /* * Here is the resume code for older machines. * r1 has the physical address of SL_PC(sp). */ grackle_wake_up: /* Restore the kernel's segment registers before * we do any r1 memory access as we are not sure they * are in a sane state above the first 256Mb region */ bl load_segment_registers sync isync subi r1,r1,SL_PC /* Restore various CPU config stuffs */ bl __restore_cpu_setup /* Make sure all FPRs have been initialized */ bl reloc_offset bl __init_fpu_registers /* Invalidate & enable L1 cache, we don't care about * whatever the ROM may have tried to write to memory */ bl __inval_enable_L1 /* Restore the BATs, and SDR1. Then we can turn on the MMU.
*/ lwz r4,SL_SDR1(r1) mtsdr1 r4 lwz r4,SL_SPRG0(r1) mtsprg 0,r4 lwz r4,SL_SPRG0+4(r1) mtsprg 1,r4 lwz r4,SL_SPRG0+8(r1) mtsprg 2,r4 lwz r4,SL_SPRG0+12(r1) mtsprg 3,r4 lwz r4,SL_DBAT0(r1) mtdbatu 0,r4 lwz r4,SL_DBAT0+4(r1) mtdbatl 0,r4 lwz r4,SL_DBAT1(r1) mtdbatu 1,r4 lwz r4,SL_DBAT1+4(r1) mtdbatl 1,r4 lwz r4,SL_DBAT2(r1) mtdbatu 2,r4 lwz r4,SL_DBAT2+4(r1) mtdbatl 2,r4 lwz r4,SL_DBAT3(r1) mtdbatu 3,r4 lwz r4,SL_DBAT3+4(r1) mtdbatl 3,r4 lwz r4,SL_IBAT0(r1) mtibatu 0,r4 lwz r4,SL_IBAT0+4(r1) mtibatl 0,r4 lwz r4,SL_IBAT1(r1) mtibatu 1,r4 lwz r4,SL_IBAT1+4(r1) mtibatl 1,r4 lwz r4,SL_IBAT2(r1) mtibatu 2,r4 lwz r4,SL_IBAT2+4(r1) mtibatl 2,r4 lwz r4,SL_IBAT3(r1) mtibatu 3,r4 lwz r4,SL_IBAT3+4(r1) mtibatl 3,r4 BEGIN_MMU_FTR_SECTION lwz r4,SL_DBAT4(r1) mtspr SPRN_DBAT4U,r4 lwz r4,SL_DBAT4+4(r1) mtspr SPRN_DBAT4L,r4 lwz r4,SL_DBAT5(r1) mtspr SPRN_DBAT5U,r4 lwz r4,SL_DBAT5+4(r1) mtspr SPRN_DBAT5L,r4 lwz r4,SL_DBAT6(r1) mtspr SPRN_DBAT6U,r4 lwz r4,SL_DBAT6+4(r1) mtspr SPRN_DBAT6L,r4 lwz r4,SL_DBAT7(r1) mtspr SPRN_DBAT7U,r4 lwz r4,SL_DBAT7+4(r1) mtspr SPRN_DBAT7L,r4 lwz r4,SL_IBAT4(r1) mtspr SPRN_IBAT4U,r4 lwz r4,SL_IBAT4+4(r1) mtspr SPRN_IBAT4L,r4 lwz r4,SL_IBAT5(r1) mtspr SPRN_IBAT5U,r4 lwz r4,SL_IBAT5+4(r1) mtspr SPRN_IBAT5L,r4 lwz r4,SL_IBAT6(r1) mtspr SPRN_IBAT6U,r4 lwz r4,SL_IBAT6+4(r1) mtspr SPRN_IBAT6L,r4 lwz r4,SL_IBAT7(r1) mtspr SPRN_IBAT7U,r4 lwz r4,SL_IBAT7+4(r1) mtspr SPRN_IBAT7L,r4 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) /* Flush all TLBs */ lis r4,0x1000 1: addic. r4,r4,-0x1000 tlbie r4 blt 1b sync /* Restore TB */ li r3,0 mttbl r3 lwz r3,SL_TB(r1) lwz r4,SL_TB+4(r1) mttbu r3 mttbl r4 /* Restore the callee-saved registers and return */ lwz r0,SL_CR(r1) mtcr r0 lwz r2,SL_R2(r1) lmw r12,SL_R12(r1) /* restore the MSR and SP and turn on the MMU and return */ lwz r3,SL_MSR(r1) lwz r4,SL_LR(r1) lwz r1,SL_SP(r1) mtsrr0 r4 mtsrr1 r3 sync isync rfi _ASM_NOKPROBE_SYMBOL(grackle_wake_up) #endif /* CONFIG_PPC_BOOK3S_32 */ .section .bss .balign L1_CACHE_BYTES sleep_storage: .space SL_SIZE .balign L1_CACHE_BYTES, 0 #endif /* CONFIG_PM || CONFIG_CPU_FREQ_PMAC || HOTPLUG_CPU */ .section .text
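A note on the wakeup handshake above: it is just two stores into low memory, a pointer to the saved resume PC at physical address 0 and the magic 'Lars' word at address 4 (plus the Core99 variant at 0x80/0x84). Below is a minimal user-space C sketch of that protocol; fake_low_mem and arm_wakeup_vector() are illustrative stand-ins, not kernel interfaces, and a 32-bit physical address space is assumed.

#include <stdint.h>
#include <stdio.h>

#define MAGIC 0x4c617273u                    /* 'Lars', as in sleep.S above */

struct fake_low_mem {                        /* models physical words 0 and 4 */
	uint32_t resume_ptr;                 /* word 0: &saved resume PC */
	uint32_t magic;                      /* word 4: wakeup magic */
};

static uint32_t saved_pc;                    /* stands in for SL_PC(sleep_storage) */

static void arm_wakeup_vector(struct fake_low_mem *mem, uint32_t resume_pc)
{
	saved_pc = resume_pc;                              /* stw r5,SL_PC(r11) */
	/* truncating cast: the real code deals in 32-bit physical addresses */
	mem->resume_ptr = (uint32_t)(uintptr_t)&saved_pc;  /* stw r5,0(r4) */
	mem->magic = MAGIC;                                /* stw r6,4(r4) */
}

int main(void)
{
	struct fake_low_mem mem;
	arm_wakeup_vector(&mem, 0x00c0ffee); /* any resume address will do here */
	printf("magic=0x%08x, resume PC fetched via *0x%08x = 0x%08x\n",
	       (unsigned)mem.magic, (unsigned)mem.resume_ptr, (unsigned)saved_pc);
	return 0;
}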
aixcc-public/challenge-001-exemplar-source
10,815
arch/powerpc/platforms/83xx/suspend-asm.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Enter and leave deep sleep state on MPC83xx * * Copyright (c) 2006-2008 Freescale Semiconductor, Inc. * Author: Scott Wood <scottwood@freescale.com> */ #include <asm/page.h> #include <asm/ppc_asm.h> #include <asm/reg.h> #include <asm/asm-offsets.h> #define SS_MEMSAVE 0x00 /* First 8 bytes of RAM */ #define SS_HID 0x08 /* 3 HIDs */ #define SS_IABR 0x14 /* 2 IABRs */ #define SS_IBCR 0x1c #define SS_DABR 0x20 /* 2 DABRs */ #define SS_DBCR 0x28 #define SS_SP 0x2c #define SS_SR 0x30 /* 16 segment registers */ #define SS_R2 0x70 #define SS_MSR 0x74 #define SS_SDR1 0x78 #define SS_LR 0x7c #define SS_SPRG 0x80 /* 8 SPRGs */ #define SS_DBAT 0xa0 /* 8 DBATs */ #define SS_IBAT 0xe0 /* 8 IBATs */ #define SS_TB 0x120 #define SS_CR 0x128 #define SS_GPREG 0x12c /* r12-r31 */ #define STATE_SAVE_SIZE 0x17c .section .data .align 5 mpc83xx_sleep_save_area: .space STATE_SAVE_SIZE immrbase: .long 0 .section .text .align 5 /* r3 = physical address of IMMR */ _GLOBAL(mpc83xx_enter_deep_sleep) lis r4, immrbase@ha stw r3, immrbase@l(r4) /* The first 2 words of memory are used to communicate with the * bootloader, to tell it how to resume. * * The first word is the magic number 0xf5153ae5, and the second * is the pointer to mpc83xx_deep_resume. * * The original content of these two words is saved in SS_MEMSAVE. */ lis r3, mpc83xx_sleep_save_area@h ori r3, r3, mpc83xx_sleep_save_area@l lis r4, KERNELBASE@h lwz r5, 0(r4) lwz r6, 4(r4) stw r5, SS_MEMSAVE+0(r3) stw r6, SS_MEMSAVE+4(r3) mfspr r5, SPRN_HID0 mfspr r6, SPRN_HID1 mfspr r7, SPRN_HID2 stw r5, SS_HID+0(r3) stw r6, SS_HID+4(r3) stw r7, SS_HID+8(r3) mfspr r4, SPRN_IABR mfspr r5, SPRN_IABR2 mfspr r6, SPRN_IBCR mfspr r7, SPRN_DABR mfspr r8, SPRN_DABR2 mfspr r9, SPRN_DBCR stw r4, SS_IABR+0(r3) stw r5, SS_IABR+4(r3) stw r6, SS_IBCR(r3) stw r7, SS_DABR+0(r3) stw r8, SS_DABR+4(r3) stw r9, SS_DBCR(r3) mfspr r4, SPRN_SPRG0 mfspr r5, SPRN_SPRG1 mfspr r6, SPRN_SPRG2 mfspr r7, SPRN_SPRG3 mfsdr1 r8 stw r4, SS_SPRG+0(r3) stw r5, SS_SPRG+4(r3) stw r6, SS_SPRG+8(r3) stw r7, SS_SPRG+12(r3) stw r8, SS_SDR1(r3) mfspr r4, SPRN_SPRG4 mfspr r5, SPRN_SPRG5 mfspr r6, SPRN_SPRG6 mfspr r7, SPRN_SPRG7 stw r4, SS_SPRG+16(r3) stw r5, SS_SPRG+20(r3) stw r6, SS_SPRG+24(r3) stw r7, SS_SPRG+28(r3) mfspr r4, SPRN_DBAT0U mfspr r5, SPRN_DBAT0L mfspr r6, SPRN_DBAT1U mfspr r7, SPRN_DBAT1L stw r4, SS_DBAT+0x00(r3) stw r5, SS_DBAT+0x04(r3) stw r6, SS_DBAT+0x08(r3) stw r7, SS_DBAT+0x0c(r3) mfspr r4, SPRN_DBAT2U mfspr r5, SPRN_DBAT2L mfspr r6, SPRN_DBAT3U mfspr r7, SPRN_DBAT3L stw r4, SS_DBAT+0x10(r3) stw r5, SS_DBAT+0x14(r3) stw r6, SS_DBAT+0x18(r3) stw r7, SS_DBAT+0x1c(r3) mfspr r4, SPRN_DBAT4U mfspr r5, SPRN_DBAT4L mfspr r6, SPRN_DBAT5U mfspr r7, SPRN_DBAT5L stw r4, SS_DBAT+0x20(r3) stw r5, SS_DBAT+0x24(r3) stw r6, SS_DBAT+0x28(r3) stw r7, SS_DBAT+0x2c(r3) mfspr r4, SPRN_DBAT6U mfspr r5, SPRN_DBAT6L mfspr r6, SPRN_DBAT7U mfspr r7, SPRN_DBAT7L stw r4, SS_DBAT+0x30(r3) stw r5, SS_DBAT+0x34(r3) stw r6, SS_DBAT+0x38(r3) stw r7, SS_DBAT+0x3c(r3) mfspr r4, SPRN_IBAT0U mfspr r5, SPRN_IBAT0L mfspr r6, SPRN_IBAT1U mfspr r7, SPRN_IBAT1L stw r4, SS_IBAT+0x00(r3) stw r5, SS_IBAT+0x04(r3) stw r6, SS_IBAT+0x08(r3) stw r7, SS_IBAT+0x0c(r3) mfspr r4, SPRN_IBAT2U mfspr r5, SPRN_IBAT2L mfspr r6, SPRN_IBAT3U mfspr r7, SPRN_IBAT3L stw r4, SS_IBAT+0x10(r3) stw r5, SS_IBAT+0x14(r3) stw r6, SS_IBAT+0x18(r3) stw r7, SS_IBAT+0x1c(r3) mfspr r4, SPRN_IBAT4U mfspr r5, SPRN_IBAT4L mfspr r6, SPRN_IBAT5U mfspr r7, SPRN_IBAT5L stw r4, SS_IBAT+0x20(r3) stw r5, SS_IBAT+0x24(r3) stw 
r6, SS_IBAT+0x28(r3) stw r7, SS_IBAT+0x2c(r3) mfspr r4, SPRN_IBAT6U mfspr r5, SPRN_IBAT6L mfspr r6, SPRN_IBAT7U mfspr r7, SPRN_IBAT7L stw r4, SS_IBAT+0x30(r3) stw r5, SS_IBAT+0x34(r3) stw r6, SS_IBAT+0x38(r3) stw r7, SS_IBAT+0x3c(r3) mfmsr r4 mflr r5 mfcr r6 stw r4, SS_MSR(r3) stw r5, SS_LR(r3) stw r6, SS_CR(r3) stw r1, SS_SP(r3) stw r2, SS_R2(r3) 1: mftbu r4 mftb r5 mftbu r6 cmpw r4, r6 bne 1b stw r4, SS_TB+0(r3) stw r5, SS_TB+4(r3) stmw r12, SS_GPREG(r3) li r4, 0 addi r6, r3, SS_SR-4 1: mfsrin r5, r4 stwu r5, 4(r6) addis r4, r4, 0x1000 cmpwi r4, 0 bne 1b /* Disable machine checks and critical exceptions */ mfmsr r4 rlwinm r4, r4, 0, ~MSR_CE rlwinm r4, r4, 0, ~MSR_ME mtmsr r4 isync #define TMP_VIRT_IMMR 0xf0000000 #define DEFAULT_IMMR_VALUE 0xff400000 #define IMMRBAR_BASE 0x0000 lis r4, immrbase@ha lwz r4, immrbase@l(r4) /* Use DBAT0 to address the current IMMR space */ ori r4, r4, 0x002a mtspr SPRN_DBAT0L, r4 lis r8, TMP_VIRT_IMMR@h ori r4, r8, 0x001e /* 1 MByte accessible from Kernel Space only */ mtspr SPRN_DBAT0U, r4 isync /* Use DBAT1 to address the original IMMR space */ lis r4, DEFAULT_IMMR_VALUE@h ori r4, r4, 0x002a mtspr SPRN_DBAT1L, r4 lis r9, (TMP_VIRT_IMMR + 0x01000000)@h ori r4, r9, 0x001e /* 1 MByte accessible from Kernel Space only */ mtspr SPRN_DBAT1U, r4 isync /* Use DBAT2 to address the beginning of RAM. This isn't done * using the normal virtual mapping, because with page debugging * enabled it will be read-only. */ li r4, 0x0002 mtspr SPRN_DBAT2L, r4 lis r4, KERNELBASE@h ori r4, r4, 0x001e /* 1 MByte accessible from Kernel Space only */ mtspr SPRN_DBAT2U, r4 isync /* Flush the cache with our BAT, as there will be TLB misses * otherwise if page debugging is enabled, and these misses * will disturb the PLRU algorithm. */ bl __flush_disable_L1 /* Keep the i-cache enabled, so the hack below for low-boot * flash will work. */ mfspr r3, SPRN_HID0 ori r3, r3, HID0_ICE mtspr SPRN_HID0, r3 isync lis r6, 0xf515 ori r6, r6, 0x3ae5 lis r7, mpc83xx_deep_resume@h ori r7, r7, mpc83xx_deep_resume@l tophys(r7, r7) lis r5, KERNELBASE@h stw r6, 0(r5) stw r7, 4(r5) /* Reset BARs */ li r4, 0 stw r4, 0x0024(r8) stw r4, 0x002c(r8) stw r4, 0x0034(r8) stw r4, 0x003c(r8) stw r4, 0x0064(r8) stw r4, 0x006c(r8) /* Rev 1 of the 8313 has problems with wakeup events that are * pending during the transition to deep sleep state (such as if * the PCI host sets the state to D3 and then D0 in rapid * succession). This check shrinks the race window somewhat. * * See erratum PCI23, though the problem is not limited * to PCI. */ lwz r3, 0x0b04(r8) andi. r3, r3, 1 bne- mpc83xx_deep_resume /* Move IMMR back to the default location, following the * procedure specified in the MPC8313 manual. */ lwz r4, IMMRBAR_BASE(r8) isync lis r4, DEFAULT_IMMR_VALUE@h stw r4, IMMRBAR_BASE(r8) lis r4, KERNELBASE@h lwz r4, 0(r4) isync lwz r4, IMMRBAR_BASE(r9) mr r8, r9 isync /* Check the Reset Configuration Word to see whether flash needs * to be mapped at a low address or a high address. */ lwz r4, 0x0904(r8) andis. r4, r4, 0x0400 li r4, 0 beq boot_low lis r4, 0xff80 boot_low: stw r4, 0x0020(r8) lis r7, 0x8000 ori r7, r7, 0x0016 mfspr r5, SPRN_HID0 rlwinm r5, r5, 0, ~(HID0_DOZE | HID0_NAP) oris r5, r5, HID0_SLEEP@h mtspr SPRN_HID0, r5 isync mfmsr r5 oris r5, r5, MSR_POW@h /* Enable the flash mapping at the appropriate address. This * mapping will override the RAM mapping if booting low, so there's * no need to disable the latter. 
This must be done inside the same * cache line as setting MSR_POW, so that no instruction fetches * from RAM happen after the flash mapping is turned on. */ .align 5 stw r7, 0x0024(r8) sync isync mtmsr r5 isync 1: b 1b mpc83xx_deep_resume: lis r4, 1f@h ori r4, r4, 1f@l tophys(r4, r4) mtsrr0 r4 mfmsr r4 rlwinm r4, r4, 0, ~(MSR_IR | MSR_DR) mtsrr1 r4 rfi 1: tlbia bl __inval_enable_L1 lis r3, mpc83xx_sleep_save_area@h ori r3, r3, mpc83xx_sleep_save_area@l tophys(r3, r3) lwz r5, SS_MEMSAVE+0(r3) lwz r6, SS_MEMSAVE+4(r3) stw r5, 0(0) stw r6, 4(0) lwz r5, SS_HID+0(r3) lwz r6, SS_HID+4(r3) lwz r7, SS_HID+8(r3) mtspr SPRN_HID0, r5 mtspr SPRN_HID1, r6 mtspr SPRN_HID2, r7 lwz r4, SS_IABR+0(r3) lwz r5, SS_IABR+4(r3) lwz r6, SS_IBCR(r3) lwz r7, SS_DABR+0(r3) lwz r8, SS_DABR+4(r3) lwz r9, SS_DBCR(r3) mtspr SPRN_IABR, r4 mtspr SPRN_IABR2, r5 mtspr SPRN_IBCR, r6 mtspr SPRN_DABR, r7 mtspr SPRN_DABR2, r8 mtspr SPRN_DBCR, r9 li r4, 0 addi r6, r3, SS_SR-4 1: lwzu r5, 4(r6) mtsrin r5, r4 addis r4, r4, 0x1000 cmpwi r4, 0 bne 1b lwz r4, SS_DBAT+0x00(r3) lwz r5, SS_DBAT+0x04(r3) lwz r6, SS_DBAT+0x08(r3) lwz r7, SS_DBAT+0x0c(r3) mtspr SPRN_DBAT0U, r4 mtspr SPRN_DBAT0L, r5 mtspr SPRN_DBAT1U, r6 mtspr SPRN_DBAT1L, r7 lwz r4, SS_DBAT+0x10(r3) lwz r5, SS_DBAT+0x14(r3) lwz r6, SS_DBAT+0x18(r3) lwz r7, SS_DBAT+0x1c(r3) mtspr SPRN_DBAT2U, r4 mtspr SPRN_DBAT2L, r5 mtspr SPRN_DBAT3U, r6 mtspr SPRN_DBAT3L, r7 lwz r4, SS_DBAT+0x20(r3) lwz r5, SS_DBAT+0x24(r3) lwz r6, SS_DBAT+0x28(r3) lwz r7, SS_DBAT+0x2c(r3) mtspr SPRN_DBAT4U, r4 mtspr SPRN_DBAT4L, r5 mtspr SPRN_DBAT5U, r6 mtspr SPRN_DBAT5L, r7 lwz r4, SS_DBAT+0x30(r3) lwz r5, SS_DBAT+0x34(r3) lwz r6, SS_DBAT+0x38(r3) lwz r7, SS_DBAT+0x3c(r3) mtspr SPRN_DBAT6U, r4 mtspr SPRN_DBAT6L, r5 mtspr SPRN_DBAT7U, r6 mtspr SPRN_DBAT7L, r7 lwz r4, SS_IBAT+0x00(r3) lwz r5, SS_IBAT+0x04(r3) lwz r6, SS_IBAT+0x08(r3) lwz r7, SS_IBAT+0x0c(r3) mtspr SPRN_IBAT0U, r4 mtspr SPRN_IBAT0L, r5 mtspr SPRN_IBAT1U, r6 mtspr SPRN_IBAT1L, r7 lwz r4, SS_IBAT+0x10(r3) lwz r5, SS_IBAT+0x14(r3) lwz r6, SS_IBAT+0x18(r3) lwz r7, SS_IBAT+0x1c(r3) mtspr SPRN_IBAT2U, r4 mtspr SPRN_IBAT2L, r5 mtspr SPRN_IBAT3U, r6 mtspr SPRN_IBAT3L, r7 lwz r4, SS_IBAT+0x20(r3) lwz r5, SS_IBAT+0x24(r3) lwz r6, SS_IBAT+0x28(r3) lwz r7, SS_IBAT+0x2c(r3) mtspr SPRN_IBAT4U, r4 mtspr SPRN_IBAT4L, r5 mtspr SPRN_IBAT5U, r6 mtspr SPRN_IBAT5L, r7 lwz r4, SS_IBAT+0x30(r3) lwz r5, SS_IBAT+0x34(r3) lwz r6, SS_IBAT+0x38(r3) lwz r7, SS_IBAT+0x3c(r3) mtspr SPRN_IBAT6U, r4 mtspr SPRN_IBAT6L, r5 mtspr SPRN_IBAT7U, r6 mtspr SPRN_IBAT7L, r7 lwz r4, SS_SPRG+16(r3) lwz r5, SS_SPRG+20(r3) lwz r6, SS_SPRG+24(r3) lwz r7, SS_SPRG+28(r3) mtspr SPRN_SPRG4, r4 mtspr SPRN_SPRG5, r5 mtspr SPRN_SPRG6, r6 mtspr SPRN_SPRG7, r7 lwz r4, SS_SPRG+0(r3) lwz r5, SS_SPRG+4(r3) lwz r6, SS_SPRG+8(r3) lwz r7, SS_SPRG+12(r3) lwz r8, SS_SDR1(r3) mtspr SPRN_SPRG0, r4 mtspr SPRN_SPRG1, r5 mtspr SPRN_SPRG2, r6 mtspr SPRN_SPRG3, r7 mtsdr1 r8 lwz r4, SS_MSR(r3) lwz r5, SS_LR(r3) lwz r6, SS_CR(r3) lwz r1, SS_SP(r3) lwz r2, SS_R2(r3) mtsrr1 r4 mtsrr0 r5 mtcr r6 li r4, 0 mtspr SPRN_TBWL, r4 lwz r4, SS_TB+0(r3) lwz r5, SS_TB+4(r3) mtspr SPRN_TBWU, r4 mtspr SPRN_TBWL, r5 lmw r12, SS_GPREG(r3) /* Kick decrementer */ li r0, 1 mtdec r0 rfi _ASM_NOKPROBE_SYMBOL(mpc83xx_deep_resume)
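Both this file and the PowerBook sleep code above read the 64-bit timebase with the same mftbu/mftb/mftbu dance: re-read the upper half until it is stable across the read of the lower half, so a carry between halves can never be observed. A self-contained C sketch of that loop follows; read_tbu()/read_tbl() and the simulated counter are illustrative stand-ins for the mfspr reads, not real accessors.

#include <stdint.h>
#include <stdio.h>

static uint64_t fake_tb;                     /* simulated 64-bit timebase */
static uint32_t read_tbu(void) { return (uint32_t)(fake_tb >> 32); }
static uint32_t read_tbl(void) { fake_tb += 7; return (uint32_t)fake_tb; }

static uint64_t read_timebase(void)
{
	uint32_t hi, lo;
	do {
		hi = read_tbu();             /* 1: mftbu r4 */
		lo = read_tbl();             /*    mftb  r5 */
	} while (read_tbu() != hi);          /*    mftbu r6; cmpw r4,r6; bne 1b */
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	printf("tb=%llu\n", (unsigned long long)read_timebase());
	return 0;
}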
aixcc-public/challenge-001-exemplar-source
1,246
arch/powerpc/platforms/pasemi/powersave.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2006-2007 PA Semi, Inc * * Maintained by: Olof Johansson <olof@lixom.net> */ #include <asm/processor.h> #include <asm/page.h> #include <asm/ppc_asm.h> #include <asm/cputable.h> #include <asm/cache.h> #include <asm/thread_info.h> #include <asm/asm-offsets.h> /* Power savings opcodes since not all binutils have them at this time */ #define DOZE .long 0x4c000324 #define NAP .long 0x4c000364 #define SLEEP .long 0x4c0003a4 #define RVW .long 0x4c0003e4 /* Common sequence to do before going to any of the * powersavings modes. */ #define PRE_SLEEP_SEQUENCE \ std r3,8(r1); \ ptesync ; \ ld r3,8(r1); \ 1: cmpd r3,r3; \ bne 1b _doze: PRE_SLEEP_SEQUENCE DOZE b . _GLOBAL(idle_spin) blr _GLOBAL(idle_doze) LOAD_REG_ADDR(r3, _doze) b sleep_common /* Add more modes here later */ sleep_common: mflr r0 std r0, 16(r1) stdu r1,-64(r1) #ifdef CONFIG_PPC_PASEMI_CPUFREQ std r3, 48(r1) /* Only do power savings when in astate 0 */ bl check_astate cmpwi r3,0 bne 1f ld r3, 48(r1) #endif LOAD_REG_IMMEDIATE(r6,MSR_DR|MSR_IR|MSR_ME|MSR_EE) mfmsr r4 andc r5,r4,r6 mtmsrd r5,0 mtctr r3 bctrl mtmsrd r4,0 1: addi r1,r1,64 ld r0,16(r1) mtlr r0 blr
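sleep_common above is essentially a small dispatcher: save the MSR, drop the EE/IR/DR/ME bits, indirect-call the selected power-save routine through CTR, then restore the MSR. A hedged C rendering of that shape, with msr_read()/msr_write() and the simulated MSR word as illustrative stand-ins for mfmsr/mtmsrd (the bit values match the usual MSR layout but are hard-coded here for the sketch):

#include <stdint.h>

#define MSR_EE 0x8000                        /* external interrupt enable */
#define MSR_ME 0x1000                        /* machine check enable */
#define MSR_IR 0x0020                        /* instruction relocate */
#define MSR_DR 0x0010                        /* data relocate */

static uint64_t msr;                         /* simulated MSR */
static uint64_t msr_read(void) { return msr; }
static void msr_write(uint64_t v) { msr = v; }

static void idle_dispatch(void (*mode)(void))
{
	uint64_t saved = msr_read();         /* mfmsr r4 */
	msr_write(saved & ~(uint64_t)(MSR_EE | MSR_ME | MSR_IR | MSR_DR));
	                                     /* andc r5,r4,r6; mtmsrd r5,0 */
	mode();                              /* mtctr r3; bctrl */
	msr_write(saved);                    /* mtmsrd r4,0 */
}

static void noop_doze(void) { }              /* stands in for _doze */

int main(void)
{
	msr = MSR_EE | MSR_ME | MSR_IR | MSR_DR;
	idle_dispatch(noop_doze);
	return msr == (MSR_EE | MSR_ME | MSR_IR | MSR_DR) ? 0 : 1;
}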
aixcc-public/challenge-001-exemplar-source
2,514
arch/powerpc/platforms/cell/spufs/spu_restore_crt0.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * crt0_r.S: Entry function for SPU-side context restore. * * Copyright (C) 2005 IBM * * Entry and exit function for SPU-side of the context restore * sequence. Sets up an initial stack frame, then branches to * 'main'. On return, restores all 128 registers from the LSCSA * and exits. */ #include <asm/spu_csa.h> .data .align 7 .globl regs_spill regs_spill: .space SIZEOF_SPU_SPILL_REGS, 0x0 .text .global _start _start: /* Initialize the stack pointer to point to 16368 * (16kb-16). The back chain pointer is initialized * to NULL. */ il $0, 0 il $SP, 16368 stqd $0, 0($SP) /* Allocate a minimum stack frame for the called main. * This is needed so that main has a place to save the * link register when it calls another function. */ stqd $SP, -160($SP) ai $SP, $SP, -160 /* Call the program's main function. */ brsl $0, main .global exit .global _exit exit: _exit: /* SPU Context Restore, Step 5: Restore the remaining 112 GPRs. */ ila $3, regs_spill + 256 restore_regs: lqr $4, restore_reg_insts restore_reg_loop: ai $4, $4, 4 .balignl 16, 0x40200000 restore_reg_insts: /* must be quad-word aligned. */ lqd $16, 0($3) lqd $17, 16($3) lqd $18, 32($3) lqd $19, 48($3) andi $5, $4, 0x7F stqr $4, restore_reg_insts ai $3, $3, 64 brnz $5, restore_reg_loop /* SPU Context Restore Step 17: Restore the first 16 GPRs. */ lqa $0, regs_spill + 0 lqa $1, regs_spill + 16 lqa $2, regs_spill + 32 lqa $3, regs_spill + 48 lqa $4, regs_spill + 64 lqa $5, regs_spill + 80 lqa $6, regs_spill + 96 lqa $7, regs_spill + 112 lqa $8, regs_spill + 128 lqa $9, regs_spill + 144 lqa $10, regs_spill + 160 lqa $11, regs_spill + 176 lqa $12, regs_spill + 192 lqa $13, regs_spill + 208 lqa $14, regs_spill + 224 lqa $15, regs_spill + 240 /* Under normal circumstances, the 'exit' function * terminates with 'stop SPU_RESTORE_COMPLETE', * indicating that the SPU-side restore code has * completed. * * However, it is possible that instructions immediately * following the 'stop 0x3ffc' have been modified at run * time so as to recreate the exact SPU_Status settings * from the application, e.g. illegal instruction, halt, * etc. */ .global exit_fini .global _exit_fini exit_fini: _exit_fini: stop SPU_RESTORE_COMPLETE stop 0 stop 0 stop 0 /* Pad the size of this crt0.o to be a multiple of 16 bytes. */ .balignl 16, 0x0
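The restore loop above is self-modifying: the four lqd opcodes sit in one quadword, and the SPU ai instruction adds 4 to all four words at once, bumping each opcode's 7-bit RT field so the group walks registers $16..$127 and stops when RT wraps to 0. Below is a plain-C simulation of that walk, under my reading of the encoding; array writes stand in for the patched loads and only the control flow is modeled.

#include <stdio.h>

int main(void)
{
	unsigned regfile[128] = { 0 };       /* SPU register file (one word each) */
	unsigned spill[128];                 /* models regs_spill in local store */
	unsigned rt[4] = { 16, 17, 18, 19 }; /* RT fields of the four lqd opcodes */
	int i, j;

	for (i = 0; i < 128; i++)
		spill[i] = 0x1000 + i;

	do {
		for (j = 0; j < 4; j++)      /* lqd $16..$19 group (patched each pass) */
			regfile[rt[j]] = spill[rt[j]];
		for (j = 0; j < 4; j++)      /* ai $4,$4,4: SIMD add over the quadword */
			rt[j] = (rt[j] + 4) & 0x7F;  /* RT is the low 7 bits of each opcode */
	} while (rt[0] != 0);                /* andi $5,$4,0x7F; brnz: stop on wrap */

	printf("restored $16..$127, e.g. $127 = 0x%x\n", regfile[127]);
	return 0;
}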
aixcc-public/challenge-001-exemplar-source
2,041
arch/powerpc/platforms/cell/spufs/spu_save_crt0.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * crt0_s.S: Entry function for SPU-side context save. * * Copyright (C) 2005 IBM * * Entry function for SPU-side of the context save sequence. * Saves all 128 GPRs, sets up an initial stack frame, then * branches to 'main'. */ #include <asm/spu_csa.h> .data .align 7 .globl regs_spill regs_spill: .space SIZEOF_SPU_SPILL_REGS, 0x0 .text .global _start _start: /* SPU Context Save Step 1: Save the first 16 GPRs. */ stqa $0, regs_spill + 0 stqa $1, regs_spill + 16 stqa $2, regs_spill + 32 stqa $3, regs_spill + 48 stqa $4, regs_spill + 64 stqa $5, regs_spill + 80 stqa $6, regs_spill + 96 stqa $7, regs_spill + 112 stqa $8, regs_spill + 128 stqa $9, regs_spill + 144 stqa $10, regs_spill + 160 stqa $11, regs_spill + 176 stqa $12, regs_spill + 192 stqa $13, regs_spill + 208 stqa $14, regs_spill + 224 stqa $15, regs_spill + 240 /* SPU Context Save, Step 8: Save the remaining 112 GPRs. */ ila $3, regs_spill + 256 save_regs: lqr $4, save_reg_insts save_reg_loop: ai $4, $4, 4 .balignl 16, 0x40200000 save_reg_insts: /* must be quad-word aligned. */ stqd $16, 0($3) stqd $17, 16($3) stqd $18, 32($3) stqd $19, 48($3) andi $5, $4, 0x7F stqr $4, save_reg_insts ai $3, $3, 64 brnz $5, save_reg_loop /* Initialize the stack pointer to point to 16368 * (16kb-16). The back chain pointer is initialized * to NULL. */ il $0, 0 il $SP, 16368 stqd $0, 0($SP) /* Allocate a minimum stack frame for the called main. * This is needed so that main has a place to save the * link register when it calls another function. */ stqd $SP, -160($SP) ai $SP, $SP, -160 /* Call the program's main function. */ brsl $0, main /* In this case main should not return; if it does * there has been an error in the sequence. Execute * stop-and-signal with code=0. */ .global exit .global _exit exit: _exit: stop 0x0 /* Pad the size of this crt0.o to be multiple of 16 bytes. */ .balignl 16, 0x0
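Both crt0 files establish the same tiny ABI before calling main: SP = 16368 (the 16 KiB local store minus one quadword), a NULL back chain at *SP, and a 160-byte minimal frame so the callee has somewhere to spill its link register. A short C sketch of the arithmetic, with plain integers standing in for the 128-bit stack slots:

#include <assert.h>

#define LS_SIZE   16384                      /* 16 KiB SPU local store */
#define FRAME_MIN 160                        /* minimal stack frame */

int main(void)
{
	unsigned sp = LS_SIZE - 16;          /* il $SP, 16368 */
	unsigned back_chain = 0;             /* stqd $0, 0($SP): NULL terminator */
	unsigned old_sp = sp;

	sp -= FRAME_MIN;                     /* ai $SP, $SP, -160 */
	/* stqd $SP, -160($SP) stored the old SP at the new frame top: */
	unsigned frame_back_chain = old_sp;

	assert(sp == 16208);
	assert(back_chain == 0 && frame_back_chain == 16368);
	return 0;
}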
aixcc-public/challenge-001-exemplar-source
1,403
arch/powerpc/mm/book3s32/nohash_low.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * This file contains low-level assembler routines for managing * the PowerPC 603 tlb invalidation. */ #include <asm/page.h> #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> /* * Flush an entry from the TLB */ #ifdef CONFIG_SMP _GLOBAL(_tlbie) lwz r8,TASK_CPU(r2) oris r8,r8,11 mfmsr r10 rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */ rlwinm r0,r0,0,28,26 /* clear DR */ mtmsr r0 isync lis r9,mmu_hash_lock@h ori r9,r9,mmu_hash_lock@l tophys(r9,r9) 10: lwarx r7,0,r9 cmpwi 0,r7,0 bne- 10b stwcx. r8,0,r9 bne- 10b eieio tlbie r3 sync TLBSYNC li r0,0 stw r0,0(r9) /* clear mmu_hash_lock */ mtmsr r10 isync blr _ASM_NOKPROBE_SYMBOL(_tlbie) #endif /* CONFIG_SMP */ /* * Flush the entire TLB. 603/603e only */ _GLOBAL(_tlbia) #if defined(CONFIG_SMP) lwz r8,TASK_CPU(r2) oris r8,r8,10 mfmsr r10 rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */ rlwinm r0,r0,0,28,26 /* clear DR */ mtmsr r0 isync lis r9,mmu_hash_lock@h ori r9,r9,mmu_hash_lock@l tophys(r9,r9) 10: lwarx r7,0,r9 cmpwi 0,r7,0 bne- 10b stwcx. r8,0,r9 bne- 10b #endif /* CONFIG_SMP */ li r5, 32 lis r4, KERNELBASE@h mtctr r5 sync 0: tlbie r4 addi r4, r4, 0x1000 bdnz 0b sync #ifdef CONFIG_SMP TLBSYNC li r0,0 stw r0,0(r9) /* clear mmu_hash_lock */ mtmsr r10 isync #endif /* CONFIG_SMP */ blr _ASM_NOKPROBE_SYMBOL(_tlbia)
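The lwarx/stwcx. pair in _tlbie/_tlbia above is a hand-rolled spinlock: spin until mmu_hash_lock reads 0, then claim it with a token identifying the owner (cpu | (11 << 16) in _tlbie). A rough C analogue using GCC atomic builtins in place of the reservation pair; the token encoding is taken from the oris above, everything else is illustrative.

#include <stdint.h>

static uint32_t mmu_hash_lock;               /* 0 = free, else owner token */

static void hash_lock(uint32_t cpu)
{
	uint32_t token = cpu | (11u << 16);  /* lwz r8,TASK_CPU(r2); oris r8,r8,11 */
	uint32_t expected;

	do {
		expected = 0;                /* 10: lwarx r7,0,r9; cmpwi; bne- 10b */
	} while (!__atomic_compare_exchange_n(&mmu_hash_lock, &expected, token,
					      0, __ATOMIC_ACQUIRE,
					      __ATOMIC_RELAXED)); /* stwcx.; bne- 10b */
}

static void hash_unlock(void)
{
	__atomic_store_n(&mmu_hash_lock, 0, __ATOMIC_RELEASE); /* li r0,0; stw r0,0(r9) */
}

int main(void)
{
	hash_lock(1);                        /* would spin if another CPU held it */
	hash_unlock();
	return 0;
}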
aixcc-public/challenge-001-exemplar-source
17,766
arch/powerpc/mm/book3s32/hash_low.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * PowerPC version * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> * Adapted for Power Macintosh by Paul Mackerras. * Low-level exception handlers and MMU support * rewritten by Paul Mackerras. * Copyright (C) 1996 Paul Mackerras. * * This file contains low-level assembler routines for managing * the PowerPC MMU hash table. (PPC 8xx processors don't use a * hash table, so this file is not used on them.) */ #include <linux/pgtable.h> #include <linux/init.h> #include <asm/reg.h> #include <asm/page.h> #include <asm/cputable.h> #include <asm/ppc_asm.h> #include <asm/thread_info.h> #include <asm/asm-offsets.h> #include <asm/export.h> #include <asm/feature-fixups.h> #include <asm/code-patching-asm.h> #ifdef CONFIG_PTE_64BIT #define PTE_T_SIZE 8 #define PTE_FLAGS_OFFSET 4 /* offset of PTE flags, in bytes */ #else #define PTE_T_SIZE 4 #define PTE_FLAGS_OFFSET 0 #endif /* * Load a PTE into the hash table, if possible. * The address is in r4, and r3 contains an access flag: * _PAGE_RW (0x400) if a write. * r9 contains the SRR1 value, from which we use the MSR_PR bit. * SPRG_THREAD contains the physical address of the current task's thread. * * Returns to the caller if the access is illegal or there is no * mapping for the address. Otherwise it places an appropriate PTE * in the hash table and returns from the exception. * Uses r0, r3 - r6, r8, r10, ctr, lr. */ .text _GLOBAL(hash_page) #ifdef CONFIG_SMP lis r8, (mmu_hash_lock - PAGE_OFFSET)@h ori r8, r8, (mmu_hash_lock - PAGE_OFFSET)@l lis r0,0x0fff b 10f 11: lwz r6,0(r8) cmpwi 0,r6,0 bne 11b 10: lwarx r6,0,r8 cmpwi 0,r6,0 bne- 11b stwcx. r0,0,r8 bne- 10b isync #endif /* Get PTE (linux-style) and check access */ lis r0, TASK_SIZE@h /* check if kernel address */ cmplw 0,r4,r0 mfspr r8,SPRN_SPRG_THREAD /* current task's THREAD (phys) */ ori r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */ lwz r5,PGDIR(r8) /* virt page-table root */ blt+ 112f /* assume user more likely */ lis r5,swapper_pg_dir@ha /* if kernel address, use */ addi r5,r5,swapper_pg_dir@l /* kernel page table */ rlwimi r3,r9,32-12,29,29 /* MSR_PR -> _PAGE_USER */ 112: tophys(r5, r5) #ifndef CONFIG_PTE_64BIT rlwimi r5,r4,12,20,29 /* insert top 10 bits of address */ lwz r8,0(r5) /* get pmd entry */ rlwinm. r8,r8,0,0,19 /* extract address of pte page */ #else rlwinm r8,r4,13,19,29 /* Compute pgdir/pmd offset */ lwzx r8,r8,r5 /* Get L1 entry */ rlwinm. r8,r8,0,0,20 /* extract pt base address */ #endif #ifdef CONFIG_SMP beq- .Lhash_page_out /* return if no mapping */ #else /* XXX it seems like the 601 will give a machine fault on the rfi if its alignment is wrong (bottom 4 bits of address are 8 or 0xc) and we have had a not-taken conditional branch to the address following the rfi. */ beqlr- #endif #ifndef CONFIG_PTE_64BIT rlwimi r8,r4,22,20,29 /* insert next 10 bits of address */ #else rlwimi r8,r4,23,20,28 /* compute pte address */ /* * If PTE_64BIT is set, the low word is the flags word; use that * word for locking since it contains all the interesting bits. */ addi r8,r8,PTE_FLAGS_OFFSET #endif /* * Update the linux PTE atomically. We do the lwarx up-front * because almost always, there won't be a permission violation * and there won't already be an HPTE, and thus we will have * to update the PTE to set _PAGE_HASHPTE. -- paulus. 
*/ .Lretry: lwarx r6,0,r8 /* get linux-style pte, flag word */ #ifdef CONFIG_PPC_KUAP mfsrin r5,r4 rlwinm r0,r9,28,_PAGE_RW /* MSR[PR] => _PAGE_RW */ rlwinm r5,r5,12,_PAGE_RW /* Ks => _PAGE_RW */ andc r5,r5,r0 /* Ks & ~MSR[PR] */ andc r5,r6,r5 /* Clear _PAGE_RW when Ks = 1 && MSR[PR] = 0 */ andc. r5,r3,r5 /* check access & ~permission */ #else andc. r5,r3,r6 /* check access & ~permission */ #endif rlwinm r0,r3,32-3,24,24 /* _PAGE_RW access -> _PAGE_DIRTY */ ori r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE #ifdef CONFIG_SMP bne- .Lhash_page_out /* return if access not permitted */ #else bnelr- #endif or r5,r0,r6 /* set accessed/dirty bits */ #ifdef CONFIG_PTE_64BIT #ifdef CONFIG_SMP subf r10,r6,r8 /* create false data dependency */ subi r10,r10,PTE_FLAGS_OFFSET lwzx r10,r6,r10 /* Get upper PTE word */ #else lwz r10,-PTE_FLAGS_OFFSET(r8) #endif /* CONFIG_SMP */ #endif /* CONFIG_PTE_64BIT */ stwcx. r5,0,r8 /* attempt to update PTE */ bne- .Lretry /* retry if someone got there first */ mfsrin r3,r4 /* get segment reg for segment */ bl create_hpte /* add the hash table entry */ #ifdef CONFIG_SMP eieio lis r8, (mmu_hash_lock - PAGE_OFFSET)@ha li r0,0 stw r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8) #endif b fast_hash_page_return #ifdef CONFIG_SMP .Lhash_page_out: eieio lis r8, (mmu_hash_lock - PAGE_OFFSET)@ha li r0,0 stw r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8) blr #endif /* CONFIG_SMP */ _ASM_NOKPROBE_SYMBOL(hash_page) /* * Add an entry for a particular page to the hash table. * * add_hash_page(unsigned context, unsigned long va, unsigned long pmdval) * * We assume any necessary modifications to the pte (e.g. setting * the accessed bit) have already been done and that there is actually * a hash table in use (i.e. we're not on a 603). */ _GLOBAL(add_hash_page) mflr r0 stw r0,4(r1) #ifdef CONFIG_SMP lwz r8,TASK_CPU(r2) /* to go in mmu_hash_lock */ oris r8,r8,12 #endif /* CONFIG_SMP */ /* * We disable interrupts here, even on UP, because we don't * want to race with hash_page, and because we want the * _PAGE_HASHPTE bit to be a reliable indication of whether * the HPTE exists (or at least whether one did once). * We also turn off the MMU for data accesses so that * we can't take a hash table miss (assuming the code is * covered by a BAT). -- paulus */ mfmsr r9 rlwinm r0,r9,0,17,15 /* clear bit 16 (MSR_EE) */ rlwinm r0,r0,0,28,26 /* clear MSR_DR */ mtmsr r0 isync #ifdef CONFIG_SMP lis r6, (mmu_hash_lock - PAGE_OFFSET)@ha addi r6, r6, (mmu_hash_lock - PAGE_OFFSET)@l 10: lwarx r0,0,r6 /* take the mmu_hash_lock */ cmpi 0,r0,0 bne- 11f stwcx. r8,0,r6 beq+ 12f 11: lwz r0,0(r6) cmpi 0,r0,0 beq 10b b 11b 12: isync #endif /* * Fetch the linux pte and test and set _PAGE_HASHPTE atomically. * If _PAGE_HASHPTE was already set, we don't replace the existing * HPTE, so we just unlock and return. */ mr r8,r5 #ifndef CONFIG_PTE_64BIT rlwimi r8,r4,22,20,29 #else rlwimi r8,r4,23,20,28 addi r8,r8,PTE_FLAGS_OFFSET #endif 1: lwarx r6,0,r8 andi. r0,r6,_PAGE_HASHPTE bne 9f /* if HASHPTE already set, done */ #ifdef CONFIG_PTE_64BIT #ifdef CONFIG_SMP subf r10,r6,r8 /* create false data dependency */ subi r10,r10,PTE_FLAGS_OFFSET lwzx r10,r6,r10 /* Get upper PTE word */ #else lwz r10,-PTE_FLAGS_OFFSET(r8) #endif /* CONFIG_SMP */ #endif /* CONFIG_PTE_64BIT */ ori r5,r6,_PAGE_HASHPTE stwcx.
r5,0,r8 bne- 1b /* Convert context and va to VSID */ mulli r3,r3,897*16 /* multiply context by context skew */ rlwinm r0,r4,4,28,31 /* get ESID (top 4 bits of va) */ mulli r0,r0,0x111 /* multiply by ESID skew */ add r3,r3,r0 /* note create_hpte trims to 24 bits */ bl create_hpte 9: #ifdef CONFIG_SMP lis r6, (mmu_hash_lock - PAGE_OFFSET)@ha addi r6, r6, (mmu_hash_lock - PAGE_OFFSET)@l eieio li r0,0 stw r0,0(r6) /* clear mmu_hash_lock */ #endif /* reenable interrupts and DR */ mtmsr r9 isync lwz r0,4(r1) mtlr r0 blr _ASM_NOKPROBE_SYMBOL(add_hash_page) /* * This routine adds a hardware PTE to the hash table. * It is designed to be called with the MMU either on or off. * r3 contains the VSID, r4 contains the virtual address, * r5 contains the linux PTE, r6 contains the old value of the * linux PTE (before setting _PAGE_HASHPTE). r10 contains the * upper half of the PTE if CONFIG_PTE_64BIT. * On SMP, the caller should have the mmu_hash_lock held. * We assume that the caller has (or will) set the _PAGE_HASHPTE * bit in the linux PTE in memory. The value passed in r6 should * be the old linux PTE value; if it doesn't have _PAGE_HASHPTE set * this routine will skip the search for an existing HPTE. * This procedure modifies r0, r3 - r6, r8, cr0. * -- paulus. * * For speed, 4 of the instructions get patched once the size and * physical address of the hash table are known. These definitions * of Hash_base and Hash_bits below are for the early hash table. */ Hash_base = early_hash Hash_bits = 12 /* e.g. 256kB hash table */ Hash_msk = (((1 << Hash_bits) - 1) * 64) /* defines for the PTE format for 32-bit PPCs */ #define HPTE_SIZE 8 #define PTEG_SIZE 64 #define LG_PTEG_SIZE 6 #define LDPTEu lwzu #define LDPTE lwz #define STPTE stw #define CMPPTE cmpw #define PTE_H 0x40 #define PTE_V 0x80000000 #define TST_V(r) rlwinm. r,r,0,0,0 #define SET_V(r) oris r,r,PTE_V@h #define CLR_V(r,t) rlwinm r,r,0,1,31 #define HASH_LEFT 31-(LG_PTEG_SIZE+Hash_bits-1) #define HASH_RIGHT 31-LG_PTEG_SIZE __REF _GLOBAL(create_hpte) /* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */ rlwinm r8,r5,32-9,30,30 /* _PAGE_RW -> PP msb */ rlwinm r0,r5,32-6,30,30 /* _PAGE_DIRTY -> PP msb */ and r8,r8,r0 /* writable if _RW & _DIRTY */ rlwimi r5,r5,32-1,30,30 /* _PAGE_USER -> PP msb */ rlwimi r5,r5,32-2,31,31 /* _PAGE_USER -> PP lsb */ ori r8,r8,0xe04 /* clear out reserved bits */ andc r8,r5,r8 /* PP = user? (rw&dirty? 1: 3): 0 */ BEGIN_FTR_SECTION rlwinm r8,r8,0,~_PAGE_COHERENT /* clear M (coherence not required) */ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) #ifdef CONFIG_PTE_64BIT /* Put the XPN bits into the PTE */ rlwimi r8,r10,8,20,22 rlwimi r8,r10,2,29,29 #endif /* Construct the high word of the PPC-style PTE (r5) */ rlwinm r5,r3,7,1,24 /* put VSID in 0x7fffff80 bits */ rlwimi r5,r4,10,26,31 /* put in API (abbrev page index) */ SET_V(r5) /* set V (valid) bit */ patch_site 0f, patch__hash_page_A0 patch_site 1f, patch__hash_page_A1 patch_site 2f, patch__hash_page_A2 /* Get the address of the primary PTE group in the hash table (r3) */ 0: lis r0, (Hash_base - PAGE_OFFSET)@h /* base address of hash table */ 1: rlwimi r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */ 2: rlwinm r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */ xor r3,r3,r0 /* make primary hash */ li r0,8 /* PTEs/group */ /* * Test the _PAGE_HASHPTE bit in the old linux PTE, and skip the search * if it is clear, meaning that the HPTE isn't there already... */ andi. 
r6,r6,_PAGE_HASHPTE beq+ 10f /* no PTE: go look for an empty slot */ tlbie r4 /* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */ mtctr r0 addi r4,r3,-HPTE_SIZE 1: LDPTEu r6,HPTE_SIZE(r4) /* get next PTE */ CMPPTE 0,r6,r5 bdnzf 2,1b /* loop while ctr != 0 && !cr0.eq */ beq+ .Lfound_slot patch_site 0f, patch__hash_page_B /* Search the secondary PTEG for a matching PTE */ ori r5,r5,PTE_H /* set H (secondary hash) bit */ 0: xoris r4,r3,Hash_msk>>16 /* compute secondary hash */ xori r4,r4,(-PTEG_SIZE & 0xffff) addi r4,r4,-HPTE_SIZE mtctr r0 2: LDPTEu r6,HPTE_SIZE(r4) CMPPTE 0,r6,r5 bdnzf 2,2b beq+ .Lfound_slot xori r5,r5,PTE_H /* clear H bit again */ /* Search the primary PTEG for an empty slot */ 10: mtctr r0 addi r4,r3,-HPTE_SIZE /* search primary PTEG */ 1: LDPTEu r6,HPTE_SIZE(r4) /* get next PTE */ TST_V(r6) /* test valid bit */ bdnzf 2,1b /* loop while ctr != 0 && !cr0.eq */ beq+ .Lfound_empty patch_site 0f, patch__hash_page_C /* Search the secondary PTEG for an empty slot */ ori r5,r5,PTE_H /* set H (secondary hash) bit */ 0: xoris r4,r3,Hash_msk>>16 /* compute secondary hash */ xori r4,r4,(-PTEG_SIZE & 0xffff) addi r4,r4,-HPTE_SIZE mtctr r0 2: LDPTEu r6,HPTE_SIZE(r4) TST_V(r6) bdnzf 2,2b beq+ .Lfound_empty xori r5,r5,PTE_H /* clear H bit again */ /* * Choose an arbitrary slot in the primary PTEG to overwrite. * Since both the primary and secondary PTEGs are full, and we * have no information that the PTEs in the primary PTEG are * more important or useful than those in the secondary PTEG, * and we know there is a definite (although small) speed * advantage to putting the PTE in the primary PTEG, we always * put the PTE in the primary PTEG. */ lis r4, (next_slot - PAGE_OFFSET)@ha /* get next evict slot */ lwz r6, (next_slot - PAGE_OFFSET)@l(r4) addi r6,r6,HPTE_SIZE /* search for candidate */ andi. r6,r6,7*HPTE_SIZE stw r6,next_slot@l(r4) add r4,r3,r6 #ifndef CONFIG_SMP /* Store PTE in PTEG */ .Lfound_empty: STPTE r5,0(r4) .Lfound_slot: STPTE r8,HPTE_SIZE/2(r4) #else /* CONFIG_SMP */ /* * Between the tlbie above and updating the hash table entry below, * another CPU could read the hash table entry and put it in its TLB. * There are 3 cases: * 1. using an empty slot * 2. updating an earlier entry to change permissions (i.e. enable write) * 3. taking over the PTE for an unrelated address * * In each case it doesn't really matter if the other CPUs have the old * PTE in their TLB. So we don't need to bother with another tlbie here, * which is convenient as we've overwritten the register that had the * address. :-) The tlbie above is mainly to make sure that this CPU comes * and gets the new PTE from the hash table. * * We do however have to make sure that the PTE is never in an invalid * state with the V bit set. */ .Lfound_empty: .Lfound_slot: CLR_V(r5,r0) /* clear V (valid) bit in PTE */ STPTE r5,0(r4) sync TLBSYNC STPTE r8,HPTE_SIZE/2(r4) /* put in correct RPN, WIMG, PP bits */ sync SET_V(r5) STPTE r5,0(r4) /* finally set V bit in PTE */ #endif /* CONFIG_SMP */ sync /* make sure pte updates get to memory */ blr .previous _ASM_NOKPROBE_SYMBOL(create_hpte) .section .bss .align 2 next_slot: .space 4 .previous /* * Flush the entry for a particular page from the hash table. * * flush_hash_pages(unsigned context, unsigned long va, unsigned long pmdval, * int count) * * We assume that there is a hash table in use (Hash != 0). 
*/ __REF _GLOBAL(flush_hash_pages) /* * We disable interrupts here, even on UP, because we want * the _PAGE_HASHPTE bit to be a reliable indication of * whether the HPTE exists (or at least whether one did once). * We also turn off the MMU for data accesses so that * we can't take a hash table miss (assuming the code is * covered by a BAT). -- paulus */ mfmsr r10 rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */ rlwinm r0,r0,0,28,26 /* clear MSR_DR */ mtmsr r0 isync /* First find a PTE in the range that has _PAGE_HASHPTE set */ #ifndef CONFIG_PTE_64BIT rlwimi r5,r4,22,20,29 #else rlwimi r5,r4,23,20,28 addi r5,r5,PTE_FLAGS_OFFSET #endif 1: lwz r0,0(r5) cmpwi cr1,r6,1 andi. r0,r0,_PAGE_HASHPTE bne 2f ble cr1,19f addi r4,r4,0x1000 addi r5,r5,PTE_T_SIZE addi r6,r6,-1 b 1b /* Convert context and va to VSID */ 2: mulli r3,r3,897*16 /* multiply context by context skew */ rlwinm r0,r4,4,28,31 /* get ESID (top 4 bits of va) */ mulli r0,r0,0x111 /* multiply by ESID skew */ add r3,r3,r0 /* note code below trims to 24 bits */ /* Construct the high word of the PPC-style PTE (r11) */ rlwinm r11,r3,7,1,24 /* put VSID in 0x7fffff80 bits */ rlwimi r11,r4,10,26,31 /* put in API (abbrev page index) */ SET_V(r11) /* set V (valid) bit */ #ifdef CONFIG_SMP lis r9, (mmu_hash_lock - PAGE_OFFSET)@ha addi r9, r9, (mmu_hash_lock - PAGE_OFFSET)@l tophys (r8, r2) lwz r8, TASK_CPU(r8) oris r8,r8,9 10: lwarx r0,0,r9 cmpi 0,r0,0 bne- 11f stwcx. r8,0,r9 beq+ 12f 11: lwz r0,0(r9) cmpi 0,r0,0 beq 10b b 11b 12: isync #endif /* * Check the _PAGE_HASHPTE bit in the linux PTE. If it is * already clear, we're done (for this pte). If not, * clear it (atomically) and proceed. -- paulus. */ 33: lwarx r8,0,r5 /* fetch the pte flags word */ andi. r0,r8,_PAGE_HASHPTE beq 8f /* done if HASHPTE is already clear */ rlwinm r8,r8,0,31,29 /* clear HASHPTE bit */ stwcx. r8,0,r5 /* update the pte */ bne- 33b patch_site 0f, patch__flush_hash_A0 patch_site 1f, patch__flush_hash_A1 patch_site 2f, patch__flush_hash_A2 /* Get the address of the primary PTE group in the hash table (r3) */ 0: lis r8, (Hash_base - PAGE_OFFSET)@h /* base address of hash table */ 1: rlwimi r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */ 2: rlwinm r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */ xor r8,r0,r8 /* make primary hash */ /* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */ li r0,8 /* PTEs/group */ mtctr r0 addi r12,r8,-HPTE_SIZE 1: LDPTEu r0,HPTE_SIZE(r12) /* get next PTE */ CMPPTE 0,r0,r11 bdnzf 2,1b /* loop while ctr != 0 && !cr0.eq */ beq+ 3f patch_site 0f, patch__flush_hash_B /* Search the secondary PTEG for a matching PTE */ ori r11,r11,PTE_H /* set H (secondary hash) bit */ li r0,8 /* PTEs/group */ 0: xoris r12,r8,Hash_msk>>16 /* compute secondary hash */ xori r12,r12,(-PTEG_SIZE & 0xffff) addi r12,r12,-HPTE_SIZE mtctr r0 2: LDPTEu r0,HPTE_SIZE(r12) CMPPTE 0,r0,r11 bdnzf 2,2b xori r11,r11,PTE_H /* clear H again */ bne- 4f /* should rarely fail to find it */ 3: li r0,0 STPTE r0,0(r12) /* invalidate entry */ 4: sync tlbie r4 /* in hw tlb too */ sync 8: ble cr1,9f /* if all ptes checked */ 81: addi r6,r6,-1 addi r5,r5,PTE_T_SIZE addi r4,r4,0x1000 lwz r0,0(r5) /* check next pte */ cmpwi cr1,r6,1 andi. r0,r0,_PAGE_HASHPTE bne 33b bgt cr1,81b 9: #ifdef CONFIG_SMP TLBSYNC li r0,0 stw r0,0(r9) /* clear mmu_hash_lock */ #endif 19: mtmsr r10 isync blr .previous EXPORT_SYMBOL(flush_hash_pages) _ASM_NOKPROBE_SYMBOL(flush_hash_pages)
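create_hpte and flush_hash_pages share one addressing scheme: VSID = context * (897 * 16) + ESID * 0x111 (trimmed to 24 bits), the primary PTEG index is low VSID bits XOR the page index within the segment, and the secondary PTEG is the one's complement of the primary within the hash mask. Below is a hedged C model of that computation; HASH_BITS matches the early 256 kB table (Hash_bits = 12 above), and the masking is simplified relative to the patched rlwimi/rlwinm instructions.

#include <stdint.h>
#include <stdio.h>

#define HASH_BITS 12                        /* Hash_bits for the early table */
#define HASH_MASK ((1u << HASH_BITS) - 1)

static uint32_t pteg_index(uint32_t context, uint32_t va, int secondary)
{
	uint32_t esid = va >> 28;                        /* top 4 bits of va */
	uint32_t vsid = (context * 897 * 16 + esid * 0x111) & 0xffffff;
	uint32_t pi   = (va >> 12) & 0xffff;             /* page within segment */
	uint32_t hash = (vsid ^ pi) & HASH_MASK;         /* xor r3,r3,r0 */
	return secondary ? hash ^ HASH_MASK : hash;      /* xoris/xori complement */
}

int main(void)
{
	uint32_t va = 0x10002000, ctx = 3;
	printf("primary PTEG %u, secondary PTEG %u\n",
	       (unsigned)pteg_index(ctx, va, 0), (unsigned)pteg_index(ctx, va, 1));
	return 0;
}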
aixcc-public/challenge-001-exemplar-source
9,856
arch/powerpc/mm/nohash/tlb_low.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * This file contains low-level functions for performing various * types of TLB invalidations on various processors with no hash * table. * * This file implements the following functions for all no-hash * processors. Some aren't implemented for some variants. Some * are inline in tlbflush.h * * - tlbil_va * - tlbil_pid * - tlbil_all * - tlbivax_bcast * * Code mostly moved over from misc_32.S * * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * Partially rewritten by Cort Dougan (cort@cs.nmt.edu) * Paul Mackerras, Kumar Gala and Benjamin Herrenschmidt. */ #include <asm/reg.h> #include <asm/page.h> #include <asm/cputable.h> #include <asm/mmu.h> #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> #include <asm/processor.h> #include <asm/bug.h> #include <asm/asm-compat.h> #include <asm/feature-fixups.h> #if defined(CONFIG_40x) /* * 40x implementation needs only tlbil_va */ _GLOBAL(__tlbil_va) /* We run the search with interrupts disabled because we have to change * the PID and I don't want to preempt when that happens. */ mfmsr r5 mfspr r6,SPRN_PID wrteei 0 mtspr SPRN_PID,r4 tlbsx. r3, 0, r3 mtspr SPRN_PID,r6 wrtee r5 bne 1f sync /* There are only 64 TLB entries, so r3 < 64, which means bit 25 is * clear. Since 25 is the V bit in the TLB_TAG, loading this value * will invalidate the TLB entry. */ tlbwe r3, r3, TLB_TAG isync 1: blr #elif defined(CONFIG_PPC_8xx) /* * Nothing to do for 8xx, everything is inline */ #elif defined(CONFIG_44x) /* Includes 47x */ /* * 440 implementation uses tlbsx/we for tlbil_va and a full sweep * of the TLB for everything else. */ _GLOBAL(__tlbil_va) mfspr r5,SPRN_MMUCR mfmsr r10 /* * We write 16 bits of STID since 47x supports that much, we * will never be passed out of bounds values on 440 (hopefully) */ rlwimi r5,r4,0,16,31 /* We have to run the search with interrupts disabled, otherwise * an interrupt which causes a TLB miss can clobber the MMUCR * between the mtspr and the tlbsx. * * Critical and Machine Check interrupts take care of saving * and restoring MMUCR, so only normal interrupts have to be * taken care of. */ wrteei 0 mtspr SPRN_MMUCR,r5 tlbsx. r6,0,r3 bne 10f sync #ifndef CONFIG_PPC_47x /* On 440 there are only 64 TLB entries, so r3 < 64, which means bit * 22 is clear. Since 22 is the V bit in the TLB_PAGEID, loading this * value will invalidate the TLB entry. */ tlbwe r6,r6,PPC44x_TLB_PAGEID #else oris r7,r6,0x8000 /* specify way explicitly */ clrrwi r4,r3,12 /* get an EPN for the hashing with V = 0 */ ori r4,r4,PPC47x_TLBE_SIZE tlbwe r4,r7,0 /* write it */ #endif /* !CONFIG_PPC_47x */ isync 10: wrtee r10 blr _GLOBAL(_tlbil_all) _GLOBAL(_tlbil_pid) #ifndef CONFIG_PPC_47x li r3,0 sync /* Load high watermark */ lis r4,tlb_44x_hwater@ha lwz r5,tlb_44x_hwater@l(r4) 1: tlbwe r3,r3,PPC44x_TLB_PAGEID addi r3,r3,1 cmpw 0,r3,r5 ble 1b isync blr #else /* 476 variant. There's no simple way to do this, hopefully we'll * try to limit the number of such full invalidates */ mfmsr r11 /* Interrupts off */ wrteei 0 li r3,-1 /* Current set */ lis r10,tlb_47x_boltmap@h ori r10,r10,tlb_47x_boltmap@l lis r7,0x8000 /* Specify way explicitly */ b 9f /* For each set */ 1: li r9,4 /* Number of ways */ li r4,0 /* Current way */ li r6,0 /* Default entry value 0 */ andi.
r0,r8,1 /* Check if way 0 is bolted */ mtctr r9 /* Load way counter */ bne- 3f /* Bolted, skip loading it */ 2: /* For each way */ or r5,r3,r4 /* Make way|index for tlbre */ rlwimi r5,r5,16,8,15 /* Copy index into position */ tlbre r6,r5,0 /* Read entry */ 3: addis r4,r4,0x2000 /* Next way */ andi. r0,r6,PPC47x_TLB0_VALID /* Valid entry ? */ beq 4f /* Nope, skip it */ rlwimi r7,r5,0,1,2 /* Insert way number */ rlwinm r6,r6,0,21,19 /* Clear V */ tlbwe r6,r7,0 /* Write it */ 4: bdnz 2b /* Loop for each way */ srwi r8,r8,1 /* Next boltmap bit */ 9: cmpwi cr1,r3,255 /* Last set done ? */ addi r3,r3,1 /* Next set */ beq cr1,1f /* End of loop */ andi. r0,r3,0x1f /* Need to load a new boltmap word ? */ bne 1b /* No, loop */ lwz r8,0(r10) /* Load boltmap entry */ addi r10,r10,4 /* Next word */ b 1b /* Then loop */ 1: isync /* Sync shadows */ wrtee r11 blr #endif /* !CONFIG_PPC_47x */ #ifdef CONFIG_PPC_47x /* * _tlbivax_bcast is only on 47x. We don't bother doing a runtime * check though, it will blow up soon enough if we mistakenly try * to use it on a 440. */ _GLOBAL(_tlbivax_bcast) mfspr r5,SPRN_MMUCR mfmsr r10 rlwimi r5,r4,0,16,31 wrteei 0 mtspr SPRN_MMUCR,r5 isync PPC_TLBIVAX(0, R3) isync mbar tlbsync BEGIN_FTR_SECTION b 1f END_FTR_SECTION_IFSET(CPU_FTR_476_DD2) sync wrtee r10 blr /* * DD2 HW could hang if an instruction fetch happens before msync completes. * Touch enough instruction cache lines to ensure cache hits */ 1: mflr r9 bcl 20,31,$+4 2: mflr r6 li r7,32 PPC_ICBT(0,R6,R7) /* touch next cache line */ add r6,r6,r7 PPC_ICBT(0,R6,R7) /* touch next cache line */ add r6,r6,r7 PPC_ICBT(0,R6,R7) /* touch next cache line */ sync nop nop nop nop nop nop nop nop mtlr r9 wrtee r10 blr #endif /* CONFIG_PPC_47x */ #elif defined(CONFIG_PPC_85xx) /* * FSL BookE implementations. * * Since feature sections are using _SECTION_ELSE we need * to have the larger code path before the _SECTION_ELSE */ /* * Flush MMU TLB on the local processor */ _GLOBAL(_tlbil_all) BEGIN_MMU_FTR_SECTION li r3,(MMUCSR0_TLBFI)@l mtspr SPRN_MMUCSR0, r3 1: mfspr r3,SPRN_MMUCSR0 andi. r3,r3,MMUCSR0_TLBFI@l bne 1b MMU_FTR_SECTION_ELSE PPC_TLBILX_ALL(0,R0) ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_TLBILX) msync isync blr _GLOBAL(_tlbil_pid) BEGIN_MMU_FTR_SECTION slwi r3,r3,16 mfmsr r10 wrteei 0 mfspr r4,SPRN_MAS6 /* save MAS6 */ mtspr SPRN_MAS6,r3 PPC_TLBILX_PID(0,R0) mtspr SPRN_MAS6,r4 /* restore MAS6 */ wrtee r10 MMU_FTR_SECTION_ELSE li r3,(MMUCSR0_TLBFI)@l mtspr SPRN_MMUCSR0, r3 1: mfspr r3,SPRN_MMUCSR0 andi. r3,r3,MMUCSR0_TLBFI@l bne 1b ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBILX) msync isync blr /* * Flush MMU TLB for a particular address, but only on the local processor * (no broadcast) */ _GLOBAL(__tlbil_va) mfmsr r10 wrteei 0 slwi r4,r4,16 ori r4,r4,(MAS6_ISIZE(BOOK3E_PAGESZ_4K))@l mtspr SPRN_MAS6,r4 /* assume AS=0 for now */ BEGIN_MMU_FTR_SECTION tlbsx 0,r3 mfspr r4,SPRN_MAS1 /* check valid */ andis.
r3,r4,MAS1_VALID@h beq 1f rlwinm r4,r4,0,1,31 mtspr SPRN_MAS1,r4 tlbwe MMU_FTR_SECTION_ELSE PPC_TLBILX_VA(0,R3) ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_TLBILX) msync isync 1: wrtee r10 blr #elif defined(CONFIG_PPC_BOOK3E_64) /* * New Book3E (>= 2.06) implementation * * Note: We may be able to get away without the interrupt masking stuff * if we save/restore MAS6 on exceptions that might modify it */ _GLOBAL(_tlbil_pid) slwi r4,r3,MAS6_SPID_SHIFT mfmsr r10 wrteei 0 mtspr SPRN_MAS6,r4 PPC_TLBILX_PID(0,R0) wrtee r10 msync isync blr _GLOBAL(_tlbil_pid_noind) slwi r4,r3,MAS6_SPID_SHIFT mfmsr r10 ori r4,r4,MAS6_SIND wrteei 0 mtspr SPRN_MAS6,r4 PPC_TLBILX_PID(0,R0) wrtee r10 msync isync blr _GLOBAL(_tlbil_all) PPC_TLBILX_ALL(0,R0) msync isync blr _GLOBAL(_tlbil_va) mfmsr r10 wrteei 0 cmpwi cr0,r6,0 slwi r4,r4,MAS6_SPID_SHIFT rlwimi r4,r5,MAS6_ISIZE_SHIFT,MAS6_ISIZE_MASK beq 1f rlwimi r4,r6,MAS6_SIND_SHIFT,MAS6_SIND 1: mtspr SPRN_MAS6,r4 /* assume AS=0 for now */ PPC_TLBILX_VA(0,R3) msync isync wrtee r10 blr _GLOBAL(_tlbivax_bcast) mfmsr r10 wrteei 0 cmpwi cr0,r6,0 slwi r4,r4,MAS6_SPID_SHIFT rlwimi r4,r5,MAS6_ISIZE_SHIFT,MAS6_ISIZE_MASK beq 1f rlwimi r4,r6,MAS6_SIND_SHIFT,MAS6_SIND 1: mtspr SPRN_MAS6,r4 /* assume AS=0 for now */ PPC_TLBIVAX(0,R3) mbar tlbsync sync wrtee r10 blr #else #error Unsupported processor type ! #endif #if defined(CONFIG_PPC_E500) /* * extern void loadcam_entry(unsigned int index) * * Load TLBCAM[index] entry in to the L2 CAM MMU * Must preserve r7, r8, r9, r10, r11, r12 */ _GLOBAL(loadcam_entry) mflr r5 LOAD_REG_ADDR_PIC(r4, TLBCAM) mtlr r5 mulli r5,r3,TLBCAM_SIZE add r3,r5,r4 lwz r4,TLBCAM_MAS0(r3) mtspr SPRN_MAS0,r4 lwz r4,TLBCAM_MAS1(r3) mtspr SPRN_MAS1,r4 PPC_LL r4,TLBCAM_MAS2(r3) mtspr SPRN_MAS2,r4 lwz r4,TLBCAM_MAS3(r3) mtspr SPRN_MAS3,r4 BEGIN_MMU_FTR_SECTION lwz r4,TLBCAM_MAS7(r3) mtspr SPRN_MAS7,r4 END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS) isync tlbwe isync blr /* * Load multiple TLB entries at once, using an alternate-space * trampoline so that we don't have to care about whether the same * TLB entry maps us before and after. * * r3 = first entry to write * r4 = number of entries to write * r5 = temporary tlb entry (0 means no switch to AS1) */ _GLOBAL(loadcam_multi) mflr r8 /* Don't switch to AS=1 if already there */ mfmsr r11 andi. r11,r11,MSR_IS bne 10f mr. r12, r5 beq 10f /* * Set up temporary TLB entry that is the same as what we're * running from, but in AS=1. */ bcl 20,31,$+4 1: mflr r6 tlbsx 0,r8 mfspr r6,SPRN_MAS1 ori r6,r6,MAS1_TS mtspr SPRN_MAS1,r6 mfspr r6,SPRN_MAS0 rlwimi r6,r5,MAS0_ESEL_SHIFT,MAS0_ESEL_MASK mr r7,r5 mtspr SPRN_MAS0,r6 isync tlbwe isync /* Switch to AS=1 */ mfmsr r6 ori r6,r6,MSR_IS|MSR_DS mtmsr r6 isync 10: mr r9,r3 add r10,r3,r4 2: bl loadcam_entry addi r9,r9,1 cmpw r9,r10 mr r3,r9 blt 2b /* Don't return to AS=0 if we were in AS=1 at function start */ andi. r11,r11,MSR_IS bne 3f cmpwi r12, 0 beq 3f /* Return to AS=0 and clear the temporary entry */ mfmsr r6 rlwinm. r6,r6,0,~(MSR_IS|MSR_DS) mtmsr r6 isync li r6,0 mtspr SPRN_MAS1,r6 rlwinm r6,r7,MAS0_ESEL_SHIFT,MAS0_ESEL_MASK oris r6,r6,MAS0_TLBSEL(1)@h mtspr SPRN_MAS0,r6 isync tlbwe isync 3: mtlr r8 blr #endif
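loadcam_multi's core is a plain loop over loadcam_entry, which expands TLBCAM[index] into the MAS registers and issues tlbwe; all the surrounding complexity is the AS=1 trampoline that keeps the CPU mapped while its own TLB entry is rewritten. A toy C rendering of the loop and the per-entry expansion follows; the struct layout, the sample values, and the printf stand in for the real TLBCAM table and the mtspr/tlbwe sequence.

#include <stdio.h>

struct tlbcam {                              /* illustrative, not the kernel struct */
	unsigned mas0, mas1, mas2, mas3, mas7;
};

static struct tlbcam TLBCAM[2] = {           /* toy table with made-up values */
	{ 0x10000000, 0xc0000500, 0xc0000000, 0x0000003f, 0 },
	{ 0x10010000, 0xc0000500, 0xd0000000, 0x1000003f, 0 },
};

static void loadcam_entry_c(unsigned idx)    /* models loadcam_entry */
{
	struct tlbcam *e = &TLBCAM[idx];     /* mulli r5,r3,TLBCAM_SIZE; add r3,r5,r4 */
	printf("tlbwe MAS0=%08x MAS1=%08x MAS2=%08x MAS3=%08x MAS7=%08x\n",
	       e->mas0, e->mas1, e->mas2, e->mas3, e->mas7);
}

static void loadcam_multi_c(unsigned first, unsigned n)
{
	unsigned i;
	for (i = first; i < first + n; i++)  /* 2: bl loadcam_entry; addi; cmpw; blt 2b */
		loadcam_entry_c(i);
}

int main(void)
{
	loadcam_multi_c(0, 2);
	return 0;
}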
aixcc-public/challenge-001-exemplar-source
33,888
arch/powerpc/mm/nohash/tlb_low_64e.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Low level TLB miss handlers for Book3E * * Copyright (C) 2008-2009 * Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp. */ #include <linux/pgtable.h> #include <asm/processor.h> #include <asm/reg.h> #include <asm/page.h> #include <asm/mmu.h> #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> #include <asm/cputable.h> #include <asm/exception-64e.h> #include <asm/ppc-opcode.h> #include <asm/kvm_asm.h> #include <asm/kvm_booke_hv_asm.h> #include <asm/feature-fixups.h> #define VPTE_PMD_SHIFT (PTE_INDEX_SIZE) #define VPTE_PUD_SHIFT (VPTE_PMD_SHIFT + PMD_INDEX_SIZE) #define VPTE_PGD_SHIFT (VPTE_PUD_SHIFT + PUD_INDEX_SIZE) #define VPTE_INDEX_SIZE (VPTE_PGD_SHIFT + PGD_INDEX_SIZE) /********************************************************************** * * * TLB miss handling for Book3E with a bolted linear mapping * * No virtual page table, no nested TLB misses * * * **********************************************************************/ /* * Note that, unlike non-bolted handlers, TLB_EXFRAME is not * modified by the TLB miss handlers themselves, since the TLB miss * handler code will not itself cause a recursive TLB miss. * * TLB_EXFRAME will be modified when crit/mc/debug exceptions are * entered/exited. */ .macro tlb_prolog_bolted intnum addr mtspr SPRN_SPRG_GEN_SCRATCH,r12 mfspr r12,SPRN_SPRG_TLB_EXFRAME std r13,EX_TLB_R13(r12) std r10,EX_TLB_R10(r12) mfspr r13,SPRN_SPRG_PACA mfcr r10 std r11,EX_TLB_R11(r12) #ifdef CONFIG_KVM_BOOKE_HV BEGIN_FTR_SECTION mfspr r11, SPRN_SRR1 END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) #endif DO_KVM \intnum, SPRN_SRR1 std r16,EX_TLB_R16(r12) mfspr r16,\addr /* get faulting address */ std r14,EX_TLB_R14(r12) ld r14,PACAPGD(r13) std r15,EX_TLB_R15(r12) std r10,EX_TLB_CR(r12) START_BTB_FLUSH_SECTION mfspr r11, SPRN_SRR1 andi. r10,r11,MSR_PR beq 1f BTB_FLUSH(r10) 1: END_BTB_FLUSH_SECTION std r7,EX_TLB_R7(r12) .endm .macro tlb_epilog_bolted ld r14,EX_TLB_CR(r12) ld r7,EX_TLB_R7(r12) ld r10,EX_TLB_R10(r12) ld r11,EX_TLB_R11(r12) ld r13,EX_TLB_R13(r12) mtcr r14 ld r14,EX_TLB_R14(r12) ld r15,EX_TLB_R15(r12) ld r16,EX_TLB_R16(r12) mfspr r12,SPRN_SPRG_GEN_SCRATCH .endm /* Data TLB miss */ START_EXCEPTION(data_tlb_miss_bolted) tlb_prolog_bolted BOOKE_INTERRUPT_DTLB_MISS SPRN_DEAR /* We need _PAGE_PRESENT and _PAGE_ACCESSED set */ /* We do the user/kernel test for the PID here along with the RW test */ /* We pre-test some combination of permissions to avoid double * faults: * * We move the ESR:ST bit into the position of _PAGE_BAP_SW in the PTE * ESR_ST is 0x00800000 * _PAGE_BAP_SW is 0x00000010 * So the shift is >> 19. This tests for supervisor writeability. * If the page happens to be supervisor writeable and not user * writeable, we will take a new fault later, but that should be * a rare enough case. * * We also move ESR_ST in _PAGE_DIRTY position * _PAGE_DIRTY is 0x00001000 so the shift is >> 11 * * MAS1 is preset for all we need except for TID that needs to * be cleared for kernel translations */ mfspr r11,SPRN_ESR srdi r15,r16,60 /* get region */ rldicl. r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4 bne- dtlb_miss_fault_bolted /* Bail if fault addr is invalid */ rlwinm r10,r11,32-19,27,27 rlwimi r10,r11,32-16,19,19 cmpwi r15,0 /* user vs kernel check */ ori r10,r10,_PAGE_PRESENT oris r11,r10,_PAGE_ACCESSED@h bne tlb_miss_kernel_bolted tlb_miss_user_bolted: #ifdef CONFIG_PPC_KUAP mfspr r10,SPRN_MAS1 rlwinm. 
r10,r10,0,0x3fff0000 beq- tlb_miss_fault_bolted /* KUAP fault */ #endif tlb_miss_common_bolted: /* * This is the guts of the TLB miss handler for bolted-linear. * We are entered with: * * r16 = faulting address * r15 = crap (free to use) * r14 = page table base * r13 = PACA * r11 = PTE permission mask * r10 = crap (free to use) */ rldicl r15,r16,64-PGDIR_SHIFT+3,64-PGD_INDEX_SIZE-3 cmpldi cr0,r14,0 clrrdi r15,r15,3 beq tlb_miss_fault_bolted /* No PGDIR, bail */ ldx r14,r14,r15 /* grab pgd entry */ rldicl r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3 clrrdi r15,r15,3 cmpdi cr0,r14,0 bge tlb_miss_fault_bolted /* Bad pgd entry or hugepage; bail */ ldx r14,r14,r15 /* grab pud entry */ rldicl r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3 clrrdi r15,r15,3 cmpdi cr0,r14,0 bge tlb_miss_fault_bolted ldx r14,r14,r15 /* Grab pmd entry */ rldicl r15,r16,64-PAGE_SHIFT+3,64-PTE_INDEX_SIZE-3 clrrdi r15,r15,3 cmpdi cr0,r14,0 bge tlb_miss_fault_bolted ldx r14,r14,r15 /* Grab PTE, normal (!huge) page */ /* Check if required permissions are met */ andc. r15,r11,r14 rldicr r15,r14,64-(PTE_RPN_SHIFT-PAGE_SHIFT),63-PAGE_SHIFT bne- tlb_miss_fault_bolted /* Now we build the MAS: * * MAS 0 : Fully setup with defaults in MAS4 and TLBnCFG * MAS 1 : Almost fully setup * - PID already updated by caller if necessary * - TSIZE need change if !base page size, not * yet implemented for now * MAS 2 : Defaults not useful, need to be redone * MAS 3+7 : Needs to be done */ clrrdi r11,r16,12 /* Clear low crap in EA */ clrldi r15,r15,12 /* Clear crap at the top */ rlwimi r11,r14,32-19,27,31 /* Insert WIMGE */ rlwimi r15,r14,32-8,22,25 /* Move in U bits */ mtspr SPRN_MAS2,r11 andi. r11,r14,_PAGE_DIRTY rlwimi r15,r14,32-2,26,31 /* Move in BAP bits */ /* Mask out SW and UW if !DIRTY (XXX optimize this !) */ bne 1f li r11,MAS3_SW|MAS3_UW andc r15,r15,r11 1: mtspr SPRN_MAS7_MAS3,r15 tlbwe tlb_miss_done_bolted: tlb_epilog_bolted rfi itlb_miss_kernel_bolted: li r11,_PAGE_PRESENT|_PAGE_BAP_SX /* Base perm */ oris r11,r11,_PAGE_ACCESSED@h tlb_miss_kernel_bolted: mfspr r10,SPRN_MAS1 ld r14,PACA_KERNELPGD(r13) srdi r15,r16,44 /* get kernel region */ andi. r15,r15,1 /* Check for vmalloc region */ rlwinm r10,r10,0,16,1 /* Clear TID */ mtspr SPRN_MAS1,r10 bne+ tlb_miss_common_bolted tlb_miss_fault_bolted: /* We need to check if it was an instruction miss */ andi. r10,r11,_PAGE_BAP_UX|_PAGE_BAP_SX bne itlb_miss_fault_bolted dtlb_miss_fault_bolted: tlb_epilog_bolted b exc_data_storage_book3e itlb_miss_fault_bolted: tlb_epilog_bolted b exc_instruction_storage_book3e /* Instruction TLB miss */ START_EXCEPTION(instruction_tlb_miss_bolted) tlb_prolog_bolted BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR0 rldicl. r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4 srdi r15,r16,60 /* get region */ bne- itlb_miss_fault_bolted li r11,_PAGE_PRESENT|_PAGE_BAP_UX /* Base perm */ /* We do the user/kernel test for the PID here along with the RW test */ cmpldi cr0,r15,0 /* Check for user region */ oris r11,r11,_PAGE_ACCESSED@h beq tlb_miss_user_bolted b itlb_miss_kernel_bolted /* * TLB miss handling for e6500 and derivatives, using hardware tablewalk. * * Linear mapping is bolted: no virtual page table or nested TLB misses * Indirect entries in TLB1, hardware loads resulting direct entries * into TLB0 * No HES or NV hint on TLB1, so we need to do software round-robin * No tlbsrx. 
so we need a spinlock, and we have to deal * with MAS-damage caused by tlbsx * 4K pages only */ START_EXCEPTION(instruction_tlb_miss_e6500) tlb_prolog_bolted BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR0 ld r11,PACA_TCD_PTR(r13) srdi. r15,r16,60 /* get region */ ori r16,r16,1 bne tlb_miss_kernel_e6500 /* user/kernel test */ b tlb_miss_common_e6500 START_EXCEPTION(data_tlb_miss_e6500) tlb_prolog_bolted BOOKE_INTERRUPT_DTLB_MISS SPRN_DEAR ld r11,PACA_TCD_PTR(r13) srdi. r15,r16,60 /* get region */ rldicr r16,r16,0,62 bne tlb_miss_kernel_e6500 /* user vs kernel check */ /* * This is the guts of the TLB miss handler for e6500 and derivatives. * We are entered with: * * r16 = page of faulting address (low bit 0 if data, 1 if instruction) * r15 = crap (free to use) * r14 = page table base * r13 = PACA * r11 = tlb_per_core ptr * r10 = crap (free to use) * r7 = esel_next */ tlb_miss_common_e6500: crmove cr2*4+2,cr0*4+2 /* cr2.eq != 0 if kernel address */ BEGIN_FTR_SECTION /* CPU_FTR_SMT */ /* * Search if we already have an indirect entry for that virtual * address, and if we do, bail out. * * MAS6:IND should be already set based on MAS4 */ lhz r10,PACAPACAINDEX(r13) addi r10,r10,1 crclr cr1*4+eq /* set cr1.eq = 0 for non-recursive */ 1: lbarx r15,0,r11 cmpdi r15,0 bne 2f stbcx. r10,0,r11 bne 1b 3: .subsection 1 2: cmpd cr1,r15,r10 /* recursive lock due to mcheck/crit/etc? */ beq cr1,3b /* unlock will happen if cr1.eq = 0 */ 10: lbz r15,0(r11) cmpdi r15,0 bne 10b b 1b .previous END_FTR_SECTION_IFSET(CPU_FTR_SMT) lbz r7,TCD_ESEL_NEXT(r11) BEGIN_FTR_SECTION /* CPU_FTR_SMT */ /* * Erratum A-008139 says that we can't use tlbwe to change * an indirect entry in any way (including replacing or * invalidating) if the other thread could be in the process * of a lookup. The workaround is to invalidate the entry * with tlbilx before overwriting. */ rlwinm r10,r7,16,0xff0000 oris r10,r10,MAS0_TLBSEL(1)@h mtspr SPRN_MAS0,r10 isync tlbre mfspr r15,SPRN_MAS1 andis. r15,r15,MAS1_VALID@h beq 5f BEGIN_FTR_SECTION_NESTED(532) mfspr r10,SPRN_MAS8 rlwinm r10,r10,0,0x80000fff /* tgs,tlpid -> sgs,slpid */ mtspr SPRN_MAS5,r10 END_FTR_SECTION_NESTED(CPU_FTR_EMB_HV,CPU_FTR_EMB_HV,532) mfspr r10,SPRN_MAS1 rlwinm r15,r10,0,0x3fff0000 /* tid -> spid */ rlwimi r15,r10,20,0x00000003 /* ind,ts -> sind,sas */ mfspr r10,SPRN_MAS6 mtspr SPRN_MAS6,r15 mfspr r15,SPRN_MAS2 isync tlbilxva 0,r15 isync mtspr SPRN_MAS6,r10 5: BEGIN_FTR_SECTION_NESTED(532) li r10,0 mtspr SPRN_MAS8,r10 mtspr SPRN_MAS5,r10 END_FTR_SECTION_NESTED(CPU_FTR_EMB_HV,CPU_FTR_EMB_HV,532) tlbsx 0,r16 mfspr r10,SPRN_MAS1 andis. r15,r10,MAS1_VALID@h bne tlb_miss_done_e6500 FTR_SECTION_ELSE mfspr r10,SPRN_MAS1 ALT_FTR_SECTION_END_IFSET(CPU_FTR_SMT) oris r10,r10,MAS1_VALID@h beq cr2,4f rlwinm r10,r10,0,16,1 /* Clear TID */ 4: mtspr SPRN_MAS1,r10 /* Now, we need to walk the page tables. First check if we are in * range. */ rldicl. 
r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4 bne- tlb_miss_fault_e6500 rldicl r15,r16,64-PGDIR_SHIFT+3,64-PGD_INDEX_SIZE-3 cmpldi cr0,r14,0 clrrdi r15,r15,3 beq- tlb_miss_fault_e6500 /* No PGDIR, bail */ ldx r14,r14,r15 /* grab pgd entry */ rldicl r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3 clrrdi r15,r15,3 cmpdi cr0,r14,0 bge tlb_miss_huge_e6500 /* Bad pgd entry or hugepage; bail */ ldx r14,r14,r15 /* grab pud entry */ rldicl r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3 clrrdi r15,r15,3 cmpdi cr0,r14,0 bge tlb_miss_huge_e6500 ldx r14,r14,r15 /* Grab pmd entry */ mfspr r10,SPRN_MAS0 cmpdi cr0,r14,0 bge tlb_miss_huge_e6500 /* Now we build the MAS for a 2M indirect page: * * MAS 0 : ESEL needs to be filled by software round-robin * MAS 1 : Fully set up * - PID already updated by caller if necessary * - TSIZE for now is base ind page size always * - TID already cleared if necessary * MAS 2 : Default not 2M-aligned, need to be redone * MAS 3+7 : Needs to be done */ ori r14,r14,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT) mtspr SPRN_MAS7_MAS3,r14 clrrdi r15,r16,21 /* make EA 2M-aligned */ mtspr SPRN_MAS2,r15 tlb_miss_huge_done_e6500: lbz r16,TCD_ESEL_MAX(r11) lbz r14,TCD_ESEL_FIRST(r11) rlwimi r10,r7,16,0x00ff0000 /* insert esel_next into MAS0 */ addi r7,r7,1 /* increment esel_next */ mtspr SPRN_MAS0,r10 cmpw r7,r16 iseleq r7,r14,r7 /* if next == last use first */ stb r7,TCD_ESEL_NEXT(r11) tlbwe tlb_miss_done_e6500: .macro tlb_unlock_e6500 BEGIN_FTR_SECTION beq cr1,1f /* no unlock if lock was recursively grabbed */ li r15,0 isync stb r15,0(r11) 1: END_FTR_SECTION_IFSET(CPU_FTR_SMT) .endm tlb_unlock_e6500 tlb_epilog_bolted rfi tlb_miss_huge_e6500: beq tlb_miss_fault_e6500 li r10,1 andi. r15,r14,HUGEPD_SHIFT_MASK@l /* r15 = psize */ rldimi r14,r10,63,0 /* Set PD_HUGE */ xor r14,r14,r15 /* Clear size bits */ ldx r14,0,r14 /* * Now we build the MAS for a huge page. * * MAS 0 : ESEL needs to be filled by software round-robin * - can be handled by indirect code * MAS 1 : Need to clear IND and set TSIZE * MAS 2,3+7: Needs to be redone similar to non-tablewalk handler */ subi r15,r15,10 /* Convert psize to tsize */ mfspr r10,SPRN_MAS1 rlwinm r10,r10,0,~MAS1_IND rlwimi r10,r15,MAS1_TSIZE_SHIFT,MAS1_TSIZE_MASK mtspr SPRN_MAS1,r10 li r10,-0x400 sld r15,r10,r15 /* Generate mask based on size */ and r10,r16,r15 rldicr r15,r14,64-(PTE_RPN_SHIFT-PAGE_SHIFT),63-PAGE_SHIFT rlwimi r10,r14,32-19,27,31 /* Insert WIMGE */ clrldi r15,r15,PAGE_SHIFT /* Clear crap at the top */ rlwimi r15,r14,32-8,22,25 /* Move in U bits */ mtspr SPRN_MAS2,r10 andi. r10,r14,_PAGE_DIRTY rlwimi r15,r14,32-2,26,31 /* Move in BAP bits */ /* Mask out SW and UW if !DIRTY (XXX optimize this !) */ bne 1f li r10,MAS3_SW|MAS3_UW andc r15,r15,r10 1: mtspr SPRN_MAS7_MAS3,r15 mfspr r10,SPRN_MAS0 b tlb_miss_huge_done_e6500 tlb_miss_kernel_e6500: ld r14,PACA_KERNELPGD(r13) srdi r15,r16,44 /* get kernel region */ xoris r15,r15,0xc /* Check for vmalloc region */ cmplwi cr1,r15,1 beq+ cr1,tlb_miss_common_e6500 tlb_miss_fault_e6500: tlb_unlock_e6500 /* We need to check if it was an instruction miss */ andi. 
r16,r16,1 bne itlb_miss_fault_e6500 dtlb_miss_fault_e6500: tlb_epilog_bolted b exc_data_storage_book3e itlb_miss_fault_e6500: tlb_epilog_bolted b exc_instruction_storage_book3e /********************************************************************** * * * TLB miss handling for Book3E with TLB reservation and HES support * * * **********************************************************************/ /* Data TLB miss */ START_EXCEPTION(data_tlb_miss) TLB_MISS_PROLOG /* Now we handle the fault proper. We only save DEAR in normal * fault case since that's the only interesting values here. * We could probably also optimize by not saving SRR0/1 in the * linear mapping case but I'll leave that for later */ mfspr r14,SPRN_ESR mfspr r16,SPRN_DEAR /* get faulting address */ srdi r15,r16,44 /* get region */ xoris r15,r15,0xc cmpldi cr0,r15,0 /* linear mapping ? */ beq tlb_load_linear /* yes -> go to linear map load */ cmpldi cr1,r15,1 /* vmalloc mapping ? */ /* The page tables are mapped virtually linear. At this point, though, * we don't know whether we are trying to fault in a first level * virtual address or a virtual page table address. We can get that * from bit 0x1 of the region ID which we have set for a page table */ andis. r10,r15,0x1 bne- virt_page_table_tlb_miss std r14,EX_TLB_ESR(r12); /* save ESR */ std r16,EX_TLB_DEAR(r12); /* save DEAR */ /* We need _PAGE_PRESENT and _PAGE_ACCESSED set */ li r11,_PAGE_PRESENT oris r11,r11,_PAGE_ACCESSED@h /* We do the user/kernel test for the PID here along with the RW test */ srdi. r15,r16,60 /* Check for user region */ /* We pre-test some combination of permissions to avoid double * faults: * * We move the ESR:ST bit into the position of _PAGE_BAP_SW in the PTE * ESR_ST is 0x00800000 * _PAGE_BAP_SW is 0x00000010 * So the shift is >> 19. This tests for supervisor writeability. * If the page happens to be supervisor writeable and not user * writeable, we will take a new fault later, but that should be * a rare enough case. * * We also move ESR_ST in _PAGE_DIRTY position * _PAGE_DIRTY is 0x00001000 so the shift is >> 11 * * MAS1 is preset for all we need except for TID that needs to * be cleared for kernel translations */ rlwimi r11,r14,32-19,27,27 rlwimi r11,r14,32-16,19,19 beq normal_tlb_miss_user /* XXX replace the RMW cycles with immediate loads + writes */ 1: mfspr r10,SPRN_MAS1 rlwinm r10,r10,0,16,1 /* Clear TID */ mtspr SPRN_MAS1,r10 beq+ cr1,normal_tlb_miss /* We got a crappy address, just fault with whatever DEAR and ESR * are here */ TLB_MISS_EPILOG_ERROR b exc_data_storage_book3e /* Instruction TLB miss */ START_EXCEPTION(instruction_tlb_miss) TLB_MISS_PROLOG /* If we take a recursive fault, the second level handler may need * to know whether we are handling a data or instruction fault in * order to get to the right store fault handler. We provide that * info by writing a crazy value in ESR in our exception frame */ li r14,-1 /* store to exception frame is done later */ /* Now we handle the fault proper. We only save DEAR in the non * linear mapping case since we know the linear mapping case will * not re-enter. We could indeed optimize and also not save SRR0/1 * in the linear mapping case but I'll leave that for later * * Faulting address is SRR0 which is already in r16 */ srdi r15,r16,44 /* get region */ xoris r15,r15,0xc cmpldi cr0,r15,0 /* linear mapping ? */ beq tlb_load_linear /* yes -> go to linear map load */ cmpldi cr1,r15,1 /* vmalloc mapping ? 
*/ /* We do the user/kernel test for the PID here along with the RW test */ li r11,_PAGE_PRESENT|_PAGE_BAP_UX /* Base perm */ oris r11,r11,_PAGE_ACCESSED@h srdi. r15,r16,60 /* Check for user region */ std r14,EX_TLB_ESR(r12) /* write crazy -1 to frame */ beq normal_tlb_miss_user li r11,_PAGE_PRESENT|_PAGE_BAP_SX /* Base perm */ oris r11,r11,_PAGE_ACCESSED@h /* XXX replace the RMW cycles with immediate loads + writes */ mfspr r10,SPRN_MAS1 rlwinm r10,r10,0,16,1 /* Clear TID */ mtspr SPRN_MAS1,r10 beq+ cr1,normal_tlb_miss /* We got a crappy address, just fault */ TLB_MISS_EPILOG_ERROR b exc_instruction_storage_book3e /* * This is the guts of the first-level TLB miss handler for direct * misses. We are entered with: * * r16 = faulting address * r15 = region ID * r14 = crap (free to use) * r13 = PACA * r12 = TLB exception frame in PACA * r11 = PTE permission mask * r10 = crap (free to use) */ normal_tlb_miss_user: #ifdef CONFIG_PPC_KUAP mfspr r14,SPRN_MAS1 rlwinm. r14,r14,0,0x3fff0000 beq- normal_tlb_miss_access_fault /* KUAP fault */ #endif normal_tlb_miss: /* So we first construct the page table address. We do that by * shifting the bottom of the address (not the region ID) by * PAGE_SHIFT-3, clearing the bottom 3 bits (get a PTE ptr) and * or'ing the fourth high bit. * * NOTE: For 64K pages, we do things slightly differently in * order to handle the weird page table format used by linux */ srdi r15,r16,44 oris r10,r15,0x1 rldicl r14,r16,64-(PAGE_SHIFT-3),PAGE_SHIFT-3+4 sldi r15,r10,44 clrrdi r14,r14,19 or r10,r15,r14 ld r14,0(r10) finish_normal_tlb_miss: /* Check if required permissions are met */ andc. r15,r11,r14 bne- normal_tlb_miss_access_fault /* Now we build the MAS: * * MAS 0 : Fully setup with defaults in MAS4 and TLBnCFG * MAS 1 : Almost fully setup * - PID already updated by caller if necessary * - TSIZE need change if !base page size, not * yet implemented for now * MAS 2 : Defaults not useful, need to be redone * MAS 3+7 : Needs to be done * * TODO: mix up code below for better scheduling */ clrrdi r10,r16,12 /* Clear low crap in EA */ rlwimi r10,r14,32-19,27,31 /* Insert WIMGE */ mtspr SPRN_MAS2,r10 /* Check page size, if not standard, update MAS1 */ rldicl r10,r14,64-8,64-8 cmpldi cr0,r10,BOOK3E_PAGESZ_4K beq- 1f mfspr r11,SPRN_MAS1 rlwimi r11,r14,31,21,24 rlwinm r11,r11,0,21,19 mtspr SPRN_MAS1,r11 1: /* Move RPN in position */ rldicr r11,r14,64-(PTE_RPN_SHIFT-PAGE_SHIFT),63-PAGE_SHIFT clrldi r15,r11,12 /* Clear crap at the top */ rlwimi r15,r14,32-8,22,25 /* Move in U bits */ rlwimi r15,r14,32-2,26,31 /* Move in BAP bits */ /* Mask out SW and UW if !DIRTY (XXX optimize this !) */ andi. r11,r14,_PAGE_DIRTY bne 1f li r11,MAS3_SW|MAS3_UW andc r15,r15,r11 1: srdi r16,r15,32 mtspr SPRN_MAS3,r15 mtspr SPRN_MAS7,r16 tlbwe normal_tlb_miss_done: /* We don't bother with restoring DEAR or ESR since we know we are * level 0 and just going back to userland. They are only needed * if you are going to take an access fault */ TLB_MISS_EPILOG_SUCCESS rfi normal_tlb_miss_access_fault: /* We need to check if it was an instruction miss */ andi. r10,r11,_PAGE_BAP_UX bne 1f ld r14,EX_TLB_DEAR(r12) ld r15,EX_TLB_ESR(r12) mtspr SPRN_DEAR,r14 mtspr SPRN_ESR,r15 TLB_MISS_EPILOG_ERROR b exc_data_storage_book3e 1: TLB_MISS_EPILOG_ERROR b exc_instruction_storage_book3e /* * This is the guts of the second-level TLB miss handler for direct * misses. 
We are entered with: * * r16 = virtual page table faulting address * r15 = region (top 4 bits of address) * r14 = crap (free to use) * r13 = PACA * r12 = TLB exception frame in PACA * r11 = crap (free to use) * r10 = crap (free to use) * * Note that this should only ever be called as a second level handler * with the current scheme when using SW load. * That means we can always get the original fault DEAR at * EX_TLB_DEAR-EX_TLB_SIZE(r12) * * It can be re-entered by the linear mapping miss handler. However, to * avoid too much complication, it will restart the whole fault at level * 0 so we don't care too much about clobbers * * XXX That code was written back when we couldn't clobber r14. We can now, * so we could probably optimize things a bit */ virt_page_table_tlb_miss: /* Are we hitting a kernel page table ? */ srdi r15,r16,60 andi. r10,r15,0x8 /* The cool thing now is that r10 contains 0 for user and 8 for kernel, * and we happen to have the swapper_pg_dir at offset 8 from the user * pgdir in the PACA :-). */ add r11,r10,r13 /* If kernel, we need to clear MAS1 TID */ beq 1f /* XXX replace the RMW cycles with immediate loads + writes */ mfspr r10,SPRN_MAS1 rlwinm r10,r10,0,16,1 /* Clear TID */ mtspr SPRN_MAS1,r10 #ifdef CONFIG_PPC_KUAP b 2f 1: mfspr r10,SPRN_MAS1 rlwinm. r10,r10,0,0x3fff0000 beq- virt_page_table_tlb_miss_fault /* KUAP fault */ 2: #else 1: #endif /* Now, we need to walk the page tables. First check if we are in * range. */ rldicl r10,r16,64-(VPTE_INDEX_SIZE+3),VPTE_INDEX_SIZE+3+4 cmpldi r10,0x80 bne- virt_page_table_tlb_miss_fault /* Get the PGD pointer */ ld r15,PACAPGD(r11) cmpldi cr0,r15,0 beq- virt_page_table_tlb_miss_fault /* Get to PGD entry */ rldicl r11,r16,64-VPTE_PGD_SHIFT,64-PGD_INDEX_SIZE-3 clrrdi r10,r11,3 ldx r15,r10,r15 cmpdi cr0,r15,0 bge virt_page_table_tlb_miss_fault /* Get to PUD entry */ rldicl r11,r16,64-VPTE_PUD_SHIFT,64-PUD_INDEX_SIZE-3 clrrdi r10,r11,3 ldx r15,r10,r15 cmpdi cr0,r15,0 bge virt_page_table_tlb_miss_fault /* Get to PMD entry */ rldicl r11,r16,64-VPTE_PMD_SHIFT,64-PMD_INDEX_SIZE-3 clrrdi r10,r11,3 ldx r15,r10,r15 cmpdi cr0,r15,0 bge virt_page_table_tlb_miss_fault /* Ok, we're all right, we can now create a kernel translation for * a 4K or 64K page from r16 -> r15. */ /* Now we build the MAS: * * MAS 0 : Fully setup with defaults in MAS4 and TLBnCFG * MAS 1 : Almost fully setup * - PID already updated by caller if necessary * - TSIZE for now is base page size always * MAS 2 : Use defaults * MAS 3+7 : Needs to be done * * So we only do MAS 2 and 3 for now... */ clrldi r11,r15,4 /* remove region ID from RPN */ ori r10,r11,1 /* Or-in SR */ srdi r16,r10,32 mtspr SPRN_MAS3,r10 mtspr SPRN_MAS7,r16 tlbwe /* Return to caller, normal case */ TLB_MISS_EPILOG_SUCCESS rfi virt_page_table_tlb_miss_fault: /* If we fault here, things are a little bit tricky. We need to call * either data or instruction store fault, and we need to retrieve * the original fault address and ESR (for data). * * The thing is, we know that in normal circumstances, this is * always called as a second level tlb miss for SW load or as a first * level TLB miss for HW load, so we should be able to peek at the * relevant information in the first exception frame in the PACA. * * However, we do need to double check that, because we may just hit * a stray kernel pointer or a userland attack trying to hit those * areas. If that is the case, we do a data fault. (We can't get here * from an instruction tlb miss anyway). 
* * Note also that when going to a fault, we must unwind the previous * level as well. Since we are doing that, we don't need to clear or * restore the TLB reservation neither. */ subf r10,r13,r12 cmpldi cr0,r10,PACA_EXTLB+EX_TLB_SIZE bne- virt_page_table_tlb_miss_whacko_fault /* We dig the original DEAR and ESR from slot 0 */ ld r15,EX_TLB_DEAR+PACA_EXTLB(r13) ld r16,EX_TLB_ESR+PACA_EXTLB(r13) /* We check for the "special" ESR value for instruction faults */ cmpdi cr0,r16,-1 beq 1f mtspr SPRN_DEAR,r15 mtspr SPRN_ESR,r16 TLB_MISS_EPILOG_ERROR b exc_data_storage_book3e 1: TLB_MISS_EPILOG_ERROR b exc_instruction_storage_book3e virt_page_table_tlb_miss_whacko_fault: /* The linear fault will restart everything so ESR and DEAR will * not have been clobbered, let's just fault with what we have */ TLB_MISS_EPILOG_ERROR b exc_data_storage_book3e /************************************************************** * * * TLB miss handling for Book3E with hw page table support * * * **************************************************************/ /* Data TLB miss */ START_EXCEPTION(data_tlb_miss_htw) TLB_MISS_PROLOG /* Now we handle the fault proper. We only save DEAR in normal * fault case since that's the only interesting values here. * We could probably also optimize by not saving SRR0/1 in the * linear mapping case but I'll leave that for later */ mfspr r14,SPRN_ESR mfspr r16,SPRN_DEAR /* get faulting address */ srdi r11,r16,44 /* get region */ xoris r11,r11,0xc cmpldi cr0,r11,0 /* linear mapping ? */ beq tlb_load_linear /* yes -> go to linear map load */ cmpldi cr1,r11,1 /* vmalloc mapping ? */ /* We do the user/kernel test for the PID here along with the RW test */ srdi. r11,r16,60 /* Check for user region */ ld r15,PACAPGD(r13) /* Load user pgdir */ beq htw_tlb_miss /* XXX replace the RMW cycles with immediate loads + writes */ 1: mfspr r10,SPRN_MAS1 rlwinm r10,r10,0,16,1 /* Clear TID */ mtspr SPRN_MAS1,r10 ld r15,PACA_KERNELPGD(r13) /* Load kernel pgdir */ beq+ cr1,htw_tlb_miss /* We got a crappy address, just fault with whatever DEAR and ESR * are here */ TLB_MISS_EPILOG_ERROR b exc_data_storage_book3e /* Instruction TLB miss */ START_EXCEPTION(instruction_tlb_miss_htw) TLB_MISS_PROLOG /* If we take a recursive fault, the second level handler may need * to know whether we are handling a data or instruction fault in * order to get to the right store fault handler. We provide that * info by keeping a crazy value for ESR in r14 */ li r14,-1 /* store to exception frame is done later */ /* Now we handle the fault proper. We only save DEAR in the non * linear mapping case since we know the linear mapping case will * not re-enter. We could indeed optimize and also not save SRR0/1 * in the linear mapping case but I'll leave that for later * * Faulting address is SRR0 which is already in r16 */ srdi r11,r16,44 /* get region */ xoris r11,r11,0xc cmpldi cr0,r11,0 /* linear mapping ? */ beq tlb_load_linear /* yes -> go to linear map load */ cmpldi cr1,r11,1 /* vmalloc mapping ? */ /* We do the user/kernel test for the PID here along with the RW test */ srdi. 
r11,r16,60 /* Check for user region */ ld r15,PACAPGD(r13) /* Load user pgdir */ beq htw_tlb_miss /* XXX replace the RMW cycles with immediate loads + writes */ 1: mfspr r10,SPRN_MAS1 rlwinm r10,r10,0,16,1 /* Clear TID */ mtspr SPRN_MAS1,r10 ld r15,PACA_KERNELPGD(r13) /* Load kernel pgdir */ beq+ htw_tlb_miss /* We got a crappy address, just fault */ TLB_MISS_EPILOG_ERROR b exc_instruction_storage_book3e /* * This is the guts of the second-level TLB miss handler for direct * misses. We are entered with: * * r16 = virtual page table faulting address * r15 = PGD pointer * r14 = ESR * r13 = PACA * r12 = TLB exception frame in PACA * r11 = crap (free to use) * r10 = crap (free to use) * * It can be re-entered by the linear mapping miss handler. However, to * avoid too much complication, it will save/restore things for us */ htw_tlb_miss: #ifdef CONFIG_PPC_KUAP mfspr r10,SPRN_MAS1 rlwinm. r10,r10,0,0x3fff0000 beq- htw_tlb_miss_fault /* KUAP fault */ #endif /* Search if we already have a TLB entry for that virtual address, and * if we do, bail out. * * MAS1:IND should be already set based on MAS4 */ PPC_TLBSRX_DOT(0,R16) beq htw_tlb_miss_done /* Now, we need to walk the page tables. First check if we are in * range. */ rldicl. r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4 bne- htw_tlb_miss_fault /* Get the PGD pointer */ cmpldi cr0,r15,0 beq- htw_tlb_miss_fault /* Get to PGD entry */ rldicl r11,r16,64-(PGDIR_SHIFT-3),64-PGD_INDEX_SIZE-3 clrrdi r10,r11,3 ldx r15,r10,r15 cmpdi cr0,r15,0 bge htw_tlb_miss_fault /* Get to PUD entry */ rldicl r11,r16,64-(PUD_SHIFT-3),64-PUD_INDEX_SIZE-3 clrrdi r10,r11,3 ldx r15,r10,r15 cmpdi cr0,r15,0 bge htw_tlb_miss_fault /* Get to PMD entry */ rldicl r11,r16,64-(PMD_SHIFT-3),64-PMD_INDEX_SIZE-3 clrrdi r10,r11,3 ldx r15,r10,r15 cmpdi cr0,r15,0 bge htw_tlb_miss_fault /* Ok, we're all right, we can now create an indirect entry for * a 1M or 256M page. * * The last trick is now that because we use "half" pages for * the HTW (1M IND is 2K and 256M IND is 32K) we need to account * for an added LSB bit to the RPN. For 64K pages, there is no * problem as we already use 32K arrays (half PTE pages), but for * 4K page we need to extract a bit from the virtual address and * insert it into the "PA52" bit of the RPN. */ rlwimi r15,r16,32-9,20,20 /* Now we build the MAS: * * MAS 0 : Fully setup with defaults in MAS4 and TLBnCFG * MAS 1 : Almost fully setup * - PID already updated by caller if necessary * - TSIZE for now is base ind page size always * MAS 2 : Use defaults * MAS 3+7 : Needs to be done */ ori r10,r15,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT) srdi r16,r10,32 mtspr SPRN_MAS3,r10 mtspr SPRN_MAS7,r16 tlbwe htw_tlb_miss_done: /* We don't bother with restoring DEAR or ESR since we know we are * level 0 and just going back to userland. They are only needed * if you are going to take an access fault */ TLB_MISS_EPILOG_SUCCESS rfi htw_tlb_miss_fault: /* We need to check if it was an instruction miss. We know this * though because r14 would contain -1 */ cmpdi cr0,r14,-1 beq 1f mtspr SPRN_DEAR,r16 mtspr SPRN_ESR,r14 TLB_MISS_EPILOG_ERROR b exc_data_storage_book3e 1: TLB_MISS_EPILOG_ERROR b exc_instruction_storage_book3e /* * This is the guts of "any" level TLB miss handler for kernel linear * mapping misses. 
We are entered with: * * * r16 = faulting address * r15 = crap (free to use) * r14 = ESR (data) or -1 (instruction) * r13 = PACA * r12 = TLB exception frame in PACA * r11 = crap (free to use) * r10 = crap (free to use) * * In addition we know that we will not re-enter, so in theory, we could * use a simpler epilog not restoring SRR0/1 etc.. but we'll do that later. * * We also need to be careful about MAS registers here & TLB reservation, * as we know we'll have clobbered them if we interrupt the main TLB miss * handlers in which case we probably want to do a full restart at level * 0 rather than saving / restoring the MAS. * * Note: If we care about performance of that core, we can easily shuffle * a few things around */ tlb_load_linear: /* For now, we assume the linear mapping is contiguous and stops at * linear_map_top. We also assume the size is a multiple of 1G, thus * we only use 1G pages for now. That might have to be changed in a * final implementation, especially when dealing with hypervisors */ __LOAD_PACA_TOC(r11) LOAD_REG_ADDR_ALTTOC(r11, r11, linear_map_top) ld r10,0(r11) tovirt(10,10) cmpld cr0,r16,r10 bge tlb_load_linear_fault /* MAS1 need whole new setup. */ li r15,(BOOK3E_PAGESZ_1GB<<MAS1_TSIZE_SHIFT) oris r15,r15,MAS1_VALID@h /* MAS1 needs V and TSIZE */ mtspr SPRN_MAS1,r15 /* Already somebody there ? */ PPC_TLBSRX_DOT(0,R16) beq tlb_load_linear_done /* Now we build the remaining MAS. MAS0 and 2 should be fine * with their defaults, which leaves us with MAS 3 and 7. The * mapping is linear, so we just take the address, clear the * region bits, and or in the permission bits which are currently * hard wired */ clrrdi r10,r16,30 /* 1G page index */ clrldi r10,r10,4 /* clear region bits */ ori r10,r10,MAS3_SR|MAS3_SW|MAS3_SX srdi r16,r10,32 mtspr SPRN_MAS3,r10 mtspr SPRN_MAS7,r16 tlbwe tlb_load_linear_done: /* We use the "error" epilog for success as we do want to * restore to the initial faulting context, whatever it was. * We do that because we can't resume a fault within a TLB * miss handler, due to MAS and TLB reservation being clobbered. */ TLB_MISS_EPILOG_ERROR rfi tlb_load_linear_fault: /* We keep the DEAR and ESR around, this shouldn't have happened */ cmpdi cr0,r14,-1 beq 1f TLB_MISS_EPILOG_ERROR_SPECIAL b exc_data_storage_book3e 1: TLB_MISS_EPILOG_ERROR_SPECIAL b exc_instruction_storage_book3e
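
The tlb_load_linear path above builds the MAS7:MAS3 pair by rounding the faulting EA down to a 1 GB boundary, clearing the four region bits, and OR-ing in fixed supervisor permissions. The following is a minimal C sketch of that bit manipulation, not kernel code: the MAS3_* values and the linear_mas7_mas3() helper are illustrative stand-ins for the real Book3E MMU header definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative permission bits; the authoritative MAS3_* values come
 * from the Book3E MMU headers. */
#define MAS3_SX 0x10
#define MAS3_SW 0x04
#define MAS3_SR 0x01

/* Hypothetical helper mirroring the clrrdi/clrldi/ori sequence in
 * tlb_load_linear: round the EA down to a 1 GB page, drop the top
 * four region bits, and OR in supervisor R/W/X.  The low 32 bits of
 * the result go to MAS3, the high 32 bits to MAS7. */
static uint64_t linear_mas7_mas3(uint64_t ea)
{
    uint64_t rpn = ea & ~((1ULL << 30) - 1);   /* clrrdi r10,r16,30 */
    rpn &= (1ULL << 60) - 1;                   /* clrldi r10,r10,4  */
    return rpn | MAS3_SR | MAS3_SW | MAS3_SX;  /* ori r10,r10,...   */
}

int main(void)
{
    uint64_t v = linear_mas7_mas3(0xc000000012345678ULL);
    printf("MAS7=%#x MAS3=%#x\n", (unsigned)(v >> 32), (unsigned)v);
    return 0;
}
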
Akagi201/ffmpeg-xcode
12,935
ffmpeg-3.0.2/libavcodec/aarch64/h264pred_neon.S
/* * Copyright (c) 2009 Mans Rullgard <mans@mansr.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/aarch64/asm.S" .macro ldcol.8 rd, rs, rt, n=8, hi=0 .if \n >= 8 || \hi == 0 ld1 {\rd\().b}[0], [\rs], \rt ld1 {\rd\().b}[1], [\rs], \rt ld1 {\rd\().b}[2], [\rs], \rt ld1 {\rd\().b}[3], [\rs], \rt .endif .if \n >= 8 || \hi == 1 ld1 {\rd\().b}[4], [\rs], \rt ld1 {\rd\().b}[5], [\rs], \rt ld1 {\rd\().b}[6], [\rs], \rt ld1 {\rd\().b}[7], [\rs], \rt .endif .if \n == 16 ld1 {\rd\().b}[8], [\rs], \rt ld1 {\rd\().b}[9], [\rs], \rt ld1 {\rd\().b}[10], [\rs], \rt ld1 {\rd\().b}[11], [\rs], \rt ld1 {\rd\().b}[12], [\rs], \rt ld1 {\rd\().b}[13], [\rs], \rt ld1 {\rd\().b}[14], [\rs], \rt ld1 {\rd\().b}[15], [\rs], \rt .endif .endm function ff_pred16x16_128_dc_neon, export=1 movi v0.16b, #128 b .L_pred16x16_dc_end endfunc function ff_pred16x16_top_dc_neon, export=1 sub x2, x0, x1 ld1 {v0.16b}, [x2] uaddlv h0, v0.16b rshrn v0.8b, v0.8h, #4 dup v0.16b, v0.b[0] b .L_pred16x16_dc_end endfunc function ff_pred16x16_left_dc_neon, export=1 sub x2, x0, #1 ldcol.8 v0, x2, x1, 16 uaddlv h0, v0.16b rshrn v0.8b, v0.8h, #4 dup v0.16b, v0.b[0] b .L_pred16x16_dc_end endfunc function ff_pred16x16_dc_neon, export=1 sub x2, x0, x1 sub x3, x0, #1 ld1 {v0.16b}, [x2] ldcol.8 v1, x3, x1, 16 uaddlv h0, v0.16b uaddlv h1, v1.16b add v0.4h, v0.4h, v1.4h rshrn v0.8b, v0.8h, #5 dup v0.16b, v0.b[0] .L_pred16x16_dc_end: mov w3, #8 6: st1 {v0.16b}, [x0], x1 st1 {v0.16b}, [x0], x1 subs w3, w3, #1 b.ne 6b ret endfunc function ff_pred16x16_hor_neon, export=1 sub x2, x0, #1 mov w3, #16 1: ld1r {v0.16b}, [x2], x1 st1 {v0.16b}, [x0], x1 subs w3, w3, #1 b.ne 1b ret endfunc function ff_pred16x16_vert_neon, export=1 sub x2, x0, x1 add x1, x1, x1 ld1 {v0.16b}, [x2], x1 mov w3, #8 1: st1 {v0.16b}, [x0], x1 st1 {v0.16b}, [x2], x1 subs w3, w3, #1 b.ne 1b ret endfunc function ff_pred16x16_plane_neon, export=1 sub x3, x0, x1 movrel x4, p16weight add x2, x3, #8 sub x3, x3, #1 ld1 {v0.8b}, [x3] ld1 {v2.8b}, [x2], x1 ldcol.8 v1, x3, x1 add x3, x3, x1 ldcol.8 v3, x3, x1 rev64 v0.8b, v0.8b rev64 v1.8b, v1.8b uaddl v7.8h, v2.8b, v3.8b usubl v2.8h, v2.8b, v0.8b usubl v3.8h, v3.8b, v1.8b ld1 {v0.8h}, [x4] mul v2.8h, v2.8h, v0.8h mul v3.8h, v3.8h, v0.8h addp v2.8h, v2.8h, v3.8h addp v2.8h, v2.8h, v2.8h addp v2.4h, v2.4h, v2.4h sshll v3.4s, v2.4h, #2 saddw v2.4s, v3.4s, v2.4h rshrn v4.4h, v2.4s, #6 trn2 v5.4h, v4.4h, v4.4h add v2.4h, v4.4h, v5.4h shl v3.4h, v2.4h, #3 ext v7.16b, v7.16b, v7.16b, #14 sub v3.4h, v3.4h, v2.4h // 7 * (b + c) add v7.4h, v7.4h, v0.4h shl v2.4h, v7.4h, #4 sub v2.4h, v2.4h, v3.4h shl v3.4h, v4.4h, #4 ext v0.16b, v0.16b, v0.16b, #14 sub v6.4h, v5.4h, v3.4h mov v0.h[0], wzr mul v0.8h, v0.8h, v4.h[0] dup v1.8h, v2.h[0] dup v2.8h, v4.h[0] dup v3.8h, v6.h[0] shl v2.8h, v2.8h, #3 add v1.8h, v1.8h, v0.8h add v3.8h, v3.8h, v2.8h mov w3, 
#16 1: sqshrun v0.8b, v1.8h, #5 add v1.8h, v1.8h, v2.8h sqshrun2 v0.16b, v1.8h, #5 add v1.8h, v1.8h, v3.8h st1 {v0.16b}, [x0], x1 subs w3, w3, #1 b.ne 1b ret endfunc const p16weight, align=4 .short 1,2,3,4,5,6,7,8 endconst const p8weight, align=4 .short 1,2,3,4,1,2,3,4 endconst function ff_pred8x8_hor_neon, export=1 sub x2, x0, #1 mov w3, #8 1: ld1r {v0.8b}, [x2], x1 st1 {v0.8b}, [x0], x1 subs w3, w3, #1 b.ne 1b ret endfunc function ff_pred8x8_vert_neon, export=1 sub x2, x0, x1 lsl x1, x1, #1 ld1 {v0.8b}, [x2], x1 mov w3, #4 1: st1 {v0.8b}, [x0], x1 st1 {v0.8b}, [x2], x1 subs w3, w3, #1 b.ne 1b ret endfunc function ff_pred8x8_plane_neon, export=1 sub x3, x0, x1 movrel x4, p8weight movrel x5, p16weight add x2, x3, #4 sub x3, x3, #1 ld1 {v0.s}[0], [x3] ld1 {v2.s}[0], [x2], x1 ldcol.8 v0, x3, x1, 4, hi=1 add x3, x3, x1 ldcol.8 v3, x3, x1, 4 uaddl v7.8h, v2.8b, v3.8b rev32 v0.8b, v0.8b trn1 v2.2s, v2.2s, v3.2s usubl v2.8h, v2.8b, v0.8b ld1 {v6.8h}, [x4] mul v2.8h, v2.8h, v6.8h ld1 {v0.8h}, [x5] saddlp v2.4s, v2.8h addp v2.4s, v2.4s, v2.4s shl v3.4s, v2.4s, #4 add v2.4s, v3.4s, v2.4s rshrn v5.4h, v2.4s, #5 addp v2.4h, v5.4h, v5.4h shl v3.4h, v2.4h, #1 add v3.4h, v3.4h, v2.4h rev64 v7.4h, v7.4h add v7.4h, v7.4h, v0.4h shl v2.4h, v7.4h, #4 sub v2.4h, v2.4h, v3.4h ext v0.16b, v0.16b, v0.16b, #14 mov v0.h[0], wzr mul v0.8h, v0.8h, v5.h[0] dup v1.8h, v2.h[0] dup v2.8h, v5.h[1] add v1.8h, v1.8h, v0.8h mov w3, #8 1: sqshrun v0.8b, v1.8h, #5 add v1.8h, v1.8h, v2.8h st1 {v0.8b}, [x0], x1 subs w3, w3, #1 b.ne 1b ret endfunc function ff_pred8x8_128_dc_neon, export=1 movi v0.8b, #128 movi v1.8b, #128 b .L_pred8x8_dc_end endfunc function ff_pred8x8_top_dc_neon, export=1 sub x2, x0, x1 ld1 {v0.8b}, [x2] uaddlp v0.4h, v0.8b addp v0.4h, v0.4h, v0.4h zip1 v0.8h, v0.8h, v0.8h rshrn v2.8b, v0.8h, #2 zip1 v0.8b, v2.8b, v2.8b zip1 v1.8b, v2.8b, v2.8b b .L_pred8x8_dc_end endfunc function ff_pred8x8_left_dc_neon, export=1 sub x2, x0, #1 ldcol.8 v0, x2, x1 uaddlp v0.4h, v0.8b addp v0.4h, v0.4h, v0.4h rshrn v2.8b, v0.8h, #2 dup v1.8b, v2.b[1] dup v0.8b, v2.b[0] b .L_pred8x8_dc_end endfunc function ff_pred8x8_dc_neon, export=1 sub x2, x0, x1 sub x3, x0, #1 ld1 {v0.8b}, [x2] ldcol.8 v1, x3, x1 uaddlp v0.4h, v0.8b uaddlp v1.4h, v1.8b trn1 v2.2s, v0.2s, v1.2s trn2 v3.2s, v0.2s, v1.2s addp v4.4h, v2.4h, v3.4h addp v5.4h, v4.4h, v4.4h rshrn v6.8b, v5.8h, #3 rshrn v7.8b, v4.8h, #2 dup v0.8b, v6.b[0] dup v2.8b, v7.b[2] dup v1.8b, v7.b[3] dup v3.8b, v6.b[1] zip1 v0.2s, v0.2s, v2.2s zip1 v1.2s, v1.2s, v3.2s .L_pred8x8_dc_end: mov w3, #4 add x2, x0, x1, lsl #2 6: st1 {v0.8b}, [x0], x1 st1 {v1.8b}, [x2], x1 subs w3, w3, #1 b.ne 6b ret endfunc function ff_pred8x8_l0t_dc_neon, export=1 sub x2, x0, x1 sub x3, x0, #1 ld1 {v0.8b}, [x2] ldcol.8 v1, x3, x1, 4 zip1 v0.4s, v0.4s, v1.4s uaddlp v0.8h, v0.16b addp v0.8h, v0.8h, v0.8h addp v1.4h, v0.4h, v0.4h rshrn v2.8b, v0.8h, #2 rshrn v3.8b, v1.8h, #3 dup v4.8b, v3.b[0] dup v6.8b, v2.b[2] dup v5.8b, v2.b[0] zip1 v0.2s, v4.2s, v6.2s zip1 v1.2s, v5.2s, v6.2s b .L_pred8x8_dc_end endfunc function ff_pred8x8_l00_dc_neon, export=1 sub x2, x0, #1 ldcol.8 v0, x2, x1, 4 uaddlp v0.4h, v0.8b addp v0.4h, v0.4h, v0.4h rshrn v0.8b, v0.8h, #2 movi v1.8b, #128 dup v0.8b, v0.b[0] b .L_pred8x8_dc_end endfunc function ff_pred8x8_0lt_dc_neon, export=1 add x3, x0, x1, lsl #2 sub x2, x0, x1 sub x3, x3, #1 ld1 {v0.8b}, [x2] ldcol.8 v1, x3, x1, 4, hi=1 zip1 v0.4s, v0.4s, v1.4s uaddlp v0.8h, v0.16b addp v0.8h, v0.8h, v0.8h addp v1.4h, v0.4h, v0.4h rshrn v2.8b, v0.8h, #2 rshrn v3.8b, v1.8h, #3 dup v4.8b, v2.b[0] dup 
v5.8b, v2.b[3] dup v6.8b, v2.b[2] dup v7.8b, v3.b[1] zip1 v0.2s, v4.2s, v6.2s zip1 v1.2s, v5.2s, v7.2s b .L_pred8x8_dc_end endfunc function ff_pred8x8_0l0_dc_neon, export=1 add x2, x0, x1, lsl #2 sub x2, x2, #1 ldcol.8 v1, x2, x1, 4 uaddlp v2.4h, v1.8b addp v2.4h, v2.4h, v2.4h rshrn v1.8b, v2.8h, #2 movi v0.8b, #128 dup v1.8b, v1.b[0] b .L_pred8x8_dc_end endfunc
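
For reference, ff_pred16x16_dc_neon above reduces to a simple scalar rule: sum the 16 neighbours above and the 16 to the left, round, shift by 5 (the rshrn #4 / #5 pairs implement the rounded divides), and flood-fill the block. A sketch in C; pred16x16_dc_ref is an illustrative name, not an FFmpeg entry point.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Scalar model of ff_pred16x16_dc_neon: average the 16 pixels above
 * and the 16 pixels to the left with rounding, then fill the 16x16
 * block.  'src' points at the top-left pixel of the block. */
static void pred16x16_dc_ref(uint8_t *src, ptrdiff_t stride)
{
    unsigned sum = 0;
    for (int i = 0; i < 16; i++)
        sum += src[i - stride];          /* top row     */
    for (int i = 0; i < 16; i++)
        sum += src[i * stride - 1];      /* left column */
    uint8_t dc = (sum + 16) >> 5;        /* matches rshrn #5 */
    for (int y = 0; y < 16; y++)
        memset(src + y * stride, dc, 16);
}
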
Akagi201/ffmpeg-xcode
1,074
ffmpeg-3.0.2/libavcodec/aarch64/videodsp.S
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/aarch64/asm.S"

function ff_prefetch_aarch64, export=1
        subs            w2,  w2,  #2
        prfm            pldl1strm, [x0]
        prfm            pldl1strm, [x0, x1]
        add             x0,  x0,  x1,  lsl #1
        b.gt            X(ff_prefetch_aarch64)
        ret
endfunc
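
ff_prefetch_aarch64 is effectively a loop expressed as a tail call: each pass issues streaming prefetches for two rows and advances by two strides until the requested height is consumed. A rough C equivalent follows, assuming __builtin_prefetch(p, 0, 0) as a stand-in for prfm pldl1strm; prefetch_ref is an illustrative name.

#include <stddef.h>
#include <stdint.h>

/* Scalar model of ff_prefetch_aarch64: request two rows per
 * iteration with a streaming (low-locality) read prefetch, stepping
 * by 2*stride, until 'h' rows have been touched. */
static void prefetch_ref(const uint8_t *buf, ptrdiff_t stride, int h)
{
    do {
        __builtin_prefetch(buf, 0, 0);
        __builtin_prefetch(buf + stride, 0, 0);
        buf += 2 * stride;
        h   -= 2;
    } while (h > 0);
}
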
Akagi201/ffmpeg-xcode
18,658
ffmpeg-3.0.2/libavcodec/aarch64/h264dsp_neon.S
/* * Copyright (c) 2008 Mans Rullgard <mans@mansr.com> * Copyright (c) 2013 Janne Grunau <janne-libav@jannau.net> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/aarch64/asm.S" #include "neon.S" .macro h264_loop_filter_start cmp w2, #0 ldr w6, [x4] ccmp w3, #0, #0, ne mov v24.S[0], w6 and w6, w6, w6, lsl #16 b.eq 1f ands w6, w6, w6, lsl #8 b.ge 2f 1: ret 2: .endm .macro h264_loop_filter_luma dup v22.16B, w2 // alpha uxtl v24.8H, v24.8B uabd v21.16B, v16.16B, v0.16B // abs(p0 - q0) uxtl v24.4S, v24.4H uabd v28.16B, v18.16B, v16.16B // abs(p1 - p0) sli v24.8H, v24.8H, #8 uabd v30.16B, v2.16B, v0.16B // abs(q1 - q0) sli v24.4S, v24.4S, #16 cmhi v21.16B, v22.16B, v21.16B // < alpha dup v22.16B, w3 // beta cmlt v23.16B, v24.16B, #0 cmhi v28.16B, v22.16B, v28.16B // < beta cmhi v30.16B, v22.16B, v30.16B // < beta bic v21.16B, v21.16B, v23.16B uabd v17.16B, v20.16B, v16.16B // abs(p2 - p0) and v21.16B, v21.16B, v28.16B uabd v19.16B, v4.16B, v0.16B // abs(q2 - q0) cmhi v17.16B, v22.16B, v17.16B // < beta and v21.16B, v21.16B, v30.16B cmhi v19.16B, v22.16B, v19.16B // < beta and v17.16B, v17.16B, v21.16B and v19.16B, v19.16B, v21.16B and v24.16B, v24.16B, v21.16B urhadd v28.16B, v16.16B, v0.16B sub v21.16B, v24.16B, v17.16B uqadd v23.16B, v18.16B, v24.16B uhadd v20.16B, v20.16B, v28.16B sub v21.16B, v21.16B, v19.16B uhadd v28.16B, v4.16B, v28.16B umin v23.16B, v23.16B, v20.16B uqsub v22.16B, v18.16B, v24.16B uqadd v4.16B, v2.16B, v24.16B umax v23.16B, v23.16B, v22.16B uqsub v22.16B, v2.16B, v24.16B umin v28.16B, v4.16B, v28.16B uxtl v4.8H, v0.8B umax v28.16B, v28.16B, v22.16B uxtl2 v20.8H, v0.16B usubw v4.8H, v4.8H, v16.8B usubw2 v20.8H, v20.8H, v16.16B shl v4.8H, v4.8H, #2 shl v20.8H, v20.8H, #2 uaddw v4.8H, v4.8H, v18.8B uaddw2 v20.8H, v20.8H, v18.16B usubw v4.8H, v4.8H, v2.8B usubw2 v20.8H, v20.8H, v2.16B rshrn v4.8B, v4.8H, #3 rshrn2 v4.16B, v20.8H, #3 bsl v17.16B, v23.16B, v18.16B bsl v19.16B, v28.16B, v2.16B neg v23.16B, v21.16B uxtl v28.8H, v16.8B smin v4.16B, v4.16B, v21.16B uxtl2 v21.8H, v16.16B smax v4.16B, v4.16B, v23.16B uxtl v22.8H, v0.8B uxtl2 v24.8H, v0.16B saddw v28.8H, v28.8H, v4.8B saddw2 v21.8H, v21.8H, v4.16B ssubw v22.8H, v22.8H, v4.8B ssubw2 v24.8H, v24.8H, v4.16B sqxtun v16.8B, v28.8H sqxtun2 v16.16B, v21.8H sqxtun v0.8B, v22.8H sqxtun2 v0.16B, v24.8H .endm function ff_h264_v_loop_filter_luma_neon, export=1 h264_loop_filter_start sxtw x1, w1 ld1 {v0.16B}, [x0], x1 ld1 {v2.16B}, [x0], x1 ld1 {v4.16B}, [x0], x1 sub x0, x0, x1, lsl #2 sub x0, x0, x1, lsl #1 ld1 {v20.16B}, [x0], x1 ld1 {v18.16B}, [x0], x1 ld1 {v16.16B}, [x0], x1 h264_loop_filter_luma sub x0, x0, x1, lsl #1 st1 {v17.16B}, [x0], x1 st1 {v16.16B}, [x0], x1 st1 {v0.16B}, [x0], x1 st1 {v19.16B}, [x0] ret endfunc function ff_h264_h_loop_filter_luma_neon, export=1 h264_loop_filter_start sub x0, x0, #4 ld1 {v6.8B}, 
[x0], x1 ld1 {v20.8B}, [x0], x1 ld1 {v18.8B}, [x0], x1 ld1 {v16.8B}, [x0], x1 ld1 {v0.8B}, [x0], x1 ld1 {v2.8B}, [x0], x1 ld1 {v4.8B}, [x0], x1 ld1 {v26.8B}, [x0], x1 ld1 {v6.D}[1], [x0], x1 ld1 {v20.D}[1], [x0], x1 ld1 {v18.D}[1], [x0], x1 ld1 {v16.D}[1], [x0], x1 ld1 {v0.D}[1], [x0], x1 ld1 {v2.D}[1], [x0], x1 ld1 {v4.D}[1], [x0], x1 ld1 {v26.D}[1], [x0], x1 transpose_8x16B v6, v20, v18, v16, v0, v2, v4, v26, v21, v23 h264_loop_filter_luma transpose_4x16B v17, v16, v0, v19, v21, v23, v25, v27 sub x0, x0, x1, lsl #4 add x0, x0, #2 st1 {v17.S}[0], [x0], x1 st1 {v16.S}[0], [x0], x1 st1 {v0.S}[0], [x0], x1 st1 {v19.S}[0], [x0], x1 st1 {v17.S}[1], [x0], x1 st1 {v16.S}[1], [x0], x1 st1 {v0.S}[1], [x0], x1 st1 {v19.S}[1], [x0], x1 st1 {v17.S}[2], [x0], x1 st1 {v16.S}[2], [x0], x1 st1 {v0.S}[2], [x0], x1 st1 {v19.S}[2], [x0], x1 st1 {v17.S}[3], [x0], x1 st1 {v16.S}[3], [x0], x1 st1 {v0.S}[3], [x0], x1 st1 {v19.S}[3], [x0], x1 ret endfunc .macro h264_loop_filter_chroma dup v22.8B, w2 // alpha uxtl v24.8H, v24.8B uabd v26.8B, v16.8B, v0.8B // abs(p0 - q0) uxtl v4.8H, v0.8B uabd v28.8B, v18.8B, v16.8B // abs(p1 - p0) usubw v4.8H, v4.8H, v16.8B sli v24.8H, v24.8H, #8 shl v4.8H, v4.8H, #2 uabd v30.8B, v2.8B, v0.8B // abs(q1 - q0) uaddw v4.8H, v4.8H, v18.8B cmhi v26.8B, v22.8B, v26.8B // < alpha usubw v4.8H, v4.8H, v2.8B dup v22.8B, w3 // beta rshrn v4.8B, v4.8H, #3 cmhi v28.8B, v22.8B, v28.8B // < beta cmhi v30.8B, v22.8B, v30.8B // < beta smin v4.8B, v4.8B, v24.8B neg v25.8B, v24.8B and v26.8B, v26.8B, v28.8B smax v4.8B, v4.8B, v25.8B and v26.8B, v26.8B, v30.8B uxtl v22.8H, v0.8B and v4.8B, v4.8B, v26.8B uxtl v28.8H, v16.8B saddw v28.8H, v28.8H, v4.8B ssubw v22.8H, v22.8H, v4.8B sqxtun v16.8B, v28.8H sqxtun v0.8B, v22.8H .endm function ff_h264_v_loop_filter_chroma_neon, export=1 h264_loop_filter_start sub x0, x0, x1, lsl #1 ld1 {v18.8B}, [x0], x1 ld1 {v16.8B}, [x0], x1 ld1 {v0.8B}, [x0], x1 ld1 {v2.8B}, [x0] h264_loop_filter_chroma sub x0, x0, x1, lsl #1 st1 {v16.8B}, [x0], x1 st1 {v0.8B}, [x0], x1 ret endfunc function ff_h264_h_loop_filter_chroma_neon, export=1 h264_loop_filter_start sub x0, x0, #2 ld1 {v18.S}[0], [x0], x1 ld1 {v16.S}[0], [x0], x1 ld1 {v0.S}[0], [x0], x1 ld1 {v2.S}[0], [x0], x1 ld1 {v18.S}[1], [x0], x1 ld1 {v16.S}[1], [x0], x1 ld1 {v0.S}[1], [x0], x1 ld1 {v2.S}[1], [x0], x1 transpose_4x8B v18, v16, v0, v2, v28, v29, v30, v31 h264_loop_filter_chroma transpose_4x8B v18, v16, v0, v2, v28, v29, v30, v31 sub x0, x0, x1, lsl #3 st1 {v18.S}[0], [x0], x1 st1 {v16.S}[0], [x0], x1 st1 {v0.S}[0], [x0], x1 st1 {v2.S}[0], [x0], x1 st1 {v18.S}[1], [x0], x1 st1 {v16.S}[1], [x0], x1 st1 {v0.S}[1], [x0], x1 st1 {v2.S}[1], [x0], x1 ret endfunc .macro biweight_16 macs, macd dup v0.16B, w5 dup v1.16B, w6 mov v4.16B, v16.16B mov v6.16B, v16.16B 1: subs w3, w3, #2 ld1 {v20.16B}, [x0], x2 \macd v4.8H, v0.8B, v20.8B \macd\()2 v6.8H, v0.16B, v20.16B ld1 {v22.16B}, [x1], x2 \macs v4.8H, v1.8B, v22.8B \macs\()2 v6.8H, v1.16B, v22.16B mov v24.16B, v16.16B ld1 {v28.16B}, [x0], x2 mov v26.16B, v16.16B \macd v24.8H, v0.8B, v28.8B \macd\()2 v26.8H, v0.16B, v28.16B ld1 {v30.16B}, [x1], x2 \macs v24.8H, v1.8B, v30.8B \macs\()2 v26.8H, v1.16B, v30.16B sshl v4.8H, v4.8H, v18.8H sshl v6.8H, v6.8H, v18.8H sqxtun v4.8B, v4.8H sqxtun2 v4.16B, v6.8H sshl v24.8H, v24.8H, v18.8H sshl v26.8H, v26.8H, v18.8H sqxtun v24.8B, v24.8H sqxtun2 v24.16B, v26.8H mov v6.16B, v16.16B st1 {v4.16B}, [x7], x2 mov v4.16B, v16.16B st1 {v24.16B}, [x7], x2 b.ne 1b ret .endm .macro biweight_8 macs, macd dup v0.8B, w5 dup v1.8B, w6 mov v2.16B, 
v16.16B mov v20.16B, v16.16B 1: subs w3, w3, #2 ld1 {v4.8B}, [x0], x2 \macd v2.8H, v0.8B, v4.8B ld1 {v5.8B}, [x1], x2 \macs v2.8H, v1.8B, v5.8B ld1 {v6.8B}, [x0], x2 \macd v20.8H, v0.8B, v6.8B ld1 {v7.8B}, [x1], x2 \macs v20.8H, v1.8B, v7.8B sshl v2.8H, v2.8H, v18.8H sqxtun v2.8B, v2.8H sshl v20.8H, v20.8H, v18.8H sqxtun v4.8B, v20.8H mov v20.16B, v16.16B st1 {v2.8B}, [x7], x2 mov v2.16B, v16.16B st1 {v4.8B}, [x7], x2 b.ne 1b ret .endm .macro biweight_4 macs, macd dup v0.8B, w5 dup v1.8B, w6 mov v2.16B, v16.16B mov v20.16B,v16.16B 1: subs w3, w3, #4 ld1 {v4.S}[0], [x0], x2 ld1 {v4.S}[1], [x0], x2 \macd v2.8H, v0.8B, v4.8B ld1 {v5.S}[0], [x1], x2 ld1 {v5.S}[1], [x1], x2 \macs v2.8H, v1.8B, v5.8B b.lt 2f ld1 {v6.S}[0], [x0], x2 ld1 {v6.S}[1], [x0], x2 \macd v20.8H, v0.8B, v6.8B ld1 {v7.S}[0], [x1], x2 ld1 {v7.S}[1], [x1], x2 \macs v20.8H, v1.8B, v7.8B sshl v2.8H, v2.8H, v18.8H sqxtun v2.8B, v2.8H sshl v20.8H, v20.8H, v18.8H sqxtun v4.8B, v20.8H mov v20.16B, v16.16B st1 {v2.S}[0], [x7], x2 st1 {v2.S}[1], [x7], x2 mov v2.16B, v16.16B st1 {v4.S}[0], [x7], x2 st1 {v4.S}[1], [x7], x2 b.ne 1b ret 2: sshl v2.8H, v2.8H, v18.8H sqxtun v2.8B, v2.8H st1 {v2.S}[0], [x7], x2 st1 {v2.S}[1], [x7], x2 ret .endm .macro biweight_func w function ff_biweight_h264_pixels_\w\()_neon, export=1 sxtw x2, w2 lsr w8, w5, #31 add w7, w7, #1 eor w8, w8, w6, lsr #30 orr w7, w7, #1 dup v18.8H, w4 lsl w7, w7, w4 not v18.16B, v18.16B dup v16.8H, w7 mov x7, x0 cbz w8, 10f subs w8, w8, #1 b.eq 20f subs w8, w8, #1 b.eq 30f b 40f 10: biweight_\w umlal, umlal 20: neg w5, w5 biweight_\w umlal, umlsl 30: neg w5, w5 neg w6, w6 biweight_\w umlsl, umlsl 40: neg w6, w6 biweight_\w umlsl, umlal endfunc .endm biweight_func 16 biweight_func 8 biweight_func 4 .macro weight_16 add dup v0.16B, w4 1: subs w2, w2, #2 ld1 {v20.16B}, [x0], x1 umull v4.8H, v0.8B, v20.8B umull2 v6.8H, v0.16B, v20.16B ld1 {v28.16B}, [x0], x1 umull v24.8H, v0.8B, v28.8B umull2 v26.8H, v0.16B, v28.16B \add v4.8H, v16.8H, v4.8H srshl v4.8H, v4.8H, v18.8H \add v6.8H, v16.8H, v6.8H srshl v6.8H, v6.8H, v18.8H sqxtun v4.8B, v4.8H sqxtun2 v4.16B, v6.8H \add v24.8H, v16.8H, v24.8H srshl v24.8H, v24.8H, v18.8H \add v26.8H, v16.8H, v26.8H srshl v26.8H, v26.8H, v18.8H sqxtun v24.8B, v24.8H sqxtun2 v24.16B, v26.8H st1 {v4.16B}, [x5], x1 st1 {v24.16B}, [x5], x1 b.ne 1b ret .endm .macro weight_8 add dup v0.8B, w4 1: subs w2, w2, #2 ld1 {v4.8B}, [x0], x1 umull v2.8H, v0.8B, v4.8B ld1 {v6.8B}, [x0], x1 umull v20.8H, v0.8B, v6.8B \add v2.8H, v16.8H, v2.8H srshl v2.8H, v2.8H, v18.8H sqxtun v2.8B, v2.8H \add v20.8H, v16.8H, v20.8H srshl v20.8H, v20.8H, v18.8H sqxtun v4.8B, v20.8H st1 {v2.8B}, [x5], x1 st1 {v4.8B}, [x5], x1 b.ne 1b ret .endm .macro weight_4 add dup v0.8B, w4 1: subs w2, w2, #4 ld1 {v4.S}[0], [x0], x1 ld1 {v4.S}[1], [x0], x1 umull v2.8H, v0.8B, v4.8B b.lt 2f ld1 {v6.S}[0], [x0], x1 ld1 {v6.S}[1], [x0], x1 umull v20.8H, v0.8B, v6.8B \add v2.8H, v16.8H, v2.8H srshl v2.8H, v2.8H, v18.8H sqxtun v2.8B, v2.8H \add v20.8H, v16.8H, v20.8H srshl v20.8H, v20.8h, v18.8H sqxtun v4.8B, v20.8H st1 {v2.S}[0], [x5], x1 st1 {v2.S}[1], [x5], x1 st1 {v4.S}[0], [x5], x1 st1 {v4.S}[1], [x5], x1 b.ne 1b ret 2: \add v2.8H, v16.8H, v2.8H srshl v2.8H, v2.8H, v18.8H sqxtun v2.8B, v2.8H st1 {v2.S}[0], [x5], x1 st1 {v2.S}[1], [x5], x1 ret .endm .macro weight_func w function ff_weight_h264_pixels_\w\()_neon, export=1 sxtw x1, w1 cmp w3, #1 mov w6, #1 lsl w5, w5, w3 dup v16.8H, w5 mov x5, x0 b.le 20f sub w6, w6, w3 dup v18.8H, w6 cmp w4, #0 b.lt 10f weight_\w shadd 10: neg w4, w4 weight_\w shsub 
20:     neg             w6,  w3
        dup             v18.8H, w6
        cmp             w4,  #0
        b.lt            10f
        weight_\w       add
10:     neg             w4,  w4
        weight_\w       sub
endfunc
.endm

        weight_func     16
        weight_func     8
        weight_func     4
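
The chroma filter macro above implements the standard H.264 edge update: a delta derived from (q0 - p0) and (p1 - q1), rounded, clamped to a per-edge threshold (built from the tc0 array by the caller, not shown here), applied symmetrically to p0 and q0, and gated on the alpha/beta comparisons. A scalar sketch of one pixel pair; filter_edge_pixel and the helpers are illustrative names.

#include <stdint.h>
#include <stdlib.h>

static inline int clip3(int x, int lo, int hi)
{
    return x < lo ? lo : x > hi ? hi : x;
}

static inline uint8_t clip_u8(int x)
{
    return x < 0 ? 0 : x > 255 ? 255 : x;
}

/* Scalar model of the p0/q0 update in h264_loop_filter_chroma:
 * delta = clip3((((q0 - p0) << 2) + (p1 - q1) + 4) >> 3, -tc, tc),
 * applied only when |p0-q0| < alpha, |p1-p0| < beta, |q1-q0| < beta. */
static void filter_edge_pixel(uint8_t *p0, uint8_t *q0,
                              uint8_t p1, uint8_t q1,
                              int alpha, int beta, int tc)
{
    if (abs(*p0 - *q0) < alpha && abs(p1 - *p0) < beta &&
        abs(q1 - *q0) < beta) {
        int delta  = clip3((((*q0 - *p0) << 2) + (p1 - q1) + 4) >> 3,
                           -tc, tc);
        int new_p0 = clip_u8(*p0 + delta);   /* saddw + sqxtun */
        int new_q0 = clip_u8(*q0 - delta);   /* ssubw + sqxtun */
        *p0 = new_p0;
        *q0 = new_q0;
    }
}
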
Akagi201/ffmpeg-xcode
33,748
ffmpeg-3.0.2/libavcodec/aarch64/h264qpel_neon.S
/* * Copyright (c) 2008 Mans Rullgard <mans@mansr.com> * Copyright (c) 2013 Janne Grunau <janne-libav@jannau.net> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/aarch64/asm.S" #include "neon.S" /* H.264 qpel MC */ .macro lowpass_const r movz \r, #20, lsl #16 movk \r, #5 mov v6.S[0], \r .endm //trashes v0-v5 .macro lowpass_8 r0, r1, r2, r3, d0, d1, narrow=1 ext v2.8B, \r0\().8B, \r1\().8B, #2 ext v3.8B, \r0\().8B, \r1\().8B, #3 uaddl v2.8H, v2.8B, v3.8B ext v4.8B, \r0\().8B, \r1\().8B, #1 ext v5.8B, \r0\().8B, \r1\().8B, #4 uaddl v4.8H, v4.8B, v5.8B ext v1.8B, \r0\().8B, \r1\().8B, #5 uaddl \d0\().8H, \r0\().8B, v1.8B ext v0.8B, \r2\().8B, \r3\().8B, #2 mla \d0\().8H, v2.8H, v6.H[1] ext v1.8B, \r2\().8B, \r3\().8B, #3 uaddl v0.8H, v0.8B, v1.8B ext v1.8B, \r2\().8B, \r3\().8B, #1 mls \d0\().8H, v4.8H, v6.H[0] ext v3.8B, \r2\().8B, \r3\().8B, #4 uaddl v1.8H, v1.8B, v3.8B ext v2.8B, \r2\().8B, \r3\().8B, #5 uaddl \d1\().8H, \r2\().8B, v2.8B mla \d1\().8H, v0.8H, v6.H[1] mls \d1\().8H, v1.8H, v6.H[0] .if \narrow sqrshrun \d0\().8B, \d0\().8H, #5 sqrshrun \d1\().8B, \d1\().8H, #5 .endif .endm //trashes v0-v5, v7, v30-v31 .macro lowpass_8H r0, r1 ext v0.16B, \r0\().16B, \r0\().16B, #2 ext v1.16B, \r0\().16B, \r0\().16B, #3 uaddl v0.8H, v0.8B, v1.8B ext v2.16B, \r0\().16B, \r0\().16B, #1 ext v3.16B, \r0\().16B, \r0\().16B, #4 uaddl v2.8H, v2.8B, v3.8B ext v30.16B, \r0\().16B, \r0\().16B, #5 uaddl \r0\().8H, \r0\().8B, v30.8B ext v4.16B, \r1\().16B, \r1\().16B, #2 mla \r0\().8H, v0.8H, v6.H[1] ext v5.16B, \r1\().16B, \r1\().16B, #3 uaddl v4.8H, v4.8B, v5.8B ext v7.16B, \r1\().16B, \r1\().16B, #1 mls \r0\().8H, v2.8H, v6.H[0] ext v0.16B, \r1\().16B, \r1\().16B, #4 uaddl v7.8H, v7.8B, v0.8B ext v31.16B, \r1\().16B, \r1\().16B, #5 uaddl \r1\().8H, \r1\().8B, v31.8B mla \r1\().8H, v4.8H, v6.H[1] mls \r1\().8H, v7.8H, v6.H[0] .endm // trashes v2-v5, v30 .macro lowpass_8_1 r0, r1, d0, narrow=1 ext v2.8B, \r0\().8B, \r1\().8B, #2 ext v3.8B, \r0\().8B, \r1\().8B, #3 uaddl v2.8H, v2.8B, v3.8B ext v4.8B, \r0\().8B, \r1\().8B, #1 ext v5.8B, \r0\().8B, \r1\().8B, #4 uaddl v4.8H, v4.8B, v5.8B ext v30.8B, \r0\().8B, \r1\().8B, #5 uaddl \d0\().8H, \r0\().8B, v30.8B mla \d0\().8H, v2.8H, v6.H[1] mls \d0\().8H, v4.8H, v6.H[0] .if \narrow sqrshrun \d0\().8B, \d0\().8H, #5 .endif .endm // trashed v0-v7 .macro lowpass_8.16 r0, r1, r2 ext v1.16B, \r0\().16B, \r1\().16B, #4 ext v0.16B, \r0\().16B, \r1\().16B, #6 saddl v5.4S, v1.4H, v0.4H ext v2.16B, \r0\().16B, \r1\().16B, #2 saddl2 v1.4S, v1.8H, v0.8H ext v3.16B, \r0\().16B, \r1\().16B, #8 saddl v6.4S, v2.4H, v3.4H ext \r1\().16B, \r0\().16B, \r1\().16B, #10 saddl2 v2.4S, v2.8H, v3.8H saddl v0.4S, \r0\().4H, \r1\().4H saddl2 v4.4S, \r0\().8H, \r1\().8H shl v3.4S, v5.4S, #4 shl v5.4S, v5.4S, #2 shl v7.4S, v6.4S, #2 add v5.4S, v5.4S, v3.4S add v6.4S, v6.4S, v7.4S shl 
v3.4S, v1.4S, #4 shl v1.4S, v1.4S, #2 shl v7.4S, v2.4S, #2 add v1.4S, v1.4S, v3.4S add v2.4S, v2.4S, v7.4S add v5.4S, v5.4S, v0.4S sub v5.4S, v5.4S, v6.4S add v1.4S, v1.4S, v4.4S sub v1.4S, v1.4S, v2.4S rshrn v5.4H, v5.4S, #10 rshrn2 v5.8H, v1.4S, #10 sqxtun \r2\().8B, v5.8H .endm function put_h264_qpel16_h_lowpass_neon_packed mov x4, x30 mov x12, #16 mov x3, #8 bl put_h264_qpel8_h_lowpass_neon sub x1, x1, x2, lsl #4 add x1, x1, #8 mov x12, #16 mov x30, x4 b put_h264_qpel8_h_lowpass_neon endfunc .macro h264_qpel_h_lowpass type function \type\()_h264_qpel16_h_lowpass_neon mov x13, x30 mov x12, #16 bl \type\()_h264_qpel8_h_lowpass_neon sub x0, x0, x3, lsl #4 sub x1, x1, x2, lsl #4 add x0, x0, #8 add x1, x1, #8 mov x12, #16 mov x30, x13 endfunc function \type\()_h264_qpel8_h_lowpass_neon 1: ld1 {v28.8B, v29.8B}, [x1], x2 ld1 {v16.8B, v17.8B}, [x1], x2 subs x12, x12, #2 lowpass_8 v28, v29, v16, v17, v28, v16 .ifc \type,avg ld1 {v2.8B}, [x0], x3 urhadd v28.8B, v28.8B, v2.8B ld1 {v3.8B}, [x0] urhadd v16.8B, v16.8B, v3.8B sub x0, x0, x3 .endif st1 {v28.8B}, [x0], x3 st1 {v16.8B}, [x0], x3 b.ne 1b ret endfunc .endm h264_qpel_h_lowpass put h264_qpel_h_lowpass avg .macro h264_qpel_h_lowpass_l2 type function \type\()_h264_qpel16_h_lowpass_l2_neon mov x13, x30 mov x12, #16 bl \type\()_h264_qpel8_h_lowpass_l2_neon sub x0, x0, x2, lsl #4 sub x1, x1, x2, lsl #4 sub x3, x3, x2, lsl #4 add x0, x0, #8 add x1, x1, #8 add x3, x3, #8 mov x12, #16 mov x30, x13 endfunc function \type\()_h264_qpel8_h_lowpass_l2_neon 1: ld1 {v26.8B, v27.8B}, [x1], x2 ld1 {v16.8B, v17.8B}, [x1], x2 ld1 {v28.8B}, [x3], x2 ld1 {v29.8B}, [x3], x2 subs x12, x12, #2 lowpass_8 v26, v27, v16, v17, v26, v27 urhadd v26.8B, v26.8B, v28.8B urhadd v27.8B, v27.8B, v29.8B .ifc \type,avg ld1 {v2.8B}, [x0], x2 urhadd v26.8B, v26.8B, v2.8B ld1 {v3.8B}, [x0] urhadd v27.8B, v27.8B, v3.8B sub x0, x0, x2 .endif st1 {v26.8B}, [x0], x2 st1 {v27.8B}, [x0], x2 b.ne 1b ret endfunc .endm h264_qpel_h_lowpass_l2 put h264_qpel_h_lowpass_l2 avg function put_h264_qpel16_v_lowpass_neon_packed mov x4, x30 mov x2, #8 bl put_h264_qpel8_v_lowpass_neon sub x1, x1, x3, lsl #2 bl put_h264_qpel8_v_lowpass_neon sub x1, x1, x3, lsl #4 sub x1, x1, x3, lsl #2 add x1, x1, #8 bl put_h264_qpel8_v_lowpass_neon sub x1, x1, x3, lsl #2 mov x30, x4 b put_h264_qpel8_v_lowpass_neon endfunc .macro h264_qpel_v_lowpass type function \type\()_h264_qpel16_v_lowpass_neon mov x4, x30 bl \type\()_h264_qpel8_v_lowpass_neon sub x1, x1, x3, lsl #2 bl \type\()_h264_qpel8_v_lowpass_neon sub x0, x0, x2, lsl #4 add x0, x0, #8 sub x1, x1, x3, lsl #4 sub x1, x1, x3, lsl #2 add x1, x1, #8 bl \type\()_h264_qpel8_v_lowpass_neon sub x1, x1, x3, lsl #2 mov x30, x4 endfunc function \type\()_h264_qpel8_v_lowpass_neon ld1 {v16.8B}, [x1], x3 ld1 {v18.8B}, [x1], x3 ld1 {v20.8B}, [x1], x3 ld1 {v22.8B}, [x1], x3 ld1 {v24.8B}, [x1], x3 ld1 {v26.8B}, [x1], x3 ld1 {v28.8B}, [x1], x3 ld1 {v30.8B}, [x1], x3 ld1 {v17.8B}, [x1], x3 ld1 {v19.8B}, [x1], x3 ld1 {v21.8B}, [x1], x3 ld1 {v23.8B}, [x1], x3 ld1 {v25.8B}, [x1] transpose_8x8B v16, v18, v20, v22, v24, v26, v28, v30, v0, v1 transpose_8x8B v17, v19, v21, v23, v25, v27, v29, v31, v0, v1 lowpass_8 v16, v17, v18, v19, v16, v17 lowpass_8 v20, v21, v22, v23, v18, v19 lowpass_8 v24, v25, v26, v27, v20, v21 lowpass_8 v28, v29, v30, v31, v22, v23 transpose_8x8B v16, v17, v18, v19, v20, v21, v22, v23, v0, v1 .ifc \type,avg ld1 {v24.8B}, [x0], x2 urhadd v16.8B, v16.8B, v24.8B ld1 {v25.8B}, [x0], x2 urhadd v17.8B, v17.8B, v25.8B ld1 {v26.8B}, [x0], x2 urhadd v18.8B, v18.8B, 
v26.8B ld1 {v27.8B}, [x0], x2 urhadd v19.8B, v19.8B, v27.8B ld1 {v28.8B}, [x0], x2 urhadd v20.8B, v20.8B, v28.8B ld1 {v29.8B}, [x0], x2 urhadd v21.8B, v21.8B, v29.8B ld1 {v30.8B}, [x0], x2 urhadd v22.8B, v22.8B, v30.8B ld1 {v31.8B}, [x0], x2 urhadd v23.8B, v23.8B, v31.8B sub x0, x0, x2, lsl #3 .endif st1 {v16.8B}, [x0], x2 st1 {v17.8B}, [x0], x2 st1 {v18.8B}, [x0], x2 st1 {v19.8B}, [x0], x2 st1 {v20.8B}, [x0], x2 st1 {v21.8B}, [x0], x2 st1 {v22.8B}, [x0], x2 st1 {v23.8B}, [x0], x2 ret endfunc .endm h264_qpel_v_lowpass put h264_qpel_v_lowpass avg .macro h264_qpel_v_lowpass_l2 type function \type\()_h264_qpel16_v_lowpass_l2_neon mov x4, x30 bl \type\()_h264_qpel8_v_lowpass_l2_neon sub x1, x1, x3, lsl #2 bl \type\()_h264_qpel8_v_lowpass_l2_neon sub x0, x0, x3, lsl #4 sub x12, x12, x2, lsl #4 add x0, x0, #8 add x12, x12, #8 sub x1, x1, x3, lsl #4 sub x1, x1, x3, lsl #2 add x1, x1, #8 bl \type\()_h264_qpel8_v_lowpass_l2_neon sub x1, x1, x3, lsl #2 mov x30, x4 endfunc function \type\()_h264_qpel8_v_lowpass_l2_neon ld1 {v16.8B}, [x1], x3 ld1 {v18.8B}, [x1], x3 ld1 {v20.8B}, [x1], x3 ld1 {v22.8B}, [x1], x3 ld1 {v24.8B}, [x1], x3 ld1 {v26.8B}, [x1], x3 ld1 {v28.8B}, [x1], x3 ld1 {v30.8B}, [x1], x3 ld1 {v17.8B}, [x1], x3 ld1 {v19.8B}, [x1], x3 ld1 {v21.8B}, [x1], x3 ld1 {v23.8B}, [x1], x3 ld1 {v25.8B}, [x1] transpose_8x8B v16, v18, v20, v22, v24, v26, v28, v30, v0, v1 transpose_8x8B v17, v19, v21, v23, v25, v27, v29, v31, v0, v1 lowpass_8 v16, v17, v18, v19, v16, v17 lowpass_8 v20, v21, v22, v23, v18, v19 lowpass_8 v24, v25, v26, v27, v20, v21 lowpass_8 v28, v29, v30, v31, v22, v23 transpose_8x8B v16, v17, v18, v19, v20, v21, v22, v23, v0, v1 ld1 {v24.8B}, [x12], x2 ld1 {v25.8B}, [x12], x2 ld1 {v26.8B}, [x12], x2 ld1 {v27.8B}, [x12], x2 ld1 {v28.8B}, [x12], x2 urhadd v16.8B, v24.8B, v16.8B urhadd v17.8B, v25.8B, v17.8B ld1 {v29.8B}, [x12], x2 urhadd v18.8B, v26.8B, v18.8B urhadd v19.8B, v27.8B, v19.8B ld1 {v30.8B}, [x12], x2 urhadd v20.8B, v28.8B, v20.8B urhadd v21.8B, v29.8B, v21.8B ld1 {v31.8B}, [x12], x2 urhadd v22.8B, v30.8B, v22.8B urhadd v23.8B, v31.8B, v23.8B .ifc \type,avg ld1 {v24.8B}, [x0], x3 urhadd v16.8B, v16.8B, v24.8B ld1 {v25.8B}, [x0], x3 urhadd v17.8B, v17.8B, v25.8B ld1 {v26.8B}, [x0], x3 urhadd v18.8B, v18.8B, v26.8B ld1 {v27.8B}, [x0], x3 urhadd v19.8B, v19.8B, v27.8B ld1 {v28.8B}, [x0], x3 urhadd v20.8B, v20.8B, v28.8B ld1 {v29.8B}, [x0], x3 urhadd v21.8B, v21.8B, v29.8B ld1 {v30.8B}, [x0], x3 urhadd v22.8B, v22.8B, v30.8B ld1 {v31.8B}, [x0], x3 urhadd v23.8B, v23.8B, v31.8B sub x0, x0, x3, lsl #3 .endif st1 {v16.8B}, [x0], x3 st1 {v17.8B}, [x0], x3 st1 {v18.8B}, [x0], x3 st1 {v19.8B}, [x0], x3 st1 {v20.8B}, [x0], x3 st1 {v21.8B}, [x0], x3 st1 {v22.8B}, [x0], x3 st1 {v23.8B}, [x0], x3 ret endfunc .endm h264_qpel_v_lowpass_l2 put h264_qpel_v_lowpass_l2 avg function put_h264_qpel8_hv_lowpass_neon_top lowpass_const w12 ld1 {v16.8H}, [x1], x3 ld1 {v17.8H}, [x1], x3 ld1 {v18.8H}, [x1], x3 ld1 {v19.8H}, [x1], x3 ld1 {v20.8H}, [x1], x3 ld1 {v21.8H}, [x1], x3 ld1 {v22.8H}, [x1], x3 ld1 {v23.8H}, [x1], x3 ld1 {v24.8H}, [x1], x3 ld1 {v25.8H}, [x1], x3 ld1 {v26.8H}, [x1], x3 ld1 {v27.8H}, [x1], x3 ld1 {v28.8H}, [x1] lowpass_8H v16, v17 lowpass_8H v18, v19 lowpass_8H v20, v21 lowpass_8H v22, v23 lowpass_8H v24, v25 lowpass_8H v26, v27 lowpass_8H v28, v29 transpose_8x8H v16, v17, v18, v19, v20, v21, v22, v23, v0, v1 transpose_8x8H v24, v25, v26, v27, v28, v29, v30, v31, v0, v1 lowpass_8.16 v16, v24, v16 lowpass_8.16 v17, v25, v17 lowpass_8.16 v18, v26, v18 lowpass_8.16 v19, v27, v19 
lowpass_8.16 v20, v28, v20 lowpass_8.16 v21, v29, v21 lowpass_8.16 v22, v30, v22 lowpass_8.16 v23, v31, v23 transpose_8x8B v16, v17, v18, v19, v20, v21, v22, v23, v0, v1 ret endfunc .macro h264_qpel8_hv_lowpass type function \type\()_h264_qpel8_hv_lowpass_neon mov x10, x30 bl put_h264_qpel8_hv_lowpass_neon_top .ifc \type,avg ld1 {v0.8B}, [x0], x2 urhadd v16.8B, v16.8B, v0.8B ld1 {v1.8B}, [x0], x2 urhadd v17.8B, v17.8B, v1.8B ld1 {v2.8B}, [x0], x2 urhadd v18.8B, v18.8B, v2.8B ld1 {v3.8B}, [x0], x2 urhadd v19.8B, v19.8B, v3.8B ld1 {v4.8B}, [x0], x2 urhadd v20.8B, v20.8B, v4.8B ld1 {v5.8B}, [x0], x2 urhadd v21.8B, v21.8B, v5.8B ld1 {v6.8B}, [x0], x2 urhadd v22.8B, v22.8B, v6.8B ld1 {v7.8B}, [x0], x2 urhadd v23.8B, v23.8B, v7.8B sub x0, x0, x2, lsl #3 .endif st1 {v16.8B}, [x0], x2 st1 {v17.8B}, [x0], x2 st1 {v18.8B}, [x0], x2 st1 {v19.8B}, [x0], x2 st1 {v20.8B}, [x0], x2 st1 {v21.8B}, [x0], x2 st1 {v22.8B}, [x0], x2 st1 {v23.8B}, [x0], x2 ret x10 endfunc .endm h264_qpel8_hv_lowpass put h264_qpel8_hv_lowpass avg .macro h264_qpel8_hv_lowpass_l2 type function \type\()_h264_qpel8_hv_lowpass_l2_neon mov x10, x30 bl put_h264_qpel8_hv_lowpass_neon_top ld1 {v0.8B, v1.8B}, [x2], #16 ld1 {v2.8B, v3.8B}, [x2], #16 urhadd v0.8B, v0.8B, v16.8B urhadd v1.8B, v1.8B, v17.8B ld1 {v4.8B, v5.8B}, [x2], #16 urhadd v2.8B, v2.8B, v18.8B urhadd v3.8B, v3.8B, v19.8B ld1 {v6.8B, v7.8B}, [x2], #16 urhadd v4.8B, v4.8B, v20.8B urhadd v5.8B, v5.8B, v21.8B urhadd v6.8B, v6.8B, v22.8B urhadd v7.8B, v7.8B, v23.8B .ifc \type,avg ld1 {v16.8B}, [x0], x3 urhadd v0.8B, v0.8B, v16.8B ld1 {v17.8B}, [x0], x3 urhadd v1.8B, v1.8B, v17.8B ld1 {v18.8B}, [x0], x3 urhadd v2.8B, v2.8B, v18.8B ld1 {v19.8B}, [x0], x3 urhadd v3.8B, v3.8B, v19.8B ld1 {v20.8B}, [x0], x3 urhadd v4.8B, v4.8B, v20.8B ld1 {v21.8B}, [x0], x3 urhadd v5.8B, v5.8B, v21.8B ld1 {v22.8B}, [x0], x3 urhadd v6.8B, v6.8B, v22.8B ld1 {v23.8B}, [x0], x3 urhadd v7.8B, v7.8B, v23.8B sub x0, x0, x3, lsl #3 .endif st1 {v0.8B}, [x0], x3 st1 {v1.8B}, [x0], x3 st1 {v2.8B}, [x0], x3 st1 {v3.8B}, [x0], x3 st1 {v4.8B}, [x0], x3 st1 {v5.8B}, [x0], x3 st1 {v6.8B}, [x0], x3 st1 {v7.8B}, [x0], x3 ret x10 endfunc .endm h264_qpel8_hv_lowpass_l2 put h264_qpel8_hv_lowpass_l2 avg .macro h264_qpel16_hv type function \type\()_h264_qpel16_hv_lowpass_neon mov x13, x30 bl \type\()_h264_qpel8_hv_lowpass_neon sub x1, x1, x3, lsl #2 bl \type\()_h264_qpel8_hv_lowpass_neon sub x1, x1, x3, lsl #4 sub x1, x1, x3, lsl #2 add x1, x1, #8 sub x0, x0, x2, lsl #4 add x0, x0, #8 bl \type\()_h264_qpel8_hv_lowpass_neon sub x1, x1, x3, lsl #2 mov x30, x13 b \type\()_h264_qpel8_hv_lowpass_neon endfunc function \type\()_h264_qpel16_hv_lowpass_l2_neon mov x13, x30 sub x2, x4, #256 bl \type\()_h264_qpel8_hv_lowpass_l2_neon sub x1, x1, x3, lsl #2 bl \type\()_h264_qpel8_hv_lowpass_l2_neon sub x1, x1, x3, lsl #4 sub x1, x1, x3, lsl #2 add x1, x1, #8 sub x0, x0, x3, lsl #4 add x0, x0, #8 bl \type\()_h264_qpel8_hv_lowpass_l2_neon sub x1, x1, x3, lsl #2 mov x30, x13 b \type\()_h264_qpel8_hv_lowpass_l2_neon endfunc .endm h264_qpel16_hv put h264_qpel16_hv avg .macro h264_qpel8 type function ff_\type\()_h264_qpel8_mc10_neon, export=1 lowpass_const w3 mov x3, x1 sub x1, x1, #2 mov x12, #8 b \type\()_h264_qpel8_h_lowpass_l2_neon endfunc function ff_\type\()_h264_qpel8_mc20_neon, export=1 lowpass_const w3 sub x1, x1, #2 mov x3, x2 mov x12, #8 b \type\()_h264_qpel8_h_lowpass_neon endfunc function ff_\type\()_h264_qpel8_mc30_neon, export=1 lowpass_const w3 add x3, x1, #1 sub x1, x1, #2 mov x12, #8 b 
\type\()_h264_qpel8_h_lowpass_l2_neon endfunc function ff_\type\()_h264_qpel8_mc01_neon, export=1 mov x14, x30 mov x12, x1 \type\()_h264_qpel8_mc01: lowpass_const w3 mov x3, x2 sub x1, x1, x2, lsl #1 bl \type\()_h264_qpel8_v_lowpass_l2_neon ret x14 endfunc function ff_\type\()_h264_qpel8_mc11_neon, export=1 mov x14, x30 mov x8, x0 mov x9, x1 \type\()_h264_qpel8_mc11: lowpass_const w3 mov x11, sp sub sp, sp, #64 mov x0, sp sub x1, x1, #2 mov x3, #8 mov x12, #8 bl put_h264_qpel8_h_lowpass_neon mov x0, x8 mov x3, x2 mov x12, sp sub x1, x9, x2, lsl #1 mov x2, #8 bl \type\()_h264_qpel8_v_lowpass_l2_neon mov sp, x11 ret x14 endfunc function ff_\type\()_h264_qpel8_mc21_neon, export=1 mov x14, x30 mov x8, x0 mov x9, x1 \type\()_h264_qpel8_mc21: lowpass_const w3 mov x11, sp sub sp, sp, #(8*8+16*12) sub x1, x1, #2 mov x3, #8 mov x0, sp mov x12, #8 bl put_h264_qpel8_h_lowpass_neon mov x4, x0 mov x0, x8 sub x1, x9, x2, lsl #1 sub x1, x1, #2 mov x3, x2 sub x2, x4, #64 bl \type\()_h264_qpel8_hv_lowpass_l2_neon mov sp, x11 ret x14 endfunc function ff_\type\()_h264_qpel8_mc31_neon, export=1 add x1, x1, #1 mov x14, x30 mov x8, x0 mov x9, x1 sub x1, x1, #1 b \type\()_h264_qpel8_mc11 endfunc function ff_\type\()_h264_qpel8_mc02_neon, export=1 mov x14, x30 lowpass_const w3 sub x1, x1, x2, lsl #1 mov x3, x2 bl \type\()_h264_qpel8_v_lowpass_neon ret x14 endfunc function ff_\type\()_h264_qpel8_mc12_neon, export=1 mov x14, x30 mov x8, x0 mov x9, x1 \type\()_h264_qpel8_mc12: lowpass_const w3 mov x11, sp sub sp, sp, #(8*8+16*12) sub x1, x1, x2, lsl #1 mov x3, x2 mov x2, #8 mov x0, sp bl put_h264_qpel8_v_lowpass_neon mov x4, x0 mov x0, x8 sub x1, x9, x3, lsl #1 sub x1, x1, #2 sub x2, x4, #64 bl \type\()_h264_qpel8_hv_lowpass_l2_neon mov sp, x11 ret x14 endfunc function ff_\type\()_h264_qpel8_mc22_neon, export=1 mov x14, x30 mov x11, sp sub x1, x1, x2, lsl #1 sub x1, x1, #2 mov x3, x2 bl \type\()_h264_qpel8_hv_lowpass_neon mov sp, x11 ret x14 endfunc function ff_\type\()_h264_qpel8_mc32_neon, export=1 mov x14, x30 mov x8, x0 mov x9, x1 add x1, x1, #1 b \type\()_h264_qpel8_mc12 endfunc function ff_\type\()_h264_qpel8_mc03_neon, export=1 mov x14, x30 add x12, x1, x2 b \type\()_h264_qpel8_mc01 endfunc function ff_\type\()_h264_qpel8_mc13_neon, export=1 mov x14, x30 mov x8, x0 mov x9, x1 add x1, x1, x2 b \type\()_h264_qpel8_mc11 endfunc function ff_\type\()_h264_qpel8_mc23_neon, export=1 mov x14, x30 mov x8, x0 mov x9, x1 add x1, x1, x2 b \type\()_h264_qpel8_mc21 endfunc function ff_\type\()_h264_qpel8_mc33_neon, export=1 add x1, x1, #1 mov x14, x30 mov x8, x0 mov x9, x1 add x1, x1, x2 sub x1, x1, #1 b \type\()_h264_qpel8_mc11 endfunc .endm h264_qpel8 put h264_qpel8 avg .macro h264_qpel16 type function ff_\type\()_h264_qpel16_mc10_neon, export=1 lowpass_const w3 mov x3, x1 sub x1, x1, #2 b \type\()_h264_qpel16_h_lowpass_l2_neon endfunc function ff_\type\()_h264_qpel16_mc20_neon, export=1 lowpass_const w3 sub x1, x1, #2 mov x3, x2 b \type\()_h264_qpel16_h_lowpass_neon endfunc function ff_\type\()_h264_qpel16_mc30_neon, export=1 lowpass_const w3 add x3, x1, #1 sub x1, x1, #2 b \type\()_h264_qpel16_h_lowpass_l2_neon endfunc function ff_\type\()_h264_qpel16_mc01_neon, export=1 mov x14, x30 mov x12, x1 \type\()_h264_qpel16_mc01: lowpass_const w3 mov x3, x2 sub x1, x1, x2, lsl #1 bl \type\()_h264_qpel16_v_lowpass_l2_neon ret x14 endfunc function ff_\type\()_h264_qpel16_mc11_neon, export=1 mov x14, x30 mov x8, x0 mov x9, x1 \type\()_h264_qpel16_mc11: lowpass_const w3 mov x11, sp sub sp, sp, #256 mov x0, sp sub x1, x1, #2 mov 
x3, #16 bl put_h264_qpel16_h_lowpass_neon mov x0, x8 mov x3, x2 mov x12, sp sub x1, x9, x2, lsl #1 mov x2, #16 bl \type\()_h264_qpel16_v_lowpass_l2_neon mov sp, x11 ret x14 endfunc function ff_\type\()_h264_qpel16_mc21_neon, export=1 mov x14, x30 mov x8, x0 mov x9, x1 \type\()_h264_qpel16_mc21: lowpass_const w3 mov x11, sp sub sp, sp, #(16*16+16*12) sub x1, x1, #2 mov x0, sp bl put_h264_qpel16_h_lowpass_neon_packed mov x4, x0 mov x0, x8 sub x1, x9, x2, lsl #1 sub x1, x1, #2 mov x3, x2 bl \type\()_h264_qpel16_hv_lowpass_l2_neon mov sp, x11 ret x14 endfunc function ff_\type\()_h264_qpel16_mc31_neon, export=1 add x1, x1, #1 mov x14, x30 mov x8, x0 mov x9, x1 sub x1, x1, #1 b \type\()_h264_qpel16_mc11 endfunc function ff_\type\()_h264_qpel16_mc02_neon, export=1 mov x14, x30 lowpass_const w3 sub x1, x1, x2, lsl #1 mov x3, x2 bl \type\()_h264_qpel16_v_lowpass_neon ret x14 endfunc function ff_\type\()_h264_qpel16_mc12_neon, export=1 mov x14, x30 mov x8, x0 mov x9, x1 \type\()_h264_qpel16_mc12: lowpass_const w3 mov x11, sp sub sp, sp, #(16*16+16*12) sub x1, x1, x2, lsl #1 mov x0, sp mov x3, x2 bl put_h264_qpel16_v_lowpass_neon_packed mov x4, x0 mov x0, x8 sub x1, x9, x3, lsl #1 sub x1, x1, #2 mov x2, x3 bl \type\()_h264_qpel16_hv_lowpass_l2_neon mov sp, x11 ret x14 endfunc function ff_\type\()_h264_qpel16_mc22_neon, export=1 mov x14, x30 lowpass_const w3 mov x11, sp sub x1, x1, x2, lsl #1 sub x1, x1, #2 mov x3, x2 bl \type\()_h264_qpel16_hv_lowpass_neon mov sp, x11 // restore stack ret x14 endfunc function ff_\type\()_h264_qpel16_mc32_neon, export=1 mov x14, x30 mov x8, x0 mov x9, x1 add x1, x1, #1 b \type\()_h264_qpel16_mc12 endfunc function ff_\type\()_h264_qpel16_mc03_neon, export=1 mov x14, x30 add x12, x1, x2 b \type\()_h264_qpel16_mc01 endfunc function ff_\type\()_h264_qpel16_mc13_neon, export=1 mov x14, x30 mov x8, x0 mov x9, x1 add x1, x1, x2 b \type\()_h264_qpel16_mc11 endfunc function ff_\type\()_h264_qpel16_mc23_neon, export=1 mov x14, x30 mov x8, x0 mov x9, x1 add x1, x1, x2 b \type\()_h264_qpel16_mc21 endfunc function ff_\type\()_h264_qpel16_mc33_neon, export=1 add x1, x1, #1 mov x14, x30 mov x8, x0 mov x9, x1 add x1, x1, x2 sub x1, x1, #1 b \type\()_h264_qpel16_mc11 endfunc .endm h264_qpel16 put h264_qpel16 avg
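
All of the qpel helpers above are built from one primitive, visible in the lowpass_8 macro: the H.264 six-tap (1, -5, 20, 20, -5, 1) half-pel filter with rounding and unsigned saturation (the multiplies by 20 and 5 come from the constant loaded by lowpass_const). A scalar model for the horizontal case; qpel_h_lowpass_ref is an illustrative name.

#include <stdint.h>

static inline uint8_t sat_u8(int x)
{
    return x < 0 ? 0 : x > 255 ? 255 : x;
}

/* Scalar model of lowpass_8: for each output sample, combine six
 * consecutive inputs with taps (1,-5,20,20,-5,1), round, shift by 5
 * and saturate.  'src' must have the usual 2 left / 3 right margin
 * of readable bytes (expressed here as src[i]..src[i+5]). */
static void qpel_h_lowpass_ref(uint8_t *dst, const uint8_t *src, int width)
{
    for (int i = 0; i < width; i++) {
        int acc = src[i] + src[i + 5]
                + 20 * (src[i + 2] + src[i + 3])
                -  5 * (src[i + 1] + src[i + 4]);
        dst[i] = sat_u8((acc + 16) >> 5);    /* matches sqrshrun #5 */
    }
}
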
Akagi201/ffmpeg-xcode
16,047
ffmpeg-3.0.2/libavcodec/aarch64/h264idct_neon.S
/* * Copyright (c) 2008 Mans Rullgard <mans@mansr.com> * Copyright (c) 2013 Janne Grunau <janne-libav@jannau.net> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/aarch64/asm.S" #include "neon.S" function ff_h264_idct_add_neon, export=1 ld1 {v0.4H, v1.4H, v2.4H, v3.4H}, [x1] sxtw x2, w2 movi v30.8H, #0 add v4.4H, v0.4H, v2.4H sshr v16.4H, v1.4H, #1 st1 {v30.8H}, [x1], #16 sshr v17.4H, v3.4H, #1 st1 {v30.8H}, [x1], #16 sub v5.4H, v0.4H, v2.4H add v6.4H, v1.4H, v17.4H sub v7.4H, v16.4H, v3.4H add v0.4H, v4.4H, v6.4H add v1.4H, v5.4H, v7.4H sub v3.4H, v4.4H, v6.4H sub v2.4H, v5.4H, v7.4H transpose_4x4H v0, v1, v2, v3, v4, v5, v6, v7 add v4.4H, v0.4H, v3.4H ld1 {v18.S}[0], [x0], x2 sshr v16.4H, v2.4H, #1 sshr v17.4H, v1.4H, #1 ld1 {v19.S}[1], [x0], x2 sub v5.4H, v0.4H, v3.4H ld1 {v18.S}[1], [x0], x2 add v6.4H, v16.4H, v1.4H ins v4.D[1], v5.D[0] sub v7.4H, v2.4H, v17.4H ld1 {v19.S}[0], [x0], x2 ins v6.D[1], v7.D[0] sub x0, x0, x2, lsl #2 add v0.8H, v4.8H, v6.8H sub v1.8H, v4.8H, v6.8H srshr v0.8H, v0.8H, #6 srshr v1.8H, v1.8H, #6 uaddw v0.8H, v0.8H, v18.8B uaddw v1.8H, v1.8H, v19.8B sqxtun v0.8B, v0.8H sqxtun v1.8B, v1.8H st1 {v0.S}[0], [x0], x2 st1 {v1.S}[1], [x0], x2 st1 {v0.S}[1], [x0], x2 st1 {v1.S}[0], [x0], x2 sub x1, x1, #32 ret endfunc function ff_h264_idct_dc_add_neon, export=1 sxtw x2, w2 mov w3, #0 ld1r {v2.8H}, [x1] strh w3, [x1] srshr v2.8H, v2.8H, #6 ld1 {v0.S}[0], [x0], x2 ld1 {v0.S}[1], [x0], x2 uaddw v3.8H, v2.8H, v0.8B ld1 {v1.S}[0], [x0], x2 ld1 {v1.S}[1], [x0], x2 uaddw v4.8H, v2.8H, v1.8B sqxtun v0.8B, v3.8H sqxtun v1.8B, v4.8H sub x0, x0, x2, lsl #2 st1 {v0.S}[0], [x0], x2 st1 {v0.S}[1], [x0], x2 st1 {v1.S}[0], [x0], x2 st1 {v1.S}[1], [x0], x2 ret endfunc function ff_h264_idct_add16_neon, export=1 mov x12, x30 mov x6, x0 // dest mov x5, x1 // block_offset mov x1, x2 // block mov w9, w3 // stride movrel x7, scan8 mov x10, #16 movrel x13, X(ff_h264_idct_dc_add_neon) movrel x14, X(ff_h264_idct_add_neon) 1: mov w2, w9 ldrb w3, [x7], #1 ldrsw x0, [x5], #4 ldrb w3, [x4, w3, uxtw] subs w3, w3, #1 b.lt 2f ldrsh w3, [x1] add x0, x0, x6 ccmp w3, #0, #4, eq csel x15, x13, x14, ne blr x15 2: subs x10, x10, #1 add x1, x1, #32 b.ne 1b ret x12 endfunc function ff_h264_idct_add16intra_neon, export=1 mov x12, x30 mov x6, x0 // dest mov x5, x1 // block_offset mov x1, x2 // block mov w9, w3 // stride movrel x7, scan8 mov x10, #16 movrel x13, X(ff_h264_idct_dc_add_neon) movrel x14, X(ff_h264_idct_add_neon) 1: mov w2, w9 ldrb w3, [x7], #1 ldrsw x0, [x5], #4 ldrb w3, [x4, w3, uxtw] add x0, x0, x6 cmp w3, #0 ldrsh w3, [x1] csel x15, x13, x14, eq ccmp w3, #0, #0, eq b.eq 2f blr x15 2: subs x10, x10, #1 add x1, x1, #32 b.ne 1b ret x12 endfunc function ff_h264_idct_add8_neon, export=1 sub sp, sp, #0x40 stp x19, x20, [sp] mov x12, x30 ldp x6, x15, [x0] // dest[0], dest[1] add x5, x1, #16*4 // 
block_offset add x9, x2, #16*32 // block mov w19, w3 // stride movrel x13, X(ff_h264_idct_dc_add_neon) movrel x14, X(ff_h264_idct_add_neon) movrel x7, scan8+16 mov x10, #0 mov x11, #16 1: mov w2, w19 ldrb w3, [x7, x10] // scan8[i] ldrsw x0, [x5, x10, lsl #2] // block_offset[i] ldrb w3, [x4, w3, uxtw] // nnzc[ scan8[i] ] add x0, x0, x6 // block_offset[i] + dst[j-1] add x1, x9, x10, lsl #5 // block + i * 16 cmp w3, #0 ldrsh w3, [x1] // block[i*16] csel x20, x13, x14, eq ccmp w3, #0, #0, eq b.eq 2f blr x20 2: add x10, x10, #1 cmp x10, #4 csel x10, x11, x10, eq // mov x10, #16 csel x6, x15, x6, eq cmp x10, #20 b.lt 1b ldp x19, x20, [sp] add sp, sp, #0x40 ret x12 endfunc .macro idct8x8_cols pass .if \pass == 0 va .req v18 vb .req v30 sshr v18.8H, v26.8H, #1 add v16.8H, v24.8H, v28.8H ld1 {v30.8H, v31.8H}, [x1] st1 {v19.8H}, [x1], #16 st1 {v19.8H}, [x1], #16 sub v17.8H, v24.8H, v28.8H sshr v19.8H, v30.8H, #1 sub v18.8H, v18.8H, v30.8H add v19.8H, v19.8H, v26.8H .else va .req v30 vb .req v18 sshr v30.8H, v26.8H, #1 sshr v19.8H, v18.8H, #1 add v16.8H, v24.8H, v28.8H sub v17.8H, v24.8H, v28.8H sub v30.8H, v30.8H, v18.8H add v19.8H, v19.8H, v26.8H .endif add v26.8H, v17.8H, va.8H sub v28.8H, v17.8H, va.8H add v24.8H, v16.8H, v19.8H sub vb.8H, v16.8H, v19.8H sub v16.8H, v29.8H, v27.8H add v17.8H, v31.8H, v25.8H sub va.8H, v31.8H, v25.8H add v19.8H, v29.8H, v27.8H sub v16.8H, v16.8H, v31.8H sub v17.8H, v17.8H, v27.8H add va.8H, va.8H, v29.8H add v19.8H, v19.8H, v25.8H sshr v25.8H, v25.8H, #1 sshr v27.8H, v27.8H, #1 sshr v29.8H, v29.8H, #1 sshr v31.8H, v31.8H, #1 sub v16.8H, v16.8H, v31.8H sub v17.8H, v17.8H, v27.8H add va.8H, va.8H, v29.8H add v19.8H, v19.8H, v25.8H sshr v25.8H, v16.8H, #2 sshr v27.8H, v17.8H, #2 sshr v29.8H, va.8H, #2 sshr v31.8H, v19.8H, #2 sub v19.8H, v19.8H, v25.8H sub va.8H, v27.8H, va.8H add v17.8H, v17.8H, v29.8H add v16.8H, v16.8H, v31.8H .if \pass == 0 sub v31.8H, v24.8H, v19.8H add v24.8H, v24.8H, v19.8H add v25.8H, v26.8H, v18.8H sub v18.8H, v26.8H, v18.8H add v26.8H, v28.8H, v17.8H add v27.8H, v30.8H, v16.8H sub v29.8H, v28.8H, v17.8H sub v28.8H, v30.8H, v16.8H .else sub v31.8H, v24.8H, v19.8H add v24.8H, v24.8H, v19.8H add v25.8H, v26.8H, v30.8H sub v30.8H, v26.8H, v30.8H add v26.8H, v28.8H, v17.8H sub v29.8H, v28.8H, v17.8H add v27.8H, v18.8H, v16.8H sub v28.8H, v18.8H, v16.8H .endif .unreq va .unreq vb .endm function ff_h264_idct8_add_neon, export=1 movi v19.8H, #0 ld1 {v24.8H, v25.8H}, [x1] st1 {v19.8H}, [x1], #16 st1 {v19.8H}, [x1], #16 ld1 {v26.8H, v27.8H}, [x1] st1 {v19.8H}, [x1], #16 st1 {v19.8H}, [x1], #16 ld1 {v28.8H, v29.8H}, [x1] st1 {v19.8H}, [x1], #16 st1 {v19.8H}, [x1], #16 idct8x8_cols 0 transpose_8x8H v24, v25, v26, v27, v28, v29, v18, v31, v6, v7 idct8x8_cols 1 mov x3, x0 srshr v24.8H, v24.8H, #6 ld1 {v0.8B}, [x0], x2 srshr v25.8H, v25.8H, #6 ld1 {v1.8B}, [x0], x2 srshr v26.8H, v26.8H, #6 ld1 {v2.8B}, [x0], x2 srshr v27.8H, v27.8H, #6 ld1 {v3.8B}, [x0], x2 srshr v28.8H, v28.8H, #6 ld1 {v4.8B}, [x0], x2 srshr v29.8H, v29.8H, #6 ld1 {v5.8B}, [x0], x2 srshr v30.8H, v30.8H, #6 ld1 {v6.8B}, [x0], x2 srshr v31.8H, v31.8H, #6 ld1 {v7.8B}, [x0], x2 uaddw v24.8H, v24.8H, v0.8B uaddw v25.8H, v25.8H, v1.8B uaddw v26.8H, v26.8H, v2.8B sqxtun v0.8B, v24.8H uaddw v27.8H, v27.8H, v3.8B sqxtun v1.8B, v25.8H uaddw v28.8H, v28.8H, v4.8B sqxtun v2.8B, v26.8H st1 {v0.8B}, [x3], x2 uaddw v29.8H, v29.8H, v5.8B sqxtun v3.8B, v27.8H st1 {v1.8B}, [x3], x2 uaddw v30.8H, v30.8H, v6.8B sqxtun v4.8B, v28.8H st1 {v2.8B}, [x3], x2 uaddw v31.8H, v31.8H, v7.8B sqxtun v5.8B, v29.8H st1 
{v3.8B}, [x3], x2 sqxtun v6.8B, v30.8H sqxtun v7.8B, v31.8H st1 {v4.8B}, [x3], x2 st1 {v5.8B}, [x3], x2 st1 {v6.8B}, [x3], x2 st1 {v7.8B}, [x3], x2 sub x1, x1, #128 ret endfunc function ff_h264_idct8_dc_add_neon, export=1 mov w3, #0 sxtw x2, w2 ld1r {v31.8H}, [x1] strh w3, [x1] ld1 {v0.8B}, [x0], x2 srshr v31.8H, v31.8H, #6 ld1 {v1.8B}, [x0], x2 ld1 {v2.8B}, [x0], x2 uaddw v24.8H, v31.8H, v0.8B ld1 {v3.8B}, [x0], x2 uaddw v25.8H, v31.8H, v1.8B ld1 {v4.8B}, [x0], x2 uaddw v26.8H, v31.8H, v2.8B ld1 {v5.8B}, [x0], x2 uaddw v27.8H, v31.8H, v3.8B ld1 {v6.8B}, [x0], x2 uaddw v28.8H, v31.8H, v4.8B ld1 {v7.8B}, [x0], x2 uaddw v29.8H, v31.8H, v5.8B uaddw v30.8H, v31.8H, v6.8B uaddw v31.8H, v31.8H, v7.8B sqxtun v0.8B, v24.8H sqxtun v1.8B, v25.8H sqxtun v2.8B, v26.8H sqxtun v3.8B, v27.8H sub x0, x0, x2, lsl #3 st1 {v0.8B}, [x0], x2 sqxtun v4.8B, v28.8H st1 {v1.8B}, [x0], x2 sqxtun v5.8B, v29.8H st1 {v2.8B}, [x0], x2 sqxtun v6.8B, v30.8H st1 {v3.8B}, [x0], x2 sqxtun v7.8B, v31.8H st1 {v4.8B}, [x0], x2 st1 {v5.8B}, [x0], x2 st1 {v6.8B}, [x0], x2 st1 {v7.8B}, [x0], x2 ret endfunc function ff_h264_idct8_add4_neon, export=1 mov x12, x30 mov x6, x0 mov x5, x1 mov x1, x2 mov w2, w3 movrel x7, scan8 mov w10, #16 movrel x13, X(ff_h264_idct8_dc_add_neon) movrel x14, X(ff_h264_idct8_add_neon) 1: ldrb w9, [x7], #4 ldrsw x0, [x5], #16 ldrb w9, [x4, w9, UXTW] subs w9, w9, #1 b.lt 2f ldrsh w11, [x1] add x0, x6, x0 ccmp w11, #0, #4, eq csel x15, x13, x14, ne blr x15 2: subs w10, w10, #4 add x1, x1, #128 b.ne 1b ret x12 endfunc const scan8 .byte 4+ 1*8, 5+ 1*8, 4+ 2*8, 5+ 2*8 .byte 6+ 1*8, 7+ 1*8, 6+ 2*8, 7+ 2*8 .byte 4+ 3*8, 5+ 3*8, 4+ 4*8, 5+ 4*8 .byte 6+ 3*8, 7+ 3*8, 6+ 4*8, 7+ 4*8 .byte 4+ 6*8, 5+ 6*8, 4+ 7*8, 5+ 7*8 .byte 6+ 6*8, 7+ 6*8, 6+ 7*8, 7+ 7*8 .byte 4+ 8*8, 5+ 8*8, 4+ 9*8, 5+ 9*8 .byte 6+ 8*8, 7+ 8*8, 6+ 9*8, 7+ 9*8 .byte 4+11*8, 5+11*8, 4+12*8, 5+12*8 .byte 6+11*8, 7+11*8, 6+12*8, 7+12*8 .byte 4+13*8, 5+13*8, 4+14*8, 5+14*8 .byte 6+13*8, 7+13*8, 6+14*8, 7+14*8 endconst
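The add16/add16intra/add8 wrappers above are pure dispatchers: scan8 maps a block index to a position in the nnzc (non-zero count) array, and the count picks between the full IDCT-and-add and the cheaper DC-only path. The equivalent C control flow for add16, with illustrative names (this mirrors the branch/ccmp/csel sequence in the assembly):

#include <stdint.h>

typedef void (*idct_fn)(uint8_t *dst, int16_t *block, int stride);

static void idct_add16(uint8_t *dst, const int *block_offset,
                       int16_t *block, int stride,
                       const uint8_t *nnzc, const uint8_t scan8[16],
                       idct_fn idct_add, idct_fn idct_dc_add)
{
    for (int i = 0; i < 16; i++) {
        int nnz = nnzc[scan8[i]];
        if (!nnz)
            continue;                       /* all-zero block: skip   */
        if (nnz == 1 && block[i * 16])      /* lone DC coefficient    */
            idct_dc_add(dst + block_offset[i], block + i * 16, stride);
        else                                /* general 4x4 IDCT + add */
            idct_add(dst + block_offset[i], block + i * 16, stride);
    }
}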
Akagi201/ffmpeg-xcode
2,770
ffmpeg-3.0.2/libavcodec/aarch64/fmtconvert_neon.S
/* * ARM NEON optimised Format Conversion Utils * Copyright (c) 2008 Mans Rullgard <mans@mansr.com> * Copyright (c) 2015 Janne Grunau <janne-libav@jannau.net> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "config.h" #include "libavutil/aarch64/asm.S" function ff_int32_to_float_fmul_scalar_neon, export=1 ld1 {v1.4s,v2.4s}, [x1], #32 scvtf v1.4s, v1.4s scvtf v2.4s, v2.4s 1: subs w2, w2, #8 fmul v3.4s, v1.4s, v0.s[0] fmul v4.4s, v2.4s, v0.s[0] b.le 2f ld1 {v1.4s,v2.4s}, [x1], #32 st1 {v3.4s,v4.4s}, [x0], #32 scvtf v1.4s, v1.4s scvtf v2.4s, v2.4s b 1b 2: st1 {v3.4s,v4.4s}, [x0] ret endfunc function ff_int32_to_float_fmul_array8_neon, export=1 lsr w4, w4, #3 subs w5, w4, #1 b.eq 1f 2: ld1 {v0.4s,v1.4s}, [x2], #32 ld1 {v2.4s,v3.4s}, [x2], #32 scvtf v0.4s, v0.4s scvtf v1.4s, v1.4s ld1 {v16.2s}, [x3], #8 scvtf v2.4s, v2.4s scvtf v3.4s, v3.4s fmul v4.4s, v0.4s, v16.s[0] fmul v5.4s, v1.4s, v16.s[0] fmul v6.4s, v2.4s, v16.s[1] fmul v7.4s, v3.4s, v16.s[1] st1 {v4.4s,v5.4s}, [x1], #32 st1 {v6.4s,v7.4s}, [x1], #32 subs w5, w5, #2 b.gt 2b b.eq 1f ret 1: ld1 {v0.4s,v1.4s}, [x2] ld1 {v16.s}[0], [x3] scvtf v0.4s, v0.4s scvtf v1.4s, v1.4s fmul v4.4s, v0.4s, v16.s[0] fmul v5.4s, v1.4s, v16.s[0] st1 {v4.4s,v5.4s}, [x1] ret endfunc
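Both conversions above are simple enough that scalar equivalents make the contract obvious: convert int32 samples to float and scale, with the _array8 variant switching the scale factor every eight samples. A sketch, assuming len is a multiple of 8 as the NEON loops require; the library entry points may carry an extra context argument, omitted here, and the names are illustrative:

#include <stdint.h>

static void int32_to_float_fmul_scalar(float *dst, const int32_t *src,
                                       float mul, int len)
{
    for (int i = 0; i < len; i++)
        dst[i] = src[i] * mul;
}

static void int32_to_float_fmul_array8(float *dst, const int32_t *src,
                                       const float *mul, int len)
{
    /* one scale factor per group of eight samples */
    for (int i = 0; i < len; i += 8)
        int32_to_float_fmul_scalar(dst + i, src + i, mul[i / 8], 8);
}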
Akagi201/ffmpeg-xcode
14,212
ffmpeg-3.0.2/libavcodec/aarch64/mdct_neon.S
/* * AArch64 NEON optimised MDCT * Copyright (c) 2009 Mans Rullgard <mans@mansr.com> * Copyright (c) 2014 Janne Grunau <janne-libav@jannau.net> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/aarch64/asm.S" function ff_imdct_half_neon, export=1 sub sp, sp, #32 stp x19, x20, [sp] str x30, [sp, #16] mov x12, #1 ldr w14, [x0, #28] // mdct_bits ldr x4, [x0, #32] // tcos ldr x3, [x0, #8] // revtab lsl x12, x12, x14 // n = 1 << nbits lsr x14, x12, #2 // n4 = n >> 2 add x7, x2, x12, lsl #1 mov x12, #-16 sub x7, x7, #16 ld2 {v16.2s,v17.2s}, [x7], x12 // d16=x,n1 d17=x,n0 ld2 {v0.2s,v1.2s}, [x2], #16 // d0 =m0,x d1 =m1,x rev64 v17.2s, v17.2s ld2 {v2.2s,v3.2s}, [x4], #16 // d2=c0,c1 d3=s0,s2 fmul v6.2s, v17.2s, v2.2s fmul v7.2s, v0.2s, v2.2s 1: subs x14, x14, #2 ldr w6, [x3], #4 fmul v4.2s, v0.2s, v3.2s fmul v5.2s, v17.2s, v3.2s fsub v4.2s, v6.2s, v4.2s fadd v5.2s, v5.2s, v7.2s ubfm x8, x6, #16, #31 ubfm x6, x6, #0, #15 add x8, x1, x8, lsl #3 add x6, x1, x6, lsl #3 b.eq 2f ld2 {v16.2s,v17.2s}, [x7], x12 ld2 {v0.2s,v1.2s}, [x2], #16 rev64 v17.2s, v17.2s ld2 {v2.2s,v3.2s}, [x4], #16 // d2=c0,c1 d3=s0,s2 fmul v6.2s, v17.2s, v2.2s fmul v7.2s, v0.2s, v2.2s st2 {v4.s,v5.s}[0], [x6] st2 {v4.s,v5.s}[1], [x8] b 1b 2: st2 {v4.s,v5.s}[0], [x6] st2 {v4.s,v5.s}[1], [x8] mov x19, x0 mov x20, x1 bl X(ff_fft_calc_neon) mov x12, #1 ldr w14, [x19, #28] // mdct_bits ldr x4, [x19, #32] // tcos lsl x12, x12, x14 // n = 1 << nbits lsr x14, x12, #3 // n8 = n >> 3 add x4, x4, x14, lsl #3 add x6, x20, x14, lsl #3 sub x1, x4, #16 sub x3, x6, #16 mov x7, #-16 mov x8, x6 mov x0, x3 ld2 {v0.2s,v1.2s}, [x3], x7 // d0 =i1,r1 d1 =i0,r0 ld2 {v20.2s,v21.2s},[x6], #16 // d20=i2,r2 d21=i3,r3 ld2 {v16.2s,v17.2s},[x1], x7 // d16=c1,c0 d18=s1,s0 3: subs x14, x14, #2 fmul v7.2s, v0.2s, v17.2s ld2 {v18.2s,v19.2s},[x4], #16 // d17=c2,c3 d19=s2,s3 fmul v4.2s, v1.2s, v17.2s fmul v6.2s, v21.2s, v19.2s fmul v5.2s, v20.2s, v19.2s fmul v22.2s, v1.2s, v16.2s fmul v23.2s, v21.2s, v18.2s fmul v24.2s, v0.2s, v16.2s fmul v25.2s, v20.2s, v18.2s fadd v7.2s, v7.2s, v22.2s fadd v5.2s, v5.2s, v23.2s fsub v4.2s, v4.2s, v24.2s fsub v6.2s, v6.2s, v25.2s b.eq 4f ld2 {v0.2s,v1.2s}, [x3], x7 ld2 {v20.2s,v21.2s},[x6], #16 ld2 {v16.2s,v17.2s},[x1], x7 // d16=c1,c0 d18=s1,s0 rev64 v5.2s, v5.2s rev64 v7.2s, v7.2s st2 {v4.2s,v5.2s}, [x0], x7 st2 {v6.2s,v7.2s}, [x8], #16 b 3b 4: rev64 v5.2s, v5.2s rev64 v7.2s, v7.2s st2 {v4.2s,v5.2s}, [x0] st2 {v6.2s,v7.2s}, [x8] ldp x19, x20, [sp] ldr x30, [sp, #16] add sp, sp, #32 ret endfunc function ff_imdct_calc_neon, export=1 sub sp, sp, #32 stp x19, x20, [sp] str x30, [sp, #16] ldr w3, [x0, #28] // mdct_bits mov x19, #1 mov x20, x1 lsl x19, x19, x3 add x1, x1, x19 bl X(ff_imdct_half_neon) add x0, x20, x19, lsl #2 add x1, x20, x19, lsl #1 sub x0, x0, #8 sub x2, x1, #16 mov x3, #-16 mov x6, #-8 1: ld1 {v0.4s}, [x2], x3 prfum 
pldl1keep, [x0, #-16] rev64 v0.4s, v0.4s ld1 {v2.2s,v3.2s}, [x1], #16 fneg v4.4s, v0.4s prfum pldl1keep, [x2, #-16] rev64 v2.2s, v2.2s rev64 v3.2s, v3.2s ext v4.16b, v4.16b, v4.16b, #8 st1 {v2.2s}, [x0], x6 st1 {v3.2s}, [x0], x6 st1 {v4.4s}, [x20], #16 subs x19, x19, #16 b.gt 1b ldp x19, x20, [sp], #16 ldr x30, [sp], #16 ret endfunc function ff_mdct_calc_neon, export=1 sub sp, sp, #32 stp x19, x20, [sp] str x30, [sp, #16] mov x12, #1 ldr w14, [x0, #28] // mdct_bits ldr x4, [x0, #32] // tcos ldr x3, [x0, #8] // revtab lsl x14, x12, x14 // n = 1 << nbits add x7, x2, x14 // in4u sub x9, x7, #16 // in4d add x2, x7, x14, lsl #1 // in3u add x8, x9, x14, lsl #1 // in3d add x5, x4, x14, lsl #1 sub x5, x5, #16 sub x3, x3, #4 mov x12, #-16 lsr x13, x14, #1 ld2 {v16.2s,v17.2s}, [x9], x12 // in0u0,in0u1 in4d1,in4d0 ld2 {v18.2s,v19.2s}, [x8], x12 // in2u0,in2u1 in3d1,in3d0 ld2 {v0.2s, v1.2s}, [x7], #16 // in4u0,in4u1 in2d1,in2d0 rev64 v17.2s, v17.2s // in4d0,in4d1 in3d0,in3d1 rev64 v19.2s, v19.2s // in4d0,in4d1 in3d0,in3d1 ld2 {v2.2s, v3.2s}, [x2], #16 // in3u0,in3u1 in1d1,in1d0 fsub v0.2s, v17.2s, v0.2s // in4d-in4u I ld2 {v20.2s,v21.2s}, [x4], #16 // c0,c1 s0,s1 rev64 v1.2s, v1.2s // in2d0,in2d1 in1d0,in1d1 rev64 v3.2s, v3.2s // in2d0,in2d1 in1d0,in1d1 ld2 {v30.2s,v31.2s}, [x5], x12 // c2,c3 s2,s3 fadd v2.2s, v2.2s, v19.2s // in3u+in3d -R fsub v16.2s, v16.2s, v1.2s // in0u-in2d R fadd v18.2s, v18.2s, v3.2s // in2u+in1d -I 1: fmul v7.2s, v0.2s, v21.2s // I*s ldr w10, [x3, x13] fmul v6.2s, v2.2s, v20.2s // -R*c ldr w6, [x3, #4]! fmul v4.2s, v2.2s, v21.2s // -R*s fmul v5.2s, v0.2s, v20.2s // I*c fmul v24.2s, v16.2s, v30.2s // R*c fmul v25.2s, v18.2s, v31.2s // -I*s fmul v22.2s, v16.2s, v31.2s // R*s fmul v23.2s, v18.2s, v30.2s // I*c subs x14, x14, #16 subs x13, x13, #8 fsub v6.2s, v6.2s, v7.2s // -R*c-I*s fadd v7.2s, v4.2s, v5.2s // -R*s+I*c fsub v24.2s, v25.2s, v24.2s // I*s-R*c fadd v25.2s, v22.2s, v23.2s // R*s-I*c b.eq 1f mov x12, #-16 ld2 {v16.2s,v17.2s}, [x9], x12 // in0u0,in0u1 in4d1,in4d0 ld2 {v18.2s,v19.2s}, [x8], x12 // in2u0,in2u1 in3d1,in3d0 fneg v7.2s, v7.2s // R*s-I*c ld2 {v0.2s, v1.2s}, [x7], #16 // in4u0,in4u1 in2d1,in2d0 rev64 v17.2s, v17.2s // in4d0,in4d1 in3d0,in3d1 rev64 v19.2s, v19.2s // in4d0,in4d1 in3d0,in3d1 ld2 {v2.2s, v3.2s}, [x2], #16 // in3u0,in3u1 in1d1,in1d0 fsub v0.2s, v17.2s, v0.2s // in4d-in4u I ld2 {v20.2s,v21.2s}, [x4], #16 // c0,c1 s0,s1 rev64 v1.2s, v1.2s // in2d0,in2d1 in1d0,in1d1 rev64 v3.2s, v3.2s // in2d0,in2d1 in1d0,in1d1 ld2 {v30.2s,v31.2s}, [x5], x12 // c2,c3 s2,s3 fadd v2.2s, v2.2s, v19.2s // in3u+in3d -R fsub v16.2s, v16.2s, v1.2s // in0u-in2d R fadd v18.2s, v18.2s, v3.2s // in2u+in1d -I ubfm x12, x6, #16, #31 ubfm x6, x6, #0, #15 add x12, x1, x12, lsl #3 add x6, x1, x6, lsl #3 st2 {v6.s,v7.s}[0], [x6] st2 {v6.s,v7.s}[1], [x12] ubfm x6, x10, #16, #31 ubfm x10, x10, #0, #15 add x6 , x1, x6, lsl #3 add x10, x1, x10, lsl #3 st2 {v24.s,v25.s}[0], [x10] st2 {v24.s,v25.s}[1], [x6] b 1b 1: fneg v7.2s, v7.2s // R*s-I*c ubfm x12, x6, #16, #31 ubfm x6, x6, #0, #15 add x12, x1, x12, lsl #3 add x6, x1, x6, lsl #3 st2 {v6.s,v7.s}[0], [x6] st2 {v6.s,v7.s}[1], [x12] ubfm x6, x10, #16, #31 ubfm x10, x10, #0, #15 add x6 , x1, x6, lsl #3 add x10, x1, x10, lsl #3 st2 {v24.s,v25.s}[0], [x10] st2 {v24.s,v25.s}[1], [x6] mov x19, x0 mov x20, x1 bl X(ff_fft_calc_neon) mov x12, #1 ldr w14, [x19, #28] // mdct_bits ldr x4, [x19, #32] // tcos lsl x12, x12, x14 // n = 1 << nbits lsr x14, x12, #3 // n8 = n >> 3 add x4, x4, x14, lsl #3 add x6, x20, x14, lsl #3 sub x1, x4, #16 sub x3, x6, 
#16 mov x7, #-16 mov x8, x6 mov x0, x3 ld2 {v0.2s,v1.2s}, [x3], x7 // d0 =r1,i1 d1 =r0,i0 ld2 {v20.2s,v21.2s}, [x6], #16 // d20=r2,i2 d21=r3,i3 ld2 {v16.2s,v17.2s}, [x1], x7 // c1,c0 s1,s0 1: subs x14, x14, #2 fmul v7.2s, v0.2s, v17.2s // r1*s1,r0*s0 ld2 {v18.2s,v19.2s}, [x4], #16 // c2,c3 s2,s3 fmul v4.2s, v1.2s, v17.2s // i1*s1,i0*s0 fmul v6.2s, v21.2s, v19.2s // i2*s2,i3*s3 fmul v5.2s, v20.2s, v19.2s // r2*s2,r3*s3 fmul v24.2s, v0.2s, v16.2s // r1*c1,r0*c0 fmul v25.2s, v20.2s, v18.2s // r2*c2,r3*c3 fmul v22.2s, v21.2s, v18.2s // i2*c2,i3*c3 fmul v23.2s, v1.2s, v16.2s // i1*c1,i0*c0 fadd v4.2s, v4.2s, v24.2s // i1*s1+r1*c1,i0*s0+r0*c0 fadd v6.2s, v6.2s, v25.2s // i2*s2+r2*c2,i3*s3+r3*c3 fsub v5.2s, v22.2s, v5.2s // i2*c2-r2*s2,i3*c3-r3*s3 fsub v7.2s, v23.2s, v7.2s // i1*c1-r1*s1,i0*c0-r0*s0 fneg v4.2s, v4.2s fneg v6.2s, v6.2s b.eq 1f ld2 {v0.2s, v1.2s}, [x3], x7 ld2 {v20.2s,v21.2s}, [x6], #16 ld2 {v16.2s,v17.2s}, [x1], x7 // c1,c0 s1,s0 rev64 v5.2s, v5.2s rev64 v7.2s, v7.2s st2 {v4.2s,v5.2s}, [x0], x7 st2 {v6.2s,v7.2s}, [x8], #16 b 1b 1: rev64 v5.2s, v5.2s rev64 v7.2s, v7.2s st2 {v4.2s,v5.2s}, [x0] st2 {v6.2s,v7.2s}, [x8] ldp x19, x20, [sp], #16 ldr x30, [sp], #16 ret endfunc
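ff_imdct_half_neon above follows the usual three-step half-IMDCT: pre-rotate pairs of input samples by the twiddles while scattering through the bit-reversal table, run ff_fft_calc_neon in place, then post-rotate symmetrically from both ends of the buffer. A scalar sketch of the pre-rotation, assuming separate cosine/sine tables (the NEON code loads them interleaved from tcos) and illustrative struct and names; n is the full MDCT size, so the routine consumes n/2 floats and writes n/4 complex values:

#include <stdint.h>

typedef struct { float re, im; } cplx;

static void imdct_half_prerotate(cplx *z, const float *input,
                                 const float *tcos, const float *tsin,
                                 const uint16_t *revtab, int n)
{
    const float *in1 = input;              /* walks forward  */
    const float *in2 = input + n / 2 - 1;  /* walks backward */

    for (int k = 0; k < n / 4; k++) {
        int j = revtab[k];
        /* complex multiply (in2 + i*in1) by (tcos[k] + i*tsin[k]) */
        z[j].re = *in2 * tcos[k] - *in1 * tsin[k];
        z[j].im = *in2 * tsin[k] + *in1 * tcos[k];
        in1 += 2;
        in2 -= 2;
    }
}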
Akagi201/ffmpeg-xcode
22,776
ffmpeg-3.0.2/libavcodec/aarch64/imdct15_neon.S
/* * Copyright (c) 2014 Janne Grunau <janne-libav@jannau.net> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/aarch64/asm.S" #include "asm-offsets.h" .macro shuffle a, b, c, d const shuffle_\a\b\c\d, align=4 .byte (\a * 4), (\a * 4 + 1), (\a * 4 + 2), (\a * 4 + 3) .byte (\b * 4), (\b * 4 + 1), (\b * 4 + 2), (\b * 4 + 3) .byte (\c * 4), (\c * 4 + 1), (\c * 4 + 2), (\c * 4 + 3) .byte (\d * 4), (\d * 4 + 1), (\d * 4 + 2), (\d * 4 + 3) endconst .endm shuffle 0, 2, 1, 3 shuffle 1, 0, 3, 2 shuffle 2, 3, 0, 1 shuffle 3, 1, 2, 0 function fft5_neon lsl x2, x2, #3 ld1 {v24.2s}, [x1], x2 ld2 {v25.s,v26.s}[0], [x1], x2 ld2 {v25.s,v26.s}[1], [x1], x2 ld2 {v25.s,v26.s}[2], [x1], x2 ld2 {v25.s,v26.s}[3], [x1] dup v6.4s, v24.s[0] dup v7.4s, v24.s[1] faddp v0.4s, v25.4s, v26.4s // z[][0], z[][3] fmul v16.4s, v25.4s, v15.s[0] // rr fmul v17.4s, v25.4s, v15.s[1] // ri fmul v18.4s, v26.4s, v15.s[0] // ir fmul v19.4s, v26.4s, v15.s[1] // ii faddp v0.4s, v0.4s, v0.4s // z[][1], z[][2] fmul v20.4s, v25.4s, v15.s[2] // rr fmul v21.4s, v25.4s, v15.s[3] // ri fmul v22.4s, v26.4s, v15.s[2] // ir fmul v23.4s, v26.4s, v15.s[3] // ii fadd v0.2s, v24.2s, v0.2s // out[0] // z[0123][0], z[0123][3] fsub v24.4s, v16.4s, v19.4s // (c).re = rr - ii; fadd v27.4s, v16.4s, v19.4s // (d).re = rr + ii; ld1 {v16.16b}, [x11] ld1 {v19.16b}, [x14] fadd v28.4s, v17.4s, v18.4s // (c).im = ri + ir; fsub v31.4s, v18.4s, v17.4s // (d).im = -ri + ir; ld1 {v17.16b}, [x12] // z[0123][1], z[0123][2] fsub v25.4s, v20.4s, v23.4s // (c).re = rr - ii; fadd v26.4s, v20.4s, v23.4s // (d).re = rr + ii; ld1 {v18.16b}, [x13] fadd v29.4s, v21.4s, v22.4s // (c).im = ri + ir; fsub v30.4s, v22.4s, v21.4s // (d).im = -ri + ir; //real tbl v20.16b, {v24.16b}, v16.16b tbl v21.16b, {v25.16b}, v17.16b tbl v22.16b, {v26.16b}, v18.16b tbl v23.16b, {v27.16b}, v19.16b //imag tbl v16.16b, {v28.16b}, v16.16b tbl v17.16b, {v29.16b}, v17.16b tbl v18.16b, {v30.16b}, v18.16b tbl v19.16b, {v31.16b}, v19.16b fadd v6.4s, v6.4s, v20.4s fadd v22.4s, v22.4s, v23.4s fadd v7.4s, v7.4s, v16.4s fadd v18.4s, v18.4s, v19.4s fadd v21.4s, v21.4s, v22.4s fadd v17.4s, v17.4s, v18.4s fadd v6.4s, v6.4s, v21.4s fadd v7.4s, v7.4s, v17.4s ret endfunc function fft15_neon mov x8, x1 mov x9, x30 add x2, x3, x3, lsl #1 // 3 * stride add x1, x8, x3, lsl #3 // in + 1 * stride bl fft5_neon mov v1.8b, v0.8b mov v2.16b, v6.16b mov v3.16b, v7.16b add x1, x8, x3, lsl #4 // in + 2 * stride add x2, x3, x3, lsl #1 // 3 * stride bl fft5_neon zip1 v1.4s, v1.4s, v0.4s mov v4.16b, v6.16b mov v5.16b, v7.16b mov x1, x8 // in + 0 * stride add x2, x3, x3, lsl #1 // 3 * stride bl fft5_neon faddp v20.4s, v1.4s, v1.4s ext v18.16b, v8.16b, v8.16b, #4 ext v19.16b, v9.16b, v9.16b, #4 mov v16.16b, v6.16b mov v17.16b, v7.16b fadd v20.2s, v20.2s, v0.2s uzp1 v18.4s, v18.4s, v10.4s // exp[2,4,6,8].re uzp1 v19.4s, v19.4s, 
v11.4s // exp[2,4,6,8].im st1 {v20.2s}, [x0], #8 // out[0] fmla v16.4s, v2.4s, v8.4s fmls v16.4s, v3.4s, v9.4s fmla v17.4s, v2.4s, v9.4s fmla v17.4s, v3.4s, v8.4s fmla v16.4s, v4.4s, v18.4s fmls v16.4s, v5.4s, v19.4s fmla v17.4s, v4.4s, v19.4s fmla v17.4s, v5.4s, v18.4s zip1 v18.4s, v16.4s, v17.4s zip2 v19.4s, v16.4s, v17.4s rev64 v31.4s, v14.4s trn1 v28.2d, v1.2d, v1.2d trn2 v29.2d, v1.2d, v1.2d zip1 v30.2d, v14.2d, v31.2d zip2 v31.2d, v14.2d, v31.2d st1 {v18.4s,v19.4s}, [x0], #32 // out[1-4] fmul v16.4s, v28.4s, v30.4s fmul v17.4s, v29.4s, v30.4s fmls v16.4s, v29.4s, v31.4s fmla v17.4s, v28.4s, v31.4s faddp v16.4s, v16.4s, v16.4s faddp v17.4s, v17.4s, v17.4s zip1 v18.2s, v16.2s, v17.2s zip2 v19.2s, v16.2s, v17.2s fadd v18.2s, v18.2s, v0.2s fadd v0.2s, v19.2s, v0.2s ext v30.16b, v12.16b, v12.16b, #4 ext v31.16b, v13.16b, v13.16b, #4 mov v16.16b, v6.16b mov v17.16b, v7.16b uzp1 v30.4s, v30.4s, v8.4s uzp1 v31.4s, v31.4s, v9.4s st1 {v18.2s}, [x0], #8 // out[5] fmla v16.4s, v2.4s, v10.4s fmls v16.4s, v3.4s, v11.4s fmla v17.4s, v2.4s, v11.4s fmla v17.4s, v3.4s, v10.4s fmla v16.4s, v4.4s, v30.4s fmls v16.4s, v5.4s, v31.4s fmla v17.4s, v4.4s, v31.4s fmla v17.4s, v5.4s, v30.4s zip1 v18.4s, v16.4s, v17.4s zip2 v19.4s, v16.4s, v17.4s ext v30.16b, v10.16b, v10.16b, #4 ext v31.16b, v11.16b, v11.16b, #4 fmla v6.4s, v2.4s, v12.4s fmls v6.4s, v3.4s, v13.4s st1 {v18.4s,v19.4s}, [x0], #32 // out[6-9] uzp1 v30.4s, v30.4s, v12.4s uzp1 v31.4s, v31.4s, v13.4s fmla v7.4s, v2.4s, v13.4s fmla v7.4s, v3.4s, v12.4s st1 {v0.2s}, [x0], #8 // out[10] fmla v6.4s, v4.4s, v30.4s fmls v6.4s, v5.4s, v31.4s fmla v7.4s, v4.4s, v31.4s fmla v7.4s, v5.4s, v30.4s zip1 v18.4s, v6.4s, v7.4s zip2 v19.4s, v6.4s, v7.4s st1 {v18.4s,v19.4s}, [x0], #32 // out[11-14] ret x9 endfunc // x0: out, x1: out+len2, x2: exptab, x3: len2 function fft15_pass ands x6, x3, #3 mov x4, x0 mov x5, x1 b.eq 9f ld1 {v0.2s}, [x0], #8 ld1 {v1.2s}, [x1], #8 sub x3, x3, x6 subs x6, x6, #1 fadd v2.2s, v0.2s, v1.2s fsub v3.2s, v0.2s, v1.2s add x2, x2, #8 st1 {v2.2s}, [x4], #8 st1 {v3.2s}, [x5], #8 b.eq 9f 1: subs x6, x6, #1 ldp s4, s5, [x2], #8 ldp s2, s3, [x1], #8 ldp s0, s1, [x0], #8 fmul s6, s2, s4 fmul s7, s2, s5 fmls s6, s3, v5.s[0] fmla s7, s3, v4.s[0] fsub s2, s0, s6 fsub s3, s1, s7 fadd s0, s0, s6 fadd s1, s1, s7 stp s2, s3, [x5], #8 stp s0, s1, [x4], #8 b.gt 1b 9: ld1 {v4.4s,v5.4s}, [x2], #32 ld2 {v2.4s,v3.4s}, [x1], #32 uzp1 v6.4s, v4.4s, v5.4s uzp2 v7.4s, v4.4s, v5.4s ld2 {v0.4s,v1.4s}, [x0], #32 8: subs x3, x3, #8 fmul v4.4s, v2.4s, v6.4s fmul v5.4s, v2.4s, v7.4s b.lt 4f ld1 {v18.4s,v19.4s}, [x2], #32 fmls v4.4s, v3.4s, v7.4s fmla v5.4s, v3.4s, v6.4s ld2 {v22.4s,v23.4s}, [x1], #32 fsub v2.4s, v0.4s, v4.4s fadd v0.4s, v0.4s, v4.4s fsub v3.4s, v1.4s, v5.4s fadd v1.4s, v1.4s, v5.4s uzp1 v16.4s, v18.4s, v19.4s uzp2 v17.4s, v18.4s, v19.4s st2 {v2.4s,v3.4s}, [x5], #32 st2 {v0.4s,v1.4s}, [x4], #32 ld2 {v20.4s,v21.4s}, [x0], #32 fmul v18.4s, v22.4s, v16.4s fmul v19.4s, v22.4s, v17.4s b.eq 0f ld1 {v4.4s,v5.4s}, [x2], #32 fmls v18.4s, v23.4s, v17.4s fmla v19.4s, v23.4s, v16.4s ld2 {v2.4s,v3.4s}, [x1], #32 fsub v22.4s, v20.4s, v18.4s fadd v20.4s, v20.4s, v18.4s fsub v23.4s, v21.4s, v19.4s fadd v21.4s, v21.4s, v19.4s uzp1 v6.4s, v4.4s, v5.4s uzp2 v7.4s, v4.4s, v5.4s st2 {v22.4s,v23.4s}, [x5], #32 st2 {v20.4s,v21.4s}, [x4], #32 ld2 {v0.4s,v1.4s}, [x0], #32 b 8b 4: fmls v4.4s, v3.4s, v7.4s fmla v5.4s, v3.4s, v6.4s fsub v2.4s, v0.4s, v4.4s fadd v0.4s, v0.4s, v4.4s fsub v3.4s, v1.4s, v5.4s fadd v1.4s, v1.4s, v5.4s st2 {v2.4s,v3.4s}, [x5], #32 st2 {v0.4s,v1.4s}, 
[x4], #32 ret 0: fmls v18.4s, v23.4s, v17.4s fmla v19.4s, v23.4s, v16.4s fsub v22.4s, v20.4s, v18.4s fadd v20.4s, v20.4s, v18.4s fsub v23.4s, v21.4s, v19.4s fadd v21.4s, v21.4s, v19.4s st2 {v22.4s,v23.4s}, [x5], #32 st2 {v20.4s,v21.4s}, [x4], #32 ret endfunc function fft30_neon, align=6 sub sp, sp, #0x20 stp x20, x21, [sp] stp x22, x30, [sp, #0x10] mov x21, x1 mov x22, x2 mov x20, x4 mov x0, x21 mov x1, x22 lsl x3, x20, #1 bl fft15_neon add x0, x21, #15*8 add x1, x22, x20, lsl #3 lsl x3, x20, #1 bl fft15_neon ldr x2, [x10, #(CELT_EXPTAB + 8)] // s->exptab[1] add x0, x21, #0 add x1, x21, #15*8 mov x3, #15 ldp x20, x21, [sp] ldp x22, x30, [sp, #0x10] add sp, sp, #0x20 b fft15_pass endfunc .macro def_fft n, n2 function fft\n\()_neon, align=6 sub sp, sp, #0x30 stp x20, x21, [sp] stp x22, x30, [sp, #0x10] stp x23, x24, [sp, #0x20] mov x21, x1 mov x22, x2 mov x23, x3 mov x20, x4 sub x3, x3, #1 lsl x4, x4, #1 bl fft\n2\()_neon add x1, x21, #(\n2 * 8) add x2, x22, x20, lsl #3 sub x3, x23, #1 lsl x4, x20, #1 bl fft\n2\()_neon add x5, x10, #CELT_EXPTAB mov x0, x21 ldr x2, [x5, x23, lsl #3] // s->exptab[N] add x1, x21, #(\n2 * 8) mov x3, #\n2 ldp x20, x21, [sp] ldp x22, x30, [sp, #0x10] ldp x23, x24, [sp, #0x20] add sp, sp, #0x30 b fft15_pass endfunc .endm def_fft 60, 30 def_fft 120, 60 def_fft 240, 120 def_fft 480, 240 def_fft 960, 480 function fft_b15_calc_neon sub sp, sp, #0x50 ldr x8, [x0, #CELT_EXPTAB] // s->exptab[0] movrel x6, fact5 movrel x11, shuffle_0213 movrel x12, shuffle_1032 movrel x13, shuffle_2301 movrel x14, shuffle_3120 add x8, x8, #8 movrel x5, fft_tab_neon stp x20, x30, [sp] stp d8, d9, [sp, #0x10] stp d10, d11, [sp, #0x20] stp d12, d13, [sp, #0x30] stp d14, d15, [sp, #0x40] ld1 {v15.4s}, [x6] ld1 {v0.4s,v1.4s}, [x8], #32 ld1 {v6.2s}, [x8], #8 ld1 {v2.4s,v3.4s}, [x8], #32 ld1 {v7.2s}, [x8], #8 ld1 {v4.4s,v5.4s}, [x8], #32 uzp1 v8.4s, v0.4s, v1.4s // exp[ 1 - 4].re uzp2 v9.4s, v0.4s, v1.4s // exp[ 1 - 4].im uzp1 v10.4s, v2.4s, v3.4s // exp[ 6 - 9].re uzp2 v11.4s, v2.4s, v3.4s // exp[ 6 - 9].im uzp1 v12.4s, v4.4s, v5.4s // exp[11 - 14].re uzp2 v13.4s, v4.4s, v5.4s // exp[11 - 14].im zip1 v14.4s, v6.4s, v7.4s // exp[5,10].re/exp[5,10].im add x5, x5, x3, lsl #3 ldr x5, [x5] mov x10, x0 blr x5 ldp x20, x30, [sp] ldp d8, d9, [sp, #0x10] ldp d10, d11, [sp, #0x20] ldp d12, d13, [sp, #0x30] ldp d14, d15, [sp, #0x40] add sp, sp, #0x50 ret endfunc const fft_tab_neon, relocate=1 .quad fft15_neon .quad fft30_neon .quad fft60_neon .quad fft120_neon .quad fft240_neon .quad fft480_neon .quad fft960_neon endconst function ff_celt_imdct_half_neon, export=1 sub sp, sp, #0x20 stp x21, x30, [sp] str s0, [sp, #0x10] ldp w5, w6, [x0, #CELT_LEN2] // CELT_LEN4 mov x10, x0 mov x21, x1 sub w5, w5, #1 lsl x7, x3, #3 // 2 * stride * sizeof(float) sub x8, xzr, x3, lsl #3 // -2 * stride * sizeof(float) mul x5, x5, x3 ldp x9, x10, [x0, #CELT_TMP] // CELT_TWIDDLE ldr w3, [x0, #CELT_FFT_N] add x5, x2, x5, lsl #2 mov x11, x9 sub w6, w6, #4 ld1 {v0.s}[0], [x5], x8 ld1 {v1.s}[0], [x2], x7 ld1 {v4.4s,v5.4s}, [x10], #32 ld1 {v0.s}[1], [x5], x8 ld1 {v1.s}[1], [x2], x7 uzp1 v2.4s, v4.4s, v5.4s ld1 {v0.s}[2], [x5], x8 ld1 {v1.s}[2], [x2], x7 uzp2 v3.4s, v4.4s, v5.4s ld1 {v0.s}[3], [x5], x8 ld1 {v1.s}[3], [x2], x7 1: subs w6, w6, #4 ld1 {v20.s}[0], [x5], x8 ld1 {v21.s}[0], [x2], x7 ld1 {v4.4s,v5.4s}, [x10], #32 fmul v6.4s, v0.4s, v2.4s fmul v7.4s, v0.4s, v3.4s ld1 {v20.s}[1], [x5], x8 ld1 {v21.s}[1], [x2], x7 fmls v6.4s, v1.4s, v3.4s fmla v7.4s, v1.4s, v2.4s ld1 {v20.s}[2], [x5], x8 ld1 {v21.s}[2], [x2], x7 uzp1 v2.4s, 
v4.4s, v5.4s uzp2 v3.4s, v4.4s, v5.4s ld1 {v20.s}[3], [x5], x8 ld1 {v21.s}[3], [x2], x7 zip1 v4.4s, v6.4s, v7.4s zip2 v5.4s, v6.4s, v7.4s fmul v6.4s, v20.4s, v2.4s fmul v7.4s, v20.4s, v3.4s st1 {v4.4s,v5.4s}, [x9], #32 fmls v6.4s, v21.4s, v3.4s fmla v7.4s, v21.4s, v2.4s b.eq 3f subs w6, w6, #4 ld1 {v4.4s,v5.4s}, [x10], #32 ld1 {v0.s}[0], [x5], x8 ld1 {v1.s}[0], [x2], x7 uzp1 v2.4s, v4.4s, v5.4s ld1 {v0.s}[1], [x5], x8 ld1 {v1.s}[1], [x2], x7 uzp2 v3.4s, v4.4s, v5.4s ld1 {v0.s}[2], [x5], x8 ld1 {v1.s}[2], [x2], x7 zip1 v4.4s, v6.4s, v7.4s zip2 v5.4s, v6.4s, v7.4s ld1 {v0.s}[3], [x5], x8 ld1 {v1.s}[3], [x2], x7 st1 {v4.4s,v5.4s}, [x9], #32 b.gt 1b fmul v6.4s, v0.4s, v2.4s fmul v7.4s, v0.4s, v3.4s fmls v6.4s, v1.4s, v3.4s fmla v7.4s, v1.4s, v2.4s 3: zip1 v4.4s, v6.4s, v7.4s zip2 v5.4s, v6.4s, v7.4s st1 {v4.4s,v5.4s}, [x9], #32 mov x2, x11 mov x4, #1 bl fft_b15_calc_neon ldr w5, [x10, #CELT_LEN4] ldr x6, [x10, #CELT_TWIDDLE] ldr s31, [sp, #0x10] add x1, x21, x5, lsl #2 add x3, x6, x5, lsl #2 sub x0, x1, #16 sub x2, x3, #16 mov x8, #-16 mov x7, #16 mov x10, x0 mov x11, x1 sub w5, w5, #4 ld1 {v0.4s}, [x0], x8 ld1 {v1.4s}, [x1], x7 ld1 {v2.4s}, [x2], x8 ld1 {v3.4s}, [x3], x7 uzp1 v4.4s, v0.4s, v1.4s // z[-i-2, -i-1, +i, i+1].re uzp2 v6.4s, v0.4s, v1.4s // z[-i-2, -i-1, +i, i+1].im uzp1 v5.4s, v2.4s, v3.4s // twiddle_exptab[-i-2, -i-1, +i, i+1].re uzp2 v7.4s, v2.4s, v3.4s // twiddle_exptab[-i-2, -i-1, +i, i+1].im fmul v1.4s, v6.4s, v5.4s fmul v0.4s, v6.4s, v7.4s 2: subs w5, w5, #4 ld1 {v20.4s}, [x0], x8 fmla v1.4s, v4.4s, v7.4s fmls v0.4s, v4.4s, v5.4s ld1 {v21.4s}, [x1], x7 ext v1.16b, v1.16b, v1.16b, #8 fmul v0.4s, v0.4s, v31.s[0] ld1 {v2.4s}, [x2], x8 rev64 v1.4s, v1.4s fmul v1.4s, v1.4s, v31.s[0] ld1 {v3.4s}, [x3], x7 zip1 v5.4s, v0.4s, v1.4s zip2 v7.4s, v0.4s, v1.4s uzp1 v4.4s, v20.4s, v21.4s // z[-i-2, -i-1, +i, i+1].re uzp2 v6.4s, v20.4s, v21.4s // z[-i-2, -i-1, +i, i+1].im st1 {v5.4s}, [x10], x8 st1 {v7.4s}, [x11], x7 uzp1 v5.4s, v2.4s, v3.4s // twiddle_exptab[-i-2, -i-1, +i, i+1].re uzp2 v7.4s, v2.4s, v3.4s // twiddle_exptab[-i-2, -i-1, +i, i+1].im fmul v1.4s, v6.4s, v5.4s fmul v0.4s, v6.4s, v7.4s b.gt 2b fmla v1.4s, v4.4s, v7.4s fmls v0.4s, v4.4s, v5.4s ext v1.16b, v1.16b, v1.16b, #8 fmul v0.4s, v0.4s, v31.s[0] rev64 v1.4s, v1.4s fmul v1.4s, v1.4s, v31.s[0] zip1 v5.4s, v0.4s, v1.4s zip2 v7.4s, v0.4s, v1.4s st1 {v5.4s}, [x10], x8 st1 {v7.4s}, [x11], x7 ldp x21, x30, [sp] add sp, sp, #0x20 ret endfunc // [0] = exp(2 * i * pi / 5), [1] = exp(2 * i * pi * 2 / 5) const fact5, align=4 .float 0.30901699437494745, 0.95105651629515353 .float -0.80901699437494734, 0.58778525229247325 endconst
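fft5_neon above is a radix-5 butterfly; per the comment on fact5 it uses exp(2*pi*i/5) and exp(2*pi*i*2/5) as twiddles, with the shuffle tables recombining the symmetric products. As a reference point, a direct 5-point DFT written with that positive-exponent convention; this is a deliberately unoptimised sketch with illustrative names, and matching the hand-scheduled code's sign convention exactly is an assumption:

#include <complex.h>
#include <stddef.h>

static void fft5_ref(float complex out[5], const float complex *in,
                     ptrdiff_t stride)
{
    const float tau = 6.28318530717958647692f; /* 2*pi */

    for (int k = 0; k < 5; k++) {
        float complex acc = 0.0f;
        for (int n = 0; n < 5; n++)
            acc += in[n * stride] * cexpf(I * (tau * (float)(k * n) / 5.0f));
        out[k] = acc;
    }
}

fft15_neon combines three such transforms over strided inputs, and the def_fft ladder then doubles up to 960 points by splitting even/odd halves through fft15_pass.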
Akagi201/ffmpeg-xcode
4,395
ffmpeg-3.0.2/libavcodec/aarch64/synth_filter_neon.S
/* * Copyright (c) 2010 Mans Rullgard <mans@mansr.com> * Copyright (c) 2015 Janne Grunau <janne-libav@jannau.net> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "asm-offsets.h" #include "libavutil/aarch64/asm.S" .macro inner_loop ld1 {v29.4s}, [x9], x15 ld1 {v28.4s}, [x8], x15 ld1 {v30.4s}, [x10], x15 ld1 {v31.4s}, [x11], x15 rev64 v28.4s, v28.4s ld1 {v24.4s}, [x4], x15 ld1 {v25.4s}, [x5], x15 rev64 v31.4s, v31.4s ld1 {v26.4s}, [x6], x15 fmla v5.4s, v25.4s, v29.4s ld1 {v27.4s}, [x7], x15 ext v28.16b, v28.16b, v28.16b, #8 ext v31.16b, v31.16b, v31.16b, #8 fmla v6.4s, v26.4s, v30.4s fmls v4.4s, v24.4s, v28.4s fmla v7.4s, v27.4s, v31.4s .endm function ff_synth_filter_float_neon, export=1 ldr w7, [x2] // *synth_buf_offset ldr x9, [x0, #IMDCT_HALF] // imdct_half function pointer sxtw x7, w7 stp x3, x4, [sp, #-64]! add x1, x1, x7, lsl #2 // synth_buf sub w8, w7, #32 stp x5, x1, [sp, #16] bic x7, x7, #63 and w8, w8, #511 stp x7, x30, [sp, #32] str w8, [x2] str s0, [sp, #48] mov x2, x6 // in blr x9 ldp x2, x4, [sp] // synth_buf2, window ldp x13, x9, [sp, #16] // out, synth_buf ldp x0, x30, [sp, #32] // *synth_buf_offset ldr s0, [sp, #48] add x3, x2, #16*4 // synth_buf2 + 16 add x14, x13, #16*4 // out + 16 add x8, x9, #12*4 mov x15, #64*4 mov x1, #4 1: add x10, x9, #16*4 // synth_buf add x11, x8, #16*4 add x5, x4, #16*4 // window add x6, x4, #32*4 add x7, x4, #48*4 ld1 {v4.4s}, [x2] // a ld1 {v5.4s}, [x3] // b movi v6.4s, #0 // c movi v7.4s, #0 // d mov x12, #512 2: sub x12, x12, #64 cmp x12, x0 inner_loop b.gt 2b sub x8, x8, #512*4 sub x9, x9, #512*4 cbz x12, 4f sub x10, x10, #512*4 sub x11, x11, #512*4 3: subs x12, x12, #64 inner_loop b.gt 3b 4: subs x1, x1, #1 fmul v4.4s, v4.4s, v0.s[0] fmul v5.4s, v5.4s, v0.s[0] st1 {v6.4s}, [x2], #16 st1 {v7.4s}, [x3], #16 st1 {v4.4s}, [x13], #16 st1 {v5.4s}, [x14], #16 b.le 10f sub x4, x4, #508*4 // window add x9, x9, #4*4 // synth_buf sub x8, x8, #4*4 // synth_buf b 1b 10: add sp, sp, #64 ret endfunc
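The shape of ff_synth_filter_float_neon above: run imdct_half into the 512-sample history ring at the current offset, then for each of four output groups accumulate four windowed sums (the v4-v7 registers commented a, b, c, d), walking the ring in 64-float strides and splitting the walk in two where the ring wraps. The up-front bookkeeping is just the offset update, the sub/and pair near the top of the function; as a sketch with an illustrative name:

/* The 512-entry float history ring advances by one 32-sample block
 * per call: the  sub w8, w7, #32 / and w8, w8, #511  pair above. */
static inline int advance_synth_offset(int synth_buf_offset)
{
    return (synth_buf_offset - 32) & 511;
}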
Akagi201/ffmpeg-xcode
15,620
ffmpeg-3.0.2/libavcodec/aarch64/h264cmc_neon.S
/* * Copyright (c) 2008 Mans Rullgard <mans@mansr.com> * Copyright (c) 2013 Janne Grunau <janne-libav@jannau.net> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/aarch64/asm.S" /* chroma_mc8(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */ .macro h264_chroma_mc8 type, codec=h264 function ff_\type\()_\codec\()_chroma_mc8_neon, export=1 sxtw x2, w2 .ifc \type,avg mov x8, x0 .endif prfm pldl1strm, [x1] prfm pldl1strm, [x1, x2] .ifc \codec,rv40 movrel x6, rv40bias lsr w9, w5, #1 lsr w10, w4, #1 lsl w9, w9, #3 lsl w10, w10, #1 add w9, w9, w10 add x6, x6, w9, UXTW ld1r {v22.8H}, [x6] .endif .ifc \codec,vc1 movi v22.8H, #28 .endif mul w7, w4, w5 lsl w14, w5, #3 lsl w13, w4, #3 cmp w7, #0 sub w6, w14, w7 sub w12, w13, w7 sub w4, w7, w13 sub w4, w4, w14 add w4, w4, #64 b.eq 2f dup v0.8B, w4 dup v1.8B, w12 ld1 {v4.8B, v5.8B}, [x1], x2 dup v2.8B, w6 dup v3.8B, w7 ext v5.8B, v4.8B, v5.8B, #1 1: ld1 {v6.8B, v7.8B}, [x1], x2 umull v16.8H, v4.8B, v0.8B umlal v16.8H, v5.8B, v1.8B ext v7.8B, v6.8B, v7.8B, #1 ld1 {v4.8B, v5.8B}, [x1], x2 umlal v16.8H, v6.8B, v2.8B prfm pldl1strm, [x1] ext v5.8B, v4.8B, v5.8B, #1 umlal v16.8H, v7.8B, v3.8B umull v17.8H, v6.8B, v0.8B subs w3, w3, #2 umlal v17.8H, v7.8B, v1.8B umlal v17.8H, v4.8B, v2.8B umlal v17.8H, v5.8B, v3.8B prfm pldl1strm, [x1, x2] .ifc \codec,h264 rshrn v16.8B, v16.8H, #6 rshrn v17.8B, v17.8H, #6 .else add v16.8H, v16.8H, v22.8H add v17.8H, v17.8H, v22.8H shrn v16.8B, v16.8H, #6 shrn v17.8B, v17.8H, #6 .endif .ifc \type,avg ld1 {v20.8B}, [x8], x2 ld1 {v21.8B}, [x8], x2 urhadd v16.8B, v16.8B, v20.8B urhadd v17.8B, v17.8B, v21.8B .endif st1 {v16.8B}, [x0], x2 st1 {v17.8B}, [x0], x2 b.gt 1b ret 2: adds w12, w12, w6 dup v0.8B, w4 b.eq 5f tst w6, w6 dup v1.8B, w12 b.eq 4f ld1 {v4.8B}, [x1], x2 3: ld1 {v6.8B}, [x1], x2 umull v16.8H, v4.8B, v0.8B umlal v16.8H, v6.8B, v1.8B ld1 {v4.8B}, [x1], x2 umull v17.8H, v6.8B, v0.8B umlal v17.8H, v4.8B, v1.8B prfm pldl1strm, [x1] .ifc \codec,h264 rshrn v16.8B, v16.8H, #6 rshrn v17.8B, v17.8H, #6 .else add v16.8H, v16.8H, v22.8H add v17.8H, v17.8H, v22.8H shrn v16.8B, v16.8H, #6 shrn v17.8B, v17.8H, #6 .endif prfm pldl1strm, [x1, x2] .ifc \type,avg ld1 {v20.8B}, [x8], x2 ld1 {v21.8B}, [x8], x2 urhadd v16.8B, v16.8B, v20.8B urhadd v17.8B, v17.8B, v21.8B .endif subs w3, w3, #2 st1 {v16.8B}, [x0], x2 st1 {v17.8B}, [x0], x2 b.gt 3b ret 4: ld1 {v4.8B, v5.8B}, [x1], x2 ld1 {v6.8B, v7.8B}, [x1], x2 ext v5.8B, v4.8B, v5.8B, #1 ext v7.8B, v6.8B, v7.8B, #1 prfm pldl1strm, [x1] subs w3, w3, #2 umull v16.8H, v4.8B, v0.8B umlal v16.8H, v5.8B, v1.8B umull v17.8H, v6.8B, v0.8B umlal v17.8H, v7.8B, v1.8B prfm pldl1strm, [x1, x2] .ifc \codec,h264 rshrn v16.8B, v16.8H, #6 rshrn v17.8B, v17.8H, #6 .else add v16.8H, v16.8H, v22.8H add v17.8H, v17.8H, v22.8H shrn v16.8B, v16.8H, #6 shrn v17.8B, v17.8H, #6 .endif .ifc 
\type,avg ld1 {v20.8B}, [x8], x2 ld1 {v21.8B}, [x8], x2 urhadd v16.8B, v16.8B, v20.8B urhadd v17.8B, v17.8B, v21.8B .endif st1 {v16.8B}, [x0], x2 st1 {v17.8B}, [x0], x2 b.gt 4b ret 5: ld1 {v4.8B}, [x1], x2 ld1 {v5.8B}, [x1], x2 prfm pldl1strm, [x1] subs w3, w3, #2 umull v16.8H, v4.8B, v0.8B umull v17.8H, v5.8B, v0.8B prfm pldl1strm, [x1, x2] .ifc \codec,h264 rshrn v16.8B, v16.8H, #6 rshrn v17.8B, v17.8H, #6 .else add v16.8H, v16.8H, v22.8H add v17.8H, v17.8H, v22.8H shrn v16.8B, v16.8H, #6 shrn v17.8B, v17.8H, #6 .endif .ifc \type,avg ld1 {v20.8B}, [x8], x2 ld1 {v21.8B}, [x8], x2 urhadd v16.8B, v16.8B, v20.8B urhadd v17.8B, v17.8B, v21.8B .endif st1 {v16.8B}, [x0], x2 st1 {v17.8B}, [x0], x2 b.gt 5b ret endfunc .endm /* chroma_mc4(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */ .macro h264_chroma_mc4 type, codec=h264 function ff_\type\()_\codec\()_chroma_mc4_neon, export=1 sxtw x2, w2 .ifc \type,avg mov x8, x0 .endif prfm pldl1strm, [x1] prfm pldl1strm, [x1, x2] .ifc \codec,rv40 movrel x6, rv40bias lsr w9, w5, #1 lsr w10, w4, #1 lsl w9, w9, #3 lsl w10, w10, #1 add w9, w9, w10 add x6, x6, w9, UXTW ld1r {v22.8H}, [x6] .endif .ifc \codec,vc1 movi v22.8H, #28 .endif mul w7, w4, w5 lsl w14, w5, #3 lsl w13, w4, #3 cmp w7, #0 sub w6, w14, w7 sub w12, w13, w7 sub w4, w7, w13 sub w4, w4, w14 add w4, w4, #64 b.eq 2f dup v24.8B, w4 dup v25.8B, w12 ld1 {v4.8B}, [x1], x2 dup v26.8B, w6 dup v27.8B, w7 ext v5.8B, v4.8B, v5.8B, #1 trn1 v0.2S, v24.2S, v25.2S trn1 v2.2S, v26.2S, v27.2S trn1 v4.2S, v4.2S, v5.2S 1: ld1 {v6.8B}, [x1], x2 ext v7.8B, v6.8B, v7.8B, #1 trn1 v6.2S, v6.2S, v7.2S umull v18.8H, v4.8B, v0.8B umlal v18.8H, v6.8B, v2.8B ld1 {v4.8B}, [x1], x2 ext v5.8B, v4.8B, v5.8B, #1 trn1 v4.2S, v4.2S, v5.2S prfm pldl1strm, [x1] umull v19.8H, v6.8B, v0.8B umlal v19.8H, v4.8B, v2.8B trn1 v30.2D, v18.2D, v19.2D trn2 v31.2D, v18.2D, v19.2D add v18.8H, v30.8H, v31.8H .ifc \codec,h264 rshrn v16.8B, v18.8H, #6 .else add v18.8H, v18.8H, v22.8H shrn v16.8B, v18.8H, #6 .endif subs w3, w3, #2 prfm pldl1strm, [x1, x2] .ifc \type,avg ld1 {v20.S}[0], [x8], x2 ld1 {v20.S}[1], [x8], x2 urhadd v16.8B, v16.8B, v20.8B .endif st1 {v16.S}[0], [x0], x2 st1 {v16.S}[1], [x0], x2 b.gt 1b ret 2: adds w12, w12, w6 dup v30.8B, w4 b.eq 5f tst w6, w6 dup v31.8B, w12 trn1 v0.2S, v30.2S, v31.2S trn2 v1.2S, v30.2S, v31.2S b.eq 4f ext v1.8B, v0.8B, v1.8B, #4 ld1 {v4.S}[0], [x1], x2 3: ld1 {v4.S}[1], [x1], x2 umull v18.8H, v4.8B, v0.8B ld1 {v4.S}[0], [x1], x2 umull v19.8H, v4.8B, v1.8B trn1 v30.2D, v18.2D, v19.2D trn2 v31.2D, v18.2D, v19.2D add v18.8H, v30.8H, v31.8H prfm pldl1strm, [x1] .ifc \codec,h264 rshrn v16.8B, v18.8H, #6 .else add v18.8H, v18.8H, v22.8H shrn v16.8B, v18.8H, #6 .endif .ifc \type,avg ld1 {v20.S}[0], [x8], x2 ld1 {v20.S}[1], [x8], x2 urhadd v16.8B, v16.8B, v20.8B .endif subs w3, w3, #2 prfm pldl1strm, [x1, x2] st1 {v16.S}[0], [x0], x2 st1 {v16.S}[1], [x0], x2 b.gt 3b ret 4: ld1 {v4.8B}, [x1], x2 ld1 {v6.8B}, [x1], x2 ext v5.8B, v4.8B, v5.8B, #1 ext v7.8B, v6.8B, v7.8B, #1 trn1 v4.2S, v4.2S, v5.2S trn1 v6.2S, v6.2S, v7.2S umull v18.8H, v4.8B, v0.8B umull v19.8H, v6.8B, v0.8B subs w3, w3, #2 trn1 v30.2D, v18.2D, v19.2D trn2 v31.2D, v18.2D, v19.2D add v18.8H, v30.8H, v31.8H prfm pldl1strm, [x1] .ifc \codec,h264 rshrn v16.8B, v18.8H, #6 .else add v18.8H, v18.8H, v22.8H shrn v16.8B, v18.8H, #6 .endif .ifc \type,avg ld1 {v20.S}[0], [x8], x2 ld1 {v20.S}[1], [x8], x2 urhadd v16.8B, v16.8B, v20.8B .endif prfm pldl1strm, [x1] st1 {v16.S}[0], [x0], x2 st1 {v16.S}[1], [x0], x2 b.gt 4b ret 5: ld1 {v4.S}[0], 
[x1], x2 ld1 {v4.S}[1], [x1], x2 umull v18.8H, v4.8B, v30.8B subs w3, w3, #2 prfm pldl1strm, [x1] .ifc \codec,h264 rshrn v16.8B, v18.8H, #6 .else add v18.8H, v18.8H, v22.8H shrn v16.8B, v18.8H, #6 .endif .ifc \type,avg ld1 {v20.S}[0], [x8], x2 ld1 {v20.S}[1], [x8], x2 urhadd v16.8B, v16.8B, v20.8B .endif prfm pldl1strm, [x1] st1 {v16.S}[0], [x0], x2 st1 {v16.S}[1], [x0], x2 b.gt 5b ret endfunc .endm .macro h264_chroma_mc2 type function ff_\type\()_h264_chroma_mc2_neon, export=1 sxtw x2, w2 prfm pldl1strm, [x1] prfm pldl1strm, [x1, x2] orr w7, w4, w5 cbz w7, 2f mul w7, w4, w5 lsl w14, w5, #3 lsl w13, w4, #3 sub w6, w14, w7 sub w12, w13, w7 sub w4, w7, w13 sub w4, w4, w14 add w4, w4, #64 dup v0.8B, w4 dup v2.8B, w12 dup v1.8B, w6 dup v3.8B, w7 trn1 v0.4H, v0.4H, v2.4H trn1 v1.4H, v1.4H, v3.4H 1: ld1 {v4.S}[0], [x1], x2 ld1 {v4.S}[1], [x1], x2 rev64 v5.2S, v4.2S ld1 {v5.S}[1], [x1] ext v6.8B, v4.8B, v5.8B, #1 ext v7.8B, v5.8B, v4.8B, #1 trn1 v4.4H, v4.4H, v6.4H trn1 v5.4H, v5.4H, v7.4H umull v16.8H, v4.8B, v0.8B umlal v16.8H, v5.8B, v1.8B .ifc \type,avg ld1 {v18.H}[0], [x0], x2 ld1 {v18.H}[2], [x0] sub x0, x0, x2 .endif rev64 v17.4S, v16.4S add v16.8H, v16.8H, v17.8H rshrn v16.8B, v16.8H, #6 .ifc \type,avg urhadd v16.8B, v16.8B, v18.8B .endif st1 {v16.H}[0], [x0], x2 st1 {v16.H}[2], [x0], x2 subs w3, w3, #2 b.gt 1b ret 2: ld1 {v16.H}[0], [x1], x2 ld1 {v16.H}[1], [x1], x2 .ifc \type,avg ld1 {v18.H}[0], [x0], x2 ld1 {v18.H}[1], [x0] sub x0, x0, x2 urhadd v16.8B, v16.8B, v18.8B .endif st1 {v16.H}[0], [x0], x2 st1 {v16.H}[1], [x0], x2 subs w3, w3, #2 b.gt 2b ret endfunc .endm h264_chroma_mc8 put h264_chroma_mc8 avg h264_chroma_mc4 put h264_chroma_mc4 avg h264_chroma_mc2 put h264_chroma_mc2 avg #if CONFIG_RV40_DECODER const rv40bias .short 0, 16, 32, 16 .short 32, 28, 32, 28 .short 0, 32, 16, 32 .short 32, 28, 32, 28 endconst h264_chroma_mc8 put, rv40 h264_chroma_mc8 avg, rv40 h264_chroma_mc4 put, rv40 h264_chroma_mc4 avg, rv40 #endif #if CONFIG_VC1_DECODER h264_chroma_mc8 put, vc1 h264_chroma_mc8 avg, vc1 h264_chroma_mc4 put, vc1 h264_chroma_mc4 avg, vc1 #endif
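All three chroma block sizes above implement the same 2x2 bilinear interpolation from the H.264 spec: the 1/8-pel offsets x and y yield four weights summing to 64, and the result is rounded and shifted by 6 (rv40 and vc1 swap the +32 rounding term for the codec-specific bias loaded into v22). A scalar reference for the 8-wide case, with illustrative names:

#include <stdint.h>

static void chroma_mc8_ref(uint8_t *dst, const uint8_t *src,
                           int stride, int h, int x, int y)
{
    /* the same weights the NEON prologue derives with mul/lsl/sub */
    const int A = (8 - x) * (8 - y);
    const int B = x * (8 - y);
    const int C = (8 - x) * y;
    const int D = x * y;

    for (int i = 0; i < h; i++) {
        for (int j = 0; j < 8; j++)
            dst[j] = (A * src[j]          + B * src[j + 1] +
                      C * src[j + stride] + D * src[j + stride + 1]
                      + 32) >> 6;
        dst += stride;
        src += stride;
    }
}

The x == 0 and y == 0 special cases in the assembly (the 2:, 4:, and 5: branches) are this same loop with the zero-weight taps removed.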
Akagi201/ffmpeg-xcode
7,515
ffmpeg-3.0.2/libavcodec/aarch64/mpegaudiodsp_neon.S
/* * Copyright (c) 2014 Janne Grunau <janne-libav@jannau.net> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/aarch64/asm.S" #define FRAC_BITS 23 // fractional bits for sb_samples and dct #define WFRAC_BITS 16 // fractional bits for window #define OUT_SHIFT (WFRAC_BITS + FRAC_BITS - 15) const tbl_rev128.s, align=4 .byte 12, 13, 14, 15 .byte 8, 9, 10, 11 .byte 4, 5, 6, 7 .byte 0, 1, 2, 3 endconst .macro apply_window type, st function ff_mpadsp_apply_window_\type\()_neon, export=1 mov x7, x0 sxtw x4, w4 // incr add x8, x0, #512<<2 ld1 {v0.4s,v1.4s,v2.4s,v3.4s}, [x7], #64 ld1 {v4.4s,v5.4s,v6.4s,v7.4s}, [x7], #64 st1 {v0.4s,v1.4s,v2.4s,v3.4s}, [x8], #64 st1 {v4.4s,v5.4s,v6.4s,v7.4s}, [x8], #64 movrel x15, tbl_rev128.s ld1 {v27.4s}, [x15] .ifc \type, fixed lsl x4, x4, #1 .else lsl x4, x4, #2 .endif add x10, x0, #45<<2 add x0, x0, #16<<2 add x1, x1, #16<<2 add x5, x3, x4, lsl #5 sub x5, x5, x4 // samples2 neg x13, x4 // -incr mov x9, #64<<2 .ifc \type, fixed ld1r {v16.2s}, [x2] // dither_state sxtl v16.2d, v16.2s movi v29.2d, #0 movi v30.2d, #(1<<OUT_SHIFT)-1 trn1 v31.2d, v29.2d, v30.2d trn2 v30.2d, v30.2d, v29.2d trn1 v16.2d, v16.2d, v29.2d .else movi v16.4s, #0 movi v28.4s, #0 .endif mov x14, #4 1: mov x8, x0 sub x7, x1, #3<<2 sub x6, x1, x14, lsl #4 add x7, x7, x14, lsl #4 add x11, x6, #(32)<<2 // w + 32 add x12, x7, #(32)<<2 // w2 + 32 mov x15, #8 movi v17.2d, #0 movi v18.2d, #0 movi v19.2d, #0 2: subs x15, x15, #1 ld1 {v0.4s}, [x8], x9 ld1 {v1.4s}, [x10], x9 ld1 {v2.4s}, [x6], x9 ld1 {v3.4s}, [x7], x9 tbl v6.16b, {v0.16b}, v27.16b tbl v7.16b, {v1.16b}, v27.16b ld1 {v4.4s}, [x11], x9 ld1 {v5.4s}, [x12], x9 MLA v16, v2, v0 MLA2 v17, v2, v0 MLS v18, v3, v6 MLS2 v19, v3, v6 MLS v16, v4, v7 MLS2 v17, v4, v7 MLS v18, v5, v1 MLS2 v19, v5, v1 b.gt 2b cmp x14, #4 sub x10, x10, #64<<5 // 64 * 8 * sizeof(int32_t) .ifc \type, fixed and v28.16b, v16.16b, v30.16b ext v28.16b, v29.16b, v28.16b, #8 b.eq 4f round_sample v19, 1, 1 4: round_sample v16, 1, 0 shrn v16.2s, v16.2d, #OUT_SHIFT round_sample v19, 0, 0 shrn v19.2s, v19.2d, #OUT_SHIFT round_sample v17, 0, 1 round_sample v18, 1, 1 round_sample v17, 1, 0 shrn2 v16.4s, v17.2d, #OUT_SHIFT round_sample v18, 0, 0 shrn2 v19.4s, v18.2d, #OUT_SHIFT sqxtn v16.4h, v16.4s sqxtn v18.4h, v19.4s .else ext v18.16b, v18.16b, v18.16b, #8 .endif st1 {v16.\st\()}[0], [x3], x4 b.eq 4f st1 {v18.\st\()}[1], [x5], x13 4: st1 {v16.\st\()}[1], [x3], x4 st1 {v18.\st\()}[0], [x5], x13 st1 {v16.\st\()}[2], [x3], x4 st1 {v18.\st\()}[3], [x5], x13 st1 {v16.\st\()}[3], [x3], x4 st1 {v18.\st\()}[2], [x5], x13 mov v16.16b, v28.16b subs x14, x14, #1 add x0, x0, #4<<2 sub x10, x10, #4<<2 b.gt 1b // computing samples[16] add x6, x1, #32<<2 ld1 {v0.2s}, [x6], x9 ld1 {v1.2s}, [x0], x9 .rept 3 ld1 {v2.2s}, [x6], x9 ld1 {v3.2s}, [x0], x9 MLS v16, v0, v1 ld1 {v0.2s}, [x6], x9 ld1 {v1.2s},
[x0], x9 MLS v16, v2, v3 .endr ld1 {v2.2s}, [x6], x9 ld1 {v3.2s}, [x0], x9 MLS v16, v0, v1 MLS v16, v2, v3 .ifc \type, fixed and v28.16b, v16.16b, v30.16b shrn v20.2s, v16.2d, #OUT_SHIFT xtn v28.2s, v28.2d sqxtn v20.4h, v20.4s st1 {v28.s}[0], [x2] // save dither_state st1 {v20.h}[0], [x3] .else st1 {v16.s}[0], [x3] .endif ret endfunc .purgem round_sample .purgem MLA .purgem MLA2 .purgem MLS .purgem MLS2 .endm .macro round_sample r, idx, next add \r\().2d, \r\().2d, v28.2d .if \idx == 0 and v28.16b, \r\().16b, v30.16b .else // \idx == 1 and v28.16b, \r\().16b, v31.16b .endif .if \idx != \next .if \next == 0 ext v28.16b, v28.16b, v29.16b, #8 .else ext v28.16b, v29.16b, v28.16b, #8 .endif .endif .endm .macro MLA d, s1, s2 smlal \d\().2d, \s1\().2s, \s2\().2s .endm .macro MLA2 d, s1, s2 smlal2 \d\().2d, \s1\().4s, \s2\().4s .endm .macro MLS d, s1, s2 smlsl \d\().2d, \s1\().2s, \s2\().2s .endm .macro MLS2 d, s1, s2 smlsl2 \d\().2d, \s1\().4s, \s2\().4s .endm apply_window fixed, h // nothing to do for round_sample and ML{A,S}2 .macro round_sample r, idx, next .endm .macro MLA2 d, s1, s2 .endm .macro MLS2 d, s1, s2 .endm .macro MLA d, s1, s2 fmla \d\().4s, \s1\().4s, \s2\().4s .endm .macro MLS d, s1, s2 fmls \d\().4s, \s1\().4s, \s2\().4s .endm apply_window float, s
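For the fixed-point window, the round_sample macro above implements MPEG-audio's error-feedback rounding: shift the 64-bit accumulator down by OUT_SHIFT, saturate to 16 bits, and carry the discarded low-order bits (kept in v28 and finally stored back as dither_state) into the next sample's accumulator. A hedged per-sample C restatement; the constants match the #defines at the top of the file:

#include <stdint.h>

#define FRAC_BITS  23
#define WFRAC_BITS 16
#define OUT_SHIFT  (WFRAC_BITS + FRAC_BITS - 15)

static inline int16_t round_sample_ref(int64_t *sum)
{
    int64_t out = *sum >> OUT_SHIFT;
    *sum &= ((int64_t)1 << OUT_SHIFT) - 1;  /* dither for the next sample */
    if (out < -32768) out = -32768;
    if (out >  32767) out =  32767;
    return (int16_t)out;
}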
Akagi201/ffmpeg-xcode
6,761
ffmpeg-3.0.2/libavcodec/aarch64/neon.S
/* * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ .macro transpose_8x8B r0, r1, r2, r3, r4, r5, r6, r7, r8, r9 trn1 \r8\().8B, \r0\().8B, \r1\().8B trn2 \r9\().8B, \r0\().8B, \r1\().8B trn1 \r1\().8B, \r2\().8B, \r3\().8B trn2 \r3\().8B, \r2\().8B, \r3\().8B trn1 \r0\().8B, \r4\().8B, \r5\().8B trn2 \r5\().8B, \r4\().8B, \r5\().8B trn1 \r2\().8B, \r6\().8B, \r7\().8B trn2 \r7\().8B, \r6\().8B, \r7\().8B trn1 \r4\().4H, \r0\().4H, \r2\().4H trn2 \r2\().4H, \r0\().4H, \r2\().4H trn1 \r6\().4H, \r5\().4H, \r7\().4H trn2 \r7\().4H, \r5\().4H, \r7\().4H trn1 \r5\().4H, \r9\().4H, \r3\().4H trn2 \r9\().4H, \r9\().4H, \r3\().4H trn1 \r3\().4H, \r8\().4H, \r1\().4H trn2 \r8\().4H, \r8\().4H, \r1\().4H trn1 \r0\().2S, \r3\().2S, \r4\().2S trn2 \r4\().2S, \r3\().2S, \r4\().2S trn1 \r1\().2S, \r5\().2S, \r6\().2S trn2 \r5\().2S, \r5\().2S, \r6\().2S trn2 \r6\().2S, \r8\().2S, \r2\().2S trn1 \r2\().2S, \r8\().2S, \r2\().2S trn1 \r3\().2S, \r9\().2S, \r7\().2S trn2 \r7\().2S, \r9\().2S, \r7\().2S .endm .macro transpose_8x16B r0, r1, r2, r3, r4, r5, r6, r7, t0, t1 trn1 \t0\().16B, \r0\().16B, \r1\().16B trn2 \t1\().16B, \r0\().16B, \r1\().16B trn1 \r1\().16B, \r2\().16B, \r3\().16B trn2 \r3\().16B, \r2\().16B, \r3\().16B trn1 \r0\().16B, \r4\().16B, \r5\().16B trn2 \r5\().16B, \r4\().16B, \r5\().16B trn1 \r2\().16B, \r6\().16B, \r7\().16B trn2 \r7\().16B, \r6\().16B, \r7\().16B trn1 \r4\().8H, \r0\().8H, \r2\().8H trn2 \r2\().8H, \r0\().8H, \r2\().8H trn1 \r6\().8H, \r5\().8H, \r7\().8H trn2 \r7\().8H, \r5\().8H, \r7\().8H trn1 \r5\().8H, \t1\().8H, \r3\().8H trn2 \t1\().8H, \t1\().8H, \r3\().8H trn1 \r3\().8H, \t0\().8H, \r1\().8H trn2 \t0\().8H, \t0\().8H, \r1\().8H trn1 \r0\().4S, \r3\().4S, \r4\().4S trn2 \r4\().4S, \r3\().4S, \r4\().4S trn1 \r1\().4S, \r5\().4S, \r6\().4S trn2 \r5\().4S, \r5\().4S, \r6\().4S trn2 \r6\().4S, \t0\().4S, \r2\().4S trn1 \r2\().4S, \t0\().4S, \r2\().4S trn1 \r3\().4S, \t1\().4S, \r7\().4S trn2 \r7\().4S, \t1\().4S, \r7\().4S .endm .macro transpose_4x16B r0, r1, r2, r3, t4, t5, t6, t7 trn1 \t4\().16B, \r0\().16B, \r1\().16B trn2 \t5\().16B, \r0\().16B, \r1\().16B trn1 \t6\().16B, \r2\().16B, \r3\().16B trn2 \t7\().16B, \r2\().16B, \r3\().16B trn1 \r0\().8H, \t4\().8H, \t6\().8H trn2 \r2\().8H, \t4\().8H, \t6\().8H trn1 \r1\().8H, \t5\().8H, \t7\().8H trn2 \r3\().8H, \t5\().8H, \t7\().8H .endm .macro transpose_4x8B r0, r1, r2, r3, t4, t5, t6, t7 trn1 \t4\().8B, \r0\().8B, \r1\().8B trn2 \t5\().8B, \r0\().8B, \r1\().8B trn1 \t6\().8B, \r2\().8B, \r3\().8B trn2 \t7\().8B, \r2\().8B, \r3\().8B trn1 \r0\().4H, \t4\().4H, \t6\().4H trn2 \r2\().4H, \t4\().4H, \t6\().4H trn1 \r1\().4H, \t5\().4H, \t7\().4H trn2 \r3\().4H, \t5\().4H, \t7\().4H .endm .macro transpose_4x4H r0, r1, r2, r3, r4, r5, r6, r7 trn1 \r4\().4H, \r0\().4H, \r1\().4H trn2 \r5\().4H, \r0\().4H, \r1\().4H trn1 \r7\().4H, 
\r2\().4H, \r3\().4H trn2 \r6\().4H, \r2\().4H, \r3\().4H trn1 \r0\().2S, \r4\().2S, \r7\().2S trn2 \r3\().2S, \r4\().2S, \r7\().2S trn1 \r1\().2S, \r5\().2S, \r6\().2S trn2 \r2\().2S, \r5\().2S, \r6\().2S .endm .macro transpose_8x8H r0, r1, r2, r3, r4, r5, r6, r7, r8, r9 trn1 \r8\().8H, \r0\().8H, \r1\().8H trn2 \r9\().8H, \r0\().8H, \r1\().8H trn1 \r1\().8H, \r2\().8H, \r3\().8H trn2 \r3\().8H, \r2\().8H, \r3\().8H trn1 \r0\().8H, \r4\().8H, \r5\().8H trn2 \r5\().8H, \r4\().8H, \r5\().8H trn1 \r2\().8H, \r6\().8H, \r7\().8H trn2 \r7\().8H, \r6\().8H, \r7\().8H trn1 \r4\().4S, \r0\().4S, \r2\().4S trn2 \r2\().4S, \r0\().4S, \r2\().4S trn1 \r6\().4S, \r5\().4S, \r7\().4S trn2 \r7\().4S, \r5\().4S, \r7\().4S trn1 \r5\().4S, \r9\().4S, \r3\().4S trn2 \r9\().4S, \r9\().4S, \r3\().4S trn1 \r3\().4S, \r8\().4S, \r1\().4S trn2 \r8\().4S, \r8\().4S, \r1\().4S trn1 \r0\().2D, \r3\().2D, \r4\().2D trn2 \r4\().2D, \r3\().2D, \r4\().2D trn1 \r1\().2D, \r5\().2D, \r6\().2D trn2 \r5\().2D, \r5\().2D, \r6\().2D trn2 \r6\().2D, \r8\().2D, \r2\().2D trn1 \r2\().2D, \r8\().2D, \r2\().2D trn1 \r3\().2D, \r9\().2D, \r7\().2D trn2 \r7\().2D, \r9\().2D, \r7\().2D .endm
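Every macro above is the same two-round trn1/trn2 idiom: interleave registers at the element width, then again at twice the width (and once more for the 8x8 variants). The 4x4 16-bit case in ACLE intrinsics, written as a plain transpose; note that the assembly macros may hand rows back in a permuted register order to suit their particular callers:

#include <arm_neon.h>

static inline void transpose_4x4h(int16x4_t r[4])
{
    /* round 1: interleave 16-bit lanes of row pairs */
    int16x4_t t0 = vtrn1_s16(r[0], r[1]);   /* a0 b0 a2 b2 */
    int16x4_t t1 = vtrn2_s16(r[0], r[1]);   /* a1 b1 a3 b3 */
    int16x4_t t2 = vtrn1_s16(r[2], r[3]);   /* c0 d0 c2 d2 */
    int16x4_t t3 = vtrn2_s16(r[2], r[3]);   /* c1 d1 c3 d3 */

    /* round 2: interleave 32-bit pairs to finish the transpose */
    r[0] = vreinterpret_s16_s32(vtrn1_s32(vreinterpret_s32_s16(t0),
                                          vreinterpret_s32_s16(t2)));
    r[1] = vreinterpret_s16_s32(vtrn1_s32(vreinterpret_s32_s16(t1),
                                          vreinterpret_s32_s16(t3)));
    r[2] = vreinterpret_s16_s32(vtrn2_s32(vreinterpret_s32_s16(t0),
                                          vreinterpret_s32_s16(t2)));
    r[3] = vreinterpret_s16_s32(vtrn2_s32(vreinterpret_s32_s16(t1),
                                          vreinterpret_s32_s16(t3)));
}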
Akagi201/ffmpeg-xcode
18,235
ffmpeg-3.0.2/libavcodec/aarch64/fft_neon.S
/* * ARM NEON optimised FFT * * Copyright (c) 2009 Mans Rullgard <mans@mansr.com> * Copyright (c) 2009 Naotoshi Nojiri * Copyright (c) 2014 Janne Grunau <janne-libav@jannau.net> * * This algorithm (though not any of the implementation details) is * based on libdjbfft by D. J. Bernstein. * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/aarch64/asm.S" #define M_SQRT1_2 0.70710678118654752440 .macro transpose d0, d1, s0, s1 trn1 \d0, \s0, \s1 trn2 \d1, \s0, \s1 .endm function fft4_neon ld1 {v0.2s,v1.2s,v2.2s,v3.2s}, [x0] fadd v4.2s, v0.2s, v1.2s // r0+r1,i0+i1 fsub v6.2s, v0.2s, v1.2s // r0-r1,i0-i1 ext v16.8b, v2.8b, v3.8b, #4 ext v17.8b, v3.8b, v2.8b, #4 fadd v5.2s, v2.2s, v3.2s // i2+i3,r2+r3 fsub v7.2s, v16.2s, v17.2s // r3-r2,i2-i3 fadd v0.2s, v4.2s, v5.2s fsub v2.2s, v4.2s, v5.2s fadd v1.2s, v6.2s, v7.2s fsub v3.2s, v6.2s, v7.2s st1 {v0.2s,v1.2s,v2.2s,v3.2s}, [x0] ret endfunc function fft8_neon mov x1, x0 ld1 {v0.2s, v1.2s, v2.2s, v3.2s}, [x0], #32 ld1 {v16.2s,v17.2s,v18.2s,v19.2s}, [x0] ext v22.8b, v2.8b, v3.8b, #4 ext v23.8b, v3.8b, v2.8b, #4 fadd v4.2s, v16.2s, v17.2s // r4+r5,i4+i5 fadd v5.2s, v18.2s, v19.2s // r6+r7,i6+i7 fsub v17.2s, v16.2s, v17.2s // r4-r5,i4-i5 fsub v19.2s, v18.2s, v19.2s // r6-r7,i6-i7 rev64 v27.2s, v28.2s // ??? fadd v20.2s, v0.2s, v1.2s // r0+r1,i0+i1 fadd v21.2s, v2.2s, v3.2s // r2+r3,i2+i3 fmul v26.2s, v17.2s, v28.2s // -a2r*w,a2i*w ext v6.8b, v4.8b, v5.8b, #4 ext v7.8b, v5.8b, v4.8b, #4 fmul v27.2s, v19.2s, v27.2s // a3r*w,-a3i*w fsub v23.2s, v22.2s, v23.2s // i2-i3,r3-r2 fsub v22.2s, v0.2s, v1.2s // r0-r1,i0-i1 fmul v24.2s, v17.2s, v28.s[1] // a2r*w,a2i*w fmul v25.2s, v19.2s, v28.s[1] // a3r*w,a3i*w fadd v0.2s, v20.2s, v21.2s fsub v2.2s, v20.2s, v21.2s fadd v1.2s, v22.2s, v23.2s rev64 v26.2s, v26.2s rev64 v27.2s, v27.2s fsub v3.2s, v22.2s, v23.2s fsub v6.2s, v6.2s, v7.2s fadd v24.2s, v24.2s, v26.2s // a2r+a2i,a2i-a2r t1,t2 fadd v25.2s, v25.2s, v27.2s // a3r-a3i,a3i+a3r t5,t6 fadd v7.2s, v4.2s, v5.2s fsub v18.2s, v2.2s, v6.2s ext v26.8b, v24.8b, v25.8b, #4 ext v27.8b, v25.8b, v24.8b, #4 fadd v2.2s, v2.2s, v6.2s fsub v16.2s, v0.2s, v7.2s fadd v5.2s, v25.2s, v24.2s fsub v4.2s, v26.2s, v27.2s fadd v0.2s, v0.2s, v7.2s fsub v17.2s, v1.2s, v5.2s fsub v19.2s, v3.2s, v4.2s fadd v3.2s, v3.2s, v4.2s fadd v1.2s, v1.2s, v5.2s st1 {v16.2s,v17.2s,v18.2s,v19.2s}, [x0] st1 {v0.2s, v1.2s, v2.2s, v3.2s}, [x1] ret endfunc function fft16_neon mov x1, x0 ld1 {v0.2s, v1.2s, v2.2s, v3.2s}, [x0], #32 ld1 {v16.2s,v17.2s,v18.2s,v19.2s}, [x0], #32 ext v22.8b, v2.8b, v3.8b, #4 ext v23.8b, v3.8b, v2.8b, #4 fadd v4.2s, v16.2s, v17.2s // r4+r5,i4+i5 fadd v5.2s, v18.2s, v19.2s // r6+r7,i6+i7 fsub v17.2s, v16.2s, v17.2s // r4-r5,i4-i5 fsub v19.2s, v18.2s, v19.2s // r6-r7,i6-i7 rev64 v27.2s, v28.2s // ??? 
fadd v20.2s, v0.2s, v1.2s // r0+r1,i0+i1 fadd v21.2s, v2.2s, v3.2s // r2+r3,i2+i3 fmul v26.2s, v17.2s, v28.2s // -a2r*w,a2i*w ext v6.8b, v4.8b, v5.8b, #4 ext v7.8b, v5.8b, v4.8b, #4 fmul v27.2s, v19.2s, v27.2s // a3r*w,-a3i*w fsub v23.2s, v22.2s, v23.2s // i2-i3,r3-r2 fsub v22.2s, v0.2s, v1.2s // r0-r1,i0-i1 fmul v24.2s, v17.2s, v28.s[1] // a2r*w,a2i*w fmul v25.2s, v19.2s, v28.s[1] // a3r*w,a3i*w fadd v0.2s, v20.2s, v21.2s fsub v2.2s, v20.2s, v21.2s fadd v1.2s, v22.2s, v23.2s rev64 v26.2s, v26.2s rev64 v27.2s, v27.2s fsub v3.2s, v22.2s, v23.2s fsub v6.2s, v6.2s, v7.2s fadd v24.2s, v24.2s, v26.2s // a2r+a2i,a2i-a2r t1,t2 fadd v25.2s, v25.2s, v27.2s // a3r-a3i,a3i+a3r t5,t6 fadd v7.2s, v4.2s, v5.2s fsub v18.2s, v2.2s, v6.2s ld1 {v20.4s,v21.4s}, [x0], #32 ld1 {v22.4s,v23.4s}, [x0], #32 ext v26.8b, v24.8b, v25.8b, #4 ext v27.8b, v25.8b, v24.8b, #4 fadd v2.2s, v2.2s, v6.2s fsub v16.2s, v0.2s, v7.2s fadd v5.2s, v25.2s, v24.2s fsub v4.2s, v26.2s, v27.2s transpose v24.2d, v25.2d, v20.2d, v22.2d transpose v26.2d, v27.2d, v21.2d, v23.2d fadd v0.2s, v0.2s, v7.2s fsub v17.2s, v1.2s, v5.2s fsub v19.2s, v3.2s, v4.2s fadd v3.2s, v3.2s, v4.2s fadd v1.2s, v1.2s, v5.2s ext v20.16b, v21.16b, v21.16b, #4 ext v21.16b, v23.16b, v23.16b, #4 zip1 v0.2d, v0.2d, v1.2d // {z[0], z[1]} zip1 v1.2d, v2.2d, v3.2d // {z[2], z[3]} zip1 v2.2d, v16.2d, v17.2d // {z[o1], z[o1+1]} zip1 v3.2d, v18.2d, v19.2d // {z[o1+2],z[o1+3]} // 2 x fft4 transpose v22.2d, v23.2d, v20.2d, v21.2d fadd v4.4s, v24.4s, v25.4s fadd v5.4s, v26.4s, v27.4s fsub v6.4s, v24.4s, v25.4s fsub v7.4s, v22.4s, v23.4s ld1 {v23.4s}, [x14] fadd v24.4s, v4.4s, v5.4s // {z[o2+0],z[o2+1]} fsub v26.4s, v4.4s, v5.4s // {z[o2+2],z[o2+3]} fadd v25.4s, v6.4s, v7.4s // {z[o3+0],z[o3+1]} fsub v27.4s, v6.4s, v7.4s // {z[o3+2],z[o3+3]} //fft_pass_neon_16 rev64 v7.4s, v25.4s fmul v25.4s, v25.4s, v23.s[1] fmul v7.4s, v7.4s, v29.4s fmla v25.4s, v7.4s, v23.s[3] // {t1a,t2a,t5a,t6a} zip1 v20.4s, v24.4s, v25.4s zip2 v21.4s, v24.4s, v25.4s fneg v22.4s, v20.4s fadd v4.4s, v21.4s, v20.4s fsub v6.4s, v20.4s, v21.4s // just the second half fadd v5.4s, v21.4s, v22.4s // just the first half tbl v4.16b, {v4.16b}, v30.16b // trans4_float tbl v5.16b, {v5.16b,v6.16b}, v31.16b // trans8_float fsub v20.4s, v0.4s, v4.4s // {z[o2],z[o2+1]} fadd v16.4s, v0.4s, v4.4s // {z[0], z[1]} fsub v22.4s, v2.4s, v5.4s // {z[o3],z[o3+1]} fadd v18.4s, v2.4s, v5.4s // {z[o1],z[o1+1]} //second half rev64 v6.4s, v26.4s fmul v26.4s, v26.4s, v23.s[2] rev64 v7.4s, v27.4s fmul v27.4s, v27.4s, v23.s[3] fmul v6.4s, v6.4s, v29.4s fmul v7.4s, v7.4s, v29.4s fmla v26.4s, v6.4s, v23.s[2] // {t1,t2,t5,t6} fmla v27.4s, v7.4s, v23.s[1] // {t1a,t2a,t5a,t6a} zip1 v24.4s, v26.4s, v27.4s zip2 v25.4s, v26.4s, v27.4s fneg v26.4s, v24.4s fadd v4.4s, v25.4s, v24.4s fsub v6.4s, v24.4s, v25.4s // just the second half fadd v5.4s, v25.4s, v26.4s // just the first half tbl v4.16b, {v4.16b}, v30.16b // trans4_float tbl v5.16b, {v5.16b,v6.16b}, v31.16b // trans8_float fadd v17.4s, v1.4s, v4.4s // {z[2], z[3]} fsub v21.4s, v1.4s, v4.4s // {z[o2+2],z[o2+3]} fadd v19.4s, v3.4s, v5.4s // {z[o1+2],z[o1+3]} fsub v23.4s, v3.4s, v5.4s // {z[o3+2],z[o3+3]} st1 {v16.4s,v17.4s}, [x1], #32 st1 {v18.4s,v19.4s}, [x1], #32 st1 {v20.4s,v21.4s}, [x1], #32 st1 {v22.4s,v23.4s}, [x1], #32 ret endfunc const trans4_float, align=4 .byte 0, 1, 2, 3 .byte 8, 9, 10, 11 .byte 4, 5, 6, 7 .byte 12, 13, 14, 15 endconst const trans8_float, align=4 .byte 24, 25, 26, 27 .byte 0, 1, 2, 3 .byte 28, 29, 30, 31 .byte 4, 5, 6, 7 endconst function fft_pass_neon sub x6, x2, #1 
// n - 1, loop counter lsl x5, x2, #3 // 2 * n * sizeof FFTSample lsl x1, x2, #4 // 2 * n * sizeof FFTComplex add x5, x4, x5 // wim add x3, x1, x2, lsl #5 // 4 * n * sizeof FFTComplex add x2, x0, x2, lsl #5 // &z[o2] add x3, x0, x3 // &z[o3] add x1, x0, x1 // &z[o1] ld1 {v20.4s},[x2] // {z[o2],z[o2+1]} ld1 {v22.4s},[x3] // {z[o3],z[o3+1]} ld1 {v4.2s}, [x4], #8 // {wre[0],wre[1]} trn2 v25.2d, v20.2d, v22.2d sub x5, x5, #4 // wim-- trn1 v24.2d, v20.2d, v22.2d ld1 {v5.s}[0], [x5], x7 // d5[0] = wim[-1] rev64 v7.4s, v25.4s fmul v25.4s, v25.4s, v4.s[1] ld1 {v16.4s}, [x0] // {z[0],z[1]} fmul v7.4s, v7.4s, v29.4s ld1 {v17.4s}, [x1] // {z[o1],z[o1+1]} prfm pldl1keep, [x2, #16] prfm pldl1keep, [x3, #16] fmla v25.4s, v7.4s, v5.s[0] // {t1a,t2a,t5a,t6a} prfm pldl1keep, [x0, #16] prfm pldl1keep, [x1, #16] zip1 v20.4s, v24.4s, v25.4s zip2 v21.4s, v24.4s, v25.4s fneg v22.4s, v20.4s fadd v4.4s, v21.4s, v20.4s fsub v6.4s, v20.4s, v21.4s // just the second half fadd v5.4s, v21.4s, v22.4s // just the first half tbl v4.16b, {v4.16b}, v30.16b // trans4_float tbl v5.16b, {v5.16b,v6.16b}, v31.16b // trans8_float fadd v20.4s, v16.4s, v4.4s fsub v22.4s, v16.4s, v4.4s fadd v21.4s, v17.4s, v5.4s st1 {v20.4s}, [x0], #16 // {z[0], z[1]} fsub v23.4s, v17.4s, v5.4s st1 {v21.4s}, [x1], #16 // {z[o1],z[o1+1]} st1 {v22.4s}, [x2], #16 // {z[o2],z[o2+1]} st1 {v23.4s}, [x3], #16 // {z[o3],z[o3+1]} 1: ld1 {v20.4s},[x2] // {z[o2],z[o2+1]} ld1 {v22.4s},[x3] // {z[o3],z[o3+1]} ld1 {v4.2s}, [x4], #8 // {wre[0],wre[1]} transpose v26.2d, v27.2d, v20.2d, v22.2d ld1 {v5.2s}, [x5], x7 // {wim[-1],wim[0]} rev64 v6.4s, v26.4s fmul v26.4s, v26.4s, v4.s[0] rev64 v7.4s, v27.4s fmul v27.4s, v27.4s, v4.s[1] fmul v6.4s, v6.4s, v29.4s fmul v7.4s, v7.4s, v29.4s ld1 {v16.4s},[x0] // {z[0],z[1]} fmla v26.4s, v6.4s, v5.s[1] // {t1,t2,t5,t6} fmla v27.4s, v7.4s, v5.s[0] // {t1a,t2a,t5a,t6a} ld1 {v17.4s},[x1] // {z[o1],z[o1+1]} subs x6, x6, #1 // n-- zip1 v20.4s, v26.4s, v27.4s zip2 v21.4s, v26.4s, v27.4s fneg v22.4s, v20.4s fadd v4.4s, v21.4s, v20.4s fsub v6.4s, v20.4s, v21.4s // just the second half fadd v5.4s, v21.4s, v22.4s // just the first half tbl v4.16b, {v4.16b}, v30.16b // trans4_float tbl v5.16b, {v5.16b,v6.16b}, v31.16b // trans8_float fadd v20.4s, v16.4s, v4.4s fsub v22.4s, v16.4s, v4.4s fadd v21.4s, v17.4s, v5.4s st1 {v20.4s}, [x0], #16 // {z[0], z[1]} fsub v23.4s, v17.4s, v5.4s st1 {v21.4s}, [x1], #16 // {z[o1],z[o1+1]} st1 {v22.4s}, [x2], #16 // {z[o2],z[o2+1]} st1 {v23.4s}, [x3], #16 // {z[o3],z[o3+1]} b.ne 1b ret endfunc .macro def_fft n, n2, n4 function fft\n\()_neon, align=6 sub sp, sp, #16 stp x28, x30, [sp] add x28, x0, #\n4*2*8 bl fft\n2\()_neon mov x0, x28 bl fft\n4\()_neon add x0, x28, #\n4*1*8 bl fft\n4\()_neon sub x0, x28, #\n4*2*8 ldp x28, x30, [sp], #16 movrel x4, X(ff_cos_\n) mov x2, #\n4>>1 b fft_pass_neon endfunc .endm def_fft 32, 16, 8 def_fft 64, 32, 16 def_fft 128, 64, 32 def_fft 256, 128, 64 def_fft 512, 256, 128 def_fft 1024, 512, 256 def_fft 2048, 1024, 512 def_fft 4096, 2048, 1024 def_fft 8192, 4096, 2048 def_fft 16384, 8192, 4096 def_fft 32768, 16384, 8192 def_fft 65536, 32768, 16384 function ff_fft_calc_neon, export=1 prfm pldl1keep, [x1] movrel x10, trans4_float ldr w2, [x0] movrel x11, trans8_float sub w2, w2, #2 movrel x3, fft_tab_neon ld1 {v30.16b}, [x10] mov x7, #-8 movrel x12, pmmp ldr x3, [x3, x2, lsl #3] movrel x13, mppm movrel x14, X(ff_cos_16) ld1 {v31.16b}, [x11] mov x0, x1 ld1 {v29.4s}, [x12] // pmmp ld1 {v28.4s}, [x13] br x3 endfunc function ff_fft_permute_neon, export=1 mov x6, #1 ldr w2, [x0] // 
nbits ldr x3, [x0, #16] // tmp_buf ldr x0, [x0, #8] // revtab lsl x6, x6, x2 mov x2, x6 1: ld1 {v0.2s,v1.2s}, [x1], #16 ldr w4, [x0], #4 uxth w5, w4 lsr w4, w4, #16 add x5, x3, x5, lsl #3 add x4, x3, x4, lsl #3 st1 {v0.2s}, [x5] st1 {v1.2s}, [x4] subs x6, x6, #2 b.gt 1b sub x1, x1, x2, lsl #3 1: ld1 {v0.4s,v1.4s}, [x3], #32 st1 {v0.4s,v1.4s}, [x1], #32 subs x2, x2, #4 b.gt 1b ret endfunc const fft_tab_neon, relocate=1 .quad fft4_neon .quad fft8_neon .quad fft16_neon .quad fft32_neon .quad fft64_neon .quad fft128_neon .quad fft256_neon .quad fft512_neon .quad fft1024_neon .quad fft2048_neon .quad fft4096_neon .quad fft8192_neon .quad fft16384_neon .quad fft32768_neon .quad fft65536_neon endconst const pmmp, align=4 .float +1.0, -1.0, -1.0, +1.0 endconst const mppm, align=4 .float -M_SQRT1_2, M_SQRT1_2, M_SQRT1_2, -M_SQRT1_2 endconst
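fft4_neon above is the base case of the recursion; written scalar, the 4-point transform is two butterflies plus a twiddle by -i. A sketch using C99 complex for clarity (the NEON code instead keeps re/im packed in .2s lanes and fuses the butterflies):

#include <complex.h>

static void fft4(float complex z[4])
{
    float complex a = z[0] + z[2];        /* even-index butterfly */
    float complex b = z[0] - z[2];
    float complex c = z[1] + z[3];        /* odd-index butterfly  */
    float complex d = (z[1] - z[3]) * -I; /* twiddle by -i        */

    z[0] = a + c;                         /* recombine            */
    z[1] = b + d;
    z[2] = a - c;
    z[3] = b - d;
}

The larger sizes follow the split-radix pattern set up by def_fft: one transform of size n/2 plus two of size n/4, recombined by fft_pass_neon with the ff_cos_* twiddle tables.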
Akagi201/ffmpeg-xcode
3,220
ffmpeg-3.0.2/libavcodec/aarch64/vorbisdsp_neon.S
/* * Copyright (c) 2008 Mans Rullgard <mans@mansr.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/aarch64/asm.S" function ff_vorbis_inverse_coupling_neon, export=1 movi v20.4s, #1<<7, lsl #24 subs x2, x2, #4 mov x3, x0 mov x4, x1 b.eq 3f ld1 {v7.4s}, [x1], #16 ld1 {v6.4s}, [x0], #16 cmle v4.4s, v7.4s, #0 and v5.16b, v6.16b, v20.16b eor v7.16b, v7.16b, v5.16b and v2.16b, v7.16b, v4.16b bic v3.16b, v7.16b, v4.16b fadd v7.4s, v6.4s, v2.4s fsub v6.4s, v6.4s, v3.4s 1: ld1 {v1.4s}, [x1], #16 ld1 {v0.4s}, [x0], #16 cmle v4.4s, v1.4s, #0 and v5.16b, v0.16b, v20.16b eor v1.16b, v1.16b, v5.16b st1 {v7.4s}, [x3], #16 st1 {v6.4s}, [x4], #16 and v2.16b, v1.16b, v4.16b bic v3.16b, v1.16b, v4.16b fadd v1.4s, v0.4s, v2.4s fsub v0.4s, v0.4s, v3.4s subs x2, x2, #8 b.le 2f ld1 {v7.4s}, [x1], #16 ld1 {v6.4s}, [x0], #16 cmle v4.4s, v7.4s, #0 and v5.16b, v6.16b, v20.16b eor v7.16b, v7.16b, v5.16b st1 {v1.4s}, [x3], #16 st1 {v0.4s}, [x4], #16 and v2.16b, v7.16b, v4.16b bic v3.16b, v7.16b, v4.16b fadd v7.4s, v6.4s, v2.4s fsub v6.4s, v6.4s, v3.4s b 1b 2: st1 {v1.4s}, [x3], #16 st1 {v0.4s}, [x4], #16 b.lt ret 3: ld1 {v1.4s}, [x1] ld1 {v0.4s}, [x0] cmle v4.4s, v1.4s, #0 and v5.16b, v0.16b, v20.16b eor v1.16b, v1.16b, v5.16b and v2.16b, v1.16b, v4.16b bic v3.16b, v1.16b, v4.16b fadd v1.4s, v0.4s, v2.4s fsub v0.4s, v0.4s, v3.4s st1 {v1.4s}, [x0], #16 st1 {v0.4s}, [x1], #16 ret: ret endfunc
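The branch-free sequence above (cmle for the comparison masks, and/eor/bic against the v20 constant) vectorizes a scalar magnitude/angle coupling step that in C is essentially:

#include <stdint.h>

static void vorbis_inverse_coupling_c(float *mag, float *ang, intptr_t blocksize)
{
    for (intptr_t i = 0; i < blocksize; i++) {
        if (mag[i] > 0.0f) {
            if (ang[i] > 0.0f) {
                ang[i] = mag[i] - ang[i];
            } else {
                float temp = ang[i];
                ang[i]     = mag[i];
                mag[i]    += temp;
            }
        } else {
            if (ang[i] > 0.0f) {
                ang[i] += mag[i];
            } else {
                float temp = ang[i];
                ang[i]     = mag[i];
                mag[i]    -= temp;
            }
        }
    }
}

The movi v20.4s, #1<<7, lsl #24 constant is the float sign-bit mask (0x80000000 per lane) used to transfer mag's sign onto ang, so all four branch combinations collapse into one masked add/subtract per vector.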
Akagi201/ffmpeg-xcode
13,962
ffmpeg-3.0.2/libavcodec/aarch64/hpeldsp_neon.S
/* * ARM NEON optimised DSP functions * Copyright (c) 2008 Mans Rullgard <mans@mansr.com> * Copyright (c) 2013 Janne Grunau <janne-libav@jannau.net> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/aarch64/asm.S" .macro pixels16 rnd=1, avg=0 .if \avg mov x12, x0 .endif 1: ld1 {v0.16B}, [x1], x2 ld1 {v1.16B}, [x1], x2 ld1 {v2.16B}, [x1], x2 ld1 {v3.16B}, [x1], x2 .if \avg ld1 {v4.16B}, [x12], x2 urhadd v0.16B, v0.16B, v4.16B ld1 {v5.16B}, [x12], x2 urhadd v1.16B, v1.16B, v5.16B ld1 {v6.16B}, [x12], x2 urhadd v2.16B, v2.16B, v6.16B ld1 {v7.16B}, [x12], x2 urhadd v3.16B, v3.16B, v7.16B .endif subs w3, w3, #4 st1 {v0.16B}, [x0], x2 st1 {v1.16B}, [x0], x2 st1 {v2.16B}, [x0], x2 st1 {v3.16B}, [x0], x2 b.ne 1b ret .endm .macro pixels16_x2 rnd=1, avg=0 1: ld1 {v0.16B, v1.16B}, [x1], x2 ld1 {v2.16B, v3.16B}, [x1], x2 subs w3, w3, #2 ext v1.16B, v0.16B, v1.16B, #1 avg v0.16B, v0.16B, v1.16B ext v3.16B, v2.16B, v3.16B, #1 avg v2.16B, v2.16B, v3.16B .if \avg ld1 {v1.16B}, [x0], x2 ld1 {v3.16B}, [x0] urhadd v0.16B, v0.16B, v1.16B urhadd v2.16B, v2.16B, v3.16B sub x0, x0, x2 .endif st1 {v0.16B}, [x0], x2 st1 {v2.16B}, [x0], x2 b.ne 1b ret .endm .macro pixels16_y2 rnd=1, avg=0 sub w3, w3, #2 ld1 {v0.16B}, [x1], x2 ld1 {v1.16B}, [x1], x2 1: subs w3, w3, #2 avg v2.16B, v0.16B, v1.16B ld1 {v0.16B}, [x1], x2 avg v3.16B, v0.16B, v1.16B ld1 {v1.16B}, [x1], x2 .if \avg ld1 {v4.16B}, [x0], x2 ld1 {v5.16B}, [x0] urhadd v2.16B, v2.16B, v4.16B urhadd v3.16B, v3.16B, v5.16B sub x0, x0, x2 .endif st1 {v2.16B}, [x0], x2 st1 {v3.16B}, [x0], x2 b.ne 1b avg v2.16B, v0.16B, v1.16B ld1 {v0.16B}, [x1], x2 avg v3.16B, v0.16B, v1.16B .if \avg ld1 {v4.16B}, [x0], x2 ld1 {v5.16B}, [x0] urhadd v2.16B, v2.16B, v4.16B urhadd v3.16B, v3.16B, v5.16B sub x0, x0, x2 .endif st1 {v2.16B}, [x0], x2 st1 {v3.16B}, [x0], x2 ret .endm .macro pixels16_xy2 rnd=1, avg=0 sub w3, w3, #2 ld1 {v0.16B, v1.16B}, [x1], x2 ld1 {v4.16B, v5.16B}, [x1], x2 NRND movi v26.8H, #1 ext v1.16B, v0.16B, v1.16B, #1 ext v5.16B, v4.16B, v5.16B, #1 uaddl v16.8H, v0.8B, v1.8B uaddl2 v20.8H, v0.16B, v1.16B uaddl v18.8H, v4.8B, v5.8B uaddl2 v22.8H, v4.16B, v5.16B 1: subs w3, w3, #2 ld1 {v0.16B, v1.16B}, [x1], x2 add v24.8H, v16.8H, v18.8H NRND add v24.8H, v24.8H, v26.8H ext v30.16B, v0.16B, v1.16B, #1 add v1.8H, v20.8H, v22.8H mshrn v28.8B, v24.8H, #2 NRND add v1.8H, v1.8H, v26.8H mshrn2 v28.16B, v1.8H, #2 .if \avg ld1 {v16.16B}, [x0] urhadd v28.16B, v28.16B, v16.16B .endif uaddl v16.8H, v0.8B, v30.8B ld1 {v2.16B, v3.16B}, [x1], x2 uaddl2 v20.8H, v0.16B, v30.16B st1 {v28.16B}, [x0], x2 add v24.8H, v16.8H, v18.8H NRND add v24.8H, v24.8H, v26.8H ext v3.16B, v2.16B, v3.16B, #1 add v0.8H, v20.8H, v22.8H mshrn v30.8B, v24.8H, #2 NRND add v0.8H, v0.8H, v26.8H mshrn2 v30.16B, v0.8H, #2 .if \avg ld1 {v18.16B}, [x0] urhadd v30.16B, v30.16B, v18.16B .endif uaddl v18.8H, v2.8B, 
v3.8B uaddl2 v22.8H, v2.16B, v3.16B st1 {v30.16B}, [x0], x2 b.gt 1b ld1 {v0.16B, v1.16B}, [x1], x2 add v24.8H, v16.8H, v18.8H NRND add v24.8H, v24.8H, v26.8H ext v30.16B, v0.16B, v1.16B, #1 add v1.8H, v20.8H, v22.8H mshrn v28.8B, v24.8H, #2 NRND add v1.8H, v1.8H, v26.8H mshrn2 v28.16B, v1.8H, #2 .if \avg ld1 {v16.16B}, [x0] urhadd v28.16B, v28.16B, v16.16B .endif uaddl v16.8H, v0.8B, v30.8B uaddl2 v20.8H, v0.16B, v30.16B st1 {v28.16B}, [x0], x2 add v24.8H, v16.8H, v18.8H NRND add v24.8H, v24.8H, v26.8H add v0.8H, v20.8H, v22.8H mshrn v30.8B, v24.8H, #2 NRND add v0.8H, v0.8H, v26.8H mshrn2 v30.16B, v0.8H, #2 .if \avg ld1 {v18.16B}, [x0] urhadd v30.16B, v30.16B, v18.16B .endif st1 {v30.16B}, [x0], x2 ret .endm .macro pixels8 rnd=1, avg=0 1: ld1 {v0.8B}, [x1], x2 ld1 {v1.8B}, [x1], x2 ld1 {v2.8B}, [x1], x2 ld1 {v3.8B}, [x1], x2 .if \avg ld1 {v4.8B}, [x0], x2 urhadd v0.8B, v0.8B, v4.8B ld1 {v5.8B}, [x0], x2 urhadd v1.8B, v1.8B, v5.8B ld1 {v6.8B}, [x0], x2 urhadd v2.8B, v2.8B, v6.8B ld1 {v7.8B}, [x0], x2 urhadd v3.8B, v3.8B, v7.8B sub x0, x0, x2, lsl #2 .endif subs w3, w3, #4 st1 {v0.8B}, [x0], x2 st1 {v1.8B}, [x0], x2 st1 {v2.8B}, [x0], x2 st1 {v3.8B}, [x0], x2 b.ne 1b ret .endm .macro pixels8_x2 rnd=1, avg=0 1: ld1 {v0.8B, v1.8B}, [x1], x2 ext v1.8B, v0.8B, v1.8B, #1 ld1 {v2.8B, v3.8B}, [x1], x2 ext v3.8B, v2.8B, v3.8B, #1 subs w3, w3, #2 avg v0.8B, v0.8B, v1.8B avg v2.8B, v2.8B, v3.8B .if \avg ld1 {v4.8B}, [x0], x2 ld1 {v5.8B}, [x0] urhadd v0.8B, v0.8B, v4.8B urhadd v2.8B, v2.8B, v5.8B sub x0, x0, x2 .endif st1 {v0.8B}, [x0], x2 st1 {v2.8B}, [x0], x2 b.ne 1b ret .endm .macro pixels8_y2 rnd=1, avg=0 sub w3, w3, #2 ld1 {v0.8B}, [x1], x2 ld1 {v1.8B}, [x1], x2 1: subs w3, w3, #2 avg v4.8B, v0.8B, v1.8B ld1 {v0.8B}, [x1], x2 avg v5.8B, v0.8B, v1.8B ld1 {v1.8B}, [x1], x2 .if \avg ld1 {v2.8B}, [x0], x2 ld1 {v3.8B}, [x0] urhadd v4.8B, v4.8B, v2.8B urhadd v5.8B, v5.8B, v3.8B sub x0, x0, x2 .endif st1 {v4.8B}, [x0], x2 st1 {v5.8B}, [x0], x2 b.ne 1b avg v4.8B, v0.8B, v1.8B ld1 {v0.8B}, [x1], x2 avg v5.8B, v0.8B, v1.8B .if \avg ld1 {v2.8B}, [x0], x2 ld1 {v3.8B}, [x0] urhadd v4.8B, v4.8B, v2.8B urhadd v5.8B, v5.8B, v3.8B sub x0, x0, x2 .endif st1 {v4.8B}, [x0], x2 st1 {v5.8B}, [x0], x2 ret .endm .macro pixels8_xy2 rnd=1, avg=0 sub w3, w3, #2 ld1 {v0.16B}, [x1], x2 ld1 {v1.16B}, [x1], x2 NRND movi v19.8H, #1 ext v4.16B, v0.16B, v4.16B, #1 ext v6.16B, v1.16B, v6.16B, #1 uaddl v16.8H, v0.8B, v4.8B uaddl v17.8H, v1.8B, v6.8B 1: subs w3, w3, #2 ld1 {v0.16B}, [x1], x2 add v18.8H, v16.8H, v17.8H ext v4.16B, v0.16B, v4.16B, #1 NRND add v18.8H, v18.8H, v19.8H uaddl v16.8H, v0.8B, v4.8B mshrn v5.8B, v18.8H, #2 ld1 {v1.16B}, [x1], x2 add v18.8H, v16.8H, v17.8H .if \avg ld1 {v7.8B}, [x0] urhadd v5.8B, v5.8B, v7.8B .endif NRND add v18.8H, v18.8H, v19.8H st1 {v5.8B}, [x0], x2 mshrn v7.8B, v18.8H, #2 .if \avg ld1 {v5.8B}, [x0] urhadd v7.8B, v7.8B, v5.8B .endif ext v6.16B, v1.16B, v6.16B, #1 uaddl v17.8H, v1.8B, v6.8B st1 {v7.8B}, [x0], x2 b.gt 1b ld1 {v0.16B}, [x1], x2 add v18.8H, v16.8H, v17.8H ext v4.16B, v0.16B, v4.16B, #1 NRND add v18.8H, v18.8H, v19.8H uaddl v16.8H, v0.8B, v4.8B mshrn v5.8B, v18.8H, #2 add v18.8H, v16.8H, v17.8H .if \avg ld1 {v7.8B}, [x0] urhadd v5.8B, v5.8B, v7.8B .endif NRND add v18.8H, v18.8H, v19.8H st1 {v5.8B}, [x0], x2 mshrn v7.8B, v18.8H, #2 .if \avg ld1 {v5.8B}, [x0] urhadd v7.8B, v7.8B, v5.8B .endif st1 {v7.8B}, [x0], x2 ret .endm .macro pixfunc pfx, name, suf, rnd=1, avg=0 .if \rnd .macro avg rd, rn, rm urhadd \rd, \rn, \rm .endm .macro mshrn rd, rn, rm rshrn \rd, \rn, \rm .endm .macro 
mshrn2 rd, rn, rm rshrn2 \rd, \rn, \rm .endm .macro NRND insn:vararg .endm .else .macro avg rd, rn, rm uhadd \rd, \rn, \rm .endm .macro mshrn rd, rn, rm shrn \rd, \rn, \rm .endm .macro mshrn2 rd, rn, rm shrn2 \rd, \rn, \rm .endm .macro NRND insn:vararg \insn .endm .endif function ff_\pfx\name\suf\()_neon, export=1 \name \rnd, \avg endfunc .purgem avg .purgem mshrn .purgem mshrn2 .purgem NRND .endm .macro pixfunc2 pfx, name, avg=0 pixfunc \pfx, \name, rnd=1, avg=\avg pixfunc \pfx, \name, _no_rnd, rnd=0, avg=\avg .endm function ff_put_h264_qpel16_mc00_neon, export=1 mov w3, #16 endfunc pixfunc put_, pixels16, avg=0 pixfunc2 put_, pixels16_x2, avg=0 pixfunc2 put_, pixels16_y2, avg=0 pixfunc2 put_, pixels16_xy2, avg=0 function ff_avg_h264_qpel16_mc00_neon, export=1 mov w3, #16 endfunc pixfunc avg_, pixels16, avg=1 pixfunc2 avg_, pixels16_x2, avg=1 pixfunc2 avg_, pixels16_y2, avg=1 pixfunc2 avg_, pixels16_xy2, avg=1 function ff_put_h264_qpel8_mc00_neon, export=1 mov w3, #8 endfunc pixfunc put_, pixels8, avg=0 pixfunc2 put_, pixels8_x2, avg=0 pixfunc2 put_, pixels8_y2, avg=0 pixfunc2 put_, pixels8_xy2, avg=0 function ff_avg_h264_qpel8_mc00_neon, export=1 mov w3, #8 endfunc pixfunc avg_, pixels8, avg=1 pixfunc avg_, pixels8_x2, avg=1 pixfunc avg_, pixels8_y2, avg=1 pixfunc avg_, pixels8_xy2, avg=1
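Every pixelsN{_x2,_y2,_xy2} variant above is a half-pel averaging kernel; the rnd parameter selects the rounding average (urhadd, (a+b+1)>>1) or the truncating one (uhadd, (a+b)>>1) through the avg/mshrn/NRND helper macros. The scalar shape of the horizontal case, as a sketch with illustrative names:

#include <stdint.h>
#include <stddef.h>

static void put_pixels16_x2_c(uint8_t *dst, const uint8_t *src,
                              ptrdiff_t stride, int h, int rnd)
{
    for (int y = 0; y < h; y++) {
        for (int x = 0; x < 16; x++)
            dst[x] = (src[x] + src[x + 1] + rnd) >> 1;  /* rnd is 1 or 0 */
        dst += stride;
        src += stride;
    }
}

The _y2 variants average vertically adjacent rows instead, and _xy2 sums all four neighbours into 16-bit lanes (uaddl) before narrowing with a 2-bit shift (rshrn/shrn), which is why those paths carry the explicit #1 bias register in the no-rounding case.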
Akagi201/ffmpeg-xcode
12,668
ffmpeg-3.0.2/libavcodec/ppc/fft_altivec.S
/* * FFT transform with Altivec optimizations * Copyright (c) 2009 Loren Merritt * * This algorithm (though not any of the implementation details) is * based on libdjbfft by D. J. Bernstein. * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /* * These functions are not individually interchangeable with the C versions. * While C takes arrays of FFTComplex, Altivec leaves intermediate results * in blocks as convenient to the vector size. * i.e. {4x real, 4x imaginary, 4x real, ...} * * I ignore standard calling convention. * Instead, the following registers are treated as global constants: * v14: zero * v15..v18: cosines * v19..v29: permutations * r9: 16 * r12: ff_cos_tabs * and the rest are free for local use. */ #include "config.h" #if HAVE_GNU_AS && HAVE_ALTIVEC #include "asm.S" .text .macro addi2 ra, imm // add 32-bit immediate .if \imm & 0xffff addi \ra, \ra, \imm@l .endif .if (\imm+0x8000)>>16 addis \ra, \ra, \imm@ha .endif .endm .macro FFT4 a0, a1, a2, a3 // in:0-1 out:2-3 vperm \a2,\a0,\a1,v20 // vcprm(0,1,s2,s1) // {r0,i0,r3,i2} vperm \a3,\a0,\a1,v21 // vcprm(2,3,s0,s3) // {r1,i1,r2,i3} vaddfp \a0,\a2,\a3 // {t1,t2,t6,t5} vsubfp \a1,\a2,\a3 // {t3,t4,t8,t7} vmrghw \a2,\a0,\a1 // vcprm(0,s0,1,s1) // {t1,t3,t2,t4} vperm \a3,\a0,\a1,v22 // vcprm(2,s3,3,s2) // {t6,t7,t5,t8} vaddfp \a0,\a2,\a3 // {r0,r1,i0,i1} vsubfp \a1,\a2,\a3 // {r2,r3,i2,i3} vperm \a2,\a0,\a1,v23 // vcprm(0,1,s0,s1) // {r0,r1,r2,r3} vperm \a3,\a0,\a1,v24 // vcprm(2,3,s2,s3) // {i0,i1,i2,i3} .endm .macro FFT4x2 a0, a1, b0, b1, a2, a3, b2, b3 vperm \a2,\a0,\a1,v20 // vcprm(0,1,s2,s1) // {r0,i0,r3,i2} vperm \a3,\a0,\a1,v21 // vcprm(2,3,s0,s3) // {r1,i1,r2,i3} vperm \b2,\b0,\b1,v20 vperm \b3,\b0,\b1,v21 vaddfp \a0,\a2,\a3 // {t1,t2,t6,t5} vsubfp \a1,\a2,\a3 // {t3,t4,t8,t7} vaddfp \b0,\b2,\b3 vsubfp \b1,\b2,\b3 vmrghw \a2,\a0,\a1 // vcprm(0,s0,1,s1) // {t1,t3,t2,t4} vperm \a3,\a0,\a1,v22 // vcprm(2,s3,3,s2) // {t6,t7,t5,t8} vmrghw \b2,\b0,\b1 vperm \b3,\b0,\b1,v22 vaddfp \a0,\a2,\a3 // {r0,r1,i0,i1} vsubfp \a1,\a2,\a3 // {r2,r3,i2,i3} vaddfp \b0,\b2,\b3 vsubfp \b1,\b2,\b3 vperm \a2,\a0,\a1,v23 // vcprm(0,1,s0,s1) // {r0,r1,r2,r3} vperm \a3,\a0,\a1,v24 // vcprm(2,3,s2,s3) // {i0,i1,i2,i3} vperm \b2,\b0,\b1,v23 vperm \b3,\b0,\b1,v24 .endm .macro FFT8 a0, a1, b0, b1, a2, a3, b2, b3, b4 // in,out:a0-b1 vmrghw \b2,\b0,\b1 // vcprm(0,s0,1,s1) // {r4,r6,i4,i6} vmrglw \b3,\b0,\b1 // vcprm(2,s2,3,s3) // {r5,r7,i5,i7} vperm \a2,\a0,\a1,v20 // FFT4 ... 
vperm \a3,\a0,\a1,v21 vaddfp \b0,\b2,\b3 // {t1,t3,t2,t4} vsubfp \b1,\b2,\b3 // {r5,r7,i5,i7} vperm \b4,\b1,\b1,v25 // vcprm(2,3,0,1) // {i5,i7,r5,r7} vaddfp \a0,\a2,\a3 vsubfp \a1,\a2,\a3 vmaddfp \b1,\b1,v17,v14 // * {-1,1,1,-1}/sqrt(2) vmaddfp \b1,\b4,v18,\b1 // * { 1,1,1,1 }/sqrt(2) // {t8,ta,t7,t9} vmrghw \a2,\a0,\a1 vperm \a3,\a0,\a1,v22 vperm \b2,\b0,\b1,v26 // vcprm(1,2,s3,s0) // {t3,t2,t9,t8} vperm \b3,\b0,\b1,v27 // vcprm(0,3,s2,s1) // {t1,t4,t7,ta} vaddfp \a0,\a2,\a3 vsubfp \a1,\a2,\a3 vaddfp \b0,\b2,\b3 // {t1,t2,t9,ta} vsubfp \b1,\b2,\b3 // {t6,t5,tc,tb} vperm \a2,\a0,\a1,v23 vperm \a3,\a0,\a1,v24 vperm \b2,\b0,\b1,v28 // vcprm(0,2,s1,s3) // {t1,t9,t5,tb} vperm \b3,\b0,\b1,v29 // vcprm(1,3,s0,s2) // {t2,ta,t6,tc} vsubfp \b0,\a2,\b2 // {r4,r5,r6,r7} vsubfp \b1,\a3,\b3 // {i4,i5,i6,i7} vaddfp \a0,\a2,\b2 // {r0,r1,r2,r3} vaddfp \a1,\a3,\b3 // {i0,i1,i2,i3} .endm .macro BF d0,d1,s0,s1 vsubfp \d1,\s0,\s1 vaddfp \d0,\s0,\s1 .endm .macro zip d0,d1,s0,s1 vmrghw \d0,\s0,\s1 vmrglw \d1,\s0,\s1 .endm .macro def_fft4 interleave fft4\interleave\()_altivec: lvx v0, 0,r3 lvx v1,r9,r3 FFT4 v0,v1,v2,v3 .ifnb \interleave zip v0,v1,v2,v3 stvx v0, 0,r3 stvx v1,r9,r3 .else stvx v2, 0,r3 stvx v3,r9,r3 .endif blr .endm .macro def_fft8 interleave fft8\interleave\()_altivec: addi r4,r3,32 lvx v0, 0,r3 lvx v1,r9,r3 lvx v2, 0,r4 lvx v3,r9,r4 FFT8 v0,v1,v2,v3,v4,v5,v6,v7,v8 .ifnb \interleave zip v4,v5,v0,v1 zip v6,v7,v2,v3 stvx v4, 0,r3 stvx v5,r9,r3 stvx v6, 0,r4 stvx v7,r9,r4 .else stvx v0, 0,r3 stvx v1,r9,r3 stvx v2, 0,r4 stvx v3,r9,r4 .endif blr .endm .macro def_fft16 interleave fft16\interleave\()_altivec: addi r5,r3,64 addi r6,r3,96 addi r4,r3,32 lvx v0, 0,r5 lvx v1,r9,r5 lvx v2, 0,r6 lvx v3,r9,r6 FFT4x2 v0,v1,v2,v3,v4,v5,v6,v7 lvx v0, 0,r3 lvx v1,r9,r3 lvx v2, 0,r4 lvx v3,r9,r4 FFT8 v0,v1,v2,v3,v8,v9,v10,v11,v12 vmaddfp v8,v4,v15,v14 // r2*wre vmaddfp v9,v5,v15,v14 // i2*wre vmaddfp v10,v6,v15,v14 // r3*wre vmaddfp v11,v7,v15,v14 // i3*wre vmaddfp v8,v5,v16,v8 // i2*wim vnmsubfp v9,v4,v16,v9 // r2*wim vnmsubfp v10,v7,v16,v10 // i3*wim vmaddfp v11,v6,v16,v11 // r3*wim BF v10,v12,v10,v8 BF v11,v13,v9,v11 BF v0,v4,v0,v10 BF v3,v7,v3,v12 BF v1,v5,v1,v11 BF v2,v6,v2,v13 .ifnb \interleave zip v8, v9,v0,v1 zip v10,v11,v2,v3 zip v12,v13,v4,v5 zip v14,v15,v6,v7 stvx v8, 0,r3 stvx v9,r9,r3 stvx v10, 0,r4 stvx v11,r9,r4 stvx v12, 0,r5 stvx v13,r9,r5 stvx v14, 0,r6 stvx v15,r9,r6 .else stvx v0, 0,r3 stvx v4, 0,r5 stvx v3,r9,r4 stvx v7,r9,r6 stvx v1,r9,r3 stvx v5,r9,r5 stvx v2, 0,r4 stvx v6, 0,r6 .endif blr .endm // void pass(float *z, float *wre, int n) .macro PASS interleave, suffix fft_pass\suffix\()_altivec: mtctr r5 slwi r0,r5,4 slwi r7,r5,6 // o2 slwi r5,r5,5 // o1 add r10,r5,r7 // o3 add r0,r4,r0 // wim addi r6,r5,16 // o1+16 addi r8,r7,16 // o2+16 addi r11,r10,16 // o3+16 1: lvx v8, 0,r4 // wre lvx v10, 0,r0 // wim sub r0,r0,r9 lvx v9, 0,r0 vperm v9,v9,v10,v19 // vcprm(s0,3,2,1) => wim[0 .. 
-3] lvx v4,r3,r7 // r2 = z[o2] lvx v5,r3,r8 // i2 = z[o2+16] lvx v6,r3,r10 // r3 = z[o3] lvx v7,r3,r11 // i3 = z[o3+16] vmaddfp v10,v4,v8,v14 // r2*wre vmaddfp v11,v5,v8,v14 // i2*wre vmaddfp v12,v6,v8,v14 // r3*wre vmaddfp v13,v7,v8,v14 // i3*wre lvx v0, 0,r3 // r0 = z[0] lvx v3,r3,r6 // i1 = z[o1+16] vmaddfp v10,v5,v9,v10 // i2*wim vnmsubfp v11,v4,v9,v11 // r2*wim vnmsubfp v12,v7,v9,v12 // i3*wim vmaddfp v13,v6,v9,v13 // r3*wim lvx v1,r3,r9 // i0 = z[16] lvx v2,r3,r5 // r1 = z[o1] BF v12,v8,v12,v10 BF v13,v9,v11,v13 BF v0,v4,v0,v12 BF v3,v7,v3,v8 .if !\interleave stvx v0, 0,r3 stvx v4,r3,r7 stvx v3,r3,r6 stvx v7,r3,r11 .endif BF v1,v5,v1,v13 BF v2,v6,v2,v9 .if !\interleave stvx v1,r3,r9 stvx v2,r3,r5 stvx v5,r3,r8 stvx v6,r3,r10 .else vmrghw v8,v0,v1 vmrglw v9,v0,v1 stvx v8, 0,r3 stvx v9,r3,r9 vmrghw v8,v2,v3 vmrglw v9,v2,v3 stvx v8,r3,r5 stvx v9,r3,r6 vmrghw v8,v4,v5 vmrglw v9,v4,v5 stvx v8,r3,r7 stvx v9,r3,r8 vmrghw v8,v6,v7 vmrglw v9,v6,v7 stvx v8,r3,r10 stvx v9,r3,r11 .endif addi r3,r3,32 addi r4,r4,16 bdnz 1b sub r3,r3,r5 blr .endm #define M_SQRT1_2 0.70710678118654752440 /* 1/sqrt(2) */ #define WORD_0 0x00,0x01,0x02,0x03 #define WORD_1 0x04,0x05,0x06,0x07 #define WORD_2 0x08,0x09,0x0a,0x0b #define WORD_3 0x0c,0x0d,0x0e,0x0f #define WORD_s0 0x10,0x11,0x12,0x13 #define WORD_s1 0x14,0x15,0x16,0x17 #define WORD_s2 0x18,0x19,0x1a,0x1b #define WORD_s3 0x1c,0x1d,0x1e,0x1f #define vcprm(a, b, c, d) .byte WORD_##a, WORD_##b, WORD_##c, WORD_##d .rodata .align 4 fft_data: .float 0, 0, 0, 0 .float 1, 0.92387953, M_SQRT1_2, 0.38268343 .float 0, 0.38268343, M_SQRT1_2, 0.92387953 .float -M_SQRT1_2, M_SQRT1_2, M_SQRT1_2,-M_SQRT1_2 .float M_SQRT1_2, M_SQRT1_2, M_SQRT1_2, M_SQRT1_2 vcprm(s0,3,2,1) vcprm(0,1,s2,s1) vcprm(2,3,s0,s3) vcprm(2,s3,3,s2) vcprm(0,1,s0,s1) vcprm(2,3,s2,s3) vcprm(2,3,0,1) vcprm(1,2,s3,s0) vcprm(0,3,s2,s1) vcprm(0,2,s1,s3) vcprm(1,3,s0,s2) .macro lvm b, r, regs:vararg lvx \r, 0, \b addi \b, \b, 16 .ifnb \regs lvm \b, \regs .endif .endm .macro stvm b, r, regs:vararg stvx \r, 0, \b addi \b, \b, 16 .ifnb \regs stvm \b, \regs .endif .endm .macro fft_calc interleave extfunc ff_fft_calc\interleave\()_altivec mflr r0 stp r0, 2*PS(r1) stpu r1, -(160+16*PS)(r1) get_got r11 addi r6, r1, 16*PS stvm r6, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29 mfvrsave r0 stw r0, 15*PS(r1) li r6, 0xfffffffc mtvrsave r6 movrel r6, fft_data, r11 lvm r6, v14, v15, v16, v17, v18, v19, v20, v21 lvm r6, v22, v23, v24, v25, v26, v27, v28, v29 li r9, 16 movrel r12, X(ff_cos_tabs), r11 movrel r6, fft_dispatch_tab\interleave\()_altivec, r11 lwz r3, 0(r3) subi r3, r3, 2 slwi r3, r3, 2+ARCH_PPC64 lpx r3, r3, r6 mtctr r3 mr r3, r4 bctrl addi r6, r1, 16*PS lvm r6, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29 lwz r6, 15*PS(r1) mtvrsave r6 lp r1, 0(r1) lp r0, 2*PS(r1) mtlr r0 blr .endm .macro DECL_FFT suffix, bits, n, n2, n4 fft\n\suffix\()_altivec: mflr r0 stp r0,PS*(\bits-3)(r1) bl fft\n2\()_altivec addi2 r3,\n*4 bl fft\n4\()_altivec addi2 r3,\n*2 bl fft\n4\()_altivec addi2 r3,\n*-6 lp r0,PS*(\bits-3)(r1) lp r4,\bits*PS(r12) mtlr r0 li r5,\n/16 b fft_pass\suffix\()_altivec .endm .macro DECL_FFTS interleave, suffix .text def_fft4 \suffix def_fft8 \suffix def_fft16 \suffix PASS \interleave, \suffix DECL_FFT \suffix, 5, 32, 16, 8 DECL_FFT \suffix, 6, 64, 32, 16 DECL_FFT \suffix, 7, 128, 64, 32 DECL_FFT \suffix, 8, 256, 128, 64 DECL_FFT \suffix, 9, 512, 256, 128 DECL_FFT \suffix,10, 1024, 512, 256 DECL_FFT \suffix,11, 2048, 1024, 512 DECL_FFT \suffix,12, 4096, 2048, 1024 DECL_FFT \suffix,13, 8192, 4096, 2048 
DECL_FFT \suffix,14,16384, 8192, 4096 DECL_FFT \suffix,15,32768,16384, 8192 DECL_FFT \suffix,16,65536,32768,16384 fft_calc \suffix .rodata .align 3 fft_dispatch_tab\suffix\()_altivec: PTR fft4\suffix\()_altivec PTR fft8\suffix\()_altivec PTR fft16\suffix\()_altivec PTR fft32\suffix\()_altivec PTR fft64\suffix\()_altivec PTR fft128\suffix\()_altivec PTR fft256\suffix\()_altivec PTR fft512\suffix\()_altivec PTR fft1024\suffix\()_altivec PTR fft2048\suffix\()_altivec PTR fft4096\suffix\()_altivec PTR fft8192\suffix\()_altivec PTR fft16384\suffix\()_altivec PTR fft32768\suffix\()_altivec PTR fft65536\suffix\()_altivec .endm DECL_FFTS 0 DECL_FFTS 1, _interleave #endif /* HAVE_GNU_AS && HAVE_ALTIVEC */
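Two conventions in this file are worth restating in C terms: intermediates live in the blocked layout named in the header comment (4 reals then 4 imaginaries per group), and ff_fft_calc dispatches by table lookup on log2(n), biased by 2 because the table starts at the 4-point transform. A sketch with hypothetical type and function names:

typedef struct { float re[4], im[4]; } FFTBlock4;   /* hypothetical */

static float block_re(const FFTBlock4 *z, int i) { return z[i / 4].re[i % 4]; }
static float block_im(const FFTBlock4 *z, int i) { return z[i / 4].im[i % 4]; }

typedef void (*fft_fn)(float *z);

static void fft_calc_sketch(const fft_fn dispatch[], int nbits, float *z)
{
    dispatch[nbits - 2](z);   /* dispatch[0] is fft4, per fft_dispatch_tab */
}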
Akagi201/ffmpeg-xcode
2,671
ffmpeg-3.0.2/libavcodec/ppc/asm.S
/* * Copyright (c) 2009 Loren Merritt * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "config.h" #define GLUE(a, b) a ## b #define JOIN(a, b) GLUE(a, b) #define X(s) JOIN(EXTERN_ASM, s) #if ARCH_PPC64 #define PTR .quad #define lp ld #define lpx ldx #define stp std #define stpu stdu #define PS 8 #define L(s) JOIN(., s) .macro extfunc name .global X(\name) #if _CALL_ELF == 2 .text X(\name): addis %r2, %r12, .TOC.-X(\name)@ha addi %r2, %r2, .TOC.-X(\name)@l .localentry X(\name), .-X(\name) #else .section .opd, "aw" X(\name): .quad L(\name), .TOC.@tocbase, 0 .previous .type X(\name), STT_FUNC L(\name): #endif .endm .macro movrel rd, sym, gp ld \rd, \sym@got(r2) .endm .macro get_got rd .endm #else /* ARCH_PPC64 */ #define PTR .int #define lp lwz #define lpx lwzx #define stp stw #define stpu stwu #define PS 4 #define L(s) s .macro extfunc name .global X(\name) .type X(\name), STT_FUNC X(\name): \name: .endm .macro movrel rd, sym, gp #if CONFIG_PIC lwz \rd, \sym@got(\gp) #else lis \rd, \sym@ha la \rd, \sym@l(\rd) #endif .endm .macro get_got rd #if CONFIG_PIC bcl 20, 31, .Lgot\@ .Lgot\@: mflr \rd addis \rd, \rd, _GLOBAL_OFFSET_TABLE_ - .Lgot\@@ha addi \rd, \rd, _GLOBAL_OFFSET_TABLE_ - .Lgot\@@l #endif .endm #endif /* ARCH_PPC64 */ #if HAVE_IBM_ASM .macro DEFINE_REG n .equiv r\n, \n .equiv f\n, \n .equiv v\n, \n .endm DEFINE_REG 0 DEFINE_REG 1 DEFINE_REG 2 DEFINE_REG 3 DEFINE_REG 4 DEFINE_REG 5 DEFINE_REG 6 DEFINE_REG 7 DEFINE_REG 8 DEFINE_REG 9 DEFINE_REG 10 DEFINE_REG 11 DEFINE_REG 12 DEFINE_REG 13 DEFINE_REG 14 DEFINE_REG 15 DEFINE_REG 16 DEFINE_REG 17 DEFINE_REG 18 DEFINE_REG 19 DEFINE_REG 20 DEFINE_REG 21 DEFINE_REG 22 DEFINE_REG 23 DEFINE_REG 24 DEFINE_REG 25 DEFINE_REG 26 DEFINE_REG 27 DEFINE_REG 28 DEFINE_REG 29 DEFINE_REG 30 DEFINE_REG 31 #endif /* HAVE_IBM_ASM */
Akagi201/ffmpeg-xcode
7,879
ffmpeg-3.0.2/libavcodec/arm/synth_filter_vfp.S
/* * Copyright (c) 2013 RISC OS Open Ltd * Author: Ben Avison <bavison@riscosopen.org> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/arm/asm.S" IMDCT .req r0 ORIG_P_SB .req r1 P_SB_OFF .req r2 I .req r0 P_SB2_UP .req r1 OLDFPSCR .req r2 P_SB2_DN .req r3 P_WIN_DN .req r4 P_OUT_DN .req r5 P_SB .req r6 J_WRAP .req r7 P_WIN_UP .req r12 P_OUT_UP .req r14 SCALE .req s0 SBUF_DAT_REV0 .req s4 SBUF_DAT_REV1 .req s5 SBUF_DAT_REV2 .req s6 SBUF_DAT_REV3 .req s7 VA0 .req s8 VA3 .req s11 VB0 .req s12 VB3 .req s15 VC0 .req s8 VC3 .req s11 VD0 .req s12 VD3 .req s15 SBUF_DAT0 .req s16 SBUF_DAT1 .req s17 SBUF_DAT2 .req s18 SBUF_DAT3 .req s19 SBUF_DAT_ALT0 .req s20 SBUF_DAT_ALT1 .req s21 SBUF_DAT_ALT2 .req s22 SBUF_DAT_ALT3 .req s23 WIN_DN_DAT0 .req s24 WIN_UP_DAT0 .req s28 .macro inner_loop half, tail, head .if (OFFSET & (64*4)) == 0 @ even numbered call SBUF_DAT_THIS0 .req SBUF_DAT0 SBUF_DAT_THIS1 .req SBUF_DAT1 SBUF_DAT_THIS2 .req SBUF_DAT2 SBUF_DAT_THIS3 .req SBUF_DAT3 .ifnc "\head","" vldr d8, [P_SB, #OFFSET] @ d8 = SBUF_DAT vldr d9, [P_SB, #OFFSET+8] .endif .else SBUF_DAT_THIS0 .req SBUF_DAT_ALT0 SBUF_DAT_THIS1 .req SBUF_DAT_ALT1 SBUF_DAT_THIS2 .req SBUF_DAT_ALT2 SBUF_DAT_THIS3 .req SBUF_DAT_ALT3 .ifnc "\head","" vldr d10, [P_SB, #OFFSET] @ d10 = SBUF_DAT_ALT vldr d11, [P_SB, #OFFSET+8] .endif .endif .ifnc "\tail","" .ifc "\half","ab" vmls.f VA0, SBUF_DAT_REV0, WIN_DN_DAT0 @ all operands treated as vectors .else vmla.f VD0, SBUF_DAT_REV0, WIN_DN_DAT0 @ all operands treated as vectors .endif .endif .ifnc "\head","" vldr d14, [P_WIN_UP, #OFFSET] @ d14 = WIN_UP_DAT vldr d15, [P_WIN_UP, #OFFSET+8] vldr d12, [P_WIN_DN, #OFFSET] @ d12 = WIN_DN_DAT vldr d13, [P_WIN_DN, #OFFSET+8] vmov SBUF_DAT_REV3, SBUF_DAT_THIS0 vmov SBUF_DAT_REV2, SBUF_DAT_THIS1 vmov SBUF_DAT_REV1, SBUF_DAT_THIS2 vmov SBUF_DAT_REV0, SBUF_DAT_THIS3 .ifc "\half","ab" vmla.f VB0, SBUF_DAT_THIS0, WIN_UP_DAT0 .else vmla.f VC0, SBUF_DAT_THIS0, WIN_UP_DAT0 .endif teq J_WRAP, #J bne 2f @ strongly predictable, so better than cond exec in this case sub P_SB, P_SB, #512*4 2: .set J, J - 64 .set OFFSET, OFFSET + 64*4 .endif .unreq SBUF_DAT_THIS0 .unreq SBUF_DAT_THIS1 .unreq SBUF_DAT_THIS2 .unreq SBUF_DAT_THIS3 .endm /* void ff_synth_filter_float_vfp(FFTContext *imdct, * float *synth_buf_ptr, int *synth_buf_offset, * float synth_buf2[32], const float window[512], * float out[32], const float in[32], float scale) */ function ff_synth_filter_float_vfp, export=1 push {r3-r7,lr} vpush {s16-s31} ldr lr, [P_SB_OFF] add a2, ORIG_P_SB, lr, lsl #2 @ calculate synth_buf to pass to imdct_half mov P_SB, a2 @ and keep a copy for ourselves bic J_WRAP, lr, #63 @ mangled to make testing for wrap easier in inner loop sub lr, lr, #32 and lr, lr, #512-32 str lr, [P_SB_OFF] @ rotate offset, modulo buffer size, ready for next call ldr a3, [sp, #(16+6+2)*4] @ fetch in 
from stack, to pass to imdct_half VFP vmov s16, SCALE @ imdct_half is free to corrupt s0, but it contains one of our arguments in hardfp case bl X(ff_imdct_half_vfp) VFP vmov SCALE, s16 fmrx OLDFPSCR, FPSCR ldr lr, =0x03030000 @ RunFast mode, short vectors of length 4, stride 1 fmxr FPSCR, lr ldr P_SB2_DN, [sp, #16*4] ldr P_WIN_DN, [sp, #(16+6+0)*4] ldr P_OUT_DN, [sp, #(16+6+1)*4] NOVFP vldr SCALE, [sp, #(16+6+3)*4] #define IMM_OFF_SKEW 956 /* also valid immediate constant when you add 16*4 */ add P_SB, P_SB, #IMM_OFF_SKEW @ so we can use -ve offsets to use full immediate offset range add P_SB2_UP, P_SB2_DN, #16*4 add P_WIN_UP, P_WIN_DN, #16*4+IMM_OFF_SKEW add P_OUT_UP, P_OUT_DN, #16*4 add P_SB2_DN, P_SB2_DN, #16*4 add P_WIN_DN, P_WIN_DN, #12*4+IMM_OFF_SKEW add P_OUT_DN, P_OUT_DN, #16*4 mov I, #4 1: vldmia P_SB2_UP!, {VB0-VB3} vldmdb P_SB2_DN!, {VA0-VA3} .set J, 512 - 64 .set OFFSET, -IMM_OFF_SKEW inner_loop ab,, head .rept 7 inner_loop ab, tail, head .endr inner_loop ab, tail add P_WIN_UP, P_WIN_UP, #4*4 sub P_WIN_DN, P_WIN_DN, #4*4 vmul.f VB0, VB0, SCALE @ SCALE treated as scalar add P_SB, P_SB, #(512+4)*4 subs I, I, #1 vmul.f VA0, VA0, SCALE vstmia P_OUT_UP!, {VB0-VB3} vstmdb P_OUT_DN!, {VA0-VA3} bne 1b add P_SB2_DN, P_SB2_DN, #(16+28-12)*4 sub P_SB2_UP, P_SB2_UP, #(16+16)*4 add P_WIN_DN, P_WIN_DN, #(32+16+28-12)*4 mov I, #4 1: vldr.d d4, zero @ d4 = VC0 vldr.d d5, zero vldr.d d6, zero @ d6 = VD0 vldr.d d7, zero .set J, 512 - 64 .set OFFSET, -IMM_OFF_SKEW inner_loop cd,, head .rept 7 inner_loop cd, tail, head .endr inner_loop cd, tail add P_WIN_UP, P_WIN_UP, #4*4 sub P_WIN_DN, P_WIN_DN, #4*4 add P_SB, P_SB, #(512+4)*4 subs I, I, #1 vstmia P_SB2_UP!, {VC0-VC3} vstmdb P_SB2_DN!, {VD0-VD3} bne 1b fmxr FPSCR, OLDFPSCR vpop {s16-s31} pop {r3-r7,pc} endfunc .unreq IMDCT .unreq ORIG_P_SB .unreq P_SB_OFF .unreq I .unreq P_SB2_UP .unreq OLDFPSCR .unreq P_SB2_DN .unreq P_WIN_DN .unreq P_OUT_DN .unreq P_SB .unreq J_WRAP .unreq P_WIN_UP .unreq P_OUT_UP .unreq SCALE .unreq SBUF_DAT_REV0 .unreq SBUF_DAT_REV1 .unreq SBUF_DAT_REV2 .unreq SBUF_DAT_REV3 .unreq VA0 .unreq VA3 .unreq VB0 .unreq VB3 .unreq VC0 .unreq VC3 .unreq VD0 .unreq VD3 .unreq SBUF_DAT0 .unreq SBUF_DAT1 .unreq SBUF_DAT2 .unreq SBUF_DAT3 .unreq SBUF_DAT_ALT0 .unreq SBUF_DAT_ALT1 .unreq SBUF_DAT_ALT2 .unreq SBUF_DAT_ALT3 .unreq WIN_DN_DAT0 .unreq WIN_UP_DAT0 .align 3 zero: .word 0, 0
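One easy-to-miss detail in the prologue above is the ring-buffer bookkeeping: the sub lr, lr, #32 / and lr, lr, #512-32 pair rotates synth_buf_offset for the next call. Because the offset is always a multiple of 32, the mask is equivalent to reduction modulo the 512-sample history, i.e. this C sketch:

static void rotate_synth_offset(int *synth_buf_offset)
{
    /* 512-sample history, consumed 32 samples per call */
    *synth_buf_offset = (*synth_buf_offset - 32) & (512 - 32);
}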
Akagi201/ffmpeg-xcode
3,858
ffmpeg-3.0.2/libavcodec/arm/mpegvideo_neon.S
/* * Copyright (c) 2010 Mans Rullgard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/arm/asm.S" #include "asm-offsets.h" function ff_dct_unquantize_h263_inter_neon, export=1 add r12, r0, #BLOCK_LAST_INDEX ldr r12, [r12, r2, lsl #2] add r0, r0, #INTER_SCANTAB_RASTER_END ldrb r12, [r0, r12] sub r2, r3, #1 lsl r0, r3, #1 orr r2, r2, #1 add r3, r12, #1 endfunc function ff_dct_unquantize_h263_neon, export=1 vdup.16 q15, r0 @ qmul vdup.16 q14, r2 @ qadd vneg.s16 q13, q14 cmp r3, #4 mov r0, r1 ble 2f 1: vld1.16 {q0}, [r0,:128]! vclt.s16 q3, q0, #0 vld1.16 {q8}, [r0,:128]! vceq.s16 q1, q0, #0 vmul.s16 q2, q0, q15 vclt.s16 q11, q8, #0 vmul.s16 q10, q8, q15 vbsl q3, q13, q14 vbsl q11, q13, q14 vadd.s16 q2, q2, q3 vceq.s16 q9, q8, #0 vadd.s16 q10, q10, q11 vbif q0, q2, q1 vbif q8, q10, q9 subs r3, r3, #16 vst1.16 {q0}, [r1,:128]! vst1.16 {q8}, [r1,:128]! it le bxle lr cmp r3, #8 bgt 1b 2: vld1.16 {d0}, [r0,:64] vclt.s16 d3, d0, #0 vceq.s16 d1, d0, #0 vmul.s16 d2, d0, d30 vbsl d3, d26, d28 vadd.s16 d2, d2, d3 vbif d0, d2, d1 vst1.16 {d0}, [r1,:64] bx lr endfunc function ff_dct_unquantize_h263_intra_neon, export=1 push {r4-r6,lr} add r12, r0, #BLOCK_LAST_INDEX ldr r6, [r0, #AC_PRED] add lr, r0, #INTER_SCANTAB_RASTER_END cmp r6, #0 it ne movne r12, #63 bne 1f ldr r12, [r12, r2, lsl #2] ldrb r12, [lr, r12] 1: ldr r5, [r0, #H263_AIC] ldrsh r4, [r1] cmp r5, #0 mov r5, r1 it ne movne r2, #0 bne 2f cmp r2, #4 it ge addge r0, r0, #4 sub r2, r3, #1 ldr r6, [r0, #Y_DC_SCALE] orr r2, r2, #1 smulbb r4, r4, r6 2: lsl r0, r3, #1 add r3, r12, #1 bl X(ff_dct_unquantize_h263_neon) vmov.16 d0[0], r4 vst1.16 {d0[0]}, [r5] pop {r4-r6,pc} endfunc
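The shared tail (ff_dct_unquantize_h263_neon) maps every nonzero coefficient through level*qmul +/- qadd and leaves zeros untouched: vclt/vbsl pick +qadd or -qadd by sign, and vceq/vbif restore the zero lanes. In scalar form, with a simplified signature:

#include <stdint.h>

static void dct_unquantize_h263_c(int16_t *block, int n, int qmul, int qadd)
{
    for (int i = 0; i < n; i++) {
        int level = block[i];
        if (level) {
            if (level < 0)
                level = level * qmul - qadd;
            else
                level = level * qmul + qadd;
            block[i] = level;
        }
    }
}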
Akagi201/ffmpeg-xcode
2,250
ffmpeg-3.0.2/libavcodec/arm/audiodsp_neon.S
/*
 * ARM NEON optimised audio functions
 * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"

function ff_vector_clipf_neon, export=1
VFP     vdup.32         q1,  d0[1]
VFP     vdup.32         q0,  d0[0]
NOVFP   vdup.32         q0,  r2
NOVFP   vdup.32         q1,  r3
NOVFP   ldr             r2,  [sp]
        vld1.f32        {q2},[r1,:128]!
        vmin.f32        q10, q2,  q1
        vld1.f32        {q3},[r1,:128]!
        vmin.f32        q11, q3,  q1
1:      vmax.f32        q8,  q10, q0
        vmax.f32        q9,  q11, q0
        subs            r2,  r2,  #8
        beq             2f
        vld1.f32        {q2},[r1,:128]!
        vmin.f32        q10, q2,  q1
        vld1.f32        {q3},[r1,:128]!
        vmin.f32        q11, q3,  q1
        vst1.f32        {q8},[r0,:128]!
        vst1.f32        {q9},[r0,:128]!
        b               1b
2:      vst1.f32        {q8},[r0,:128]!
        vst1.f32        {q9},[r0,:128]!
        bx              lr
endfunc

function ff_vector_clip_int32_neon, export=1
        vdup.32         q0,  r2
        vdup.32         q1,  r3
        ldr             r2,  [sp]
1:      vld1.32         {q2-q3},  [r1,:128]!
        vmin.s32        q2,  q2,  q1
        vmin.s32        q3,  q3,  q1
        vmax.s32        q2,  q2,  q0
        vmax.s32        q3,  q3,  q0
        vst1.32         {q2-q3},  [r0,:128]!
        subs            r2,  r2,  #8
        bgt             1b
        bx              lr
endfunc
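Scalar equivalents of both routines, as a sketch; the parameter order mirrors the register usage above (dst in r0, src in r1, min/max in r2/r3 for the softfp case, len on the stack), and the NEON loops assume len is a multiple of 8:

#include <stdint.h>

static void vector_clipf_c(float *dst, const float *src,
                           float min, float max, int len)
{
    for (int i = 0; i < len; i++) {
        float v = src[i];
        dst[i] = v < min ? min : (v > max ? max : v);
    }
}

static void vector_clip_int32_c(int32_t *dst, const int32_t *src,
                                int32_t min, int32_t max, int len)
{
    for (int i = 0; i < len; i++) {
        int32_t v = src[i];
        dst[i] = v < min ? min : (v > max ? max : v);
    }
}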
Akagi201/ffmpeg-xcode
8,639
ffmpeg-3.0.2/libavcodec/arm/vp8_armv6.S
/* * Copyright (C) 2010 Mans Rullgard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/arm/asm.S" .macro rac_get_prob h, bs, buf, cw, pr, t0, t1 adds \bs, \bs, \t0 lsl \cw, \cw, \t0 lsl \t0, \h, \t0 rsb \h, \pr, #256 it cs ldrhcs \t1, [\buf], #2 smlabb \h, \t0, \pr, \h T itttt cs rev16cs \t1, \t1 A orrcs \cw, \cw, \t1, lsl \bs T lslcs \t1, \t1, \bs T orrcs \cw, \cw, \t1 subcs \bs, \bs, #16 lsr \h, \h, #8 cmp \cw, \h, lsl #16 itt ge subge \cw, \cw, \h, lsl #16 subge \h, \t0, \h .endm .macro rac_get_128 h, bs, buf, cw, t0, t1 adds \bs, \bs, \t0 lsl \cw, \cw, \t0 lsl \t0, \h, \t0 it cs ldrhcs \t1, [\buf], #2 mov \h, #128 it cs rev16cs \t1, \t1 add \h, \h, \t0, lsl #7 A orrcs \cw, \cw, \t1, lsl \bs T ittt cs T lslcs \t1, \t1, \bs T orrcs \cw, \cw, \t1 subcs \bs, \bs, #16 lsr \h, \h, #8 cmp \cw, \h, lsl #16 itt ge subge \cw, \cw, \h, lsl #16 subge \h, \t0, \h .endm function ff_decode_block_coeffs_armv6, export=1 push {r0,r1,r4-r11,lr} movrelx lr, X(ff_vp56_norm_shift) ldrd r4, r5, [sp, #44] @ token_prob, qmul cmp r3, #0 ldr r11, [r5] ldm r0, {r5-r7} @ high, bits, buf it ne pkhtbne r11, r11, r11, asr #16 ldr r8, [r0, #16] @ code_word 0: ldrb r9, [lr, r5] add r3, r3, #1 ldrb r0, [r4, #1] rac_get_prob r5, r6, r7, r8, r0, r9, r10 blt 2f ldrb r9, [lr, r5] ldrb r0, [r4, #2] rac_get_prob r5, r6, r7, r8, r0, r9, r10 ldrb r9, [lr, r5] bge 3f add r4, r3, r3, lsl #5 sxth r12, r11 add r4, r4, r2 adds r6, r6, r9 add r4, r4, #11 lsl r8, r8, r9 it cs ldrhcs r10, [r7], #2 lsl r9, r5, r9 mov r5, #128 it cs rev16cs r10, r10 add r5, r5, r9, lsl #7 T ittt cs T lslcs r10, r10, r6 T orrcs r8, r8, r10 A orrcs r8, r8, r10, lsl r6 subcs r6, r6, #16 lsr r5, r5, #8 cmp r8, r5, lsl #16 movrel r10, zigzag_scan-1 itt ge subge r8, r8, r5, lsl #16 subge r5, r9, r5 ldrb r10, [r10, r3] it ge rsbge r12, r12, #0 cmp r3, #16 strh r12, [r1, r10] bge 6f 5: ldrb r9, [lr, r5] ldrb r0, [r4] rac_get_prob r5, r6, r7, r8, r0, r9, r10 pkhtb r11, r11, r11, asr #16 bge 0b 6: ldr r0, [sp] ldr r9, [r0, #12] cmp r7, r9 it hi movhi r7, r9 stm r0, {r5-r7} @ high, bits, buf str r8, [r0, #16] @ code_word add sp, sp, #8 mov r0, r3 pop {r4-r11,pc} 2: add r4, r3, r3, lsl #5 cmp r3, #16 add r4, r4, r2 pkhtb r11, r11, r11, asr #16 bne 0b b 6b 3: ldrb r0, [r4, #3] rac_get_prob r5, r6, r7, r8, r0, r9, r10 ldrb r9, [lr, r5] bge 1f mov r12, #2 ldrb r0, [r4, #4] rac_get_prob r5, r6, r7, r8, r0, r9, r10 it ge addge r12, #1 ldrb r9, [lr, r5] blt 4f ldrb r0, [r4, #5] rac_get_prob r5, r6, r7, r8, r0, r9, r10 it ge addge r12, #1 ldrb r9, [lr, r5] b 4f 1: ldrb r0, [r4, #6] rac_get_prob r5, r6, r7, r8, r0, r9, r10 ldrb r9, [lr, r5] bge 3f ldrb r0, [r4, #7] rac_get_prob r5, r6, r7, r8, r0, r9, r10 ldrb r9, [lr, r5] bge 2f mov r12, #5 mov r0, #159 rac_get_prob r5, r6, r7, r8, r0, r9, r10 it ge addge r12, r12, #1 ldrb r9, [lr, r5] b 4f 2: mov r12, #7 mov r0, #165 
rac_get_prob r5, r6, r7, r8, r0, r9, r10 it ge addge r12, r12, #2 ldrb r9, [lr, r5] mov r0, #145 rac_get_prob r5, r6, r7, r8, r0, r9, r10 it ge addge r12, r12, #1 ldrb r9, [lr, r5] b 4f 3: ldrb r0, [r4, #8] rac_get_prob r5, r6, r7, r8, r0, r9, r10 it ge addge r4, r4, #1 ldrb r9, [lr, r5] ite ge movge r12, #2 movlt r12, #0 ldrb r0, [r4, #9] rac_get_prob r5, r6, r7, r8, r0, r9, r10 mov r9, #8 it ge addge r12, r12, #1 movrelx r4, X(ff_vp8_dct_cat_prob), r1 lsl r9, r9, r12 ldr r4, [r4, r12, lsl #2] add r12, r9, #3 mov r1, #0 ldrb r0, [r4], #1 1: ldrb r9, [lr, r5] lsl r1, r1, #1 rac_get_prob r5, r6, r7, r8, r0, r9, r10 ldrb r0, [r4], #1 it ge addge r1, r1, #1 cmp r0, #0 bne 1b ldrb r9, [lr, r5] add r12, r12, r1 ldr r1, [sp, #4] 4: add r4, r3, r3, lsl #5 add r4, r4, r2 add r4, r4, #22 rac_get_128 r5, r6, r7, r8, r9, r10 it ge rsbge r12, r12, #0 smulbb r12, r12, r11 movrel r9, zigzag_scan-1 ldrb r9, [r9, r3] cmp r3, #16 strh r12, [r1, r9] bge 6b b 5b endfunc const zigzag_scan .byte 0, 2, 8, 16 .byte 10, 4, 6, 12 .byte 18, 24, 26, 20 .byte 14, 22, 28, 30 endconst
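rac_get_prob above is one step of the VP8 boolean (range) decoder: split the current range in proportion to prob/256, pick the branch containing the code word, and renormalize. A hedged C sketch of that step; the struct and field names here are illustrative, not FFmpeg's exact API, and the refill/renormalization that the macro folds in via the adds/ldrhcs path is left out:

#include <stdint.h>

typedef struct {
    unsigned high;        /* current range, normalized to 8 bits  */
    unsigned code_word;   /* value bits, aligned at bit 16 and up */
} BoolCoder;              /* illustrative, not FFmpeg's struct    */

static int rac_get_prob_c(BoolCoder *c, int prob)
{
    unsigned split = 1 + (((c->high - 1) * prob) >> 8);
    int bit = c->code_word >= (split << 16);

    if (bit) {
        c->high      -= split;
        c->code_word -= split << 16;
    } else {
        c->high = split;
    }
    return bit;   /* caller renormalizes high/code_word and refills */
}

The rsb/smlabb/lsr triplet in the macro computes exactly this split as (256 - prob + high*prob) >> 8, which equals 1 + (((high - 1) * prob) >> 8).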
Akagi201/ffmpeg-xcode
48,266
ffmpeg-3.0.2/libavcodec/arm/vc1dsp_neon.S
/* * VC1 NEON optimisations * * Copyright (c) 2010 Rob Clark <rob@ti.com> * Copyright (c) 2011 Mans Rullgard <mans@mansr.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/arm/asm.S" #include "neon.S" #include "config.h" @ Transpose rows into columns of a matrix of 16-bit elements. For 4x4, pass @ double-word registers, for 8x4, pass quad-word registers. .macro transpose16 r0, r1, r2, r3 @ At this point: @ row[0] r0 @ row[1] r1 @ row[2] r2 @ row[3] r3 vtrn.16 \r0, \r1 @ first and second row vtrn.16 \r2, \r3 @ third and fourth row vtrn.32 \r0, \r2 @ first and third row vtrn.32 \r1, \r3 @ second and fourth row @ At this point, if registers are quad-word: @ column[0] d0 @ column[1] d2 @ column[2] d4 @ column[3] d6 @ column[4] d1 @ column[5] d3 @ column[6] d5 @ column[7] d7 @ At this point, if registers are double-word: @ column[0] d0 @ column[1] d1 @ column[2] d2 @ column[3] d3 .endm @ ff_vc1_inv_trans_{4,8}x{4,8}_neon and overflow: The input values in the file @ are supposed to be in a specific range as to allow for 16-bit math without @ causing overflows, but sometimes the input values are just big enough to @ barely cause overflow in vadd instructions like: @ @ vadd.i16 q0, q8, q10 @ vshr.s16 q0, q0, #\rshift @ @ To prevent these borderline cases from overflowing, we just need one more @ bit of precision, which is accomplished by replacing the sequence above with: @ @ vhadd.s16 q0, q8, q10 @ vshr.s16 q0, q0, #(\rshift -1) @ @ This works because vhadd is a single instruction that adds, then shifts to @ the right once, all before writing the result to the destination register. @ @ Even with this workaround, there were still some files that caused overflows @ in ff_vc1_inv_trans_8x8_neon. See the comments in ff_vc1_inv_trans_8x8_neon @ for the additional workaround. @ Takes 4 columns of 8 values each and operates on it. Modeled after the first @ for loop in vc1_inv_trans_4x8_c. 
@ Input columns: q0 q1 q2 q3 @ Output columns: q0 q1 q2 q3 @ Trashes: r12 q8 q9 q10 q11 q12 q13 .macro vc1_inv_trans_4x8_helper add rshift @ Compute temp1, temp2 and setup scalar #17, #22, #10 vadd.i16 q12, q0, q2 @ temp1 = src[0] + src[2] movw r12, #17 vsub.i16 q13, q0, q2 @ temp2 = src[0] - src[2] movt r12, #22 vmov.32 d0[0], r12 movw r12, #10 vmov.16 d1[0], r12 vmov.i16 q8, #\add @ t1 will accumulate here vmov.i16 q9, #\add @ t2 will accumulate here vmul.i16 q10, q1, d0[1] @ t3 = 22 * (src[1]) vmul.i16 q11, q3, d0[1] @ t4 = 22 * (src[3]) vmla.i16 q8, q12, d0[0] @ t1 = 17 * (temp1) + 4 vmla.i16 q9, q13, d0[0] @ t2 = 17 * (temp2) + 4 vmla.i16 q10, q3, d1[0] @ t3 += 10 * src[3] vmls.i16 q11, q1, d1[0] @ t4 -= 10 * src[1] vhadd.s16 q0, q8, q10 @ dst[0] = (t1 + t3) >> 1 vhsub.s16 q3, q8, q10 @ dst[3] = (t1 - t3) >> 1 vhsub.s16 q1, q9, q11 @ dst[1] = (t2 - t4) >> 1 vhadd.s16 q2, q9, q11 @ dst[2] = (t2 + t4) >> 1 @ Halving add/sub above already did one shift vshr.s16 q0, q0, #(\rshift - 1) @ dst[0] >>= (rshift - 1) vshr.s16 q3, q3, #(\rshift - 1) @ dst[3] >>= (rshift - 1) vshr.s16 q1, q1, #(\rshift - 1) @ dst[1] >>= (rshift - 1) vshr.s16 q2, q2, #(\rshift - 1) @ dst[2] >>= (rshift - 1) .endm @ Takes 8 columns of 4 values each and operates on it. Modeled after the second @ for loop in vc1_inv_trans_4x8_c. @ Input columns: d0 d2 d4 d6 d1 d3 d5 d7 @ Output columns: d16 d17 d18 d19 d21 d20 d23 d22 @ Trashes all NEON registers (and r12) except for: q4 q5 q6 q7 .macro vc1_inv_trans_8x4_helper add add1beforeshift rshift @ At this point: @ src[0] d0 overwritten later @ src[8] d2 @ src[16] d4 overwritten later @ src[24] d6 @ src[32] d1 overwritten later @ src[40] d3 @ src[48] d5 overwritten later @ src[56] d7 movw r12, #12 vmov.i16 q14, #\add @ t1|t2 will accumulate here movt r12, #6 vadd.i16 d20, d0, d1 @ temp1 = src[0] + src[32] vsub.i16 d21, d0, d1 @ temp2 = src[0] - src[32] vmov.i32 d0[0], r12 @ 16-bit: d0[0] = #12, d0[1] = #6 vshl.i16 q15, q2, #4 @ t3|t4 = 16 * (src[16]|src[48]) vswp d4, d5 @ q2 = src[48]|src[16] vmla.i16 q14, q10, d0[0] @ t1|t2 = 12 * (temp1|temp2) + 64 movw r12, #15 movt r12, #9 vmov.i32 d0[1], r12 @ 16-bit: d0[2] = #15, d0[3] = #9 vneg.s16 d31, d31 @ t4 = -t4 vmla.i16 q15, q2, d0[1] @ t3|t4 += 6 * (src[48]|src[16]) @ At this point: @ d0[2] #15 @ d0[3] #9 @ q1 src[8]|src[40] @ q3 src[24]|src[56] @ q14 old t1|t2 @ q15 old t3|t4 vshl.i16 q8, q1, #4 @ t1|t2 = 16 * (src[8]|src[40]) vswp d2, d3 @ q1 = src[40]|src[8] vshl.i16 q12, q3, #4 @ temp3a|temp4a = 16 * src[24]|src[56] vswp d6, d7 @ q3 = src[56]|src[24] vshl.i16 q13, q1, #2 @ temp3b|temp4b = 4 * (src[40]|src[8]) vshl.i16 q2, q3, #2 @ temp1|temp2 = 4 * (src[56]|src[24]) vswp d3, d6 @ q1 = src[40]|src[56], q3 = src[8]|src[24] vsub.i16 q9, q13, q12 @ t3|t4 = - (temp3a|temp4a) + (temp3b|temp4b) vadd.i16 q8, q8, q2 @ t1|t2 += temp1|temp2 vmul.i16 q12, q3, d0[3] @ temp3|temp4 = 9 * src[8]|src[24] vmla.i16 q8, q1, d0[3] @ t1|t2 += 9 * (src[40]|src[56]) vswp d6, d7 @ q3 = src[24]|src[8] vswp d2, d3 @ q1 = src[56]|src[40] vsub.i16 q11, q14, q15 @ t8|t7 = old t1|t2 - old t3|t4 vadd.i16 q10, q14, q15 @ t5|t6 = old t1|t2 + old t3|t4 .if \add1beforeshift vmov.i16 q15, #1 .endif vadd.i16 d18, d18, d24 @ t3 += temp3 vsub.i16 d19, d19, d25 @ t4 -= temp4 vswp d22, d23 @ q11 = t7|t8 vneg.s16 d17, d17 @ t2 = -t2 vmla.i16 q9, q1, d0[2] @ t3|t4 += 15 * src[56]|src[40] vmla.i16 q8, q3, d0[2] @ t1|t2 += 15 * src[24]|src[8] @ At this point: @ t1 d16 @ t2 d17 @ t3 d18 @ t4 d19 @ t5 d20 @ t6 d21 @ t7 d22 @ t8 d23 @ #1 q15 .if \add1beforeshift vadd.i16 
q3, q15, q10 @ line[7,6] = t5|t6 + 1 vadd.i16 q2, q15, q11 @ line[5,4] = t7|t8 + 1 .endif @ Sometimes this overflows, so to get one additional bit of precision, use @ a single instruction that both adds and shifts right (halving). vhadd.s16 q1, q9, q11 @ line[2,3] = (t3|t4 + t7|t8) >> 1 vhadd.s16 q0, q8, q10 @ line[0,1] = (t1|t2 + t5|t6) >> 1 .if \add1beforeshift vhsub.s16 q2, q2, q9 @ line[5,4] = (t7|t8 - t3|t4 + 1) >> 1 vhsub.s16 q3, q3, q8 @ line[7,6] = (t5|t6 - t1|t2 + 1) >> 1 .else vhsub.s16 q2, q11, q9 @ line[5,4] = (t7|t8 - t3|t4) >> 1 vhsub.s16 q3, q10, q8 @ line[7,6] = (t5|t6 - t1|t2) >> 1 .endif vshr.s16 q9, q1, #(\rshift - 1) @ one shift is already done by vhadd/vhsub above vshr.s16 q8, q0, #(\rshift - 1) vshr.s16 q10, q2, #(\rshift - 1) vshr.s16 q11, q3, #(\rshift - 1) @ At this point: @ dst[0] d16 @ dst[1] d17 @ dst[2] d18 @ dst[3] d19 @ dst[4] d21 @ dst[5] d20 @ dst[6] d23 @ dst[7] d22 .endm @ This is modeled after the first and second for loop in vc1_inv_trans_8x8_c. @ Input columns: q8, q9, q10, q11, q12, q13, q14, q15 @ Output columns: q8, q9, q10, q11, q12, q13, q14, q15 @ Trashes all NEON registers (and r12) except for: q4 q5 q6 q7 .macro vc1_inv_trans_8x8_helper add add1beforeshift rshift @ This actually computes half of t1, t2, t3, t4, as explained below @ near `tNhalf`. vmov.i16 q0, #(6 / 2) @ q0 = #6/2 vshl.i16 q1, q10, #3 @ t3 = 16/2 * src[16] vshl.i16 q3, q14, #3 @ temp4 = 16/2 * src[48] vmul.i16 q2, q10, q0 @ t4 = 6/2 * src[16] vmla.i16 q1, q14, q0 @ t3 += 6/2 * src[48] @ unused: q0, q10, q14 vmov.i16 q0, #(12 / 2) @ q0 = #12/2 vadd.i16 q10, q8, q12 @ temp1 = src[0] + src[32] vsub.i16 q14, q8, q12 @ temp2 = src[0] - src[32] @ unused: q8, q12 vmov.i16 q8, #(\add / 2) @ t1 will accumulate here vmov.i16 q12, #(\add / 2) @ t2 will accumulate here movw r12, #15 vsub.i16 q2, q2, q3 @ t4 = 6/2 * src[16] - 16/2 * src[48] movt r12, #9 @ unused: q3 vmla.i16 q8, q10, q0 @ t1 = 12/2 * temp1 + add vmla.i16 q12, q14, q0 @ t2 = 12/2 * temp2 + add vmov.i32 d0[0], r12 @ unused: q3, q10, q14 @ At this point: @ q0 d0=#15|#9 @ q1 old t3 @ q2 old t4 @ q3 @ q8 old t1 @ q9 src[8] @ q10 @ q11 src[24] @ q12 old t2 @ q13 src[40] @ q14 @ q15 src[56] @ unused: q3, q10, q14 movw r12, #16 vshl.i16 q3, q9, #4 @ t1 = 16 * src[8] movt r12, #4 vshl.i16 q10, q9, #2 @ t4 = 4 * src[8] vmov.i32 d1[0], r12 vmul.i16 q14, q9, d0[0] @ t2 = 15 * src[8] vmul.i16 q9, q9, d0[1] @ t3 = 9 * src[8] @ unused: none vmla.i16 q3, q11, d0[0] @ t1 += 15 * src[24] vmls.i16 q10, q11, d0[1] @ t4 -= 9 * src[24] vmls.i16 q14, q11, d1[1] @ t2 -= 4 * src[24] vmls.i16 q9, q11, d1[0] @ t3 -= 16 * src[24] @ unused: q11 vmla.i16 q3, q13, d0[1] @ t1 += 9 * src[40] vmla.i16 q10, q13, d0[0] @ t4 += 15 * src[40] vmls.i16 q14, q13, d1[0] @ t2 -= 16 * src[40] vmla.i16 q9, q13, d1[1] @ t3 += 4 * src[40] @ unused: q11, q13 @ Compute t5, t6, t7, t8 from old t1, t2, t3, t4. Actually, it computes @ half of t5, t6, t7, t8 since t1, t2, t3, t4 are halved. 
vadd.i16 q11, q8, q1 @ t5 = t1 + t3 vsub.i16 q1, q8, q1 @ t8 = t1 - t3 vadd.i16 q13, q12, q2 @ t6 = t2 + t4 vsub.i16 q2, q12, q2 @ t7 = t2 - t4 @ unused: q8, q12 .if \add1beforeshift vmov.i16 q12, #1 .endif @ unused: q8 vmla.i16 q3, q15, d1[1] @ t1 += 4 * src[56] vmls.i16 q14, q15, d0[1] @ t2 -= 9 * src[56] vmla.i16 q9, q15, d0[0] @ t3 += 15 * src[56] vmls.i16 q10, q15, d1[0] @ t4 -= 16 * src[56] @ unused: q0, q8, q15 @ At this point: @ t1 q3 @ t2 q14 @ t3 q9 @ t4 q10 @ t5half q11 @ t6half q13 @ t7half q2 @ t8half q1 @ #1 q12 @ @ tNhalf is half of the value of tN (as described in vc1_inv_trans_8x8_c). @ This is done because sometimes files have input that causes tN + tM to @ overflow. To avoid this overflow, we compute tNhalf, then compute @ tNhalf + tM (which doesn't overflow), and then we use vhadd to compute @ (tNhalf + (tNhalf + tM)) >> 1 which does not overflow because it is @ one instruction. @ For each pair of tN and tM, do: @ lineA = t5half + t1 @ if add1beforeshift: t1 -= 1 @ lineA = (t5half + lineA) >> 1 @ lineB = t5half - t1 @ lineB = (t5half + lineB) >> 1 @ lineA >>= rshift - 1 @ lineB >>= rshift - 1 vadd.i16 q8, q11, q3 @ q8 = t5half + t1 .if \add1beforeshift vsub.i16 q3, q3, q12 @ q3 = t1 - 1 .endif vadd.i16 q0, q13, q14 @ q0 = t6half + t2 .if \add1beforeshift vsub.i16 q14, q14, q12 @ q14 = t2 - 1 .endif vadd.i16 q15, q2, q9 @ q15 = t7half + t3 .if \add1beforeshift vsub.i16 q9, q9, q12 @ q9 = t3 - 1 .endif @ unused: none vhadd.s16 q8, q11, q8 @ q8 = (t5half + t5half + t1) >> 1 vsub.i16 q3, q11, q3 @ q3 = t5half - t1 + 1 vhadd.s16 q0, q13, q0 @ q0 = (t6half + t6half + t2) >> 1 vsub.i16 q14, q13, q14 @ q14 = t6half - t2 + 1 vhadd.s16 q15, q2, q15 @ q15 = (t7half + t7half + t3) >> 1 vsub.i16 q9, q2, q9 @ q9 = t7half - t3 + 1 vhadd.s16 q3, q11, q3 @ q3 = (t5half + t5half - t1 + 1) >> 1 @ unused: q11 vadd.i16 q11, q1, q10 @ q11 = t8half + t4 .if \add1beforeshift vsub.i16 q10, q10, q12 @ q10 = t4 - 1 .endif @ unused: q12 vhadd.s16 q14, q13, q14 @ q14 = (t6half + t6half - t2 + 1) >> 1 @ unused: q12, q13 vhadd.s16 q13, q2, q9 @ q9 = (t7half + t7half - t3 + 1) >> 1 @ unused: q12, q2, q9 vsub.i16 q10, q1, q10 @ q10 = t8half - t4 + 1 vhadd.s16 q11, q1, q11 @ q11 = (t8half + t8half + t4) >> 1 vshr.s16 q8, q8, #(\rshift - 1) @ q8 = line[0] vhadd.s16 q12, q1, q10 @ q12 = (t8half + t8half - t4 + 1) >> 1 vshr.s16 q9, q0, #(\rshift - 1) @ q9 = line[1] vshr.s16 q10, q15, #(\rshift - 1) @ q10 = line[2] vshr.s16 q11, q11, #(\rshift - 1) @ q11 = line[3] vshr.s16 q12, q12, #(\rshift - 1) @ q12 = line[4] vshr.s16 q13, q13, #(\rshift - 1) @ q13 = line[5] vshr.s16 q14, q14, #(\rshift - 1) @ q14 = line[6] vshr.s16 q15, q3, #(\rshift - 1) @ q15 = line[7] .endm @ (int16_t *block [r0]) function ff_vc1_inv_trans_8x8_neon, export=1 vld1.64 {q8-q9}, [r0,:128]! vld1.64 {q10-q11}, [r0,:128]! vld1.64 {q12-q13}, [r0,:128]! vld1.64 {q14-q15}, [r0,:128] sub r0, r0, #(16 * 2 * 3) @ restore r0 @ At this point: @ src[0] q8 @ src[8] q9 @ src[16] q10 @ src[24] q11 @ src[32] q12 @ src[40] q13 @ src[48] q14 @ src[56] q15 vc1_inv_trans_8x8_helper add=4 add1beforeshift=0 rshift=3 @ Transpose result matrix of 8x8 swap4 d17, d19, d21, d23, d24, d26, d28, d30 transpose16_4x4 q8, q9, q10, q11, q12, q13, q14, q15 vc1_inv_trans_8x8_helper add=64 add1beforeshift=1 rshift=7 vst1.64 {q8-q9}, [r0,:128]! vst1.64 {q10-q11}, [r0,:128]! vst1.64 {q12-q13}, [r0,:128]! 
vst1.64 {q14-q15}, [r0,:128] bx lr endfunc @ (uint8_t *dest [r0], int linesize [r1], int16_t *block [r2]) function ff_vc1_inv_trans_8x4_neon, export=1 vld1.64 {q0-q1}, [r2,:128]! @ load 8 * 4 * 2 = 64 bytes / 16 bytes per quad = 4 quad registers vld1.64 {q2-q3}, [r2,:128] transpose16 q0 q1 q2 q3 @ transpose rows to columns @ At this point: @ src[0] d0 @ src[1] d2 @ src[2] d4 @ src[3] d6 @ src[4] d1 @ src[5] d3 @ src[6] d5 @ src[7] d7 vc1_inv_trans_8x4_helper add=4 add1beforeshift=0 rshift=3 @ Move output to more standardized registers vmov d0, d16 vmov d2, d17 vmov d4, d18 vmov d6, d19 vmov d1, d21 vmov d3, d20 vmov d5, d23 vmov d7, d22 @ At this point: @ dst[0] d0 @ dst[1] d2 @ dst[2] d4 @ dst[3] d6 @ dst[4] d1 @ dst[5] d3 @ dst[6] d5 @ dst[7] d7 transpose16 q0 q1 q2 q3 @ turn columns into rows @ At this point: @ row[0] q0 @ row[1] q1 @ row[2] q2 @ row[3] q3 vc1_inv_trans_4x8_helper add=64 rshift=7 @ At this point: @ line[0].l d0 @ line[0].h d1 @ line[1].l d2 @ line[1].h d3 @ line[2].l d4 @ line[2].h d5 @ line[3].l d6 @ line[3].h d7 @ unused registers: q12, q13, q14, q15 vld1.64 {d28}, [r0,:64], r1 @ read dest vld1.64 {d29}, [r0,:64], r1 vld1.64 {d30}, [r0,:64], r1 vld1.64 {d31}, [r0,:64], r1 sub r0, r0, r1, lsl #2 @ restore original r0 value vaddw.u8 q0, q0, d28 @ line[0] += dest[0] vaddw.u8 q1, q1, d29 @ line[1] += dest[1] vaddw.u8 q2, q2, d30 @ line[2] += dest[2] vaddw.u8 q3, q3, d31 @ line[3] += dest[3] vqmovun.s16 d0, q0 @ line[0] vqmovun.s16 d1, q1 @ line[1] vqmovun.s16 d2, q2 @ line[2] vqmovun.s16 d3, q3 @ line[3] vst1.64 {d0}, [r0,:64], r1 @ write dest vst1.64 {d1}, [r0,:64], r1 vst1.64 {d2}, [r0,:64], r1 vst1.64 {d3}, [r0,:64] bx lr endfunc @ (uint8_t *dest [r0], int linesize [r1], int16_t *block [r2]) function ff_vc1_inv_trans_4x8_neon, export=1 mov r12, #(8 * 2) @ 8 elements per line, each element 2 bytes vld4.16 {d0[], d2[], d4[], d6[]}, [r2,:64], r12 @ read each column into a q register vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r2,:64], r12 vld4.16 {d0[2], d2[2], d4[2], d6[2]}, [r2,:64], r12 vld4.16 {d0[3], d2[3], d4[3], d6[3]}, [r2,:64], r12 vld4.16 {d1[], d3[], d5[], d7[]}, [r2,:64], r12 vld4.16 {d1[1], d3[1], d5[1], d7[1]}, [r2,:64], r12 vld4.16 {d1[2], d3[2], d5[2], d7[2]}, [r2,:64], r12 vld4.16 {d1[3], d3[3], d5[3], d7[3]}, [r2,:64] vc1_inv_trans_4x8_helper add=4 rshift=3 @ At this point: @ dst[0] = q0 @ dst[1] = q1 @ dst[2] = q2 @ dst[3] = q3 transpose16 q0 q1 q2 q3 @ Transpose rows (registers) into columns vc1_inv_trans_8x4_helper add=64 add1beforeshift=1 rshift=7 vld1.32 {d28[]}, [r0,:32], r1 @ read dest vld1.32 {d28[1]}, [r0,:32], r1 vld1.32 {d29[]}, [r0,:32], r1 vld1.32 {d29[1]}, [r0,:32], r1 vld1.32 {d30[]}, [r0,:32], r1 vld1.32 {d30[0]}, [r0,:32], r1 vld1.32 {d31[]}, [r0,:32], r1 vld1.32 {d31[0]}, [r0,:32], r1 sub r0, r0, r1, lsl #3 @ restore original r0 value vaddw.u8 q8, q8, d28 @ line[0,1] += dest[0,1] vaddw.u8 q9, q9, d29 @ line[2,3] += dest[2,3] vaddw.u8 q10, q10, d30 @ line[5,4] += dest[5,4] vaddw.u8 q11, q11, d31 @ line[7,6] += dest[7,6] vqmovun.s16 d16, q8 @ clip(line[0,1]) vqmovun.s16 d18, q9 @ clip(line[2,3]) vqmovun.s16 d20, q10 @ clip(line[5,4]) vqmovun.s16 d22, q11 @ clip(line[7,6]) vst1.32 {d16[0]}, [r0,:32], r1 @ write dest vst1.32 {d16[1]}, [r0,:32], r1 vst1.32 {d18[0]}, [r0,:32], r1 vst1.32 {d18[1]}, [r0,:32], r1 vst1.32 {d20[1]}, [r0,:32], r1 vst1.32 {d20[0]}, [r0,:32], r1 vst1.32 {d22[1]}, [r0,:32], r1 vst1.32 {d22[0]}, [r0,:32] bx lr endfunc @ Setup constants in registers which are used by vc1_inv_trans_4x4_helper .macro 
vc1_inv_trans_4x4_helper_setup vmov.i16 q13, #17 vmov.i16 q14, #22 vmov.i16 d30, #10 @ only need double-word, not quad-word .endm @ This is modeled after the first for loop in vc1_inv_trans_4x4_c. .macro vc1_inv_trans_4x4_helper add rshift vmov.i16 q2, #\add @ t1|t2 will accumulate here vadd.i16 d16, d0, d1 @ temp1 = src[0] + src[2] vsub.i16 d17, d0, d1 @ temp2 = src[0] - src[2] vmul.i16 q3, q14, q1 @ t3|t4 = 22 * (src[1]|src[3]) vmla.i16 q2, q13, q8 @ t1|t2 = 17 * (temp1|temp2) + add vmla.i16 d6, d30, d3 @ t3 += 10 * src[3] vmls.i16 d7, d30, d2 @ t4 -= 10 * src[1] vadd.i16 q0, q2, q3 @ dst[0,2] = (t1|t2 + t3|t4) vsub.i16 q1, q2, q3 @ dst[3,1] = (t1|t2 - t3|t4) vshr.s16 q0, q0, #\rshift @ dst[0,2] >>= rshift vshr.s16 q1, q1, #\rshift @ dst[3,1] >>= rshift .endm @ (uint8_t *dest [r0], int linesize [r1], int16_t *block [r2]) function ff_vc1_inv_trans_4x4_neon, export=1 mov r12, #(8 * 2) @ 8 elements per line, each element 2 bytes vld4.16 {d0[], d1[], d2[], d3[]}, [r2,:64], r12 @ read each column into a register vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r2,:64], r12 vld4.16 {d0[2], d1[2], d2[2], d3[2]}, [r2,:64], r12 vld4.16 {d0[3], d1[3], d2[3], d3[3]}, [r2,:64] vswp d1, d2 @ so that we can later access column 1 and column 3 as a single q1 register vc1_inv_trans_4x4_helper_setup @ At this point: @ src[0] = d0 @ src[1] = d2 @ src[2] = d1 @ src[3] = d3 vc1_inv_trans_4x4_helper add=4 rshift=3 @ compute t1, t2, t3, t4 and combine them into dst[0-3] @ At this point: @ dst[0] = d0 @ dst[1] = d3 @ dst[2] = d1 @ dst[3] = d2 transpose16 d0 d3 d1 d2 @ Transpose rows (registers) into columns @ At this point: @ src[0] = d0 @ src[8] = d3 @ src[16] = d1 @ src[24] = d2 vswp d2, d3 @ so that we can later access column 1 and column 3 in order as a single q1 register @ At this point: @ src[0] = d0 @ src[8] = d2 @ src[16] = d1 @ src[24] = d3 vc1_inv_trans_4x4_helper add=64 rshift=7 @ compute t1, t2, t3, t4 and combine them into dst[0-3] @ At this point: @ line[0] = d0 @ line[1] = d3 @ line[2] = d1 @ line[3] = d2 vld1.32 {d18[]}, [r0,:32], r1 @ read dest vld1.32 {d19[]}, [r0,:32], r1 vld1.32 {d18[1]}, [r0,:32], r1 vld1.32 {d19[0]}, [r0,:32], r1 sub r0, r0, r1, lsl #2 @ restore original r0 value vaddw.u8 q0, q0, d18 @ line[0,2] += dest[0,2] vaddw.u8 q1, q1, d19 @ line[3,1] += dest[3,1] vqmovun.s16 d0, q0 @ clip(line[0,2]) vqmovun.s16 d1, q1 @ clip(line[3,1]) vst1.32 {d0[0]}, [r0,:32], r1 @ write dest vst1.32 {d1[1]}, [r0,:32], r1 vst1.32 {d0[1]}, [r0,:32], r1 vst1.32 {d1[0]}, [r0,:32] bx lr endfunc #if HAVE_AS_DN_DIRECTIVE @ The absolute value of multiplication constants from vc1_mspel_filter and vc1_mspel_{ver,hor}_filter_16bits. @ The sign is embedded in the code below that carries out the multiplication (mspel_filter{,.16}). #define MSPEL_MODE_1_MUL_CONSTANTS 4 53 18 3 #define MSPEL_MODE_2_MUL_CONSTANTS 1 9 9 1 #define MSPEL_MODE_3_MUL_CONSTANTS 3 18 53 4 @ These constants are from reading the source code of vc1_mspel_mc and determining the value that @ is added to `rnd` to result in the variable `r`, and the value of the variable `shift`. 
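@ As a hedged C sketch of what one tap evaluation amounts to (signs follow
@ the vmlsl/vmlal choices inside mspel_filter below; `r` is the rounding
@ value derived from rnd), mode 1 computes roughly:
@     pix = av_clip_uint8((-4*src[-stride] + 53*src[0] + 18*src[stride]
@                          - 3*src[stride*2] + add - r) >> shift);
@ with the (add, shift) pair taken from the *_ADDSHIFT_CONSTANTS defines
@ below.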
#define MSPEL_MODES_11_ADDSHIFT_CONSTANTS 15 5 #define MSPEL_MODES_12_ADDSHIFT_CONSTANTS 3 3 #define MSPEL_MODES_13_ADDSHIFT_CONSTANTS 15 5 #define MSPEL_MODES_21_ADDSHIFT_CONSTANTS MSPEL_MODES_12_ADDSHIFT_CONSTANTS #define MSPEL_MODES_22_ADDSHIFT_CONSTANTS 0 1 #define MSPEL_MODES_23_ADDSHIFT_CONSTANTS 3 3 #define MSPEL_MODES_31_ADDSHIFT_CONSTANTS MSPEL_MODES_13_ADDSHIFT_CONSTANTS #define MSPEL_MODES_32_ADDSHIFT_CONSTANTS MSPEL_MODES_23_ADDSHIFT_CONSTANTS #define MSPEL_MODES_33_ADDSHIFT_CONSTANTS 15 5 @ The addition and shift constants from vc1_mspel_filter. #define MSPEL_MODE_1_ADDSHIFT_CONSTANTS 32 6 #define MSPEL_MODE_2_ADDSHIFT_CONSTANTS 8 4 #define MSPEL_MODE_3_ADDSHIFT_CONSTANTS 32 6 @ Setup constants in registers for a subsequent use of mspel_filter{,.16}. .macro mspel_constants typesize reg_a reg_b reg_c reg_d filter_a filter_b filter_c filter_d reg_add filter_add_register @ Define double-word register aliases. Typesize should be i8 or i16. ra .dn \reg_a\().\typesize rb .dn \reg_b\().\typesize rc .dn \reg_c\().\typesize rd .dn \reg_d\().\typesize @ Only set the register if the value is not 1 and unique .if \filter_a != 1 vmov ra, #\filter_a @ ra = filter_a .endif vmov rb, #\filter_b @ rb = filter_b .if \filter_b != \filter_c vmov rc, #\filter_c @ rc = filter_c .endif .if \filter_d != 1 vmov rd, #\filter_d @ rd = filter_d .endif @ vdup to double the size of typesize .ifc \typesize,i8 vdup.16 \reg_add, \filter_add_register @ reg_add = filter_add_register .else vdup.32 \reg_add, \filter_add_register @ reg_add = filter_add_register .endif .unreq ra .unreq rb .unreq rc .unreq rd .endm @ After mspel_constants has been used, do the filtering. .macro mspel_filter acc dest src0 src1 src2 src3 filter_a filter_b filter_c filter_d reg_a reg_b reg_c reg_d reg_add filter_shift narrow=1 .if \filter_a != 1 @ If filter_a != 1, then we need a move and subtract instruction vmov \acc, \reg_add @ acc = reg_add vmlsl.u8 \acc, \reg_a, \src0 @ acc -= filter_a * src[-stride] .else @ If filter_a is 1, then just subtract without an extra move vsubw.u8 \acc, \reg_add, \src0 @ acc = reg_add - src[-stride] @ since filter_a == 1 .endif vmlal.u8 \acc, \reg_b, \src1 @ acc += filter_b * src[0] .if \filter_b != \filter_c vmlal.u8 \acc, \reg_c, \src2 @ acc += filter_c * src[stride] .else @ If filter_b is the same as filter_c, use the same reg_b register vmlal.u8 \acc, \reg_b, \src2 @ acc += filter_c * src[stride] @ where filter_c == filter_b .endif .if \filter_d != 1 @ If filter_d != 1, then do a multiply accumulate vmlsl.u8 \acc, \reg_d, \src3 @ acc -= filter_d * src[stride * 2] .else @ If filter_d is 1, then just do a subtract vsubw.u8 \acc, \acc, \src3 @ acc -= src[stride * 2] @ since filter_d == 1 .endif .if \narrow vqshrun.s16 \dest, \acc, #\filter_shift @ dest = clip_uint8(acc >> filter_shift) .else vshr.s16 \dest, \acc, #\filter_shift @ dest = acc >> filter_shift .endif .endm @ This is similar to mspel_filter, but the input is 16-bit instead of 8-bit and narrow=0 is not supported. 
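@ Because this variant consumes the 16-bit intermediates written by the
@ vertical pass, its accumulators widen to 32 bits and the result is
@ narrowed twice, both times with saturation; schematically:
@     dest[x] = clip_uint8(clip_uint16(acc >> filter_shift));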
.macro mspel_filter.16 acc0 acc1 acc0_0 acc0_1 dest src0 src1 src2 src3 src4 src5 src6 src7 filter_a filter_b filter_c filter_d reg_a reg_b reg_c reg_d reg_add filter_shift .if \filter_a != 1 vmov \acc0, \reg_add vmov \acc1, \reg_add vmlsl.s16 \acc0, \reg_a, \src0 vmlsl.s16 \acc1, \reg_a, \src1 .else vsubw.s16 \acc0, \reg_add, \src0 vsubw.s16 \acc1, \reg_add, \src1 .endif vmlal.s16 \acc0, \reg_b, \src2 vmlal.s16 \acc1, \reg_b, \src3 .if \filter_b != \filter_c vmlal.s16 \acc0, \reg_c, \src4 vmlal.s16 \acc1, \reg_c, \src5 .else vmlal.s16 \acc0, \reg_b, \src4 vmlal.s16 \acc1, \reg_b, \src5 .endif .if \filter_d != 1 vmlsl.s16 \acc0, \reg_d, \src6 vmlsl.s16 \acc1, \reg_d, \src7 .else vsubw.s16 \acc0, \acc0, \src6 vsubw.s16 \acc1, \acc1, \src7 .endif @ Use acc0_0 and acc0_1 as temp space vqshrun.s32 \acc0_0, \acc0, #\filter_shift @ Shift and narrow with saturation from s32 to u16 vqshrun.s32 \acc0_1, \acc1, #\filter_shift vqmovn.u16 \dest, \acc0 @ Narrow with saturation from u16 to u8 .endm @ Register usage for put_vc1_mspel_mc functions. Registers marked 'hv' are only used in put_vc1_mspel_mc_hv. @ @ r0 adjusted dst @ r1 adjusted src @ r2 stride @ r3 adjusted rnd @ r4 [hv] tmp @ r11 [hv] sp saved @ r12 loop counter @ d0 src[-stride] @ d1 src[0] @ d2 src[stride] @ d3 src[stride * 2] @ q0 [hv] src[-stride] @ q1 [hv] src[0] @ q2 [hv] src[stride] @ q3 [hv] src[stride * 2] @ d21 often result from mspel_filter @ q11 accumulator 0 @ q12 [hv] accumulator 1 @ q13 accumulator initial value @ d28 filter_a @ d29 filter_b @ d30 filter_c @ d31 filter_d @ (uint8_t *dst [r0], const uint8_t *src [r1], ptrdiff_t stride [r2], int rnd [r3]) .macro put_vc1_mspel_mc_hv hmode vmode filter_h_a filter_h_b filter_h_c filter_h_d filter_v_a filter_v_b filter_v_c filter_v_d filter_add filter_shift function ff_put_vc1_mspel_mc\hmode\()\vmode\()_neon, export=1 push {r4, r11, lr} mov r11, sp @ r11 = stack pointer before realignment A bic sp, sp, #15 @ sp = round down to multiple of 16 bytes T bic r4, r11, #15 T mov sp, r4 sub sp, sp, #(8*2*16) @ make space for 8 rows * 2 bytes per element * 16 elements per row (to fit 11 actual elements per row) mov r4, sp @ r4 = int16_t tmp[8 * 16] sub r1, r1, #1 @ src -= 1 .if \filter_add != 0 add r3, r3, #\filter_add @ r3 = filter_add + rnd .endif mov r12, #8 @ loop counter sub r1, r1, r2 @ r1 = &src[-stride] @ slide back @ Do vertical filtering from src into tmp mspel_constants i8 d28 d29 d30 d31 \filter_v_a \filter_v_b \filter_v_c \filter_v_d q13 r3 vld1.64 {d0,d1}, [r1], r2 vld1.64 {d2,d3}, [r1], r2 vld1.64 {d4,d5}, [r1], r2 1: subs r12, r12, #4 vld1.64 {d6,d7}, [r1], r2 mspel_filter q11 q11 d0 d2 d4 d6 \filter_v_a \filter_v_b \filter_v_c \filter_v_d d28 d29 d30 d31 q13 \filter_shift narrow=0 mspel_filter q12 q12 d1 d3 d5 d7 \filter_v_a \filter_v_b \filter_v_c \filter_v_d d28 d29 d30 d31 q13 \filter_shift narrow=0 vst1.64 {q11,q12}, [r4,:128]! @ store and increment vld1.64 {d0,d1}, [r1], r2 mspel_filter q11 q11 d2 d4 d6 d0 \filter_v_a \filter_v_b \filter_v_c \filter_v_d d28 d29 d30 d31 q13 \filter_shift narrow=0 mspel_filter q12 q12 d3 d5 d7 d1 \filter_v_a \filter_v_b \filter_v_c \filter_v_d d28 d29 d30 d31 q13 \filter_shift narrow=0 vst1.64 {q11,q12}, [r4,:128]! @ store and increment vld1.64 {d2,d3}, [r1], r2 mspel_filter q11 q11 d4 d6 d0 d2 \filter_v_a \filter_v_b \filter_v_c \filter_v_d d28 d29 d30 d31 q13 \filter_shift narrow=0 mspel_filter q12 q12 d5 d7 d1 d3 \filter_v_a \filter_v_b \filter_v_c \filter_v_d d28 d29 d30 d31 q13 \filter_shift narrow=0 vst1.64 {q11,q12}, [r4,:128]!
@ store and increment vld1.64 {d4,d5}, [r1], r2 mspel_filter q11 q11 d6 d0 d2 d4 \filter_v_a \filter_v_b \filter_v_c \filter_v_d d28 d29 d30 d31 q13 \filter_shift narrow=0 mspel_filter q12 q12 d7 d1 d3 d5 \filter_v_a \filter_v_b \filter_v_c \filter_v_d d28 d29 d30 d31 q13 \filter_shift narrow=0 vst1.64 {q11,q12}, [r4,:128]! @ store and increment bne 1b rsb r3, r3, #(64 + \filter_add) @ r3 = (64 + filter_add) - r3 mov r12, #8 @ loop counter mov r4, sp @ r4 = tmp @ Do horizontal filtering from temp to dst mspel_constants i16 d28 d29 d30 d31 \filter_h_a \filter_h_b \filter_h_c \filter_h_d q13 r3 2: subs r12, r12, #1 vld1.64 {q0,q1}, [r4,:128]! @ read one line of tmp vext.16 q2, q0, q1, #2 vext.16 q3, q0, q1, #3 vext.16 q1, q0, q1, #1 @ do last because it writes to q1 which is read by the other vext instructions mspel_filter.16 q11 q12 d22 d23 d21 d0 d1 d2 d3 d4 d5 d6 d7 \filter_h_a \filter_h_b \filter_h_c \filter_h_d d28 d29 d30 d31 q13 7 vst1.64 {d21}, [r0,:64], r2 @ store and increment dst bne 2b mov sp, r11 pop {r4, r11, pc} endfunc .endm @ Use C preprocessor and assembler macros to expand to functions for horizontal and vertical filtering. #define PUT_VC1_MSPEL_MC_HV(hmode, vmode) \ put_vc1_mspel_mc_hv hmode vmode \ MSPEL_MODE_ ## hmode ## _MUL_CONSTANTS \ MSPEL_MODE_ ## vmode ## _MUL_CONSTANTS \ MSPEL_MODES_ ## hmode ## vmode ## _ADDSHIFT_CONSTANTS PUT_VC1_MSPEL_MC_HV(1, 1) PUT_VC1_MSPEL_MC_HV(1, 2) PUT_VC1_MSPEL_MC_HV(1, 3) PUT_VC1_MSPEL_MC_HV(2, 1) PUT_VC1_MSPEL_MC_HV(2, 2) PUT_VC1_MSPEL_MC_HV(2, 3) PUT_VC1_MSPEL_MC_HV(3, 1) PUT_VC1_MSPEL_MC_HV(3, 2) PUT_VC1_MSPEL_MC_HV(3, 3) #undef PUT_VC1_MSPEL_MC_HV .macro put_vc1_mspel_mc_h_only hmode filter_a filter_b filter_c filter_d filter_add filter_shift function ff_put_vc1_mspel_mc\hmode\()0_neon, export=1 rsb r3, r3, #\filter_add @ r3 = filter_add - r = filter_add - rnd mov r12, #8 @ loop counter sub r1, r1, #1 @ slide back, using immediate mspel_constants i8 d28 d29 d30 d31 \filter_a \filter_b \filter_c \filter_d q13 r3 1: subs r12, r12, #1 vld1.64 {d0,d1}, [r1], r2 @ read 16 bytes even though we only need 11, also src += stride vext.8 d2, d0, d1, #2 vext.8 d3, d0, d1, #3 vext.8 d1, d0, d1, #1 @ do last because it writes to d1 which is read by the other vext instructions mspel_filter q11 d21 d0 d1 d2 d3 \filter_a \filter_b \filter_c \filter_d d28 d29 d30 d31 q13 \filter_shift vst1.64 {d21}, [r0,:64], r2 @ store and increment dst bne 1b bx lr endfunc .endm @ Use C preprocessor and assembler macros to expand to functions for horizontal only filtering. 
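@ For example, PUT_VC1_MSPEL_MC_H_ONLY(1) below first expands through the C
@ preprocessor to
@     put_vc1_mspel_mc_h_only 1 4 53 18 3 32 6
@ and the assembler macro then emits ff_put_vc1_mspel_mc10_neon with those
@ filter taps and rounding parameters.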
#define PUT_VC1_MSPEL_MC_H_ONLY(hmode) \ put_vc1_mspel_mc_h_only hmode MSPEL_MODE_ ## hmode ## _MUL_CONSTANTS MSPEL_MODE_ ## hmode ## _ADDSHIFT_CONSTANTS PUT_VC1_MSPEL_MC_H_ONLY(1) PUT_VC1_MSPEL_MC_H_ONLY(2) PUT_VC1_MSPEL_MC_H_ONLY(3) #undef PUT_VC1_MSPEL_MC_H_ONLY @ (uint8_t *dst [r0], const uint8_t *src [r1], ptrdiff_t stride [r2], int rnd [r3]) .macro put_vc1_mspel_mc_v_only vmode filter_a filter_b filter_c filter_d filter_add filter_shift function ff_put_vc1_mspel_mc0\vmode\()_neon, export=1 add r3, r3, #\filter_add - 1 @ r3 = filter_add - r = filter_add - (1 - rnd) = filter_add - 1 + rnd mov r12, #8 @ loop counter sub r1, r1, r2 @ r1 = &src[-stride] @ slide back mspel_constants i8 d28 d29 d30 d31 \filter_a \filter_b \filter_c \filter_d q13 r3 vld1.64 {d0}, [r1], r2 @ d0 = src[-stride] vld1.64 {d1}, [r1], r2 @ d1 = src[0] vld1.64 {d2}, [r1], r2 @ d2 = src[stride] 1: subs r12, r12, #4 vld1.64 {d3}, [r1], r2 @ d3 = src[stride * 2] mspel_filter q11 d21 d0 d1 d2 d3 \filter_a \filter_b \filter_c \filter_d d28 d29 d30 d31 q13 \filter_shift vst1.64 {d21}, [r0,:64], r2 @ store and increment dst vld1.64 {d0}, [r1], r2 @ d0 = next line mspel_filter q11 d21 d1 d2 d3 d0 \filter_a \filter_b \filter_c \filter_d d28 d29 d30 d31 q13 \filter_shift vst1.64 {d21}, [r0,:64], r2 @ store and increment dst vld1.64 {d1}, [r1], r2 @ d1 = next line mspel_filter q11 d21 d2 d3 d0 d1 \filter_a \filter_b \filter_c \filter_d d28 d29 d30 d31 q13 \filter_shift vst1.64 {d21}, [r0,:64], r2 @ store and increment dst vld1.64 {d2}, [r1], r2 @ d2 = next line mspel_filter q11 d21 d3 d0 d1 d2 \filter_a \filter_b \filter_c \filter_d d28 d29 d30 d31 q13 \filter_shift vst1.64 {d21}, [r0,:64], r2 @ store and increment dst bne 1b bx lr endfunc .endm @ Use C preprocessor and assembler macros to expand to functions for vertical only filtering. 
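@ Note how the vertical-only loop above is unrolled 4x so that the last
@ three source rows stay live in d0-d3 while exactly one new row is loaded
@ per output row; each input row is therefore fetched from memory only
@ once, even though every output row depends on four input rows.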
#define PUT_VC1_MSPEL_MC_V_ONLY(vmode) \ put_vc1_mspel_mc_v_only vmode MSPEL_MODE_ ## vmode ## _MUL_CONSTANTS MSPEL_MODE_ ## vmode ## _ADDSHIFT_CONSTANTS PUT_VC1_MSPEL_MC_V_ONLY(1) PUT_VC1_MSPEL_MC_V_ONLY(2) PUT_VC1_MSPEL_MC_V_ONLY(3) #undef PUT_VC1_MSPEL_MC_V_ONLY #endif function ff_put_pixels8x8_neon, export=1 vld1.64 {d0}, [r1], r2 vld1.64 {d1}, [r1], r2 vld1.64 {d2}, [r1], r2 vld1.64 {d3}, [r1], r2 vld1.64 {d4}, [r1], r2 vld1.64 {d5}, [r1], r2 vld1.64 {d6}, [r1], r2 vld1.64 {d7}, [r1] vst1.64 {d0}, [r0,:64], r2 vst1.64 {d1}, [r0,:64], r2 vst1.64 {d2}, [r0,:64], r2 vst1.64 {d3}, [r0,:64], r2 vst1.64 {d4}, [r0,:64], r2 vst1.64 {d5}, [r0,:64], r2 vst1.64 {d6}, [r0,:64], r2 vst1.64 {d7}, [r0,:64] bx lr endfunc function ff_vc1_inv_trans_8x8_dc_neon, export=1 ldrsh r2, [r2] @ int dc = block[0]; vld1.64 {d0}, [r0,:64], r1 vld1.64 {d1}, [r0,:64], r1 vld1.64 {d4}, [r0,:64], r1 vld1.64 {d5}, [r0,:64], r1 add r2, r2, r2, lsl #1 @ dc = (3 * dc + 1) >> 1; vld1.64 {d6}, [r0,:64], r1 add r2, r2, #1 vld1.64 {d7}, [r0,:64], r1 vld1.64 {d16}, [r0,:64], r1 vld1.64 {d17}, [r0,:64], r1 asr r2, r2, #1 sub r0, r0, r1, lsl #3 @ restore r0 to original value add r2, r2, r2, lsl #1 @ dc = (3 * dc + 16) >> 5; add r2, r2, #16 asr r2, r2, #5 vdup.16 q1, r2 @ dc vaddw.u8 q9, q1, d0 vaddw.u8 q10, q1, d1 vaddw.u8 q11, q1, d4 vaddw.u8 q12, q1, d5 vqmovun.s16 d0, q9 vqmovun.s16 d1, q10 vqmovun.s16 d4, q11 vst1.64 {d0}, [r0,:64], r1 vqmovun.s16 d5, q12 vst1.64 {d1}, [r0,:64], r1 vaddw.u8 q13, q1, d6 vst1.64 {d4}, [r0,:64], r1 vaddw.u8 q14, q1, d7 vst1.64 {d5}, [r0,:64], r1 vaddw.u8 q15, q1, d16 vaddw.u8 q1, q1, d17 @ this destroys q1 vqmovun.s16 d6, q13 vqmovun.s16 d7, q14 vqmovun.s16 d16, q15 vqmovun.s16 d17, q1 vst1.64 {d6}, [r0,:64], r1 vst1.64 {d7}, [r0,:64], r1 vst1.64 {d16}, [r0,:64], r1 vst1.64 {d17}, [r0,:64] bx lr endfunc function ff_vc1_inv_trans_8x4_dc_neon, export=1 ldrsh r2, [r2] @ int dc = block[0]; vld1.64 {d0}, [r0,:64], r1 vld1.64 {d1}, [r0,:64], r1 vld1.64 {d4}, [r0,:64], r1 vld1.64 {d5}, [r0,:64], r1 add r2, r2, r2, lsl #1 @ dc = ( 3 * dc + 1) >> 1; sub r0, r0, r1, lsl #2 @ restore r0 to original value add r2, r2, #1 asr r2, r2, #1 add r2, r2, r2, lsl #4 @ dc = (17 * dc + 64) >> 7; add r2, r2, #64 asr r2, r2, #7 vdup.16 q1, r2 @ dc vaddw.u8 q3, q1, d0 vaddw.u8 q8, q1, d1 vaddw.u8 q9, q1, d4 vaddw.u8 q10, q1, d5 vqmovun.s16 d0, q3 vqmovun.s16 d1, q8 vqmovun.s16 d4, q9 vst1.64 {d0}, [r0,:64], r1 vqmovun.s16 d5, q10 vst1.64 {d1}, [r0,:64], r1 vst1.64 {d4}, [r0,:64], r1 vst1.64 {d5}, [r0,:64] bx lr endfunc function ff_vc1_inv_trans_4x8_dc_neon, export=1 ldrsh r2, [r2] @ int dc = block[0]; vld1.32 {d0[]}, [r0,:32], r1 vld1.32 {d1[]}, [r0,:32], r1 vld1.32 {d0[1]}, [r0,:32], r1 vld1.32 {d1[1]}, [r0,:32], r1 add r2, r2, r2, lsl #4 @ dc = (17 * dc + 4) >> 3; vld1.32 {d4[]}, [r0,:32], r1 add r2, r2, #4 vld1.32 {d5[]}, [r0,:32], r1 vld1.32 {d4[1]}, [r0,:32], r1 asr r2, r2, #3 vld1.32 {d5[1]}, [r0,:32], r1 add r2, r2, r2, lsl #1 @ dc = (12 * dc + 64) >> 7; sub r0, r0, r1, lsl #3 @ restore r0 to original value lsl r2, r2, #2 add r2, r2, #64 asr r2, r2, #7 vdup.16 q1, r2 @ dc vaddw.u8 q3, q1, d0 vaddw.u8 q8, q1, d1 vaddw.u8 q9, q1, d4 vaddw.u8 q10, q1, d5 vqmovun.s16 d0, q3 vst1.32 {d0[0]}, [r0,:32], r1 vqmovun.s16 d1, q8 vst1.32 {d1[0]}, [r0,:32], r1 vqmovun.s16 d4, q9 vst1.32 {d0[1]}, [r0,:32], r1 vqmovun.s16 d5, q10 vst1.32 {d1[1]}, [r0,:32], r1 vst1.32 {d4[0]}, [r0,:32], r1 vst1.32 {d5[0]}, [r0,:32], r1 vst1.32 {d4[1]}, [r0,:32], r1 vst1.32 {d5[1]}, [r0,:32] bx lr endfunc function ff_vc1_inv_trans_4x4_dc_neon, 
export=1 ldrsh r2, [r2] @ int dc = block[0]; vld1.32 {d0[]}, [r0,:32], r1 vld1.32 {d1[]}, [r0,:32], r1 vld1.32 {d0[1]}, [r0,:32], r1 vld1.32 {d1[1]}, [r0,:32], r1 add r2, r2, r2, lsl #4 @ dc = (17 * dc + 4) >> 3; sub r0, r0, r1, lsl #2 @ restore r0 to original value add r2, r2, #4 asr r2, r2, #3 add r2, r2, r2, lsl #4 @ dc = (17 * dc + 64) >> 7; add r2, r2, #64 asr r2, r2, #7 vdup.16 q1, r2 @ dc vaddw.u8 q2, q1, d0 vaddw.u8 q3, q1, d1 vqmovun.s16 d0, q2 vst1.32 {d0[0]}, [r0,:32], r1 vqmovun.s16 d1, q3 vst1.32 {d1[0]}, [r0,:32], r1 vst1.32 {d0[1]}, [r0,:32], r1 vst1.32 {d1[1]}, [r0,:32] bx lr endfunc
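@ For reference, a compact C model of the DC-only path above, assembled
@ from the inline comments (a sketch, not the vc1dsp.c source):
@     dc = (17 * block[0] + 4) >> 3;
@     dc = (17 * dc + 64) >> 7;
@     for (i = 0; i < 4; i++, dest += linesize)
@         for (j = 0; j < 4; j++)
@             dest[j] = av_clip_uint8(dest[j] + dc);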
Akagi201/ffmpeg-xcode
12,010
ffmpeg-3.0.2/libavcodec/arm/h264pred_neon.S
/* * Copyright (c) 2009 Mans Rullgard <mans@mansr.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/arm/asm.S" .macro ldcol.8 rd, rs, rt, n=8, hi=0 .if \n == 8 || \hi == 0 vld1.8 {\rd[0]}, [\rs], \rt vld1.8 {\rd[1]}, [\rs], \rt vld1.8 {\rd[2]}, [\rs], \rt vld1.8 {\rd[3]}, [\rs], \rt .endif .if \n == 8 || \hi == 1 vld1.8 {\rd[4]}, [\rs], \rt vld1.8 {\rd[5]}, [\rs], \rt vld1.8 {\rd[6]}, [\rs], \rt vld1.8 {\rd[7]}, [\rs], \rt .endif .endm .macro add16x8 dq, dl, dh, rl, rh vaddl.u8 \dq, \rl, \rh vadd.u16 \dl, \dl, \dh vpadd.u16 \dl, \dl, \dl vpadd.u16 \dl, \dl, \dl .endm function ff_pred16x16_128_dc_neon, export=1 vmov.i8 q0, #128 b .L_pred16x16_dc_end endfunc function ff_pred16x16_top_dc_neon, export=1 sub r2, r0, r1 vld1.8 {q0}, [r2,:128] add16x8 q0, d0, d1, d0, d1 vrshrn.u16 d0, q0, #4 vdup.8 q0, d0[0] b .L_pred16x16_dc_end endfunc function ff_pred16x16_left_dc_neon, export=1 sub r2, r0, #1 ldcol.8 d0, r2, r1 ldcol.8 d1, r2, r1 add16x8 q0, d0, d1, d0, d1 vrshrn.u16 d0, q0, #4 vdup.8 q0, d0[0] b .L_pred16x16_dc_end endfunc function ff_pred16x16_dc_neon, export=1 sub r2, r0, r1 vld1.8 {q0}, [r2,:128] sub r2, r0, #1 ldcol.8 d2, r2, r1 ldcol.8 d3, r2, r1 vaddl.u8 q0, d0, d1 vaddl.u8 q1, d2, d3 vadd.u16 q0, q0, q1 vadd.u16 d0, d0, d1 vpadd.u16 d0, d0, d0 vpadd.u16 d0, d0, d0 vrshrn.u16 d0, q0, #5 vdup.8 q0, d0[0] .L_pred16x16_dc_end: mov r3, #8 6: vst1.8 {q0}, [r0,:128], r1 vst1.8 {q0}, [r0,:128], r1 subs r3, r3, #1 bne 6b bx lr endfunc function ff_pred16x16_hor_neon, export=1 sub r2, r0, #1 mov r3, #16 1: vld1.8 {d0[],d1[]},[r2], r1 vst1.8 {q0}, [r0,:128], r1 subs r3, r3, #1 bne 1b bx lr endfunc function ff_pred16x16_vert_neon, export=1 sub r0, r0, r1 vld1.8 {q0}, [r0,:128], r1 mov r3, #8 1: vst1.8 {q0}, [r0,:128], r1 vst1.8 {q0}, [r0,:128], r1 subs r3, r3, #1 bne 1b bx lr endfunc function ff_pred16x16_plane_neon, export=1 sub r3, r0, r1 add r2, r3, #8 sub r3, r3, #1 vld1.8 {d0}, [r3] vld1.8 {d2}, [r2,:64], r1 ldcol.8 d1, r3, r1 add r3, r3, r1 ldcol.8 d3, r3, r1 vrev64.8 q0, q0 vaddl.u8 q8, d2, d3 vsubl.u8 q2, d2, d0 vsubl.u8 q3, d3, d1 movrel r3, p16weight vld1.8 {q0}, [r3,:128] vmul.s16 q2, q2, q0 vmul.s16 q3, q3, q0 vadd.i16 d4, d4, d5 vadd.i16 d5, d6, d7 vpadd.i16 d4, d4, d5 vpadd.i16 d4, d4, d4 vshll.s16 q3, d4, #2 vaddw.s16 q2, q3, d4 vrshrn.s32 d4, q2, #6 mov r3, #0 vtrn.16 d4, d5 vadd.i16 d2, d4, d5 vshl.i16 d3, d2, #3 vrev64.16 d16, d17 vsub.i16 d3, d3, d2 vadd.i16 d16, d16, d0 vshl.i16 d2, d16, #4 vsub.i16 d2, d2, d3 vshl.i16 d3, d4, #4 vext.16 q0, q0, q0, #7 vsub.i16 d6, d5, d3 vmov.16 d0[0], r3 vmul.i16 q0, q0, d4[0] vdup.16 q1, d2[0] vdup.16 q2, d4[0] vdup.16 q3, d6[0] vshl.i16 q2, q2, #3 vadd.i16 q1, q1, q0 vadd.i16 q3, q3, q2 mov r3, #16 1: vqshrun.s16 d0, q1, #5 vadd.i16 q1, q1, q2 vqshrun.s16 d1, q1, #5 vadd.i16 q1, q1, q3 vst1.8 {q0}, [r0,:128], r1 subs r3, r3, #1 bne 1b 
bx lr endfunc const p16weight, align=4 .short 1,2,3,4,5,6,7,8 endconst function ff_pred8x8_hor_neon, export=1 sub r2, r0, #1 mov r3, #8 1: vld1.8 {d0[]}, [r2], r1 vst1.8 {d0}, [r0,:64], r1 subs r3, r3, #1 bne 1b bx lr endfunc function ff_pred8x8_vert_neon, export=1 sub r0, r0, r1 vld1.8 {d0}, [r0,:64], r1 mov r3, #4 1: vst1.8 {d0}, [r0,:64], r1 vst1.8 {d0}, [r0,:64], r1 subs r3, r3, #1 bne 1b bx lr endfunc function ff_pred8x8_plane_neon, export=1 sub r3, r0, r1 add r2, r3, #4 sub r3, r3, #1 vld1.32 {d0[0]}, [r3] vld1.32 {d2[0]}, [r2,:32], r1 ldcol.8 d0, r3, r1, 4, hi=1 add r3, r3, r1 ldcol.8 d3, r3, r1, 4 vaddl.u8 q8, d2, d3 vrev32.8 d0, d0 vtrn.32 d2, d3 vsubl.u8 q2, d2, d0 movrel r3, p16weight vld1.16 {q0}, [r3,:128] vmul.s16 d4, d4, d0 vmul.s16 d5, d5, d0 vpadd.i16 d4, d4, d5 vpaddl.s16 d4, d4 vshl.i32 d5, d4, #4 vadd.s32 d4, d4, d5 vrshrn.s32 d4, q2, #5 mov r3, #0 vtrn.16 d4, d5 vadd.i16 d2, d4, d5 vshl.i16 d3, d2, #2 vrev64.16 d16, d16 vsub.i16 d3, d3, d2 vadd.i16 d16, d16, d0 vshl.i16 d2, d16, #4 vsub.i16 d2, d2, d3 vshl.i16 d3, d4, #3 vext.16 q0, q0, q0, #7 vsub.i16 d6, d5, d3 vmov.16 d0[0], r3 vmul.i16 q0, q0, d4[0] vdup.16 q1, d2[0] vdup.16 q2, d4[0] vdup.16 q3, d6[0] vshl.i16 q2, q2, #3 vadd.i16 q1, q1, q0 vadd.i16 q3, q3, q2 mov r3, #8 1: vqshrun.s16 d0, q1, #5 vadd.i16 q1, q1, q3 vst1.8 {d0}, [r0,:64], r1 subs r3, r3, #1 bne 1b bx lr endfunc function ff_pred8x8_128_dc_neon, export=1 vmov.i8 q0, #128 b .L_pred8x8_dc_end endfunc function ff_pred8x8_top_dc_neon, export=1 sub r2, r0, r1 vld1.8 {d0}, [r2,:64] vpaddl.u8 d0, d0 vpadd.u16 d0, d0, d0 vrshrn.u16 d0, q0, #2 vdup.8 d1, d0[1] vdup.8 d0, d0[0] vtrn.32 d0, d1 b .L_pred8x8_dc_end endfunc function ff_pred8x8_left_dc_neon, export=1 sub r2, r0, #1 ldcol.8 d0, r2, r1 vpaddl.u8 d0, d0 vpadd.u16 d0, d0, d0 vrshrn.u16 d0, q0, #2 vdup.8 d1, d0[1] vdup.8 d0, d0[0] b .L_pred8x8_dc_end endfunc function ff_pred8x8_dc_neon, export=1 sub r2, r0, r1 vld1.8 {d0}, [r2,:64] sub r2, r0, #1 ldcol.8 d1, r2, r1 vtrn.32 d0, d1 vpaddl.u8 q0, q0 vpadd.u16 d0, d0, d1 vpadd.u16 d1, d0, d0 vrshrn.u16 d2, q0, #3 vrshrn.u16 d3, q0, #2 vdup.8 d0, d2[4] vdup.8 d1, d3[3] vdup.8 d4, d3[2] vdup.8 d5, d2[5] vtrn.32 q0, q2 .L_pred8x8_dc_end: mov r3, #4 add r2, r0, r1, lsl #2 6: vst1.8 {d0}, [r0,:64], r1 vst1.8 {d1}, [r2,:64], r1 subs r3, r3, #1 bne 6b bx lr endfunc function ff_pred8x8_l0t_dc_neon, export=1 sub r2, r0, r1 vld1.8 {d0}, [r2,:64] sub r2, r0, #1 ldcol.8 d1, r2, r1, 4 vtrn.32 d0, d1 vpaddl.u8 q0, q0 vpadd.u16 d0, d0, d1 vpadd.u16 d1, d0, d0 vrshrn.u16 d2, q0, #3 vrshrn.u16 d3, q0, #2 vdup.8 d0, d2[4] vdup.8 d1, d3[0] vdup.8 q2, d3[2] vtrn.32 q0, q2 b .L_pred8x8_dc_end endfunc function ff_pred8x8_l00_dc_neon, export=1 sub r2, r0, #1 ldcol.8 d0, r2, r1, 4 vpaddl.u8 d0, d0 vpadd.u16 d0, d0, d0 vrshrn.u16 d0, q0, #2 vmov.i8 d1, #128 vdup.8 d0, d0[0] b .L_pred8x8_dc_end endfunc function ff_pred8x8_0lt_dc_neon, export=1 sub r2, r0, r1 vld1.8 {d0}, [r2,:64] add r2, r0, r1, lsl #2 sub r2, r2, #1 ldcol.8 d1, r2, r1, 4, hi=1 vtrn.32 d0, d1 vpaddl.u8 q0, q0 vpadd.u16 d0, d0, d1 vpadd.u16 d1, d0, d0 vrshrn.u16 d3, q0, #2 vrshrn.u16 d2, q0, #3 vdup.8 d0, d3[0] vdup.8 d1, d3[3] vdup.8 d4, d3[2] vdup.8 d5, d2[5] vtrn.32 q0, q2 b .L_pred8x8_dc_end endfunc function ff_pred8x8_0l0_dc_neon, export=1 add r2, r0, r1, lsl #2 sub r2, r2, #1 ldcol.8 d1, r2, r1, 4 vpaddl.u8 d2, d1 vpadd.u16 d2, d2, d2 vrshrn.u16 d1, q1, #2 vmov.i8 d0, #128 vdup.8 d1, d1[0] b .L_pred8x8_dc_end endfunc
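@ For orientation, the full-DC predictor above (ff_pred16x16_dc_neon) is
@ the NEON form of, roughly (a sketch of the standard H.264 definition):
@     int dc = 16;                              /* rounding term */
@     for (i = 0; i < 16; i++)
@         dc += src[i - stride] + src[-1 + i * stride];
@     dc >>= 5;
@     /* every pixel of the 16x16 block is then set to dc */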
Akagi201/ffmpeg-xcode
12,850
ffmpeg-3.0.2/libavcodec/arm/simple_idct_neon.S
/* * ARM NEON IDCT * * Copyright (c) 2008 Mans Rullgard <mans@mansr.com> * * Based on Simple IDCT * Copyright (c) 2001 Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/arm/asm.S" #define W1 22725 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 #define W2 21407 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 #define W3 19266 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 #define W4 16383 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 #define W5 12873 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 #define W6 8867 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 #define W7 4520 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 #define W4c ((1<<(COL_SHIFT-1))/W4) #define ROW_SHIFT 11 #define COL_SHIFT 20 #define w1 d0[0] #define w2 d0[1] #define w3 d0[2] #define w4 d0[3] #define w5 d1[0] #define w6 d1[1] #define w7 d1[2] #define w4c d1[3] .macro idct_col4_top vmull.s16 q7, d6, w2 /* q9 = W2 * col[2] */ vmull.s16 q8, d6, w6 /* q10 = W6 * col[2] */ vmull.s16 q9, d4, w1 /* q9 = W1 * col[1] */ vadd.i32 q11, q15, q7 vmull.s16 q10, d4, w3 /* q10 = W3 * col[1] */ vadd.i32 q12, q15, q8 vmull.s16 q5, d4, w5 /* q5 = W5 * col[1] */ vsub.i32 q13, q15, q8 vmull.s16 q6, d4, w7 /* q6 = W7 * col[1] */ vsub.i32 q14, q15, q7 vmlal.s16 q9, d8, w3 /* q9 += W3 * col[3] */ vmlsl.s16 q10, d8, w7 /* q10 -= W7 * col[3] */ vmlsl.s16 q5, d8, w1 /* q5 -= W1 * col[3] */ vmlsl.s16 q6, d8, w5 /* q6 -= W5 * col[3] */ .endm .text .align 6 function idct_row4_pld_neon pld [r0] add r3, r0, r1, lsl #2 pld [r0, r1] pld [r0, r1, lsl #1] A pld [r3, -r1] pld [r3] pld [r3, r1] add r3, r3, r1, lsl #1 pld [r3] pld [r3, r1] endfunc function idct_row4_neon vmov.i32 q15, #(1<<(ROW_SHIFT-1)) vld1.64 {d2-d5}, [r2,:128]! vmlal.s16 q15, d2, w4 /* q15 += W4 * col[0] */ vld1.64 {d6,d7}, [r2,:128]! vorr d10, d3, d5 vld1.64 {d8,d9}, [r2,:128]! 
add r2, r2, #-64 vorr d11, d7, d9 vorr d10, d10, d11 vmov r3, r4, d10 idct_col4_top orrs r3, r3, r4 beq 1f vmull.s16 q7, d3, w4 /* q7 = W4 * col[4] */ vmlal.s16 q9, d5, w5 /* q9 += W5 * col[5] */ vmlsl.s16 q10, d5, w1 /* q10 -= W1 * col[5] */ vmull.s16 q8, d7, w2 /* q8 = W2 * col[6] */ vmlal.s16 q5, d5, w7 /* q5 += W7 * col[5] */ vadd.i32 q11, q11, q7 vsub.i32 q12, q12, q7 vsub.i32 q13, q13, q7 vadd.i32 q14, q14, q7 vmlal.s16 q6, d5, w3 /* q6 += W3 * col[5] */ vmull.s16 q7, d7, w6 /* q7 = W6 * col[6] */ vmlal.s16 q9, d9, w7 vmlsl.s16 q10, d9, w5 vmlal.s16 q5, d9, w3 vmlsl.s16 q6, d9, w1 vadd.i32 q11, q11, q7 vsub.i32 q12, q12, q8 vadd.i32 q13, q13, q8 vsub.i32 q14, q14, q7 1: vadd.i32 q3, q11, q9 vadd.i32 q4, q12, q10 vshrn.i32 d2, q3, #ROW_SHIFT vshrn.i32 d4, q4, #ROW_SHIFT vadd.i32 q7, q13, q5 vadd.i32 q8, q14, q6 vtrn.16 d2, d4 vshrn.i32 d6, q7, #ROW_SHIFT vshrn.i32 d8, q8, #ROW_SHIFT vsub.i32 q14, q14, q6 vsub.i32 q11, q11, q9 vtrn.16 d6, d8 vsub.i32 q13, q13, q5 vshrn.i32 d3, q14, #ROW_SHIFT vtrn.32 d2, d6 vsub.i32 q12, q12, q10 vtrn.32 d4, d8 vshrn.i32 d5, q13, #ROW_SHIFT vshrn.i32 d7, q12, #ROW_SHIFT vshrn.i32 d9, q11, #ROW_SHIFT vtrn.16 d3, d5 vtrn.16 d7, d9 vtrn.32 d3, d7 vtrn.32 d5, d9 vst1.64 {d2-d5}, [r2,:128]! vst1.64 {d6-d9}, [r2,:128]! bx lr endfunc function idct_col4_neon mov ip, #16 vld1.64 {d2}, [r2,:64], ip /* d2 = col[0] */ vdup.16 d30, w4c vld1.64 {d4}, [r2,:64], ip /* d3 = col[1] */ vadd.i16 d30, d30, d2 vld1.64 {d6}, [r2,:64], ip /* d4 = col[2] */ vmull.s16 q15, d30, w4 /* q15 = W4*(col[0]+(1<<COL_SHIFT-1)/W4)*/ vld1.64 {d8}, [r2,:64], ip /* d5 = col[3] */ ldrd r4, r5, [r2] ldrd r6, r7, [r2, #16] orrs r4, r4, r5 idct_col4_top it eq addeq r2, r2, #16 beq 1f vld1.64 {d3}, [r2,:64], ip /* d6 = col[4] */ vmull.s16 q7, d3, w4 /* q7 = W4 * col[4] */ vadd.i32 q11, q11, q7 vsub.i32 q12, q12, q7 vsub.i32 q13, q13, q7 vadd.i32 q14, q14, q7 1: orrs r6, r6, r7 ldrd r4, r5, [r2, #16] it eq addeq r2, r2, #16 beq 2f vld1.64 {d5}, [r2,:64], ip /* d7 = col[5] */ vmlal.s16 q9, d5, w5 /* q9 += W5 * col[5] */ vmlsl.s16 q10, d5, w1 /* q10 -= W1 * col[5] */ vmlal.s16 q5, d5, w7 /* q5 += W7 * col[5] */ vmlal.s16 q6, d5, w3 /* q6 += W3 * col[5] */ 2: orrs r4, r4, r5 ldrd r4, r5, [r2, #16] it eq addeq r2, r2, #16 beq 3f vld1.64 {d7}, [r2,:64], ip /* d8 = col[6] */ vmull.s16 q7, d7, w6 /* q7 = W6 * col[6] */ vmull.s16 q8, d7, w2 /* q8 = W2 * col[6] */ vadd.i32 q11, q11, q7 vsub.i32 q14, q14, q7 vsub.i32 q12, q12, q8 vadd.i32 q13, q13, q8 3: orrs r4, r4, r5 it eq addeq r2, r2, #16 beq 4f vld1.64 {d9}, [r2,:64], ip /* d9 = col[7] */ vmlal.s16 q9, d9, w7 vmlsl.s16 q10, d9, w5 vmlal.s16 q5, d9, w3 vmlsl.s16 q6, d9, w1 4: vaddhn.i32 d2, q11, q9 vaddhn.i32 d3, q12, q10 vaddhn.i32 d4, q13, q5 vaddhn.i32 d5, q14, q6 vsubhn.i32 d9, q11, q9 vsubhn.i32 d8, q12, q10 vsubhn.i32 d7, q13, q5 vsubhn.i32 d6, q14, q6 bx lr endfunc .align 6 function idct_col4_st8_neon vqshrun.s16 d2, q1, #COL_SHIFT-16 vqshrun.s16 d3, q2, #COL_SHIFT-16 vqshrun.s16 d4, q3, #COL_SHIFT-16 vqshrun.s16 d5, q4, #COL_SHIFT-16 vst1.32 {d2[0]}, [r0,:32], r1 vst1.32 {d2[1]}, [r0,:32], r1 vst1.32 {d3[0]}, [r0,:32], r1 vst1.32 {d3[1]}, [r0,:32], r1 vst1.32 {d4[0]}, [r0,:32], r1 vst1.32 {d4[1]}, [r0,:32], r1 vst1.32 {d5[0]}, [r0,:32], r1 vst1.32 {d5[1]}, [r0,:32], r1 bx lr endfunc const idct_coeff_neon, align=4 .short W1, W2, W3, W4, W5, W6, W7, W4c endconst .macro idct_start data push {r4-r7, lr} pld [\data] pld [\data, #64] vpush {d8-d15} movrel r3, idct_coeff_neon vld1.64 {d0,d1}, [r3,:128] .endm .macro idct_end vpop {d8-d15} pop {r4-r7, 
pc} .endm /* void ff_simple_idct_put_neon(uint8_t *dst, int line_size, int16_t *data); */ function ff_simple_idct_put_neon, export=1 idct_start r2 bl idct_row4_pld_neon bl idct_row4_neon add r2, r2, #-128 bl idct_col4_neon bl idct_col4_st8_neon sub r0, r0, r1, lsl #3 add r0, r0, #4 add r2, r2, #-120 bl idct_col4_neon bl idct_col4_st8_neon idct_end endfunc .align 6 function idct_col4_add8_neon mov ip, r0 vld1.32 {d10[0]}, [r0,:32], r1 vshr.s16 q1, q1, #COL_SHIFT-16 vld1.32 {d10[1]}, [r0,:32], r1 vshr.s16 q2, q2, #COL_SHIFT-16 vld1.32 {d11[0]}, [r0,:32], r1 vshr.s16 q3, q3, #COL_SHIFT-16 vld1.32 {d11[1]}, [r0,:32], r1 vshr.s16 q4, q4, #COL_SHIFT-16 vld1.32 {d12[0]}, [r0,:32], r1 vaddw.u8 q1, q1, d10 vld1.32 {d12[1]}, [r0,:32], r1 vaddw.u8 q2, q2, d11 vld1.32 {d13[0]}, [r0,:32], r1 vqmovun.s16 d2, q1 vld1.32 {d13[1]}, [r0,:32], r1 vaddw.u8 q3, q3, d12 vst1.32 {d2[0]}, [ip,:32], r1 vqmovun.s16 d3, q2 vst1.32 {d2[1]}, [ip,:32], r1 vaddw.u8 q4, q4, d13 vst1.32 {d3[0]}, [ip,:32], r1 vqmovun.s16 d4, q3 vst1.32 {d3[1]}, [ip,:32], r1 vqmovun.s16 d5, q4 vst1.32 {d4[0]}, [ip,:32], r1 vst1.32 {d4[1]}, [ip,:32], r1 vst1.32 {d5[0]}, [ip,:32], r1 vst1.32 {d5[1]}, [ip,:32], r1 bx lr endfunc /* void ff_simple_idct_add_neon(uint8_t *dst, int line_size, int16_t *data); */ function ff_simple_idct_add_neon, export=1 idct_start r2 bl idct_row4_pld_neon bl idct_row4_neon add r2, r2, #-128 bl idct_col4_neon bl idct_col4_add8_neon sub r0, r0, r1, lsl #3 add r0, r0, #4 add r2, r2, #-120 bl idct_col4_neon bl idct_col4_add8_neon idct_end endfunc .align 6 function idct_col4_st16_neon mov ip, #16 vshr.s16 q1, q1, #COL_SHIFT-16 vshr.s16 q2, q2, #COL_SHIFT-16 vst1.64 {d2}, [r2,:64], ip vshr.s16 q3, q3, #COL_SHIFT-16 vst1.64 {d3}, [r2,:64], ip vshr.s16 q4, q4, #COL_SHIFT-16 vst1.64 {d4}, [r2,:64], ip vst1.64 {d5}, [r2,:64], ip vst1.64 {d6}, [r2,:64], ip vst1.64 {d7}, [r2,:64], ip vst1.64 {d8}, [r2,:64], ip vst1.64 {d9}, [r2,:64], ip bx lr endfunc /* void ff_simple_idct_neon(int16_t *data); */ function ff_simple_idct_neon, export=1 idct_start r0 mov r2, r0 bl idct_row4_neon bl idct_row4_neon add r2, r2, #-128 bl idct_col4_neon add r2, r2, #-128 bl idct_col4_st16_neon add r2, r2, #-120 bl idct_col4_neon add r2, r2, #-128 bl idct_col4_st16_neon idct_end endfunc
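@ A hedged outline of one row pass (idct_row4_neon above), using the W1-W7
@ constants defined at the top of this file (see the original Simple IDCT C
@ code for the authoritative version): the even half builds four terms from
@     a = W4*row[0] + (1 << (ROW_SHIFT - 1))
@ combined with +/- W2*row[2], +/- W6*row[6] and +/- W4*row[4]; the odd
@ half builds four sign-varied sums of W1,W3,W5,W7 times row[1,3,5,7];
@ then, for i = 0..3:
@     row[i]     = (even[i] + odd[i]) >> ROW_SHIFT;
@     row[7 - i] = (even[i] - odd[i]) >> ROW_SHIFT;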
Akagi201/ffmpeg-xcode
4,911
ffmpeg-3.0.2/libavcodec/arm/vp6dsp_neon.S
/* * Copyright (c) 2010 Mans Rullgard <mans@mansr.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/arm/asm.S" .macro vp6_edge_filter vdup.16 q3, r2 @ t vmov.i16 q13, #1 vsubl.u8 q0, d20, d18 @ p[ 0] - p[-s] vsubl.u8 q1, d16, d22 @ p[-2*s] - p[ s] vsubl.u8 q14, d21, d19 vsubl.u8 q15, d17, d23 vadd.i16 q2, q0, q0 @ 2*(p[0]-p[-s]) vadd.i16 d29, d28, d28 vadd.i16 q0, q0, q1 @ p[0]-p[-s] + p[-2*s]-p[s] vadd.i16 d28, d28, d30 vadd.i16 q0, q0, q2 @ 3*(p[0]-p[-s]) + p[-2*s]-p[s] vadd.i16 d28, d28, d29 vrshr.s16 q0, q0, #3 @ v vrshr.s16 d28, d28, #3 vsub.i16 q8, q3, q13 @ t-1 vabs.s16 q1, q0 @ V vshr.s16 q2, q0, #15 @ s vabs.s16 d30, d28 vshr.s16 d29, d28, #15 vsub.i16 q12, q1, q3 @ V-t vsub.i16 d31, d30, d6 vsub.i16 q12, q12, q13 @ V-t-1 vsub.i16 d31, d31, d26 vcge.u16 q12, q12, q8 @ V-t-1 >= t-1 vcge.u16 d31, d31, d16 vadd.i16 q13, q3, q3 @ 2*t vadd.i16 d16, d6, d6 vsub.i16 q13, q13, q1 @ 2*t - V vsub.i16 d16, d16, d30 vadd.i16 q13, q13, q2 @ += s vadd.i16 d16, d16, d29 veor q13, q13, q2 @ ^= s veor d16, d16, d29 vbif q0, q13, q12 vbif d28, d16, d31 vmovl.u8 q1, d20 vmovl.u8 q15, d21 vaddw.u8 q2, q0, d18 vaddw.u8 q3, q14, d19 vsub.i16 q1, q1, q0 vsub.i16 d30, d30, d28 vqmovun.s16 d18, q2 vqmovun.s16 d19, q3 vqmovun.s16 d20, q1 vqmovun.s16 d21, q15 .endm function ff_vp6_edge_filter_ver_neon, export=1 sub r0, r0, r1, lsl #1 vld1.8 {q8}, [r0], r1 @ p[-2*s] vld1.8 {q9}, [r0], r1 @ p[-s] vld1.8 {q10}, [r0], r1 @ p[0] vld1.8 {q11}, [r0] @ p[s] vp6_edge_filter sub r0, r0, r1, lsl #1 sub r1, r1, #8 vst1.8 {d18}, [r0]! vst1.32 {d19[0]}, [r0], r1 vst1.8 {d20}, [r0]! vst1.32 {d21[0]}, [r0] bx lr endfunc function ff_vp6_edge_filter_hor_neon, export=1 sub r3, r0, #1 sub r0, r0, #2 vld1.32 {d16[0]}, [r0], r1 vld1.32 {d18[0]}, [r0], r1 vld1.32 {d20[0]}, [r0], r1 vld1.32 {d22[0]}, [r0], r1 vld1.32 {d16[1]}, [r0], r1 vld1.32 {d18[1]}, [r0], r1 vld1.32 {d20[1]}, [r0], r1 vld1.32 {d22[1]}, [r0], r1 vld1.32 {d17[0]}, [r0], r1 vld1.32 {d19[0]}, [r0], r1 vld1.32 {d21[0]}, [r0], r1 vld1.32 {d23[0]}, [r0], r1 vtrn.8 q8, q9 vtrn.8 q10, q11 vtrn.16 q8, q10 vtrn.16 q9, q11 vp6_edge_filter vtrn.8 q9, q10 vst1.16 {d18[0]}, [r3], r1 vst1.16 {d20[0]}, [r3], r1 vst1.16 {d18[1]}, [r3], r1 vst1.16 {d20[1]}, [r3], r1 vst1.16 {d18[2]}, [r3], r1 vst1.16 {d20[2]}, [r3], r1 vst1.16 {d18[3]}, [r3], r1 vst1.16 {d20[3]}, [r3], r1 vst1.16 {d19[0]}, [r3], r1 vst1.16 {d21[0]}, [r3], r1 vst1.16 {d19[1]}, [r3], r1 vst1.16 {d21[1]}, [r3], r1 bx lr endfunc
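@ Putting the inline comments of vp6_edge_filter together, the per-pixel
@ operation is roughly this C (a sketch; the canonical code is the edge
@ filter in vp6dsp.c):
@     v = (3 * (p[0] - p[-s]) + p[-2*s] - p[s] + 4) >> 3;
@     V = abs(v);
@     if ((unsigned)(V - t - 1) < (unsigned)(t - 1))   /* i.e. t < V < 2*t */
@         v = (v >= 0 ? 1 : -1) * (2*t - V);
@     p[-s] = clip_uint8(p[-s] + v);
@     p[0]  = clip_uint8(p[0]  - v);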
Akagi201/ffmpeg-xcode
14,279
ffmpeg-3.0.2/libavcodec/arm/sbrdsp_neon.S
/* * Copyright (c) 2012 Mans Rullgard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/arm/asm.S" function ff_sbr_sum64x5_neon, export=1 push {lr} add r1, r0, # 64*4 add r2, r0, #128*4 add r3, r0, #192*4 add lr, r0, #256*4 mov r12, #64 1: vld1.32 {q0}, [r0,:128] vld1.32 {q1}, [r1,:128]! vadd.f32 q0, q0, q1 vld1.32 {q2}, [r2,:128]! vadd.f32 q0, q0, q2 vld1.32 {q3}, [r3,:128]! vadd.f32 q0, q0, q3 vld1.32 {q8}, [lr,:128]! vadd.f32 q0, q0, q8 vst1.32 {q0}, [r0,:128]! subs r12, #4 bgt 1b pop {pc} endfunc function ff_sbr_sum_square_neon, export=1 vmov.f32 q0, #0.0 1: vld1.32 {q1}, [r0,:128]! vmla.f32 q0, q1, q1 subs r1, r1, #2 bgt 1b vadd.f32 d0, d0, d1 vpadd.f32 d0, d0, d0 NOVFP vmov.32 r0, d0[0] bx lr endfunc function ff_sbr_neg_odd_64_neon, export=1 mov r1, r0 vmov.i32 q8, #1<<31 vld2.32 {q0,q1}, [r0,:128]! veor q1, q1, q8 vld2.32 {q2,q3}, [r0,:128]! .rept 3 vst2.32 {q0,q1}, [r1,:128]! veor q3, q3, q8 vld2.32 {q0,q1}, [r0,:128]! vst2.32 {q2,q3}, [r1,:128]! veor q1, q1, q8 vld2.32 {q2,q3}, [r0,:128]! .endr veor q3, q3, q8 vst2.32 {q0,q1}, [r1,:128]! vst2.32 {q2,q3}, [r1,:128]! bx lr endfunc function ff_sbr_qmf_pre_shuffle_neon, export=1 add r1, r0, #60*4 add r2, r0, #64*4 vld1.32 {d0}, [r0,:64]! vst1.32 {d0}, [r2,:64]! mov r3, #-16 mov r12, #24 vmov.i32 q8, #1<<31 vld1.32 {q0}, [r1,:128], r3 vld1.32 {d2}, [r0,:64]! 1: vld1.32 {d3,d4}, [r0,:128]! vrev64.32 q0, q0 vld1.32 {q9}, [r1,:128], r3 veor q0, q0, q8 vld1.32 {d5,d6}, [r0,:128]! vswp d0, d1 vrev64.32 q9, q9 vst2.32 {q0,q1}, [r2,:64]! vmov q10, q2 veor q9, q9, q8 vmov d2, d6 vswp d18, d19 vld1.32 {q0}, [r1,:128], r3 vst2.32 {q9,q10}, [r2,:64]! subs r12, r12, #8 bgt 1b vld1.32 {d3,d4}, [r0,:128]! vrev64.32 q0, q0 vld1.32 {q9}, [r1,:128], r3 veor q0, q0, q8 vld1.32 {d5}, [r0,:64]! vswp d0, d1 vrev64.32 q9, q9 vst2.32 {q0,q1}, [r2,:64]! vswp d4, d5 veor q1, q9, q8 vst2.32 {d3,d5}, [r2,:64]! vst2.32 {d2[0],d4[0]}, [r2,:64]! bx lr endfunc function ff_sbr_qmf_post_shuffle_neon, export=1 add r2, r1, #60*4 mov r3, #-16 mov r12, #32 vmov.i32 q8, #1<<31 vld1.32 {q0}, [r2,:128], r3 vld1.32 {q1}, [r1,:128]! 1: pld [r2, #-32] vrev64.32 q0, q0 vswp d2, d3 veor q0, q0, q8 vld1.32 {q2}, [r2,:128], r3 vld1.32 {q3}, [r1,:128]! vst2.32 {d1,d3}, [r0,:128]! vst2.32 {d0,d2}, [r0,:128]! pld [r2, #-32] vrev64.32 q2, q2 vswp d6, d7 veor q2, q2, q8 vld1.32 {q0}, [r2,:128], r3 vld1.32 {q1}, [r1,:128]! vst2.32 {d5,d7}, [r0,:128]! vst2.32 {d4,d6}, [r0,:128]! subs r12, r12, #8 bgt 1b bx lr endfunc function ff_sbr_qmf_deint_neg_neon, export=1 add r1, r1, #60*4 add r2, r0, #62*4 mov r3, #-16 mov r12, #32 vmov.i32 d2, #1<<31 1: vld2.32 {d0,d1}, [r1,:128], r3 veor d0, d0, d2 vrev64.32 d1, d1 vst1.32 {d0}, [r2,:64] vst1.32 {d1}, [r0,:64]! 
sub r2, r2, #8 subs r12, r12, #2 bgt 1b bx lr endfunc function ff_sbr_qmf_deint_bfly_neon, export=1 push {lr} add r2, r2, #60*4 add r3, r0, #124*4 mov r12, #64 mov lr, #-16 1: vld1.32 {q0}, [r1,:128]! vld1.32 {q1}, [r2,:128], lr vrev64.32 q2, q0 vrev64.32 q3, q1 vadd.f32 d3, d4, d3 vadd.f32 d2, d5, d2 vsub.f32 d0, d0, d7 vsub.f32 d1, d1, d6 vst1.32 {q1}, [r3,:128], lr vst1.32 {q0}, [r0,:128]! subs r12, r12, #4 bgt 1b pop {pc} endfunc function ff_sbr_hf_g_filt_neon, export=1 ldr r12, [sp] add r1, r1, r12, lsl #3 mov r12, #40*2*4 sub r3, r3, #1 vld2.32 {d2[],d3[]},[r2,:64]! vld1.32 {d0}, [r1,:64], r12 1: vld1.32 {d1}, [r1,:64], r12 vmul.f32 q3, q0, q1 vld2.32 {d2[],d3[]},[r2,:64]! vld1.32 {d0}, [r1,:64], r12 vst1.32 {q3}, [r0,:64]! subs r3, r3, #2 bgt 1b it lt bxlt lr vmul.f32 d0, d0, d2 vst1.32 {d0}, [r0,:64]! bx lr endfunc function ff_sbr_hf_gen_neon, export=1 NOVFP vld1.32 {d1[]}, [sp,:32] VFP vdup.32 d1, d0[0] vmul.f32 d0, d1, d1 vld1.32 {d3}, [r2,:64] vld1.32 {d2}, [r3,:64] vmul.f32 q0, q0, q1 ldrd r2, r3, [sp, #4*!HAVE_VFP_ARGS] vtrn.32 d0, d1 vneg.f32 d18, d1 vtrn.32 d18, d1 add r0, r0, r2, lsl #3 add r1, r1, r2, lsl #3 sub r1, r1, #2*8 sub r3, r3, r2 vld1.32 {q1}, [r1,:128]! 1: vld1.32 {q3}, [r1,:128]! vrev64.32 q2, q1 vmov q8, q3 vrev64.32 d20, d3 vrev64.32 d21, d6 vmla.f32 q3, q1, d0[0] vmla.f32 d6, d4, d18 vmla.f32 d7, d20, d18 vmla.f32 d6, d3, d0[1] vmla.f32 d7, d16, d0[1] vmla.f32 d6, d5, d1 vmla.f32 d7, d21, d1 vmov q1, q8 vst1.32 {q3}, [r0,:128]! subs r3, r3, #2 bgt 1b bx lr endfunc function ff_sbr_autocorrelate_neon, export=1 vld1.32 {q0}, [r0,:128]! vmov.f32 q1, #0.0 vmov.f32 q3, #0.0 vmov.f32 d20, #0.0 vmul.f32 d21, d1, d1 vmov q8, q0 vmov q11, q0 mov r12, #36 1: vld1.32 {q2}, [r0,:128]! vrev64.32 q12, q2 vmla.f32 q10, q2, q2 vmla.f32 d2, d1, d4 vmla.f32 d3, d1, d24 vmla.f32 d6, d0, d4 vmla.f32 d7, d0, d24 vmla.f32 d2, d4, d5 vmla.f32 d3, d4, d25 vmla.f32 d6, d1, d5 vmla.f32 d7, d1, d25 vmov q0, q2 subs r12, r12, #2 bgt 1b vld1.32 {q2}, [r0,:128]! vrev64.32 q12, q2 vmla.f32 d2, d1, d4 vmla.f32 d3, d1, d24 vmla.f32 d6, d0, d4 vmla.f32 d7, d0, d24 vadd.f32 d20, d20, d21 vrev64.32 d18, d17 vmla.f32 d6, d1, d5 vmla.f32 d7, d1, d25 vmov q0, q1 vmla.f32 d0, d16, d17 vmla.f32 d1, d16, d18 vmla.f32 d2, d4, d5 vmla.f32 d3, d4, d25 vneg.f32 s15, s15 vmov d21, d20 vpadd.f32 d0, d0, d2 vpadd.f32 d7, d6, d7 vtrn.32 d1, d3 vsub.f32 d6, d1, d3 vmla.f32 d20, d22, d22 vmla.f32 d21, d4, d4 vtrn.32 d0, d6 vpadd.f32 d20, d20, d21 vst1.32 {q3}, [r1,:128]! vst1.32 {d20[1]}, [r1,:32] add r1, r1, #2*4 vst1.32 {d0}, [r1,:64] add r1, r1, #4*4 vst1.32 {d20[0]}, [r1,:32] bx lr endfunc function ff_sbr_hf_apply_noise_0_neon, export=1 vmov.i32 d3, #0 .Lhf_apply_noise_0: push {r4,lr} movrelx r4, X(ff_sbr_noise_table) ldr r12, [sp, #12] add r3, r3, #1 bfc r3, #9, #23 sub r12, r12, #1 1: add lr, r4, r3, lsl #3 vld2.32 {q0}, [r0,:64] vld2.32 {q3}, [lr,:64] vld1.32 {d2}, [r1,:64]! vld1.32 {d18}, [r2,:64]! vceq.f32 d16, d2, #0 veor d2, d2, d3 vmov q2, q0 vmla.f32 d0, d6, d18 vmla.f32 d1, d7, d18 vadd.f32 d4, d4, d2 add r3, r3, #2 bfc r3, #9, #23 vbif d0, d4, d16 vbif d1, d5, d16 vst2.32 {q0}, [r0,:64]! subs r12, r12, #2 bgt 1b blt 2f add lr, r4, r3, lsl #3 vld1.32 {d0}, [r0,:64] vld1.32 {d6}, [lr,:64] vld1.32 {d2[]}, [r1,:32]! vld1.32 {d3[]}, [r2,:32]! vceq.f32 d4, d2, #0 veor d2, d2, d3 vmov d1, d0 vmla.f32 d0, d6, d3 vadd.f32 s2, s2, s4 vbif d0, d1, d4 vst1.32 {d0}, [r0,:64]! 
2: pop {r4,pc} endfunc function ff_sbr_hf_apply_noise_1_neon, export=1 ldr r12, [sp] push {r4,lr} lsl r12, r12, #31 eor lr, r12, #1<<31 vmov d3, r12, lr .Lhf_apply_noise_1: movrelx r4, X(ff_sbr_noise_table) ldr r12, [sp, #12] add r3, r3, #1 bfc r3, #9, #23 sub r12, r12, #1 1: add lr, r4, r3, lsl #3 vld2.32 {q0}, [r0,:64] vld2.32 {q3}, [lr,:64] vld1.32 {d2}, [r1,:64]! vld1.32 {d18}, [r2,:64]! vceq.f32 d16, d2, #0 veor d2, d2, d3 vmov q2, q0 vmla.f32 d0, d6, d18 vmla.f32 d1, d7, d18 vadd.f32 d5, d5, d2 add r3, r3, #2 bfc r3, #9, #23 vbif d0, d4, d16 vbif d1, d5, d16 vst2.32 {q0}, [r0,:64]! subs r12, r12, #2 bgt 1b blt 2f add lr, r4, r3, lsl #3 vld1.32 {d0}, [r0,:64] vld1.32 {d6}, [lr,:64] vld1.32 {d2[]}, [r1,:32]! vld1.32 {d18[]}, [r2,:32]! vceq.f32 d4, d2, #0 veor d2, d2, d3 vmov d1, d0 vmla.f32 d0, d6, d18 vadd.f32 s3, s3, s5 vbif d0, d1, d4 vst1.32 {d0}, [r0,:64]! 2: pop {r4,pc} endfunc function ff_sbr_hf_apply_noise_2_neon, export=1 vmov.i32 d3, #1<<31 b .Lhf_apply_noise_0 endfunc function ff_sbr_hf_apply_noise_3_neon, export=1 ldr r12, [sp] push {r4,lr} lsl r12, r12, #31 eor lr, r12, #1<<31 vmov d3, lr, r12 b .Lhf_apply_noise_1 endfunc
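@ For orientation, the first routine in this file (ff_sbr_sum64x5_neon)
@ corresponds to the straightforward C loop (cf. sbr_sum64x5_c):
@     for (k = 0; k < 64; k++)
@         z[k] += z[k + 64] + z[k + 128] + z[k + 192] + z[k + 256];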
Akagi201/ffmpeg-xcode
18,286
ffmpeg-3.0.2/libavcodec/arm/h264dsp_neon.S
/* * Copyright (c) 2008 Mans Rullgard <mans@mansr.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/arm/asm.S" #include "neon.S" /* H.264 loop filter */ .macro h264_loop_filter_start ldr r12, [sp] tst r2, r2 ldr r12, [r12] it ne tstne r3, r3 vmov.32 d24[0], r12 and r12, r12, r12, lsl #16 it eq bxeq lr ands r12, r12, r12, lsl #8 it lt bxlt lr .endm .macro h264_loop_filter_luma vdup.8 q11, r2 @ alpha vmovl.u8 q12, d24 vabd.u8 q6, q8, q0 @ abs(p0 - q0) vmovl.u16 q12, d24 vabd.u8 q14, q9, q8 @ abs(p1 - p0) vsli.16 q12, q12, #8 vabd.u8 q15, q1, q0 @ abs(q1 - q0) vsli.32 q12, q12, #16 vclt.u8 q6, q6, q11 @ < alpha vdup.8 q11, r3 @ beta vclt.s8 q7, q12, #0 vclt.u8 q14, q14, q11 @ < beta vclt.u8 q15, q15, q11 @ < beta vbic q6, q6, q7 vabd.u8 q4, q10, q8 @ abs(p2 - p0) vand q6, q6, q14 vabd.u8 q5, q2, q0 @ abs(q2 - q0) vclt.u8 q4, q4, q11 @ < beta vand q6, q6, q15 vclt.u8 q5, q5, q11 @ < beta vand q4, q4, q6 vand q5, q5, q6 vand q12, q12, q6 vrhadd.u8 q14, q8, q0 vsub.i8 q6, q12, q4 vqadd.u8 q7, q9, q12 vhadd.u8 q10, q10, q14 vsub.i8 q6, q6, q5 vhadd.u8 q14, q2, q14 vmin.u8 q7, q7, q10 vqsub.u8 q11, q9, q12 vqadd.u8 q2, q1, q12 vmax.u8 q7, q7, q11 vqsub.u8 q11, q1, q12 vmin.u8 q14, q2, q14 vmovl.u8 q2, d0 vmax.u8 q14, q14, q11 vmovl.u8 q10, d1 vsubw.u8 q2, q2, d16 vsubw.u8 q10, q10, d17 vshl.i16 q2, q2, #2 vshl.i16 q10, q10, #2 vaddw.u8 q2, q2, d18 vaddw.u8 q10, q10, d19 vsubw.u8 q2, q2, d2 vsubw.u8 q10, q10, d3 vrshrn.i16 d4, q2, #3 vrshrn.i16 d5, q10, #3 vbsl q4, q7, q9 vbsl q5, q14, q1 vneg.s8 q7, q6 vmovl.u8 q14, d16 vmin.s8 q2, q2, q6 vmovl.u8 q6, d17 vmax.s8 q2, q2, q7 vmovl.u8 q11, d0 vmovl.u8 q12, d1 vaddw.s8 q14, q14, d4 vaddw.s8 q6, q6, d5 vsubw.s8 q11, q11, d4 vsubw.s8 q12, q12, d5 vqmovun.s16 d16, q14 vqmovun.s16 d17, q6 vqmovun.s16 d0, q11 vqmovun.s16 d1, q12 .endm function ff_h264_v_loop_filter_luma_neon, export=1 h264_loop_filter_start vld1.8 {d0, d1}, [r0,:128], r1 vld1.8 {d2, d3}, [r0,:128], r1 vld1.8 {d4, d5}, [r0,:128], r1 sub r0, r0, r1, lsl #2 sub r0, r0, r1, lsl #1 vld1.8 {d20,d21}, [r0,:128], r1 vld1.8 {d18,d19}, [r0,:128], r1 vld1.8 {d16,d17}, [r0,:128], r1 vpush {d8-d15} h264_loop_filter_luma sub r0, r0, r1, lsl #1 vst1.8 {d8, d9}, [r0,:128], r1 vst1.8 {d16,d17}, [r0,:128], r1 vst1.8 {d0, d1}, [r0,:128], r1 vst1.8 {d10,d11}, [r0,:128] vpop {d8-d15} bx lr endfunc function ff_h264_h_loop_filter_luma_neon, export=1 h264_loop_filter_start sub r0, r0, #4 vld1.8 {d6}, [r0], r1 vld1.8 {d20}, [r0], r1 vld1.8 {d18}, [r0], r1 vld1.8 {d16}, [r0], r1 vld1.8 {d0}, [r0], r1 vld1.8 {d2}, [r0], r1 vld1.8 {d4}, [r0], r1 vld1.8 {d26}, [r0], r1 vld1.8 {d7}, [r0], r1 vld1.8 {d21}, [r0], r1 vld1.8 {d19}, [r0], r1 vld1.8 {d17}, [r0], r1 vld1.8 {d1}, [r0], r1 vld1.8 {d3}, [r0], r1 vld1.8 {d5}, [r0], r1 vld1.8 {d27}, [r0], r1 transpose_8x8 q3, q10, q9, q8, q0, q1, q2, q13 vpush {d8-d15} 
h264_loop_filter_luma transpose_4x4 q4, q8, q0, q5 sub r0, r0, r1, lsl #4 add r0, r0, #2 vst1.32 {d8[0]}, [r0], r1 vst1.32 {d16[0]}, [r0], r1 vst1.32 {d0[0]}, [r0], r1 vst1.32 {d10[0]}, [r0], r1 vst1.32 {d8[1]}, [r0], r1 vst1.32 {d16[1]}, [r0], r1 vst1.32 {d0[1]}, [r0], r1 vst1.32 {d10[1]}, [r0], r1 vst1.32 {d9[0]}, [r0], r1 vst1.32 {d17[0]}, [r0], r1 vst1.32 {d1[0]}, [r0], r1 vst1.32 {d11[0]}, [r0], r1 vst1.32 {d9[1]}, [r0], r1 vst1.32 {d17[1]}, [r0], r1 vst1.32 {d1[1]}, [r0], r1 vst1.32 {d11[1]}, [r0], r1 vpop {d8-d15} bx lr endfunc .macro h264_loop_filter_chroma vdup.8 d22, r2 @ alpha vmovl.u8 q12, d24 vabd.u8 d26, d16, d0 @ abs(p0 - q0) vmovl.u8 q2, d0 vabd.u8 d28, d18, d16 @ abs(p1 - p0) vsubw.u8 q2, q2, d16 vsli.16 d24, d24, #8 vshl.i16 q2, q2, #2 vabd.u8 d30, d2, d0 @ abs(q1 - q0) vaddw.u8 q2, q2, d18 vclt.u8 d26, d26, d22 @ < alpha vsubw.u8 q2, q2, d2 vdup.8 d22, r3 @ beta vrshrn.i16 d4, q2, #3 vclt.u8 d28, d28, d22 @ < beta vclt.u8 d30, d30, d22 @ < beta vmin.s8 d4, d4, d24 vneg.s8 d25, d24 vand d26, d26, d28 vmax.s8 d4, d4, d25 vand d26, d26, d30 vmovl.u8 q11, d0 vand d4, d4, d26 vmovl.u8 q14, d16 vaddw.s8 q14, q14, d4 vsubw.s8 q11, q11, d4 vqmovun.s16 d16, q14 vqmovun.s16 d0, q11 .endm function ff_h264_v_loop_filter_chroma_neon, export=1 h264_loop_filter_start sub r0, r0, r1, lsl #1 vld1.8 {d18}, [r0,:64], r1 vld1.8 {d16}, [r0,:64], r1 vld1.8 {d0}, [r0,:64], r1 vld1.8 {d2}, [r0,:64] h264_loop_filter_chroma sub r0, r0, r1, lsl #1 vst1.8 {d16}, [r0,:64], r1 vst1.8 {d0}, [r0,:64], r1 bx lr endfunc function ff_h264_h_loop_filter_chroma_neon, export=1 h264_loop_filter_start sub r0, r0, #2 vld1.32 {d18[0]}, [r0], r1 vld1.32 {d16[0]}, [r0], r1 vld1.32 {d0[0]}, [r0], r1 vld1.32 {d2[0]}, [r0], r1 vld1.32 {d18[1]}, [r0], r1 vld1.32 {d16[1]}, [r0], r1 vld1.32 {d0[1]}, [r0], r1 vld1.32 {d2[1]}, [r0], r1 vtrn.16 d18, d0 vtrn.16 d16, d2 vtrn.8 d18, d16 vtrn.8 d0, d2 h264_loop_filter_chroma vtrn.16 d18, d0 vtrn.16 d16, d2 vtrn.8 d18, d16 vtrn.8 d0, d2 sub r0, r0, r1, lsl #3 vst1.32 {d18[0]}, [r0], r1 vst1.32 {d16[0]}, [r0], r1 vst1.32 {d0[0]}, [r0], r1 vst1.32 {d2[0]}, [r0], r1 vst1.32 {d18[1]}, [r0], r1 vst1.32 {d16[1]}, [r0], r1 vst1.32 {d0[1]}, [r0], r1 vst1.32 {d2[1]}, [r0], r1 bx lr endfunc @ Biweighted prediction .macro biweight_16 macs, macd vdup.8 d0, r4 vdup.8 d1, r5 vmov q2, q8 vmov q3, q8 1: subs r3, r3, #2 vld1.8 {d20-d21},[r0,:128], r2 \macd q2, d0, d20 pld [r0] \macd q3, d0, d21 vld1.8 {d22-d23},[r1,:128], r2 \macs q2, d1, d22 pld [r1] \macs q3, d1, d23 vmov q12, q8 vld1.8 {d28-d29},[r0,:128], r2 vmov q13, q8 \macd q12, d0, d28 pld [r0] \macd q13, d0, d29 vld1.8 {d30-d31},[r1,:128], r2 \macs q12, d1, d30 pld [r1] \macs q13, d1, d31 vshl.s16 q2, q2, q9 vshl.s16 q3, q3, q9 vqmovun.s16 d4, q2 vqmovun.s16 d5, q3 vshl.s16 q12, q12, q9 vshl.s16 q13, q13, q9 vqmovun.s16 d24, q12 vqmovun.s16 d25, q13 vmov q3, q8 vst1.8 {d4- d5}, [r6,:128], r2 vmov q2, q8 vst1.8 {d24-d25},[r6,:128], r2 bne 1b pop {r4-r6, pc} .endm .macro biweight_8 macs, macd vdup.8 d0, r4 vdup.8 d1, r5 vmov q1, q8 vmov q10, q8 1: subs r3, r3, #2 vld1.8 {d4},[r0,:64], r2 \macd q1, d0, d4 pld [r0] vld1.8 {d5},[r1,:64], r2 \macs q1, d1, d5 pld [r1] vld1.8 {d6},[r0,:64], r2 \macd q10, d0, d6 pld [r0] vld1.8 {d7},[r1,:64], r2 \macs q10, d1, d7 pld [r1] vshl.s16 q1, q1, q9 vqmovun.s16 d2, q1 vshl.s16 q10, q10, q9 vqmovun.s16 d4, q10 vmov q10, q8 vst1.8 {d2},[r6,:64], r2 vmov q1, q8 vst1.8 {d4},[r6,:64], r2 bne 1b pop {r4-r6, pc} .endm .macro biweight_4 macs, macd vdup.8 d0, r4 vdup.8 d1, r5 vmov q1, q8 vmov q10, q8 1: subs r3, 
r3, #4 vld1.32 {d4[0]},[r0,:32], r2 vld1.32 {d4[1]},[r0,:32], r2 \macd q1, d0, d4 pld [r0] vld1.32 {d5[0]},[r1,:32], r2 vld1.32 {d5[1]},[r1,:32], r2 \macs q1, d1, d5 pld [r1] blt 2f vld1.32 {d6[0]},[r0,:32], r2 vld1.32 {d6[1]},[r0,:32], r2 \macd q10, d0, d6 pld [r0] vld1.32 {d7[0]},[r1,:32], r2 vld1.32 {d7[1]},[r1,:32], r2 \macs q10, d1, d7 pld [r1] vshl.s16 q1, q1, q9 vqmovun.s16 d2, q1 vshl.s16 q10, q10, q9 vqmovun.s16 d4, q10 vmov q10, q8 vst1.32 {d2[0]},[r6,:32], r2 vst1.32 {d2[1]},[r6,:32], r2 vmov q1, q8 vst1.32 {d4[0]},[r6,:32], r2 vst1.32 {d4[1]},[r6,:32], r2 bne 1b pop {r4-r6, pc} 2: vshl.s16 q1, q1, q9 vqmovun.s16 d2, q1 vst1.32 {d2[0]},[r6,:32], r2 vst1.32 {d2[1]},[r6,:32], r2 pop {r4-r6, pc} .endm .macro biweight_func w function ff_biweight_h264_pixels_\w\()_neon, export=1 push {r4-r6, lr} ldr r12, [sp, #16] add r4, sp, #20 ldm r4, {r4-r6} lsr lr, r4, #31 add r6, r6, #1 eors lr, lr, r5, lsr #30 orr r6, r6, #1 vdup.16 q9, r12 lsl r6, r6, r12 vmvn q9, q9 vdup.16 q8, r6 mov r6, r0 beq 10f subs lr, lr, #1 beq 20f subs lr, lr, #1 beq 30f b 40f 10: biweight_\w vmlal.u8, vmlal.u8 20: rsb r4, r4, #0 biweight_\w vmlal.u8, vmlsl.u8 30: rsb r4, r4, #0 rsb r5, r5, #0 biweight_\w vmlsl.u8, vmlsl.u8 40: rsb r5, r5, #0 biweight_\w vmlsl.u8, vmlal.u8 endfunc .endm biweight_func 16 biweight_func 8 biweight_func 4 @ Weighted prediction .macro weight_16 add vdup.8 d0, r12 1: subs r2, r2, #2 vld1.8 {d20-d21},[r0,:128], r1 vmull.u8 q2, d0, d20 pld [r0] vmull.u8 q3, d0, d21 vld1.8 {d28-d29},[r0,:128], r1 vmull.u8 q12, d0, d28 pld [r0] vmull.u8 q13, d0, d29 \add q2, q8, q2 vrshl.s16 q2, q2, q9 \add q3, q8, q3 vrshl.s16 q3, q3, q9 vqmovun.s16 d4, q2 vqmovun.s16 d5, q3 \add q12, q8, q12 vrshl.s16 q12, q12, q9 \add q13, q8, q13 vrshl.s16 q13, q13, q9 vqmovun.s16 d24, q12 vqmovun.s16 d25, q13 vst1.8 {d4- d5}, [r4,:128], r1 vst1.8 {d24-d25},[r4,:128], r1 bne 1b pop {r4, pc} .endm .macro weight_8 add vdup.8 d0, r12 1: subs r2, r2, #2 vld1.8 {d4},[r0,:64], r1 vmull.u8 q1, d0, d4 pld [r0] vld1.8 {d6},[r0,:64], r1 vmull.u8 q10, d0, d6 \add q1, q8, q1 pld [r0] vrshl.s16 q1, q1, q9 vqmovun.s16 d2, q1 \add q10, q8, q10 vrshl.s16 q10, q10, q9 vqmovun.s16 d4, q10 vst1.8 {d2},[r4,:64], r1 vst1.8 {d4},[r4,:64], r1 bne 1b pop {r4, pc} .endm .macro weight_4 add vdup.8 d0, r12 vmov q1, q8 vmov q10, q8 1: subs r2, r2, #4 vld1.32 {d4[0]},[r0,:32], r1 vld1.32 {d4[1]},[r0,:32], r1 vmull.u8 q1, d0, d4 pld [r0] blt 2f vld1.32 {d6[0]},[r0,:32], r1 vld1.32 {d6[1]},[r0,:32], r1 vmull.u8 q10, d0, d6 pld [r0] \add q1, q8, q1 vrshl.s16 q1, q1, q9 vqmovun.s16 d2, q1 \add q10, q8, q10 vrshl.s16 q10, q10, q9 vqmovun.s16 d4, q10 vmov q10, q8 vst1.32 {d2[0]},[r4,:32], r1 vst1.32 {d2[1]},[r4,:32], r1 vmov q1, q8 vst1.32 {d4[0]},[r4,:32], r1 vst1.32 {d4[1]},[r4,:32], r1 bne 1b pop {r4, pc} 2: \add q1, q8, q1 vrshl.s16 q1, q1, q9 vqmovun.s16 d2, q1 vst1.32 {d2[0]},[r4,:32], r1 vst1.32 {d2[1]},[r4,:32], r1 pop {r4, pc} .endm .macro weight_func w function ff_weight_h264_pixels_\w\()_neon, export=1 push {r4, lr} ldr r12, [sp, #8] ldr r4, [sp, #12] cmp r3, #1 lsl r4, r4, r3 vdup.16 q8, r4 mov r4, r0 ble 20f rsb lr, r3, #1 vdup.16 q9, lr cmp r12, #0 blt 10f weight_\w vhadd.s16 10: rsb r12, r12, #0 weight_\w vhsub.s16 20: rsb lr, r3, #0 vdup.16 q9, lr cmp r12, #0 blt 10f weight_\w vadd.s16 10: rsb r12, r12, #0 weight_\w vsub.s16 endfunc .endm weight_func 16 weight_func 8 weight_func 4
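@ The biweighted predictors above implement H.264 explicit bi-prediction;
@ in rough C form (cf. the biweight op in FFmpeg's h264dsp C template):
@     dst[x] = av_clip_uint8((dst[x] * weightd + src[x] * weights
@                             + (((offset + 1) | 1) << log2_denom))
@                            >> (log2_denom + 1));
@ The four branches selected at run time differ only in the signs of the
@ two weights (vmlal vs. vmlsl).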
Akagi201/ffmpeg-xcode
17,292
ffmpeg-3.0.2/libavcodec/arm/simple_idct_armv5te.S
/*
 * Simple IDCT
 *
 * Copyright (c) 2001 Michael Niedermayer <michaelni@gmx.at>
 * Copyright (c) 2006 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"

#define W1  22725   /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define W2  21407   /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define W3  19266   /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define W4  16383   /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define W5  12873   /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define W6  8867    /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define W7  4520    /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define ROW_SHIFT 11
#define COL_SHIFT 20

#define W13 (W1 | (W3 << 16))
#define W26 (W2 | (W6 << 16))
#define W57 (W5 | (W7 << 16))

function idct_row_armv5te
        str    lr, [sp, #-4]!

        ldrd   v1, v2, [a1, #8]
        ldrd   a3, a4, [a1]              /* a3 = row[1:0], a4 = row[3:2] */
        orrs   v1, v1, v2
        itt    eq
        cmpeq  v1, a4
        cmpeq  v1, a3, lsr #16
        beq    row_dc_only

        mov    v1, #(1<<(ROW_SHIFT-1))
        mov    ip, #16384
        sub    ip, ip, #1                /* ip = W4 */
        smlabb v1, ip, a3, v1            /* v1 = W4*row[0]+(1<<(RS-1)) */
        ldr    ip, =W26                  /* ip = W2 | (W6 << 16) */
        smultb a2, ip, a4
        smulbb lr, ip, a4
        add    v2, v1, a2
        sub    v3, v1, a2
        sub    v4, v1, lr
        add    v1, v1, lr

        ldr    ip, =W13                  /* ip = W1 | (W3 << 16) */
        ldr    lr, =W57                  /* lr = W5 | (W7 << 16) */
        smulbt v5, ip, a3
        smultt v6, lr, a4
        smlatt v5, ip, a4, v5
        smultt a2, ip, a3
        smulbt v7, lr, a3
        sub    v6, v6, a2
        smulbt a2, ip, a4
        smultt fp, lr, a3
        sub    v7, v7, a2
        smulbt a2, lr, a4
        ldrd   a3, a4, [a1, #8]          /* a3=row[5:4] a4=row[7:6] */
        sub    fp, fp, a2

        orrs   a2, a3, a4
        beq    1f

        smlabt v5, lr, a3, v5
        smlabt v6, ip, a3, v6
        smlatt v5, lr, a4, v5
        smlabt v6, lr, a4, v6
        smlatt v7, lr, a3, v7
        smlatt fp, ip, a3, fp
        smulbt a2, ip, a4
        smlatt v7, ip, a4, v7
        sub    fp, fp, a2

        ldr    ip, =W26                  /* ip = W2 | (W6 << 16) */
        mov    a2, #16384
        sub    a2, a2, #1                /* a2 = W4 */
        smulbb a2, a2, a3                /* a2 = W4*row[4] */
        smultb lr, ip, a4                /* lr = W6*row[6] */
        add    v1, v1, a2                /* v1 += W4*row[4] */
        add    v1, v1, lr                /* v1 += W6*row[6] */
        add    v4, v4, a2                /* v4 += W4*row[4] */
        sub    v4, v4, lr                /* v4 -= W6*row[6] */
        smulbb lr, ip, a4                /* lr = W2*row[6] */
        sub    v2, v2, a2                /* v2 -= W4*row[4] */
        sub    v2, v2, lr                /* v2 -= W2*row[6] */
        sub    v3, v3, a2                /* v3 -= W4*row[4] */
        add    v3, v3, lr                /* v3 += W2*row[6] */

1:      add    a2, v1, v5
        mov    a3, a2, lsr #11
        bic    a3, a3, #0x1f0000
        sub    a2, v2, v6
        mov    a2, a2, lsr #11
        add    a3, a3, a2, lsl #16
        add    a2, v3, v7
        mov    a4, a2, lsr #11
        bic    a4, a4, #0x1f0000
        add    a2, v4, fp
        mov    a2, a2, lsr #11
        add    a4, a4, a2, lsl #16
        strd   a3, a4, [a1]

        sub    a2, v4, fp
        mov    a3, a2, lsr #11
        bic    a3, a3, #0x1f0000
        sub    a2, v3, v7
        mov    a2, a2, lsr #11
        add    a3, a3, a2, lsl #16
        add    a2, v2, v6
        mov    a4, a2, lsr #11
        bic    a4, a4, #0x1f0000
        sub    a2, v1, v5
        mov    a2, a2, lsr #11
        add    a4, a4, a2, lsl #16
        strd   a3, a4, [a1, #8]

        ldr    pc, [sp], #4

row_dc_only:
        orr    a3, a3, a3, lsl #16
        bic    a3, a3, #0xe000
        mov    a3, a3, lsl #3
        mov    a4, a3
        strd   a3, a4, [a1]
        strd   a3, a4, [a1, #8]

        ldr    pc, [sp], #4
endfunc

.macro idct_col
        ldr    a4, [a1]                  /* a4 = col[1:0] */
        mov    ip, #16384
        sub    ip, ip, #1                /* ip = W4 */
#if 0
        mov    v1, #(1<<(COL_SHIFT-1))
        smlabt v2, ip, a4, v1            /* v2 = W4*col[1] + (1<<(COL_SHIFT-1)) */
        smlabb v1, ip, a4, v1            /* v1 = W4*col[0] + (1<<(COL_SHIFT-1)) */
        ldr    a4, [a1, #(16*4)]
#else
        mov    v1, #((1<<(COL_SHIFT-1))/W4) /* this matches the C version */
        add    v2, v1, a4, asr #16
        rsb    v2, v2, v2, lsl #14
        mov    a4, a4, lsl #16
        add    v1, v1, a4, asr #16
        ldr    a4, [a1, #(16*4)]
        rsb    v1, v1, v1, lsl #14
#endif

        smulbb lr, ip, a4
        smulbt a3, ip, a4
        sub    v3, v1, lr
        sub    v5, v1, lr
        add    v7, v1, lr
        add    v1, v1, lr
        sub    v4, v2, a3
        sub    v6, v2, a3
        add    fp, v2, a3
        ldr    ip, =W26
        ldr    a4, [a1, #(16*2)]
        add    v2, v2, a3

        smulbb lr, ip, a4
        smultb a3, ip, a4
        add    v1, v1, lr
        sub    v7, v7, lr
        add    v3, v3, a3
        sub    v5, v5, a3
        smulbt lr, ip, a4
        smultt a3, ip, a4
        add    v2, v2, lr
        sub    fp, fp, lr
        add    v4, v4, a3
        ldr    a4, [a1, #(16*6)]
        sub    v6, v6, a3

        smultb lr, ip, a4
        smulbb a3, ip, a4
        add    v1, v1, lr
        sub    v7, v7, lr
        sub    v3, v3, a3
        add    v5, v5, a3
        smultt lr, ip, a4
        smulbt a3, ip, a4
        add    v2, v2, lr
        sub    fp, fp, lr
        sub    v4, v4, a3
        add    v6, v6, a3

        stmfd  sp!, {v1, v2, v3, v4, v5, v6, v7, fp}

        ldr    ip, =W13
        ldr    a4, [a1, #(16*1)]
        ldr    lr, =W57
        smulbb v1, ip, a4
        smultb v3, ip, a4
        smulbb v5, lr, a4
        smultb v7, lr, a4
        smulbt v2, ip, a4
        smultt v4, ip, a4
        smulbt v6, lr, a4
        smultt fp, lr, a4
        rsb    v4, v4, #0
        ldr    a4, [a1, #(16*3)]
        rsb    v3, v3, #0

        smlatb v1, ip, a4, v1
        smlatb v3, lr, a4, v3
        smulbb a3, ip, a4
        smulbb a2, lr, a4
        sub    v5, v5, a3
        sub    v7, v7, a2
        smlatt v2, ip, a4, v2
        smlatt v4, lr, a4, v4
        smulbt a3, ip, a4
        smulbt a2, lr, a4
        sub    v6, v6, a3
        ldr    a4, [a1, #(16*5)]
        sub    fp, fp, a2

        smlabb v1, lr, a4, v1
        smlabb v3, ip, a4, v3
        smlatb v5, lr, a4, v5
        smlatb v7, ip, a4, v7
        smlabt v2, lr, a4, v2
        smlabt v4, ip, a4, v4
        smlatt v6, lr, a4, v6
        ldr    a3, [a1, #(16*7)]
        smlatt fp, ip, a4, fp

        smlatb v1, lr, a3, v1
        smlabb v3, lr, a3, v3
        smlatb v5, ip, a3, v5
        smulbb a4, ip, a3
        smlatt v2, lr, a3, v2
        sub    v7, v7, a4
        smlabt v4, lr, a3, v4
        smulbt a4, ip, a3
        smlatt v6, ip, a3, v6
        sub    fp, fp, a4
.endm

function idct_col_armv5te
        str    lr, [sp, #-4]!

        idct_col

        ldmfd  sp!, {a3, a4}
        adds   a2, a3, v1
        mov    a2, a2, lsr #20
        it     mi
        orrmi  a2, a2, #0xf000
        add    ip, a4, v2
        mov    ip, ip, asr #20
        orr    a2, a2, ip, lsl #16
        str    a2, [a1]
        subs   a3, a3, v1
        mov    a2, a3, lsr #20
        it     mi
        orrmi  a2, a2, #0xf000
        sub    a4, a4, v2
        mov    a4, a4, asr #20
        orr    a2, a2, a4, lsl #16
        ldmfd  sp!, {a3, a4}
        str    a2, [a1, #(16*7)]

        subs   a2, a3, v3
        mov    a2, a2, lsr #20
        it     mi
        orrmi  a2, a2, #0xf000
        sub    ip, a4, v4
        mov    ip, ip, asr #20
        orr    a2, a2, ip, lsl #16
        str    a2, [a1, #(16*1)]
        adds   a3, a3, v3
        mov    a2, a3, lsr #20
        it     mi
        orrmi  a2, a2, #0xf000
        add    a4, a4, v4
        mov    a4, a4, asr #20
        orr    a2, a2, a4, lsl #16
        ldmfd  sp!, {a3, a4}
        str    a2, [a1, #(16*6)]

        adds   a2, a3, v5
        mov    a2, a2, lsr #20
        it     mi
        orrmi  a2, a2, #0xf000
        add    ip, a4, v6
        mov    ip, ip, asr #20
        orr    a2, a2, ip, lsl #16
        str    a2, [a1, #(16*2)]
        subs   a3, a3, v5
        mov    a2, a3, lsr #20
        it     mi
        orrmi  a2, a2, #0xf000
        sub    a4, a4, v6
        mov    a4, a4, asr #20
        orr    a2, a2, a4, lsl #16
        ldmfd  sp!, {a3, a4}
        str    a2, [a1, #(16*5)]

        adds   a2, a3, v7
        mov    a2, a2, lsr #20
        it     mi
        orrmi  a2, a2, #0xf000
        add    ip, a4, fp
        mov    ip, ip, asr #20
        orr    a2, a2, ip, lsl #16
        str    a2, [a1, #(16*3)]
        subs   a3, a3, v7
        mov    a2, a3, lsr #20
        it     mi
        orrmi  a2, a2, #0xf000
        sub    a4, a4, fp
        mov    a4, a4, asr #20
        orr    a2, a2, a4, lsl #16
        str    a2, [a1, #(16*4)]

        ldr    pc, [sp], #4
endfunc

.macro clip dst, src:vararg
        movs   \dst, \src
        it     mi
        movmi  \dst, #0
        cmp    \dst, #255
        it     gt
        movgt  \dst, #255
.endm

.macro aclip dst, src:vararg
        adds   \dst, \src
        it     mi
        movmi  \dst, #0
        cmp    \dst, #255
        it     gt
        movgt  \dst, #255
.endm

function idct_col_put_armv5te
        str    lr, [sp, #-4]!

        idct_col

        ldmfd  sp!, {a3, a4}
        ldr    lr, [sp, #32]
        add    a2, a3, v1
        clip   a2, a2, asr #20
        add    ip, a4, v2
        clip   ip, ip, asr #20
        orr    a2, a2, ip, lsl #8
        sub    a3, a3, v1
        clip   a3, a3, asr #20
        sub    a4, a4, v2
        clip   a4, a4, asr #20
        ldr    v1, [sp, #28]
        strh   a2, [v1]
        add    a2, v1, #2
        str    a2, [sp, #28]
        orr    a2, a3, a4, lsl #8
        rsb    v2, lr, lr, lsl #3
        ldmfd  sp!, {a3, a4}
        strh_pre a2, v2, v1

        sub    a2, a3, v3
        clip   a2, a2, asr #20
        sub    ip, a4, v4
        clip   ip, ip, asr #20
        orr    a2, a2, ip, lsl #8
        strh_pre a2, v1, lr
        add    a3, a3, v3
        clip   a2, a3, asr #20
        add    a4, a4, v4
        clip   a4, a4, asr #20
        orr    a2, a2, a4, lsl #8
        ldmfd  sp!, {a3, a4}
        strh_dpre a2, v2, lr

        add    a2, a3, v5
        clip   a2, a2, asr #20
        add    ip, a4, v6
        clip   ip, ip, asr #20
        orr    a2, a2, ip, lsl #8
        strh_pre a2, v1, lr
        sub    a3, a3, v5
        clip   a2, a3, asr #20
        sub    a4, a4, v6
        clip   a4, a4, asr #20
        orr    a2, a2, a4, lsl #8
        ldmfd  sp!, {a3, a4}
        strh_dpre a2, v2, lr

        add    a2, a3, v7
        clip   a2, a2, asr #20
        add    ip, a4, fp
        clip   ip, ip, asr #20
        orr    a2, a2, ip, lsl #8
        strh   a2, [v1, lr]
        sub    a3, a3, v7
        clip   a2, a3, asr #20
        sub    a4, a4, fp
        clip   a4, a4, asr #20
        orr    a2, a2, a4, lsl #8
        strh_dpre a2, v2, lr

        ldr    pc, [sp], #4
endfunc

function idct_col_add_armv5te
        str    lr, [sp, #-4]!

        idct_col

        ldr    lr, [sp, #36]

        ldmfd  sp!, {a3, a4}
        ldrh   ip, [lr]
        add    a2, a3, v1
        sub    a3, a3, v1
        and    v1, ip, #255
        aclip  a2, v1, a2, asr #20
        add    v1, a4, v2
        mov    v1, v1, asr #20
        aclip  v1, v1, ip, lsr #8
        orr    a2, a2, v1, lsl #8
        ldr    v1, [sp, #32]
        sub    a4, a4, v2
        rsb    v2, v1, v1, lsl #3
        ldrh_pre ip, v2, lr
        strh   a2, [lr]
        and    a2, ip, #255
        aclip  a3, a2, a3, asr #20
        mov    a4, a4, asr #20
        aclip  a4, a4, ip, lsr #8
        add    a2, lr, #2
        str    a2, [sp, #28]
        orr    a2, a3, a4, lsl #8
        strh   a2, [v2]

        ldmfd  sp!, {a3, a4}
        ldrh_pre ip, lr, v1
        sub    a2, a3, v3
        add    a3, a3, v3
        and    v3, ip, #255
        aclip  a2, v3, a2, asr #20
        sub    v3, a4, v4
        mov    v3, v3, asr #20
        aclip  v3, v3, ip, lsr #8
        orr    a2, a2, v3, lsl #8
        add    a4, a4, v4
        ldrh_dpre ip, v2, v1
        strh   a2, [lr]
        and    a2, ip, #255
        aclip  a3, a2, a3, asr #20
        mov    a4, a4, asr #20
        aclip  a4, a4, ip, lsr #8
        orr    a2, a3, a4, lsl #8
        strh   a2, [v2]

        ldmfd  sp!, {a3, a4}
        ldrh_pre ip, lr, v1
        add    a2, a3, v5
        sub    a3, a3, v5
        and    v3, ip, #255
        aclip  a2, v3, a2, asr #20
        add    v3, a4, v6
        mov    v3, v3, asr #20
        aclip  v3, v3, ip, lsr #8
        orr    a2, a2, v3, lsl #8
        sub    a4, a4, v6
        ldrh_dpre ip, v2, v1
        strh   a2, [lr]
        and    a2, ip, #255
        aclip  a3, a2, a3, asr #20
        mov    a4, a4, asr #20
        aclip  a4, a4, ip, lsr #8
        orr    a2, a3, a4, lsl #8
        strh   a2, [v2]

        ldmfd  sp!, {a3, a4}
        ldrh_pre ip, lr, v1
        add    a2, a3, v7
        sub    a3, a3, v7
        and    v3, ip, #255
        aclip  a2, v3, a2, asr #20
        add    v3, a4, fp
        mov    v3, v3, asr #20
        aclip  v3, v3, ip, lsr #8
        orr    a2, a2, v3, lsl #8
        sub    a4, a4, fp
        ldrh_dpre ip, v2, v1
        strh   a2, [lr]
        and    a2, ip, #255
        aclip  a3, a2, a3, asr #20
        mov    a4, a4, asr #20
        aclip  a4, a4, ip, lsr #8
        orr    a2, a3, a4, lsl #8
        strh   a2, [v2]

        ldr    pc, [sp], #4
endfunc

function ff_simple_idct_armv5te, export=1
        stmfd  sp!, {v1, v2, v3, v4, v5, v6, v7, fp, lr}

        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te

        sub    a1, a1, #(16*7)

        bl     idct_col_armv5te
        add    a1, a1, #4
        bl     idct_col_armv5te
        add    a1, a1, #4
        bl     idct_col_armv5te
        add    a1, a1, #4
        bl     idct_col_armv5te

        ldmfd  sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc}
endfunc

function ff_simple_idct_add_armv5te, export=1
        stmfd  sp!, {a1, a2, v1, v2, v3, v4, v5, v6, v7, fp, lr}

        mov    a1, a3

        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te

        sub    a1, a1, #(16*7)

        bl     idct_col_add_armv5te
        add    a1, a1, #4
        bl     idct_col_add_armv5te
        add    a1, a1, #4
        bl     idct_col_add_armv5te
        add    a1, a1, #4
        bl     idct_col_add_armv5te

        add    sp, sp, #8
        ldmfd  sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc}
endfunc

function ff_simple_idct_put_armv5te, export=1
        stmfd  sp!, {a1, a2, v1, v2, v3, v4, v5, v6, v7, fp, lr}

        mov    a1, a3

        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te

        sub    a1, a1, #(16*7)

        bl     idct_col_put_armv5te
        add    a1, a1, #4
        bl     idct_col_put_armv5te
        add    a1, a1, #4
        bl     idct_col_put_armv5te
        add    a1, a1, #4
        bl     idct_col_put_armv5te

        add    sp, sp, #8
        ldmfd  sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc}
endfunc
Akagi201/ffmpeg-xcode
33,242
ffmpeg-3.0.2/libavcodec/arm/h264qpel_neon.S
/*
 * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"
#include "neon.S"

/* H.264 qpel MC */

.macro  lowpass_const r
        movw            \r,  #5
        movt            \r,  #20
        vmov.32         d6[0], \r
.endm

.macro  lowpass_8 r0, r1, r2, r3, d0, d1, narrow=1
.if \narrow
        t0 .req q0
        t1 .req q8
.else
        t0 .req \d0
        t1 .req \d1
.endif
        vext.8          d2,  \r0, \r1, #2
        vext.8          d3,  \r0, \r1, #3
        vaddl.u8        q1,  d2,  d3
        vext.8          d4,  \r0, \r1, #1
        vext.8          d5,  \r0, \r1, #4
        vaddl.u8        q2,  d4,  d5
        vext.8          d30, \r0, \r1, #5
        vaddl.u8        t0,  \r0, d30
        vext.8          d18, \r2, \r3, #2
        vmla.i16        t0,  q1,  d6[1]
        vext.8          d19, \r2, \r3, #3
        vaddl.u8        q9,  d18, d19
        vext.8          d20, \r2, \r3, #1
        vmls.i16        t0,  q2,  d6[0]
        vext.8          d21, \r2, \r3, #4
        vaddl.u8        q10, d20, d21
        vext.8          d31, \r2, \r3, #5
        vaddl.u8        t1,  \r2, d31
        vmla.i16        t1,  q9,  d6[1]
        vmls.i16        t1,  q10, d6[0]
.if \narrow
        vqrshrun.s16    \d0, t0,  #5
        vqrshrun.s16    \d1, t1,  #5
.endif
        .unreq t0
        .unreq t1
.endm

.macro  lowpass_8_1 r0, r1, d0, narrow=1
.if \narrow
        t0 .req q0
.else
        t0 .req \d0
.endif
        vext.8          d2,  \r0, \r1, #2
        vext.8          d3,  \r0, \r1, #3
        vaddl.u8        q1,  d2,  d3
        vext.8          d4,  \r0, \r1, #1
        vext.8          d5,  \r0, \r1, #4
        vaddl.u8        q2,  d4,  d5
        vext.8          d30, \r0, \r1, #5
        vaddl.u8        t0,  \r0, d30
        vmla.i16        t0,  q1,  d6[1]
        vmls.i16        t0,  q2,  d6[0]
.if \narrow
        vqrshrun.s16    \d0, t0,  #5
.endif
        .unreq t0
.endm

.macro  lowpass_8.16 r0, r1, l0, h0, l1, h1, d
        vext.16         q1,  \r0, \r1, #2
        vext.16         q0,  \r0, \r1, #3
        vaddl.s16       q9,  d2,  d0
        vext.16         q2,  \r0, \r1, #1
        vaddl.s16       q1,  d3,  d1
        vext.16         q3,  \r0, \r1, #4
        vaddl.s16       q10, d4,  d6
        vext.16         \r1, \r0, \r1, #5
        vaddl.s16       q2,  d5,  d7
        vaddl.s16       q0,  \h0, \h1
        vaddl.s16       q8,  \l0, \l1

        vshl.i32        q3,  q9,  #4
        vshl.i32        q9,  q9,  #2
        vshl.i32        q15, q10, #2
        vadd.i32        q9,  q9,  q3
        vadd.i32        q10, q10, q15

        vshl.i32        q3,  q1,  #4
        vshl.i32        q1,  q1,  #2
        vshl.i32        q15, q2,  #2
        vadd.i32        q1,  q1,  q3
        vadd.i32        q2,  q2,  q15

        vadd.i32        q9,  q9,  q8
        vsub.i32        q9,  q9,  q10

        vadd.i32        q1,  q1,  q0
        vsub.i32        q1,  q1,  q2

        vrshrn.s32      d18, q9,  #10
        vrshrn.s32      d19, q1,  #10

        vqmovun.s16     \d,  q9
.endm

function put_h264_qpel16_h_lowpass_neon_packed
        mov             r4,  lr
        mov             r12, #16
        mov             r3,  #8
        bl              put_h264_qpel8_h_lowpass_neon
        sub             r1,  r1,  r2, lsl #4
        add             r1,  r1,  #8
        mov             r12, #16
        mov             lr,  r4
        b               put_h264_qpel8_h_lowpass_neon
endfunc

.macro  h264_qpel_h_lowpass type
function \type\()_h264_qpel16_h_lowpass_neon
        push            {lr}
        mov             r12, #16
        bl              \type\()_h264_qpel8_h_lowpass_neon
        sub             r0,  r0,  r3, lsl #4
        sub             r1,  r1,  r2, lsl #4
        add             r0,  r0,  #8
        add             r1,  r1,  #8
        mov             r12, #16
        pop             {lr}
endfunc

function \type\()_h264_qpel8_h_lowpass_neon
1:      vld1.8          {d0, d1},  [r1], r2
        vld1.8          {d16,d17}, [r1], r2
        subs            r12, r12, #2
        lowpass_8       d0,  d1,  d16, d17, d0, d16
.ifc \type,avg
        vld1.8          {d2},     [r0,:64], r3
        vrhadd.u8       d0,  d0,  d2
        vld1.8          {d3},     [r0,:64]
        vrhadd.u8       d16, d16, d3
        sub             r0,  r0,  r3
.endif
        vst1.8          {d0},     [r0,:64], r3
        vst1.8          {d16},    [r0,:64], r3
        bne             1b
        bx              lr
endfunc
.endm

        h264_qpel_h_lowpass put
        h264_qpel_h_lowpass avg

.macro  h264_qpel_h_lowpass_l2 type
function \type\()_h264_qpel16_h_lowpass_l2_neon
        push            {lr}
        mov             r12, #16
        bl              \type\()_h264_qpel8_h_lowpass_l2_neon
        sub             r0,  r0,  r2, lsl #4
        sub             r1,  r1,  r2, lsl #4
        sub             r3,  r3,  r2, lsl #4
        add             r0,  r0,  #8
        add             r1,  r1,  #8
        add             r3,  r3,  #8
        mov             r12, #16
        pop             {lr}
endfunc

function \type\()_h264_qpel8_h_lowpass_l2_neon
1:      vld1.8          {d0, d1},  [r1], r2
        vld1.8          {d16,d17}, [r1], r2
        vld1.8          {d28},     [r3], r2
        vld1.8          {d29},     [r3], r2
        subs            r12, r12, #2
        lowpass_8       d0,  d1,  d16, d17, d0, d1
        vrhadd.u8       q0,  q0,  q14
.ifc \type,avg
        vld1.8          {d2},      [r0,:64], r2
        vrhadd.u8       d0,  d0,  d2
        vld1.8          {d3},      [r0,:64]
        vrhadd.u8       d1,  d1,  d3
        sub             r0,  r0,  r2
.endif
        vst1.8          {d0},      [r0,:64], r2
        vst1.8          {d1},      [r0,:64], r2
        bne             1b
        bx              lr
endfunc
.endm

        h264_qpel_h_lowpass_l2 put
        h264_qpel_h_lowpass_l2 avg

function put_h264_qpel16_v_lowpass_neon_packed
        mov             r4,  lr
        mov             r2,  #8
        bl              put_h264_qpel8_v_lowpass_neon
        sub             r1,  r1,  r3, lsl #2
        bl              put_h264_qpel8_v_lowpass_neon
        sub             r1,  r1,  r3, lsl #4
        sub             r1,  r1,  r3, lsl #2
        add             r1,  r1,  #8
        bl              put_h264_qpel8_v_lowpass_neon
        sub             r1,  r1,  r3, lsl #2
        mov             lr,  r4
        b               put_h264_qpel8_v_lowpass_neon
endfunc

.macro  h264_qpel_v_lowpass type
function \type\()_h264_qpel16_v_lowpass_neon
        mov             r4,  lr
        bl              \type\()_h264_qpel8_v_lowpass_neon
        sub             r1,  r1,  r3, lsl #2
        bl              \type\()_h264_qpel8_v_lowpass_neon
        sub             r0,  r0,  r2, lsl #4
        add             r0,  r0,  #8
        sub             r1,  r1,  r3, lsl #4
        sub             r1,  r1,  r3, lsl #2
        add             r1,  r1,  #8
        bl              \type\()_h264_qpel8_v_lowpass_neon
        sub             r1,  r1,  r3, lsl #2
        mov             lr,  r4
endfunc

function \type\()_h264_qpel8_v_lowpass_neon
        vld1.8          {d8},  [r1], r3
        vld1.8          {d10}, [r1], r3
        vld1.8          {d12}, [r1], r3
        vld1.8          {d14}, [r1], r3
        vld1.8          {d22}, [r1], r3
        vld1.8          {d24}, [r1], r3
        vld1.8          {d26}, [r1], r3
        vld1.8          {d28}, [r1], r3
        vld1.8          {d9},  [r1], r3
        vld1.8          {d11}, [r1], r3
        vld1.8          {d13}, [r1], r3
        vld1.8          {d15}, [r1], r3
        vld1.8          {d23}, [r1]

        transpose_8x8   q4,  q5,  q6,  q7,  q11, q12, q13, q14

        lowpass_8       d8,  d9,  d10, d11, d8,  d10
        lowpass_8       d12, d13, d14, d15, d12, d14
        lowpass_8       d22, d23, d24, d25, d22, d24
        lowpass_8       d26, d27, d28, d29, d26, d28

        transpose_8x8   d8,  d10, d12, d14, d22, d24, d26, d28

.ifc \type,avg
        vld1.8          {d9},  [r0,:64], r2
        vrhadd.u8       d8,  d8,  d9
        vld1.8          {d11}, [r0,:64], r2
        vrhadd.u8       d10, d10, d11
        vld1.8          {d13}, [r0,:64], r2
        vrhadd.u8       d12, d12, d13
        vld1.8          {d15}, [r0,:64], r2
        vrhadd.u8       d14, d14, d15
        vld1.8          {d23}, [r0,:64], r2
        vrhadd.u8       d22, d22, d23
        vld1.8          {d25}, [r0,:64], r2
        vrhadd.u8       d24, d24, d25
        vld1.8          {d27}, [r0,:64], r2
        vrhadd.u8       d26, d26, d27
        vld1.8          {d29}, [r0,:64], r2
        vrhadd.u8       d28, d28, d29
        sub             r0,  r0,  r2, lsl #3
.endif

        vst1.8          {d8},  [r0,:64], r2
        vst1.8          {d10}, [r0,:64], r2
        vst1.8          {d12}, [r0,:64], r2
        vst1.8          {d14}, [r0,:64], r2
        vst1.8          {d22}, [r0,:64], r2
        vst1.8          {d24}, [r0,:64], r2
        vst1.8          {d26}, [r0,:64], r2
        vst1.8          {d28}, [r0,:64], r2

        bx              lr
endfunc
.endm

        h264_qpel_v_lowpass put
        h264_qpel_v_lowpass avg

.macro  h264_qpel_v_lowpass_l2 type
function \type\()_h264_qpel16_v_lowpass_l2_neon
        mov             r4,  lr
        bl              \type\()_h264_qpel8_v_lowpass_l2_neon
        sub             r1,  r1,  r3, lsl #2
        bl              \type\()_h264_qpel8_v_lowpass_l2_neon
        sub             r0,  r0,  r3, lsl #4
        sub             r12, r12, r2, lsl #4
        add             r0,  r0,  #8
        add             r12, r12, #8
        sub             r1,  r1,  r3, lsl #4
        sub             r1,  r1,  r3, lsl #2
        add             r1,  r1,  #8
        bl              \type\()_h264_qpel8_v_lowpass_l2_neon
        sub             r1,  r1,  r3, lsl #2
        mov             lr,  r4
endfunc

function \type\()_h264_qpel8_v_lowpass_l2_neon
        vld1.8          {d8},  [r1], r3
        vld1.8          {d10}, [r1], r3
        vld1.8          {d12}, [r1], r3
        vld1.8          {d14}, [r1], r3
        vld1.8          {d22}, [r1], r3
        vld1.8          {d24}, [r1], r3
        vld1.8          {d26}, [r1], r3
        vld1.8          {d28}, [r1], r3
        vld1.8          {d9},  [r1], r3
        vld1.8          {d11}, [r1], r3
        vld1.8          {d13}, [r1], r3
        vld1.8          {d15}, [r1], r3
        vld1.8          {d23}, [r1]

        transpose_8x8   q4,  q5,  q6,  q7,  q11, q12, q13, q14

        lowpass_8       d8,  d9,  d10, d11, d8,  d9
        lowpass_8       d12, d13, d14, d15, d12, d13
        lowpass_8       d22, d23, d24, d25, d22, d23
        lowpass_8       d26, d27, d28, d29, d26, d27

        transpose_8x8   d8,  d9,  d12, d13, d22, d23, d26, d27

        vld1.8          {d0},  [r12], r2
        vld1.8          {d1},  [r12], r2
        vld1.8          {d2},  [r12], r2
        vld1.8          {d3},  [r12], r2
        vld1.8          {d4},  [r12], r2
        vrhadd.u8       q0,  q0,  q4
        vld1.8          {d5},  [r12], r2
        vrhadd.u8       q1,  q1,  q6
        vld1.8          {d10}, [r12], r2
        vrhadd.u8       q2,  q2,  q11
        vld1.8          {d11}, [r12], r2
        vrhadd.u8       q5,  q5,  q13

.ifc \type,avg
        vld1.8          {d16}, [r0,:64], r3
        vrhadd.u8       d0,  d0,  d16
        vld1.8          {d17}, [r0,:64], r3
        vrhadd.u8       d1,  d1,  d17
        vld1.8          {d16}, [r0,:64], r3
        vrhadd.u8       d2,  d2,  d16
        vld1.8          {d17}, [r0,:64], r3
        vrhadd.u8       d3,  d3,  d17
        vld1.8          {d16}, [r0,:64], r3
        vrhadd.u8       d4,  d4,  d16
        vld1.8          {d17}, [r0,:64], r3
        vrhadd.u8       d5,  d5,  d17
        vld1.8          {d16}, [r0,:64], r3
        vrhadd.u8       d10, d10, d16
        vld1.8          {d17}, [r0,:64], r3
        vrhadd.u8       d11, d11, d17
        sub             r0,  r0,  r3, lsl #3
.endif

        vst1.8          {d0},  [r0,:64], r3
        vst1.8          {d1},  [r0,:64], r3
        vst1.8          {d2},  [r0,:64], r3
        vst1.8          {d3},  [r0,:64], r3
        vst1.8          {d4},  [r0,:64], r3
        vst1.8          {d5},  [r0,:64], r3
        vst1.8          {d10}, [r0,:64], r3
        vst1.8          {d11}, [r0,:64], r3

        bx              lr
endfunc
.endm

        h264_qpel_v_lowpass_l2 put
        h264_qpel_v_lowpass_l2 avg

function put_h264_qpel8_hv_lowpass_neon_top
        lowpass_const   r12
        mov             r12, #12
1:      vld1.8          {d0, d1},  [r1], r3
        vld1.8          {d16,d17}, [r1], r3
        subs            r12, r12, #2
        lowpass_8       d0,  d1,  d16, d17, q11, q12, narrow=0
        vst1.8          {d22-d25}, [r4,:128]!
        bne             1b

        vld1.8          {d0, d1},  [r1]
        lowpass_8_1     d0,  d1,  q12, narrow=0

        mov             r12, #-16
        add             r4,  r4,  r12
        vld1.8          {d30,d31}, [r4,:128], r12
        vld1.8          {d20,d21}, [r4,:128], r12
        vld1.8          {d18,d19}, [r4,:128], r12
        vld1.8          {d16,d17}, [r4,:128], r12
        vld1.8          {d14,d15}, [r4,:128], r12
        vld1.8          {d12,d13}, [r4,:128], r12
        vld1.8          {d10,d11}, [r4,:128], r12
        vld1.8          {d8, d9},  [r4,:128], r12
        vld1.8          {d6, d7},  [r4,:128], r12
        vld1.8          {d4, d5},  [r4,:128], r12
        vld1.8          {d2, d3},  [r4,:128], r12
        vld1.8          {d0, d1},  [r4,:128]

        swap4           d1,  d3,  d5,  d7,  d8,  d10, d12, d14
        transpose16_4x4 q0,  q1,  q2,  q3,  q4,  q5,  q6,  q7

        swap4           d17, d19, d21, d31, d24, d26, d28, d22
        transpose16_4x4 q8,  q9,  q10, q15, q12, q13, q14, q11

        vst1.8          {d30,d31}, [r4,:128]!
        vst1.8          {d6, d7},  [r4,:128]!
        vst1.8          {d20,d21}, [r4,:128]!
        vst1.8          {d4, d5},  [r4,:128]!
        vst1.8          {d18,d19}, [r4,:128]!
        vst1.8          {d2, d3},  [r4,:128]!
        vst1.8          {d16,d17}, [r4,:128]!
        vst1.8          {d0, d1},  [r4,:128]

        lowpass_8.16    q4,  q12, d8,  d9,  d24, d25, d8
        lowpass_8.16    q5,  q13, d10, d11, d26, d27, d9
        lowpass_8.16    q6,  q14, d12, d13, d28, d29, d10
        lowpass_8.16    q7,  q11, d14, d15, d22, d23, d11

        vld1.8          {d16,d17}, [r4,:128], r12
        vld1.8          {d30,d31}, [r4,:128], r12
        lowpass_8.16    q8,  q15, d16, d17, d30, d31, d12

        vld1.8          {d16,d17}, [r4,:128], r12
        vld1.8          {d30,d31}, [r4,:128], r12
        lowpass_8.16    q8,  q15, d16, d17, d30, d31, d13

        vld1.8          {d16,d17}, [r4,:128], r12
        vld1.8          {d30,d31}, [r4,:128], r12
        lowpass_8.16    q8,  q15, d16, d17, d30, d31, d14

        vld1.8          {d16,d17}, [r4,:128], r12
        vld1.8          {d30,d31}, [r4,:128]
        lowpass_8.16    q8,  q15, d16, d17, d30, d31, d15

        transpose_8x8   d12, d13, d14, d15, d8,  d9,  d10, d11

        bx              lr
endfunc

.macro  h264_qpel8_hv_lowpass type
function \type\()_h264_qpel8_hv_lowpass_neon
        mov             r10, lr
        bl              put_h264_qpel8_hv_lowpass_neon_top
.ifc \type,avg
        vld1.8          {d0},  [r0,:64], r2
        vrhadd.u8       d12, d12, d0
        vld1.8          {d1},  [r0,:64], r2
        vrhadd.u8       d13, d13, d1
        vld1.8          {d2},  [r0,:64], r2
        vrhadd.u8       d14, d14, d2
        vld1.8          {d3},  [r0,:64], r2
        vrhadd.u8       d15, d15, d3
        vld1.8          {d4},  [r0,:64], r2
        vrhadd.u8       d8,  d8,  d4
        vld1.8          {d5},  [r0,:64], r2
        vrhadd.u8       d9,  d9,  d5
        vld1.8          {d6},  [r0,:64], r2
        vrhadd.u8       d10, d10, d6
        vld1.8          {d7},  [r0,:64], r2
        vrhadd.u8       d11, d11, d7
        sub             r0,  r0,  r2, lsl #3
.endif

        vst1.8          {d12}, [r0,:64], r2
        vst1.8          {d13}, [r0,:64], r2
        vst1.8          {d14}, [r0,:64], r2
        vst1.8          {d15}, [r0,:64], r2
        vst1.8          {d8},  [r0,:64], r2
        vst1.8          {d9},  [r0,:64], r2
        vst1.8          {d10}, [r0,:64], r2
        vst1.8          {d11}, [r0,:64], r2

        mov             lr,  r10
        bx              lr
endfunc
.endm

        h264_qpel8_hv_lowpass put
        h264_qpel8_hv_lowpass avg

.macro  h264_qpel8_hv_lowpass_l2 type
function \type\()_h264_qpel8_hv_lowpass_l2_neon
        mov             r10, lr
        bl              put_h264_qpel8_hv_lowpass_neon_top

        vld1.8          {d0, d1},  [r2,:128]!
        vld1.8          {d2, d3},  [r2,:128]!
        vrhadd.u8       q0,  q0,  q6
        vld1.8          {d4, d5},  [r2,:128]!
        vrhadd.u8       q1,  q1,  q7
        vld1.8          {d6, d7},  [r2,:128]!
        vrhadd.u8       q2,  q2,  q4
        vrhadd.u8       q3,  q3,  q5
.ifc \type,avg
        vld1.8          {d16}, [r0,:64], r3
        vrhadd.u8       d0,  d0,  d16
        vld1.8          {d17}, [r0,:64], r3
        vrhadd.u8       d1,  d1,  d17
        vld1.8          {d18}, [r0,:64], r3
        vrhadd.u8       d2,  d2,  d18
        vld1.8          {d19}, [r0,:64], r3
        vrhadd.u8       d3,  d3,  d19
        vld1.8          {d20}, [r0,:64], r3
        vrhadd.u8       d4,  d4,  d20
        vld1.8          {d21}, [r0,:64], r3
        vrhadd.u8       d5,  d5,  d21
        vld1.8          {d22}, [r0,:64], r3
        vrhadd.u8       d6,  d6,  d22
        vld1.8          {d23}, [r0,:64], r3
        vrhadd.u8       d7,  d7,  d23
        sub             r0,  r0,  r3, lsl #3
.endif
        vst1.8          {d0},  [r0,:64], r3
        vst1.8          {d1},  [r0,:64], r3
        vst1.8          {d2},  [r0,:64], r3
        vst1.8          {d3},  [r0,:64], r3
        vst1.8          {d4},  [r0,:64], r3
        vst1.8          {d5},  [r0,:64], r3
        vst1.8          {d6},  [r0,:64], r3
        vst1.8          {d7},  [r0,:64], r3

        mov             lr,  r10
        bx              lr
endfunc
.endm

        h264_qpel8_hv_lowpass_l2 put
        h264_qpel8_hv_lowpass_l2 avg

.macro  h264_qpel16_hv type
function \type\()_h264_qpel16_hv_lowpass_neon
        mov             r9,  lr
        bl              \type\()_h264_qpel8_hv_lowpass_neon
        sub             r1,  r1,  r3, lsl #2
        bl              \type\()_h264_qpel8_hv_lowpass_neon
        sub             r1,  r1,  r3, lsl #4
        sub             r1,  r1,  r3, lsl #2
        add             r1,  r1,  #8
        sub             r0,  r0,  r2, lsl #4
        add             r0,  r0,  #8
        bl              \type\()_h264_qpel8_hv_lowpass_neon
        sub             r1,  r1,  r3, lsl #2
        mov             lr,  r9
        b               \type\()_h264_qpel8_hv_lowpass_neon
endfunc

function \type\()_h264_qpel16_hv_lowpass_l2_neon
        mov             r9,  lr
        sub             r2,  r4,  #256
        bl              \type\()_h264_qpel8_hv_lowpass_l2_neon
        sub             r1,  r1,  r3, lsl #2
        bl              \type\()_h264_qpel8_hv_lowpass_l2_neon
        sub             r1,  r1,  r3, lsl #4
        sub             r1,  r1,  r3, lsl #2
        add             r1,  r1,  #8
        sub             r0,  r0,  r3, lsl #4
        add             r0,  r0,  #8
        bl              \type\()_h264_qpel8_hv_lowpass_l2_neon
        sub             r1,  r1,  r3, lsl #2
        mov             lr,  r9
        b               \type\()_h264_qpel8_hv_lowpass_l2_neon
endfunc
.endm

        h264_qpel16_hv put
        h264_qpel16_hv avg

.macro  h264_qpel8 type
function ff_\type\()_h264_qpel8_mc10_neon, export=1
        lowpass_const   r3
        mov             r3,  r1
        sub             r1,  r1,  #2
        mov             r12, #8
        b               \type\()_h264_qpel8_h_lowpass_l2_neon
endfunc

function ff_\type\()_h264_qpel8_mc20_neon, export=1
        lowpass_const   r3
        sub             r1,  r1,  #2
        mov             r3,  r2
        mov             r12, #8
        b               \type\()_h264_qpel8_h_lowpass_neon
endfunc

function ff_\type\()_h264_qpel8_mc30_neon, export=1
        lowpass_const   r3
        add             r3,  r1,  #1
        sub             r1,  r1,  #2
        mov             r12, #8
        b               \type\()_h264_qpel8_h_lowpass_l2_neon
endfunc

function ff_\type\()_h264_qpel8_mc01_neon, export=1
        push            {lr}
        mov             r12, r1
\type\()_h264_qpel8_mc01:
        lowpass_const   r3
        mov             r3,  r2
        sub             r1,  r1,  r2, lsl #1
        vpush           {d8-d15}
        bl              \type\()_h264_qpel8_v_lowpass_l2_neon
        vpop            {d8-d15}
        pop             {pc}
endfunc

function ff_\type\()_h264_qpel8_mc11_neon, export=1
        push            {r0, r1, r11, lr}
\type\()_h264_qpel8_mc11:
        lowpass_const   r3
        mov             r11, sp
A       bic             sp,  sp,  #15
T       bic             r0,  r11, #15
T       mov             sp,  r0
        sub             sp,  sp,  #64
        mov             r0,  sp
        sub             r1,  r1,  #2
        mov             r3,  #8
        mov             r12, #8
        vpush           {d8-d15}
        bl              put_h264_qpel8_h_lowpass_neon
        ldrd            r0,  r1,  [r11], #8
        mov             r3,  r2
        add             r12, sp,  #64
        sub             r1,  r1,  r2, lsl #1
        mov             r2,  #8
        bl              \type\()_h264_qpel8_v_lowpass_l2_neon
        vpop            {d8-d15}
        mov             sp,  r11
        pop             {r11, pc}
endfunc

function ff_\type\()_h264_qpel8_mc21_neon, export=1
        push            {r0, r1, r4, r10, r11, lr}
\type\()_h264_qpel8_mc21:
        lowpass_const   r3
        mov             r11, sp
A       bic             sp,  sp,  #15
T       bic             r0,  r11, #15
T       mov             sp,  r0
        sub             sp,  sp,  #(8*8+16*12)
        sub             r1,  r1,  #2
        mov             r3,  #8
        mov             r0,  sp
        mov             r12, #8
        vpush           {d8-d15}
        bl              put_h264_qpel8_h_lowpass_neon
        mov             r4,  r0
        ldrd            r0,  r1,  [r11], #8
        sub             r1,  r1,  r2, lsl #1
        sub             r1,  r1,  #2
        mov             r3,  r2
        sub             r2,  r4,  #64
        bl              \type\()_h264_qpel8_hv_lowpass_l2_neon
        vpop            {d8-d15}
        mov             sp,  r11
        pop             {r4, r10, r11, pc}
endfunc

function ff_\type\()_h264_qpel8_mc31_neon, export=1
        add             r1,  r1,  #1
        push            {r0, r1, r11, lr}
        sub             r1,  r1,  #1
        b               \type\()_h264_qpel8_mc11
endfunc

function ff_\type\()_h264_qpel8_mc02_neon, export=1
        push            {lr}
        lowpass_const   r3
        sub             r1,  r1,  r2, lsl #1
        mov             r3,  r2
        vpush           {d8-d15}
        bl              \type\()_h264_qpel8_v_lowpass_neon
        vpop            {d8-d15}
        pop             {pc}
endfunc

function ff_\type\()_h264_qpel8_mc12_neon, export=1
        push            {r0, r1, r4, r10, r11, lr}
\type\()_h264_qpel8_mc12:
        lowpass_const   r3
        mov             r11, sp
A       bic             sp,  sp,  #15
T       bic             r0,  r11, #15
T       mov             sp,  r0
        sub             sp,  sp,  #(8*8+16*12)
        sub             r1,  r1,  r2, lsl #1
        mov             r3,  r2
        mov             r2,  #8
        mov             r0,  sp
        vpush           {d8-d15}
        bl              put_h264_qpel8_v_lowpass_neon
        mov             r4,  r0
        ldrd            r0,  r1,  [r11], #8
        sub             r1,  r1,  r3, lsl #1
        sub             r1,  r1,  #2
        sub             r2,  r4,  #64
        bl              \type\()_h264_qpel8_hv_lowpass_l2_neon
        vpop            {d8-d15}
        mov             sp,  r11
        pop             {r4, r10, r11, pc}
endfunc

function ff_\type\()_h264_qpel8_mc22_neon, export=1
        push            {r4, r10, r11, lr}
        mov             r11, sp
A       bic             sp,  sp,  #15
T       bic             r4,  r11, #15
T       mov             sp,  r4
        sub             r1,  r1,  r2, lsl #1
        sub             r1,  r1,  #2
        mov             r3,  r2
        sub             sp,  sp,  #(16*12)
        mov             r4,  sp
        vpush           {d8-d15}
        bl              \type\()_h264_qpel8_hv_lowpass_neon
        vpop            {d8-d15}
        mov             sp,  r11
        pop             {r4, r10, r11, pc}
endfunc

function ff_\type\()_h264_qpel8_mc32_neon, export=1
        push            {r0, r1, r4, r10, r11, lr}
        add             r1,  r1,  #1
        b               \type\()_h264_qpel8_mc12
endfunc

function ff_\type\()_h264_qpel8_mc03_neon, export=1
        push            {lr}
        add             r12, r1,  r2
        b               \type\()_h264_qpel8_mc01
endfunc

function ff_\type\()_h264_qpel8_mc13_neon, export=1
        push            {r0, r1, r11, lr}
        add             r1,  r1,  r2
        b               \type\()_h264_qpel8_mc11
endfunc

function ff_\type\()_h264_qpel8_mc23_neon, export=1
        push            {r0, r1, r4, r10, r11, lr}
        add             r1,  r1,  r2
        b               \type\()_h264_qpel8_mc21
endfunc

function ff_\type\()_h264_qpel8_mc33_neon, export=1
        add             r1,  r1,  #1
        push            {r0, r1, r11, lr}
        add             r1,  r1,  r2
        sub             r1,  r1,  #1
        b               \type\()_h264_qpel8_mc11
endfunc
.endm

        h264_qpel8 put
        h264_qpel8 avg

.macro  h264_qpel16 type
function ff_\type\()_h264_qpel16_mc10_neon, export=1
        lowpass_const   r3
        mov             r3,  r1
        sub             r1,  r1,  #2
        b               \type\()_h264_qpel16_h_lowpass_l2_neon
endfunc

function ff_\type\()_h264_qpel16_mc20_neon, export=1
        lowpass_const   r3
        sub             r1,  r1,  #2
        mov             r3,  r2
        b               \type\()_h264_qpel16_h_lowpass_neon
endfunc

function ff_\type\()_h264_qpel16_mc30_neon, export=1
        lowpass_const   r3
        add             r3,  r1,  #1
        sub             r1,  r1,  #2
        b               \type\()_h264_qpel16_h_lowpass_l2_neon
endfunc

function ff_\type\()_h264_qpel16_mc01_neon, export=1
        push            {r4, lr}
        mov             r12, r1
\type\()_h264_qpel16_mc01:
        lowpass_const   r3
        mov             r3,  r2
        sub             r1,  r1,  r2, lsl #1
        vpush           {d8-d15}
        bl              \type\()_h264_qpel16_v_lowpass_l2_neon
        vpop            {d8-d15}
        pop             {r4, pc}
endfunc

function ff_\type\()_h264_qpel16_mc11_neon, export=1
        push            {r0, r1, r4, r11, lr}
\type\()_h264_qpel16_mc11:
        lowpass_const   r3
        mov             r11, sp
A       bic             sp,  sp,  #15
T       bic             r0,  r11, #15
T       mov             sp,  r0
        sub             sp,  sp,  #256
        mov             r0,  sp
        sub             r1,  r1,  #2
        mov             r3,  #16
        vpush           {d8-d15}
        bl              put_h264_qpel16_h_lowpass_neon
        ldrd            r0,  r1,  [r11], #8
        mov             r3,  r2
        add             r12, sp,  #64
        sub             r1,  r1,  r2, lsl #1
        mov             r2,  #16
        bl              \type\()_h264_qpel16_v_lowpass_l2_neon
        vpop            {d8-d15}
        mov             sp,  r11
        pop             {r4, r11, pc}
endfunc

function ff_\type\()_h264_qpel16_mc21_neon, export=1
        push            {r0, r1, r4-r5, r9-r11, lr}
\type\()_h264_qpel16_mc21:
        lowpass_const   r3
        mov             r11, sp
A       bic             sp,  sp,  #15
T       bic             r0,  r11, #15
T       mov             sp,  r0
        sub             sp,  sp,  #(16*16+16*12)
        sub             r1,  r1,  #2
        mov             r0,  sp
        vpush           {d8-d15}
        bl              put_h264_qpel16_h_lowpass_neon_packed
        mov             r4,  r0
        ldrd            r0,  r1,  [r11], #8
        sub             r1,  r1,  r2, lsl #1
        sub             r1,  r1,  #2
        mov             r3,  r2
        bl              \type\()_h264_qpel16_hv_lowpass_l2_neon
        vpop            {d8-d15}
        mov             sp,  r11
        pop             {r4-r5, r9-r11, pc}
endfunc

function ff_\type\()_h264_qpel16_mc31_neon, export=1
        add             r1,  r1,  #1
        push            {r0, r1, r4, r11, lr}
        sub             r1,  r1,  #1
        b               \type\()_h264_qpel16_mc11
endfunc

function ff_\type\()_h264_qpel16_mc02_neon, export=1
        push            {r4, lr}
        lowpass_const   r3
        sub             r1,  r1,  r2, lsl #1
        mov             r3,  r2
        vpush           {d8-d15}
        bl              \type\()_h264_qpel16_v_lowpass_neon
        vpop            {d8-d15}
        pop             {r4, pc}
endfunc

function ff_\type\()_h264_qpel16_mc12_neon, export=1
        push            {r0, r1, r4-r5, r9-r11, lr}
\type\()_h264_qpel16_mc12:
        lowpass_const   r3
        mov             r11, sp
A       bic             sp,  sp,  #15
T       bic             r0,  r11, #15
T       mov             sp,  r0
        sub             sp,  sp,  #(16*16+16*12)
        sub             r1,  r1,  r2, lsl #1
        mov             r0,  sp
        mov             r3,  r2
        vpush           {d8-d15}
        bl              put_h264_qpel16_v_lowpass_neon_packed
        mov             r4,  r0
        ldrd            r0,  r1,  [r11], #8
        sub             r1,  r1,  r3, lsl #1
        sub             r1,  r1,  #2
        mov             r2,  r3
        bl              \type\()_h264_qpel16_hv_lowpass_l2_neon
        vpop            {d8-d15}
        mov             sp,  r11
        pop             {r4-r5, r9-r11, pc}
endfunc

function ff_\type\()_h264_qpel16_mc22_neon, export=1
        push            {r4, r9-r11, lr}
        lowpass_const   r3
        mov             r11, sp
A       bic             sp,  sp,  #15
T       bic             r4,  r11, #15
T       mov             sp,  r4
        sub             r1,  r1,  r2, lsl #1
        sub             r1,  r1,  #2
        mov             r3,  r2
        sub             sp,  sp,  #(16*12)
        mov             r4,  sp
        vpush           {d8-d15}
        bl              \type\()_h264_qpel16_hv_lowpass_neon
        vpop            {d8-d15}
        mov             sp,  r11
        pop             {r4, r9-r11, pc}
endfunc

function ff_\type\()_h264_qpel16_mc32_neon, export=1
        push            {r0, r1, r4-r5, r9-r11, lr}
        add             r1,  r1,  #1
        b               \type\()_h264_qpel16_mc12
endfunc

function ff_\type\()_h264_qpel16_mc03_neon, export=1
        push            {r4, lr}
        add             r12, r1,  r2
        b               \type\()_h264_qpel16_mc01
endfunc

function ff_\type\()_h264_qpel16_mc13_neon, export=1
        push            {r0, r1, r4, r11, lr}
        add             r1,  r1,  r2
        b               \type\()_h264_qpel16_mc11
endfunc

function ff_\type\()_h264_qpel16_mc23_neon, export=1
        push            {r0, r1, r4-r5, r9-r11, lr}
        add             r1,  r1,  r2
        b               \type\()_h264_qpel16_mc21
endfunc

function ff_\type\()_h264_qpel16_mc33_neon, export=1
        add             r1,  r1,  #1
        push            {r0, r1, r4, r11, lr}
        add             r1,  r1,  r2
        sub             r1,  r1,  #1
        b               \type\()_h264_qpel16_mc11
endfunc
.endm

        h264_qpel16 put
        h264_qpel16 avg
Akagi201/ffmpeg-xcode
1,102
ffmpeg-3.0.2/libavcodec/arm/videodsp_armv5te.S
@
@ ARMv5te-optimized core video DSP functions
@ Copyright (c) 2004 AGAWA Koji <i (AT) atty (DOT) jp>
@
@ This file is part of FFmpeg
@
@ FFmpeg is free software; you can redistribute it and/or
@ modify it under the terms of the GNU Lesser General Public
@ License as published by the Free Software Foundation; either
@ version 2.1 of the License, or (at your option) any later version.
@
@ FFmpeg is distributed in the hope that it will be useful,
@ but WITHOUT ANY WARRANTY; without even the implied warranty of
@ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
@ Lesser General Public License for more details.
@
@ You should have received a copy of the GNU Lesser General Public
@ License along with FFmpeg; if not, write to the Free Software
@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
@

#include "config.h"
#include "libavutil/arm/asm.S"

function ff_prefetch_arm, export=1
1:      subs            r2,  r2,  #1
        pld             [r0]
        add             r0,  r0,  r1
        bne             1b
        bx              lr
endfunc
Akagi201/ffmpeg-xcode
1,828
ffmpeg-3.0.2/libavcodec/arm/idctdsp_armv6.S
/*
 * Copyright (c) 2009 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"

function ff_add_pixels_clamped_armv6, export=1
        push            {r4-r8,lr}
        mov             r3,  #8
1:      ldm             r0!, {r4,r5,r12,lr}
        ldrd            r6,  r7,  [r1]
        pkhbt           r8,  r4,  r5,  lsl #16
        pkhtb           r5,  r5,  r4,  asr #16
        pkhbt           r4,  r12, lr,  lsl #16
        pkhtb           lr,  lr,  r12, asr #16
        pld             [r1, r2]
        uxtab16         r8,  r8,  r6
        uxtab16         r5,  r5,  r6,  ror #8
        uxtab16         r4,  r4,  r7
        uxtab16         lr,  lr,  r7,  ror #8
        usat16          r8,  #8,  r8
        usat16          r5,  #8,  r5
        usat16          r4,  #8,  r4
        usat16          lr,  #8,  lr
        orr             r6,  r8,  r5,  lsl #8
        orr             r7,  r4,  lr,  lsl #8
        subs            r3,  r3,  #1
        strd_post       r6,  r7,  r1,  r2
        bgt             1b
        pop             {r4-r8,pc}
endfunc
Akagi201/ffmpeg-xcode
14,745
ffmpeg-3.0.2/libavcodec/arm/h264idct_neon.S
/*
 * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"

function ff_h264_idct_add_neon, export=1
        vld1.64         {d0-d3},  [r1,:128]
        vmov.i16        q15, #0

        vswp            d1,  d2
        vst1.16         {q15},    [r1,:128]!
        vadd.i16        d4,  d0,  d1
        vst1.16         {q15},    [r1,:128]!
        vshr.s16        q8,  q1,  #1
        vsub.i16        d5,  d0,  d1
        vadd.i16        d6,  d2,  d17
        vsub.i16        d7,  d16, d3
        vadd.i16        q0,  q2,  q3
        vsub.i16        q1,  q2,  q3

        vtrn.16         d0,  d1
        vtrn.16         d3,  d2
        vtrn.32         d0,  d3
        vtrn.32         d1,  d2

        vadd.i16        d4,  d0,  d3
        vld1.32         {d18[0]}, [r0,:32], r2
        vswp            d1,  d3
        vshr.s16        q8,  q1,  #1
        vld1.32         {d19[1]}, [r0,:32], r2
        vsub.i16        d5,  d0,  d1
        vld1.32         {d18[1]}, [r0,:32], r2
        vadd.i16        d6,  d16, d3
        vld1.32         {d19[0]}, [r0,:32], r2
        vsub.i16        d7,  d2,  d17
        sub             r0,  r0,  r2, lsl #2
        vadd.i16        q0,  q2,  q3
        vsub.i16        q1,  q2,  q3

        vrshr.s16       q0,  q0,  #6
        vrshr.s16       q1,  q1,  #6

        vaddw.u8        q0,  q0,  d18
        vaddw.u8        q1,  q1,  d19

        vqmovun.s16     d0,  q0
        vqmovun.s16     d1,  q1

        vst1.32         {d0[0]},  [r0,:32], r2
        vst1.32         {d1[1]},  [r0,:32], r2
        vst1.32         {d0[1]},  [r0,:32], r2
        vst1.32         {d1[0]},  [r0,:32], r2

        sub             r1,  r1,  #32
        bx              lr
endfunc

function ff_h264_idct_dc_add_neon, export=1
        mov             r3,  #0
        vld1.16         {d2[],d3[]}, [r1,:16]
        strh            r3,  [r1]
        vrshr.s16       q1,  q1,  #6
        vld1.32         {d0[0]},  [r0,:32], r2
        vld1.32         {d0[1]},  [r0,:32], r2
        vaddw.u8        q2,  q1,  d0
        vld1.32         {d1[0]},  [r0,:32], r2
        vld1.32         {d1[1]},  [r0,:32], r2
        vaddw.u8        q1,  q1,  d1
        vqmovun.s16     d0,  q2
        vqmovun.s16     d1,  q1
        sub             r0,  r0,  r2, lsl #2
        vst1.32         {d0[0]},  [r0,:32], r2
        vst1.32         {d0[1]},  [r0,:32], r2
        vst1.32         {d1[0]},  [r0,:32], r2
        vst1.32         {d1[1]},  [r0,:32], r2
        bx              lr
endfunc

function ff_h264_idct_add16_neon, export=1
        push            {r4-r8,lr}
        mov             r4,  r0
        mov             r5,  r1
        mov             r1,  r2
        mov             r2,  r3
        ldr             r6,  [sp, #24]
        movrel          r7,  scan8
        mov             ip,  #16
1:      ldrb            r8,  [r7], #1
        ldr             r0,  [r5], #4
        ldrb            r8,  [r6, r8]
        subs            r8,  r8,  #1
        blt             2f
        ldrsh           lr,  [r1]
        add             r0,  r0,  r4
        it              ne
        movne           lr,  #0
        cmp             lr,  #0
        ite             ne
        adrne           lr,  X(ff_h264_idct_dc_add_neon) + CONFIG_THUMB
        adreq           lr,  X(ff_h264_idct_add_neon)    + CONFIG_THUMB
        blx             lr
2:      subs            ip,  ip,  #1
        add             r1,  r1,  #32
        bne             1b
        pop             {r4-r8,pc}
endfunc

function ff_h264_idct_add16intra_neon, export=1
        push            {r4-r8,lr}
        mov             r4,  r0
        mov             r5,  r1
        mov             r1,  r2
        mov             r2,  r3
        ldr             r6,  [sp, #24]
        movrel          r7,  scan8
        mov             ip,  #16
1:      ldrb            r8,  [r7], #1
        ldr             r0,  [r5], #4
        ldrb            r8,  [r6, r8]
        add             r0,  r0,  r4
        cmp             r8,  #0
        ldrsh           r8,  [r1]
        iteet           ne
        adrne           lr,  X(ff_h264_idct_add_neon)    + CONFIG_THUMB
        adreq           lr,  X(ff_h264_idct_dc_add_neon) + CONFIG_THUMB
        cmpeq           r8,  #0
        blxne           lr
        subs            ip,  ip,  #1
        add             r1,  r1,  #32
        bne             1b
        pop             {r4-r8,pc}
endfunc

function ff_h264_idct_add8_neon, export=1
        push            {r4-r10,lr}
        ldm             r0,  {r4,r9}
        add             r5,  r1,  #16*4
        add             r1,  r2,  #16*32
        mov             r2,  r3
        mov             r10, r1
        ldr             r6,  [sp, #32]
        movrel          r7,  scan8+16
        mov             r12, #0
1:      ldrb            r8,  [r7, r12]
        ldr             r0,  [r5, r12, lsl #2]
        ldrb            r8,  [r6, r8]
        add             r0,  r0,  r4
        add             r1,  r10, r12, lsl #5
        cmp             r8,  #0
        ldrsh           r8,  [r1]
        iteet           ne
        adrne           lr,  X(ff_h264_idct_add_neon)    + CONFIG_THUMB
        adreq           lr,  X(ff_h264_idct_dc_add_neon) + CONFIG_THUMB
        cmpeq           r8,  #0
        blxne           lr
        add             r12, r12, #1
        cmp             r12, #4
        itt             eq
        moveq           r12, #16
        moveq           r4,  r9
        cmp             r12, #20
        blt             1b
        pop             {r4-r10,pc}
endfunc

.macro  idct8x8_cols pass
.if \pass == 0
        qa .req q2
        qb .req q14
        vshr.s16        q2,  q10, #1
        vadd.i16        q0,  q8,  q12
        vld1.16         {q14-q15},[r1,:128]
        vst1.16         {q3},     [r1,:128]!
        vst1.16         {q3},     [r1,:128]!
        vsub.i16        q1,  q8,  q12
        vshr.s16        q3,  q14, #1
        vsub.i16        q2,  q2,  q14
        vadd.i16        q3,  q3,  q10
.else
        qa .req q14
        qb .req q2
        vtrn.32         q8,  q10
        vtrn.16         q12, q13
        vtrn.32         q9,  q11
        vtrn.32         q12, q2
        vtrn.32         q13, q15
        vswp            d21, d4
        vshr.s16        q14, q10, #1
        vswp            d17, d24
        vshr.s16        q3,  q2,  #1
        vswp            d19, d26
        vadd.i16        q0,  q8,  q12
        vswp            d23, d30
        vsub.i16        q1,  q8,  q12
        vsub.i16        q14, q14, q2
        vadd.i16        q3,  q3,  q10
.endif
        vadd.i16        q10, q1,  qa
        vsub.i16        q12, q1,  qa
        vadd.i16        q8,  q0,  q3
        vsub.i16        qb,  q0,  q3
        vsub.i16        q0,  q13, q11
        vadd.i16        q1,  q15, q9
        vsub.i16        qa,  q15, q9
        vadd.i16        q3,  q13, q11
        vsub.i16        q0,  q0,  q15
        vsub.i16        q1,  q1,  q11
        vadd.i16        qa,  qa,  q13
        vadd.i16        q3,  q3,  q9
        vshr.s16        q9,  q9,  #1
        vshr.s16        q11, q11, #1
        vshr.s16        q13, q13, #1
        vshr.s16        q15, q15, #1
        vsub.i16        q0,  q0,  q15
        vsub.i16        q1,  q1,  q11
        vadd.i16        qa,  qa,  q13
        vadd.i16        q3,  q3,  q9
        vshr.s16        q9,  q0,  #2
        vshr.s16        q11, q1,  #2
        vshr.s16        q13, qa,  #2
        vshr.s16        q15, q3,  #2
        vsub.i16        q3,  q3,  q9
        vsub.i16        qa,  q11, qa
        vadd.i16        q1,  q1,  q13
        vadd.i16        q0,  q0,  q15
.if \pass == 0
        vsub.i16        q15, q8,  q3
        vadd.i16        q8,  q8,  q3
        vadd.i16        q9,  q10, q2
        vsub.i16        q2,  q10, q2
        vtrn.16         q8,  q9
        vadd.i16        q10, q12, q1
        vtrn.16         q2,  q15
        vadd.i16        q11, q14, q0
        vsub.i16        q13, q12, q1
        vtrn.16         q10, q11
        vsub.i16        q12, q14, q0
.else
        vsub.i16        q15, q8,  q3
        vadd.i16        q8,  q8,  q3
        vadd.i16        q9,  q10, q14
        vsub.i16        q14, q10, q14
        vadd.i16        q10, q12, q1
        vsub.i16        q13, q12, q1
        vadd.i16        q11, q2,  q0
        vsub.i16        q12, q2,  q0
.endif
        .unreq qa
        .unreq qb
.endm

function ff_h264_idct8_add_neon, export=1
        vmov.i16        q3,  #0
        vld1.16         {q8-q9},  [r1,:128]
        vst1.16         {q3},     [r1,:128]!
        vst1.16         {q3},     [r1,:128]!
        vld1.16         {q10-q11},[r1,:128]
        vst1.16         {q3},     [r1,:128]!
        vst1.16         {q3},     [r1,:128]!
        vld1.16         {q12-q13},[r1,:128]
        vst1.16         {q3},     [r1,:128]!
        vst1.16         {q3},     [r1,:128]!

        idct8x8_cols    0
        idct8x8_cols    1

        mov             r3,  r0
        vrshr.s16       q8,  q8,  #6
        vld1.8          {d0},  [r0,:64], r2
        vrshr.s16       q9,  q9,  #6
        vld1.8          {d1},  [r0,:64], r2
        vrshr.s16       q10, q10, #6
        vld1.8          {d2},  [r0,:64], r2
        vrshr.s16       q11, q11, #6
        vld1.8          {d3},  [r0,:64], r2
        vrshr.s16       q12, q12, #6
        vld1.8          {d4},  [r0,:64], r2
        vrshr.s16       q13, q13, #6
        vld1.8          {d5},  [r0,:64], r2
        vrshr.s16       q14, q14, #6
        vld1.8          {d6},  [r0,:64], r2
        vrshr.s16       q15, q15, #6
        vld1.8          {d7},  [r0,:64], r2

        vaddw.u8        q8,  q8,  d0
        vaddw.u8        q9,  q9,  d1
        vaddw.u8        q10, q10, d2
        vqmovun.s16     d0,  q8
        vaddw.u8        q11, q11, d3
        vqmovun.s16     d1,  q9
        vaddw.u8        q12, q12, d4
        vqmovun.s16     d2,  q10
        vst1.8          {d0},  [r3,:64], r2
        vaddw.u8        q13, q13, d5
        vqmovun.s16     d3,  q11
        vst1.8          {d1},  [r3,:64], r2
        vaddw.u8        q14, q14, d6
        vqmovun.s16     d4,  q12
        vst1.8          {d2},  [r3,:64], r2
        vaddw.u8        q15, q15, d7
        vqmovun.s16     d5,  q13
        vst1.8          {d3},  [r3,:64], r2
        vqmovun.s16     d6,  q14
        vqmovun.s16     d7,  q15
        vst1.8          {d4},  [r3,:64], r2
        vst1.8          {d5},  [r3,:64], r2
        vst1.8          {d6},  [r3,:64], r2
        vst1.8          {d7},  [r3,:64], r2

        sub             r1,  r1,  #128
        bx              lr
endfunc

function ff_h264_idct8_dc_add_neon, export=1
        mov             r3,  #0
        vld1.16         {d30[],d31[]},[r1,:16]
        strh            r3,  [r1]
        vld1.32         {d0},  [r0,:64], r2
        vrshr.s16       q15, q15, #6
        vld1.32         {d1},  [r0,:64], r2
        vld1.32         {d2},  [r0,:64], r2
        vaddw.u8        q8,  q15, d0
        vld1.32         {d3},  [r0,:64], r2
        vaddw.u8        q9,  q15, d1
        vld1.32         {d4},  [r0,:64], r2
        vaddw.u8        q10, q15, d2
        vld1.32         {d5},  [r0,:64], r2
        vaddw.u8        q11, q15, d3
        vld1.32         {d6},  [r0,:64], r2
        vaddw.u8        q12, q15, d4
        vld1.32         {d7},  [r0,:64], r2
        vaddw.u8        q13, q15, d5
        vaddw.u8        q14, q15, d6
        vaddw.u8        q15, q15, d7
        vqmovun.s16     d0,  q8
        vqmovun.s16     d1,  q9
        vqmovun.s16     d2,  q10
        vqmovun.s16     d3,  q11
        sub             r0,  r0,  r2, lsl #3
        vst1.32         {d0},  [r0,:64], r2
        vqmovun.s16     d4,  q12
        vst1.32         {d1},  [r0,:64], r2
        vqmovun.s16     d5,  q13
        vst1.32         {d2},  [r0,:64], r2
        vqmovun.s16     d6,  q14
        vst1.32         {d3},  [r0,:64], r2
        vqmovun.s16     d7,  q15
        vst1.32         {d4},  [r0,:64], r2
        vst1.32         {d5},  [r0,:64], r2
        vst1.32         {d6},  [r0,:64], r2
        vst1.32         {d7},  [r0,:64], r2
        bx              lr
endfunc

function ff_h264_idct8_add4_neon, export=1
        push            {r4-r8,lr}
        mov             r4,  r0
        mov             r5,  r1
        mov             r1,  r2
        mov             r2,  r3
        ldr             r6,  [sp, #24]
        movrel          r7,  scan8
        mov             r12, #16
1:      ldrb            r8,  [r7], #4
        ldr             r0,  [r5], #16
        ldrb            r8,  [r6, r8]
        subs            r8,  r8,  #1
        blt             2f
        ldrsh           lr,  [r1]
        add             r0,  r0,  r4
        it              ne
        movne           lr,  #0
        cmp             lr,  #0
        ite             ne
        adrne           lr,  X(ff_h264_idct8_dc_add_neon) + CONFIG_THUMB
        adreq           lr,  X(ff_h264_idct8_add_neon)    + CONFIG_THUMB
        blx             lr
2:      subs            r12, r12, #4
        add             r1,  r1,  #128
        bne             1b
        pop             {r4-r8,pc}
endfunc

const   scan8
        .byte           4+ 1*8, 5+ 1*8, 4+ 2*8, 5+ 2*8
        .byte           6+ 1*8, 7+ 1*8, 6+ 2*8, 7+ 2*8
        .byte           4+ 3*8, 5+ 3*8, 4+ 4*8, 5+ 4*8
        .byte           6+ 3*8, 7+ 3*8, 6+ 4*8, 7+ 4*8
        .byte           4+ 6*8, 5+ 6*8, 4+ 7*8, 5+ 7*8
        .byte           6+ 6*8, 7+ 6*8, 6+ 7*8, 7+ 7*8
        .byte           4+ 8*8, 5+ 8*8, 4+ 9*8, 5+ 9*8
        .byte           6+ 8*8, 7+ 8*8, 6+ 9*8, 7+ 9*8
        .byte           4+11*8, 5+11*8, 4+12*8, 5+12*8
        .byte           6+11*8, 7+11*8, 6+12*8, 7+12*8
        .byte           4+13*8, 5+13*8, 4+14*8, 5+14*8
        .byte           6+13*8, 7+13*8, 6+14*8, 7+14*8
endconst
Akagi201/ffmpeg-xcode
3,057
ffmpeg-3.0.2/libavcodec/arm/fmtconvert_neon.S
/*
 * ARM NEON optimised Format Conversion Utils
 * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
 * Copyright (c) 2015 Janne Grunau <janne-libav@jannau.net>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#include "libavutil/arm/asm.S"

function ff_int32_to_float_fmul_scalar_neon, export=1
VFP     vdup.32         q0,  d0[0]
VFP     len .req r2
NOVFP   vdup.32         q0,  r2
NOVFP   len .req r3

        vld1.32         {q1},[r1,:128]!
        vcvt.f32.s32    q3,  q1
        vld1.32         {q2},[r1,:128]!
        vcvt.f32.s32    q8,  q2
1:      subs            len, len, #8
        pld             [r1, #16]
        vmul.f32        q9,  q3,  q0
        vmul.f32        q10, q8,  q0
        beq             2f
        vld1.32         {q1},[r1,:128]!
        vcvt.f32.s32    q3,  q1
        vld1.32         {q2},[r1,:128]!
        vcvt.f32.s32    q8,  q2
        vst1.32         {q9}, [r0,:128]!
        vst1.32         {q10},[r0,:128]!
        b               1b
2:      vst1.32         {q9}, [r0,:128]!
        vst1.32         {q10},[r0,:128]!
        bx              lr
        .unreq len
endfunc

function ff_int32_to_float_fmul_array8_neon, export=1
        ldr             r0,  [sp]
        lsr             r0,  r0,  #3
        subs            r0,  r0,  #1
        beq             1f
2:      vld1.32         {q0-q1},   [r2,:128]!
        vld1.32         {q2-q3},   [r2,:128]!
        vld1.32         {d20},     [r3]!
        subs            r0,  r0,  #2
        vcvt.f32.s32    q0,  q0
        vcvt.f32.s32    q1,  q1
        vdup.32         q8,  d20[0]
        vcvt.f32.s32    q2,  q2
        vcvt.f32.s32    q3,  q3
        vmul.f32        q0,  q0,  q8
        vdup.32         q9,  d20[1]
        vmul.f32        q1,  q1,  q8
        vmul.f32        q2,  q2,  q9
        vmul.f32        q3,  q3,  q9
        vst1.32         {q0-q1},   [r1,:128]!
        vst1.32         {q2-q3},   [r1,:128]!
        bgt             2b
        it              lt
        bxlt            lr
1:      vld1.32         {q0-q1},   [r2,:128]
        vld1.32         {d16[],d17[]}, [r3]
        vcvt.f32.s32    q0,  q0
        vcvt.f32.s32    q1,  q1
        vmul.f32        q0,  q0,  q8
        vmul.f32        q1,  q1,  q8
        vst1.32         {q0-q1},   [r1,:128]
        bx              lr
endfunc
Akagi201/ffmpeg-xcode
12,548
ffmpeg-3.0.2/libavcodec/arm/mdct_neon.S
/*
 * ARM NEON optimised MDCT
 * Copyright (c) 2009 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"

#define ff_fft_calc_neon X(ff_fft_calc_neon)

function ff_imdct_half_neon, export=1
        push            {r4-r8,lr}

        mov             r12, #1
        ldr             lr,  [r0, #20]          @ mdct_bits
        ldr             r4,  [r0, #24]          @ tcos
        ldr             r3,  [r0, #8]           @ revtab
        lsl             r12, r12, lr            @ n  = 1 << nbits
        lsr             lr,  r12, #2            @ n4 = n >> 2
        add             r7,  r2,  r12, lsl #1
        mov             r12, #-16
        sub             r7,  r7,  #16

        vld2.32         {d16-d17},[r7,:128],r12 @ d16=x,n1 d17=x,n0
        vld2.32         {d0-d1},  [r2,:128]!    @ d0 =m0,x d1 =m1,x
        vrev64.32       d17, d17
        vld2.32         {d2,d3},  [r4,:128]!    @ d2=c0,c1 d3=s0,s2
        vmul.f32        d6,  d17, d2
        vmul.f32        d7,  d0,  d2
1:
        subs            lr,  lr,  #2
        ldr             r6,  [r3], #4
        vmul.f32        d4,  d0,  d3
        vmul.f32        d5,  d17, d3
        vsub.f32        d4,  d6,  d4
        vadd.f32        d5,  d5,  d7
        uxth            r8,  r6,  ror #16
        uxth            r6,  r6
        add             r8,  r1,  r8,  lsl #3
        add             r6,  r1,  r6,  lsl #3
        beq             1f
        vld2.32         {d16-d17},[r7,:128],r12
        vld2.32         {d0-d1},  [r2,:128]!
        vrev64.32       d17, d17
        vld2.32         {d2,d3},  [r4,:128]!    @ d2=c0,c1 d3=s0,s2
        vmul.f32        d6,  d17, d2
        vmul.f32        d7,  d0,  d2
        vst2.32         {d4[0],d5[0]}, [r6,:64]
        vst2.32         {d4[1],d5[1]}, [r8,:64]
        b               1b
1:
        vst2.32         {d4[0],d5[0]}, [r6,:64]
        vst2.32         {d4[1],d5[1]}, [r8,:64]

        mov             r4,  r0
        mov             r6,  r1
        bl              ff_fft_calc_neon

        mov             r12, #1
        ldr             lr,  [r4, #20]          @ mdct_bits
        ldr             r4,  [r4, #24]          @ tcos
        lsl             r12, r12, lr            @ n  = 1 << nbits
        lsr             lr,  r12, #3            @ n8 = n >> 3

        add             r4,  r4,  lr,  lsl #3
        add             r6,  r6,  lr,  lsl #3
        sub             r1,  r4,  #16
        sub             r3,  r6,  #16

        mov             r7,  #-16
        mov             r8,  r6
        mov             r0,  r3

        vld2.32         {d0-d1},  [r3,:128], r7 @ d0 =i1,r1 d1 =i0,r0
        vld2.32         {d20-d21},[r6,:128]!    @ d20=i2,r2 d21=i3,r3
        vld2.32         {d16,d18},[r1,:128], r7 @ d16=c1,c0 d18=s1,s0
1:
        subs            lr,  lr,  #2
        vmul.f32        d7,  d0,  d18
        vld2.32         {d17,d19},[r4,:128]!    @ d17=c2,c3 d19=s2,s3
        vmul.f32        d4,  d1,  d18
        vmul.f32        d5,  d21, d19
        vmul.f32        d6,  d20, d19
        vmul.f32        d22, d1,  d16
        vmul.f32        d23, d21, d17
        vmul.f32        d24, d0,  d16
        vmul.f32        d25, d20, d17
        vadd.f32        d7,  d7,  d22
        vadd.f32        d6,  d6,  d23
        vsub.f32        d4,  d4,  d24
        vsub.f32        d5,  d5,  d25
        beq             1f
        vld2.32         {d0-d1},  [r3,:128], r7
        vld2.32         {d20-d21},[r6,:128]!
        vld2.32         {d16,d18},[r1,:128], r7 @ d16=c1,c0 d18=s1,s0
        vrev64.32       q3,  q3
        vst2.32         {d4,d6},  [r0,:128], r7
        vst2.32         {d5,d7},  [r8,:128]!
        b               1b
1:
        vrev64.32       q3,  q3
        vst2.32         {d4,d6},  [r0,:128]
        vst2.32         {d5,d7},  [r8,:128]

        pop             {r4-r8,pc}
endfunc

function ff_imdct_calc_neon, export=1
        push            {r4-r6,lr}

        ldr             r3,  [r0, #20]
        mov             r4,  #1
        mov             r5,  r1
        lsl             r4,  r4,  r3
        add             r1,  r1,  r4

        bl              X(ff_imdct_half_neon)

        add             r0,  r5,  r4,  lsl #2
        add             r1,  r5,  r4,  lsl #1
        sub             r0,  r0,  #8
        sub             r2,  r1,  #16
        mov             r3,  #-16
        mov             r6,  #-8
        vmov.i32        d30, #1<<31
1:
        vld1.32         {d0-d1},  [r2,:128], r3
        pld             [r0, #-16]
        vrev64.32       q0,  q0
        vld1.32         {d2-d3},  [r1,:128]!
        veor            d4,  d1,  d30
        pld             [r2, #-16]
        vrev64.32       q1,  q1
        veor            d5,  d0,  d30
        vst1.32         {d2},     [r0,:64], r6
        vst1.32         {d3},     [r0,:64], r6
        vst1.32         {d4-d5},  [r5,:128]!
        subs            r4,  r4,  #16
        bgt             1b

        pop             {r4-r6,pc}
endfunc

function ff_mdct_calc_neon, export=1
        push            {r4-r10,lr}

        mov             r12, #1
        ldr             lr,  [r0, #20]          @ mdct_bits
        ldr             r4,  [r0, #24]          @ tcos
        ldr             r3,  [r0, #8]           @ revtab
        lsl             lr,  r12, lr            @ n = 1 << nbits
        add             r7,  r2,  lr            @ in4u
        sub             r9,  r7,  #16           @ in4d
        add             r2,  r7,  lr,  lsl #1   @ in3u
        add             r8,  r9,  lr,  lsl #1   @ in3d
        add             r5,  r4,  lr,  lsl #1
        sub             r5,  r5,  #16
        sub             r3,  r3,  #4
        mov             r12, #-16

        vld2.32         {d16,d18},[r9,:128],r12 @ in0u0,in0u1 in4d1,in4d0
        vld2.32         {d17,d19},[r8,:128],r12 @ in2u0,in2u1 in3d1,in3d0
        vld2.32         {d0, d2}, [r7,:128]!    @ in4u0,in4u1 in2d1,in2d0
        vrev64.32       q9,  q9                 @ in4d0,in4d1 in3d0,in3d1
        vld2.32         {d1, d3}, [r2,:128]!    @ in3u0,in3u1 in1d1,in1d0
        vsub.f32        d0,  d18, d0            @ in4d-in4u      I
        vld2.32         {d20,d21},[r4,:128]!    @ c0,c1 s0,s1
        vrev64.32       q1,  q1                 @ in2d0,in2d1 in1d0,in1d1
        vld2.32         {d30,d31},[r5,:128],r12 @ c2,c3 s2,s3
        vadd.f32        d1,  d1,  d19           @ in3u+in3d     -R
        vsub.f32        d16, d16, d2            @ in0u-in2d      R
        vadd.f32        d17, d17, d3            @ in2u+in1d     -I
1:
        vmul.f32        d7,  d0,  d21           @  I*s
A       ldr             r10, [r3, lr, lsr #1]
T       lsr             r10, lr,  #1
T       ldr             r10, [r3, r10]
        vmul.f32        d6,  d1,  d20           @ -R*c
        ldr             r6,  [r3, #4]!
        vmul.f32        d4,  d1,  d21           @ -R*s
        vmul.f32        d5,  d0,  d20           @  I*c
        vmul.f32        d24, d16, d30           @  R*c
        vmul.f32        d25, d17, d31           @ -I*s
        vmul.f32        d22, d16, d31           @  R*s
        vmul.f32        d23, d17, d30           @  I*c
        subs            lr,  lr,  #16
        vsub.f32        d6,  d6,  d7            @ -R*c-I*s
        vadd.f32        d7,  d4,  d5            @ -R*s+I*c
        vsub.f32        d24, d25, d24           @  I*s-R*c
        vadd.f32        d25, d22, d23           @  R*s-I*c
        beq             1f
        mov             r12, #-16
        vld2.32         {d16,d18},[r9,:128],r12 @ in0u0,in0u1 in4d1,in4d0
        vld2.32         {d17,d19},[r8,:128],r12 @ in2u0,in2u1 in3d1,in3d0
        vneg.f32        d7,  d7                 @  R*s-I*c
        vld2.32         {d0, d2}, [r7,:128]!    @ in4u0,in4u1 in2d1,in2d0
        vrev64.32       q9,  q9                 @ in4d0,in4d1 in3d0,in3d1
        vld2.32         {d1, d3}, [r2,:128]!    @ in3u0,in3u1 in1d1,in1d0
        vsub.f32        d0,  d18, d0            @ in4d-in4u      I
        vld2.32         {d20,d21},[r4,:128]!    @ c0,c1 s0,s1
        vrev64.32       q1,  q1                 @ in2d0,in2d1 in1d0,in1d1
        vld2.32         {d30,d31},[r5,:128],r12 @ c2,c3 s2,s3
        vadd.f32        d1,  d1,  d19           @ in3u+in3d     -R
        vsub.f32        d16, d16, d2            @ in0u-in2d      R
        vadd.f32        d17, d17, d3            @ in2u+in1d     -I
        uxth            r12, r6,  ror #16
        uxth            r6,  r6
        add             r12, r1,  r12, lsl #3
        add             r6,  r1,  r6,  lsl #3
        vst2.32         {d6[0],d7[0]}, [r6,:64]
        vst2.32         {d6[1],d7[1]}, [r12,:64]
        uxth            r6,  r10, ror #16
        uxth            r10, r10
        add             r6 , r1,  r6,  lsl #3
        add             r10, r1,  r10, lsl #3
        vst2.32         {d24[0],d25[0]},[r10,:64]
        vst2.32         {d24[1],d25[1]},[r6,:64]
        b               1b
1:
        vneg.f32        d7,  d7                 @  R*s-I*c
        uxth            r12, r6,  ror #16
        uxth            r6,  r6
        add             r12, r1,  r12, lsl #3
        add             r6,  r1,  r6,  lsl #3
        vst2.32         {d6[0],d7[0]}, [r6,:64]
        vst2.32         {d6[1],d7[1]}, [r12,:64]
        uxth            r6,  r10, ror #16
        uxth            r10, r10
        add             r6 , r1,  r6,  lsl #3
        add             r10, r1,  r10, lsl #3
        vst2.32         {d24[0],d25[0]},[r10,:64]
        vst2.32         {d24[1],d25[1]},[r6,:64]

        mov             r4,  r0
        mov             r6,  r1
        bl              ff_fft_calc_neon

        mov             r12, #1
        ldr             lr,  [r4, #20]          @ mdct_bits
        ldr             r4,  [r4, #24]          @ tcos
        lsl             r12, r12, lr            @ n  = 1 << nbits
        lsr             lr,  r12, #3            @ n8 = n >> 3

        add             r4,  r4,  lr,  lsl #3
        add             r6,  r6,  lr,  lsl #3
        sub             r1,  r4,  #16
        sub             r3,  r6,  #16

        mov             r7,  #-16
        mov             r8,  r6
        mov             r0,  r3

        vld2.32         {d0-d1},  [r3,:128], r7 @ d0 =r1,i1 d1 =r0,i0
        vld2.32         {d20-d21},[r6,:128]!    @ d20=r2,i2 d21=r3,i3
        vld2.32         {d16,d18},[r1,:128], r7 @ c1,c0 s1,s0
1:
        subs            lr,  lr,  #2
        vmul.f32        d7,  d0,  d18           @ r1*s1,r0*s0
        vld2.32         {d17,d19},[r4,:128]!    @ c2,c3 s2,s3
        vmul.f32        d4,  d1,  d18           @ i1*s1,i0*s0
        vmul.f32        d5,  d21, d19           @ i2*s2,i3*s3
        vmul.f32        d6,  d20, d19           @ r2*s2,r3*s3
        vmul.f32        d24, d0,  d16           @ r1*c1,r0*c0
        vmul.f32        d25, d20, d17           @ r2*c2,r3*c3
        vmul.f32        d22, d21, d17           @ i2*c2,i3*c3
        vmul.f32        d23, d1,  d16           @ i1*c1,i0*c0
        vadd.f32        d4,  d4,  d24           @ i1*s1+r1*c1,i0*s0+r0*c0
        vadd.f32        d5,  d5,  d25           @ i2*s2+r2*c2,i3*s3+r3*c3
        vsub.f32        d6,  d22, d6            @ i2*c2-r2*s2,i3*c3-r3*s3
        vsub.f32        d7,  d23, d7            @ i1*c1-r1*s1,i0*c0-r0*s0
        vneg.f32        q2,  q2
        beq             1f
        vld2.32         {d0-d1},  [r3,:128], r7
        vld2.32         {d20-d21},[r6,:128]!
        vld2.32         {d16,d18},[r1,:128], r7 @ c1,c0 s1,s0
        vrev64.32       q3,  q3
        vst2.32         {d4,d6},  [r0,:128], r7
        vst2.32         {d5,d7},  [r8,:128]!
        b               1b
1:
        vrev64.32       q3,  q3
        vst2.32         {d4,d6},  [r0,:128]
        vst2.32         {d5,d7},  [r8,:128]

        pop             {r4-r10,pc}
endfunc
Akagi201/ffmpeg-xcode
2,613
ffmpeg-3.0.2/libavcodec/arm/mpegvideoencdsp_armv6.S
/*
 * Copyright (c) 2009 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"

function ff_pix_norm1_armv6, export=1
        push            {r4-r6, lr}
        mov             r12, #16
        mov             lr,  #0
1:
        ldm             r0,  {r2-r5}
        uxtb16          r6,  r2
        uxtb16          r2,  r2,  ror #8
        smlad           lr,  r6,  r6,  lr
        uxtb16          r6,  r3
        smlad           lr,  r2,  r2,  lr
        uxtb16          r3,  r3,  ror #8
        smlad           lr,  r6,  r6,  lr
        uxtb16          r6,  r4
        smlad           lr,  r3,  r3,  lr
        uxtb16          r4,  r4,  ror #8
        smlad           lr,  r6,  r6,  lr
        uxtb16          r6,  r5
        smlad           lr,  r4,  r4,  lr
        uxtb16          r5,  r5,  ror #8
        smlad           lr,  r6,  r6,  lr
        subs            r12, r12, #1
        add             r0,  r0,  r1
        smlad           lr,  r5,  r5,  lr
        bgt             1b

        mov             r0,  lr
        pop             {r4-r6, pc}
endfunc

function ff_pix_sum_armv6, export=1
        push            {r4-r7, lr}
        mov             r12, #16
        mov             r2,  #0
        mov             r3,  #0
        mov             lr,  #0
        ldr             r4,  [r0]
1:
        subs            r12, r12, #1
        ldr             r5,  [r0, #4]
        usada8          r2,  r4,  lr,  r2
        ldr             r6,  [r0, #8]
        usada8          r3,  r5,  lr,  r3
        ldr             r7,  [r0, #12]
        usada8          r2,  r6,  lr,  r2
        beq             2f
        ldr_pre         r4,  r0,  r1
        usada8          r3,  r7,  lr,  r3
        bgt             1b
2:
        usada8          r3,  r7,  lr,  r3
        add             r0,  r2,  r3
        pop             {r4-r7, pc}
endfunc
Akagi201/ffmpeg-xcode
27,781
ffmpeg-3.0.2/libavcodec/arm/hevcdsp_qpel_neon.S
/*
 * Copyright (c) 2014 - 2015 Seppo Tomperi <seppo.tomperi@vtt.fi>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"
#include "neon.S"

#define MAX_PB_SIZE #64

.macro regshuffle_d8
        vmov            d16, d17
        vmov            d17, d18
        vmov            d18, d19
        vmov            d19, d20
        vmov            d20, d21
        vmov            d21, d22
        vmov            d22, d23
.endm

.macro regshuffle_q8
        vmov            q0,  q1
        vmov            q1,  q2
        vmov            q2,  q3
        vmov            q3,  q4
        vmov            q4,  q5
        vmov            q5,  q6
        vmov            q6,  q7
.endm

.macro vextin8
        pld             [r2]
        vld1.8          {q11}, [r2], r3
        vext.8          d16, d22, d23, #1
        vext.8          d17, d22, d23, #2
        vext.8          d18, d22, d23, #3
        vext.8          d19, d22, d23, #4
        vext.8          d20, d22, d23, #5
        vext.8          d21, d22, d23, #6
        vext.8          d22, d22, d23, #7
.endm

.macro loadin8
        pld             [r2]
        vld1.8          {d16}, [r2], r3
        pld             [r2]
        vld1.8          {d17}, [r2], r3
        pld             [r2]
        vld1.8          {d18}, [r2], r3
        pld             [r2]
        vld1.8          {d19}, [r2], r3
        pld             [r2]
        vld1.8          {d20}, [r2], r3
        pld             [r2]
        vld1.8          {d21}, [r2], r3
        pld             [r2]
        vld1.8          {d22}, [r2], r3
        pld             [r2]
        vld1.8          {d23}, [r2], r3
.endm

.macro qpel_filter_1_32b
        vmov.i16        d16, #58
        vmov.i16        d17, #10
        vmull.s16       q9,  d6,  d16   // 58 * d0
        vmull.s16       q10, d7,  d16   // 58 * d1
        vmov.i16        d16, #17
        vmull.s16       q11, d4,  d17   // 10 * c0
        vmull.s16       q12, d5,  d17   // 10 * c1
        vmov.i16        d17, #5
        vmull.s16       q13, d8,  d16   // 17 * e0
        vmull.s16       q14, d9,  d16   // 17 * e1
        vmull.s16       q15, d10, d17   //  5 * f0
        vmull.s16       q8,  d11, d17   //  5 * f1
        vsub.s32        q9,  q11        // 58 * d0 - 10 * c0
        vsub.s32        q10, q12        // 58 * d1 - 10 * c1
        vshll.s16       q11, d2,  #2    // 4 * b0
        vshll.s16       q12, d3,  #2    // 4 * b1
        vadd.s32        q9,  q13        // 58 * d0 - 10 * c0 + 17 * e0
        vadd.s32        q10, q14        // 58 * d1 - 10 * c1 + 17 * e1
        vsubl.s16       q13, d12, d0    // g0 - a0
        vsubl.s16       q14, d13, d1    // g1 - a1
        vadd.s32        q9,  q11        // 58 * d0 - 10 * c0 + 17 * e0 + 4 * b0
        vadd.s32        q10, q12        // 58 * d1 - 10 * c1 + 17 * e1 + 4 * b1
        vsub.s32        q13, q15        // g0 - a0 - 5 * f0
        vsub.s32        q14, q8         // g1 - a1 - 5 * f1
        vadd.s32        q9,  q13        // 58 * d0 - 10 * c0 + 17 * e0 + 4 * b0 + g0 - a0 - 5 * f0
        vadd.s32        q10, q14        // 58 * d1 - 10 * c1 + 17 * e1 + 4 * b1 + g1 - a1 - 5 * f1
        vqshrn.s32      d16, q9,  #6
        vqshrn.s32      d17, q10, #6
.endm

// input  q0 - q7
// output q8
.macro qpel_filter_2_32b
        vmov.i32        q8,  #11
        vaddl.s16       q9,  d6,  d8    // d0 + e0
        vaddl.s16       q10, d7,  d9    // d1 + e1
        vaddl.s16       q11, d4,  d10   // c0 + f0
        vaddl.s16       q12, d5,  d11   // c1 + f1
        vmul.s32        q11, q8         // 11 * (c0 + f0)
        vmul.s32        q12, q8         // 11 * (c1 + f1)
        vmov.i32        q8,  #40
        vaddl.s16       q15, d2,  d12   // b0 + g0
        vmul.s32        q9,  q8         // 40 * (d0 + e0)
        vmul.s32        q10, q8         // 40 * (d1 + e1)
        vaddl.s16       q8,  d3,  d13   // b1 + g1
        vaddl.s16       q13, d0,  d14   // a0 + h0
        vaddl.s16       q14, d1,  d15   // a1 + h1
        vshl.s32        q15, #2         // 4*(b0+g0)
        vshl.s32        q8,  #2         // 4*(b1+g1)
        vadd.s32        q11, q13        // 11 * (c0 + f0) + a0 + h0
        vadd.s32        q12, q14        // 11 * (c1 + f1) + a1 + h1
        vadd.s32        q9,  q15        // 40 * (d0 + e0) + 4*(b0+g0)
        vadd.s32        q10, q8         // 40 * (d1 + e1) + 4*(b1+g1)
        vsub.s32        q9,  q11        // 40 * (d0 + e0) + 4*(b0+g0) - (11 * (c0 + f0) + a0 + h0)
        vsub.s32        q10, q12        // 40 * (d1 + e1) + 4*(b1+g1) - (11 * (c1 + f1) + a1 + h1)
        vqshrn.s32      d16, q9,  #6
        vqshrn.s32      d17, q10, #6
.endm

.macro qpel_filter_3_32b
        vmov.i16        d16, #58
        vmov.i16        d17, #10
        vmull.s16       q9,  d8,  d16   // 58 * d0
        vmull.s16       q10, d9,  d16   // 58 * d1
        vmov.i16        d16, #17
        vmull.s16       q11, d10, d17   // 10 * c0
        vmull.s16       q12, d11, d17   // 10 * c1
        vmov.i16        d17, #5
        vmull.s16       q13, d6,  d16   // 17 * e0
        vmull.s16       q14, d7,  d16   // 17 * e1
        vmull.s16       q15, d4,  d17   //  5 * f0
        vmull.s16       q8,  d5,  d17   //  5 * f1
        vsub.s32        q9,  q11        // 58 * d0 - 10 * c0
        vsub.s32        q10, q12        // 58 * d1 - 10 * c1
        vshll.s16       q11, d12, #2    // 4 * b0
        vshll.s16       q12, d13, #2    // 4 * b1
        vadd.s32        q9,  q13        // 58 * d0 - 10 * c0 + 17 * e0
        vadd.s32        q10, q14        // 58 * d1 - 10 * c1 + 17 * e1
        vsubl.s16       q13, d2,  d14   // g0 - a0
        vsubl.s16       q14, d3,  d15   // g1 - a1
        vadd.s32        q9,  q11        // 58 * d0 - 10 * c0 + 17 * e0 + 4 * b0
        vadd.s32        q10, q12        // 58 * d1 - 10 * c1 + 17 * e1 + 4 * b1
        vsub.s32        q13, q15        // g0 - a0 - 5 * f0
        vsub.s32        q14, q8         // g1 - a1 - 5 * f1
        vadd.s32        q9,  q13        // 58 * d0 - 10 * c0 + 17 * e0 + 4 * b0 + g0 - a0 - 5 * f0
        vadd.s32        q10, q14        // 58 * d1 - 10 * c1 + 17 * e1 + 4 * b1 + g1 - a1 - 5 * f1
        vqshrn.s32      d16, q9,  #6
        vqshrn.s32      d17, q10, #6
.endm

.macro qpel_filter_1 out=q7
        vmov.u8         d24, #58
        vmov.u8         d25, #10
        vshll.u8        q13, d20, #4    // 16*e
        vshll.u8        q14, d21, #2    // 4*f
        vmull.u8        \out, d19, d24  // 58*d
        vaddw.u8        q13, q13, d20   // 17*e
        vmull.u8        q15, d18, d25   // 10*c
        vaddw.u8        q14, q14, d21   // 5*f
        vsubl.u8        q12, d22, d16   // g - a
        vadd.u16        \out, q13       // 58d + 17e
        vshll.u8        q13, d17, #2    // 4*b
        vadd.u16        q15, q14        // 10*c + 5*f
        vadd.s16        q13, q12        // - a + 4*b + g
        vsub.s16        \out, q15       // -10*c + 58*d + 17*e -5*f
        vadd.s16        \out, q13       // -a + 4*b -10*c + 58*d + 17*e -5*f
.endm

.macro qpel_filter_2 out=q7
        vmov.i16        q12, #10
        vmov.i16        q14, #11
        vaddl.u8        q13, d19, d20   // d + e
        vaddl.u8        q15, d18, d21   // c + f
        vmul.u16        q13, q12        // 10 * (d+e)
        vmul.u16        q15, q14        // 11 * ( c + f)
        vaddl.u8        \out, d17, d22  // b + g
        vaddl.u8        q12, d16, d23   // a + h
        vadd.u16        \out, q13       // b + 10 * (d + e) + g
        vadd.s16        q12, q15
        vshl.u16        \out, #2        // 4 * (b + 10 * (d + e) + g)
        vsub.s16        \out, q12
.endm

.macro qpel_filter_3 out=q7
        vmov.u8         d24, #58
        vmov.u8         d25, #10
        vshll.u8        q13, d19, #4    // 16*e
        vshll.u8        q14, d18, #2    // 4*f
        vmull.u8        \out, d20, d24  // 58*d
        vaddw.u8        q13, q13, d19   // 17*e
        vmull.u8        q15, d21, d25   // 10*c
        vaddw.u8        q14, q14, d18   // 5*f
        vsubl.u8        q12, d17, d23   // g - a
        vadd.u16        \out, q13       // 58d + 17e
        vshll.u8        q13, d22, #2    // 4*b
        vadd.u16        q15, q14        // 10*c + 5*f
        vadd.s16        q13, q12        // - a + 4*b + g
        vsub.s16        \out, q15       // -10*c + 58*d + 17*e -5*f
        vadd.s16        \out, q13       // -a + 4*b -10*c + 58*d + 17*e -5*f
.endm

.macro hevc_put_qpel_vX_neon_8 filter
        push            {r4, r5, r6, r7}
        ldr             r4,  [sp, #16]  // height
        ldr             r5,  [sp, #20]  // width
        vpush           {d8-d15}
        sub             r2,  r2,  r3, lsl #1
        sub             r2,  r3
        mov             r12, r4
        mov             r6,  r0
        mov             r7,  r2
        lsl             r1,  #1
0:      loadin8
        cmp             r5,  #4
        beq             4f
8:      subs            r4,  #1
        \filter
        vst1.16         {q7}, [r0], r1
        regshuffle_d8
        vld1.8          {d23}, [r2], r3
        bne             8b
        subs            r5,  #8
        beq             99f
        mov             r4,  r12
        add             r6,  #16
        mov             r0,  r6
        add             r7,  #8
        mov             r2,  r7
        b               0b
4:      subs            r4,  #1
        \filter
        vst1.16         d14, [r0], r1
        regshuffle_d8
        vld1.32         {d23[0]}, [r2], r3
        bne             4b
99:     vpop            {d8-d15}
        pop             {r4, r5, r6, r7}
        bx              lr
.endm

.macro hevc_put_qpel_uw_vX_neon_8 filter
        push            {r4-r10}
        ldr             r5,  [sp, #28]  // width
        ldr             r4,  [sp, #32]  // height
        ldr             r8,  [sp, #36]  // src2
        ldr             r9,  [sp, #40]  // src2stride
        vpush           {d8-d15}
        sub             r2,  r2,  r3, lsl #1
        sub             r2,  r3
        mov             r12, r4
        mov             r6,  r0
        mov             r7,  r2
        cmp             r8,  #0
        bne             .Lbi\@
0:      loadin8
        cmp             r5,  #4
        beq             4f
8:      subs            r4,  #1
        \filter
        vqrshrun.s16    d0,  q7,  #6
        vst1.8          d0,  [r0], r1
        regshuffle_d8
        vld1.8          {d23}, [r2], r3
        bne             8b
        subs            r5,  #8
        beq             99f
        mov             r4,  r12
        add             r6,  #8
        mov             r0,  r6
        add             r7,  #8
        mov             r2,  r7
        b               0b
4:      subs            r4,  #1
        \filter
        vqrshrun.s16    d0,  q7,  #6
        vst1.32         d0[0], [r0], r1
        regshuffle_d8
        vld1.32         {d23[0]}, [r2], r3
        bne             4b
        b               99f
.Lbi\@: lsl             r9,  #1
        mov             r10, r8
0:      loadin8
        cmp             r5,  #4
        beq             4f
8:      subs            r4,  #1
        \filter
        vld1.16         {q0}, [r8], r9
        vqadd.s16       q0,  q7
        vqrshrun.s16    d0,  q0,  #7
        vst1.8          d0,  [r0], r1
        regshuffle_d8
        vld1.8          {d23}, [r2], r3
        bne             8b
        subs            r5,  #8
        beq             99f
        mov             r4,  r12
        add             r6,  #8
        mov             r0,  r6
        add             r10, #16
        mov             r8,  r10
        add             r7,  #8
        mov             r2,  r7
        b               0b
4:      subs            r4,  #1
        \filter
        vld1.16         d0,  [r8], r9
        vqadd.s16       d0,  d14
        vqrshrun.s16    d0,  q0,  #7
        vst1.32         d0[0], [r0], r1
        regshuffle_d8
        vld1.32         {d23[0]}, [r2], r3
        bne             4b
99:     vpop            {d8-d15}
        pop             {r4-r10}
        bx              lr
.endm

function ff_hevc_put_qpel_v1_neon_8, export=1
        hevc_put_qpel_vX_neon_8 qpel_filter_1
endfunc

function ff_hevc_put_qpel_v2_neon_8, export=1
        hevc_put_qpel_vX_neon_8 qpel_filter_2
endfunc

function ff_hevc_put_qpel_v3_neon_8, export=1
        hevc_put_qpel_vX_neon_8 qpel_filter_3
endfunc

function ff_hevc_put_qpel_uw_v1_neon_8, export=1
        hevc_put_qpel_uw_vX_neon_8 qpel_filter_1
endfunc

function ff_hevc_put_qpel_uw_v2_neon_8, export=1
        hevc_put_qpel_uw_vX_neon_8 qpel_filter_2
endfunc

function ff_hevc_put_qpel_uw_v3_neon_8, export=1
        hevc_put_qpel_uw_vX_neon_8 qpel_filter_3
endfunc

.macro hevc_put_qpel_hX_neon_8 filter
        push            {r4, r5, r6, r7}
        ldr             r4,  [sp, #16]  // height
        ldr             r5,  [sp, #20]  // width
        vpush           {d8-d15}
        sub             r2,  #4
        lsl             r1,  #1
        mov             r12, r4
        mov             r6,  r0
        mov             r7,  r2
        cmp             r5,  #4
        beq             4f
8:      subs            r4,  #1
        vextin8
        \filter
        vst1.16         {q7}, [r0], r1
        bne             8b
        subs            r5,  #8
        beq             99f
        mov             r4,  r12
        add             r6,  #16
        mov             r0,  r6
        add             r7,  #8
        mov             r2,  r7
        cmp             r5,  #4
        bne             8b
4:      subs            r4,  #1
        vextin8
        \filter
        vst1.16         d14, [r0], r1
        bne             4b
99:     vpop            {d8-d15}
        pop             {r4, r5, r6, r7}
        bx              lr
.endm

.macro hevc_put_qpel_uw_hX_neon_8 filter
        push            {r4-r10}
        ldr             r5,  [sp, #28]  // width
        ldr             r4,  [sp, #32]  // height
        ldr             r8,  [sp, #36]  // src2
        ldr             r9,  [sp, #40]  // src2stride
        vpush           {d8-d15}
        sub             r2,  #4
        mov             r12, r4
        mov             r6,  r0
        mov             r7,  r2
        cmp             r8,  #0
        bne             .Lbi\@
        cmp             r5,  #4
        beq             4f
8:      subs            r4,  #1
        vextin8
        \filter
        vqrshrun.s16    d0,  q7,  #6
        vst1.8          d0,  [r0], r1
        bne             8b
        subs            r5,  #8
        beq             99f
        mov             r4,  r12
        add             r6,  #8
        mov             r0,  r6
        add             r7,  #8
        mov             r2,  r7
        cmp             r5,  #4
        bne             8b
4:      subs            r4,  #1
        vextin8
        \filter
        vqrshrun.s16    d0,  q7,  #6
        vst1.32         d0[0], [r0], r1
        bne             4b
        b               99f
.Lbi\@: lsl             r9,  #1
        cmp             r5,  #4
        beq             4f
        mov             r10, r8
8:      subs            r4,  #1
        vextin8
        \filter
        vld1.16         {q0}, [r8], r9
        vqadd.s16       q0,  q7
        vqrshrun.s16    d0,  q0,  #7
        vst1.8          d0,  [r0], r1
        bne             8b
        subs            r5,  #8
        beq             99f
        mov             r4,  r12
        add             r6,  #8
        add             r10, #16
        mov             r8,  r10
        mov             r0,  r6
        add             r7,  #8
        mov             r2,  r7
        cmp             r5,  #4
        bne             8b
4:      subs            r4,  #1
        vextin8
        \filter
        vld1.16         d0,  [r8], r9
        vqadd.s16       d0,  d14
        vqrshrun.s16    d0,  q0,  #7
        vst1.32         d0[0], [r0], r1
        bne             4b
99:     vpop            {d8-d15}
        pop             {r4-r10}
        bx              lr
.endm

function ff_hevc_put_qpel_h1_neon_8, export=1
        hevc_put_qpel_hX_neon_8 qpel_filter_1
endfunc

function ff_hevc_put_qpel_h2_neon_8, export=1
        hevc_put_qpel_hX_neon_8 qpel_filter_2
endfunc

function ff_hevc_put_qpel_h3_neon_8, export=1
        hevc_put_qpel_hX_neon_8 qpel_filter_3
endfunc

function ff_hevc_put_qpel_uw_h1_neon_8, export=1
        hevc_put_qpel_uw_hX_neon_8 qpel_filter_1
endfunc

function ff_hevc_put_qpel_uw_h2_neon_8, export=1
        hevc_put_qpel_uw_hX_neon_8 qpel_filter_2
endfunc

function ff_hevc_put_qpel_uw_h3_neon_8, export=1
        hevc_put_qpel_uw_hX_neon_8 qpel_filter_3
endfunc

.macro hevc_put_qpel_hXvY_neon_8 filterh filterv
        push            {r4, r5, r6, r7}
        ldr             r4,  [sp, #16]  // height
        ldr             r5,  [sp, #20]  // width
        vpush           {d8-d15}
        sub             r2,  #4
        sub             r2,  r2,  r3, lsl #1
        sub             r2,  r3         // extra_before 3
        lsl             r1,  #1
        mov             r12,
r4 mov r6, r0 mov r7, r2 0: vextin8 \filterh q0 vextin8 \filterh q1 vextin8 \filterh q2 vextin8 \filterh q3 vextin8 \filterh q4 vextin8 \filterh q5 vextin8 \filterh q6 vextin8 \filterh q7 cmp r5, #4 beq 4f 8: subs r4, #1 \filterv vst1.16 {q8}, [r0], r1 regshuffle_q8 vextin8 \filterh q7 bne 8b subs r5, #8 beq 99f mov r4, r12 add r6, #16 mov r0, r6 add r7, #8 mov r2, r7 b 0b 4: subs r4, #1 \filterv vst1.16 d16, [r0], r1 regshuffle_q8 vextin8 \filterh q7 bne 4b 99: vpop {d8-d15} pop {r4, r5, r6, r7} bx lr .endm .macro hevc_put_qpel_uw_hXvY_neon_8 filterh filterv push {r4-r10} ldr r5, [sp, #28] // width ldr r4, [sp, #32] // height ldr r8, [sp, #36] // src2 ldr r9, [sp, #40] // src2stride vpush {d8-d15} sub r2, #4 sub r2, r2, r3, lsl #1 sub r2, r3 // extra_before 3 mov r12, r4 mov r6, r0 mov r7, r2 cmp r8, #0 bne .Lbi\@ 0: vextin8 \filterh q0 vextin8 \filterh q1 vextin8 \filterh q2 vextin8 \filterh q3 vextin8 \filterh q4 vextin8 \filterh q5 vextin8 \filterh q6 vextin8 \filterh q7 cmp r5, #4 beq 4f 8: subs r4, #1 \filterv vqrshrun.s16 d0, q8, #6 vst1.8 d0, [r0], r1 regshuffle_q8 vextin8 \filterh q7 bne 8b subs r5, #8 beq 99f mov r4, r12 add r6, #8 mov r0, r6 add r7, #8 mov r2, r7 b 0b 4: subs r4, #1 \filterv vqrshrun.s16 d0, q8, #6 vst1.32 d0[0], [r0], r1 regshuffle_q8 vextin8 \filterh q7 bne 4b b 99f .Lbi\@: lsl r9, #1 mov r10, r8 0: vextin8 \filterh q0 vextin8 \filterh q1 vextin8 \filterh q2 vextin8 \filterh q3 vextin8 \filterh q4 vextin8 \filterh q5 vextin8 \filterh q6 vextin8 \filterh q7 cmp r5, #4 beq 4f 8: subs r4, #1 \filterv vld1.16 {q0}, [r8], r9 vqadd.s16 q0, q8 vqrshrun.s16 d0, q0, #7 vst1.8 d0, [r0], r1 regshuffle_q8 vextin8 \filterh q7 bne 8b subs r5, #8 beq 99f mov r4, r12 add r6, #8 mov r0, r6 add r10, #16 mov r8, r10 add r7, #8 mov r2, r7 b 0b 4: subs r4, #1 \filterv vld1.16 d0, [r8], r9 vqadd.s16 d0, d16 vqrshrun.s16 d0, q0, #7 vst1.32 d0[0], [r0], r1 regshuffle_q8 vextin8 \filterh q7 bne 4b 99: vpop {d8-d15} pop {r4-r10} bx lr .endm function ff_hevc_put_qpel_h1v1_neon_8, export=1 hevc_put_qpel_hXvY_neon_8 qpel_filter_1 qpel_filter_1_32b endfunc function ff_hevc_put_qpel_h2v1_neon_8, export=1 hevc_put_qpel_hXvY_neon_8 qpel_filter_2 qpel_filter_1_32b endfunc function ff_hevc_put_qpel_h3v1_neon_8, export=1 hevc_put_qpel_hXvY_neon_8 qpel_filter_3 qpel_filter_1_32b endfunc function ff_hevc_put_qpel_h1v2_neon_8, export=1 hevc_put_qpel_hXvY_neon_8 qpel_filter_1 qpel_filter_2_32b endfunc function ff_hevc_put_qpel_h2v2_neon_8, export=1 hevc_put_qpel_hXvY_neon_8 qpel_filter_2 qpel_filter_2_32b endfunc function ff_hevc_put_qpel_h3v2_neon_8, export=1 hevc_put_qpel_hXvY_neon_8 qpel_filter_3 qpel_filter_2_32b endfunc function ff_hevc_put_qpel_h1v3_neon_8, export=1 hevc_put_qpel_hXvY_neon_8 qpel_filter_1 qpel_filter_3_32b endfunc function ff_hevc_put_qpel_h2v3_neon_8, export=1 hevc_put_qpel_hXvY_neon_8 qpel_filter_2 qpel_filter_3_32b endfunc function ff_hevc_put_qpel_h3v3_neon_8, export=1 hevc_put_qpel_hXvY_neon_8 qpel_filter_3 qpel_filter_3_32b endfunc function ff_hevc_put_qpel_uw_h1v1_neon_8, export=1 hevc_put_qpel_uw_hXvY_neon_8 qpel_filter_1 qpel_filter_1_32b endfunc function ff_hevc_put_qpel_uw_h2v1_neon_8, export=1 hevc_put_qpel_uw_hXvY_neon_8 qpel_filter_2 qpel_filter_1_32b endfunc function ff_hevc_put_qpel_uw_h3v1_neon_8, export=1 hevc_put_qpel_uw_hXvY_neon_8 qpel_filter_3 qpel_filter_1_32b endfunc function ff_hevc_put_qpel_uw_h1v2_neon_8, export=1 hevc_put_qpel_uw_hXvY_neon_8 qpel_filter_1 qpel_filter_2_32b endfunc function ff_hevc_put_qpel_uw_h2v2_neon_8, export=1 
hevc_put_qpel_uw_hXvY_neon_8 qpel_filter_2 qpel_filter_2_32b endfunc function ff_hevc_put_qpel_uw_h3v2_neon_8, export=1 hevc_put_qpel_uw_hXvY_neon_8 qpel_filter_3 qpel_filter_2_32b endfunc function ff_hevc_put_qpel_uw_h1v3_neon_8, export=1 hevc_put_qpel_uw_hXvY_neon_8 qpel_filter_1 qpel_filter_3_32b endfunc function ff_hevc_put_qpel_uw_h2v3_neon_8, export=1 hevc_put_qpel_uw_hXvY_neon_8 qpel_filter_2 qpel_filter_3_32b endfunc function ff_hevc_put_qpel_uw_h3v3_neon_8, export=1 hevc_put_qpel_uw_hXvY_neon_8 qpel_filter_3 qpel_filter_3_32b endfunc .macro init_put_pixels pld [r1] pld [r1, r2] mov r12, MAX_PB_SIZE lsl r12, #1 .endm function ff_hevc_put_pixels_w2_neon_8, export=1 init_put_pixels vmov.u8 d5, #255 vshr.u64 d5, #32 0: subs r3, #1 vld1.32 {d0[0]}, [r1], r2 pld [r1] vld1.32 d6, [r0] vshll.u8 q0, d0, #6 vbit d6, d0, d5 vst1.32 d6, [r0], r12 bne 0b bx lr endfunc function ff_hevc_put_pixels_w4_neon_8, export=1 init_put_pixels 0: subs r3, #2 vld1.32 {d0[0]}, [r1], r2 vld1.32 {d0[1]}, [r1], r2 pld [r1] pld [r1, r2] vshll.u8 q0, d0, #6 vst1.64 {d0}, [r0], r12 vst1.64 {d1}, [r0], r12 bne 0b bx lr endfunc function ff_hevc_put_pixels_w6_neon_8, export=1 init_put_pixels vmov.u8 q10, #255 vshr.u64 d21, #32 0: subs r3, #1 vld1.16 {d0}, [r1], r2 pld [r1] vshll.u8 q0, d0, #6 vld1.8 {q12}, [r0] vbit q12, q0, q10 vst1.8 {q12}, [r0], r12 bne 0b bx lr endfunc function ff_hevc_put_pixels_w8_neon_8, export=1 init_put_pixels 0: subs r3, #2 vld1.8 {d0}, [r1], r2 vld1.8 {d2}, [r1], r2 pld [r1] pld [r1, r2] vshll.u8 q0, d0, #6 vshll.u8 q1, d2, #6 vst1.16 {q0}, [r0], r12 vst1.16 {q1}, [r0], r12 bne 0b bx lr endfunc function ff_hevc_put_pixels_w12_neon_8, export=1 init_put_pixels 0: subs r3, #2 vld1.64 {d0}, [r1] add r1, #8 vld1.32 {d1[0]}, [r1], r2 sub r1, #8 vld1.64 {d2}, [r1] add r1, #8 vld1.32 {d1[1]}, [r1], r2 sub r1, #8 pld [r1] pld [r1, r2] vshll.u8 q8, d0, #6 vshll.u8 q9, d1, #6 vshll.u8 q10, d2, #6 vmov d22, d19 vst1.64 {d16, d17, d18}, [r0], r12 vst1.64 {d20, d21, d22}, [r0], r12 bne 0b bx lr endfunc function ff_hevc_put_pixels_w16_neon_8, export=1 init_put_pixels 0: subs r3, #2 vld1.8 {q0}, [r1], r2 vld1.8 {q1}, [r1], r2 pld [r1] pld [r1, r2] vshll.u8 q8, d0, #6 vshll.u8 q9, d1, #6 vshll.u8 q10, d2, #6 vshll.u8 q11, d3, #6 vst1.8 {q8, q9}, [r0], r12 vst1.8 {q10, q11}, [r0], r12 bne 0b bx lr endfunc function ff_hevc_put_pixels_w24_neon_8, export=1 init_put_pixels 0: subs r3, #1 vld1.8 {d0, d1, d2}, [r1], r2 pld [r1] vshll.u8 q10, d0, #6 vshll.u8 q11, d1, #6 vshll.u8 q12, d2, #6 vstm r0, {q10, q11, q12} add r0, r12 bne 0b bx lr endfunc function ff_hevc_put_pixels_w32_neon_8, export=1 init_put_pixels 0: subs r3, #1 vld1.8 {q0, q1}, [r1], r2 pld [r1] vshll.u8 q8, d0, #6 vshll.u8 q9, d1, #6 vshll.u8 q10, d2, #6 vshll.u8 q11, d3, #6 vstm r0, {q8, q9, q10, q11} add r0, r12 bne 0b bx lr endfunc function ff_hevc_put_pixels_w48_neon_8, export=1 init_put_pixels 0: subs r3, #1 vld1.8 {q0, q1}, [r1] add r1, #32 vld1.8 {q2}, [r1], r2 sub r1, #32 pld [r1] vshll.u8 q8, d0, #6 vshll.u8 q9, d1, #6 vshll.u8 q10, d2, #6 vshll.u8 q11, d3, #6 vshll.u8 q12, d4, #6 vshll.u8 q13, d5, #6 vstm r0, {q8, q9, q10, q11, q12, q13} add r0, r12 bne 0b bx lr endfunc function ff_hevc_put_pixels_w64_neon_8, export=1 init_put_pixels 0: subs r3, #1 vld1.8 {q0, q1}, [r1] add r1, #32 vld1.8 {q2, q3}, [r1], r2 sub r1, #32 pld [r1] vshll.u8 q8, d0, #6 vshll.u8 q9, d1, #6 vshll.u8 q10, d2, #6 vshll.u8 q11, d3, #6 vshll.u8 q12, d4, #6 vshll.u8 q13, d5, #6 vshll.u8 q14, d6, #6 vshll.u8 q15, d7, #6 vstm r0, {q8, q9, q10, q11, q12, q13, q14, q15} 
add r0, r12 bne 0b bx lr endfunc function ff_hevc_put_qpel_uw_pixels_neon_8, export=1 push {r4-r9} ldr r5, [sp, #24] // width ldr r4, [sp, #28] // height ldr r8, [sp, #32] // src2 ldr r9, [sp, #36] // src2stride vpush {d8-d15} cmp r8, #0 bne 2f 1: subs r4, #1 vld1.8 {d0}, [r2], r3 vst1.8 d0, [r0], r1 bne 1b vpop {d8-d15} pop {r4-r9} bx lr 2: subs r4, #1 vld1.8 {d0}, [r2], r3 vld1.16 {q1}, [r8], r9 vshll.u8 q0, d0, #6 vqadd.s16 q0, q1 vqrshrun.s16 d0, q0, #7 vst1.8 d0, [r0], r1 bne 2b vpop {d8-d15} pop {r4-r9} bx lr endfunc .macro put_qpel_uw_pixels width, regs, regs2, regs3, regs4 function ff_hevc_put_qpel_uw_pixels_w\width\()_neon_8, export=1 ldr r12, [sp] // height 1: subs r12, #4 vld1.32 {\regs} , [r2], r3 vld1.32 {\regs2} , [r2], r3 vld1.32 {\regs3} , [r2], r3 vld1.32 {\regs4} , [r2], r3 vst1.32 {\regs} , [r0], r1 vst1.32 {\regs2} , [r0], r1 vst1.32 {\regs3} , [r0], r1 vst1.32 {\regs4} , [r0], r1 bne 1b bx lr endfunc .endm .macro put_qpel_uw_pixels_m width, regs, regs2, regs3, regs4 function ff_hevc_put_qpel_uw_pixels_w\width\()_neon_8, export=1 push {r4-r5} ldr r12, [sp, #8] // height 1: subs r12, #2 mov r4, r2 vld1.32 {\regs} , [r2]! vld1.32 {\regs2} , [r2] add r2, r4, r3 mov r4, r2 vld1.32 {\regs3} , [r2]! vld1.32 {\regs4} , [r2] add r2, r4, r3 mov r5, r0 vst1.32 {\regs} , [r0]! vst1.32 {\regs2} , [r0] add r0, r5, r1 mov r5, r0 vst1.32 {\regs3} , [r0]! vst1.32 {\regs4} , [r0] add r0, r5, r1 bne 1b pop {r4-r5} bx lr endfunc .endm put_qpel_uw_pixels 4, d0[0], d0[1], d1[0], d1[1] put_qpel_uw_pixels 8, d0, d1, d2, d3 put_qpel_uw_pixels_m 12, d0, d1[0], d2, d3[0] put_qpel_uw_pixels 16, q0, q1, q2, q3 put_qpel_uw_pixels 24, d0-d2, d3-d5, d16-d18, d19-d21 put_qpel_uw_pixels 32, q0-q1, q2-q3, q8-q9, q10-q11 put_qpel_uw_pixels_m 48, q0-q1, q2, q8-q9, q10 put_qpel_uw_pixels_m 64, q0-q1, q2-q3, q8-q9, q10-q11
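The qpel_filter_1/2/3 macros above implement the HEVC luma quarter-pel interpolation taps spelled out in their comments (e.g. -a + 4*b - 10*c + 58*d + 17*e - 5*f + g for filter 1). Below is a minimal scalar C sketch of the same filtering; the coefficient rows are transcribed from those macro comments, and the function and table names are illustrative, not FFmpeg API:

#include <stddef.h>
#include <stdint.h>

/* Tap rows for qpel_filter_1/2/3, applied to the samples a..h around
 * the interpolation point d (tap index 3). */
static const int8_t qpel_taps[3][8] = {
    { -1, 4, -10, 58, 17,  -5, 1,  0 },   /* qpel_filter_1 */
    { -1, 4, -11, 40, 40, -11, 4, -1 },   /* qpel_filter_2 */
    {  0, 1,  -5, 17, 58, -10, 4, -1 },   /* qpel_filter_3 */
};

/* One output sample; src points at sample d, stride selects the
 * horizontal (1) or vertical (srcstride) direction. The caller shifts
 * and rounds the result, cf. the vqshrn.s32 #6 in the _32b macros. */
static int qpel_filter_ref(const uint8_t *src, ptrdiff_t stride, int which)
{
    const int8_t *taps = qpel_taps[which - 1];
    int sum = 0;
    for (int i = 0; i < 8; i++)
        sum += taps[i] * src[(i - 3) * stride];
    return sum;
}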
Akagi201/ffmpeg-xcode
1,825
ffmpeg-3.0.2/libavcodec/arm/int_neon.S
/*
 * ARM NEON optimised integer operations
 * Copyright (c) 2009 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"

function ff_scalarproduct_int16_neon, export=1
        vmov.i16        q0,  #0
        vmov.i16        q1,  #0
        vmov.i16        q2,  #0
        vmov.i16        q3,  #0
1:      vld1.16         {d16-d17}, [r0]!
        vld1.16         {d20-d21}, [r1,:128]!
        vmlal.s16       q0,  d16, d20
        vld1.16         {d18-d19}, [r0]!
        vmlal.s16       q1,  d17, d21
        vld1.16         {d22-d23}, [r1,:128]!
        vmlal.s16       q2,  d18, d22
        vmlal.s16       q3,  d19, d23
        subs            r2,  r2,  #16
        bgt             1b
        vpadd.s32       d16, d0,  d1
        vpadd.s32       d17, d2,  d3
        vpadd.s32       d18, d4,  d5
        vpadd.s32       d19, d6,  d7
        vpadd.s32       d0,  d16, d17
        vpadd.s32       d1,  d18, d19
        vpadd.s32       d2,  d0,  d1
        vpaddl.s32      d3,  d2
        vmov.32         r0,  d3[0]
        bx              lr
endfunc
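ff_scalarproduct_int16_neon above keeps four independent vmlal.s16 accumulator chains and reduces them with pairwise adds at the end. A scalar C equivalent of the value it computes, assuming (as the 16-samples-per-iteration loop does) that the element count is a multiple of 16; this is a sketch for illustration, not FFmpeg's own C fallback verbatim:

#include <stdint.h>

static int32_t scalarproduct_int16_ref(const int16_t *v1, const int16_t *v2,
                                       int order)
{
    int32_t sum = 0;      /* matches the 32-bit accumulation of vmlal.s16 */
    while (order-- > 0)
        sum += *v1++ * *v2++;
    return sum;
}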
Akagi201/ffmpeg-xcode
21,690
ffmpeg-3.0.2/libavcodec/arm/simple_idct_arm.S
/* * Copyright (C) 2002 Frederic 'dilb' Boulay * * Author: Frederic Boulay <dilb@handhelds.org> * * The function defined in this file is derived from the simple_idct function * from the libavcodec library part of the FFmpeg project. * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/arm/asm.S" /* useful constants for the algorithm */ #define W1 22725 #define W2 21407 #define W3 19266 #define W4 16383 #define W5 12873 #define W6 8867 #define W7 4520 #define MASK_MSHW 0xFFFF0000 #define ROW_SHIFT 11 #define ROW_SHIFT2MSHW (16-11) #define COL_SHIFT 20 #define ROW_SHIFTED_1 1024 /* 1<< (ROW_SHIFT-1) */ #define COL_SHIFTED_1 524288 /* 1<< (COL_SHIFT-1) */ function ff_simple_idct_arm, export=1 @@ void simple_idct_arm(int16_t *block) @@ save stack for reg needed (take all of them), @@ R0-R3 are scratch regs, so no need to save them, but R0 contains the pointer to block @@ so it must not be overwritten, if it is not saved!! @@ R12 is another scratch register, so it should not be saved too @@ save all registers stmfd sp!, {r4-r11, r14} @ R14 is also called LR @@ at this point, R0=block, other registers are free. add r14, r0, #112 @ R14=&block[8*7], better start from the last row, and decrease the value until row=0, i.e. R12=block. @@ add 2 temporary variables in the stack: R0 and R14 sub sp, sp, #8 @ allow 2 local variables str r0, [sp, #0] @ save block in sp[0] @@ stack status @@ sp+4 free @@ sp+0 R0 (block) @@ at this point, R0=block, R14=&block[56], R12=__const_ptr_, R1-R11 free __row_loop: @@ read the row and check if it is null, almost null, or not, according to strongarm specs, it is not necessary to optimize ldr accesses (i.e. split 32bits in 2 16bits words), at least it gives more usable registers :) ldr r1, [r14, #0] @ R1=(int32)(R12)[0]=ROWr32[0] (relative row cast to a 32b pointer) ldr r2, [r14, #4] @ R2=(int32)(R12)[1]=ROWr32[1] ldr r3, [r14, #8] @ R3=ROWr32[2] ldr r4, [r14, #12] @ R4=ROWr32[3] @@ check if the words are null, if all of them are null, then proceed with next row (branch __end_row_loop), @@ if ROWr16[0] is the only one not null, then proceed with this special case (branch __almost_empty_row) @@ else follow the complete algorithm. 
@@ at this point, R0=block, R14=&block[n], R12=__const_ptr_, R1=ROWr32[0], R2=ROWr32[1], @@ R3=ROWr32[2], R4=ROWr32[3], R5-R11 free orr r5, r4, r3 @ R5=R4 | R3 orr r5, r5, r2 @ R5=R4 | R3 | R2 orrs r6, r5, r1 @ Test R5 | R1 (the aim is to check if everything is null) beq __end_row_loop mov r7, r1, asr #16 @ R7=R1>>16=ROWr16[1] (evaluate it now, as it could be useful later) ldrsh r6, [r14, #0] @ R6=ROWr16[0] orrs r5, r5, r7 @ R5=R4 | R3 | R2 | R7 beq __almost_empty_row @@ __b_evaluation: @@ at this point, R0=block (temp), R1(free), R2=ROWr32[1], R3=ROWr32[2], R4=ROWr32[3], @@ R5=(temp), R6=ROWr16[0], R7=ROWr16[1], R8-R11 free, @@ R12=__const_ptr_, R14=&block[n] @@ to save some registers/calls, proceed with b0-b3 first, followed by a0-a3 @@ MUL16(b0, W1, row[1]); @@ MUL16(b1, W3, row[1]); @@ MUL16(b2, W5, row[1]); @@ MUL16(b3, W7, row[1]); @@ MAC16(b0, W3, row[3]); @@ MAC16(b1, -W7, row[3]); @@ MAC16(b2, -W1, row[3]); @@ MAC16(b3, -W5, row[3]); ldr r8, =W1 @ R8=W1 mov r2, r2, asr #16 @ R2=ROWr16[3] mul r0, r8, r7 @ R0=W1*ROWr16[1]=b0 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle) ldr r9, =W3 @ R9=W3 ldr r10, =W5 @ R10=W5 mul r1, r9, r7 @ R1=W3*ROWr16[1]=b1 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle) ldr r11, =W7 @ R11=W7 mul r5, r10, r7 @ R5=W5*ROWr16[1]=b2 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle) mul r7, r11, r7 @ R7=W7*ROWr16[1]=b3 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle) teq r2, #0 @ if null avoid muls itttt ne mlane r0, r9, r2, r0 @ R0+=W3*ROWr16[3]=b0 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle) rsbne r2, r2, #0 @ R2=-ROWr16[3] mlane r1, r11, r2, r1 @ R1-=W7*ROWr16[3]=b1 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle) mlane r5, r8, r2, r5 @ R5-=W1*ROWr16[3]=b2 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle) it ne mlane r7, r10, r2, r7 @ R7-=W5*ROWr16[3]=b3 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle) @@ at this point, R0=b0, R1=b1, R2 (free), R3=ROWr32[2], R4=ROWr32[3], @@ R5=b2, R6=ROWr16[0], R7=b3, R8=W1, R9=W3, R10=W5, R11=W7, @@ R12=__const_ptr_, R14=&block[n] @@ temp = ((uint32_t*)row)[2] | ((uint32_t*)row)[3]; @@ if (temp != 0) {} orrs r2, r3, r4 @ R2=ROWr32[2] | ROWr32[3] beq __end_b_evaluation @@ at this point, R0=b0, R1=b1, R2 (free), R3=ROWr32[2], R4=ROWr32[3], @@ R5=b2, R6=ROWr16[0], R7=b3, R8=W1, R9=W3, R10=W5, R11=W7, @@ R12=__const_ptr_, R14=&block[n] @@ MAC16(b0, W5, row[5]); @@ MAC16(b2, W7, row[5]); @@ MAC16(b3, W3, row[5]); @@ MAC16(b1, -W1, row[5]); @@ MAC16(b0, W7, row[7]); @@ MAC16(b2, W3, row[7]); @@ MAC16(b3, -W1, row[7]); @@ MAC16(b1, -W5, row[7]); mov r3, r3, asr #16 @ R3=ROWr16[5] teq r3, #0 @ if null avoid muls it ne mlane r0, r10, r3, r0 @ R0+=W5*ROWr16[5]=b0 mov r4, r4, asr #16 @ R4=ROWr16[7] itttt ne mlane r5, r11, r3, r5 @ R5+=W7*ROWr16[5]=b2 mlane r7, r9, r3, r7 @ R7+=W3*ROWr16[5]=b3 rsbne r3, r3, #0 @ R3=-ROWr16[5] mlane r1, r8, r3, r1 @ R7-=W1*ROWr16[5]=b1 @@ R3 is free now teq r4, #0 @ if null avoid muls itttt ne mlane r0, r11, r4, r0 @ R0+=W7*ROWr16[7]=b0 mlane r5, r9, r4, r5 @ R5+=W3*ROWr16[7]=b2 rsbne r4, r4, #0 @ R4=-ROWr16[7] mlane r7, r8, r4, r7 @ R7-=W1*ROWr16[7]=b3 it ne mlane r1, r10, r4, r1 @ R1-=W5*ROWr16[7]=b1 @@ R4 is free now __end_b_evaluation: @@ at this point, R0=b0, R1=b1, R2=ROWr32[2] | ROWr32[3] (tmp), R3 (free), R4 (free), @@ R5=b2, R6=ROWr16[0], R7=b3, R8 (free), R9 (free), R10 (free), 
R11 (free), @@ R12=__const_ptr_, R14=&block[n] @@ __a_evaluation: @@ a0 = (W4 * row[0]) + (1 << (ROW_SHIFT - 1)); @@ a1 = a0 + W6 * row[2]; @@ a2 = a0 - W6 * row[2]; @@ a3 = a0 - W2 * row[2]; @@ a0 = a0 + W2 * row[2]; ldr r9, =W4 @ R9=W4 mul r6, r9, r6 @ R6=W4*ROWr16[0] ldr r10, =W6 @ R10=W6 ldrsh r4, [r14, #4] @ R4=ROWr16[2] (a3 not defined yet) add r6, r6, #ROW_SHIFTED_1 @ R6=W4*ROWr16[0] + 1<<(ROW_SHIFT-1) (a0) mul r11, r10, r4 @ R11=W6*ROWr16[2] ldr r8, =W2 @ R8=W2 sub r3, r6, r11 @ R3=a0-W6*ROWr16[2] (a2) @@ temp = ((uint32_t*)row)[2] | ((uint32_t*)row)[3]; @@ if (temp != 0) {} teq r2, #0 beq __end_bef_a_evaluation add r2, r6, r11 @ R2=a0+W6*ROWr16[2] (a1) mul r11, r8, r4 @ R11=W2*ROWr16[2] sub r4, r6, r11 @ R4=a0-W2*ROWr16[2] (a3) add r6, r6, r11 @ R6=a0+W2*ROWr16[2] (a0) @@ at this point, R0=b0, R1=b1, R2=a1, R3=a2, R4=a3, @@ R5=b2, R6=a0, R7=b3, R8=W2, R9=W4, R10=W6, R11 (free), @@ R12=__const_ptr_, R14=&block[n] @@ a0 += W4*row[4] @@ a1 -= W4*row[4] @@ a2 -= W4*row[4] @@ a3 += W4*row[4] ldrsh r11, [r14, #8] @ R11=ROWr16[4] teq r11, #0 @ if null avoid muls it ne mulne r11, r9, r11 @ R11=W4*ROWr16[4] @@ R9 is free now ldrsh r9, [r14, #12] @ R9=ROWr16[6] itttt ne addne r6, r6, r11 @ R6+=W4*ROWr16[4] (a0) subne r2, r2, r11 @ R2-=W4*ROWr16[4] (a1) subne r3, r3, r11 @ R3-=W4*ROWr16[4] (a2) addne r4, r4, r11 @ R4+=W4*ROWr16[4] (a3) @@ W6 alone is no more useful, save W2*ROWr16[6] in it instead teq r9, #0 @ if null avoid muls itttt ne mulne r11, r10, r9 @ R11=W6*ROWr16[6] addne r6, r6, r11 @ R6+=W6*ROWr16[6] (a0) mulne r10, r8, r9 @ R10=W2*ROWr16[6] @@ a0 += W6*row[6]; @@ a3 -= W6*row[6]; @@ a1 -= W2*row[6]; @@ a2 += W2*row[6]; subne r4, r4, r11 @ R4-=W6*ROWr16[6] (a3) itt ne subne r2, r2, r10 @ R2-=W2*ROWr16[6] (a1) addne r3, r3, r10 @ R3+=W2*ROWr16[6] (a2) __end_a_evaluation: @@ at this point, R0=b0, R1=b1, R2=a1, R3=a2, R4=a3, @@ R5=b2, R6=a0, R7=b3, R8 (free), R9 (free), R10 (free), R11 (free), @@ R12=__const_ptr_, R14=&block[n] @@ row[0] = (a0 + b0) >> ROW_SHIFT; @@ row[1] = (a1 + b1) >> ROW_SHIFT; @@ row[2] = (a2 + b2) >> ROW_SHIFT; @@ row[3] = (a3 + b3) >> ROW_SHIFT; @@ row[4] = (a3 - b3) >> ROW_SHIFT; @@ row[5] = (a2 - b2) >> ROW_SHIFT; @@ row[6] = (a1 - b1) >> ROW_SHIFT; @@ row[7] = (a0 - b0) >> ROW_SHIFT; add r8, r6, r0 @ R8=a0+b0 add r9, r2, r1 @ R9=a1+b1 @@ put 2 16 bits half-words in a 32bits word @@ ROWr32[0]=ROWr16[0] | (ROWr16[1]<<16) (only Little Endian compliant then!!!) 
ldr r10, =MASK_MSHW @ R10=0xFFFF0000 and r9, r10, r9, lsl #ROW_SHIFT2MSHW @ R9=0xFFFF0000 & ((a1+b1)<<5) mvn r11, r10 @ R11= NOT R10= 0x0000FFFF and r8, r11, r8, asr #ROW_SHIFT @ R8=0x0000FFFF & ((a0+b0)>>11) orr r8, r8, r9 str r8, [r14, #0] add r8, r3, r5 @ R8=a2+b2 add r9, r4, r7 @ R9=a3+b3 and r9, r10, r9, lsl #ROW_SHIFT2MSHW @ R9=0xFFFF0000 & ((a3+b3)<<5) and r8, r11, r8, asr #ROW_SHIFT @ R8=0x0000FFFF & ((a2+b2)>>11) orr r8, r8, r9 str r8, [r14, #4] sub r8, r4, r7 @ R8=a3-b3 sub r9, r3, r5 @ R9=a2-b2 and r9, r10, r9, lsl #ROW_SHIFT2MSHW @ R9=0xFFFF0000 & ((a2-b2)<<5) and r8, r11, r8, asr #ROW_SHIFT @ R8=0x0000FFFF & ((a3-b3)>>11) orr r8, r8, r9 str r8, [r14, #8] sub r8, r2, r1 @ R8=a1-b1 sub r9, r6, r0 @ R9=a0-b0 and r9, r10, r9, lsl #ROW_SHIFT2MSHW @ R9=0xFFFF0000 & ((a0-b0)<<5) and r8, r11, r8, asr #ROW_SHIFT @ R8=0x0000FFFF & ((a1-b1)>>11) orr r8, r8, r9 str r8, [r14, #12] bal __end_row_loop __almost_empty_row: @@ the row was empty, except ROWr16[0], now, management of this special case @@ at this point, R0=block, R14=&block[n], R12=__const_ptr_, R1=ROWr32[0], R2=ROWr32[1], @@ R3=ROWr32[2], R4=ROWr32[3], R5=(temp), R6=ROWr16[0], R7=ROWr16[1], @@ R8=0xFFFF (temp), R9-R11 free mov r8, #0x10000 @ R8=0xFFFF (2 steps needed!) it saves a ldr call (because of delay run). sub r8, r8, #1 @ R8 is now ready. and r5, r8, r6, lsl #3 @ R5=R8 & (R6<<3)= (ROWr16[0]<<3) & 0xFFFF orr r5, r5, r5, lsl #16 @ R5=R5 | (R5<<16) str r5, [r14, #0] @ R14[0]=ROWr32[0]=R5 str r5, [r14, #4] @ R14[4]=ROWr32[1]=R5 str r5, [r14, #8] @ R14[8]=ROWr32[2]=R5 str r5, [r14, #12] @ R14[12]=ROWr32[3]=R5 __end_row_loop: @@ at this point, R0-R11 (free) @@ R12=__const_ptr_, R14=&block[n] ldr r0, [sp, #0] @ R0=block teq r0, r14 @ compare current &block[8*n] to block, when block is reached, the loop is finished. sub r14, r14, #16 bne __row_loop @@ at this point, R0=block, R1-R11 (free) @@ R12=__const_ptr_, R14=&block[n] add r14, r0, #14 @ R14=&block[7], better start from the last col, and decrease the value until col=0, i.e. R14=block. 
__col_loop: @@ __b_evaluation2: @@ at this point, R0=block (temp), R1-R11 (free) @@ R12=__const_ptr_, R14=&block[n] @@ proceed with b0-b3 first, followed by a0-a3 @@ MUL16(b0, W1, col[8x1]); @@ MUL16(b1, W3, col[8x1]); @@ MUL16(b2, W5, col[8x1]); @@ MUL16(b3, W7, col[8x1]); @@ MAC16(b0, W3, col[8x3]); @@ MAC16(b1, -W7, col[8x3]); @@ MAC16(b2, -W1, col[8x3]); @@ MAC16(b3, -W5, col[8x3]); ldr r8, =W1 @ R8=W1 ldrsh r7, [r14, #16] mul r0, r8, r7 @ R0=W1*ROWr16[1]=b0 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle) ldr r9, =W3 @ R9=W3 ldr r10, =W5 @ R10=W5 mul r1, r9, r7 @ R1=W3*ROWr16[1]=b1 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle) ldr r11, =W7 @ R11=W7 mul r5, r10, r7 @ R5=W5*ROWr16[1]=b2 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle) ldrsh r2, [r14, #48] mul r7, r11, r7 @ R7=W7*ROWr16[1]=b3 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle) teq r2, #0 @ if 0, then avoid muls itttt ne mlane r0, r9, r2, r0 @ R0+=W3*ROWr16[3]=b0 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle) rsbne r2, r2, #0 @ R2=-ROWr16[3] mlane r1, r11, r2, r1 @ R1-=W7*ROWr16[3]=b1 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle) mlane r5, r8, r2, r5 @ R5-=W1*ROWr16[3]=b2 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle) it ne mlane r7, r10, r2, r7 @ R7-=W5*ROWr16[3]=b3 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle) @@ at this point, R0=b0, R1=b1, R2 (free), R3 (free), R4 (free), @@ R5=b2, R6 (free), R7=b3, R8=W1, R9=W3, R10=W5, R11=W7, @@ R12=__const_ptr_, R14=&block[n] @@ MAC16(b0, W5, col[5x8]); @@ MAC16(b2, W7, col[5x8]); @@ MAC16(b3, W3, col[5x8]); @@ MAC16(b1, -W1, col[5x8]); @@ MAC16(b0, W7, col[7x8]); @@ MAC16(b2, W3, col[7x8]); @@ MAC16(b3, -W1, col[7x8]); @@ MAC16(b1, -W5, col[7x8]); ldrsh r3, [r14, #80] @ R3=COLr16[5x8] teq r3, #0 @ if 0 then avoid muls itttt ne mlane r0, r10, r3, r0 @ R0+=W5*ROWr16[5x8]=b0 mlane r5, r11, r3, r5 @ R5+=W7*ROWr16[5x8]=b2 mlane r7, r9, r3, r7 @ R7+=W3*ROWr16[5x8]=b3 rsbne r3, r3, #0 @ R3=-ROWr16[5x8] ldrsh r4, [r14, #112] @ R4=COLr16[7x8] it ne mlane r1, r8, r3, r1 @ R7-=W1*ROWr16[5x8]=b1 @@ R3 is free now teq r4, #0 @ if 0 then avoid muls itttt ne mlane r0, r11, r4, r0 @ R0+=W7*ROWr16[7x8]=b0 mlane r5, r9, r4, r5 @ R5+=W3*ROWr16[7x8]=b2 rsbne r4, r4, #0 @ R4=-ROWr16[7x8] mlane r7, r8, r4, r7 @ R7-=W1*ROWr16[7x8]=b3 it ne mlane r1, r10, r4, r1 @ R1-=W5*ROWr16[7x8]=b1 @@ R4 is free now @@ __end_b_evaluation2: @@ at this point, R0=b0, R1=b1, R2 (free), R3 (free), R4 (free), @@ R5=b2, R6 (free), R7=b3, R8 (free), R9 (free), R10 (free), R11 (free), @@ R12=__const_ptr_, R14=&block[n] @@ __a_evaluation2: @@ a0 = (W4 * col[8x0]) + (1 << (COL_SHIFT - 1)); @@ a1 = a0 + W6 * row[2]; @@ a2 = a0 - W6 * row[2]; @@ a3 = a0 - W2 * row[2]; @@ a0 = a0 + W2 * row[2]; ldrsh r6, [r14, #0] ldr r9, =W4 @ R9=W4 mul r6, r9, r6 @ R6=W4*ROWr16[0] ldr r10, =W6 @ R10=W6 ldrsh r4, [r14, #32] @ R4=ROWr16[2] (a3 not defined yet) add r6, r6, #COL_SHIFTED_1 @ R6=W4*ROWr16[0] + 1<<(COL_SHIFT-1) (a0) mul r11, r10, r4 @ R11=W6*ROWr16[2] ldr r8, =W2 @ R8=W2 add r2, r6, r11 @ R2=a0+W6*ROWr16[2] (a1) sub r3, r6, r11 @ R3=a0-W6*ROWr16[2] (a2) mul r11, r8, r4 @ R11=W2*ROWr16[2] sub r4, r6, r11 @ R4=a0-W2*ROWr16[2] (a3) add r6, r6, r11 @ R6=a0+W2*ROWr16[2] (a0) @@ at this point, R0=b0, R1=b1, R2=a1, R3=a2, R4=a3, @@ R5=b2, R6=a0, R7=b3, R8=W2, R9=W4, R10=W6, R11 (free), @@ R12=__const_ptr_, R14=&block[n] 
@@ a0 += W4*row[4] @@ a1 -= W4*row[4] @@ a2 -= W4*row[4] @@ a3 += W4*row[4] ldrsh r11, [r14, #64] @ R11=ROWr16[4] teq r11, #0 @ if null avoid muls itttt ne mulne r11, r9, r11 @ R11=W4*ROWr16[4] @@ R9 is free now addne r6, r6, r11 @ R6+=W4*ROWr16[4] (a0) subne r2, r2, r11 @ R2-=W4*ROWr16[4] (a1) subne r3, r3, r11 @ R3-=W4*ROWr16[4] (a2) ldrsh r9, [r14, #96] @ R9=ROWr16[6] it ne addne r4, r4, r11 @ R4+=W4*ROWr16[4] (a3) @@ W6 alone is no more useful, save W2*ROWr16[6] in it instead teq r9, #0 @ if null avoid muls itttt ne mulne r11, r10, r9 @ R11=W6*ROWr16[6] addne r6, r6, r11 @ R6+=W6*ROWr16[6] (a0) mulne r10, r8, r9 @ R10=W2*ROWr16[6] @@ a0 += W6*row[6]; @@ a3 -= W6*row[6]; @@ a1 -= W2*row[6]; @@ a2 += W2*row[6]; subne r4, r4, r11 @ R4-=W6*ROWr16[6] (a3) itt ne subne r2, r2, r10 @ R2-=W2*ROWr16[6] (a1) addne r3, r3, r10 @ R3+=W2*ROWr16[6] (a2) @@ __end_a_evaluation2: @@ at this point, R0=b0, R1=b1, R2=a1, R3=a2, R4=a3, @@ R5=b2, R6=a0, R7=b3, R8 (free), R9 (free), R10 (free), R11 (free), @@ R12=__const_ptr_, R14=&block[n] @@ col[0 ] = ((a0 + b0) >> COL_SHIFT); @@ col[8 ] = ((a1 + b1) >> COL_SHIFT); @@ col[16] = ((a2 + b2) >> COL_SHIFT); @@ col[24] = ((a3 + b3) >> COL_SHIFT); @@ col[32] = ((a3 - b3) >> COL_SHIFT); @@ col[40] = ((a2 - b2) >> COL_SHIFT); @@ col[48] = ((a1 - b1) >> COL_SHIFT); @@ col[56] = ((a0 - b0) >> COL_SHIFT); @@@@@ no optimization here @@@@@ add r8, r6, r0 @ R8=a0+b0 add r9, r2, r1 @ R9=a1+b1 mov r8, r8, asr #COL_SHIFT mov r9, r9, asr #COL_SHIFT strh r8, [r14, #0] strh r9, [r14, #16] add r8, r3, r5 @ R8=a2+b2 add r9, r4, r7 @ R9=a3+b3 mov r8, r8, asr #COL_SHIFT mov r9, r9, asr #COL_SHIFT strh r8, [r14, #32] strh r9, [r14, #48] sub r8, r4, r7 @ R8=a3-b3 sub r9, r3, r5 @ R9=a2-b2 mov r8, r8, asr #COL_SHIFT mov r9, r9, asr #COL_SHIFT strh r8, [r14, #64] strh r9, [r14, #80] sub r8, r2, r1 @ R8=a1-b1 sub r9, r6, r0 @ R9=a0-b0 mov r8, r8, asr #COL_SHIFT mov r9, r9, asr #COL_SHIFT strh r8, [r14, #96] strh r9, [r14, #112] @@ __end_col_loop: @@ at this point, R0-R11 (free) @@ R12=__const_ptr_, R14=&block[n] ldr r0, [sp, #0] @ R0=block teq r0, r14 @ compare current &block[n] to block, when block is reached, the loop is finished. sub r14, r14, #2 bne __col_loop @@ __end_simple_idct_arm: @@ restore registers to previous status! add sp, sp, #8 @@ the local variables! ldmfd sp!, {r4-r11, r15} @@ update PC with LR content. @@ kind of sub-function, here not to overload the common case. __end_bef_a_evaluation: add r2, r6, r11 @ R2=a0+W6*ROWr16[2] (a1) mul r11, r8, r4 @ R11=W2*ROWr16[2] sub r4, r6, r11 @ R4=a0-W2*ROWr16[2] (a3) add r6, r6, r11 @ R6=a0+W2*ROWr16[2] (a0) bal __end_a_evaluation endfunc
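The row pass of ff_simple_idct_arm follows the even/odd (a0..a3 / b0..b3) butterfly written out in the @@ comments above. A scalar C sketch of one row, using the W1..W7 and ROW_SHIFT values #defined at the top of the file; it is illustrative only and omits the empty-row and almost-empty-row shortcuts the assembly takes:

#include <stdint.h>

/* Constants repeated from the #defines above. */
enum { RW1 = 22725, RW2 = 21407, RW3 = 19266, RW4 = 16383,
       RW5 = 12873, RW6 = 8867,  RW7 = 4520,  REF_ROW_SHIFT = 11 };

static void idct_row_ref(int16_t row[8])
{
    int a0 = RW4 * row[0] + (1 << (REF_ROW_SHIFT - 1));
    int a1 = a0, a2 = a0, a3 = a0;

    a0 += RW2 * row[2];  a1 += RW6 * row[2];   /* even part */
    a2 -= RW6 * row[2];  a3 -= RW2 * row[2];
    a0 += RW4 * row[4];  a1 -= RW4 * row[4];
    a2 -= RW4 * row[4];  a3 += RW4 * row[4];
    a0 += RW6 * row[6];  a1 -= RW2 * row[6];
    a2 += RW2 * row[6];  a3 -= RW6 * row[6];

    /* odd part, per the MUL16/MAC16 comments */
    int b0 = RW1 * row[1] + RW3 * row[3] + RW5 * row[5] + RW7 * row[7];
    int b1 = RW3 * row[1] - RW7 * row[3] - RW1 * row[5] - RW5 * row[7];
    int b2 = RW5 * row[1] - RW1 * row[3] + RW7 * row[5] + RW3 * row[7];
    int b3 = RW7 * row[1] - RW5 * row[3] + RW3 * row[5] - RW1 * row[7];

    row[0] = (a0 + b0) >> REF_ROW_SHIFT;  row[7] = (a0 - b0) >> REF_ROW_SHIFT;
    row[1] = (a1 + b1) >> REF_ROW_SHIFT;  row[6] = (a1 - b1) >> REF_ROW_SHIFT;
    row[2] = (a2 + b2) >> REF_ROW_SHIFT;  row[5] = (a2 - b2) >> REF_ROW_SHIFT;
    row[3] = (a3 + b3) >> REF_ROW_SHIFT;  row[4] = (a3 - b3) >> REF_ROW_SHIFT;
}

The column pass is the same butterfly with COL_SHIFT and a stride of 8 between samples, as the col[8x...] comments indicate.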
Akagi201/ffmpeg-xcode
8,517
ffmpeg-3.0.2/libavcodec/arm/me_cmp_armv6.S
/*
 * Copyright (c) 2009 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"

function ff_pix_abs16_armv6, export=1
        ldr             r0,  [sp]
        push            {r4-r9, lr}
        mov             r12, #0
        mov             lr,  #0
        ldm             r1,  {r4-r7}
        ldr             r8,  [r2]
1:      ldr             r9,  [r2, #4]
        pld             [r1, r3]
        usada8          r12, r4,  r8,  r12
        ldr             r8,  [r2, #8]
        pld             [r2, r3]
        usada8          lr,  r5,  r9,  lr
        ldr             r9,  [r2, #12]
        usada8          r12, r6,  r8,  r12
        subs            r0,  r0,  #1
        usada8          lr,  r7,  r9,  lr
        beq             2f
        add             r1,  r1,  r3
        ldm             r1,  {r4-r7}
        add             r2,  r2,  r3
        ldr             r8,  [r2]
        b               1b
2:      add             r0,  r12, lr
        pop             {r4-r9, pc}
endfunc

function ff_pix_abs16_x2_armv6, export=1
        ldr             r12, [sp]
        push            {r4-r11, lr}
        mov             r0,  #0
        mov             lr,  #1
        orr             lr,  lr,  lr,  lsl #8
        orr             lr,  lr,  lr,  lsl #16
1:      ldr             r8,  [r2]
        ldr             r9,  [r2, #4]
        lsr             r10, r8,  #8
        ldr             r4,  [r1]
        lsr             r6,  r9,  #8
        orr             r10, r10, r9,  lsl #24
        ldr             r5,  [r2, #8]
        eor             r11, r8,  r10
        uhadd8          r7,  r8,  r10
        orr             r6,  r6,  r5,  lsl #24
        and             r11, r11, lr
        uadd8           r7,  r7,  r11
        ldr             r8,  [r1, #4]
        usada8          r0,  r4,  r7,  r0
        eor             r7,  r9,  r6
        lsr             r10, r5,  #8
        and             r7,  r7,  lr
        uhadd8          r4,  r9,  r6
        ldr             r6,  [r2, #12]
        uadd8           r4,  r4,  r7
        pld             [r1, r3]
        orr             r10, r10, r6,  lsl #24
        usada8          r0,  r8,  r4,  r0
        ldr             r4,  [r1, #8]
        eor             r11, r5,  r10
        ldrb            r7,  [r2, #16]
        and             r11, r11, lr
        uhadd8          r8,  r5,  r10
        ldr             r5,  [r1, #12]
        uadd8           r8,  r8,  r11
        pld             [r2, r3]
        lsr             r10, r6,  #8
        usada8          r0,  r4,  r8,  r0
        orr             r10, r10, r7,  lsl #24
        subs            r12, r12, #1
        eor             r11, r6,  r10
        add             r1,  r1,  r3
        uhadd8          r9,  r6,  r10
        and             r11, r11, lr
        uadd8           r9,  r9,  r11
        add             r2,  r2,  r3
        usada8          r0,  r5,  r9,  r0
        bgt             1b
        pop             {r4-r11, pc}
endfunc

.macro  usad_y2         p0,  p1,  p2,  p3,  n0,  n1,  n2,  n3
        ldr             \n0, [r2]
        eor             \n1, \p0, \n0
        uhadd8          \p0, \p0, \n0
        and             \n1, \n1, lr
        ldr             \n2, [r1]
        uadd8           \p0, \p0, \n1
        ldr             \n1, [r2, #4]
        usada8          r0,  \p0, \n2, r0
        pld             [r1, r3]
        eor             \n3, \p1, \n1
        uhadd8          \p1, \p1, \n1
        and             \n3, \n3, lr
        ldr             \p0, [r1, #4]
        uadd8           \p1, \p1, \n3
        ldr             \n2, [r2, #8]
        usada8          r0,  \p1, \p0, r0
        pld             [r2, r3]
        eor             \p0, \p2, \n2
        uhadd8          \p2, \p2, \n2
        and             \p0, \p0, lr
        ldr             \p1, [r1, #8]
        uadd8           \p2, \p2, \p0
        ldr             \n3, [r2, #12]
        usada8          r0,  \p2, \p1, r0
        eor             \p1, \p3, \n3
        uhadd8          \p3, \p3, \n3
        and             \p1, \p1, lr
        ldr             \p0, [r1, #12]
        uadd8           \p3, \p3, \p1
        add             r1,  r1,  r3
        usada8          r0,  \p3, \p0, r0
        add             r2,  r2,  r3
.endm

function ff_pix_abs16_y2_armv6, export=1
        pld             [r1]
        pld             [r2]
        ldr             r12, [sp]
        push            {r4-r11, lr}
        mov             r0,  #0
        mov             lr,  #1
        orr             lr,  lr,  lr,  lsl #8
        orr             lr,  lr,  lr,  lsl #16
        ldr             r4,  [r2]
        ldr             r5,  [r2, #4]
        ldr             r6,  [r2, #8]
        ldr             r7,  [r2, #12]
        add             r2,  r2,  r3
1:      usad_y2         r4,  r5,  r6,  r7,  r8,  r9,  r10, r11
        subs            r12, r12, #2
        usad_y2         r8,  r9,  r10, r11, r4,  r5,  r6,  r7
        bgt             1b
        pop             {r4-r11, pc}
endfunc

function ff_pix_abs8_armv6, export=1
        pld             [r2, r3]
        ldr             r12, [sp]
        push            {r4-r9, lr}
        mov             r0,  #0
        mov             lr,  #0
        ldrd_post       r4,  r5,  r1,  r3
1:      subs            r12, r12, #2
        ldr             r7,  [r2, #4]
        ldr_post        r6,  r2,  r3
        ldrd_post       r8,  r9,  r1,  r3
        usada8          r0,  r4,  r6,  r0
        pld             [r2, r3]
        usada8          lr,  r5,  r7,  lr
        ldr             r7,  [r2, #4]
        ldr_post        r6,  r2,  r3
        beq             2f
        ldrd_post       r4,  r5,  r1,  r3
        usada8          r0,  r8,  r6,  r0
        pld             [r2, r3]
        usada8          lr,  r9,  r7,  lr
        b               1b
2:      usada8          r0,  r8,  r6,  r0
        usada8          lr,  r9,  r7,  lr
        add             r0,  r0,  lr
        pop             {r4-r9, pc}
endfunc

function ff_sse16_armv6, export=1
        ldr             r12, [sp]
        push            {r4-r9, lr}
        mov             r0,  #0
1:      ldrd            r4,  r5,  [r1]
        ldr             r8,  [r2]
        uxtb16          lr,  r4
        uxtb16          r4,  r4,  ror #8
        uxtb16          r9,  r8
        uxtb16          r8,  r8,  ror #8
        ldr             r7,  [r2, #4]
        usub16          lr,  lr,  r9
        usub16          r4,  r4,  r8
        smlad           r0,  lr,  lr,  r0
        uxtb16          r6,  r5
        uxtb16          lr,  r5,  ror #8
        uxtb16          r8,  r7
        uxtb16          r9,  r7,  ror #8
        smlad           r0,  r4,  r4,  r0
        ldrd            r4,  r5,  [r1, #8]
        usub16          r6,  r6,  r8
        usub16          r8,  lr,  r9
        ldr             r7,  [r2, #8]
        smlad           r0,  r6,  r6,  r0
        uxtb16          lr,  r4
        uxtb16          r4,  r4,  ror #8
        uxtb16          r9,  r7
        uxtb16          r7,  r7,  ror #8
        smlad           r0,  r8,  r8,  r0
        ldr             r8,  [r2, #12]
        usub16          lr,  lr,  r9
        usub16          r4,  r4,  r7
        smlad           r0,  lr,  lr,  r0
        uxtb16          r6,  r5
        uxtb16          r5,  r5,  ror #8
        uxtb16          r9,  r8
        uxtb16          r8,  r8,  ror #8
        smlad           r0,  r4,  r4,  r0
        usub16          r6,  r6,  r9
        usub16          r5,  r5,  r8
        smlad           r0,  r6,  r6,  r0
        add             r1,  r1,  r3
        add             r2,  r2,  r3
        subs            r12, r12, #1
        smlad           r0,  r5,  r5,  r0
        bgt             1b
        pop             {r4-r9, pc}
endfunc
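ff_pix_abs16_armv6 above is a sum-of-absolute-differences over a 16-byte-wide block: each usada8 sums four absolute byte differences into one of the two interleaved accumulators (r12 and lr). A scalar C reference for the value it returns; the name and argument layout here are illustrative, not FFmpeg's API:

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

static int pix_abs16_ref(const uint8_t *pix1, const uint8_t *pix2,
                         ptrdiff_t stride, int h)
{
    int sum = 0;
    for (; h > 0; h--) {
        for (int x = 0; x < 16; x++)        /* one usada8 covers 4 of these */
            sum += abs(pix1[x] - pix2[x]);
        pix1 += stride;
        pix2 += stride;
    }
    return sum;
}

ff_sse16_armv6 computes the squared-error variant of the same loop, unpacking bytes with uxtb16 and accumulating (pix1 - pix2)^2 pairs via smlad.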
Akagi201/ffmpeg-xcode
21,432
ffmpeg-3.0.2/libavcodec/arm/hpeldsp_arm.S
@ @ ARMv4-optimized halfpel functions @ Copyright (c) 2004 AGAWA Koji <i (AT) atty (DOT) jp> @ @ This file is part of FFmpeg. @ @ FFmpeg is free software; you can redistribute it and/or @ modify it under the terms of the GNU Lesser General Public @ License as published by the Free Software Foundation; either @ version 2.1 of the License, or (at your option) any later version. @ @ FFmpeg is distributed in the hope that it will be useful, @ but WITHOUT ANY WARRANTY; without even the implied warranty of @ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU @ Lesser General Public License for more details. @ @ You should have received a copy of the GNU Lesser General Public @ License along with FFmpeg; if not, write to the Free Software @ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA @ #include "config.h" #include "libavutil/arm/asm.S" #if !HAVE_ARMV5TE_EXTERNAL #define pld @ #endif .macro ALIGN_QWORD_D shift, Rd0, Rd1, Rd2, Rd3, Rn0, Rn1, Rn2, Rn3, Rn4 mov \Rd0, \Rn0, lsr #(\shift * 8) mov \Rd1, \Rn1, lsr #(\shift * 8) mov \Rd2, \Rn2, lsr #(\shift * 8) mov \Rd3, \Rn3, lsr #(\shift * 8) orr \Rd0, \Rd0, \Rn1, lsl #(32 - \shift * 8) orr \Rd1, \Rd1, \Rn2, lsl #(32 - \shift * 8) orr \Rd2, \Rd2, \Rn3, lsl #(32 - \shift * 8) orr \Rd3, \Rd3, \Rn4, lsl #(32 - \shift * 8) .endm .macro ALIGN_DWORD shift, R0, R1, R2 mov \R0, \R0, lsr #(\shift * 8) orr \R0, \R0, \R1, lsl #(32 - \shift * 8) mov \R1, \R1, lsr #(\shift * 8) orr \R1, \R1, \R2, lsl #(32 - \shift * 8) .endm .macro ALIGN_DWORD_D shift, Rdst0, Rdst1, Rsrc0, Rsrc1, Rsrc2 mov \Rdst0, \Rsrc0, lsr #(\shift * 8) mov \Rdst1, \Rsrc1, lsr #(\shift * 8) orr \Rdst0, \Rdst0, \Rsrc1, lsl #(32 - (\shift * 8)) orr \Rdst1, \Rdst1, \Rsrc2, lsl #(32 - (\shift * 8)) .endm .macro RND_AVG32 Rd0, Rd1, Rn0, Rn1, Rm0, Rm1, Rmask @ Rd = (Rn | Rm) - (((Rn ^ Rm) & ~0x01010101) >> 1) @ Rmask = 0xFEFEFEFE @ Rn = destroy eor \Rd0, \Rn0, \Rm0 eor \Rd1, \Rn1, \Rm1 orr \Rn0, \Rn0, \Rm0 orr \Rn1, \Rn1, \Rm1 and \Rd0, \Rd0, \Rmask and \Rd1, \Rd1, \Rmask sub \Rd0, \Rn0, \Rd0, lsr #1 sub \Rd1, \Rn1, \Rd1, lsr #1 .endm .macro NO_RND_AVG32 Rd0, Rd1, Rn0, Rn1, Rm0, Rm1, Rmask @ Rd = (Rn & Rm) - (((Rn ^ Rm) & ~0x01010101) >> 1) @ Rmask = 0xFEFEFEFE @ Rn = destroy eor \Rd0, \Rn0, \Rm0 eor \Rd1, \Rn1, \Rm1 and \Rn0, \Rn0, \Rm0 and \Rn1, \Rn1, \Rm1 and \Rd0, \Rd0, \Rmask and \Rd1, \Rd1, \Rmask add \Rd0, \Rn0, \Rd0, lsr #1 add \Rd1, \Rn1, \Rd1, lsr #1 .endm .macro JMP_ALIGN tmp, reg ands \tmp, \reg, #3 bic \reg, \reg, #3 beq 1f subs \tmp, \tmp, #1 beq 2f subs \tmp, \tmp, #1 beq 3f b 4f .endm @ ---------------------------------------------------------------- function ff_put_pixels16_arm, export=1, align=5 @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h) @ block = word aligned, pixles = unaligned pld [r1] push {r4-r11, lr} JMP_ALIGN r5, r1 1: ldm r1, {r4-r7} add r1, r1, r2 stm r0, {r4-r7} pld [r1] subs r3, r3, #1 add r0, r0, r2 bne 1b pop {r4-r11, pc} .align 5 2: ldm r1, {r4-r8} add r1, r1, r2 ALIGN_QWORD_D 1, r9, r10, r11, r12, r4, r5, r6, r7, r8 pld [r1] subs r3, r3, #1 stm r0, {r9-r12} add r0, r0, r2 bne 2b pop {r4-r11, pc} .align 5 3: ldm r1, {r4-r8} add r1, r1, r2 ALIGN_QWORD_D 2, r9, r10, r11, r12, r4, r5, r6, r7, r8 pld [r1] subs r3, r3, #1 stm r0, {r9-r12} add r0, r0, r2 bne 3b pop {r4-r11, pc} .align 5 4: ldm r1, {r4-r8} add r1, r1, r2 ALIGN_QWORD_D 3, r9, r10, r11, r12, r4, r5, r6, r7, r8 pld [r1] subs r3, r3, #1 stm r0, {r9-r12} add r0, r0, r2 bne 4b pop {r4-r11,pc} endfunc @ 
---------------------------------------------------------------- function ff_put_pixels8_arm, export=1, align=5 @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h) @ block = word aligned, pixles = unaligned pld [r1] push {r4-r5,lr} JMP_ALIGN r5, r1 1: ldm r1, {r4-r5} add r1, r1, r2 subs r3, r3, #1 pld [r1] stm r0, {r4-r5} add r0, r0, r2 bne 1b pop {r4-r5,pc} .align 5 2: ldm r1, {r4-r5, r12} add r1, r1, r2 ALIGN_DWORD 1, r4, r5, r12 pld [r1] subs r3, r3, #1 stm r0, {r4-r5} add r0, r0, r2 bne 2b pop {r4-r5,pc} .align 5 3: ldm r1, {r4-r5, r12} add r1, r1, r2 ALIGN_DWORD 2, r4, r5, r12 pld [r1] subs r3, r3, #1 stm r0, {r4-r5} add r0, r0, r2 bne 3b pop {r4-r5,pc} .align 5 4: ldm r1, {r4-r5, r12} add r1, r1, r2 ALIGN_DWORD 3, r4, r5, r12 pld [r1] subs r3, r3, #1 stm r0, {r4-r5} add r0, r0, r2 bne 4b pop {r4-r5,pc} endfunc @ ---------------------------------------------------------------- function ff_put_pixels8_x2_arm, export=1, align=5 @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h) @ block = word aligned, pixles = unaligned pld [r1] push {r4-r10,lr} ldr r12, =0xfefefefe JMP_ALIGN r5, r1 1: ldm r1, {r4-r5, r10} add r1, r1, r2 ALIGN_DWORD_D 1, r6, r7, r4, r5, r10 pld [r1] RND_AVG32 r8, r9, r4, r5, r6, r7, r12 subs r3, r3, #1 stm r0, {r8-r9} add r0, r0, r2 bne 1b pop {r4-r10,pc} .align 5 2: ldm r1, {r4-r5, r10} add r1, r1, r2 ALIGN_DWORD_D 1, r6, r7, r4, r5, r10 ALIGN_DWORD_D 2, r8, r9, r4, r5, r10 pld [r1] RND_AVG32 r4, r5, r6, r7, r8, r9, r12 subs r3, r3, #1 stm r0, {r4-r5} add r0, r0, r2 bne 2b pop {r4-r10,pc} .align 5 3: ldm r1, {r4-r5, r10} add r1, r1, r2 ALIGN_DWORD_D 2, r6, r7, r4, r5, r10 ALIGN_DWORD_D 3, r8, r9, r4, r5, r10 pld [r1] RND_AVG32 r4, r5, r6, r7, r8, r9, r12 subs r3, r3, #1 stm r0, {r4-r5} add r0, r0, r2 bne 3b pop {r4-r10,pc} .align 5 4: ldm r1, {r4-r5, r10} add r1, r1, r2 ALIGN_DWORD_D 3, r6, r7, r4, r5, r10 pld [r1] RND_AVG32 r8, r9, r6, r7, r5, r10, r12 subs r3, r3, #1 stm r0, {r8-r9} add r0, r0, r2 bne 4b pop {r4-r10,pc} endfunc function ff_put_no_rnd_pixels8_x2_arm, export=1, align=5 @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h) @ block = word aligned, pixles = unaligned pld [r1] push {r4-r10,lr} ldr r12, =0xfefefefe JMP_ALIGN r5, r1 1: ldm r1, {r4-r5, r10} add r1, r1, r2 ALIGN_DWORD_D 1, r6, r7, r4, r5, r10 pld [r1] NO_RND_AVG32 r8, r9, r4, r5, r6, r7, r12 subs r3, r3, #1 stm r0, {r8-r9} add r0, r0, r2 bne 1b pop {r4-r10,pc} .align 5 2: ldm r1, {r4-r5, r10} add r1, r1, r2 ALIGN_DWORD_D 1, r6, r7, r4, r5, r10 ALIGN_DWORD_D 2, r8, r9, r4, r5, r10 pld [r1] NO_RND_AVG32 r4, r5, r6, r7, r8, r9, r12 subs r3, r3, #1 stm r0, {r4-r5} add r0, r0, r2 bne 2b pop {r4-r10,pc} .align 5 3: ldm r1, {r4-r5, r10} add r1, r1, r2 ALIGN_DWORD_D 2, r6, r7, r4, r5, r10 ALIGN_DWORD_D 3, r8, r9, r4, r5, r10 pld [r1] NO_RND_AVG32 r4, r5, r6, r7, r8, r9, r12 subs r3, r3, #1 stm r0, {r4-r5} add r0, r0, r2 bne 3b pop {r4-r10,pc} .align 5 4: ldm r1, {r4-r5, r10} add r1, r1, r2 ALIGN_DWORD_D 3, r6, r7, r4, r5, r10 pld [r1] NO_RND_AVG32 r8, r9, r6, r7, r5, r10, r12 subs r3, r3, #1 stm r0, {r8-r9} add r0, r0, r2 bne 4b pop {r4-r10,pc} endfunc @ ---------------------------------------------------------------- function ff_put_pixels8_y2_arm, export=1, align=5 @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h) @ block = word aligned, pixles = unaligned pld [r1] push {r4-r11,lr} mov r3, r3, lsr #1 ldr r12, =0xfefefefe JMP_ALIGN r5, r1 1: ldm r1, {r4-r5} add r1, r1, r2 6: ldm r1, {r6-r7} add r1, r1, r2 pld [r1] 
RND_AVG32 r8, r9, r4, r5, r6, r7, r12 ldm r1, {r4-r5} add r1, r1, r2 stm r0, {r8-r9} add r0, r0, r2 pld [r1] RND_AVG32 r8, r9, r6, r7, r4, r5, r12 subs r3, r3, #1 stm r0, {r8-r9} add r0, r0, r2 bne 6b pop {r4-r11,pc} .align 5 2: ldm r1, {r4-r6} add r1, r1, r2 pld [r1] ALIGN_DWORD 1, r4, r5, r6 6: ldm r1, {r7-r9} add r1, r1, r2 pld [r1] ALIGN_DWORD 1, r7, r8, r9 RND_AVG32 r10, r11, r4, r5, r7, r8, r12 stm r0, {r10-r11} add r0, r0, r2 ldm r1, {r4-r6} add r1, r1, r2 pld [r1] ALIGN_DWORD 1, r4, r5, r6 subs r3, r3, #1 RND_AVG32 r10, r11, r7, r8, r4, r5, r12 stm r0, {r10-r11} add r0, r0, r2 bne 6b pop {r4-r11,pc} .align 5 3: ldm r1, {r4-r6} add r1, r1, r2 pld [r1] ALIGN_DWORD 2, r4, r5, r6 6: ldm r1, {r7-r9} add r1, r1, r2 pld [r1] ALIGN_DWORD 2, r7, r8, r9 RND_AVG32 r10, r11, r4, r5, r7, r8, r12 stm r0, {r10-r11} add r0, r0, r2 ldm r1, {r4-r6} add r1, r1, r2 pld [r1] ALIGN_DWORD 2, r4, r5, r6 subs r3, r3, #1 RND_AVG32 r10, r11, r7, r8, r4, r5, r12 stm r0, {r10-r11} add r0, r0, r2 bne 6b pop {r4-r11,pc} .align 5 4: ldm r1, {r4-r6} add r1, r1, r2 pld [r1] ALIGN_DWORD 3, r4, r5, r6 6: ldm r1, {r7-r9} add r1, r1, r2 pld [r1] ALIGN_DWORD 3, r7, r8, r9 RND_AVG32 r10, r11, r4, r5, r7, r8, r12 stm r0, {r10-r11} add r0, r0, r2 ldm r1, {r4-r6} add r1, r1, r2 pld [r1] ALIGN_DWORD 3, r4, r5, r6 subs r3, r3, #1 RND_AVG32 r10, r11, r7, r8, r4, r5, r12 stm r0, {r10-r11} add r0, r0, r2 bne 6b pop {r4-r11,pc} endfunc function ff_put_no_rnd_pixels8_y2_arm, export=1, align=5 @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h) @ block = word aligned, pixles = unaligned pld [r1] push {r4-r11,lr} mov r3, r3, lsr #1 ldr r12, =0xfefefefe JMP_ALIGN r5, r1 1: ldm r1, {r4-r5} add r1, r1, r2 6: ldm r1, {r6-r7} add r1, r1, r2 pld [r1] NO_RND_AVG32 r8, r9, r4, r5, r6, r7, r12 ldm r1, {r4-r5} add r1, r1, r2 stm r0, {r8-r9} add r0, r0, r2 pld [r1] NO_RND_AVG32 r8, r9, r6, r7, r4, r5, r12 subs r3, r3, #1 stm r0, {r8-r9} add r0, r0, r2 bne 6b pop {r4-r11,pc} .align 5 2: ldm r1, {r4-r6} add r1, r1, r2 pld [r1] ALIGN_DWORD 1, r4, r5, r6 6: ldm r1, {r7-r9} add r1, r1, r2 pld [r1] ALIGN_DWORD 1, r7, r8, r9 NO_RND_AVG32 r10, r11, r4, r5, r7, r8, r12 stm r0, {r10-r11} add r0, r0, r2 ldm r1, {r4-r6} add r1, r1, r2 pld [r1] ALIGN_DWORD 1, r4, r5, r6 subs r3, r3, #1 NO_RND_AVG32 r10, r11, r7, r8, r4, r5, r12 stm r0, {r10-r11} add r0, r0, r2 bne 6b pop {r4-r11,pc} .align 5 3: ldm r1, {r4-r6} add r1, r1, r2 pld [r1] ALIGN_DWORD 2, r4, r5, r6 6: ldm r1, {r7-r9} add r1, r1, r2 pld [r1] ALIGN_DWORD 2, r7, r8, r9 NO_RND_AVG32 r10, r11, r4, r5, r7, r8, r12 stm r0, {r10-r11} add r0, r0, r2 ldm r1, {r4-r6} add r1, r1, r2 pld [r1] ALIGN_DWORD 2, r4, r5, r6 subs r3, r3, #1 NO_RND_AVG32 r10, r11, r7, r8, r4, r5, r12 stm r0, {r10-r11} add r0, r0, r2 bne 6b pop {r4-r11,pc} .align 5 4: ldm r1, {r4-r6} add r1, r1, r2 pld [r1] ALIGN_DWORD 3, r4, r5, r6 6: ldm r1, {r7-r9} add r1, r1, r2 pld [r1] ALIGN_DWORD 3, r7, r8, r9 NO_RND_AVG32 r10, r11, r4, r5, r7, r8, r12 stm r0, {r10-r11} add r0, r0, r2 ldm r1, {r4-r6} add r1, r1, r2 pld [r1] ALIGN_DWORD 3, r4, r5, r6 subs r3, r3, #1 NO_RND_AVG32 r10, r11, r7, r8, r4, r5, r12 stm r0, {r10-r11} add r0, r0, r2 bne 6b pop {r4-r11,pc} endfunc .ltorg @ ---------------------------------------------------------------- .macro RND_XY2_IT align, rnd @ l1= (a & 0x03030303) + (b & 0x03030303) ?(+ 0x02020202) @ h1= ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2) .if \align == 0 ldm r1, {r6-r8} .elseif \align == 3 ldm r1, {r5-r7} .else ldm r1, {r8-r10} .endif add r1, r1, r2 pld [r1] .if \align == 0 
ALIGN_DWORD_D 1, r4, r5, r6, r7, r8 .elseif \align == 1 ALIGN_DWORD_D 1, r4, r5, r8, r9, r10 ALIGN_DWORD_D 2, r6, r7, r8, r9, r10 .elseif \align == 2 ALIGN_DWORD_D 2, r4, r5, r8, r9, r10 ALIGN_DWORD_D 3, r6, r7, r8, r9, r10 .elseif \align == 3 ALIGN_DWORD_D 3, r4, r5, r5, r6, r7 .endif ldr r14, =0x03030303 tst r3, #1 and r8, r4, r14 and r9, r5, r14 and r10, r6, r14 and r11, r7, r14 it eq andeq r14, r14, r14, \rnd #1 add r8, r8, r10 add r9, r9, r11 ldr r12, =0xfcfcfcfc >> 2 itt eq addeq r8, r8, r14 addeq r9, r9, r14 and r4, r12, r4, lsr #2 and r5, r12, r5, lsr #2 and r6, r12, r6, lsr #2 and r7, r12, r7, lsr #2 add r10, r4, r6 add r11, r5, r7 subs r3, r3, #1 .endm .macro RND_XY2_EXPAND align, rnd RND_XY2_IT \align, \rnd 6: push {r8-r11} RND_XY2_IT \align, \rnd pop {r4-r7} add r4, r4, r8 add r5, r5, r9 ldr r14, =0x0f0f0f0f add r6, r6, r10 add r7, r7, r11 and r4, r14, r4, lsr #2 and r5, r14, r5, lsr #2 add r4, r4, r6 add r5, r5, r7 stm r0, {r4-r5} add r0, r0, r2 bge 6b pop {r4-r11,pc} .endm function ff_put_pixels8_xy2_arm, export=1, align=5 @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h) @ block = word aligned, pixles = unaligned pld [r1] push {r4-r11,lr} @ R14 is also called LR JMP_ALIGN r5, r1 1: RND_XY2_EXPAND 0, lsl .align 5 2: RND_XY2_EXPAND 1, lsl .align 5 3: RND_XY2_EXPAND 2, lsl .align 5 4: RND_XY2_EXPAND 3, lsl endfunc function ff_put_no_rnd_pixels8_xy2_arm, export=1, align=5 @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h) @ block = word aligned, pixles = unaligned pld [r1] push {r4-r11,lr} JMP_ALIGN r5, r1 1: RND_XY2_EXPAND 0, lsr .align 5 2: RND_XY2_EXPAND 1, lsr .align 5 3: RND_XY2_EXPAND 2, lsr .align 5 4: RND_XY2_EXPAND 3, lsr endfunc
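The RND_AVG32 and NO_RND_AVG32 macros above average four packed bytes per 32-bit word with the classic SWAR identities; note that the unrounded sequence ends in add instructions, i.e. it computes (Rn & Rm) + (((Rn ^ Rm) & 0xFEFEFEFE) >> 1), although its header comment shows a minus. A scalar C sketch of both identities:

#include <stdint.h>

/* per-byte (a + b + 1) >> 1 with no carries across byte lanes:
 * a + b + 1 == 2*(a | b) - (a ^ b) + 1, masked so the shifted XOR
 * term cannot spill into the lane below */
static uint32_t rnd_avg32(uint32_t a, uint32_t b)
{
    return (a | b) - (((a ^ b) & 0xFEFEFEFEu) >> 1);
}

/* per-byte (a + b) >> 1, using a + b == 2*(a & b) + (a ^ b) */
static uint32_t no_rnd_avg32(uint32_t a, uint32_t b)
{
    return (a & b) + (((a ^ b) & 0xFEFEFEFEu) >> 1);
}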
Akagi201/ffmpeg-xcode
14,328
ffmpeg-3.0.2/libavcodec/arm/hevcdsp_idct_neon.S
/* * Copyright (c) 2014 Seppo Tomperi <seppo.tomperi@vtt.fi> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/arm/asm.S" #include "neon.S" function ff_hevc_idct_4x4_dc_neon_8, export=1 ldrsh r1, [r0] ldr r2, =0x20 add r1, #1 asr r1, #1 add r1, r2 asr r1, #6 vdup.16 q0, r1 vdup.16 q1, r1 vst1.16 {q0, q1}, [r0] bx lr endfunc function ff_hevc_idct_8x8_dc_neon_8, export=1 ldrsh r1, [r0] ldr r2, =0x20 add r1, #1 asr r1, #1 add r1, r2 asr r1, #6 vdup.16 q8, r1 vdup.16 q9, r1 vmov.16 q10, q8 vmov.16 q11, q8 vmov.16 q12, q8 vmov.16 q13, q8 vmov.16 q14, q8 vmov.16 q15, q8 vstm r0, {q8-q15} bx lr endfunc function ff_hevc_idct_16x16_dc_neon_8, export=1 ldrsh r1, [r0] ldr r2, =0x20 add r1, #1 asr r1, #1 add r1, r2 asr r1, #6 vdup.16 q8, r1 vdup.16 q9, r1 vmov.16 q10, q8 vmov.16 q11, q8 vmov.16 q12, q8 vmov.16 q13, q8 vmov.16 q14, q8 vmov.16 q15, q8 vstm r0!, {q8-q15} vstm r0!, {q8-q15} vstm r0!, {q8-q15} vstm r0, {q8-q15} bx lr endfunc function ff_hevc_idct_32x32_dc_neon_8, export=1 ldrsh r1, [r0] ldr r2, =0x20 add r1, #1 asr r1, #1 add r1, r2 asr r1, #6 mov r3, #16 vdup.16 q8, r1 vdup.16 q9, r1 vmov.16 q10, q8 vmov.16 q11, q8 vmov.16 q12, q8 vmov.16 q13, q8 vmov.16 q14, q8 vmov.16 q15, q8 1: subs r3, #1 vstm r0!, {q8-q15} bne 1b bx lr endfunc function ff_hevc_transform_add_4x4_neon_8, export=1 vldm r1, {q0-q1} vld1.32 d4[0], [r0], r2 vld1.32 d4[1], [r0], r2 vld1.32 d5[0], [r0], r2 vld1.32 d5[1], [r0], r2 sub r0, r0, r2, lsl #2 vmovl.u8 q8, d4 vmovl.u8 q9, d5 vqadd.s16 q0, q0, q8 vqadd.s16 q1, q1, q9 vqmovun.s16 d0, q0 vqmovun.s16 d1, q1 vst1.32 d0[0], [r0], r2 vst1.32 d0[1], [r0], r2 vst1.32 d1[0], [r0], r2 vst1.32 d1[1], [r0], r2 bx lr endfunc function ff_hevc_transform_add_8x8_neon_8, export=1 mov r3, #8 1: subs r3, #1 vld1.16 {q0}, [r1]! vld1.8 d16, [r0] vmovl.u8 q8, d16 vqadd.s16 q0, q8 vqmovun.s16 d0, q0 vst1.32 d0, [r0], r2 bne 1b bx lr endfunc function ff_hevc_transform_add_16x16_neon_8, export=1 mov r3, #16 1: subs r3, #1 vld1.16 {q0, q1}, [r1]! 
        vld1.8      {q8}, [r0]
        vmovl.u8    q9, d16
        vmovl.u8    q10, d17
        vqadd.s16   q0, q9
        vqadd.s16   q1, q10
        vqmovun.s16 d0, q0
        vqmovun.s16 d1, q1
        vst1.8      {q0}, [r0], r2
        bne         1b
        bx          lr
endfunc

function ff_hevc_transform_add_32x32_neon_8, export=1
        mov         r3, #32
1:      subs        r3, #1
        vldm        r1!, {q0-q3}
        vld1.8      {q8, q9}, [r0]
        vmovl.u8    q10, d16
        vmovl.u8    q11, d17
        vmovl.u8    q12, d18
        vmovl.u8    q13, d19
        vqadd.s16   q0, q10
        vqadd.s16   q1, q11
        vqadd.s16   q2, q12
        vqadd.s16   q3, q13
        vqmovun.s16 d0, q0
        vqmovun.s16 d1, q1
        vqmovun.s16 d2, q2
        vqmovun.s16 d3, q3
        vst1.8      {q0, q1}, [r0], r2
        bne         1b
        bx          lr
endfunc

.macro transpose_16b_8x8 r0, r1, r2, r3, r4, r5, r6, r7
        vtrn.64     \r0, \r4
        vtrn.64     \r1, \r5
        vtrn.64     \r2, \r6
        vtrn.64     \r3, \r7
        vtrn.32     \r0, \r2
        vtrn.32     \r1, \r3
        vtrn.32     \r4, \r6
        vtrn.32     \r5, \r7
        vtrn.16     \r0, \r1
        vtrn.16     \r2, \r3
        vtrn.16     \r4, \r5
        vtrn.16     \r6, \r7
.endm

// in 4 q regs
// output 8 d regs
.macro transpose_16b_4x4 r0, r1, r2, r3
        vtrn.32     \r0, \r2
        vtrn.32     \r1, \r3
        vtrn.16     \r0, \r1
        vtrn.16     \r2, \r3
.endm

/* uses registers q2 - q9 for temp values */
/* TODO: reorder */
.macro tr4_luma_shift r0, r1, r2, r3, shift
        vaddl.s16   q5, \r0, \r2    // c0 = src0 + src2
        vaddl.s16   q2, \r2, \r3    // c1 = src2 + src3
        vsubl.s16   q4, \r0, \r3    // c2 = src0 - src3
        vmull.s16   q6, \r1, d0[0]  // c3 = 74 * src1

        vaddl.s16   q7, \r0, \r3    // src0 + src3
        vsubw.s16   q7, q7, \r2     // src0 - src2 + src3
        vmul.s32    q7, q7, d0[0]   // dst2 = 74 * (src0 - src2 + src3)

        vmul.s32    q8, q5, d0[1]   // 29 * c0
        vmul.s32    q9, q2, d1[0]   // 55 * c1
        vadd.s32    q8, q9          // 29 * c0 + 55 * c1
        vadd.s32    q8, q6          // dst0 = 29 * c0 + 55 * c1 + c3

        vmul.s32    q2, q2, d0[1]   // 29 * c1
        vmul.s32    q9, q4, d1[0]   // 55 * c2
        vsub.s32    q9, q2          // 55 * c2 - 29 * c1
        vadd.s32    q9, q6          // dst1 = 55 * c2 - 29 * c1 + c3

        vmul.s32    q5, q5, d1[0]   // 55 * c0
        vmul.s32    q4, q4, d0[1]   // 29 * c2
        vadd.s32    q5, q4          // 55 * c0 + 29 * c2
        vsub.s32    q5, q6          // dst3 = 55 * c0 + 29 * c2 - c3

        vqrshrn.s32 \r0, q8, \shift
        vqrshrn.s32 \r1, q9, \shift
        vqrshrn.s32 \r2, q7, \shift
        vqrshrn.s32 \r3, q5, \shift
.endm

/* uses registers q2 - q6 for temp values */
.macro tr4 r0, r1, r2, r3
        vmull.s16   q4, \r1, d0[0]  // 83 * src1
        vmull.s16   q6, \r1, d0[1]  // 36 * src1
        vshll.s16   q2, \r0, #6     // 64 * src0
        vshll.s16   q3, \r2, #6     // 64 * src2
        vadd.s32    q5, q2, q3      // 64 * (src0 + src2)      e0
        vsub.s32    q2, q2, q3      // 64 * (src0 - src2)      e1
        vmlal.s16   q4, \r3, d0[1]  // 83 * src1 + 36 * src3   o0
        vmlsl.s16   q6, \r3, d0[0]  // 36 * src1 - 83 * src3   o1
        vsub.s32    q3, q5, q4      // e0 - o0
        vadd.s32    q4, q5, q4      // e0 + o0
        vadd.s32    q5, q2, q6      // e1 + o1
        vsub.s32    q6, q2, q6      // e1 - o1
.endm

.macro tr4_shift r0, r1, r2, r3, shift
        vmull.s16   q4, \r1, d0[0]  // 83 * src1
        vmull.s16   q6, \r1, d0[1]  // 36 * src1
        vshll.s16   q2, \r0, #6     // 64 * src0
        vshll.s16   q3, \r2, #6     // 64 * src2
        vadd.s32    q5, q2, q3      // 64 * (src0 + src2)      e0
        vsub.s32    q2, q2, q3      // 64 * (src0 - src2)      e1
        vmlal.s16   q4, \r3, d0[1]  // 83 * src1 + 36 * src3   o0
        vmlsl.s16   q6, \r3, d0[0]  // 36 * src1 - 83 * src3   o1
        vsub.s32    q3, q5, q4      // e0 - o0
        vadd.s32    q4, q5, q4      // e0 + o0
        vadd.s32    q5, q2, q6      // e1 + o1
        vsub.s32    q6, q2, q6      // e1 - o1
        vqrshrn.s32 \r0, q4, \shift
        vqrshrn.s32 \r1, q5, \shift
        vqrshrn.s32 \r2, q6, \shift
        vqrshrn.s32 \r3, q3, \shift
.endm

function ff_hevc_transform_4x4_neon_8, export=1
        vpush       {d8-d15}
        vld1.16     {q14, q15}, [r0]    // coeffs
        ldr         r3, =0x00240053     // 36 and 83
        vmov.32     d0[0], r3
        tr4_shift   d28, d29, d30, d31, #7
        vtrn.16     d28, d29
        vtrn.16     d30, d31
        vtrn.32     q14, q15
        tr4_shift   d28, d29, d30, d31, #12
        vtrn.16     d28, d29
        vtrn.16     d30, d31
        vtrn.32     q14, q15
        vst1.16     {q14, q15}, [r0]
        vpop        {d8-d15}
        bx          lr
endfunc

function ff_hevc_transform_luma_4x4_neon_8, export=1
        vpush       {d8-d15}
        vld1.16     {q14, q15}, [r0]    // coeffs
        ldr         r3, =0x4a           // 74
        vmov.32     d0[0], r3
        ldr         r3, =0x1d           // 29
        vmov.32     d0[1], r3
        ldr         r3, =0x37           // 55
        vmov.32     d1[0], r3
        tr4_luma_shift d28, d29, d30, d31, #7
        vtrn.16     d28, d29
        vtrn.16     d30, d31
        vtrn.32     q14, q15
        tr4_luma_shift d28, d29, d30, d31, #12
        vtrn.16     d28, d29
        vtrn.16     d30, d31
        vtrn.32     q14, q15
        vst1.16     {q14, q15}, [r0]
        vpop        {d8-d15}
        bx          lr
endfunc

.macro tr8_begin in0, in1, in2, in3
        vmull.s16   q7, \in0, d1[1]     //  89 * src1
        vmull.s16   q8, \in0, d1[0]     //  75 * src1
        vmull.s16   q9, \in0, d1[3]     //  50 * src1
        vmull.s16   q10, \in0, d1[2]    //  18 * src1
        vmlal.s16   q7, \in1, d1[0]     //  75 * src3
        vmlsl.s16   q8, \in1, d1[2]     // -18 * src3
        vmlsl.s16   q9, \in1, d1[1]     // -89 * src3
        vmlsl.s16   q10, \in1, d1[3]    // -50 * src3
        vmlal.s16   q7, \in2, d1[3]     //  50 * src5
        vmlsl.s16   q8, \in2, d1[1]     // -89 * src5
        vmlal.s16   q9, \in2, d1[2]     //  18 * src5
        vmlal.s16   q10, \in2, d1[0]    //  75 * src5
        vmlal.s16   q7, \in3, d1[2]     //  18 * src7
        vmlsl.s16   q8, \in3, d1[3]     // -50 * src7
        vmlal.s16   q9, \in3, d1[0]     //  75 * src7
        vmlsl.s16   q10, \in3, d1[1]    // -89 * src7
.endm

.macro tr8_end shift
        vadd.s32    q1, q4, q7          // e_8[0] + o_8[0], dst[0]
        vsub.s32    q4, q4, q7          // e_8[0] - o_8[0], dst[7]
        vadd.s32    q2, q5, q8          // e_8[1] + o_8[1], dst[1]
        vsub.s32    q5, q5, q8          // e_8[1] - o_8[1], dst[6]
        vadd.s32    q11, q6, q9         // e_8[2] + o_8[2], dst[2]
        vsub.s32    q6, q6, q9          // e_8[2] - o_8[2], dst[5]
        vadd.s32    q12, q3, q10        // e_8[3] + o_8[3], dst[3]
        vsub.s32    q3, q3, q10         // e_8[3] - o_8[3], dst[4]
        vqrshrn.s32 d2, q1, \shift
        vqrshrn.s32 d3, q2, \shift
        vqrshrn.s32 d4, q11, \shift
        vqrshrn.s32 d5, q12, \shift
        vqrshrn.s32 d6, q3, \shift
        vqrshrn.s32 d7, q6, \shift
        vqrshrn.s32 d9, q4, \shift
        vqrshrn.s32 d8, q5, \shift
.endm

function ff_hevc_transform_8x8_neon_8, export=1
        push        {r4-r8}
        vpush       {d8-d15}
        mov         r5, #16

        adr         r3, tr4f
        vld1.16     {d0, d1}, [r3]

        // left half
        vld1.16     {d24}, [r0], r5
        vld1.16     {d25}, [r0], r5
        vld1.16     {d26}, [r0], r5
        vld1.16     {d27}, [r0], r5
        vld1.16     {d28}, [r0], r5
        vld1.16     {d29}, [r0], r5
        vld1.16     {d30}, [r0], r5
        vld1.16     {d31}, [r0], r5
        sub         r0, #128
        tr8_begin   d25, d27, d29, d31
        tr4         d24, d26, d28, d30
        tr8_end     #7
        vst1.16     {d2}, [r0], r5
        vst1.16     {d3}, [r0], r5
        vst1.16     {d4}, [r0], r5
        vst1.16     {d5}, [r0], r5
        vst1.16     {d6}, [r0], r5
        vst1.16     {d7}, [r0], r5
        vst1.16     {d8}, [r0], r5
        vst1.16     {d9}, [r0], r5
        sub         r0, #128

        // skip right half if col_limit in r1 is less than 4
        cmp         r1, #4
        blt         1f

        // right half
        add         r0, #8
        vld1.16     {d24}, [r0], r5
        vld1.16     {d25}, [r0], r5
        vld1.16     {d26}, [r0], r5
        vld1.16     {d27}, [r0], r5
        vld1.16     {d28}, [r0], r5
        vld1.16     {d29}, [r0], r5
        vld1.16     {d30}, [r0], r5
        vld1.16     {d31}, [r0], r5
        sub         r0, #128
        tr8_begin   d25, d27, d29, d31
        tr4         d24, d26, d28, d30
        tr8_end     #7
        vst1.16     {d2}, [r0], r5
        vst1.16     {d3}, [r0], r5
        vst1.16     {d4}, [r0], r5
        vst1.16     {d5}, [r0], r5
        vst1.16     {d6}, [r0], r5
        vst1.16     {d7}, [r0], r5
        vst1.16     {d8}, [r0], r5
        vst1.16     {d9}, [r0], r5
        sub         r0, #136

1:      // top half
        vldm        r0, {q12-q15}       // coeffs
        transpose_16b_4x4 d24, d26, d28, d30
        transpose_16b_4x4 d25, d27, d29, d31
        tr8_begin   d26, d30, d27, d31
        tr4         d24, d28, d25, d29
        tr8_end     #12
        transpose_16b_4x4 d2, d3, d4, d5
        transpose_16b_4x4 d6, d7, d8, d9
        vswp        d7, d5
        vswp        d7, d8
        vswp        d3, d6
        vswp        d6, d4
        vstm        r0!, {q1-q4}

        // bottom half
        vldm        r0, {q12-q15}       // coeffs
        transpose_16b_4x4 d24, d26, d28, d30
        transpose_16b_4x4 d25, d27, d29, d31
        tr8_begin   d26, d30, d27, d31
        tr4         d24, d28, d25, d29
        tr8_end     #12
        transpose_16b_4x4 d2, d3, d4, d5
        transpose_16b_4x4 d6, d7, d8, d9
        vswp        d7, d5
        vswp        d7, d8
        vswp        d3, d6
        vswp        d6, d4
        //vstm        r0, {q1-q4}
        vst1.16     {q1-q2}, [r0]
        add         r0, #32
        vst1.16     {q3-q4}, [r0]
        sub         r0, #32
        vpop        {d8-d15}
        pop         {r4-r8}
        bx          lr
endfunc

.align 4
tr4f:
.word 0x00240053    // 36, d0[0] = 83
.word 0x00000000
tr8f:
.word 0x0059004b    // 89, d1[0] = 75
.word 0x00320012    // 50, d1[2] = 18
tr16:
.word 0x005a0057    // 90, d2[0] = 87
.word 0x00500046    // 80, d2[2] = 70
.word 0x0039002b    // 57, d3[0] = 43
.word 0x00190009    // 25, d3[2] = 9
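For reference, the butterfly that the tr4/tr4_shift macros above implement (and that ff_hevc_transform_4x4_neon_8 applies once per pass, with shifts 7 and 12) can be sketched in C. This is a minimal sketch, not from the FFmpeg sources; the names hevc_idct4_ref and clip16 are invented, and clip16 plus the rounding term stand in for the round-and-saturate that vqrshrn.s32 performs:

#include <stdint.h>

static int16_t clip16(int v)
{
    return v < -32768 ? -32768 : v > 32767 ? 32767 : (int16_t)v;
}

static void hevc_idct4_ref(const int16_t src[4], int16_t dst[4], int shift)
{
    int e0  = 64 * (src[0] + src[2]);       /* even part */
    int e1  = 64 * (src[0] - src[2]);
    int o0  = 83 * src[1] + 36 * src[3];    /* odd part */
    int o1  = 36 * src[1] - 83 * src[3];
    int rnd = 1 << (shift - 1);             /* vqrshrn rounds to nearest */

    dst[0] = clip16((e0 + o0 + rnd) >> shift);
    dst[1] = clip16((e1 + o1 + rnd) >> shift);
    dst[2] = clip16((e1 - o1 + rnd) >> shift);
    dst[3] = clip16((e0 - o0 + rnd) >> shift);
}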
Akagi201/ffmpeg-xcode
4,647
ffmpeg-3.0.2/libavcodec/arm/flacdsp_arm.S
/*
 * Copyright (c) 2012 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"

function flac_lpc_16_1_arm
        ldr     r12, [sp]
        push    {r4, lr}
        ldr     r1,  [r1]
        subs    r12, r12, #2
        ldr     lr,  [r0], #4
        beq     2f
        it      lt
        poplt   {r4, pc}
1:
        mul     r4,  lr,  r1
        ldm     r0,  {r2, lr}
        add_sh  r2,  r2,  r4,  asr r3
        mul     r4,  r2,  r1
        subs    r12, r12, #2
        add_sh  lr,  lr,  r4,  asr r3
        stm     r0!, {r2, lr}
        bgt     1b
        it      lt
        poplt   {r4, pc}
2:
        mul     r4,  lr,  r1
        ldr     r2,  [r0]
        add_sh  r2,  r2,  r4,  asr r3
        str     r2,  [r0]
        pop     {r4, pc}
endfunc

function flac_lpc_16_2_arm
        ldr     r12, [sp]
        subs    r12, r12, r2
        it      le
        bxle    lr

        push    {r4-r9, lr}
        ldm     r0!, {r6, r7}
        ldm     r1,  {r8, r9}
        subs    r12, r12, #1
        beq     2f
1:
        mul     r4,  r6,  r8
        mul     r5,  r7,  r8
        mla     r4,  r7,  r9,  r4
        ldm     r0,  {r6, r7}
        add_sh  r6,  r6,  r4,  asr r3
        mla     r5,  r6,  r9,  r5
        add_sh  r7,  r7,  r5,  asr r3
        stm     r0!, {r6, r7}
        subs    r12, r12, #2
        bgt     1b
        it      lt
        poplt   {r4-r9, pc}
2:
        mul     r4,  r6,  r8
        mla     r4,  r7,  r9,  r4
        ldr     r5,  [r0]
        add_sh  r5,  r5,  r4,  asr r3
        str     r5,  [r0]
        pop     {r4-r9, pc}
endfunc

function ff_flac_lpc_16_arm, export=1
        cmp     r2,  #2
        blt     flac_lpc_16_1_arm
        beq     flac_lpc_16_2_arm

        ldr     r12, [sp]
        subs    r12, r12, r2
        it      le
        bxle    lr

        push    {r4-r9, lr}
        subs    r12, r12, #1
        beq     3f
1:
        sub     lr,  r2,  #2
        mov     r4,  #0
        mov     r5,  #0
        ldr     r7,  [r0], #4
        ldr     r9,  [r1], #4
2:
        mla     r4,  r7,  r9,  r4
        ldm     r0!, {r6, r7}
        mla     r5,  r6,  r9,  r5
        ldm     r1!, {r8, r9}
        mla     r4,  r6,  r8,  r4
        subs    lr,  lr,  #2
        mla     r5,  r7,  r8,  r5
        bgt     2b
        blt     6f
        mla     r4,  r7,  r9,  r4
        ldr     r7,  [r0], #4
        mla     r5,  r7,  r9,  r5
        ldr     r9,  [r1], #4
6:
        mla     r4,  r7,  r9,  r4
        ldm     r0,  {r6, r7}
        add_sh  r6,  r6,  r4,  asr r3
        mla     r5,  r6,  r9,  r5
        add_sh  r7,  r7,  r5,  asr r3
        stm     r0!, {r6, r7}
        sub     r0,  r0,  r2,  lsl #2
        sub     r1,  r1,  r2,  lsl #2
        subs    r12, r12, #2
        bgt     1b
        it      lt
        poplt   {r4-r9, pc}
3:
        mov     r4,  #0
4:
        ldr     r5,  [r1], #4
        ldr     r6,  [r0], #4
        mla     r4,  r5,  r6,  r4
        subs    r2,  r2,  #1
        bgt     4b
        ldr     r5,  [r0]
        add_sh  r5,  r5,  r4,  asr r3
        str     r5,  [r0]
        pop     {r4-r9, pc}
endfunc
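All three routines above specialize the same fixed-point LPC prediction loop. A minimal C sketch follows (the name flac_lpc_16_ref is invented; the 32-bit accumulator mirrors the mla chains, and coeffs[0] is assumed to pair with the oldest sample of each window, matching the order-2 path where r8 multiplies the older of the two loaded samples):

#include <stdint.h>

static void flac_lpc_16_ref(int32_t *decoded, const int coeffs[32],
                            int pred_order, int qlevel, int len)
{
    for (int i = pred_order; i < len; i++) {
        int sum = 0;                        /* 32-bit, like the mla chains */
        for (int j = 0; j < pred_order; j++)
            sum += coeffs[j] * decoded[i - pred_order + j];
        decoded[i] += sum >> qlevel;        /* add_sh: add the shifted prediction */
    }
}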
Akagi201/ffmpeg-xcode
1,286
ffmpeg-3.0.2/libavcodec/arm/ac3dsp_arm.S
/*
 * Copyright (c) 2011 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"

function ff_ac3_update_bap_counts_arm, export=1
        push    {lr}
        ldrb    lr,  [r1], #1
1:
        lsl     r3,  lr,  #1
        ldrh    r12, [r0, r3]
        subs    r2,  r2,  #1
        it      gt
        ldrbgt  lr,  [r1], #1
        add     r12, r12, #1
        strh    r12, [r0, r3]
        bgt     1b
        pop     {pc}
endfunc
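The strided ldrh/strh pair above is a histogram update: in C terms the whole function reduces to the sketch below (hypothetical name; the lsl #1 in the assembly is just the uint16_t indexing):

#include <stdint.h>

static void ac3_update_bap_counts_ref(uint16_t *mant_cnt,
                                      const uint8_t *bap, int len)
{
    while (len-- > 0)
        mant_cnt[*bap++]++;     /* one count per mantissa, keyed by its bap */
}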
Akagi201/ffmpeg-xcode
3,979
ffmpeg-3.0.2/libavcodec/arm/mpegvideo_armv5te_s.S
/*
 * Optimization of some functions from mpegvideo.c for armv5te
 * Copyright (c) 2007 Siarhei Siamashka <ssvb@users.sourceforge.net>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#include "libavutil/arm/asm.S"

/*
 * Special optimized version of dct_unquantize_h263_helper_c. It
 * requires the block to be at least 8 bytes aligned, and may process
 * more elements than requested. But it is guaranteed to never
 * process more than 64 elements provided that the count argument is <= 64,
 * so it is safe. This function is optimized for a common distribution
 * of values for nCoeffs (they are mostly a multiple of 8 plus one or
 * two extra elements), so it processes data as 8 elements
 * per loop iteration and optionally handles 2 extra elements
 * at the end.
 *
 * Inner loop should take 6 cycles per element on arm926ej-s (Nokia 770)
 */
.macro  dequant_t dst, src, mul, add, tmp
        rsbs    \tmp, ip, \src, asr #16
        it      gt
        addgt   \tmp, \add, #0
        it      lt
        rsblt   \tmp, \add, #0
        it      ne
        smlatbne \dst, \src, \mul, \tmp
.endm

.macro  dequant_b dst, src, mul, add, tmp
        rsbs    \tmp, ip, \src, lsl #16
        it      gt
        addgt   \tmp, \add, #0
        it      lt
        rsblt   \tmp, \add, #0
        it      ne
        smlabbne \dst, \src, \mul, \tmp
.endm

function ff_dct_unquantize_h263_armv5te, export=1
        push    {r4-r9,lr}
        mov     ip, #0
        subs    r3, r3, #2
        ble     2f
        ldrd    r4, r5, [r0, #0]
1:
        ldrd    r6, r7, [r0, #8]

        dequant_t r9, r4, r1, r2, r9
        dequant_t lr, r5, r1, r2, lr
        dequant_b r4, r4, r1, r2, r8
        dequant_b r5, r5, r1, r2, r8

        strh    r4, [r0], #2
        strh    r9, [r0], #2
        strh    r5, [r0], #2
        strh    lr, [r0], #2

        dequant_t r9, r6, r1, r2, r9
        dequant_t lr, r7, r1, r2, lr
        dequant_b r6, r6, r1, r2, r8
        dequant_b r7, r7, r1, r2, r8

        strh    r6, [r0], #2
        strh    r9, [r0], #2
        strh    r7, [r0], #2
        strh    lr, [r0], #2

        subs    r3, r3, #8
        it      gt
        ldrdgt  r4, r5, [r0, #0]  /* load data early to avoid load/use pipeline stall */
        bgt     1b

        adds    r3, r3, #2
        it      le
        pople   {r4-r9,pc}
2:
        ldrsh   r9, [r0, #0]
        ldrsh   lr, [r0, #2]
        mov     r8, r2
        cmp     r9, #0
        it      lt
        rsblt   r8, r2, #0
        it      ne
        smlabbne r9, r9, r1, r8
        mov     r8, r2
        cmp     lr, #0
        it      lt
        rsblt   r8, r2, #0
        it      ne
        smlabbne lr, lr, r1, r8
        strh    r9, [r0], #2
        strh    lr, [r0], #2
        pop     {r4-r9,pc}
endfunc
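Per element, the dequant_t/dequant_b macros implement the usual H.263 inverse-quantization rule: nonzero levels are scaled by qmul and biased away from zero by qadd. A C sketch of one element (hypothetical helper name; qmul/qadd correspond to the r1/r2 arguments):

#include <stdint.h>

static int16_t h263_dequant_one(int16_t level, int qmul, int qadd)
{
    if (level == 0)
        return 0;                            /* the 'ne' guard in the macros */
    if (level < 0)
        return (int16_t)(level * qmul - qadd);
    return (int16_t)(level * qmul + qadd);   /* bias away from zero */
}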
Akagi201/ffmpeg-xcode
13,553
ffmpeg-3.0.2/libavcodec/arm/jrevdct_arm.S
/*
   C-like prototype :
        void j_rev_dct_arm(DCTBLOCK data)

   With DCTBLOCK being a pointer to an array of 64 'signed shorts'

   Copyright (c) 2001 Lionel Ulmer (lionel.ulmer@free.fr / bbrox@bbrox.org)

   Permission is hereby granted, free of charge, to any person obtaining a copy
   of this software and associated documentation files (the "Software"), to deal
   in the Software without restriction, including without limitation the rights
   to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
   copies of the Software, and to permit persons to whom the Software is
   furnished to do so, subject to the following conditions:

   The above copyright notice and this permission notice shall be included in
   all copies or substantial portions of the Software.

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
   IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
   COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
   WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
   OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
   SOFTWARE.
*/

#include "libavutil/arm/asm.S"

#define FIX_0_298631336 2446
#define FIX_0_541196100 4433
#define FIX_0_765366865 6270
#define FIX_1_175875602 9633
#define FIX_1_501321110 12299
#define FIX_2_053119869 16819
#define FIX_3_072711026 25172
#define FIX_M_0_390180644 -3196
#define FIX_M_0_899976223 -7373
#define FIX_M_1_847759065 -15137
#define FIX_M_1_961570560 -16069
#define FIX_M_2_562915447 -20995
#define FIX_0xFFFF 0xFFFF

#define FIX_0_298631336_ID 0
#define FIX_0_541196100_ID 4
#define FIX_0_765366865_ID 8
#define FIX_1_175875602_ID 12
#define FIX_1_501321110_ID 16
#define FIX_2_053119869_ID 20
#define FIX_3_072711026_ID 24
#define FIX_M_0_390180644_ID 28
#define FIX_M_0_899976223_ID 32
#define FIX_M_1_847759065_ID 36
#define FIX_M_1_961570560_ID 40
#define FIX_M_2_562915447_ID 44
#define FIX_0xFFFF_ID 48

function ff_j_rev_dct_arm, export=1
        push {r0, r4 - r11, lr}

        mov lr, r0                      @ lr = pointer to the current row
        mov r12, #8                     @ r12 = row-counter
        movrel r11, const_array         @ r11 = base pointer to the constants array
row_loop:
        ldrsh r0, [lr, # 0]             @ r0 = 'd0'
        ldrsh r2, [lr, # 2]             @ r2 = 'd2'

        @ Optimization for rows that have all items except the first set to 0
        @ (this works as the int16_t are always 4-byte aligned)
        ldr r5, [lr, # 0]
        ldr r6, [lr, # 4]
        ldr r3, [lr, # 8]
        ldr r4, [lr, #12]
        orr r3, r3, r4
        orr r3, r3, r6
        orrs r5, r3, r5
        beq end_of_row_loop             @ nothing to be done as ALL of them are '0'
        orrs r3, r3, r2
        beq empty_row

        ldrsh r1, [lr, # 8]             @ r1 = 'd1'
        ldrsh r4, [lr, # 4]             @ r4 = 'd4'
        ldrsh r6, [lr, # 6]             @ r6 = 'd6'

        ldr r3, [r11, #FIX_0_541196100_ID]
        add r7, r2, r6
        ldr r5, [r11, #FIX_M_1_847759065_ID]
        mul r7, r3, r7                  @ r7 = z1
        ldr r3, [r11, #FIX_0_765366865_ID]
        mla r6, r5, r6, r7              @ r6 = tmp2
        add r5, r0, r4                  @ r5 = tmp0
        mla r2, r3, r2, r7              @ r2 = tmp3
        sub r3, r0, r4                  @ r3 = tmp1

        add r0, r2, r5, lsl #13         @ r0 = tmp10
        rsb r2, r2, r5, lsl #13         @ r2 = tmp13
        add r4, r6, r3, lsl #13         @ r4 = tmp11
        rsb r3, r6, r3, lsl #13         @ r3 = tmp12

        push {r0, r2, r3, r4}           @ save on the stack tmp10, tmp13, tmp12, tmp11

        ldrsh r3, [lr, #10]             @ r3 = 'd3'
        ldrsh r5, [lr, #12]             @ r5 = 'd5'
        ldrsh r7, [lr, #14]             @ r7 = 'd7'

        add r0, r3, r5                  @ r0 = 'z2'
        add r2, r1, r7                  @ r2 = 'z1'
        add r4, r3, r7                  @ r4 = 'z3'
        add r6, r1, r5                  @ r6 = 'z4'

        ldr r9, [r11, #FIX_1_175875602_ID]
        add r8, r4, r6                  @ r8 = z3 + z4
        ldr r10, [r11, #FIX_M_0_899976223_ID]
        mul r8, r9, r8                  @ r8 = 'z5'
        ldr r9, [r11, #FIX_M_2_562915447_ID]
        mul r2, r10, r2                 @ r2 = 'z1'
        ldr r10, [r11, #FIX_M_1_961570560_ID]
        mul r0, r9, r0                  @ r0 = 'z2'
        ldr r9, [r11, #FIX_M_0_390180644_ID]
        mla r4, r10, r4, r8             @ r4 = 'z3'
        ldr r10, [r11, #FIX_0_298631336_ID]
        mla r6, r9, r6, r8              @ r6 = 'z4'
        ldr r9, [r11, #FIX_2_053119869_ID]
        mla r7, r10, r7, r2             @ r7 = tmp0 + z1
        ldr r10, [r11, #FIX_3_072711026_ID]
        mla r5, r9, r5, r0              @ r5 = tmp1 + z2
        ldr r9, [r11, #FIX_1_501321110_ID]
        mla r3, r10, r3, r0             @ r3 = tmp2 + z2
        add r7, r7, r4                  @ r7 = tmp0
        mla r1, r9, r1, r2              @ r1 = tmp3 + z1
        add r5, r5, r6                  @ r5 = tmp1
        add r3, r3, r4                  @ r3 = tmp2
        add r1, r1, r6                  @ r1 = tmp3

        pop {r0, r2, r4, r6}            @ r0 = tmp10 / r2 = tmp13 / r4 = tmp12 / r6 = tmp11
                                        @ r1 = tmp3  / r3 = tmp2  / r5 = tmp1  / r7 = tmp0

        @ Compute DESCALE(tmp10 + tmp3, CONST_BITS-PASS1_BITS)
        add r8, r0, r1
        add r8, r8, #(1<<10)
        mov r8, r8, asr #11
        strh r8, [lr, # 0]

        @ Compute DESCALE(tmp10 - tmp3, CONST_BITS-PASS1_BITS)
        sub r8, r0, r1
        add r8, r8, #(1<<10)
        mov r8, r8, asr #11
        strh r8, [lr, #14]

        @ Compute DESCALE(tmp11 + tmp2, CONST_BITS-PASS1_BITS)
        add r8, r6, r3
        add r8, r8, #(1<<10)
        mov r8, r8, asr #11
        strh r8, [lr, # 2]

        @ Compute DESCALE(tmp11 - tmp2, CONST_BITS-PASS1_BITS)
        sub r8, r6, r3
        add r8, r8, #(1<<10)
        mov r8, r8, asr #11
        strh r8, [lr, #12]

        @ Compute DESCALE(tmp12 + tmp1, CONST_BITS-PASS1_BITS)
        add r8, r4, r5
        add r8, r8, #(1<<10)
        mov r8, r8, asr #11
        strh r8, [lr, # 4]

        @ Compute DESCALE(tmp12 - tmp1, CONST_BITS-PASS1_BITS)
        sub r8, r4, r5
        add r8, r8, #(1<<10)
        mov r8, r8, asr #11
        strh r8, [lr, #10]

        @ Compute DESCALE(tmp13 + tmp0, CONST_BITS-PASS1_BITS)
        add r8, r2, r7
        add r8, r8, #(1<<10)
        mov r8, r8, asr #11
        strh r8, [lr, # 6]

        @ Compute DESCALE(tmp13 - tmp0, CONST_BITS-PASS1_BITS)
        sub r8, r2, r7
        add r8, r8, #(1<<10)
        mov r8, r8, asr #11
        strh r8, [lr, # 8]

        @ End of row loop
        add lr, lr, #16
        subs r12, r12, #1
        bne row_loop
        beq start_column_loop

empty_row:
        ldr r1, [r11, #FIX_0xFFFF_ID]
        mov r0, r0, lsl #2
        and r0, r0, r1
        add r0, r0, r0, lsl #16
        str r0, [lr, # 0]
        str r0, [lr, # 4]
        str r0, [lr, # 8]
        str r0, [lr, #12]

end_of_row_loop:
        @ End of loop
        add lr, lr, #16
        subs r12, r12, #1
        bne row_loop

start_column_loop:
        @ Start of column loop
        pop {lr}
        mov r12, #8
column_loop:
        ldrsh r0, [lr, #( 0*8)]         @ r0 = 'd0'
        ldrsh r2, [lr, #( 4*8)]         @ r2 = 'd2'
        ldrsh r4, [lr, #( 8*8)]         @ r4 = 'd4'
        ldrsh r6, [lr, #(12*8)]         @ r6 = 'd6'

        ldr r3, [r11, #FIX_0_541196100_ID]
        add r1, r2, r6
        ldr r5, [r11, #FIX_M_1_847759065_ID]
        mul r1, r3, r1                  @ r1 = z1
        ldr r3, [r11, #FIX_0_765366865_ID]
        mla r6, r5, r6, r1              @ r6 = tmp2
        add r5, r0, r4                  @ r5 = tmp0
        mla r2, r3, r2, r1              @ r2 = tmp3
        sub r3, r0, r4                  @ r3 = tmp1

        add r0, r2, r5, lsl #13         @ r0 = tmp10
        rsb r2, r2, r5, lsl #13         @ r2 = tmp13
        add r4, r6, r3, lsl #13         @ r4 = tmp11
        rsb r6, r6, r3, lsl #13         @ r6 = tmp12

        ldrsh r1, [lr, #( 2*8)]         @ r1 = 'd1'
        ldrsh r3, [lr, #( 6*8)]         @ r3 = 'd3'
        ldrsh r5, [lr, #(10*8)]         @ r5 = 'd5'
        ldrsh r7, [lr, #(14*8)]         @ r7 = 'd7'

        @ Check for empty odd column (happens about 20 to 25 % of the time according to my stats)
        orr r9, r1, r3
        orr r10, r5, r7
        orrs r10, r9, r10
        beq empty_odd_column

        push {r0, r2, r4, r6}           @ save on the stack tmp10, tmp13, tmp11, tmp12

        add r0, r3, r5                  @ r0 = 'z2'
        add r2, r1, r7                  @ r2 = 'z1'
        add r4, r3, r7                  @ r4 = 'z3'
        add r6, r1, r5                  @ r6 = 'z4'

        ldr r9, [r11, #FIX_1_175875602_ID]
        add r8, r4, r6
        ldr r10, [r11, #FIX_M_0_899976223_ID]
        mul r8, r9, r8                  @ r8 = 'z5'
        ldr r9, [r11, #FIX_M_2_562915447_ID]
        mul r2, r10, r2                 @ r2 = 'z1'
        ldr r10, [r11, #FIX_M_1_961570560_ID]
        mul r0, r9, r0                  @ r0 = 'z2'
        ldr r9, [r11, #FIX_M_0_390180644_ID]
        mla r4, r10, r4, r8             @ r4 = 'z3'
        ldr r10, [r11, #FIX_0_298631336_ID]
        mla r6, r9, r6, r8              @ r6 = 'z4'
        ldr r9, [r11, #FIX_2_053119869_ID]
        mla r7, r10, r7, r2             @ r7 = tmp0 + z1
        ldr r10, [r11, #FIX_3_072711026_ID]
        mla r5, r9, r5, r0              @ r5 = tmp1 + z2
        ldr r9, [r11, #FIX_1_501321110_ID]
        mla r3, r10, r3, r0             @ r3 = tmp2 + z2
        add r7, r7, r4                  @ r7 = tmp0
        mla r1, r9, r1, r2              @ r1 = tmp3 + z1
        add r5, r5, r6                  @ r5 = tmp1
        add r3, r3, r4                  @ r3 = tmp2
        add r1, r1, r6                  @ r1 = tmp3

        pop {r0, r2, r4, r6}            @ r0 = tmp10 / r2 = tmp13 / r4 = tmp11 / r6 = tmp12
                                        @ r1 = tmp3  / r3 = tmp2  / r5 = tmp1  / r7 = tmp0

        @ Compute DESCALE(tmp10 + tmp3, CONST_BITS+PASS1_BITS+3)
        add r8, r0, r1
        add r8, r8, #(1<<17)
        mov r8, r8, asr #18
        strh r8, [lr, #( 0*8)]

        @ Compute DESCALE(tmp10 - tmp3, CONST_BITS+PASS1_BITS+3)
        sub r8, r0, r1
        add r8, r8, #(1<<17)
        mov r8, r8, asr #18
        strh r8, [lr, #(14*8)]

        @ Compute DESCALE(tmp11 + tmp2, CONST_BITS+PASS1_BITS+3)
        add r8, r4, r3
        add r8, r8, #(1<<17)
        mov r8, r8, asr #18
        strh r8, [lr, #( 2*8)]

        @ Compute DESCALE(tmp11 - tmp2, CONST_BITS+PASS1_BITS+3)
        sub r8, r4, r3
        add r8, r8, #(1<<17)
        mov r8, r8, asr #18
        strh r8, [lr, #(12*8)]

        @ Compute DESCALE(tmp12 + tmp1, CONST_BITS+PASS1_BITS+3)
        add r8, r6, r5
        add r8, r8, #(1<<17)
        mov r8, r8, asr #18
        strh r8, [lr, #( 4*8)]

        @ Compute DESCALE(tmp12 - tmp1, CONST_BITS+PASS1_BITS+3)
        sub r8, r6, r5
        add r8, r8, #(1<<17)
        mov r8, r8, asr #18
        strh r8, [lr, #(10*8)]

        @ Compute DESCALE(tmp13 + tmp0, CONST_BITS+PASS1_BITS+3)
        add r8, r2, r7
        add r8, r8, #(1<<17)
        mov r8, r8, asr #18
        strh r8, [lr, #( 6*8)]

        @ Compute DESCALE(tmp13 - tmp0, CONST_BITS+PASS1_BITS+3)
        sub r8, r2, r7
        add r8, r8, #(1<<17)
        mov r8, r8, asr #18
        strh r8, [lr, #( 8*8)]

        @ End of column loop
        add lr, lr, #2
        subs r12, r12, #1
        bne column_loop
        beq the_end

empty_odd_column:
        @ Compute DESCALE(tmp10 + tmp3, CONST_BITS+PASS1_BITS+3)
        @ Compute DESCALE(tmp10 - tmp3, CONST_BITS+PASS1_BITS+3)
        add r0, r0, #(1<<17)
        mov r0, r0, asr #18
        strh r0, [lr, #( 0*8)]
        strh r0, [lr, #(14*8)]

        @ Compute DESCALE(tmp11 + tmp2, CONST_BITS+PASS1_BITS+3)
        @ Compute DESCALE(tmp11 - tmp2, CONST_BITS+PASS1_BITS+3)
        add r4, r4, #(1<<17)
        mov r4, r4, asr #18
        strh r4, [lr, #( 2*8)]
        strh r4, [lr, #(12*8)]

        @ Compute DESCALE(tmp12 + tmp1, CONST_BITS+PASS1_BITS+3)
        @ Compute DESCALE(tmp12 - tmp1, CONST_BITS+PASS1_BITS+3)
        add r6, r6, #(1<<17)
        mov r6, r6, asr #18
        strh r6, [lr, #( 4*8)]
        strh r6, [lr, #(10*8)]

        @ Compute DESCALE(tmp13 + tmp0, CONST_BITS+PASS1_BITS+3)
        @ Compute DESCALE(tmp13 - tmp0, CONST_BITS+PASS1_BITS+3)
        add r2, r2, #(1<<17)
        mov r2, r2, asr #18
        strh r2, [lr, #( 6*8)]
        strh r2, [lr, #( 8*8)]

        @ End of column loop
        add lr, lr, #2
        subs r12, r12, #1
        bne column_loop

the_end:
        @ The end....
        pop {r4 - r11, pc}
endfunc

const const_array
        .word FIX_0_298631336
        .word FIX_0_541196100
        .word FIX_0_765366865
        .word FIX_1_175875602
        .word FIX_1_501321110
        .word FIX_2_053119869
        .word FIX_3_072711026
        .word FIX_M_0_390180644
        .word FIX_M_0_899976223
        .word FIX_M_1_847759065
        .word FIX_M_1_961570560
        .word FIX_M_2_562915447
        .word FIX_0xFFFF
endconst
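The even part of each pass follows jpeglib's integer "islow" IDCT with CONST_BITS = 13. As a reading aid, here is a C sketch of the tmp10..tmp13 computation annotated in the row loop above (invented function name; constants inlined from the FIX_* defines):

#include <stdint.h>

static void jrev_even_part(int d0, int d2, int d4, int d6,
                           int32_t *tmp10, int32_t *tmp11,
                           int32_t *tmp12, int32_t *tmp13)
{
    int32_t z1   = (d2 + d6) * 4433;         /* FIX_0_541196100 */
    int32_t tmp2 = z1 - d6 * 15137;          /* z1 + d6 * FIX_M_1_847759065 */
    int32_t tmp3 = z1 + d2 * 6270;           /* z1 + d2 * FIX_0_765366865 */
    int32_t tmp0 = (int32_t)(d0 + d4) << 13;
    int32_t tmp1 = (int32_t)(d0 - d4) << 13;

    *tmp10 = tmp0 + tmp3;
    *tmp13 = tmp0 - tmp3;
    *tmp11 = tmp1 + tmp2;
    *tmp12 = tmp1 - tmp2;
}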
Akagi201/ffmpeg-xcode
6,403
ffmpeg-3.0.2/libavcodec/arm/rv34dsp_neon.S
/*
 * Copyright (c) 2011 Janne Grunau <janne-libav@jannau.net>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"
#include "neon.S"

.macro rv34_inv_transform r0
        vld1.16     {q14-q15}, [\r0,:128]
        vmov.s16    d0, #13
        vshll.s16   q12, d29, #3
        vshll.s16   q13, d29, #4
        vshll.s16   q9,  d31, #3
        vshll.s16   q1,  d31, #4
        vmull.s16   q10, d28, d0
        vmlal.s16   q10, d30, d0
        vmull.s16   q11, d28, d0
        vmlsl.s16   q11, d30, d0
        vsubw.s16   q12, q12, d29       @ z2 = block[i+4*1]*7
        vaddw.s16   q13, q13, d29       @ z3 = block[i+4*1]*17
        vsubw.s16   q9,  q9,  d31
        vaddw.s16   q1,  q1,  d31
        vadd.s32    q13, q13, q9        @ z3 = 17*block[i+4*1] +  7*block[i+4*3]
        vsub.s32    q12, q12, q1        @ z2 =  7*block[i+4*1] - 17*block[i+4*3]
        vadd.s32    q1,  q10, q13       @ z0 + z3
        vadd.s32    q2,  q11, q12       @ z1 + z2
        vsub.s32    q8,  q10, q13       @ z0 - z3
        vsub.s32    q3,  q11, q12       @ z1 - z2
        vtrn.32     q1,  q2
        vtrn.32     q3,  q8
        vswp        d3,  d6
        vswp        d5,  d16
        vmov.s32    d0,  #13
        vadd.s32    q10, q1,  q3
        vsub.s32    q11, q1,  q3
        vshl.s32    q12, q2,  #3
        vshl.s32    q9,  q2,  #4
        vmul.s32    q13, q11, d0[0]
        vshl.s32    q11, q8,  #4
        vadd.s32    q9,  q9,  q2
        vshl.s32    q15, q8,  #3
        vsub.s32    q12, q12, q2
        vadd.s32    q11, q11, q8
        vmul.s32    q14, q10, d0[0]
        vsub.s32    q8,  q15, q8
        vsub.s32    q12, q12, q11
        vadd.s32    q9,  q9,  q8
        vadd.s32    q2,  q13, q12       @ z1 + z2
        vadd.s32    q1,  q14, q9        @ z0 + z3
        vsub.s32    q3,  q13, q12       @ z1 - z2
        vsub.s32    q15, q14, q9        @ z0 - z3
.endm

/* void rv34_idct_add_c(uint8_t *dst, int stride, int16_t *block) */
function ff_rv34_idct_add_neon, export=1
        mov         r3, r0
        rv34_inv_transform r2
        vmov.i16    q12, #0
        vrshrn.s32  d16, q1,  #10       @ (z0 + z3) >> 10
        vrshrn.s32  d17, q2,  #10       @ (z1 + z2) >> 10
        vrshrn.s32  d18, q3,  #10       @ (z1 - z2) >> 10
        vrshrn.s32  d19, q15, #10       @ (z0 - z3) >> 10
        vld1.32     {d28[]},  [r0,:32], r1
        vld1.32     {d29[]},  [r0,:32], r1
        vtrn.32     q8,  q9
        vld1.32     {d28[1]}, [r0,:32], r1
        vld1.32     {d29[1]}, [r0,:32], r1
        vst1.16     {q12}, [r2,:128]!   @ memset(block, 0, 16)
        vst1.16     {q12}, [r2,:128]    @ memset(block+16, 0, 16)
        vtrn.16     d16, d17
        vtrn.32     d28, d29
        vtrn.16     d18, d19
        vaddw.u8    q0,  q8,  d28
        vaddw.u8    q1,  q9,  d29
        vqmovun.s16 d28, q0
        vqmovun.s16 d29, q1
        vst1.32     {d28[0]}, [r3,:32], r1
        vst1.32     {d28[1]}, [r3,:32], r1
        vst1.32     {d29[0]}, [r3,:32], r1
        vst1.32     {d29[1]}, [r3,:32], r1
        bx          lr
endfunc

/* void rv34_inv_transform_noround_neon(int16_t *block); */
function ff_rv34_inv_transform_noround_neon, export=1
        rv34_inv_transform r0
        vshl.s32    q11, q2,  #1
        vshl.s32    q10, q1,  #1
        vshl.s32    q12, q3,  #1
        vshl.s32    q13, q15, #1
        vadd.s32    q11, q11, q2
        vadd.s32    q10, q10, q1
        vadd.s32    q12, q12, q3
        vadd.s32    q13, q13, q15
        vshrn.s32   d0,  q10, #11       @ (z0 + z3)*3 >> 11
        vshrn.s32   d1,  q11, #11       @ (z1 + z2)*3 >> 11
        vshrn.s32   d2,  q12, #11       @ (z1 - z2)*3 >> 11
        vshrn.s32   d3,  q13, #11       @ (z0 - z3)*3 >> 11
        vst4.16     {d0[0], d1[0], d2[0], d3[0]}, [r0,:64]!
        vst4.16     {d0[1], d1[1], d2[1], d3[1]}, [r0,:64]!
        vst4.16     {d0[2], d1[2], d2[2], d3[2]}, [r0,:64]!
        vst4.16     {d0[3], d1[3], d2[3], d3[3]}, [r0,:64]!
        bx          lr
endfunc

/* void ff_rv34_idct_dc_add_neon(uint8_t *dst, int stride, int dc) */
function ff_rv34_idct_dc_add_neon, export=1
        mov         r3, r0
        vld1.32     {d28[]},  [r0,:32], r1
        vld1.32     {d29[]},  [r0,:32], r1
        vdup.16     d0, r2
        vmov.s16    d1, #169
        vld1.32     {d28[1]}, [r0,:32], r1
        vmull.s16   q1, d0, d1          @ dc * 13 * 13
        vld1.32     {d29[1]}, [r0,:32], r1
        vrshrn.s32  d0, q1, #10         @ (dc * 13 * 13 + 0x200) >> 10
        vmov        d1, d0
        vaddw.u8    q2, q0, d28
        vaddw.u8    q3, q0, d29
        vqmovun.s16 d28, q2
        vqmovun.s16 d29, q3
        vst1.32     {d28[0]}, [r3,:32], r1
        vst1.32     {d29[0]}, [r3,:32], r1
        vst1.32     {d28[1]}, [r3,:32], r1
        vst1.32     {d29[1]}, [r3,:32], r1
        bx          lr
endfunc

/* void rv34_inv_transform_dc_noround_c(int16_t *block) */
function ff_rv34_inv_transform_noround_dc_neon, export=1
        vld1.16     {d28[]}, [r0,:16]   @ block[0]
        vmov.i16    d4, #251
        vorr.s16    d4, #256            @ 13^2 * 3
        vmull.s16   q3, d28, d4
        vshrn.s32   d0, q3, #11
        vmov.i16    d1, d0
        vst1.64     {q0}, [r0,:128]!
        vst1.64     {q0}, [r0,:128]!
        bx          lr
endfunc
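The z0..z3 comments in rv34_inv_transform describe a 4-point transform built from the factors 13, 7 and 17 (the shifts compute x*7 as (x<<3) - x and x*17 as (x<<4) + x). One pass, sketched in C under those comments (hypothetical name; the NEON code applies this first to rows, transposes, then to columns):

#include <stdint.h>

static void rv34_idct4_ref(const int16_t b[4], int32_t out[4])
{
    int z0 = 13 * (b[0] + b[2]);
    int z1 = 13 * (b[0] - b[2]);
    int z2 =  7 * b[1] - 17 * b[3];
    int z3 = 17 * b[1] +  7 * b[3];

    out[0] = z0 + z3;
    out[1] = z1 + z2;
    out[2] = z1 - z2;
    out[3] = z0 - z3;
}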
Akagi201/ffmpeg-xcode
5,842
ffmpeg-3.0.2/libavcodec/arm/ac3dsp_neon.S
/*
 * Copyright (c) 2011 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"

function ff_ac3_max_msb_abs_int16_neon, export=1
        vmov.i16    q0,  #0
        vmov.i16    q2,  #0
1:      vld1.16     {q1}, [r0,:128]!
        vabs.s16    q1,  q1
        vld1.16     {q3}, [r0,:128]!
        vabs.s16    q3,  q3
        vorr        q0,  q0,  q1
        vorr        q2,  q2,  q3
        subs        r1,  r1,  #16
        bgt         1b
        vorr        q0,  q0,  q2
        vorr        d0,  d0,  d1
        vpmax.u16   d0,  d0,  d0
        vpmax.u16   d0,  d0,  d0
        vmov.u16    r0,  d0[0]
        bx          lr
endfunc

function ff_ac3_exponent_min_neon, export=1
        cmp         r1,  #0
        it          eq
        bxeq        lr
        push        {lr}
        mov         r12, #256
1:
        vld1.8      {q0}, [r0,:128]
        mov         lr,  r1
        add         r3,  r0,  #256
2:
        vld1.8      {q1}, [r3,:128], r12
        subs        lr,  lr,  #1
        vmin.u8     q0,  q0,  q1
        bgt         2b
        subs        r2,  r2,  #16
        vst1.8      {q0}, [r0,:128]!
        bgt         1b
        pop         {pc}
endfunc

function ff_ac3_lshift_int16_neon, export=1
        vdup.16     q0,  r2
1:      vld1.16     {q1}, [r0,:128]
        vshl.s16    q1,  q1,  q0
        vst1.16     {q1}, [r0,:128]!
        subs        r1,  r1,  #8
        bgt         1b
        bx          lr
endfunc

function ff_ac3_rshift_int32_neon, export=1
        rsb         r2,  r2,  #0
        vdup.32     q0,  r2
1:      vld1.32     {q1}, [r0,:128]
        vshl.s32    q1,  q1,  q0
        vst1.32     {q1}, [r0,:128]!
        subs        r1,  r1,  #4
        bgt         1b
        bx          lr
endfunc

function ff_float_to_fixed24_neon, export=1
1:      vld1.32     {q0-q1},  [r1,:128]!
        vcvt.s32.f32 q0, q0, #24
        vld1.32     {q2-q3},  [r1,:128]!
        vcvt.s32.f32 q1, q1, #24
        vcvt.s32.f32 q2, q2, #24
        vst1.32     {q0-q1},  [r0,:128]!
        vcvt.s32.f32 q3, q3, #24
        vst1.32     {q2-q3},  [r0,:128]!
        subs        r2,  r2,  #16
        bgt         1b
        bx          lr
endfunc

function ff_ac3_extract_exponents_neon, export=1
        vmov.i32    q15, #8
1:
        vld1.32     {q0}, [r1,:128]!
        vabs.s32    q1,  q0
        vclz.i32    q3,  q1
        vsub.i32    q3,  q3,  q15
        vmovn.i32   d6,  q3
        vmovn.i16   d6,  q3
        vst1.32     {d6[0]}, [r0,:32]!
        subs        r2,  r2,  #4
        bgt         1b
        bx          lr
endfunc

function ff_apply_window_int16_neon, export=1
        push        {r4,lr}
        add         r4,  r1,  r3,  lsl #1
        add         lr,  r0,  r3,  lsl #1
        sub         r4,  r4,  #16
        sub         lr,  lr,  #16
        mov         r12, #-16
1:
        vld1.16     {q0}, [r1,:128]!
        vld1.16     {q2}, [r2,:128]!
        vld1.16     {q1}, [r4,:128], r12
        vrev64.16   q3,  q2
        vqrdmulh.s16 q0, q0,  q2
        vqrdmulh.s16 d2, d2,  d7
        vqrdmulh.s16 d3, d3,  d6
        vst1.16     {q0}, [r0,:128]!
        vst1.16     {q1}, [lr,:128], r12
        subs        r3,  r3,  #16
        bgt         1b
        pop         {r4,pc}
endfunc

function ff_ac3_sum_square_butterfly_int32_neon, export=1
        vmov.i64    q0,  #0
        vmov.i64    q1,  #0
        vmov.i64    q2,  #0
        vmov.i64    q3,  #0
1:
        vld1.32     {d16}, [r1]!
        vld1.32     {d17}, [r2]!
        vadd.s32    d18, d16, d17
        vsub.s32    d19, d16, d17
        vmlal.s32   q0,  d16, d16
        vmlal.s32   q1,  d17, d17
        vmlal.s32   q2,  d18, d18
        vmlal.s32   q3,  d19, d19
        subs        r3,  r3,  #2
        bgt         1b
        vadd.s64    d0,  d0,  d1
        vadd.s64    d1,  d2,  d3
        vadd.s64    d2,  d4,  d5
        vadd.s64    d3,  d6,  d7
        vst1.64     {q0-q1},  [r0]
        bx          lr
endfunc

function ff_ac3_sum_square_butterfly_float_neon, export=1
        vmov.f32    q0,  #0.0
        vmov.f32    q1,  #0.0
1:
        vld1.32     {d16}, [r1]!
        vld1.32     {d17}, [r2]!
        vadd.f32    d18, d16, d17
        vsub.f32    d19, d16, d17
        vmla.f32    d0,  d16, d16
        vmla.f32    d1,  d17, d17
        vmla.f32    d2,  d18, d18
        vmla.f32    d3,  d19, d19
        subs        r3,  r3,  #2
        bgt         1b
        vpadd.f32   d0,  d0,  d1
        vpadd.f32   d1,  d2,  d3
        vst1.32     {q0}, [r0]
        bx          lr
endfunc
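In ff_ac3_extract_exponents_neon, the vclz-minus-8 step works because clz(|v|) - 8 equals 23 - floor(log2(|v|)) for nonzero coefficients, and clz(0) == 32 maps zero to an exponent of 24. A portable C sketch (invented name; the explicit bit scan stands in for vclz, and coefficients are assumed to fit in 25 bits as AC-3 fixed-point data does):

#include <stdint.h>
#include <stdlib.h>

static void ac3_extract_exponents_ref(uint8_t *exp, const int32_t *coef, int n)
{
    for (int i = 0; i < n; i++) {
        uint32_t v = (uint32_t)abs(coef[i]);
        int clz = 0;
        while (clz < 32 && !(v & (0x80000000u >> clz)))
            clz++;                       /* count leading zeros, like vclz */
        exp[i] = (uint8_t)(clz - 8);     /* yields 24 for v == 0 */
    }
}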
Akagi201/ffmpeg-xcode
4,944
ffmpeg-3.0.2/libavcodec/arm/idctdsp_neon.S
/*
 * ARM-NEON-optimized IDCT functions
 * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"

function ff_put_pixels_clamped_neon, export=1
        vld1.16     {d16-d19}, [r0,:128]!
        vqmovun.s16 d0, q8
        vld1.16     {d20-d23}, [r0,:128]!
        vqmovun.s16 d1, q9
        vld1.16     {d24-d27}, [r0,:128]!
        vqmovun.s16 d2, q10
        vld1.16     {d28-d31}, [r0,:128]!
        vqmovun.s16 d3, q11
        vst1.8      {d0}, [r1,:64], r2
        vqmovun.s16 d4, q12
        vst1.8      {d1}, [r1,:64], r2
        vqmovun.s16 d5, q13
        vst1.8      {d2}, [r1,:64], r2
        vqmovun.s16 d6, q14
        vst1.8      {d3}, [r1,:64], r2
        vqmovun.s16 d7, q15
        vst1.8      {d4}, [r1,:64], r2
        vst1.8      {d5}, [r1,:64], r2
        vst1.8      {d6}, [r1,:64], r2
        vst1.8      {d7}, [r1,:64], r2
        bx          lr
endfunc

function ff_put_signed_pixels_clamped_neon, export=1
        vmov.u8     d31, #128
        vld1.16     {d16-d17}, [r0,:128]!
        vqmovn.s16  d0, q8
        vld1.16     {d18-d19}, [r0,:128]!
        vqmovn.s16  d1, q9
        vld1.16     {d16-d17}, [r0,:128]!
        vqmovn.s16  d2, q8
        vld1.16     {d18-d19}, [r0,:128]!
        vadd.u8     d0, d0, d31
        vld1.16     {d20-d21}, [r0,:128]!
        vadd.u8     d1, d1, d31
        vld1.16     {d22-d23}, [r0,:128]!
        vadd.u8     d2, d2, d31
        vst1.8      {d0}, [r1,:64], r2
        vqmovn.s16  d3, q9
        vst1.8      {d1}, [r1,:64], r2
        vqmovn.s16  d4, q10
        vst1.8      {d2}, [r1,:64], r2
        vqmovn.s16  d5, q11
        vld1.16     {d24-d25}, [r0,:128]!
        vadd.u8     d3, d3, d31
        vld1.16     {d26-d27}, [r0,:128]!
        vadd.u8     d4, d4, d31
        vadd.u8     d5, d5, d31
        vst1.8      {d3}, [r1,:64], r2
        vqmovn.s16  d6, q12
        vst1.8      {d4}, [r1,:64], r2
        vqmovn.s16  d7, q13
        vst1.8      {d5}, [r1,:64], r2
        vadd.u8     d6, d6, d31
        vadd.u8     d7, d7, d31
        vst1.8      {d6}, [r1,:64], r2
        vst1.8      {d7}, [r1,:64], r2
        bx          lr
endfunc

function ff_add_pixels_clamped_neon, export=1
        mov         r3, r1
        vld1.8      {d16}, [r1,:64], r2
        vld1.16     {d0-d1}, [r0,:128]!
        vaddw.u8    q0, q0, d16
        vld1.8      {d17}, [r1,:64], r2
        vld1.16     {d2-d3}, [r0,:128]!
        vqmovun.s16 d0, q0
        vld1.8      {d18}, [r1,:64], r2
        vaddw.u8    q1, q1, d17
        vld1.16     {d4-d5}, [r0,:128]!
        vaddw.u8    q2, q2, d18
        vst1.8      {d0}, [r3,:64], r2
        vqmovun.s16 d2, q1
        vld1.8      {d19}, [r1,:64], r2
        vld1.16     {d6-d7}, [r0,:128]!
        vaddw.u8    q3, q3, d19
        vqmovun.s16 d4, q2
        vst1.8      {d2}, [r3,:64], r2
        vld1.8      {d16}, [r1,:64], r2
        vqmovun.s16 d6, q3
        vld1.16     {d0-d1}, [r0,:128]!
        vaddw.u8    q0, q0, d16
        vst1.8      {d4}, [r3,:64], r2
        vld1.8      {d17}, [r1,:64], r2
        vld1.16     {d2-d3}, [r0,:128]!
        vaddw.u8    q1, q1, d17
        vst1.8      {d6}, [r3,:64], r2
        vqmovun.s16 d0, q0
        vld1.8      {d18}, [r1,:64], r2
        vld1.16     {d4-d5}, [r0,:128]!
        vaddw.u8    q2, q2, d18
        vst1.8      {d0}, [r3,:64], r2
        vqmovun.s16 d2, q1
        vld1.8      {d19}, [r1,:64], r2
        vqmovun.s16 d4, q2
        vld1.16     {d6-d7}, [r0,:128]!
        vaddw.u8    q3, q3, d19
        vst1.8      {d2}, [r3,:64], r2
        vqmovun.s16 d6, q3
        vst1.8      {d4}, [r3,:64], r2
        vst1.8      {d6}, [r3,:64], r2
        bx          lr
endfunc
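ff_add_pixels_clamped_neon widens each destination byte (vaddw.u8), adds the 16-bit residual and narrows back with unsigned saturation (vqmovun.s16). A scalar sketch of the same operation on one 8x8 block (invented name; line_size is assumed to be the byte stride, matching the r2 argument):

#include <stdint.h>

static void add_pixels_clamped_ref(const int16_t *block, uint8_t *dest,
                                   int line_size)
{
    for (int y = 0; y < 8; y++) {
        for (int x = 0; x < 8; x++) {
            int v = dest[x] + block[x];
            dest[x] = v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;  /* saturate */
        }
        block += 8;
        dest  += line_size;
    }
}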