repo_id
stringlengths 5
115
| size
int64 590
5.01M
| file_path
stringlengths 4
212
| content
stringlengths 590
5.01M
|
|---|---|---|---|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/ADC/ADC_ContinuousConversion_TriggerSW_Init/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system intitialization function.*/
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/ADC/ADC_ContinuousConversion_TriggerSW_Init/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,445
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/ADC/ADC_SingleConversion_TriggerSW_IT/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TS
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/ADC/ADC_SingleConversion_TriggerSW_IT/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system intitialization function.*/
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
/******************************************************************************
 * Cortex-M0 vector table. Placed in the dedicated .isr_vector section so the
 * linker script can pin it at the start of flash, where the core fetches the
 * initial SP and the reset vector. Entry order is fixed by the STM32F072
 * interrupt map: 16 system exception slots followed by 32 peripheral IRQs.
 ******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
/* NOTE(review): .size is evaluated before the g_pfnVectors label, so the
   recorded size is 0; harmless symbol metadata, kept byte-identical to the
   ST-supplied template. */
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack /* initial stack pointer: top of RAM, from linker script */
.word Reset_Handler /* reset vector */
.word NMI_Handler
.word HardFault_Handler
.word 0 /* reserved (no MemManage fault on Cortex-M0) */
.word 0 /* reserved (no BusFault) */
.word 0 /* reserved (no UsageFault) */
.word 0 /* reserved */
.word 0 /* reserved */
.word 0 /* reserved */
.word 0 /* reserved */
.word SVC_Handler
.word 0 /* reserved (no DebugMonitor on Cortex-M0) */
.word 0 /* reserved */
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
/* Each handler is declared weak and aliased to Default_Handler, so an
   application only has to define the handlers it actually uses; a strong
   definition of the same name elsewhere overrides the alias at link time.
   .thumb_set (rather than .set) also marks the alias as a Thumb function so
   the vector table entry gets its LSB set correctly. */
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/ADC/ADC_SingleConversion_TriggerSW_IT/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup ; IAR cstartup module; replaceable by a user-defined one
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3) ; stack block, 2^3 = 8-byte aligned
SECTION .intvec:CODE:NOROOT(2) ; vector table section, 2^2 = 4-byte aligned
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
; Cortex-M0 vector table: initial SP, 15 system exception slots, then the
; 32 STM32F072 peripheral IRQ slots. Slot order is fixed by the hardware.
__vector_table
DCD sfe(CSTACK) ; initial SP = end (top) address of CSTACK
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
; Reset_Handler: first code executed after reset.
; Calls SystemInit (clock/system setup) then jumps to the IAR runtime entry
; __iar_program_start, which initializes data/bss and calls main().
; Declared PUBWEAK so an application can supply its own reset handler.
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0 ; SystemInit() — returns here
LDR R0, =__iar_program_start
BX R0 ; tail-jump into the C runtime; never returns
; Default exception/IRQ handlers. Each is PUBWEAK so a strong definition in
; application code overrides it at link time; the default body is an infinite
; branch-to-self, preserving system state for examination by a debugger.
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,445
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/ADC/ADC_ContinuousConversion_TriggerSW_LowPower/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400 ; 1 KiB stack; tune per application
AREA STACK, NOINIT, READWRITE, ALIGN=3 ; 2^3 = 8-byte aligned, zero-cost (NOINIT)
Stack_Mem SPACE Stack_Size
__initial_sp ; label at top of stack; becomes vector-table entry 0
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200 ; 512 B heap for the C library allocator
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8 ; code preserves 8-byte stack alignment (AAPCS)
THUMB
; Vector Table Mapped to Address 0 at Reset.
; Cortex-M0 layout: initial SP, 15 system exception slots, then the 32
; STM32F072 peripheral IRQ slots; slot order is fixed by the hardware.
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine: calls SystemInit (clock/system setup) then jumps to
; the ARM C library entry __main, which initializes data/bss and calls main().
; Exported [WEAK] so an application-supplied Reset_Handler overrides it.
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0 ; SystemInit() — returns here
LDR R0, =__main
BX R0 ; tail-jump into the C runtime; never returns
ENDP
; Dummy Exception Handlers (infinite loops which can be modified):
; each spins in place ("B .") so a debugger can inspect the fault state.
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
; Default_Handler: shared catch-all for every peripheral IRQ. All IRQ symbols
; are exported [WEAK] and defined as labels on the same infinite loop, so any
; unhandled interrupt traps here; a strong definition elsewhere overrides the
; corresponding weak symbol at link time.
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B . ; trap unhandled interrupts; state preserved for the debugger
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
; With MicroLIB the linker uses the exported stack/heap symbols directly;
; with the full C library, __user_initial_stackheap returns the region
; bounds per the ARM C library convention:
;   R0 = heap base, R1 = stack top (initial SP), R2 = heap limit, R3 = stack base
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/ADC/ADC_ContinuousConversion_TriggerSW_LowPower/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified /* unified ARM/Thumb assembly syntax */
.cpu cortex-m0
.fpu softvfp /* no hardware FPU on Cortex-M0; software floating point */
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
/* Reset_Handler: entry point after reset.
 * 1) set SP from the linker-provided _estack,
 * 2) copy .data initializers from flash (_sidata) to RAM (_sdata.._edata),
 * 3) zero the .bss segment (_sbss.._ebss),
 * 4) SystemInit, 5) C static constructors, 6) main.
 * Register roles in the copy loop: r1 = byte offset, r0 = _sdata base
 * (reloaded each pass), r2 = next destination address, r3 = scratch. */
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0 /* byte offset starts at 0 */
b LoopCopyDataInit /* enter loop at the bounds check */
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1] /* load one word from flash image */
str r3, [r0, r1] /* store it to RAM at _sdata + offset */
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit /* loop while dest < _edata (unsigned compare) */
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever /* main() should not return; trap here if it does */
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
/* Default_Handler: catch-all for unexpected interrupts; spins forever so the
   system state is preserved for examination by a debugger. */
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
/* Cortex-M0 vector table, pinned by the linker script to the start of flash
   where the core fetches the initial SP and reset vector. Entry order is
   fixed by the STM32F072 interrupt map: 16 system exception slots followed
   by 32 peripheral IRQ slots. */
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
/* NOTE(review): .size is evaluated before the g_pfnVectors label, so the
   recorded size is 0; harmless metadata quirk from the ST template. */
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack /* initial stack pointer: top of RAM, from linker script */
.word Reset_Handler /* reset vector */
.word NMI_Handler
.word HardFault_Handler
.word 0 /* reserved (no MemManage fault on Cortex-M0) */
.word 0 /* reserved (no BusFault) */
.word 0 /* reserved (no UsageFault) */
.word 0 /* reserved */
.word 0 /* reserved */
.word 0 /* reserved */
.word 0 /* reserved */
.word SVC_Handler
.word 0 /* reserved (no DebugMonitor on Cortex-M0) */
.word 0 /* reserved */
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
/* Each handler is declared weak and aliased to Default_Handler, so an
   application only defines the handlers it uses; a strong definition of the
   same name elsewhere overrides the alias at link time. .thumb_set (rather
   than .set) also marks the alias as a Thumb function so the vector table
   entry gets its LSB set correctly. */
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/ADC/ADC_ContinuousConversion_TriggerSW_LowPower/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup ; IAR cstartup module; replaceable by a user-defined one
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3) ; stack block, 2^3 = 8-byte aligned
SECTION .intvec:CODE:NOROOT(2) ; vector table section, 2^2 = 4-byte aligned
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
; Cortex-M0 vector table: initial SP, 15 system exception slots, then the
; 32 STM32F072 peripheral IRQ slots. Slot order is fixed by the hardware.
__vector_table
DCD sfe(CSTACK) ; initial SP = end (top) address of CSTACK
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,445
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/ADC/ADC_SingleConversion_TriggerSW_DMA/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; Stack: NOINIT RW area; __initial_sp labels its top (stack grows down).
Stack_Size      EQU     0x400
                AREA    STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem       SPACE   Stack_Size
__initial_sp
; <h> Heap Configuration
;   <o>  Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; Heap: NOINIT RW area bounded by __heap_base/__heap_limit (used by the
; C library allocator).
Heap_Size       EQU     0x200
                AREA    HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem        SPACE   Heap_Size
__heap_limit
                PRESERVE8
                THUMB
; Vector Table Mapped to Address 0 at Reset
; Entry order is fixed by the Cortex-M0 NVIC and the STM32F072 interrupt
; mapping - do not reorder.
                AREA    RESET, DATA, READONLY
                EXPORT  __Vectors
                EXPORT  __Vectors_End
                EXPORT  __Vectors_Size
__Vectors       DCD     __initial_sp                   ; Top of Stack
                DCD     Reset_Handler                  ; Reset Handler
                DCD     NMI_Handler                    ; NMI Handler
                DCD     HardFault_Handler              ; Hard Fault Handler
                DCD     0                              ; Reserved
                DCD     0                              ; Reserved
                DCD     0                              ; Reserved
                DCD     0                              ; Reserved
                DCD     0                              ; Reserved
                DCD     0                              ; Reserved
                DCD     0                              ; Reserved
                DCD     SVC_Handler                    ; SVCall Handler
                DCD     0                              ; Reserved
                DCD     0                              ; Reserved
                DCD     PendSV_Handler                 ; PendSV Handler
                DCD     SysTick_Handler                ; SysTick Handler
                ; External Interrupts
                DCD     WWDG_IRQHandler                ; Window Watchdog
                DCD     PVD_VDDIO2_IRQHandler          ; PVD and VDDIO2 through EXTI Line detect
                DCD     RTC_IRQHandler                 ; RTC through EXTI Line
                DCD     FLASH_IRQHandler               ; FLASH
                DCD     RCC_CRS_IRQHandler             ; RCC and CRS
                DCD     EXTI0_1_IRQHandler             ; EXTI Line 0 and 1
                DCD     EXTI2_3_IRQHandler             ; EXTI Line 2 and 3
                DCD     EXTI4_15_IRQHandler            ; EXTI Line 4 to 15
                DCD     TSC_IRQHandler                 ; TSC
                DCD     DMA1_Channel1_IRQHandler       ; DMA1 Channel 1
                DCD     DMA1_Channel2_3_IRQHandler     ; DMA1 Channel 2 and Channel 3
                DCD     DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
                DCD     ADC1_COMP_IRQHandler           ; ADC1, COMP1 and COMP2
                DCD     TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
                DCD     TIM1_CC_IRQHandler             ; TIM1 Capture Compare
                DCD     TIM2_IRQHandler                ; TIM2
                DCD     TIM3_IRQHandler                ; TIM3
                DCD     TIM6_DAC_IRQHandler            ; TIM6 and DAC
                DCD     TIM7_IRQHandler                ; TIM7
                DCD     TIM14_IRQHandler               ; TIM14
                DCD     TIM15_IRQHandler               ; TIM15
                DCD     TIM16_IRQHandler               ; TIM16
                DCD     TIM17_IRQHandler               ; TIM17
                DCD     I2C1_IRQHandler                ; I2C1
                DCD     I2C2_IRQHandler                ; I2C2
                DCD     SPI1_IRQHandler                ; SPI1
                DCD     SPI2_IRQHandler                ; SPI2
                DCD     USART1_IRQHandler              ; USART1
                DCD     USART2_IRQHandler              ; USART2
                DCD     USART3_4_IRQHandler            ; USART3 & USART4
                DCD     CEC_CAN_IRQHandler             ; CEC and CAN
                DCD     USB_IRQHandler                 ; USB
__Vectors_End
__Vectors_Size  EQU  __Vectors_End - __Vectors
                AREA    |.text|, CODE, READONLY
; Reset handler routine
; Calls SystemInit (clock/system setup), then __main (C library entry,
; which initializes data and eventually calls main()). Weak: overridable.
Reset_Handler    PROC
                 EXPORT  Reset_Handler                 [WEAK]
        IMPORT  __main
        IMPORT  SystemInit
                 LDR     R0, =SystemInit
                 BLX     R0
                 LDR     R0, =__main
                 BX      R0
                 ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler     PROC
                EXPORT  NMI_Handler                    [WEAK]
                B       .
                ENDP
HardFault_Handler\
                PROC
                EXPORT  HardFault_Handler              [WEAK]
                B       .
                ENDP
SVC_Handler     PROC
                EXPORT  SVC_Handler                    [WEAK]
                B       .
                ENDP
PendSV_Handler  PROC
                EXPORT  PendSV_Handler                 [WEAK]
                B       .
                ENDP
SysTick_Handler PROC
                EXPORT  SysTick_Handler                [WEAK]
                B       .
                ENDP
; All device IRQs share one weak spin loop; defining any of these symbols
; elsewhere overrides the corresponding [WEAK] export.
Default_Handler PROC
                EXPORT  WWDG_IRQHandler                [WEAK]
                EXPORT  PVD_VDDIO2_IRQHandler          [WEAK]
                EXPORT  RTC_IRQHandler                 [WEAK]
                EXPORT  FLASH_IRQHandler               [WEAK]
                EXPORT  RCC_CRS_IRQHandler             [WEAK]
                EXPORT  EXTI0_1_IRQHandler             [WEAK]
                EXPORT  EXTI2_3_IRQHandler             [WEAK]
                EXPORT  EXTI4_15_IRQHandler            [WEAK]
                EXPORT  TSC_IRQHandler                 [WEAK]
                EXPORT  DMA1_Channel1_IRQHandler       [WEAK]
                EXPORT  DMA1_Channel2_3_IRQHandler     [WEAK]
                EXPORT  DMA1_Channel4_5_6_7_IRQHandler [WEAK]
                EXPORT  ADC1_COMP_IRQHandler           [WEAK]
                EXPORT  TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
                EXPORT  TIM1_CC_IRQHandler             [WEAK]
                EXPORT  TIM2_IRQHandler                [WEAK]
                EXPORT  TIM3_IRQHandler                [WEAK]
                EXPORT  TIM6_DAC_IRQHandler            [WEAK]
                EXPORT  TIM7_IRQHandler                [WEAK]
                EXPORT  TIM14_IRQHandler               [WEAK]
                EXPORT  TIM15_IRQHandler               [WEAK]
                EXPORT  TIM16_IRQHandler               [WEAK]
                EXPORT  TIM17_IRQHandler               [WEAK]
                EXPORT  I2C1_IRQHandler                [WEAK]
                EXPORT  I2C2_IRQHandler                [WEAK]
                EXPORT  SPI1_IRQHandler                [WEAK]
                EXPORT  SPI2_IRQHandler                [WEAK]
                EXPORT  USART1_IRQHandler              [WEAK]
                EXPORT  USART2_IRQHandler              [WEAK]
                EXPORT  USART3_4_IRQHandler            [WEAK]
                EXPORT  CEC_CAN_IRQHandler             [WEAK]
                EXPORT  USB_IRQHandler                 [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
                B       .
                ENDP
                ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
                 IF      :DEF:__MICROLIB
                 EXPORT  __initial_sp
                 EXPORT  __heap_base
                 EXPORT  __heap_limit
                 ELSE
                 IMPORT  __use_two_region_memory
                 EXPORT  __user_initial_stackheap
; Two-region model: returns R0 = heap base, R1 = stack top
; (Stack_Mem + Stack_Size), R2 = heap limit, R3 = stack base.
__user_initial_stackheap
                 LDR     R0, = Heap_Mem
                 LDR     R1, =(Stack_Mem + Stack_Size)
                 LDR     R2, = (Heap_Mem + Heap_Size)
                 LDR     R3, = Stack_Mem
                 BX      LR
                 ALIGN
                 ENDIF
                 END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/ADC/ADC_SingleConversion_TriggerSW_DMA/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
  .syntax unified
  .cpu cortex-m0
  .fpu softvfp
  .thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
  .section .text.Reset_Handler
  .weak Reset_Handler
  .type Reset_Handler, %function
/* Reset entry: set SP, copy .data from flash to SRAM, zero .bss,
   run SystemInit and C static constructors, then call main(). */
Reset_Handler:
  ldr   r0, =_estack
  mov   sp, r0          /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
/* Loop invariant: r1 = byte offset copied so far; r0 is reloaded with
   _sdata each iteration, r3 carries the word being copied. */
  movs r1, #0
  b LoopCopyDataInit
CopyDataInit:
  ldr r3, =_sidata
  ldr r3, [r3, r1]
  str r3, [r0, r1]
  adds r1, r1, #4
LoopCopyDataInit:
  ldr r0, =_sdata
  ldr r3, =_edata
  adds r2, r0, r1
  cmp r2, r3
  bcc CopyDataInit
  ldr r2, =_sbss
  b LoopFillZerobss
/* Zero fill the bss segment. */
/* r2 walks from _sbss to _ebss, storing zero one word at a time. */
FillZerobss:
  movs r3, #0
  str  r3, [r2]
  adds r2, r2, #4
LoopFillZerobss:
  ldr r3, = _ebss
  cmp r2, r3
  bcc FillZerobss
/* Call the clock system intitialization function.*/
  bl  SystemInit
/* Call static constructors */
  bl __libc_init_array
/* Call the application's entry point.*/
  bl main
/* main() is not expected to return; trap here if it does. */
LoopForever:
    b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
 * @brief  This is the code that gets called when the processor receives an
 *         unexpected interrupt.  This simply enters an infinite loop, preserving
 *         the system state for examination by a debugger.
 *
 * @param  None
 * @retval : None
*/
  .section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
  b Infinite_Loop
  .size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0.  Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
/* Entry order is fixed by the Cortex-M0 NVIC and the STM32F072 interrupt
   mapping - do not reorder. */
  .section .isr_vector,"a",%progbits
  .type g_pfnVectors, %object
  .size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
  .word  _estack
  .word  Reset_Handler
  .word  NMI_Handler
  .word  HardFault_Handler
  .word  0
  .word  0
  .word  0
  .word  0
  .word  0
  .word  0
  .word  0
  .word  SVC_Handler
  .word  0
  .word  0
  .word  PendSV_Handler
  .word  SysTick_Handler
  .word  WWDG_IRQHandler                   /* Window WatchDog              */
  .word  PVD_VDDIO2_IRQHandler             /* PVD and VDDIO2 through EXTI Line detect */
  .word  RTC_IRQHandler                    /* RTC through the EXTI line    */
  .word  FLASH_IRQHandler                  /* FLASH                        */
  .word  RCC_CRS_IRQHandler                /* RCC and CRS                  */
  .word  EXTI0_1_IRQHandler                /* EXTI Line 0 and 1            */
  .word  EXTI2_3_IRQHandler                /* EXTI Line 2 and 3            */
  .word  EXTI4_15_IRQHandler               /* EXTI Line 4 to 15            */
  .word  TSC_IRQHandler                    /* TSC                          */
  .word  DMA1_Channel1_IRQHandler          /* DMA1 Channel 1               */
  .word  DMA1_Channel2_3_IRQHandler        /* DMA1 Channel 2 and Channel 3 */
  .word  DMA1_Channel4_5_6_7_IRQHandler    /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
  .word  ADC1_COMP_IRQHandler              /* ADC1, COMP1 and COMP2        */
  .word  TIM1_BRK_UP_TRG_COM_IRQHandler    /* TIM1 Break, Update, Trigger and Commutation */
  .word  TIM1_CC_IRQHandler                /* TIM1 Capture Compare         */
  .word  TIM2_IRQHandler                   /* TIM2                         */
  .word  TIM3_IRQHandler                   /* TIM3                         */
  .word  TIM6_DAC_IRQHandler               /* TIM6 and DAC                 */
  .word  TIM7_IRQHandler                   /* TIM7                         */
  .word  TIM14_IRQHandler                  /* TIM14                        */
  .word  TIM15_IRQHandler                  /* TIM15                        */
  .word  TIM16_IRQHandler                  /* TIM16                        */
  .word  TIM17_IRQHandler                  /* TIM17                        */
  .word  I2C1_IRQHandler                   /* I2C1                         */
  .word  I2C2_IRQHandler                   /* I2C2                         */
  .word  SPI1_IRQHandler                   /* SPI1                         */
  .word  SPI2_IRQHandler                   /* SPI2                         */
  .word  USART1_IRQHandler                 /* USART1                       */
  .word  USART2_IRQHandler                 /* USART2                       */
  .word  USART3_4_IRQHandler               /* USART3 and USART4            */
  .word  CEC_CAN_IRQHandler                /* CEC and CAN                  */
  .word  USB_IRQHandler                    /* USB                          */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
  .weak      NMI_Handler
  .thumb_set NMI_Handler,Default_Handler
  .weak      HardFault_Handler
  .thumb_set HardFault_Handler,Default_Handler
  .weak      SVC_Handler
  .thumb_set SVC_Handler,Default_Handler
  .weak      PendSV_Handler
  .thumb_set PendSV_Handler,Default_Handler
  .weak      SysTick_Handler
  .thumb_set SysTick_Handler,Default_Handler
  .weak      WWDG_IRQHandler
  .thumb_set WWDG_IRQHandler,Default_Handler
  .weak      PVD_VDDIO2_IRQHandler
  .thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
  .weak      RTC_IRQHandler
  .thumb_set RTC_IRQHandler,Default_Handler
  .weak      FLASH_IRQHandler
  .thumb_set FLASH_IRQHandler,Default_Handler
  .weak      RCC_CRS_IRQHandler
  .thumb_set RCC_CRS_IRQHandler,Default_Handler
  .weak      EXTI0_1_IRQHandler
  .thumb_set EXTI0_1_IRQHandler,Default_Handler
  .weak      EXTI2_3_IRQHandler
  .thumb_set EXTI2_3_IRQHandler,Default_Handler
  .weak      EXTI4_15_IRQHandler
  .thumb_set EXTI4_15_IRQHandler,Default_Handler
  .weak      TSC_IRQHandler
  .thumb_set TSC_IRQHandler,Default_Handler
  .weak      DMA1_Channel1_IRQHandler
  .thumb_set DMA1_Channel1_IRQHandler,Default_Handler
  .weak      DMA1_Channel2_3_IRQHandler
  .thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
  .weak      DMA1_Channel4_5_6_7_IRQHandler
  .thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
  .weak      ADC1_COMP_IRQHandler
  .thumb_set ADC1_COMP_IRQHandler,Default_Handler
  .weak      TIM1_BRK_UP_TRG_COM_IRQHandler
  .thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
  .weak      TIM1_CC_IRQHandler
  .thumb_set TIM1_CC_IRQHandler,Default_Handler
  .weak      TIM2_IRQHandler
  .thumb_set TIM2_IRQHandler,Default_Handler
  .weak      TIM3_IRQHandler
  .thumb_set TIM3_IRQHandler,Default_Handler
  .weak      TIM6_DAC_IRQHandler
  .thumb_set TIM6_DAC_IRQHandler,Default_Handler
  .weak      TIM7_IRQHandler
  .thumb_set TIM7_IRQHandler,Default_Handler
  .weak      TIM14_IRQHandler
  .thumb_set TIM14_IRQHandler,Default_Handler
  .weak      TIM15_IRQHandler
  .thumb_set TIM15_IRQHandler,Default_Handler
  .weak      TIM16_IRQHandler
  .thumb_set TIM16_IRQHandler,Default_Handler
  .weak      TIM17_IRQHandler
  .thumb_set TIM17_IRQHandler,Default_Handler
  .weak      I2C1_IRQHandler
  .thumb_set I2C1_IRQHandler,Default_Handler
  .weak      I2C2_IRQHandler
  .thumb_set I2C2_IRQHandler,Default_Handler
  .weak      SPI1_IRQHandler
  .thumb_set SPI1_IRQHandler,Default_Handler
  .weak      SPI2_IRQHandler
  .thumb_set SPI2_IRQHandler,Default_Handler
  .weak      USART1_IRQHandler
  .thumb_set USART1_IRQHandler,Default_Handler
  .weak      USART2_IRQHandler
  .thumb_set USART2_IRQHandler,Default_Handler
  .weak      USART3_4_IRQHandler
  .thumb_set USART3_4_IRQHandler,Default_Handler
  .weak      CEC_CAN_IRQHandler
  .thumb_set CEC_CAN_IRQHandler,Default_Handler
  .weak      USB_IRQHandler
  .thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/ADC/ADC_SingleConversion_TriggerSW_DMA/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
        MODULE  ?cstartup
        ;; Forward declaration of sections.
        SECTION CSTACK:DATA:NOROOT(3)
        SECTION .intvec:CODE:NOROOT(2)
        EXTERN  __iar_program_start
        EXTERN  SystemInit
        PUBLIC  __vector_table
        DATA
        ; Cortex-M0 vector table: word 0 is the initial SP (end of CSTACK,
        ; via sfe), word 1 the reset vector. Entry order is fixed by the
        ; Cortex-M0 NVIC and the STM32F072 interrupt mapping - do not reorder.
__vector_table
        DCD     sfe(CSTACK)
        DCD     Reset_Handler                  ; Reset Handler
        DCD     NMI_Handler                    ; NMI Handler
        DCD     HardFault_Handler              ; Hard Fault Handler
        DCD     0                              ; Reserved
        DCD     0                              ; Reserved
        DCD     0                              ; Reserved
        DCD     0                              ; Reserved
        DCD     0                              ; Reserved
        DCD     0                              ; Reserved
        DCD     0                              ; Reserved
        DCD     SVC_Handler                    ; SVCall Handler
        DCD     0                              ; Reserved
        DCD     0                              ; Reserved
        DCD     PendSV_Handler                 ; PendSV Handler
        DCD     SysTick_Handler                ; SysTick Handler
        ; External Interrupts
        DCD     WWDG_IRQHandler                ; Window Watchdog
        DCD     PVD_VDDIO2_IRQHandler          ; PVD and VDDIO2 through EXTI Line detect
        DCD     RTC_IRQHandler                 ; RTC through EXTI Line
        DCD     FLASH_IRQHandler               ; FLASH
        DCD     RCC_CRS_IRQHandler             ; RCC and CRS
        DCD     EXTI0_1_IRQHandler             ; EXTI Line 0 and 1
        DCD     EXTI2_3_IRQHandler             ; EXTI Line 2 and 3
        DCD     EXTI4_15_IRQHandler            ; EXTI Line 4 to 15
        DCD     TSC_IRQHandler                 ; TSC
        DCD     DMA1_Channel1_IRQHandler       ; DMA1 Channel 1
        DCD     DMA1_Channel2_3_IRQHandler     ; DMA1 Channel 2 and Channel 3
        DCD     DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
        DCD     ADC1_COMP_IRQHandler           ; ADC1, COMP1 and COMP2
        DCD     TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
        DCD     TIM1_CC_IRQHandler             ; TIM1 Capture Compare
        DCD     TIM2_IRQHandler                ; TIM2
        DCD     TIM3_IRQHandler                ; TIM3
        DCD     TIM6_DAC_IRQHandler            ; TIM6 and DAC
        DCD     TIM7_IRQHandler                ; TIM7
        DCD     TIM14_IRQHandler               ; TIM14
        DCD     TIM15_IRQHandler               ; TIM15
        DCD     TIM16_IRQHandler               ; TIM16
        DCD     TIM17_IRQHandler               ; TIM17
        DCD     I2C1_IRQHandler                ; I2C1
        DCD     I2C2_IRQHandler                ; I2C2
        DCD     SPI1_IRQHandler                ; SPI1
        DCD     SPI2_IRQHandler                ; SPI2
        DCD     USART1_IRQHandler              ; USART1
        DCD     USART2_IRQHandler              ; USART2
        DCD     USART3_4_IRQHandler            ; USART3 and USART4
        DCD     CEC_CAN_IRQHandler             ; CEC and CAN
        DCD     USB_IRQHandler                 ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
        THUMB
        PUBWEAK Reset_Handler
        SECTION .text:CODE:NOROOT:REORDER(2)
        ; Reset entry: run SystemInit (clock/system setup) first, then hand
        ; control to the IAR runtime start, which initializes C data and
        ; eventually calls main(). Declared PUBWEAK so the user can override it.
Reset_Handler
        LDR     R0, =SystemInit
        BLX     R0
        LDR     R0, =__iar_program_start
        BX      R0
        ; Each handler below is a PUBWEAK infinite loop: an unexpected
        ; exception/IRQ parks the CPU here so a debugger can inspect state.
        ; Defining a handler with the same name elsewhere overrides it.
        PUBWEAK NMI_Handler
        SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
        B NMI_Handler
        PUBWEAK HardFault_Handler
        SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
        B HardFault_Handler
        PUBWEAK SVC_Handler
        SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
        B SVC_Handler
        PUBWEAK PendSV_Handler
        SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
        B PendSV_Handler
        PUBWEAK SysTick_Handler
        SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
        B SysTick_Handler
        PUBWEAK WWDG_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
        B WWDG_IRQHandler
        PUBWEAK PVD_VDDIO2_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
        B PVD_VDDIO2_IRQHandler
        PUBWEAK RTC_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
        B RTC_IRQHandler
        PUBWEAK FLASH_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
        B FLASH_IRQHandler
        PUBWEAK RCC_CRS_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
        B RCC_CRS_IRQHandler
        PUBWEAK EXTI0_1_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
        B EXTI0_1_IRQHandler
        PUBWEAK EXTI2_3_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
        B EXTI2_3_IRQHandler
        PUBWEAK EXTI4_15_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
        B EXTI4_15_IRQHandler
        PUBWEAK TSC_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
        B TSC_IRQHandler
        PUBWEAK DMA1_Channel1_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
        B DMA1_Channel1_IRQHandler
        PUBWEAK DMA1_Channel2_3_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
        B DMA1_Channel2_3_IRQHandler
        PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
        B DMA1_Channel4_5_6_7_IRQHandler
        PUBWEAK ADC1_COMP_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
        B ADC1_COMP_IRQHandler
        PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
        B TIM1_BRK_UP_TRG_COM_IRQHandler
        PUBWEAK TIM1_CC_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
        B TIM1_CC_IRQHandler
        PUBWEAK TIM2_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
        B TIM2_IRQHandler
        PUBWEAK TIM3_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
        B TIM3_IRQHandler
        PUBWEAK TIM6_DAC_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
        B TIM6_DAC_IRQHandler
        PUBWEAK TIM7_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
        B TIM7_IRQHandler
        PUBWEAK TIM14_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
        B TIM14_IRQHandler
        PUBWEAK TIM15_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
        B TIM15_IRQHandler
        PUBWEAK TIM16_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
        B TIM16_IRQHandler
        PUBWEAK TIM17_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
        B TIM17_IRQHandler
        PUBWEAK I2C1_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
        B I2C1_IRQHandler
        PUBWEAK I2C2_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
        B I2C2_IRQHandler
        PUBWEAK SPI1_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
        B SPI1_IRQHandler
        PUBWEAK SPI2_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
        B SPI2_IRQHandler
        PUBWEAK USART1_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
        B USART1_IRQHandler
        PUBWEAK USART2_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
        B USART2_IRQHandler
        PUBWEAK USART3_4_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
        B USART3_4_IRQHandler
        PUBWEAK CEC_CAN_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
        B CEC_CAN_IRQHandler
        PUBWEAK USB_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
        B USB_IRQHandler
        END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,445
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/ADC/ADC_AnalogWatchdog/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; Stack: 0x400 bytes (1 KiB), uninitialized, 8-byte aligned (ALIGN=3).
; __initial_sp labels the address just past the top of the stack and is
; installed as the first vector table entry below.
Stack_Size EQU 0x400
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; Heap: 0x200 bytes (512 B), uninitialized, 8-byte aligned.
; __heap_base/__heap_limit bound the region for the C library allocator.
Heap_Size EQU 0x200
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
; Require 8-byte stack alignment (AAPCS) and Thumb instruction encoding.
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
; Entry order is fixed by the Cortex-M0 architecture: initial SP,
; then exception handlers, then the 32 device IRQ vectors.
; Do not reorder entries; positions correspond to IRQ numbers.
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TS
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
; Size of the table in bytes, computed from the two boundary labels.
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
; Entry point after reset: call SystemInit (clock/system setup), then
; jump to the C library entry __main, which initializes .data/.bss and
; eventually calls main(). Declared WEAK so it can be overridden.
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0 ; call SystemInit (returns here)
LDR R0, =__main
BX R0 ; tail-jump to __main; never returns
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
; Each is exported WEAK so an application-defined handler of the same
; name takes precedence. "B ." branches to the current address forever,
; preserving system state for debugger inspection.
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
; Default_Handler: single infinite-loop body shared by all device IRQs.
; Every IRQ symbol below is exported WEAK and labels the same address,
; so any unimplemented interrupt traps here; defining a function with a
; matching name anywhere in the application overrides the weak symbol.
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
; All IRQ labels alias the same branch-to-self instruction below.
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
; With MicroLIB, only the boundary symbols need to be exported; the
; library picks them up directly. Otherwise __user_initial_stackheap
; returns the two-region memory layout to the standard C library:
; R0 = heap base, R1 = stack top, R2 = heap limit, R3 = stack base.
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/ADC/ADC_AnalogWatchdog/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
/* Assembler setup: unified ARM/Thumb syntax, Cortex-M0 target
   (Thumb-only core), software floating point. */
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
/* Reset entry point: set SP, copy .data from flash, zero .bss, call
   SystemInit, run static constructors, then main(). Weak so it can be
   replaced by an application-supplied reset handler. */
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM.
   r1 = byte offset into the segment; loop test is at the bottom
   (LoopCopyDataInit), so an empty .data copies nothing. */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1] /* r3 = flash word at _sidata + r1 */
str r3, [r0, r1] /* store to _sdata + r1 (r0 set below) */
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1 /* r2 = current destination address */
cmp r2, r3
bcc CopyDataInit /* unsigned compare: continue while r2 < _edata */
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. r2 walks from _sbss up to _ebss. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
/* main() should not return on bare metal; trap here if it does. */
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
/* Catch-all handler for unexpected interrupts: loop forever,
   preserving system state for examination by a debugger. All unhandled
   vectors are weak-aliased to this symbol below. */
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
/* Cortex-M0 vector table: initial SP, core exceptions, then the 32
   device IRQ vectors. Placed in .isr_vector so the linker script can
   locate it at address 0x00000000. Entry order is fixed by hardware. */
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/* Fix: the original emitted ".size g_pfnVectors, .-g_pfnVectors" BEFORE
   the g_pfnVectors label, where the expression evaluates to 0, so the
   ELF symbol size was recorded as 0. Emitting it after the last entry
   records the true size (48 words = 192 bytes). */
.size g_pfnVectors, .-g_pfnVectors
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
* .thumb_set behaves like .set but also marks the alias as a Thumb
* function symbol, which is required for correct vector table entries
* (bit 0 set) on this Thumb-only core.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aestream/faery
| 5,011
|
src/mp4/x264/tools/checkasm-aarch64.S
|
/****************************************************************************
* checkasm-aarch64.S: assembly check tool
*****************************************************************************
* Copyright (C) 2015-2024 x264 project
*
* Authors: Martin Storsjo <martin@martin.st>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "../common/aarch64/asm.S"
// register_init: 18 pseudo-random 64-bit seed values. checkasm_call
// loads these into the callee-saved registers (d8-d15, x19-x28) before
// invoking the function under test, and compares against the same
// table afterwards to detect clobbered callee-saved registers.
const register_init, align=4
.quad 0x21f86d66c8ca00ce
.quad 0x75b6ba21077c48ad
.quad 0xed56bb2dcb3c7736
.quad 0x8bda43d3fd1a7e06
.quad 0xb64a9c9e5d318408
.quad 0xdf9a54b303f1d3a3
.quad 0x4a75479abd64e097
.quad 0x249214109d5d1c88
.quad 0x1a1b2550a612b48c
.quad 0x79445c159ce79064
.quad 0x2eed899d5a28ddcd
.quad 0x86b2536fcd8cf636
.quad 0xb0856806085e7943
.quad 0x3f2bf84fc0fcca4e
.quad 0xacbd382dcf5b8de2
.quad 0xd229e1f5b281303f
.quad 0x71aeaff20b095fd9
.quad 0xab63e2e11fa38ed9
endconst
// Message printed via puts() when a register check fails.
const error_message
.asciz "failed to preserve register"
endconst
.text
// max number of args used by any x264 asm function.
#define MAX_ARGS 15
// Bytes of stack to dirty, rounded up to 16-byte alignment.
#define CLOBBER_STACK ((8*MAX_ARGS + 15) & ~15)
// checkasm_stack_clobber(x0, x1): repeatedly stores its first two
// arguments over CLOBBER_STACK bytes below the current stack pointer,
// then restores sp. This poisons the region a subsequently-called
// function will use for stack arguments/locals, exposing reads of
// uninitialized stack.
function checkasm_stack_clobber, export=1
mov x3, sp // save original sp
mov x2, #CLOBBER_STACK // x2 = bytes remaining to clobber
1:
stp x0, x1, [sp, #-16]! // write 16 bytes, pre-decrement sp
subs x2, x2, #16
b.gt 1b
mov sp, x3 // restore sp before returning
ret
endfunc
// Stack bytes needed for the args that don't fit in x0-x7 (16-aligned).
#define ARG_STACK ((8*(MAX_ARGS - 8) + 15) & ~15)
// checkasm_call(func, ok_ptr, args...):
// Wraps a call to the function under test (x0) with AAPCS64
// callee-saved register checking. Saves the caller's x19-x28/d8-d15,
// seeds them from register_init, forwards up to MAX_ARGS arguments,
// then verifies the callee-saved registers still match the seed table.
// On mismatch: stores 0 through the ok pointer (x1) and prints
// error_message. Returns the wrapped function's return value (x0/x1).
function checkasm_call, export=1
stp x29, x30, [sp, #-16]! // frame pointer + link register
mov x29, sp
// Preserve the caller's callee-saved registers (AAPCS64:
// x19-x28 and the low 64 bits of v8-v15).
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x25, x26, [sp, #-16]!
stp x27, x28, [sp, #-16]!
stp d8, d9, [sp, #-16]!
stp d10, d11, [sp, #-16]!
stp d12, d13, [sp, #-16]!
stp d14, d15, [sp, #-16]!
// Seed all callee-saved registers with known values.
movrel x9, register_init
ldp d8, d9, [x9], #16
ldp d10, d11, [x9], #16
ldp d12, d13, [x9], #16
ldp d14, d15, [x9], #16
ldp x19, x20, [x9], #16
ldp x21, x22, [x9], #16
ldp x23, x24, [x9], #16
ldp x25, x26, [x9], #16
ldp x27, x28, [x9], #16
str x1, [sp, #-16]! // stash the ok pointer
sub sp, sp, #ARG_STACK
// Copy incoming stack arguments to the new outgoing argument area.
.equ pos, 0
.rept MAX_ARGS-8
// Skip the first 8 args, that are loaded into registers
ldr x9, [x29, #16 + 8*8 + pos]
str x9, [sp, #pos]
.equ pos, pos + 8
.endr
mov x12, x0 // x12 = function under test
// Shift the remaining args down: args 2..9 become x0-x7.
ldp x0, x1, [x29, #16]
ldp x2, x3, [x29, #32]
ldp x4, x5, [x29, #48]
ldp x6, x7, [x29, #64]
blr x12 // invoke the function under test
add sp, sp, #ARG_STACK
ldr x2, [sp] // x2 = ok pointer
stp x0, x1, [sp] // preserve return value across checks
// Re-walk register_init and accumulate XOR differences into v3/x3.
movrel x9, register_init
movi v3.8h, #0
.macro check_reg_neon reg1, reg2
ldr q0, [x9], #16
uzp1 v1.2d, v\reg1\().2d, v\reg2\().2d
eor v0.16b, v0.16b, v1.16b
orr v3.16b, v3.16b, v0.16b
.endm
check_reg_neon 8, 9
check_reg_neon 10, 11
check_reg_neon 12, 13
check_reg_neon 14, 15
uqxtn v3.8b, v3.8h
umov x3, v3.d[0]
.macro check_reg reg1, reg2
ldp x0, x1, [x9], #16
eor x0, x0, \reg1
eor x1, x1, \reg2
orr x3, x3, x0
orr x3, x3, x1
.endm
check_reg x19, x20
check_reg x21, x22
check_reg x23, x24
check_reg x25, x26
check_reg x27, x28
cbz x3, 0f // x3 == 0: all registers preserved
// Failure: *ok = 0 and report.
mov w9, #0
str w9, [x2]
movrel x0, error_message
bl EXT(puts)
0:
// Restore return value and the caller's callee-saved registers.
ldp x0, x1, [sp], #16
ldp d14, d15, [sp], #16
ldp d12, d13, [sp], #16
ldp d10, d11, [sp], #16
ldp d8, d9, [sp], #16
ldp x27, x28, [sp], #16
ldp x25, x26, [sp], #16
ldp x23, x24, [sp], #16
ldp x21, x22, [sp], #16
ldp x19, x20, [sp], #16
ldp x29, x30, [sp], #16
ret
endfunc
#if HAVE_SVE
.arch armv8-a+sve
// checkasm_sve_length(): returns the SVE vector length in BITS.
// CNTB yields the vector length in bytes; shifting left by 3
// multiplies by 8.
function checkasm_sve_length, export=1
cntb x0
lsl x0, x0, #3
ret
endfunc
#endif
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/ADC/ADC_AnalogWatchdog/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
; Cortex-M0 vector table: first entry is the initial stack pointer
; (end of the linker-defined CSTACK block), followed by the core
; exception handlers and the 32 device IRQ vectors. Entry order is
; fixed by hardware; do not reorder.
__vector_table
DCD sfe(CSTACK) ; Initial SP = end of CSTACK
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
; Reset entry point: call SystemInit (clock/system setup), then jump to
; the IAR runtime entry __iar_program_start, which performs data/bss
; initialization and eventually calls main(). Weak so it can be
; overridden by an application-defined Reset_Handler.
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0 ; call SystemInit (returns here)
LDR R0, =__iar_program_start
BX R0 ; tail-jump into the IAR runtime; never returns
; --- Weak default exception/IRQ handler stubs ---
; Each handler is published with PUBWEAK so the application can
; override it by defining a function of the same name. The default body
; is a branch-to-self (infinite loop), which freezes the core on an
; unexpected interrupt and preserves state for a debugger. Each stub
; lives in its own NOROOT section so unreferenced stubs are discarded.
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
; End of assembly module.
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aestream/faery
| 5,371
|
src/mp4/x264/tools/checkasm-loongarch.S
|
/****************************************************************************
* checkasm-loongarch.S: assembly check tool
*****************************************************************************
* Copyright (C) 2024 x264 project
*
* Authors: Xiwei Gu <guxiwei-hf@loongson.cn>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "../common/loongarch/loongson_asm.S"
// register_init: 17 pseudo-random 64-bit seed values. The checkasm
// call wrapper loads these into the callee-saved registers (s0-s8,
// fs0-fs7) before invoking a function under test and compares against
// the same table afterwards to detect clobbered callee-saved registers.
const register_init, align=3
.quad 0x21f86d66c8ca00ce
.quad 0x75b6ba21077c48ad
.quad 0xed56bb2dcb3c7736
.quad 0x8bda43d3fd1a7e06
.quad 0xb64a9c9e5d318408
.quad 0xdf9a54b303f1d3a3
.quad 0x4a75479abd64e097
.quad 0x249214109d5d1c88
.quad 0x1a1b2550a612b48c
.quad 0x79445c159ce79064
.quad 0x2eed899d5a28ddcd
.quad 0x86b2536fcd8cf636
.quad 0xb0856806085e7943
.quad 0x3f2bf84fc0fcca4e
.quad 0xacbd382dcf5b8de2
.quad 0xd229e1f5b281303f
.quad 0x71aeaff20b095fd9
endconst
// Message printed via puts() when a register check fails.
const error_message
.asciz "failed to preserve register"
endconst
.text
// max number of args used by any x264 asm function.
#define MAX_ARGS 15
// Bytes of stack to dirty, rounded up to 16-byte alignment.
#define CLOBBER_STACK ((8*MAX_ARGS + 15) & ~15)
// Fill dirty data at stack space
// x264_checkasm_stack_clobber(a0, a1): repeatedly stores its first two
// arguments over CLOBBER_STACK bytes below the current stack pointer,
// then restores sp. This poisons the region a subsequently-called
// function will use, exposing reads of uninitialized stack memory.
function x264_checkasm_stack_clobber
move t0, sp // save original sp
addi.d t1, zero, CLOBBER_STACK // t1 = bytes remaining
1:
st.d a0, sp, 0x00
st.d a1, sp, -0x08
addi.d sp, sp, -0x10 // descend 16 bytes per iteration
addi.d t1, t1, -0x10
blt zero,t1, 1b // loop while t1 > 0
move sp, t0 // restore sp before returning
endfunc
#define ARG_STACK ((8*(MAX_ARGS - 8) + 15) & ~15)
// x264_checkasm_call: a0 = function under test, a1 = pointer to the "ok"
// flag; the tested function's own arguments start at [sp] in the caller's
// frame (copied below via t4).  Seeds all callee-saved registers with the
// register_init poison values, forwards up to MAX_ARGS arguments, calls
// the function, then verifies s0-s8 and fs0-fs7 still hold the poison; on
// any mismatch *ok is cleared and error_message is printed.
function x264_checkasm_call
// Saved s0 - s8, fs0 - fs7
move t4, sp
addi.d sp, sp, -136
st.d s0, sp, 0
st.d s1, sp, 8
st.d s2, sp, 16
st.d s3, sp, 24
st.d s4, sp, 32
st.d s5, sp, 40
st.d s6, sp, 48
st.d s7, sp, 56
st.d s8, sp, 64
fst.d fs0, sp, 72
fst.d fs1, sp, 80
fst.d fs2, sp, 88
fst.d fs3, sp, 96
fst.d fs4, sp, 104
fst.d fs5, sp, 112
fst.d fs6, sp, 120
fst.d fs7, sp, 128
// Seed every callee-saved register with its poison value.
la.local t1, register_init
ld.d s0, t1, 0
ld.d s1, t1, 8
ld.d s2, t1, 16
ld.d s3, t1, 24
ld.d s4, t1, 32
ld.d s5, t1, 40
ld.d s6, t1, 48
ld.d s7, t1, 56
ld.d s8, t1, 64
fld.d fs0, t1, 72
fld.d fs1, t1, 80
fld.d fs2, t1, 88
fld.d fs3, t1, 96
fld.d fs4, t1, 104
fld.d fs5, t1, 112
fld.d fs6, t1, 120
fld.d fs7, t1, 128
addi.d sp, sp, -16
st.d a1, sp, 0 // ok
st.d ra, sp, 8 // Ret address
// Copy the stack-passed arguments (args 9..MAX_ARGS) into our own frame.
addi.d sp, sp, -ARG_STACK
addi.d t0, zero, 8*8
xor t1, t1, t1
.rept MAX_ARGS - 8
// Skip the first 8 args, that are loaded into registers
ldx.d t2, t4, t0
stx.d t2, sp, t1
addi.d t0, t0, 8
addi.d t1, t1, 8
.endr
move t3, a0 // Func
// Register arguments for the tested function, read from the caller frame.
ld.d a0, t4, 0
ld.d a1, t4, 8
ld.d a2, t4, 16
ld.d a3, t4, 24
ld.d a4, t4, 32
ld.d a5, t4, 40
ld.d a6, t4, 48
ld.d a7, t4, 56
jirl ra, t3, 0
addi.d sp, sp, ARG_STACK
ld.d t2, sp, 0 // ok
ld.d ra, sp, 8 // Ret address
addi.d sp, sp, 16
// Re-walk register_init and OR every mismatch into t3.
la.local t1, register_init
xor t3, t3, t3
// Accumulate into t3 the XOR of one saved poison value with GPR s\reg1.
.macro check_reg_gr reg1
ld.d t0, t1, 0
xor t0, $s\reg1, t0
or t3, t3, t0
addi.d t1, t1, 8
.endm
check_reg_gr 0
check_reg_gr 1
check_reg_gr 2
check_reg_gr 3
check_reg_gr 4
check_reg_gr 5
check_reg_gr 6
check_reg_gr 7
check_reg_gr 8
// Same check for FPR fs\reg1 (moved to a GPR for the comparison).
.macro check_reg_fr reg1
ld.d t0, t1, 0
movfr2gr.d t4,$fs\reg1
xor t0, t0, t4
or t3, t3, t0
addi.d t1, t1, 8
.endm
check_reg_fr 0
check_reg_fr 1
check_reg_fr 2
check_reg_fr 3
check_reg_fr 4
check_reg_fr 5
check_reg_fr 6
check_reg_fr 7
// t3 == 0 means every callee-saved register survived the call.
beqz t3, 0f
st.d zero,t2, 0x00 // Set OK to 0
la.local a0, error_message
addi.d sp, sp, -8
st.d ra, sp, 0
bl puts
ld.d ra, sp, 0
addi.d sp, sp, 8
0:
// Restore the caller's callee-saved registers and return.
ld.d s0, sp, 0
ld.d s1, sp, 8
ld.d s2, sp, 16
ld.d s3, sp, 24
ld.d s4, sp, 32
ld.d s5, sp, 40
ld.d s6, sp, 48
ld.d s7, sp, 56
ld.d s8, sp, 64
fld.d fs0, sp, 72
fld.d fs1, sp, 80
fld.d fs2, sp, 88
fld.d fs3, sp, 96
fld.d fs4, sp, 104
fld.d fs5, sp, 112
fld.d fs6, sp, 120
fld.d fs7, sp, 128
addi.d sp, sp, 136
endfunc
|
aestream/faery
| 3,712
|
src/mp4/x264/tools/checkasm-arm.S
|
/****************************************************************************
* checkasm-arm.S: assembly check tool
*****************************************************************************
* Copyright (C) 2015-2024 x264 project
*
* Authors: Martin Storsjo <martin@martin.st>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "../common/arm/asm.S"
@ Poison values used to seed r4-r11 (and q4-q7 in the neon variant)
@ before calling the function under test.
const register_init, align=4
.quad 0x21f86d66c8ca00ce
.quad 0x75b6ba21077c48ad
.quad 0xed56bb2dcb3c7736
.quad 0x8bda43d3fd1a7e06
.quad 0xb64a9c9e5d318408
.quad 0xdf9a54b303f1d3a3
.quad 0x4a75479abd64e097
.quad 0x249214109d5d1c88
endconst
@ Diagnostic printed when a callee-saved register was clobbered.
const error_message
.asciz "failed to preserve register"
endconst
.text
@ max number of args used by any x264 asm function.
#define MAX_ARGS 15
#define ARG_STACK 4*(MAX_ARGS - 4)
@ align the used stack space to 8 to preserve the stack alignment
#define ARG_STACK_A (((ARG_STACK + pushed + 7) & ~7) - pushed)
@ Emits checkasm_call_\variant: calls the function under test (r0) with
@ r4-r11 (and q4-q7 for the neon variant) seeded from register_init,
@ forwards up to MAX_ARGS arguments, then verifies those registers were
@ preserved; on mismatch clears the "ok" flag (*r1) and prints
@ error_message.  "pushed" tracks how many bytes the prologue pushed so
@ stack-argument offsets stay correct per variant.
.macro clobbercheck variant
.equ pushed, 4*10
function checkasm_call_\variant
push {r4-r11, lr}
.ifc \variant, neon
vpush {q4-q7}
.equ pushed, pushed + 16*4
.endif
movrel r12, register_init
.ifc \variant, neon
vldm r12, {q4-q7}
.endif
ldm r12, {r4-r11}
push {r1} @ keep the "ok" pointer across the call
sub sp, sp, #ARG_STACK_A
.equ pos, 0
@ Copy the stack-passed arguments of the tested function into our frame
@ (its first four args travel in r0-r3 below).
.rept MAX_ARGS-4
ldr r12, [sp, #ARG_STACK_A + pushed + 8 + pos]
str r12, [sp, #pos]
.equ pos, pos + 4
.endr
mov r12, r0 @ r12 = function under test
mov r0, r2
mov r1, r3
ldrd r2, r3, [sp, #ARG_STACK_A + pushed]
blx r12
add sp, sp, #ARG_STACK_A
pop {r2} @ r2 = "ok" pointer
push {r0, r1} @ preserve the tested function's return value
movrel r12, register_init
.ifc \variant, neon
@ Fold any difference between q4-q7 and their poison values into r3.
vldm r12, {q0-q3}
veor q0, q0, q4
veor q1, q1, q5
veor q2, q2, q6
veor q3, q3, q7
vorr q0, q0, q1
vorr q0, q0, q2
vorr q0, q0, q3
vorr d0, d0, d1
vrev64.32 d1, d0
vorr d0, d0, d1
vmov.32 r3, d0[0]
.else
mov r3, #0
.endif
@ OR into r3 the XOR of the saved poison with \reg1 (and \reg2 if given).
.macro check_reg reg1, reg2=
ldrd r0, r1, [r12], #8
eor r0, r0, \reg1
orr r3, r3, r0
.ifnb \reg2
eor r1, r1, \reg2
orr r3, r3, r1
.endif
.endm
check_reg r4, r5
check_reg r6, r7
@ r9 is a volatile register in the ios ABI
#if SYS_MACOSX
check_reg r8
#else
check_reg r8, r9
#endif
check_reg r10, r11
.purgem check_reg
cmp r3, #0
beq 0f
mov r12, #0
str r12, [r2] @ clear *ok
movrel r0, error_message
blx EXT(puts)
0:
pop {r0, r1} @ restore the return value
.ifc \variant, neon
vpop {q4-q7}
.endif
pop {r4-r11, pc}
endfunc
.endm
clobbercheck neon
clobbercheck noneon
|
aestream/faery
| 1,731
|
src/mp4/x264/common/aarch64/deblock-a-common.S
|
/*****************************************************************************
* deblock-a-common.S: aarch64 deblocking
*****************************************************************************
* Copyright (C) 2009-2024 x264 project
*
* Authors: Mans Rullgard <mans@mansr.com>
* Janne Grunau <janne-x264@jannau.net>
* David Chen <david.chen@myais.com.cn>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
// This file contains the NEON macros that are intended to be used by
// the SVE/SVE2 functions as well
// Shared loop-filter entry sequence.  w2/w3 are the two filter thresholds
// and x4 points to four tc0 bytes (loaded into v24.s[0]).  The expansion
// returns early (via the embedded ret) when either threshold is zero, or
// when the sign-bit test on the packed tc0 bytes fails — presumably "all
// tc0 < 0 means nothing to filter"; confirm against the C reference.
// Falls through to label 2 when filtering should proceed.
.macro h264_loop_filter_start
cmp w2, #0
ldr w6, [x4]
ccmp w3, #0, #0, ne
mov v24.s[0], w6
and w8, w6, w6, lsl #16
b.eq 1f
ands w8, w8, w8, lsl #8
b.ge 2f
1:
ret
2:
.endm
|
aestream/faery
| 2,637
|
src/mp4/x264/common/aarch64/bitstream-a.S
|
/*****************************************************************************
* bitstream-a.S: aarch64 bitstream functions
*****************************************************************************
* Copyright (C) 2014-2024 x264 project
*
* Authors: Janne Grunau <janne-x264@jannau.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "asm.S"
// NAL escaping: copies bytes from [x1, x2) to x0, inserting an 0x03
// emulation-prevention byte whenever a byte value < 4 follows two zero
// bytes.  v0 carries the last two output bytes across 16-byte chunks
// (lanes 14/15); chunks with no candidate pattern are copied whole,
// otherwise the chunk is reprocessed byte-by-byte.
function nal_escape_neon
movi v0.16b, #0xff
movi v4.16b, #4
mov w3, #3
subs x6, x1, x2
cbz x6, 99f // empty input: nothing to do
0:
cmn x6, #15
b.lt 16f
mov x1, x2
b 100f // fewer than 16 bytes left: scalar tail
16:
ld1 {v1.16b}, [x1], #16
// v2/v3 = chunk shifted right by 2/1 bytes, with history from v0.
ext v2.16b, v0.16b, v1.16b, #14
ext v3.16b, v0.16b, v1.16b, #15
cmhi v7.16b, v4.16b, v1.16b // lanes where byte < 4
cmeq v5.16b, v2.16b, #0 // byte two positions back == 0
cmeq v6.16b, v3.16b, #0 // previous byte == 0
and v5.16b, v5.16b, v7.16b
and v5.16b, v5.16b, v6.16b
shrn v7.8b, v5.8h, #4
mov x7, v7.d[0]
cbz x7, 16f // no 00 00 0x pattern: fast copy path
mov x6, #-16 // rewind and process this chunk per byte
100:
// Scalar path: w5 holds the previous two output bytes.
umov w5, v0.b[14]
umov w4, v0.b[15]
orr w5, w4, w5, lsl #8
101:
ldrb w4, [x1, x6]
orr w9, w4, w5, lsl #16
cmp w9, #3
b.hi 102f
strb w3, [x0], #1 // insert the 0x03 escape byte
orr w5, w3, w5, lsl #8
102:
adds x6, x6, #1
strb w4, [x0], #1
orr w5, w4, w5, lsl #8
b.lt 101b
subs x6, x1, x2
// Refresh the 2-byte history lanes of v0 from w5.
lsr w9, w5, #8
mov v0.b[14], w9
mov v0.b[15], w5
b.lt 0b
ret
16:
// Fast path: store the chunk unmodified; it becomes the new history.
subs x6, x1, x2
st1 {v1.16b}, [x0], #16
mov v0.16b, v1.16b
b.lt 0b
99:
ret
endfunc
|
aestream/faery
| 33,080
|
src/mp4/x264/common/aarch64/dct-a.S
|
/****************************************************************************
* dct-a.S: aarch64 transform and zigzag
*****************************************************************************
* Copyright (C) 2009-2024 x264 project
*
* Authors: David Conrad <lessen42@gmail.com>
* Janne Grunau <janne-x264@jannau.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "asm.S"
#include "dct-a-common.S"
// TBL shuffle tables for the 4x4 zigzag scans: each pair of bytes selects
// one 16-bit coefficient (low byte, high byte).
const scan4x4_frame, align=4
.byte 0,1, 8,9, 2,3, 4,5
.byte 10,11, 16,17, 24,25, 18,19
.byte 12,13, 6,7, 14,15, 20,21
.byte 26,27, 28,29, 22,23, 30,31
endconst
// Field scan only permutes the first 8 coefficients (16 bytes).
const scan4x4_field, align=4
.byte 0,1, 2,3, 8,9, 4,5
.byte 6,7, 10,11, 12,13, 14,15
endconst
// Pixel shuffle tables (one byte per pixel) used by zigzag_sub_4x4*.
const sub4x4_frame, align=4
.byte 0, 1, 4, 8
.byte 5, 2, 3, 6
.byte 9, 12, 13, 10
.byte 7, 11, 14, 15
endconst
const sub4x4_field, align=4
.byte 0, 4, 1, 8
.byte 12, 5, 9, 13
.byte 2, 6, 10, 14
.byte 3, 7, 11, 15
endconst
// sum = a + (b>>shift) sub = (a>>shift) - b
.macro SUMSUB_SHR shift sum sub a b t0 t1
sshr \t0, \b, #\shift
sshr \t1, \a, #\shift
add \sum, \a, \t0
sub \sub, \t1, \b
.endm
// sum = (a>>shift) + b sub = a - (b>>shift)
.macro SUMSUB_SHR2 shift sum sub a b t0 t1
sshr \t0, \a, #\shift
sshr \t1, \b, #\shift
add \sum, \t0, \b
sub \sub, \a, \t1
.endm
// a += 1.5*ma b -= 1.5*mb
.macro SUMSUB_15 a b ma mb t0 t1
sshr \t0, \ma, #1
sshr \t1, \mb, #1
add \t0, \t0, \ma
add \t1, \t1, \mb
add \a, \a, \t0
sub \b, \b, \t1
.endm
// In-place 4x4 DC (Hadamard-style) transform on 16 int16 coefficients at
// x0: butterfly pass, transpose, second butterfly pass with rounding via
// srhadd/shsub (+1 bias in v31).
function dct4x4dc_neon
ld1 {v0.4h,v1.4h,v2.4h,v3.4h}, [x0]
movi v31.4h, #1
SUMSUB_AB v4.4h, v5.4h, v0.4h, v1.4h
SUMSUB_AB v6.4h, v7.4h, v2.4h, v3.4h
SUMSUB_AB v0.4h, v2.4h, v4.4h, v6.4h
SUMSUB_AB v3.4h, v1.4h, v5.4h, v7.4h
transpose v4.4h, v6.4h, v0.4h, v2.4h
transpose v5.4h, v7.4h, v1.4h, v3.4h
SUMSUB_AB v0.4h, v2.4h, v4.4h, v6.4h
SUMSUB_AB v1.4h, v3.4h, v5.4h, v7.4h
transpose v4.2s, v5.2s, v0.2s, v1.2s
transpose v6.2s, v7.2s, v2.2s, v3.2s
// Final rounded halving stage: (a+b+1)>>1 and (a+1-b)>>1.
add v16.4h, v4.4h, v31.4h
add v17.4h, v6.4h, v31.4h
srhadd v0.4h, v4.4h, v5.4h
shsub v1.4h, v16.4h, v5.4h
shsub v2.4h, v17.4h, v7.4h
srhadd v3.4h, v6.4h, v7.4h
st1 {v0.4h,v1.4h,v2.4h,v3.4h}, [x0]
ret
endfunc
// In-place inverse 4x4 DC transform on 16 int16 coefficients at x0:
// the same butterfly/transpose structure as dct4x4dc_neon but without
// the final rounding/halving stage.
function idct4x4dc_neon
ld1 {v0.4h,v1.4h,v2.4h,v3.4h}, [x0]
SUMSUB_AB v4.4h, v5.4h, v0.4h, v1.4h
SUMSUB_AB v6.4h, v7.4h, v2.4h, v3.4h
SUMSUB_AB v0.4h, v2.4h, v4.4h, v6.4h
SUMSUB_AB v3.4h, v1.4h, v5.4h, v7.4h
transpose v4.4h, v6.4h, v0.4h, v2.4h
transpose v5.4h, v7.4h, v1.4h, v3.4h
SUMSUB_AB v0.4h, v2.4h, v4.4h, v6.4h
SUMSUB_AB v1.4h, v3.4h, v5.4h, v7.4h
transpose v4.2s, v5.2s, v0.2s, v1.2s
transpose v6.2s, v7.2s, v2.2s, v3.2s
SUMSUB_AB v0.4h, v1.4h, v4.4h, v5.4h
SUMSUB_AB v3.4h, v2.4h, v6.4h, v7.4h
st1 {v0.4h,v1.4h,v2.4h,v3.4h}, [x0]
ret
endfunc
// 4x4 DCT of the difference between a 4x4 encode block (x1, stride
// FENC_STRIDE) and a 4x4 decode block (x2, stride FDEC_STRIDE); the 16
// int16 coefficients are stored at x0.  DCT_1D comes from dct-a-common.S.
function sub4x4_dct_neon
mov x3, #FENC_STRIDE
mov x4, #FDEC_STRIDE
ld1 {v0.s}[0], [x1], x3
ld1 {v1.s}[0], [x2], x4
ld1 {v2.s}[0], [x1], x3
usubl v16.8h, v0.8b, v1.8b // widen pixel differences to int16
ld1 {v3.s}[0], [x2], x4
ld1 {v4.s}[0], [x1], x3
usubl v17.8h, v2.8b, v3.8b
ld1 {v5.s}[0], [x2], x4
ld1 {v6.s}[0], [x1], x3
usubl v18.8h, v4.8b, v5.8b
ld1 {v7.s}[0], [x2], x4
usubl v19.8h, v6.8b, v7.8b
DCT_1D v0.4h, v1.4h, v2.4h, v3.4h, v16.4h, v17.4h, v18.4h, v19.4h
transpose4x4.h v0, v1, v2, v3, v4, v5, v6, v7
DCT_1D v4.4h, v5.4h, v6.4h, v7.4h, v0.4h, v1.4h, v2.4h, v3.4h
st1 {v4.4h,v5.4h,v6.4h,v7.4h}, [x0]
ret
endfunc
// Internal helper: DCT of an 8x4 pixel-difference strip as two 4x4
// blocks.  Expects x0=dst (advanced by 64 bytes), x1/x2=enc/dec pixels,
// x3/x4 = their strides (set up by the callers below).
function sub8x4_dct_neon
ld1 {v0.8b}, [x1], x3
ld1 {v1.8b}, [x2], x4
usubl v16.8h, v0.8b, v1.8b
ld1 {v2.8b}, [x1], x3
ld1 {v3.8b}, [x2], x4
usubl v17.8h, v2.8b, v3.8b
ld1 {v4.8b}, [x1], x3
ld1 {v5.8b}, [x2], x4
usubl v18.8h, v4.8b, v5.8b
ld1 {v6.8b}, [x1], x3
ld1 {v7.8b}, [x2], x4
usubl v19.8h, v6.8b, v7.8b
DCT_1D v0.8h, v1.8h, v2.8h, v3.8h, v16.8h, v17.8h, v18.8h, v19.8h
transpose4x8.h v0, v1, v2, v3, v4, v5, v6, v7
// Second (vertical) DCT pass, done inline on both 4x4 halves at once.
SUMSUB_AB v16.8h, v19.8h, v0.8h, v3.8h
SUMSUB_AB v17.8h, v18.8h, v1.8h, v2.8h
add v22.8h, v19.8h, v19.8h
add v21.8h, v18.8h, v18.8h
add v0.8h, v16.8h, v17.8h
sub v1.8h, v16.8h, v17.8h
add v2.8h, v22.8h, v18.8h
sub v3.8h, v19.8h, v21.8h
// De-interleave the two 4x4 blocks before storing.
zip1 v4.2d, v0.2d, v2.2d
zip2 v6.2d, v0.2d, v2.2d
zip1 v5.2d, v1.2d, v3.2d
zip2 v7.2d, v1.2d, v3.2d
st1 {v4.8h}, [x0], #16
st1 {v5.8h}, [x0], #16
st1 {v6.8h}, [x0], #16
st1 {v7.8h}, [x0], #16
ret
endfunc
// 8x8 difference DCT: two sub8x4 strips.  The return address is parked
// in x5 so the final strip can be tail-called.
function sub8x8_dct_neon
mov x5, x30
mov x3, #FENC_STRIDE
mov x4, #FDEC_STRIDE
bl sub8x4_dct_neon
mov x30, x5
b sub8x4_dct_neon // tail call handles the second strip + ret
endfunc
// 16x16 difference DCT: eight sub8x4 strips, walking the four 8x8
// quadrants; the sub/sub adjustments reposition x1/x2 between quadrants.
function sub16x16_dct_neon
mov x5, x30
mov x3, #FENC_STRIDE
mov x4, #FDEC_STRIDE
bl sub8x4_dct_neon
bl sub8x4_dct_neon
sub x1, x1, #8*FENC_STRIDE-8
sub x2, x2, #8*FDEC_STRIDE-8
bl sub8x4_dct_neon
bl sub8x4_dct_neon
sub x1, x1, #8
sub x2, x2, #8
bl sub8x4_dct_neon
bl sub8x4_dct_neon
sub x1, x1, #8*FENC_STRIDE-8
sub x2, x2, #8*FDEC_STRIDE-8
bl sub8x4_dct_neon
mov x30, x5
b sub8x4_dct_neon // tail call for the last strip
endfunc
// One 8-point DCT pass over rows v0-v7 (int16 lanes); results are left
// in v0-v7.  The \type argument is not referenced in the body; it is
// kept for call-site symmetry with IDCT8_1D.
.macro DCT8_1D type
SUMSUB_AB v18.8h, v17.8h, v3.8h, v4.8h // s34/d34
SUMSUB_AB v19.8h, v16.8h, v2.8h, v5.8h // s25/d25
SUMSUB_AB v22.8h, v21.8h, v1.8h, v6.8h // s16/d16
SUMSUB_AB v23.8h, v20.8h, v0.8h, v7.8h // s07/d07
SUMSUB_AB v24.8h, v26.8h, v23.8h, v18.8h // a0/a2
SUMSUB_AB v25.8h, v27.8h, v22.8h, v19.8h // a1/a3
SUMSUB_AB v30.8h, v29.8h, v20.8h, v17.8h // a6/a5
sshr v23.8h, v21.8h, #1
sshr v18.8h, v16.8h, #1
add v23.8h, v23.8h, v21.8h
add v18.8h, v18.8h, v16.8h
sub v30.8h, v30.8h, v23.8h
sub v29.8h, v29.8h, v18.8h
SUMSUB_AB v28.8h, v31.8h, v21.8h, v16.8h // a4/a7
sshr v22.8h, v20.8h, #1
sshr v19.8h, v17.8h, #1
add v22.8h, v22.8h, v20.8h
add v19.8h, v19.8h, v17.8h
add v22.8h, v28.8h, v22.8h
add v31.8h, v31.8h, v19.8h
SUMSUB_AB v0.8h, v4.8h, v24.8h, v25.8h
SUMSUB_SHR 2, v1.8h, v7.8h, v22.8h, v31.8h, v16.8h, v17.8h
SUMSUB_SHR 1, v2.8h, v6.8h, v26.8h, v27.8h, v18.8h, v19.8h
SUMSUB_SHR2 2, v3.8h, v5.8h, v30.8h, v29.8h, v20.8h, v21.8h
.endm
// 8x8 DCT of the enc/dec pixel difference (x1=enc, x2=dec); 64 int16
// coefficients stored at x0.  Row pass, full 8x8 transpose, column pass.
function sub8x8_dct8_neon
mov x3, #FENC_STRIDE
mov x4, #FDEC_STRIDE
ld1 {v16.8b}, [x1], x3
ld1 {v17.8b}, [x2], x4
ld1 {v18.8b}, [x1], x3
ld1 {v19.8b}, [x2], x4
usubl v0.8h, v16.8b, v17.8b
ld1 {v20.8b}, [x1], x3
ld1 {v21.8b}, [x2], x4
usubl v1.8h, v18.8b, v19.8b
ld1 {v22.8b}, [x1], x3
ld1 {v23.8b}, [x2], x4
usubl v2.8h, v20.8b, v21.8b
ld1 {v24.8b}, [x1], x3
ld1 {v25.8b}, [x2], x4
usubl v3.8h, v22.8b, v23.8b
ld1 {v26.8b}, [x1], x3
ld1 {v27.8b}, [x2], x4
usubl v4.8h, v24.8b, v25.8b
ld1 {v28.8b}, [x1], x3
ld1 {v29.8b}, [x2], x4
usubl v5.8h, v26.8b, v27.8b
ld1 {v30.8b}, [x1], x3
ld1 {v31.8b}, [x2], x4
usubl v6.8h, v28.8b, v29.8b
usubl v7.8h, v30.8b, v31.8b
DCT8_1D row
transpose8x8.h v0, v1, v2, v3, v4, v5, v6, v7, v30, v31
DCT8_1D col
st1 {v0.8h,v1.8h,v2.8h,v3.8h}, [x0], #64
st1 {v4.8h,v5.8h,v6.8h,v7.8h}, [x0], #64
ret
endfunc
// 16x16 version: four 8x8 DCTs over the quadrants, re-aiming x1/x2
// between calls; the last quadrant is tail-called.
function sub16x16_dct8_neon
mov x7, x30
bl X(sub8x8_dct8_neon)
sub x1, x1, #FENC_STRIDE*8 - 8
sub x2, x2, #FDEC_STRIDE*8 - 8
bl X(sub8x8_dct8_neon)
sub x1, x1, #8
sub x2, x2, #8
bl X(sub8x8_dct8_neon)
mov x30, x7
sub x1, x1, #FENC_STRIDE*8 - 8
sub x2, x2, #FDEC_STRIDE*8 - 8
b X(sub8x8_dct8_neon)
endfunc
// First part of IDCT (minus final SUMSUB_BA)
.macro IDCT_1D d4 d5 d6 d7 d0 d1 d2 d3
SUMSUB_AB \d4, \d5, \d0, \d2
sshr \d7, \d1, #1
sshr \d6, \d3, #1
sub \d7, \d7, \d3
add \d6, \d6, \d1
.endm
// Inverse 4x4 DCT of the coefficients at x1, added (with >>6 rounding
// and saturation) to the 4x4 pixel block at x0 (stride FDEC_STRIDE).
// Note the row-order swap: v3/v2 stores mirror the v31/v30 load order.
function add4x4_idct_neon
mov x2, #FDEC_STRIDE
ld1 {v0.4h,v1.4h,v2.4h,v3.4h}, [x1]
IDCT_1D v4.4h, v5.4h, v6.4h, v7.4h, v0.4h, v1.4h, v2.4h, v3.4h
ld1 {v28.s}[0], [x0], x2
SUMSUB_AB v0.4h, v2.4h, v4.4h, v6.4h
SUMSUB_AB v1.4h, v3.4h, v5.4h, v7.4h
transpose4x4.h v0, v1, v3, v2, v16, v17, v18, v19
IDCT_1D v4.4h, v5.4h, v6.4h, v7.4h, v0.4h, v1.4h, v3.4h, v2.4h
ld1 {v29.s}[0], [x0], x2
SUMSUB_AB v0.4h, v2.4h, v4.4h, v6.4h
SUMSUB_AB v1.4h, v3.4h, v5.4h, v7.4h
srshr v0.4h, v0.4h, #6
srshr v1.4h, v1.4h, #6
ld1 {v31.s}[0], [x0], x2
srshr v2.4h, v2.4h, #6
srshr v3.4h, v3.4h, #6
ld1 {v30.s}[0], [x0], x2
sub x0, x0, x2, lsl #2 // rewind x0 to the first row
uaddw v0.8h, v0.8h, v28.8b
uaddw v1.8h, v1.8h, v29.8b
uaddw v2.8h, v2.8h, v30.8b
uaddw v3.8h, v3.8h, v31.8b
sqxtun v0.8b, v0.8h
sqxtun v1.8b, v1.8h
sqxtun v2.8b, v2.8h
sqxtun v3.8b, v3.8h
st1 {v0.s}[0], [x0], x2
st1 {v1.s}[0], [x0], x2
st1 {v3.s}[0], [x0], x2
st1 {v2.s}[0], [x0], x2
ret
endfunc
// Inverse DCT of two 4x4 coefficient blocks at x1 (consumed, x1 advances
// by 64 bytes), added with >>6 rounding/saturation to an 8x4 pixel strip
// at x0 (stride in x2 — set by the add8x8/add16x16 wrappers).
function add8x4_idct_neon
ld1 {v0.8h,v1.8h}, [x1], #32
ld1 {v2.8h,v3.8h}, [x1], #32
// Interleave the two 4x4 blocks so both are transformed at once.
transpose v20.2d, v21.2d, v0.2d, v2.2d
transpose v22.2d, v23.2d, v1.2d, v3.2d
IDCT_1D v16.8h, v17.8h, v18.8h, v19.8h, v20.8h, v21.8h, v22.8h, v23.8h
SUMSUB_AB v0.8h, v3.8h, v16.8h, v18.8h
SUMSUB_AB v1.8h, v2.8h, v17.8h, v19.8h
transpose4x8.h v0, v1, v2, v3, v4, v5, v6, v7
IDCT_1D v16.8h, v17.8h, v18.8h, v19.8h, v0.8h, v1.8h, v2.8h, v3.8h
SUMSUB_AB v0.8h, v3.8h, v16.8h, v18.8h
SUMSUB_AB v1.8h, v2.8h, v17.8h, v19.8h
srshr v0.8h, v0.8h, #6
ld1 {v28.8b}, [x0], x2
srshr v1.8h, v1.8h, #6
ld1 {v29.8b}, [x0], x2
srshr v2.8h, v2.8h, #6
ld1 {v30.8b}, [x0], x2
srshr v3.8h, v3.8h, #6
ld1 {v31.8b}, [x0], x2
sub x0, x0, x2, lsl #2 // rewind to the first row
uaddw v0.8h, v0.8h, v28.8b
uaddw v1.8h, v1.8h, v29.8b
uaddw v2.8h, v2.8h, v30.8b
uaddw v3.8h, v3.8h, v31.8b
sqxtun v0.8b, v0.8h
sqxtun v1.8b, v1.8h
st1 {v0.8b}, [x0], x2
sqxtun v2.8b, v2.8h
st1 {v1.8b}, [x0], x2
sqxtun v3.8b, v3.8h
st1 {v2.8b}, [x0], x2
st1 {v3.8b}, [x0], x2
ret
endfunc
// 8x8 IDCT-add: two 8x4 strips; the second is tail-called.
function add8x8_idct_neon
mov x2, #FDEC_STRIDE
mov x5, x30
bl X(add8x4_idct_neon)
mov x30, x5
b X(add8x4_idct_neon)
endfunc
// 16x16 IDCT-add: eight 8x4 strips across the four quadrants; x0 is
// repositioned between quadrants, last strip tail-called.
function add16x16_idct_neon
mov x2, #FDEC_STRIDE
mov x5, x30
bl X(add8x4_idct_neon)
bl X(add8x4_idct_neon)
sub x0, x0, #8*FDEC_STRIDE-8
bl X(add8x4_idct_neon)
bl X(add8x4_idct_neon)
sub x0, x0, #8
bl X(add8x4_idct_neon)
bl X(add8x4_idct_neon)
sub x0, x0, #8*FDEC_STRIDE-8
bl X(add8x4_idct_neon)
mov x30, x5
b X(add8x4_idct_neon)
endfunc
// One 8-point inverse-DCT pass over v16-v23 (int16 lanes).  For the
// "row" pass it also streams in the last two coefficient rows (v22/v23)
// from x1, overlapping the load with the first butterfly.
.macro IDCT8_1D type
SUMSUB_AB v0.8h, v1.8h, v16.8h, v20.8h // a0/a2
.ifc \type, row
ld1 {v22.8h,v23.8h}, [x1], #32
.endif
SUMSUB_SHR 1, v2.8h, v3.8h, v18.8h, v22.8h, v16.8h, v20.8h // a6/a4
SUMSUB_AB v16.8h, v18.8h, v21.8h, v19.8h
SUMSUB_15 v16.8h, v18.8h, v17.8h, v23.8h, v20.8h, v22.8h // a7/a1
SUMSUB_AB v22.8h, v23.8h, v23.8h, v17.8h
SUMSUB_15 v23.8h, v22.8h, v21.8h, v19.8h, v20.8h, v17.8h // a5/a3
SUMSUB_SHR 2, v21.8h, v22.8h, v22.8h, v23.8h, v19.8h, v17.8h // b3/b5
SUMSUB_SHR2 2, v20.8h, v23.8h, v16.8h, v18.8h, v19.8h, v17.8h // b1/b7
SUMSUB_AB v18.8h, v2.8h, v0.8h, v2.8h // b0/b6
SUMSUB_AB v19.8h, v3.8h, v1.8h, v3.8h // b2/b4
SUMSUB_AB v16.8h, v23.8h, v18.8h, v23.8h
SUMSUB_AB v17.8h, v22.8h, v19.8h, v22.8h
SUMSUB_AB v18.8h, v21.8h, v3.8h, v21.8h
SUMSUB_AB v19.8h, v20.8h, v2.8h, v20.8h
.endm
// 8x8 inverse DCT of 64 int16 coefficients at x1, added with >>6
// rounding/saturation to the 8x8 pixel block at x0 (stride FDEC_STRIDE).
// Row pass (IDCT8_1D loads the tail rows itself), transpose, column pass.
function add8x8_idct8_neon
mov x2, #FDEC_STRIDE
ld1 {v16.8h,v17.8h}, [x1], #32
ld1 {v18.8h,v19.8h}, [x1], #32
ld1 {v20.8h,v21.8h}, [x1], #32
IDCT8_1D row
transpose8x8.h v16, v17, v18, v19, v20, v21, v22, v23, v30, v31
IDCT8_1D col
// Load destination rows interleaved with the rounding shifts.
ld1 {v0.8b}, [x0], x2
srshr v16.8h, v16.8h, #6
ld1 {v1.8b}, [x0], x2
srshr v17.8h, v17.8h, #6
ld1 {v2.8b}, [x0], x2
srshr v18.8h, v18.8h, #6
ld1 {v3.8b}, [x0], x2
srshr v19.8h, v19.8h, #6
ld1 {v4.8b}, [x0], x2
srshr v20.8h, v20.8h, #6
ld1 {v5.8b}, [x0], x2
srshr v21.8h, v21.8h, #6
ld1 {v6.8b}, [x0], x2
srshr v22.8h, v22.8h, #6
ld1 {v7.8b}, [x0], x2
srshr v23.8h, v23.8h, #6
sub x0, x0, x2, lsl #3 // rewind to the first row
uaddw v16.8h, v16.8h, v0.8b
uaddw v17.8h, v17.8h, v1.8b
uaddw v18.8h, v18.8h, v2.8b
sqxtun v0.8b, v16.8h
sqxtun v1.8b, v17.8h
sqxtun v2.8b, v18.8h
uaddw v19.8h, v19.8h, v3.8b
st1 {v0.8b}, [x0], x2
uaddw v20.8h, v20.8h, v4.8b
st1 {v1.8b}, [x0], x2
uaddw v21.8h, v21.8h, v5.8b
st1 {v2.8b}, [x0], x2
sqxtun v3.8b, v19.8h
sqxtun v4.8b, v20.8h
uaddw v22.8h, v22.8h, v6.8b
uaddw v23.8h, v23.8h, v7.8b
st1 {v3.8b}, [x0], x2
sqxtun v5.8b, v21.8h
st1 {v4.8b}, [x0], x2
sqxtun v6.8b, v22.8h
sqxtun v7.8b, v23.8h
st1 {v5.8b}, [x0], x2
st1 {v6.8b}, [x0], x2
st1 {v7.8b}, [x0], x2
ret
endfunc
// 16x16 version: four 8x8 IDCT-adds, repositioning x0 between quadrants.
function add16x16_idct8_neon
mov x7, x30
bl X(add8x8_idct8_neon)
sub x0, x0, #8*FDEC_STRIDE-8
bl X(add8x8_idct8_neon)
sub x0, x0, #8
bl X(add8x8_idct8_neon)
sub x0, x0, #8*FDEC_STRIDE-8
mov x30, x7
b X(add8x8_idct8_neon)
endfunc
// Adds four rounded DC values (x1, one int16 per 4x4 quadrant) to the
// 8x8 pixel block at x0.  Each DC is split into a saturating add of its
// positive part (uqadd) and a saturating subtract of its negated part
// (uqsub), so both signs clamp correctly in 8-bit.
function add8x8_idct_dc_neon
mov x2, #FDEC_STRIDE
ld1 {v16.4h}, [x1]
ld1 {v0.8b}, [x0], x2
srshr v16.4h, v16.4h, #6 // round DCs: (dc+32)>>6
ld1 {v1.8b}, [x0], x2
dup v20.8h, v16.h[0]
dup v21.8h, v16.h[1]
ld1 {v2.8b}, [x0], x2
dup v22.8h, v16.h[2]
dup v23.8h, v16.h[3]
ld1 {v3.8b}, [x0], x2
// v20 = top-left|top-right DCs, v21 = bottom-left|bottom-right DCs.
trn1 v20.2d, v20.2d, v21.2d
ld1 {v4.8b}, [x0], x2
trn1 v21.2d, v22.2d, v23.2d
ld1 {v5.8b}, [x0], x2
neg v22.8h, v20.8h
ld1 {v6.8b}, [x0], x2
neg v23.8h, v21.8h
ld1 {v7.8b}, [x0], x2
sub x0, x0, #8*FDEC_STRIDE
// sqxtun clamps negatives to 0, leaving only the relevant sign part.
sqxtun v20.8b, v20.8h
sqxtun v21.8b, v21.8h
sqxtun v22.8b, v22.8h
sqxtun v23.8b, v23.8h
uqadd v0.8b, v0.8b, v20.8b
uqadd v1.8b, v1.8b, v20.8b
uqadd v2.8b, v2.8b, v20.8b
uqadd v3.8b, v3.8b, v20.8b
uqadd v4.8b, v4.8b, v21.8b
uqadd v5.8b, v5.8b, v21.8b
uqadd v6.8b, v6.8b, v21.8b
uqadd v7.8b, v7.8b, v21.8b
uqsub v0.8b, v0.8b, v22.8b
uqsub v1.8b, v1.8b, v22.8b
uqsub v2.8b, v2.8b, v22.8b
uqsub v3.8b, v3.8b, v22.8b
uqsub v4.8b, v4.8b, v23.8b
uqsub v5.8b, v5.8b, v23.8b
uqsub v6.8b, v6.8b, v23.8b
uqsub v7.8b, v7.8b, v23.8b
st1 {v0.8b}, [x0], x2
st1 {v1.8b}, [x0], x2
st1 {v2.8b}, [x0], x2
st1 {v3.8b}, [x0], x2
st1 {v4.8b}, [x0], x2
st1 {v5.8b}, [x0], x2
st1 {v6.8b}, [x0], x2
st1 {v7.8b}, [x0], x2
ret
endfunc
// Adds four DC values (\dc lanes 0-3, one per 4x4 column block) to a
// 16x4 pixel strip read from x0 and written to x2; same positive/negative
// split-and-saturate scheme as add8x8_idct_dc_neon.
.macro ADD16x4_IDCT_DC dc
ld1 {v4.16b}, [x0], x3
dup v24.8h, \dc[0]
dup v25.8h, \dc[1]
ld1 {v5.16b}, [x0], x3
dup v26.8h, \dc[2]
dup v27.8h, \dc[3]
ld1 {v6.16b}, [x0], x3
trn1 v24.2d, v24.2d, v25.2d
ld1 {v7.16b}, [x0], x3
trn1 v25.2d, v26.2d, v27.2d
neg v26.8h, v24.8h
neg v27.8h, v25.8h
sqxtun v20.8b, v24.8h
sqxtun v21.8b, v26.8h
sqxtun2 v20.16b, v25.8h
sqxtun2 v21.16b, v27.8h
uqadd v4.16b, v4.16b, v20.16b
uqadd v5.16b, v5.16b, v20.16b
uqadd v6.16b, v6.16b, v20.16b
uqadd v7.16b, v7.16b, v20.16b
uqsub v4.16b, v4.16b, v21.16b
uqsub v5.16b, v5.16b, v21.16b
uqsub v6.16b, v6.16b, v21.16b
st1 {v4.16b}, [x2], x3
uqsub v7.16b, v7.16b, v21.16b
st1 {v5.16b}, [x2], x3
st1 {v6.16b}, [x2], x3
st1 {v7.16b}, [x2], x3
.endm
// Adds 16 rounded DC values (x1) to the 16x16 pixel block at x0, one
// 16x4 strip per macro expansion.
function add16x16_idct_dc_neon
mov x2, x0
mov x3, #FDEC_STRIDE
ld1 {v0.4h,v1.4h,v2.4h,v3.4h}, [x1]
srshr v0.4h, v0.4h, #6
srshr v1.4h, v1.4h, #6
ADD16x4_IDCT_DC v0.h
srshr v2.4h, v2.4h, #6
ADD16x4_IDCT_DC v1.h
srshr v3.4h, v3.4h, #6
ADD16x4_IDCT_DC v2.h
ADD16x4_IDCT_DC v3.h
ret
endfunc
// Sums the pixel differences of two side-by-side 4x4 blocks (4 rows of
// 8 pixels from x1/x2, strides x3/x4) into \dst: lanes 0-3 and 4-7 end
// up holding the column sums of the left and right block respectively.
.macro sub4x4x2_dct_dc, dst, t0, t1, t2, t3, t4, t5, t6, t7
ld1 {\t0\().8b}, [x1], x3
ld1 {\t1\().8b}, [x2], x4
ld1 {\t2\().8b}, [x1], x3
ld1 {\t3\().8b}, [x2], x4
usubl \t0\().8h, \t0\().8b, \t1\().8b
ld1 {\t4\().8b}, [x1], x3
ld1 {\t5\().8b}, [x2], x4
usubl \t1\().8h, \t2\().8b, \t3\().8b
ld1 {\t6\().8b}, [x1], x3
ld1 {\t7\().8b}, [x2], x4
add \dst\().8h, \t0\().8h, \t1\().8h
usubl \t2\().8h, \t4\().8b, \t5\().8b
usubl \t3\().8h, \t6\().8b, \t7\().8b
add \dst\().8h, \dst\().8h, \t2\().8h
add \dst\().8h, \dst\().8h, \t3\().8h
.endm
// DC-only DCT of the four 4x4 sub-blocks of an 8x8 difference block;
// four int16 results stored at x0.
function sub8x8_dct_dc_neon
mov x3, #FENC_STRIDE
mov x4, #FDEC_STRIDE
sub4x4x2_dct_dc v0, v16, v17, v18, v19, v20, v21, v22, v23
sub4x4x2_dct_dc v1, v24, v25, v26, v27, v28, v29, v30, v31
// 2x2 Hadamard over the four block sums via transpose/sumsub passes.
transpose v2.2d, v3.2d, v0.2d, v1.2d
SUMSUB_AB v0.8h, v1.8h, v2.8h, v3.8h
transpose v2.2d, v3.2d, v0.2d, v1.2d
SUMSUB_AB v0.8h, v1.8h, v2.8h, v3.8h
transpose v2.2d, v3.2d, v0.2d, v1.2d
addp v0.8h, v2.8h, v3.8h
addp v0.8h, v0.8h, v0.8h
st1 {v0.4h}, [x0]
ret
endfunc
// DC-only DCT of the eight 4x4 sub-blocks of an 8x16 difference block;
// eight int16 results stored at x0.
function sub8x16_dct_dc_neon
mov x3, #FENC_STRIDE
mov x4, #FDEC_STRIDE
sub4x4x2_dct_dc v0, v16, v17, v18, v19, v20, v21, v22, v23
sub4x4x2_dct_dc v1, v24, v25, v26, v27, v28, v29, v30, v31
sub4x4x2_dct_dc v2, v16, v17, v18, v19, v20, v21, v22, v23
sub4x4x2_dct_dc v3, v24, v25, v26, v27, v28, v29, v30, v31
addp v4.8h, v0.8h, v2.8h
addp v5.8h, v1.8h, v3.8h
transpose v2.4s, v3.4s, v4.4s, v5.4s
SUMSUB_AB v0.8h, v1.8h, v2.8h, v3.8h
transpose v2.4s, v3.4s, v0.4s, v1.4s
SUMSUB_AB v0.8h, v1.8h, v2.8h, v3.8h
transpose v2.2d, v3.2d, v0.2d, v1.2d
SUMSUB_AB v0.8h, v1.8h, v2.8h, v3.8h
trn1 v2.2d, v0.2d, v1.2d
trn2 v3.2d, v1.2d, v0.2d
addp v0.8h, v2.8h, v3.8h
st1 {v0.8h}, [x0]
ret
endfunc
// De-interleaves 64 coefficients at x1 (4-way ld4) into four 16-coeff
// groups at x0, and writes a nonzero flag byte (0/1) per group to x2 at
// offsets 0, 1, 8 and 9 (the #1/x3(=7)/#1 post-increments).
function zigzag_interleave_8x8_cavlc_neon
mov x3, #7
movi v31.4s, #1
ld4 {v0.8h,v1.8h,v2.8h,v3.8h}, [x1], #64
ld4 {v4.8h,v5.8h,v6.8h,v7.8h}, [x1], #64
// Reduce each group's coefficients to a per-group maximum.
umax v16.8h, v0.8h, v4.8h
umax v17.8h, v1.8h, v5.8h
umax v18.8h, v2.8h, v6.8h
umax v19.8h, v3.8h, v7.8h
st1 {v0.8h}, [x0], #16
st1 {v4.8h}, [x0], #16
umaxp v16.8h, v16.8h, v17.8h
umaxp v18.8h, v18.8h, v19.8h
st1 {v1.8h}, [x0], #16
st1 {v5.8h}, [x0], #16
umaxp v16.8h, v16.8h, v18.8h
st1 {v2.8h}, [x0], #16
st1 {v6.8h}, [x0], #16
// max >= 1 (checked pairwise as 32-bit lanes) -> flag byte 1, else 0.
cmhs v16.4s, v16.4s, v31.4s
st1 {v3.8h}, [x0], #16
and v16.16b, v16.16b, v31.16b
st1 {v7.8h}, [x0], #16
st1 {v16.b}[0], [x2], #1
st1 {v16.b}[4], [x2], x3
st1 {v16.b}[8], [x2], #1
st1 {v16.b}[12], [x2]
ret
endfunc
// Frame zigzag scan of 16 int16 coefficients: pure table-driven byte
// shuffle from x1 to x0 using scan4x4_frame.
function zigzag_scan_4x4_frame_neon
movrel x2, scan4x4_frame
ld1 {v0.16b,v1.16b}, [x1]
ld1 {v16.16b,v17.16b}, [x2]
tbl v2.16b, {v0.16b,v1.16b}, v16.16b
tbl v3.16b, {v0.16b,v1.16b}, v17.16b
st1 {v2.16b,v3.16b}, [x0]
ret
endfunc
// Emits zigzag_sub_4x4[ac]_<f>_neon: subtracts the 4x4 dec block (x2)
// from the 4x4 enc block (x1) in <f> (frame/field) zigzag order, stores
// the 16 int16 levels at x0, copies enc pixels into the dec buffer, and
// returns 1 if any level is nonzero.  The "ac" variant extracts the DC
// level to *x3 and zeroes it in the output.
.macro zigzag_sub_4x4 f ac
function zigzag_sub_4x4\ac\()_\f\()_neon, export=1
mov x9, #FENC_STRIDE
mov x4, #FDEC_STRIDE
movrel x5, sub4x4_\f
mov x6, x2 // keep the dec pointer for the write-back below
ld1 {v0.s}[0], [x1], x9
ld1 {v0.s}[1], [x1], x9
ld1 {v0.s}[2], [x1], x9
ld1 {v0.s}[3], [x1], x9
ld1 {v16.16b}, [x5]
ld1 {v1.s}[0], [x2], x4
ld1 {v1.s}[1], [x2], x4
ld1 {v1.s}[2], [x2], x4
ld1 {v1.s}[3], [x2], x4
// Shuffle both blocks into scan order, then subtract.
tbl v2.16b, {v0.16b}, v16.16b
tbl v3.16b, {v1.16b}, v16.16b
st1 {v0.s}[0], [x6], x4
usubl v4.8h, v2.8b, v3.8b
.ifc \ac, ac
// Peel off the DC level into *x3 and clear it in the scan output.
dup h7, v4.h[0]
ins v4.h[0], wzr
fmov w5, s7
strh w5, [x3]
.endif
usubl2 v5.8h, v2.16b, v3.16b
st1 {v0.s}[1], [x6], x4
umax v6.8h, v4.8h, v5.8h
umaxv h6, v6.8h
st1 {v0.s}[2], [x6], x4
fmov w7, s6
st1 {v0.s}[3], [x6], x4
cmp w7, #0
st1 {v4.8h,v5.8h}, [x0]
cset w0, ne // return nonzero-coefficient flag
ret
endfunc
.endm
zigzag_sub_4x4 field
zigzag_sub_4x4 field, ac
zigzag_sub_4x4 frame
zigzag_sub_4x4 frame, ac
// Field zigzag scan: only the first 8 coefficients are permuted (the
// table covers one 16-byte vector); the second half is copied as-is.
function zigzag_scan_4x4_field_neon
movrel x2, scan4x4_field
ld1 {v0.8h,v1.8h}, [x1]
ld1 {v16.16b}, [x2]
tbl v0.16b, {v0.16b}, v16.16b
st1 {v0.8h,v1.8h}, [x0]
ret
endfunc
// Frame zigzag scan of 64 int16 coefficients (x1 -> x0) via scan8x8_frame.
// TBL can only index 4 source registers (64 bytes), so the table works on
// windows of the input and the handful of entries that fall outside each
// window are patched afterwards with explicit lane moves.
function zigzag_scan_8x8_frame_neon
movrel x2, scan8x8_frame
ld1 {v0.8h,v1.8h}, [x1], #32
ld1 {v2.8h,v3.8h}, [x1], #32
ld1 {v4.8h,v5.8h}, [x1], #32
ld1 {v6.8h,v7.8h}, [x1]
ld1 {v16.16b,v17.16b}, [x2], #32
ld1 {v18.16b,v19.16b}, [x2], #32
ld1 {v20.16b,v21.16b}, [x2], #32
ld1 {v22.16b,v23.16b}, [x2], #32
tbl v24.16b, {v0.16b,v1.16b,v2.16b,v3.16b}, v16.16b
tbl v25.16b, {v0.16b,v1.16b,v2.16b,v3.16b}, v17.16b
tbl v26.16b, {v0.16b,v1.16b,v2.16b,v3.16b}, v18.16b
tbl v27.16b, {v3.16b,v4.16b,v5.16b,v6.16b}, v19.16b
tbl v28.16b, {v0.16b,v1.16b,v2.16b,v3.16b}, v20.16b
tbl v29.16b, {v4.16b,v5.16b,v6.16b,v7.16b}, v21.16b
tbl v30.16b, {v4.16b,v5.16b,v6.16b,v7.16b}, v22.16b
tbl v31.16b, {v4.16b,v5.16b,v6.16b,v7.16b}, v23.16b
// Patch the entries that lie outside each TBL window.
mov v25.h[6], v4.h[0]
mov v25.h[7], v5.h[0]
mov v26.h[0], v4.h[1]
mov v27.h[4], v7.h[0]
mov v28.h[7], v4.h[4]
mov v29.h[7], v3.h[6]
mov v30.h[0], v2.h[7]
mov v30.h[1], v3.h[7]
st1 {v24.8h,v25.8h}, [x0], #32
st1 {v26.8h,v27.8h}, [x0], #32
st1 {v28.8h,v29.8h}, [x0], #32
st1 {v30.8h,v31.8h}, [x0]
ret
endfunc
// TBL table for zigzag_scan_8x8_frame_neon.  Z expands a coefficient
// index into its two byte offsets; T(x,y) is rebased (via the interleaved
// #define/#undef) so every entry stays within the 4-register window that
// the corresponding TBL instruction can see.
#define Z(z) 2*(z), 2*(z)+1
#define T(x,y) Z(x*8+y)
const scan8x8_frame, align=5
.byte T(0,0), T(1,0), T(0,1), T(0,2)
.byte T(1,1), T(2,0), T(3,0), T(2,1)
.byte T(1,2), T(0,3), T(0,4), T(1,3)
.byte T(2,2), T(3,1), T(4,0), T(5,0)
.byte T(4,1), T(3,2), T(2,3), T(1,4)
.byte T(0,5), T(0,6), T(1,5), T(2,4)
#undef T
#define T(x,y) Z((x-3)*8+y)
.byte T(3,3), T(4,2), T(5,1), T(6,0)
.byte T(7,0), T(6,1), T(5,2), T(4,3)
#undef T
#define T(x,y) Z((x-0)*8+y)
.byte T(3,4), T(2,5), T(1,6), T(0,7)
.byte T(1,7), T(2,6), T(3,5), T(4,4)
#undef T
#define T(x,y) Z((x-4)*8+y)
.byte T(5,3), T(6,2), T(7,1), T(7,2)
.byte T(6,3), T(5,4), T(4,5), T(3,6)
.byte T(2,7), T(3,7), T(4,6), T(5,5)
.byte T(6,4), T(7,3), T(7,4), T(6,5)
.byte T(5,6), T(4,7), T(5,7), T(6,6)
.byte T(7,5), T(7,6), T(6,7), T(7,7)
endconst
// Field zigzag scan of 64 int16 coefficients (x1 -> x0) via
// scan8x8_field.  Each TBL uses a sliding window of source registers;
// the final output vector is assembled with ext instead of a table.
function zigzag_scan_8x8_field_neon
movrel x2, scan8x8_field
ld1 {v0.8h,v1.8h}, [x1], #32
ld1 {v2.8h,v3.8h}, [x1], #32
ld1 {v4.8h,v5.8h}, [x1], #32
ld1 {v6.8h,v7.8h}, [x1]
ld1 {v16.16b,v17.16b}, [x2], #32
ld1 {v18.16b,v19.16b}, [x2], #32
ld1 {v20.16b,v21.16b}, [x2], #32
ld1 {v22.16b}, [x2]
ext v31.16b, v7.16b, v7.16b, #4
tbl v24.16b, {v0.16b,v1.16b}, v16.16b
tbl v25.16b, {v0.16b,v1.16b,v2.16b,v3.16b}, v17.16b
tbl v26.16b, {v1.16b,v2.16b,v3.16b,v4.16b}, v18.16b
tbl v27.16b, {v2.16b,v3.16b,v4.16b,v5.16b}, v19.16b
tbl v28.16b, {v3.16b,v4.16b,v5.16b,v6.16b}, v20.16b
tbl v29.16b, {v4.16b,v5.16b,v6.16b}, v21.16b
tbl v30.16b, {v5.16b,v6.16b,v7.16b}, v22.16b
ext v31.16b, v6.16b, v31.16b, #12
st1 {v24.8h,v25.8h}, [x0], #32
st1 {v26.8h,v27.8h}, [x0], #32
st1 {v28.8h,v29.8h}, [x0], #32
st1 {v30.8h,v31.8h}, [x0]
ret
endfunc
// Emits zigzag_sub_8x8_<f>_neon: subtracts the 8x8 dec block (x2) from
// the 8x8 enc block (x1) in <f> (frame/field) scan order, stores the 64
// int16 levels at x0, copies enc pixels into the dec buffer, and returns
// 1 if any level is nonzero.
.macro zigzag_sub8x8 f
function zigzag_sub_8x8_\f\()_neon, export=1
movrel x4, sub8x8_\f
mov x5, #FENC_STRIDE
mov x6, #FDEC_STRIDE
mov x7, x2 // keep the dec pointer for the write-back below
ld1 {v0.d}[0], [x1], x5
ld1 {v0.d}[1], [x1], x5
ld1 {v1.d}[0], [x1], x5
ld1 {v1.d}[1], [x1], x5
ld1 {v2.d}[0], [x1], x5
ld1 {v2.d}[1], [x1], x5
ld1 {v3.d}[0], [x1], x5
ld1 {v3.d}[1], [x1]
ld1 {v4.d}[0], [x2], x6
ld1 {v4.d}[1], [x2], x6
ld1 {v5.d}[0], [x2], x6
ld1 {v5.d}[1], [x2], x6
ld1 {v6.d}[0], [x2], x6
ld1 {v6.d}[1], [x2], x6
ld1 {v7.d}[0], [x2], x6
ld1 {v7.d}[1], [x2]
ld1 {v16.16b,v17.16b}, [x4], #32
ld1 {v18.16b,v19.16b}, [x4], #32
// Shuffle both blocks into scan order, then subtract.
tbl v24.16b, {v0.16b,v1.16b,v2.16b,v3.16b}, v16.16b
tbl v25.16b, {v0.16b,v1.16b,v2.16b,v3.16b}, v17.16b
tbl v26.16b, {v0.16b,v1.16b,v2.16b,v3.16b}, v18.16b
tbl v27.16b, {v0.16b,v1.16b,v2.16b,v3.16b}, v19.16b
tbl v28.16b, {v4.16b,v5.16b,v6.16b,v7.16b}, v16.16b
tbl v29.16b, {v4.16b,v5.16b,v6.16b,v7.16b}, v17.16b
tbl v30.16b, {v4.16b,v5.16b,v6.16b,v7.16b}, v18.16b
tbl v31.16b, {v4.16b,v5.16b,v6.16b,v7.16b}, v19.16b
usubl v4.8h, v24.8b, v28.8b
usubl2 v5.8h, v24.16b, v28.16b
usubl v6.8h, v25.8b, v29.8b
usubl2 v7.8h, v25.16b, v29.16b
usubl v16.8h, v26.8b, v30.8b
usubl2 v17.8h, v26.16b, v30.16b
usubl v18.8h, v27.8b, v31.8b
usubl2 v19.8h, v27.16b, v31.16b
// Reduce all levels to a single maximum for the nonzero flag.
umax v20.8h, v4.8h, v5.8h
umax v21.8h, v6.8h, v7.8h
umax v22.8h, v16.8h, v17.8h
umax v23.8h, v18.8h, v19.8h
umax v20.8h, v20.8h, v21.8h
umax v21.8h, v22.8h, v23.8h
umax v20.8h, v20.8h, v21.8h
umaxv h22, v20.8h
st1 {v0.d}[0], [x7], x6
st1 {v0.d}[1], [x7], x6
st1 {v1.d}[0], [x7], x6
st1 {v1.d}[1], [x7], x6
st1 {v2.d}[0], [x7], x6
st1 {v2.d}[1], [x7], x6
st1 {v3.d}[0], [x7], x6
st1 {v3.d}[1], [x7]
st1 {v4.8h,v5.8h}, [x0], #32
st1 {v6.8h,v7.8h}, [x0], #32
st1 {v16.8h,v17.8h}, [x0], #32
st1 {v18.8h,v19.8h}, [x0]
fmov w9, s22
cmp w9, #0
cset w0, ne // return nonzero-coefficient flag
ret
endfunc
.endm
zigzag_sub8x8 field
zigzag_sub8x8 frame
#undef T
#define T(x,y) Z(x*8+y)
const scan8x8_field, align=5
.byte T(0,0), T(0,1), T(0,2), T(1,0)
.byte T(1,1), T(0,3), T(0,4), T(1,2)
.byte T(2,0), T(1,3), T(0,5), T(0,6)
.byte T(0,7), T(1,4), T(2,1), T(3,0)
#undef T
#define T(x,y) Z((x-1)*8+y)
.byte T(2,2), T(1,5), T(1,6), T(1,7)
.byte T(2,3), T(3,1), T(4,0), T(3,2)
#undef T
#define T(x,y) Z((x-2)*8+y)
.byte T(2,4), T(2,5), T(2,6), T(2,7)
.byte T(3,3), T(4,1), T(5,0), T(4,2)
#undef T
#define T(x,y) Z((x-3)*8+y)
.byte T(3,4), T(3,5), T(3,6), T(3,7)
.byte T(4,3), T(5,1), T(6,0), T(5,2)
#undef T
#define T(x,y) Z((x-4)*8+y)
.byte T(4,4), T(4,5), T(4,6), T(4,7)
.byte T(5,3), T(6,1), T(6,2), T(5,4)
#undef T
#define T(x,y) Z((x-5)*8+y)
.byte T(5,5), T(5,6), T(5,7), T(6,3)
.byte T(7,0), T(7,1), T(6,4), T(6,5)
endconst
// sub8x8_frame / sub8x8_field: byte permutation tables consumed by the tbl
// instructions in zigzag_sub_8x8_*_neon above. Each entry T(y,x) = x*8 + y
// is a source byte index into the four concatenated pixel vectors.
#undef T
#define T(y,x) x*8+y
const sub8x8_frame, align=5
.byte T(0,0), T(1,0), T(0,1), T(0,2)
.byte T(1,1), T(2,0), T(3,0), T(2,1)
.byte T(1,2), T(0,3), T(0,4), T(1,3)
.byte T(2,2), T(3,1), T(4,0), T(5,0)
.byte T(4,1), T(3,2), T(2,3), T(1,4)
.byte T(0,5), T(0,6), T(1,5), T(2,4)
.byte T(3,3), T(4,2), T(5,1), T(6,0)
.byte T(7,0), T(6,1), T(5,2), T(4,3)
.byte T(3,4), T(2,5), T(1,6), T(0,7)
.byte T(1,7), T(2,6), T(3,5), T(4,4)
.byte T(5,3), T(6,2), T(7,1), T(7,2)
.byte T(6,3), T(5,4), T(4,5), T(3,6)
.byte T(2,7), T(3,7), T(4,6), T(5,5)
.byte T(6,4), T(7,3), T(7,4), T(6,5)
.byte T(5,6), T(4,7), T(5,7), T(6,6)
.byte T(7,5), T(7,6), T(6,7), T(7,7)
endconst
const sub8x8_field, align=5
.byte T(0,0), T(0,1), T(0,2), T(1,0)
.byte T(1,1), T(0,3), T(0,4), T(1,2)
.byte T(2,0), T(1,3), T(0,5), T(0,6)
.byte T(0,7), T(1,4), T(2,1), T(3,0)
.byte T(2,2), T(1,5), T(1,6), T(1,7)
.byte T(2,3), T(3,1), T(4,0), T(3,2)
.byte T(2,4), T(2,5), T(2,6), T(2,7)
.byte T(3,3), T(4,1), T(5,0), T(4,2)
.byte T(3,4), T(3,5), T(3,6), T(3,7)
.byte T(4,3), T(5,1), T(6,0), T(5,2)
.byte T(4,4), T(4,5), T(4,6), T(4,7)
.byte T(5,3), T(6,1), T(6,2), T(5,4)
.byte T(5,5), T(5,6), T(5,7), T(6,3)
.byte T(7,0), T(7,1), T(6,4), T(6,5)
.byte T(6,6), T(6,7), T(7,2), T(7,3)
.byte T(7,4), T(7,5), T(7,6), T(7,7)
endconst
// ---------------------------------------------------------------------------
// NOTE(extraction artifact): dataset metadata for the next concatenated file —
// repo aestream/faery, 3,264 bytes, path src/mp4/x264/common/aarch64/dct-a-sve.S
// ---------------------------------------------------------------------------
/****************************************************************************
* dct-a-sve.S: aarch64 transform and zigzag
*****************************************************************************
* Copyright (C) 2009-2024 x264 project
*
* Authors: David Chen <david.chen@myais.com.cn>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "asm.S"
#include "dct-a-common.S"
.arch armv8-a+sve
// sub4x4_dct_sve: 4x4 forward DCT of the residual (fenc - fdec).
//   x0 = dst (16 int16 coeffs), x1 = fenc (FENC_STRIDE), x2 = fdec (FDEC_STRIDE)
// SVE ld1b with a 4-lane halfword predicate loads 4 pixels widened straight
// to 16 bit, so no separate uxtl step is needed before the subtract.
function sub4x4_dct_sve, export=1
mov x3, #FENC_STRIDE
mov x4, #FDEC_STRIDE
ptrue p0.h, vl4                 // predicate: 4 active halfword lanes
ld1b {z0.h}, p0/z, [x1]
add x1, x1, x3
ld1b {z1.h}, p0/z, [x2]
add x2, x2, x4
ld1b {z2.h}, p0/z, [x1]
add x1, x1, x3
sub v16.4h, v0.4h, v1.4h        // row 0 residual
ld1b {z3.h}, p0/z, [x2]
add x2, x2, x4
ld1b {z4.h}, p0/z, [x1]
add x1, x1, x3
sub v17.4h, v2.4h, v3.4h        // row 1 residual
ld1b {z5.h}, p0/z, [x2]
add x2, x2, x4
ld1b {z6.h}, p0/z, [x1]
sub v18.4h, v4.4h, v5.4h        // row 2 residual
ld1b {z7.h}, p0/z, [x2]
sub v19.4h, v6.4h, v7.4h        // row 3 residual
// Row transform, transpose, column transform (DCT_1D from dct-a-common.S)
DCT_1D v0.4h, v1.4h, v2.4h, v3.4h, v16.4h, v17.4h, v18.4h, v19.4h
transpose4x4.h v0, v1, v2, v3, v4, v5, v6, v7
DCT_1D v4.4h, v5.4h, v6.4h, v7.4h, v0.4h, v1.4h, v2.4h, v3.4h
st1 {v4.4h,v5.4h,v6.4h,v7.4h}, [x0]
ret
endfunc
// zigzag_interleave_8x8_cavlc_sve:
//   De-interleave 8x8 coefficients (x1) into four 4x4 sub-blocks stored at x0,
//   and write a per-sub-block nonzero flag (0/1 bytes) to x2.
//   Layout details follow the zigzag_interleave_8x8_cavlc contract of the
//   NEON version — presumably nnz flags for CAVLC; confirm against dct.c.
function zigzag_interleave_8x8_cavlc_sve, export=1
mov z31.s, #1
ptrue p2.s, vl2                 // predicate: 2 active word lanes for st1b
ld4 {v0.8h,v1.8h,v2.8h,v3.8h}, [x1], #64
ld4 {v4.8h,v5.8h,v6.8h,v7.8h}, [x1], #64
// Pairwise max-reduce each de-interleaved stream to detect nonzero coeffs
umax v16.8h, v0.8h, v4.8h
umax v17.8h, v1.8h, v5.8h
umax v18.8h, v2.8h, v6.8h
umax v19.8h, v3.8h, v7.8h
st1 {v0.8h}, [x0], #16
st1 {v4.8h}, [x0], #16
umaxp v16.8h, v16.8h, v17.8h
umaxp v18.8h, v18.8h, v19.8h
st1 {v1.8h}, [x0], #16
st1 {v5.8h}, [x0], #16
umaxp v16.8h, v16.8h, v18.8h
st1 {v2.8h}, [x0], #16
st1 {v6.8h}, [x0], #16
cmhs v16.4s, v16.4s, v31.4s     // lane != 0 ? all-ones : 0
st1 {v3.8h}, [x0], #16
and v16.16b, v16.16b, v31.16b   // reduce all-ones mask to 0/1
st1 {v7.8h}, [x0], #16
st1b {z16.s}, p2, [x2]          // low two flags as bytes
add x2, x2, #8
mov v16.d[0], v16.d[1]
st1b {z16.s}, p2, [x2]          // high two flags as bytes
ret
endfunc
// ---------------------------------------------------------------------------
// NOTE(extraction artifact): dataset metadata for the next concatenated file —
// repo aestream/faery, 3,234 bytes, path src/mp4/x264/common/aarch64/mc-a-sve.S
// ---------------------------------------------------------------------------
/*****************************************************************************
* mc-a-sve.S: aarch64 motion compensation
*****************************************************************************
* Copyright (C) 2009-2024 x264 project
*
* Authors: David Chen <david.chen@myais.com.cn>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "asm.S"
#include "mc-a-common.S"
.arch armv8-a+sve
#if BIT_DEPTH == 8
// void pixel_avg( uint8_t *dst, intptr_t dst_stride,
// uint8_t *src1, intptr_t src1_stride,
// uint8_t *src2, intptr_t src2_stride, int weight );
// Dispatcher: w6 = weight (0..64 scale between src1/src2).
//   weight == 32 -> unweighted rounding average (plain NEON path)
//   weight >  64 -> add_sub variant, weight < 0 -> sub_add, else add_add.
// w9 = row count, w7 = 64 - weight (second operand's weight).
.macro AVGH_SVE w h
function pixel_avg_\w\()x\h\()_sve, export=1
mov w10, #64
cmp w6, #32
mov w9, #\h
b.eq pixel_avg_w\w\()_neon
subs w7, w10, w6
b.lt pixel_avg_weight_w\w\()_add_sub_sve // weight > 64
cmp w6, #0
b.ge pixel_avg_weight_w\w\()_add_add_sve
b pixel_avg_weight_w\w\()_sub_add_sve // weight < 0
endfunc
.endm
AVGH_SVE 4, 2
AVGH_SVE 4, 4
AVGH_SVE 4, 8
AVGH_SVE 4, 16
// Weighted-sum primitives for the SVE avg path. Inputs are already widened
// to 16-bit lanes; v30 = |weight1|, v31 = |weight2| broadcast as halfwords.
// The add/sub combination encodes the sign of each term.
// 0 < weight < 64
.macro weight_add_add_sve dst, s1, s2, h=
mul \dst, \s1, v30.8h
mla \dst, \s2, v31.8h
.endm
// weight > 64
.macro weight_add_sub_sve dst, s1, s2, h=
mul \dst, \s1, v30.8h
mls \dst, \s2, v31.8h
.endm
// weight < 0
.macro weight_sub_add_sve dst, s1, s2, h=
mul \dst, \s2, v31.8h
mls \dst, \s1, v30.8h
.endm
// pixel_avg_weight_w4_*_sve: weighted 4-wide average, two rows per iteration.
// SVE ld1b widening loads fetch 4+4 pixels per source directly as halfwords;
// result is (s1*w1 +/- s2*w2 + 32) >> 6, saturated to u8 via sqrshrun.
.macro AVG_WEIGHT_SVE ext
function pixel_avg_weight_w4_\ext\()_sve
load_weights_\ext
ptrue p0.b, vl8                 // 8 active byte lanes -> 8 halfword results
dup v30.8h, w6
dup v31.8h, w7
1: // height loop
subs w9, w9, #2
ld1b {z0.h}, p0/z, [x2]
add x2, x2, x3
ld1b {z1.h}, p0/z, [x4]
add x4, x4, x5
weight_\ext\()_sve v4.8h, v0.8h, v1.8h
ld1b {z2.h}, p0/z, [x2]
add x2, x2, x3
ld1b {z3.h}, p0/z, [x4]
add x4, x4, x5
sqrshrun v0.8b, v4.8h, #6       // rounded >>6, clamp to [0,255]
weight_\ext\()_sve v5.8h, v2.8h, v3.8h
st1 {v0.s}[0], [x0], x1
sqrshrun v1.8b, v5.8h, #6
st1 {v1.s}[0], [x0], x1
b.gt 1b
ret
endfunc
.endm
AVG_WEIGHT_SVE add_add
AVG_WEIGHT_SVE add_sub
AVG_WEIGHT_SVE sub_add
#else // BIT_DEPTH == 10
#endif
// ---------------------------------------------------------------------------
// NOTE(extraction artifact): dataset metadata for the next concatenated file —
// repo aestream/faery, 109,852 bytes, path src/mp4/x264/common/aarch64/mc-a.S
// ---------------------------------------------------------------------------
/*****************************************************************************
* mc.S: aarch64 motion compensation
*****************************************************************************
* Copyright (C) 2009-2024 x264 project
*
* Authors: David Conrad <lessen42@gmail.com>
* Janne Grunau <janne-x264@jannau.net>
* Mans Rullgard <mans@mansr.com>
* Stefan Groenroos <stefan.gronroos@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "asm.S"
#include "mc-a-common.S"
// note: prefetch stuff assumes 64-byte cacheline
// void prefetch_ref( uint8_t *pix, intptr_t stride, int parity )
// Prefetches 8 consecutive rows. parity==1 keeps the base row; otherwise the
// start is shifted down by 8*stride. Streaming hint (pldl1strm) — data is
// read once, so avoid polluting L1.
function prefetch_ref_aarch64, export=1
cmp w2, #1
csel x2, xzr, x1, eq            // x2 = parity==1 ? 0 : stride
add x0, x0, #64
add x0, x0, x2, lsl #3          // + 8*stride for parity 0
lsl x2, x1, #1                  // 2*stride
add x3, x1, x1, lsl #1          // 3*stride
add x4, x0, x1, lsl #2          // base of rows 4..7
prfm pldl1strm, [x0]
prfm pldl1strm, [x0, x1]
prfm pldl1strm, [x0, x2]
prfm pldl1strm, [x0, x3]
prfm pldl1strm, [x4]
prfm pldl1strm, [x4, x1]
prfm pldl1strm, [x4, x2]
prfm pldl1strm, [x4, x3]
ret
endfunc
// void prefetch_fenc( uint8_t *pix_y, intptr_t stride_y,
// uint8_t *pix_uv, intptr_t stride_uv, int mb_x )
// Prefetch 4 luma rows and 2 (420) or 4 (422) chroma rows, offset by
// (mb_x & 3) rows worth of stride so successive macroblocks touch
// different cache sets.
.macro prefetch_fenc sub
function prefetch_fenc_\sub\()_aarch64, export=1
and w6, w5, #3                  // mb_x & 3 (luma offset factor)
and w7, w5, #3                  // mb_x & 3 (chroma offset factor)
mul x6, x6, x1
mul x7, x7, x3
add x0, x0, #64
add x2, x2, #64
add x0, x0, x6, lsl #2
add x6, x0, x1, lsl #1
prfm pldl1strm, [x0]
prfm pldl1strm, [x0, x1]
prfm pldl1strm, [x6]
prfm pldl1strm, [x6, x1]
add x2, x2, x7, lsl #1
prfm pldl1strm, [x2]
prfm pldl1strm, [x2, x3]
.ifc \sub, 422
add x7, x2, x3, lsl #1          // 4:2:2 has twice the chroma rows
prfm pldl1strm, [x7]
prfm pldl1strm, [x7, x3]
.endif
ret
endfunc
.endm
prefetch_fenc 420
prefetch_fenc 422
// mbtree_propagate_cost_neon: rate-control MB-tree cost propagation,
// 8 elements per iteration.
//   x0 = dst (int16), x1..x4 = cost arrays (uint16), x5 = &fps_factor (float),
//   w6 = count — presumably matching mbtree_propagate_cost in ratecontrol.c;
//   confirm argument order against the C prototype.
// Division by propagate_denom is done with frecpe + one frecps
// Newton-Raphson refinement step instead of fdiv.
function mbtree_propagate_cost_neon, export=1
ld1r {v5.4s}, [x5]              // broadcast fps_factor
8:
subs w6, w6, #8
ld1 {v1.8h}, [x1], #16
ld1 {v2.8h}, [x2], #16
ld1 {v3.8h}, [x3], #16
ld1 {v4.8h}, [x4], #16
bic v3.8h, #0xc0, lsl #8        // strip flag bits from inter costs
umin v3.8h, v2.8h, v3.8h
umull v20.4s, v2.4h, v4.4h // propagate_intra
umull2 v21.4s, v2.8h, v4.8h // propagate_intra
usubl v22.4s, v2.4h, v3.4h // propagate_num
usubl2 v23.4s, v2.8h, v3.8h // propagate_num
uxtl v26.4s, v2.4h // propagate_denom
uxtl2 v27.4s, v2.8h // propagate_denom
uxtl v24.4s, v1.4h
uxtl2 v25.4s, v1.8h
ucvtf v20.4s, v20.4s
ucvtf v21.4s, v21.4s
ucvtf v26.4s, v26.4s
ucvtf v27.4s, v27.4s
ucvtf v22.4s, v22.4s
ucvtf v23.4s, v23.4s
frecpe v28.4s, v26.4s           // ~1/denom estimate
frecpe v29.4s, v27.4s
ucvtf v24.4s, v24.4s
ucvtf v25.4s, v25.4s
frecps v30.4s, v28.4s, v26.4s   // NR refinement factor
frecps v31.4s, v29.4s, v27.4s
fmla v24.4s, v20.4s, v5.4s // propagate_amount
fmla v25.4s, v21.4s, v5.4s // propagate_amount
fmul v28.4s, v28.4s, v30.4s     // refined reciprocal
fmul v29.4s, v29.4s, v31.4s
fmul v16.4s, v24.4s, v22.4s
fmul v17.4s, v25.4s, v23.4s
fmul v18.4s, v16.4s, v28.4s     // amount * num / denom
fmul v19.4s, v17.4s, v29.4s
fcvtns v20.4s, v18.4s           // round to nearest int
fcvtns v21.4s, v19.4s
sqxtn v0.4h, v20.4s             // saturate to int16
sqxtn2 v0.8h, v21.4s
st1 {v0.8h}, [x0], #16
b.gt 8b
ret
endfunc
// Lane indices 0..15, used below to synthesize per-lane mb_x coordinates.
const pw_0to15, align=5
.short 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
endconst
// mbtree_propagate_list_internal_neon: for 8 motion vectors at a time,
// split each MV into mb coordinates + fractional (x&31, y&31) part and emit
// the four bilinear weights (idx0..idx3) scaled by propagate_amount.
// Output stream at x3: 2 vectors of mb coords, then 4 vectors of
// interleaved weights per iteration. w4 = bipred_weight, w5 = mb_y.
function mbtree_propagate_list_internal_neon, export=1
movrel x11, pw_0to15
dup v31.8h, w4 // bipred_weight
movi v30.8h, #0xc0, lsl #8      // lists_used mask in lowres_cost
ld1 {v29.8h}, [x11] //h->mb.i_mb_x,h->mb.i_mb_y
movi v28.4s, #4                 // mb_x advances 4 pairs per iteration
movi v27.8h, #31
movi v26.8h, #32
dup v24.8h, w5 // mb_y
zip1 v29.8h, v29.8h, v24.8h     // interleave (mb_x, mb_y) pairs
8:
subs w6, w6, #8
ld1 {v1.8h}, [x1], #16 // propagate_amount
ld1 {v2.8h}, [x2], #16 // lowres_cost
and v2.16b, v2.16b, v30.16b
cmeq v25.8h, v2.8h, v30.8h      // mask: both lists used?
umull v16.4s, v1.4h, v31.4h
umull2 v17.4s, v1.8h, v31.8h
rshrn v16.4h, v16.4s, #6
rshrn2 v16.8h, v17.4s, #6
bsl v25.16b, v16.16b, v1.16b // if( lists_used == 3 )
// propagate_amount = (propagate_amount * bipred_weight + 32) >> 6
ld1 {v4.8h,v5.8h}, [x0], #32    // motion vectors (x,y interleaved)
sshr v6.8h, v4.8h, #5           // mv >> 5 = whole-MB displacement
sshr v7.8h, v5.8h, #5
add v6.8h, v6.8h, v29.8h
add v29.8h, v29.8h, v28.8h
add v7.8h, v7.8h, v29.8h
add v29.8h, v29.8h, v28.8h
st1 {v6.8h,v7.8h}, [x3], #32    // destination mb coordinates
and v4.16b, v4.16b, v27.16b
and v5.16b, v5.16b, v27.16b
uzp1 v6.8h, v4.8h, v5.8h // x & 31
uzp2 v7.8h, v4.8h, v5.8h // y & 31
sub v4.8h, v26.8h, v6.8h // 32 - (x & 31)
sub v5.8h, v26.8h, v7.8h // 32 - (y & 31)
mul v19.8h, v6.8h, v7.8h // idx3weight = y*x;
mul v18.8h, v4.8h, v7.8h // idx2weight = y*(32-x);
mul v17.8h, v6.8h, v5.8h // idx1weight = (32-y)*x;
mul v16.8h, v4.8h, v5.8h // idx0weight = (32-y)*(32-x) ;
// Scale each bilinear weight by propagate_amount, rounding >>10
umull v6.4s, v19.4h, v25.4h
umull2 v7.4s, v19.8h, v25.8h
umull v4.4s, v18.4h, v25.4h
umull2 v5.4s, v18.8h, v25.8h
umull v2.4s, v17.4h, v25.4h
umull2 v3.4s, v17.8h, v25.8h
umull v0.4s, v16.4h, v25.4h
umull2 v1.4s, v16.8h, v25.8h
rshrn v19.4h, v6.4s, #10
rshrn2 v19.8h, v7.4s, #10
rshrn v18.4h, v4.4s, #10
rshrn2 v18.8h, v5.4s, #10
rshrn v17.4h, v2.4s, #10
rshrn2 v17.8h, v3.4s, #10
rshrn v16.4h, v0.4s, #10
rshrn2 v16.8h, v1.4s, #10
// Interleave per element: (idx0,idx1) then (idx2,idx3)
zip1 v0.8h, v16.8h, v17.8h
zip2 v1.8h, v16.8h, v17.8h
zip1 v2.8h, v18.8h, v19.8h
zip2 v3.8h, v18.8h, v19.8h
st1 {v0.8h,v1.8h}, [x3], #32
st1 {v2.8h,v3.8h}, [x3], #32
b.ge 8b
ret
endfunc
// memcpy_aligned_neon( dst=x0, src=x1, n=x2 ):
// copy n bytes (16-byte-aligned buffers; n a multiple of 16).
// Peels a 16-byte then a 32-byte remainder first, so the main loop always
// moves whole 64-byte chunks.
function memcpy_aligned_neon, export=1
tst x2, #16
b.eq 32f
sub x2, x2, #16
ldr q0, [x1], #16
str q0, [x0], #16
32:                             // 32-byte remainder
tst x2, #32
b.eq 640f
sub x2, x2, #32
ldp q0, q1, [x1], #32
stp q0, q1, [x0], #32
640:                            // entry to the 64-byte bulk loop
cbz x2, 1f
64:
subs x2, x2, #64
ldp q0, q1, [x1, #32]
ldp q2, q3, [x1], #64
stp q0, q1, [x0, #32]
stp q2, q3, [x0], #64
b.gt 64b
1:
ret
endfunc
// memzero_aligned_neon( dst=x0, n=x1 ): zero n bytes, 128 per iteration
// (n assumed to be a multiple of 128).
function memzero_aligned_neon, export=1
movi v0.16b, #0
movi v1.16b, #0
1:
subs x1, x1, #128
stp q0, q1, [x0, #96]
stp q0, q1, [x0, #64]
stp q0, q1, [x0, #32]
stp q0, q1, [x0], 128
b.gt 1b
ret
endfunc
// void mbtree_fix8_pack( int16_t *dst, float *src, int count )
// Convert floats to Q8.8 fixed point (fcvtzs #8) and byte-swap each
// halfword (rev16) before storing. 8 elements per vector iteration,
// scalar loop for the tail.
function mbtree_fix8_pack_neon, export=1
subs w3, w2, #8
b.lt 2f                         // fewer than 8: straight to scalar tail
1:
subs w3, w3, #8
ld1 {v0.4s,v1.4s}, [x1], #32
fcvtzs v0.4s, v0.4s, #8         // float -> fixed, 8 fractional bits
fcvtzs v1.4s, v1.4s, #8
sqxtn v2.4h, v0.4s              // saturate to int16
sqxtn2 v2.8h, v1.4s
rev16 v3.16b, v2.16b            // byte-swap each 16-bit element
st1 {v3.8h}, [x0], #16
b.ge 1b
2:
adds w3, w3, #8                 // w3 = remaining element count
b.eq 4f
3:                              // scalar tail, one element at a time
subs w3, w3, #1
ldr s0, [x1], #4
fcvtzs w4, s0, #8
rev16 w5, w4
strh w5, [x0], #2
b.gt 3b
4:
ret
endfunc
// void mbtree_fix8_unpack( float *dst, int16_t *src, int count )
// Inverse of mbtree_fix8_pack: byte-swap, sign-extend, and convert
// Q8.8 fixed point back to float (scvtf #8).
function mbtree_fix8_unpack_neon, export=1
subs w3, w2, #8
b.lt 2f                         // fewer than 8: straight to scalar tail
1:
subs w3, w3, #8
ld1 {v0.8h}, [x1], #16
rev16 v1.16b, v0.16b            // undo the byte swap
sxtl v2.4s, v1.4h
sxtl2 v3.4s, v1.8h
scvtf v4.4s, v2.4s, #8          // fixed -> float, 8 fractional bits
scvtf v5.4s, v3.4s, #8
st1 {v4.4s,v5.4s}, [x0], #32
b.ge 1b
2:
adds w3, w3, #8                 // w3 = remaining element count
b.eq 4f
3:                              // scalar tail
subs w3, w3, #1
ldrh w4, [x1], #2
rev16 w5, w4
sxth w6, w5
scvtf s0, w6, #8
str s0, [x0], #4
b.gt 3b
4:
ret
endfunc
#if BIT_DEPTH == 8
// void pixel_avg( uint8_t *dst, intptr_t dst_stride,
// uint8_t *src1, intptr_t src1_stride,
// uint8_t *src2, intptr_t src2_stride, int weight );
// NEON dispatcher, same scheme as AVGH_SVE above: weight==32 takes the
// unweighted urhadd path, otherwise the sign of (weight, 64-weight) picks
// one of three weighted variants. w9 = height, w7 = 64 - weight.
.macro AVGH w h
function pixel_avg_\w\()x\h\()_neon, export=1
mov w10, #64
cmp w6, #32
mov w9, #\h
b.eq pixel_avg_w\w\()_neon
subs w7, w10, w6
b.lt pixel_avg_weight_w\w\()_add_sub_neon // weight > 64
cmp w6, #0
b.ge pixel_avg_weight_w\w\()_add_add_neon
b pixel_avg_weight_w\w\()_sub_add_neon // weight < 0
endfunc
.endm
AVGH 4, 2
AVGH 4, 4
AVGH 4, 8
AVGH 4, 16
AVGH 8, 4
AVGH 8, 8
AVGH 8, 16
AVGH 16, 8
AVGH 16, 16
// Weighted-sum primitives for the NEON avg path, operating on u8 sources
// widened into a 16-bit accumulator. v30 = |weight1|, v31 = |weight2|.
// The optional h=2 argument selects the umull2/umlal2 forms that consume
// the high halves of 16-byte vectors (used by the w16 variant).
// 0 < weight < 64
.macro weight_add_add dst, s1, s2, h=
.ifc \h, 2
umull2 \dst, \s1, v30.16b
umlal2 \dst, \s2, v31.16b
.else
umull \dst, \s1, v30.8b
umlal \dst, \s2, v31.8b
.endif
.endm
// weight > 64
.macro weight_add_sub dst, s1, s2, h=
.ifc \h, 2
umull2 \dst, \s1, v30.16b
umlsl2 \dst, \s2, v31.16b
.else
umull \dst, \s1, v30.8b
umlsl \dst, \s2, v31.8b
.endif
.endm
// weight < 0
.macro weight_sub_add dst, s1, s2, h=
.ifc \h, 2
umull2 \dst, \s2, v31.16b
umlsl2 \dst, \s1, v30.16b
.else
umull \dst, \s2, v31.8b
umlsl \dst, \s1, v30.8b
.endif
.endm
// pixel_avg_weight_w{4,8,16}_*_neon: weighted average of two pixel streams.
// Each variant computes (s1*w1 +/- s2*w2 + 32) >> 6 per pixel, saturated to
// u8 via sqrshrun. Registers: x0/x1 dst+stride, x2/x3 src1+stride,
// x4/x5 src2+stride, w9 rows remaining.
.macro AVG_WEIGHT ext
function pixel_avg_weight_w4_\ext\()_neon
load_weights_\ext
dup v30.8b, w6
dup v31.8b, w7
1: // height loop
subs w9, w9, #2
ld1 {v0.s}[0], [x2], x3
ld1 {v1.s}[0], [x4], x5
weight_\ext v4.8h, v0.8b, v1.8b
ld1 {v2.s}[0], [x2], x3
ld1 {v3.s}[0], [x4], x5
sqrshrun v0.8b, v4.8h, #6       // rounded >>6, clamp to [0,255]
weight_\ext v5.8h, v2.8b, v3.8b
st1 {v0.s}[0], [x0], x1
sqrshrun v1.8b, v5.8h, #6
st1 {v1.s}[0], [x0], x1
b.gt 1b
ret
endfunc
function pixel_avg_weight_w8_\ext\()_neon
load_weights_\ext
dup v30.8b, w6
dup v31.8b, w7
1: // height loop (4 rows per iteration)
subs w9, w9, #4
ld1 {v0.8b}, [x2], x3
ld1 {v1.8b}, [x4], x5
weight_\ext v16.8h, v0.8b, v1.8b
ld1 {v2.8b}, [x2], x3
ld1 {v3.8b}, [x4], x5
weight_\ext v17.8h, v2.8b, v3.8b
ld1 {v4.8b}, [x2], x3
ld1 {v5.8b}, [x4], x5
weight_\ext v18.8h, v4.8b, v5.8b
ld1 {v6.8b}, [x2], x3
ld1 {v7.8b}, [x4], x5
weight_\ext v19.8h, v6.8b, v7.8b
sqrshrun v0.8b, v16.8h, #6
sqrshrun v1.8b, v17.8h, #6
sqrshrun v2.8b, v18.8h, #6
sqrshrun v3.8b, v19.8h, #6
st1 {v0.8b}, [x0], x1
st1 {v1.8b}, [x0], x1
st1 {v2.8b}, [x0], x1
st1 {v3.8b}, [x0], x1
b.gt 1b
ret
endfunc
function pixel_avg_weight_w16_\ext\()_neon
load_weights_\ext
dup v30.16b, w6
dup v31.16b, w7
1: // height loop (low/high halves processed separately)
subs w9, w9, #2
ld1 {v0.16b}, [x2], x3
ld1 {v1.16b}, [x4], x5
weight_\ext v16.8h, v0.8b, v1.8b
weight_\ext v17.8h, v0.16b, v1.16b, 2
ld1 {v2.16b}, [x2], x3
ld1 {v3.16b}, [x4], x5
weight_\ext v18.8h, v2.8b, v3.8b
weight_\ext v19.8h, v2.16b, v3.16b, 2
sqrshrun v0.8b, v16.8h, #6
sqrshrun v1.8b, v18.8h, #6
sqrshrun2 v0.16b, v17.8h, #6
sqrshrun2 v1.16b, v19.8h, #6
st1 {v0.16b}, [x0], x1
st1 {v1.16b}, [x0], x1
b.gt 1b
ret
endfunc
.endm
AVG_WEIGHT add_add
AVG_WEIGHT add_sub
AVG_WEIGHT sub_add
// pixel_avg_w8_neon / pixel_avg_w16_neon: unweighted rounding average
// (urhadd = (a+b+1)>>1) of two sources, 4 rows per iteration.
// x0/x1 dst+stride, x2/x3 src1+stride, x4/x5 src2+stride, w9 rows.
function pixel_avg_w8_neon
1: subs w9, w9, #4
ld1 {v0.8b}, [x2], x3
ld1 {v1.8b}, [x4], x5
ld1 {v2.8b}, [x2], x3
urhadd v0.8b, v0.8b, v1.8b
ld1 {v3.8b}, [x4], x5
st1 {v0.8b}, [x0], x1
ld1 {v4.8b}, [x2], x3
urhadd v1.8b, v2.8b, v3.8b
ld1 {v5.8b}, [x4], x5
st1 {v1.8b}, [x0], x1
ld1 {v6.8b}, [x2], x3
ld1 {v7.8b}, [x4], x5
urhadd v2.8b, v4.8b, v5.8b
urhadd v3.8b, v6.8b, v7.8b
st1 {v2.8b}, [x0], x1
st1 {v3.8b}, [x0], x1
b.gt 1b
ret
endfunc
function pixel_avg_w16_neon
1: subs w9, w9, #4
ld1 {v0.16b}, [x2], x3
ld1 {v1.16b}, [x4], x5
ld1 {v2.16b}, [x2], x3
urhadd v0.16b, v0.16b, v1.16b
ld1 {v3.16b}, [x4], x5
st1 {v0.16b}, [x0], x1
ld1 {v4.16b}, [x2], x3
urhadd v1.16b, v2.16b, v3.16b
ld1 {v5.16b}, [x4], x5
st1 {v1.16b}, [x0], x1
ld1 {v6.16b}, [x2], x3
ld1 {v7.16b}, [x4], x5
urhadd v2.16b, v4.16b, v5.16b
urhadd v3.16b, v6.16b, v7.16b
st1 {v2.16b}, [x0], x1
st1 {v3.16b}, [x0], x1
b.gt 1b
ret
endfunc
// pixel_avg2_w{4,8,16,20}_neon: rounding average of two sources that share
// one stride (x3), two rows per iteration.
//   x0 = dst, x1 = dst_stride, x2 = src1, x3 = src_stride, x4 = src2,
//   w5 = height. The w20 variant handles 16+4 pixels per row.
function pixel_avg2_w4_neon, export=1
1:
subs w5, w5, #2
ld1 {v0.s}[0], [x2], x3
ld1 {v2.s}[0], [x4], x3
urhadd v0.8b, v0.8b, v2.8b      // (a+b+1)>>1
ld1 {v1.s}[0], [x2], x3
ld1 {v3.s}[0], [x4], x3
urhadd v1.8b, v1.8b, v3.8b
st1 {v0.s}[0], [x0], x1
st1 {v1.s}[0], [x0], x1
b.gt 1b
ret
endfunc
function pixel_avg2_w8_neon, export=1
1:
subs w5, w5, #2
ld1 {v0.8b}, [x2], x3
ld1 {v2.8b}, [x4], x3
urhadd v0.8b, v0.8b, v2.8b
ld1 {v1.8b}, [x2], x3
ld1 {v3.8b}, [x4], x3
urhadd v1.8b, v1.8b, v3.8b
st1 {v0.8b}, [x0], x1
st1 {v1.8b}, [x0], x1
b.gt 1b
ret
endfunc
function pixel_avg2_w16_neon, export=1
1:
subs w5, w5, #2
ld1 {v0.16b}, [x2], x3
ld1 {v2.16b}, [x4], x3
urhadd v0.16b, v0.16b, v2.16b
ld1 {v1.16b}, [x2], x3
ld1 {v3.16b}, [x4], x3
urhadd v1.16b, v1.16b, v3.16b
st1 {v0.16b}, [x0], x1
st1 {v1.16b}, [x0], x1
b.gt 1b
ret
endfunc
function pixel_avg2_w20_neon, export=1
sub x1, x1, #16                 // dst advances 16 inside the row
1:
subs w5, w5, #2
ld1 {v0.16b,v1.16b}, [x2], x3
ld1 {v2.16b,v3.16b}, [x4], x3
urhadd v0.16b, v0.16b, v2.16b
urhadd v1.8b, v1.8b, v3.8b      // only 4 of these 8 bytes are stored
ld1 {v4.16b,v5.16b}, [x2], x3
ld1 {v6.16b,v7.16b}, [x4], x3
urhadd v4.16b, v4.16b, v6.16b
urhadd v5.8b, v5.8b, v7.8b
st1 {v0.16b}, [x0], #16
st1 {v1.s}[0], [x0], x1         // trailing 4 pixels of the 20-wide row
st1 {v4.16b}, [x0], #16
st1 {v5.s}[0], [x0], x1
b.gt 1b
ret
endfunc
// Shared prologue for the mc_weight_* functions. x4 = x264_weight_t*;
// the loads at offsets 32/36/40 pick up denom, scale, and offset — offsets
// match the struct layout used by the C side (confirm against common.h).
//   v0 = scale (bytes), v1 = offset (halfwords), v2 = -denom (for srshl,
//   i.e. an arithmetic right shift by denom); w9 = height from w5.
// type=nodenom skips the denom load/negate entirely.
.macro weight_prologue type
mov w9, w5 // height
.ifc \type, full
ldr w12, [x4, #32] // denom
.endif
ldp w4, w5, [x4, #32+4] // scale, offset
dup v0.16b, w4
dup v1.8h, w5
.ifc \type, full
neg w12, w12
dup v2.8h, w12
.endif
.endm
// void mc_weight( uint8_t *src, intptr_t src_stride, uint8_t *dst,
// intptr_t dst_stride, const x264_weight_t *weight, int h )
// Full (denom) variants: dst = clip(((src*scale) >> denom) + offset).
// srshl by v2 (= -denom) performs the rounding right shift; sqxtun clamps
// to [0,255]. Two rows per iteration; w20 packs the two 4-pixel row tails
// into one vector (v18 via zip1, stored lane 0 / lane 1).
function mc_weight_w20_neon, export=1
weight_prologue full
sub x1, x1, #16                 // dst pointer advances 16 inside the row
1:
subs w9, w9, #2
ld1 {v16.8b,v17.8b,v18.8b}, [x2], x3
ld1 {v19.8b,v20.8b,v21.8b}, [x2], x3
umull v22.8h, v16.8b, v0.8b
umull v23.8h, v17.8b, v0.8b
zip1 v18.2s, v18.2s, v21.2s     // combine both rows' 4-pixel tails
umull v25.8h, v19.8b, v0.8b
umull v26.8h, v20.8b, v0.8b
umull v24.8h, v18.8b, v0.8b
srshl v22.8h, v22.8h, v2.8h     // rounding >> denom
srshl v23.8h, v23.8h, v2.8h
srshl v24.8h, v24.8h, v2.8h
srshl v25.8h, v25.8h, v2.8h
srshl v26.8h, v26.8h, v2.8h
add v22.8h, v22.8h, v1.8h       // + offset
add v23.8h, v23.8h, v1.8h
add v24.8h, v24.8h, v1.8h
add v25.8h, v25.8h, v1.8h
add v26.8h, v26.8h, v1.8h
sqxtun v4.8b, v22.8h            // saturate to u8
sqxtun2 v4.16b, v23.8h
sqxtun v6.8b, v24.8h
sqxtun v5.8b, v25.8h
sqxtun2 v5.16b, v26.8h
st1 {v4.16b}, [x0], #16
st1 {v6.s}[0], [x0], x1
st1 {v5.16b}, [x0], #16
st1 {v6.s}[1], [x0], x1
b.gt 1b
ret
endfunc
function mc_weight_w16_neon, export=1
weight_prologue full
weight16_loop:
1:
subs w9, w9, #2
ld1 {v4.16b}, [x2], x3
ld1 {v5.16b}, [x2], x3
umull v22.8h, v4.8b, v0.8b
umull2 v23.8h, v4.16b, v0.16b
umull v24.8h, v5.8b, v0.8b
umull2 v25.8h, v5.16b, v0.16b
srshl v22.8h, v22.8h, v2.8h
srshl v23.8h, v23.8h, v2.8h
srshl v24.8h, v24.8h, v2.8h
srshl v25.8h, v25.8h, v2.8h
add v22.8h, v22.8h, v1.8h
add v23.8h, v23.8h, v1.8h
add v24.8h, v24.8h, v1.8h
add v25.8h, v25.8h, v1.8h
sqxtun v4.8b, v22.8h
sqxtun2 v4.16b, v23.8h
sqxtun v5.8b, v24.8h
sqxtun2 v5.16b, v25.8h
st1 {v4.16b}, [x0], x1
st1 {v5.16b}, [x0], x1
b.gt 1b
ret
endfunc
function mc_weight_w8_neon, export=1
weight_prologue full
1:
subs w9, w9, #2
ld1 {v16.8b}, [x2], x3
ld1 {v17.8b}, [x2], x3
umull v4.8h, v16.8b, v0.8b
umull v5.8h, v17.8b, v0.8b
srshl v4.8h, v4.8h, v2.8h
srshl v5.8h, v5.8h, v2.8h
add v4.8h, v4.8h, v1.8h
add v5.8h, v5.8h, v1.8h
sqxtun v16.8b, v4.8h
sqxtun v17.8b, v5.8h
st1 {v16.8b}, [x0], x1
st1 {v17.8b}, [x0], x1
b.gt 1b
ret
endfunc
function mc_weight_w4_neon, export=1
weight_prologue full
1:
subs w9, w9, #2
ld1 {v16.s}[0], [x2], x3        // two 4-pixel rows in one d-register
ld1 {v16.s}[1], [x2], x3
umull v4.8h, v16.8b, v0.8b
srshl v4.8h, v4.8h, v2.8h
add v4.8h, v4.8h, v1.8h
sqxtun v16.8b, v4.8h
st1 {v16.s}[0], [x0], x1
st1 {v16.s}[1], [x0], x1
b.gt 1b
ret
endfunc
// mc_weight_w{20,16,8,4}_nodenom_neon: denom == 0 fast path.
// dst = clip(src*scale + offset) — the offset is preloaded into the
// accumulator (mov vN, v1) and the product accumulated with umlal, so no
// shift is needed.
function mc_weight_w20_nodenom_neon, export=1
weight_prologue nodenom
sub x1, x1, #16                 // dst pointer advances 16 inside the row
1:
subs w9, w9, #2
ld1 {v16.8b,v17.8b,v18.8b}, [x2], x3
mov v27.16b, v1.16b             // accumulators start at offset
mov v28.16b, v1.16b
ld1 {v19.8b,v20.8b,v21.8b}, [x2], x3
mov v31.16b, v1.16b
mov v29.16b, v1.16b
mov v30.16b, v1.16b
zip1 v18.2s, v18.2s, v21.2s     // combine both rows' 4-pixel tails
umlal v27.8h, v16.8b, v0.8b
umlal v28.8h, v17.8b, v0.8b
umlal v31.8h, v18.8b, v0.8b
umlal v29.8h, v19.8b, v0.8b
umlal v30.8h, v20.8b, v0.8b
sqxtun v4.8b, v27.8h            // saturate to u8
sqxtun2 v4.16b, v28.8h
sqxtun v5.8b, v29.8h
sqxtun2 v5.16b, v30.8h
sqxtun v6.8b, v31.8h
st1 {v4.16b}, [x0], #16
st1 {v6.s}[0], [x0], x1
st1 {v5.16b}, [x0], #16
st1 {v6.s}[1], [x0], x1
b.gt 1b
ret
endfunc
function mc_weight_w16_nodenom_neon, export=1
weight_prologue nodenom
1:
subs w9, w9, #2
ld1 {v6.16b}, [x2], x3
mov v27.16b, v1.16b
mov v28.16b, v1.16b
ld1 {v7.16b}, [x2], x3
mov v29.16b, v1.16b
mov v30.16b, v1.16b
umlal v27.8h, v6.8b, v0.8b
umlal2 v28.8h, v6.16b, v0.16b
umlal v29.8h, v7.8b, v0.8b
umlal2 v30.8h, v7.16b, v0.16b
sqxtun v4.8b, v27.8h
sqxtun2 v4.16b, v28.8h
sqxtun v5.8b, v29.8h
sqxtun2 v5.16b, v30.8h
st1 {v4.16b}, [x0], x1
st1 {v5.16b}, [x0], x1
b.gt 1b
ret
endfunc
function mc_weight_w8_nodenom_neon, export=1
weight_prologue nodenom
1:
subs w9, w9, #2
ld1 {v16.8b}, [x2], x3
mov v27.16b, v1.16b
ld1 {v17.8b}, [x2], x3
mov v29.16b, v1.16b
umlal v27.8h, v16.8b, v0.8b
umlal v29.8h, v17.8b, v0.8b
sqxtun v4.8b, v27.8h
sqxtun v5.8b, v29.8h
st1 {v4.8b}, [x0], x1
st1 {v5.8b}, [x0], x1
b.gt 1b
ret
endfunc
function mc_weight_w4_nodenom_neon, export=1
weight_prologue nodenom
1:
subs w9, w9, #2
ld1 {v16.s}[0], [x2], x3        // two 4-pixel rows in one d-register
ld1 {v16.s}[1], [x2], x3
mov v27.16b, v1.16b
umlal v27.8h, v16.8b, v0.8b
sqxtun v4.8b, v27.8h
st1 {v4.s}[0], [x0], x1
st1 {v4.s}[1], [x0], x1
b.gt 1b
ret
endfunc
// "Simple" weighting: scale == 1, so dst is just src plus/minus a constant
// offset with unsigned saturation (uqadd for offsetadd, uqsub for offsetsub).
// x4 points at the weight struct; the first word is the offset.
.macro weight_simple_prologue
ldr w6, [x4] // offset
dup v1.16b, w6
.endm
.macro weight_simple name op
// w20: 16 pixels via vector load + 4 trailing pixels via scalar s-register
// loads at [x2,#16] (read before the post-indexed vector load bumps x2).
function mc_weight_w20_\name\()_neon, export=1
weight_simple_prologue
1:
subs w5, w5, #2
ldr s18, [x2, #16]
ld1 {v16.16b}, [x2], x3
ldr s19, [x2, #16]
ld1 {v17.16b}, [x2], x3
\op v18.8b, v18.8b, v1.8b
\op v16.16b, v16.16b, v1.16b
\op v19.8b, v19.8b, v1.8b
\op v17.16b, v17.16b, v1.16b
str s18, [x0, #16]
st1 {v16.16b}, [x0], x1
str s19, [x0, #16]
st1 {v17.16b}, [x0], x1
b.gt 1b
ret
endfunc
function mc_weight_w16_\name\()_neon, export=1
weight_simple_prologue
1:
subs w5, w5, #2
ld1 {v16.16b}, [x2], x3
ld1 {v17.16b}, [x2], x3
\op v16.16b, v16.16b, v1.16b
\op v17.16b, v17.16b, v1.16b
st1 {v16.16b}, [x0], x1
st1 {v17.16b}, [x0], x1
b.gt 1b
ret
endfunc
function mc_weight_w8_\name\()_neon, export=1
weight_simple_prologue
1:
subs w5, w5, #2
ld1 {v16.8b}, [x2], x3
ld1 {v17.8b}, [x2], x3
\op v16.8b, v16.8b, v1.8b
\op v17.8b, v17.8b, v1.8b
st1 {v16.8b}, [x0], x1
st1 {v17.8b}, [x0], x1
b.gt 1b
ret
endfunc
function mc_weight_w4_\name\()_neon, export=1
weight_simple_prologue
1:
subs w5, w5, #2
ld1 {v16.s}[0], [x2], x3
ld1 {v16.s}[1], [x2], x3
\op v16.8b, v16.8b, v1.8b
st1 {v16.s}[0], [x0], x1
st1 {v16.s}[1], [x0], x1
b.gt 1b
ret
endfunc
.endm
weight_simple offsetadd, uqadd
weight_simple offsetsub, uqsub
// void mc_copy( uint8_t *dst, intptr_t dst_stride, uint8_t *src, intptr_t src_stride, int height )
// Plain strided block copies, 4 rows per iteration. Height is assumed to be
// a multiple of 4 (subs/b.gt loop structure).
function mc_copy_w4_neon, export=1
1:
subs w4, w4, #4
ld1 {v0.s}[0], [x2], x3
ld1 {v1.s}[0], [x2], x3
ld1 {v2.s}[0], [x2], x3
ld1 {v3.s}[0], [x2], x3
st1 {v0.s}[0], [x0], x1
st1 {v1.s}[0], [x0], x1
st1 {v2.s}[0], [x0], x1
st1 {v3.s}[0], [x0], x1
b.gt 1b
ret
endfunc
function mc_copy_w8_neon, export=1
1: subs w4, w4, #4
ld1 {v0.8b}, [x2], x3
ld1 {v1.8b}, [x2], x3
ld1 {v2.8b}, [x2], x3
ld1 {v3.8b}, [x2], x3
st1 {v0.8b}, [x0], x1
st1 {v1.8b}, [x0], x1
st1 {v2.8b}, [x0], x1
st1 {v3.8b}, [x0], x1
b.gt 1b
ret
endfunc
function mc_copy_w16_neon, export=1
1: subs w4, w4, #4
ld1 {v0.16b}, [x2], x3
ld1 {v1.16b}, [x2], x3
ld1 {v2.16b}, [x2], x3
ld1 {v3.16b}, [x2], x3
st1 {v0.16b}, [x0], x1
st1 {v1.16b}, [x0], x1
st1 {v2.16b}, [x0], x1
st1 {v3.16b}, [x0], x1
b.gt 1b
ret
endfunc
// void mc_chroma( uint8_t *dst_u, uint8_t *dst_v,
// intptr_t i_dst_stride,
// uint8_t *src, intptr_t i_src_stride,
// int dx, int dy, int i_width, int i_height );
// Entry point: advance src to the integer-pel position (dx>>3, dy>>3) and
// branch by width (w7): >4 -> w8 variant, ==4 -> w4, and <4 falls through
// into mc_chroma_w2_neon, which is the next code emitted (by CHROMA_MC 2
// below). Fractional parts stay in w5/w6 for the interpolation weights.
function mc_chroma_neon, export=1
ldr w15, [sp] // height
sbfx x12, x6, #3, #29 // asr(3) and sign extend
sbfx x11, x5, #3, #29 // asr(3) and sign extend
cmp w7, #4
mul x12, x12, x4                // whole-pel vertical offset
add x3, x3, x11, lsl #1         // whole-pel horizontal offset (NV12: *2)
and w5, w5, #7
and w6, w6, #7
add x3, x3, x12
//pld [x3]
//pld [x3, x4]
b.gt mc_chroma_w8_neon
b.eq mc_chroma_w4_neon
endfunc
// Compute the four bilinear weights from the fractional offsets d8x (w5)
// and d8y (w6); sets flags from cD (tst) so callers can branch to the
// separable fast paths when cD == 0.
.macro CHROMA_MC_START r00, r01, r10, r11
mul w12, w5, w6 // cD = d8x *d8y
lsl w13, w5, #3
add w9, w12, #64
lsl w14, w6, #3
tst w12, w12
sub w9, w9, w13
sub w10, w13, w12 // cB = d8x *(8-d8y);
sub w11, w14, w12 // cC = (8-d8x)*d8y
sub w9, w9, w14 // cA = (8-d8x)*(8-d8y);
.endm
// mc_chroma_w{2,4}_neon: bilinear chroma MC for narrow widths.
// Sources are NV12-interleaved, so ld2 de-interleaves U (even regs) and
// V (odd regs). Three paths: full 2-D interpolation (cD != 0), vertical
// only, and horizontal only (dy == 0). U and V rows for both output lines
// share a vector; \vsize/idx2 pick the store lane per width.
.macro CHROMA_MC width, vsize
function mc_chroma_w\width\()_neon
// since the element size varies, there's a different index for the 2nd store
.if \width == 4
.set idx2, 1
.else
.set idx2, 2
.endif
CHROMA_MC_START
b.eq 2f                         // cD == 0: separable fast paths
ld2 {v28.8b,v29.8b}, [x3], x4
dup v0.8b, w9 // cA
dup v1.8b, w10 // cB
ext v6.8b, v28.8b, v6.8b, #1    // same row shifted by one chroma pel
ext v7.8b, v29.8b, v7.8b, #1
ld2 {v30.8b,v31.8b}, [x3], x4
dup v2.8b, w11 // cC
dup v3.8b, w12 // cD
ext v22.8b, v30.8b, v22.8b, #1
ext v23.8b, v31.8b, v23.8b, #1
// Pack (pixel, pixel+1) and (cA,cB)/(cC,cD) pairs so one umull/umlal
// applies both horizontal taps at once
trn1 v0.2s, v0.2s, v1.2s
trn1 v2.2s, v2.2s, v3.2s
trn1 v4.2s, v28.2s, v6.2s
trn1 v5.2s, v29.2s, v7.2s
trn1 v20.2s, v30.2s, v22.2s
trn1 v21.2s, v31.2s, v23.2s
1: // height loop, interpolate xy
subs w15, w15, #2
umull v16.8h, v4.8b, v0.8b
umlal v16.8h, v20.8b, v2.8b
umull v17.8h, v5.8b, v0.8b
umlal v17.8h, v21.8b, v2.8b
ld2 {v28.8b,v29.8b}, [x3], x4
transpose v24.2d, v25.2d, v16.2d, v17.2d
ext v6.8b, v28.8b, v6.8b, #1
ext v7.8b, v29.8b, v7.8b, #1
trn1 v4.2s, v28.2s, v6.2s
trn1 v5.2s, v29.2s, v7.2s
add v16.8h, v24.8h, v25.8h      // sum the two horizontal taps
umull v18.8h, v20.8b, v0.8b
umlal v18.8h, v4.8b, v2.8b
umull v19.8h, v21.8b, v0.8b
umlal v19.8h, v5.8b, v2.8b
ld2 {v30.8b,v31.8b}, [x3], x4
transpose v26.2d, v27.2d, v18.2d, v19.2d
ext v22.8b, v30.8b, v22.8b, #1
ext v23.8b, v31.8b, v23.8b, #1
trn1 v20.2s, v30.2s, v22.2s
trn1 v21.2s, v31.2s, v23.2s
add v17.8h, v26.8h, v27.8h
rshrn v16.8b, v16.8h, #6        // rounded >>6 back to u8
rshrn v17.8b, v17.8h, #6
//pld [x3]
//pld [x3, x4]
st1 {v16.\vsize}[0], [x0], x2
st1 {v16.\vsize}[idx2], [x1], x2
st1 {v17.\vsize}[0], [x0], x2
st1 {v17.\vsize}[idx2], [x1], x2
b.gt 1b
ret
2: // dx or dy are 0
tst w11, w11
add w10, w10, w11               // fold the two nonzero weights together
dup v0.8b, w9
dup v1.8b, w10
b.eq 4f
ld1 {v4.8b}, [x3], x4
ld1 {v6.8b}, [x3], x4
3: // vertical interpolation loop
subs w15, w15, #2
umull v16.8h, v4.8b, v0.8b
ld1 {v4.8b}, [x3], x4
umlal v16.8h, v6.8b, v1.8b
umull v17.8h, v6.8b, v0.8b
ld1 {v6.8b}, [x3], x4
umlal v17.8h, v4.8b, v1.8b
rshrn v20.8b, v16.8h, #6 // uvuvuvuv
rshrn v21.8b, v17.8h, #6 // uvuvuvuv
uzp1 v16.8b, v20.8b, v21.8b // d16=uuuu|uuuu, d17=vvvv|vvvv
uzp2 v17.8b, v20.8b, v21.8b // d16=uuuu|uuuu, d17=vvvv|vvvv
//pld [x3]
//pld [x3, x4]
st1 {v16.\vsize}[0], [x0], x2
st1 {v16.\vsize}[idx2], [x0], x2
st1 {v17.\vsize}[0], [x1], x2
st1 {v17.\vsize}[idx2], [x1], x2
b.gt 3b
ret
4: // dy is 0
ld1 {v4.8b,v5.8b}, [x3], x4
ld1 {v6.8b,v7.8b}, [x3], x4
ext v5.8b, v4.8b, v5.8b, #2     // shift by one chroma pel (2 bytes, NV12)
ext v7.8b, v6.8b, v7.8b, #2
5: // horizontal interpolation loop
subs w15, w15, #2
umull v16.8h, v4.8b, v0.8b
umlal v16.8h, v5.8b, v1.8b
umull v17.8h, v6.8b, v0.8b
umlal v17.8h, v7.8b, v1.8b
ld1 {v4.8b,v5.8b}, [x3], x4
ld1 {v6.8b,v7.8b}, [x3], x4
rshrn v20.8b, v16.8h, #6
rshrn v21.8b, v17.8h, #6
ext v5.8b, v4.8b, v5.8b, #2
ext v7.8b, v6.8b, v7.8b, #2
uzp1 v16.8b, v20.8b, v21.8b // d16=uuuu|uuuu, d17=vvvv|vvvv
uzp2 v17.8b, v20.8b, v21.8b // d16=uuuu|uuuu, d17=vvvv|vvvv
//pld [x3]
//pld [x3, x4]
st1 {v16.\vsize}[0], [x0], x2
st1 {v16.\vsize}[idx2], [x0], x2
st1 {v17.\vsize}[0], [x1], x2
st1 {v17.\vsize}[idx2], [x1], x2
b.gt 5b
ret
endfunc
.endm
CHROMA_MC 2, h
CHROMA_MC 4, s
// mc_chroma_w8_neon: bilinear chroma MC, 8 pixels wide, two rows per
// iteration. Sources are NV12-interleaved (ld2 splits U/V); ext by #1
// produces the pixel+1 neighbour for the horizontal tap. Same three paths
// as the narrow variant: full 2-D, vertical-only, horizontal-only.
function mc_chroma_w8_neon
CHROMA_MC_START
b.eq 2f                         // cD == 0: separable fast paths
ld2 {v4.16b,v5.16b}, [x3], x4
ld2 {v20.16b,v21.16b}, [x3], x4
dup v0.8b, w9 // cA
dup v1.8b, w10 // cB
ext v6.16b, v4.16b, v4.16b, #1
ext v7.16b, v5.16b, v5.16b, #1
dup v2.8b, w11 // cC
dup v3.8b, w12 // cD
ext v22.16b, v20.16b, v20.16b, #1
ext v23.16b, v21.16b, v21.16b, #1
1: // height loop, interpolate xy
subs w15, w15, #2
// row n:   cA*p(x,y) + cB*p(x+1,y) + cC*p(x,y+1) + cD*p(x+1,y+1)
umull v16.8h, v4.8b, v0.8b
umlal v16.8h, v6.8b, v1.8b
umlal v16.8h, v20.8b, v2.8b
umlal v16.8h, v22.8b, v3.8b
umull v17.8h, v5.8b, v0.8b
umlal v17.8h, v7.8b, v1.8b
umlal v17.8h, v21.8b, v2.8b
umlal v17.8h, v23.8b, v3.8b
ld2 {v4.16b,v5.16b}, [x3], x4
ext v6.16b, v4.16b, v4.16b, #1
ext v7.16b, v5.16b, v5.16b, #1
// row n+1 reuses the previous bottom line (v20..v23) as its top line
umull v18.8h, v20.8b, v0.8b
umlal v18.8h, v22.8b, v1.8b
umlal v18.8h, v4.8b, v2.8b
umlal v18.8h, v6.8b, v3.8b
umull v19.8h, v21.8b, v0.8b
umlal v19.8h, v23.8b, v1.8b
umlal v19.8h, v5.8b, v2.8b
umlal v19.8h, v7.8b, v3.8b
ld2 {v20.16b,v21.16b}, [x3], x4
rshrn v16.8b, v16.8h, #6        // rounded >>6 back to u8
rshrn v17.8b, v17.8h, #6
rshrn v18.8b, v18.8h, #6
rshrn v19.8b, v19.8h, #6
ext v22.16b, v20.16b, v20.16b, #1
ext v23.16b, v21.16b, v21.16b, #1
//pld [x3]
//pld [x3, x4]
st1 {v16.8b}, [x0], x2
st1 {v17.8b}, [x1], x2
st1 {v18.8b}, [x0], x2
st1 {v19.8b}, [x1], x2
b.gt 1b
ret
2: // dx or dy are 0
tst w11, w11
add w10, w10, w11               // fold the two nonzero weights together
dup v0.8b, w9
dup v1.8b, w10
b.eq 4f
ld2 {v4.8b,v5.8b}, [x3], x4
ld2 {v6.8b,v7.8b}, [x3], x4
3: // vertical interpolation loop
subs w15, w15, #2
umull v16.8h, v4.8b, v0.8b //U
umlal v16.8h, v6.8b, v1.8b
umull v17.8h, v5.8b, v0.8b //V
umlal v17.8h, v7.8b, v1.8b
ld2 {v4.8b,v5.8b}, [x3], x4
umull v18.8h, v6.8b, v0.8b
umlal v18.8h, v4.8b, v1.8b
umull v19.8h, v7.8b, v0.8b
umlal v19.8h, v5.8b, v1.8b
ld2 {v6.8b,v7.8b}, [x3], x4
rshrn v16.8b, v16.8h, #6
rshrn v17.8b, v17.8h, #6
rshrn v18.8b, v18.8h, #6
rshrn v19.8b, v19.8h, #6
//pld [x3]
//pld [x3, x4]
st1 {v16.8b}, [x0], x2
st1 {v17.8b}, [x1], x2
st1 {v18.8b}, [x0], x2
st1 {v19.8b}, [x1], x2
b.gt 3b
ret
4: // dy is 0
ld2 {v4.16b,v5.16b}, [x3], x4
ext v6.16b, v4.16b, v4.16b, #1
ext v7.16b, v5.16b, v5.16b, #1
ld2 {v20.16b,v21.16b}, [x3], x4
ext v22.16b, v20.16b, v20.16b, #1
ext v23.16b, v21.16b, v21.16b, #1
5: // horizontal interpolation loop
subs w15, w15, #2
umull v16.8h, v4.8b, v0.8b //U
umlal v16.8h, v6.8b, v1.8b
umull v17.8h, v5.8b, v0.8b //V
umlal v17.8h, v7.8b, v1.8b
ld2 {v4.16b,v5.16b}, [x3], x4
umull v18.8h, v20.8b, v0.8b
umlal v18.8h, v22.8b, v1.8b
umull v19.8h, v21.8b, v0.8b
umlal v19.8h, v23.8b, v1.8b
ld2 {v20.16b,v21.16b}, [x3], x4
rshrn v16.8b, v16.8h, #6
rshrn v17.8b, v17.8h, #6
rshrn v18.8b, v18.8h, #6
rshrn v19.8b, v19.8h, #6
ext v6.16b, v4.16b, v4.16b, #1
ext v7.16b, v5.16b, v5.16b, #1
ext v22.16b, v20.16b, v20.16b, #1
ext v23.16b, v21.16b, v21.16b, #1
//pld [x3]
//pld [x3, x4]
st1 {v16.8b}, [x0], x2
st1 {v17.8b}, [x1], x2
st1 {v18.8b}, [x0], x2
st1 {v19.8b}, [x1], x2
b.gt 5b
ret
endfunc
// void hpel_filter( pixel *dsth, pixel *dstv, pixel *dstc, pixel *src,
// intptr_t stride, int width, int height, int16_t *buf )
// 6-tap (1,-5,20,20,-5,1) half-pel filter producing the H, V and C
// (H-of-V) planes in one pass, 16 pixels per inner iteration.
// x9 = src & 15 so loads run from a 16-byte-aligned base; w15 = width
// widened by that misalignment. v30/v31 hold the tap constants 5 and 20.
function hpel_filter_neon, export=1
ubfm x9, x3, #0, #3
add w15, w5, w9
sub x13, x3, x9 // align src
sub x10, x0, x9
sub x11, x1, x9
sub x12, x2, x9
movi v30.16b, #5
movi v31.16b, #20
1: // line start
mov x3, x13
mov x2, x12
mov x1, x11
mov x0, x10
add x7, x3, #16 // src pointer next 16b for horiz filter
mov x5, x15 // restore width
sub x3, x3, x4, lsl #1 // src - 2*stride
ld1 {v28.16b}, [x7], #16 // src[16:31]
add x9, x3, x5 // holds src - 2*stride + width
ld1 {v16.16b}, [x3], x4 // src-2*stride[0:15]
ld1 {v17.16b}, [x3], x4 // src-1*stride[0:15]
ld1 {v18.16b}, [x3], x4 // src+0*stride[0:15]
ld1 {v19.16b}, [x3], x4 // src+1*stride[0:15]
ld1 {v20.16b}, [x3], x4 // src+2*stride[0:15]
ld1 {v21.16b}, [x3], x4 // src+3*stride[0:15]
// v7 = previous center block; undefined on the first column group,
// matching the over-read left margin implied by the alignment shift
ext v22.16b, v7.16b, v18.16b, #14
uaddl v1.8h, v16.8b, v21.8b
ext v26.16b, v18.16b, v28.16b, #3
umlsl v1.8h, v17.8b, v30.8b
ext v23.16b, v7.16b, v18.16b, #15
umlal v1.8h, v18.8b, v31.8b
ext v24.16b, v18.16b, v28.16b, #1
umlal v1.8h, v19.8b, v31.8b
ext v25.16b, v18.16b, v28.16b, #2
umlsl v1.8h, v20.8b, v30.8b
2: // next 16 pixel of line
subs x5, x5, #16
sub x3, x9, x5 // src - 2*stride += 16
// horizontal filter for dsth: v22..v26 are src-2..src+3 shifted views
uaddl v4.8h, v22.8b, v26.8b
uaddl2 v5.8h, v22.16b, v26.16b
sqrshrun v6.8b, v1.8h, #5
umlsl v4.8h, v23.8b, v30.8b
umlsl2 v5.8h, v23.16b, v30.16b
umlal v4.8h, v18.8b, v31.8b
umlal2 v5.8h, v18.16b, v31.16b
umlal v4.8h, v24.8b, v31.8b
umlal2 v5.8h, v24.16b, v31.16b
umlsl v4.8h, v25.8b, v30.8b
umlsl2 v5.8h, v25.16b, v30.16b
// vertical filter (high half) for dstv while the next rows stream in
uaddl2 v2.8h, v16.16b, v21.16b
sqrshrun v4.8b, v4.8h, #5
mov v7.16b, v18.16b
sqrshrun2 v4.16b, v5.8h, #5
umlsl2 v2.8h, v17.16b, v30.16b
ld1 {v16.16b}, [x3], x4 // src-2*stride[0:15]
umlal2 v2.8h, v18.16b, v31.16b
ld1 {v17.16b}, [x3], x4 // src-1*stride[0:15]
umlal2 v2.8h, v19.16b, v31.16b
ld1 {v18.16b}, [x3], x4 // src+0*stride[0:15]
umlsl2 v2.8h, v20.16b, v30.16b
ld1 {v19.16b}, [x3], x4 // src+1*stride[0:15]
st1 {v4.16b}, [x0], #16
sqrshrun2 v6.16b, v2.8h, #5
ld1 {v20.16b}, [x3], x4 // src+2*stride[0:15]
ld1 {v21.16b}, [x3], x4 // src+3*stride[0:15]
// center plane: horizontal 6-tap over the 16-bit vertical results
// (v0,v1,v2 = previous/current/next vertical sums)
ext v22.16b, v0.16b, v1.16b, #12
ext v26.16b, v1.16b, v2.16b, #6
ext v23.16b, v0.16b, v1.16b, #14
st1 {v6.16b}, [x1], #16
uaddl v3.8h, v16.8b, v21.8b
ext v25.16b, v1.16b, v2.16b, #4
umlsl v3.8h, v17.8b, v30.8b
ext v24.16b, v1.16b, v2.16b, #2
umlal v3.8h, v18.8b, v31.8b
add v4.8h, v22.8h, v26.8h
umlal v3.8h, v19.8b, v31.8b
add v5.8h, v23.8h, v25.8h
umlsl v3.8h, v20.8b, v30.8b
add v6.8h, v24.8h, v1.8h
ext v22.16b, v1.16b, v2.16b, #12
ext v26.16b, v2.16b, v3.16b, #6
ext v23.16b, v1.16b, v2.16b, #14
ext v25.16b, v2.16b, v3.16b, #4
ext v24.16b, v2.16b, v3.16b, #2
add v22.8h, v22.8h, v26.8h
add v23.8h, v23.8h, v25.8h
add v24.8h, v24.8h, v2.8h
sub v4.8h, v4.8h, v5.8h // a-b
sub v5.8h, v5.8h, v6.8h // b-c
sub v22.8h, v22.8h, v23.8h // a-b
sub v23.8h, v23.8h, v24.8h // b-c
sshr v4.8h, v4.8h, #2 // (a-b)/4
sshr v22.8h, v22.8h, #2 // (a-b)/4
sub v4.8h, v4.8h, v5.8h // (a-b)/4-b+c
sub v22.8h, v22.8h, v23.8h // (a-b)/4-b+c
sshr v4.8h, v4.8h, #2 // ((a-b)/4-b+c)/4
sshr v22.8h, v22.8h, #2 // ((a-b)/4-b+c)/4
add v4.8h, v4.8h, v6.8h // ((a-b)/4-b+c)/4+c = (a-5*b+20*c)/16
add v22.8h, v22.8h, v24.8h // ((a-b)/4-b+c)/4+c = (a-5*b+20*c)/16
sqrshrun v4.8b, v4.8h, #6
ld1 {v28.16b}, [x7], #16 // src[16:31]
mov v0.16b, v2.16b
ext v23.16b, v7.16b, v18.16b, #15
sqrshrun2 v4.16b, v22.8h, #6
mov v1.16b, v3.16b
ext v22.16b, v7.16b, v18.16b, #14
ext v24.16b, v18.16b, v28.16b, #1
ext v25.16b, v18.16b, v28.16b, #2
ext v26.16b, v18.16b, v28.16b, #3
st1 {v4.16b}, [x2], #16
b.gt 2b
subs w6, w6, #1
add x10, x10, x4
add x11, x11, x4
add x12, x12, x4
add x13, x13, x4
b.gt 1b
ret
endfunc
// frame_init_lowres_core( uint8_t *src0, uint8_t *dst0, uint8_t *dsth,
// uint8_t *dstv, uint8_t *dstc, intptr_t src_stride,
// intptr_t dst_stride, int width, int height )
// Builds the four half-resolution planes (full/h/v/c offsets) by rounding
// averages of 2x2 neighbourhoods; ld2 splits even/odd columns so each
// urhadd works on a whole 16-pixel output run. The inner loop is
// unrolled x2 and ping-pongs between v20/v22 and v30/v31 so the ext
// instructions can peek one pixel into the next block.
function frame_init_lowres_core_neon, export=1
ldr w8, [sp]
sub x10, x6, w7, uxtw // dst_stride - width
and x10, x10, #~15
1:
mov w9, w7 // width
mov x11, x0 // src0
add x12, x0, x5 // src1 = src0 + src_stride
add x13, x0, x5, lsl #1 // src2 = src1 + src_stride
ld2 {v0.16b,v1.16b}, [x11], #32
ld2 {v2.16b,v3.16b}, [x12], #32
ld2 {v4.16b,v5.16b}, [x13], #32
urhadd v20.16b, v0.16b, v2.16b // s0[2x] + s1[2x]
urhadd v22.16b, v2.16b, v4.16b // s1[2x] + s2[2x]
2:
subs w9, w9, #16
urhadd v21.16b, v1.16b, v3.16b // s0[2x+1] + s1[2x+1]
urhadd v23.16b, v3.16b, v5.16b // s1[2x+1] + s2[2x+1]
ld2 {v0.16b,v1.16b}, [x11], #32
ld2 {v2.16b,v3.16b}, [x12], #32
ld2 {v4.16b,v5.16b}, [x13], #32
urhadd v30.16b, v0.16b, v2.16b // loop: s0[2x] + s1[2x]
urhadd v31.16b, v2.16b, v4.16b // loop: s1[2x] + s2[2x]
ext v24.16b, v20.16b, v30.16b, #1 // s0[2x+2] + s1[2x+2]
ext v25.16b, v22.16b, v31.16b, #1 // s1[2x+2] + s2[2x+2]
urhadd v16.16b, v20.16b, v21.16b
urhadd v18.16b, v22.16b, v23.16b
urhadd v17.16b, v21.16b, v24.16b
urhadd v19.16b, v23.16b, v25.16b
st1 {v16.16b}, [x1], #16
st1 {v18.16b}, [x3], #16
st1 {v17.16b}, [x2], #16
st1 {v19.16b}, [x4], #16
b.le 3f
// second unrolled half: roles of v20/v22 and v30/v31 swapped
subs w9, w9, #16
urhadd v21.16b, v1.16b, v3.16b // s0[2x+1] + s1[2x+1]
urhadd v23.16b, v3.16b, v5.16b // s1[2x+1] + s2[2x+1]
ld2 {v0.16b,v1.16b}, [x11], #32
ld2 {v2.16b,v3.16b}, [x12], #32
ld2 {v4.16b,v5.16b}, [x13], #32
urhadd v20.16b, v0.16b, v2.16b // loop: s0[2x] + s1[2x]
urhadd v22.16b, v2.16b, v4.16b // loop: s1[2x] + s2[2x]
ext v24.16b, v30.16b, v20.16b, #1 // s0[2x+2] + s1[2x+2]
ext v25.16b, v31.16b, v22.16b, #1 // s1[2x+2] + s2[2x+2]
urhadd v16.16b, v30.16b, v21.16b
urhadd v18.16b, v31.16b, v23.16b
urhadd v17.16b, v21.16b, v24.16b
urhadd v19.16b, v23.16b, v25.16b
st1 {v16.16b}, [x1], #16
st1 {v18.16b}, [x3], #16
st1 {v17.16b}, [x2], #16
st1 {v19.16b}, [x4], #16
b.gt 2b
3:
subs w8, w8, #1
add x0, x0, x5, lsl #1
add x1, x1, x10
add x2, x2, x10
add x3, x3, x10
add x4, x4, x10
b.gt 1b
ret
endfunc
// Deinterleave NV12 chroma into the encoder's fenc buffer: sets the
// destination stride (FENC_STRIDE/2 per plane) and falls into the
// shared loop below.
function load_deinterleave_chroma_fenc_neon, export=1
mov x4, #FENC_STRIDE/2
b load_deinterleave_chroma
endfunc
// Same as the fenc variant but with the fdec buffer stride. The shared
// loop splits two interleaved UV rows per iteration: ld2 separates U
// into v0/v2 and V into v1/v3, then the four 8-byte rows are stored
// U,V,U,V at successive x4 offsets. w3 = row count.
function load_deinterleave_chroma_fdec_neon, export=1
mov x4, #FDEC_STRIDE/2
load_deinterleave_chroma:
ld2 {v0.8b,v1.8b}, [x1], x2
ld2 {v2.8b,v3.8b}, [x1], x2
subs w3, w3, #2
st1 {v0.8b}, [x0], x4
st1 {v1.8b}, [x0], x4
st1 {v2.8b}, [x0], x4
st1 {v3.8b}, [x0], x4
b.gt load_deinterleave_chroma
ret
endfunc
// Plane copy, width rounded up to 16. Per row: an optional single
// 16-byte head transfer when (rounded) width has bit 4 set, then
// 32-byte chunks. x1/x3 are pre-decremented by the rounded width so the
// per-row stride adds land on the next row start.
function plane_copy_core_neon, export=1
add w8, w4, #15 // 32-bit write clears the upper 32-bit the register
and w4, w8, #~15
// safe use of the full reg since negative width makes no sense
sub x1, x1, x4
sub x3, x3, x4
1:
mov w8, w4
16:
tst w8, #16
b.eq 32f
subs w8, w8, #16
ldr q0, [x2], #16
str q0, [x0], #16
b.eq 0f // width was exactly 16
32:
subs w8, w8, #32
ldp q0, q1, [x2], #32
stp q0, q1, [x0], #32
b.gt 32b
0:
subs w5, w5, #1
add x2, x2, x3
add x0, x0, x1
b.gt 1b
ret
endfunc
// Plane copy with per-pair byte swap (rev16), e.g. swapping U/V order.
// w4 is doubled up front (two bytes per element pair); same
// head-16/body-32 split as plane_copy_core_neon, selected by tbz on
// bit 4 of the doubled width.
function plane_copy_swap_core_neon, export=1
lsl w4, w4, #1
sub x1, x1, x4
sub x3, x3, x4
1:
mov w8, w4
tbz w4, #4, 32f
subs w8, w8, #16
ld1 {v0.16b}, [x2], #16
rev16 v0.16b, v0.16b
st1 {v0.16b}, [x0], #16
b.eq 0f
32:
subs w8, w8, #32
ld1 {v0.16b,v1.16b}, [x2], #32
rev16 v0.16b, v0.16b
rev16 v1.16b, v1.16b
st1 {v0.16b,v1.16b}, [x0], #32
b.gt 32b
0:
subs w5, w5, #1
add x2, x2, x3
add x0, x0, x1
b.gt 1b
ret
endfunc
// Split an interleaved 2-channel plane ([x4], e.g. NV12 chroma) into two
// planar outputs [x0]/[x2], 16 output pixels per channel per iteration.
// w9 starts at width rounded up to 16; the strides in x1/x3/x5 are
// pre-decremented so the row-advance adds are simple. w7 = height.
function plane_copy_deinterleave_neon, export=1
add w9, w6, #15
and w9, w9, #0xfffffff0
sub x1, x1, x9
sub x3, x3, x9
sub x5, x5, x9, lsl #1
1:
ld2 {v0.16b,v1.16b}, [x4], #32
subs w9, w9, #16
st1 {v0.16b}, [x0], #16
st1 {v1.16b}, [x2], #16
b.gt 1b
add x4, x4, x5
subs w7, w7, #1
add x0, x0, x1
add x2, x2, x3
mov w9, w6 // reload width (b.gt re-rounds it implicitly)
b.gt 1b
ret
endfunc
// Shared store/advance tail for plane_copy_deinterleave_rgb: stores the
// three deinterleaved 8-pixel channels, loops back to the caller's `1:`
// load while x11 (pixels left in row) > 0, then steps all pointers to
// the next row and restarts. w10 = rows remaining, x9 = row width.
.macro deinterleave_rgb
subs x11, x11, #8
st1 {v0.8b}, [x0], #8
st1 {v1.8b}, [x2], #8
st1 {v2.8b}, [x4], #8
b.gt 1b
subs w10, w10, #1
add x0, x0, x1
add x2, x2, x3
add x4, x4, x5
add x6, x6, x7
mov x11, x9
b.gt 1b
.endm
// Deinterleave packed RGB (3 bytes/pixel) or RGBX (4 bytes/pixel) into
// three planar outputs. The 5th..7th arguments come off the stack; the
// macOS AAPCS packs them as 32-bit slots, hence the SYS_MACOSX split.
// w8 = pixel size (3 selects the ld3 path, otherwise ld4), x9 = width,
// w10 = height. Width is rounded up to 8 and the strides pre-adjusted.
function plane_copy_deinterleave_rgb_neon, export=1
#if SYS_MACOSX
ldr w8, [sp]
ldp w9, w10, [sp, #4]
#else
ldr x8, [sp]
ldp x9, x10, [sp, #8]
#endif
cmp w8, #3
uxtw x9, w9
add x11, x9, #7
and x11, x11, #~7
sub x1, x1, x11
sub x3, x3, x11
sub x5, x5, x11
b.ne 4f
// 3 bytes/pixel: src stride adjust is width*3
sub x7, x7, x11, lsl #1
sub x7, x7, x11
1:
ld3 {v0.8b,v1.8b,v2.8b}, [x6], #24
deinterleave_rgb
ret
4:
// 4 bytes/pixel: src stride adjust is width*4; 4th channel discarded
sub x7, x7, x11, lsl #2
1:
ld4 {v0.8b,v1.8b,v2.8b,v3.8b}, [x6], #32
deinterleave_rgb
ret
endfunc
// Interleave two planar inputs [x2]/[x4] into one 2-channel plane [x0]
// (inverse of plane_copy_deinterleave). 16 pixels per channel per
// iteration; width rounded up to 16, strides pre-decremented.
function plane_copy_interleave_core_neon, export=1
add w9, w6, #15
and w9, w9, #0xfffffff0
sub x1, x1, x9, lsl #1
sub x3, x3, x9
sub x5, x5, x9
1:
ld1 {v0.16b}, [x2], #16
ld1 {v1.16b}, [x4], #16
subs w9, w9, #16
st2 {v0.16b,v1.16b}, [x0], #32
b.gt 1b
subs w7, w7, #1
add x0, x0, x1
add x2, x2, x3
add x4, x4, x5
mov w9, w6
b.gt 1b
ret
endfunc
// Interleave separate U [x2] and V [x3] rows (FDEC_STRIDE apart) into
// UVUV rows at [x0]; two rows per iteration, zip1 does the byte
// interleave. w4 = height.
function store_interleave_chroma_neon, export=1
mov x5, #FDEC_STRIDE
1:
ld1 {v0.8b}, [x2], x5
ld1 {v1.8b}, [x3], x5
ld1 {v2.8b}, [x2], x5
ld1 {v3.8b}, [x3], x5
subs w4, w4, #2
zip1 v4.16b, v0.16b, v1.16b
zip1 v5.16b, v2.16b, v3.16b
st1 {v4.16b}, [x0], x1
st1 {v5.16b}, [x0], x1
b.gt 1b
ret
endfunc
// Horizontal 4-tap box sum: v0.8h = p[x]+p[x+1]+p[x+2]+p[x+3] for 8
// consecutive x, plus v5 (the previous row's integral, loaded by the
// caller). \p1/\p2 are adjacent 8-byte source blocks. Clobbers v1-v4.
.macro integral4h p1, p2
ext v1.8b, \p1\().8b, \p2\().8b, #1
ext v2.8b, \p1\().8b, \p2\().8b, #2
ext v3.8b, \p1\().8b, \p2\().8b, #3
uaddl v0.8h, \p1\().8b, v1.8b
uaddl v4.8h, v2.8b, v3.8b
add v0.8h, v0.8h, v4.8h
add v0.8h, v0.8h, v5.8h
.endm
// Initialize one row of the 4x4 integral-sum plane: sum[x] =
// prev_row_sum[x] + (src[x]+..+src[x+3]). x3 points at the previous
// row (x0 minus 2*stride bytes); 16 outputs per iteration via two
// integral4h expansions with v6/v7 as the sliding source pair.
function integral_init4h_neon, export=1
sub x3, x0, x2, lsl #1
ld1 {v6.8b,v7.8b}, [x1], #16
1:
subs x2, x2, #16
ld1 {v5.8h}, [x3], #16
integral4h v6, v7
ld1 {v6.8b}, [x1], #8
ld1 {v5.8h}, [x3], #16
st1 {v0.8h}, [x0], #16
integral4h v7, v6
ld1 {v7.8b}, [x1], #8
st1 {v0.8h}, [x0], #16
b.gt 1b
ret
endfunc
// Horizontal 8-tap box sum: v0.8h = p[x]+..+p[x+7] for 8 consecutive x,
// plus \s (previous row's integral). \p1/\p2 are adjacent 8-byte source
// blocks. Clobbers v1-v7.
.macro integral8h p1, p2, s
ext v1.8b, \p1\().8b, \p2\().8b, #1
ext v2.8b, \p1\().8b, \p2\().8b, #2
ext v3.8b, \p1\().8b, \p2\().8b, #3
ext v4.8b, \p1\().8b, \p2\().8b, #4
ext v5.8b, \p1\().8b, \p2\().8b, #5
ext v6.8b, \p1\().8b, \p2\().8b, #6
ext v7.8b, \p1\().8b, \p2\().8b, #7
uaddl v0.8h, \p1\().8b, v1.8b
uaddl v2.8h, v2.8b, v3.8b
uaddl v4.8h, v4.8b, v5.8b
uaddl v6.8h, v6.8b, v7.8b
add v0.8h, v0.8h, v2.8h
add v4.8h, v4.8h, v6.8h
add v0.8h, v0.8h, v4.8h
add v0.8h, v0.8h, \s\().8h
.endm
// Initialize one row of the 8x8 integral-sum plane; same structure as
// integral_init4h_neon but with the 8-tap macro and v16/v17/v18 as the
// working registers (the macro clobbers v0-v7).
function integral_init8h_neon, export=1
sub x3, x0, x2, lsl #1
ld1 {v16.8b,v17.8b}, [x1], #16
1:
subs x2, x2, #16
ld1 {v18.8h}, [x3], #16
integral8h v16, v17, v18
ld1 {v16.8b}, [x1], #8
ld1 {v18.8h}, [x3], #16
st1 {v0.8h}, [x0], #16
integral8h v17, v16, v18
ld1 {v17.8b}, [x1], #8
st1 {v0.8h}, [x0], #16
b.gt 1b
ret
endfunc
// Vertical pass for the 4x4 integral sums: writes sum4 differences
// (row+4 minus row) to [x1] and builds the 8x8 sums into [x0] by
// combining horizontally-shifted row sums from the current row (x3)
// and the row 8 lines down (x8). 16 elements per iteration.
function integral_init4v_neon, export=1
mov x3, x0
add x4, x0, x2, lsl #3 // row + 4 lines (stride*8 bytes)
add x8, x0, x2, lsl #4 // row + 8 lines
sub x2, x2, #8
ld1 {v20.8h,v21.8h,v22.8h}, [x3], #48
ld1 {v16.8h,v17.8h,v18.8h}, [x8], #48
1:
subs x2, x2, #16
ld1 {v24.8h,v25.8h}, [x4], #32
ext v0.16b, v20.16b, v21.16b, #8 // row sums shifted by 4 elems
ext v1.16b, v21.16b, v22.16b, #8
ext v2.16b, v16.16b, v17.16b, #8
ext v3.16b, v17.16b, v18.16b, #8
sub v24.8h, v24.8h, v20.8h // sum4 = below4 - here
sub v25.8h, v25.8h, v21.8h
add v0.8h, v0.8h, v20.8h // 8-wide sums from two 4-wide sums
add v1.8h, v1.8h, v21.8h
add v2.8h, v2.8h, v16.8h
add v3.8h, v3.8h, v17.8h
st1 {v24.8h}, [x1], #16
st1 {v25.8h}, [x1], #16
mov v20.16b, v22.16b
mov v16.16b, v18.16b
sub v0.8h, v2.8h, v0.8h // sum8 = below8 - here (8-wide)
sub v1.8h, v3.8h, v1.8h
ld1 {v21.8h,v22.8h}, [x3], #32
ld1 {v17.8h,v18.8h}, [x8], #32
st1 {v0.8h}, [x0], #16
st1 {v1.8h}, [x0], #16
b.gt 1b
2:
ret
endfunc
// Vertical pass for the 8x8 integral sums: in-place sum8[x] =
// row8[x] - row[x]. Handles one 8-element head when the (width-8)
// count is not a multiple of 16, then 16 elements per iteration.
function integral_init8v_neon, export=1
add x2, x0, x1, lsl #4 // row + 8 lines (stride*16 bytes)
sub x1, x1, #8
ands x3, x1, #16 - 1
b.eq 1f
subs x1, x1, #8
ld1 {v0.8h}, [x0]
ld1 {v2.8h}, [x2], #16
sub v4.8h, v2.8h, v0.8h
st1 {v4.8h}, [x0], #16
b.le 2f
1:
subs x1, x1, #16
ld1 {v0.8h,v1.8h}, [x0]
ld1 {v2.8h,v3.8h}, [x2], #32
sub v4.8h, v2.8h, v0.8h
sub v5.8h, v3.8h, v1.8h
st1 {v4.8h}, [x0], #16
st1 {v5.8h}, [x0], #16
b.gt 1b
2:
ret
endfunc
#else // BIT_DEPTH == 8
// ---- high-bit-depth (16-bit pixel) implementations follow ----
// void pixel_avg( pixel *dst, intptr_t dst_stride,
// pixel *src1, intptr_t src1_stride,
// pixel *src2, intptr_t src2_stride, int weight );
// Dispatcher per block size: weight==32 takes the plain-average path,
// otherwise w6/w7 = (weight, 64-weight) select one of three weighted
// variants by the signs involved. w9 carries the block height.
.macro AVGH w h
function pixel_avg_\w\()x\h\()_neon, export=1
mov w10, #64
cmp w6, #32
mov w9, #\h
b.eq pixel_avg_w\w\()_neon
subs w7, w10, w6
b.lt pixel_avg_weight_w\w\()_add_sub_neon // weight > 64
cmp w6, #0
b.ge pixel_avg_weight_w\w\()_add_add_neon
b pixel_avg_weight_w\w\()_sub_add_neon // weight < 0
endfunc
.endm
// Instantiate pixel_avg entry points for every partition size used.
AVGH 4, 2
AVGH 4, 4
AVGH 4, 8
AVGH 4, 16
AVGH 8, 4
AVGH 8, 8
AVGH 8, 16
AVGH 16, 8
AVGH 16, 16
// 0 < weight < 64
// Both weights non-negative: dst = s1*w + s2*(64-w), widening to 32-bit.
// load_weights_add_add is a deliberate no-op placeholder so AVG_WEIGHT
// can expand load_weights_\ext uniformly for all three sign cases.
.macro load_weights_add_add
mov w6, w6
.endm
.macro weight_add_add dst, s1, s2, h=
.ifc \h, 2
umull2 \dst, \s1, v30.8h
umlal2 \dst, \s2, v31.8h
.else
umull \dst, \s1, v30.4h
umlal \dst, \s2, v31.4h
.endif
.endm
// weight > 64
// w7 = 64-weight is negative; negate it so the multiply-subtract form
// dst = s1*w - s2*(weight-64) is usable with unsigned multiplies.
.macro load_weights_add_sub
neg w7, w7
.endm
.macro weight_add_sub dst, s1, s2, h=
.ifc \h, 2
umull2 \dst, \s1, v30.8h
umlsl2 \dst, \s2, v31.8h
.else
umull \dst, \s1, v30.4h
umlsl \dst, \s2, v31.4h
.endif
.endm
// weight < 0
// Negate the weight so dst = s2*(64-w) - s1*(-w) uses unsigned
// multiplies; note the operand roles of s1/s2 are swapped here.
.macro load_weights_sub_add
neg w6, w6
.endm
.macro weight_sub_add dst, s1, s2, h=
.ifc \h, 2
umull2 \dst, \s2, v31.8h
umlsl2 \dst, \s1, v30.8h
.else
umull \dst, \s2, v31.4h
umlsl \dst, \s1, v30.4h
.endif
.endm
// Weighted average bodies for widths 4/8/16 (high bit depth). v30/v31 =
// the two weights; strides are doubled for 16-bit pixels. Results are
// (acc+32)>>6 with saturation, then clamped to the pixel ceiling
// 0x03ff built by mvni (~(0xfc<<8)). w9 = height from the AVGH shim.
.macro AVG_WEIGHT ext
function pixel_avg_weight_w4_\ext\()_neon
load_weights_\ext
dup v30.8h, w6
dup v31.8h, w7
lsl x3, x3, #1
lsl x5, x5, #1
lsl x1, x1, #1
1: // height loop
subs w9, w9, #2
ld1 {v0.d}[0], [x2], x3
ld1 {v1.d}[0], [x4], x5
weight_\ext v4.4s, v0.4h, v1.4h
ld1 {v2.d}[0], [x2], x3
ld1 {v3.d}[0], [x4], x5
mvni v28.8h, #0xfc, lsl #8 // 0x03ff pixel ceiling
sqrshrun v4.4h, v4.4s, #6
weight_\ext v5.4s, v2.4h, v3.4h
smin v4.4h, v4.4h, v28.4h
sqrshrun v5.4h, v5.4s, #6
st1 {v4.d}[0], [x0], x1
smin v5.4h, v5.4h, v28.4h
st1 {v5.d}[0], [x0], x1
b.gt 1b
ret
endfunc
function pixel_avg_weight_w8_\ext\()_neon
load_weights_\ext
dup v30.8h, w6
dup v31.8h, w7
lsl x1, x1, #1
lsl x3, x3, #1
lsl x5, x5, #1
1: // height loop
subs w9, w9, #4
ld1 {v0.8h}, [x2], x3
ld1 {v1.8h}, [x4], x5
weight_\ext v16.4s, v0.4h, v1.4h
weight_\ext v17.4s, v0.8h, v1.8h, 2
ld1 {v2.8h}, [x2], x3
ld1 {v3.8h}, [x4], x5
weight_\ext v18.4s, v2.4h, v3.4h
weight_\ext v19.4s, v2.8h, v3.8h, 2
ld1 {v4.8h}, [x2], x3
ld1 {v5.8h}, [x4], x5
weight_\ext v20.4s, v4.4h, v5.4h
weight_\ext v21.4s, v4.8h, v5.8h, 2
ld1 {v6.8h}, [x2], x3
ld1 {v7.8h}, [x4], x5
weight_\ext v22.4s, v6.4h, v7.4h
weight_\ext v23.4s, v6.8h, v7.8h, 2
mvni v28.8h, #0xfc, lsl #8 // 0x03ff pixel ceiling
sqrshrun v0.4h, v16.4s, #6
sqrshrun v2.4h, v18.4s, #6
sqrshrun v4.4h, v20.4s, #6
sqrshrun2 v0.8h, v17.4s, #6
sqrshrun v6.4h, v22.4s, #6
sqrshrun2 v2.8h, v19.4s, #6
sqrshrun2 v4.8h, v21.4s, #6
smin v0.8h, v0.8h, v28.8h
smin v2.8h, v2.8h, v28.8h
sqrshrun2 v6.8h, v23.4s, #6
smin v4.8h, v4.8h, v28.8h
smin v6.8h, v6.8h, v28.8h
st1 {v0.8h}, [x0], x1
st1 {v2.8h}, [x0], x1
st1 {v4.8h}, [x0], x1
st1 {v6.8h}, [x0], x1
b.gt 1b
ret
endfunc
function pixel_avg_weight_w16_\ext\()_neon
load_weights_\ext
dup v30.8h, w6
dup v31.8h, w7
lsl x1, x1, #1
lsl x3, x3, #1
lsl x5, x5, #1
1: // height loop
subs w9, w9, #2
ld1 {v0.8h, v1.8h}, [x2], x3
ld1 {v2.8h, v3.8h}, [x4], x5
ld1 {v4.8h, v5.8h}, [x2], x3
ld1 {v6.8h, v7.8h}, [x4], x5
weight_\ext v16.4s, v0.4h, v2.4h
weight_\ext v17.4s, v0.8h, v2.8h, 2
weight_\ext v18.4s, v1.4h, v3.4h
weight_\ext v19.4s, v1.8h, v3.8h, 2
weight_\ext v20.4s, v4.4h, v6.4h
weight_\ext v21.4s, v4.8h, v6.8h, 2
weight_\ext v22.4s, v5.4h, v7.4h
weight_\ext v23.4s, v5.8h, v7.8h, 2
mvni v28.8h, #0xfc, lsl #8 // 0x03ff pixel ceiling
sqrshrun v0.4h, v16.4s, #6
sqrshrun v1.4h, v18.4s, #6
sqrshrun v2.4h, v20.4s, #6
sqrshrun2 v0.8h, v17.4s, #6
sqrshrun2 v1.8h, v19.4s, #6
sqrshrun2 v2.8h, v21.4s, #6
smin v0.8h, v0.8h, v28.8h
smin v1.8h, v1.8h, v28.8h
sqrshrun v3.4h, v22.4s, #6
smin v2.8h, v2.8h, v28.8h
sqrshrun2 v3.8h, v23.4s, #6
smin v3.8h, v3.8h, v28.8h
st1 {v0.8h, v1.8h}, [x0], x1
st1 {v2.8h, v3.8h}, [x0], x1
b.gt 1b
ret
endfunc
.endm
// Instantiate the three weight-sign variants dispatched by AVGH.
AVG_WEIGHT add_add
AVG_WEIGHT add_sub
AVG_WEIGHT sub_add
// Unweighted (rounded) average, width 4, 16-bit pixels: two rows per
// iteration packed into the d-lanes of one q register. Strides doubled
// for the 2-byte pixel size; w9 = height.
function pixel_avg_w4_neon
lsl x1, x1, #1
lsl x3, x3, #1
lsl x5, x5, #1
1: subs w9, w9, #2
ld1 {v0.d}[0], [x2], x3
ld1 {v2.d}[0], [x4], x5
ld1 {v0.d}[1], [x2], x3
ld1 {v2.d}[1], [x4], x5
urhadd v0.8h, v0.8h, v2.8h
st1 {v0.d}[0], [x0], x1
st1 {v0.d}[1], [x0], x1
b.gt 1b
ret
endfunc
// Unweighted (rounded) average, width 8, 16-bit pixels; four rows per
// iteration with loads and stores interleaved to hide latency.
function pixel_avg_w8_neon
lsl x1, x1, #1
lsl x3, x3, #1
lsl x5, x5, #1
1: subs w9, w9, #4
ld1 {v0.8h}, [x2], x3
ld1 {v1.8h}, [x4], x5
ld1 {v2.8h}, [x2], x3
urhadd v0.8h, v0.8h, v1.8h
ld1 {v3.8h}, [x4], x5
st1 {v0.8h}, [x0], x1
ld1 {v4.8h}, [x2], x3
urhadd v1.8h, v2.8h, v3.8h
ld1 {v5.8h}, [x4], x5
st1 {v1.8h}, [x0], x1
ld1 {v6.8h}, [x2], x3
ld1 {v7.8h}, [x4], x5
urhadd v2.8h, v4.8h, v5.8h
urhadd v3.8h, v6.8h, v7.8h
st1 {v2.8h}, [x0], x1
st1 {v3.8h}, [x0], x1
b.gt 1b
ret
endfunc
// Unweighted (rounded) average, width 16, 16-bit pixels; four rows per
// iteration, two q registers per row.
function pixel_avg_w16_neon
lsl x1, x1, #1
lsl x3, x3, #1
lsl x5, x5, #1
1: subs w9, w9, #4
ld1 {v0.8h, v1.8h}, [x2], x3
ld1 {v2.8h, v3.8h}, [x4], x5
ld1 {v4.8h, v5.8h}, [x2], x3
urhadd v0.8h, v0.8h, v2.8h
urhadd v1.8h, v1.8h, v3.8h
ld1 {v6.8h, v7.8h}, [x4], x5
ld1 {v20.8h, v21.8h}, [x2], x3
st1 {v0.8h, v1.8h}, [x0], x1
urhadd v4.8h, v4.8h, v6.8h
urhadd v5.8h, v5.8h, v7.8h
ld1 {v22.8h, v23.8h}, [x4], x5
ld1 {v24.8h, v25.8h}, [x2], x3
st1 {v4.8h, v5.8h}, [x0], x1
ld1 {v26.8h, v27.8h}, [x4], x5
urhadd v20.8h, v20.8h, v22.8h
urhadd v21.8h, v21.8h, v23.8h
urhadd v24.8h, v24.8h, v26.8h
urhadd v25.8h, v25.8h, v27.8h
st1 {v20.8h, v21.8h}, [x0], x1
st1 {v24.8h, v25.8h}, [x0], x1
b.gt 1b
ret
endfunc
// pixel_avg2: average two references that share src_stride (x3);
// width 4, 16-bit pixels, two rows per iteration. w5 = height.
function pixel_avg2_w4_neon, export=1
lsl x1, x1, #1
lsl x3, x3, #1
1:
subs w5, w5, #2
ld1 {v0.4h}, [x2], x3
ld1 {v2.4h}, [x4], x3
ld1 {v1.4h}, [x2], x3
ld1 {v3.4h}, [x4], x3
urhadd v0.4h, v0.4h, v2.4h
urhadd v1.4h, v1.4h, v3.4h
st1 {v0.4h}, [x0], x1
st1 {v1.4h}, [x0], x1
b.gt 1b
ret
endfunc
// pixel_avg2, width 8, 16-bit pixels; both sources advance by x3.
function pixel_avg2_w8_neon, export=1
lsl x1, x1, #1
lsl x3, x3, #1
1:
subs w5, w5, #2
ld1 {v0.8h}, [x2], x3
ld1 {v2.8h}, [x4], x3
ld1 {v1.8h}, [x2], x3
ld1 {v3.8h}, [x4], x3
urhadd v0.8h, v0.8h, v2.8h
urhadd v1.8h, v1.8h, v3.8h
st1 {v0.8h}, [x0], x1
st1 {v1.8h}, [x0], x1
b.gt 1b
ret
endfunc
// pixel_avg2, width 16, 16-bit pixels; two rows per iteration.
function pixel_avg2_w16_neon, export=1
lsl x1, x1, #1
lsl x3, x3, #1
1:
subs w5, w5, #2
ld1 {v0.8h, v1.8h}, [x2], x3
ld1 {v2.8h, v3.8h}, [x4], x3
ld1 {v4.8h, v5.8h}, [x2], x3
ld1 {v6.8h, v7.8h}, [x4], x3
urhadd v0.8h, v0.8h, v2.8h
urhadd v1.8h, v1.8h, v3.8h
urhadd v4.8h, v4.8h, v6.8h
urhadd v5.8h, v5.8h, v7.8h
st1 {v0.8h, v1.8h}, [x0], x1
st1 {v4.8h, v5.8h}, [x0], x1
b.gt 1b
ret
endfunc
// pixel_avg2, width 20 (16+4), 16-bit pixels. The 4-pixel tail uses
// .4h ops; dst stride is pre-decremented by the 32 bytes the first
// store advances.
function pixel_avg2_w20_neon, export=1
lsl x1, x1, #1
lsl x3, x3, #1
sub x1, x1, #32
1:
subs w5, w5, #2
ld1 {v0.8h, v1.8h, v2.8h}, [x2], x3
ld1 {v3.8h, v4.8h, v5.8h}, [x4], x3
ld1 {v20.8h, v21.8h, v22.8h}, [x2], x3
ld1 {v23.8h, v24.8h, v25.8h}, [x4], x3
urhadd v0.8h, v0.8h, v3.8h
urhadd v1.8h, v1.8h, v4.8h
urhadd v2.4h, v2.4h, v5.4h
urhadd v20.8h, v20.8h, v23.8h
urhadd v21.8h, v21.8h, v24.8h
urhadd v22.4h, v22.4h, v25.4h
st1 {v0.8h, v1.8h}, [x0], #32
st1 {v2.4h}, [x0], x1
st1 {v20.8h, v21.8h}, [x0], #32
st1 {v22.4h}, [x0], x1
b.gt 1b
ret
endfunc
// void mc_copy( pixel *dst, intptr_t dst_stride, pixel *src, intptr_t src_stride, int height )
// Straight block copy, width 4, 16-bit pixels (8 bytes/row in a d-lane);
// four rows per iteration, w4 = height.
function mc_copy_w4_neon, export=1
lsl x1, x1, #1
lsl x3, x3, #1
1:
subs w4, w4, #4
ld1 {v0.d}[0], [x2], x3
ld1 {v1.d}[0], [x2], x3
ld1 {v2.d}[0], [x2], x3
ld1 {v3.d}[0], [x2], x3
st1 {v0.d}[0], [x0], x1
st1 {v1.d}[0], [x0], x1
st1 {v2.d}[0], [x0], x1
st1 {v3.d}[0], [x0], x1
b.gt 1b
ret
endfunc
// Straight block copy, width 8, 16-bit pixels; four rows per iteration.
function mc_copy_w8_neon, export=1
lsl x1, x1, #1
lsl x3, x3, #1
1: subs w4, w4, #4
ld1 {v0.8h}, [x2], x3
ld1 {v1.8h}, [x2], x3
ld1 {v2.8h}, [x2], x3
ld1 {v3.8h}, [x2], x3
st1 {v0.8h}, [x0], x1
st1 {v1.8h}, [x0], x1
st1 {v2.8h}, [x0], x1
st1 {v3.8h}, [x0], x1
b.gt 1b
ret
endfunc
// Straight block copy, width 16, 16-bit pixels; four rows per iteration.
function mc_copy_w16_neon, export=1
lsl x1, x1, #1
lsl x3, x3, #1
1: subs w4, w4, #4
ld1 {v0.8h, v1.8h}, [x2], x3
ld1 {v2.8h, v3.8h}, [x2], x3
ld1 {v4.8h, v5.8h}, [x2], x3
ld1 {v6.8h, v7.8h}, [x2], x3
st1 {v0.8h, v1.8h}, [x0], x1
st1 {v2.8h, v3.8h}, [x0], x1
st1 {v4.8h, v5.8h}, [x0], x1
st1 {v6.8h, v7.8h}, [x0], x1
b.gt 1b
ret
endfunc
// Load weighting parameters from the x264_weight_t at x4:
// v0 = scale (replicated), v1 = offset<<2 (the 10-bit scaling of the
// 8-bit offset), and for \type==full v2 = -denom for srshl's
// right-shift-by-register. w9 = height copied out of w5.
// Field offsets (#32, #32+4) refer to the x264_weight_t layout —
// declared outside this chunk; confirm against common.h if changed.
.macro weight_prologue type
mov w9, w5 // height
.ifc \type, full
ldr w12, [x4, #32] // denom
.endif
ldp w4, w5, [x4, #32+4] // scale, offset
dup v0.8h, w4
lsl w5, w5, #2
dup v1.4s, w5
.ifc \type, full
neg w12, w12
dup v2.4s, w12
.endif
.endm
// void mc_weight( pixel *src, intptr_t src_stride, pixel *dst,
// intptr_t dst_stride, const x264_weight_t *weight, int h )
// Full weighting (with denom), width 20: dst = clamp(((src*scale) >>
// denom) + offset). Two rows per iteration; the two 4-pixel row tails
// (v18/v21 low halves) are merged into v24 and stored as d[0]/d[1].
// umin against 0x03ff (mvni of 0xfc<<8) clamps to the 10-bit ceiling.
function mc_weight_w20_neon, export=1
weight_prologue full
lsl x3, x3, #1
lsl x1, x1, #1
sub x1, x1, #32
1:
subs w9, w9, #2
ld1 {v16.8h, v17.8h, v18.8h}, [x2], x3
ld1 {v19.8h, v20.8h, v21.8h}, [x2], x3
umull v22.4s, v16.4h, v0.4h
umull2 v23.4s, v16.8h, v0.8h
umull v24.4s, v17.4h, v0.4h
umull2 v25.4s, v17.8h, v0.8h
umull v26.4s, v18.4h, v0.4h
umull v27.4s, v21.4h, v0.4h
srshl v22.4s, v22.4s, v2.4s // rounding >> denom (v2 = -denom)
srshl v23.4s, v23.4s, v2.4s
srshl v24.4s, v24.4s, v2.4s
srshl v25.4s, v25.4s, v2.4s
srshl v26.4s, v26.4s, v2.4s
srshl v27.4s, v27.4s, v2.4s
add v22.4s, v22.4s, v1.4s
add v23.4s, v23.4s, v1.4s
add v24.4s, v24.4s, v1.4s
add v25.4s, v25.4s, v1.4s
add v26.4s, v26.4s, v1.4s
add v27.4s, v27.4s, v1.4s
sqxtun v22.4h, v22.4s
sqxtun2 v22.8h, v23.4s
sqxtun v23.4h, v24.4s
sqxtun2 v23.8h, v25.4s
sqxtun v24.4h, v26.4s
sqxtun2 v24.8h, v27.4s
umull v16.4s, v19.4h, v0.4h
umull2 v17.4s, v19.8h, v0.8h
umull v18.4s, v20.4h, v0.4h
umull2 v19.4s, v20.8h, v0.8h
srshl v16.4s, v16.4s, v2.4s
srshl v17.4s, v17.4s, v2.4s
srshl v18.4s, v18.4s, v2.4s
srshl v19.4s, v19.4s, v2.4s
add v16.4s, v16.4s, v1.4s
add v17.4s, v17.4s, v1.4s
add v18.4s, v18.4s, v1.4s
add v19.4s, v19.4s, v1.4s
sqxtun v16.4h, v16.4s
sqxtun2 v16.8h, v17.4s
sqxtun v17.4h, v18.4s
sqxtun2 v17.8h, v19.4s
mvni v31.8h, #0xfc, lsl #8
umin v22.8h, v22.8h, v31.8h
umin v23.8h, v23.8h, v31.8h
umin v24.8h, v24.8h, v31.8h
umin v16.8h, v16.8h, v31.8h
umin v17.8h, v17.8h, v31.8h
st1 {v22.8h, v23.8h}, [x0], #32
st1 {v24.d}[0], [x0], x1
st1 {v16.8h, v17.8h}, [x0], #32
st1 {v24.d}[1], [x0], x1
b.gt 1b
ret
endfunc
// Full weighting (with denom), width 16; two rows per iteration,
// same scale/srshl/offset/clamp pipeline as the w20 variant.
function mc_weight_w16_neon, export=1
weight_prologue full
lsl x1, x1, #1
lsl x3, x3, #1
1:
subs w9, w9, #2
ld1 {v4.8h, v5.8h}, [x2], x3
ld1 {v6.8h, v7.8h}, [x2], x3
umull v22.4s, v4.4h, v0.4h
umull2 v23.4s, v4.8h, v0.8h
umull v24.4s, v5.4h, v0.4h
umull2 v25.4s, v5.8h, v0.8h
srshl v22.4s, v22.4s, v2.4s
srshl v23.4s, v23.4s, v2.4s
srshl v24.4s, v24.4s, v2.4s
srshl v25.4s, v25.4s, v2.4s
add v22.4s, v22.4s, v1.4s
add v23.4s, v23.4s, v1.4s
add v24.4s, v24.4s, v1.4s
add v25.4s, v25.4s, v1.4s
sqxtun v22.4h, v22.4s
sqxtun2 v22.8h, v23.4s
sqxtun v23.4h, v24.4s
sqxtun2 v23.8h, v25.4s
umull v26.4s, v6.4h, v0.4h
umull2 v27.4s, v6.8h, v0.8h
umull v28.4s, v7.4h, v0.4h
umull2 v29.4s, v7.8h, v0.8h
srshl v26.4s, v26.4s, v2.4s
srshl v27.4s, v27.4s, v2.4s
srshl v28.4s, v28.4s, v2.4s
srshl v29.4s, v29.4s, v2.4s
add v26.4s, v26.4s, v1.4s
add v27.4s, v27.4s, v1.4s
add v28.4s, v28.4s, v1.4s
add v29.4s, v29.4s, v1.4s
sqxtun v26.4h, v26.4s
sqxtun2 v26.8h, v27.4s
sqxtun v27.4h, v28.4s
sqxtun2 v27.8h, v29.4s
mvni v31.8h, 0xfc, lsl #8
umin v22.8h, v22.8h, v31.8h
umin v23.8h, v23.8h, v31.8h
umin v26.8h, v26.8h, v31.8h
umin v27.8h, v27.8h, v31.8h
st1 {v22.8h, v23.8h}, [x0], x1
st1 {v26.8h, v27.8h}, [x0], x1
b.gt 1b
ret
endfunc
// Full weighting (with denom), width 8; two rows per iteration.
function mc_weight_w8_neon, export=1
weight_prologue full
lsl x3, x3, #1
lsl x1, x1, #1
1:
subs w9, w9, #2
ld1 {v16.8h}, [x2], x3
ld1 {v17.8h}, [x2], x3
umull v4.4s, v16.4h, v0.4h
umull2 v5.4s, v16.8h, v0.8h
umull v6.4s, v17.4h, v0.4h
umull2 v7.4s, v17.8h, v0.8h
srshl v4.4s, v4.4s, v2.4s
srshl v5.4s, v5.4s, v2.4s
srshl v6.4s, v6.4s, v2.4s
srshl v7.4s, v7.4s, v2.4s
add v4.4s, v4.4s, v1.4s
add v5.4s, v5.4s, v1.4s
add v6.4s, v6.4s, v1.4s
add v7.4s, v7.4s, v1.4s
sqxtun v16.4h, v4.4s
sqxtun2 v16.8h, v5.4s
sqxtun v17.4h, v6.4s
sqxtun2 v17.8h, v7.4s
mvni v28.8h, #0xfc, lsl #8
umin v16.8h, v16.8h, v28.8h
umin v17.8h, v17.8h, v28.8h
st1 {v16.8h}, [x0], x1
st1 {v17.8h}, [x0], x1
b.gt 1b
ret
endfunc
// Full weighting (with denom), width 4; two rows packed into the
// d-lanes of one q register per iteration.
function mc_weight_w4_neon, export=1
weight_prologue full
lsl x3, x3, #1
lsl x1, x1, #1
1:
subs w9, w9, #2
ld1 {v16.d}[0], [x2], x3
ld1 {v16.d}[1], [x2], x3
umull v4.4s, v16.4h, v0.4h
umull2 v5.4s, v16.8h, v0.8h
srshl v4.4s, v4.4s, v2.4s
srshl v5.4s, v5.4s, v2.4s
add v4.4s, v4.4s, v1.4s
add v5.4s, v5.4s, v1.4s
sqxtun v16.4h, v4.4s
sqxtun2 v16.8h, v5.4s
mvni v28.8h, #0xfc, lsl #8
umin v16.8h, v16.8h, v28.8h
st1 {v16.d}[0], [x0], x1
st1 {v16.d}[1], [x0], x1
b.gt 1b
ret
endfunc
// Weighting with denom==0, width 20: dst = clamp(src*scale + offset),
// computed as umlal into accumulators pre-seeded with the offset (v1).
// Row tails (v18/v4 low halves) are merged into v4 and stored as
// d[0]/d[1]; two rows per iteration.
function mc_weight_w20_nodenom_neon, export=1
weight_prologue nodenom
lsl x3, x3, #1
lsl x1, x1, #1
sub x1, x1, #32
1:
subs w9, w9, #2
ld1 {v16.8h, v17.8h, v18.8h}, [x2], x3
mov v20.16b, v1.16b
mov v21.16b, v1.16b
mov v22.16b, v1.16b
mov v23.16b, v1.16b
mov v24.16b, v1.16b
mov v25.16b, v1.16b
ld1 {v2.8h, v3.8h, v4.8h}, [x2], x3
mov v26.16b, v1.16b
mov v27.16b, v1.16b
mov v28.16b, v1.16b
mov v29.16b, v1.16b
umlal v20.4s, v16.4h, v0.4h
umlal2 v21.4s, v16.8h, v0.8h
umlal v22.4s, v17.4h, v0.4h
umlal2 v23.4s, v17.8h, v0.8h
umlal v24.4s, v18.4h, v0.4h
umlal v25.4s, v4.4h, v0.4h
umlal v26.4s, v2.4h, v0.4h
umlal2 v27.4s, v2.8h, v0.8h
umlal v28.4s, v3.4h, v0.4h
umlal2 v29.4s, v3.8h, v0.8h
sqxtun v2.4h, v20.4s
sqxtun2 v2.8h, v21.4s
sqxtun v3.4h, v22.4s
sqxtun2 v3.8h, v23.4s
sqxtun v4.4h, v24.4s
sqxtun2 v4.8h, v25.4s
sqxtun v5.4h, v26.4s
sqxtun2 v5.8h, v27.4s
sqxtun v6.4h, v28.4s
sqxtun2 v6.8h, v29.4s
mvni v31.8h, 0xfc, lsl #8
umin v2.8h, v2.8h, v31.8h
umin v3.8h, v3.8h, v31.8h
umin v4.8h, v4.8h, v31.8h
umin v5.8h, v5.8h, v31.8h
umin v6.8h, v6.8h, v31.8h
st1 {v2.8h, v3.8h}, [x0], #32
st1 {v4.d}[0], [x0], x1
st1 {v5.8h, v6.8h}, [x0], #32
st1 {v4.d}[1], [x0], x1
b.gt 1b
ret
endfunc
// Weighting with denom==0, width 16; offset-seeded umlal pipeline,
// two rows per iteration.
function mc_weight_w16_nodenom_neon, export=1
weight_prologue nodenom
lsl x1, x1, #1
lsl x3, x3, #1
1:
subs w9, w9, #2
ld1 {v2.8h, v3.8h}, [x2], x3
mov v27.16b, v1.16b
mov v28.16b, v1.16b
mov v29.16b, v1.16b
mov v30.16b, v1.16b
ld1 {v4.8h, v5.8h}, [x2], x3
mov v20.16b, v1.16b
mov v21.16b, v1.16b
mov v22.16b, v1.16b
mov v23.16b, v1.16b
umlal v27.4s, v2.4h, v0.4h
umlal2 v28.4s, v2.8h, v0.8h
umlal v29.4s, v3.4h, v0.4h
umlal2 v30.4s, v3.8h, v0.8h
umlal v20.4s, v4.4h, v0.4h
umlal2 v21.4s, v4.8h, v0.8h
umlal v22.4s, v5.4h, v0.4h
umlal2 v23.4s, v5.8h, v0.8h
sqxtun v2.4h, v27.4s
sqxtun2 v2.8h, v28.4s
sqxtun v3.4h, v29.4s
sqxtun2 v3.8h, v30.4s
sqxtun v4.4h, v20.4s
sqxtun2 v4.8h, v21.4s
sqxtun v5.4h, v22.4s
sqxtun2 v5.8h, v23.4s
mvni v31.8h, 0xfc, lsl #8
umin v2.8h, v2.8h, v31.8h
umin v3.8h, v3.8h, v31.8h
umin v4.8h, v4.8h, v31.8h
umin v5.8h, v5.8h, v31.8h
st1 {v2.8h, v3.8h}, [x0], x1
st1 {v4.8h, v5.8h}, [x0], x1
b.gt 1b
ret
endfunc
// Weighting with denom==0, width 8; two rows per iteration.
function mc_weight_w8_nodenom_neon, export=1
weight_prologue nodenom
lsl x1, x1, #1
lsl x3, x3, #1
1:
subs w9, w9, #2
ld1 {v16.8h}, [x2], x3
mov v27.16b, v1.16b
ld1 {v17.8h}, [x2], x3
mov v28.16b, v1.16b
mov v29.16b, v1.16b
mov v30.16b, v1.16b
umlal v27.4s, v16.4h, v0.4h
umlal2 v28.4s, v16.8h, v0.8h
umlal v29.4s, v17.4h, v0.4h
umlal2 v30.4s, v17.8h, v0.8h
sqxtun v4.4h, v27.4s
sqxtun2 v4.8h, v28.4s
sqxtun v5.4h, v29.4s
sqxtun2 v5.8h, v30.4s
mvni v31.8h, 0xfc, lsl #8
umin v4.8h, v4.8h, v31.8h
umin v5.8h, v5.8h, v31.8h
st1 {v4.8h}, [x0], x1
st1 {v5.8h}, [x0], x1
b.gt 1b
ret
endfunc
// Weighting with denom==0, width 4; two rows packed into one q register.
function mc_weight_w4_nodenom_neon, export=1
weight_prologue nodenom
lsl x1, x1, #1
lsl x3, x3, #1
1:
subs w9, w9, #2
ld1 {v16.d}[0], [x2], x3
ld1 {v16.d}[1], [x2], x3
mov v27.16b, v1.16b
mov v28.16b, v1.16b
umlal v27.4s, v16.4h, v0.4h
umlal2 v28.4s, v16.8h, v0.8h
sqxtun v4.4h, v27.4s
sqxtun2 v4.8h, v28.4s
mvni v31.8h, 0xfc, lsl #8
umin v4.8h, v4.8h, v31.8h
st1 {v4.d}[0], [x0], x1
st1 {v4.d}[1], [x0], x1
b.gt 1b
ret
endfunc
// Load the offset field of the x264_weight_t at x4, scale it by 4
// (8-bit offset -> 10-bit pixels) and replicate into v1 for the
// offset-only weighting variants below.
.macro weight_simple_prologue
ldr w6, [x4] // offset
lsl w6, w6, #2
dup v1.8h, w6
.endm
// Offset-only weighting (scale==1<<denom path): dst = clamp(src <op> offset)
// where \op is uqadd (offsetadd) or uqsub (offsetsub) and v1 holds the
// scaled offset from weight_simple_prologue. Each width variant loads the
// 0x03ff pixel ceiling with mvni (~(0xfc<<8)) and clamps with umin.
// BUG FIX: the w20 variant loaded the ceiling into v31 but clamped
// against v28, which is never written in that function; the umin lines
// now use v31, matching the register the mvni actually initializes
// (and matching the w16/w8/w4 variants, which clamp against the
// register they load).
.macro weight_simple name op
function mc_weight_w20_\name\()_neon, export=1
weight_simple_prologue
lsl x1, x1, #1
lsl x3, x3, #1
sub x1, x1, #32
1:
subs w5, w5, #2
ld1 {v2.8h, v3.8h, v4.8h}, [x2], x3
ld1 {v5.8h, v6.8h, v7.8h}, [x2], x3
zip1 v4.2d, v4.2d, v7.2d // merge the two 4-pixel row tails
\op v2.8h, v2.8h, v1.8h
\op v3.8h, v3.8h, v1.8h
\op v4.8h, v4.8h, v1.8h
\op v5.8h, v5.8h, v1.8h
\op v6.8h, v6.8h, v1.8h
mvni v31.8h, #0xfc, lsl #8
umin v2.8h, v2.8h, v31.8h
umin v3.8h, v3.8h, v31.8h
umin v4.8h, v4.8h, v31.8h
umin v5.8h, v5.8h, v31.8h
umin v6.8h, v6.8h, v31.8h
st1 {v2.8h, v3.8h}, [x0], #32
st1 {v4.d}[0], [x0], x1
st1 {v5.8h, v6.8h}, [x0], #32
st1 {v4.d}[1], [x0], x1
b.gt 1b
ret
endfunc
function mc_weight_w16_\name\()_neon, export=1
weight_simple_prologue
lsl x1, x1, #1
lsl x3, x3, #1
1:
subs w5, w5, #2
ld1 {v16.8h, v17.8h}, [x2], x3
ld1 {v18.8h, v19.8h}, [x2], x3
\op v16.8h, v16.8h, v1.8h
\op v17.8h, v17.8h, v1.8h
\op v18.8h, v18.8h, v1.8h
\op v19.8h, v19.8h, v1.8h
mvni v28.8h, #0xfc, lsl #8
umin v16.8h, v16.8h, v28.8h
umin v17.8h, v17.8h, v28.8h
umin v18.8h, v18.8h, v28.8h
umin v19.8h, v19.8h, v28.8h
st1 {v16.8h, v17.8h}, [x0], x1
st1 {v18.8h, v19.8h}, [x0], x1
b.gt 1b
ret
endfunc
function mc_weight_w8_\name\()_neon, export=1
weight_simple_prologue
lsl x1, x1, #1
lsl x3, x3, #1
1:
subs w5, w5, #2
ld1 {v16.8h}, [x2], x3
ld1 {v17.8h}, [x2], x3
\op v16.8h, v16.8h, v1.8h
\op v17.8h, v17.8h, v1.8h
mvni v28.8h, #0xfc, lsl #8
umin v16.8h, v16.8h, v28.8h
umin v17.8h, v17.8h, v28.8h
st1 {v16.8h}, [x0], x1
st1 {v17.8h}, [x0], x1
b.gt 1b
ret
endfunc
function mc_weight_w4_\name\()_neon, export=1
weight_simple_prologue
lsl x1, x1, #1
lsl x3, x3, #1
1:
subs w5, w5, #2
ld1 {v16.d}[0], [x2], x3
ld1 {v16.d}[1], [x2], x3
\op v16.8h, v16.8h, v1.8h
mvni v28.8h, #0xfc, lsl #8
umin v16.8h, v16.8h, v28.8h
st1 {v16.d}[0], [x0], x1
st1 {v16.d}[1], [x0], x1
b.gt 1b
ret
endfunc
.endm
weight_simple offsetadd, uqadd
weight_simple offsetsub, uqsub
// void mc_chroma( pixel *dst_u, pixel *dst_v,
// intptr_t i_dst_stride,
// pixel *src, intptr_t i_src_stride,
// int dx, int dy, int i_width, int i_height );
// High-bit-depth chroma dispatcher: advances src by the integer part of
// (dx,dy) — lsl #2 because interleaved UV 16-bit pixels are 4 bytes per
// pair — keeps the fractional parts in w5/w6, and jumps to the width-8
// or width-4 body (falls through for smaller widths).
function mc_chroma_neon, export=1
ldr w15, [sp] // height
sbfx x12, x6, #3, #29 // asr(3) and sign extend
sbfx x11, x5, #3, #29 // asr(3) and sign extend
cmp w7, #4
lsl x4, x4, #1
mul x12, x12, x4
add x3, x3, x11, lsl #2
and w5, w5, #7
and w6, w6, #7
add x3, x3, x12
b.gt mc_chroma_w8_neon
b.eq mc_chroma_w4_neon
endfunc
// Compute the four bilinear coefficients from the fractional offsets
// d8x (w5) and d8y (w6): cA..cD in w9..w12, summing to 64. Also sets
// flags via tst so the caller's b.eq takes the 1-D path when cD==0
// (dx or dy fractional part is zero). The r00..r11 parameters are
// unused placeholders.
.macro CHROMA_MC_START r00, r01, r10, r11
mul w12, w5, w6 // cD = d8x *d8y
lsl w13, w5, #3
add w9, w12, #64
lsl w14, w6, #3
tst w12, w12
sub w9, w9, w13
sub w10, w13, w12 // cB = d8x *(8-d8y);
sub w11, w14, w12 // cC = (8-d8x)*d8y
sub w9, w9, w14 // cA = (8-d8x)*(8-d8y);
.endm
// Width-2/4 chroma MC body. Input rows are interleaved U/V (ld2 splits
// them into even/odd vectors); U and V for two rows are packed into the
// d-lanes of one q register so one multiply chain serves both planes.
// Three paths: full 2-D bilinear (label 1), vertical-only (label 3),
// horizontal-only (label 4). \vsize selects the store element size and
// idx2 the lane index of the second row/plane within the vector.
.macro CHROMA_MC width, vsize
function mc_chroma_w\width\()_neon
lsl x2, x2, #1 // dst stride in bytes
// since the element size varies, there's a different index for the 2nd store
.if \width == 4
.set idx2, 1
.else
.set idx2, 2
.endif
CHROMA_MC_START
b.eq 2f // cD == 0: 1-D filter is enough
ld2 {v28.8h, v29.8h}, [x3], x4
dup v0.8h, w9 // cA
dup v1.8h, w10 // cB
ext v6.16b, v28.16b, v28.16b, #2 // shift left one sample (x+1 taps)
ext v7.16b, v29.16b, v29.16b, #2
ld2 {v30.8h, v31.8h}, [x3], x4
dup v2.8h, w11 // cC
dup v3.8h, w12 // cD
ext v22.16b, v30.16b, v30.16b, #2
ext v23.16b, v31.16b, v31.16b, #2
trn1 v0.2d, v0.2d, v1.2d // pack cA|cB so one mul covers both taps
trn1 v2.2d, v2.2d, v3.2d // pack cC|cD
trn1 v4.2d, v28.2d, v6.2d // row0: x | x+1 samples
trn1 v5.2d, v29.2d, v7.2d
trn1 v20.2d, v30.2d, v22.2d // row1
trn1 v21.2d, v31.2d, v23.2d
1: // height loop, interpolate xy
subs w15, w15, #2
mul v16.8h, v4.8h, v0.8h // row0*(cA|cB)
mul v17.8h, v5.8h, v0.8h
mla v16.8h, v20.8h, v2.8h // + row1*(cC|cD)
mla v17.8h, v21.8h, v2.8h
ld2 {v28.8h, v29.8h}, [x3], x4 // preload row2
transpose v24.2d, v25.2d, v16.2d, v17.2d
ext v6.16b, v28.16b, v28.16b, #2
ext v7.16b, v29.16b, v29.16b, #2
trn1 v4.2d, v28.2d, v6.2d
trn1 v5.2d, v29.2d, v7.2d
add v16.8h, v24.8h, v25.8h // combine the two tap halves
urshr v16.8h, v16.8h, #6 // >> 6 with rounding
mul v18.8h, v20.8h, v0.8h // second output row, taps shifted by one
mul v19.8h, v21.8h, v0.8h
mla v18.8h, v4.8h, v2.8h
mla v19.8h, v5.8h, v2.8h
ld2 {v30.8h, v31.8h}, [x3], x4
transpose v26.2d, v27.2d, v18.2d, v19.2d
add v18.8h, v26.8h, v27.8h
urshr v18.8h, v18.8h, #6
ext v22.16b, v30.16b, v30.16b, #2
ext v23.16b, v31.16b, v31.16b, #2
trn1 v20.2d, v30.2d, v22.2d
trn1 v21.2d, v31.2d, v23.2d
st1 {v16.\vsize}[0], [x0], x2 // U row
st1 {v16.\vsize}[idx2], [x1], x2 // V row
st1 {v18.\vsize}[0], [x0], x2
st1 {v18.\vsize}[idx2], [x1], x2
b.gt 1b
ret
2: // dx or dy are 0
tst w11, w11 // cC == 0 -> dy == 0 -> horizontal-only
add w10, w10, w11 // merge cB+cC into the single second tap
dup v0.8h, w9
dup v1.8h, w10
b.eq 4f
ld1 {v4.8h}, [x3], x4
ld1 {v6.8h}, [x3], x4
3: // vertical interpolation loop
subs w15, w15, #2
mul v16.8h, v4.8h, v0.8h
mla v16.8h, v6.8h, v1.8h
ld1 {v4.8h}, [x3], x4
mul v17.8h, v6.8h, v0.8h
mla v17.8h, v4.8h, v1.8h
ld1 {v6.8h}, [x3], x4
urshr v16.8h, v16.8h, #6
urshr v17.8h, v17.8h, #6
uzp1 v18.8h, v16.8h, v17.8h // d16=uuuu|uuuu, d17=vvvv|vvvv
uzp2 v19.8h, v16.8h, v17.8h // d16=uuuu|uuuu, d17=vvvv|vvvv
st1 {v18.\vsize}[0], [x0], x2
st1 {v18.\vsize}[idx2], [x0], x2
st1 {v19.\vsize}[0], [x1], x2
st1 {v19.\vsize}[idx2], [x1], x2
b.gt 3b
ret
4: // dy is 0
ld1 {v4.8h, v5.8h}, [x3], x4
ld1 {v6.8h, v7.8h}, [x3], x4
ext v5.16b, v4.16b, v5.16b, #4 // x+1 U/V pair (4 bytes = 1 pair)
ext v7.16b, v6.16b, v7.16b, #4
5: // horizontal interpolation loop
subs w15, w15, #2
mul v16.8h, v4.8h, v0.8h
mla v16.8h, v5.8h, v1.8h
mul v17.8h, v6.8h, v0.8h
mla v17.8h, v7.8h, v1.8h
ld1 {v4.8h, v5.8h}, [x3], x4
ld1 {v6.8h, v7.8h}, [x3], x4
urshr v16.8h, v16.8h, #6
urshr v17.8h, v17.8h, #6
ext v5.16b, v4.16b, v5.16b, #4
ext v7.16b, v6.16b, v7.16b, #4
uzp1 v18.8h, v16.8h, v17.8h // d16=uuuu|uuuu, d17=vvvv|vvvv
uzp2 v19.8h, v16.8h, v17.8h // d16=uuuu|uuuu, d17=vvvv|vvvv
st1 {v18.\vsize}[0], [x0], x2
st1 {v18.\vsize}[idx2], [x0], x2
st1 {v19.\vsize}[0], [x1], x2
st1 {v19.\vsize}[idx2], [x1], x2
b.gt 5b
ret
endfunc
.endm
// Instantiate mc_chroma_w2_neon (.s lane stores) and mc_chroma_w4_neon (.d).
CHROMA_MC 2, s
CHROMA_MC 4, d
// Width-8 chroma MC. Same three paths as CHROMA_MC (2-D bilinear /
// vertical-only / horizontal-only) but U and V each fill a whole vector,
// so the rows are loaded 32 bytes at a time with ld2 and the second tap
// (x+1) is built by ext across the two 16-byte halves.
// x3 = src (already offset), x4 = src stride (bytes), x0/x1 = dst U/V,
// x2 = dst stride, w15 = height; w9..w12 = cA..cD from CHROMA_MC_START.
function mc_chroma_w8_neon
lsl x2, x2, #1 // dst stride in bytes
CHROMA_MC_START
b.eq 2f // cD == 0: 1-D filter
sub x4, x4, #32 // stride compensates for the #32 post-increment
ld2 {v4.8h, v5.8h}, [x3], #32 // row0: U in v4, V in v5
ld2 {v6.8h, v7.8h}, [x3], x4 // row0: next 8 pairs (for x+1 tap)
ld2 {v20.8h, v21.8h}, [x3], #32 // row1
ld2 {v22.8h, v23.8h}, [x3], x4
dup v0.8h, w9 // cA
dup v1.8h, w10 // cB
ext v24.16b, v4.16b, v6.16b, #2 // row0 U shifted by one sample
ext v26.16b, v6.16b, v4.16b, #2
ext v28.16b, v20.16b, v22.16b, #2 // row1 U shifted
ext v30.16b, v22.16b, v20.16b, #2
ext v25.16b, v5.16b, v7.16b, #2 // row0 V shifted
ext v27.16b, v7.16b, v5.16b, #2
ext v29.16b, v21.16b, v23.16b, #2 // row1 V shifted
ext v31.16b, v23.16b, v21.16b, #2
dup v2.8h, w11 // cC
dup v3.8h, w12 // cD
1: // height loop, interpolate xy
subs w15, w15, #2
mul v16.8h, v4.8h, v0.8h // U = cA*r0 + cB*r0' + cC*r1 + cD*r1'
mul v17.8h, v5.8h, v0.8h // V likewise
mla v16.8h, v24.8h, v1.8h
mla v17.8h, v25.8h, v1.8h
mla v16.8h, v20.8h, v2.8h
mla v17.8h, v21.8h, v2.8h
mla v16.8h, v28.8h, v3.8h
mla v17.8h, v29.8h, v3.8h
urshr v16.8h, v16.8h, #6 // rounding >> 6
urshr v17.8h, v17.8h, #6
st1 {v16.8h}, [x0], x2
st1 {v17.8h}, [x1], x2
ld2 {v4.8h, v5.8h}, [x3], #32 // preload row2 while computing row1 output
ld2 {v6.8h, v7.8h}, [x3], x4
mul v16.8h, v20.8h, v0.8h
mul v17.8h, v21.8h, v0.8h
ext v24.16b, v4.16b, v6.16b, #2
ext v26.16b, v6.16b, v4.16b, #2
mla v16.8h, v28.8h, v1.8h
mla v17.8h, v29.8h, v1.8h
ext v25.16b, v5.16b, v7.16b, #2
ext v27.16b, v7.16b, v5.16b, #2
mla v16.8h, v4.8h, v2.8h
mla v17.8h, v5.8h, v2.8h
mla v16.8h, v24.8h, v3.8h
mla v17.8h, v25.8h, v3.8h
urshr v16.8h, v16.8h, #6
urshr v17.8h, v17.8h, #6
ld2 {v20.8h, v21.8h}, [x3], #32 // preload row3
ld2 {v22.8h, v23.8h}, [x3], x4
ext v28.16b, v20.16b, v22.16b, #2
ext v30.16b, v22.16b, v20.16b, #2
ext v29.16b, v21.16b, v23.16b, #2
ext v31.16b, v23.16b, v21.16b, #2
st1 {v16.8h}, [x0], x2
st1 {v17.8h}, [x1], x2
b.gt 1b
ret
2: // dx or dy are 0
tst w11, w11 // cC == 0 -> horizontal-only path
add w10, w10, w11 // single second tap = cB + cC
dup v0.8h, w9
dup v1.8h, w10
b.eq 4f
ld2 {v4.8h, v5.8h}, [x3], x4
ld2 {v6.8h, v7.8h}, [x3], x4
3: // vertical interpolation loop
subs w15, w15, #2
mul v16.8h, v4.8h, v0.8h
mul v17.8h, v5.8h, v0.8h
mla v16.8h, v6.8h, v1.8h
mla v17.8h, v7.8h, v1.8h
urshr v16.8h, v16.8h, #6
urshr v17.8h, v17.8h, #6
st1 {v16.8h}, [x0], x2
st1 {v17.8h}, [x1], x2
ld2 {v4.8h, v5.8h}, [x3], x4 // rows alternate roles each half-iteration
mul v16.8h, v6.8h, v0.8h
mul v17.8h, v7.8h, v0.8h
ld2 {v6.8h, v7.8h}, [x3], x4
mla v16.8h, v4.8h, v1.8h
mla v17.8h, v5.8h, v1.8h
urshr v16.8h, v16.8h, #6
urshr v17.8h, v17.8h, #6
st1 {v16.8h}, [x0], x2
st1 {v17.8h}, [x1], x2
b.gt 3b
ret
4: // dy is 0
sub x4, x4, #32
ld2 {v4.8h, v5.8h}, [x3], #32
ld2 {v6.8h, v7.8h}, [x3], x4
ext v24.16b, v4.16b, v6.16b, #2
ext v26.16b, v6.16b, v4.16b, #2
ld2 {v20.8h, v21.8h}, [x3], #32
ld2 {v22.8h, v23.8h}, [x3], x4
ext v28.16b, v20.16b, v22.16b, #2
ext v30.16b, v22.16b, v20.16b, #2
ext v25.16b, v5.16b, v7.16b, #2
ext v27.16b, v7.16b, v5.16b, #2
ext v29.16b, v21.16b, v23.16b, #2
ext v31.16b, v23.16b, v21.16b, #2
5: // horizontal interpolation loop
subs w15, w15, #2
mul v16.8h, v4.8h, v0.8h
mul v17.8h, v5.8h, v0.8h
mla v16.8h, v24.8h, v1.8h
mla v17.8h, v25.8h, v1.8h
urshr v16.8h, v16.8h, #6
urshr v17.8h, v17.8h, #6
st1 {v16.8h}, [x0], x2
st1 {v17.8h}, [x1], x2
mul v16.8h, v20.8h, v0.8h
mul v17.8h, v21.8h, v0.8h
ld2 {v4.8h, v5.8h}, [x3], #32 // preload next two rows
ld2 {v6.8h, v7.8h}, [x3], x4
mla v16.8h, v28.8h, v1.8h
mla v17.8h, v29.8h, v1.8h
ld2 {v20.8h,v21.8h}, [x3], #32
ld2 {v22.8h,v23.8h}, [x3], x4
urshr v16.8h, v16.8h, #6
urshr v17.8h, v17.8h, #6
ext v24.16b, v4.16b, v6.16b, #2
ext v26.16b, v6.16b, v4.16b, #2
ext v28.16b, v20.16b, v22.16b, #2
ext v30.16b, v22.16b, v20.16b, #2
ext v29.16b, v21.16b, v23.16b, #2
ext v31.16b, v23.16b, v21.16b, #2
ext v25.16b, v5.16b, v7.16b, #2
ext v27.16b, v7.16b, v5.16b, #2
st1 {v16.8h}, [x0], x2
st1 {v17.8h}, [x1], x2
b.gt 5b
ret
endfunc
// Horizontal 4-tap prefix: v0[i] = p[i]+p[i+1]+p[i+2]+p[i+3] + v5[i],
// where \p2 supplies the overlap past \p1 and v5 holds the previous
// output row's running sum (making the result a 2-D integral row).
.macro integral4h p1, p2
ext v1.16b, \p1\().16b, \p2\().16b, #2 // p[i+1]
ext v2.16b, \p1\().16b, \p2\().16b, #4 // p[i+2]
ext v3.16b, \p1\().16b, \p2\().16b, #6 // p[i+3]
add v0.8h, \p1\().8h, v1.8h
add v4.8h, v2.8h, v3.8h
add v0.8h, v0.8h, v4.8h
add v0.8h, v0.8h, v5.8h // accumulate previous row (v5)
.endm
// integral_init4h: fill one row of the 4x4 integral image.
// x0 = sum (output row), x1 = pixel row, x2 = stride (elements).
// x3 points one output stride above x0 (previous sum row, fed via v5).
// Processes 16 output elements per iteration, alternating v6/v7 as the
// "current" vector so one extra load per half provides the 3-tap overlap.
function integral_init4h_neon, export=1
sub x3, x0, x2, lsl #1 // x3 = previous sum row
lsl x2, x2, #1 // byte count for the loop counter
ld1 {v6.8h,v7.8h}, [x1], #32
1:
subs x2, x2, #32
ld1 {v5.8h}, [x3], #16 // previous-row sums
integral4h v6, v7
ld1 {v6.8h}, [x1], #16
ld1 {v5.8h}, [x3], #16
st1 {v0.8h}, [x0], #16
integral4h v7, v6 // roles of v6/v7 swapped
ld1 {v7.8h}, [x1], #16
st1 {v0.8h}, [x0], #16
b.gt 1b
ret
endfunc
// Horizontal 8-tap prefix: v0[i] = sum of p[i..i+7] + \s[i] (previous
// output row), built as a balanced addition tree over seven ext shifts.
.macro integral8h p1, p2, s
ext v1.16b, \p1\().16b, \p2\().16b, #2
ext v2.16b, \p1\().16b, \p2\().16b, #4
ext v3.16b, \p1\().16b, \p2\().16b, #6
ext v4.16b, \p1\().16b, \p2\().16b, #8
ext v5.16b, \p1\().16b, \p2\().16b, #10
ext v6.16b, \p1\().16b, \p2\().16b, #12
ext v7.16b, \p1\().16b, \p2\().16b, #14
add v0.8h, \p1\().8h, v1.8h
add v2.8h, v2.8h, v3.8h
add v4.8h, v4.8h, v5.8h
add v6.8h, v6.8h, v7.8h
add v0.8h, v0.8h, v2.8h
add v4.8h, v4.8h, v6.8h
add v0.8h, v0.8h, v4.8h
add v0.8h, v0.8h, \s\().8h // + previous row
.endm
// integral_init8h: fill one row of the 8x8 integral image.
// Same structure as integral_init4h, but the previous-row sums go through
// v18 (integral8h uses v1-v7 as scratch).
function integral_init8h_neon, export=1
sub x3, x0, x2, lsl #1 // x3 = previous sum row
lsl x2, x2, #1
ld1 {v16.8h, v17.8h}, [x1], #32
1:
subs x2, x2, #32
ld1 {v18.8h}, [x3], #16
integral8h v16, v17, v18
ld1 {v16.8h}, [x1], #16
ld1 {v18.8h}, [x3], #16
st1 {v0.8h}, [x0], #16
integral8h v17, v16, v18 // v16/v17 roles swapped
ld1 {v17.8h}, [x1], #16
st1 {v0.8h}, [x0], #16
b.gt 1b
ret
endfunc
// integral_init4v: from horizontal-sum rows, derive the vertical 4x4 and
// 4x8 box sums. x0 = sum4 row (also output), x1 = sum8 output,
// x2 = stride. x4 = x0 + 4 strides, x8 = x0 + 8 strides.
function integral_init4v_neon, export=1
mov x3, x0
add x4, x0, x2, lsl #3 // row + 4 strides (stride doubled below)
add x8, x0, x2, lsl #4 // row + 8 strides
lsl x2, x2, #1 // byte counter
sub x2, x2, #16
ld1 {v20.8h, v21.8h, v22.8h}, [x3], #48 // current row (+ overlap)
ld1 {v16.8h, v17.8h, v18.8h}, [x8], #48 // row 8 below
1:
subs x2, x2, #32
ld1 {v24.8h, v25.8h}, [x4], #32 // row 4 below
ext v0.16b, v20.16b, v21.16b, #8 // row shifted by 4 elements
ext v1.16b, v21.16b, v22.16b, #8
ext v2.16b, v16.16b, v17.16b, #8
ext v3.16b, v17.16b, v18.16b, #8
sub v24.8h, v24.8h, v20.8h // sum4 = row[y+4] - row[y]
sub v25.8h, v25.8h, v21.8h
add v0.8h, v0.8h, v20.8h // 4x8 horizontal pairs
add v1.8h, v1.8h, v21.8h
add v2.8h, v2.8h, v16.8h
add v3.8h, v3.8h, v17.8h
st1 {v24.8h}, [x1], #16
st1 {v25.8h}, [x1], #16
mov v20.16b, v22.16b // slide the overlap window
mov v16.16b, v18.16b
sub v0.8h, v2.8h, v0.8h // sum8 = below - above
sub v1.8h, v3.8h, v1.8h
ld1 {v21.8h, v22.8h}, [x3], #32
ld1 {v17.8h, v18.8h}, [x8], #32
st1 {v0.8h}, [x0], #16
st1 {v1.8h}, [x0], #16
b.gt 1b
2:
ret
endfunc
// integral_init8v: sum8[x] = sum8[x + 8*stride] - sum8[x], in place.
// x0 = sum8, x1 = stride (elements). A single 8-element head iteration
// handles a stride that is not a multiple of 16.
function integral_init8v_neon, export=1
add x2, x0, x1, lsl #4 // row 8 strides below
sub x1, x1, #8
ands x3, x1, #16 - 1 // odd 8-element chunk first?
b.eq 1f
subs x1, x1, #8
ld1 {v0.8h}, [x0]
ld1 {v2.8h}, [x2], #16
sub v4.8h, v2.8h, v0.8h
st1 {v4.8h}, [x0], #16
b.le 2f
1:
subs x1, x1, #16
ld1 {v0.8h,v1.8h}, [x0]
ld1 {v2.8h,v3.8h}, [x2], #32
sub v4.8h, v2.8h, v0.8h
sub v5.8h, v3.8h, v1.8h
st1 {v4.8h}, [x0], #16
st1 {v5.8h}, [x0], #16
b.gt 1b
2:
ret
endfunc
// frame_init_lowres_core( pixel *src0, pixel *dst0, pixel *dsth,
// pixel *dstv, pixel *dstc, intptr_t src_stride,
// intptr_t dst_stride, int width, int height )
// Builds the four half-resolution planes used by lookahead: each output
// pixel averages a 2x2 (plus half-pel-shifted) neighbourhood, via ld2
// even/odd deinterleave + urhadd rounding averages. v8-v15 are
// callee-saved and spilled around the kernel.
function frame_init_lowres_core_neon, export=1
ldr w8, [sp] // height
lsl x5, x5, #1 // src stride in bytes
sub x10, x6, w7, uxtw // dst_stride - width
lsl x10, x10, #1
and x10, x10, #~31 // row advance, 32-byte aligned
stp d8, d9, [sp, #-0x40]! // spill callee-saved v8-v15 (low halves)
stp d10, d11, [sp, #0x10]
stp d12, d13, [sp, #0x20]
stp d14, d15, [sp, #0x30]
1:
mov w9, w7 // width
mov x11, x0 // src0
add x12, x0, x5 // src1 = src0 + src_stride
add x13, x0, x5, lsl #1 // src2 = src1 + src_stride
ld2 {v0.8h, v1.8h}, [x11], #32 // even/odd columns of src0
ld2 {v2.8h, v3.8h}, [x11], #32
ld2 {v4.8h, v5.8h}, [x12], #32
ld2 {v6.8h, v7.8h}, [x12], #32
ld2 {v28.8h, v29.8h}, [x13], #32
ld2 {v30.8h, v31.8h}, [x13], #32
urhadd v20.8h, v0.8h, v4.8h // vertical averages, even columns
urhadd v21.8h, v2.8h, v6.8h
urhadd v22.8h, v4.8h, v28.8h
urhadd v23.8h, v6.8h, v30.8h
2:
subs w9, w9, #16
urhadd v24.8h, v1.8h, v5.8h // vertical averages, odd columns
urhadd v25.8h, v3.8h, v7.8h
urhadd v26.8h, v5.8h, v29.8h
urhadd v27.8h, v7.8h, v31.8h
ld2 {v0.8h, v1.8h}, [x11], #32 // preload next 16 columns
ld2 {v2.8h, v3.8h}, [x11], #32
ld2 {v4.8h, v5.8h}, [x12], #32
ld2 {v6.8h, v7.8h}, [x12], #32
ld2 {v28.8h, v29.8h}, [x13], #32
ld2 {v30.8h, v31.8h}, [x13], #32
urhadd v16.8h, v0.8h, v4.8h
urhadd v17.8h, v2.8h, v6.8h
urhadd v18.8h, v4.8h, v28.8h
urhadd v19.8h, v6.8h, v30.8h
ext v8.16b, v20.16b, v21.16b, #2 // x+1 neighbours for h/c planes
ext v9.16b, v21.16b, v16.16b, #2
ext v10.16b, v22.16b, v23.16b, #2
ext v11.16b, v23.16b, v18.16b, #2
urhadd v12.8h, v20.8h, v24.8h // dst0
urhadd v8.8h, v24.8h, v8.8h // dsth
urhadd v24.8h, v21.8h, v25.8h
urhadd v22.8h, v22.8h, v26.8h // dstv
urhadd v10.8h, v26.8h, v10.8h // dstc
urhadd v26.8h, v23.8h, v27.8h
urhadd v9.8h, v25.8h, v9.8h
urhadd v11.8h, v27.8h, v11.8h
st1 {v12.8h}, [x1], #16
st1 {v24.8h}, [x1], #16
st1 {v22.8h}, [x3], #16
st1 {v26.8h}, [x3], #16
st1 {v8.8h, v9.8h}, [x2], #32
st1 {v10.8h, v11.8h}, [x4], #32
b.le 3f
subs w9, w9, #16 // second, unrolled half of the width loop
urhadd v24.8h, v1.8h, v5.8h
urhadd v25.8h, v3.8h, v7.8h
urhadd v26.8h, v5.8h, v29.8h
urhadd v27.8h, v7.8h, v31.8h
ld2 {v0.8h, v1.8h}, [x11], #32
ld2 {v2.8h, v3.8h}, [x11], #32
ld2 {v4.8h, v5.8h}, [x12], #32
ld2 {v6.8h, v7.8h}, [x12], #32
ld2 {v28.8h, v29.8h}, [x13], #32
ld2 {v30.8h, v31.8h}, [x13], #32
urhadd v20.8h, v0.8h, v4.8h
urhadd v21.8h, v2.8h, v6.8h
urhadd v22.8h, v4.8h, v28.8h
urhadd v23.8h, v6.8h, v30.8h
ext v8.16b, v16.16b, v17.16b, #2
ext v9.16b, v17.16b, v20.16b, #2
ext v10.16b, v18.16b, v19.16b, #2
ext v11.16b, v19.16b, v22.16b, #2
urhadd v12.8h, v16.8h, v24.8h
urhadd v13.8h, v17.8h, v25.8h
urhadd v14.8h, v18.8h, v26.8h
urhadd v15.8h, v19.8h, v27.8h
urhadd v16.8h, v24.8h, v8.8h
urhadd v17.8h, v25.8h, v9.8h
urhadd v18.8h, v26.8h, v10.8h
urhadd v19.8h, v27.8h, v11.8h
st1 {v12.8h, v13.8h}, [x1], #32
st1 {v14.8h, v15.8h}, [x3], #32
st1 {v16.8h, v17.8h}, [x2], #32
st1 {v18.8h, v19.8h}, [x4], #32
b.gt 2b
3:
subs w8, w8, #1
add x0, x0, x5, lsl #1 // src advances two rows
add x1, x1, x10
add x2, x2, x10
add x3, x3, x10
add x4, x4, x10
b.gt 1b
ldp d8, d9, [sp] // restore callee-saved regs
ldp d10, d11, [sp, #0x10]
ldp d12, d13, [sp, #0x20]
ldp d14, d15, [sp, #0x30]
add sp, sp, #0x40
ret
endfunc
// Split interleaved U/V source into the fenc chroma buffer
// (dst stride = FENC_STRIDE/2 pixels); shares its body with the fdec
// variant via the load_deinterleave_chroma label below.
function load_deinterleave_chroma_fenc_neon, export=1
mov x4, #FENC_STRIDE/2
lsl x4, x4, #1 // dst stride in bytes
lsl x2, x2, #1 // src stride in bytes
b load_deinterleave_chroma
endfunc
// fdec variant (dst stride = FDEC_STRIDE/2); the shared loop splits two
// interleaved rows per iteration: ld2 separates U (v0/v2) from V (v1/v3),
// then the planes are stored to consecutive dst rows.
function load_deinterleave_chroma_fdec_neon, export=1
mov x4, #FDEC_STRIDE/2
lsl x4, x4, #1
lsl x2, x2, #1
load_deinterleave_chroma: // also the branch target of the fenc variant
ld2 {v0.8h, v1.8h}, [x1], x2
ld2 {v2.8h, v3.8h}, [x1], x2
subs w3, w3, #2
st1 {v0.8h}, [x0], x4 // U row 0
st1 {v1.8h}, [x0], x4 // V row 0
st1 {v2.8h}, [x0], x4 // U row 1
st1 {v3.8h}, [x0], x4 // V row 1
b.gt load_deinterleave_chroma
ret
endfunc
// Re-interleave separate U (x2) and V (x3) planes, FDEC_STRIDE apart,
// into UVUV order at dst (x0, stride x1); two rows per iteration via zip.
function store_interleave_chroma_neon, export=1
mov x5, #FDEC_STRIDE
lsl x5, x5, #1 // src stride in bytes
lsl x1, x1, #1 // dst stride in bytes
1:
ld1 {v0.8h}, [x2], x5 // U row 0
ld1 {v1.8h}, [x3], x5 // V row 0
ld1 {v2.8h}, [x2], x5 // U row 1
ld1 {v3.8h}, [x3], x5 // V row 1
subs w4, w4, #2
zip1 v4.8h, v0.8h, v1.8h // interleave U/V
zip1 v6.8h, v2.8h, v3.8h
zip2 v5.8h, v0.8h, v1.8h
zip2 v7.8h, v2.8h, v3.8h
st1 {v4.8h, v5.8h}, [x0], x1
st1 {v6.8h, v7.8h}, [x0], x1
b.gt 1b
ret
endfunc
// plane_copy_core: copy w5 rows of w4 pixels (x2 -> x0), width rounded up
// to a multiple of 32 pixels (the caller guarantees the padding exists).
// An optional 16-pixel head iteration (label 16) aligns odd widths before
// the main 32-pixel loop (label 32).
function plane_copy_core_neon, export=1
add w8, w4, #31 // 32-bit write clears the upper 32-bit the register
and w4, w8, #~31 // round width up to 32
// safe use of the full reg since negative width makes no sense
sub x1, x1, x4 // strides become row-advance deltas
sub x3, x3, x4
lsl x1, x1, #1
lsl x3, x3, #1
1:
mov w8, w4
16: // 16-pixel head for widths ≡ 16 (mod 32)
tst w8, #16
b.eq 32f
subs w8, w8, #16
ldp q0, q1, [x2], #32
stp q0, q1, [x0], #32
b.eq 0f
32: // 32 pixels (64 bytes) per iteration
subs w8, w8, #32
ldp q0, q1, [x2], #32
ldp q2, q3, [x2], #32
stp q0, q1, [x0], #32
stp q2, q3, [x0], #32
b.gt 32b
0:
subs w5, w5, #1
add x2, x2, x3
add x0, x0, x1
b.gt 1b
ret
endfunc
// plane_copy_swap_core: like plane_copy_core but swaps each interleaved
// U/V pair while copying (rev32 on 16-bit lanes swaps the two halfwords
// of every 32-bit pair). Width is in pairs, hence the initial <<1.
function plane_copy_swap_core_neon, export=1
lsl w4, w4, #1 // pairs -> pixels
add w8, w4, #31 // 32-bit write clears the upper 32-bit the register
and w4, w8, #~31
sub x1, x1, x4
sub x3, x3, x4
lsl x1, x1, #1
lsl x3, x3, #1
1:
mov w8, w4
tbz w4, #4, 32f // 16-pixel head only if bit 4 of width set
subs w8, w8, #16
ld1 {v0.8h, v1.8h}, [x2], #32
rev32 v0.8h, v0.8h // swap each UV halfword pair
rev32 v1.8h, v1.8h
st1 {v0.8h, v1.8h}, [x0], #32
b.eq 0f
32:
subs w8, w8, #32
ld1 {v0.8h ,v1.8h, v2.8h, v3.8h}, [x2], #64
rev32 v20.8h, v0.8h
rev32 v21.8h, v1.8h
rev32 v22.8h, v2.8h
rev32 v23.8h, v3.8h
st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [x0], #64
b.gt 32b
0:
subs w5, w5, #1
add x2, x2, x3
add x0, x0, x1
b.gt 1b
ret
endfunc
// plane_copy_deinterleave: split interleaved UVUV source (x4) into U (x0)
// and V (x2) planes; w6 = width (pixels per plane), w7 = height.
// Width is rounded up to 16 for the first row; later rows reload w6.
function plane_copy_deinterleave_neon, export=1
add w9, w6, #15
and w9, w9, #~15 // first-row loop count, rounded to 16
sub x1, x1, x9 // convert strides to row-advance deltas
sub x3, x3, x9
sub x5, x5, x9, lsl #1 // src consumes 2 elements per output pixel
lsl x1, x1, #1
lsl x3, x3, #1
lsl x5, x5, #1
1:
ld2 {v0.8h, v1.8h}, [x4], #32 // even lanes -> U, odd -> V
ld2 {v2.8h, v3.8h}, [x4], #32
subs w9, w9, #16
st1 {v0.8h}, [x0], #16
st1 {v2.8h}, [x0], #16
st1 {v1.8h}, [x2], #16
st1 {v3.8h}, [x2], #16
b.gt 1b
add x4, x4, x5 // advance to next row
subs w7, w7, #1
add x0, x0, x1
add x2, x2, x3
mov w9, w6 // reset width counter
b.gt 1b
ret
endfunc
// plane_copy_interleave_core: merge U (x2) and V (x4) planes into
// interleaved UVUV at x0; st2 performs the interleave. Width handling
// mirrors plane_copy_deinterleave above.
function plane_copy_interleave_core_neon, export=1
add w9, w6, #15
and w9, w9, #0xfffffff0 // round width up to 16
sub x1, x1, x9, lsl #1 // dst produces 2 elements per input pixel
sub x3, x3, x9
sub x5, x5, x9
lsl x1, x1, #1
lsl x3, x3, #1
lsl x5, x5, #1
1:
ld1 {v0.8h}, [x2], #16 // U
ld1 {v1.8h}, [x4], #16 // V
ld1 {v2.8h}, [x2], #16
ld1 {v3.8h}, [x4], #16
subs w9, w9, #16
st2 {v0.8h, v1.8h}, [x0], #32 // interleave on store
st2 {v2.8h, v3.8h}, [x0], #32
b.gt 1b
subs w7, w7, #1
add x0, x0, x1
add x2, x2, x3
add x4, x4, x5
mov w9, w6
b.gt 1b
ret
endfunc
// Shared loop tail for plane_copy_deinterleave_rgb: stores the three
// deinterleaved channels (v0/v1/v2) and handles both the inner (width,
// x11) and outer (height, w10) loop bookkeeping. The enclosing function
// provides the "1:" label and the per-iteration ld3/ld4.
.macro deinterleave_rgb
subs x11, x11, #8
st1 {v0.8h}, [x0], #16 // channel 0
st1 {v1.8h}, [x2], #16 // channel 1
st1 {v2.8h}, [x4], #16 // channel 2
b.gt 1b
subs w10, w10, #1 // next row
add x0, x0, x1
add x2, x2, x3
add x4, x4, x5
add x6, x6, x7
mov x11, x9 // reset width counter
b.gt 1b
.endm
// plane_copy_deinterleave_rgb: split packed 3- or 4-component pixels
// (src x6) into three planes (x0/x2/x4). Stack args: w8 = pixel width in
// components (3 or 4), x9 = width, x10 = height; macOS packs 32-bit args
// differently, hence the SYS_MACOSX split.
function plane_copy_deinterleave_rgb_neon, export=1
#if SYS_MACOSX
ldr w8, [sp]
ldp w9, w10, [sp, #4]
#else
ldr x8, [sp]
ldp x9, x10, [sp, #8]
#endif
cmp w8, #3 // 3 components (RGB) or 4 (RGBA)?
uxtw x9, w9
add x11, x9, #7
and x11, x11, #~7 // width rounded up to 8
sub x1, x1, x11 // plane strides -> row-advance deltas
sub x3, x3, x11
sub x5, x5, x11
lsl x1, x1, #1
lsl x3, x3, #1
lsl x5, x5, #1
b.ne 4f
sub x7, x7, x11, lsl #1 // src delta for 3 components: stride - 3*width
sub x7, x7, x11
lsl x7, x7, #1
1:
ld3 {v0.8h, v1.8h, v2.8h}, [x6], #48 // RGB path
deinterleave_rgb
ret
4:
sub x7, x7, x11, lsl #2 // src delta for 4 components
lsl x7, x7, #1
1:
ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [x6], #64 // RGBA path (alpha dropped)
deinterleave_rgb
ret
endfunc
// void hpel_filter( pixel *dsth, pixel *dstv, pixel *dstc, pixel *src,
// intptr_t stride, int width, int height, int16_t *buf )
// 6-tap (1,-5,20,20,-5,1) half-pel filter producing the three half-pel
// planes. Software-pipelined: each 32-pixel iteration stores h/v for the
// current chunk, computes c from the previous chunk's 32-bit vertical
// sums (v1/v20, v5/v2), and preloads the next chunk. v8-v15 are spilled;
// an extra 0x50-byte frame ([sp] and [sp,#0x10..0x40]) spills q-register
// temporaries that don't fit in v0-v31 during the c computation.
function hpel_filter_neon, export=1
lsl x5, x5, #1 // width in bytes
ubfm x9, x3, #3, #7 // src misalignment within 32 bytes
add w15, w5, w9
sub x13, x3, x9 // align src
sub x10, x0, x9 // rewind dst pointers to match
sub x11, x1, x9
sub x12, x2, x9
movi v30.8h, #5 // filter taps
movi v31.8h, #20
lsl x4, x4, #1 // stride in bytes
stp d8, d9, [sp, #-0x40]! // spill callee-saved v8-v15
stp d10, d11, [sp, #0x10]
stp d12, d13, [sp, #0x20]
stp d14, d15, [sp, #0x30]
str q0, [sp, #-0x50]! // scratch spill area
1: // line start
mov x3, x13
mov x2, x12
mov x1, x11
mov x0, x10
add x7, x3, #32 // src pointer next 16b for horiz filter
mov x5, x15 // restore width
sub x3, x3, x4, lsl #1 // src - 2*stride
ld1 {v28.8h, v29.8h}, [x7], #32 // src[16:31]
add x9, x3, x5 // holds src - 2*stride + width
ld1 {v8.8h, v9.8h}, [x3], x4 // src-2*stride[0:15]
ld1 {v10.8h, v11.8h}, [x3], x4 // src-1*stride[0:15]
ld1 {v12.8h, v13.8h}, [x3], x4 // src-0*stride[0:15]
ld1 {v14.8h, v15.8h}, [x3], x4 // src+1*stride[0:15]
ld1 {v16.8h, v17.8h}, [x3], x4 // src+2*stride[0:15]
ld1 {v18.8h, v19.8h}, [x3], x4 // src+3*stride[0:15]
// NOTE(review): v7 is read here before being written in this function
// (it is set from v13 only inside the loop, L8513) — first-iteration
// left-edge taps appear to come from whatever v7 held; confirm against
// upstream that the left border makes this harmless.
ext v22.16b, v7.16b, v12.16b, #12
ext v23.16b, v12.16b, v13.16b, #12
uaddl v1.4s, v8.4h, v18.4h // vertical: p-2 + p+3
uaddl2 v20.4s, v8.8h, v18.8h
ext v24.16b, v12.16b, v13.16b, #6
ext v25.16b, v13.16b, v28.16b, #6
umlsl v1.4s, v10.4h, v30.4h // - 5*(p-1)
umlsl2 v20.4s, v10.8h, v30.8h
ext v26.16b, v7.16b, v12.16b, #14
ext v27.16b, v12.16b, v13.16b, #14
umlal v1.4s, v12.4h, v31.4h // + 20*p0
umlal2 v20.4s, v12.8h, v31.8h
ext v3.16b, v12.16b, v13.16b, #2
ext v4.16b, v13.16b, v28.16b, #2
umlal v1.4s, v14.4h, v31.4h // + 20*p+1
umlal2 v20.4s, v14.8h, v31.8h
ext v21.16b, v12.16b, v13.16b, #4
ext v5.16b, v13.16b, v28.16b, #4
umlsl v1.4s, v16.4h, v30.4h // - 5*(p+2)
umlsl2 v20.4s, v16.8h, v30.8h
2: // next 16 pixel of line
subs x5, x5, #32
sub x3, x9, x5 // src - 2*stride += 16
uaddl v8.4s, v22.4h, v24.4h // horizontal h-plane filter
uaddl2 v22.4s, v22.8h, v24.8h
uaddl v10.4s, v23.4h, v25.4h
uaddl2 v23.4s, v23.8h, v25.8h
umlsl v8.4s, v26.4h, v30.4h
umlsl2 v22.4s, v26.8h, v30.8h
umlsl v10.4s, v27.4h, v30.4h
umlsl2 v23.4s, v27.8h, v30.8h
umlal v8.4s, v12.4h, v31.4h
umlal2 v22.4s, v12.8h, v31.8h
umlal v10.4s, v13.4h, v31.4h
umlal2 v23.4s, v13.8h, v31.8h
umlal v8.4s, v3.4h, v31.4h
umlal2 v22.4s, v3.8h, v31.8h
umlal v10.4s, v4.4h, v31.4h
umlal2 v23.4s, v4.8h, v31.8h
umlsl v8.4s, v21.4h, v30.4h
umlsl2 v22.4s, v21.8h, v30.8h
umlsl v10.4s, v5.4h, v30.4h
umlsl2 v23.4s, v5.8h, v30.8h
uaddl v5.4s, v9.4h, v19.4h // vertical filter, high 8 pixels
uaddl2 v2.4s, v9.8h, v19.8h
sqrshrun v8.4h, v8.4s, #5 // h output: (x+16)>>5, saturating
sqrshrun2 v8.8h, v22.4s, #5
sqrshrun v10.4h, v10.4s, #5
sqrshrun2 v10.8h, v23.4s, #5
mov v6.16b, v12.16b
mov v7.16b, v13.16b
mvni v23.8h, #0xfc, lsl #8 // PIXEL_MAX = 0x03ff
umin v8.8h, v8.8h, v23.8h
umin v10.8h, v10.8h, v23.8h
st1 {v8.8h}, [x0], #16 // dsth
st1 {v10.8h}, [x0], #16
umlsl v5.4s, v11.4h, v30.4h
umlsl2 v2.4s, v11.8h, v30.8h
ld1 {v8.8h, v9.8h}, [x3], x4 // preload next chunk's 6 rows
umlal v5.4s, v13.4h, v31.4h
umlal2 v2.4s, v13.8h, v31.8h
ld1 {v10.8h, v11.8h}, [x3], x4
umlal v5.4s, v15.4h, v31.4h
umlal2 v2.4s, v15.8h, v31.8h
ld1 {v12.8h, v13.8h}, [x3], x4
umlsl v5.4s, v17.4h, v30.4h
umlsl2 v2.4s, v17.8h, v30.8h
ld1 {v14.8h, v15.8h}, [x3], x4
sqrshrun v4.4h, v5.4s, #5 // v output
sqrshrun2 v4.8h, v2.4s, #5
sqrshrun v18.4h, v1.4s, #5
sqrshrun2 v18.8h, v20.4s, #5
mvni v17.8h, #0xfc, lsl #8
// NOTE(review): smin here vs umin for the h plane above — sqrshrun
// output is unsigned; verify against upstream whether signed min is
// intentional (values provably < 0x8000) or a latent clamp bug.
smin v4.8h, v4.8h, v17.8h
smin v18.8h, v18.8h, v17.8h
st1 {v18.8h}, [x1], #16 // dstv
st1 {v4.8h}, [x1], #16
ld1 {v16.8h, v17.8h}, [x3], x4 // src+2*stride[0:15]
ld1 {v18.8h, v19.8h}, [x3], x4 // src+3*stride[0:15]
str q9, [sp, #0x10] // spill next-chunk rows while computing c
str q15, [sp, #0x20]
str q17, [sp, #0x30]
str q19, [sp, #0x40]
ldr q28, [sp] // previous chunk's last vertical sums
ext v22.16b, v28.16b, v1.16b, #8 // c plane: 6-tap over 32-bit v-sums
ext v9.16b, v1.16b, v20.16b, #8
ext v26.16b, v1.16b, v20.16b, #12
ext v17.16b, v20.16b, v5.16b, #12
ext v23.16b, v28.16b, v1.16b, #12
ext v19.16b, v1.16b, v20.16b, #12
uaddl v3.4s, v8.4h, v18.4h // vertical sums for the next chunk
uaddl2 v15.4s, v8.8h, v18.8h
umlsl v3.4s, v10.4h, v30.4h
umlsl2 v15.4s, v10.8h, v30.8h
umlal v3.4s, v12.4h, v31.4h
umlal2 v15.4s, v12.8h, v31.8h
umlal v3.4s, v14.4h, v31.4h
umlal2 v15.4s, v14.8h, v31.8h
umlsl v3.4s, v16.4h, v30.4h
umlsl2 v15.4s, v16.8h, v30.8h
add v4.4s, v22.4s, v26.4s
add v26.4s, v9.4s, v17.4s
ext v25.16b, v1.16b, v20.16b, #8
ext v22.16b, v20.16b, v5.16b, #8
ext v24.16b, v1.16b, v20.16b, #4
ext v9.16b, v20.16b, v5.16b, #4
add v31.4s, v23.4s, v25.4s
add v19.4s, v19.4s, v22.4s
add v6.4s, v24.4s, v1.4s
add v17.4s, v9.4s, v20.4s
sub v4.4s, v4.4s, v31.4s // a-b
sub v26.4s, v26.4s, v19.4s // a-b
sub v31.4s, v31.4s, v6.4s // b-c
sub v19.4s, v19.4s, v17.4s // b-c
ext v22.16b, v20.16b, v5.16b, #8 // same for the high 8 outputs
ext v9.16b, v5.16b, v2.16b, #8
ext v24.16b, v5.16b, v2.16b, #12
ext v28.16b, v2.16b, v3.16b, #12
ext v23.16b, v20.16b, v5.16b, #12
ext v30.16b, v5.16b, v2.16b, #12
ext v25.16b, v5.16b, v2.16b, #8
ext v29.16b, v2.16b, v3.16b, #8
add v22.4s, v22.4s, v24.4s
add v9.4s, v9.4s, v28.4s
add v23.4s, v23.4s, v25.4s
add v29.4s, v29.4s, v30.4s
ext v24.16b, v5.16b, v2.16b, #4
ext v28.16b, v2.16b, v3.16b, #4
add v24.4s, v24.4s, v5.4s
add v28.4s, v28.4s, v2.4s
sub v22.4s, v22.4s, v23.4s
sub v9.4s, v9.4s, v29.4s
sub v23.4s, v23.4s, v24.4s
sub v29.4s, v29.4s, v28.4s
sshr v4.4s, v4.4s, #2 // ((a-b)>>2 - (b-c)) >> 2 + c  (tap folding)
sshr v0.4s, v26.4s, #2
sshr v22.4s, v22.4s, #2
sshr v9.4s, v9.4s, #2
sub v4.4s, v4.4s, v31.4s
sub v0.4s, v0.4s, v19.4s
sub v22.4s, v22.4s, v23.4s
sub v9.4s, v9.4s, v29.4s
sshr v4.4s, v4.4s, #2
sshr v0.4s, v0.4s, #2
sshr v22.4s, v22.4s, #2
sshr v9.4s, v9.4s, #2
add v4.4s, v4.4s, v6.4s
add v0.4s, v0.4s, v17.4s
add v22.4s, v22.4s, v24.4s
add v9.4s, v9.4s, v28.4s
str q2, [sp] // keep last v-sum for next iteration's c
sqrshrun v4.4h, v4.4s, #6
sqrshrun2 v4.8h, v0.4s, #6
sqrshrun v22.4h, v22.4s, #6
sqrshrun2 v22.8h, v9.4s, #6
mov v0.16b, v5.16b
ld1 {v28.8h, v29.8h}, [x7], #32 // src[16:31]
ldr q9, [sp, #0x10] // restore spilled rows
ldr q17, [sp, #0x30]
ldr q19, [sp, #0x40]
ext v26.16b, v7.16b, v12.16b, #14
ext v27.16b, v12.16b, v13.16b, #14
mvni v25.8h, 0xfc, lsl #8 // PIXEL_MAX (immediate without '#'; GAS accepts)
smin v22.8h, v22.8h, v25.8h // NOTE(review): see smin remark above
smin v4.8h, v4.8h, v25.8h
st1 {v4.8h}, [x2], #16 // dstc
st1 {v22.8h}, [x2], #16
mov v1.16b, v3.16b // rotate pipeline state
mov v20.16b, v15.16b
ldr q15, [sp, #0x20]
ext v22.16b, v7.16b, v12.16b, #12
ext v23.16b, v12.16b, v13.16b, #12
ext v3.16b, v12.16b, v13.16b, #2
ext v4.16b, v13.16b, v28.16b, #2
ext v21.16b, v12.16b, v13.16b, #4
ext v5.16b, v13.16b, v28.16b, #4
ext v24.16b, v12.16b, v13.16b, #6
ext v25.16b, v13.16b, v28.16b, #6
movi v30.8h, #5 // taps were clobbered above; rematerialize
movi v31.8h, #20
b.gt 2b
subs w6, w6, #1 // next line
add x10, x10, x4
add x11, x11, x4
add x12, x12, x4
add x13, x13, x4
b.gt 1b
add sp, sp, #0x50 // drop scratch area
ldp d8, d9, [sp] // restore callee-saved regs
ldp d10, d11, [sp, #0x10]
ldp d12, d13, [sp, #0x20]
ldp d14, d15, [sp, #0x30]
add sp, sp, #0x40
ret
endfunc
#endif
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,445
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/ADC/ADC_SingleConversion_TriggerSW/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
;   <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp ; label at the TOP of the stack (stack grows downward)
; <h> Heap Configuration
;   <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base ; heap grows upward from here
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8 ; declare 8-byte stack alignment (AAPCS)
THUMB ; Cortex-M0 executes Thumb only
; Vector Table Mapped to Address 0 at Reset
; Entry 0 is the initial stack pointer; entries 1..15 are Cortex-M0 system
; exceptions; entries from WWDG onward are the STM32F072 peripheral IRQs
; in NVIC position order. All handlers are weak (see Default_Handler).
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TS
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
; Calls SystemInit (clock/system setup) then jumps to the C library entry
; __main, which initializes RW/ZI data and calls main(). BLX/BX via a
; register because Cortex-M0 has no long-range direct branch to ARM/Thumb
; interwork targets.
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0 ; never returns
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
; Each is exported WEAK so an application-defined handler of the same name
; overrides it at link time; the default traps in a busy loop (B .).
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
; Default_Handler: single catch-all for every peripheral IRQ. All the IRQ
; labels below alias the same busy loop; each EXPORT is WEAK so the
; application can supply a real handler with the same name.
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
; All labels resolve to the same address: the loop below.
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
; With MicroLIB, only the stack/heap symbols are exported; otherwise the
; two-region model is used and __user_initial_stackheap returns the heap
; base/limit (R0/R2) and stack base/limit (R1/R3) to the C library.
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem ; heap base
LDR R1, =(Stack_Mem + Stack_Size) ; stack top
LDR R2, = (Heap_Mem + Heap_Size) ; heap limit
LDR R3, = Stack_Mem ; stack limit
BX LR
ALIGN
ENDIF
END
|
aestream/faery
| 29,163
|
src/mp4/x264/common/aarch64/deblock-a.S
|
/*****************************************************************************
* deblock.S: aarch64 deblocking
*****************************************************************************
* Copyright (C) 2009-2024 x264 project
*
* Authors: Mans Rullgard <mans@mansr.com>
* Janne Grunau <janne-x264@jannau.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "asm.S"
#include "deblock-a-common.S"
// Core H.264 luma deblocking filter for bS < 4, 16 pixels per invocation.
// Register contract (established by the deblock_{v,h}_luma_neon callers):
//   v20=p2 v18=p1 v16=p0 | v0=q0 v2=q1 v4=q2   (one edge-parallel line each)
//   w2 = alpha, w3 = beta
//   v24.s[0] = the four per-4x4-block tc0 bytes -- loaded by
//   h264_loop_filter_start in deblock-a-common.S (assumed; confirm there).
// Outputs: v17 = new p1, v16 = new p0, v0 = new q0, v19 = new q1.
.macro h264_loop_filter_luma
dup v22.16b, w2 // alpha
// The interleaved uxtl/sli sequence on v24 replicates each tc0 byte across
// its group of 4 pixel lanes (byte -> halfword -> word spreading).
uxtl v24.8h, v24.8b
uabd v21.16b, v16.16b, v0.16b // abs(p0 - q0)
uxtl v24.4s, v24.4h
uabd v28.16b, v18.16b, v16.16b // abs(p1 - p0)
sli v24.8h, v24.8h, #8
uabd v30.16b, v2.16b, v0.16b // abs(q1 - q0)
sli v24.4s, v24.4s, #16
cmhi v21.16b, v22.16b, v21.16b // < alpha
dup v22.16b, w3 // beta
// tc0 < 0 marks "do not filter this block"; those lanes are masked out of
// the edge-condition mask below.
cmlt v23.16b, v24.16b, #0
cmhi v28.16b, v22.16b, v28.16b // < beta
cmhi v30.16b, v22.16b, v30.16b // < beta
bic v21.16b, v21.16b, v23.16b
uabd v17.16b, v20.16b, v16.16b // abs(p2 - p0)
and v21.16b, v21.16b, v28.16b
uabd v19.16b, v4.16b, v0.16b // abs(q2 - q0)
cmhi v17.16b, v22.16b, v17.16b // < beta
and v21.16b, v21.16b, v30.16b
cmhi v19.16b, v22.16b, v19.16b // < beta
and v17.16b, v17.16b, v21.16b // p1-update condition
and v19.16b, v19.16b, v21.16b // q1-update condition
and v24.16b, v24.16b, v21.16b // tc0 restricted to active lanes
// tc = tc0 + (p2-cond ? 1 : 0) + (q2-cond ? 1 : 0), built via subtracting
// the all-ones condition masks (each -1 per active lane) into v21.
urhadd v28.16b, v16.16b, v0.16b
sub v21.16b, v24.16b, v17.16b
uqadd v23.16b, v18.16b, v24.16b
uhadd v20.16b, v20.16b, v28.16b
sub v21.16b, v21.16b, v19.16b
uhadd v28.16b, v4.16b, v28.16b
umin v23.16b, v23.16b, v20.16b
uqsub v22.16b, v18.16b, v24.16b
uqadd v4.16b, v2.16b, v24.16b
umax v23.16b, v23.16b, v22.16b // p1' clipped to p1 +/- tc0
uqsub v22.16b, v2.16b, v24.16b
umin v28.16b, v4.16b, v28.16b
uxtl v4.8h, v0.8b
umax v28.16b, v28.16b, v22.16b // q1' clipped to q1 +/- tc0
// delta = clip( ((q0-p0)*4 + (p1-q1) + 4) >> 3, -tc, tc ), widened to 16-bit
// to avoid overflow, rounding provided by rshrn.
uxtl2 v20.8h, v0.16b
usubw v4.8h, v4.8h, v16.8b
usubw2 v20.8h, v20.8h, v16.16b
shl v4.8h, v4.8h, #2
shl v20.8h, v20.8h, #2
uaddw v4.8h, v4.8h, v18.8b
uaddw2 v20.8h, v20.8h, v18.16b
usubw v4.8h, v4.8h, v2.8b
usubw2 v20.8h, v20.8h, v2.16b
rshrn v4.8b, v4.8h, #3
rshrn2 v4.16b, v20.8h, #3
bsl v17.16b, v23.16b, v18.16b // p1 updated only where its condition holds
bsl v19.16b, v28.16b, v2.16b // q1 likewise
neg v23.16b, v21.16b
uxtl v28.8h, v16.8b
smin v4.16b, v4.16b, v21.16b
uxtl2 v21.8h, v16.16b
smax v4.16b, v4.16b, v23.16b // delta clipped to +/- tc
uxtl v22.8h, v0.8b
uxtl2 v24.8h, v0.16b
saddw v28.8h, v28.8h, v4.8b // p0 + delta
saddw2 v21.8h, v21.8h, v4.16b
ssubw v22.8h, v22.8h, v4.8b // q0 - delta
ssubw2 v24.8h, v24.8h, v4.16b
sqxtun v16.8b, v28.8h // saturate back to u8
sqxtun2 v16.16b, v21.8h
sqxtun v0.8b, v22.8h
sqxtun2 v0.16b, v24.8h
.endm
// void deblock_v_luma_neon( uint8_t *pix, intptr_t stride, int alpha, int beta, int8_t *tc0 )
// Vertical-edge luma deblock (filtering across a horizontal row boundary).
// x0 points at the q0 row; rows p2..q2 are gathered around it, filtered by
// h264_loop_filter_luma, and the four changed rows p1/p0/q0/q1 are stored.
function deblock_v_luma_neon, export=1
h264_loop_filter_start
ld1 {v0.16b}, [x0], x1 // q0
ld1 {v2.16b}, [x0], x1 // q1
ld1 {v4.16b}, [x0], x1 // q2
sub x0, x0, x1, lsl #2 // rewind 6 rows total:
sub x0, x0, x1, lsl #1 // x0 -> p2
ld1 {v20.16b}, [x0], x1 // p2
ld1 {v18.16b}, [x0], x1 // p1
ld1 {v16.16b}, [x0], x1 // p0
h264_loop_filter_luma
sub x0, x0, x1, lsl #1 // x0 -> p1
st1 {v17.16b}, [x0], x1 // p1
st1 {v16.16b}, [x0], x1 // p0
st1 {v0.16b}, [x0], x1 // q0
st1 {v19.16b}, [x0] // q1
ret
endfunc
// void deblock_h_luma_neon( uint8_t *pix, intptr_t stride, int alpha, int beta, int8_t *tc0 )
// Horizontal-edge luma deblock (filtering across a vertical column boundary).
// Loads 16 rows of 8 bytes starting 4 pixels left of the edge, transposes so
// each pixel column becomes a vector, runs the row-oriented filter macro,
// transposes the 4 modified columns back and stores 4 bytes (p1 p0 q0 q1)
// per row.
function deblock_h_luma_neon, export=1
h264_loop_filter_start
sub x0, x0, #4 // x0 -> p3 of row 0
ld1 {v6.8b}, [x0], x1
ld1 {v20.8b}, [x0], x1
ld1 {v18.8b}, [x0], x1
ld1 {v16.8b}, [x0], x1
ld1 {v0.8b}, [x0], x1
ld1 {v2.8b}, [x0], x1
ld1 {v4.8b}, [x0], x1
ld1 {v26.8b}, [x0], x1
ld1 {v6.d}[1], [x0], x1 // rows 8..15 fill the high halves
ld1 {v20.d}[1], [x0], x1
ld1 {v18.d}[1], [x0], x1
ld1 {v16.d}[1], [x0], x1
ld1 {v0.d}[1], [x0], x1
ld1 {v2.d}[1], [x0], x1
ld1 {v4.d}[1], [x0], x1
ld1 {v26.d}[1], [x0], x1
transpose_8x16.b v6, v20, v18, v16, v0, v2, v4, v26, v21, v23
h264_loop_filter_luma
// Only p1/p0/q0/q1 changed; transpose just those four columns back.
transpose_4x16.b v17, v16, v0, v19, v21, v23, v25, v27
sub x0, x0, x1, lsl #4 // rewind 16 rows
add x0, x0, #2 // x0 -> p1 of row 0
st1 {v17.s}[0], [x0], x1 // each .s lane holds p1 p0 q0 q1 of one row
st1 {v16.s}[0], [x0], x1
st1 {v0.s}[0], [x0], x1
st1 {v19.s}[0], [x0], x1
st1 {v17.s}[1], [x0], x1
st1 {v16.s}[1], [x0], x1
st1 {v0.s}[1], [x0], x1
st1 {v19.s}[1], [x0], x1
st1 {v17.s}[2], [x0], x1
st1 {v16.s}[2], [x0], x1
st1 {v0.s}[2], [x0], x1
st1 {v19.s}[2], [x0], x1
st1 {v17.s}[3], [x0], x1
st1 {v16.s}[3], [x0], x1
st1 {v0.s}[3], [x0], x1
st1 {v19.s}[3], [x0], x1
ret
endfunc
// Common entry for the intra (bS == 4) filters.
// In: w2 = alpha, w3 = beta. If both are zero there is nothing to filter:
// NOTE the `ret` below returns from the *expanding function*, not just the
// macro -- callers rely on this early-out.
// Out: v30 = alpha broadcast, v31 = beta broadcast. Clobbers w4.
.macro h264_loop_filter_start_intra
orr w4, w2, w3
cmp w4, #0
b.ne 1f
ret
1:
dup v30.16b, w2 // alpha
dup v31.16b, w3 // beta
.endm
// H.264 luma intra (bS == 4) deblocking filter, 16 pixels per invocation.
// Register contract (see deblock_v_luma_intra_neon loads):
//   v4=p3 v5=p2 v6=p1 v7=p0 | v0=q0 v1=q1 v2=q2 v3=q3
//   v30 = alpha, v31 = beta (set up by h264_loop_filter_start_intra)
// Outputs in place: v5/v6/v7 (p2/p1/p0) and v0/v1/v2 (q0/q1/q2).
// If no lane passes the edge test, branches to label `9:` which the
// expanding function must provide (both callers place it before `ret`).
.macro h264_loop_filter_luma_intra
uabd v16.16b, v7.16b, v0.16b // abs(p0 - q0)
uabd v17.16b, v6.16b, v7.16b // abs(p1 - p0)
uabd v18.16b, v1.16b, v0.16b // abs(q1 - q0)
cmhi v19.16b, v30.16b, v16.16b // < alpha
cmhi v17.16b, v31.16b, v17.16b // < beta
cmhi v18.16b, v31.16b, v18.16b // < beta
movi v29.16b, #2
ushr v30.16b, v30.16b, #2 // alpha >> 2
add v30.16b, v30.16b, v29.16b // (alpha >> 2) + 2
cmhi v16.16b, v30.16b, v16.16b // < (alpha >> 2) + 2
and v19.16b, v19.16b, v17.16b
and v19.16b, v19.16b, v18.16b // v19 = per-lane "filter this edge" mask
// Collapse the 128-bit mask to 64 bits so one scalar compare can test
// "any lane active"; skip all the work if none are.
shrn v20.8b, v19.8h, #4
mov x4, v20.d[0]
cbz x4, 9f
// Weak variant: p0' = (2*p1 + p0 + q1 + 2) >> 2 (and mirrored for q0').
ushll v20.8h, v6.8b, #1
ushll v22.8h, v1.8b, #1
ushll2 v21.8h, v6.16b, #1
ushll2 v23.8h, v1.16b, #1
uaddw v20.8h, v20.8h, v7.8b
uaddw v22.8h, v22.8h, v0.8b
uaddw2 v21.8h, v21.8h, v7.16b
uaddw2 v23.8h, v23.8h, v0.16b
uaddw v20.8h, v20.8h, v1.8b
uaddw v22.8h, v22.8h, v6.8b
uaddw2 v21.8h, v21.8h, v1.16b
uaddw2 v23.8h, v23.8h, v6.16b
rshrn v24.8b, v20.8h, #2 // p0'_1
rshrn v25.8b, v22.8h, #2 // q0'_1
rshrn2 v24.16b, v21.8h, #2 // p0'_1
rshrn2 v25.16b, v23.8h, #2 // q0'_1
// Strong variant eligibility per side (spec 8.7.2.4 conditions).
uabd v17.16b, v5.16b, v7.16b // abs(p2 - p0)
uabd v18.16b, v2.16b, v0.16b // abs(q2 - q0)
cmhi v17.16b, v31.16b, v17.16b // < beta
cmhi v18.16b, v31.16b, v18.16b // < beta
and v17.16b, v16.16b, v17.16b // if_2 && if_3
and v18.16b, v16.16b, v18.16b // if_2 && if_4
not v30.16b, v17.16b
not v31.16b, v18.16b
and v30.16b, v30.16b, v19.16b // if_1 && !(if_2 && if_3)
and v31.16b, v31.16b, v19.16b // if_1 && !(if_2 && if_4)
and v17.16b, v19.16b, v17.16b // if_1 && if_2 && if_3
and v18.16b, v19.16b, v18.16b // if_1 && if_2 && if_4
//calc p, v7, v6, v5, v4, v17, v7, v6, v5, v4
uaddl v26.8h, v5.8b, v7.8b
uaddl2 v27.8h, v5.16b, v7.16b
uaddw v26.8h, v26.8h, v0.8b
uaddw2 v27.8h, v27.8h, v0.16b
add v20.8h, v20.8h, v26.8h
add v21.8h, v21.8h, v27.8h
uaddw v20.8h, v20.8h, v0.8b
uaddw2 v21.8h, v21.8h, v0.16b
rshrn v20.8b, v20.8h, #3 // p0'_2
rshrn2 v20.16b, v21.8h, #3 // p0'_2
uaddw v26.8h, v26.8h, v6.8b
uaddw2 v27.8h, v27.8h, v6.16b
rshrn v21.8b, v26.8h, #2 // p1'_2
rshrn2 v21.16b, v27.8h, #2 // p1'_2
uaddl v28.8h, v4.8b, v5.8b
uaddl2 v29.8h, v4.16b, v5.16b
shl v28.8h, v28.8h, #1
shl v29.8h, v29.8h, #1
add v28.8h, v28.8h, v26.8h
add v29.8h, v29.8h, v27.8h
rshrn v19.8b, v28.8h, #3 // p2'_2
rshrn2 v19.16b, v29.8h, #3 // p2'_2
//calc q, v0, v1, v2, v3, v18, v0, v1, v2, v3
uaddl v26.8h, v2.8b, v0.8b
uaddl2 v27.8h, v2.16b, v0.16b
uaddw v26.8h, v26.8h, v7.8b
uaddw2 v27.8h, v27.8h, v7.16b
add v22.8h, v22.8h, v26.8h
add v23.8h, v23.8h, v27.8h
uaddw v22.8h, v22.8h, v7.8b
uaddw2 v23.8h, v23.8h, v7.16b
rshrn v22.8b, v22.8h, #3 // q0'_2
rshrn2 v22.16b, v23.8h, #3 // q0'_2
uaddw v26.8h, v26.8h, v1.8b
uaddw2 v27.8h, v27.8h, v1.16b
rshrn v23.8b, v26.8h, #2 // q1'_2
rshrn2 v23.16b, v27.8h, #2 // q1'_2
uaddl v28.8h, v2.8b, v3.8b
uaddl2 v29.8h, v2.16b, v3.16b
shl v28.8h, v28.8h, #1
shl v29.8h, v29.8h, #1
add v28.8h, v28.8h, v26.8h
add v29.8h, v29.8h, v27.8h
rshrn v26.8b, v28.8h, #3 // q2'_2
rshrn2 v26.16b, v29.8h, #3 // q2'_2
// Select weak/strong result per lane via the condition masks.
bit v7.16b, v24.16b, v30.16b // p0'_1
bit v0.16b, v25.16b, v31.16b // q0'_1
bit v7.16b, v20.16b, v17.16b // p0'_2
bit v6.16b, v21.16b, v17.16b // p1'_2
bit v5.16b, v19.16b, v17.16b // p2'_2
bit v0.16b, v22.16b, v18.16b // q0'_2
bit v1.16b, v23.16b, v18.16b // q1'_2
bit v2.16b, v26.16b, v18.16b // q2'_2
.endm
// void deblock_v_luma_intra_neon( uint8_t *pix, intptr_t stride, int alpha, int beta )
// Vertical-edge luma intra deblock. x0 points at the q0 row.
// The `9:` label is the no-filter target used by h264_loop_filter_luma_intra.
function deblock_v_luma_intra_neon, export=1
h264_loop_filter_start_intra
ld1 {v0.16b}, [x0], x1 // q0
ld1 {v1.16b}, [x0], x1 // q1
ld1 {v2.16b}, [x0], x1 // q2
ld1 {v3.16b}, [x0], x1 // q3
sub x0, x0, x1, lsl #3 // rewind 8 rows: x0 -> p3
ld1 {v4.16b}, [x0], x1 // p3
ld1 {v5.16b}, [x0], x1 // p2
ld1 {v6.16b}, [x0], x1 // p1
ld1 {v7.16b}, [x0] // p0
h264_loop_filter_luma_intra
sub x0, x0, x1, lsl #1 // x0 -> p2
st1 {v5.16b}, [x0], x1 // p2
st1 {v6.16b}, [x0], x1 // p1
st1 {v7.16b}, [x0], x1 // p0
st1 {v0.16b}, [x0], x1 // q0
st1 {v1.16b}, [x0], x1 // q1
st1 {v2.16b}, [x0] // q2
9:
ret
endfunc
// void deblock_h_luma_intra_neon( uint8_t *pix, intptr_t stride, int alpha, int beta )
// Horizontal-edge luma intra deblock: load 16 rows x 8 bytes around the
// edge, transpose, filter, transpose back, store all 8 columns (p3..q3;
// p3/q3 are unmodified by the filter so rewriting them is harmless).
// The `9:` label is the no-filter target used by h264_loop_filter_luma_intra.
function deblock_h_luma_intra_neon, export=1
h264_loop_filter_start_intra
sub x0, x0, #4 // x0 -> p3 of row 0
ld1 {v4.8b}, [x0], x1
ld1 {v5.8b}, [x0], x1
ld1 {v6.8b}, [x0], x1
ld1 {v7.8b}, [x0], x1
ld1 {v0.8b}, [x0], x1
ld1 {v1.8b}, [x0], x1
ld1 {v2.8b}, [x0], x1
ld1 {v3.8b}, [x0], x1
ld1 {v4.d}[1], [x0], x1 // rows 8..15 fill the high halves
ld1 {v5.d}[1], [x0], x1
ld1 {v6.d}[1], [x0], x1
ld1 {v7.d}[1], [x0], x1
ld1 {v0.d}[1], [x0], x1
ld1 {v1.d}[1], [x0], x1
ld1 {v2.d}[1], [x0], x1
ld1 {v3.d}[1], [x0], x1
transpose_8x16.b v4, v5, v6, v7, v0, v1, v2, v3, v21, v23
h264_loop_filter_luma_intra
transpose_8x16.b v4, v5, v6, v7, v0, v1, v2, v3, v21, v23
sub x0, x0, x1, lsl #4 // rewind 16 rows
st1 {v4.8b}, [x0], x1
st1 {v5.8b}, [x0], x1
st1 {v6.8b}, [x0], x1
st1 {v7.8b}, [x0], x1
st1 {v0.8b}, [x0], x1
st1 {v1.8b}, [x0], x1
st1 {v2.8b}, [x0], x1
st1 {v3.8b}, [x0], x1
st1 {v4.d}[1], [x0], x1
st1 {v5.d}[1], [x0], x1
st1 {v6.d}[1], [x0], x1
st1 {v7.d}[1], [x0], x1
st1 {v0.d}[1], [x0], x1
st1 {v1.d}[1], [x0], x1
st1 {v2.d}[1], [x0], x1
st1 {v3.d}[1], [x0], x1
9:
ret
endfunc
// H.264 chroma deblocking filter for bS < 4, 16 pixels per invocation.
// Register contract: v18=p1 v16=p0 | v0=q0 v2=q1; w2=alpha, w3=beta;
// v24.s[0] = four tc0 bytes (loaded by h264_loop_filter_start in
// deblock-a-common.S -- assumed; confirm there).
// Only p0 (v16) and q0 (v0) are modified (chroma never touches p1/q1).
.macro h264_loop_filter_chroma
dup v22.16b, w2 // alpha
// Spread each tc0 byte across its 4-pixel group (byte->halfword->word).
uxtl v24.8h, v24.8b
uabd v26.16b, v16.16b, v0.16b // abs(p0 - q0)
// delta = clip( ((q0-p0)*4 + (p1-q1) + 4) >> 3, -tc0, tc0 ), in 16-bit.
uxtl v4.8h, v0.8b
uxtl2 v5.8h, v0.16b
uabd v28.16b, v18.16b, v16.16b // abs(p1 - p0)
usubw v4.8h, v4.8h, v16.8b
usubw2 v5.8h, v5.8h, v16.16b
sli v24.8h, v24.8h, #8
shl v4.8h, v4.8h, #2
shl v5.8h, v5.8h, #2
uabd v30.16b, v2.16b, v0.16b // abs(q1 - q0)
uxtl v24.4s, v24.4h
uaddw v4.8h, v4.8h, v18.8b
uaddw2 v5.8h, v5.8h, v18.16b
cmhi v26.16b, v22.16b, v26.16b // < alpha
usubw v4.8h, v4.8h, v2.8b
usubw2 v5.8h, v5.8h, v2.16b
sli v24.4s, v24.4s, #16
dup v22.16b, w3 // beta
rshrn v4.8b, v4.8h, #3
rshrn2 v4.16b, v5.8h, #3
cmhi v28.16b, v22.16b, v28.16b // < beta
cmhi v30.16b, v22.16b, v30.16b // < beta
smin v4.16b, v4.16b, v24.16b
neg v25.16b, v24.16b
and v26.16b, v26.16b, v28.16b
smax v4.16b, v4.16b, v25.16b
and v26.16b, v26.16b, v30.16b // v26 = combined edge condition
uxtl v22.8h, v0.8b
uxtl2 v23.8h, v0.16b
and v4.16b, v4.16b, v26.16b // delta zeroed where edge inactive
uxtl v28.8h, v16.8b
uxtl2 v29.8h, v16.16b
saddw v28.8h, v28.8h, v4.8b // p0 + delta
saddw2 v29.8h, v29.8h, v4.16b
ssubw v22.8h, v22.8h, v4.8b // q0 - delta
ssubw2 v23.8h, v23.8h, v4.16b
sqxtun v16.8b, v28.8h // saturate back to u8
sqxtun v0.8b, v22.8h
sqxtun2 v16.16b, v29.8h
sqxtun2 v0.16b, v23.8h
.endm
// void deblock_v_chroma_neon( uint8_t *pix, intptr_t stride, int alpha, int beta, int8_t *tc0 )
// Vertical-edge chroma deblock. x0 points at the q0 row; only p0 and q0
// are written back.
function deblock_v_chroma_neon, export=1
h264_loop_filter_start
sub x0, x0, x1, lsl #1 // x0 -> p1
ld1 {v18.16b}, [x0], x1 // p1
ld1 {v16.16b}, [x0], x1 // p0
ld1 {v0.16b}, [x0], x1 // q0
ld1 {v2.16b}, [x0] // q1
h264_loop_filter_chroma
sub x0, x0, x1, lsl #1 // x0 -> p0
st1 {v16.16b}, [x0], x1 // p0
st1 {v0.16b}, [x0], x1 // q0
ret
endfunc
// void deblock_h_chroma_neon( uint8_t *pix, intptr_t stride, int alpha, int beta, int8_t *tc0 )
// Horizontal-edge chroma deblock for interleaved (NV12-style) chroma: each
// row contributes 8 bytes (4 CbCr pairs), so rows map to .d lanes and the
// transpose works on halfword (pair) granularity.
// The internal label deblock_h_chroma is tail-called by
// deblock_h_chroma_422_neon for its second half -- keep it stable.
function deblock_h_chroma_neon, export=1
h264_loop_filter_start
sub x0, x0, #4 // x0 -> p1 pair of row 0
deblock_h_chroma:
ld1 {v18.d}[0], [x0], x1
ld1 {v16.d}[0], [x0], x1
ld1 {v0.d}[0], [x0], x1
ld1 {v2.d}[0], [x0], x1
ld1 {v18.d}[1], [x0], x1 // rows 4..7 fill the high halves
ld1 {v16.d}[1], [x0], x1
ld1 {v0.d}[1], [x0], x1
ld1 {v2.d}[1], [x0], x1
transpose4x8.h v18, v16, v0, v2, v28, v29, v30, v31
h264_loop_filter_chroma
transpose4x8.h v18, v16, v0, v2, v28, v29, v30, v31
sub x0, x0, x1, lsl #3 // rewind 8 rows
st1 {v18.d}[0], [x0], x1 // p1/q1 are unmodified; rewriting is harmless
st1 {v16.d}[0], [x0], x1
st1 {v0.d}[0], [x0], x1
st1 {v2.d}[0], [x0], x1
st1 {v18.d}[1], [x0], x1
st1 {v16.d}[1], [x0], x1
st1 {v0.d}[1], [x0], x1
st1 {v2.d}[1], [x0], x1
ret
endfunc
// void deblock_h_chroma_422_neon( uint8_t *pix, intptr_t stride, int alpha, int beta, int8_t *tc0 )
// 4:2:2 horizontal chroma deblock: a 16-row edge handled as two 8-row
// passes through deblock_h_chroma, with the row stride doubled so each
// pass covers alternating rows of its half.
function deblock_h_chroma_422_neon, export=1
add x5, x0, x1 // x5 = start of second half
sub x0, x0, #4
add x1, x1, x1 // process every other row
h264_loop_filter_start
mov x7, x30 // preserve LR across the bl
bl deblock_h_chroma
mov x30, x7
sub x0, x5, #4
// Restore the tc0 bytes clobbered by the first pass; w6 presumably holds
// them from h264_loop_filter_start (confirm in deblock-a-common.S).
mov v24.s[0], w6
b deblock_h_chroma // tail call: its ret returns to our caller
endfunc
// 8-pixel (64-bit vector) variant of the chroma bS<4 filter, used by the
// MBAFF path where only 4 rows of CbCr pairs are filtered.
// Register contract: v18=p1 v16=p0 | v17=q0 v19=q1; w2=alpha, w3=beta;
// v24.s[0] = tc0 bytes. Modifies v16 (p0) and v17 (q0) only.
.macro h264_loop_filter_chroma8
dup v22.8b, w2 // alpha
uxtl v24.8h, v24.8b
uabd v26.8b, v16.8b, v17.8b // abs(p0 - q0)
uxtl v4.8h, v17.8b
uabd v28.8b, v18.8b, v16.8b // abs(p1 - p0)
usubw v4.8h, v4.8h, v16.8b
sli v24.8h, v24.8h, #8 // spread tc0 bytes pairwise
shl v4.8h, v4.8h, #2
uabd v30.8b, v19.8b, v17.8b // abs(q1 - q0)
uaddw v4.8h, v4.8h, v18.8b
cmhi v26.8b, v22.8b, v26.8b // < alpha
usubw v4.8h, v4.8h, v19.8b
dup v22.8b, w3 // beta
rshrn v4.8b, v4.8h, #3 // delta before clipping
cmhi v28.8b, v22.8b, v28.8b // < beta
cmhi v30.8b, v22.8b, v30.8b // < beta
smin v4.8b, v4.8b, v24.8b
neg v25.8b, v24.8b
and v26.8b, v26.8b, v28.8b
smax v4.8b, v4.8b, v25.8b // delta clipped to +/- tc0
and v26.8b, v26.8b, v30.8b // combined edge condition
uxtl v22.8h, v17.8b
and v4.8b, v4.8b, v26.8b
uxtl v28.8h, v16.8b
saddw v28.8h, v28.8h, v4.8b // p0 + delta
ssubw v22.8h, v22.8h, v4.8b // q0 - delta
sqxtun v16.8b, v28.8h
sqxtun v17.8b, v22.8h
.endm
// void deblock_h_chroma_mbaff_neon( uint8_t *pix, intptr_t stride, int alpha, int beta, int8_t *tc0 )
// MBAFF horizontal chroma deblock: only 4 rows of interleaved CbCr pairs.
// Loads 8 bytes per row from 4 pixels left of the edge, transposes on
// halfword (pair) granularity, filters, and stores the two changed pair
// columns (p0,q0) back with interleaving st2.
function deblock_h_chroma_mbaff_neon, export=1
h264_loop_filter_start
sub x4, x0, #4 // x4 = load pointer (p1 pair)
sub x0, x0, #2 // x0 = store pointer (p0 pair)
ld1 {v18.8b}, [x4], x1
ld1 {v16.8b}, [x4], x1
ld1 {v17.8b}, [x4], x1
ld1 {v19.8b}, [x4]
transpose4x4.h v18, v16, v17, v19, v28, v29, v30, v31
h264_loop_filter_chroma8
st2 {v16.h,v17.h}[0], [x0], x1 // p0,q0 pair of each row
st2 {v16.h,v17.h}[1], [x0], x1
st2 {v16.h,v17.h}[2], [x0], x1
st2 {v16.h,v17.h}[3], [x0]
ret
endfunc
// H.264 chroma intra (bS == 4) filter.
//   p0' = (2*p1 + p0 + q1 + 2) >> 2, q0' = (2*q1 + q0 + p1 + 2) >> 2,
// applied only where the alpha/beta edge conditions hold.
// Register contract: v18=p1 v16=p0 | v17=q0 v19=q1; v30=alpha, v31=beta.
// \width=8 restricts the arithmetic to the low 64 bits (MBAFF path).
// Modifies v16 (p0) and v17 (q0) only.
.macro h264_loop_filter_chroma_intra width=16
uabd v26.16b, v16.16b, v17.16b // abs(p0 - q0)
uabd v27.16b, v18.16b, v16.16b // abs(p1 - p0)
uabd v28.16b, v19.16b, v17.16b // abs(q1 - q0)
cmhi v26.16b, v30.16b, v26.16b // < alpha
cmhi v27.16b, v31.16b, v27.16b // < beta
cmhi v28.16b, v31.16b, v28.16b // < beta
and v26.16b, v26.16b, v27.16b
and v26.16b, v26.16b, v28.16b // v26 = combined edge condition
ushll v4.8h, v18.8b, #1
ushll v6.8h, v19.8b, #1
.ifc \width, 16
ushll2 v5.8h, v18.16b, #1
ushll2 v7.8h, v19.16b, #1
uaddl2 v21.8h, v16.16b, v19.16b
uaddl2 v23.8h, v17.16b, v18.16b
.endif
uaddl v20.8h, v16.8b, v19.8b
uaddl v22.8h, v17.8b, v18.8b
add v20.8h, v20.8h, v4.8h // mlal?
add v22.8h, v22.8h, v6.8h
.ifc \width, 16
add v21.8h, v21.8h, v5.8h
add v23.8h, v23.8h, v7.8h
.endif
uqrshrn v24.8b, v20.8h, #2 // p0'
uqrshrn v25.8b, v22.8h, #2 // q0'
.ifc \width, 16
uqrshrn2 v24.16b, v21.8h, #2
uqrshrn2 v25.16b, v23.8h, #2
.endif
bit v16.16b, v24.16b, v26.16b // insert p0' where condition holds
bit v17.16b, v25.16b, v26.16b // insert q0' where condition holds
.endm
// void deblock_v_chroma_intra_neon( uint8_t *pix, intptr_t stride, int alpha, int beta )
// Vertical-edge chroma intra deblock; writes back p0 and q0 only.
function deblock_v_chroma_intra_neon, export=1
h264_loop_filter_start_intra
sub x0, x0, x1, lsl #1 // x0 -> p1
ld1 {v18.16b}, [x0], x1 // p1
ld1 {v16.16b}, [x0], x1 // p0
ld1 {v17.16b}, [x0], x1 // q0
ld1 {v19.16b}, [x0] // q1
h264_loop_filter_chroma_intra
sub x0, x0, x1, lsl #1 // x0 -> p0
st1 {v16.16b}, [x0], x1 // p0
st1 {v17.16b}, [x0], x1 // q0
ret
endfunc
// void deblock_h_chroma_intra_mbaff_neon( uint8_t *pix, intptr_t stride, int alpha, int beta )
// MBAFF horizontal chroma intra deblock: 4 rows of interleaved CbCr pairs,
// transposed on halfword granularity, filtered in 8-byte width, and the
// changed p0/q0 pair columns stored back with st2.
function deblock_h_chroma_intra_mbaff_neon, export=1
h264_loop_filter_start_intra
sub x4, x0, #4 // x4 = load pointer (p1 pair)
sub x0, x0, #2 // x0 = store pointer (p0 pair)
ld1 {v18.8b}, [x4], x1
ld1 {v16.8b}, [x4], x1
ld1 {v17.8b}, [x4], x1
ld1 {v19.8b}, [x4], x1
transpose4x4.h v18, v16, v17, v19, v26, v27, v28, v29
h264_loop_filter_chroma_intra width=8
st2 {v16.h,v17.h}[0], [x0], x1 // p0,q0 pair of each row
st2 {v16.h,v17.h}[1], [x0], x1
st2 {v16.h,v17.h}[2], [x0], x1
st2 {v16.h,v17.h}[3], [x0], x1
ret
endfunc
// void deblock_h_chroma_intra_neon( uint8_t *pix, intptr_t stride, int alpha, int beta )
// Horizontal-edge chroma intra deblock: 8 rows of interleaved CbCr pairs.
function deblock_h_chroma_intra_neon, export=1
h264_loop_filter_start_intra
sub x4, x0, #4 // x4 = load pointer (p1 pair)
sub x0, x0, #2 // x0 = store pointer (p0 pair)
ld1 {v18.d}[0], [x4], x1
ld1 {v16.d}[0], [x4], x1
ld1 {v17.d}[0], [x4], x1
ld1 {v19.d}[0], [x4], x1
ld1 {v18.d}[1], [x4], x1 // rows 4..7 fill the high halves
ld1 {v16.d}[1], [x4], x1
ld1 {v17.d}[1], [x4], x1
ld1 {v19.d}[1], [x4], x1
transpose4x8.h v18, v16, v17, v19, v26, v27, v28, v29
h264_loop_filter_chroma_intra
st2 {v16.h,v17.h}[0], [x0], x1 // p0,q0 pair of each row
st2 {v16.h,v17.h}[1], [x0], x1
st2 {v16.h,v17.h}[2], [x0], x1
st2 {v16.h,v17.h}[3], [x0], x1
st2 {v16.h,v17.h}[4], [x0], x1
st2 {v16.h,v17.h}[5], [x0], x1
st2 {v16.h,v17.h}[6], [x0], x1
st2 {v16.h,v17.h}[7], [x0], x1
ret
endfunc
// void deblock_h_chroma_422_intra_neon( uint8_t *pix, intptr_t stride, int alpha, int beta )
// 4:2:2 horizontal chroma intra deblock: a 16-row edge processed as two
// consecutive 8-row passes (x4 keeps advancing by stride between them).
function deblock_h_chroma_422_intra_neon, export=1
h264_loop_filter_start_intra
sub x4, x0, #4 // x4 = load pointer (p1 pair)
sub x0, x0, #2 // x0 = store pointer (p0 pair)
// --- first 8 rows ---
ld1 {v18.d}[0], [x4], x1
ld1 {v16.d}[0], [x4], x1
ld1 {v17.d}[0], [x4], x1
ld1 {v19.d}[0], [x4], x1
ld1 {v18.d}[1], [x4], x1
ld1 {v16.d}[1], [x4], x1
ld1 {v17.d}[1], [x4], x1
ld1 {v19.d}[1], [x4], x1
transpose4x8.h v18, v16, v17, v19, v26, v27, v28, v29
h264_loop_filter_chroma_intra
st2 {v16.h,v17.h}[0], [x0], x1
st2 {v16.h,v17.h}[1], [x0], x1
st2 {v16.h,v17.h}[2], [x0], x1
st2 {v16.h,v17.h}[3], [x0], x1
st2 {v16.h,v17.h}[4], [x0], x1
st2 {v16.h,v17.h}[5], [x0], x1
st2 {v16.h,v17.h}[6], [x0], x1
st2 {v16.h,v17.h}[7], [x0], x1
// --- second 8 rows ---
ld1 {v18.d}[0], [x4], x1
ld1 {v16.d}[0], [x4], x1
ld1 {v17.d}[0], [x4], x1
ld1 {v19.d}[0], [x4], x1
ld1 {v18.d}[1], [x4], x1
ld1 {v16.d}[1], [x4], x1
ld1 {v17.d}[1], [x4], x1
ld1 {v19.d}[1], [x4], x1
transpose4x8.h v18, v16, v17, v19, v26, v27, v28, v29
h264_loop_filter_chroma_intra
st2 {v16.h,v17.h}[0], [x0], x1
st2 {v16.h,v17.h}[1], [x0], x1
st2 {v16.h,v17.h}[2], [x0], x1
st2 {v16.h,v17.h}[3], [x0], x1
st2 {v16.h,v17.h}[4], [x0], x1
st2 {v16.h,v17.h}[5], [x0], x1
st2 {v16.h,v17.h}[6], [x0], x1
st2 {v16.h,v17.h}[7], [x0], x1
ret
endfunc
// void deblock_strength( uint8_t nnz[X264_SCAN8_SIZE],
// int8_t ref[2][X264_SCAN8_LUMA_SIZE],
// int16_t mv[2][X264_SCAN8_LUMA_SIZE][2],
// uint8_t bs[2][8][4], int mvy_limit,
// int bframe )
// Computes boundary strengths for the whole macroblock (see C prototype in
// the comment above). Args: x0=nnz, x1=ref, x2=mv, x3=bs, w4=mvy_limit,
// w5=bframe. bs[1] (vertical edges) and bs[0] (horizontal edges, via a
// tbl transpose) are written at the end.
function deblock_strength_neon, export=1
movi v4.16b, #0 // v4/v5 accumulate ref/mv strength, list 0 (+1)
lsl w4, w4, #8
add x3, x3, #32 // x3 -> bs[1]; x6 steps back to bs[0]
// Pack per-halfword thresholds: high byte = mvy_limit-1, low byte = 3,
// used below with uqsub to test |mvx|>=4 / |mvy|>=mvy_limit pairwise.
sub w4, w4, #(1<<8)-3
movi v5.16b, #0
dup v6.8h, w4
mov x6, #-32
bframe:
// One pass per reference list. w5 (bframe flag) drives the loop: subs
// sets Z exactly once when w5 goes 1 -> 0, so the body runs twice for
// B-frames and once otherwise.
// load bytes ref
add x2, x2, #16
ld1 {v31.d}[1], [x1], #8 // left-neighbour ref column (assumed layout)
ld1 {v1.16b}, [x1], #16
movi v0.16b, #0
ld1 {v2.16b}, [x1], #16
ext v3.16b, v0.16b, v1.16b, #15
ext v0.16b, v0.16b, v2.16b, #15
unzip v21.4s, v22.4s, v1.4s, v2.4s
unzip v23.4s, v20.4s, v3.4s, v0.4s
ext v21.16b, v31.16b, v22.16b, #12
eor v0.16b, v20.16b, v22.16b // ref differs across horizontal edge?
eor v1.16b, v21.16b, v22.16b // ref differs across vertical edge?
orr v4.16b, v4.16b, v0.16b
orr v5.16b, v5.16b, v1.16b
// mv comparisons: for each edge, |mv - neighbour mv| against v6 via
// saturating ops; any nonzero survivor marks strength >= 1.
ld1 {v21.8h}, [x2], #16 // mv + 0x10
ld1 {v19.8h}, [x2], #16 // mv + 0x20
ld1 {v22.8h}, [x2], #16 // mv + 0x30
ld1 {v18.8h}, [x2], #16 // mv + 0x40
ld1 {v23.8h}, [x2], #16 // mv + 0x50
ext v19.16b, v19.16b, v22.16b, #12
ext v18.16b, v18.16b, v23.16b, #12
sabd v0.8h, v22.8h, v19.8h
ld1 {v19.8h}, [x2], #16 // mv + 0x60
sabd v1.8h, v23.8h, v18.8h
ld1 {v24.8h}, [x2], #16 // mv + 0x70
uqxtn v0.8b, v0.8h
ld1 {v18.8h}, [x2], #16 // mv + 0x80
ld1 {v25.8h}, [x2], #16 // mv + 0x90
uqxtn2 v0.16b, v1.8h
ext v19.16b, v19.16b, v24.16b, #12
ext v18.16b, v18.16b, v25.16b, #12
sabd v1.8h, v24.8h, v19.8h
sabd v2.8h, v25.8h, v18.8h
uqxtn v1.8b, v1.8h
uqxtn2 v1.16b, v2.8h
uqsub v0.16b, v0.16b, v6.16b // subtract packed (limit-1 | 3) thresholds
uqsub v1.16b, v1.16b, v6.16b
uqxtn v0.8b, v0.8h // nonzero iff either component exceeded
uqxtn2 v0.16b, v1.8h
sabd v1.8h, v22.8h, v23.8h
orr v4.16b, v4.16b, v0.16b
sabd v0.8h, v21.8h, v22.8h
sabd v2.8h, v23.8h, v24.8h
sabd v3.8h, v24.8h, v25.8h
uqxtn v0.8b, v0.8h
uqxtn2 v0.16b, v1.8h
uqxtn v1.8b, v2.8h
uqxtn2 v1.16b, v3.8h
uqsub v0.16b, v0.16b, v6.16b
uqsub v1.16b, v1.16b, v6.16b
uqxtn v0.8b, v0.8h
uqxtn2 v0.16b, v1.8h
subs w5, w5, #1
orr v5.16b, v5.16b, v0.16b
b.eq bframe
movi v6.16b, #1
// load bytes nnz
ld1 {v31.d}[1], [x0], #8 // left-neighbour nnz column (assumed layout)
ld1 {v1.16b}, [x0], #16
movi v0.16b, #0
ld1 {v2.16b}, [x0], #16
ext v3.16b, v0.16b, v1.16b, #15
ext v0.16b, v0.16b, v2.16b, #15
unzip v21.4s, v22.4s, v1.4s, v2.4s
unzip v23.4s, v20.4s, v3.4s, v0.4s
ext v21.16b, v31.16b, v22.16b, #12
movrel x7, transpose_table
ld1 {v7.16b}, [x7]
orr v0.16b, v20.16b, v22.16b // nnz on either side, horizontal edges
orr v1.16b, v21.16b, v22.16b // nnz on either side, vertical edges
umin v0.16b, v0.16b, v6.16b
umin v1.16b, v1.16b, v6.16b
umin v4.16b, v4.16b, v6.16b // mv ? 1 : 0
umin v5.16b, v5.16b, v6.16b
add v0.16b, v0.16b, v0.16b // nnz ? 2 : 0
add v1.16b, v1.16b, v1.16b
umax v4.16b, v4.16b, v0.16b // bs = max(nnz-strength, mv-strength)
umax v5.16b, v5.16b, v1.16b
tbl v6.16b, {v4.16b}, v7.16b // 4x4 transpose for the bs[0] layout
st1 {v5.16b}, [x3], x6 // bs[1]
st1 {v6.16b}, [x3] // bs[0]
ret
endfunc
// tbl index vector that transposes a 4x4 byte matrix (gathers column-major:
// bytes 0,4,8,12 then 1,5,9,13, ...). Used by deblock_strength_neon to
// convert the accumulated strengths into the bs[0] layout.
const transpose_table
.byte 0, 4, 8, 12
.byte 1, 5, 9, 13
.byte 2, 6, 10, 14
.byte 3, 7, 11, 15
endconst
|
aestream/faery
| 3,698
|
src/mp4/x264/common/aarch64/deblock-a-sve.S
|
/*****************************************************************************
* deblock-a-sve.S: aarch64 deblocking
*****************************************************************************
* Copyright (C) 2009-2024 x264 project
*
* Authors: David Chen <david.chen@myais.com.cn>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "asm.S"
#include "deblock-a-common.S"
.arch armv8-a+sve
// SVE variant of the chroma bS<4 filter. Same register contract as the
// NEON h264_loop_filter_chroma (v18=p1 v16=p0 | v0=q0 v2=q1, w2=alpha,
// w3=beta, v24.s[0]=tc0 bytes), but the combined edge condition is built
// as predicate p1 instead of a byte mask: the caller applies it via
// predicated st1b stores rather than AND-ing the delta. p0 is a vl16
// all-true predicate; p2/p3 are scratch predicates.
.macro h264_loop_filter_chroma_sve
ptrue p0.b, vl16
dup v22.16b, w2 // alpha
uxtl v24.8h, v24.8b
uabd v26.16b, v16.16b, v0.16b // abs(p0 - q0)
uxtl v4.8h, v0.8b
uxtl2 v5.8h, v0.16b
uabd v28.16b, v18.16b, v16.16b // abs(p1 - p0)
usubw v4.8h, v4.8h, v16.8b
usubw2 v5.8h, v5.8h, v16.16b
sli v24.8h, v24.8h, #8 // spread tc0 bytes across pixel groups
shl v4.8h, v4.8h, #2
shl v5.8h, v5.8h, #2
uabd v30.16b, v2.16b, v0.16b // abs(q1 - q0)
uxtl v24.4s, v24.4h
uaddw v4.8h, v4.8h, v18.8b
uaddw2 v5.8h, v5.8h, v18.16b
cmphi p1.b, p0/z, z22.b, z26.b // < alpha
usubw v4.8h, v4.8h, v2.8b
usubw2 v5.8h, v5.8h, v2.16b
sli v24.4s, v24.4s, #16
dup v22.16b, w3 // beta
rshrn v4.8b, v4.8h, #3 // delta before clipping
rshrn2 v4.16b, v5.8h, #3
cmphi p2.b, p0/z, z22.b, z28.b // < beta
cmphi p3.b, p0/z, z22.b, z30.b // < beta
smin v4.16b, v4.16b, v24.16b
neg v25.16b, v24.16b
and p1.b, p0/z, p1.b, p2.b
smax v4.16b, v4.16b, v25.16b // delta clipped to +/- tc0
and p1.b, p0/z, p1.b, p3.b // p1 = combined edge condition
uxtl v22.8h, v0.8b
uxtl2 v23.8h, v0.16b
uxtl v28.8h, v16.8b
uxtl2 v29.8h, v16.16b
saddw v28.8h, v28.8h, v4.8b // p0 + delta
saddw2 v29.8h, v29.8h, v4.16b
ssubw v22.8h, v22.8h, v4.8b // q0 - delta
ssubw2 v23.8h, v23.8h, v4.16b
sqxtun v16.8b, v28.8h
sqxtun v0.8b, v22.8h
sqxtun2 v16.16b, v29.8h
sqxtun2 v0.16b, v23.8h
.endm
// void deblock_v_chroma_sve( uint8_t *pix, intptr_t stride, int alpha, int beta, int8_t *tc0 )
// SVE vertical-edge chroma deblock. The filter macro leaves the edge
// condition in predicate p1, so the p0/q0 rows are written back with
// predicated st1b -- pixels that failed the condition are left untouched
// in memory instead of being re-stored with their original values.
function deblock_v_chroma_sve, export=1
h264_loop_filter_start
sub x0, x0, x1, lsl #1 // x0 -> p1
// No performance improvement if sve load is used. So, continue using
// NEON load here
ld1 {v18.16b}, [x0], x1 // p1
ld1 {v16.16b}, [x0], x1 // p0
ld1 {v0.16b}, [x0], x1 // q0
ld1 {v2.16b}, [x0] // q1
h264_loop_filter_chroma_sve
sub x0, x0, x1, lsl #1 // x0 -> p0
st1b {z16.b}, p1, [x0] // p0 (predicated)
add x0, x0, x1
st1b {z0.b}, p1, [x0] // q0 (predicated)
ret
endfunc
|
aestream/faery
| 4,405
|
src/mp4/x264/common/aarch64/cabac-a.S
|
/*****************************************************************************
* cabac-a.S: aarch64 cabac
*****************************************************************************
* Copyright (C) 2014-2024 x264 project
*
* Authors: Janne Grunau <janne-x264@jannau.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "asm.S"
#include "asm-offsets.h"
// w11 holds x264_cabac_t.i_low
// w12 holds x264_cabac_t.i_range
// void cabac_encode_decision_asm( x264_cabac_t *cb, int i_ctx, int b )
// In: x0 = cabac state struct (field offsets from asm-offsets.h),
//     w1 = context index, w2 = bit to encode.
// Encodes one context-modelled bin: looks up range_lps from the current
// state and (i_range >> 6), updates the state via the transition table,
// then renormalizes. The labels cabac_encode_renorm and cabac_putbyte are
// shared entry points, branched to by cabac_encode_bypass_asm and
// cabac_encode_terminal_asm -- keep them stable.
function cabac_encode_decision_asm, export=1
add w10, w1, #CABAC_STATE
ldrb w3, [x0, w10, uxtw] // i_state
ldr w12, [x0, #CABAC_I_RANGE]
movrel x8, X264(cabac_range_lps), -4
movrel x9, X264(cabac_transition)
ubfx x4, x3, #1, #7 // state index without the MPS bit
asr w5, w12, #6
add x8, x8, x4, lsl #2
orr w14, w2, w3, lsl #1 // transition-table index = (state<<1)|b
ldrb w4, [x8, w5, uxtw] // i_range_lps
ldr w11, [x0, #CABAC_I_LOW]
eor w6, w2, w3 // b ^ i_state
ldrb w9, [x9, w14, uxtw] // next state
sub w12, w12, w4 // MPS path: range -= range_lps
add w7, w11, w12 // LPS path: low += reduced range
tst w6, #1 // (b ^ i_state) & 1
csel w12, w4, w12, ne // LPS taken: range = range_lps
csel w11, w7, w11, ne
strb w9, [x0, w10, uxtw] // i_state
cabac_encode_renorm:
ldr w2, [x0, #CABAC_I_QUEUE]
// Renormalize: shift i_range back into [0x100, 0x1FF]. For a 9-bit
// range, clz - 23 is exactly the number of leading-zero payload bits.
clz w5, w12
sub w5, w5, #23
lsl w11, w11, w5
lsl w12, w12, w5
adds w2, w2, w5
b.ge cabac_putbyte // a full byte is ready once queue >= 0
stp w11, w12, [x0, #CABAC_I_LOW] // store i_low, i_range
str w2, [x0, #CABAC_I_QUEUE]
ret
.align 5
// Emit one byte from i_low, handling carry propagation through the 0xff
// run tracked by i_bytes_outstanding.
cabac_putbyte:
ldr w6, [x0, #CABAC_I_BYTES_OUTSTANDING]
add w14, w2, #10
mov w13, #-1
sub w2, w2, #8
asr w4, w11, w14 // out
lsl w13, w13, w14
subs w5, w4, #0xff
bic w11, w11, w13 // strip emitted bits from i_low
cinc w6, w6, eq // out == 0xff: defer, may still carry
b.eq 0f
1:
ldr x7, [x0, #CABAC_P]
asr w5, w4, #8 // carry
ldurb w8, [x7, #-1]
add w8, w8, w5 // propagate carry into last written byte
sub w5, w5, #1 // outstanding bytes become 0x00 or 0xff
sturb w8, [x7, #-1]
cbz w6, 3f
2:
subs w6, w6, #1 // flush the outstanding-byte run
strb w5, [x7], #1
b.gt 2b
3:
strb w4, [x7], #1
str x7, [x0, #CABAC_P]
0:
stp w11, w12, [x0, #CABAC_I_LOW] // store i_low, i_range
stp w2, w6, [x0, #CABAC_I_QUEUE] // store i_queue, i_bytes_outstanding
ret
endfunc
// void cabac_encode_bypass_asm( x264_cabac_t *cb, int b )
// Bypass (equiprobable) bin: low = (low << 1) + (b ? range : 0), queue++.
// Falls through into the shared cabac_putbyte when a byte is ready.
function cabac_encode_bypass_asm, export=1, align=5
ldr w12, [x0, #CABAC_I_RANGE]
ldr w11, [x0, #CABAC_I_LOW]
ldr w2, [x0, #CABAC_I_QUEUE]
and w1, w1, w12 // b ? i_range : 0 (b is 0 or -1 here, presumably)
add w11, w1, w11, lsl #1
adds w2, w2, #1
b.ge cabac_putbyte
str w11, [x0, #CABAC_I_LOW]
str w2, [x0, #CABAC_I_QUEUE]
ret
endfunc
// void cabac_encode_terminal_asm( x264_cabac_t *cb )
// Terminal bin with value 0: range -= 2. If range is still >= 0x100
// (bit 8 set) no renormalization is needed; otherwise shift once and
// hand off to the shared cabac_putbyte path.
function cabac_encode_terminal_asm, export=1, align=5
ldr w12, [x0, #CABAC_I_RANGE]
sub w12, w12, #2
tbz w12, #8, 1f // bit 8 clear -> must renormalize
str w12, [x0, #CABAC_I_RANGE]
ret
1:
ldr w2, [x0, #CABAC_I_QUEUE]
ldr w11, [x0, #CABAC_I_LOW]
lsl w12, w12, #1 // single-shift renorm (range was in [0xFE,0xFF])
adds w2, w2, #1
lsl w11, w11, #1
b.ge cabac_putbyte
stp w11, w12, [x0, #CABAC_I_LOW] // store i_low, i_range
str w2, [x0, #CABAC_I_QUEUE]
ret
endfunc
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/ADC/ADC_SingleConversion_TriggerSW/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
/**
 * @brief Entry point after reset: sets SP, copies .data initializers from
 *        flash to SRAM, zero-fills .bss, calls SystemInit and the C
 *        runtime init, then main(). Word-sized copy/fill loops assume the
 *        linker keeps _sdata/_edata/_sbss/_ebss 4-byte aligned.
 */
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM.
   r1 = byte offset into .data; r0/_sdata is reloaded every iteration. */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1] /* r3 = flash word at _sidata + r1 */
str r3, [r0, r1] /* store to _sdata + r1 */
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit /* loop while _sdata + r1 < _edata (unsigned) */
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system intitialization function.*/
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever: /* main() is not expected to return; spin if it does */
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
/* Catch-all handler for any exception/IRQ without an override: spins
   forever so the fault state stays intact for a debugger. All vector
   entries below are weak-aliased to this. */
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
/* Cortex-M0 + STM32F072 vector table. Placed in .isr_vector so the linker
   script can locate it at address 0x0000 0000. Layout: initial SP, reset,
   core exceptions, then the 32 device IRQ vectors. */
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/* Mark the symbol size now that the table is complete. The original file
   evaluated `.-g_pfnVectors` BEFORE the label, which records a size of 0
   in the ELF symbol table; placing it here yields the true table size. */
.size g_pfnVectors, .-g_pfnVectors
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
/* --- Cortex-M core exception handlers --- */
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
/* --- STM32F072 peripheral interrupt handlers (same order as the vector
   table above); .thumb_set keeps the Thumb bit on the alias --- */
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aestream/faery
| 1,727
|
src/mp4/x264/common/aarch64/dct-a-common.S
|
/****************************************************************************
* dct-a-common.S: aarch64 transform and zigzag
*****************************************************************************
* Copyright (C) 2009-2024 x264 project
*
* Authors: David Conrad <lessen42@gmail.com>
* Janne Grunau <janne-x264@jannau.net>
* David Chen <david.chen@myais.com.cn>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
// This file contains the NEON macros that are intended to be used by
// the SVE/SVE2 functions as well
// DCT_1D: one 1-D pass of the 4-point H.264 forward transform.
// Inputs are \v4..\v7; outputs land in \v0..\v3, with \v4..\v7 used as
// scratch. SUMSUB_AB (asm.S) computes sum = a + b, sub = a - b.
// NOTE(review): instruction order matters -- \v6/\v7 are rewritten in
// place by the SUMSUB_AB calls and reused below. Do not reorder.
.macro DCT_1D v0 v1 v2 v3 v4 v5 v6 v7
SUMSUB_AB \v1, \v6, \v5, \v6 // \v1 = \v5 + \v6, \v6 = \v5 - \v6
SUMSUB_AB \v3, \v7, \v4, \v7 // \v3 = \v4 + \v7, \v7 = \v4 - \v7
add \v0, \v3, \v1 // DC: sum of all four inputs
add \v4, \v7, \v7 // 2 * (\v4 - \v7)
add \v5, \v6, \v6 // 2 * (\v5 - \v6)
sub \v2, \v3, \v1
add \v1, \v4, \v6
sub \v3, \v7, \v5
.endm
|
aestream/faery
| 2,207
|
src/mp4/x264/common/aarch64/mc-a-common.S
|
/****************************************************************************
* mc-a-common.S: aarch64 motion compensation
*****************************************************************************
* Copyright (C) 2009-2024 x264 project
*
* Authors: David Conrad <lessen42@gmail.com>
* Janne Grunau <janne-x264@jannau.net>
* Mans Rullgard <mans@mansr.com>
* Stefan Groenroos <stefan.gronroos@gmail.com>
* David Chen <david.chen@myais.com.cn>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
// This file contains the NEON macros and functions that are intended to be used by
// the SVE/SVE2 functions as well
#if BIT_DEPTH == 8
// Weight-setup helpers for the weighted bi-prediction average code.
// The three variants cover the sign combinations of the two weights
// held in w6/w7; callers pick the variant matching the weight range.
// 0 < weight < 64
.macro load_weights_add_add
// NOTE(review): self-move is a no-op; presumably kept so that every
// load_weights_* variant emits exactly one instruction -- confirm
// against the call sites in mc-a.S.
mov w6, w6
.endm
// weight > 64
.macro load_weights_add_sub
neg w7, w7 // second weight is applied with a subtract
.endm
// weight < 0
.macro load_weights_sub_add
neg w6, w6 // first weight is applied with a subtract
.endm
// pixel_avg_w4_neon: rounding average of two 4-pixel-wide blocks.
// x0/x1 = dst/dst_stride, x2/x3 = src1/stride, x4/x5 = src2/stride,
// w9 = row count; two rows are processed per iteration.
function pixel_avg_w4_neon
1: subs w9, w9, #2
ld1 {v0.s}[0], [x2], x3
ld1 {v2.s}[0], [x4], x5
urhadd v0.8b, v0.8b, v2.8b // (a + b + 1) >> 1, per byte
ld1 {v1.s}[0], [x2], x3
ld1 {v3.s}[0], [x4], x5
urhadd v1.8b, v1.8b, v3.8b
st1 {v0.s}[0], [x0], x1
st1 {v1.s}[0], [x0], x1
b.gt 1b
ret
endfunc
#else // BIT_DEPTH == 10
#endif
|
aestream/faery
| 7,883
|
src/mp4/x264/common/aarch64/asm.S
|
/*****************************************************************************
* asm.S: AArch64 utility macros
*****************************************************************************
* Copyright (C) 2008-2024 x264 project
*
* Authors: Mans Rullgard <mans@mansr.com>
* David Conrad <lessen42@gmail.com>
* Janne Grunau <janne-x264@jannau.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "config.h"
// Token-pasting helpers: GLUE does the raw ## paste, JOIN adds one
// level of indirection so its arguments are macro-expanded first.
#define GLUE(a, b) a ## b
#define JOIN(a, b) GLUE(a, b)
// Symbol naming: BASE is the x264 namespace prefix, with a leading
// underscore on platforms whose C symbols are mangled (PREFIX set).
#ifdef PREFIX
# define BASE _x264_
# define SYM_PREFIX _
#else
# define BASE x264_
# define SYM_PREFIX
#endif
// Exported symbols embed the bit depth (e.g. x264_8_*) when BIT_DEPTH
// is defined, so multiple bit-depth builds can coexist in one binary.
#ifdef BIT_DEPTH
# define EXTERN_ASM JOIN(JOIN(BASE, BIT_DEPTH), _)
#else
# define EXTERN_ASM BASE
#endif
// X: bit-depth-qualified x264 symbol; X264: unqualified x264 symbol;
// EXT: external (non-x264) symbol with platform prefix only.
#define X(s) JOIN(EXTERN_ASM, s)
#define X264(s) JOIN(BASE, s)
#define EXT(s) JOIN(SYM_PREFIX, s)
// ELF / MACH / FUNC expand to nothing on targets that support the
// directive that follows them, and to '#' elsewhere so the directive
// line is ignored by the assembler.
#ifdef __ELF__
# define ELF
#else
# define ELF #
#endif
#ifdef __MACH__
# define MACH
#else
# define MACH #
#endif
#if HAVE_AS_FUNC
# define FUNC
#else
# define FUNC #
#endif
// function name [, export [, align]]: open a function definition in
// .text. An 'endfunc' macro is declared on the fly so the closing
// .size/.endfunc directives see the same \name/\export arguments
// without repeating them; it purges itself when used so the next
// 'function' invocation can redefine it.
.macro function name, export=0, align=2
.macro endfunc
.if \export
ELF .size EXTERN_ASM\name, . - EXTERN_ASM\name
.else
ELF .size \name, . - \name
.endif
FUNC .endfunc
.purgem endfunc // self-delete; see note above
.endm
.text
.align \align
.if \export
.global EXTERN_ASM\name // exported under the bit-depth-qualified name
ELF .type EXTERN_ASM\name, %function
FUNC .func EXTERN_ASM\name
EXTERN_ASM\name:
.else
ELF .type \name, %function
FUNC .func \name
\name:
.endif
.endm
// const name [, align]: open a read-only data object, placed in
// .rodata on ELF and in __DATA,__const on Mach-O. Closed by the
// generated 'endconst', which emits the .size and purges itself.
.macro const name, align=2
.macro endconst
ELF .size \name, . - \name
.purgem endconst
.endm
ELF .section .rodata
MACH .const_data
.align \align
\name:
.endm
// movrel rd, val [, offset]: materialize the address of \val + \offset
// in \rd using the PC-relative scheme appropriate for the platform:
// adrp/add pairs for Apple, Windows and ELF PIC, or a literal-pool ldr
// otherwise. Negative offsets are folded in with a separate sub where
// the relocation syntax cannot express them inline.
.macro movrel rd, val, offset=0
#if defined(__APPLE__)
.if \offset < 0
adrp \rd, \val@PAGE
add \rd, \rd, \val@PAGEOFF
sub \rd, \rd, -(\offset)
.else
adrp \rd, \val+(\offset)@PAGE
add \rd, \rd, \val+(\offset)@PAGEOFF
.endif
#elif defined(PIC) && defined(_WIN32)
.if \offset < 0
adrp \rd, \val
add \rd, \rd, :lo12:\val
sub \rd, \rd, -(\offset)
.else
adrp \rd, \val+(\offset)
add \rd, \rd, :lo12:\val+(\offset)
.endif
#elif defined(PIC)
adrp \rd, \val+(\offset)
add \rd, \rd, :lo12:\val+(\offset)
#else
ldr \rd, =\val+\offset // non-PIC: absolute address from literal pool
#endif
.endm
// Row strides (in bytes) of the decode-side and encode-side pixel
// scratch buffers used throughout the asm (see the predict functions).
#define FDEC_STRIDE 32
#define FENC_STRIDE 16
// SUMSUB_AB: butterfly -- \sum = \a + \b, \sub = \a - \b.
// The add is emitted first, so \sub may alias \a or \b, but \sum must
// not alias either input.
.macro SUMSUB_AB sum, sub, a, b
add \sum, \a, \b
sub \sub, \a, \b
.endm
// unzip: de-interleave -- \t1 gets the even lanes of \s1:\s2, \t2 the
// odd lanes.
.macro unzip t1, t2, s1, s2
uzp1 \t1, \s1, \s2
uzp2 \t2, \s1, \s2
.endm
// transpose: lane-pair transpose of \s1/\s2 (trn1/trn2 building block
// for the larger transposes below).
.macro transpose t1, t2, s1, s2
trn1 \t1, \s1, \s2
trn2 \t2, \s1, \s2
.endm
// transpose4x4.h: transpose a 4x4 block of 16-bit lanes held in the
// 64-bit vectors \v0-\v3, using \t0-\t3 as scratch.
.macro transpose4x4.h v0, v1, v2, v3, t0, t1, t2, t3
transpose \t0\().2s, \t2\().2s, \v0\().2s, \v2\().2s
transpose \t1\().2s, \t3\().2s, \v1\().2s, \v3\().2s
transpose \v0\().4h, \v1\().4h, \t0\().4h, \t1\().4h
transpose \v2\().4h, \v3\().4h, \t2\().4h, \t3\().4h
.endm
// transpose4x8.h: same pattern on 128-bit vectors -- transposes the
// 16-bit lanes of the four 8-halfword rows \v0-\v3; \t0-\t3 scratch.
.macro transpose4x8.h v0, v1, v2, v3, t0, t1, t2, t3
transpose \t0\().4s, \t2\().4s, \v0\().4s, \v2\().4s
transpose \t1\().4s, \t3\().4s, \v1\().4s, \v3\().4s
transpose \v0\().8h, \v1\().8h, \t0\().8h, \t1\().8h
transpose \v2\().8h, \v3\().8h, \t2\().8h, \t3\().8h
.endm
// transpose8x8.h: full 8x8 transpose of 16-bit lanes. Rows enter and
// leave in \r0-\r7; \r8/\r9 are scratch. Works in three stages of
// trn1/trn2 at widening element sizes (.8h -> .4s -> .2d).
// NOTE(review): the intermediate register assignments are deliberately
// permuted so each stage reads values the previous one just produced;
// do not reorder.
.macro transpose8x8.h r0, r1, r2, r3, r4, r5, r6, r7, r8, r9
trn1 \r8\().8h, \r0\().8h, \r1\().8h
trn2 \r9\().8h, \r0\().8h, \r1\().8h
trn1 \r1\().8h, \r2\().8h, \r3\().8h
trn2 \r3\().8h, \r2\().8h, \r3\().8h
trn1 \r0\().8h, \r4\().8h, \r5\().8h
trn2 \r5\().8h, \r4\().8h, \r5\().8h
trn1 \r2\().8h, \r6\().8h, \r7\().8h
trn2 \r7\().8h, \r6\().8h, \r7\().8h
trn1 \r4\().4s, \r0\().4s, \r2\().4s
trn2 \r2\().4s, \r0\().4s, \r2\().4s
trn1 \r6\().4s, \r5\().4s, \r7\().4s
trn2 \r7\().4s, \r5\().4s, \r7\().4s
trn1 \r5\().4s, \r9\().4s, \r3\().4s
trn2 \r9\().4s, \r9\().4s, \r3\().4s
trn1 \r3\().4s, \r8\().4s, \r1\().4s
trn2 \r8\().4s, \r8\().4s, \r1\().4s
trn1 \r0\().2d, \r3\().2d, \r4\().2d
trn2 \r4\().2d, \r3\().2d, \r4\().2d
trn1 \r1\().2d, \r5\().2d, \r6\().2d
trn2 \r5\().2d, \r5\().2d, \r6\().2d
trn2 \r6\().2d, \r8\().2d, \r2\().2d
trn1 \r2\().2d, \r8\().2d, \r2\().2d
trn1 \r3\().2d, \r9\().2d, \r7\().2d
trn2 \r7\().2d, \r9\().2d, \r7\().2d
.endm
// transpose_8x16.b: transpose 8 rows of 16 bytes held in \r0-\r7,
// using \t0/\t1 as scratch (stages: .16b -> .8h -> .4s trn pairs).
.macro transpose_8x16.b r0, r1, r2, r3, r4, r5, r6, r7, t0, t1
trn1 \t0\().16b, \r0\().16b, \r1\().16b
trn2 \t1\().16b, \r0\().16b, \r1\().16b
trn1 \r1\().16b, \r2\().16b, \r3\().16b
trn2 \r3\().16b, \r2\().16b, \r3\().16b
trn1 \r0\().16b, \r4\().16b, \r5\().16b
trn2 \r5\().16b, \r4\().16b, \r5\().16b
trn1 \r2\().16b, \r6\().16b, \r7\().16b
trn2 \r7\().16b, \r6\().16b, \r7\().16b
trn1 \r4\().8h, \r0\().8h, \r2\().8h
trn2 \r2\().8h, \r0\().8h, \r2\().8h
trn1 \r6\().8h, \r5\().8h, \r7\().8h
trn2 \r7\().8h, \r5\().8h, \r7\().8h
trn1 \r5\().8h, \t1\().8h, \r3\().8h
trn2 \t1\().8h, \t1\().8h, \r3\().8h
trn1 \r3\().8h, \t0\().8h, \r1\().8h
trn2 \t0\().8h, \t0\().8h, \r1\().8h
trn1 \r0\().4s, \r3\().4s, \r4\().4s
trn2 \r4\().4s, \r3\().4s, \r4\().4s
trn1 \r1\().4s, \r5\().4s, \r6\().4s
trn2 \r5\().4s, \r5\().4s, \r6\().4s
trn2 \r6\().4s, \t0\().4s, \r2\().4s
trn1 \r2\().4s, \t0\().4s, \r2\().4s
trn1 \r3\().4s, \t1\().4s, \r7\().4s
trn2 \r7\().4s, \t1\().4s, \r7\().4s
.endm
// transpose_4x16.b: transpose 4 rows of 16 bytes in \r0-\r3;
// \t4-\t7 are scratch.
.macro transpose_4x16.b r0, r1, r2, r3, t4, t5, t6, t7
trn1 \t4\().16b, \r0\().16b, \r1\().16b
trn2 \t5\().16b, \r0\().16b, \r1\().16b
trn1 \t6\().16b, \r2\().16b, \r3\().16b
trn2 \t7\().16b, \r2\().16b, \r3\().16b
trn1 \r0\().8h, \t4\().8h, \t6\().8h
trn2 \r2\().8h, \t4\().8h, \t6\().8h
trn1 \r1\().8h, \t5\().8h, \t7\().8h
trn2 \r3\().8h, \t5\().8h, \t7\().8h
.endm
// transpose_4x8.b: 64-bit-vector variant of transpose_4x16.b --
// transposes 4 rows of 8 bytes in \r0-\r3; \t4-\t7 scratch.
.macro transpose_4x8.b r0, r1, r2, r3, t4, t5, t6, t7
trn1 \t4\().8b, \r0\().8b, \r1\().8b
trn2 \t5\().8b, \r0\().8b, \r1\().8b
trn1 \t6\().8b, \r2\().8b, \r3\().8b
trn2 \t7\().8b, \r2\().8b, \r3\().8b
trn1 \r0\().4h, \t4\().4h, \t6\().4h
trn2 \r2\().4h, \t4\().4h, \t6\().4h
trn1 \r1\().4h, \t5\().4h, \t7\().4h
trn2 \r3\().4h, \t5\().4h, \t7\().4h
.endm
|
aestream/faery
| 28,714
|
src/mp4/x264/common/aarch64/predict-a.S
|
/*****************************************************************************
* predict.S: aarch64 intra prediction
*****************************************************************************
* Copyright (C) 2009-2024 x264 project
*
* Authors: David Conrad <lessen42@gmail.com>
* Mans Rullgard <mans@mansr.com>
* Janne Grunau <janne-x264@jannau.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "asm.S"
// Weight tables for the plane (p) predictors: {1..4} twice for the
// 8-wide case, {1..8} for the 16-wide case.
const p8weight, align=4
.short 1, 2, 3, 4, 1, 2, 3, 4
endconst
const p16weight, align=4
.short 1, 2, 3, 4, 5, 6, 7, 8
endconst
// ldcol.8: gather a column of \n bytes into \vd, loading one byte from
// [\xn] and advancing \xn by \xm (a stride) after each load. With
// n=4, \hi selects whether lanes 0-3 (hi=0) or 4-7 (hi=1) are filled.
.macro ldcol.8 vd, xn, xm, n=8, hi=0
.if \n == 8 || \hi == 0
ld1 {\vd\().b}[0], [\xn], \xm
ld1 {\vd\().b}[1], [\xn], \xm
ld1 {\vd\().b}[2], [\xn], \xm
ld1 {\vd\().b}[3], [\xn], \xm
.endif
.if \n == 8 || \hi == 1
ld1 {\vd\().b}[4], [\xn], \xm
ld1 {\vd\().b}[5], [\xn], \xm
ld1 {\vd\().b}[6], [\xn], \xm
ld1 {\vd\().b}[7], [\xn], \xm
.endif
.endm
// ldcol.16: gather a 16-byte strided column into \vd.
.macro ldcol.16 vd, xn, xm
ldcol.8 \vd, \xn, \xm
ld1 {\vd\().b}[ 8], [\xn], \xm
ld1 {\vd\().b}[ 9], [\xn], \xm
ld1 {\vd\().b}[10], [\xn], \xm
ld1 {\vd\().b}[11], [\xn], \xm
ld1 {\vd\().b}[12], [\xn], \xm
ld1 {\vd\().b}[13], [\xn], \xm
ld1 {\vd\().b}[14], [\xn], \xm
ld1 {\vd\().b}[15], [\xn], \xm
.endm
// predict_4x4_h: horizontal prediction -- each of the 4 rows is filled
// with the left-neighbour byte of that row (at column -1), splatted
// across 4 bytes by multiplying with 0x01010101.
// x0 = dst, rows FDEC_STRIDE apart. ldurb handles the negative
// (unscaled) offset of the first row's neighbour.
function predict_4x4_h_aarch64, export=1
ldurb w1, [x0, #0*FDEC_STRIDE-1]
mov w5, #0x01010101 // byte-splat multiplier
ldrb w2, [x0, #1*FDEC_STRIDE-1]
ldrb w3, [x0, #2*FDEC_STRIDE-1]
mul w1, w1, w5
ldrb w4, [x0, #3*FDEC_STRIDE-1]
mul w2, w2, w5
str w1, [x0, #0*FDEC_STRIDE]
mul w3, w3, w5
str w2, [x0, #1*FDEC_STRIDE]
mul w4, w4, w5
str w3, [x0, #2*FDEC_STRIDE]
str w4, [x0, #3*FDEC_STRIDE]
ret
endfunc
// predict_4x4_v: vertical prediction -- copy the 4 bytes above the
// block into every row.
function predict_4x4_v_aarch64, export=1
ldur w1, [x0, #0 - 1 * FDEC_STRIDE]
str w1, [x0, #0 + 0 * FDEC_STRIDE]
str w1, [x0, #0 + 1 * FDEC_STRIDE]
str w1, [x0, #0 + 2 * FDEC_STRIDE]
str w1, [x0, #0 + 3 * FDEC_STRIDE]
ret
endfunc
// predict_4x4_dc: DC prediction -- every pixel is the rounded mean of
// the 4 top and 4 left neighbours: (sum + 4) >> 3.
function predict_4x4_dc_neon, export=1
sub x1, x0, #FDEC_STRIDE
ldurb w4, [x0, #-1 + 0 * FDEC_STRIDE]
ldrb w5, [x0, #-1 + 1 * FDEC_STRIDE]
ldrb w6, [x0, #-1 + 2 * FDEC_STRIDE]
ldrb w7, [x0, #-1 + 3 * FDEC_STRIDE]
add w4, w4, w5
ldr s0, [x1] // 4 top bytes; the 32-bit load zeroes lanes 4-7
add w6, w6, w7
uaddlv h0, v0.8b // sums just the 4 top neighbours (rest are zero)
add w4, w4, w6 // w4 = sum of the 4 left neighbours
dup v0.4h, v0.h[0]
dup v1.4h, w4
add v0.4h, v0.4h, v1.4h
rshrn v0.8b, v0.8h, #3 // rounded (sum_top + sum_left) >> 3
str s0, [x0]
str s0, [x0, #1 * FDEC_STRIDE]
str s0, [x0, #2 * FDEC_STRIDE]
str s0, [x0, #3 * FDEC_STRIDE]
ret
endfunc
// predict_4x4_dc_top: DC prediction from the 4 top neighbours only --
// every pixel = (sum_top + 2) >> 2. x0 = dst, rows FDEC_STRIDE apart.
// Fix: the original ended with two consecutive 'ret' instructions; the
// second was unreachable dead code and has been removed.
function predict_4x4_dc_top_neon, export=1
sub x1, x0, #FDEC_STRIDE
ldr s0, [x1] // 4 top bytes; the 32-bit load zeroes lanes 4-7
uaddlv h0, v0.8b // zeroed lanes contribute nothing to the sum
dup v0.4h, v0.h[0]
rshrn v0.8b, v0.8h, #2 // rounded >> 2
str s0, [x0]
str s0, [x0, #1 * FDEC_STRIDE]
str s0, [x0, #2 * FDEC_STRIDE]
str s0, [x0, #3 * FDEC_STRIDE]
ret
endfunc
// predict_4x4_ddr: down-right diagonal prediction. Gathers the
// top-left corner, the top row and the left column into one vector,
// applies the (a + 2b + c + 2) >> 2 lowpass, then stores the four
// rows as shifted views of the filtered vector.
function predict_4x4_ddr_neon, export=1
sub x1, x0, #FDEC_STRIDE+1
mov x7, #FDEC_STRIDE
ld1 {v0.8b}, [x1], x7 // # -FDEC_STRIDE-1
ld1r {v1.8b}, [x1], x7 // #0*FDEC_STRIDE-1
ld1r {v2.8b}, [x1], x7 // #1*FDEC_STRIDE-1
ext v0.8b, v1.8b, v0.8b, #7
ld1r {v3.8b}, [x1], x7 // #2*FDEC_STRIDE-1
ext v0.8b, v2.8b, v0.8b, #7 // a
ld1r {v4.8b}, [x1], x7 // #3*FDEC_STRIDE-1
ext v1.8b, v3.8b, v0.8b, #7 // b
ext v2.8b, v4.8b, v1.8b, #7 // c
uaddl v0.8h, v0.8b, v1.8b
uaddl v1.8h, v1.8b, v2.8b
add v0.8h, v0.8h, v1.8h // a + 2b + c
rshrn v0.8b, v0.8h, #2 // rounded >> 2
ext v3.8b, v0.8b, v0.8b, #3
ext v2.8b, v0.8b, v0.8b, #2
ext v1.8b, v0.8b, v0.8b, #1
str s3, [x0], #FDEC_STRIDE
str s2, [x0], #FDEC_STRIDE
str s1, [x0], #FDEC_STRIDE
str s0, [x0]
ret
endfunc
// predict_4x4_ddl: down-left diagonal prediction from the 8 bytes
// above the block, with the same lowpass filter; successive rows are
// successive 1-byte shifts of the filtered vector.
function predict_4x4_ddl_neon, export=1
sub x0, x0, #FDEC_STRIDE
mov x7, #FDEC_STRIDE
ld1 {v0.8b}, [x0], x7
dup v3.8b, v0.b[7] // replicate last byte past the edge
ext v1.8b, v0.8b, v0.8b, #1
ext v2.8b, v0.8b, v3.8b, #2
uhadd v0.8b, v0.8b, v2.8b // (a + c) >> 1
urhadd v0.8b, v0.8b, v1.8b // then rounded avg with b
str s0, [x0], #FDEC_STRIDE
ext v1.8b, v0.8b, v0.8b, #1
ext v2.8b, v0.8b, v0.8b, #2
str s1, [x0], #FDEC_STRIDE
ext v3.8b, v0.8b, v0.8b, #3
str s2, [x0], #FDEC_STRIDE
str s3, [x0]
ret
endfunc
// 8x8 luma predictors. x0 = dst (FDEC_STRIDE-strided); x1 appears to
// point to a packed neighbour ("edge") buffer -- left pixels in the
// first 16 bytes, top pixels following. TODO(review): confirm the
// exact edge-buffer layout against the caller.
// predict_8x8_dc: every pixel = rounded mean of 16 neighbour bytes.
function predict_8x8_dc_neon, export=1
mov x7, #FDEC_STRIDE
ld1 {v0.16b}, [x1], #16
ld1 {v1.8b}, [x1]
ext v0.16b, v0.16b, v0.16b, #7
uaddlv h1, v1.8b
uaddlv h0, v0.8b
add v0.8h, v0.8h, v1.8h
dup v0.8h, v0.h[0]
rshrn v0.8b, v0.8h, #4 // (sum + 8) >> 4
.rept 8
st1 {v0.8b}, [x0], x7
.endr
ret
endfunc
// predict_8x8_h: each row is the corresponding left-neighbour byte
// (taken from the edge buffer) splatted across 8 pixels.
function predict_8x8_h_neon, export=1
mov x7, #FDEC_STRIDE
ld1 {v16.16b}, [x1]
dup v0.8b, v16.b[14]
dup v1.8b, v16.b[13]
st1 {v0.8b}, [x0], x7
dup v2.8b, v16.b[12]
st1 {v1.8b}, [x0], x7
dup v3.8b, v16.b[11]
st1 {v2.8b}, [x0], x7
dup v4.8b, v16.b[10]
st1 {v3.8b}, [x0], x7
dup v5.8b, v16.b[9]
st1 {v4.8b}, [x0], x7
dup v6.8b, v16.b[8]
st1 {v5.8b}, [x0], x7
dup v7.8b, v16.b[7]
st1 {v6.8b}, [x0], x7
st1 {v7.8b}, [x0], x7
ret
endfunc
// predict_8x8_v: copy the 8 top-neighbour bytes (edge buffer + 16)
// into all 8 rows.
function predict_8x8_v_neon, export=1
add x1, x1, #16
mov x7, #FDEC_STRIDE
ld1 {v0.8b}, [x1]
.rept 8
st1 {v0.8b}, [x0], x7
.endr
ret
endfunc
// predict_8x8_ddl: down-left diagonal prediction from the 16 bytes
// above/above-right (edge buffer + 16). Applies the 1-2-1 lowpass
// once, then each row is a successive 1-byte shift of the result.
function predict_8x8_ddl_neon, export=1
add x1, x1, #16
mov x7, #FDEC_STRIDE
ld1 {v0.16b}, [x1]
movi v3.16b, #0
dup v2.16b, v0.b[15] // replicate the last byte past the edge
ext v4.16b, v3.16b, v0.16b, #15
ext v2.16b, v0.16b, v2.16b, #1
uhadd v4.16b, v4.16b, v2.16b // (left_nb + right_nb) >> 1
urhadd v0.16b, v0.16b, v4.16b // rounded avg with the centre
ext v1.16b, v0.16b, v0.16b, #1
ext v2.16b, v0.16b, v0.16b, #2
st1 {v1.8b}, [x0], x7
ext v3.16b, v0.16b, v0.16b, #3
st1 {v2.8b}, [x0], x7
ext v4.16b, v0.16b, v0.16b, #4
st1 {v3.8b}, [x0], x7
ext v5.16b, v0.16b, v0.16b, #5
st1 {v4.8b}, [x0], x7
ext v6.16b, v0.16b, v0.16b, #6
st1 {v5.8b}, [x0], x7
ext v7.16b, v0.16b, v0.16b, #7
st1 {v6.8b}, [x0], x7
ext v0.16b, v0.16b, v0.16b, #8
st1 {v7.8b}, [x0], x7
st1 {v0.8b}, [x0], x7
ret
endfunc
// predict_8x8_ddr: down-right diagonal prediction. Filters a 32-byte
// neighbour window from the edge buffer, then writes the rows bottom
// up (x7 = negative stride) as successive shifts of the result.
function predict_8x8_ddr_neon, export=1
ld1 {v0.16b,v1.16b}, [x1]
ext v2.16b, v0.16b, v1.16b, #7
ext v4.16b, v0.16b, v1.16b, #9
ext v3.16b, v0.16b, v1.16b, #8
uhadd v2.16b, v2.16b, v4.16b
urhadd v7.16b, v3.16b, v2.16b // lowpassed diagonal
add x0, x0, #7*FDEC_STRIDE
mov x7, #-1*FDEC_STRIDE // walk rows bottom-up
ext v6.16b, v7.16b, v7.16b, #1
st1 {v7.8b}, [x0], x7
ext v5.16b, v7.16b, v7.16b, #2
st1 {v6.8b}, [x0], x7
ext v4.16b, v7.16b, v7.16b, #3
st1 {v5.8b}, [x0], x7
ext v3.16b, v7.16b, v7.16b, #4
st1 {v4.8b}, [x0], x7
ext v2.16b, v7.16b, v7.16b, #5
st1 {v3.8b}, [x0], x7
ext v1.16b, v7.16b, v7.16b, #6
st1 {v2.8b}, [x0], x7
ext v0.16b, v7.16b, v7.16b, #7
st1 {v1.8b}, [x0], x7
st1 {v0.8b}, [x0], x7
ret
endfunc
// predict_8x8_vl: vertical-left prediction from the top neighbours
// (edge buffer + 16). Even rows come from the 2-tap average (v3),
// odd rows from the 3-tap lowpass (v0), progressively shifted.
// NOTE(review): the first two ext ops read lanes of v1/v2 that are not
// initialised in this function; presumably those lanes never reach the
// 8 bytes stored per row -- verify before touching this code.
function predict_8x8_vl_neon, export=1
add x1, x1, #16
mov x7, #FDEC_STRIDE
ld1 {v0.16b}, [x1]
ext v1.16b, v1.16b, v0.16b, #15
ext v2.16b, v0.16b, v2.16b, #1
uhadd v1.16b, v1.16b, v2.16b
urhadd v3.16b, v0.16b, v2.16b // 2-tap: (cur + next + 1) >> 1
urhadd v0.16b, v0.16b, v1.16b // 3-tap lowpass
ext v4.16b, v0.16b, v0.16b, #1
st1 {v3.8b}, [x0], x7
ext v5.16b, v3.16b, v3.16b, #1
st1 {v4.8b}, [x0], x7
ext v6.16b, v0.16b, v0.16b, #2
st1 {v5.8b}, [x0], x7
ext v7.16b, v3.16b, v3.16b, #2
st1 {v6.8b}, [x0], x7
ext v4.16b, v0.16b, v0.16b, #3
st1 {v7.8b}, [x0], x7
ext v5.16b, v3.16b, v3.16b, #3
st1 {v4.8b}, [x0], x7
ext v6.16b, v0.16b, v0.16b, #4
st1 {v5.8b}, [x0], x7
st1 {v6.8b}, [x0], x7
ret
endfunc
// predict_8x8_vr: vertical-right prediction. Builds the 2-tap and
// 3-tap filtered neighbour vectors, de-interleaves the left-column
// contributions (uzp1/uzp2), and prepends them row by row via ext.
function predict_8x8_vr_neon, export=1
add x1, x1, #8
mov x7, #FDEC_STRIDE
ld1 {v2.16b}, [x1]
ext v1.16b, v2.16b, v2.16b, #14
ext v0.16b, v2.16b, v2.16b, #15
uhadd v3.16b, v2.16b, v1.16b
urhadd v2.16b, v2.16b, v0.16b // 2-tap average
urhadd v0.16b, v0.16b, v3.16b // 3-tap lowpass
ext v1.16b, v2.16b, v2.16b, #8
uzp1 v2.8b, v0.8b, v0.8b // even lanes of the lowpass
uzp2 v3.8b, v0.8b, v0.8b // odd lanes of the lowpass
ext v0.16b, v0.16b, v0.16b, #8
st1 {v1.8b}, [x0], x7
st1 {v0.8b}, [x0], x7
ext v4.8b, v3.8b, v1.8b, #7
ext v5.8b, v2.8b, v0.8b, #7
st1 {v4.8b}, [x0], x7
st1 {v5.8b}, [x0], x7
ext v6.8b, v3.8b, v1.8b, #6
ext v7.8b, v2.8b, v0.8b, #6
st1 {v6.8b}, [x0], x7
st1 {v7.8b}, [x0], x7
ext v1.8b, v3.8b, v1.8b, #5
ext v0.8b, v2.8b, v0.8b, #5
st1 {v1.8b}, [x0], x7
st1 {v0.8b}, [x0], x7
ret
endfunc
// predict_8x8_hd: horizontal-down prediction. Interleaves (zip) the
// 2-tap and 3-tap filtered left/corner neighbours, then each row is an
// ext-shifted window of the interleaved pair.
function predict_8x8_hd_neon, export=1
add x1, x1, #7
mov x7, #FDEC_STRIDE
ld1 {v1.16b}, [x1]
ext v3.16b, v1.16b, v1.16b, #1
ext v2.16b, v1.16b, v1.16b, #2
urhadd v4.16b, v1.16b, v3.16b // 2-tap average
uhadd v1.16b, v1.16b, v2.16b
urhadd v0.16b, v1.16b, v3.16b // 3-tap lowpass
zip1 v16.8b, v4.8b, v0.8b // interleave avg/lowpass pairs
zip2 v17.8b, v4.8b, v0.8b
ext v7.16b, v0.16b, v0.16b, #8
ext v0.8b, v17.8b, v7.8b, #6
ext v1.8b, v17.8b, v7.8b, #4
st1 {v0.8b}, [x0], x7
ext v2.8b, v17.8b, v7.8b, #2
st1 {v1.8b}, [x0], x7
st1 {v2.8b}, [x0], x7
ext v3.8b, v16.8b, v17.8b, #6
st1 {v17.8b}, [x0], x7
ext v4.8b, v16.8b, v17.8b, #4
st1 {v3.8b}, [x0], x7
ext v5.8b, v16.8b, v17.8b, #2
st1 {v4.8b}, [x0], x7
st1 {v5.8b}, [x0], x7
st1 {v16.8b}, [x0], x7
ret
endfunc
// predict_8x8_hu: horizontal-up prediction. Reverses the left column,
// filters it, interleaves the 2-tap/3-tap results, and pads the tail
// with the replicated final pair (v18).
function predict_8x8_hu_neon, export=1
add x1, x1, #7
mov x7, #FDEC_STRIDE
ld1 {v7.8b}, [x1]
dup v6.8b, v7.b[0] // replicate the bottom-most pixel
rev64 v7.8b, v7.8b
ext v4.8b, v7.8b, v6.8b, #2
ext v2.8b, v7.8b, v6.8b, #1
uhadd v5.8b, v7.8b, v4.8b
urhadd v0.8b, v2.8b, v7.8b // 2-tap average
urhadd v1.8b, v5.8b, v2.8b // 3-tap lowpass
zip1 v16.8b, v0.8b, v1.8b
zip2 v17.8b, v0.8b, v1.8b
dup v18.4h, v17.h[3] // tail padding: last pair repeated
ext v0.8b, v16.8b, v17.8b, #2
ext v1.8b, v16.8b, v17.8b, #4
ext v2.8b, v16.8b, v17.8b, #6
st1 {v16.8b}, [x0], x7
st1 {v0.8b}, [x0], x7
st1 {v1.8b}, [x0], x7
st1 {v2.8b}, [x0], x7
ext v4.8b, v17.8b, v18.8b, #2
ext v5.8b, v17.8b, v18.8b, #4
ext v6.8b, v17.8b, v18.8b, #6
st1 {v17.8b}, [x0], x7
st1 {v4.8b}, [x0], x7
st1 {v5.8b}, [x0], x7
st1 {v6.8b}, [x0]
ret
endfunc
// 8x8 chroma DC predictors. The three entry points compute the per-
// quadrant DC values differently (top-only, left-only, or full) and
// all fall through / branch into the shared store tail
// pred8x8c_dc_end inside predict_8x8c_dc_neon below, which expects
// v0 = rows 0-3 pattern, v1 = rows 4-7 pattern, x1 = FDEC_STRIDE.
function predict_8x8c_dc_top_neon, export=1
sub x2, x0, #FDEC_STRIDE
mov x1, #FDEC_STRIDE
ld1 {v0.8b}, [x2]
uaddlp v0.4h, v0.8b // pairwise sums of the top row
addp v0.4h, v0.4h, v0.4h // 4-byte group sums
rshrn v0.8b, v0.8h, #2
dup v3.8b, v0.b[1] // right-half DC
dup v2.8b, v0.b[0] // left-half DC
transpose v0.2s, v1.2s, v2.2s, v3.2s
b pred8x8c_dc_end
endfunc
function predict_8x8c_dc_left_neon, export=1
ldurb w2, [x0, #0 * FDEC_STRIDE - 1]
ldrb w3, [x0, #1 * FDEC_STRIDE - 1]
ldrb w4, [x0, #2 * FDEC_STRIDE - 1]
ldrb w5, [x0, #3 * FDEC_STRIDE - 1]
mov x1, #FDEC_STRIDE
add w2, w2, w3
add w3, w4, w5
ldrb w6, [x0, #4 * FDEC_STRIDE - 1]
ldrb w7, [x0, #5 * FDEC_STRIDE - 1]
ldrb w8, [x0, #6 * FDEC_STRIDE - 1]
ldrb w9, [x0, #7 * FDEC_STRIDE - 1]
add w6, w6, w7
add w7, w8, w9
add w2, w2, w3 // sum of the upper 4 left neighbours
add w6, w6, w7 // sum of the lower 4 left neighbours
dup v0.8h, w2
dup v1.8h, w6
rshrn v0.8b, v0.8h, #2
rshrn v1.8b, v1.8h, #2
b pred8x8c_dc_end
endfunc
// Full DC: combines top-row pair sums (s0,s1) with left-column sums
// (s2,s3) to produce the four quadrant DC values per H.264 rules.
function predict_8x8c_dc_neon, export=1
mov x1, #FDEC_STRIDE
sub x2, x0, #FDEC_STRIDE
ldurb w10, [x0, #0 * FDEC_STRIDE - 1]
ldrb w11, [x0, #1 * FDEC_STRIDE - 1]
ldrb w12, [x0, #2 * FDEC_STRIDE - 1]
ldrb w13, [x0, #3 * FDEC_STRIDE - 1]
add w10, w10, w11
ldrb w4, [x0, #4 * FDEC_STRIDE - 1]
ldrb w5, [x0, #5 * FDEC_STRIDE - 1]
add w12, w12, w13
ldrb w6, [x0, #6 * FDEC_STRIDE - 1]
ldrb w7, [x0, #7 * FDEC_STRIDE - 1]
add w4, w4, w5
add w6, w6, w7
add w10, w10, w12, lsl #16 // pack upper-left sums into one reg
add w4, w4, w6, lsl #16 // pack lower-left sums
ld1 {v0.8b}, [x2]
add x10, x10, x4, lsl #32
uaddlp v0.4h, v0.8b // s0, s1
mov v1.d[0], x10 // s2, s3
add v3.4h, v0.4h, v1.4h
addp v0.4h, v0.4h, v1.4h // s0, s1, s2, s3
addp v1.4h, v3.4h, v3.4h // s0+s2, s1+s3, s0+s2, s1+s3
uzp2 v0.4h, v0.4h, v0.4h // s1, s3, s1, s3
uzp1 v1.2d, v1.2d, v1.2d
uzp1 v0.2d, v0.2d, v0.2d
rshrn v3.8b, v1.8h, #3 // corner quadrants: (sA+sB+4)>>3
rshrn v2.8b, v0.8h, #2 // edge quadrants: (s+2)>>2
uzp1 v0.8b, v3.8b, v2.8b
uzp2 v1.8b, v2.8b, v3.8b
pred8x8c_dc_end:
// Shared store tail: rows 0-3 get v0's pattern, rows 4-7 get v1's.
add x2, x0, #2 * FDEC_STRIDE
add x4, x0, #4 * FDEC_STRIDE
add x5, x0, #6 * FDEC_STRIDE
st1 {v0.8b}, [x0], x1
st1 {v0.8b}, [x2], x1
st1 {v0.8b}, [x0]
st1 {v0.8b}, [x2]
st1 {v1.8b}, [x4], x1
st1 {v1.8b}, [x5], x1
st1 {v1.8b}, [x4]
st1 {v1.8b}, [x5]
ret
endfunc
// predict_8x8c_h: each row is its left-neighbour byte broadcast across
// the row (ld1r splats the loaded byte into all lanes).
function predict_8x8c_h_neon, export=1
sub x1, x0, #1
mov x7, #FDEC_STRIDE
.rept 4
ld1r {v0.8b}, [x1], x7
ld1r {v1.8b}, [x1], x7
st1 {v0.8b}, [x0], x7
st1 {v1.8b}, [x0], x7
.endr
ret
endfunc
// predict_8x8c_v: copy the 8 bytes above the block into all 8 rows
// (plain GPR loads/stores; no NEON needed).
function predict_8x8c_v_aarch64, export=1
ldur x1, [x0, #-FDEC_STRIDE]
.irp c, 0,1,2,3,4,5,6,7
str x1, [x0, #\c * FDEC_STRIDE]
.endr
ret
endfunc
// predict_8x8c_p: 8x8 chroma plane prediction. Computes the H/V
// gradients from weighted differences of mirrored top/left neighbours
// (p8weight), derives the b/c slopes and the base value a, then emits
// each row as a saturating-narrowed linear ramp.
function predict_8x8c_p_neon, export=1
sub x3, x0, #FDEC_STRIDE
mov x1, #FDEC_STRIDE
add x2, x3, #4
sub x3, x3, #1
ld1 {v0.s}[0], [x3] // top-left 4 neighbours
ld1 {v2.s}[0], [x2], x1 // top-right 4 neighbours
ldcol.8 v0, x3, x1, 4, hi=1 // upper-left column into lanes 4-7
add x3, x3, x1
ldcol.8 v3, x3, x1, 4 // lower-left column
movrel x4, p8weight
movrel x5, p16weight
uaddl v4.8h, v2.8b, v3.8b
rev32 v0.8b, v0.8b // mirror the "before-centre" neighbours
trn1 v2.2s, v2.2s, v3.2s
ld1 {v7.8h}, [x4]
usubl v2.8h, v2.8b, v0.8b // paired differences
mul v2.8h, v2.8h, v7.8h // weighted by {1..4}
ld1 {v0.8h}, [x5]
saddlp v2.4s, v2.8h
addp v2.4s, v2.4s, v2.4s // H and V gradient sums
shl v3.2s, v2.2s, #4
add v2.2s, v2.2s, v3.2s // 17 * gradient
rshrn v5.4h, v2.4s, #5 // b, c, x, x
addp v2.4h, v5.4h, v5.4h
shl v3.4h, v2.4h, #2
sub v3.4h, v3.4h, v2.4h // 3 * (b + c)
rev64 v4.4h, v4.4h
add v4.4h, v4.4h, v0.4h
shl v2.4h, v4.4h, #4 // a
sub v2.4h, v2.4h, v3.4h // a - 3 * (b + c) + 16
ext v0.16b, v0.16b, v0.16b, #14
sub v6.4h, v5.4h, v3.4h
mov v0.h[0], wzr // weights become {0..7}
mul v0.8h, v0.8h, v5.h[0] // 0,1,2,3,4,5,6,7 * b
dup v1.8h, v2.h[0] // pix
dup v2.8h, v5.h[1] // c
add v1.8h, v1.8h, v0.8h // pix + x*b
mov x3, #8
1:
subs x3, x3, #1
sqshrun v0.8b, v1.8h, #5 // clamp and narrow one row
add v1.8h, v1.8h, v2.8h // advance by c per row
st1 {v0.8b}, [x0], x1
b.ne 1b
ret
endfunc
// loadsum4: load the four left-neighbour bytes of rows \idx..\idx+3
// (column -1 relative to \x) and leave their sum in \wd.
// \t1-\t3 are scratch. ldurb is required when \idx == 0 because the
// resulting offset (-1) is negative/unscaled.
.macro loadsum4 wd, t1, t2, t3, x, idx
.if \idx == 0
ldurb \wd, [\x, #(\idx + 0) * FDEC_STRIDE - 1]
.else
ldrb \wd, [\x, #(\idx + 0) * FDEC_STRIDE - 1]
.endif
ldrb \t1, [\x, #(\idx + 1) * FDEC_STRIDE - 1]
ldrb \t2, [\x, #(\idx + 2) * FDEC_STRIDE - 1]
ldrb \t3, [\x, #(\idx + 3) * FDEC_STRIDE - 1]
add \wd, \wd, \t1
add \t1, \t2, \t3
add \wd, \wd, \t1
.endm
// predict_8x16c_h: horizontal prediction for 8x16 chroma. Two
// pointers (x0 even rows, x1 odd rows) advance by 2*FDEC_STRIDE so
// four rows are produced per iteration.
function predict_8x16c_h_neon, export=1
sub x2, x0, #1
add x3, x0, #FDEC_STRIDE - 1
mov x7, #2 * FDEC_STRIDE
add x1, x0, #FDEC_STRIDE
.rept 4
ld1r {v0.8b}, [x2], x7
ld1r {v1.8b}, [x3], x7
ld1r {v2.8b}, [x2], x7
ld1r {v3.8b}, [x3], x7
st1 {v0.8b}, [x0], x7
st1 {v1.8b}, [x1], x7
st1 {v2.8b}, [x0], x7
st1 {v3.8b}, [x1], x7
.endr
ret
endfunc
// predict_8x16c_v: copy the 8 bytes above the block into all 16 rows
// (two rows per iteration via the interleaved x0/x1 pointers).
function predict_8x16c_v_neon, export=1
sub x1, x0, #FDEC_STRIDE
mov x2, #2 * FDEC_STRIDE
ld1 {v0.8b}, [x1], x2
.rept 8
st1 {v0.8b}, [x0], x2
st1 {v0.8b}, [x1], x2
.endr
ret
endfunc
// predict_8x16c_p: 8x16 chroma plane prediction. Same scheme as the
// 8x8 version but with 16 rows: H gradient from the top row, V
// gradient from the 16-deep left column, b/c slopes derived with the
// 17H/5V weightings, then two rows emitted per loop iteration.
function predict_8x16c_p_neon, export=1
movrel x4, p16weight
ld1 {v17.8h}, [x4]
sub x3, x0, #FDEC_STRIDE
mov x1, #FDEC_STRIDE
add x2, x3, #4
sub x3, x3, #1
ld1 {v0.8b}, [x3] // top-left neighbours
ld1 {v2.8b}, [x2], x1 // top-right neighbours
ldcol.8 v1, x3, x1 // upper 8 of the left column
add x3, x3, x1
ldcol.8 v3, x3, x1 // lower 8 of the left column
ext v4.8b, v2.8b, v2.8b, #3
ext v5.8b, v3.8b, v3.8b, #7
rev32 v0.8b, v0.8b
rev64 v1.8b, v1.8b
uaddl v4.8h, v5.8b, v4.8b // a * 1/16
usubl v2.8h, v2.8b, v0.8b
mul v2.8h, v2.8h, v17.8h
saddlp v2.4s, v2.8h
addp v2.4s, v2.4s, v2.4s // H
usubl v3.8h, v3.8b, v1.8b
mul v3.8h, v3.8h, v17.8h
saddlp v3.4s, v3.8h
addp v3.4s, v3.4s, v3.4s
addp v3.4s, v3.4s, v3.4s // V
ext v17.16b, v17.16b, v17.16b, #14
shl v4.4h, v4.4h, #4 // a
shl v6.2s, v2.2s, #4 // 16 * H
shl v7.2s, v3.2s, #2 // 4 * V
add v2.2s, v2.2s, v6.2s // 17 * H
add v3.2s, v3.2s, v7.2s // 5 * V
rshrn v2.4h, v2.4s, #5 // b
rshrn v3.4h, v3.4s, #6 // c
mov v17.h[0], wzr // weights become {0..7}
sub v4.4h, v4.4h, v2.4h // a - b
shl v6.4h, v2.4h, #1 // 2 * b
add v4.4h, v4.4h, v3.4h // a - b + c
shl v7.4h, v3.4h, #3 // 8 * c
sub v4.4h, v4.4h, v6.4h // a - 3b + c
sub v4.4h, v4.4h, v7.4h // a - 3b - 7c
mul v0.8h, v17.8h, v2.h[0] // 0,1,2,3,4,5,6,7 * b
dup v1.8h, v4.h[0] // i00
dup v2.8h, v3.h[0] // c
add v1.8h, v1.8h, v0.8h // pix + {0..7}*b
mov x3, #16
1:
subs x3, x3, #2
sqrshrun v4.8b, v1.8h, #5 // row i
add v1.8h, v1.8h, v2.8h
sqrshrun v5.8b, v1.8h, #5 // row i+1
st1 {v4.8b}, [x0], x1
add v1.8h, v1.8h, v2.8h
st1 {v5.8b}, [x0], x1
b.ne 1b
ret
endfunc
// predict_8x16c_dc: full DC prediction for 8x16 chroma. Top-row half
// sums s0/s1 come from the NEON pairwise adds; left-column sums
// s2..s5 come from loadsum4 over four 4-row groups. Each 4-row band
// gets its own left/right DC pair, combined per H.264 chroma DC rules.
function predict_8x16c_dc_neon, export=1
mov x1, #FDEC_STRIDE
sub x10, x0, #FDEC_STRIDE
loadsum4 w2, w3, w4, w5, x0, 0 // s2: left rows 0-3
ld1 {v6.8b}, [x10] // top row
loadsum4 w6, w7, w8, w9, x0, 4 // s3: left rows 4-7
uaddlp v6.4h, v6.8b
dup v22.8h, w2 // s2
dup v23.8h, w6 // s3
loadsum4 w2, w3, w4, w5, x0, 8 // s4: left rows 8-11
addp v6.4h, v6.4h, v6.4h // s0, s1
loadsum4 w6, w7, w8, w9, x0, 12 // s5: left rows 12-15
dup v20.8h, v6.h[0] // s0
dup v21.8h, v6.h[1] // s1
dup v24.8h, w2 // s4
dup v25.8h, w6 // s5
ext v16.16b, v20.16b, v21.16b, #8 // s0|s1 halves
ext v17.16b, v22.16b, v21.16b, #8 // s2|s1
ext v1.16b, v23.16b, v21.16b, #8 // s3|s1
ext v2.16b, v24.16b, v21.16b, #8 // s4|s1
ext v3.16b, v25.16b, v21.16b, #8 // s5|s1
add v0.8h, v16.8h, v17.8h
add v1.8h, v1.8h, v23.8h
add v2.8h, v2.8h, v24.8h
add v3.8h, v3.8h, v25.8h
rshrn v0.8b, v0.8h, #3 // band DC values, (sum+4)>>3
rshrn v1.8b, v1.8h, #3
rshrn v2.8b, v2.8h, #3
rshrn v3.8b, v3.8h, #3
add x11, x0, #4 * FDEC_STRIDE
add x12, x0, #8 * FDEC_STRIDE
add x13, x0, #12 * FDEC_STRIDE
.rept 4
st1 {v0.8b}, [x0], x1
st1 {v1.8b}, [x11], x1
st1 {v2.8b}, [x12], x1
st1 {v3.8b}, [x13], x1
.endr
ret
endfunc
// predict_8x16c_dc_left: DC from the left column only. Each group of
// four left neighbours yields one DC value ((sum+2)>>2) applied to a
// 4-row band; stores are interleaved with the remaining sums.
function predict_8x16c_dc_left_neon, export=1
mov x1, #FDEC_STRIDE
ldurb w2, [x0, # 0 * FDEC_STRIDE - 1]
ldrb w3, [x0, # 1 * FDEC_STRIDE - 1]
ldrb w4, [x0, # 2 * FDEC_STRIDE - 1]
ldrb w5, [x0, # 3 * FDEC_STRIDE - 1]
add w2, w2, w3
ldrb w6, [x0, # 4 * FDEC_STRIDE - 1]
add w4, w4, w5
ldrb w7, [x0, # 5 * FDEC_STRIDE - 1]
add w2, w2, w4 // rows 0-3 sum
ldrb w8, [x0, # 6 * FDEC_STRIDE - 1]
ldrb w9, [x0, # 7 * FDEC_STRIDE - 1]
dup v0.8h, w2
add w6, w6, w7
rshrn v0.8b, v0.8h, #2 // band 0 DC
add w8, w8, w9
ldrb w10, [x0, # 8 * FDEC_STRIDE - 1]
ldrb w11, [x0, # 9 * FDEC_STRIDE - 1]
add w6, w6, w8 // rows 4-7 sum
ldrb w12, [x0, #10 * FDEC_STRIDE - 1]
ldrb w13, [x0, #11 * FDEC_STRIDE - 1]
dup v1.8h, w6
add w10, w10, w11
rshrn v1.8b, v1.8h, #2 // band 1 DC
add w12, w12, w13
ldrb w2, [x0, #12 * FDEC_STRIDE - 1]
ldrb w3, [x0, #13 * FDEC_STRIDE - 1]
add w10, w10, w12 // rows 8-11 sum
ldrb w4, [x0, #14 * FDEC_STRIDE - 1]
ldrb w5, [x0, #15 * FDEC_STRIDE - 1]
dup v2.8h, w10
add w2, w2, w3
rshrn v2.8b, v2.8h, #2 // band 2 DC
add w4, w4, w5
st1 {v0.8b}, [x0], x1
st1 {v0.8b}, [x0], x1
add w2, w2, w4 // rows 12-15 sum
st1 {v0.8b}, [x0], x1
dup v3.8h, w2
st1 {v0.8b}, [x0], x1
rshrn v3.8b, v3.8h, #2 // band 3 DC
.irp idx, 1, 2, 3
.rept 4
st1 {v\idx\().8b}, [x0], x1
.endr
.endr
ret
endfunc
// predict_8x16c_dc_top: DC from the top row only -- one DC per
// 4-byte half of the top row, replicated down all 16 rows.
function predict_8x16c_dc_top_neon, export=1
sub x2, x0, #FDEC_STRIDE
mov x1, #FDEC_STRIDE
ld1 {v0.8b}, [x2]
uaddlp v0.4h, v0.8b
addp v0.4h, v0.4h, v0.4h // 4-byte group sums
rshrn v4.8b, v0.8h, #2
dup v0.8b, v4.b[0] // left-half DC
dup v1.8b, v4.b[1] // right-half DC
ext v0.8b, v0.8b, v1.8b, #4 // combine halves into one row
.rept 16
st1 {v0.8b}, [x0], x1
.endr
ret
endfunc
// 16x16 luma DC predictors. All three variants compute a single DC
// byte, splat it across a 16-byte vector and share the store loop at
// pred16x16_dc_end (which expects x1 = FDEC_STRIDE).
// dc_top: DC = (sum of 16 top neighbours + 8) >> 4.
function predict_16x16_dc_top_neon, export=1
sub x2, x0, #FDEC_STRIDE
mov x1, #FDEC_STRIDE
ld1 {v0.16b}, [x2]
uaddlv h0, v0.16b
rshrn v0.8b, v0.8h, #4
dup v0.16b, v0.b[0]
b pred16x16_dc_end
endfunc
// dc_left: DC = (sum of 16 left neighbours + 8) >> 4.
function predict_16x16_dc_left_neon, export=1
sub x2, x0, #1
mov x1, #FDEC_STRIDE
ldcol.16 v0, x2, x1 // gather the left column
uaddlv h0, v0.16b
rshrn v0.8b, v0.8h, #4
dup v0.16b, v0.b[0]
b pred16x16_dc_end
endfunc
// dc: DC = (sum of 16 top + 16 left neighbours + 16) >> 5.
function predict_16x16_dc_neon, export=1
sub x3, x0, #FDEC_STRIDE
sub x2, x0, #1
mov x1, #FDEC_STRIDE
ld1 {v0.16b}, [x3]
ldcol.16 v1, x2, x1
uaddlv h0, v0.16b
uaddlv h1, v1.16b
add v0.4h, v0.4h, v1.4h
rshrn v0.8b, v0.8h, #5
dup v0.16b, v0.b[0]
pred16x16_dc_end:
.rept 16
st1 {v0.16b}, [x0], x1
.endr
ret
endfunc
// predict_16x16_h: each row is its left-neighbour byte broadcast
// across 16 pixels (two rows per iteration).
function predict_16x16_h_neon, export=1
sub x1, x0, #1
mov x7, #FDEC_STRIDE
.rept 8
ld1r {v0.16b}, [x1], x7
ld1r {v1.16b}, [x1], x7
st1 {v0.16b}, [x0], x7
st1 {v1.16b}, [x0], x7
.endr
ret
endfunc
// predict_16x16_v: copy the 16 bytes above the block into all 16
// rows (the first post-indexed load steps x0 back to row 0).
function predict_16x16_v_neon, export=1
sub x0, x0, #FDEC_STRIDE
mov x7, #FDEC_STRIDE
ld1 {v0.16b}, [x0], x7
.rept 16
st1 {v0.16b}, [x0], x7
.endr
ret
endfunc
// predict_16x16_p: 16x16 plane prediction. H/V gradients come from
// p16weight-weighted differences of mirrored top/left neighbours,
// scaled by 5/64 (the shl #2 + add + rshrn #6 sequence); each row is
// emitted as two saturating-narrowed 8-pixel ramps (v1 low, v3 high).
function predict_16x16_p_neon, export=1
sub x3, x0, #FDEC_STRIDE
mov x1, #FDEC_STRIDE
add x2, x3, #8
sub x3, x3, #1
ld1 {v0.8b}, [x3] // top-left 8 neighbours
ld1 {v2.8b}, [x2], x1 // top-right 8 neighbours
ldcol.8 v1, x3, x1 // upper-left column
add x3, x3, x1
ldcol.8 v3, x3, x1 // lower-left column
rev64 v0.8b, v0.8b // mirror for the differences
rev64 v1.8b, v1.8b
movrel x4, p16weight
uaddl v4.8h, v2.8b, v3.8b
ld1 {v7.8h}, [x4]
usubl v2.8h, v2.8b, v0.8b
usubl v3.8h, v3.8b, v1.8b
mul v2.8h, v2.8h, v7.8h // weighted H differences
mul v3.8h, v3.8h, v7.8h // weighted V differences
saddlp v2.4s, v2.8h
saddlp v3.4s, v3.8h
addp v2.4s, v2.4s, v3.4s
addp v2.4s, v2.4s, v2.4s // H, V sums
shl v3.2s, v2.2s, #2
add v2.2s, v2.2s, v3.2s // 5 * {H, V}
rshrn v5.4h, v2.4s, #6 // b, c, x, x
addp v2.4h, v5.4h, v5.4h
shl v3.4h, v2.4h, #3
sub v3.4h, v3.4h, v2.4h // 7 * (b + c)
ext v4.16b, v4.16b, v4.16b, #14
add v4.4h, v4.4h, v7.4h
shl v2.4h, v4.4h, #4 // a
sub v2.4h, v2.4h, v3.4h // a - 7 * (b + c) + 16
ext v7.16b, v7.16b, v7.16b, #14
mov v7.h[0], wzr // weights become {0..7}
dup v3.8h, v5.h[0]
mul v0.8h, v7.8h, v5.h[0] // 0,1,2,3,4,5,6,7 * b
dup v1.8h, v2.h[0] // pix
dup v2.8h, v5.h[1] // c
shl v3.8h, v3.8h, #3 // 8 * b
add v1.8h, v1.8h, v0.8h // pix + x*b
add v3.8h, v3.8h, v1.8h // pix + x{8-15}*b
mov x3, #16
1:
subs x3, x3, #1
sqshrun v0.8b, v1.8h, #5 // left 8 pixels of the row
add v1.8h, v1.8h, v2.8h
sqshrun2 v0.16b, v3.8h, #5 // right 8 pixels
add v3.8h, v3.8h, v2.8h
st1 {v0.16b}, [x0], x1
b.ne 1b
ret
endfunc
|
aestream/faery
| 32,124
|
src/mp4/x264/common/aarch64/quant-a.S
|
/****************************************************************************
* quant.S: arm quantization and level-run
*****************************************************************************
* Copyright (C) 2009-2024 x264 project
*
* Authors: David Conrad <lessen42@gmail.com>
* Janne Grunau <janne-x264@jannau.net>
* Martin Storsjo <martin@martin.st>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "asm.S"
// This is a common function for both 8 and 10 bit depth, since these two differ
// at data loading only. The distinction is based on the depth parameters that
//are passed to the macro.
// decimate_score_1x: emit decimate_score\size()_neon.
// int decimate_score\size(dctcoef *dct): returns 0 if all coefficients
// are zero, 9 as an early-out if any |coef| > 1, otherwise the sum of
// decimate_table4[zero_run_length] over the nonzero coefficients.
// The 8- and 10-bit paths differ only in how the coefficients are
// loaded and saturating-narrowed to bytes; the scoring is shared.
// NOTE(review): the \depth parameter is unused -- selection is via the
// BIT_DEPTH preprocessor symbol; confirm call sites before removing.
// Fix: dropped the dead 'movi v3.16b, #0x01' from the 8-bit branch --
// v3 was never read before the unconditional movi after .endif.
.macro decimate_score_1x size depth
function decimate_score\size\()_neon, export=1
.if BIT_DEPTH == 8
ld1 {v0.8h,v1.8h}, [x0]
movrel x5, X264(decimate_table4)
sqxtn v0.8b, v0.8h // narrow 16 coefs to bytes (saturating)
sqxtn2 v0.16b, v1.8h
.else // BIT_DEPTH == 10
ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [x0]
movrel x5, X264(decimate_table4)
sqxtn v20.4h, v0.4s
sqxtn2 v20.8h, v1.4s
sqxtn v21.4h, v2.4s
sqxtn2 v21.8h, v3.4s
sqxtn v0.8b, v20.8h
sqxtn2 v0.16b, v21.8h
.endif // BIT_DEPTH
movi v3.16b, #0x01 // threshold for the |coef| > 1 test
abs v2.16b, v0.16b
cmeq v1.16b, v0.16b, #0 // per-coef zero mask
cmhi v2.16b, v2.16b, v3.16b // per-coef |coef| > 1 mask
shrn v1.8b, v1.8h, #4 // pack masks to one nibble per coef
shrn v2.8b, v2.8h, #4
fmov x2, d2
fmov x1, d1
cbnz x2, 9f // any |coef| > 1 -> score 9
mvn x1, x1 // set nibbles now mark nonzero coefs
mov w0, #0
cbz x1, 0f // all-zero block -> score 0
.ifc \size, 15
lsr x1, x1, #1 // 15-coef variant: mask adjustment (TODO(review): confirm against the C reference)
.endif
rbit x1, x1 // scan from the lowest coefficient
1:
clz x3, x1 // leading zero run, 4 bits per coef
lsr x6, x3, #2 // -> run length in coefficients
lsl x1, x1, x3 // skip the zero run
ldrb w7, [x5, x6] // decimate_table4[run]
lsl x1, x1, #4 // consume the nonzero coefficient
add w0, w0, w7
cbnz x1, 1b
ret
9:
mov w0, #9
0:
ret
endfunc
.endm
// Per-byte bit masks (0x80 down to 0x01, repeated for both 8-byte
// halves) used by decimate_score64 to collapse the 64 zero-mask bytes
// into a 64-bit bitmask via and + addp.
const mask64, align=6
.byte 0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01
.byte 0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01
endconst
// decimate_score64: emit decimate_score64_neon.
// int decimate_score64(dctcoef *dct): same contract as the small-size
// variants (0 if all zero, 9 if any |coef| > 1, else summed
// decimate_table8[run] values) but over 64 coefficients, using the
// mask64 table to build a 64-bit nonzero bitmap.
// NOTE(review): the \depth parameter is unused -- selection is via the
// BIT_DEPTH preprocessor symbol.
.macro decimate_score64 depth
function decimate_score64_neon, export=1
.if BIT_DEPTH == 8
ld1 {v0.8h, v1.8h}, [x0], #32
ld1 {v2.8h, v3.8h}, [x0], #32
ld1 {v4.8h, v5.8h}, [x0], #32
ld1 {v6.8h, v7.8h}, [x0]
// Saturating narrow to bytes; note the pair order is swapped so the
// bit significance matches the mask64 layout.
sqxtn v16.8b, v1.8h
sqxtn2 v16.16b, v0.8h
sqxtn v17.8b, v3.8h
sqxtn2 v17.16b, v2.8h
sqxtn v18.8b, v5.8h
sqxtn2 v18.16b, v4.8h
sqxtn v19.8b, v7.8h
sqxtn2 v19.16b, v6.8h
.else // BIT_DEPTH == 10
ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [x0], #64
ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [x0], #64
ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [x0], #64
ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [x0]
sqxtn v28.4h, v0.4s
sqxtn2 v28.8h, v1.4s
sqxtn v0.4h, v2.4s
sqxtn2 v0.8h, v3.4s
sqxtn v2.4h, v6.4s
sqxtn2 v2.8h, v7.4s
sqxtn v3.4h, v4.4s
sqxtn2 v3.8h, v5.4s
sqxtn v4.4h, v22.4s
sqxtn2 v4.8h, v23.4s
sqxtn v5.4h, v20.4s
sqxtn2 v5.8h, v21.4s
sqxtn v6.4h, v26.4s
sqxtn2 v6.8h, v27.4s
sqxtn v7.4h, v24.4s
sqxtn2 v7.8h, v25.4s
sqxtn v16.8b, v0.8h
sqxtn2 v16.16b, v28.8h
sqxtn v17.8b, v2.8h
sqxtn2 v17.16b, v3.8h
sqxtn v18.8b, v4.8h
sqxtn2 v18.16b, v5.8h
sqxtn v19.8b, v6.8h
sqxtn2 v19.16b, v7.8h
.endif // BIT_DEPTH
movrel x6, mask64
movi v31.16b, #0x01 // threshold for |coef| > 1
abs v4.16b, v16.16b
abs v5.16b, v17.16b
abs v6.16b, v18.16b
abs v7.16b, v19.16b
ld1 {v30.16b}, [x6]
cmeq v0.16b, v16.16b, #0 // zero masks per 16-coef group
cmeq v1.16b, v17.16b, #0
cmeq v2.16b, v18.16b, #0
cmeq v3.16b, v19.16b, #0
umax v4.16b, v4.16b, v5.16b // fold the |coef| maxima
umax v6.16b, v6.16b, v7.16b
and v0.16b, v0.16b, v30.16b // select one bit per coefficient
and v1.16b, v1.16b, v30.16b
and v2.16b, v2.16b, v30.16b
and v3.16b, v3.16b, v30.16b
umax v4.16b, v4.16b, v6.16b
addp v0.16b, v1.16b, v0.16b // pairwise-or the bit columns
addp v2.16b, v3.16b, v2.16b
cmhi v4.16b, v4.16b, v31.16b
addp v0.16b, v2.16b, v0.16b
shrn v4.8b, v4.8h, #4
addp v0.16b, v0.16b, v0.16b // 64-bit zero bitmap in d0
fmov x2, d4
fmov x1, d0
cbnz x2, 9f // any |coef| > 1 -> score 9
mvn x1, x1 // bits now mark nonzero coefs
mov w0, #0
cbz x1, 0f // all zero -> score 0
movrel x5, X264(decimate_table8)
1:
clz x3, x1 // zero-run length in coefficients
lsl x1, x1, x3 // skip the run
ldrb w7, [x5, x3] // decimate_table8[run]
lsl x1, x1, #1 // consume the nonzero coefficient
add w0, w0, w7
cbnz x1, 1b
ret
9:
mov w0, #9
0:
ret
endfunc
.endm
// int coeff_last{15,16}( dctcoef *l ) — index of the last nonzero
// coefficient.  For size 15 the pointer is biased back by \sub_factor
// so the 16-lane load lines up (lane 0 is then ignored DC).
.macro COEFF_LAST_1x size, sub_factor
function coeff_last\size\()_neon, export=1
.if \size == 15
sub x0, x0, \sub_factor
.endif
.if BIT_DEPTH == 8
ld1 {v0.8h, v1.8h}, [x0]
uqxtn v0.8b, v0.8h
uqxtn2 v0.16b, v1.8h
.else // BIT_DEPTH == 8
// High bit depth: narrow int32_t coefficients 32 -> 16 -> 8 bits.
ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [x0]
uqxtn v0.4h, v0.4s
uqxtn2 v0.8h, v1.4s
uqxtn v1.4h, v2.4s
uqxtn2 v1.8h, v3.4s
uqxtn v0.8b, v0.8h
uqxtn2 v0.16b, v1.8h
.endif // BIT_DEPTH == 8
cmtst v0.16b, v0.16b, v0.16b        // 0xff per nonzero coefficient
shrn v0.8b, v0.8h, #4               // one nibble per coefficient
fmov x1, d0
mov w3, #\size - 1
clz x2, x1
sub w0, w3, w2, lsr #2              // last index = size-1 - clz/4
ret
endfunc
.endm
// int coeff_last64( dctcoef *l ) — index of the last nonzero coefficient
// of an 8x8 block.  64 coefficients are reduced to a 64-bit bitmap whose
// set bit positions are derived via per-lane clz, then scanned with a
// scalar clz.
.macro COEFF_LAST64
function coeff_last64_neon, export=1
.if BIT_DEPTH == 8
ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0], 64
movi v31.8h, #8
movi v30.8h, #1
uqxtn v0.8b, v0.8h
uqxtn2 v0.16b, v1.8h
ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x0], 64
uqxtn v1.8b, v2.8h
uqxtn2 v1.16b, v3.8h
uqxtn v2.8b, v4.8h
uqxtn2 v2.16b, v5.8h
uqxtn v3.8b, v6.8h
uqxtn2 v3.16b, v7.8h
.else // BIT_DEPTH == 8
// High bit depth: narrow 64 int32_t coefficients 32 -> 16 -> 8 bits.
ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [x0], #64
movi v31.8h, #8
movi v30.8h, #1
ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [x0], #64
uqxtn v0.4h, v0.4s
uqxtn2 v0.8h, v1.4s
uqxtn v1.4h, v2.4s
uqxtn2 v1.8h, v3.4s
uqxtn v2.4h, v4.4s
uqxtn2 v2.8h, v5.4s
ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [x0], #64
uqxtn v3.4h, v6.4s
uqxtn2 v3.8h, v7.4s
uqxtn v0.8b, v0.8h
uqxtn2 v0.16b, v1.8h
uqxtn v1.8b, v2.8h
uqxtn2 v1.16b, v3.8h
ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [x0], #64
uqxtn v16.4h, v16.4s
uqxtn2 v16.8h, v17.4s
uqxtn v17.4h, v18.4s
uqxtn2 v17.8h, v19.4s
uqxtn v18.4h, v20.4s
uqxtn2 v18.8h, v21.4s
uqxtn v19.4h, v22.4s
uqxtn2 v19.8h, v23.4s
uqxtn v2.8b, v16.8h
uqxtn2 v2.16b, v17.8h
uqxtn v3.8b, v18.8h
uqxtn2 v3.16b, v19.8h
.endif // BIT_DEPTH == 8
cmtst v0.16b, v0.16b, v0.16b        // 0xff per nonzero coefficient
cmtst v1.16b, v1.16b, v1.16b
cmtst v2.16b, v2.16b, v2.16b
cmtst v3.16b, v3.16b, v3.16b
// Compress byte masks to nibbles: 64 coefficients -> 2 vectors of 4
// 32-bit lanes, 16 coefficient-nibbles per lane.
shrn v0.8b, v0.8h, #4
shrn2 v0.16b, v1.8h, #4
shrn v1.8b, v2.8h, #4
shrn2 v1.16b, v3.8h, #4
clz v0.4s, v0.4s
clz v1.4s, v1.4s
// clz/4 = leading zero coefficients per lane; 1 << (8 - that) marks
// the last nonzero coefficient of each 8-coeff group as a byte bit.
shrn v0.4h, v0.4s, #2
shrn2 v0.8h, v1.4s, #2
sub v0.8h, v31.8h, v0.8h
sshl v0.8h, v30.8h, v0.8h
shrn v0.8b, v0.8h, #1
fmov x2, d0
mov w3, #63
clz x2, x2
sub w0, w3, w2                      // last index = 63 - clz(bitmap)
ret
endfunc
.endm
// Common setup for the coeff_level_run loops.
// x1 = x264_run_level_t *runlevel; \mask = byte offset of runlevel->mask.
// w7 = count of levels written, w8 = nonzero-position mask accumulator,
// w9 = constant 1 (for building w8), w4 = current coefficient index.
.macro coeff_level_run_start size, mask
add x6, x1, #\mask // runlevel->mask
mov w7, #0
mov w8, #0
mov w9, #1
mov w4, #\size - 1
.endm
// Core run-level scan.  x2 holds the coefficient nonzero-mask packed
// MSB-first, \shift bits per coefficient; x0 = dctcoef *dct.
// Writes runlevel->last (first str), the level values at [x6], and the
// position mask; returns the level count in w0.
.macro coeff_level_run shift, depth
clz x3, x2
subs w4, w4, w3, lsr #\shift        // index of the next nonzero coeff
str w4, [x1], #4                    // runlevel->last
1:
.ifc \depth, 8
ldrh w5, [x0, x4, lsl #1]           // int16_t coefficient
strh w5, [x6], #2
.else
lsl w5, w4, #2
ldr w5, [x0, x5]                    // int32_t coefficient
str w5, [x6], #4
.endif
add w7, w7, #1
lsl w10, w9, w4
orr w8, w8, w10                     // record this position in the mask
b.le 2f                             // index reached 0 -> done
// Strip the just-consumed coefficient's bits from the mask and rescan.
add w3, w3, #1 << \shift
sub w4, w4, #1
and x3, x3, #~((1 << \shift) - 1)
lsl x2, x2, x3
clz x3, x2
subs w4, w4, w3, lsr #\shift
b.ge 1b
2:
str w8, [x1]                        // runlevel->mask
mov w0, w7
.endm
.if BIT_DEPTH == 8
// Quantize two vectors of 8 int16_t coefficients.
// In:  v16/v17 = original coefficients, v18/v19 = |coefficients|.
// Out: quantized values stored at [x0] (post-incremented), \mask = OR of
//      the results (for the caller's nonzero test).
// Formula: (|coef| + bias) * mf >> 16, with the original sign restored
// via the eor/sub pair.
.macro QUANT_TWO bias0 bias1 mf0_1 mf2_3 mask
add v18.8h, v18.8h, \bias0
add v19.8h, v19.8h, \bias1
umull v20.4s, v18.4h, \mf0_1\().4h
umull2 v21.4s, v18.8h, \mf0_1\().8h
umull v22.4s, v19.4h, \mf2_3\().4h
umull2 v23.4s, v19.8h, \mf2_3\().8h
sshr v16.8h, v16.8h, #15            // sign masks of the originals
sshr v17.8h, v17.8h, #15
shrn v18.4h, v20.4s, #16
shrn2 v18.8h, v21.4s, #16
shrn v19.4h, v22.4s, #16
shrn2 v19.8h, v23.4s, #16
eor v18.16b, v18.16b, v16.16b       // reapply sign: (x ^ s) - s
eor v19.16b, v19.16b, v17.16b
sub v18.8h, v18.8h, v16.8h
sub v19.8h, v19.8h, v17.8h
orr \mask, v18.16b, v19.16b
st1 {v18.8h,v19.8h}, [x0], #32
.endm
// Common quant epilogue: w0 = 1 if the 64-bit lane \d is nonzero
// (i.e. any coefficient survived quantization), else 0.
.macro QUANT_END d
fmov x2, \d
mov w0, #0
tst x2, x2
cinc w0, w0, ne
ret
.endm
// quant_2x2_dc( int16_t dct[4], int mf, int bias )
// Scalar mf/bias applied to 4 DC coefficients; returns nonzero flag.
function quant_2x2_dc_neon, export=1
ld1 {v0.4h}, [x0]
dup v2.4h, w2                       // bias
dup v1.4h, w1                       // mf
abs v3.4h, v0.4h
add v3.4h, v3.4h, v2.4h
umull v3.4s, v3.4h, v1.4h
sshr v0.4h, v0.4h, #15              // sign mask
shrn v3.4h, v3.4s, #16
eor v3.8b, v3.8b, v0.8b             // restore sign: (x ^ s) - s
sub v3.4h, v3.4h, v0.4h
st1 {v3.4h}, [x0]
QUANT_END d3
endfunc
// quant_4x4_dc( int16_t dct[16], int mf, int bias )
// Scalar mf/bias broadcast over all 16 coefficients; returns nonzero flag.
function quant_4x4_dc_neon, export=1
ld1 {v16.8h,v17.8h}, [x0]
abs v18.8h, v16.8h
abs v19.8h, v17.8h
dup v0.8h, w2                       // bias
dup v2.8h, w1                       // mf
QUANT_TWO v0.8h, v0.8h, v2, v2, v0.16b
uqxtn v0.8b, v0.8h
QUANT_END d0
endfunc
// quant_4x4( int16_t dct[16], uint16_t mf[16], uint16_t bias[16] )
// Per-coefficient mf/bias tables; returns nonzero flag.
function quant_4x4_neon, export=1
ld1 {v16.8h,v17.8h}, [x0]
abs v18.8h, v16.8h
abs v19.8h, v17.8h
ld1 {v0.8h,v1.8h}, [x2]             // bias
ld1 {v2.8h,v3.8h}, [x1]             // mf
QUANT_TWO v0.8h, v1.8h, v2, v3, v0.16b
uqxtn v0.8b, v0.8h
QUANT_END d0
endfunc
// quant_4x4x4( int16_t dct[4][16], uint16_t mf[16], uint16_t bias[16] )
// Quantizes four consecutive 4x4 blocks with shared mf/bias tables.
// Returns a 4-bit result: bit i set if block i has any nonzero coeff.
// Note: QUANT_TWO post-increments x0 by 32, so each re-load of [x0]
// picks up the next 16-coefficient block.
function quant_4x4x4_neon, export=1
ld1 {v16.8h,v17.8h}, [x0]
abs v18.8h, v16.8h
abs v19.8h, v17.8h
ld1 {v0.8h,v1.8h}, [x2]             // bias
ld1 {v2.8h,v3.8h}, [x1]             // mf
QUANT_TWO v0.8h, v1.8h, v2, v3, v4.16b
ld1 {v16.8h,v17.8h}, [x0]
abs v18.8h, v16.8h
abs v19.8h, v17.8h
QUANT_TWO v0.8h, v1.8h, v2, v3, v5.16b
ld1 {v16.8h,v17.8h}, [x0]
abs v18.8h, v16.8h
abs v19.8h, v17.8h
QUANT_TWO v0.8h, v1.8h, v2, v3, v6.16b
ld1 {v16.8h,v17.8h}, [x0]
abs v18.8h, v16.8h
abs v19.8h, v17.8h
QUANT_TWO v0.8h, v1.8h, v2, v3, v7.16b
uqxtn v4.8b, v4.8h
uqxtn v7.8b, v7.8h
uqxtn v6.8b, v6.8h
uqxtn v5.8b, v5.8h
fmov x7, d7
fmov x6, d6
fmov x5, d5
fmov x4, d4
// Build the 4-bit per-block nonzero flags, block 3 in the MSB.
mov w0, #0
tst x7, x7
cinc w0, w0, ne
lsl w0, w0, #1
tst x6, x6
cinc w0, w0, ne
lsl w0, w0, #1
tst x5, x5
cinc w0, w0, ne
lsl w0, w0, #1
tst x4, x4
cinc w0, w0, ne
ret
endfunc
// quant_8x8( int16_t dct[64], uint16_t mf[64], uint16_t bias[64] )
// Four 16-coefficient passes; v4 accumulates the OR of all results so
// QUANT_END can report whether anything survived.
function quant_8x8_neon, export=1
ld1 {v16.8h,v17.8h}, [x0]
abs v18.8h, v16.8h
abs v19.8h, v17.8h
ld1 {v0.8h,v1.8h}, [x2], #32        // bias
ld1 {v2.8h,v3.8h}, [x1], #32        // mf
QUANT_TWO v0.8h, v1.8h, v2, v3, v4.16b
.rept 3
ld1 {v16.8h,v17.8h}, [x0]           // x0 advanced by QUANT_TWO's store
abs v18.8h, v16.8h
abs v19.8h, v17.8h
ld1 {v0.8h,v1.8h}, [x2], #32
ld1 {v2.8h,v3.8h}, [x1], #32
QUANT_TWO v0.8h, v1.8h, v2, v3, v5.16b
orr v4.16b, v4.16b, v5.16b
.endr
uqxtn v0.8b, v4.8h
QUANT_END d0
endfunc
// Dequant prologue: split i_qp (w2) into i_qbits = qp/6 (w3, then
// biased by -\offset) and i_mf = qp%6, and point x1 at the matching
// dequant_mf table row (or load its first entry for the DC case).
// qp/6 is computed as (qp * 0x2b) >> 8, exact for the valid qp range.
.macro DEQUANT_START mf_size offset dc=no
mov w3, #0x2b
mul w3, w3, w2
lsr w3, w3, #8 // i_qbits = i_qp / 6
add w5, w3, w3, lsl #1
sub w2, w2, w5, lsl #1 // i_mf = i_qp % 6
lsl w2, w2, #\mf_size
.ifc \dc,no
add x1, x1, w2, sxtw // dequant_mf[i_mf]
.else
ldr x1, [x1, w2, sxtw] // dequant_mf[i_mf][0][0]
.endif
subs w3, w3, #\offset // 6 for 8x8
.endm
// dequant_4x4( int16_t dct[16], int dequant_mf[6][16], int i_qp )
// Generates dequant_4x4 / dequant_8x8.  After DEQUANT_START, w3 holds
// i_qbits - \bits: non-negative -> multiply then left-shift; negative
// -> widen, multiply and rounding right-shift (srshl with negative
// shift).  8x8 runs the body 4 times (w2 loop counter).
.macro DEQUANT size bits
function dequant_\size\()_neon, export=1
DEQUANT_START \bits+2, \bits
.ifc \size, 8x8
mov w2, #4
.endif
b.lt dequant_\size\()_rshift
dup v31.8h, w3                      // left-shift amount
dequant_\size\()_lshift_loop:
.ifc \size, 8x8
subs w2, w2, #1
.endif
ld1 {v16.4s}, [x1], #16
ld1 {v17.4s}, [x1], #16
sqxtn v2.4h, v16.4s                 // mf entries narrowed to 16 bit
ld1 {v18.4s}, [x1], #16
sqxtn2 v2.8h, v17.4s
ld1 {v19.4s}, [x1], #16
sqxtn v3.4h, v18.4s
ld1 {v0.8h,v1.8h}, [x0]
sqxtn2 v3.8h, v19.4s
mul v0.8h, v0.8h, v2.8h
mul v1.8h, v1.8h, v3.8h
sshl v0.8h, v0.8h, v31.8h
sshl v1.8h, v1.8h, v31.8h
st1 {v0.8h,v1.8h}, [x0], #32
.ifc \size, 8x8
b.gt dequant_\size\()_lshift_loop
.endif
ret
dequant_\size\()_rshift:
dup v31.4s, w3                      // negative -> srshl rounds right
.ifc \size, 8x8
dequant_\size\()_rshift_loop:
subs w2, w2, #1
.endif
ld1 {v16.4s}, [x1], #16
ld1 {v17.4s}, [x1], #16
sqxtn v2.4h, v16.4s
ld1 {v18.4s}, [x1], #16
sqxtn2 v2.8h, v17.4s
ld1 {v19.4s}, [x1], #16
sqxtn v3.4h, v18.4s
ld1 {v0.8h,v1.8h}, [x0]
sqxtn2 v3.8h, v19.4s
smull v16.4s, v0.4h, v2.4h          // widen: product may exceed 16 bit
smull2 v17.4s, v0.8h, v2.8h
smull v18.4s, v1.4h, v3.4h
smull2 v19.4s, v1.8h, v3.8h
srshl v16.4s, v16.4s, v31.4s
srshl v17.4s, v17.4s, v31.4s
srshl v18.4s, v18.4s, v31.4s
srshl v19.4s, v19.4s, v31.4s
sqxtn v0.4h, v16.4s
sqxtn2 v0.8h, v17.4s
sqxtn v1.4h, v18.4s
sqxtn2 v1.8h, v19.4s
st1 {v0.8h,v1.8h}, [x0], #32
.ifc \size, 8x8
b.gt dequant_\size\()_rshift_loop
.endif
ret
endfunc
.endm
DEQUANT 4x4, 4
DEQUANT 8x8, 6
// dequant_4x4_dc( int16_t dct[16], int dequant_mf[6][16], int i_qp )
// DC variant: a single mf scalar (dequant_mf[i_mf][0][0], loaded by
// DEQUANT_START dc=yes) is broadcast over all 16 coefficients.
function dequant_4x4_dc_neon, export=1
DEQUANT_START 6, 6, yes
b.lt dequant_4x4_dc_rshift
// i_qbits >= 6: fold the left shift into the scalar multiplier.
lsl w1, w1, w3
dup v2.8h, w1
ld1 {v0.8h,v1.8h}, [x0]
mul v0.8h, v0.8h, v2.8h
mul v1.8h, v1.8h, v2.8h
st1 {v0.8h,v1.8h}, [x0]
ret
dequant_4x4_dc_rshift:
// i_qbits < 6: widen, multiply, then rounding right-shift (srshl by
// the negative amount in v3) and saturate back to 16 bit.
dup v4.8h, w1
dup v3.4s, w3
ld1 {v0.8h,v1.8h}, [x0]
smull v16.4s, v0.4h, v4.4h
smull2 v17.4s, v0.8h, v4.8h
smull v18.4s, v1.4h, v4.4h
smull2 v19.4s, v1.8h, v4.8h
srshl v16.4s, v16.4s, v3.4s
srshl v17.4s, v17.4s, v3.4s
srshl v18.4s, v18.4s, v3.4s
srshl v19.4s, v19.4s, v3.4s
sqxtn v0.4h, v16.4s
sqxtn2 v0.8h, v17.4s
sqxtn v1.4h, v18.4s
sqxtn2 v1.8h, v19.4s
st1 {v0.8h,v1.8h}, [x0]
ret
endfunc
// Instantiate the 8-bit-depth decimate functions.
decimate_score_1x 15
decimate_score_1x 16
decimate_score64
// int coeff_last( int16_t *l )
// 4 int16_t coefficients fit in one 64-bit load; last nonzero index is
// 3 - clz/16.
function coeff_last4_aarch64, export=1
ldr x2, [x0]
mov w4, #3
clz x0, x2
sub w0, w4, w0, lsr #4
ret
endfunc
// int coeff_last8( int16_t *l )
// Scan the upper 4 coefficients first; only if they are all zero
// (clz == 64) fall back to the lower 4 with the base index adjusted.
function coeff_last8_aarch64, export=1
ldr x3, [x0, #8]
mov w4, #7
clz x2, x3
cmp w2, #64
b.ne 1f
ldr x3, [x0]
sub w4, w4, #4
clz x2, x3
1:
sub w0, w4, w2, lsr #4
ret
endfunc
// Instantiate coeff_last15/16/64 for 8-bit depth (int16_t coeffs).
COEFF_LAST_1x 15, #2
COEFF_LAST_1x 16, #2
COEFF_LAST64
// int coeff_level_run4( int16_t *dct, x264_run_level_t *runlevel )
// The 4 int16_t coefficients are scanned directly as a 64-bit word
// (16 bits per coefficient, shift=4).
function coeff_level_run4_aarch64, export=1
ldr x2, [x0]
coeff_level_run_start 4, 23
and x6, x6, #~15                    // align runlevel->mask to 16 bytes
coeff_level_run 4, 8
ret
endfunc
// coeff_level_run{8,15,16} for 8-bit depth.  Builds a nonzero mask in
// x2 (16 bits/coeff for size 8, 4 bits/coeff for 15/16 via the shrn
// nibble compression), then runs the generic scan.
.macro X264_COEFF_LEVEL_RUN size
function coeff_level_run\size\()_neon, export=1
.if \size == 15
sub x0, x0, #2                      // bias pointer so 16 lanes line up
.endif
.if \size < 15
ld1 {v0.8h}, [x0]
uqxtn v0.8b, v0.8h
cmtst v0.8b, v0.8b, v0.8b
.else
ld1 {v0.8h,v1.8h}, [x0]
uqxtn v0.8b, v0.8h
uqxtn2 v0.16b, v1.8h
cmtst v0.16b, v0.16b, v0.16b
shrn v0.8b, v0.8h, #4               // one nibble per coefficient
.endif
fmov x2, d0
.if \size == 15
add x0, x0, #2                      // restore dct pointer for the scan
.endif
coeff_level_run_start \size, 23
and x6, x6, #~15                    // align runlevel->mask to 16 bytes
coeff_level_run (4 - (\size + 1) / 8), 8
ret
endfunc
.endm
X264_COEFF_LEVEL_RUN 8
X264_COEFF_LEVEL_RUN 15
X264_COEFF_LEVEL_RUN 16
// denoise_dct( dctcoef *dct, uint32_t *sum, udctcoef *offset, int size )
// For each coefficient: accumulate |coef| into sum[], then shrink the
// magnitude toward zero by offset[] (saturating at 0 via uqsub) and
// restore the original sign.  x3 = size, assumed a multiple of 16.
function denoise_dct_neon, export=1
1: subs w3, w3, #16
ld1 {v0.8h,v1.8h}, [x0]
ld1 {v4.4s,v5.4s,v6.4s,v7.4s}, [x1]
abs v16.8h, v0.8h
abs v17.8h, v1.8h
ld1 {v2.8h,v3.8h}, [x2], #32
cmlt v18.8h, v0.8h, #0              // sign masks
cmlt v19.8h, v1.8h, #0
uaddw v4.4s, v4.4s, v16.4h          // sum[] += |coef|
uaddw2 v5.4s, v5.4s, v16.8h
uqsub v20.8h, v16.8h, v2.8h         // max(|coef| - offset, 0)
uqsub v21.8h, v17.8h, v3.8h
uaddw v6.4s, v6.4s, v17.4h
uaddw2 v7.4s, v7.4s, v17.8h
neg v22.8h, v20.8h
neg v23.8h, v21.8h
bsl v18.16b, v22.16b, v20.16b       // pick negated value where sign set
bsl v19.16b, v23.16b, v21.16b
st1 {v4.4s,v5.4s,v6.4s,v7.4s}, [x1], #64
st1 {v18.8h,v19.8h}, [x0], #32
b.gt 1b
ret
endfunc
.else // BIT_DEPTH == 8
// High-bit-depth quant core: 16 int32_t coefficients at a time.
// In:  v16-v19 = original coefficients, v20-v23 = |coefficients|,
//      v0-v3 = bias, v4-v7 = mf.
// Out: results stored at [x0] (post-incremented); \mask = OR of all
//      results for the caller's nonzero test.
.macro QUANT_TWO mask
add v20.4s, v20.4s, v0.4s
add v21.4s, v21.4s, v1.4s
add v22.4s, v22.4s, v2.4s
add v23.4s, v23.4s, v3.4s
mul v24.4s, v20.4s, v4.4s
mul v25.4s, v21.4s, v5.4s
mul v26.4s, v22.4s, v6.4s
mul v27.4s, v23.4s, v7.4s
sshr v16.4s, v16.4s, #31            // sign masks of the originals
sshr v17.4s, v17.4s, #31
sshr v18.4s, v18.4s, #31
sshr v19.4s, v19.4s, #31
sshr v20.4s, v24.4s, #16            // (|coef|+bias)*mf >> 16
sshr v21.4s, v25.4s, #16
sshr v22.4s, v26.4s, #16
sshr v23.4s, v27.4s, #16
eor v20.16b, v20.16b, v16.16b       // restore sign: (x ^ s) - s
eor v21.16b, v21.16b, v17.16b
eor v22.16b, v22.16b, v18.16b
eor v23.16b, v23.16b, v19.16b
sub v20.4s, v20.4s, v16.4s
sub v21.4s, v21.4s, v17.4s
sub v22.4s, v22.4s, v18.4s
sub v23.4s, v23.4s, v19.4s
orr \mask, v20.16b, v21.16b
orr v16.16b, v22.16b, v23.16b
orr \mask, \mask, v16.16b
st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [x0], #64
.endm
// High-bit-depth quant epilogue: w0 = 1 if any bit of the full 128-bit
// register v\d is set, else 0.
.macro QUANT_END d
// Use parameter d as a register number and extract upper and lower halves.
fmov x2, d\d
fmov x3, v\d\().d[1]
orr x2, x2, x3
mov w0, #0
tst x2, x2
cinc w0, w0, ne
ret
.endm
// quant_2x2_dc( dctcoef dct[4], int mf, int bias ) — high bit depth.
// Scalar mf/bias applied to 4 int32_t DC coefficients.
function quant_2x2_dc_neon, export=1
ld1 {v0.4s}, [x0]
dup v2.4s, w2                       // bias
dup v1.4s, w1                       // mf
abs v3.4s, v0.4s
add v3.4s, v3.4s, v2.4s
mul v3.4s, v3.4s, v1.4s
sshr v0.4s, v0.4s, #31              // sign mask
sshr v3.4s, v3.4s, #16
eor v3.16b, v3.16b, v0.16b          // restore sign: (x ^ s) - s
sub v0.4s, v3.4s, v0.4s
st1 {v0.4s}, [x0]
QUANT_END 0
endfunc
// quant_4x4_dc( dctcoef dct[16], int mf, int bias ) — high bit depth.
// The scalar bias/mf are broadcast into all eight vector slots that
// the QUANT_TWO macro expects (v0-v3 bias, v4-v7 mf).
function quant_4x4_dc_neon, export=1
ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [x0]
abs v20.4s, v16.4s
abs v21.4s, v17.4s
abs v22.4s, v18.4s
abs v23.4s, v19.4s
dup v0.4s, w2
dup v1.4s, w2
dup v2.4s, w2
dup v3.4s, w2
dup v4.4s, w1
dup v5.4s, w1
dup v6.4s, w1
dup v7.4s, w1
QUANT_TWO v0.16b
QUANT_END 0
endfunc
// quant_4x4( dctcoef dct[16], udctcoef mf[16], udctcoef bias[16] )
// High bit depth; per-coefficient tables.
function quant_4x4_neon, export=1
ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [x0]
abs v20.4s, v16.4s
abs v21.4s, v17.4s
abs v22.4s, v18.4s
abs v23.4s, v19.4s
ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [x2]  // bias
ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [x1]  // mf
QUANT_TWO v0.16b
QUANT_END 0
endfunc
// quant_4x4x4( dctcoef dct[4][16], uint32_t mf[16], uint32_t bias[16] )
// High bit depth: four 4x4 blocks, shared tables.  Returns a 4-bit
// result, bit i set if block i kept any nonzero coefficient.
// QUANT_TWO post-increments x0 by 64, so each re-load advances a block.
function quant_4x4x4_neon, export=1
ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [x0]
ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [x2]  // bias
ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [x1]  // mf
abs v20.4s, v16.4s
abs v21.4s, v17.4s
abs v22.4s, v18.4s
abs v23.4s, v19.4s
QUANT_TWO v28.16b
ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [x0]
abs v20.4s, v16.4s
abs v21.4s, v17.4s
abs v22.4s, v18.4s
abs v23.4s, v19.4s
QUANT_TWO v29.16b
ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [x0]
abs v20.4s, v16.4s
abs v21.4s, v17.4s
abs v22.4s, v18.4s
abs v23.4s, v19.4s
QUANT_TWO v30.16b
ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [x0]
abs v20.4s, v16.4s
abs v21.4s, v17.4s
abs v22.4s, v18.4s
abs v23.4s, v19.4s
QUANT_TWO v31.16b
// Narrow each block's OR-mask; nonzero stays nonzero under uqxtn.
uqxtn v28.4h, v28.4s
uqxtn v29.4h, v29.4s
uqxtn v30.4h, v30.4s
uqxtn v31.4h, v31.4s
fmov x7, d28
fmov x6, d29
fmov x10, d30
fmov x12, d31
mov w0, #0
tst x12, x12
cinc w0, w0, ne
lsl w0, w0, #1
tst x10, x10
cinc w0, w0, ne
lsl w0, w0, #1
tst x6, x6
cinc w0, w0, ne
lsl w0, w0, #1
tst x7, x7
cinc w0, w0, ne
ret
endfunc
// quant_8x8( dctcoef dct[64], uint32_t mf[64], uint32_t bias[64] )
// High bit depth: four 16-coefficient passes with per-coefficient
// tables; the four OR-masks are combined for the nonzero test.
function quant_8x8_neon, export=1
ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [x0]
abs v20.4s, v16.4s
abs v21.4s, v17.4s
abs v22.4s, v18.4s
abs v23.4s, v19.4s
ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [x2], #64
ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [x1], #64
QUANT_TWO v28.16b
ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [x0]
abs v20.4s, v16.4s
abs v21.4s, v17.4s
abs v22.4s, v18.4s
abs v23.4s, v19.4s
ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [x2], #64
ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [x1], #64
QUANT_TWO v29.16b
ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [x0]
abs v20.4s, v16.4s
abs v21.4s, v17.4s
abs v22.4s, v18.4s
abs v23.4s, v19.4s
ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [x2], #64
ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [x1], #64
QUANT_TWO v30.16b
ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [x0]
abs v20.4s, v16.4s
abs v21.4s, v17.4s
abs v22.4s, v18.4s
abs v23.4s, v19.4s
ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [x2], #64
ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [x1], #64
QUANT_TWO v31.16b
orr v0.16b, v28.16b, v29.16b
orr v0.16b, v0.16b, v30.16b
orr v0.16b, v0.16b, v31.16b
QUANT_END 0
endfunc
// High-bit-depth dequant prologue; identical contract to the 8-bit
// version: w3 = i_qp/6 - \offset, x1 -> dequant_mf row (or its first
// entry for dc=yes).  qp/6 computed as (qp * 0x2b) >> 8.
.macro DEQUANT_START mf_size offset dc=no
mov w3, #0x2b
mul w3, w3, w2
lsr w3, w3, #8 // i_qbits = i_qp / 6
add w5, w3, w3, lsl #1
sub w2, w2, w5, lsl #1 // i_mf = i_qp % 6
lsl w2, w2, #\mf_size
.ifc \dc,no
add x1, x1, w2, sxtw // dequant_mf[i_mf]
.else
ldr x1, [x1, w2, sxtw] // dequant_mf[i_mf][0][0]
.endif
subs w3, w3, #\offset // 6 for 8x8
.endm
// dequant_4x4( int32_t dct[16], int dequant_mf[6][16], int i_qp )
// High-bit-depth dequant_4x4 / dequant_8x8.  Positive i_qbits-\bits
// left-shifts (sshl), negative rounds right (srshl with the negative
// amount).  8x8 iterates 4 times (w2 loop counter).
.macro DEQUANT size bits
function dequant_\size\()_neon, export=1
DEQUANT_START \bits+2, \bits
.ifc \size, 8x8
mov w2, #4
.endif
b.lt dequant_\size\()_rshift
dup v31.4s, w3
dequant_\size\()_lshift_loop:
.ifc \size, 8x8
subs w2, w2, #1
.endif
ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [x1], #64
ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [x0]
mul v0.4s, v0.4s, v16.4s
mul v1.4s, v1.4s, v17.4s
mul v2.4s, v2.4s, v18.4s
mul v3.4s, v3.4s, v19.4s
sshl v0.4s, v0.4s, v31.4s
sshl v1.4s, v1.4s, v31.4s
sshl v2.4s, v2.4s, v31.4s
sshl v3.4s, v3.4s, v31.4s
st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [x0], #64
.ifc \size, 8x8
b.gt dequant_\size\()_lshift_loop
.endif
ret
dequant_\size\()_rshift:
dup v31.4s, w3                      // negative -> srshl rounds right
.ifc \size, 8x8
dequant_\size\()_rshift_loop:
subs w2, w2, #1
.endif
ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [x1], #64
ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [x0]
mul v20.4s, v0.4s, v16.4s
mul v21.4s, v1.4s, v17.4s
mul v22.4s, v2.4s, v18.4s
mul v23.4s, v3.4s, v19.4s
srshl v16.4s, v20.4s, v31.4s
srshl v17.4s, v21.4s, v31.4s
srshl v18.4s, v22.4s, v31.4s
srshl v19.4s, v23.4s, v31.4s
st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [x0], #64
.ifc \size, 8x8
b.gt dequant_\size\()_rshift_loop
.endif
ret
endfunc
.endm
DEQUANT 4x4, 4
DEQUANT 8x8, 6
// dequant_4x4_dc( int32_t dct[16], int dequant_mf[6][16], int i_qp )
// High-bit-depth DC variant: one mf scalar broadcast over all 16
// int32_t coefficients.
function dequant_4x4_dc_neon, export=1
DEQUANT_START 6, 6, yes
b.lt dequant_4x4_dc_rshift
// i_qbits >= 6: fold the left shift into the scalar multiplier.
lsl w1, w1, w3
dup v31.4s, w1
ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [x0]
mul v0.4s, v0.4s, v31.4s
mul v1.4s, v1.4s, v31.4s
mul v2.4s, v2.4s, v31.4s
mul v3.4s, v3.4s, v31.4s
st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [x0]
ret
dequant_4x4_dc_rshift:
// i_qbits < 6: multiply then rounding right-shift by -w3 (srshl).
dup v31.4s, w1
dup v30.4s, w3
ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [x0]
mul v16.4s, v0.4s, v31.4s
mul v17.4s, v1.4s, v31.4s
mul v18.4s, v2.4s, v31.4s
mul v19.4s, v3.4s, v31.4s
srshl v16.4s, v16.4s, v30.4s
srshl v17.4s, v17.4s, v30.4s
srshl v18.4s, v18.4s, v30.4s
srshl v19.4s, v19.4s, v30.4s
st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [x0]
ret
endfunc
// Instantiate the high-bit-depth decimate functions.
decimate_score_1x 15
decimate_score_1x 16
decimate_score64
// int coeff_last( int32_t *l ) — high bit depth, 4 coefficients.
// Narrow 32->16->8 bits, test nonzero per byte, then 3 - clz/8.
function coeff_last4_neon, export=1
ld1 {v0.4s}, [x0]
uqxtn v0.4h, v0.4s
uqxtn v0.8b, v0.8h
mov w4, #3
cmtst v0.16b, v0.16b, v0.16b
fmov w1, s0
clz w2, w1
sub w0, w4, w2, lsr #3
ret
endfunc
// int coeff_last8( int32_t *l ) — high bit depth, 8 coefficients
// narrowed to one byte each; last index = 7 - clz/8.
function coeff_last8_neon, export=1
ld1 {v0.4s, v1.4s}, [x0]
uqxtn v0.4h, v0.4s
uqxtn2 v0.8h, v1.4s
uqxtn v0.8b, v0.8h
mov w4, #7
cmtst v0.16b, v0.16b, v0.16b
fmov x1, d0
clz x2, x1
sub x0, x4, x2, lsr #3
ret
endfunc
// Instantiate coeff_last15/16/64 for high bit depth (int32_t coeffs).
COEFF_LAST_1x 15, #4
COEFF_LAST_1x 16, #4
COEFF_LAST64
// int coeff_level_run4( dctcoef *dct, x264_run_level_t *runlevel )
// High bit depth: 4 int32_t coefficients, narrowed to one byte each.
// The upper 4 bytes of d0 are zero (uqxtn writes clear the upper half
// of v0), so scanning with size 8 / shift 3 still starts at coeff 3.
// Fix: removed the dead `ldr x2, [x0]` — x2 was unconditionally
// overwritten by `fmov x2, d0` before any use.
function coeff_level_run4_neon, export=1
ld1 {v0.4s}, [x0]
uqxtn v0.4h, v0.4s
uqxtn v0.8b, v0.8h
fmov x2, d0
coeff_level_run_start 8, 16
coeff_level_run 3, 10
ret
endfunc
// coeff_level_run{8,15,16} for high bit depth.  Coefficients are
// narrowed 32->16->8 bits to build the nonzero mask in x2 (8 bits per
// coefficient for size 8, 4-bit nibbles for 15/16).
.macro X264_COEFF_LEVEL_RUN size
function coeff_level_run\size\()_neon, export=1
.if \size == 15
sub x0, x0, #4                      // bias pointer so 16 lanes line up
.endif
.if \size < 15
ld1 {v0.4s, v1.4s}, [x0]
uqxtn v0.4h, v0.4s
uqxtn2 v0.8h, v1.4s
uqxtn v0.8b, v0.8h
cmtst v0.8b, v0.8b, v0.8b
.else
ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [x0]
uqxtn v0.4h, v0.4s
uqxtn2 v0.8h, v1.4s
uqxtn v1.4h, v2.4s
uqxtn2 v1.8h, v3.4s
uqxtn v0.8b, v0.8h
uqxtn2 v0.16b, v1.8h
cmtst v0.16b, v0.16b, v0.16b
shrn v0.8b, v0.8h, #4               // one nibble per coefficient
.endif
fmov x2, d0
.if \size == 15
add x0, x0, #4                      // restore dct pointer for the scan
.endif
coeff_level_run_start \size, 16
coeff_level_run (4 - (\size + 1) / 8), 10
ret
endfunc
.endm
X264_COEFF_LEVEL_RUN 8
X264_COEFF_LEVEL_RUN 15
X264_COEFF_LEVEL_RUN 16
// denoise_dct( dctcoef *dct, uint32_t *sum, udctcoef *offset, int size )
// High bit depth.  There is no 32-bit uqsub, so the saturating
// |coef| - offset is emulated: subtract, then clamp negative lanes to
// zero with cmlt+bsl before restoring the original sign.
function denoise_dct_neon, export=1
1: subs w3, w3, #16
ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [x0]
ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [x1]
abs v16.4s, v0.4s
abs v17.4s, v1.4s
abs v18.4s, v2.4s
abs v19.4s, v3.4s
cmlt v24.4s, v0.4s, #0              // sign masks of the originals
cmlt v25.4s, v1.4s, #0
cmlt v26.4s, v2.4s, #0
cmlt v27.4s, v3.4s, #0
ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [x2], #64
add v4.4s, v4.4s, v16.4s            // sum[] += |coef|
add v5.4s, v5.4s, v17.4s
sub v28.4s, v16.4s, v20.4s          // |coef| - offset (may go negative)
sub v29.4s, v17.4s, v21.4s
sub v30.4s, v18.4s, v22.4s
sub v31.4s, v19.4s, v23.4s
add v6.4s, v6.4s, v18.4s
add v7.4s, v7.4s, v19.4s
cmlt v20.4s, v28.4s, #0             // clamp negative results to zero
cmlt v21.4s, v29.4s, #0
cmlt v22.4s, v30.4s, #0
cmlt v23.4s, v31.4s, #0
movi v0.4s, #0
bsl v20.16b, v0.16b, v28.16b
bsl v21.16b, v0.16b, v29.16b
bsl v22.16b, v0.16b, v30.16b
bsl v23.16b, v0.16b, v31.16b
neg v0.4s, v20.4s
neg v1.4s, v21.4s
neg v2.4s, v22.4s
neg v3.4s, v23.4s
bsl v24.16b, v0.16b, v20.16b        // pick negated value where sign set
bsl v25.16b, v1.16b, v21.16b
bsl v26.16b, v2.16b, v22.16b
bsl v27.16b, v3.16b, v23.16b
st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [x1], #64
st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [x0], #64
b.gt 1b
ret
endfunc
.endif
// ---------------------------------------------------------------------------
// End of quant-a.S content.  The following section was extracted from:
//   aestream/faery — src/mp4/x264/common/aarch64/pixel-a-common.S (1,829 bytes)
// ---------------------------------------------------------------------------
/****************************************************************************
* pixel-a-common.S: aarch64 pixel metrics
*****************************************************************************
* Copyright (C) 2009-2024 x264 project
*
* Authors: David Conrad <lessen42@gmail.com>
* Janne Grunau <janne-x264@jannau.net>
* David Chen <david.chen@myais.com.cn>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
// This file contains the NEON macros and constants that are intended to be used by
// the SVE/SVE2 functions as well
// Lane masks used by the hadamard_ac kernels: first row zeroes the DC
// lane of each 4-coeff group, second row zeroes only lane 0 (8x8 DC).
const mask_ac_4_8
.short 0, -1, -1, -1, 0, -1, -1, -1
.short 0, -1, -1, -1, -1, -1, -1, -1
endconst
// Two butterflies at once: (s1,d1) = (a+b, a-b), (s2,d2) = (c+d, c-d).
// Relies on SUMSUB_AB being defined by the including file.
.macro SUMSUB_ABCD s1, d1, s2, d2, a, b, c, d
SUMSUB_AB \s1, \d1, \a, \b
SUMSUB_AB \s2, \d2, \c, \d
.endm
// Vertical 4-point Hadamard transform over r1-r4 (t1-t4 are scratch):
// two butterfly stages, results written back into r1-r4.
.macro HADAMARD4_V r1, r2, r3, r4, t1, t2, t3, t4
SUMSUB_ABCD \t1, \t2, \t3, \t4, \r1, \r2, \r3, \r4
SUMSUB_ABCD \r1, \r3, \r2, \r4, \t1, \t3, \t2, \t4
.endm
// ---------------------------------------------------------------------------
// End of pixel-a-common.S content.  The following section was extracted from:
//   aestream/faery — src/mp4/x264/common/aarch64/pixel-a-sve.S (15,342 bytes)
// ---------------------------------------------------------------------------
/*****************************************************************************
* pixel-a-sve.S: aarch64 pixel metrics
*****************************************************************************
* Copyright (C) 2009-2024 x264 project
*
* Authors: David Chen <david.chen@myais.com.cn>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "asm.S"
#include "pixel-a-common.S"
.arch armv8-a+sve
#if BIT_DEPTH == 8
// SSD 4-wide prologue (8-bit): set a 4-halfword predicate, load and
// widen the first row pair from both planes (x0/x1 = pix1/stride1,
// x2/x3 = pix2/stride2), and start the squared-difference accumulator
// in v0.  Leaves the next row preloaded in v16/v17.
.macro SSD_START_SVE_4
ptrue p0.h, vl4
ld1b {z16.h}, p0/z, [x0]            // 4 bytes zero-extended to halfwords
ld1b {z17.h}, p0/z, [x2]
add x0, x0, x1
add x2, x2, x3
sub v2.4h, v16.4h, v17.4h
ld1b {z16.h}, p0/z, [x0]
ld1b {z17.h}, p0/z, [x2]
add x0, x0, x1
add x2, x2, x3
smull v0.4s, v2.4h, v2.4h           // acc = diff^2
.endm
// SSD 4-wide steady-state row: square the previously loaded diff into
// the accumulator and preload the next row from both planes.
.macro SSD_SVE_4
sub v2.4h, v16.4h, v17.4h
ld1b {z16.h}, p0/z, [x0]
ld1b {z17.h}, p0/z, [x2]
add x0, x0, x1
add x2, x2, x3
smlal v0.4s, v2.4h, v2.4h
.endm
// SSD 4-wide epilogue: fold the last preloaded row into the accumulator.
.macro SSD_END_SVE_4
sub v2.4h, v16.4h, v17.4h
smlal v0.4s, v2.4h, v2.4h
.endm
// SSD 8-wide prologue (8-bit): same pattern as the 4-wide version but
// with an 8-halfword predicate and both smull/smlal2 halves.
.macro SSD_START_SVE_8
ptrue p0.h, vl8
ld1b {z16.h}, p0/z, [x0]
ld1b {z17.h}, p0/z, [x2]
add x0, x0, x1
add x2, x2, x3
sub v2.8h, v16.8h, v17.8h
ld1b {z16.h}, p0/z, [x0]
smull v0.4s, v2.4h, v2.4h
ld1b {z17.h}, p0/z, [x2]
smlal2 v0.4s, v2.8h, v2.8h
add x0, x0, x1
add x2, x2, x3
.endm
// SSD 8-wide steady-state row: accumulate both halves of the previous
// diff and preload the next row.
.macro SSD_SVE_8
sub v2.8h, v16.8h, v17.8h
ld1b {z16.h}, p0/z, [x0]
smlal v0.4s, v2.4h, v2.4h
ld1b {z17.h}, p0/z, [x2]
smlal2 v0.4s, v2.8h, v2.8h
add x0, x0, x1
add x2, x2, x3
.endm
// SSD 8-wide epilogue: fold the last preloaded row into the accumulator.
.macro SSD_END_SVE_8
sub v2.8h, v16.8h, v17.8h
smlal v0.4s, v2.4h, v2.4h
smlal2 v0.4s, v2.8h, v2.8h
.endm
// int pixel_ssd_WxH( pixel *pix1, intptr_t i_stride1,
//                    pixel *pix2, intptr_t i_stride2 )
// Stitches the START/steady/END row macros and reduces the four
// 32-bit partial sums to the scalar result.
.macro SSD_FUNC_SVE w h
function pixel_ssd_\w\()x\h\()_sve, export=1
SSD_START_SVE_\w
.rept \h-2
SSD_SVE_\w
.endr
SSD_END_SVE_\w
addv s0, v0.4s
mov w0, v0.s[0]
ret
endfunc
.endm
// Load an 8x8 block from both planes (x0/x1 and x2/x3), widening bytes
// to halfwords, and produce the 8 per-row difference vectors in
// v16-v23; the first SUMSUB stage for rows 0-3 is started on the fly
// (results in v0-v3).  Expects p0 = ptrue vl8.
.macro load_diff_fly_sve_8x8
ld1b {z1.h}, p0/z, [x2]
ld1b {z0.h}, p0/z, [x0]
add x2, x2, x3
add x0, x0, x1
ld1b {z3.h}, p0/z, [x2]
ld1b {z2.h}, p0/z, [x0]
add x2, x2, x3
add x0, x0, x1
sub v16.8h, v0.8h, v1.8h
sub v17.8h, v2.8h, v3.8h
ld1b {z5.h}, p0/z, [x2]
ld1b {z4.h}, p0/z, [x0]
add x2, x2, x3
add x0, x0, x1
ld1b {z7.h}, p0/z, [x2]
ld1b {z6.h}, p0/z, [x0]
add x2, x2, x3
add x0, x0, x1
sub v18.8h, v4.8h, v5.8h
sub v19.8h, v6.8h, v7.8h
ld1b {z1.h}, p0/z, [x2]
ld1b {z0.h}, p0/z, [x0]
add x2, x2, x3
add x0, x0, x1
ld1b {z3.h}, p0/z, [x2]
ld1b {z2.h}, p0/z, [x0]
add x2, x2, x3
add x0, x0, x1
sub v20.8h, v0.8h, v1.8h
sub v21.8h, v2.8h, v3.8h
ld1b {z5.h}, p0/z, [x2]
ld1b {z4.h}, p0/z, [x0]
add x2, x2, x3
add x0, x0, x1
ld1b {z7.h}, p0/z, [x2]
ld1b {z6.h}, p0/z, [x0]
add x2, x2, x3
add x0, x0, x1
// First butterfly stage for rows 0-3 while rows 6/7 finish loading.
SUMSUB_AB v0.8h, v1.8h, v16.8h, v17.8h
SUMSUB_AB v2.8h, v3.8h, v18.8h, v19.8h
sub v22.8h, v4.8h, v5.8h
sub v23.8h, v6.8h, v7.8h
.endm
// uint64_t pixel_var_8xH( pixel *pix, intptr_t stride )
// Accumulates the pixel sum in v0 (halfwords) and the sum of squares
// in v1+v2 (words), 4 rows per loop iteration with software-pipelined
// loads; final packing happens in var_end.
.macro pixel_var_sve_8 h
function pixel_var_8x\h\()_sve, export=1
ptrue p0.h, vl8
ld1b {z16.h}, p0/z, [x0]            // bytes widened to halfwords
add x0, x0, x1
ld1b {z17.h}, p0/z, [x0]
add x0, x0, x1
mov x2, \h - 4                      // rows handled outside the loop
mul v1.8h, v16.8h, v16.8h
mul v2.8h, v17.8h, v17.8h
add v0.8h, v16.8h, v17.8h
ld1b {z18.h}, p0/z, [x0]
add x0, x0, x1
uaddlp v1.4s, v1.8h
uaddlp v2.4s, v2.8h
ld1b {z19.h}, p0/z, [x0]
add x0, x0, x1
1: subs x2, x2, #4
add v0.8h, v0.8h, v18.8h
mul v24.8h, v18.8h, v18.8h
ld1b {z20.h}, p0/z, [x0]
add x0, x0, x1
add v0.8h, v0.8h, v19.8h
mul v25.8h, v19.8h, v19.8h
uadalp v1.4s, v24.8h                // widen-accumulate squares
ld1b {z21.h}, p0/z, [x0]
add x0, x0, x1
add v0.8h, v0.8h, v20.8h
mul v26.8h, v20.8h, v20.8h
uadalp v2.4s, v25.8h
ld1b {z18.h}, p0/z, [x0]
add x0, x0, x1
add v0.8h, v0.8h, v21.8h
mul v27.8h, v21.8h, v21.8h
uadalp v1.4s, v26.8h
ld1b {z19.h}, p0/z, [x0]
add x0, x0, x1
uadalp v2.4s, v27.8h
b.gt 1b
// Drain the two rows still in flight.
add v0.8h, v0.8h, v18.8h
mul v28.8h, v18.8h, v18.8h
add v0.8h, v0.8h, v19.8h
mul v29.8h, v19.8h, v19.8h
uadalp v1.4s, v28.8h
uadalp v2.4s, v29.8h
b var_end
endfunc
.endm
// Shared var tail: reduce v0 (sum, halfwords) and v1+v2 (sum of
// squares, words) and return (sum | sqr_sum << 32) in x0.
function var_end
add v1.4s, v1.4s, v2.4s
uaddlv s0, v0.8h
uaddlv d1, v1.4s
mov w0, v0.s[0]
mov x1, v1.d[0]
orr x0, x0, x1, lsl #32
ret
endfunc
// Butterfly: sum = a + b, sub = a - b (operand forms chosen by caller).
.macro SUMSUBL_AB_SVE sum, sub, a, b
add \sum, \a, \b
sub \sub, \a, \b
.endm
// int pixel_sa8d_8x8( pixel *pix1, intptr_t i_stride1,
//                     pixel *pix2, intptr_t i_stride2 )
// Exported wrapper: the inner transform leaves partial sums in v0/v1;
// reduce and apply the (x+1)>>1 rounding.
// NOTE(review): the `bl` below appears self-referential, but export=1
// name-mangles the global symbol (asm.S `function` macro), so it should
// resolve to the non-exported pixel_sa8d_8x8_sve defined by
// sa8d_satd_sve_8x8 further down — confirm against asm.S.
function pixel_sa8d_8x8_sve, export=1
ptrue p0.h, vl8
mov x4, x30                         // save LR across the bl
bl pixel_sa8d_8x8_sve
add v0.8h, v0.8h, v1.8h
uaddlv s0, v0.8h
mov w0, v0.s[0]
add w0, w0, #1
lsr w0, w0, #1                      // sa8d = (sum + 1) >> 1
ret x4
endfunc
// Inner 8x8 SA8D (and optionally 4x4 SATD when \satd is "satd_")
// transform.  Consumes the differences from load_diff_fly_sve_8x8 and
// leaves SA8D partial sums in v0/v1 (and SATD partials in v26/v27 for
// the satd variant).  Called via bl; does not touch x30/x4.
.macro sa8d_satd_sve_8x8 satd=
function pixel_sa8d_\satd\()8x8_sve
load_diff_fly_sve_8x8
SUMSUB_AB v16.8h, v18.8h, v0.8h, v2.8h
SUMSUB_AB v17.8h, v19.8h, v1.8h, v3.8h
HADAMARD4_V v20.8h, v21.8h, v22.8h, v23.8h, v0.8h, v1.8h, v2.8h, v3.8h
.ifc \satd, satd_
// Optional 4x4 SATD computed from the intermediate Hadamard values.
transpose v0.8h, v1.8h, v16.8h, v17.8h
transpose v2.8h, v3.8h, v18.8h, v19.8h
transpose v4.8h, v5.8h, v20.8h, v21.8h
transpose v6.8h, v7.8h, v22.8h, v23.8h
SUMSUB_AB v24.8h, v25.8h, v0.8h, v1.8h
SUMSUB_AB v26.8h, v27.8h, v2.8h, v3.8h
SUMSUB_AB v0.8h, v1.8h, v4.8h, v5.8h
SUMSUB_AB v2.8h, v3.8h, v6.8h, v7.8h
transpose v4.4s, v6.4s, v24.4s, v26.4s
transpose v5.4s, v7.4s, v25.4s, v27.4s
transpose v24.4s, v26.4s, v0.4s, v2.4s
transpose v25.4s, v27.4s, v1.4s, v3.4s
abs v0.8h, v4.8h
abs v1.8h, v5.8h
abs v2.8h, v6.8h
abs v3.8h, v7.8h
abs v4.8h, v24.8h
abs v5.8h, v25.8h
abs v6.8h, v26.8h
abs v7.8h, v27.8h
umax v0.8h, v0.8h, v2.8h
umax v1.8h, v1.8h, v3.8h
umax v2.8h, v4.8h, v6.8h
umax v3.8h, v5.8h, v7.8h
add v26.8h, v0.8h, v1.8h
add v27.8h, v2.8h, v3.8h
.endif
// Second SA8D stage: 8-point Hadamard across the two 4-row halves.
SUMSUB_AB v0.8h, v16.8h, v16.8h, v20.8h
SUMSUB_AB v1.8h, v17.8h, v17.8h, v21.8h
SUMSUB_AB v2.8h, v18.8h, v18.8h, v22.8h
SUMSUB_AB v3.8h, v19.8h, v19.8h, v23.8h
transpose v20.8h, v21.8h, v16.8h, v17.8h
transpose v4.8h, v5.8h, v0.8h, v1.8h
transpose v22.8h, v23.8h, v18.8h, v19.8h
transpose v6.8h, v7.8h, v2.8h, v3.8h
SUMSUB_AB v2.8h, v3.8h, v20.8h, v21.8h
SUMSUB_AB v24.8h, v25.8h, v4.8h, v5.8h
SUMSUB_AB v0.8h, v1.8h, v22.8h, v23.8h
SUMSUB_AB v4.8h, v5.8h, v6.8h, v7.8h
transpose v20.4s, v22.4s, v2.4s, v0.4s
transpose v21.4s, v23.4s, v3.4s, v1.4s
transpose v16.4s, v18.4s, v24.4s, v4.4s
transpose v17.4s, v19.4s, v25.4s, v5.4s
SUMSUB_AB v0.8h, v2.8h, v20.8h, v22.8h
SUMSUB_AB v1.8h, v3.8h, v21.8h, v23.8h
SUMSUB_AB v4.8h, v6.8h, v16.8h, v18.8h
SUMSUB_AB v5.8h, v7.8h, v17.8h, v19.8h
transpose v16.2d, v20.2d, v0.2d, v4.2d
transpose v17.2d, v21.2d, v1.2d, v5.2d
transpose v18.2d, v22.2d, v2.2d, v6.2d
transpose v19.2d, v23.2d, v3.2d, v7.2d
abs v16.8h, v16.8h
abs v20.8h, v20.8h
abs v17.8h, v17.8h
abs v21.8h, v21.8h
abs v18.8h, v18.8h
abs v22.8h, v22.8h
abs v19.8h, v19.8h
abs v23.8h, v23.8h
// max(|a+b|,|a-b|) halves the double-counting of the last butterfly.
umax v16.8h, v16.8h, v20.8h
umax v17.8h, v17.8h, v21.8h
umax v18.8h, v18.8h, v22.8h
umax v19.8h, v19.8h, v23.8h
add v0.8h, v16.8h, v17.8h
add v1.8h, v18.8h, v19.8h
ret
endfunc
.endm
// uint64_t pixel_hadamard_ac_WxH( pixel *pix, intptr_t stride )
// Runs the shared 8x8 kernel over each 8x8 sub-block; v28 accumulates
// the 4x4-AC satd, v29 the 8x8-AC satd.  Returns
// (satd4 >> 1) | ((satd8 >> 2) << 32).
.macro HADAMARD_AC_SVE w h
function pixel_hadamard_ac_\w\()x\h\()_sve, export=1
ptrue p0.h, vl8
movrel x5, mask_ac_4_8
mov x4, x30                         // save LR across the bl calls
ld1 {v30.8h,v31.8h}, [x5]           // v30 = mask_ac4, v31 = mask_ac8
movi v28.16b, #0
movi v29.16b, #0
bl hadamard_ac_8x8_sve
.if \h > 8
bl hadamard_ac_8x8_sve              // x0 already advanced 8 rows
.endif
.if \w > 8
sub x0, x0, x1, lsl #3              // back up 8 rows, move 8 px right
add x0, x0, #8
bl hadamard_ac_8x8_sve
.endif
.if \w * \h == 256
sub x0, x0, x1, lsl #4              // 16x16: up 16 rows for last block
bl hadamard_ac_8x8_sve
.endif
addv s1, v29.4s
addv s0, v28.4s
mov w1, v1.s[0]
mov w0, v0.s[0]
lsr w1, w1, #2                      // normalize the transform gains
lsr w0, w0, #1
orr x0, x0, x1, lsl #32
ret x4
endfunc
.endm
// v28: satd v29: sa8d v30: mask_ac4 v31: mask_ac8
// Shared 8x8 hadamard_ac kernel.  Loads 8 rows from [x0] (advancing x0
// by 8*stride), accumulates the 4x4-AC satd into v28 and the 8x8-AC
// satd into v29.  Expects p0 = ptrue vl8; clobbers v0-v27.
function hadamard_ac_8x8_sve
ld1b {z16.h}, p0/z, [x0]
add x0, x0, x1
ld1b {z17.h}, p0/z, [x0]
add x0, x0, x1
ld1b {z18.h}, p0/z, [x0]
add x0, x0, x1
ld1b {z19.h}, p0/z, [x0]
add x0, x0, x1
SUMSUBL_AB_SVE v0.8h, v1.8h, v16.8h, v17.8h
ld1b {z20.h}, p0/z, [x0]
add x0, x0, x1
ld1b {z21.h}, p0/z, [x0]
add x0, x0, x1
SUMSUBL_AB_SVE v2.8h, v3.8h, v18.8h, v19.8h
ld1b {z22.h}, p0/z, [x0]
add x0, x0, x1
ld1b {z23.h}, p0/z, [x0]
add x0, x0, x1
SUMSUBL_AB_SVE v4.8h, v5.8h, v20.8h, v21.8h
SUMSUBL_AB_SVE v6.8h, v7.8h, v22.8h, v23.8h
// Vertical Hadamard over the 8 rows, then transpose + horizontal pass.
SUMSUB_ABCD v16.8h, v18.8h, v17.8h, v19.8h, v0.8h, v2.8h, v1.8h, v3.8h
SUMSUB_ABCD v20.8h, v22.8h, v21.8h, v23.8h, v4.8h, v6.8h, v5.8h, v7.8h
transpose v0.8h, v1.8h, v16.8h, v17.8h
transpose v2.8h, v3.8h, v18.8h, v19.8h
transpose v4.8h, v5.8h, v20.8h, v21.8h
transpose v6.8h, v7.8h, v22.8h, v23.8h
SUMSUB_AB v16.8h, v17.8h, v0.8h, v1.8h
SUMSUB_AB v18.8h, v19.8h, v2.8h, v3.8h
SUMSUB_AB v20.8h, v21.8h, v4.8h, v5.8h
SUMSUB_AB v22.8h, v23.8h, v6.8h, v7.8h
transpose v0.4s, v2.4s, v16.4s, v18.4s
transpose v1.4s, v3.4s, v17.4s, v19.4s
transpose v4.4s, v6.4s, v20.4s, v22.4s
transpose v5.4s, v7.4s, v21.4s, v23.4s
SUMSUB_AB v16.8h, v18.8h, v0.8h, v2.8h
SUMSUB_AB v17.8h, v19.8h, v1.8h, v3.8h
SUMSUB_ABCD v20.8h, v22.8h, v21.8h, v23.8h, v4.8h, v6.8h, v5.8h, v7.8h
// 4x4 satd: |coeffs| with each 4x4 DC masked out (mask_ac4).
abs v0.8h, v16.8h
abs v4.8h, v20.8h
abs v1.8h, v17.8h
abs v5.8h, v21.8h
abs v2.8h, v18.8h
abs v6.8h, v22.8h
abs v3.8h, v19.8h
abs v7.8h, v23.8h
add v0.8h, v0.8h, v4.8h
add v1.8h, v1.8h, v5.8h
and v0.16b, v0.16b, v30.16b
add v2.8h, v2.8h, v6.8h
add v3.8h, v3.8h, v7.8h
add v0.8h, v0.8h, v2.8h
add v1.8h, v1.8h, v3.8h
uadalp v28.4s, v0.8h
uadalp v28.4s, v1.8h
// Final 8-point stage for the 8x8 satd (mask_ac8 removes the 8x8 DC).
SUMSUB_AB v6.8h, v7.8h, v23.8h, v19.8h
SUMSUB_AB v4.8h, v5.8h, v22.8h, v18.8h
SUMSUB_AB v2.8h, v3.8h, v21.8h, v17.8h
SUMSUB_AB v1.8h, v0.8h, v16.8h, v20.8h
transpose v16.2d, v17.2d, v6.2d, v7.2d
transpose v18.2d, v19.2d, v4.2d, v5.2d
transpose v20.2d, v21.2d, v2.2d, v3.2d
abs v16.8h, v16.8h
abs v17.8h, v17.8h
abs v18.8h, v18.8h
abs v19.8h, v19.8h
abs v20.8h, v20.8h
abs v21.8h, v21.8h
transpose v7.2d, v6.2d, v1.2d, v0.2d
umax v3.8h, v16.8h, v17.8h
umax v2.8h, v18.8h, v19.8h
umax v1.8h, v20.8h, v21.8h
SUMSUB_AB v4.8h, v5.8h, v7.8h, v6.8h
add v2.8h, v2.8h, v3.8h
add v2.8h, v2.8h, v1.8h
and v4.16b, v4.16b, v31.16b
add v2.8h, v2.8h, v2.8h
abs v5.8h, v5.8h
abs v4.8h, v4.8h
add v2.8h, v2.8h, v5.8h
add v2.8h, v2.8h, v4.8h
uadalp v29.4s, v2.8h
ret
endfunc
// Instantiate the 8-bit-depth SVE pixel functions.
SSD_FUNC_SVE 4, 4
SSD_FUNC_SVE 4, 8
SSD_FUNC_SVE 4, 16
SSD_FUNC_SVE 8, 4
SSD_FUNC_SVE 8, 8
pixel_var_sve_8 8
pixel_var_sve_8 16
sa8d_satd_sve_8x8
HADAMARD_AC_SVE 8, 8
HADAMARD_AC_SVE 8, 16
HADAMARD_AC_SVE 16, 8
HADAMARD_AC_SVE 16, 16
#else /* BIT_DEPTH == 10 */
// SSD (sum of squared differences) for 4-wide blocks, high-bit-depth
// path: pixels are 16-bit, loaded widened to 32-bit lanes via ld1h
// into .s elements, so differences and squares are computed in 32 bits.
// x0/x1 = pix1/stride1, x2/x3 = pix2/stride2 (strides scaled by 2 for
// 16-bit pixels via "lsl #1"). Accumulator: v0.4s.
.macro SSD_START_SVE_4
ptrue p0.s, vl4
ld1h {z16.s}, p0/z, [x0]
ld1h {z17.s}, p0/z, [x2]
add x0, x0, x1, lsl #1
add x2, x2, x3, lsl #1
sub v2.4s, v16.4s, v17.4s
ld1h {z16.s}, p0/z, [x0]
ld1h {z17.s}, p0/z, [x2]
add x0, x0, x1, lsl #1
add x2, x2, x3, lsl #1
// First row: initialize the accumulator with a plain multiply.
mul v0.4s, v2.4s, v2.4s
.endm
// Middle rows: square the previous difference, accumulate, and load
// the next row (software-pipelined with the loads above).
.macro SSD_SVE_4
sub v2.4s, v16.4s, v17.4s
ld1h {z16.s}, p0/z, [x0]
ld1h {z17.s}, p0/z, [x2]
add x0, x0, x1, lsl #1
add x2, x2, x3, lsl #1
mla v0.4s, v2.4s, v2.4s
.endm
// Last row: consume the final pre-loaded pair; no further loads.
.macro SSD_END_SVE_4
sub v2.4s, v16.4s, v17.4s
mla v0.4s, v2.4s, v2.4s
.endm
// Emit pixel_ssd_WxH_sve: runs START once, the middle step h-2 times,
// END once, then reduces the 4x32-bit accumulator to a scalar in w0.
.macro SSD_FUNC_SVE w h
function pixel_ssd_\w\()x\h\()_sve, export=1
SSD_START_SVE_\w
.rept \h-2
SSD_SVE_\w
.endr
SSD_END_SVE_\w
// Horizontal add of v0.4s -> s0, then move to the integer return reg.
addv s0, v0.4s
fmov w0, s0
ret
endfunc
.endm
SSD_FUNC_SVE 4, 4
SSD_FUNC_SVE 4, 8
SSD_FUNC_SVE 4, 16
#endif /* BIT_DEPTH == 8 */
|
aestream/faery
| 3,009
|
src/mp4/x264/common/aarch64/dct-a-sve2.S
|
/****************************************************************************
* dct-a-sve2.S: aarch64 transform and zigzag
*****************************************************************************
* Copyright (C) 2009-2024 x264 project
*
* Authors: David Chen <david.chen@myais.com.cn>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "asm.S"
#include "dct-a-common.S"
.arch armv8-a+sve+sve2
// add4x4_idct_sve2: 4x4 inverse DCT of the coefficients at [x1],
// added to the 8-bit prediction at [x0] (stride FDEC_STRIDE), with
// >>6 rounding and unsigned saturation on store.
// In:  x0 = dst/pred pixels, x1 = int16 coefficients (2x8h = 16).
// Uses SVE2 widening loads (ld1b .h) and narrowing stores (st1b .h)
// to handle the byte<->halfword conversion without separate uxtl/sqxtun.
// NEON and SVE Z registers alias (vN = low 128 bits of zN).
function add4x4_idct_sve2, export=1
mov x2, #FDEC_STRIDE
mov x11, x0
// p0 = 8 active .h lanes (loads), p1 = 4 active .h lanes (stores).
ptrue p0.h, vl8
ptrue p1.h, vl4
ld1 {v0.8h, v1.8h}, [x1]
// Vertical 1-D transform (butterflies + half-coefficient terms).
SUMSUB_AB v4.8h, v5.8h, v0.8h, v1.8h
sshr v7.8h, v0.8h, #1
sshr v6.8h, v1.8h, #1
sub v7.8h, v7.8h, v1.8h
add v6.8h, v6.8h, v0.8h
mov v7.d[0], v7.d[1]
mov v6.d[0], v6.d[1]
// Interleave prediction loads (rows 0..3 -> z28,z29,z31,z30) with the
// transform to hide load latency.
ld1b {z28.h}, p0/z, [x11]
add x11, x11, x2
SUMSUB_AB v0.8h, v2.8h, v4.8h, v6.8h
SUMSUB_AB v1.8h, v3.8h, v5.8h, v7.8h
transpose4x4.h v0, v1, v3, v2, v16, v17, v18, v19
// Horizontal 1-D transform on the transposed rows.
SUMSUB_AB v4.4h, v5.4h, v0.4h, v3.4h
sshr v7.4h, v1.4h, #1
sshr v6.4h, v2.4h, #1
sub v7.4h, v7.4h, v2.4h
add v6.4h, v6.4h, v1.4h
ld1b {z29.h}, p0/z, [x11]
add x11, x11, x2
SUMSUB_AB v0.4h, v2.4h, v4.4h, v6.4h
SUMSUB_AB v1.4h, v3.4h, v5.4h, v7.4h
// Rounding shift: (x + 32) >> 6, merging predicated on the low 4 lanes.
srshr z0.h, p1/m, z0.h, #6
srshr z1.h, p1/m, z1.h, #6
ld1b {z31.h}, p0/z, [x11]
add x11, x11, x2
srshr z2.h, p1/m, z2.h, #6
srshr z3.h, p1/m, z3.h, #6
ld1b {z30.h}, p0/z, [x11]
// Add prediction. Note the pairing: v2 gets row 3 (z30), v3 gets
// row 2 (z31) — matching the 0,1,3,2 row order of the stores below.
add v0.8h, v0.8h, v28.8h
add v1.8h, v1.8h, v29.8h
add v2.8h, v2.8h, v30.8h
add v3.8h, v3.8h, v31.8h
// Saturating narrow h->b (clamps to 0..255 for 8-bit output).
sqxtunb z0.b, z0.h
sqxtunb z1.b, z1.h
sqxtunb z2.b, z2.h
sqxtunb z3.b, z3.h
st1b {z0.h}, p1, [x0]
add x0, x0, x2
st1b {z1.h}, p1, [x0]
add x0, x0, x2
st1b {z3.h}, p1, [x0]
add x0, x0, x2
st1b {z2.h}, p1, [x0]
ret
endfunc
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/ADC/ADC_SingleConversion_TriggerSW/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
; Cortex-M0 vector table: word 0 is the initial stack pointer (end of
; CSTACK), word 1 the reset vector, followed by system exceptions and
; the STM32F072 peripheral interrupt vectors in NVIC position order.
__vector_table
DCD sfe(CSTACK) ; Initial SP = end of CSTACK section
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
; Reset: call SystemInit (clock/vector setup) via BLX, then jump to the
; IAR C runtime entry point, which initializes data/bss and calls main().
; All handlers below are PUBWEAK so the application can override them;
; the defaults are infinite loops so a stray exception is observable.
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
; Default weak peripheral IRQ handlers: each is an infinite self-branch.
; Applications override them simply by defining a handler with the same
; name; the linker then drops these weak stubs.
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aestream/faery
| 82,470
|
src/mp4/x264/common/aarch64/pixel-a.S
|
/*****************************************************************************
* pixel.S: aarch64 pixel metrics
*****************************************************************************
* Copyright (C) 2009-2024 x264 project
*
* Authors: David Conrad <lessen42@gmail.com>
* Janne Grunau <janne-x264@jannau.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "asm.S"
#include "pixel-a-common.S"
// 32-byte constant: 16 bytes of 0xff followed by 16 bytes of 0x00,
// usable as a lane-select mask via an unaligned load.
const mask
.rept 16
.byte 0xff
.endr
.rept 16
.byte 0x00
.endr
endconst
// Widening butterfly: sum = a + b, sub = a - b (unsigned widening).
.macro SUMSUBL_AB sum, sub, a, b
uaddl \sum, \a, \b
usubl \sub, \a, \b
.endm
#if BIT_DEPTH == 8
// SAD building blocks, 8-bit path. x0/x1 = pix1/stride1,
// x2/x3 = pix2/stride2. Each macro processes two rows; START uses the
// widening absolute-difference (uabdl) to initialize the 16-bit
// accumulators v16 (and v17 for widths 8/16), the plain variants
// accumulate with uabal/uabal2.
.macro SAD_START_4
ld1 {v1.s}[0], [x2], x3
ld1 {v0.s}[0], [x0], x1
ld1 {v1.s}[1], [x2], x3
ld1 {v0.s}[1], [x0], x1
uabdl v16.8h, v0.8b, v1.8b
.endm
.macro SAD_4
ld1 {v1.s}[0], [x2], x3
ld1 {v0.s}[0], [x0], x1
ld1 {v1.s}[1], [x2], x3
ld1 {v0.s}[1], [x0], x1
uabal v16.8h, v0.8b, v1.8b
.endm
.macro SAD_START_8
ld1 {v1.8b}, [x2], x3
ld1 {v0.8b}, [x0], x1
ld1 {v3.8b}, [x2], x3
ld1 {v2.8b}, [x0], x1
uabdl v16.8h, v0.8b, v1.8b
uabdl v17.8h, v2.8b, v3.8b
.endm
.macro SAD_8
ld1 {v1.8b}, [x2], x3
ld1 {v0.8b}, [x0], x1
ld1 {v3.8b}, [x2], x3
ld1 {v2.8b}, [x0], x1
uabal v16.8h, v0.8b, v1.8b
uabal v17.8h, v2.8b, v3.8b
.endm
.macro SAD_START_16
ld1 {v1.16b}, [x2], x3
ld1 {v0.16b}, [x0], x1
ld1 {v3.16b}, [x2], x3
ld1 {v2.16b}, [x0], x1
uabdl v16.8h, v0.8b, v1.8b
uabdl2 v17.8h, v0.16b, v1.16b
uabal v16.8h, v2.8b, v3.8b
uabal2 v17.8h, v2.16b, v3.16b
.endm
.macro SAD_16
ld1 {v1.16b}, [x2], x3
ld1 {v0.16b}, [x0], x1
ld1 {v3.16b}, [x2], x3
ld1 {v2.16b}, [x0], x1
uabal v16.8h, v0.8b, v1.8b
uabal2 v17.8h, v0.16b, v1.16b
uabal v16.8h, v2.8b, v3.8b
uabal2 v17.8h, v2.16b, v3.16b
.endm
// Emit pixel_sad[name]_WxH_neon from the SAD_* macros above.
// Two rows per iteration, then a widening horizontal reduction of the
// 16-bit accumulator(s) into w0.
.macro SAD_FUNC w, h, name
function pixel_sad\name\()_\w\()x\h\()_neon, export=1
SAD_START_\w
.rept \h / 2 - 1
SAD_\w
.endr
.if \w > 4
add v16.8h, v16.8h, v17.8h
.endif
uaddlv s0, v16.8h
fmov w0, s0
ret
endfunc
.endm
// Multi-reference SAD (sad_x3/sad_x4) row steps: one encoded block
// (x0, stride x7 = FENC_STRIDE) against \x candidate references
// (x1..x4, shared stride x5). \first is uabdl on the first invocation
// to initialize accumulators v16..v19 (+v20..v23 for width 16),
// uabal afterwards. Each invocation processes two rows.
.macro SAD_X_4 x, first=uabal
ld1 {v0.s}[0], [x0], x7
ld1 {v1.s}[0], [x1], x5
ld1 {v0.s}[1], [x0], x7
ld1 {v1.s}[1], [x1], x5
ld1 {v2.s}[0], [x2], x5
ld1 {v2.s}[1], [x2], x5
\first v16.8h, v1.8b, v0.8b
ld1 {v3.s}[0], [x3], x5
ld1 {v3.s}[1], [x3], x5
\first v17.8h, v2.8b, v0.8b
.if \x == 4
ld1 {v4.s}[0], [x4], x5
ld1 {v4.s}[1], [x4], x5
.endif
\first v18.8h, v3.8b, v0.8b
.if \x == 4
\first v19.8h, v4.8b, v0.8b
.endif
.endm
.macro SAD_X_8 x, first=uabal
ld1 {v0.8b}, [x0], x7
ld1 {v1.8b}, [x1], x5
ld1 {v2.8b}, [x2], x5
\first v16.8h, v1.8b, v0.8b
ld1 {v3.8b}, [x3], x5
\first v17.8h, v2.8b, v0.8b
// v5 = second encoded row; reference rows are reloaded into v1..v4.
ld1 {v5.8b}, [x0], x7
ld1 {v1.8b}, [x1], x5
\first v18.8h, v3.8b, v0.8b
ld1 {v2.8b}, [x2], x5
uabal v16.8h, v1.8b, v5.8b
ld1 {v3.8b}, [x3], x5
uabal v17.8h, v2.8b, v5.8b
.if \x == 4
ld1 {v4.8b}, [x4], x5
ld1 {v1.8b}, [x4], x5
.endif
uabal v18.8h, v3.8b, v5.8b
.if \x == 4
\first v19.8h, v4.8b, v0.8b
uabal v19.8h, v1.8b, v5.8b
.endif
.endm
.macro SAD_X_16 x, first=uabal
ld1 {v0.16b}, [x0], x7
ld1 {v1.16b}, [x1], x5
ld1 {v2.16b}, [x2], x5
\first v16.8h, v1.8b, v0.8b
\first\()2 v20.8h, v1.16b, v0.16b
ld1 {v3.16b}, [x3], x5
\first v17.8h, v2.8b, v0.8b
\first\()2 v21.8h, v2.16b, v0.16b
ld1 {v5.16b}, [x0], x7
ld1 {v1.16b}, [x1], x5
\first v18.8h, v3.8b, v0.8b
\first\()2 v22.8h, v3.16b, v0.16b
ld1 {v2.16b}, [x2], x5
uabal v16.8h, v1.8b, v5.8b
uabal2 v20.8h, v1.16b, v5.16b
ld1 {v3.16b}, [x3], x5
uabal v17.8h, v2.8b, v5.8b
uabal2 v21.8h, v2.16b, v5.16b
.if \x == 4
ld1 {v4.16b}, [x4], x5
ld1 {v1.16b}, [x4], x5
.endif
uabal v18.8h, v3.8b, v5.8b
uabal2 v22.8h, v3.16b, v5.16b
.if \x == 4
\first v19.8h, v4.8b, v0.8b
\first\()2 v23.8h, v4.16b, v0.16b
uabal v19.8h, v1.8b, v5.8b
uabal2 v23.8h, v1.16b, v5.16b
.endif
.endm
// Emit pixel_sad_xX_WxH_neon. For x3 the scores pointer arrives in x4
// (shifted into x6/x5 below); for x4 it is already in x6. Results are
// reduced per reference and stored as 3 or 4 int32 scores at [x6].
.macro SAD_X_FUNC x, w, h
function pixel_sad_x\x\()_\w\()x\h\()_neon, export=1
.if \x == 3
mov x6, x5
mov x5, x4
.endif
mov x7, #FENC_STRIDE
SAD_X_\w \x, uabdl
.rept \h / 2 - 1
SAD_X_\w \x
.endr
.if \w > 8
add v16.8h, v16.8h, v20.8h
add v17.8h, v17.8h, v21.8h
add v18.8h, v18.8h, v22.8h
.if \x == 4
add v19.8h, v19.8h, v23.8h
.endif
.endif
// add up the sads
uaddlv s0, v16.8h
uaddlv s1, v17.8h
uaddlv s2, v18.8h
stp s0, s1, [x6], #8
.if \x == 3
str s2, [x6]
.else
uaddlv s3, v19.8h
stp s2, s3, [x6]
.endif
ret
endfunc
.endm
// Vertical SAD: sum of |row[i] - row[i+1]| over a 16-wide column.
// In: x0 = pixels, x1 = stride, w2 = height. Out: w0 = sum.
// The loop is software-pipelined two rows at a time, alternating which
// of v0/v1 holds the "previous" row; 2f handles odd leftovers.
function pixel_vsad_neon, export=1
subs w2, w2, #2
ld1 {v0.16b}, [x0], x1
ld1 {v1.16b}, [x0], x1
uabdl v6.8h, v0.8b, v1.8b
uabdl2 v7.8h, v0.16b, v1.16b
b.le 2f
1:
subs w2, w2, #2
ld1 {v0.16b}, [x0], x1
uabal v6.8h, v1.8b, v0.8b
uabal2 v7.8h, v1.16b, v0.16b
ld1 {v1.16b}, [x0], x1
b.lt 2f
uabal v6.8h, v0.8b, v1.8b
uabal2 v7.8h, v0.16b, v1.16b
b.gt 1b
2:
add v5.8h, v6.8h, v7.8h
uaddlv s0, v5.8h
fmov w0, s0
ret
endfunc
// Absolute sum of (signed) differences over an 8-wide block:
// |sum(pix1 - pix2)| — note the abs is applied to the total, not per
// pixel. In: x0/x1 = pix1/stride1, x2/x3 = pix2/stride2, w4 = height.
function pixel_asd8_neon, export=1
sub w4, w4, #2
ld1 {v0.8b}, [x0], x1
ld1 {v1.8b}, [x2], x3
ld1 {v2.8b}, [x0], x1
ld1 {v3.8b}, [x2], x3
usubl v16.8h, v0.8b, v1.8b
1:
subs w4, w4, #2
ld1 {v4.8b}, [x0], x1
ld1 {v5.8b}, [x2], x3
usubl v17.8h, v2.8b, v3.8b
usubl v18.8h, v4.8b, v5.8b
add v16.8h, v16.8h, v17.8h
ld1 {v2.8b}, [x0], x1
ld1 {v3.8b}, [x2], x3
add v16.8h, v16.8h, v18.8h
b.gt 1b
usubl v17.8h, v2.8b, v3.8b
add v16.8h, v16.8h, v17.8h
// Signed widening reduction, then absolute value of the scalar total.
saddlv s0, v16.8h
abs v0.2s, v0.2s
fmov w0, s0
ret
endfunc
// SSD building blocks, 8-bit path: usubl to get 16-bit differences,
// smull/smlal to square-accumulate into 32-bit lanes of v0 (and v1 for
// width 16). START initializes, the plain step accumulates while
// pre-loading the next row, END consumes the last pre-load.
.macro SSD_START_4
ld1 {v16.s}[0], [x0], x1
ld1 {v17.s}[0], [x2], x3
usubl v2.8h, v16.8b, v17.8b
ld1 {v16.s}[0], [x0], x1
ld1 {v17.s}[0], [x2], x3
smull v0.4s, v2.4h, v2.4h
.endm
.macro SSD_4
usubl v2.8h, v16.8b, v17.8b
ld1 {v16.s}[0], [x0], x1
ld1 {v17.s}[0], [x2], x3
smlal v0.4s, v2.4h, v2.4h
.endm
.macro SSD_END_4
usubl v2.8h, v16.8b, v17.8b
smlal v0.4s, v2.4h, v2.4h
.endm
.macro SSD_START_8
ld1 {v16.8b}, [x0], x1
ld1 {v17.8b}, [x2], x3
usubl v2.8h, v16.8b, v17.8b
ld1 {v16.8b}, [x0], x1
smull v0.4s, v2.4h, v2.4h
ld1 {v17.8b}, [x2], x3
smlal2 v0.4s, v2.8h, v2.8h
.endm
.macro SSD_8
usubl v2.8h, v16.8b, v17.8b
ld1 {v16.8b}, [x0], x1
smlal v0.4s, v2.4h, v2.4h
ld1 {v17.8b}, [x2], x3
smlal2 v0.4s, v2.8h, v2.8h
.endm
.macro SSD_END_8
usubl v2.8h, v16.8b, v17.8b
smlal v0.4s, v2.4h, v2.4h
smlal2 v0.4s, v2.8h, v2.8h
.endm
.macro SSD_START_16
ld1 {v16.16b}, [x0], x1
ld1 {v17.16b}, [x2], x3
usubl v2.8h, v16.8b, v17.8b
usubl2 v3.8h, v16.16b, v17.16b
ld1 {v16.16b}, [x0], x1
smull v0.4s, v2.4h, v2.4h
smull2 v1.4s, v2.8h, v2.8h
ld1 {v17.16b}, [x2], x3
smlal v0.4s, v3.4h, v3.4h
smlal2 v1.4s, v3.8h, v3.8h
.endm
.macro SSD_16
usubl v2.8h, v16.8b, v17.8b
usubl2 v3.8h, v16.16b, v17.16b
ld1 {v16.16b}, [x0], x1
smlal v0.4s, v2.4h, v2.4h
smlal2 v1.4s, v2.8h, v2.8h
ld1 {v17.16b}, [x2], x3
smlal v0.4s, v3.4h, v3.4h
smlal2 v1.4s, v3.8h, v3.8h
.endm
.macro SSD_END_16
usubl v2.8h, v16.8b, v17.8b
usubl2 v3.8h, v16.16b, v17.16b
smlal v0.4s, v2.4h, v2.4h
smlal2 v1.4s, v2.8h, v2.8h
smlal v0.4s, v3.4h, v3.4h
smlal2 v1.4s, v3.8h, v3.8h
// Fold the second accumulator so the emitter can reduce v0 alone.
add v0.4s, v0.4s, v1.4s
.endm
// Emit pixel_ssd_WxH_neon: START, h-2 middle steps, END, then reduce
// the 4x32-bit accumulator to the scalar return in w0.
.macro SSD_FUNC w h
function pixel_ssd_\w\()x\h\()_neon, export=1
SSD_START_\w
.rept \h-2
SSD_\w
.endr
SSD_END_\w
addv s0, v0.4s
mov w0, v0.s[0]
ret
endfunc
.endm
// 4x4 SATD: load both 4x4 blocks two rows per q-register, take
// differences, run a 4x4 Hadamard transform (butterflies interleaved
// with zip/trn transposes), and sum the absolute coefficients.
// In: x0/x1 = pix1/stride1, x2/x3 = pix2/stride2. Out: w0.
function pixel_satd_4x4_neon, export=1
ld1 {v1.s}[0], [x2], x3
ld1 {v0.s}[0], [x0], x1
ld1 {v3.s}[0], [x2], x3
ld1 {v2.s}[0], [x0], x1
ld1 {v1.s}[1], [x2], x3
ld1 {v0.s}[1], [x0], x1
ld1 {v3.s}[1], [x2], x3
ld1 {v2.s}[1], [x0], x1
usubl v0.8h, v0.8b, v1.8b
usubl v1.8h, v2.8b, v3.8b
SUMSUB_AB v2.8h, v3.8h, v0.8h, v1.8h
zip1 v0.2d, v2.2d, v3.2d
zip2 v1.2d, v2.2d, v3.2d
SUMSUB_AB v2.8h, v3.8h, v0.8h, v1.8h
trn1 v0.8h, v2.8h, v3.8h
trn2 v1.8h, v2.8h, v3.8h
SUMSUB_AB v2.8h, v3.8h, v0.8h, v1.8h
trn1 v0.4s, v2.4s, v3.4s
trn2 v1.4s, v2.4s, v3.4s
abs v0.8h, v0.8h
abs v1.8h, v1.8h
// umax instead of add exploits that matching lanes hold |a+b|,|a-b|
// pairs whose max equals the final butterfly's contribution.
umax v0.8h, v0.8h, v1.8h
uaddlv s0, v0.8h
mov w0, v0.s[0]
ret
endfunc
// 4x8 SATD: load eight 4-pixel rows packed pairwise into v0..v7, then
// branch to the shared 4x8/8x4 tail (same data layout as 8x4).
function pixel_satd_4x8_neon, export=1
ld1 {v1.s}[0], [x2], x3
ld1 {v0.s}[0], [x0], x1
ld1 {v3.s}[0], [x2], x3
ld1 {v2.s}[0], [x0], x1
ld1 {v5.s}[0], [x2], x3
ld1 {v4.s}[0], [x0], x1
ld1 {v7.s}[0], [x2], x3
ld1 {v6.s}[0], [x0], x1
ld1 {v1.s}[1], [x2], x3
ld1 {v0.s}[1], [x0], x1
ld1 {v3.s}[1], [x2], x3
ld1 {v2.s}[1], [x0], x1
ld1 {v5.s}[1], [x2], x3
ld1 {v4.s}[1], [x0], x1
ld1 {v7.s}[1], [x2], x3
ld1 {v6.s}[1], [x0], x1
b satd_4x8_8x4_end_neon
endfunc
// 8x4 SATD: load four 8-pixel rows into v0..v7, then FALL THROUGH into
// satd_4x8_8x4_end_neon below (no branch needed — it is the next
// function in the section).
function pixel_satd_8x4_neon, export=1
ld1 {v1.8b}, [x2], x3
ld1 {v0.8b}, [x0], x1
ld1 {v3.8b}, [x2], x3
ld1 {v2.8b}, [x0], x1
ld1 {v5.8b}, [x2], x3
ld1 {v4.8b}, [x0], x1
ld1 {v7.8b}, [x2], x3
ld1 {v6.8b}, [x0], x1
endfunc
// Shared SATD tail for 4x8 and 8x4: differences in v0..v3, a 4-point
// Hadamard across rows, transposes, a second Hadamard stage, then
// abs/max/sum reduction to w0.
function satd_4x8_8x4_end_neon
usubl v0.8h, v0.8b, v1.8b
usubl v1.8h, v2.8b, v3.8b
usubl v2.8h, v4.8b, v5.8b
usubl v3.8h, v6.8b, v7.8b
SUMSUB_AB v16.8h, v17.8h, v0.8h, v1.8h
SUMSUB_AB v18.8h, v19.8h, v2.8h, v3.8h
SUMSUB_AB v4.8h, v6.8h, v16.8h, v18.8h
SUMSUB_AB v5.8h, v7.8h, v17.8h, v19.8h
trn1 v0.8h, v4.8h, v5.8h
trn2 v1.8h, v4.8h, v5.8h
trn1 v2.8h, v6.8h, v7.8h
trn2 v3.8h, v6.8h, v7.8h
SUMSUB_AB v16.8h, v17.8h, v0.8h, v1.8h
SUMSUB_AB v18.8h, v19.8h, v2.8h, v3.8h
trn1 v0.4s, v16.4s, v18.4s
trn2 v1.4s, v16.4s, v18.4s
trn1 v2.4s, v17.4s, v19.4s
trn2 v3.4s, v17.4s, v19.4s
abs v0.8h, v0.8h
abs v1.8h, v1.8h
abs v2.8h, v2.8h
abs v3.8h, v3.8h
umax v0.8h, v0.8h, v1.8h
umax v1.8h, v2.8h, v3.8h
add v0.8h, v0.8h, v1.8h
uaddlv s0, v0.8h
mov w0, v0.s[0]
ret
endfunc
// 4x16 SATD: two batches of eight 4-pixel rows, differences widened
// into v16..v23, vertical butterflies into v0..v3, then the shared
// satd_8x4v_8x8h_neon kernel does the remaining Hadamard stages.
// x4 preserves the link register across the bl.
function pixel_satd_4x16_neon, export=1
mov x4, x30
ld1 {v1.s}[0], [x2], x3
ld1 {v0.s}[0], [x0], x1
ld1 {v3.s}[0], [x2], x3
ld1 {v2.s}[0], [x0], x1
ld1 {v5.s}[0], [x2], x3
ld1 {v4.s}[0], [x0], x1
ld1 {v7.s}[0], [x2], x3
ld1 {v6.s}[0], [x0], x1
ld1 {v1.s}[1], [x2], x3
ld1 {v0.s}[1], [x0], x1
ld1 {v3.s}[1], [x2], x3
ld1 {v2.s}[1], [x0], x1
ld1 {v5.s}[1], [x2], x3
ld1 {v4.s}[1], [x0], x1
ld1 {v7.s}[1], [x2], x3
ld1 {v6.s}[1], [x0], x1
usubl v16.8h, v0.8b, v1.8b
usubl v17.8h, v2.8b, v3.8b
usubl v18.8h, v4.8b, v5.8b
usubl v19.8h, v6.8b, v7.8b
ld1 {v1.s}[0], [x2], x3
ld1 {v0.s}[0], [x0], x1
ld1 {v3.s}[0], [x2], x3
ld1 {v2.s}[0], [x0], x1
ld1 {v5.s}[0], [x2], x3
ld1 {v4.s}[0], [x0], x1
ld1 {v7.s}[0], [x2], x3
ld1 {v6.s}[0], [x0], x1
ld1 {v1.s}[1], [x2], x3
ld1 {v0.s}[1], [x0], x1
ld1 {v3.s}[1], [x2], x3
ld1 {v2.s}[1], [x0], x1
ld1 {v5.s}[1], [x2], x3
ld1 {v4.s}[1], [x0], x1
ld1 {v7.s}[1], [x2], x3
ld1 {v6.s}[1], [x0], x1
usubl v20.8h, v0.8b, v1.8b
usubl v21.8h, v2.8b, v3.8b
usubl v22.8h, v4.8b, v5.8b
usubl v23.8h, v6.8b, v7.8b
SUMSUB_AB v0.8h, v1.8h, v16.8h, v17.8h
SUMSUB_AB v2.8h, v3.8h, v18.8h, v19.8h
bl satd_8x4v_8x8h_neon
add v30.8h, v0.8h, v1.8h
add v31.8h, v2.8h, v3.8h
add v0.8h, v30.8h, v31.8h
uaddlv s0, v0.8h
mov w0, v0.s[0]
ret x4
endfunc
// Load an 8x8 block pair (x0/x1 vs x2/x3), widen the eight row
// differences into v16..v23, and start the first butterfly stage on
// rows 0..3 (v0..v3) on the fly while the last rows are still loading.
.macro load_diff_fly_8x8
ld1 {v1.8b}, [x2], x3
ld1 {v0.8b}, [x0], x1
ld1 {v3.8b}, [x2], x3
ld1 {v2.8b}, [x0], x1
usubl v16.8h, v0.8b, v1.8b
ld1 {v5.8b}, [x2], x3
ld1 {v4.8b}, [x0], x1
usubl v17.8h, v2.8b, v3.8b
ld1 {v7.8b}, [x2], x3
ld1 {v6.8b}, [x0], x1
usubl v18.8h, v4.8b, v5.8b
ld1 {v1.8b}, [x2], x3
ld1 {v0.8b}, [x0], x1
usubl v19.8h, v6.8b, v7.8b
ld1 {v3.8b}, [x2], x3
ld1 {v2.8b}, [x0], x1
usubl v20.8h, v0.8b, v1.8b
ld1 {v5.8b}, [x2], x3
ld1 {v4.8b}, [x0], x1
usubl v21.8h, v2.8b, v3.8b
ld1 {v7.8b}, [x2], x3
ld1 {v6.8b}, [x0], x1
SUMSUB_AB v0.8h, v1.8h, v16.8h, v17.8h
SUMSUB_AB v2.8h, v3.8h, v18.8h, v19.8h
usubl v22.8h, v4.8b, v5.8b
usubl v23.8h, v6.8b, v7.8b
.endm
// 8x8 SATD: one call to the satd_8x8_neon helper (which returns its
// partial sums in v0..v3), then reduce. x4 saves the link register.
function pixel_satd_8x8_neon, export=1
mov x4, x30
bl satd_8x8_neon
add v0.8h, v0.8h, v1.8h
add v1.8h, v2.8h, v3.8h
add v0.8h, v0.8h, v1.8h
uaddlv s0, v0.8h
mov w0, v0.s[0]
ret x4
endfunc
// 8x16 SATD: two 8x8 halves (the helper advances x0/x2), partial sums
// kept in v30/v31 and combined at the end.
function pixel_satd_8x16_neon, export=1
mov x4, x30
bl satd_8x8_neon
add v0.8h, v0.8h, v1.8h
add v1.8h, v2.8h, v3.8h
add v30.8h, v0.8h, v1.8h
bl satd_8x8_neon
add v0.8h, v0.8h, v1.8h
add v1.8h, v2.8h, v3.8h
add v31.8h, v0.8h, v1.8h
add v0.8h, v30.8h, v31.8h
uaddlv s0, v0.8h
mov w0, v0.s[0]
ret x4
endfunc
// satd_8x8_neon: loads/diffs an 8x8 pair then FALLS THROUGH into
// satd_8x4v_8x8h_neon (next function in the section — no branch).
function satd_8x8_neon
load_diff_fly_8x8
endfunc
// one vertical hadamard pass and two horizontal
// In: v0..v3 = first-stage sums/diffs, v16..v23 = raw differences.
// Out: v0..v3 = per-lane max'd absolute transform coefficients,
// ready for the callers' add/uaddlv reduction.
function satd_8x4v_8x8h_neon
SUMSUB_AB v16.8h, v18.8h, v0.8h, v2.8h
SUMSUB_AB v17.8h, v19.8h, v1.8h, v3.8h
HADAMARD4_V v20.8h, v21.8h, v22.8h, v23.8h, v0.8h, v1.8h, v2.8h, v3.8h
transpose v0.8h, v1.8h, v16.8h, v17.8h
transpose v2.8h, v3.8h, v18.8h, v19.8h
transpose v4.8h, v5.8h, v20.8h, v21.8h
transpose v6.8h, v7.8h, v22.8h, v23.8h
SUMSUB_AB v16.8h, v17.8h, v0.8h, v1.8h
SUMSUB_AB v18.8h, v19.8h, v2.8h, v3.8h
SUMSUB_AB v20.8h, v21.8h, v4.8h, v5.8h
SUMSUB_AB v22.8h, v23.8h, v6.8h, v7.8h
transpose v0.4s, v2.4s, v16.4s, v18.4s
transpose v1.4s, v3.4s, v17.4s, v19.4s
transpose v4.4s, v6.4s, v20.4s, v22.4s
transpose v5.4s, v7.4s, v21.4s, v23.4s
abs v0.8h, v0.8h
abs v1.8h, v1.8h
abs v2.8h, v2.8h
abs v3.8h, v3.8h
abs v4.8h, v4.8h
abs v5.8h, v5.8h
abs v6.8h, v6.8h
abs v7.8h, v7.8h
umax v0.8h, v0.8h, v2.8h
umax v1.8h, v1.8h, v3.8h
umax v2.8h, v4.8h, v6.8h
umax v3.8h, v5.8h, v7.8h
ret
endfunc
// SSD over interleaved NV12 chroma: ld2 de-interleaves the U/V planes,
// and separate SSD totals are accumulated per plane (v6 = plane 0,
// v7 = plane 1) and stored as 64-bit sums to [x6] and [x7].
// In: x0/x1 = pix1/stride1, x2/x3 = pix2/stride2, w4 = width (pixel
// pairs), w5 = height, x6/x7 = output pointers for the two sums.
// x8 rounds the width up to a 16-multiple to rewind strides, since the
// inner loop consumes 16 pairs per iteration.
function pixel_ssd_nv12_core_neon, export=1
sxtw x8, w4
add x8, x8, #8
and x8, x8, #~15
movi v6.2d, #0
movi v7.2d, #0
sub x1, x1, x8, lsl #1
sub x3, x3, x8, lsl #1
1:
subs w8, w4, #16
ld2 {v0.8b,v1.8b}, [x0], #16
ld2 {v2.8b,v3.8b}, [x2], #16
ld2 {v24.8b,v25.8b}, [x0], #16
ld2 {v26.8b,v27.8b}, [x2], #16
usubl v16.8h, v0.8b, v2.8b
usubl v17.8h, v1.8b, v3.8b
smull v20.4s, v16.4h, v16.4h
smull v21.4s, v17.4h, v17.4h
usubl v18.8h, v24.8b, v26.8b
usubl v19.8h, v25.8b, v27.8b
smlal2 v20.4s, v16.8h, v16.8h
smlal2 v21.4s, v17.8h, v17.8h
// w8 < 0: width <= 16, drop the second pre-loaded half (4f);
// w8 == 0: exactly 16 left, finish it at 3f; otherwise loop at 2f.
b.lt 4f
b.eq 3f
2:
smlal v20.4s, v18.4h, v18.4h
smlal v21.4s, v19.4h, v19.4h
ld2 {v0.8b,v1.8b}, [x0], #16
ld2 {v2.8b,v3.8b}, [x2], #16
smlal2 v20.4s, v18.8h, v18.8h
smlal2 v21.4s, v19.8h, v19.8h
subs w8, w8, #16
usubl v16.8h, v0.8b, v2.8b
usubl v17.8h, v1.8b, v3.8b
smlal v20.4s, v16.4h, v16.4h
smlal v21.4s, v17.4h, v17.4h
ld2 {v24.8b,v25.8b}, [x0], #16
ld2 {v26.8b,v27.8b}, [x2], #16
smlal2 v20.4s, v16.8h, v16.8h
smlal2 v21.4s, v17.8h, v17.8h
b.lt 4f
usubl v18.8h, v24.8b, v26.8b
usubl v19.8h, v25.8b, v27.8b
b.gt 2b
3:
smlal v20.4s, v18.4h, v18.4h
smlal v21.4s, v19.4h, v19.4h
smlal2 v20.4s, v18.8h, v18.8h
smlal2 v21.4s, v19.8h, v19.8h
4:
subs w5, w5, #1
// Widen the 32-bit row totals into the 64-bit plane accumulators.
uaddw v6.2d, v6.2d, v20.2s
uaddw v7.2d, v7.2d, v21.2s
add x0, x0, x1
add x2, x2, x3
uaddw2 v6.2d, v6.2d, v20.4s
uaddw2 v7.2d, v7.2d, v21.4s
b.gt 1b
addp v6.2d, v6.2d, v7.2d
st1 {v6.d}[0], [x6]
st1 {v6.d}[1], [x7]
ret
endfunc
// pixel_var_8xH: accumulates sum of pixels (v0, 16-bit lanes) and sum
// of squares (v1+v2, 32-bit lanes) over an 8-wide column, four rows
// per loop iteration, then branches to the shared var_end reduction.
// In: x0 = pixels, x1 = stride. Result packing is done in var_end.
.macro pixel_var_8 h
function pixel_var_8x\h\()_neon, export=1
ld1 {v16.8b}, [x0], x1
ld1 {v17.8b}, [x0], x1
mov x2, \h - 4
umull v1.8h, v16.8b, v16.8b
uxtl v0.8h, v16.8b
umull v2.8h, v17.8b, v17.8b
uaddw v0.8h, v0.8h, v17.8b
ld1 {v18.8b}, [x0], x1
uaddlp v1.4s, v1.8h
uaddlp v2.4s, v2.8h
ld1 {v19.8b}, [x0], x1
1: subs x2, x2, #4
uaddw v0.8h, v0.8h, v18.8b
umull v24.8h, v18.8b, v18.8b
ld1 {v20.8b}, [x0], x1
uaddw v0.8h, v0.8h, v19.8b
umull v25.8h, v19.8b, v19.8b
uadalp v1.4s, v24.8h
ld1 {v21.8b}, [x0], x1
uaddw v0.8h, v0.8h, v20.8b
umull v26.8h, v20.8b, v20.8b
uadalp v2.4s, v25.8h
ld1 {v18.8b}, [x0], x1
uaddw v0.8h, v0.8h, v21.8b
umull v27.8h, v21.8b, v21.8b
uadalp v1.4s, v26.8h
ld1 {v19.8b}, [x0], x1
uadalp v2.4s, v27.8h
b.gt 1b
// Epilogue: the two rows pre-loaded by the last iteration.
uaddw v0.8h, v0.8h, v18.8b
umull v28.8h, v18.8b, v18.8b
uaddw v0.8h, v0.8h, v19.8b
umull v29.8h, v19.8b, v19.8b
uadalp v1.4s, v28.8h
uadalp v2.4s, v29.8h
b var_end
endfunc
.endm
// 16x16 variance: same sum/sum-of-squares accumulation as pixel_var_8
// but 16 pixels per row, two rows per iteration; FALLS THROUGH into
// var_end below for the reduction.
function pixel_var_16x16_neon, export=1
ld1 {v16.16b}, [x0], x1
ld1 {v17.16b}, [x0], x1
mov x2, #14
umull v1.8h, v16.8b, v16.8b
umull2 v2.8h, v16.16b, v16.16b
uxtl v0.8h, v16.8b
uaddlp v1.4s, v1.8h
uaddlp v2.4s, v2.8h
uaddw2 v0.8h, v0.8h, v16.16b
1: subs x2, x2, #2
ld1 {v18.16b}, [x0], x1
uaddw v0.8h, v0.8h, v17.8b
umull v3.8h, v17.8b, v17.8b
uaddw2 v0.8h, v0.8h, v17.16b
umull2 v4.8h, v17.16b, v17.16b
uadalp v1.4s, v3.8h
uadalp v2.4s, v4.8h
ld1 {v17.16b}, [x0], x1
uaddw v0.8h, v0.8h, v18.8b
umull v5.8h, v18.8b, v18.8b
uaddw2 v0.8h, v0.8h, v18.16b
umull2 v6.8h, v18.16b, v18.16b
uadalp v1.4s, v5.8h
uadalp v2.4s, v6.8h
b.gt 1b
uaddw v0.8h, v0.8h, v17.8b
umull v3.8h, v17.8b, v17.8b
uaddw2 v0.8h, v0.8h, v17.16b
umull2 v4.8h, v17.16b, v17.16b
uadalp v1.4s, v3.8h
uadalp v2.4s, v4.8h
endfunc
// Shared reduction: packs (sum of pixels) in the low 32 bits and
// (sum of squares) in the high 32 bits of the x0 return value.
function var_end
add v1.4s, v1.4s, v2.4s
uaddlv s0, v0.8h
uaddlv d1, v1.4s
mov w0, v0.s[0]
mov x1, v1.d[0]
orr x0, x0, x1, lsl #32
ret
endfunc
// pixel_var2_8xH: variance of the difference of two 8-wide column
// pairs (fenc at x0, contiguous #8 steps; fdec at x1, stride 16).
// Tracks signed sums (v0/v1) and sums of squares (v2+v3 / v4+v5) for
// each column, two row-pairs per loop iteration (pipelined via v6/v7).
// Stores the two SSD values at [x2] and [x2+4]; returns the summed
// variances (ssd - sum^2 >> shift, shift from block height) in w0.
.macro pixel_var2_8 h
function pixel_var2_8x\h\()_neon, export=1
mov x3, #16
ld1 {v16.8b}, [x0], #8
ld1 {v18.8b}, [x1], x3
ld1 {v17.8b}, [x0], #8
ld1 {v19.8b}, [x1], x3
mov x5, \h - 2
usubl v0.8h, v16.8b, v18.8b
usubl v1.8h, v17.8b, v19.8b
ld1 {v16.8b}, [x0], #8
ld1 {v18.8b}, [x1], x3
smull v2.4s, v0.4h, v0.4h
smull2 v3.4s, v0.8h, v0.8h
smull v4.4s, v1.4h, v1.4h
smull2 v5.4s, v1.8h, v1.8h
usubl v6.8h, v16.8b, v18.8b
1: subs x5, x5, #1
ld1 {v17.8b}, [x0], #8
ld1 {v19.8b}, [x1], x3
smlal v2.4s, v6.4h, v6.4h
smlal2 v3.4s, v6.8h, v6.8h
usubl v7.8h, v17.8b, v19.8b
add v0.8h, v0.8h, v6.8h
ld1 {v16.8b}, [x0], #8
ld1 {v18.8b}, [x1], x3
smlal v4.4s, v7.4h, v7.4h
smlal2 v5.4s, v7.8h, v7.8h
usubl v6.8h, v16.8b, v18.8b
add v1.8h, v1.8h, v7.8h
b.gt 1b
ld1 {v17.8b}, [x0], #8
ld1 {v19.8b}, [x1], x3
smlal v2.4s, v6.4h, v6.4h
smlal2 v3.4s, v6.8h, v6.8h
usubl v7.8h, v17.8b, v19.8b
add v0.8h, v0.8h, v6.8h
smlal v4.4s, v7.4h, v7.4h
add v1.8h, v1.8h, v7.8h
smlal2 v5.4s, v7.8h, v7.8h
saddlv s0, v0.8h
saddlv s1, v1.8h
add v2.4s, v2.4s, v3.4s
add v4.4s, v4.4s, v5.4s
mov w0, v0.s[0]
mov w1, v1.s[0]
addv s2, v2.4s
addv s4, v4.4s
mul w0, w0, w0
mul w1, w1, w1
mov w3, v2.s[0]
mov w4, v4.s[0]
// var = ssd - sum^2/(8*h): shift is 6 for h=8, 7 for h=16.
sub w0, w3, w0, lsr # 6 + (\h >> 4)
sub w1, w4, w1, lsr # 6 + (\h >> 4)
str w3, [x2]
add w0, w0, w1
str w4, [x2, #4]
ret
endfunc
.endm
// 16x8 SATD: two satd_16x4_neon calls, partial sums accumulated in
// v30/v31, then widened and reduced to w0. x4 preserves the link reg.
function pixel_satd_16x8_neon, export=1
mov x4, x30
bl satd_16x4_neon
add v30.8h, v0.8h, v1.8h
add v31.8h, v2.8h, v3.8h
bl satd_16x4_neon
add v0.8h, v0.8h, v1.8h
add v1.8h, v2.8h, v3.8h
add v30.8h, v30.8h, v0.8h
add v31.8h, v31.8h, v1.8h
add v0.8h, v30.8h, v31.8h
uaddlv s0, v0.8h
mov w0, v0.s[0]
ret x4
endfunc
// 16x16 SATD: same scheme with four satd_16x4_neon calls.
function pixel_satd_16x16_neon, export=1
mov x4, x30
bl satd_16x4_neon
add v30.8h, v0.8h, v1.8h
add v31.8h, v2.8h, v3.8h
bl satd_16x4_neon
add v0.8h, v0.8h, v1.8h
add v1.8h, v2.8h, v3.8h
add v30.8h, v30.8h, v0.8h
add v31.8h, v31.8h, v1.8h
bl satd_16x4_neon
add v0.8h, v0.8h, v1.8h
add v1.8h, v2.8h, v3.8h
add v30.8h, v30.8h, v0.8h
add v31.8h, v31.8h, v1.8h
bl satd_16x4_neon
add v0.8h, v0.8h, v1.8h
add v1.8h, v2.8h, v3.8h
add v30.8h, v30.8h, v0.8h
add v31.8h, v31.8h, v1.8h
add v0.8h, v30.8h, v31.8h
uaddlv s0, v0.8h
mov w0, v0.s[0]
ret x4
endfunc
// satd_16x4 helper: load four 16-pixel row pairs, widen differences
// (low halves -> v16..v19, high halves -> v20..v23), do the first
// butterfly stage, then tail-branch into the shared Hadamard kernel.
function satd_16x4_neon
ld1 {v1.16b}, [x2], x3
ld1 {v0.16b}, [x0], x1
ld1 {v3.16b}, [x2], x3
ld1 {v2.16b}, [x0], x1
usubl v16.8h, v0.8b, v1.8b
usubl2 v20.8h, v0.16b, v1.16b
ld1 {v5.16b}, [x2], x3
ld1 {v4.16b}, [x0], x1
usubl v17.8h, v2.8b, v3.8b
usubl2 v21.8h, v2.16b, v3.16b
ld1 {v7.16b}, [x2], x3
ld1 {v6.16b}, [x0], x1
usubl v18.8h, v4.8b, v5.8b
usubl2 v22.8h, v4.16b, v5.16b
usubl v19.8h, v6.8b, v7.8b
usubl2 v23.8h, v6.16b, v7.16b
SUMSUB_AB v0.8h, v1.8h, v16.8h, v17.8h
SUMSUB_AB v2.8h, v3.8h, v18.8h, v19.8h
b satd_8x4v_8x8h_neon
endfunc
// 8x8 SA8D wrapper. NOTE(review): `bl pixel_sa8d_8x8_neon` targets the
// NON-exported helper generated by the sa8d_satd_8x8 macro below (the
// exported entry only defines the prefixed EXTERN_ASM symbol), so this
// is not self-recursion — confirm against asm.S's `function` macro.
// Result is (sum + 1) >> 1 per the SA8D definition.
function pixel_sa8d_8x8_neon, export=1
mov x4, x30
bl pixel_sa8d_8x8_neon
add v0.8h, v0.8h, v1.8h
uaddlv s0, v0.8h
mov w0, v0.s[0]
add w0, w0, #1
lsr w0, w0, #1
ret x4
endfunc
// 16x16 SA8D: four 8x8 quadrants. After the first two (stacked
// vertically via the helper's pointer advance), rewind 16 rows and
// step 8 columns right for the second pair. 32-bit accumulators
// (v30/v31) avoid 16-bit overflow across quadrants.
function pixel_sa8d_16x16_neon, export=1
mov x4, x30
bl pixel_sa8d_8x8_neon
uaddlp v30.4s, v0.8h
uaddlp v31.4s, v1.8h
bl pixel_sa8d_8x8_neon
uadalp v30.4s, v0.8h
uadalp v31.4s, v1.8h
sub x0, x0, x1, lsl #4
sub x2, x2, x3, lsl #4
add x0, x0, #8
add x2, x2, #8
bl pixel_sa8d_8x8_neon
uadalp v30.4s, v0.8h
uadalp v31.4s, v1.8h
bl pixel_sa8d_8x8_neon
uadalp v30.4s, v0.8h
uadalp v31.4s, v1.8h
add v0.4s, v30.4s, v31.4s
addv s0, v0.4s
mov w0, v0.s[0]
add w0, w0, #1
lsr w0, w0, #1
ret x4
endfunc
.macro sa8d_satd_8x8 satd=
function pixel_sa8d_\satd\()8x8_neon
load_diff_fly_8x8
SUMSUB_AB v16.8h, v18.8h, v0.8h, v2.8h
SUMSUB_AB v17.8h, v19.8h, v1.8h, v3.8h
HADAMARD4_V v20.8h, v21.8h, v22.8h, v23.8h, v0.8h, v1.8h, v2.8h, v3.8h
.ifc \satd, satd_
transpose v0.8h, v1.8h, v16.8h, v17.8h
transpose v2.8h, v3.8h, v18.8h, v19.8h
transpose v4.8h, v5.8h, v20.8h, v21.8h
transpose v6.8h, v7.8h, v22.8h, v23.8h
SUMSUB_AB v24.8h, v25.8h, v0.8h, v1.8h
SUMSUB_AB v26.8h, v27.8h, v2.8h, v3.8h
SUMSUB_AB v0.8h, v1.8h, v4.8h, v5.8h
SUMSUB_AB v2.8h, v3.8h, v6.8h, v7.8h
transpose v4.4s, v6.4s, v24.4s, v26.4s
transpose v5.4s, v7.4s, v25.4s, v27.4s
transpose v24.4s, v26.4s, v0.4s, v2.4s
transpose v25.4s, v27.4s, v1.4s, v3.4s
abs v0.8h, v4.8h
abs v1.8h, v5.8h
abs v2.8h, v6.8h
abs v3.8h, v7.8h
abs v4.8h, v24.8h
abs v5.8h, v25.8h
abs v6.8h, v26.8h
abs v7.8h, v27.8h
umax v0.8h, v0.8h, v2.8h
umax v1.8h, v1.8h, v3.8h
umax v2.8h, v4.8h, v6.8h
umax v3.8h, v5.8h, v7.8h
add v26.8h, v0.8h, v1.8h
add v27.8h, v2.8h, v3.8h
.endif
SUMSUB_AB v0.8h, v16.8h, v16.8h, v20.8h
SUMSUB_AB v1.8h, v17.8h, v17.8h, v21.8h
SUMSUB_AB v2.8h, v18.8h, v18.8h, v22.8h
SUMSUB_AB v3.8h, v19.8h, v19.8h, v23.8h
transpose v20.8h, v21.8h, v16.8h, v17.8h
transpose v4.8h, v5.8h, v0.8h, v1.8h
transpose v22.8h, v23.8h, v18.8h, v19.8h
transpose v6.8h, v7.8h, v2.8h, v3.8h
SUMSUB_AB v2.8h, v3.8h, v20.8h, v21.8h
SUMSUB_AB v24.8h, v25.8h, v4.8h, v5.8h
SUMSUB_AB v0.8h, v1.8h, v22.8h, v23.8h
SUMSUB_AB v4.8h, v5.8h, v6.8h, v7.8h
transpose v20.4s, v22.4s, v2.4s, v0.4s
transpose v21.4s, v23.4s, v3.4s, v1.4s
transpose v16.4s, v18.4s, v24.4s, v4.4s
transpose v17.4s, v19.4s, v25.4s, v5.4s
SUMSUB_AB v0.8h, v2.8h, v20.8h, v22.8h
SUMSUB_AB v1.8h, v3.8h, v21.8h, v23.8h
SUMSUB_AB v4.8h, v6.8h, v16.8h, v18.8h
SUMSUB_AB v5.8h, v7.8h, v17.8h, v19.8h
transpose v16.2d, v20.2d, v0.2d, v4.2d
transpose v17.2d, v21.2d, v1.2d, v5.2d
transpose v18.2d, v22.2d, v2.2d, v6.2d
transpose v19.2d, v23.2d, v3.2d, v7.2d
abs v16.8h, v16.8h
abs v20.8h, v20.8h
abs v17.8h, v17.8h
abs v21.8h, v21.8h
abs v18.8h, v18.8h
abs v22.8h, v22.8h
abs v19.8h, v19.8h
abs v23.8h, v23.8h
umax v16.8h, v16.8h, v20.8h
umax v17.8h, v17.8h, v21.8h
umax v18.8h, v18.8h, v22.8h
umax v19.8h, v19.8h, v23.8h
add v0.8h, v16.8h, v17.8h
add v1.8h, v18.8h, v19.8h
ret
endfunc
.endm
function pixel_sa8d_satd_16x16_neon, export=1
mov x4, x30
bl pixel_sa8d_satd_8x8_neon
uaddlp v30.4s, v0.8h
uaddlp v31.4s, v1.8h
uaddlp v28.4s, v26.8h
uaddlp v29.4s, v27.8h
bl pixel_sa8d_satd_8x8_neon
uadalp v30.4s, v0.8h
uadalp v31.4s, v1.8h
uadalp v28.4s, v26.8h
uadalp v29.4s, v27.8h
sub x0, x0, x1, lsl #4
sub x2, x2, x3, lsl #4
add x0, x0, #8
add x2, x2, #8
bl pixel_sa8d_satd_8x8_neon
uadalp v30.4s, v0.8h
uadalp v31.4s, v1.8h
uadalp v28.4s, v26.8h
uadalp v29.4s, v27.8h
bl pixel_sa8d_satd_8x8_neon
uadalp v30.4s, v0.8h
uadalp v31.4s, v1.8h
uadalp v28.4s, v26.8h
uadalp v29.4s, v27.8h
add v0.4s, v30.4s, v31.4s // sa8d
add v1.4s, v28.4s, v29.4s // satd
addv s0, v0.4s
addv s1, v1.4s
urshr v0.4s, v0.4s, #1
fmov w0, s0
fmov w1, s1
add x0, x0, x1, lsl #32
ret x4
endfunc
.macro HADAMARD_AC w h
function pixel_hadamard_ac_\w\()x\h\()_neon, export=1
movrel x5, mask_ac_4_8
mov x4, x30
ld1 {v30.8h,v31.8h}, [x5]
movi v28.16b, #0
movi v29.16b, #0
bl hadamard_ac_8x8_neon
.if \h > 8
bl hadamard_ac_8x8_neon
.endif
.if \w > 8
sub x0, x0, x1, lsl #3
add x0, x0, #8
bl hadamard_ac_8x8_neon
.endif
.if \w * \h == 256
sub x0, x0, x1, lsl #4
bl hadamard_ac_8x8_neon
.endif
addv s1, v29.4s
addv s0, v28.4s
mov w1, v1.s[0]
mov w0, v0.s[0]
lsr w1, w1, #2
lsr w0, w0, #1
orr x0, x0, x1, lsl #32
ret x4
endfunc
.endm
// v28: satd v29: sa8d v30: mask_ac4 v31: mask_ac8
function hadamard_ac_8x8_neon
ld1 {v16.8b}, [x0], x1
ld1 {v17.8b}, [x0], x1
ld1 {v18.8b}, [x0], x1
ld1 {v19.8b}, [x0], x1
SUMSUBL_AB v0.8h, v1.8h, v16.8b, v17.8b
ld1 {v20.8b}, [x0], x1
ld1 {v21.8b}, [x0], x1
SUMSUBL_AB v2.8h, v3.8h, v18.8b, v19.8b
ld1 {v22.8b}, [x0], x1
ld1 {v23.8b}, [x0], x1
SUMSUBL_AB v4.8h, v5.8h, v20.8b, v21.8b
SUMSUBL_AB v6.8h, v7.8h, v22.8b, v23.8b
SUMSUB_ABCD v16.8h, v18.8h, v17.8h, v19.8h, v0.8h, v2.8h, v1.8h, v3.8h
SUMSUB_ABCD v20.8h, v22.8h, v21.8h, v23.8h, v4.8h, v6.8h, v5.8h, v7.8h
transpose v0.8h, v1.8h, v16.8h, v17.8h
transpose v2.8h, v3.8h, v18.8h, v19.8h
transpose v4.8h, v5.8h, v20.8h, v21.8h
transpose v6.8h, v7.8h, v22.8h, v23.8h
SUMSUB_AB v16.8h, v17.8h, v0.8h, v1.8h
SUMSUB_AB v18.8h, v19.8h, v2.8h, v3.8h
SUMSUB_AB v20.8h, v21.8h, v4.8h, v5.8h
SUMSUB_AB v22.8h, v23.8h, v6.8h, v7.8h
transpose v0.4s, v2.4s, v16.4s, v18.4s
transpose v1.4s, v3.4s, v17.4s, v19.4s
transpose v4.4s, v6.4s, v20.4s, v22.4s
transpose v5.4s, v7.4s, v21.4s, v23.4s
SUMSUB_AB v16.8h, v18.8h, v0.8h, v2.8h
SUMSUB_AB v17.8h, v19.8h, v1.8h, v3.8h
SUMSUB_ABCD v20.8h, v22.8h, v21.8h, v23.8h, v4.8h, v6.8h, v5.8h, v7.8h
abs v0.8h, v16.8h
abs v4.8h, v20.8h
abs v1.8h, v17.8h
abs v5.8h, v21.8h
abs v2.8h, v18.8h
abs v6.8h, v22.8h
abs v3.8h, v19.8h
abs v7.8h, v23.8h
add v0.8h, v0.8h, v4.8h
add v1.8h, v1.8h, v5.8h
and v0.16b, v0.16b, v30.16b
add v2.8h, v2.8h, v6.8h
add v3.8h, v3.8h, v7.8h
add v0.8h, v0.8h, v2.8h
add v1.8h, v1.8h, v3.8h
uadalp v28.4s, v0.8h
uadalp v28.4s, v1.8h
SUMSUB_AB v6.8h, v7.8h, v23.8h, v19.8h
SUMSUB_AB v4.8h, v5.8h, v22.8h, v18.8h
SUMSUB_AB v2.8h, v3.8h, v21.8h, v17.8h
SUMSUB_AB v1.8h, v0.8h, v16.8h, v20.8h
transpose v16.2d, v17.2d, v6.2d, v7.2d
transpose v18.2d, v19.2d, v4.2d, v5.2d
transpose v20.2d, v21.2d, v2.2d, v3.2d
abs v16.8h, v16.8h
abs v17.8h, v17.8h
abs v18.8h, v18.8h
abs v19.8h, v19.8h
abs v20.8h, v20.8h
abs v21.8h, v21.8h
transpose v7.2d, v6.2d, v1.2d, v0.2d
umax v3.8h, v16.8h, v17.8h
umax v2.8h, v18.8h, v19.8h
umax v1.8h, v20.8h, v21.8h
SUMSUB_AB v4.8h, v5.8h, v7.8h, v6.8h
add v2.8h, v2.8h, v3.8h
add v2.8h, v2.8h, v1.8h
and v4.16b, v4.16b, v31.16b
add v2.8h, v2.8h, v2.8h
abs v5.8h, v5.8h
abs v4.8h, v4.8h
add v2.8h, v2.8h, v5.8h
add v2.8h, v2.8h, v4.8h
uadalp v29.4s, v2.8h
ret
endfunc
function pixel_ssim_4x4x2_core_neon, export=1
ld1 {v0.8b}, [x0], x1
ld1 {v2.8b}, [x2], x3
umull v16.8h, v0.8b, v0.8b
umull v17.8h, v0.8b, v2.8b
umull v18.8h, v2.8b, v2.8b
ld1 {v28.8b}, [x0], x1
ld1 {v29.8b}, [x2], x3
umull v20.8h, v28.8b, v28.8b
umull v21.8h, v28.8b, v29.8b
umull v22.8h, v29.8b, v29.8b
uaddlp v16.4s, v16.8h
uaddlp v17.4s, v17.8h
uaddl v0.8h, v0.8b, v28.8b
uadalp v16.4s, v18.8h
uaddl v1.8h, v2.8b, v29.8b
ld1 {v26.8b}, [x0], x1
ld1 {v27.8b}, [x2], x3
umull v23.8h, v26.8b, v26.8b
umull v24.8h, v26.8b, v27.8b
umull v25.8h, v27.8b, v27.8b
uadalp v16.4s, v20.8h
uaddw v0.8h, v0.8h, v26.8b
uadalp v17.4s, v21.8h
uaddw v1.8h, v1.8h, v27.8b
uadalp v16.4s, v22.8h
ld1 {v28.8b}, [x0], x1
ld1 {v29.8b}, [x2], x3
umull v20.8h, v28.8b, v28.8b
umull v21.8h, v28.8b, v29.8b
umull v22.8h, v29.8b, v29.8b
uadalp v16.4s, v23.8h
uaddw v0.8h, v0.8h, v28.8b
uadalp v17.4s, v24.8h
uaddw v1.8h, v1.8h, v29.8b
uadalp v16.4s, v25.8h
uadalp v16.4s, v20.8h
uadalp v17.4s, v21.8h
uadalp v16.4s, v22.8h
uaddlp v0.4s, v0.8h
uaddlp v1.4s, v1.8h
addp v0.4s, v0.4s, v0.4s
addp v1.4s, v1.4s, v1.4s
addp v2.4s, v16.4s, v16.4s
addp v3.4s, v17.4s, v17.4s
st4 {v0.2s,v1.2s,v2.2s,v3.2s}, [x4]
ret
endfunc
function pixel_ssim_end4_neon, export=1
mov x5, #4
ld1 {v16.4s,v17.4s}, [x0], #32
ld1 {v18.4s,v19.4s}, [x1], #32
mov w4, #0x99bb
subs x2, x5, w2, uxtw
mov w3, #416 // ssim_c1 = .01*.01*255*255*64
movk w4, #0x03, lsl #16 // ssim_c2 = .03*.03*255*255*64*63
add v0.4s, v16.4s, v18.4s
add v1.4s, v17.4s, v19.4s
add v0.4s, v0.4s, v1.4s
ld1 {v20.4s,v21.4s}, [x0], #32
ld1 {v22.4s,v23.4s}, [x1], #32
add v2.4s, v20.4s, v22.4s
add v3.4s, v21.4s, v23.4s
add v1.4s, v1.4s, v2.4s
ld1 {v16.4s}, [x0], #16
ld1 {v18.4s}, [x1], #16
add v16.4s, v16.4s, v18.4s
add v2.4s, v2.4s, v3.4s
add v3.4s, v3.4s, v16.4s
dup v30.4s, w3
dup v31.4s, w4
transpose v4.4s, v5.4s, v0.4s, v1.4s
transpose v6.4s, v7.4s, v2.4s, v3.4s
transpose v0.2d, v2.2d, v4.2d, v6.2d
transpose v1.2d, v3.2d, v5.2d, v7.2d
mul v16.4s, v0.4s, v1.4s // s1*s2
mul v0.4s, v0.4s, v0.4s
mla v0.4s, v1.4s, v1.4s // s1*s1 + s2*s2
shl v3.4s, v3.4s, #7
shl v2.4s, v2.4s, #6
add v1.4s, v16.4s, v16.4s
sub v2.4s, v2.4s, v0.4s // vars
sub v3.4s, v3.4s, v1.4s // covar*2
add v0.4s, v0.4s, v30.4s
add v2.4s, v2.4s, v31.4s
add v1.4s, v1.4s, v30.4s
add v3.4s, v3.4s, v31.4s
scvtf v0.4s, v0.4s
scvtf v2.4s, v2.4s
scvtf v1.4s, v1.4s
scvtf v3.4s, v3.4s
fmul v0.4s, v0.4s, v2.4s
fmul v1.4s, v1.4s, v3.4s
fdiv v0.4s, v1.4s, v0.4s
b.eq 1f
movrel x3, mask
add x3, x3, x2, lsl #2
ld1 {v29.4s}, [x3]
and v0.16b, v0.16b, v29.16b
1:
faddp v0.4s, v0.4s, v0.4s
faddp s0, v0.2s
ret
endfunc
#else /* BIT_DEPTH == 8 */
.macro SAD_START_4
lsl x1, x1, #1
lsl x3, x3, #1
ld1 {v1.d}[0], [x2], x3
ld1 {v0.d}[0], [x0], x1
ld1 {v1.d}[1], [x2], x3
ld1 {v0.d}[1], [x0], x1
uabdl v16.4s, v0.4h, v1.4h
uabdl2 v18.4s, v0.8h, v1.8h
.endm
.macro SAD_4
ld1 {v1.d}[0], [x2], x3
ld1 {v0.d}[0], [x0], x1
ld1 {v1.d}[1], [x2], x3
ld1 {v0.d}[1], [x0], x1
uabal v16.4s, v0.4h, v1.4h
uabal2 v18.4s, v0.8h, v1.8h
.endm
.macro SAD_START_8
lsl x1, x1, #1
lsl x3, x3, #1
ld1 {v1.8h}, [x2], x3
ld1 {v0.8h}, [x0], x1
ld1 {v3.8h}, [x2], x3
ld1 {v2.8h}, [x0], x1
uabdl v16.4s, v0.4h, v1.4h
uabdl2 v17.4s, v0.8h, v1.8h
uabdl v18.4s, v2.4h, v3.4h
uabdl2 v19.4s, v2.8h, v3.8h
.endm
.macro SAD_8
ld1 {v1.8h}, [x2], x3
ld1 {v0.8h}, [x0], x1
ld1 {v3.8h}, [x2], x3
ld1 {v2.8h}, [x0], x1
uabal v16.4s, v0.4h, v1.4h
uabal2 v17.4s, v0.8h, v1.8h
uabal v18.4s, v2.4h, v3.4h
uabal2 v19.4s, v2.8h, v3.8h
.endm
.macro SAD_START_16
lsl x1, x1, #1
lsl x3, x3, #1
ld2 {v0.8h, v1.8h}, [x2], x3
ld2 {v2.8h, v3.8h}, [x0], x1
ld2 {v4.8h, v5.8h}, [x2], x3
ld2 {v6.8h, v7.8h}, [x0], x1
uabdl v16.4s, v0.4h, v2.4h
uabdl2 v17.4s, v0.8h, v2.8h
uabdl v20.4s, v1.4h, v3.4h
uabdl2 v21.4s, v1.8h, v3.8h
uabdl v18.4s, v4.4h, v6.4h
uabdl2 v19.4s, v4.8h, v6.8h
uabdl v22.4s, v5.4h, v7.4h
uabdl2 v23.4s, v5.8h, v7.8h
.endm
.macro SAD_16
ld2 {v0.8h, v1.8h}, [x2], x3
ld2 {v2.8h, v3.8h}, [x0], x1
ld2 {v4.8h, v5.8h}, [x2], x3
ld2 {v6.8h, v7.8h}, [x0], x1
uabal v16.4s, v0.4h, v2.4h
uabal2 v17.4s, v0.8h, v2.8h
uabal v20.4s, v1.4h, v3.4h
uabal2 v21.4s, v1.8h, v3.8h
uabal v18.4s, v4.4h, v6.4h
uabal2 v19.4s, v4.8h, v6.8h
uabal v22.4s, v5.4h, v7.4h
uabal2 v23.4s, v5.8h, v7.8h
.endm
.macro SAD_FUNC w, h, name
function pixel_sad\name\()_\w\()x\h\()_neon, export=1
SAD_START_\w
.rept \h / 2 - 1
SAD_\w
.endr
.if \w > 8
add v20.4s, v20.4s, v21.4s
add v16.4s, v16.4s, v20.4s
add v22.4s, v22.4s, v23.4s
add v18.4s, v18.4s, v22.4s
.endif
.if \w > 4
add v16.4s, v16.4s, v17.4s
add v18.4s, v18.4s, v19.4s
.endif
add v16.4s, v16.4s, v18.4s
uaddlv s0, v16.8h
fmov w0, s0
ret
endfunc
.endm
.macro SAD_X_4 x, first=uaba
ld1 {v0.d}[0], [x0], x7
ld1 {v1.d}[0], [x1], x5
ld1 {v0.d}[1], [x0], x7
ld1 {v1.d}[1], [x1], x5
ld1 {v2.d}[0], [x2], x5
ld1 {v2.d}[1], [x2], x5
\first v16.8h, v1.8h, v0.8h
ld1 {v3.d}[0], [x3], x5
ld1 {v3.d}[1], [x3], x5
\first v17.8h, v2.8h, v0.8h
.if \x == 4
ld1 {v4.d}[0], [x4], x5
ld1 {v4.d}[1], [x4], x5
.endif
\first v18.8h, v3.8h, v0.8h
.if \x == 4
\first v19.8h, v4.8h, v0.8h
.endif
.endm
.macro SAD_X_8 x, first=uaba
ld1 {v0.8h}, [x0], x7
ld1 {v1.8h}, [x1], x5
\first v16.8h, v1.8h, v0.8h
ld1 {v2.8h}, [x2], x5
ld1 {v3.8h}, [x3], x5
\first v17.8h, v2.8h, v0.8h
ld1 {v5.8h}, [x0], x7
ld1 {v1.8h}, [x1], x5
\first v18.8h, v3.8h, v0.8h
ld1 {v2.8h}, [x2], x5
uaba v16.8h, v1.8h, v5.8h
ld1 {v3.8h}, [x3], x5
uaba v17.8h, v2.8h, v5.8h
.if \x == 4
ld1 {v4.8h}, [x4], x5
ld1 {v1.8h}, [x4], x5
.endif
uaba v18.8h, v3.8h, v5.8h
.if \x == 4
\first v19.8h, v4.8h, v0.8h
uaba v19.8h, v1.8h, v5.8h
.endif
.endm
.macro SAD_X_16 x, first=uaba
ld1 {v0.8h, v1.8h}, [x0], x7
ld1 {v2.8h, v3.8h}, [x1], x5
ld1 {v4.8h, v5.8h}, [x2], x5
\first v16.8h, v2.8h, v0.8h
\first v20.8h, v3.8h, v1.8h
ld1 {v24.8h, v25.8h}, [x3], x5
\first v17.8h, v4.8h, v0.8h
\first v21.8h, v5.8h, v1.8h
ld1 {v6.8h, v7.8h}, [x0], x7
ld1 {v2.8h, v3.8h}, [x1], x5
\first v18.8h, v24.8h, v0.8h
\first v22.8h, v25.8h, v1.8h
ld1 {v4.8h, v5.8h}, [x2], x5
uaba v16.8h, v2.8h, v6.8h
uaba v20.8h, v3.8h, v7.8h
ld1 {v24.8h, v25.8h}, [x3], x5
uaba v17.8h, v4.8h, v6.8h
uaba v21.8h, v5.8h, v7.8h
.if \x == 4
ld1 {v26.8h, v27.8h}, [x4], x5
ld1 {v28.8h, v29.8h}, [x4], x5
.endif
uaba v18.8h, v24.8h, v6.8h
uaba v22.8h, v25.8h, v7.8h
.if \x == 4
\first v19.8h, v26.8h, v0.8h
\first v23.8h, v27.8h, v1.8h
uaba v19.8h, v28.8h, v6.8h
uaba v23.8h, v29.8h, v7.8h
.endif
.endm
.macro SAD_X_FUNC x, w, h
function pixel_sad_x\x\()_\w\()x\h\()_neon, export=1
.if \x == 3
mov x6, x5
mov x5, x4
.endif
mov x7, #FENC_STRIDE
lsl x5, x5, #1
lsl x7, x7, #1
SAD_X_\w \x, uabd
.rept \h / 2 - 1
SAD_X_\w \x
.endr
.if \w > 8
add v16.8h, v16.8h, v20.8h
add v17.8h, v17.8h, v21.8h
add v18.8h, v18.8h, v22.8h
.if \x == 4
add v19.8h, v19.8h, v23.8h
.endif
.endif
// add up the sads
uaddlv s0, v16.8h
uaddlv s1, v17.8h
uaddlv s2, v18.8h
stp s0, s1, [x6], #8
.if \x == 3
str s2, [x6]
.else
uaddlv s3, v19.8h
stp s2, s3, [x6]
.endif
ret
endfunc
.endm
function pixel_vsad_neon, export=1
subs w2, w2, #2
lsl x1, x1, #1
ld1 {v0.8h, v1.8h}, [x0], x1
ld1 {v2.8h, v3.8h}, [x0], x1
uabd v6.8h, v0.8h, v2.8h
uabd v7.8h, v1.8h, v3.8h
b.le 2f
1:
subs w2, w2, #2
ld1 {v0.8h, v1.8h}, [x0], x1
uaba v6.8h, v2.8h, v0.8h
uaba v7.8h, v3.8h, v1.8h
ld1 {v2.8h, v3.8h}, [x0], x1
b.lt 2f
uaba v6.8h, v0.8h, v2.8h
uaba v7.8h, v1.8h, v3.8h
b.gt 1b
2:
add v5.8h, v6.8h, v7.8h
uaddlv s0, v5.8h
fmov w0, s0
ret
endfunc
function pixel_asd8_neon, export=1
sub w4, w4, #2
lsl x1, x1, #1
lsl x3, x3, #1
ld1 {v0.8h}, [x0], x1
ld1 {v1.8h}, [x2], x3
ld1 {v2.8h}, [x0], x1
ld1 {v3.8h}, [x2], x3
sub v16.8h, v0.8h, v1.8h
1:
subs w4, w4, #2
ld1 {v4.8h}, [x0], x1
ld1 {v5.8h}, [x2], x3
sub v17.8h, v2.8h, v3.8h
sub v18.8h, v4.8h, v5.8h
add v16.8h, v16.8h, v17.8h
ld1 {v2.8h}, [x0], x1
ld1 {v3.8h}, [x2], x3
add v16.8h, v16.8h, v18.8h
b.gt 1b
sub v17.8h, v2.8h, v3.8h
add v16.8h, v16.8h, v17.8h
saddlv s0, v16.8h
abs v0.4s, v0.4s
fmov w0, s0
ret
endfunc
.macro SSD_START_4
ld1 {v16.d}[0], [x0], x1
ld1 {v17.d}[0], [x2], x3
sub v2.4h, v16.4h, v17.4h
ld1 {v16.d}[0], [x0], x1
ld1 {v17.d}[0], [x2], x3
smull v0.4s, v2.4h, v2.4h
.endm
.macro SSD_4
sub v2.4h, v16.4h, v17.4h
ld1 {v16.d}[0], [x0], x1
ld1 {v17.d}[0], [x2], x3
smlal v0.4s, v2.4h, v2.4h
.endm
.macro SSD_END_4
sub v2.4h, v16.4h, v17.4h
smlal v0.4s, v2.4h, v2.4h
.endm
.macro SSD_START_8
ld1 {v16.8h}, [x0], x1
ld1 {v17.8h}, [x2], x3
sub v2.8h, v16.8h, v17.8h
ld1 {v16.8h}, [x0], x1
ld1 {v17.8h}, [x2], x3
smull v0.4s, v2.4h, v2.4h
smull2 v20.4s, v2.8h, v2.8h
.endm
.macro SSD_8
sub v2.8h, v16.8h, v17.8h
ld1 {v16.8h}, [x0], x1
ld1 {v17.8h}, [x2], x3
smlal v0.4s, v2.4h, v2.4h
smlal2 v20.4s, v2.8h, v2.8h
.endm
.macro SSD_END_8
sub v2.8h, v16.8h, v17.8h
smlal v0.4s, v2.4h, v2.4h
smlal2 v20.4s, v2.8h, v2.8h
add v0.4s, v0.4s, v20.4s
.endm
.macro SSD_START_16
ld1 {v16.8h, v17.8h}, [x0], x1
ld1 {v18.8h, v19.8h}, [x2], x3
sub v2.8h, v16.8h, v18.8h
sub v3.8h, v17.8h, v19.8h
ld1 {v16.8h, v17.8h}, [x0], x1
smull v0.4s, v2.4h, v2.4h
smull2 v20.4s, v2.8h, v2.8h
ld1 {v18.8h, v19.8h}, [x2], x3
smlal v0.4s, v3.4h, v3.4h
smlal2 v20.4s, v3.8h, v3.8h
.endm
.macro SSD_16
sub v2.8h, v16.8h, v18.8h
sub v3.8h, v17.8h, v19.8h
ld1 {v16.8h, v17.8h}, [x0], x1
smlal v0.4s, v2.4h, v2.4h
smlal2 v20.4s, v2.8h, v2.8h
ld1 {v18.8h, v19.8h}, [x2], x3
smlal v0.4s, v3.4h, v3.4h
smlal2 v20.4s, v3.8h, v3.8h
.endm
.macro SSD_END_16
sub v2.8h, v16.8h, v18.8h
sub v3.8h, v17.8h, v19.8h
smlal v0.4s, v2.4h, v2.4h
smlal2 v20.4s, v2.8h, v2.8h
smlal v0.4s, v3.4h, v3.4h
smlal2 v20.4s, v3.8h, v3.8h
add v0.4s, v0.4s, v20.4s
.endm
.macro SSD_FUNC w h
function pixel_ssd_\w\()x\h\()_neon, export=1
lsl x1, x1, #1
lsl x3, x3, #1
SSD_START_\w
.rept \h-2
SSD_\w
.endr
SSD_END_\w
addv s0, v0.4s
fmov w0, s0
ret
endfunc
.endm
function pixel_satd_4x4_neon, export=1
lsl x1, x1, #1
lsl x3, x3, #1
ld1 {v1.d}[0], [x2], x3
ld1 {v0.d}[0], [x0], x1
ld1 {v3.d}[0], [x2], x3
ld1 {v2.d}[0], [x0], x1
ld1 {v1.d}[1], [x2], x3
ld1 {v0.d}[1], [x0], x1
ld1 {v3.d}[1], [x2], x3
ld1 {v2.d}[1], [x0], x1
sub v0.8h, v0.8h, v1.8h
sub v1.8h, v2.8h, v3.8h
SUMSUB_AB v2.8h, v3.8h, v0.8h, v1.8h
zip1 v0.2d, v2.2d, v3.2d
zip2 v1.2d, v2.2d, v3.2d
SUMSUB_AB v2.8h, v3.8h, v0.8h, v1.8h
trn1 v0.8h, v2.8h, v3.8h
trn2 v1.8h, v2.8h, v3.8h
SUMSUB_AB v2.8h, v3.8h, v0.8h, v1.8h
trn1 v0.4s, v2.4s, v3.4s
trn2 v1.4s, v2.4s, v3.4s
abs v0.8h, v0.8h
abs v1.8h, v1.8h
umax v0.8h, v0.8h, v1.8h
uaddlv s0, v0.8h
fmov w0, s0
ret
endfunc
function pixel_satd_4x8_neon, export=1
lsl x1, x1, #1
lsl x3, x3, #1
ld1 {v1.d}[0], [x2], x3
ld1 {v0.d}[0], [x0], x1
ld1 {v3.d}[0], [x2], x3
ld1 {v2.d}[0], [x0], x1
ld1 {v5.d}[0], [x2], x3
ld1 {v4.d}[0], [x0], x1
ld1 {v7.d}[0], [x2], x3
ld1 {v6.d}[0], [x0], x1
ld1 {v1.d}[1], [x2], x3
ld1 {v0.d}[1], [x0], x1
ld1 {v3.d}[1], [x2], x3
ld1 {v2.d}[1], [x0], x1
ld1 {v5.d}[1], [x2], x3
ld1 {v4.d}[1], [x0], x1
ld1 {v7.d}[1], [x2], x3
ld1 {v6.d}[1], [x0], x1
b satd_4x8_8x4_end_neon
endfunc
function pixel_satd_8x4_neon, export=1
lsl x1, x1, #1
lsl x3, x3, #1
ld1 {v1.8h}, [x2], x3
ld1 {v0.8h}, [x0], x1
ld1 {v3.8h}, [x2], x3
ld1 {v2.8h}, [x0], x1
ld1 {v5.8h}, [x2], x3
ld1 {v4.8h}, [x0], x1
ld1 {v7.8h}, [x2], x3
ld1 {v6.8h}, [x0], x1
endfunc
function satd_4x8_8x4_end_neon
sub v0.8h, v0.8h, v1.8h
sub v1.8h, v2.8h, v3.8h
sub v2.8h, v4.8h, v5.8h
sub v3.8h, v6.8h, v7.8h
SUMSUB_AB v16.8h, v17.8h, v0.8h, v1.8h
SUMSUB_AB v18.8h, v19.8h, v2.8h, v3.8h
SUMSUB_AB v4.8h, v6.8h, v16.8h, v18.8h
SUMSUB_AB v5.8h, v7.8h, v17.8h, v19.8h
trn1 v0.8h, v4.8h, v5.8h
trn2 v1.8h, v4.8h, v5.8h
trn1 v2.8h, v6.8h, v7.8h
trn2 v3.8h, v6.8h, v7.8h
SUMSUB_AB v16.8h, v17.8h, v0.8h, v1.8h
SUMSUB_AB v18.8h, v19.8h, v2.8h, v3.8h
trn1 v0.4s, v16.4s, v18.4s
trn2 v1.4s, v16.4s, v18.4s
trn1 v2.4s, v17.4s, v19.4s
trn2 v3.4s, v17.4s, v19.4s
abs v0.8h, v0.8h
abs v1.8h, v1.8h
abs v2.8h, v2.8h
abs v3.8h, v3.8h
umax v0.8h, v0.8h, v1.8h
umax v1.8h, v2.8h, v3.8h
add v0.8h, v0.8h, v1.8h
uaddlv s0, v0.8h
mov w0, v0.s[0]
ret
endfunc
function pixel_satd_4x16_neon, export=1
mov x4, x30
lsl x1, x1, #1
lsl x3, x3, #1
ld1 {v1.d}[0], [x2], x3
ld1 {v0.d}[0], [x0], x1
ld1 {v3.d}[0], [x2], x3
ld1 {v2.d}[0], [x0], x1
ld1 {v5.d}[0], [x2], x3
ld1 {v4.d}[0], [x0], x1
ld1 {v7.d}[0], [x2], x3
ld1 {v6.d}[0], [x0], x1
ld1 {v1.d}[1], [x2], x3
ld1 {v0.d}[1], [x0], x1
ld1 {v3.d}[1], [x2], x3
ld1 {v2.d}[1], [x0], x1
ld1 {v5.d}[1], [x2], x3
ld1 {v4.d}[1], [x0], x1
ld1 {v7.d}[1], [x2], x3
ld1 {v6.d}[1], [x0], x1
sub v16.8h, v0.8h, v1.8h
sub v17.8h, v2.8h, v3.8h
sub v18.8h, v4.8h, v5.8h
sub v19.8h, v6.8h, v7.8h
ld1 {v1.d}[0], [x2], x3
ld1 {v0.d}[0], [x0], x1
ld1 {v3.d}[0], [x2], x3
ld1 {v2.d}[0], [x0], x1
ld1 {v5.d}[0], [x2], x3
ld1 {v4.d}[0], [x0], x1
ld1 {v7.d}[0], [x2], x3
ld1 {v6.d}[0], [x0], x1
ld1 {v1.d}[1], [x2], x3
ld1 {v0.d}[1], [x0], x1
ld1 {v3.d}[1], [x2], x3
ld1 {v2.d}[1], [x0], x1
ld1 {v5.d}[1], [x2], x3
ld1 {v4.d}[1], [x0], x1
ld1 {v7.d}[1], [x2], x3
ld1 {v6.d}[1], [x0], x1
sub v20.8h, v0.8h, v1.8h
sub v21.8h, v2.8h, v3.8h
sub v22.8h, v4.8h, v5.8h
sub v23.8h, v6.8h, v7.8h
SUMSUB_AB v0.8h, v1.8h, v16.8h, v17.8h
SUMSUB_AB v2.8h, v3.8h, v18.8h, v19.8h
bl satd_8x4v_8x8h_neon
add v30.8h, v0.8h, v1.8h
add v31.8h, v2.8h, v3.8h
add v0.8h, v30.8h, v31.8h
uaddlv s0, v0.8h
fmov w0, s0
ret x4
endfunc
.macro load_diff_fly_8x8
ld1 {v1.8h}, [x2], x3
ld1 {v0.8h}, [x0], x1
ld1 {v3.8h}, [x2], x3
ld1 {v2.8h}, [x0], x1
sub v16.8h, v0.8h, v1.8h
ld1 {v5.8h}, [x2], x3
ld1 {v4.8h}, [x0], x1
sub v17.8h, v2.8h, v3.8h
ld1 {v7.8h}, [x2], x3
ld1 {v6.8h}, [x0], x1
sub v18.8h, v4.8h, v5.8h
ld1 {v1.8h}, [x2], x3
ld1 {v0.8h}, [x0], x1
sub v19.8h, v6.8h, v7.8h
ld1 {v3.8h}, [x2], x3
ld1 {v2.8h}, [x0], x1
sub v20.8h, v0.8h, v1.8h
ld1 {v5.8h}, [x2], x3
ld1 {v4.8h}, [x0], x1
sub v21.8h, v2.8h, v3.8h
ld1 {v7.8h}, [x2], x3
ld1 {v6.8h}, [x0], x1
SUMSUB_AB v0.8h, v1.8h, v16.8h, v17.8h
SUMSUB_AB v2.8h, v3.8h, v18.8h, v19.8h
sub v22.8h, v4.8h, v5.8h
sub v23.8h, v6.8h, v7.8h
.endm
function pixel_satd_8x8_neon, export=1
mov x4, x30
lsl x1, x1, #1
lsl x3, x3, #1
bl satd_8x8_neon
add v0.8h, v0.8h, v1.8h
add v1.8h, v2.8h, v3.8h
add v0.8h, v0.8h, v1.8h
uaddlv s0, v0.8h
mov w0, v0.s[0]
ret x4
endfunc
function pixel_satd_8x16_neon, export=1
mov x4, x30
lsl x1, x1, #1
lsl x3, x3, #1
bl satd_8x8_neon
add v0.8h, v0.8h, v1.8h
add v1.8h, v2.8h, v3.8h
add v30.8h, v0.8h, v1.8h
bl satd_8x8_neon
add v0.8h, v0.8h, v1.8h
add v1.8h, v2.8h, v3.8h
add v31.8h, v0.8h, v1.8h
add v0.8h, v30.8h, v31.8h
uaddlv s0, v0.8h
mov w0, v0.s[0]
ret x4
endfunc
function satd_8x8_neon
load_diff_fly_8x8
endfunc
// one vertical hadamard pass and two horizontal
function satd_8x4v_8x8h_neon
SUMSUB_AB v16.8h, v18.8h, v0.8h, v2.8h
SUMSUB_AB v17.8h, v19.8h, v1.8h, v3.8h
HADAMARD4_V v20.8h, v21.8h, v22.8h, v23.8h, v0.8h, v1.8h, v2.8h, v3.8h
transpose v0.8h, v1.8h, v16.8h, v17.8h
transpose v2.8h, v3.8h, v18.8h, v19.8h
transpose v4.8h, v5.8h, v20.8h, v21.8h
transpose v6.8h, v7.8h, v22.8h, v23.8h
SUMSUB_AB v16.8h, v17.8h, v0.8h, v1.8h
SUMSUB_AB v18.8h, v19.8h, v2.8h, v3.8h
SUMSUB_AB v20.8h, v21.8h, v4.8h, v5.8h
SUMSUB_AB v22.8h, v23.8h, v6.8h, v7.8h
transpose v0.4s, v2.4s, v16.4s, v18.4s
transpose v1.4s, v3.4s, v17.4s, v19.4s
transpose v4.4s, v6.4s, v20.4s, v22.4s
transpose v5.4s, v7.4s, v21.4s, v23.4s
abs v0.8h, v0.8h
abs v1.8h, v1.8h
abs v2.8h, v2.8h
abs v3.8h, v3.8h
abs v4.8h, v4.8h
abs v5.8h, v5.8h
abs v6.8h, v6.8h
abs v7.8h, v7.8h
umax v0.8h, v0.8h, v2.8h
umax v1.8h, v1.8h, v3.8h
umax v2.8h, v4.8h, v6.8h
umax v3.8h, v5.8h, v7.8h
ret
endfunc
function pixel_ssd_nv12_core_neon, export=1
sxtw x8, w4
add x8, x8, #8
and x8, x8, #~15
movi v6.2d, #0
movi v7.2d, #0
sub x1, x1, x8, lsl #1
sub x3, x3, x8, lsl #1
lsl x1, x1, #1
lsl x3, x3, #1
lsl x4, x4, #1
1:
subs w8, w4, #32
ld2 {v0.8h, v1.8h}, [x0], #32
ld2 {v2.8h, v3.8h}, [x2], #32
ld2 {v24.8h, v25.8h}, [x0], #32
ld2 {v26.8h, v27.8h}, [x2], #32
sub v16.8h, v0.8h, v2.8h
sub v17.8h, v1.8h, v3.8h
smull v20.4s, v16.4h, v16.4h
smull v21.4s, v17.4h, v17.4h
sub v18.8h, v24.8h, v26.8h
sub v19.8h, v25.8h, v27.8h
smlal2 v20.4s, v16.8h, v16.8h
smlal2 v21.4s, v17.8h, v17.8h
b.lt 4f
b.eq 3f
2:
smlal v20.4s, v18.4h, v18.4h
smlal v21.4s, v19.4h, v19.4h
ld2 {v0.8h, v1.8h}, [x0], #32
ld2 {v2.8h, v3.8h}, [x2], #32
smlal2 v20.4s, v18.8h, v18.8h
smlal2 v21.4s, v19.8h, v19.8h
subs w8, w8, #32
sub v16.8h, v0.8h, v2.8h
sub v17.8h, v1.8h, v3.8h
smlal v20.4s, v16.4h, v16.4h
smlal v21.4s, v17.4h, v17.4h
ld2 {v24.8h,v25.8h}, [x0], #32
ld2 {v26.8h,v27.8h}, [x2], #32
smlal2 v20.4s, v16.8h, v16.8h
smlal2 v21.4s, v17.8h, v17.8h
b.lt 4f
sub v18.8h, v24.8h, v26.8h
sub v19.8h, v25.8h, v27.8h
b.gt 2b
3:
smlal v20.4s, v18.4h, v18.4h
smlal v21.4s, v19.4h, v19.4h
smlal2 v20.4s, v18.8h, v18.8h
smlal2 v21.4s, v19.8h, v19.8h
4:
subs w5, w5, #1
uaddw v6.2d, v6.2d, v20.2s
uaddw v7.2d, v7.2d, v21.2s
add x0, x0, x1
add x2, x2, x3
uaddw2 v6.2d, v6.2d, v20.4s
uaddw2 v7.2d, v7.2d, v21.4s
b.gt 1b
addp v6.2d, v6.2d, v7.2d
st1 {v6.d}[0], [x6]
st1 {v6.d}[1], [x7]
ret
endfunc
.macro pixel_var_8 h
function pixel_var_8x\h\()_neon, export=1
lsl x1, x1, #1
ld1 {v16.8h}, [x0], x1
ld1 {v17.8h}, [x0], x1
mov x2, \h - 4
umull v1.4s, v16.4h, v16.4h
umull2 v30.4s, v16.8h, v16.8h
mov v0.16b, v16.16b
umull v2.4s, v17.4h, v17.4h
umull2 v31.4s, v17.8h, v17.8h
add v0.8h, v0.8h, v17.8h
ld1 {v18.8h}, [x0], x1
ld1 {v19.8h}, [x0], x1
1: subs x2, x2, #4
add v0.8h, v0.8h, v18.8h
umull v24.4s, v18.4h, v18.4h
umull2 v25.4s, v18.8h, v18.8h
ld1 {v20.8h}, [x0], x1
add v0.8h, v0.8h, v19.8h
umull v26.4s, v19.4h, v19.4h
umull2 v27.4s, v19.8h, v19.8h
add v1.4s, v1.4s, v24.4s
add v30.4s, v30.4s, v25.4s
ld1 {v21.8h}, [x0], x1
add v0.8h, v0.8h, v20.8h
umull v28.4s, v20.4h, v20.4h
umull2 v29.4s, v20.8h, v20.8h
add v2.4s, v2.4s, v26.4s
add v31.4s, v31.4s, v27.4s
ld1 {v18.8h}, [x0], x1
add v0.8h, v0.8h, v21.8h
umull v3.4s, v21.4h, v21.4h
umull2 v4.4s, v21.8h, v21.8h
add v1.4s, v1.4s, v28.4s
add v30.4s, v30.4s, v29.4s
ld1 {v19.8h}, [x0], x1
add v2.4s, v2.4s, v3.4s
add v31.4s, v31.4s, v4.4s
b.gt 1b
add v0.8h, v0.8h, v18.8h
umull v24.4s, v18.4h, v18.4h
umull2 v25.4s, v18.8h, v18.8h
add v0.8h, v0.8h, v19.8h
umull v26.4s, v19.4h, v19.4h
umull2 v27.4s, v19.8h, v19.8h
add v1.4s, v1.4s, v24.4s
add v30.4s, v30.4s, v25.4s
add v2.4s, v2.4s, v26.4s
add v31.4s, v31.4s, v27.4s
b var_end
endfunc
.endm
function pixel_var_16x16_neon, export=1
lsl x1, x1, #1
ld1 {v16.8h, v17.8h}, [x0], x1
ld1 {v18.8h, v19.8h}, [x0], x1
mov x2, #14
umull v1.4s, v16.4h, v16.4h
umull2 v30.4s, v16.8h, v16.8h
add v0.8h, v16.8h, v17.8h
umull v2.4s, v17.4h, v17.4h
umull2 v31.4s, v17.8h, v17.8h
1: subs x2, x2, #2
ld1 {v20.8h, v21.8h}, [x0], x1
add v0.8h, v0.8h, v18.8h
umlal v1.4s, v18.4h, v18.4h
umlal2 v30.4s, v18.8h, v18.8h
umlal v2.4s, v19.4h, v19.4h
umlal2 v31.4s, v19.8h, v19.8h
add v0.8h, v0.8h, v19.8h
ld1 {v18.8h, v19.8h}, [x0], x1
add v0.8h, v0.8h, v20.8h
umlal v1.4s, v20.4h, v20.4h
umlal2 v30.4s, v20.8h, v20.8h
umlal v2.4s, v21.4h, v21.4h
umlal2 v31.4s, v21.8h, v21.8h
add v0.8h, v0.8h, v21.8h
b.gt 1b
add v0.8h, v0.8h, v18.8h
umlal v1.4s, v18.4h, v18.4h
umlal2 v30.4s, v18.8h, v18.8h
umlal v2.4s, v19.4h, v19.4h
umlal2 v31.4s, v19.8h, v19.8h
add v0.8h, v0.8h, v19.8h
endfunc
function var_end
add v1.4s, v1.4s, v2.4s
add v30.4s, v30.4s, v31.4s
add v1.4s, v1.4s, v30.4s
uaddlv s0, v0.8h
uaddlv d1, v1.4s
mov w0, v0.s[0]
mov x1, v1.d[0]
orr x0, x0, x1, lsl #32
ret
endfunc
.macro pixel_var2_8 h
function pixel_var2_8x\h\()_neon, export=1
mov x3, #32
ld1 {v16.8h}, [x0], #16
ld1 {v18.8h}, [x1], x3
ld1 {v17.8h}, [x0], #16
ld1 {v19.8h}, [x1], x3
mov x5, \h - 2
sub v0.8h, v16.8h, v18.8h
sub v1.8h, v17.8h, v19.8h
ld1 {v16.8h}, [x0], #16
ld1 {v18.8h}, [x1], x3
smull v2.4s, v0.4h, v0.4h
smull2 v3.4s, v0.8h, v0.8h
smull v4.4s, v1.4h, v1.4h
smull2 v5.4s, v1.8h, v1.8h
sub v6.8h, v16.8h, v18.8h
1: subs x5, x5, #1
ld1 {v17.8h}, [x0], #16
ld1 {v19.8h}, [x1], x3
smlal v2.4s, v6.4h, v6.4h
smlal2 v3.4s, v6.8h, v6.8h
sub v7.8h, v17.8h, v19.8h
add v0.8h, v0.8h, v6.8h
ld1 {v16.8h}, [x0], #16
ld1 {v18.8h}, [x1], x3
smlal v4.4s, v7.4h, v7.4h
smlal2 v5.4s, v7.8h, v7.8h
sub v6.8h, v16.8h, v18.8h
add v1.8h, v1.8h, v7.8h
b.gt 1b
ld1 {v17.8h}, [x0], #16
ld1 {v19.8h}, [x1], x3
smlal v2.4s, v6.4h, v6.4h
smlal2 v3.4s, v6.8h, v6.8h
sub v7.8h, v17.8h, v19.8h
add v0.8h, v0.8h, v6.8h
smlal v4.4s, v7.4h, v7.4h
add v1.8h, v1.8h, v7.8h
smlal2 v5.4s, v7.8h, v7.8h
saddlv s0, v0.8h
saddlv s1, v1.8h
add v2.4s, v2.4s, v3.4s
add v4.4s, v4.4s, v5.4s
mov w0, v0.s[0]
mov w1, v1.s[0]
addv s2, v2.4s
addv s4, v4.4s
mul w0, w0, w0
mul w1, w1, w1
mov w3, v2.s[0]
mov w4, v4.s[0]
sub w0, w3, w0, lsr # 6 + (\h >> 4)
sub w1, w4, w1, lsr # 6 + (\h >> 4)
str w3, [x2]
add w0, w0, w1
str w4, [x2, #4]
ret
endfunc
.endm
function pixel_satd_16x8_neon, export=1
mov x4, x30
lsl x1, x1, #1
lsl x3, x3, #1
bl satd_16x4_neon
add v30.8h, v0.8h, v1.8h
add v31.8h, v2.8h, v3.8h
bl satd_16x4_neon
add v0.8h, v0.8h, v1.8h
add v1.8h, v2.8h, v3.8h
add v30.8h, v30.8h, v0.8h
add v31.8h, v31.8h, v1.8h
add v0.8h, v30.8h, v31.8h
uaddlv s0, v0.8h
mov w0, v0.s[0]
ret x4
endfunc
function pixel_satd_16x16_neon, export=1
mov x4, x30
lsl x1, x1, #1
lsl x3, x3, #1
bl satd_16x4_neon
uaddl v30.4s, v0.4h, v1.4h
uaddl v31.4s, v2.4h, v3.4h
uaddl2 v28.4s, v0.8h, v1.8h
uaddl2 v29.4s, v2.8h, v3.8h
add v30.4s, v30.4s, v28.4s
add v31.4s, v31.4s, v29.4s
bl satd_16x4_neon
add v0.8h, v0.8h, v1.8h
add v1.8h, v2.8h, v3.8h
uaddw v30.4s, v30.4s, v0.4h
uaddw2 v30.4s, v30.4s, v0.8h
uaddw v31.4s, v31.4s, v1.4h
uaddw2 v31.4s, v31.4s, v1.8h
bl satd_16x4_neon
add v0.8h, v0.8h, v1.8h
add v1.8h, v2.8h, v3.8h
uaddw v30.4s, v30.4s, v0.4h
uaddw2 v30.4s, v30.4s, v0.8h
uaddw v31.4s, v31.4s, v1.4h
uaddw2 v31.4s, v31.4s, v1.8h
bl satd_16x4_neon
add v0.8h, v0.8h, v1.8h
add v1.8h, v2.8h, v3.8h
uaddw v30.4s, v30.4s, v0.4h
uaddw2 v30.4s, v30.4s, v0.8h
uaddw v31.4s, v31.4s, v1.4h
uaddw2 v31.4s, v31.4s, v1.8h
add v0.4s, v30.4s, v31.4s
addv s0, v0.4s
mov w0, v0.s[0]
ret x4
endfunc
function satd_16x4_neon
ld1 {v0.8h, v1.8h}, [x2], x3
ld1 {v2.8h, v3.8h}, [x0], x1
sub v16.8h, v2.8h, v0.8h
sub v20.8h, v3.8h, v1.8h
ld1 {v4.8h, v5.8h}, [x2], x3
ld1 {v6.8h, v7.8h}, [x0], x1
sub v17.8h, v6.8h, v4.8h
sub v21.8h, v7.8h, v5.8h
ld1 {v0.8h, v1.8h}, [x2], x3
ld1 {v2.8h, v3.8h}, [x0], x1
sub v18.8h, v2.8h, v0.8h
sub v22.8h, v3.8h, v1.8h
ld1 {v4.8h, v5.8h}, [x2], x3
ld1 {v6.8h, v7.8h}, [x0], x1
sub v19.8h, v6.8h, v4.8h
sub v23.8h, v7.8h, v5.8h
SUMSUB_AB v0.8h, v1.8h, v16.8h, v17.8h
SUMSUB_AB v2.8h, v3.8h, v18.8h, v19.8h
b satd_8x4v_8x8h_neon
endfunc
function pixel_sa8d_8x8_neon, export=1
mov x4, x30
lsl x1, x1, #1
lsl x3, x3, #1
bl pixel_sa8d_8x8_neon
add v0.8h, v0.8h, v1.8h
uaddlv s0, v0.8h
mov w0, v0.s[0]
add w0, w0, #1
lsr w0, w0, #1
ret x4
endfunc
function pixel_sa8d_16x16_neon, export=1
mov x4, x30
lsl x1, x1, #1
lsl x3, x3, #1
bl pixel_sa8d_8x8_neon
uaddlp v30.4s, v0.8h
uaddlp v31.4s, v1.8h
bl pixel_sa8d_8x8_neon
uadalp v30.4s, v0.8h
uadalp v31.4s, v1.8h
sub x0, x0, x1, lsl #4
sub x2, x2, x3, lsl #4
add x0, x0, #16
add x2, x2, #16
bl pixel_sa8d_8x8_neon
uadalp v30.4s, v0.8h
uadalp v31.4s, v1.8h
bl pixel_sa8d_8x8_neon
uadalp v30.4s, v0.8h
uadalp v31.4s, v1.8h
add v0.4s, v30.4s, v31.4s
addv s0, v0.4s
mov w0, v0.s[0]
add w0, w0, #1
lsr w0, w0, #1
ret x4
endfunc
.macro sa8d_satd_8x8 satd=
function pixel_sa8d_\satd\()8x8_neon
load_diff_fly_8x8
SUMSUB_AB v16.8h, v18.8h, v0.8h, v2.8h
SUMSUB_AB v17.8h, v19.8h, v1.8h, v3.8h
HADAMARD4_V v20.8h, v21.8h, v22.8h, v23.8h, v0.8h, v1.8h, v2.8h, v3.8h
.ifc \satd, satd_
transpose v0.8h, v1.8h, v16.8h, v17.8h
transpose v2.8h, v3.8h, v18.8h, v19.8h
transpose v4.8h, v5.8h, v20.8h, v21.8h
transpose v6.8h, v7.8h, v22.8h, v23.8h
SUMSUB_AB v24.8h, v25.8h, v0.8h, v1.8h
SUMSUB_AB v26.8h, v27.8h, v2.8h, v3.8h
SUMSUB_AB v0.8h, v1.8h, v4.8h, v5.8h
SUMSUB_AB v2.8h, v3.8h, v6.8h, v7.8h
transpose v4.4s, v6.4s, v24.4s, v26.4s
transpose v5.4s, v7.4s, v25.4s, v27.4s
transpose v24.4s, v26.4s, v0.4s, v2.4s
transpose v25.4s, v27.4s, v1.4s, v3.4s
abs v0.8h, v4.8h
abs v1.8h, v5.8h
abs v2.8h, v6.8h
abs v3.8h, v7.8h
abs v4.8h, v24.8h
abs v5.8h, v25.8h
abs v6.8h, v26.8h
abs v7.8h, v27.8h
umax v0.8h, v0.8h, v2.8h
umax v1.8h, v1.8h, v3.8h
umax v2.8h, v4.8h, v6.8h
umax v3.8h, v5.8h, v7.8h
add v26.8h, v0.8h, v1.8h
add v27.8h, v2.8h, v3.8h
.endif
SUMSUB_AB v0.8h, v16.8h, v16.8h, v20.8h
SUMSUB_AB v1.8h, v17.8h, v17.8h, v21.8h
SUMSUB_AB v2.8h, v18.8h, v18.8h, v22.8h
SUMSUB_AB v3.8h, v19.8h, v19.8h, v23.8h
transpose v20.8h, v21.8h, v16.8h, v17.8h
transpose v4.8h, v5.8h, v0.8h, v1.8h
transpose v22.8h, v23.8h, v18.8h, v19.8h
transpose v6.8h, v7.8h, v2.8h, v3.8h
SUMSUB_AB v2.8h, v3.8h, v20.8h, v21.8h
SUMSUB_AB v24.8h, v25.8h, v4.8h, v5.8h
SUMSUB_AB v0.8h, v1.8h, v22.8h, v23.8h
SUMSUB_AB v4.8h, v5.8h, v6.8h, v7.8h
transpose v20.4s, v22.4s, v2.4s, v0.4s
transpose v21.4s, v23.4s, v3.4s, v1.4s
transpose v16.4s, v18.4s, v24.4s, v4.4s
transpose v17.4s, v19.4s, v25.4s, v5.4s
SUMSUB_AB v0.8h, v2.8h, v20.8h, v22.8h
SUMSUB_AB v1.8h, v3.8h, v21.8h, v23.8h
SUMSUB_AB v4.8h, v6.8h, v16.8h, v18.8h
SUMSUB_AB v5.8h, v7.8h, v17.8h, v19.8h
transpose v16.2d, v20.2d, v0.2d, v4.2d
transpose v17.2d, v21.2d, v1.2d, v5.2d
transpose v18.2d, v22.2d, v2.2d, v6.2d
transpose v19.2d, v23.2d, v3.2d, v7.2d
abs v16.8h, v16.8h
abs v20.8h, v20.8h
abs v17.8h, v17.8h
abs v21.8h, v21.8h
abs v18.8h, v18.8h
abs v22.8h, v22.8h
abs v19.8h, v19.8h
abs v23.8h, v23.8h
umax v16.8h, v16.8h, v20.8h
umax v17.8h, v17.8h, v21.8h
umax v18.8h, v18.8h, v22.8h
umax v19.8h, v19.8h, v23.8h
add v0.8h, v16.8h, v17.8h
add v1.8h, v18.8h, v19.8h
ret
endfunc
.endm
// uint64_t pixel_sa8d_satd_16x16( pixel *pix1, intptr_t i_stride1,
//                                 pixel *pix2, intptr_t i_stride2 )
// Computes both metrics for a 16x16 block in one pass and returns them
// packed in a single 64-bit value:
//   low  32 bits = sa8d sum, halved with rounding (urshr #1 below)
//   high 32 bits = satd sum
// The block is processed as four 8x8 quarters via pixel_sa8d_satd_8x8_neon,
// which leaves per-quarter partial sums in v0/v1 (sa8d) and v26/v27 (satd).
function pixel_sa8d_satd_16x16_neon, export=1
mov x4, x30                         // save return address; the bl's below clobber x30
lsl x1, x1, #1                      // scale strides to bytes (rows are loaded as .8h,
lsl x3, x3, #1                      // i.e. 16-bit pixels — high-bit-depth build)
bl pixel_sa8d_satd_8x8_neon         // top-left 8x8
uaddlp v30.4s, v0.8h                // widen 16-bit partial sums to 32-bit accumulators
uaddlp v31.4s, v1.8h
uaddlp v28.4s, v26.8h
uaddlp v29.4s, v27.8h
bl pixel_sa8d_satd_8x8_neon         // 8x8 below it (x0/x2 were advanced by the callee)
uadalp v30.4s, v0.8h                // accumulate-widen into the running sums
uadalp v31.4s, v1.8h
uadalp v28.4s, v26.8h
uadalp v29.4s, v27.8h
sub x0, x0, x1, lsl #4              // rewind 16 rows, step 8 pixels (16 bytes) right
sub x2, x2, x3, lsl #4
add x0, x0, #16
add x2, x2, #16
bl pixel_sa8d_satd_8x8_neon         // top-right 8x8
uadalp v30.4s, v0.8h
uadalp v31.4s, v1.8h
uadalp v28.4s, v26.8h
uadalp v29.4s, v27.8h
bl pixel_sa8d_satd_8x8_neon         // bottom-right 8x8
uadalp v30.4s, v0.8h
uadalp v31.4s, v1.8h
uadalp v28.4s, v26.8h
uadalp v29.4s, v27.8h
add v0.4s, v30.4s, v31.4s // sa8d
add v1.4s, v28.4s, v29.4s // satd
addv s0, v0.4s                      // horizontal reduction into lane 0
addv s1, v1.4s
urshr v0.4s, v0.4s, #1              // sa8d = (sum + 1) >> 1 (rounding halve)
fmov w0, s0
fmov w1, s1
add x0, x0, x1, lsl #32             // pack: satd << 32 | sa8d (w-moves zeroed high bits)
ret x4
endfunc
// HADAMARD_AC w h — emits pixel_hadamard_ac_<w>x<h>_neon.
// Computes the two Hadamard AC sums of a w x h block (w,h in {8,16}) by
// calling hadamard_ac_8x8_neon once per 8x8 sub-block. The callee
// accumulates into v28/v29 (see the register-role comment at its
// definition); v30/v31 hold the AC masks loaded from mask_ac_4_8.
// Returns both sums packed in one 64-bit value:
//   low  32 bits = v28 sum >> 1, high 32 bits = v29 sum >> 2.
.macro HADAMARD_AC w h
function pixel_hadamard_ac_\w\()x\h\()_neon, export=1
movrel x5, mask_ac_4_8
mov x4, x30                         // save return address across the bl's
lsl x1, x1, #1                      // stride in bytes for 16-bit pixels
ld1 {v30.8h,v31.8h}, [x5]           // v30 = ac4 mask, v31 = ac8 mask
movi v28.16b, #0                    // zero both accumulators
movi v29.16b, #0
bl hadamard_ac_8x8_neon             // top-left 8x8 (advances x0 by 8 rows)
.if \h > 8
bl hadamard_ac_8x8_neon             // 8x8 below it
.endif
.if \w > 8
sub x0, x0, x1, lsl #3              // rewind 8 rows, step 8 pixels right
add x0, x0, 16
bl hadamard_ac_8x8_neon             // top-right 8x8
.endif
.if \w * \h == 256
sub x0, x0, x1, lsl #4              // 16x16 only: down to the last quarter
bl hadamard_ac_8x8_neon
.endif
addv s1, v29.4s                     // reduce each accumulator to a scalar
addv s0, v28.4s
mov w1, v1.s[0]
mov w0, v0.s[0]
lsr w1, w1, #2                      // normalize: v29 sum / 4
lsr w0, w0, #1                      // v28 sum / 2
orr x0, x0, x1, lsl #32             // pack both results into x0
ret x4
endfunc
.endm
// v28: satd v29: sa8d v30: mask_ac4 v31: mask_ac8
// Helper for HADAMARD_AC: processes one 8x8 block of 16-bit pixels at x0
// (row stride x1, already in bytes) and accumulates the two masked
// Hadamard AC sums into v28.4s and v29.4s. Advances x0 past the 8 rows.
// Clobbers v0-v7 and v16-v23; x0 is the only GPR modified.
function hadamard_ac_8x8_neon
ld1 {v16.8h}, [x0], x1              // load the 8 rows
ld1 {v17.8h}, [x0], x1
ld1 {v18.8h}, [x0], x1
ld1 {v19.8h}, [x0], x1
SUMSUB_AB v0.8h, v1.8h, v16.8h, v17.8h  // vertical butterflies, stage 1
ld1 {v20.8h}, [x0], x1
ld1 {v21.8h}, [x0], x1
SUMSUB_AB v2.8h, v3.8h, v18.8h, v19.8h
ld1 {v22.8h}, [x0], x1
ld1 {v23.8h}, [x0], x1
SUMSUB_AB v4.8h, v5.8h, v20.8h, v21.8h
SUMSUB_AB v6.8h, v7.8h, v22.8h, v23.8h
SUMSUB_ABCD v16.8h, v18.8h, v17.8h, v19.8h, v0.8h, v2.8h, v1.8h, v3.8h
SUMSUB_ABCD v20.8h, v22.8h, v21.8h, v23.8h, v4.8h, v6.8h, v5.8h, v7.8h
transpose v0.8h, v1.8h, v16.8h, v17.8h  // transpose to run the horizontal pass
transpose v2.8h, v3.8h, v18.8h, v19.8h
transpose v4.8h, v5.8h, v20.8h, v21.8h
transpose v6.8h, v7.8h, v22.8h, v23.8h
SUMSUB_AB v16.8h, v17.8h, v0.8h, v1.8h  // horizontal butterflies
SUMSUB_AB v18.8h, v19.8h, v2.8h, v3.8h
SUMSUB_AB v20.8h, v21.8h, v4.8h, v5.8h
SUMSUB_AB v22.8h, v23.8h, v6.8h, v7.8h
transpose v0.4s, v2.4s, v16.4s, v18.4s
transpose v1.4s, v3.4s, v17.4s, v19.4s
transpose v4.4s, v6.4s, v20.4s, v22.4s
transpose v5.4s, v7.4s, v21.4s, v23.4s
SUMSUB_AB v16.8h, v18.8h, v0.8h, v2.8h
SUMSUB_AB v17.8h, v19.8h, v1.8h, v3.8h
SUMSUB_ABCD v20.8h, v22.8h, v21.8h, v23.8h, v4.8h, v6.8h, v5.8h, v7.8h
abs v0.8h, v16.8h                   // |coeff| for the 4x4-based sum
abs v4.8h, v20.8h
abs v1.8h, v17.8h
abs v5.8h, v21.8h
abs v2.8h, v18.8h
abs v6.8h, v22.8h
abs v3.8h, v19.8h
abs v7.8h, v23.8h
add v0.8h, v0.8h, v4.8h
add v1.8h, v1.8h, v5.8h
and v0.16b, v0.16b, v30.16b         // knock out DC terms with the ac4 mask
add v2.8h, v2.8h, v6.8h
add v3.8h, v3.8h, v7.8h
add v0.8h, v0.8h, v2.8h
add v1.8h, v1.8h, v3.8h
uadalp v28.4s, v0.8h                // accumulate masked 4x4 sum into v28
uadalp v28.4s, v1.8h
SUMSUB_AB v6.8h, v7.8h, v23.8h, v19.8h  // finish the 8x8 transform
SUMSUB_AB v4.8h, v5.8h, v22.8h, v18.8h
SUMSUB_AB v2.8h, v3.8h, v21.8h, v17.8h
SUMSUB_AB v1.8h, v0.8h, v16.8h, v20.8h
transpose v16.2d, v17.2d, v6.2d, v7.2d
transpose v18.2d, v19.2d, v4.2d, v5.2d
transpose v20.2d, v21.2d, v2.2d, v3.2d
abs v16.8h, v16.8h
abs v17.8h, v17.8h
abs v18.8h, v18.8h
abs v19.8h, v19.8h
abs v20.8h, v20.8h
abs v21.8h, v21.8h
transpose v7.2d, v6.2d, v1.2d, v0.2d
umax v3.8h, v16.8h, v17.8h          // per-pair max of the two halves
umax v2.8h, v18.8h, v19.8h
umax v1.8h, v20.8h, v21.8h
SUMSUB_AB v4.8h, v5.8h, v7.8h, v6.8h
add v2.8h, v2.8h, v3.8h
add v2.8h, v2.8h, v1.8h
and v4.16b, v4.16b, v31.16b         // mask the DC row with the ac8 mask
add v2.8h, v2.8h, v2.8h             // double the maxed terms
abs v5.8h, v5.8h
abs v4.8h, v4.8h
add v2.8h, v2.8h, v5.8h
add v2.8h, v2.8h, v4.8h
uadalp v29.4s, v2.8h                // accumulate the 8x8 sum into v29
ret
endfunc
// void pixel_ssim_4x4x2_core( const pixel *pix1, intptr_t stride1,
//                             const pixel *pix2, intptr_t stride2,
//                             int sums[2][4] )
// For two adjacent 4x4 windows (one .8h row load covers both) computes,
// per window: sum(pix1), sum(pix2), sum(pix1^2)+sum(pix2^2), sum(pix1*pix2),
// and stores the four values interleaved for both windows at x4 via st4.
function pixel_ssim_4x4x2_core_neon, export=1
lsl x1, x1, #1                      // strides to bytes for 16-bit pixels
lsl x3, x3, #1
ld1 {v0.8h}, [x0], x1               // row 0 of pix1 / pix2
ld1 {v2.8h}, [x2], x3
ld1 {v28.8h}, [x0], x1              // row 1
ld1 {v29.8h}, [x2], x3
umull v16.4s, v0.4h, v0.4h          // v16/v17 = running sum of squares
umull2 v17.4s, v0.8h, v0.8h
umull v18.4s, v0.4h, v2.4h          // v18/v19 = running sum of products
umull2 v19.4s, v0.8h, v2.8h
umlal v16.4s, v2.4h, v2.4h
umlal2 v17.4s, v2.8h, v2.8h
ld1 {v26.8h}, [x0], x1              // row 2
ld1 {v27.8h}, [x2], x3
umlal v16.4s, v28.4h, v28.4h
umlal2 v17.4s, v28.8h, v28.8h
umlal v18.4s, v28.4h, v29.4h
umlal2 v19.4s, v28.8h, v29.8h
umlal v16.4s, v29.4h, v29.4h
umlal2 v17.4s, v29.8h, v29.8h
add v0.8h, v0.8h, v28.8h            // v0/v1 = running pixel sums
add v1.8h, v2.8h, v29.8h
umlal v16.4s, v26.4h, v26.4h
umlal2 v17.4s, v26.8h, v26.8h
umlal v18.4s, v26.4h, v27.4h
umlal2 v19.4s, v26.8h, v27.8h
umlal v16.4s, v27.4h, v27.4h
umlal2 v17.4s, v27.8h, v27.8h
ld1 {v28.8h}, [x0], x1              // row 3
ld1 {v29.8h}, [x2], x3
add v0.8h, v0.8h, v26.8h
add v1.8h, v1.8h, v27.8h
umlal v16.4s, v28.4h, v28.4h
umlal2 v17.4s, v28.8h, v28.8h
umlal v18.4s, v28.4h, v29.4h
umlal2 v19.4s, v28.8h, v29.8h
umlal v16.4s, v29.4h, v29.4h
umlal2 v17.4s, v29.8h, v29.8h
add v0.8h, v0.8h, v28.8h
add v1.8h, v1.8h, v29.8h
addp v16.4s, v16.4s, v17.4s         // fold per-lane sums down per window
addp v17.4s, v18.4s, v19.4s
uaddlp v0.4s, v0.8h
uaddlp v1.4s, v1.8h
addp v0.4s, v0.4s, v0.4s
addp v1.4s, v1.4s, v1.4s
addp v2.4s, v16.4s, v16.4s
addp v3.4s, v17.4s, v17.4s
st4 {v0.2s, v1.2s, v2.2s, v3.2s}, [x4]  // interleave {s1, s2, ss, s12} per window
ret
endfunc
// float pixel_ssim_end4( int sum0[5][4], int sum1[5][4], int width )
// Combines up to 4 column sums produced by pixel_ssim_4x4x2_core and
// evaluates the SSIM formula in single precision. Returns the sum of the
// (up to) `width` per-column SSIM values in s0.
function pixel_ssim_end4_neon, export=1
mov x5, #4
ld1 {v16.4s, v17.4s}, [x0], #32
ld1 {v18.4s, v19.4s}, [x1], #32
// NZCV from this subs is consumed by b.eq far below; none of the
// intervening instructions (loads, dup, transpose, FP arithmetic) write flags.
subs x2, x5, w2, uxtw               // x2 = 4 - width; Z set when width == 4
// These values must be stored in float, since with 10 bit depth edge cases
// may overflow. The hexadecimal values are IEEE-754 representation of the
// floating point numbers.
ldr w3, =0x45d14e49 // ssim_c1 = .01*.01*1023*1023*64
ldr w4, =0x4a67ca32 // ssim_c2 = .03*.03*1023*1023*64*63
add v0.4s, v16.4s, v18.4s           // pairwise-combine neighbouring column sums
add v1.4s, v17.4s, v19.4s
add v0.4s, v0.4s, v1.4s
ld1 {v20.4s, v21.4s}, [x0], #32
ld1 {v22.4s, v23.4s}, [x1], #32
add v2.4s, v20.4s, v22.4s
add v3.4s, v21.4s, v23.4s
add v1.4s, v1.4s, v2.4s
ld1 {v16.4s}, [x0], #16
ld1 {v18.4s}, [x1], #16
add v16.4s, v16.4s, v18.4s
add v2.4s, v2.4s, v3.4s
add v3.4s, v3.4s, v16.4s
dup v30.4s, w3                      // broadcast ssim_c1 / ssim_c2
dup v31.4s, w4
transpose v4.4s, v5.4s, v0.4s, v1.4s    // regroup into s1 / s2 / ss / s12 vectors
transpose v6.4s, v7.4s, v2.4s, v3.4s
transpose v0.2d, v2.2d, v4.2d, v6.2d
transpose v1.2d, v3.2d, v5.2d, v7.2d
// Conversion to floating point number must occur earlier than in 8 bit case
// because of the range overflow
scvtf v0.4s, v0.4s
scvtf v2.4s, v2.4s
scvtf v1.4s, v1.4s
scvtf v3.4s, v3.4s
fmul v16.4s, v0.4s, v1.4s // s1*s2
fmul v0.4s, v0.4s, v0.4s
fmla v0.4s, v1.4s, v1.4s // s1*s1 + s2*s2
// IEEE-754 hexadecimal representation of multipliers
ldr w3, =0x42800000 // 64
ldr w4, =0x43000000 // 128
dup v28.4s, w3
dup v29.4s, w4
fmul v2.4s, v2.4s, v28.4s           // ss*64
fmul v3.4s, v3.4s, v29.4s           // s12*128
fadd v1.4s, v16.4s, v16.4s          // 2*s1*s2
fsub v2.4s, v2.4s, v0.4s // vars
fsub v3.4s, v3.4s, v1.4s // covar*2
fadd v0.4s, v0.4s, v30.4s           // add the stabilizing constants c1/c2
fadd v2.4s, v2.4s, v31.4s
fadd v1.4s, v1.4s, v30.4s
fadd v3.4s, v3.4s, v31.4s
fmul v0.4s, v0.4s, v2.4s            // denominator
fmul v1.4s, v1.4s, v3.4s            // numerator
fdiv v0.4s, v1.4s, v0.4s            // per-column SSIM
b.eq 1f                             // width == 4: keep all four lanes
movrel x3, mask
add x3, x3, x2, lsl #2              // select mask row for (4 - width)
ld1 {v29.4s}, [x3]
and v0.16b, v0.16b, v29.16b         // zero lanes beyond `width`
1:
faddp v0.4s, v0.4s, v0.4s           // horizontal sum of the lanes
faddp s0, v0.2s
ret
endfunc
#endif /* BIT_DEPTH == 8 */
// Instantiate the size-parameterized pixel-metric functions for every
// partition size used by the encoder: SAD, SAD with 3/4 candidate refs,
// SSD, variance, sa8d+satd, and Hadamard AC.
SAD_FUNC 4, 4
SAD_FUNC 4, 8
SAD_FUNC 4, 16
SAD_FUNC 8, 4
SAD_FUNC 8, 8
SAD_FUNC 8, 16
SAD_FUNC 16, 8
SAD_FUNC 16, 16
SAD_X_FUNC 3, 4, 4
SAD_X_FUNC 3, 4, 8
SAD_X_FUNC 3, 8, 4
SAD_X_FUNC 3, 8, 8
SAD_X_FUNC 3, 8, 16
SAD_X_FUNC 3, 16, 8
SAD_X_FUNC 3, 16, 16
SAD_X_FUNC 4, 4, 4
SAD_X_FUNC 4, 4, 8
SAD_X_FUNC 4, 8, 4
SAD_X_FUNC 4, 8, 8
SAD_X_FUNC 4, 8, 16
SAD_X_FUNC 4, 16, 8
SAD_X_FUNC 4, 16, 16
SSD_FUNC 4, 4
SSD_FUNC 4, 8
SSD_FUNC 4, 16
SSD_FUNC 8, 4
SSD_FUNC 8, 8
SSD_FUNC 8, 16
SSD_FUNC 16, 8
SSD_FUNC 16, 16
pixel_var_8 8
pixel_var_8 16
pixel_var2_8 8
pixel_var2_8 16
sa8d_satd_8x8
sa8d_satd_8x8 satd_
HADAMARD_AC 8, 8
HADAMARD_AC 8, 16
HADAMARD_AC 16, 8
HADAMARD_AC 16, 16
/* ---- extraction artifact: dataset metadata between two concatenated files.
   Preserved from the original dump: repo "aestream/faery", size 67,692,
   path "src/mp4/x264/common/loongarch/dct-a.S". Everything below this
   point is the LoongArch DCT/IDCT assembly from that file. ---- */
/*****************************************************************************
* dct-a.S: LoongArch transform and zigzag
*****************************************************************************
* Copyright (C) 2023-2024 x264 project
*
* Authors: Peng Zhou <zhoupeng@loongson.cn>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "loongson_asm.S"
#include "loongson_util.S"
/* Constant tables used by the transforms below. */
const hsub_mul
/* 16 interleaved {+1, -1} byte pairs (sign pattern for paired subtraction). */
.rept 16
.byte 1, -1
.endr
endconst
const last64_shuf
/* Word-shuffle indices interleaving the two halves: 0,4,1,5,2,6,3,7. */
.int 0, 4, 1, 5, 2, 6, 3, 7
endconst
const zigzag_scan4
/* 4x4 frame zig-zag scan order. */
.short 0, 4, 1, 2, 5, 8, 12, 9, 6, 3, 7, 10, 13, 14, 11, 15
endconst
/* LOAD_DIFF8x4_LASX s1..s6, s7..s10
 * Loads four 8-pixel rows (row indices \s7..\s10) from the encode buffer
 * (a1, FENC_STRIDE) and the decode buffer (a2, FDEC_STRIDE), pairs rows
 * \s9/\s10 into lane 2 of the \s1..\s4 LASX registers, widens the u8
 * pixels to s16 by interleaving with zero (xr8 must be pre-zeroed by the
 * caller), and leaves the differences enc-dec in $xr\s1 and $xr\s2.
 * \s5/\s6 are scratch. */
.macro LOAD_DIFF8x4_LASX s1, s2, s3, s4, s5, s6, s7, s8, s9, s10
fld.d $f\s1, a1, FENC_STRIDE * \s7
fld.d $f\s2, a1, FENC_STRIDE * \s8
fld.d $f\s5, a1, FENC_STRIDE * \s9
fld.d $f\s6, a1, FENC_STRIDE * \s10
xvinsve0.d $xr\s1, $xr\s5, 2
xvinsve0.d $xr\s2, $xr\s6, 2
fld.d $f\s3, a2, FDEC_STRIDE * \s7
fld.d $f\s4, a2, FDEC_STRIDE * \s8
fld.d $f\s5, a2, FDEC_STRIDE * \s9
fld.d $f\s6, a2, FDEC_STRIDE * \s10
xvinsve0.d $xr\s3, $xr\s5, 2
xvinsve0.d $xr\s4, $xr\s6, 2
xvilvl.b $xr\s1, xr8, $xr\s1
xvilvl.b $xr\s2, xr8, $xr\s2
xvilvl.b $xr\s3, xr8, $xr\s3
xvilvl.b $xr\s4, xr8, $xr\s4
xvsub.h $xr\s1, $xr\s1, $xr\s3
xvsub.h $xr\s2, $xr\s2, $xr\s4
.endm
/* DCT4_1D_LASX s0..s4 — one 1-D pass of the H.264 4x4 forward transform
 * on halfword lanes. Inputs d0..d3 in \s0..\s3; outputs:
 *   \s2 = d0+d1+d2+d3        \s0 = 2*(d0-d3) + (d1-d2)
 *   \s4 = (d0+d3) - (d1+d2)  \s3 = (d0-d3) - 2*(d1-d2)
 * \s4 doubles as scratch. */
.macro DCT4_1D_LASX s0, s1, s2, s3, s4
xvadd.h \s4, \s3, \s0               /* s4 = d0 + d3 */
xvsub.h \s0, \s0, \s3               /* s0 = d0 - d3 */
xvadd.h \s3, \s2, \s1               /* s3 = d1 + d2 */
xvsub.h \s1, \s1, \s2               /* s1 = d1 - d2 */
xvadd.h \s2, \s3, \s4               /* even sum */
xvsub.h \s4, \s4, \s3               /* even diff */
xvsub.h \s3, \s0, \s1
xvsub.h \s3, \s3, \s1               /* odd: (d0-d3) - 2*(d1-d2) */
xvadd.h \s0, \s0, \s0
xvadd.h \s0, \s0, \s1               /* odd: 2*(d0-d3) + (d1-d2) */
.endm
/* LSX_SUMSUB_H — halfword butterfly: \sum = \a + \b, \sub = \a - \b. */
.macro LSX_SUMSUB_H sum, sub, a, b
vadd.h \sum, \a, \b
vsub.h \sub, \a, \b
.endm
/* DCT4_1D_LSX — one 1-D pass of the 4x4 forward transform (LSX variant).
 * Inputs d0..d3 in \s4..\s7; outputs coefficients in \s0..\s3
 * (same butterfly structure as DCT4_1D_LASX). */
.macro DCT4_1D_LSX s0, s1, s2, s3, s4, s5, s6, s7
LSX_SUMSUB_H \s1, \s6, \s5, \s6     /* s1 = d1+d2, s6 = d1-d2 */
LSX_SUMSUB_H \s3, \s7, \s4, \s7     /* s3 = d0+d3, s7 = d0-d3 */
vadd.h \s0, \s3, \s1                /* even sum */
vadd.h \s4, \s7, \s7
vadd.h \s5, \s6, \s6
vsub.h \s2, \s3, \s1                /* even diff */
vadd.h \s1, \s4, \s6                /* 2*(d0-d3) + (d1-d2) */
vsub.h \s3, \s7, \s5                /* (d0-d3) - 2*(d1-d2) */
.endm
/* SUB8x8_DCT_CORE_LASX — forward 4x4 DCT of an 8x8 enc-dec difference,
 * producing four 4x4 coefficient blocks at a0 (128 bytes).
 * Expects xr8 pre-zeroed (byte-widening zero source). a1 = enc pixels,
 * a2 = dec pixels. */
.macro SUB8x8_DCT_CORE_LASX
LOAD_DIFF8x4_LASX 0, 1, 2, 3, 4, 5, 0, 1, 4, 5
LOAD_DIFF8x4_LASX 2, 3, 4, 5, 6, 7, 2, 3, 6, 7
DCT4_1D_LASX xr0, xr1, xr2, xr3, xr4    /* vertical pass */
LASX_TRANSPOSE2x4x4_H xr0, xr2, xr3, xr4, xr0, xr1, \
xr2, xr3, xr10, xr12, xr13
DCT4_1D_LASX xr2, xr0, xr3, xr1, xr4    /* horizontal pass */
/* Regroup the rows into the four 4x4 sub-block layout before storing. */
xvilvh.d xr0, xr2, xr3 /* 6, 2 */
xvilvl.d xr3, xr2, xr3 /* 4, 0 */
xvilvh.d xr2, xr1, xr4 /* 7, 3 */
xvilvl.d xr4, xr1, xr4 /* 5, 1 */
xvor.v xr1, xr3, xr3
xvpermi.q xr3, xr4, 0x02 /* 1, 0 */
xvor.v xr5, xr0, xr0
xvpermi.q xr0, xr2, 0x02 /* 3, 2 */
xvpermi.q xr1, xr4, 0x13 /* 4, 5 */
xvpermi.q xr5, xr2, 0x13 /* 7, 6 */
xvst xr3, a0, 0
xvst xr0, a0, 16 * 2
xvst xr1, a0, 16 * 4
xvst xr5, a0, 16 * 6
.endm
/* SUB8x8_DCT_CORE_LSX — LSX (128-bit) version of SUB8x8_DCT_CORE_LASX:
 * forward 4x4 DCT of the 8x8 enc-dec difference, four 4x4 coefficient
 * blocks stored at a0. Expects vr8 pre-zeroed. Rows are processed in two
 * interleaved groups: vr0..vr3 (left column pair) and vr4..vr7 (right). */
.macro SUB8x8_DCT_CORE_LSX
/* Load rows 0,1,4,5 of enc and dec, widen u8->s16, subtract. */
fld.d f0, a1, FENC_STRIDE * 0
fld.d f1, a1, FENC_STRIDE * 1
fld.d f4, a1, FENC_STRIDE * 4
fld.d f5, a1, FENC_STRIDE * 5
fld.d f2, a2, FDEC_STRIDE * 0
fld.d f3, a2, FDEC_STRIDE * 1
fld.d f6, a2, FDEC_STRIDE * 4
fld.d f7, a2, FDEC_STRIDE * 5
vilvl.b vr0, vr8, vr0
vilvl.b vr1, vr8, vr1
vilvl.b vr4, vr8, vr4
vilvl.b vr5, vr8, vr5
vilvl.b vr2, vr8, vr2
vilvl.b vr3, vr8, vr3
vilvl.b vr6, vr8, vr6
vilvl.b vr7, vr8, vr7
vsub.h vr0, vr0, vr2
vsub.h vr4, vr4, vr6
vsub.h vr1, vr1, vr3
vsub.h vr5, vr5, vr7
/* Load rows 2,3,6,7 and form their differences. */
fld.d f2, a1, FENC_STRIDE * 2
fld.d f3, a1, FENC_STRIDE * 3
fld.d f6, a1, FENC_STRIDE * 6
fld.d f7, a1, FENC_STRIDE * 7
fld.d f9, a2, FDEC_STRIDE * 2
fld.d f11, a2, FDEC_STRIDE * 3
fld.d f10, a2, FDEC_STRIDE * 6
fld.d f12, a2, FDEC_STRIDE * 7
vilvl.b vr2, vr8, vr2
vilvl.b vr3, vr8, vr3
vilvl.b vr6, vr8, vr6
vilvl.b vr7, vr8, vr7
vilvl.b vr9, vr8, vr9
vilvl.b vr11, vr8, vr11
vilvl.b vr10, vr8, vr10
vilvl.b vr12, vr8, vr12
vsub.h vr2, vr2, vr9
vsub.h vr6, vr6, vr10
vsub.h vr3, vr3, vr11
vsub.h vr7, vr7, vr12
/* Vertical DCT pass (butterflies on rows 0..3 and 4..7 in parallel). */
vadd.h vr9, vr3, vr0
vadd.h vr10, vr7, vr4
vsub.h vr0, vr0, vr3
vsub.h vr4, vr4, vr7
vadd.h vr3, vr2, vr1
vadd.h vr7, vr6, vr5
vsub.h vr1, vr1, vr2
vsub.h vr5, vr5, vr6
vadd.h vr2, vr3, vr9
vadd.h vr6, vr7, vr10
vsub.h vr9, vr9, vr3
vsub.h vr10, vr10, vr7
vsub.h vr3, vr0, vr1
vsub.h vr7, vr4, vr5
vsub.h vr3, vr3, vr1
vsub.h vr7, vr7, vr5
vadd.h vr0, vr0, vr0
vadd.h vr4, vr4, vr4
vadd.h vr0, vr0, vr1
vadd.h vr4, vr4, vr5
/* 4x4 transpose of each half via interleaves (h -> w -> d). */
vilvh.h vr11, vr0, vr2
vilvh.h vr12, vr4, vr6
vilvl.h vr13, vr0, vr2
vilvl.h vr14, vr4, vr6
vilvh.h vr15, vr3, vr9
vilvh.h vr16, vr7, vr10
vilvl.h vr17, vr3, vr9
vilvl.h vr18, vr7, vr10
vilvh.w vr19, vr17, vr13
vilvh.w vr20, vr18, vr14
vilvl.w vr13, vr17, vr13
vilvl.w vr14, vr18, vr14
vilvh.w vr17, vr15, vr11
vilvh.w vr18, vr16, vr12
vilvl.w vr11, vr15, vr11
vilvl.w vr12, vr16, vr12
vilvh.d vr0, vr11, vr13
vilvh.d vr4, vr12, vr14
vilvl.d vr2, vr11, vr13
vilvl.d vr6, vr12, vr14
vilvh.d vr1, vr17, vr19
vilvh.d vr5, vr18, vr20
vilvl.d vr3, vr17, vr19
vilvl.d vr7, vr18, vr20
/* Horizontal DCT pass. */
vadd.h vr9, vr1, vr2
vadd.h vr10, vr5, vr6
vsub.h vr2, vr2, vr1
vsub.h vr6, vr6, vr5
vadd.h vr1, vr3, vr0
vadd.h vr5, vr7, vr4
vsub.h vr0, vr0, vr3
vsub.h vr4, vr4, vr7
vadd.h vr3, vr1, vr9
vadd.h vr7, vr5, vr10
vsub.h vr9, vr9, vr1
vsub.h vr10, vr10, vr5
vsub.h vr1, vr2, vr0
vsub.h vr5, vr6, vr4
vsub.h vr1, vr1, vr0
vsub.h vr5, vr5, vr4
vadd.h vr2, vr2, vr2
vadd.h vr6, vr6, vr6
vadd.h vr2, vr2, vr0
vadd.h vr6, vr6, vr4
/* Regroup rows into 4x4 coefficient block order and store 128 bytes. */
vilvh.d vr0, vr2, vr3
vilvh.d vr4, vr6, vr7
vilvl.d vr3, vr2, vr3
vilvl.d vr7, vr6, vr7
vilvh.d vr2, vr1, vr9
vilvh.d vr6, vr5, vr10
vilvl.d vr9, vr1, vr9
vilvl.d vr10, vr5, vr10
/* NOTE(review): the vr1 copy below appears unused before vr1 is reloaded
 * at the next macro invocation — looks like dead code; confirm. */
vor.v vr1, vr3, vr3
vor.v vr5, vr7, vr7
vor.v vr12, vr4, vr4
vst vr3, a0, 0
vst vr9, a0, 16
vst vr0, a0, 32
vst vr2, a0, 48
vst vr5, a0, 64
vst vr10, a0, 80
vst vr12, a0, 96
vst vr6, a0, 112
.endm
/* void subwxh_dct( dctcoef*, pixel*, pixel* ) */
/* sub4x4_dct: a0 = dct output (16 coeffs), a1 = enc pixels, a2 = dec pixels.
 * Loads the 4x4 difference, runs the 2-D forward transform
 * (vertical pass, transpose, horizontal pass), stores 32 bytes. */
function_x264 sub4x4_dct_lsx
fld.s f0, a1, 0
fld.s f4, a2, 0
fld.s f1, a1, FENC_STRIDE
fld.s f5, a2, FDEC_STRIDE
vsllwil.hu.bu vr0, vr0, 0           /* widen u8 pixels to u16 */
vsllwil.hu.bu vr1, vr1, 0
vsllwil.hu.bu vr4, vr4, 0
vsllwil.hu.bu vr5, vr5, 0
fld.s f2, a1, FENC_STRIDE * 2
fld.s f6, a2, FDEC_STRIDE * 2
fld.s f3, a1, FENC_STRIDE * 3
fld.s f7, a2, FDEC_STRIDE * 3
vsllwil.hu.bu vr2, vr2, 0
vsllwil.hu.bu vr3, vr3, 0
vsllwil.hu.bu vr6, vr6, 0
vsllwil.hu.bu vr7, vr7, 0
vsub.h vr0, vr0, vr4                /* diff = enc - dec */
vsub.h vr1, vr1, vr5
vsub.h vr2, vr2, vr6
vsub.h vr3, vr3, vr7
DCT4_1D_LSX vr4, vr5, vr6, vr7, vr0, vr1, vr2, vr3
LSX_TRANSPOSE4x4_H vr4, vr5, vr6, vr7, vr4, vr5, vr6, vr7, vr0, vr1
DCT4_1D_LSX vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7
vshuf4i.d vr0, vr1, 0x8             /* pack rows pairwise for the store */
vshuf4i.d vr2, vr3, 0x8
vst vr0, a0, 0
vst vr2, a0, 16
endfunc_x264
/* sub8x8_dct (LASX): zero xr8 (widening zero source) and run the core once. */
function_x264 sub8x8_dct_lasx
xvxor.v xr8, xr8, xr8
SUB8x8_DCT_CORE_LASX
endfunc_x264
/* sub8x8_dct (LSX): zero vr8 (widening zero source) and run the core once. */
function_x264 sub8x8_dct_lsx
vxor.v vr8, vr8, vr8
SUB8x8_DCT_CORE_LSX
endfunc_x264
/* sub16x16_dct (LASX): four 8x8 cores over the quadrants, output advances
 * 128 bytes per quadrant; pixel pointers step right 8, then down 8 rows
 * and back left, then right 8 again. */
function_x264 sub16x16_dct_lasx
xvxor.v xr8, xr8, xr8
SUB8x8_DCT_CORE_LASX
addi.d a0, a0, 32 * 4
addi.d a1, a1, 8
addi.d a2, a2, 8
SUB8x8_DCT_CORE_LASX
addi.d a0, a0, 32 * 4
addi.d a1, a1, 8*FENC_STRIDE - 8
addi.d a2, a2, 8*FDEC_STRIDE - 8
SUB8x8_DCT_CORE_LASX
addi.d a0, a0, 32 * 4
addi.d a1, a1, 8
addi.d a2, a2, 8
SUB8x8_DCT_CORE_LASX
endfunc_x264
/* sub16x16_dct (LSX): same quadrant walk as the LASX version. */
function_x264 sub16x16_dct_lsx
vxor.v vr8, vr8, vr8
SUB8x8_DCT_CORE_LSX
addi.d a0, a0, 32 * 4
addi.d a1, a1, 8
addi.d a2, a2, 8
SUB8x8_DCT_CORE_LSX
addi.d a0, a0, 32 * 4
addi.d a1, a1, 8*FENC_STRIDE - 8
addi.d a2, a2, 8*FDEC_STRIDE - 8
SUB8x8_DCT_CORE_LSX
addi.d a0, a0, 32 * 4
addi.d a1, a1, 8
addi.d a2, a2, 8
SUB8x8_DCT_CORE_LSX
endfunc_x264
/*
 * void add4x4_idct( pixel *p_dst, dctcoef dct[16] )
 */
/* Inverse 4x4 transform of dct[] (vertical pass, transpose, horizontal
 * pass), round by (x+32)>>6 via vssrarni #6, add to the prediction at
 * p_dst and store with unsigned saturation. */
function_x264 add4x4_idct_lsx
vxor.v vr0, vr1, vr1                /* vr0 = 0 (zero source for widening) */
fld.d f1, a1, 0
fld.d f2, a1, 8
fld.d f3, a1, 16
fld.d f4, a1, 24
/* First (vertical) IDCT pass: d1>>1 and d3>>1 supply the odd terms. */
vsrai.h vr5, vr2, 1
vsrai.h vr6, vr4, 1
vilvl.h vr1, vr1, vr3
vilvl.h vr15, vr2, vr6
vilvl.h vr16, vr5, vr4
vhaddw.w.h vr7, vr1, vr1            /* even sum / diff */
vhsubw.w.h vr8, vr1, vr1
vhaddw.w.h vr9, vr15, vr15          /* odd sums */
vhsubw.w.h vr10, vr16, vr16
vadd.w vr1, vr7, vr9
vadd.w vr2, vr8, vr10
vsub.w vr3, vr8, vr10
vsub.w vr4, vr7, vr9
vpickev.h vr1, vr1, vr1             /* narrow back to halfwords */
vpickev.h vr2, vr2, vr2
vpickev.h vr3, vr3, vr3
vpickev.h vr4, vr4, vr4
LSX_TRANSPOSE4x4_H vr1, vr2, vr3, vr4, vr1, vr2, vr3, vr4, vr5, vr6
/* Second (horizontal) pass — same butterfly. */
vsrai.h vr5, vr2, 1
vsrai.h vr6, vr4, 1
vilvl.h vr1, vr1, vr3
vilvl.h vr15, vr2, vr6
vilvl.h vr16, vr5, vr4
vhaddw.w.h vr7, vr1, vr1
vhsubw.w.h vr8, vr1, vr1
vhaddw.w.h vr9, vr15, vr15
vhsubw.w.h vr10, vr16, vr16
vadd.w vr1, vr7, vr9
vadd.w vr2, vr8, vr10
vsub.w vr3, vr8, vr10
vsub.w vr4, vr7, vr9
vssrarni.h.w vr2, vr1, 6            /* rounding shift by 6, saturating narrow */
vssrarni.h.w vr4, vr3, 6
/* Add residual to the four prediction rows and store saturated bytes. */
fld.s f1, a0, 0
fld.s f5, a0, FDEC_STRIDE
fld.s f3, a0, FDEC_STRIDE * 2
fld.s f6, a0, FDEC_STRIDE * 3
vilvl.b vr1, vr0, vr1
vilvl.b vr5, vr0, vr5
vilvl.b vr3, vr0, vr3
vilvl.b vr6, vr0, vr6
vilvl.d vr1, vr5, vr1
vilvl.d vr3, vr6, vr3
vadd.h vr7, vr1, vr2
vadd.h vr8, vr3, vr4
vssrarni.bu.h vr8, vr7, 0
vstelm.w vr8, a0, 0, 0
vstelm.w vr8, a0, FDEC_STRIDE, 1
vstelm.w vr8, a0, FDEC_STRIDE * 2, 2
vstelm.w vr8, a0, FDEC_STRIDE * 3, 3
endfunc_x264
/* LASX_SUMSUB_W — word butterfly: \sum = \in0 + \in1, \diff = \in0 - \in1. */
.macro LASX_SUMSUB_W sum, diff, in0, in1
xvadd.w \sum, \in0, \in1
xvsub.w \diff, \in0, \in1
.endm
/* add8x4_idct_core_lasx — inverse-transforms two adjacent 4x4 blocks
 * (64 bytes of coefficients at a1) and adds them to an 8x4 region at a0.
 * Expects xr0 pre-zeroed by the caller (byte-widening zero source). */
.macro add8x4_idct_core_lasx
fld.d f1, a1, 0
fld.d f2, a1, 8
fld.d f3, a1, 16
fld.d f4, a1, 24
fld.d f5, a1, 32
fld.d f6, a1, 40
fld.d f7, a1, 48
fld.d f8, a1, 56
xvinsve0.d xr1, xr5, 1              /* pair the two blocks lane-wise */
xvinsve0.d xr2, xr6, 1
xvinsve0.d xr3, xr7, 1
xvinsve0.d xr4, xr8, 1
/* Vertical IDCT pass in 32-bit precision. */
xvsrai.h xr8, xr2, 1
xvsrai.h xr9, xr4, 1
vext2xv.w.h xr1, xr1
vext2xv.w.h xr5, xr2
vext2xv.w.h xr6, xr3
vext2xv.w.h xr7, xr4
vext2xv.w.h xr8, xr8
vext2xv.w.h xr9, xr9
LASX_SUMSUB_W xr10, xr11, xr1, xr6
xvadd.w xr12, xr5, xr9
xvsub.w xr13, xr8, xr7
LASX_SUMSUB_W xr6, xr9, xr10, xr12
LASX_SUMSUB_W xr7, xr8, xr11, xr13
xvpickev.h xr10, xr6, xr6           /* narrow for the transpose */
xvpickev.h xr11, xr7, xr7
xvpickev.h xr12, xr8, xr8
xvpickev.h xr13, xr9, xr9
LASX_TRANSPOSE4x8_H xr10, xr11, xr12, xr13, xr10, xr11, xr12, xr13, \
xr4, xr5
/* Horizontal pass, again in 32-bit. */
xvsllwil.w.h xr10, xr10, 0
xvsllwil.w.h xr11, xr11, 0
xvsllwil.w.h xr12, xr12, 0
xvsllwil.w.h xr13, xr13, 0
xvsrai.w xr14, xr11, 1
xvsrai.w xr15, xr13, 1
LASX_SUMSUB_W xr4, xr5, xr10, xr12
xvadd.w xr6, xr11, xr15
xvsub.w xr7, xr14, xr13
LASX_SUMSUB_W xr10, xr13, xr4, xr6
LASX_SUMSUB_W xr11, xr12, xr5, xr7
xvssrarni.h.w xr11, xr10, 6         /* (x+32)>>6, saturating narrow */
xvssrarni.h.w xr13, xr12, 6
/* Gather the 8x4 prediction, add residual, saturate, scatter back. */
fld.s f1, a0, 0
fld.s f2, a0, FDEC_STRIDE
fld.s f3, a0, FDEC_STRIDE * 2
fld.s f4, a0, FDEC_STRIDE * 3
fld.s f5, a0, 4
fld.s f6, a0, FDEC_STRIDE + 4
fld.s f7, a0, FDEC_STRIDE * 2 + 4
fld.s f8, a0, FDEC_STRIDE * 3 + 4
xvinsve0.w xr1, xr2, 1
xvinsve0.w xr3, xr4, 1
xvinsve0.w xr5, xr6, 1
xvinsve0.w xr7, xr8, 1
xvinsve0.d xr1, xr5, 2
xvinsve0.d xr3, xr7, 2
xvilvl.b xr1, xr0, xr1
xvilvl.b xr3, xr0, xr3
xvadd.h xr1, xr1, xr11
xvadd.h xr3, xr3, xr13
xvssrarni.bu.h xr3, xr1, 0
xvstelm.w xr3, a0, 0, 0
xvstelm.w xr3, a0, FDEC_STRIDE, 1
xvstelm.w xr3, a0, FDEC_STRIDE * 2, 2
xvstelm.w xr3, a0, FDEC_STRIDE * 3, 3
xvstelm.w xr3, a0, 4, 4
xvstelm.w xr3, a0, FDEC_STRIDE + 4, 5
xvstelm.w xr3, a0, FDEC_STRIDE * 2 + 4, 6
xvstelm.w xr3, a0, FDEC_STRIDE * 3 + 4, 7
.endm
/* LSX_SUMSUB_W — double word butterfly on two register pairs:
 * \sum0/\sum1 = \in0/\in1 + \in2/\in3, \diff0/\diff1 = \in0/\in1 - \in2/\in3. */
.macro LSX_SUMSUB_W sum0, sum1, diff0, diff1, in0, in1, in2, in3
vadd.w \sum0, \in0, \in2
vadd.w \sum1, \in1, \in3
vsub.w \diff0, \in0, \in2
vsub.w \diff1, \in1, \in3
.endm
/* add8x4_idct_core_lsx — LSX version of add8x4_idct_core_lasx: inverse
 * transform of two 4x4 blocks at a1 added into an 8x4 region at a0.
 * Expects vr0 pre-zeroed. Each logical value is split across a register
 * pair (low/high halves of the pairwise-widened data). */
.macro add8x4_idct_core_lsx
fld.d f1, a1, 0
fld.d f2, a1, 8
fld.d f3, a1, 16
fld.d f4, a1, 24
fld.d f5, a1, 32
fld.d f6, a1, 40
fld.d f7, a1, 48
fld.d f8, a1, 56
vpermi.w vr9, vr6, 0x04             /* combine rows of both blocks */
vpermi.w vr9, vr2, 0x44
vpermi.w vr10, vr8, 0x04
vpermi.w vr10, vr4, 0x44
vsrai.h vr9, vr9, 1                 /* d1>>1, d3>>1 odd-term inputs */
vsrai.h vr10, vr10, 1
vsllwil.w.h vr1, vr1, 0             /* widen everything to 32 bit */
vsllwil.w.h vr5, vr5, 0
vsllwil.w.h vr2, vr2, 0
vsllwil.w.h vr6, vr6, 0
vsllwil.w.h vr3, vr3, 0
vsllwil.w.h vr7, vr7, 0
vsllwil.w.h vr4, vr4, 0
vsllwil.w.h vr8, vr8, 0
vexth.w.h vr11, vr9
vsllwil.w.h vr9, vr9, 0
vexth.w.h vr12, vr10
vsllwil.w.h vr10, vr10, 0
/* Vertical IDCT pass. */
LSX_SUMSUB_W vr13, vr14, vr15, vr16, vr1, vr5, vr3, vr7
vadd.w vr17, vr2, vr10
vadd.w vr18, vr6, vr12
vsub.w vr19, vr9, vr4
vsub.w vr20, vr11, vr8
LSX_SUMSUB_W vr3, vr7, vr10, vr12, vr13, vr14, vr17, vr18
LSX_SUMSUB_W vr4, vr8, vr9, vr11, vr15, vr16, vr19, vr20
vpickev.h vr13, vr3, vr3            /* narrow + transpose each block */
vpickev.h vr14, vr7, vr7
vpickev.h vr15, vr4, vr4
vpickev.h vr16, vr8, vr8
vpickev.h vr17, vr9, vr9
vpickev.h vr18, vr11, vr11
vpickev.h vr19, vr10, vr10
vpickev.h vr20, vr12, vr12
LSX_TRANSPOSE4x4_H vr13, vr15, vr17, vr19, vr13, vr15, vr17, vr19, vr1, vr3
LSX_TRANSPOSE4x4_H vr14, vr16, vr18, vr20, vr14, vr16, vr18, vr20, vr2, vr4
vsllwil.w.h vr13, vr13, 0
vsllwil.w.h vr14, vr14, 0
vsllwil.w.h vr15, vr15, 0
vsllwil.w.h vr16, vr16, 0
vsllwil.w.h vr17, vr17, 0
vsllwil.w.h vr18, vr18, 0
vsllwil.w.h vr19, vr19, 0
vsllwil.w.h vr20, vr20, 0
/* Horizontal pass. */
vsrai.w vr1, vr15, 1
vsrai.w vr2, vr16, 1
vsrai.w vr3, vr19, 1
vsrai.w vr4, vr20, 1
LSX_SUMSUB_W vr5, vr6, vr21, vr22, vr13, vr14, vr17, vr18
vadd.w vr8, vr15, vr3
vadd.w vr9, vr16, vr4
vsub.w vr10, vr1, vr19
vsub.w vr11, vr2, vr20
LSX_SUMSUB_W vr13, vr14, vr19, vr20, vr5, vr6, vr8, vr9
LSX_SUMSUB_W vr15, vr16, vr17, vr18, vr21, vr22, vr10, vr11
vssrarni.h.w vr15, vr13, 6          /* (x+32)>>6, saturating narrow */
vssrarni.h.w vr16, vr14, 6
vssrarni.h.w vr19, vr17, 6
vssrarni.h.w vr20, vr18, 6
/* Add to prediction and store. */
fld.s f1, a0, 0
fld.s f2, a0, FDEC_STRIDE
fld.s f3, a0, FDEC_STRIDE * 2
fld.s f4, a0, FDEC_STRIDE * 3
fld.s f5, a0, 4
fld.s f6, a0, FDEC_STRIDE + 4
fld.s f7, a0, FDEC_STRIDE * 2 + 4
fld.s f8, a0, FDEC_STRIDE * 3 + 4
vpickve2gr.w t0, vr2, 0
vinsgr2vr.w vr1, t0, 1
vpickve2gr.w t0, vr4, 0
vinsgr2vr.w vr3, t0, 1
vpickve2gr.w t0, vr6, 0
vinsgr2vr.w vr5, t0, 1
vpickve2gr.w t0, vr8, 0
vinsgr2vr.w vr7, t0, 1
vilvl.b vr1, vr0, vr1
vilvl.b vr5, vr0, vr5
vilvl.b vr3, vr0, vr3
vilvl.b vr7, vr0, vr7
vadd.h vr1, vr1, vr15
vadd.h vr5, vr5, vr16
vadd.h vr3, vr3, vr19
vadd.h vr7, vr7, vr20
vssrarni.bu.h vr3, vr1, 0
vssrarni.bu.h vr7, vr5, 0
vstelm.w vr3, a0, 0, 0
vstelm.w vr3, a0, FDEC_STRIDE, 1
vstelm.w vr3, a0, FDEC_STRIDE * 2, 2
vstelm.w vr3, a0, FDEC_STRIDE * 3, 3
vstelm.w vr7, a0, 4, 0
vstelm.w vr7, a0, FDEC_STRIDE + 4, 1
vstelm.w vr7, a0, FDEC_STRIDE * 2 + 4, 2
vstelm.w vr7, a0, FDEC_STRIDE * 3 + 4, 3
.endm
/*
 * void add8x8_idct( pixel *p_dst, dctcoef dct[4][16] )
 *
 */
/* Two 8x4 core passes cover the 8x8 area; xr0 is the zero source. */
function_x264 add8x8_idct_lasx
xvxor.v xr0, xr1, xr1
add8x4_idct_core_lasx
addi.d a0, a0, FDEC_STRIDE * 4      /* lower half: next 4 rows, next 2 blocks */
addi.d a1, a1, 64
add8x4_idct_core_lasx
endfunc_x264
/* add8x8_idct_core_lsx — convenience wrapper: two 8x4 passes, advancing
 * dst by 4 rows and dct by 64 bytes in between. Modifies a0/a1. */
.macro add8x8_idct_core_lsx
add8x4_idct_core_lsx
addi.d a0, a0, FDEC_STRIDE * 4
addi.d a1, a1, 64
add8x4_idct_core_lsx
.endm
/* add8x8_idct (LSX): zero vr0 and run the 8x8 core. */
function_x264 add8x8_idct_lsx
vxor.v vr0, vr1, vr1
add8x8_idct_core_lsx
endfunc_x264
/*
 * void add16x16_idct( pixel *p_dst, dctcoef dct[16][16] )
 */
/* Four 8x8 quadrants (each two 8x4 core passes); t4/t5 keep the original
 * dst/dct pointers so each quadrant is addressed absolutely. */
function_x264 add16x16_idct_lasx
move t4, a0
move t5, a1
xvxor.v xr0, xr1, xr1
add8x4_idct_core_lasx               /* top-left quadrant */
addi.d a0, a0, FDEC_STRIDE * 4
addi.d a1, a1, 64
add8x4_idct_core_lasx
addi.d a0, t4, 8                    /* top-right quadrant */
addi.d a1, t5, 128
add8x4_idct_core_lasx
addi.d a0, a0, FDEC_STRIDE * 4
addi.d a1, a1, 64
add8x4_idct_core_lasx
addi.d t6, t4, FDEC_STRIDE * 8      /* bottom-left quadrant */
move a0, t6
addi.d a1, t5, 256
add8x4_idct_core_lasx
addi.d a0, a0, FDEC_STRIDE * 4
addi.d a1, a1, 64
add8x4_idct_core_lasx
addi.d a0, t6, 8                    /* bottom-right quadrant */
addi.d a1, t5, 384
add8x4_idct_core_lasx
addi.d a0, a0, FDEC_STRIDE * 4
addi.d a1, a1, 64
add8x4_idct_core_lasx
endfunc_x264
/* add16x16_idct (LSX): four 8x8 cores over the quadrants, same pointer
 * bookkeeping as the LASX version (t4/t5 = original dst/dct). */
function_x264 add16x16_idct_lsx
move t4, a0
move t5, a1
vxor.v vr0, vr1, vr1
add8x8_idct_core_lsx                /* top-left */
addi.d a0, t4, 8                    /* top-right */
addi.d a1, t5, 128
add8x8_idct_core_lsx
addi.d t6, t4, FDEC_STRIDE * 8      /* bottom-left */
move a0, t6
addi.d a1, t5, 256
add8x8_idct_core_lsx
addi.d a0, t6, 8                    /* bottom-right */
addi.d a1, t5, 384
add8x8_idct_core_lsx
endfunc_x264
/*
 * void add8x8_idct8( pixel *dst, dctcoef dct[64] )
 */
/* Full 8x8 inverse transform: bias DC by 32 (so the final >>6 rounds),
 * vertical 8-point pass in 32-bit, 8x8 word transpose, horizontal pass,
 * >>6, then add to the prediction with unsigned saturation. */
function_x264 add8x8_idct8_lasx
/* NOTE(review): xr20 is zeroed here but does not appear to be read again
 * in this function — possibly leftover; confirm. */
xvxor.v xr20, xr1, xr1
// dct[0] += 32
ld.h t0, a1, 0
addi.w t0, t0, 32
st.h t0, a1, 0
/* Vertical pass, even rows (0,2,4,6) first. */
vld vr0, a1, 0
vld vr2, a1, 32
vld vr4, a1, 64
vld vr6, a1, 96
vsrai.h vr8, vr2, 1
vsrai.h vr10, vr6, 1
vext2xv.w.h xr0, xr0
vext2xv.w.h xr2, xr2
vext2xv.w.h xr4, xr4
vext2xv.w.h xr6, xr6
vext2xv.w.h xr8, xr8
vext2xv.w.h xr10, xr10
LASX_SUMSUB_W xr11, xr12, xr0, xr4
xvsub.w xr13, xr8, xr6
xvadd.w xr14, xr10, xr2
LASX_SUMSUB_W xr15, xr18, xr11, xr14
LASX_SUMSUB_W xr16, xr17, xr12, xr13
/* Odd rows (1,3,5,7) and the 8-point odd-part recurrences. */
vld vr0, a1, 16
vld vr2, a1, 48
vld vr4, a1, 80
vld vr6, a1, 112
vsrai.h vr1, vr0, 1
vsrai.h vr3, vr2, 1
vsrai.h vr5, vr4, 1
vsrai.h vr7, vr6, 1
vext2xv.w.h xr0, xr0
vext2xv.w.h xr2, xr2
vext2xv.w.h xr4, xr4
vext2xv.w.h xr6, xr6
vext2xv.w.h xr1, xr1
vext2xv.w.h xr3, xr3
vext2xv.w.h xr5, xr5
vext2xv.w.h xr7, xr7
LASX_SUMSUB_W xr9, xr10, xr4, xr2
LASX_SUMSUB_W xr11, xr12, xr6, xr0
xvsub.w xr10, xr10, xr6
xvsub.w xr10, xr10, xr7
xvsub.w xr11, xr11, xr2
xvsub.w xr11, xr11, xr3
xvadd.w xr12, xr12, xr4
xvadd.w xr12, xr12, xr5
xvadd.w xr9, xr9, xr0
xvadd.w xr9, xr9, xr1
xvsrai.w xr1, xr10, 2
xvsrai.w xr2, xr11, 2
xvsrai.w xr3, xr12, 2
xvsrai.w xr4, xr9, 2
xvadd.w xr5, xr4, xr10
xvadd.w xr6, xr3, xr11
xvsub.w xr7, xr2, xr12
xvsub.w xr8, xr9, xr1
/* Combine even/odd halves, then transpose to run the horizontal pass. */
LASX_SUMSUB_W xr1, xr14, xr15, xr8
LASX_SUMSUB_W xr2, xr13, xr16, xr7
LASX_SUMSUB_W xr3, xr12, xr17, xr6
LASX_SUMSUB_W xr4, xr11, xr18, xr5
LASX_TRANSPOSE8x8_W xr1, xr2, xr3, xr4, xr11, xr12, xr13, xr14, \
xr5, xr6, xr7, xr8, xr15, xr16, xr17, xr18, \
xr9, xr10, xr21, xr22
/* Horizontal pass (even part). */
xvsrai.h xr9, xr7, 1
xvsrai.h xr10, xr17, 1
xvaddwev.w.h xr1, xr5, xr15
xvsubwev.w.h xr2, xr5, xr15
xvsubwev.w.h xr3, xr9, xr17
xvaddwev.w.h xr4, xr10, xr7
LASX_SUMSUB_W xr11, xr14, xr1, xr4
LASX_SUMSUB_W xr12, xr13, xr2, xr3
/* Horizontal pass (odd part). */
xvsrai.h xr1, xr6, 1
xvsrai.h xr2, xr8, 1
xvsrai.h xr3, xr16, 1
xvsrai.h xr4, xr18, 1
xvaddwev.w.h xr5, xr16, xr8
xvsubwev.w.h xr10, xr16, xr8
xvaddwev.w.h xr7, xr18, xr6
xvsubwev.w.h xr9, xr18, xr6
xvaddwev.w.h xr4, xr18, xr4
xvsub.w xr10, xr10, xr4
xvaddwev.w.h xr2, xr8, xr2
xvsub.w xr7, xr7, xr2
xvaddwev.w.h xr3, xr16, xr3
xvadd.w xr9, xr9, xr3
xvaddwev.w.h xr1, xr6, xr1
xvadd.w xr5, xr5, xr1
xvsrai.w xr1, xr10, 2
xvsrai.w xr2, xr7, 2
xvsrai.w xr3, xr9, 2
xvsrai.w xr4, xr5, 2
xvadd.w xr15, xr4, xr10
xvadd.w xr16, xr7, xr3
xvsub.w xr17, xr2, xr9
xvsub.w xr18, xr5, xr1
/* Final butterflies and >>6 (bias already folded into dct[0]). */
LASX_SUMSUB_W xr1, xr8, xr11, xr18
LASX_SUMSUB_W xr2, xr7, xr12, xr17
LASX_SUMSUB_W xr3, xr6, xr13, xr16
LASX_SUMSUB_W xr4, xr5, xr14, xr15
xvsrai.w xr11, xr1, 6
xvsrai.w xr12, xr2, 6
xvsrai.w xr13, xr3, 6
xvsrai.w xr14, xr4, 6
xvsrai.w xr15, xr5, 6
xvsrai.w xr16, xr6, 6
xvsrai.w xr17, xr7, 6
xvsrai.w xr18, xr8, 6
/* Add the residual rows to the 8 prediction rows and store saturated. */
fld.d f1, a0, 0
fld.d f2, a0, FDEC_STRIDE
fld.d f3, a0, FDEC_STRIDE * 2
fld.d f4, a0, FDEC_STRIDE * 3
fld.d f5, a0, FDEC_STRIDE * 4
fld.d f6, a0, FDEC_STRIDE * 5
fld.d f7, a0, FDEC_STRIDE * 6
fld.d f8, a0, FDEC_STRIDE * 7
vext2xv.wu.bu xr1, xr1
vext2xv.wu.bu xr2, xr2
vext2xv.wu.bu xr3, xr3
vext2xv.wu.bu xr4, xr4
vext2xv.wu.bu xr5, xr5
vext2xv.wu.bu xr6, xr6
vext2xv.wu.bu xr7, xr7
vext2xv.wu.bu xr8, xr8
xvadd.w xr1, xr1, xr11
xvadd.w xr2, xr2, xr12
xvadd.w xr3, xr3, xr13
xvadd.w xr4, xr4, xr14
xvadd.w xr5, xr5, xr15
xvadd.w xr6, xr6, xr16
xvadd.w xr7, xr7, xr17
xvadd.w xr8, xr8, xr18
xvssrarni.hu.w xr2, xr1, 0
xvssrarni.hu.w xr4, xr3, 0
xvssrarni.hu.w xr6, xr5, 0
xvssrarni.hu.w xr8, xr7, 0
xvpermi.d xr12, xr2, 0xd8
xvpermi.d xr14, xr4, 0xd8
xvpermi.d xr16, xr6, 0xd8
xvpermi.d xr18, xr8, 0xd8
xvssrlni.bu.h xr14, xr12, 0
xvssrlni.bu.h xr18, xr16, 0
xvstelm.d xr14, a0, 0, 0
xvstelm.d xr14, a0, FDEC_STRIDE, 2
xvstelm.d xr14, a0, FDEC_STRIDE * 2, 1
xvstelm.d xr14, a0, FDEC_STRIDE * 3, 3
xvstelm.d xr18, a0, FDEC_STRIDE * 4, 0
xvstelm.d xr18, a0, FDEC_STRIDE * 5, 2
xvstelm.d xr18, a0, FDEC_STRIDE * 6, 1
xvstelm.d xr18, a0, FDEC_STRIDE * 7, 3
endfunc_x264
function_x264 add8x8_idct8_lsx
ld.h t0, a1, 0
addi.w t0, t0, 32
st.h t0, a1, 0
vld vr0, a1, 0
vld vr2, a1, 32
vld vr4, a1, 64
vld vr6, a1, 96
vsrai.h vr8, vr2, 1
vsrai.h vr10, vr6, 1
vexth.w.h vr1, vr0
vsllwil.w.h vr0, vr0, 0
vexth.w.h vr3, vr2
vsllwil.w.h vr2, vr2, 0
vexth.w.h vr5, vr4
vsllwil.w.h vr4, vr4, 0
vexth.w.h vr7, vr6
vsllwil.w.h vr6, vr6, 0
vexth.w.h vr9, vr8
vsllwil.w.h vr8, vr8, 0
vexth.w.h vr11, vr10
vsllwil.w.h vr10, vr10, 0
LSX_SUMSUB_W vr12, vr13, vr14, vr15, vr0, vr1, vr4, vr5
vsub.w vr16, vr8, vr6
vsub.w vr17, vr9, vr7
vadd.w vr18, vr10, vr2
vadd.w vr19, vr11, vr3
LSX_SUMSUB_W vr20, vr21, vr18, vr19, vr12, vr13, vr18, vr19
LSX_SUMSUB_W vr22, vr23, vr16, vr17, vr14, vr15, vr16, vr17
vld vr0, a1, 16
vld vr2, a1, 48
vld vr4, a1, 80
vld vr6, a1, 112
vsrai.h vr1, vr0, 1
vsrai.h vr3, vr2, 1
vsrai.h vr5, vr4, 1
vsrai.h vr7, vr6, 1
vexth.w.h vr8, vr0
vsllwil.w.h vr0, vr0, 0
vexth.w.h vr10, vr2
vsllwil.w.h vr2, vr2, 0
vexth.w.h vr12, vr4
vsllwil.w.h vr4, vr4, 0
vexth.w.h vr14, vr6
vsllwil.w.h vr6, vr6, 0
vexth.w.h vr9, vr1
vsllwil.w.h vr1, vr1, 0
vexth.w.h vr11, vr3
vsllwil.w.h vr3, vr3, 0
vexth.w.h vr13, vr5
vsllwil.w.h vr5, vr5, 0
vexth.w.h vr15, vr7
vsllwil.w.h vr7, vr7, 0
addi.d sp, sp, -64
fst.d f24, sp, 0
fst.d f25, sp, 8
fst.d f26, sp, 16
fst.d f27, sp, 24
fst.d f28, sp, 32
fst.d f29, sp, 40
fst.d f30, sp, 48
fst.d f31, sp, 56
LSX_SUMSUB_W vr24, vr25, vr26, vr27, vr4, vr12, vr2, vr10
LSX_SUMSUB_W vr28, vr29, vr30, vr31, vr6, vr14, vr0, vr8
vsub.w vr26, vr26, vr6
vsub.w vr27, vr27, vr14
vsub.w vr26, vr26, vr7
vsub.w vr27, vr27, vr15
vsub.w vr28, vr28, vr2
vsub.w vr29, vr29, vr10
vsub.w vr28, vr28, vr3
vsub.w vr29, vr29, vr11
vadd.w vr30, vr30, vr4
vadd.w vr31, vr31, vr12
vadd.w vr30, vr30, vr5
vadd.w vr31, vr31, vr13
vadd.w vr24, vr24, vr0
vadd.w vr25, vr25, vr8
vadd.w vr24, vr24, vr1
vadd.w vr25, vr25, vr9
vsrai.w vr1, vr26, 2
vsrai.w vr9, vr27, 2
vsrai.w vr2, vr28, 2
vsrai.w vr10, vr29, 2
vsrai.w vr3, vr30, 2
vsrai.w vr11, vr31, 2
vsrai.w vr4, vr24, 2
vsrai.w vr12, vr25, 2
vadd.w vr5, vr4, vr26
vadd.w vr13, vr12, vr27
vadd.w vr6, vr3, vr28
vadd.w vr14, vr11, vr29
vsub.w vr7, vr2, vr30
vsub.w vr15, vr10, vr31
vsub.w vr0, vr24, vr1
vsub.w vr8, vr25, vr9
LSX_SUMSUB_W vr1, vr9, vr30, vr31, vr20, vr21, vr0, vr8
LSX_SUMSUB_W vr2, vr10, vr28, vr29, vr22, vr23, vr7, vr15
LSX_SUMSUB_W vr3, vr11, vr26, vr27, vr16, vr17, vr6, vr14
LSX_SUMSUB_W vr4, vr12, vr24, vr25, vr18, vr19, vr5, vr13
LSX_TRANSPOSE4x4_W vr1, vr2, vr3, vr4, vr5, vr6, vr7, vr0, vr20, vr22
LSX_TRANSPOSE4x4_W vr9, vr10, vr11, vr12, vr20, vr22, vr16, vr18, vr1, vr2
LSX_TRANSPOSE4x4_W vr24, vr26, vr28, vr30, vr13, vr14, vr15, vr8, vr21, vr23
LSX_TRANSPOSE4x4_W vr25, vr27, vr29, vr31, vr21, vr23, vr17, vr19, vr24, vr26
vsrai.h vr3, vr7, 1
vsrai.h vr11, vr15, 1
vsrai.h vr4, vr16, 1
vsrai.h vr12, vr17, 1
vaddwev.w.h vr1, vr5, vr20
vaddwev.w.h vr9, vr13, vr21
vsubwev.w.h vr2, vr5, vr20
vsubwev.w.h vr10, vr13, vr21
vsubwev.w.h vr3, vr3, vr16
vsubwev.w.h vr11, vr11, vr17
vaddwev.w.h vr4, vr4, vr7
vaddwev.w.h vr12, vr12, vr15
LSX_SUMSUB_W vr24, vr25, vr30, vr31, vr1, vr9, vr4, vr12
LSX_SUMSUB_W vr26, vr27, vr28, vr29, vr2, vr10, vr3, vr11
vsrai.h vr1, vr6, 1
vsrai.h vr9, vr14, 1
vsrai.h vr2, vr0, 1
vsrai.h vr10, vr8, 1
vsrai.h vr3, vr22, 1
vsrai.h vr11, vr23, 1
vsrai.h vr4, vr18, 1
vsrai.h vr12, vr19, 1
vaddwev.w.h vr5, vr22, vr0
vaddwev.w.h vr13, vr23, vr8
vsubwev.w.h vr20, vr22, vr0
vsubwev.w.h vr21, vr23, vr8
vaddwev.w.h vr7, vr18, vr6
vaddwev.w.h vr15, vr19, vr14
vsubwev.w.h vr16, vr18, vr6
vsubwev.w.h vr17, vr19, vr14
vaddwev.w.h vr4, vr18, vr4
vaddwev.w.h vr12, vr19, vr12
vsub.w vr20, vr20, vr4
vsub.w vr21, vr21, vr12
vaddwev.w.h vr2, vr0, vr2
vaddwev.w.h vr10, vr8, vr10
vsub.w vr7, vr7, vr2
vsub.w vr15, vr15, vr10
vaddwev.w.h vr3, vr22, vr3
vaddwev.w.h vr11, vr23, vr11
vadd.w vr16, vr16, vr3
vadd.w vr17, vr17, vr11
vaddwev.w.h vr1, vr6, vr1
vaddwev.w.h vr9, vr14, vr9
vadd.w vr5, vr5, vr1
vadd.w vr13, vr13, vr9
vsrai.w vr1, vr20, 2
vsrai.w vr9, vr21, 2
vsrai.w vr2, vr7, 2
vsrai.w vr10, vr15, 2
vsrai.w vr3, vr16, 2
vsrai.w vr11, vr17, 2
vsrai.w vr4, vr5, 2
vsrai.w vr12, vr13, 2
vadd.w vr20, vr4, vr20
vadd.w vr21, vr12, vr21
vadd.w vr22, vr7, vr3
vadd.w vr23, vr15, vr11
vsub.w vr16, vr2, vr16
vsub.w vr17, vr10, vr17
vsub.w vr18, vr5, vr1
vsub.w vr19, vr13, vr9
LSX_SUMSUB_W vr1, vr9, vr0, vr8, vr24, vr25, vr18, vr19
LSX_SUMSUB_W vr2, vr10, vr7, vr15, vr26, vr27, vr16, vr17
LSX_SUMSUB_W vr3, vr11, vr6, vr14, vr28, vr29, vr22, vr23
LSX_SUMSUB_W vr4, vr12, vr5, vr13, vr30, vr31, vr20, vr21
vsrai.w vr24, vr1, 6
vsrai.w vr25, vr9, 6
vsrai.w vr26, vr2, 6
vsrai.w vr27, vr10, 6
vsrai.w vr28, vr3, 6
vsrai.w vr29, vr11, 6
vsrai.w vr30, vr4, 6
vsrai.w vr31, vr12, 6
vsrai.w vr20, vr5, 6
vsrai.w vr21, vr13, 6
vsrai.w vr22, vr6, 6
vsrai.w vr23, vr14, 6
vsrai.w vr16, vr7, 6
vsrai.w vr17, vr15, 6
vsrai.w vr18, vr0, 6
vsrai.w vr19, vr8, 6
fld.d f1, a0, 0
fld.d f2, a0, FDEC_STRIDE
fld.d f3, a0, FDEC_STRIDE * 2
fld.d f4, a0, FDEC_STRIDE * 3
fld.d f5, a0, FDEC_STRIDE * 4
fld.d f6, a0, FDEC_STRIDE * 5
fld.d f7, a0, FDEC_STRIDE * 6
fld.d f8, a0, FDEC_STRIDE * 7
vsllwil.hu.bu vr1, vr1, 0
vexth.wu.hu vr9, vr1
vsllwil.wu.hu vr1, vr1, 0
vsllwil.hu.bu vr2, vr2, 0
vexth.wu.hu vr10, vr2
vsllwil.wu.hu vr2, vr2, 0
vsllwil.hu.bu vr3, vr3, 0
vexth.wu.hu vr11, vr3
vsllwil.wu.hu vr3, vr3, 0
vsllwil.hu.bu vr4, vr4, 0
vexth.wu.hu vr12, vr4
vsllwil.wu.hu vr4, vr4, 0
vsllwil.hu.bu vr5, vr5, 0
vexth.wu.hu vr13, vr5
vsllwil.wu.hu vr5, vr5, 0
vsllwil.hu.bu vr6, vr6, 0
vexth.wu.hu vr14, vr6
vsllwil.wu.hu vr6, vr6, 0
vsllwil.hu.bu vr7, vr7, 0
vexth.wu.hu vr15, vr7
vsllwil.wu.hu vr7, vr7, 0
vsllwil.hu.bu vr8, vr8, 0
vexth.wu.hu vr0, vr8
vsllwil.wu.hu vr8, vr8, 0
vadd.w vr1, vr1, vr24
vadd.w vr9, vr9, vr25
vadd.w vr2, vr2, vr26
vadd.w vr10, vr10, vr27
vadd.w vr3, vr3, vr28
vadd.w vr11, vr11, vr29
vadd.w vr4, vr4, vr30
vadd.w vr12, vr12, vr31
vadd.w vr5, vr5, vr20
vadd.w vr13, vr13, vr21
vadd.w vr6, vr6, vr22
vadd.w vr14, vr14, vr23
vadd.w vr7, vr7, vr16
vadd.w vr15, vr15, vr17
vadd.w vr8, vr8, vr18
vadd.w vr0, vr0, vr19
vssrarni.hu.w vr2, vr1, 0
vssrarni.hu.w vr10, vr9, 0
vssrarni.hu.w vr4, vr3, 0
vssrarni.hu.w vr12, vr11, 0
vssrarni.hu.w vr6, vr5, 0
vssrarni.hu.w vr14, vr13, 0
vssrarni.hu.w vr8, vr7, 0
vssrarni.hu.w vr0, vr15, 0
vpermi.w vr20, vr10, 0x0E
vpermi.w vr10, vr2, 0x44
vpermi.w vr20, vr2, 0x4E
vpermi.w vr21, vr12, 0x0E
vpermi.w vr12, vr4, 0x44
vpermi.w vr21, vr4, 0x4E
vpermi.w vr22, vr14, 0x0E
vpermi.w vr14, vr6, 0x44
vpermi.w vr22, vr6, 0x4E
vpermi.w vr23, vr0, 0x0E
vpermi.w vr0, vr8, 0x44
vpermi.w vr23, vr8, 0x4E
vssrlni.bu.h vr12, vr10, 0
vssrlni.bu.h vr21, vr20, 0
vssrlni.bu.h vr0, vr14, 0
vssrlni.bu.h vr23, vr22, 0
vstelm.d vr12, a0, 0, 0
vstelm.d vr21, a0, FDEC_STRIDE, 0
vstelm.d vr12, a0, FDEC_STRIDE * 2, 1
vstelm.d vr21, a0, FDEC_STRIDE * 3, 1
vstelm.d vr0, a0, FDEC_STRIDE * 4, 0
vstelm.d vr23, a0, FDEC_STRIDE * 5, 0
vstelm.d vr0, a0, FDEC_STRIDE * 6, 1
vstelm.d vr23, a0, FDEC_STRIDE * 7, 1
fld.d f24, sp, 0
fld.d f25, sp, 8
fld.d f26, sp, 16
fld.d f27, sp, 24
fld.d f28, sp, 32
fld.d f29, sp, 40
fld.d f30, sp, 48
fld.d f31, sp, 56
addi.d sp, sp, 64
endfunc_x264
/*
 * Add two rounded DC-only IDCT values (one per 4x4 sub-block) to an
 * 8x4 pixel strip at a0, saturating to the unsigned pixel range.
 * a0 = dst pixel pointer (rows FDEC_STRIDE apart), a1 = dctcoef DC pair.
 * LASX variant: both DCs broadcast into one 256-bit register.
 */
.macro add8x4_idct_dc_lasx
xvldrepl.h xr11, a1, 0 // broadcast dct[0]
xvldrepl.h xr12, a1, 2 // broadcast dct[1]
xvilvl.d xr12, xr12, xr11 // per-64-bit-lane: {dct[1], dct[0]}
xvsrari.h xr12, xr12, 6 // rounding shift: (dc + 32) >> 6
fld.d f0, a0, 0 // load 4 rows of 8 pixels each
fld.d f1, a0, FDEC_STRIDE
fld.d f2, a0, FDEC_STRIDE * 2
fld.d f3, a0, FDEC_STRIDE * 3
xvinsve0.d xr0, xr1, 1 // pack rows 0/1 and 2/3 together
xvinsve0.d xr2, xr3, 1
vext2xv.hu.bu xr0, xr0 // widen u8 pixels to u16
vext2xv.hu.bu xr2, xr2
xvadd.h xr0, xr0, xr12 // add the per-sub-block DC
xvadd.h xr2, xr2, xr12
xvssrarni.bu.h xr2, xr0, 0 // saturate back to u8
xvstelm.d xr2, a0, 0, 0 // store the four rows back
xvstelm.d xr2, a0, FDEC_STRIDE, 2
xvstelm.d xr2, a0, FDEC_STRIDE * 2, 1
xvstelm.d xr2, a0, FDEC_STRIDE * 3, 3
.endm
/* LSX variant of the same 8x4 DC add (128-bit vectors). */
.macro add8x4_idct_dc_lsx
vldrepl.h vr11, a1, 0 // broadcast dct[0]
vldrepl.h vr12, a1, 2 // broadcast dct[1]
vilvl.d vr12, vr12, vr11 // {dct[1] x4, dct[0] x4}
vsrari.h vr12, vr12, 6 // rounding shift: (dc + 32) >> 6
fld.d f0, a0, 0 // load 4 rows of 8 pixels each
fld.d f1, a0, FDEC_STRIDE
fld.d f2, a0, FDEC_STRIDE * 2
fld.d f3, a0, FDEC_STRIDE * 3
vsllwil.hu.bu vr0, vr0, 0 // widen u8 pixels to u16
vsllwil.hu.bu vr1, vr1, 0
vsllwil.hu.bu vr2, vr2, 0
vsllwil.hu.bu vr3, vr3, 0
vadd.h vr0, vr0, vr12 // add the per-sub-block DC
vadd.h vr1, vr1, vr12
vadd.h vr2, vr2, vr12
vadd.h vr3, vr3, vr12
vssrarni.bu.h vr2, vr0, 0 // saturate rows 0/2 back to u8
vssrarni.bu.h vr3, vr1, 0 // saturate rows 1/3 back to u8
vstelm.d vr2, a0, 0, 0
vstelm.d vr3, a0, FDEC_STRIDE, 0
vstelm.d vr2, a0, FDEC_STRIDE * 2, 1
vstelm.d vr3, a0, FDEC_STRIDE * 3, 1
.endm
/*
 * void add8x8_idct_dc( pixel *p_dst, dctcoef dct[4] )
 * Applies the 8x4 helper twice: upper half with dct[0..1],
 * lower half (4 rows down) with dct[2..3].
 */
function_x264 add8x8_idct_dc_lasx
add8x4_idct_dc_lasx
addi.d a0, a0, FDEC_STRIDE * 4 // advance to lower 8x4 half
addi.d a1, a1, 4 // next two dctcoef (int16) DCs
add8x4_idct_dc_lasx
endfunc_x264
function_x264 add8x8_idct_dc_lsx
add8x4_idct_dc_lsx
addi.d a0, a0, FDEC_STRIDE * 4 // advance to lower 8x4 half
addi.d a1, a1, 4 // next two dctcoef (int16) DCs
add8x4_idct_dc_lsx
endfunc_x264
/*
 * Add four rounded DC-only IDCT values (one per 4x4 sub-block across
 * the width) to a 16x4 pixel strip at \a0. \a1 points to 4 dctcoef DCs.
 */
.macro add_16x16_idct_dc_core_lasx a0, a1
vldrepl.h vr11, \a1, 0 // broadcast dct[0..3], one per 64-bit group
vldrepl.h vr12, \a1, 2
vldrepl.h vr13, \a1, 4
vldrepl.h vr14, \a1, 6
xvinsve0.d xr11, xr12, 1
xvinsve0.d xr11, xr13, 2
xvinsve0.d xr11, xr14, 3
xvsrari.h xr11, xr11, 6 // rounding shift: (dc + 32) >> 6
vld vr0, \a0, 0 // load 4 rows of 16 pixels
vld vr1, \a0, FDEC_STRIDE
vld vr2, \a0, FDEC_STRIDE * 2
vld vr3, \a0, FDEC_STRIDE * 3
vext2xv.hu.bu xr0, xr0 // widen u8 pixels to u16
vext2xv.hu.bu xr1, xr1
vext2xv.hu.bu xr2, xr2
vext2xv.hu.bu xr3, xr3
xvadd.h xr0, xr0, xr11 // add the per-column-group DC
xvadd.h xr1, xr1, xr11
xvadd.h xr2, xr2, xr11
xvadd.h xr3, xr3, xr11
xvssrarni.bu.h xr1, xr0, 0 // saturate back to u8
xvssrarni.bu.h xr3, xr2, 0
xvpermi.d xr4, xr1, 0xD8 // undo the lane interleave per row
xvpermi.d xr5, xr1, 0x8D
xvpermi.d xr6, xr3, 0xD8
xvpermi.d xr7, xr3, 0x8D
vst vr4, \a0, 0
vst vr5, \a0, FDEC_STRIDE
vst vr6, \a0, FDEC_STRIDE * 2
vst vr7, \a0, FDEC_STRIDE * 3
.endm
/*
 * void add16x16_idct_dc( pixel *p_dst, dctcoef dct[16] )
 * Four 16x4 strips, consuming 4 DC coefficients (8 bytes) per strip.
 */
function_x264 add16x16_idct_dc_lasx
add_16x16_idct_dc_core_lasx a0, a1
addi.d a0, a0, FDEC_STRIDE * 4 // next 16x4 strip
addi.d a1, a1, 8 // next 4 dctcoef DCs
add_16x16_idct_dc_core_lasx a0, a1
addi.d a0, a0, FDEC_STRIDE * 4
addi.d a1, a1, 8
add_16x16_idct_dc_core_lasx a0, a1
addi.d a0, a0, FDEC_STRIDE * 4
addi.d a1, a1, 8
add_16x16_idct_dc_core_lasx a0, a1
endfunc_x264
/*
 * LSX version of the 16x4 DC add: left 8 columns use DCs dct[0..1]
 * (vr12), right 8 columns use dct[2..3] (vr14).
 */
.macro add_16x16_idct_dc_core_lsx a0, a1
vldrepl.h vr11, \a1, 0 // broadcast the four DC coefficients
vldrepl.h vr12, \a1, 2
vldrepl.h vr13, \a1, 4
vldrepl.h vr14, \a1, 6
vpermi.w vr12, vr11, 0x44 // vr12 = {dct[1] x4, dct[0] x4}
vpermi.w vr14, vr13, 0x44 // vr14 = {dct[3] x4, dct[2] x4}
vsrari.h vr12, vr12, 6 // rounding shift: (dc + 32) >> 6
vsrari.h vr14, vr14, 6
vld vr0, \a0, 0 // load 4 rows of 16 pixels
vld vr1, \a0, FDEC_STRIDE
vld vr2, \a0, FDEC_STRIDE * 2
vld vr3, \a0, FDEC_STRIDE * 3
vexth.hu.bu vr5, vr0 // widen: high 8 pixels -> vr5..vr8,
vsllwil.hu.bu vr0, vr0, 0 // low 8 pixels stay in vr0..vr3
vexth.hu.bu vr6, vr1
vsllwil.hu.bu vr1, vr1, 0
vexth.hu.bu vr7, vr2
vsllwil.hu.bu vr2, vr2, 0
vexth.hu.bu vr8, vr3
vsllwil.hu.bu vr3, vr3, 0
vadd.h vr0, vr0, vr12 // left half + dct[0..1]
vadd.h vr5, vr5, vr14 // right half + dct[2..3]
vadd.h vr1, vr1, vr12
vadd.h vr6, vr6, vr14
vadd.h vr2, vr2, vr12
vadd.h vr7, vr7, vr14
vadd.h vr3, vr3, vr12
vadd.h vr8, vr8, vr14
vssrarni.bu.h vr1, vr0, 0 // saturate back to u8
vssrarni.bu.h vr6, vr5, 0
vssrarni.bu.h vr3, vr2, 0
vssrarni.bu.h vr8, vr7, 0
vpermi.w vr9, vr6, 0x0E // re-assemble full 16-byte rows
vpermi.w vr6, vr1, 0x44
vpermi.w vr9, vr1, 0x4E
vpermi.w vr10, vr8, 0x0E
vpermi.w vr8, vr3, 0x44
vpermi.w vr10, vr3, 0x4E
vst vr6, \a0, 0
vst vr9, \a0, FDEC_STRIDE
vst vr8, \a0, FDEC_STRIDE * 2
vst vr10, \a0, FDEC_STRIDE * 3
.endm
/* void add16x16_idct_dc( pixel *p_dst, dctcoef dct[16] ) — LSX. */
function_x264 add16x16_idct_dc_lsx
add_16x16_idct_dc_core_lsx a0, a1
addi.d a0, a0, FDEC_STRIDE * 4 // next 16x4 strip
addi.d a1, a1, 8 // next 4 dctcoef DCs
add_16x16_idct_dc_core_lsx a0, a1
addi.d a0, a0, FDEC_STRIDE * 4
addi.d a1, a1, 8
add_16x16_idct_dc_core_lsx a0, a1
addi.d a0, a0, FDEC_STRIDE * 4
addi.d a1, a1, 8
add_16x16_idct_dc_core_lsx a0, a1
endfunc_x264
/*
* void idct4x4dc( dctcoef d[16] )
*/
/*
 * void idct4x4dc( dctcoef d[16] ) — LASX.
 * In-place 4x4 inverse Hadamard on the DC block: two 1-D 4-point
 * butterfly passes (pairwise add/sub at halfword then word width),
 * with a last64_shuf permute between passes to reorder lanes.
 * No scaling is applied (contrast with dct4x4dc_lasx below).
 */
function_x264 idct4x4dc_lasx
la.local t0, last64_shuf
xvld xr0, a0, 0 // all 16 dctcoef in one 256-bit register
xvld xr20, t0, 0 // word-permute control for mid-pass reorder
xvshuf4i.b xr1, xr0, 0x4E // pair-swapped copy for the subtract legs
xvhaddw.w.h xr2, xr0, xr0 // first butterfly stage: sums ...
xvhsubw.w.h xr3, xr1, xr1 // ... and differences
xvshuf4i.h xr2, xr2, 0x4E
xvshuf4i.h xr3, xr3, 0x4E
xvhaddw.d.w xr4, xr2, xr2 // second butterfly stage
xvhsubw.d.w xr5, xr2, xr2
xvhsubw.d.w xr6, xr3, xr3
xvhaddw.d.w xr7, xr3, xr3
xvpickev.w xr8, xr5, xr4 // compact stage results back to words
xvpickev.w xr9, xr7, xr6
xvpickev.h xr10, xr9, xr8 // ... and to halfwords
xvperm.w xr10, xr10, xr20 // reorder for the transposed second pass
xvshuf4i.b xr11, xr10, 0x4E // second 1-D pass, same structure
xvhaddw.w.h xr12, xr10, xr10
xvhsubw.w.h xr13, xr11, xr11
xvshuf4i.h xr12, xr12, 0x4E
xvshuf4i.h xr13, xr13, 0x4E
xvhaddw.d.w xr14, xr12, xr12
xvhsubw.d.w xr15, xr12, xr12
xvhsubw.d.w xr16, xr13, xr13
xvhaddw.d.w xr17, xr13, xr13
xvpackev.w xr18, xr15, xr14
xvpackev.w xr19, xr17, xr16
xvilvl.d xr0, xr19, xr18 // restore original element order
xvilvh.d xr1, xr19, xr18
xvpickev.h xr2, xr1, xr0
xvst xr2, a0, 0 // write transformed block in place
endfunc_x264
/*
 * void idct4x4dc( dctcoef d[16] ) — LSX.
 * Same inverse Hadamard as the LASX version, but the 16 coefficients
 * are split across two 128-bit registers (vr0 = d[0..7], vr20 =
 * d[8..15]); every stage is therefore duplicated for both halves.
 */
function_x264 idct4x4dc_lsx
vld vr0, a0, 0 // d[0..7]
vld vr20, a0, 16 // d[8..15]
vshuf4i.b vr1, vr0, 0x4E // pair-swapped copies for subtract legs
vshuf4i.b vr11, vr20, 0x4E
vhaddw.w.h vr2, vr0, vr0 // first butterfly: sums ...
vhaddw.w.h vr12, vr20, vr20
vhsubw.w.h vr3, vr1, vr1 // ... and differences
vhsubw.w.h vr13, vr11, vr11
vshuf4i.h vr2, vr2, 0x4E
vshuf4i.h vr12, vr12, 0x4E
vshuf4i.h vr3, vr3, 0x4E
vshuf4i.h vr13, vr13, 0x4E
vhaddw.d.w vr4, vr2, vr2 // second butterfly stage
vhaddw.d.w vr14, vr12, vr12
vhsubw.d.w vr5, vr2, vr2
vhsubw.d.w vr15, vr12, vr12
vhsubw.d.w vr6, vr3, vr3
vhsubw.d.w vr16, vr13, vr13
vhaddw.d.w vr7, vr3, vr3
vhaddw.d.w vr17, vr13, vr13
vpickev.w vr8, vr5, vr4 // compact back to word/halfword lanes
vpickev.w vr18, vr15, vr14
vpickev.w vr9, vr7, vr6
vpickev.w vr19, vr17, vr16
vpickev.h vr10, vr9, vr8
vpickev.h vr21, vr19, vr18
vpermi.w vr22, vr21, 0x0E // cross-register reorder (the LSX
vpermi.w vr21, vr10, 0x44 // equivalent of the LASX xvperm.w
vpermi.w vr22, vr10, 0x4E // with last64_shuf)
vpermi.w vr21, vr21, 0xD8
vpermi.w vr22, vr22, 0xD8
vshuf4i.b vr11, vr21, 0x4E // second 1-D pass, same structure
vshuf4i.b vr12, vr22, 0x4E
vhaddw.w.h vr21, vr21, vr21
vhaddw.w.h vr22, vr22, vr22
vhsubw.w.h vr11, vr11, vr11
vhsubw.w.h vr12, vr12, vr12
vshuf4i.h vr21, vr21, 0x4E
vshuf4i.h vr22, vr22, 0x4E
vshuf4i.h vr11, vr11, 0x4E
vshuf4i.h vr12, vr12, 0x4E
vhaddw.d.w vr13, vr21, vr21
vhaddw.d.w vr14, vr22, vr22
vhsubw.d.w vr15, vr21, vr21
vhsubw.d.w vr16, vr22, vr22
vhsubw.d.w vr17, vr11, vr11
vhsubw.d.w vr18, vr12, vr12
vhaddw.d.w vr19, vr11, vr11
vhaddw.d.w vr20, vr12, vr12
vpackev.w vr7, vr15, vr13
vpackev.w vr8, vr16, vr14
vpackev.w vr9, vr19, vr17
vpackev.w vr10, vr20, vr18
vilvl.d vr0, vr9, vr7 // restore original element order
vilvl.d vr4, vr10, vr8
vilvh.d vr1, vr9, vr7
vilvh.d vr5, vr10, vr8
vpickev.h vr2, vr1, vr0
vpickev.h vr3, vr5, vr4
vst vr2, a0, 0 // write transformed block in place
vst vr3, a0, 16
endfunc_x264
/*
* void dct4x4dc( dctcoef d[16] )
*/
/*
 * void dct4x4dc( dctcoef d[16] ) — LASX.
 * Forward 4x4 Hadamard on the DC block. Identical butterfly structure
 * to idct4x4dc_lasx, plus a final rounding shift by 1 (xvsrari.w)
 * before repacking, which is the only difference between the two.
 */
function_x264 dct4x4dc_lasx
la.local t0, last64_shuf
xvld xr0, a0, 0 // all 16 dctcoef in one 256-bit register
xvld xr20, t0, 0 // word-permute control for mid-pass reorder
xvshuf4i.b xr1, xr0, 0x4E // pair-swapped copy for the subtract legs
xvhaddw.w.h xr2, xr0, xr0 // first butterfly: sums ...
xvhsubw.w.h xr3, xr1, xr1 // ... and differences
xvshuf4i.h xr2, xr2, 0x4E
xvshuf4i.h xr3, xr3, 0x4E
xvhaddw.d.w xr4, xr2, xr2 // second butterfly stage
xvhsubw.d.w xr5, xr2, xr2
xvhsubw.d.w xr6, xr3, xr3
xvhaddw.d.w xr7, xr3, xr3
xvpickev.w xr8, xr5, xr4 // compact back to word/halfword lanes
xvpickev.w xr9, xr7, xr6
xvpickev.h xr10, xr9, xr8
xvperm.w xr10, xr10, xr20 // reorder for the transposed second pass
xvshuf4i.b xr11, xr10, 0x4E // second 1-D pass, same structure
xvhaddw.w.h xr12, xr10, xr10
xvhsubw.w.h xr13, xr11, xr11
xvshuf4i.h xr12, xr12, 0x4E
xvshuf4i.h xr13, xr13, 0x4E
xvhaddw.d.w xr14, xr12, xr12
xvhsubw.d.w xr15, xr12, xr12
xvhsubw.d.w xr16, xr13, xr13
xvhaddw.d.w xr17, xr13, xr13
xvpackev.w xr18, xr15, xr14
xvpackev.w xr19, xr17, xr16
xvsrari.w xr18, xr18, 1 // rounding >>1 (forward-transform scaling)
xvsrari.w xr19, xr19, 1
xvilvl.d xr0, xr19, xr18 // restore original element order
xvilvh.d xr1, xr19, xr18
xvpickev.h xr2, xr1, xr0
xvst xr2, a0, 0 // write transformed block in place
endfunc_x264
/*
 * void dct4x4dc( dctcoef d[16] ) — LSX.
 * Forward 4x4 Hadamard, two-register variant of dct4x4dc_lasx:
 * same butterflies as idct4x4dc_lsx plus the rounding >>1 at the end.
 */
function_x264 dct4x4dc_lsx
vld vr0, a0, 0 // d[0..7]
vld vr20, a0, 16 // d[8..15]
vshuf4i.b vr1, vr0, 0x4E // pair-swapped copies for subtract legs
vshuf4i.b vr11, vr20, 0x4E
vhaddw.w.h vr2, vr0, vr0 // first butterfly: sums ...
vhaddw.w.h vr12, vr20, vr20
vhsubw.w.h vr3, vr1, vr1 // ... and differences
vhsubw.w.h vr13, vr11, vr11
vshuf4i.h vr2, vr2, 0x4E
vshuf4i.h vr12, vr12, 0x4E
vshuf4i.h vr3, vr3, 0x4E
vshuf4i.h vr13, vr13, 0x4E
vhaddw.d.w vr4, vr2, vr2 // second butterfly stage
vhaddw.d.w vr14, vr12, vr12
vhsubw.d.w vr5, vr2, vr2
vhsubw.d.w vr15, vr12, vr12
vhsubw.d.w vr6, vr3, vr3
vhsubw.d.w vr16, vr13, vr13
vhaddw.d.w vr7, vr3, vr3
vhaddw.d.w vr17, vr13, vr13
vpickev.w vr8, vr5, vr4 // compact back to word/halfword lanes
vpickev.w vr18, vr15, vr14
vpickev.w vr9, vr7, vr6
vpickev.w vr19, vr17, vr16
vpickev.h vr10, vr9, vr8
vpickev.h vr21, vr19, vr18
vpermi.w vr22, vr21, 0x0E // cross-register mid-pass reorder
vpermi.w vr21, vr10, 0x44
vpermi.w vr22, vr10, 0x4E
vpermi.w vr21, vr21, 0xD8
vpermi.w vr22, vr22, 0xD8
vshuf4i.b vr11, vr21, 0x4E // second 1-D pass, same structure
vshuf4i.b vr12, vr22, 0x4E
vhaddw.w.h vr21, vr21, vr21
vhaddw.w.h vr22, vr22, vr22
vhsubw.w.h vr11, vr11, vr11
vhsubw.w.h vr12, vr12, vr12
vshuf4i.h vr21, vr21, 0x4E
vshuf4i.h vr22, vr22, 0x4E
vshuf4i.h vr11, vr11, 0x4E
vshuf4i.h vr12, vr12, 0x4E
vhaddw.d.w vr13, vr21, vr21
vhaddw.d.w vr14, vr22, vr22
vhsubw.d.w vr15, vr21, vr21
vhsubw.d.w vr16, vr22, vr22
vhsubw.d.w vr17, vr11, vr11
vhsubw.d.w vr18, vr12, vr12
vhaddw.d.w vr19, vr11, vr11
vhaddw.d.w vr20, vr12, vr12
vpackev.w vr7, vr15, vr13
vpackev.w vr8, vr16, vr14
vpackev.w vr9, vr19, vr17
vpackev.w vr10, vr20, vr18
vsrari.w vr7, vr7, 1 // rounding >>1 (forward-transform scaling)
vsrari.w vr8, vr8, 1
vsrari.w vr9, vr9, 1
vsrari.w vr10, vr10, 1
vilvl.d vr0, vr9, vr7 // restore original element order
vilvl.d vr4, vr10, vr8
vilvh.d vr1, vr9, vr7
vilvh.d vr10, vr10, vr8
vpickev.h vr2, vr1, vr0
vpickev.h vr3, vr10, vr4
vst vr2, a0, 0 // write transformed block in place
vst vr3, a0, 16
endfunc_x264
/*
 * Load two rows from each of the enc (a1, FENC_STRIDE) and dec
 * (a2, FDEC_STRIDE) pixel planes, widen u8 -> i16, and produce the
 * two rows of differences enc - dec in \data1/\data2.
 * Advances a1/a2 by two rows. Requires vr8 == 0 (zero for widening).
 * Clobbers vr0-vr3.
 */
.macro LSX_LOAD_PIX_2 data1, data2
vld vr0, a1, 0
vld vr1, a1, FENC_STRIDE
vld vr2, a2, 0
vld vr3, a2, FDEC_STRIDE
vilvl.b vr0, vr8, vr0 // widen low 8 pixels u8 -> u16
vilvl.b vr1, vr8, vr1
vilvl.b vr2, vr8, vr2
vilvl.b vr3, vr8, vr3
vsub.h \data1, vr0, vr2 // residual = enc - dec
vsub.h \data2, vr1, vr3
addi.d a1, a1, FENC_STRIDE * 2
addi.d a2, a2, FDEC_STRIDE * 2
.endm
/*
 * One 1-D 8-point H.264 DCT pass over the eight rows held in
 * vr12-vr19 (in place). Implements the standard a/b butterfly
 * network with the >>1 and >>2 scaled terms of the 8x8 transform.
 * Clobbers vr0-vr11, vr20, vr21. Uses LSX_SUMSUB_H (defined earlier
 * in this file).
 */
.macro LSX_DCT8_1D
LSX_SUMSUB_H vr0, vr8, vr12, vr19 // s07/d07 ... s34/d34 butterflies
LSX_SUMSUB_H vr1, vr9, vr13, vr18
LSX_SUMSUB_H vr2, vr10, vr14, vr17
LSX_SUMSUB_H vr3, vr11, vr15, vr16
LSX_SUMSUB_H vr4, vr6, vr0, vr3
LSX_SUMSUB_H vr5, vr7, vr1, vr2
vsrai.h vr20, vr8, 1 // odd-part terms with >>1 weights
vadd.h vr20, vr20, vr9
vadd.h vr20, vr20, vr10
vadd.h vr0, vr20, vr8
vsrai.h vr20, vr10, 1
vsub.h vr21, vr8, vr11
vsub.h vr21, vr21, vr10
vsub.h vr1, vr21, vr20
vsrai.h vr20, vr9, 1
vadd.h vr21, vr8, vr11
vsub.h vr21, vr21, vr9
vsub.h vr2, vr21, vr20
vsrai.h vr20, vr11, 1
vsub.h vr21, vr9, vr10
vadd.h vr21, vr21, vr11
vadd.h vr3, vr21, vr20
vadd.h vr12, vr4, vr5 // final outputs: even rows ...
vsrai.h vr20, vr3, 2 // ... and odd rows with >>2 weights
vadd.h vr13, vr0, vr20
vsrai.h vr20, vr7, 1
vadd.h vr14, vr6, vr20
vsrai.h vr20, vr2, 2
vadd.h vr15, vr1, vr20
vsub.h vr16, vr4, vr5
vsrai.h vr20, vr1, 2
vsub.h vr17, vr2, vr20
vsrai.h vr20, vr6, 1
vsub.h vr18, vr20, vr7
vsrai.h vr20, vr0, 2
vsub.h vr19, vr20, vr3
.endm
/*
 * void sub8x8_dct8( dctcoef dct[64], pixel *pix1, pixel *pix2 )
 * a0 = dct output, a1 = enc pixels, a2 = dec pixels.
 * Residual -> row DCT -> transpose -> column DCT -> store 64 coeffs.
 */
function_x264 sub8x8_dct8_lsx
vxor.v vr8, vr0, vr0 // vr8 = 0, required by LSX_LOAD_PIX_2
// vr12 ... vr19
LSX_LOAD_PIX_2 vr12, vr13
LSX_LOAD_PIX_2 vr14, vr15
LSX_LOAD_PIX_2 vr16, vr17
LSX_LOAD_PIX_2 vr18, vr19
LSX_DCT8_1D // horizontal pass
LSX_TRANSPOSE8x8_H vr12, vr13, vr14, vr15, vr16, vr17, vr18, vr19, \
vr12, vr13, vr14, vr15, vr16, vr17, vr18, vr19, \
vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7
LSX_DCT8_1D // vertical pass (after transpose)
vst vr12, a0, 0
vst vr13, a0, 16
vst vr14, a0, 32
vst vr15, a0, 48
vst vr16, a0, 64
vst vr17, a0, 80
vst vr18, a0, 96
vst vr19, a0, 112
endfunc_x264
/*
 * LASX residual loader: two rows of 16 pixels from enc (a1) and dec
 * (a2), each 8-pixel half duplicated into its own 128-bit lane
 * (xvpermi.d 0x50), widened u8 -> i16, and differenced into
 * \data1/\data2. Advances a1/a2 by two rows. Clobbers xr0-xr4.
 */
.macro LASX_LOAD_PIX_2 data1, data2
xvld xr0, a1, 0
xvld xr1, a1, FENC_STRIDE
xvld xr2, a2, 0
xvld xr3, a2, FDEC_STRIDE
xvpermi.d xr0, xr0, 0x50 // lane0 = pixels 0..7, lane1 = pixels 8..15
xvpermi.d xr1, xr1, 0x50
xvpermi.d xr2, xr2, 0x50
xvpermi.d xr3, xr3, 0x50
xvxor.v xr4, xr0, xr0 // xr4 = 0 for the u8 -> u16 widening
xvilvl.b xr0, xr4, xr0
xvilvl.b xr1, xr4, xr1
xvilvl.b xr2, xr4, xr2
xvilvl.b xr3, xr4, xr3
xvsub.h \data1, xr0, xr2 // residual = enc - dec
xvsub.h \data2, xr1, xr3
addi.d a1, a1, FENC_STRIDE * 2
addi.d a2, a2, FDEC_STRIDE * 2
.endm
/* Butterfly: \sum = \a + \b, \diff = \a - \b (halfword lanes). */
.macro LASX_SUMSUB_H sum, diff, a, b
xvadd.h \sum, \a, \b
xvsub.h \diff, \a, \b
.endm
/*
 * One 1-D 8-point DCT pass over xr12-xr19 (in place); the LASX twin
 * of LSX_DCT8_1D, transforming two 8x8 blocks at once (one per
 * 128-bit lane). Clobbers xr0-xr11, xr20, xr21.
 */
.macro LASX_DCT8_1D
LASX_SUMSUB_H xr0, xr8, xr12, xr19 // initial add/sub butterflies
LASX_SUMSUB_H xr1, xr9, xr13, xr18
LASX_SUMSUB_H xr2, xr10, xr14, xr17
LASX_SUMSUB_H xr3, xr11, xr15, xr16
LASX_SUMSUB_H xr4, xr6, xr0, xr3
LASX_SUMSUB_H xr5, xr7, xr1, xr2
xvsrai.h xr20, xr8, 1 // odd-part terms with >>1 weights
xvadd.h xr20, xr20, xr9
xvadd.h xr20, xr20, xr10
xvadd.h xr0, xr20, xr8
xvsrai.h xr20, xr10, 1
xvsub.h xr21, xr8, xr11
xvsub.h xr21, xr21, xr10
xvsub.h xr1, xr21, xr20
xvsrai.h xr20, xr9, 1
xvadd.h xr21, xr8, xr11
xvsub.h xr21, xr21, xr9
xvsub.h xr2, xr21, xr20
xvsrai.h xr20, xr11, 1
xvsub.h xr21, xr9, xr10
xvadd.h xr21, xr21, xr11
xvadd.h xr3, xr21, xr20
xvadd.h xr12, xr4, xr5 // final outputs, with >>2-weighted terms
xvsrai.h xr20, xr3, 2
xvadd.h xr13, xr0, xr20
xvsrai.h xr20, xr7, 1
xvadd.h xr14, xr6, xr20
xvsrai.h xr20, xr2, 2
xvadd.h xr15, xr1, xr20
xvsub.h xr16, xr4, xr5
xvsrai.h xr20, xr1, 2
xvsub.h xr17, xr2, xr20
xvsrai.h xr20, xr6, 1
xvsub.h xr18, xr20, xr7
xvsrai.h xr20, xr0, 2
xvsub.h xr19, xr20, xr3
.endm
/*
 * Transform a 16x8 strip: residual load, row DCT, transpose, column
 * DCT, then de-interleave the two per-lane 8x8 blocks into the first
 * 256 bytes of the dct output at a0 (left block first, right block
 * at a0+128).
 */
.macro SUB16x8_DCT8_LASX
LASX_LOAD_PIX_2 xr12, xr13
LASX_LOAD_PIX_2 xr14, xr15
LASX_LOAD_PIX_2 xr16, xr17
LASX_LOAD_PIX_2 xr18, xr19
LASX_DCT8_1D // horizontal pass
LASX_TRANSPOSE8x8_H xr12, xr13, xr14, xr15, xr16, xr17, xr18, xr19, \
xr12, xr13, xr14, xr15, xr16, xr17, xr18, xr19, \
xr0, xr1, xr2, xr3, xr4, xr5, xr6, xr7
LASX_DCT8_1D // vertical pass (after transpose)
xmov xr0, xr13 // save high lanes before repacking
xvpermi.q xr13, xr12, 0x20 // low lanes of xr12/xr13 -> left block
xvst xr13, a0, 0
xmov xr1, xr15
xvpermi.q xr15, xr14, 0x20
xvst xr15, a0, 32
xmov xr2, xr17
xvpermi.q xr17, xr16, 0x20
xvst xr17, a0, 64
xmov xr3, xr19
xvpermi.q xr19, xr18, 0x20
xvst xr19, a0, 96
xvpermi.q xr12, xr0, 0x13 // high lanes -> right 8x8 block
xvpermi.q xr14, xr1, 0x13
xvpermi.q xr16, xr2, 0x13
xvpermi.q xr18, xr3, 0x13
xvst xr12, a0, 128
xvst xr14, a0, 160
xvst xr16, a0, 192
xvst xr18, a0, 224
.endm
/*
 * void sub16x16_dct8( dctcoef dct[4][64], pixel *pix1, pixel *pix2 )
 * Two 16x8 strips; a1/a2 are rebased from the saved originals since
 * the strip macro advances them internally.
 */
function_x264 sub16x16_dct8_lasx
move t1, a1 // keep original pixel bases
move t3, a2
SUB16x8_DCT8_LASX
addi.d a0, a0, 256 // dct[2], dct[3]
addi.d a1, t1, FENC_STRIDE * 8 // lower 16x8 strip
addi.d a2, t3, FDEC_STRIDE * 8
SUB16x8_DCT8_LASX
endfunc_x264
/*
 * LSX residual loader for a 16-wide strip: two rows from enc (a1) and
 * dec (a2); left 8 pixels -> \data1/\data2, right 8 pixels ->
 * \data3/\data4, all widened u8 -> i16 and differenced (enc - dec).
 * Advances a1/a2 by two rows. Clobbers vr0-vr11, vr30, vr31.
 */
.macro LSX_LOAD_PIX_22 data1, data2, data3, data4
vld vr0, a1, 0
vld vr4, a1, 16
vld vr1, a1, FENC_STRIDE
vld vr5, a1, FENC_STRIDE + 16
vld vr2, a2, 0
vld vr6, a2, 16
vld vr3, a2, FDEC_STRIDE
vld vr7, a2, FDEC_STRIDE + 16
vpermi.w vr8, vr0, 0x0E // split each row: low half stays,
vpermi.w vr0, vr0, 0x44 // high half moves to its own register
vpermi.w vr8, vr8, 0x44
vpermi.w vr9, vr1, 0x0E
vpermi.w vr1, vr1, 0x44
vpermi.w vr9, vr9, 0x44
vpermi.w vr10, vr2, 0x0E
vpermi.w vr2, vr2, 0x44
vpermi.w vr10, vr10, 0x44
vpermi.w vr11, vr3, 0x0E
vpermi.w vr3, vr3, 0x44
vpermi.w vr11, vr11, 0x44
vxor.v vr30, vr0, vr0 // zeros for u8 -> u16 widening
vxor.v vr31, vr8, vr8
vilvl.b vr0, vr30, vr0
vilvl.b vr8, vr31, vr8
vilvl.b vr1, vr30, vr1
vilvl.b vr9, vr31, vr9
vilvl.b vr2, vr30, vr2
vilvl.b vr10, vr31, vr10
vilvl.b vr3, vr30, vr3
vilvl.b vr11, vr31, vr11
vsub.h \data1, vr0, vr2 // residual = enc - dec
vsub.h \data3, vr8, vr10
vsub.h \data2, vr1, vr3
vsub.h \data4, vr9, vr11
addi.d a1, a1, FENC_STRIDE * 2
addi.d a2, a2, FDEC_STRIDE * 2
.endm
/*
 * Transform a 16x8 strip with LSX: left 8x8 block first (vr12-vr19),
 * then the right block (parked in vr22-vr29 during the first pass).
 * Each block: row DCT, transpose, column DCT, store 128 bytes.
 */
.macro SUB16x8_DCT8_LSX
LSX_LOAD_PIX_22 vr12, vr13, vr22, vr23
LSX_LOAD_PIX_22 vr14, vr15, vr24, vr25
LSX_LOAD_PIX_22 vr16, vr17, vr26, vr27
LSX_LOAD_PIX_22 vr18, vr19, vr28, vr29
LSX_DCT8_1D // left block, horizontal pass
LSX_TRANSPOSE8x8_H vr12, vr13, vr14, vr15, vr16, vr17, vr18, vr19, \
vr12, vr13, vr14, vr15, vr16, vr17, vr18, vr19, \
vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7
LSX_DCT8_1D // left block, vertical pass
vst vr12, a0, 0
vst vr13, a0, 16
vst vr14, a0, 32
vst vr15, a0, 48
vst vr16, a0, 64
vst vr17, a0, 80
vst vr18, a0, 96
vst vr19, a0, 112
vmov vr12, vr22 // move right-block residual into place
vmov vr13, vr23
vmov vr14, vr24
vmov vr15, vr25
vmov vr16, vr26
vmov vr17, vr27
vmov vr18, vr28
vmov vr19, vr29
LSX_DCT8_1D // right block, horizontal pass
LSX_TRANSPOSE8x8_H vr12, vr13, vr14, vr15, vr16, vr17, vr18, vr19, \
vr12, vr13, vr14, vr15, vr16, vr17, vr18, vr19, \
vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7
LSX_DCT8_1D // right block, vertical pass
vst vr12, a0, 128
vst vr13, a0, 144
vst vr14, a0, 160
vst vr15, a0, 176
vst vr16, a0, 192
vst vr17, a0, 208
vst vr18, a0, 224
vst vr19, a0, 240
.endm
/*
 * void sub16x16_dct8( dctcoef dct[4][64], pixel *pix1, pixel *pix2 )
 * LSX version. vr24-vr31 alias the callee-saved f24-f31 low halves,
 * so their 64-bit FP parts are spilled around the computation.
 */
function_x264 sub16x16_dct8_lsx
addi.d sp, sp, -64
fst.d f24, sp, 0 // save callee-saved FP registers
fst.d f25, sp, 8
fst.d f26, sp, 16
fst.d f27, sp, 24
fst.d f28, sp, 32
fst.d f29, sp, 40
fst.d f30, sp, 48
fst.d f31, sp, 56
move t1, a1 // keep original pixel bases
move t3, a2
SUB16x8_DCT8_LSX
addi.d a0, a0, 256 // dct[2], dct[3]
addi.d a1, t1, FENC_STRIDE * 8 // lower 16x8 strip
addi.d a2, t3, FDEC_STRIDE * 8
SUB16x8_DCT8_LSX
fld.d f24, sp, 0 // restore callee-saved FP registers
fld.d f25, sp, 8
fld.d f26, sp, 16
fld.d f27, sp, 24
fld.d f28, sp, 32
fld.d f29, sp, 40
fld.d f30, sp, 48
fld.d f31, sp, 56
addi.d sp, sp, 64
endfunc_x264
/*
* void zigzag_scan_4x4_frame( dctcoef level[16], dctcoef dct[16] )
*/
/*
 * void zigzag_scan_4x4_frame( dctcoef level[16], dctcoef dct[16] ) — LASX.
 * Reorders the 16 coefficients into frame zigzag order in one
 * 16-lane halfword shuffle driven by the zigzag_scan4 index table.
 */
function_x264 zigzag_scan_4x4_frame_lasx
xvld xr1, a1, 0 // dct[0..15]
xvor.v xr2, xr1, xr1
xvpermi.q xr2, xr2, 0x13 // xr2: both lanes = dct[8..15]
xvpermi.q xr1, xr1, 0x02 // xr1: both lanes = dct[0..7]
la.local t0, zigzag_scan4
xvld xr3, t0, 0 // shuffle indices (consumed as control)
xvshuf.h xr3, xr2, xr1 // gather level[i] = dct[zigzag[i]]
xvst xr3, a0, 0
endfunc_x264
/*
 * void zigzag_scan_4x4_frame( dctcoef level[16], dctcoef dct[16] ) — LSX.
 * level[i] = dct[zigzag[i]]: two vshuf.h gathers over the pair
 * {vr2 = dct[8..15], vr1 = dct[0..7]} using the zigzag_scan4 index
 * table (indices 0-7 select from vr1, 8-15 from vr2).
 * Fix: dropped the dead `vor.v vr3, vr1, vr1` copy (vr3 was never
 * read) and the redundant `vor.v vr4, vr2, vr2` copy — vshuf.h only
 * overwrites its control/destination register, so vr2 can be used as
 * a source directly. Outputs are unchanged.
 */
function_x264 zigzag_scan_4x4_frame_lsx
vld vr1, a1, 0 // dct[0..7]
vld vr2, a1, 16 // dct[8..15]
la.local t0, zigzag_scan4
vld vr5, t0, 0 // indices for level[0..7]
vld vr6, t0, 16 // indices for level[8..15]
vshuf.h vr5, vr2, vr1 // gather first 8 coefficients
vshuf.h vr6, vr2, vr1 // gather last 8 coefficients
vst vr5, a0, 0
vst vr6, a0, 16
endfunc_x264
|
aestream/faery
| 22,216
|
src/mp4/x264/common/loongarch/loongson_asm.S
|
/*********************************************************************
* Copyright (c) 2022-2024 Loongson Technology Corporation Limited
* Contributed by Xiwei Gu <guxiwei-hf@loongson.cn>
* Shiyou Yin <yinshiyou-hf@loongson.cn>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*********************************************************************/
/*
* This file is a LoongArch assembly helper file and available under ISC
* license. It provides a large number of macros and alias to simplify
* writing assembly code, especially for LSX and LASX optimizations.
*
* Anyone can modify it or add new features for their own purposes.
* Contributing a patch will be appreciated as it might be useful for
* others as well. Send patches to loongson contributor mentioned above.
*
* MAJOR version: Usage changes, incompatible with previous version.
* MINOR version: Add new macros/functions, or bug fixes.
* MICRO version: Comment changes or implementation changes.
*/
#define LML_VERSION_MAJOR 0
#define LML_VERSION_MINOR 4
#define LML_VERSION_MICRO 0
#define ASM_PREF
#define DEFAULT_ALIGN 5
/*
*============================================================================
* macros for a specific project; set them as needed.
* The following LoongML macros are provided for reference.
*============================================================================
*/
/*
 * Define an exported function: emits .text/.align/.globl/.type and
 * the label, and installs a matching one-shot `endfunc` macro that
 * emits the return (jirl to $r1) and the .size directive, then
 * purges itself so the next `function` can redefine it.
 * Note: `;` here is the GAS statement separator, not a comment.
 */
.macro function name, align=DEFAULT_ALIGN
.macro endfunc
jirl $r0, $r1, 0x0 // return to caller ($r1 = ra)
.size ASM_PREF\name, . - ASM_PREF\name
.purgem endfunc // allow redefinition by the next function
.endm
.text ;
.align \align ;
.globl ASM_PREF\name ;
.type ASM_PREF\name, @function ;
ASM_PREF\name: ;
.endm
/*
 * Define a read-only data object in .rodata with the given alignment.
 * Installs a matching one-shot `endconst` macro that emits the .size
 * directive and purges itself.
 */
.macro const name, align=DEFAULT_ALIGN
.macro endconst
.size \name, . - \name
.purgem endconst // allow redefinition by the next const
.endm
.section .rodata
.align \align
\name:
.endm
/*
*============================================================================
* LoongArch register alias
*============================================================================
*/
#define a0 $a0
#define a1 $a1
#define a2 $a2
#define a3 $a3
#define a4 $a4
#define a5 $a5
#define a6 $a6
#define a7 $a7
#define t0 $t0
#define t1 $t1
#define t2 $t2
#define t3 $t3
#define t4 $t4
#define t5 $t5
#define t6 $t6
#define t7 $t7
#define t8 $t8
#define s0 $s0
#define s1 $s1
#define s2 $s2
#define s3 $s3
#define s4 $s4
#define s5 $s5
#define s6 $s6
#define s7 $s7
#define s8 $s8
#define zero $zero
#define sp $sp
#define ra $ra
#define fa0 $fa0
#define fa1 $fa1
#define fa2 $fa2
#define fa3 $fa3
#define fa4 $fa4
#define fa5 $fa5
#define fa6 $fa6
#define fa7 $fa7
#define ft0 $ft0
#define ft1 $ft1
#define ft2 $ft2
#define ft3 $ft3
#define ft4 $ft4
#define ft5 $ft5
#define ft6 $ft6
#define ft7 $ft7
#define ft8 $ft8
#define ft9 $ft9
#define ft10 $ft10
#define ft11 $ft11
#define ft12 $ft12
#define ft13 $ft13
#define ft14 $ft14
#define ft15 $ft15
#define fs0 $fs0
#define fs1 $fs1
#define fs2 $fs2
#define fs3 $fs3
#define fs4 $fs4
#define fs5 $fs5
#define fs6 $fs6
#define fs7 $fs7
#define f0 $f0
#define f1 $f1
#define f2 $f2
#define f3 $f3
#define f4 $f4
#define f5 $f5
#define f6 $f6
#define f7 $f7
#define f8 $f8
#define f9 $f9
#define f10 $f10
#define f11 $f11
#define f12 $f12
#define f13 $f13
#define f14 $f14
#define f15 $f15
#define f16 $f16
#define f17 $f17
#define f18 $f18
#define f19 $f19
#define f20 $f20
#define f21 $f21
#define f22 $f22
#define f23 $f23
#define f24 $f24
#define f25 $f25
#define f26 $f26
#define f27 $f27
#define f28 $f28
#define f29 $f29
#define f30 $f30
#define f31 $f31
#define vr0 $vr0
#define vr1 $vr1
#define vr2 $vr2
#define vr3 $vr3
#define vr4 $vr4
#define vr5 $vr5
#define vr6 $vr6
#define vr7 $vr7
#define vr8 $vr8
#define vr9 $vr9
#define vr10 $vr10
#define vr11 $vr11
#define vr12 $vr12
#define vr13 $vr13
#define vr14 $vr14
#define vr15 $vr15
#define vr16 $vr16
#define vr17 $vr17
#define vr18 $vr18
#define vr19 $vr19
#define vr20 $vr20
#define vr21 $vr21
#define vr22 $vr22
#define vr23 $vr23
#define vr24 $vr24
#define vr25 $vr25
#define vr26 $vr26
#define vr27 $vr27
#define vr28 $vr28
#define vr29 $vr29
#define vr30 $vr30
#define vr31 $vr31
#define xr0 $xr0
#define xr1 $xr1
#define xr2 $xr2
#define xr3 $xr3
#define xr4 $xr4
#define xr5 $xr5
#define xr6 $xr6
#define xr7 $xr7
#define xr8 $xr8
#define xr9 $xr9
#define xr10 $xr10
#define xr11 $xr11
#define xr12 $xr12
#define xr13 $xr13
#define xr14 $xr14
#define xr15 $xr15
#define xr16 $xr16
#define xr17 $xr17
#define xr18 $xr18
#define xr19 $xr19
#define xr20 $xr20
#define xr21 $xr21
#define xr22 $xr22
#define xr23 $xr23
#define xr24 $xr24
#define xr25 $xr25
#define xr26 $xr26
#define xr27 $xr27
#define xr28 $xr28
#define xr29 $xr29
#define xr30 $xr30
#define xr31 $xr31
/*
*============================================================================
* LSX/LASX synthesize instructions
*============================================================================
*/
/*
* Description : Dot product of byte vector elements
* Arguments : Inputs - vj, vk
* Outputs - vd
* Return Type - halfword
*/
/* u8 x u8 dot product: vd.h[i] = vj.bu[2i]*vk.bu[2i] + vj.bu[2i+1]*vk.bu[2i+1] */
.macro vdp2.h.bu vd, vj, vk
vmulwev.h.bu \vd, \vj, \vk // even-lane products
vmaddwod.h.bu \vd, \vj, \vk // + odd-lane products
.endm
/* u8 x s8 dot product into halfwords. */
.macro vdp2.h.bu.b vd, vj, vk
vmulwev.h.bu.b \vd, \vj, \vk
vmaddwod.h.bu.b \vd, \vj, \vk
.endm
/* s16 x s16 dot product into words. */
.macro vdp2.w.h vd, vj, vk
vmulwev.w.h \vd, \vj, \vk
vmaddwod.w.h \vd, \vj, \vk
.endm
/* LASX variants of the three dot products above. */
.macro xvdp2.h.bu xd, xj, xk
xvmulwev.h.bu \xd, \xj, \xk
xvmaddwod.h.bu \xd, \xj, \xk
.endm
.macro xvdp2.h.bu.b xd, xj, xk
xvmulwev.h.bu.b \xd, \xj, \xk
xvmaddwod.h.bu.b \xd, \xj, \xk
.endm
.macro xvdp2.w.h xd, xj, xk
xvmulwev.w.h \xd, \xj, \xk
xvmaddwod.w.h \xd, \xj, \xk
.endm
/*
* Description : Dot product & addition of halfword vector elements
* Arguments : Inputs - vj, vk
* Outputs - vd
* Return Type - twice size of input
*/
/* Dot product accumulated into vd: vd += even + odd pairwise products. */
.macro vdp2add.h.bu vd, vj, vk
vmaddwev.h.bu \vd, \vj, \vk
vmaddwod.h.bu \vd, \vj, \vk
.endm
/* u8 x s8 accumulate variant. */
.macro vdp2add.h.bu.b vd, vj, vk
vmaddwev.h.bu.b \vd, \vj, \vk
vmaddwod.h.bu.b \vd, \vj, \vk
.endm
/* s16 x s16 accumulate into words. */
.macro vdp2add.w.h vd, vj, vk
vmaddwev.w.h \vd, \vj, \vk
vmaddwod.w.h \vd, \vj, \vk
.endm
/* LASX accumulate variants. */
.macro xvdp2add.h.bu.b xd, xj, xk
xvmaddwev.h.bu.b \xd, \xj, \xk
xvmaddwod.h.bu.b \xd, \xj, \xk
.endm
.macro xvdp2add.w.h xd, xj, xk
xvmaddwev.w.h \xd, \xj, \xk
xvmaddwod.w.h \xd, \xj, \xk
.endm
/*
* Description : Range element vj[i] to vk[i] ~ vj[i]
* clip: vj > vk ? vj : vk && vj < va ? vj : va
*/
/* Clamp each halfword of vj into [vk, va]: vd = min(max(vj, vk), va). */
.macro vclip.h vd, vj, vk, va
vmax.h \vd, \vj, \vk
vmin.h \vd, \vd, \va
.endm
/* Word-lane clamp. */
.macro vclip.w vd, vj, vk, va
vmax.w \vd, \vj, \vk
vmin.w \vd, \vd, \va
.endm
/* LASX clamp variants. */
.macro xvclip.h xd, xj, xk, xa
xvmax.h \xd, \xj, \xk
xvmin.h \xd, \xd, \xa
.endm
.macro xvclip.w xd, xj, xk, xa
xvmax.w \xd, \xj, \xk
xvmin.w \xd, \xd, \xa
.endm
/*
* Description : Range element vj[i] to 0 ~ 255
* clip255: vj < 255 ? vj : 255 && vj > 0 ? vj : 0
*/
/* Clamp each signed halfword to the pixel range [0, 255]:
 * max with 0, then unsigned-saturate to 8 bits. */
.macro vclip255.h vd, vj
vmaxi.h \vd, \vj, 0
vsat.hu \vd, \vd, 7
.endm
/* Word-lane clamp to [0, 255]. */
.macro vclip255.w vd, vj
vmaxi.w \vd, \vj, 0
vsat.wu \vd, \vd, 7
.endm
/* LASX clamp-to-255 variants. */
.macro xvclip255.h xd, xj
xvmaxi.h \xd, \xj, 0
xvsat.hu \xd, \xd, 7
.endm
.macro xvclip255.w xd, xj
xvmaxi.w \xd, \xj, 0
xvsat.wu \xd, \xd, 7
.endm
/*
* Description : Store elements of vector
* vd : Data vector to be stored
* rk : Address of data storage
* ra : Offset of address
* si : Index of data in vd
*/
/* Advance the pointer register rk by ra, then store element si of vd
 * there. NOTE: rk is updated (post-use pointer bump). */
.macro vstelmx.b vd, rk, ra, si
add.d \rk, \rk, \ra
vstelm.b \vd, \rk, 0, \si
.endm
.macro vstelmx.h vd, rk, ra, si
add.d \rk, \rk, \ra
vstelm.h \vd, \rk, 0, \si
.endm
.macro vstelmx.w vd, rk, ra, si
add.d \rk, \rk, \ra
vstelm.w \vd, \rk, 0, \si
.endm
.macro vstelmx.d vd, rk, ra, si
add.d \rk, \rk, \ra
vstelm.d \vd, \rk, 0, \si
.endm
/* Register-to-register move (LSX), implemented as self-OR. */
.macro vmov xd, xj
vor.v \xd, \xj, \xj
.endm
/* Register-to-register move (LASX). */
.macro xmov xd, xj
xvor.v \xd, \xj, \xj
.endm
/* LASX strided element store (see vstelmx.* above). */
.macro xvstelmx.d xd, rk, ra, si
add.d \rk, \rk, \ra
xvstelm.d \xd, \rk, 0, \si
.endm
/*
*============================================================================
* LSX/LASX custom macros
*============================================================================
*/
/*
* Load 4 float, double, V128, v256 elements with stride.
*/
/* Load 4 single-precision values from src at strides 0/\stride/
 * \stride2/\stride3 into out0..out3. */
.macro FLDS_LOADX_4 src, stride, stride2, stride3, out0, out1, out2, out3
fld.s \out0, \src, 0
fldx.s \out1, \src, \stride
fldx.s \out2, \src, \stride2
fldx.s \out3, \src, \stride3
.endm
/* Double-precision variant. */
.macro FLDD_LOADX_4 src, stride, stride2, stride3, out0, out1, out2, out3
fld.d \out0, \src, 0
fldx.d \out1, \src, \stride
fldx.d \out2, \src, \stride2
fldx.d \out3, \src, \stride3
.endm
/* 128-bit LSX variant. */
.macro LSX_LOADX_4 src, stride, stride2, stride3, out0, out1, out2, out3
vld \out0, \src, 0
vldx \out1, \src, \stride
vldx \out2, \src, \stride2
vldx \out3, \src, \stride3
.endm
/* 256-bit LASX variant. */
.macro LASX_LOADX_4 src, stride, stride2, stride3, out0, out1, out2, out3
xvld \out0, \src, 0
xvldx \out1, \src, \stride
xvldx \out2, \src, \stride2
xvldx \out3, \src, \stride3
.endm
/*
* Description : Transpose 4x4 block with half-word elements in vectors
* Arguments : Inputs - in0, in1, in2, in3
* Outputs - out0, out1, out2, out3
*/
/* 4x4 halfword transpose; each row occupies the low 64 bits of its
 * vector (upper halves of the outputs are don't-care). */
.macro LSX_TRANSPOSE4x4_H in0, in1, in2, in3, out0, out1, out2, out3, \
tmp0, tmp1
vilvl.h \tmp0, \in1, \in0 // interleave rows 0/1 and 2/3
vilvl.h \tmp1, \in3, \in2
vilvl.w \out0, \tmp1, \tmp0 // columns 0/1 ...
vilvh.w \out2, \tmp1, \tmp0 // ... and columns 2/3
vilvh.d \out1, \out0, \out0 // column 1 into the low doubleword
vilvh.d \out3, \out0, \out2 // column 3 into the low doubleword
.endm
/*
* Description : Transpose 4x4 block with word elements in vectors
* Arguments : Inputs - in0, in1, in2, in3
* Outputs - out0, out1, out2, out3
* Details :
* Example :
* 1, 2, 3, 4 1, 5, 9,13
* 5, 6, 7, 8 to 2, 6,10,14
* 9,10,11,12 =====> 3, 7,11,15
* 13,14,15,16 4, 8,12,16
*/
/* Full 4x4 word transpose via two interleave stages. */
.macro LSX_TRANSPOSE4x4_W in0, in1, in2, in3, out0, out1, out2, out3, \
tmp0, tmp1
vilvl.w \tmp0, \in1, \in0 // low word pairs of rows 0/1
vilvh.w \out1, \in1, \in0 // high word pairs of rows 0/1
vilvl.w \tmp1, \in3, \in2 // same for rows 2/3
vilvh.w \out3, \in3, \in2
vilvl.d \out0, \tmp1, \tmp0 // assemble the four columns
vilvl.d \out2, \out3, \out1
vilvh.d \out3, \out3, \out1
vilvh.d \out1, \tmp1, \tmp0
.endm
/*
* Description : Transpose 8x8 block with half-word elements in vectors
* Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7
* Outputs - out0, out1, out2, out3, out4, out5, out6, out7
*/
/* 8x8 halfword transpose: interleave low/high halves of the rows in
 * two rounds of h-interleaves, then pick doublewords into columns. */
.macro LSX_TRANSPOSE8x8_H in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
out2, out3, out4, out5, out6, out7, tmp0, tmp1, tmp2, \
tmp3, tmp4, tmp5, tmp6, tmp7
vilvl.h \tmp0, \in6, \in4 // low halves, rows 4..7 and 0..3
vilvl.h \tmp1, \in7, \in5
vilvl.h \tmp2, \in2, \in0
vilvl.h \tmp3, \in3, \in1
vilvl.h \tmp4, \tmp1, \tmp0 // second interleave round
vilvh.h \tmp5, \tmp1, \tmp0
vilvl.h \tmp6, \tmp3, \tmp2
vilvh.h \tmp7, \tmp3, \tmp2
vilvh.h \tmp0, \in6, \in4 // high halves of the input rows
vilvh.h \tmp1, \in7, \in5
vilvh.h \tmp2, \in2, \in0
vilvh.h \tmp3, \in3, \in1
vpickev.d \out0, \tmp4, \tmp6 // columns 0..3
vpickod.d \out1, \tmp4, \tmp6
vpickev.d \out2, \tmp5, \tmp7
vpickod.d \out3, \tmp5, \tmp7
vilvl.h \tmp4, \tmp1, \tmp0 // second round for the high halves
vilvh.h \tmp5, \tmp1, \tmp0
vilvl.h \tmp6, \tmp3, \tmp2
vilvh.h \tmp7, \tmp3, \tmp2
vpickev.d \out4, \tmp4, \tmp6 // columns 4..7
vpickod.d \out5, \tmp4, \tmp6
vpickev.d \out6, \tmp5, \tmp7
vpickod.d \out7, \tmp5, \tmp7
.endm
/*
* Description : Transpose 16x8 block with byte elements in vectors
* Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7
* Outputs - out0, out1, out2, out3, out4, out5, out6, out7
*/
/* 16x8 byte transpose (per 128-bit lane): three rounds of byte/word
 * interleaves followed by doubleword interleaves to form columns. */
.macro LASX_TRANSPOSE16X8_B in0, in1, in2, in3, in4, in5, in6, in7, \
in8, in9, in10, in11, in12, in13, in14, in15, \
out0, out1, out2, out3, out4, out5, out6, out7,\
tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7
xvilvl.b \tmp0, \in2, \in0 // round 1: byte interleave row pairs
xvilvl.b \tmp1, \in3, \in1
xvilvl.b \tmp2, \in6, \in4
xvilvl.b \tmp3, \in7, \in5
xvilvl.b \tmp4, \in10, \in8
xvilvl.b \tmp5, \in11, \in9
xvilvl.b \tmp6, \in14, \in12
xvilvl.b \tmp7, \in15, \in13
xvilvl.b \out0, \tmp1, \tmp0 // round 2: interleave the pairs
xvilvh.b \out1, \tmp1, \tmp0
xvilvl.b \out2, \tmp3, \tmp2
xvilvh.b \out3, \tmp3, \tmp2
xvilvl.b \out4, \tmp5, \tmp4
xvilvh.b \out5, \tmp5, \tmp4
xvilvl.b \out6, \tmp7, \tmp6
xvilvh.b \out7, \tmp7, \tmp6
xvilvl.w \tmp0, \out2, \out0 // round 3: word interleave
xvilvh.w \tmp2, \out2, \out0
xvilvl.w \tmp4, \out3, \out1
xvilvh.w \tmp6, \out3, \out1
xvilvl.w \tmp1, \out6, \out4
xvilvh.w \tmp3, \out6, \out4
xvilvl.w \tmp5, \out7, \out5
xvilvh.w \tmp7, \out7, \out5
xvilvl.d \out0, \tmp1, \tmp0 // final: assemble output columns
xvilvh.d \out1, \tmp1, \tmp0
xvilvl.d \out2, \tmp3, \tmp2
xvilvh.d \out3, \tmp3, \tmp2
xvilvl.d \out4, \tmp5, \tmp4
xvilvh.d \out5, \tmp5, \tmp4
xvilvl.d \out6, \tmp7, \tmp6
xvilvh.d \out7, \tmp7, \tmp6
.endm
/*
* Description : Transpose 4x4 block with half-word elements in vectors
* Arguments : Inputs - in0, in1, in2, in3
* Outputs - out0, out1, out2, out3
*/
/* 4x4 halfword transpose, performed independently in each 128-bit
 * lane; rows live in the low 64 bits of their lane. */
.macro LASX_TRANSPOSE4x4_H in0, in1, in2, in3, out0, out1, out2, out3, \
tmp0, tmp1
xvilvl.h \tmp0, \in1, \in0 // interleave rows 0/1 and 2/3
xvilvl.h \tmp1, \in3, \in2
xvilvl.w \out0, \tmp1, \tmp0 // columns 0/1 ...
xvilvh.w \out2, \tmp1, \tmp0 // ... and columns 2/3
xvilvh.d \out1, \out0, \out0 // column 1 into the low doubleword
xvilvh.d \out3, \out0, \out2 // column 3 into the low doubleword
.endm
/*
* Description : Transpose 4x8 block with half-word elements in vectors
* Arguments : Inputs - in0, in1, in2, in3
* Outputs - out0, out1, out2, out3
*/
/* 4x8 halfword transpose (per 128-bit lane): interleave the four
 * rows, then splat each 64-bit column into its own register. */
.macro LASX_TRANSPOSE4x8_H in0, in1, in2, in3, out0, out1, out2, out3, \
tmp0, tmp1
xvilvl.h \tmp0, \in2, \in0
xvilvl.h \tmp1, \in3, \in1
xvilvl.h \out2, \tmp1, \tmp0
xvilvh.h \out3, \tmp1, \tmp0
xvilvl.d \out0, \out2, \out2 // broadcast each column doubleword
xvilvh.d \out1, \out2, \out2
xvilvl.d \out2, \out3, \out3
xvilvh.d \out3, \out3, \out3
.endm
/*
* Description : Transpose 8x8 block with half-word elements in vectors
* Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7
* Outputs - out0, out1, out2, out3, out4, out5, out6, out7
*/
.macro LASX_TRANSPOSE8x8_H in0, in1, in2, in3, in4, in5, in6, in7, \
out0, out1, out2, out3, out4, out5, out6, out7, \
tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7
xvilvl.h \tmp0, \in6, \in4
xvilvl.h \tmp1, \in7, \in5
xvilvl.h \tmp2, \in2, \in0
xvilvl.h \tmp3, \in3, \in1
xvilvl.h \tmp4, \tmp1, \tmp0
xvilvh.h \tmp5, \tmp1, \tmp0
xvilvl.h \tmp6, \tmp3, \tmp2
xvilvh.h \tmp7, \tmp3, \tmp2
xvilvh.h \tmp0, \in6, \in4
xvilvh.h \tmp1, \in7, \in5
xvilvh.h \tmp2, \in2, \in0
xvilvh.h \tmp3, \in3, \in1
xvpickev.d \out0, \tmp4, \tmp6
xvpickod.d \out1, \tmp4, \tmp6
xvpickev.d \out2, \tmp5, \tmp7
xvpickod.d \out3, \tmp5, \tmp7
xvilvl.h \tmp4, \tmp1, \tmp0
xvilvh.h \tmp5, \tmp1, \tmp0
xvilvl.h \tmp6, \tmp3, \tmp2
xvilvh.h \tmp7, \tmp3, \tmp2
xvpickev.d \out4, \tmp4, \tmp6
xvpickod.d \out5, \tmp4, \tmp6
xvpickev.d \out6, \tmp5, \tmp7
xvpickod.d \out7, \tmp5, \tmp7
.endm
/*
* Description : Transpose 2x4x4 block with half-word elements in vectors
* Arguments : Inputs - in0, in1, in2, in3
* Outputs - out0, out1, out2, out3
*/
.macro LASX_TRANSPOSE2x4x4_H in0, in1, in2, in3, out0, out1, out2, out3, \
tmp0, tmp1, tmp2
xvilvh.h \tmp1, \in0, \in1
xvilvl.h \out1, \in0, \in1
xvilvh.h \tmp0, \in2, \in3
xvilvl.h \out3, \in2, \in3
xvilvh.w \tmp2, \out3, \out1
xvilvl.w \out3, \out3, \out1
xvilvl.w \out2, \tmp0, \tmp1
xvilvh.w \tmp1, \tmp0, \tmp1
xvilvh.d \out0, \out2, \out3
xvilvl.d \out2, \out2, \out3
xvilvh.d \out1, \tmp1, \tmp2
xvilvl.d \out3, \tmp1, \tmp2
.endm
/*
* Description : Transpose 4x4 block with word elements in vectors
* Arguments : Inputs - in0, in1, in2, in3
* Outputs - out0, out1, out2, out3
* Details :
* Example :
* 1, 2, 3, 4, 1, 2, 3, 4 1,5, 9,13, 1,5, 9,13
* 5, 6, 7, 8, 5, 6, 7, 8 to 2,6,10,14, 2,6,10,14
* 9,10,11,12, 9,10,11,12 =====> 3,7,11,15, 3,7,11,15
* 13,14,15,16, 13,14,15,16 4,8,12,16, 4,8,12,16
*/
.macro LASX_TRANSPOSE4x4_W in0, in1, in2, in3, out0, out1, out2, out3, \
tmp0, tmp1
xvilvl.w \tmp0, \in1, \in0
xvilvh.w \out1, \in1, \in0
xvilvl.w \tmp1, \in3, \in2
xvilvh.w \out3, \in3, \in2
xvilvl.d \out0, \tmp1, \tmp0
xvilvl.d \out2, \out3, \out1
xvilvh.d \out3, \out3, \out1
xvilvh.d \out1, \tmp1, \tmp0
.endm
/*
* Description : Transpose 8x8 block with word elements in vectors
* Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7
* Outputs - out0, out1, out2, out3, out4, out5, out6,
* _out7
* Example : LASX_TRANSPOSE8x8_W
* in0 : 1,2,3,4,5,6,7,8
* in1 : 2,2,3,4,5,6,7,8
* in2 : 3,2,3,4,5,6,7,8
* in3 : 4,2,3,4,5,6,7,8
* in4 : 5,2,3,4,5,6,7,8
* in5 : 6,2,3,4,5,6,7,8
* in6 : 7,2,3,4,5,6,7,8
* in7 : 8,2,3,4,5,6,7,8
*
* out0 : 1,2,3,4,5,6,7,8
* out1 : 2,2,2,2,2,2,2,2
* out2 : 3,3,3,3,3,3,3,3
* out3 : 4,4,4,4,4,4,4,4
* out4 : 5,5,5,5,5,5,5,5
* out5 : 6,6,6,6,6,6,6,6
* out6 : 7,7,7,7,7,7,7,7
* out7 : 8,8,8,8,8,8,8,8
*/
.macro LASX_TRANSPOSE8x8_W in0, in1, in2, in3, in4, in5, in6, in7,\
out0, out1, out2, out3, out4, out5, out6, out7,\
tmp0, tmp1, tmp2, tmp3
xvilvl.w \tmp0, \in2, \in0
xvilvl.w \tmp1, \in3, \in1
xvilvh.w \tmp2, \in2, \in0
xvilvh.w \tmp3, \in3, \in1
xvilvl.w \out0, \tmp1, \tmp0
xvilvh.w \out1, \tmp1, \tmp0
xvilvl.w \out2, \tmp3, \tmp2
xvilvh.w \out3, \tmp3, \tmp2
xvilvl.w \tmp0, \in6, \in4
xvilvl.w \tmp1, \in7, \in5
xvilvh.w \tmp2, \in6, \in4
xvilvh.w \tmp3, \in7, \in5
xvilvl.w \out4, \tmp1, \tmp0
xvilvh.w \out5, \tmp1, \tmp0
xvilvl.w \out6, \tmp3, \tmp2
xvilvh.w \out7, \tmp3, \tmp2
xmov \tmp0, \out0
xmov \tmp1, \out1
xmov \tmp2, \out2
xmov \tmp3, \out3
xvpermi.q \out0, \out4, 0x02
xvpermi.q \out1, \out5, 0x02
xvpermi.q \out2, \out6, 0x02
xvpermi.q \out3, \out7, 0x02
xvpermi.q \out4, \tmp0, 0x31
xvpermi.q \out5, \tmp1, 0x31
xvpermi.q \out6, \tmp2, 0x31
xvpermi.q \out7, \tmp3, 0x31
.endm
/*
* Description : Transpose 4x4 block with double-word elements in vectors
* Arguments : Inputs - in0, in1, in2, in3
* Outputs - out0, out1, out2, out3
* Example : LASX_TRANSPOSE4x4_D
* in0 : 1,2,3,4
* in1 : 1,2,3,4
* in2 : 1,2,3,4
* in3 : 1,2,3,4
*
* out0 : 1,1,1,1
* out1 : 2,2,2,2
* out2 : 3,3,3,3
* out3 : 4,4,4,4
*/
.macro LASX_TRANSPOSE4x4_D in0, in1, in2, in3, out0, out1, out2, out3, \
tmp0, tmp1
xvilvl.d \tmp0, \in1, \in0
xvilvh.d \out1, \in1, \in0
xvilvh.d \tmp1, \in3, \in2
xvilvl.d \out2, \in3, \in2
xvor.v \out0, \tmp0, \tmp0
xvor.v \out3, \tmp1, \tmp1
xvpermi.q \out0, \out2, 0x02
xvpermi.q \out2, \tmp0, 0x31
xvpermi.q \out3, \out1, 0x31
xvpermi.q \out1, \tmp1, 0x02
.endm
|
aestream/faery
| 97,931
|
src/mp4/x264/common/loongarch/mc-a.S
|
/*****************************************************************************
* mc-a.S: LoongArch motion compensation
*****************************************************************************
* Copyright (C) 2023-2024 x264 project
*
* Authors: Xiwei Gu <guxiwei-hf@loongson.cn>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "loongson_asm.S"
#include "loongson_util.S"
const ch_shuf
.byte 0, 2, 2, 4, 4, 6, 6, 8, 1, 3, 3, 5, 5, 7, 7, 9
.byte 0, 2, 2, 4, 4, 6, 6, 8, 1, 3, 3, 5, 5, 7, 7, 9
endconst
const pw_1024
.rept 16
.short 1024
.endr
endconst
const filt_mul20
.rept 32
.byte 20
.endr
endconst
const filt_mul15
.rept 16
.byte 1, -5
.endr
endconst
const filt_mul51
.rept 16
.byte -5, 1
.endr
endconst
const hpel_shuf
.rept 2
.byte 0,8,1,9,2,10,3,11,4,12,5,13,6,14,7,15
.endr
endconst
const shuf_12
.rept 2
.byte 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27
.endr
endconst
const shuf_14
.rept 2
.byte 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29
.endr
endconst
const shuf_15
.rept 2
.byte 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30
.endr
endconst
const shuf_1
.rept 2
.byte 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
.endr
endconst
const shuf_2
.rept 2
.byte 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17
.endr
endconst
const shuf_3
.rept 2
.byte 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18
.endr
endconst
const shuf_4
.rept 2
.byte 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19
.endr
endconst
const shuf_6
.rept 2
.byte 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21
.endr
endconst
#if !HIGH_BIT_DEPTH
.macro MC_CHROMA_START
srai.d t0, a5, 3
srai.d t1, a6, 3
slli.d t0, t0, 1
mul.d t1, t1, a4
add.d t1, t1, t0
add.d a3, a3, t1 /* src += (m_vy >> 3) * i_src_stride + (m_vx >> 3) * 2 */
.endm
/*
* void mc_chroma( uint8_t *p_dst_u, uint8_t *p_dst_v,
* intptr_t i_dst_stride,
* uint8_t *p_src, intptr_t i_src_stride,
* int32_t m_vx, int32_t m_vy,
* int32_t i_width, int32_t i_height )
*/
function_x264 mc_chroma_lasx
MC_CHROMA_START
andi a5, a5, 0x07 /* m_vx & 0x07 */
andi a6, a6, 0x07 /* m_vy & 0x07 */
move t0, a5
slli.d t0, t0, 8
sub.d t0, t0, a5
li.d a5, 8
addi.d t0, t0, 8
sub.d a5, a5, a6
mul.d a6, a6, t0 /* (x * 255 + 8) * y */
mul.d a5, a5, t0 /* (x * 255 + 8) * (8 - y) */
xvreplgr2vr.h xr6, a6 /* cD cC ... cD cC */
xvreplgr2vr.h xr7, a5 /* cB cA ... cB cA */
la.local t0, ch_shuf
xvld xr5, t0, 0
addi.d t0, a7, -4
ldptr.w a7, sp, 0 /* a7 = i_height */
slli.d t1, a4, 1
blt zero, t0, .L_WIDTH8
.L_LOOP4:
vld vr0, a3, 0
vldx vr1, a3, a4
vldx vr2, a3, t1
xvpermi.q xr0, xr1, 0x02
xvpermi.q xr1, xr2, 0x02
xvshuf.b xr0, xr0, xr0, xr5
xvshuf.b xr1, xr1, xr1, xr5
xvdp2.h.bu xr2, xr0, xr7
xvdp2.h.bu xr3, xr1, xr6
xvadd.h xr0, xr2, xr3
xvssrlrni.bu.h xr0, xr0, 6
xvstelm.w xr0, a0, 0, 0
xvstelm.w xr0, a1, 0, 1
add.d a0, a0, a2
add.d a1, a1, a2
xvstelm.w xr0, a0, 0, 4
xvstelm.w xr0, a1, 0, 5
add.d a0, a0, a2
add.d a1, a1, a2
add.d a3, a3, t1
addi.d a7, a7, -2
blt zero, a7, .L_LOOP4
b .ENDFUNC
.L_WIDTH8:
xvld xr0, a3, 0
xvpermi.d xr0, xr0, 0x94
xvshuf.b xr0, xr0, xr0, xr5
.L_LOOP8:
xvldx xr3, a3, a4
xvpermi.d xr3, xr3, 0x94
xvshuf.b xr3, xr3, xr3, xr5
xvdp2.h.bu xr1, xr0, xr7
xvdp2.h.bu xr2, xr3, xr6
xvdp2.h.bu xr8, xr3, xr7
xvldx xr0, a3, t1
xvpermi.d xr0, xr0, 0x94
xvshuf.b xr0, xr0, xr0, xr5
xvdp2.h.bu xr4, xr0, xr6
xvadd.h xr1, xr1, xr2
xvadd.h xr3, xr8, xr4
xvssrlrni.bu.h xr3, xr1, 6
xvpermi.q xr4, xr3, 0x01
xvpackev.w xr8, xr4, xr3
xvpackod.w xr9, xr4, xr3
vstelm.d vr8, a0, 0, 0
vstelm.d vr9, a1, 0, 0
add.d a0, a0, a2
add.d a1, a1, a2
vstelm.d vr8, a0, 0, 1
vstelm.d vr9, a1, 0, 1
addi.d a7, a7, -2
add.d a0, a0, a2
add.d a1, a1, a2
add.d a3, a3, t1
blt zero, a7, .L_LOOP8
.ENDFUNC:
endfunc_x264
.macro PIXEL_AVG_START
slli.d t0, a3, 1
add.w t1, t0, a3
slli.d t2, a3, 2
slli.d t3, a5, 1
add.w t4, t3, a5
slli.d t5, a5, 2
slli.d t6, a1, 1
add.w t7, t6, a1
slli.d t8, a1, 2
.endm
.macro BIWEIGHT_AVG_START
addi.d t0, zero, 64
sub.d t0, t0, a6
xvreplgr2vr.b xr0, a6
xvreplgr2vr.b xr1, t0
xvpackev.b xr8, xr1, xr0
xvxor.v xr9, xr9, xr9
xvaddi.hu xr9, xr9, 6
.endm
.macro BIWEIGHT_AVG_CORE a, b
xvpermi.d \a, \a, 0x50
xvpermi.d \b, \b, 0x50
xvilvl.b \a, \b, \a
xvmulwev.h.bu.b \b, \a, xr8
xvmaddwod.h.bu.b \b, \a, xr8
xvssrarn.bu.h \b, \b, xr9
xvpermi.d \b, \b, 0x08
.endm
.macro PIXEL_AVG_START_W8
slli.d t0, a3, 1
add.w t1, t0, a3
slli.d t3, a5, 1
add.w t4, t3, a5
.endm
function_x264 pixel_avg_weight_w4_lasx
addi.d t0, zero, 64
sub.d t0, t0, a6
vreplgr2vr.b vr0, a6
vreplgr2vr.b vr1, t0
vpackev.b vr8, vr1, vr0
.LOOP_HEIGHT_W4_1:
fld.s f0, a2, 0
fldx.s f1, a2, a3
fld.s f2, a4, 0
fldx.s f3, a4, a5
vilvl.w vr0, vr1, vr0
vilvl.w vr2, vr3, vr2
vilvl.b vr0, vr2, vr0
vmulwev.h.bu.b vr1, vr0, vr8
vmaddwod.h.bu.b vr1, vr0, vr8
vssrarni.bu.h vr1, vr1, 6
fst.s f1, a0, 0
add.d a0, a0, a1
vstelm.w vr1, a0, 0, 1
add.d a0, a0, a1
alsl.d a2, a3, a2, 1
alsl.d a4, a5, a4, 1
addi.w a7, a7, -2
bnez a7, .LOOP_HEIGHT_W4_1
endfunc_x264
function_x264 pixel_avg_w4_lasx
.LOOP_HEIGHT_W4:
fld.s f0, a2, 0
fldx.s f1, a2, a3
fld.s f4, a4, 0
fldx.s f5, a4, a5
vilvl.w vr0, vr1, vr0
vilvl.w vr4, vr5, vr4
vavgr.bu vr0, vr0, vr4
fst.s f0, a0, 0
add.d a0, a0, a1
vstelm.w vr0, a0, 0, 1
add.d a0, a0, a1
alsl.d a2, a3, a2, 1
alsl.d a4, a5, a4, 1
addi.w a7, a7, -2
bnez a7, .LOOP_HEIGHT_W4
endfunc_x264
function_x264 pixel_avg_weight_w8_lasx
addi.d t0, zero, 64
sub.d t0, t0, a6
xvreplgr2vr.b xr0, a6
xvreplgr2vr.b xr1, t0
xvpackev.b xr8, xr1, xr0
PIXEL_AVG_START_W8
.LOOP_HEIGHT_W8_1:
fld.d f0, a2, 0
fldx.d f1, a2, a3
fldx.d f2, a2, t0
fldx.d f3, a2, t1
fld.d f4, a4, 0
fldx.d f5, a4, a5
fldx.d f6, a4, t3
fldx.d f7, a4, t4
vilvl.b vr0, vr4, vr0
vilvl.b vr1, vr5, vr1
vilvl.b vr2, vr6, vr2
vilvl.b vr3, vr7, vr3
xvpermi.q xr1, xr0, 0x20
xvpermi.q xr3, xr2, 0x20
xvmulwev.h.bu.b xr2, xr1, xr8
xvmaddwod.h.bu.b xr2, xr1, xr8
xvmulwev.h.bu.b xr4, xr3, xr8
xvmaddwod.h.bu.b xr4, xr3, xr8
xvssrarni.bu.h xr4, xr2, 6
fst.d f4, a0, 0
add.d a0, a0, a1
xvstelm.d xr4, a0, 0, 2
add.d a0, a0, a1
xvstelm.d xr4, a0, 0, 1
add.d a0, a0, a1
xvstelm.d xr4, a0, 0, 3
add.d a0, a0, a1
alsl.d a2, a3, a2, 2
alsl.d a4, a5, a4, 2
addi.w a7, a7, -4
bnez a7, .LOOP_HEIGHT_W8_1
endfunc_x264
function_x264 pixel_avg_w8_lasx
PIXEL_AVG_START_W8
.LOOP_HEIGHT_W8:
fld.d f0, a2, 0
fldx.d f1, a2, a3
fldx.d f2, a2, t0
fldx.d f3, a2, t1
fld.d f4, a4, 0
fldx.d f5, a4, a5
fldx.d f6, a4, t3
fldx.d f7, a4, t4
vilvl.d vr0, vr1, vr0
vilvl.d vr2, vr3, vr2
vilvl.d vr4, vr5, vr4
vilvl.d vr6, vr7, vr6
vavgr.bu vr0, vr0, vr4
vavgr.bu vr2, vr2, vr6
fst.d f0, a0, 0
add.d a0, a0, a1
vstelm.d vr0, a0, 0, 1
fstx.d f2, a0, a1
alsl.d a0, a1, a0, 1
vstelm.d vr2, a0, 0, 1
add.d a0, a0, a1
alsl.d a2, a3, a2, 2
alsl.d a4, a5, a4, 2
addi.w a7, a7, -4
bnez a7, .LOOP_HEIGHT_W8
endfunc_x264
function_x264 pixel_avg_weight_w16_lasx
BIWEIGHT_AVG_START
PIXEL_AVG_START
.L_HEIGHT_LOOP_T:
LSX_LOADX_4 a2, a3, t0, t1, vr0, vr1, vr2, vr3
LSX_LOADX_4 a4, a5, t3, t4, vr4, vr5, vr6, vr7
BIWEIGHT_AVG_CORE xr0, xr4
BIWEIGHT_AVG_CORE xr1, xr5
vst vr4, a0, 0
vstx vr5, a0, a1
BIWEIGHT_AVG_CORE xr2, xr6
BIWEIGHT_AVG_CORE xr3, xr7
vstx vr6, a0, t6
vstx vr7, a0, t7
add.d a2, a2, t2
add.d a4, a4, t5
add.d a0, a0, t8
addi.d a7, a7, -4
bnez a7, .L_HEIGHT_LOOP_T
endfunc_x264
function_x264 pixel_avg_w16_lasx
PIXEL_AVG_START
.L_HEIGHT_LOOP:
vld vr0, a2, 0
vldx vr1, a2, a3
vldx vr2, a2, t0
vldx vr3, a2, t1
vld vr4, a4, 0
vldx vr5, a4, a5
vldx vr6, a4, t3
vldx vr7, a4, t4
vavgr.bu vr0, vr0, vr4
vavgr.bu vr1, vr1, vr5
vavgr.bu vr2, vr2, vr6
vavgr.bu vr3, vr3, vr7
vst vr0, a0, 0
vstx vr1, a0, a1
vstx vr2, a0, t6
vstx vr3, a0, t7
add.d a0, a0, t8
add.d a2, a2, t2
add.d a4, a4, t5
vld vr0, a2, 0
vldx vr1, a2, a3
vldx vr2, a2, t0
vldx vr3, a2, t1
vld vr4, a4, 0
vldx vr5, a4, a5
vldx vr6, a4, t3
vldx vr7, a4, t4
vavgr.bu vr0, vr0, vr4
vavgr.bu vr1, vr1, vr5
vavgr.bu vr2, vr2, vr6
vavgr.bu vr3, vr3, vr7
vst vr0, a0, 0
vstx vr1, a0, a1
vstx vr2, a0, t6
vstx vr3, a0, t7
add.d a2, a2, t2
add.d a4, a4, t5
add.d a0, a0, t8
addi.d a7, a7, -8
bnez a7, .L_HEIGHT_LOOP
endfunc_x264
.macro FILT_PACK_LASX s1, s2, s3
xvmulwev.w.h xr16, \s1, \s3
xvmulwev.w.h xr17, \s2, \s3
xvsrarni.h.w xr17, xr16, 15
xvmaxi.h xr17, xr17, 0
xvsat.hu xr17, xr17, 7
xvmulwod.w.h xr18, \s1, \s3
xvmulwod.w.h xr19, \s2, \s3
xvsrarni.h.w xr19, xr18, 15
xvmaxi.h xr19, xr19, 0
xvsat.hu xr19, xr19, 7
xvpackev.b \s1, xr19, xr17
.endm
/* s3: temp, s4: UNUSED, s5: imm */
.macro DO_FILT_V_LASX s1, s2, s3, s4, s5
alsl.d t1, a2, a1, 1 /* t1 = a1 + 2 * a2 */
alsl.d t2, a2, a3, 1 /* t2 = a3 + 2 * a2 */
xvld xr1, a3, 0
xvldx xr2, a3, a2
xvld \s3, t2, 0
xvld xr3, a1, 0
xvldx \s1, a1, a2
xvld \s2, t1, 0
xvilvh.b xr16, xr2, xr1
xvilvl.b xr17, xr2, xr1
xvilvh.b xr18, \s2, \s1
xvilvl.b xr19, \s2, \s1
xvilvh.b xr20, \s3, xr3
xvilvl.b xr21, \s3, xr3
xvdp2.h.bu.b xr1, xr17, xr12
xvdp2.h.bu.b xr4, xr16, xr12
xvdp2.h.bu.b \s1, xr19, xr0
xvdp2.h.bu.b xr2, xr18, xr0
xvdp2.h.bu.b xr3, xr21, xr14
xvdp2.h.bu.b \s2, xr20, xr14
xvadd.h xr1, xr1, \s1
xvadd.h xr4, xr4, xr2
xvadd.h xr1, xr1, xr3
xvadd.h xr4, xr4, \s2
xmov \s1, xr1
xmov \s2, xr1
addi.d a3, a3, 32
addi.d a1, a1, 32
xvpermi.q \s1, xr4, 0x2
xvpermi.q \s2, xr4, 0x13
FILT_PACK_LASX xr1, xr4, xr15
addi.d t1, a4, \s5
xvstx xr1, t0, t1
.endm
.macro FILT_H s1, s2, s3
xvsub.h \s1, \s1, \s2
xvsrai.h \s1, \s1, 2
xvsub.h \s1, \s1, \s2
xvadd.h \s1, \s1, \s3
xvsrai.h \s1, \s1, 2
xvadd.h \s1, \s1, \s3
.endm
.macro FILT_C s1, s2, s3
xmov xr3, \s1
xvpermi.q xr3, \s2, 0x03
xvshuf.b xr1, \s2, xr3, xr23
xvshuf.b xr2, \s2, xr3, xr24
xmov \s1, \s2
xvpermi.q \s1, \s3, 0x03
xvshuf.b xr3, \s1, \s2, xr29
xvshuf.b xr4, \s1, \s2, xr27
xvadd.h xr3, xr2, xr3
xmov xr2, \s1
xmov \s1, \s3
xvshuf.b \s3, xr2, \s2, xr30
xvadd.h xr4, xr4, \s2
xvadd.h \s3, \s3, xr1
FILT_H \s3, xr3, xr4
.endm
.macro DO_FILT_C_LASX s1, s2, s3, s4
FILT_C \s1, \s2, \s3
FILT_C \s2, \s1, \s4
FILT_PACK_LASX \s3, \s4, xr15
xvpermi.d \s3, \s3, 0xd8
xvstx \s3, a5, a4
.endm
.macro DO_FILT_H_LASX s1, s2, s3
xmov xr3, \s1
xvpermi.q xr3, \s2, 0x03
xvshuf.b xr1, \s2, xr3, xr24
xvshuf.b xr2, \s2, xr3, xr25
xmov xr3, \s2
xvpermi.q xr3, \s3, 0x03
xvshuf.b xr4, xr3, \s2, xr26
xvshuf.b xr5, xr3, \s2, xr27
xvshuf.b xr6, xr3, \s2, xr28
xmov \s1, \s2
xvdp2.h.bu.b xr16, xr1, xr12
xvdp2.h.bu.b xr17, xr2, xr12
xvdp2.h.bu.b xr18, \s2, xr14
xvdp2.h.bu.b xr19, xr4, xr14
xvdp2.h.bu.b xr20, xr5, xr0
xvdp2.h.bu.b xr21, xr6, xr0
xvadd.h xr1, xr16, xr18
xvadd.h xr2, xr17, xr19
xvadd.h xr1, xr1, xr20
xvadd.h xr2, xr2, xr21
FILT_PACK_LASX xr1, xr2, xr15
xvshuf.b xr1, xr1, xr1, xr22
xvstx xr1, a0, a4
xmov \s2, \s3
.endm
/*
* void hpel_filter( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc,
* uint8_t *src, intptr_t stride, int width, int height )
*/
function_x264 hpel_filter_lasx
addi.d sp, sp, -56
fst.d f24, sp, 0
fst.d f25, sp, 8
fst.d f26, sp, 16
fst.d f27, sp, 24
fst.d f28, sp, 32
fst.d f29, sp, 40
fst.d f30, sp, 48
move a7, a3
addi.d a5, a5, -32
move t0, a1
andi a7, a7, 31
sub.d a3, a3, a7
add.d a0, a0, a5
add.d t0, t0, a5
add.d a7, a7, a5
add.d a5, a5, a2
move a2, a4
sub.d a7, zero, a7
add.d a1, a3, a2
sub.d a3, a3, a2
sub.d a3, a3, a2
move a4, a7
la.local t1, filt_mul51
xvld xr0, t1, 0
la.local t2, filt_mul15
xvld xr12, t2, 0
la.local t3, filt_mul20
xvld xr14, t3, 0
la.local t4, pw_1024
xvld xr15, t4, 0
la.local t1, hpel_shuf
xvld xr22, t1, 0
la.local t2, shuf_12
xvld xr23, t2, 0
la.local t3, shuf_1
xvld xr26, t3, 0
xvaddi.bu xr24, xr23, 2 /* shuf_14 */
xvaddi.bu xr25, xr23, 3 /* shuf_15 */
xvaddi.bu xr27, xr26, 1 /* shuf_2 */
xvaddi.bu xr28, xr26, 2 /* shuf_3 */
xvaddi.bu xr29, xr26, 3 /* shuf_4 */
xvaddi.bu xr30, xr26, 5 /* shuf_6 */
xvxor.v xr9, xr9, xr9
xvxor.v xr10, xr10, xr10
.LOOPY:
DO_FILT_V_LASX xr8, xr7, xr13, xr12, 0
.LOOPX:
DO_FILT_V_LASX xr6, xr5, xr11, xr12, 32
.LASTX:
xvsrli.h xr15, xr15, 1
DO_FILT_C_LASX xr9, xr8, xr7, xr6
xvadd.h xr15, xr15, xr15
xmov xr7, xr5
DO_FILT_H_LASX xr10, xr13, xr11
addi.d a4, a4, 32
blt a4, zero, .LOOPX
addi.d t1, a4, -32
blt t1, zero, .LASTX
//setup regs for next y
sub.d a4, a4, a7
sub.d a4, a4, a2
sub.d a1, a1, a4
sub.d a3, a3, a4
add.d a0, a0, a2
add.d t0, t0, a2
add.d a5, a5, a2
move a4, a7
addi.d a6, a6, -1
blt zero, a6, .LOOPY
fld.d f24, sp, 0
fld.d f25, sp, 8
fld.d f26, sp, 16
fld.d f27, sp, 24
fld.d f28, sp, 32
fld.d f29, sp, 40
fld.d f30, sp, 48
addi.d sp, sp, 56
endfunc_x264
/*
* void pixel_avg_wxh(pixel *dst, intptr_t dst_stride, pixel *src1, intptr_t src1_stride,
* pixel *src2, intptr_t src2_stride, int weight);
*/
.macro PIXEL_AVG w, h
function_x264 pixel_avg_\w\()x\h\()_lasx
addi.d t0, a6, -32
addi.d a7, zero, \h
bne t0, zero, x264_8_pixel_avg_weight_w\w\()_lasx
b x264_8_pixel_avg_w\w\()_lasx
endfunc_x264
.endm
PIXEL_AVG 16, 8
PIXEL_AVG 8, 16
PIXEL_AVG 8, 8
PIXEL_AVG 8, 4
PIXEL_AVG 4, 16
PIXEL_AVG 4, 8
PIXEL_AVG 4, 4
PIXEL_AVG 4, 2
function_x264 mc_weight_w20_noden_lasx
xvldrepl.h xr1, a4, 40 // offset
xvldrepl.b xr0, a4, 36 // scale
.LOOP_WEIGHTW20_NODEN:
xvld xr3, a2, 0
xvldx xr4, a2, a3
xvmulwev.h.bu.b xr7, xr3, xr0
xvmulwev.h.bu.b xr8, xr4, xr0
xvmulwod.h.bu.b xr3, xr3, xr0
xvmulwod.h.bu.b xr4, xr4, xr0
xvadd.h xr7, xr7, xr1
xvadd.h xr8, xr8, xr1
xvadd.h xr3, xr3, xr1
xvadd.h xr4, xr4, xr1
xvssrarni.bu.h xr8, xr7, 0
xvssrarni.bu.h xr4, xr3, 0
xvilvl.b xr3, xr4, xr8
xvilvh.b xr4, xr4, xr8
vst vr3, a0, 0
xvstelm.w xr3, a0, 16, 4
add.d a0, a0, a1
vst vr4, a0, 0
xvstelm.w xr4, a0, 16, 4
alsl.d a2, a3, a2, 1
add.d a0, a0, a1
addi.w a5, a5, -2
blt zero, a5, .LOOP_WEIGHTW20_NODEN
endfunc_x264
function_x264 mc_weight_w16_noden_lasx
xvldrepl.h xr1, a4, 40 // offset
xvldrepl.h xr0, a4, 36 // scale
.LOOP_WEIGHTW16_NODEN:
vld vr3, a2, 0
vldx vr4, a2, a3
vext2xv.hu.bu xr3, xr3
vext2xv.hu.bu xr4, xr4
xvmul.h xr3, xr3, xr0
xvmul.h xr4, xr4, xr0
xvadd.h xr3, xr3, xr1
xvadd.h xr4, xr4, xr1
xvssrarni.bu.h xr4, xr3, 0
xvpermi.d xr3, xr4, 8
xvpermi.d xr4, xr4, 13
vst vr3, a0, 0
vstx vr4, a0, a1
alsl.d a2, a3, a2, 1
alsl.d a0, a1, a0, 1
addi.w a5, a5, -2
blt zero, a5, .LOOP_WEIGHTW16_NODEN
endfunc_x264
function_x264 mc_weight_w8_noden_lasx
xvldrepl.h xr1, a4, 40 // offset
xvldrepl.h xr0, a4, 36 // scale
.LOOP_WEIGHTW8_NODEN:
fld.d f3, a2, 0
fldx.d f4, a2, a3
vilvl.d vr3, vr4, vr3
vext2xv.hu.bu xr3, xr3
xvmul.h xr3, xr3, xr0
xvadd.h xr3, xr3, xr1
xvssrarni.bu.h xr3, xr3, 0
xvstelm.d xr3, a0, 0, 0
add.d a0, a0, a1
xvstelm.d xr3, a0, 0, 2
add.d a0, a0, a1
alsl.d a2, a3, a2, 1
addi.w a5, a5, -2
blt zero, a5, .LOOP_WEIGHTW8_NODEN
endfunc_x264
function_x264 mc_weight_w4_noden_lasx
xvldrepl.h xr1, a4, 40 // offset
xvldrepl.h xr0, a4, 36 // scale
.LOOP_WEIGHTW4_NODEN:
fld.s f3, a2, 0
fldx.s f4, a2, a3
vilvl.w vr3, vr4, vr3
vext2xv.hu.bu xr3, xr3
xvmul.h xr3, xr3, xr0
xvadd.h xr3, xr3, xr1
xvssrarni.bu.h xr3, xr3, 0
xvstelm.w xr3, a0, 0, 0
add.d a0, a0, a1
xvstelm.w xr3, a0, 0, 1
add.d a0, a0, a1
alsl.d a2, a3, a2, 1
addi.w a5, a5, -2
blt zero, a5, .LOOP_WEIGHTW4_NODEN
endfunc_x264
function_x264 mc_weight_w20_lasx
xvldrepl.h xr1, a4, 40 // offset
xvldrepl.b xr0, a4, 36 // scale
xvldrepl.h xr2, a4, 32 // denom
xvsll.h xr1, xr1, xr2
.LOOP_WEIGHTW20:
xvld xr3, a2, 0
xvldx xr4, a2, a3
xvmulwev.h.bu.b xr7, xr3, xr0
xvmulwev.h.bu.b xr8, xr4, xr0
xvmulwod.h.bu.b xr3, xr3, xr0
xvmulwod.h.bu.b xr4, xr4, xr0
xvsadd.h xr7, xr7, xr1
xvsadd.h xr8, xr8, xr1
xvsadd.h xr3, xr3, xr1
xvsadd.h xr4, xr4, xr1
xvssrarn.bu.h xr7, xr7, xr2
xvssrarn.bu.h xr8, xr8, xr2
xvssrarn.bu.h xr3, xr3, xr2
xvssrarn.bu.h xr4, xr4, xr2
xvilvl.b xr3, xr3, xr7
xvilvl.b xr4, xr4, xr8
vst vr3, a0, 0
xvstelm.w xr3, a0, 16, 4
add.d a0, a0, a1
vst vr4, a0, 0
xvstelm.w xr4, a0, 16, 4
add.d a0, a0, a1
alsl.d a2, a3, a2, 1
addi.w a5, a5, -2
blt zero, a5, .LOOP_WEIGHTW20
endfunc_x264
function_x264 mc_weight_w16_lasx
xvldrepl.h xr1, a4, 40 // offset
xvldrepl.h xr0, a4, 36 // scale
xvldrepl.h xr2, a4, 32 // denom
xvsll.h xr1, xr1, xr2
.LOOP_WEIGHTW16:
vld vr3, a2, 0
vldx vr4, a2, a3
vext2xv.hu.bu xr3, xr3
vext2xv.hu.bu xr4, xr4
xvmul.h xr3, xr3, xr0
xvmul.h xr4, xr4, xr0
xvsadd.h xr3, xr3, xr1
xvsadd.h xr4, xr4, xr1
xvssrarn.bu.h xr3, xr3, xr2
xvssrarn.bu.h xr4, xr4, xr2
xvpermi.d xr3, xr3, 8
xvpermi.d xr4, xr4, 8
vst vr3, a0, 0
vstx vr4, a0, a1
alsl.d a0, a1, a0, 1
alsl.d a2, a3, a2, 1
addi.w a5, a5, -2
blt zero, a5, .LOOP_WEIGHTW16
endfunc_x264
function_x264 mc_weight_w8_lasx
xvldrepl.h xr1, a4, 40 // offset
xvldrepl.h xr0, a4, 36 // scale
xvldrepl.h xr2, a4, 32 // denom
xvsll.h xr1, xr1, xr2
.LOOP_WEIGHTW8:
fld.d f3, a2, 0
fldx.d f4, a2, a3
vilvl.d vr3, vr4, vr3
vext2xv.hu.bu xr3, xr3
xvmul.h xr3, xr3, xr0
xvsadd.h xr3, xr3, xr1
xvssrarn.bu.h xr3, xr3, xr2
xvstelm.d xr3, a0, 0, 0
add.d a0, a0, a1
xvstelm.d xr3, a0, 0, 2
add.d a0, a0, a1
alsl.d a2, a3, a2, 1
addi.w a5, a5, -2
blt zero, a5, .LOOP_WEIGHTW8
endfunc_x264
function_x264 mc_weight_w4_lasx
xvldrepl.h xr1, a4, 40 // offset
xvldrepl.h xr0, a4, 36 // scale
xvldrepl.h xr2, a4, 32 // denom
xvsll.h xr1, xr1, xr2
.LOOP_WEIGHTW4:
fld.s f3, a2, 0
fldx.s f4, a2, a3
vilvl.w vr3, vr4, vr3
vext2xv.hu.bu xr3, xr3
xvmul.h xr3, xr3, xr0
xvsadd.h xr3, xr3, xr1
xvssrarn.bu.h xr3, xr3, xr2
xvstelm.w xr3, a0, 0, 0
add.d a0, a0, a1
xvstelm.w xr3, a0, 0, 1
add.d a0, a0, a1
alsl.d a2, a3, a2, 1
addi.w a5, a5, -2
blt zero, a5, .LOOP_WEIGHTW4
endfunc_x264
/*
* void x264_pixel_avg2_w4(uint8_t *dst, intptr_t i_dst_stride, uint8_t *src1,
* intptr_t i_src_stride, uint8_t *src2, int i_height)
*/
function_x264 pixel_avg2_w4_lasx
.avg2w4_loop_2:
addi.d a5, a5, -2
fld.s f0, a2, 0
fld.s f1, a4, 0
fldx.s f2, a2, a3
fldx.s f3, a4, a3
alsl.d a2, a3, a2, 1
alsl.d a4, a3, a4, 1
vavgr.bu vr0, vr0, vr1
vavgr.bu vr1, vr2, vr3
fst.s f0, a0, 0
fstx.s f1, a0, a1
alsl.d a0, a1, a0, 1
blt zero, a5, .avg2w4_loop_2
endfunc_x264
/*
* void x264_pixel_avg2_w8(uint8_t *dst, intptr_t i_dst_stride, uint8_t *src1,
* intptr_t i_src_stride, uint8_t *src2, int i_height)
*/
function_x264 pixel_avg2_w8_lasx
.avg2w8_loop_2:
addi.d a5, a5, -2
fld.d f0, a2, 0
fld.d f1, a4, 0
fldx.d f2, a2, a3
fldx.d f3, a4, a3
alsl.d a2, a3, a2, 1
alsl.d a4, a3, a4, 1
vavgr.bu vr0, vr0, vr1
vavgr.bu vr1, vr2, vr3
fst.d f0, a0, 0
fstx.d f1, a0, a1
alsl.d a0, a1, a0, 1
blt zero, a5, .avg2w8_loop_2
endfunc_x264
/*
* void x264_pixel_avg2_w16(uint8_t *dst, intptr_t i_dst_stride, uint8_t *src1,
* intptr_t i_src_stride, uint8_t *src2, int i_height)
*/
function_x264 pixel_avg2_w16_lasx
.avg2w16_loop_2:
addi.d a5, a5, -2
vld vr0, a2, 0
vldx vr1, a2, a3
vld vr2, a4, 0
vldx vr3, a4, a3
alsl.d a2, a3, a2, 1
alsl.d a4, a3, a4, 1
vavgr.bu vr0, vr0, vr2
vavgr.bu vr1, vr1, vr3
vst vr0, a0, 0
vstx vr1, a0, a1
alsl.d a0, a1, a0, 1
blt zero, a5, .avg2w16_loop_2
endfunc_x264
/*
* void x264_pixel_avg2_w20(uint8_t *dst, intptr_t i_dst_stride, uint8_t *src1,
* intptr_t i_src_stride, uint8_t *src2, int i_height)
*/
function_x264 pixel_avg2_w20_lasx
.avg2w20_loop_2:
addi.d a5, a5, -2
xvld xr0, a2, 0
xvldx xr1, a2, a3
xvld xr2, a4, 0
xvldx xr3, a4, a3
alsl.d a2, a3, a2, 1
alsl.d a4, a3, a4, 1
xvavgr.bu xr0, xr0, xr2
xvavgr.bu xr1, xr1, xr3
vst vr0, a0, 0
xvstelm.w xr0, a0, 16, 4
add.d a0, a0, a1
vst vr1, a0, 0
xvstelm.w xr1, a0, 16, 4
add.d a0, a0, a1
blt zero, a5, .avg2w20_loop_2
endfunc_x264
/*
* void mc_copy_width16( uint8_t *p_dst, int32_t i_dst_stride,
* uint8_t *p_src, int32_t i_src_stride,
* int32_t i_height )
*/
function_x264 mc_copy_w16_lasx
slli.d t0, a3, 1
add.d t1, t0, a3
slli.d t2, a1, 1
add.d t3, t2, a1
.LOOP_COPYW16:
vld vr1, a2, 0
vldx vr2, a2, a3
vldx vr3, a2, t0
vldx vr4, a2, t1
vst vr1, a0, 0
vstx vr2, a0, a1
vstx vr3, a0, t2
vstx vr4, a0, t3
alsl.d a0, a1, a0, 2
alsl.d a2, a3, a2, 2
addi.w a4, a4, -4
blt zero, a4, .LOOP_COPYW16
endfunc_x264
/*
* void mc_copy_w8( uint8_t *p_dst, intptr_t i_dst_stride,
* uint8_t *p_src, intptr_t i_src_stride,
* int32_t i_height )
*/
function_x264 mc_copy_w8_lasx
slli.d t0, a3, 1
add.d t1, t0, a3
slli.d t2, a1, 1
add.d t3, t2, a1
.LOOP_COPYW8:
fld.d f0, a2, 0
fldx.d f1, a2, a3
fldx.d f2, a2, t0
fldx.d f3, a2, t1
fst.d f0, a0, 0
fstx.d f1, a0, a1
fstx.d f2, a0, t2
fstx.d f3, a0, t3
alsl.d a0, a1, a0, 2
alsl.d a2, a3, a2, 2
addi.w a4, a4, -4
blt zero, a4, .LOOP_COPYW8
endfunc_x264
/*
* void mc_copy_w4( uint8_t *p_dst, intptr_t i_dst_stride,
* uint8_t *p_src, intptr_t i_src_stride,
* int32_t i_height )
*/
function_x264 mc_copy_w4_lasx
slli.d t0, a3, 1
add.d t1, t0, a3
slli.d t2, a1, 1
add.d t3, t2, a1
.LOOP_COPYW4:
fld.s f0, a2, 0
fldx.s f1, a2, a3
fldx.s f2, a2, t0
fldx.s f3, a2, t1
fst.s f0, a0, 0
fstx.s f1, a0, a1
fstx.s f2, a0, t2
fstx.s f3, a0, t3
alsl.d a0, a1, a0, 2
alsl.d a2, a3, a2, 2
addi.w a4, a4, -4
blt zero, a4, .LOOP_COPYW4
endfunc_x264
/*
* void memzero_aligned( void *p_dst, size_t n )
*/
function_x264 memzero_aligned_lasx
xvxor.v xr1, xr1, xr1
.memzero_loop:
addi.d a1, a1, -128
.rept 4
xvst xr1, a0, 0
addi.d a0, a0, 32
.endr
blt zero, a1, .memzero_loop
endfunc_x264
/*
* void frame_init_lowres_core( pixel *src0, pixel *dst0, pixel *dsth,
* pixel *dstv, pixel *dstc, intptr_t src_stride,
* intptr_t dst_stride, int width, int height )
*/
function_x264 frame_init_lowres_core_lasx
andi t1, a7, 15
sub.w t0, a7, t1
slli.d t2, a5, 1
ldptr.w a7, sp, 0 // use a7 as height variable
.height_loop:
add.d t4, zero, t0
addi.d t3, a0, 0
addi.d t5, a1, 0
addi.d t6, a2, 0
addi.d t7, a3, 0
addi.d t8, a4, 0
.width16_loop:
xvld xr0, t3, 0
xvldx xr1, t3, a5
xvldx xr2, t3, t2
xvavgr.bu xr3, xr0, xr1
xvavgr.bu xr4, xr1, xr2
xvhaddw.hu.bu xr5, xr3, xr3
xvhaddw.hu.bu xr6, xr4, xr4
xvssrarni.bu.h xr6, xr5, 1
xvpermi.d xr7, xr6, 0xd8
vst vr7, t5, 0
xvpermi.q xr7, xr7, 0x11
vst vr7, t7, 0
addi.d t3, t3, 1
xvld xr0, t3, 0
xvldx xr1, t3, a5
xvldx xr2, t3, t2
xvavgr.bu xr3, xr0, xr1
xvavgr.bu xr4, xr1, xr2
xvhaddw.hu.bu xr5, xr3, xr3
xvhaddw.hu.bu xr6, xr4, xr4
xvssrarni.bu.h xr6, xr5, 1
xvpermi.d xr7, xr6, 0xd8
vst vr7, t6, 0
xvpermi.q xr7, xr7, 0x11
vst vr7, t8, 0
addi.d t3, t3, 31
addi.d t5, t5, 16
addi.d t6, t6, 16
addi.d t7, t7, 16
addi.d t8, t8, 16
addi.w t4, t4, -16
blt zero, t4, .width16_loop
beqz t1, .width16_end
vld vr0, t3, 0
vldx vr1, t3, a5
vldx vr2, t3, t2
vavgr.bu vr3, vr0, vr1
vavgr.bu vr4, vr1, vr2
vhaddw.hu.bu vr5, vr3, vr3
vhaddw.hu.bu vr6, vr4, vr4
vssrarni.bu.h vr6, vr5, 1
fst.d f6, t5, 0
vstelm.d vr6, t7, 0, 1
addi.d t3, t3, 1
vld vr0, t3, 0
vldx vr1, t3, a5
vldx vr2, t3, t2
vavgr.bu vr3, vr0, vr1
vavgr.bu vr4, vr1, vr2
vhaddw.hu.bu vr5, vr3, vr3
vhaddw.hu.bu vr6, vr4, vr4
vssrarni.bu.h vr6, vr5, 1
fst.d f6, t6, 0
vstelm.d vr6, t8, 0, 1
.width16_end:
add.d a0, a0, t2
add.d a1, a1, a6
add.d a2, a2, a6
add.d a3, a3, a6
add.d a4, a4, a6
addi.w a7, a7, -1
blt zero, a7, .height_loop
endfunc_x264
/*
* void mc_chroma(uint8_t *p_dst_u, uint8_t *p_dst_v,
* intptr_t i_dst_stride,
* uint8_t *p_src, intptr_t i_src_stride,
* int32_t m_vx, int32_t m_vy,
* int32_t i_width, int32_t i_height)
*/
function_x264 mc_chroma_lsx
MC_CHROMA_START
andi a5, a5, 0x07 /* m_vx & 0x07 */
andi a6, a6, 0x07 /* m_vy & 0x07 */
li.d t8, 8
sub.d t1, t8, a5 // 8-d8x
sub.d t2, t8, a6 // 8-d8y
mul.d t3, t1, t2 // CA
mul.d t4, a5, t2 // CB
mul.d t5, t1, a6 // CC
mul.d t6, a5, a6 // CD
vreplgr2vr.b vr0, t3
vreplgr2vr.b vr1, t4
vreplgr2vr.b vr2, t5
vreplgr2vr.b vr3, t6
add.d t0, a3, a4
ldptr.w t1, sp, 0 /* i_height */
move t3, t0
addi.d t4, zero, 1
addi.d t5, zero, 3
addi.d t6, zero, 7
bge t6, a7, .ENDLOOP_W8
.LOOP_W8:
vld vr4, a3, 0
vld vr5, t0, 0
vld vr6, a3, 2
vld vr7, t0, 2
vmulwev.h.bu vr8, vr4, vr0
vmulwod.h.bu vr9, vr4, vr0
vmulwev.h.bu vr10, vr5, vr2
vmulwod.h.bu vr11, vr5, vr2
vmaddwev.h.bu vr8, vr6, vr1
vmaddwod.h.bu vr9, vr6, vr1
vmaddwev.h.bu vr10, vr7, vr3
vmaddwod.h.bu vr11, vr7, vr3
vadd.h vr12, vr8, vr10
vadd.h vr13, vr9, vr11
vssrarni.bu.h vr13, vr12, 6
vstelm.d vr13, a0, 0, 0
vstelm.d vr13, a1, 0, 1
add.d a0, a0, a2
add.d a1, a1, a2
addi.d t1, t1, -1
move a3, t3
add.d t3, t3, a4
move t0, t3
blt zero, t1, .LOOP_W8
b .ENDLOOP_W2
.ENDLOOP_W8:
bge t5, a7, .ENDLOOP_W4
.LOOP_W4:
vld vr4, a3, 0
vld vr5, t0, 0
vld vr6, a3, 2
vld vr7, t0, 2
vmulwev.h.bu vr8, vr4, vr0
vmulwod.h.bu vr9, vr4, vr0
vmulwev.h.bu vr10, vr5, vr2
vmulwod.h.bu vr11, vr5, vr2
vmaddwev.h.bu vr8, vr6, vr1
vmaddwod.h.bu vr9, vr6, vr1
vmaddwev.h.bu vr10, vr7, vr3
vmaddwod.h.bu vr11, vr7, vr3
vadd.h vr12, vr8, vr10
vadd.h vr13, vr9, vr11
vssrarni.bu.h vr13, vr12, 6
vstelm.w vr13, a0, 0, 0
vstelm.w vr13, a1, 0, 2
add.d a0, a0, a2
add.d a1, a1, a2
move a3, t3
add.d t3, t3, a4
move t0, t3
addi.d t1, t1, -1
blt zero, t1, .LOOP_W4
b .ENDLOOP_W2
.ENDLOOP_W4:
bge t4, a7, .ENDLOOP_W2
.LOOP_W2:
vld vr4, a3, 0
vld vr5, t0, 0
vld vr6, a3, 2
vld vr7, t0, 2
vmulwev.h.bu vr8, vr4, vr0
vmulwod.h.bu vr9, vr4, vr0
vmulwev.h.bu vr10, vr5, vr2
vmulwod.h.bu vr11, vr5, vr2
vmaddwev.h.bu vr8, vr6, vr1
vmaddwod.h.bu vr9, vr6, vr1
vmaddwev.h.bu vr10, vr7, vr3
vmaddwod.h.bu vr11, vr7, vr3
vadd.h vr12, vr8, vr10
vadd.h vr13, vr9, vr11
vssrarni.bu.h vr13, vr12, 6
vstelm.h vr13, a0, 0, 0
vstelm.h vr13, a1, 0, 4
add.d a0, a0, a2
add.d a1, a1, a2
move a3, t3
add.d t3, t3, a4
move t0, t3
addi.d t1, t1, -1
blt zero, t1, .LOOP_W2
.ENDLOOP_W2:
endfunc_x264
function_x264 pixel_avg_weight_w4_lsx
addi.d t0, zero, 64
sub.d t0, t0, a6
vreplgr2vr.b vr0, a6
vreplgr2vr.b vr1, t0
vpackev.b vr8, vr1, vr0
.LOOP_AVG_WEIGHT_W4:
fld.s f0, a2, 0
fldx.s f1, a2, a3
fld.s f2, a4, 0
fldx.s f3, a4, a5
vilvl.w vr0, vr1, vr0
vilvl.w vr2, vr3, vr2
vilvl.b vr0, vr2, vr0
vmulwev.h.bu.b vr1, vr0, vr8
vmaddwod.h.bu.b vr1, vr0, vr8
vssrarni.bu.h vr1, vr1, 6
fst.s f1, a0, 0
add.d a0, a0, a1
vstelm.w vr1, a0, 0, 1
add.d a0, a0, a1
alsl.d a2, a3, a2, 1
alsl.d a4, a5, a4, 1
addi.w a7, a7, -2
bnez a7, .LOOP_AVG_WEIGHT_W4
endfunc_x264
function_x264 pixel_avg_w4_lsx
.LOOP_AVG_W4:
fld.s f0, a2, 0
fldx.s f1, a2, a3
fld.s f4, a4, 0
fldx.s f5, a4, a5
vilvl.w vr0, vr1, vr0
vilvl.w vr4, vr5, vr4
vavgr.bu vr0, vr0, vr4
fst.s f0, a0, 0
add.d a0, a0, a1
vstelm.w vr0, a0, 0, 1
add.d a0, a0, a1
alsl.d a2, a3, a2, 1
alsl.d a4, a5, a4, 1
addi.w a7, a7, -2
bnez a7, .LOOP_AVG_W4
endfunc_x264
/*
 * Weighted average, width 8. Same arguments as pixel_avg_weight_w4_lsx.
 * Processes 4 rows per iteration (height assumed to be a multiple of 4).
 * PIXEL_AVG_START_W8 (defined elsewhere) presumably sets t0/t1 to src1
 * stride multiples and t3/t4 to src2 stride multiples -- confirm in macro.
 */
function_x264 pixel_avg_weight_w8_lsx
addi.d t0, zero, 64
sub.d t0, t0, a6              // t0 = 64 - weight
slli.d t5, a1, 1              // t5 = 2*dst_stride
add.d t6, a1, t5              // t6 = 3*dst_stride
add.d t7, a1, t6              // t7 = 4*dst_stride
vreplgr2vr.b vr0, a6
vreplgr2vr.b vr1, t0
vpackev.b vr8, vr1, vr0       // {w, 64-w, ...} coefficient pairs
PIXEL_AVG_START_W8
.LOOP_AVG_HEIGHT_W8:
fld.d f0, a2, 0               // four rows of src1
fldx.d f1, a2, a3
fldx.d f2, a2, t0
fldx.d f3, a2, t1
fld.d f4, a4, 0               // four rows of src2
fldx.d f5, a4, a5
fldx.d f6, a4, t3
fldx.d f7, a4, t4
vilvl.b vr0, vr4, vr0         // interleave src1/src2 bytes per row
vilvl.b vr1, vr5, vr1
vilvl.b vr2, vr6, vr2
vilvl.b vr3, vr7, vr3
vmulwev.h.bu.b vr4, vr0, vr8  // even: src1*w
vmulwev.h.bu.b vr5, vr1, vr8
vmulwev.h.bu.b vr6, vr2, vr8
vmulwev.h.bu.b vr7, vr3, vr8
vmaddwod.h.bu.b vr4, vr0, vr8 // + odd: src2*(64-w)
vmaddwod.h.bu.b vr5, vr1, vr8
vmaddwod.h.bu.b vr6, vr2, vr8
vmaddwod.h.bu.b vr7, vr3, vr8
vssrarni.bu.h vr4, vr4, 6     // (x + 32) >> 6, saturate to u8
vssrarni.bu.h vr5, vr5, 6
vssrarni.bu.h vr6, vr6, 6
vssrarni.bu.h vr7, vr7, 6
fst.d f4, a0, 0               // store the four 8-byte rows
fstx.d f5, a0, a1
fstx.d f6, a0, t5
fstx.d f7, a0, t6
add.d a0, a0, t7              // dst += 4*stride
alsl.d a2, a3, a2, 2          // src1 += 4*stride
alsl.d a4, a5, a4, 2          // src2 += 4*stride
addi.w a7, a7, -4
bnez a7, .LOOP_AVG_HEIGHT_W8
endfunc_x264
/*
 * Unweighted average, width 8; 4 rows per iteration.
 * PIXEL_AVG_START_W8 presumably sets t0/t1 (src1 stride multiples) and
 * t3/t4 (src2 stride multiples) -- confirm in macro definition.
 */
function_x264 pixel_avg_w8_lsx
PIXEL_AVG_START_W8
.LOOP_AVG_W8:
fld.d f0, a2, 0               // four rows of src1
fldx.d f1, a2, a3
fldx.d f2, a2, t0
fldx.d f3, a2, t1
fld.d f4, a4, 0               // four rows of src2
fldx.d f5, a4, a5
fldx.d f6, a4, t3
fldx.d f7, a4, t4
vilvl.d vr0, vr1, vr0         // pair rows 0/1 of src1
vilvl.d vr2, vr3, vr2         // pair rows 2/3 of src1
vilvl.d vr4, vr5, vr4         // pair rows 0/1 of src2
vilvl.d vr6, vr7, vr6         // pair rows 2/3 of src2
vavgr.bu vr0, vr0, vr4        // rounding average, two rows per op
vavgr.bu vr2, vr2, vr6
fst.d f0, a0, 0
add.d a0, a0, a1
vstelm.d vr0, a0, 0, 1        // second row lives in the high half
fstx.d f2, a0, a1
alsl.d a0, a1, a0, 1
vstelm.d vr2, a0, 0, 1
add.d a0, a0, a1
alsl.d a2, a3, a2, 2
alsl.d a4, a5, a4, 2
addi.w a7, a7, -4
bnez a7, .LOOP_AVG_W8
endfunc_x264
/*
 * Weighted average, width 16; 4 rows per iteration.
 * PIXEL_AVG_START (defined elsewhere) presumably sets t0/t1/t2 for src1,
 * t3/t4/t5 for src2 and t6/t7/t8 for dst stride stepping -- confirm there.
 * Even/odd byte lanes are weighted separately and re-interleaved at the end.
 */
function_x264 pixel_avg_weight_w16_lsx
addi.d t0, zero, 64
sub.d t0, t0, a6              // t0 = 64 - weight
vreplgr2vr.b vr8, a6          // vr8 = w (applied to src1)
vreplgr2vr.b vr9, t0          // vr9 = 64-w (applied to src2)
PIXEL_AVG_START
.LOOP_AVG_HEIGHT_W16:
LSX_LOADX_4 a2, a3, t0, t1, vr0, vr1, vr2, vr3   // 4 rows of src1
LSX_LOADX_4 a4, a5, t3, t4, vr4, vr5, vr6, vr7   // 4 rows of src2
vmulwev.h.bu.b vr10, vr0, vr8 // even lanes: src1*w
vmulwev.h.bu.b vr11, vr1, vr8
vmulwev.h.bu.b vr12, vr2, vr8
vmulwev.h.bu.b vr13, vr3, vr8
vmulwod.h.bu.b vr14, vr0, vr8 // odd lanes: src1*w
vmulwod.h.bu.b vr15, vr1, vr8
vmulwod.h.bu.b vr16, vr2, vr8
vmulwod.h.bu.b vr17, vr3, vr8
vmaddwev.h.bu.b vr10, vr4, vr9  // + even lanes: src2*(64-w)
vmaddwev.h.bu.b vr11, vr5, vr9
vmaddwev.h.bu.b vr12, vr6, vr9
vmaddwev.h.bu.b vr13, vr7, vr9
vmaddwod.h.bu.b vr14, vr4, vr9  // + odd lanes
vmaddwod.h.bu.b vr15, vr5, vr9
vmaddwod.h.bu.b vr16, vr6, vr9
vmaddwod.h.bu.b vr17, vr7, vr9
vssrarni.bu.h vr11, vr10, 6   // (x + 32) >> 6, saturate, pack rows pairwise
vssrarni.bu.h vr13, vr12, 6
vssrarni.bu.h vr15, vr14, 6
vssrarni.bu.h vr17, vr16, 6
vilvl.b vr10, vr15, vr11      // re-interleave even/odd lanes into pixel order
vilvh.b vr11, vr15, vr11
vilvl.b vr12, vr17, vr13
vilvh.b vr13, vr17, vr13
vst vr10, a0, 0               // store four 16-byte rows
vstx vr11, a0, a1
vstx vr12, a0, t6
vstx vr13, a0, t7
add.d a2, a2, t2
add.d a4, a4, t5
add.d a0, a0, t8
addi.d a7, a7, -4
bnez a7, .LOOP_AVG_HEIGHT_W16
endfunc_x264
/*
 * Unweighted average, width 16; unrolled to 8 rows per iteration
 * (height assumed to be a multiple of 8).
 * PIXEL_AVG_START presumably sets t0/t1/t2 (src1), t3/t4/t5 (src2),
 * t6/t7/t8 (dst) stride multiples -- confirm in macro definition.
 */
function_x264 pixel_avg_w16_lsx
PIXEL_AVG_START
.LOOP_AVG_W16:
vld vr0, a2, 0                // rows 0-3 of src1
vldx vr1, a2, a3
vldx vr2, a2, t0
vldx vr3, a2, t1
vld vr4, a4, 0                // rows 0-3 of src2
vldx vr5, a4, a5
vldx vr6, a4, t3
vldx vr7, a4, t4
vavgr.bu vr0, vr0, vr4        // rounding byte average
vavgr.bu vr1, vr1, vr5
vavgr.bu vr2, vr2, vr6
vavgr.bu vr3, vr3, vr7
vst vr0, a0, 0
vstx vr1, a0, a1
vstx vr2, a0, t6
vstx vr3, a0, t7
add.d a0, a0, t8              // advance all pointers by 4 rows
add.d a2, a2, t2
add.d a4, a4, t5
vld vr0, a2, 0                // rows 4-7, same pattern
vldx vr1, a2, a3
vldx vr2, a2, t0
vldx vr3, a2, t1
vld vr4, a4, 0
vldx vr5, a4, a5
vldx vr6, a4, t3
vldx vr7, a4, t4
vavgr.bu vr0, vr0, vr4
vavgr.bu vr1, vr1, vr5
vavgr.bu vr2, vr2, vr6
vavgr.bu vr3, vr3, vr7
vst vr0, a0, 0
vstx vr1, a0, a1
vstx vr2, a0, t6
vstx vr3, a0, t7
add.d a2, a2, t2
add.d a4, a4, t5
add.d a0, a0, t8
addi.d a7, a7, -8
bnez a7, .LOOP_AVG_W16
endfunc_x264
/*
 * void pixel_avg_wxh(pixel *dst, intptr_t dst_stride, pixel *src1, intptr_t src1_stride,
 *                    pixel *src2, intptr_t src2_stride, int weight);
 *
 * Dispatcher: loads the block height into a7 and tail-calls either the
 * unweighted averager (weight == 32, i.e. the bit-exact (a+b+1)>>1 case)
 * or the weighted one (any other weight).
 */
.macro PIXEL_AVG_LSX w, h
function_x264 pixel_avg_\w\()x\h\()_lsx
addi.d t0, a6, -32            // weight == 32 -> plain average path
addi.d a7, zero, \h           // a7 = height, consumed by the target function
bne t0, zero, x264_8_pixel_avg_weight_w\w\()_lsx
b x264_8_pixel_avg_w\w\()_lsx
endfunc_x264
.endm
PIXEL_AVG_LSX 16, 16
PIXEL_AVG_LSX 16, 8
PIXEL_AVG_LSX 8, 16
PIXEL_AVG_LSX 8, 8
PIXEL_AVG_LSX 8, 4
PIXEL_AVG_LSX 4, 16
PIXEL_AVG_LSX 4, 8
PIXEL_AVG_LSX 4, 4
PIXEL_AVG_LSX 4, 2
/*
 * mc_weight, width 20, denom == 0 (no final shift): dst = sat_u8(src*scale + offset).
 * a0 = dst, a1 = dst_stride, a2 = src, a3 = src_stride,
 * a4 = weight struct (scale at +36, offset at +40), a5 = height (even).
 * Two rows per iteration; the 4 extra pixels of both rows are merged into vr4.
 */
function_x264 mc_weight_w20_noden_lsx
vldrepl.b vr0, a4, 36 // scale
vldrepl.h vr1, a4, 40 // offset
.LOOP_WEIGHT_W20_NODEN:
vld vr3, a2, 0                // row 0: pixels 0-15
vld vr4, a2, 16               // row 0: pixels 16-19 (plus overread)
add.d a2, a2, a3
vld vr5, a2, 0                // row 1: pixels 0-15
vld vr6, a2, 16               // row 1: pixels 16-19
vilvl.w vr4, vr6, vr4         // combine both rows' extra 4 pixels
vmulwev.h.bu.b vr7, vr3, vr0  // widen to h and scale, even/odd lanes
vmulwod.h.bu.b vr8, vr3, vr0
vmulwev.h.bu.b vr9, vr4, vr0
vmulwod.h.bu.b vr10, vr4, vr0
vmulwev.h.bu.b vr11, vr5, vr0
vmulwod.h.bu.b vr12, vr5, vr0
vadd.h vr7, vr7, vr1          // + offset (non-saturating; denom==0 path)
vadd.h vr8, vr8, vr1
vadd.h vr9, vr9, vr1
vadd.h vr10, vr10, vr1
vadd.h vr11, vr11, vr1
vadd.h vr12, vr12, vr1
vssrani.bu.h vr11, vr7, 0     // saturate-narrow to u8, rows packed pairwise
vssrani.bu.h vr12, vr8, 0
vssrani.bu.h vr9, vr9, 0
vssrani.bu.h vr10, vr10, 0
vilvl.b vr7, vr12, vr11       // re-interleave even/odd lanes: row 0
vilvl.b vr9, vr10, vr9        // extras of both rows
vilvh.b vr11, vr12, vr11      // row 1
vst vr7, a0, 0
vstelm.w vr9, a0, 16, 0       // row 0, pixels 16-19
add.d a0, a0, a1
vst vr11, a0, 0
vstelm.w vr9, a0, 16, 1       // row 1, pixels 16-19
add.d a0, a0, a1
add.d a2, a2, a3
addi.w a5, a5, -2
blt zero, a5, .LOOP_WEIGHT_W20_NODEN
endfunc_x264
/*
 * mc_weight, width 16, denom == 0: dst = sat_u8(src*scale + offset).
 * Arguments as mc_weight_w20_noden_lsx; two rows per iteration.
 */
function_x264 mc_weight_w16_noden_lsx
vldrepl.b vr0, a4, 36 // scale
vldrepl.h vr1, a4, 40 // offset
.LOOP_WEIGHT_W16_NODEN:
vld vr3, a2, 0                // row 0
vldx vr4, a2, a3              // row 1
vmulwev.h.bu.b vr5, vr3, vr0  // scale, even/odd lanes widened to h
vmulwod.h.bu.b vr6, vr3, vr0
vmulwev.h.bu.b vr7, vr4, vr0
vmulwod.h.bu.b vr8, vr4, vr0
vadd.h vr5, vr5, vr1          // + offset
vadd.h vr6, vr6, vr1
vadd.h vr7, vr7, vr1
vadd.h vr8, vr8, vr1
vssrani.bu.h vr7, vr5, 0      // saturate-narrow to u8
vssrani.bu.h vr8, vr6, 0
vilvl.b vr5, vr8, vr7         // restore pixel order: row 0
vilvh.b vr7, vr8, vr7         // row 1
vst vr5, a0, 0
vstx vr7, a0, a1
alsl.d a2, a3, a2, 1
alsl.d a0, a1, a0, 1
addi.w a5, a5, -2
blt zero, a5, .LOOP_WEIGHT_W16_NODEN
endfunc_x264
/*
 * mc_weight, width 8, denom == 0: dst = sat_u8(src*scale + offset).
 * Two rows per iteration, processed in a single vector.
 */
function_x264 mc_weight_w8_noden_lsx
vldrepl.b vr0, a4, 36 // scale
vldrepl.h vr1, a4, 40 // offset
.LOOP_WEIGHT_W8_NODEN:
fld.d f3, a2, 0               // row 0 (8 bytes)
fldx.d f4, a2, a3             // row 1
vilvl.d vr3, vr4, vr3         // both rows in one vector
vmulwev.h.bu.b vr5, vr3, vr0
vmulwod.h.bu.b vr6, vr3, vr0
vadd.h vr5, vr5, vr1
vadd.h vr6, vr6, vr1
vssrani.bu.h vr5, vr5, 0      // saturate-narrow to u8
vssrani.bu.h vr6, vr6, 0
vilvl.b vr7, vr6, vr5         // re-interleave even/odd lanes
vstelm.d vr7, a0, 0, 0        // row 0
add.d a0, a0, a1
vstelm.d vr7, a0, 0, 1        // row 1
add.d a0, a0, a1
alsl.d a2, a3, a2, 1
addi.w a5, a5, -2
blt zero, a5, .LOOP_WEIGHT_W8_NODEN
endfunc_x264
/*
 * mc_weight, width 4, denom == 0: dst = sat_u8(src*scale + offset).
 * Note scale is loaded as a halfword here (vs. byte in the wider variants)
 * because the pixels are first widened with vsllwil before a plain vmul.h.
 */
function_x264 mc_weight_w4_noden_lsx
vldrepl.h vr0, a4, 36 // scale
vldrepl.h vr1, a4, 40 // offset
.LOOP_WEIGHT_W4_NODEN:
fld.s f3, a2, 0               // two 4-byte rows
fldx.s f4, a2, a3
vilvl.w vr3, vr4, vr3
vsllwil.hu.bu vr3, vr3, 0     // zero-extend bytes to halfwords
vmul.h vr3, vr3, vr0          // * scale
vadd.h vr3, vr3, vr1          // + offset
vssrani.bu.h vr3, vr3, 0      // saturate-narrow to u8
vstelm.w vr3, a0, 0, 0        // row 0
add.d a0, a0, a1
vstelm.w vr3, a0, 0, 1        // row 1
add.d a0, a0, a1
alsl.d a2, a3, a2, 1
addi.w a5, a5, -2
blt zero, a5, .LOOP_WEIGHT_W4_NODEN
endfunc_x264
/*
 * mc_weight, width 20, denom != 0:
 *   dst = sat_u8((src*scale + (offset<<denom) + round) >> denom)
 * a4 = weight struct: denom at +32, scale at +36, offset at +40.
 * The offset is pre-shifted left by denom so the final vssrarn (rounding
 * shift-right-narrow by denom) applies the shift to offset and product alike.
 * Two rows per iteration; vr4 carries the extra 4 pixels of both rows.
 */
function_x264 mc_weight_w20_lsx
vldrepl.h vr1, a4, 40 // offset
vldrepl.b vr0, a4, 36 // scale
vldrepl.h vr2, a4, 32 // denom
vsll.h vr1, vr1, vr2          // offset <<= denom
.LOOP_WEIGHT_W20:
vld vr3, a2, 0                // row 0: pixels 0-15
vld vr4, a2, 16               // row 0: pixels 16-19
add.d a2, a2, a3
vld vr5, a2, 0                // row 1: pixels 0-15
vld vr6, a2, 16               // row 1: pixels 16-19
vilvl.w vr4, vr6, vr4         // combine extras of both rows
vmulwev.h.bu.b vr7, vr3, vr0  // scale, even/odd lanes
vmulwod.h.bu.b vr8, vr3, vr0
vmulwev.h.bu.b vr9, vr4, vr0
vmulwod.h.bu.b vr10, vr4, vr0
vmulwev.h.bu.b vr11, vr5, vr0
vmulwod.h.bu.b vr12, vr5, vr0
vsadd.h vr7, vr7, vr1         // saturating add of the shifted offset
vsadd.h vr8, vr8, vr1
vsadd.h vr9, vr9, vr1
vsadd.h vr10, vr10, vr1
vsadd.h vr11, vr11, vr1
vsadd.h vr12, vr12, vr1
vssrarn.bu.h vr7, vr7, vr2    // rounding >> denom, saturate to u8
vssrarn.bu.h vr8, vr8, vr2
vssrarn.bu.h vr9, vr9, vr2
vssrarn.bu.h vr10, vr10, vr2
vssrarn.bu.h vr11, vr11, vr2
vssrarn.bu.h vr12, vr12, vr2
vilvl.b vr7, vr8, vr7         // re-interleave even/odd lanes per row
vilvl.b vr9, vr10, vr9
vilvl.b vr11, vr12, vr11
vst vr7, a0, 0
vstelm.w vr9, a0, 16, 0       // row 0, pixels 16-19
add.d a0, a0, a1
vst vr11, a0, 0
vstelm.w vr9, a0, 16, 1       // row 1, pixels 16-19
add.d a0, a0, a1
add.d a2, a2, a3
addi.w a5, a5, -2
blt zero, a5, .LOOP_WEIGHT_W20
endfunc_x264
/*
 * mc_weight, width 16, denom != 0:
 *   dst = sat_u8((src*scale + (offset<<denom) + round) >> denom)
 * Two rows per iteration. See mc_weight_w20_lsx for the offset pre-shift.
 */
function_x264 mc_weight_w16_lsx
vldrepl.h vr1, a4, 40 // offset
vldrepl.b vr0, a4, 36 // scale
vldrepl.h vr2, a4, 32 // denom
vsll.h vr1, vr1, vr2          // offset <<= denom
.LOOP_WEIGHT_W16:
vld vr3, a2, 0                // row 0
vldx vr4, a2, a3              // row 1
vmulwev.h.bu.b vr5, vr3, vr0
vmulwod.h.bu.b vr6, vr3, vr0
vmulwev.h.bu.b vr7, vr4, vr0
vmulwod.h.bu.b vr8, vr4, vr0
vsadd.h vr5, vr5, vr1         // saturating + offset
vsadd.h vr6, vr6, vr1
vsadd.h vr7, vr7, vr1
vsadd.h vr8, vr8, vr1
vssrarn.bu.h vr5, vr5, vr2    // rounding >> denom, saturate to u8
vssrarn.bu.h vr6, vr6, vr2
vssrarn.bu.h vr7, vr7, vr2
vssrarn.bu.h vr8, vr8, vr2
vilvl.b vr5, vr6, vr5         // restore pixel order: row 0
vilvl.b vr7, vr8, vr7         // row 1
vst vr5, a0, 0
vstx vr7, a0, a1
alsl.d a2, a3, a2, 1
alsl.d a0, a1, a0, 1
addi.w a5, a5, -2
blt zero, a5, .LOOP_WEIGHT_W16
endfunc_x264
/*
 * mc_weight, width 8, denom != 0:
 *   dst = sat_u8((src*scale + (offset<<denom) + round) >> denom)
 * Two rows per iteration in one vector.
 */
function_x264 mc_weight_w8_lsx
vldrepl.h vr1, a4, 40 // offset
vldrepl.b vr0, a4, 36 // scale
vldrepl.h vr2, a4, 32 // denom
vsll.h vr1, vr1, vr2          // offset <<= denom
.LOOP_WEIGHT_W8:
fld.d f3, a2, 0               // row 0
fldx.d f4, a2, a3             // row 1
vilvl.d vr3, vr4, vr3         // both rows in one vector
vmulwev.h.bu.b vr5, vr3, vr0
vmulwod.h.bu.b vr6, vr3, vr0
vsadd.h vr5, vr5, vr1
vsadd.h vr6, vr6, vr1
vssrarn.bu.h vr5, vr5, vr2    // rounding >> denom, saturate to u8
vssrarn.bu.h vr6, vr6, vr2
vilvl.b vr7, vr6, vr5         // re-interleave even/odd lanes
vstelm.d vr7, a0, 0, 0        // row 0
add.d a0, a0, a1
vstelm.d vr7, a0, 0, 1        // row 1
add.d a0, a0, a1
alsl.d a2, a3, a2, 1
addi.w a5, a5, -2
blt zero, a5, .LOOP_WEIGHT_W8
endfunc_x264
/*
 * mc_weight, width 4, denom != 0:
 *   dst = sat_u8((src*scale + (offset<<denom) + round) >> denom)
 * Pixels are widened first, so scale is loaded as a halfword.
 */
function_x264 mc_weight_w4_lsx
vldrepl.h vr1, a4, 40 // offset
vldrepl.h vr0, a4, 36 // scale
vldrepl.h vr2, a4, 32 // denom
vsll.h vr1, vr1, vr2          // offset <<= denom
.LOOP_WEIGHT_W4:
fld.s f3, a2, 0               // two 4-byte rows
fldx.s f4, a2, a3
vilvl.w vr3, vr4, vr3
vsllwil.hu.bu vr3, vr3, 0     // zero-extend bytes to halfwords
vmul.h vr3, vr3, vr0
vsadd.h vr3, vr3, vr1
vssrarn.bu.h vr3, vr3, vr2    // rounding >> denom, saturate to u8
vstelm.w vr3, a0, 0, 0        // row 0
add.d a0, a0, a1
vstelm.w vr3, a0, 0, 1        // row 1
add.d a0, a0, a1
alsl.d a2, a3, a2, 1
addi.w a5, a5, -2
blt zero, a5, .LOOP_WEIGHT_W4
endfunc_x264
/*
 * void x264_pixel_avg2_w4(uint8_t *dst, intptr_t i_dst_stride, uint8_t *src1,
 *                         intptr_t i_src_stride, uint8_t *src2, int i_height)
 * Rounding average of two width-4 blocks that share a stride (i_src_stride
 * applies to src1 and src2 alike). Two rows per iteration; height even.
 */
function_x264 pixel_avg2_w4_lsx
.LOOP_AVG2_W4:
addi.d a5, a5, -2
fld.s f0, a2, 0               // src1 rows 0/1
fld.s f1, a4, 0               // src2 rows 0/1
fldx.s f2, a2, a3
fldx.s f3, a4, a3             // note: src2 also stepped by a3
alsl.d a2, a3, a2, 1
alsl.d a4, a3, a4, 1
vavgr.bu vr0, vr0, vr1        // (src1 + src2 + 1) >> 1
vavgr.bu vr1, vr2, vr3
fst.s f0, a0, 0
fstx.s f1, a0, a1
alsl.d a0, a1, a0, 1
blt zero, a5, .LOOP_AVG2_W4
endfunc_x264
/*
 * void x264_pixel_avg2_w8(uint8_t *dst, intptr_t i_dst_stride, uint8_t *src1,
 *                         intptr_t i_src_stride, uint8_t *src2, int i_height)
 * Rounding average of two width-8 blocks sharing one source stride.
 * Two rows per iteration; height even.
 */
function_x264 pixel_avg2_w8_lsx
.LOOP_AVG2_W8:
addi.d a5, a5, -2
fld.d f0, a2, 0               // src1 rows 0/1
fld.d f1, a4, 0               // src2 rows 0/1
fldx.d f2, a2, a3
fldx.d f3, a4, a3
alsl.d a2, a3, a2, 1
alsl.d a4, a3, a4, 1
vavgr.bu vr0, vr0, vr1        // (src1 + src2 + 1) >> 1
vavgr.bu vr1, vr2, vr3
fst.d f0, a0, 0
fstx.d f1, a0, a1
alsl.d a0, a1, a0, 1
blt zero, a5, .LOOP_AVG2_W8
endfunc_x264
/*
 * void x264_pixel_avg2_w16(uint8_t *dst, intptr_t i_dst_stride, uint8_t *src1,
 *                          intptr_t i_src_stride, uint8_t *src2, int i_height)
 * Rounding average of two width-16 blocks sharing one source stride.
 * Two rows per iteration; height even.
 */
function_x264 pixel_avg2_w16_lsx
.LOOP_AVG2_W16:
addi.d a5, a5, -2
vld vr0, a2, 0                // src1 rows 0/1
vldx vr1, a2, a3
vld vr2, a4, 0                // src2 rows 0/1
vldx vr3, a4, a3
alsl.d a2, a3, a2, 1
alsl.d a4, a3, a4, 1
vavgr.bu vr0, vr0, vr2        // (src1 + src2 + 1) >> 1
vavgr.bu vr1, vr1, vr3
vst vr0, a0, 0
vstx vr1, a0, a1
alsl.d a0, a1, a0, 1
blt zero, a5, .LOOP_AVG2_W16
endfunc_x264
/*
 * void x264_pixel_avg2_w20(uint8_t *dst, intptr_t i_dst_stride, uint8_t *src1,
 *                          intptr_t i_src_stride, uint8_t *src2, int i_height)
 * Rounding average, width 20: a full 16-byte vector plus a 4-byte tail
 * (stored with vstelm.w at offset 16). Two rows per iteration; height even.
 */
function_x264 pixel_avg2_w20_lsx
.LOOP_AVG2_W20:
addi.d a5, a5, -2
vld vr0, a2, 0                // row 0: src1 pixels 0-15 and 16-19
vld vr1, a2, 16
vld vr2, a4, 0                // row 0: src2
vld vr3, a4, 16
add.d a2, a2, a3
add.d a4, a4, a3
vld vr4, a2, 0                // row 1: src1
vld vr5, a2, 16
vld vr6, a4, 0                // row 1: src2
vld vr7, a4, 16
vavgr.bu vr0, vr0, vr2
vavgr.bu vr1, vr1, vr3
vavgr.bu vr4, vr4, vr6
vavgr.bu vr5, vr5, vr7
vst vr0, a0, 0
vstelm.w vr1, a0, 16, 0       // row 0, pixels 16-19
add.d a0, a0, a1
vst vr4, a0, 0
vstelm.w vr5, a0, 16, 0       // row 1, pixels 16-19
add.d a2, a2, a3
add.d a4, a4, a3
add.d a0, a0, a1
blt zero, a5, .LOOP_AVG2_W20
endfunc_x264
/*
 * void mc_copy_width16( uint8_t *p_dst, int32_t i_dst_stride,
 *                       uint8_t *p_src, int32_t i_src_stride,
 *                       int32_t i_height )
 * Straight 16-byte-wide copy, 4 rows per iteration (height multiple of 4).
 */
function_x264 mc_copy_w16_lsx
slli.d t0, a3, 1              // t0 = 2*src_stride
add.d t1, t0, a3              // t1 = 3*src_stride
slli.d t2, a1, 1              // t2 = 2*dst_stride
add.d t3, t2, a1              // t3 = 3*dst_stride
.LOOP_COPY_W16:
vld vr1, a2, 0
vldx vr2, a2, a3
vldx vr3, a2, t0
vldx vr4, a2, t1
vst vr1, a0, 0
vstx vr2, a0, a1
vstx vr3, a0, t2
vstx vr4, a0, t3
alsl.d a0, a1, a0, 2          // dst += 4*stride
alsl.d a2, a3, a2, 2          // src += 4*stride
addi.w a4, a4, -4
blt zero, a4, .LOOP_COPY_W16
endfunc_x264
/*
 * void mc_copy_w8(uint8_t *p_dst, intptr_t i_dst_stride,
 *                 uint8_t *p_src, intptr_t i_src_stride,
 *                 int32_t i_height)
 * 8-byte-wide copy via FP doubles, 4 rows per iteration.
 */
function_x264 mc_copy_w8_lsx
slli.d t0, a3, 1              // t0 = 2*src_stride
add.d t1, t0, a3              // t1 = 3*src_stride
slli.d t2, a1, 1              // t2 = 2*dst_stride
add.d t3, t2, a1              // t3 = 3*dst_stride
.LOOP_COPY_W8:
fld.d f0, a2, 0
fldx.d f1, a2, a3
fldx.d f2, a2, t0
fldx.d f3, a2, t1
fst.d f0, a0, 0
fstx.d f1, a0, a1
fstx.d f2, a0, t2
fstx.d f3, a0, t3
alsl.d a0, a1, a0, 2
alsl.d a2, a3, a2, 2
addi.w a4, a4, -4
blt zero, a4, .LOOP_COPY_W8
endfunc_x264
/*
 * void mc_copy_w4(uint8_t *p_dst, intptr_t i_dst_stride,
 *                 uint8_t *p_src, intptr_t i_src_stride,
 *                 int32_t i_height)
 * 4-byte-wide copy via FP singles, 4 rows per iteration.
 */
function_x264 mc_copy_w4_lsx
slli.d t0, a3, 1              // t0 = 2*src_stride
add.d t1, t0, a3              // t1 = 3*src_stride
slli.d t2, a1, 1              // t2 = 2*dst_stride
add.d t3, t2, a1              // t3 = 3*dst_stride
.LOOP_COPY_W4:
fld.s f0, a2, 0
fldx.s f1, a2, a3
fldx.s f2, a2, t0
fldx.s f3, a2, t1
fst.s f0, a0, 0
fstx.s f1, a0, a1
fstx.s f2, a0, t2
fstx.s f3, a0, t3
alsl.d a0, a1, a0, 2
alsl.d a2, a3, a2, 2
addi.w a4, a4, -4
blt zero, a4, .LOOP_COPY_W4
endfunc_x264
/*
 * void store_interleave_chroma(uint8_t *p_dst, intptr_t i_dst_stride,
 *                              uint8_t *p_src0, uint8_t *p_src1,
 *                              int32_t i_height)
 * Interleaves two 8-byte-wide planes (U/V) into one NV12-style row per
 * iteration. Sources advance by FDEC_STRIDE (compile-time constant).
 */
function_x264 store_interleave_chroma_lsx
.loop_interleave_chroma:
fld.d f0, a2, 0               // 8 bytes of src0
fld.d f1, a3, 0               // 8 bytes of src1
addi.d a2, a2, FDEC_STRIDE
addi.d a3, a3, FDEC_STRIDE
vilvl.b vr0, vr1, vr0         // byte-interleave: s0,s1,s0,s1,...
vst vr0, a0, 0
add.d a0, a0, a1
addi.w a4, a4, -1
blt zero, a4, .loop_interleave_chroma
endfunc_x264
/*
 * void load_deinterleave_chroma_fenc(pixel *dst, pixel *src,
 *                                    intptr_t i_src, int height)
 * Splits interleaved chroma into two half-width planes placed side by side
 * in the fenc buffer: evens (U) at dst, odds (V) at dst + FENC_STRIDE/2.
 * Two rows per iteration, with a single-row tail when height is odd.
 */
function_x264 load_deinterleave_chroma_fenc_lsx
addi.d t0, a0, FENC_STRIDE/2  // t0 = destination for the odd (V) bytes
andi t1, a3, 1                // t1 = height & 1 (odd-row tail flag)
sub.w t2, a3, t1              // t2 = even part of height
.loop_deinterleave_fenc:
vld vr0, a1, 0                // row 0, interleaved
vldx vr1, a1, a2              // row 1
vpickev.b vr2, vr1, vr0       // even bytes of both rows
vpickod.b vr3, vr1, vr0       // odd bytes of both rows
fst.d f2, a0, 0               // row 0 evens
fst.d f3, t0, 0               // row 0 odds
vstelm.d vr2, a0, FENC_STRIDE, 1   // row 1 evens
vstelm.d vr3, t0, FENC_STRIDE, 1   // row 1 odds
addi.d a0, a0, FENC_STRIDE * 2
addi.d t0, t0, FENC_STRIDE * 2
alsl.d a1, a2, a1, 1
addi.w t2, t2, -2
blt zero, t2, .loop_deinterleave_fenc
beqz t1, .loop_deinterleave_fenc_end
vld vr0, a1, 0                // odd-height tail: one final row
vpickev.b vr1, vr0, vr0
vpickod.b vr2, vr0, vr0
fst.d f1, a0, 0
fst.d f2, t0, 0
.loop_deinterleave_fenc_end:
endfunc_x264
/*
 * void load_deinterleave_chroma_fdec(pixel *dst, pixel *src,
 *                                    intptr_t i_src, int height)
 * Identical to the fenc variant but targets the fdec buffer layout
 * (plane split at FDEC_STRIDE/2, rows FDEC_STRIDE apart).
 */
function_x264 load_deinterleave_chroma_fdec_lsx
addi.d t0, a0, FDEC_STRIDE/2  // t0 = destination for the odd (V) bytes
andi t1, a3, 1                // odd-row tail flag
sub.w t2, a3, t1
.loop_deinterleave_fdec:
vld vr0, a1, 0                // row 0, interleaved
vldx vr1, a1, a2              // row 1
vpickev.b vr2, vr1, vr0       // even bytes
vpickod.b vr3, vr1, vr0       // odd bytes
fst.d f2, a0, 0
fst.d f3, t0, 0
vstelm.d vr2, a0, FDEC_STRIDE, 1
vstelm.d vr3, t0, FDEC_STRIDE, 1
addi.d a0, a0, FDEC_STRIDE * 2
addi.d t0, t0, FDEC_STRIDE * 2
alsl.d a1, a2, a1, 1
addi.w t2, t2, -2
blt zero, t2, .loop_deinterleave_fdec
beqz t1, .loop_deinterleave_fdec_end
vld vr0, a1, 0                // odd-height tail
vpickev.b vr1, vr0, vr0
vpickod.b vr2, vr0, vr0
fst.d f1, a0, 0
fst.d f2, t0, 0
.loop_deinterleave_fdec_end:
endfunc_x264
/*
 * x264_plane_copy_interleave(pixel *dst, intptr_t i_dst,
 *                            pixel *srcu, intptr_t i_srcu,
 *                            pixel *srcv, intptr_t i_srcv, int w, int h)
 * Interleaves separate U and V planes into one UVUV plane.
 * Inner loop processes 16 U + 16 V bytes -> 32 dst bytes; width is
 * handled in multiples of 16 (a final partial chunk still runs full width,
 * i.e. the caller must guarantee the buffers allow that).
 */
function_x264 plane_copy_interleave_core_lsx
.loop_h:
add.d t0, a0, zero            // per-row working pointers
add.d t2, a2, zero
add.d t4, a4, zero
add.d t6, a6, zero            // t6 = remaining width
.loop_copy_interleavew16:
vld vr0, t2, 0                // 16 U bytes
vld vr1, t4, 0                // 16 V bytes
vilvl.b vr2, vr1, vr0         // low halves interleaved
vilvh.b vr3, vr1, vr0         // high halves interleaved
vst vr2, t0, 0
vst vr3, t0, 16
addi.d t2, t2, 16
addi.d t4, t4, 16
addi.d t0, t0, 32
addi.w t6, t6, -16
blt zero, t6, .loop_copy_interleavew16
add.d a2, a2, a3              // next row of each plane
add.d a4, a4, a5
add.d a0, a0, a1
addi.w a7, a7, -1
blt zero, a7, .loop_h
endfunc_x264
/*
 * void x264_plane_copy_deinterleave(pixel *dsta, intptr_t i_dsta,
 *                                   pixel *dstb, intptr_t i_dstb,
 *                                   pixel *src, intptr_t i_src, int w, int h)
 * Splits an interleaved UVUV plane into two planes: even bytes -> dsta,
 * odd bytes -> dstb. Inner loop consumes 32 src bytes per step.
 */
function_x264 plane_copy_deinterleave_lsx
.LOOP_PLANE_COPY_H:
add.d t0, a0, zero            // per-row working pointers
add.d t2, a2, zero
add.d t4, a4, zero
add.d t6, a6, zero            // t6 = remaining width
.LOOP_PLANE_COPY_W16:
vld vr0, t4, 0                // 32 interleaved source bytes
vld vr1, t4, 16
vpickev.b vr2, vr1, vr0       // even bytes -> plane A
vpickod.b vr3, vr1, vr0       // odd bytes  -> plane B
vst vr2, t0, 0
vst vr3, t2, 0
addi.d t4, t4, 32
addi.d t0, t0, 16
addi.d t2, t2, 16
addi.w t6, t6, -16
blt zero, t6, .LOOP_PLANE_COPY_W16
add.d a2, a2, a3              // next row
add.d a4, a4, a5
add.d a0, a0, a1
addi.w a7, a7, -1
blt zero, a7, .LOOP_PLANE_COPY_H
endfunc_x264
/*
 * 256-bit (LASX) variant of plane_copy_deinterleave: 64 src bytes per
 * inner step. xvpickev/xvpickod operate per 128-bit lane, so xvpermi.d
 * with 0xd8 re-orders the lane halves into contiguous plane data.
 */
function_x264 plane_copy_deinterleave_lasx
.LOOP_PLANE_COPY_H_LASX:
add.d t0, a0, zero            // per-row working pointers
add.d t2, a2, zero
add.d t4, a4, zero
add.d t6, a6, zero            // remaining width
.LOOP_PLANE_COPY_W32_LASX:
xvld xr0, t4, 0               // 64 interleaved source bytes
xvld xr1, t4, 32
xvpickev.b xr2, xr1, xr0      // even bytes (per 128-bit lane)
xvpickod.b xr3, xr1, xr0      // odd bytes
xvpermi.d xr2, xr2, 0xd8      // fix cross-lane ordering
xvpermi.d xr3, xr3, 0xd8
xvst xr2, t0, 0
xvst xr3, t2, 0
addi.d t4, t4, 64
addi.d t0, t0, 32
addi.d t2, t2, 32
addi.w t6, t6, -32
blt zero, t6, .LOOP_PLANE_COPY_W32_LASX
add.d a2, a2, a3
add.d a4, a4, a5
add.d a0, a0, a1
addi.w a7, a7, -1
blt zero, a7, .LOOP_PLANE_COPY_H_LASX
endfunc_x264
/*
 * void prefetch_ref(uint8_t *pix, intptr_t stride, int32_t parity)
 * Issues preld hints for 8 consecutive rows of the reference picture,
 * starting 64 bytes into the row; (parity-1)&stride picks a row-parity
 * dependent starting offset (scaled by 8 via alsl).
 */
function_x264 prefetch_ref_lsx
addi.d a2, a2, -1
addi.d a0, a0, 64
and a2, a2, a1                // a2 = (parity-1) & stride
alsl.d t1, a2, a0, 3          // t1 = pix + 64 + 8*((parity-1)&stride)
alsl.d a2, a1, a1, 1          // a2 = 3*stride
preld 0, t1, 0                // rows 0..3
add.d t2, t1, a1
preld 0, t2, 0
add.d t2, t2, a1
preld 0, t2, 0
add.d t1, t1, a2
preld 0, t1, 0
alsl.d a0, a1, t2, 1          // rows 4..7
preld 0, a0, 0
add.d t1, a0, a1
preld 0, t1, 0
add.d t1, t1, a1
preld 0, t1, 0
add.d a0, a0, a2
preld 0, a0, 0
endfunc_x264
/*
 * void prefetch_fenc_422(uint8_t *pix_y, intptr_t stride_y,
 *                        uint8_t *pix_uv, intptr_t stride_uv,
 *                        int32_t mb_x)
 * Prefetches 4 luma rows and 4 chroma rows of the fenc macroblock.
 * The row offsets depend on mb_x: (mb_x&3)*stride_y for luma,
 * (mb_x&6)*stride_uv for chroma, both scaled by 4 via alsl.
 */
function_x264 prefetch_fenc_422_lsx
andi t0, a4, 3
mul.d t0, t0, a1              // t0 = (mb_x & 3) * stride_y
andi a4, a4, 6
mul.d t1, a4, a3              // t1 = (mb_x & 6) * stride_uv
addi.d a0, a0, 64
addi.d a2, a2, 64
alsl.d a0, t0, a0, 2          // pix_y + 64 + 4*t0
preld 0, a0, 0                // 4 consecutive luma rows
add.d t2, a0, a1
preld 0, t2, 0
add.d a0, t2, a1
preld 0, a0, 0
add.d a0, a0, a1
preld 0, a0, 0
alsl.d a2, t1, a2, 2          // pix_uv + 64 + 4*t1
preld 0, a2, 0                // 4 consecutive chroma rows (4:2:2)
add.d t3, a2, a3
preld 0, t3, 0
add.d a2, t3, a3
preld 0, a2, 0
add.d a2, a2, a3
preld 0, a2, 0
endfunc_x264
/*
 * void prefetch_fenc_420(uint8_t *pix_y, intptr_t stride_y,
 *                        uint8_t *pix_uv, intptr_t stride_uv,
 *                        int32_t mb_x)
 * Same as prefetch_fenc_422 but only 2 chroma rows (half chroma height
 * in 4:2:0).
 */
function_x264 prefetch_fenc_420_lsx
andi t0, a4, 3
mul.d t0, t0, a1              // t0 = (mb_x & 3) * stride_y
andi a4, a4, 6
mul.d t1, a4, a3              // t1 = (mb_x & 6) * stride_uv
addi.d a0, a0, 64
addi.d a2, a2, 64
alsl.d a0, t0, a0, 2
preld 0, a0, 0                // 4 luma rows
add.d t2, a0, a1
preld 0, t2, 0
add.d a0, t2, a1
preld 0, a0, 0
add.d a0, a0, a1
preld 0, a0, 0
alsl.d a2, t1, a2, 2
preld 0, a2, 0                // 2 chroma rows
add.d a2, a2, a3
preld 0, a2, 0
endfunc_x264
/*
 * void *memcpy_aligned(void *dst, const void *src, size_t n)
 * Vector copy for 16-byte-aligned buffers. n is assumed to be a multiple
 * of 16: the first two steps peel off the n%64 remainder (16- and 32-byte
 * chunks), leaving a multiple of 64 for the unrolled main loop.
 * Note: does not return dst in a0 explicitly; a0 is advanced past the copy.
 */
function_x264 memcpy_aligned_lsx
andi t0, a2, 16               // peel a 16-byte chunk if n%32 >= 16
beqz t0, 2f
addi.d a2, a2, -16
vld vr0, a1, 0
vst vr0, a0, 0
addi.d a1, a1, 16
addi.d a0, a0, 16
2:
andi t0, a2, 32               // peel a 32-byte chunk if n%64 >= 32
beqz t0, 3f
addi.d a2, a2, -32
vld vr0, a1, 0
vld vr1, a1, 16
vst vr0, a0, 0
vst vr1, a0, 16
addi.d a1, a1, 32
addi.d a0, a0, 32
3:
beqz a2, 5f                   // nothing left -> done
4:                            // main loop: 64 bytes per iteration
addi.d a2, a2, -64
vld vr0, a1, 48
vld vr1, a1, 32
vld vr2, a1, 16
vld vr3, a1, 0
vst vr0, a0, 48
vst vr1, a0, 32
vst vr2, a0, 16
vst vr3, a0, 0
addi.d a1, a1, 64
addi.d a0, a0, 64
blt zero, a2, 4b
5:
endfunc_x264
/*
 * void memzero_aligned(void *p_dst, size_t n)
 * Zeros n bytes in 128-byte vector chunks. The loop always writes a full
 * 128-byte block, so n is assumed to be a positive multiple of 128.
 */
function_x264 memzero_aligned_lsx
vxor.v vr1, vr1, vr1          // vr1 = 0
.loop_memzero:
addi.d a1, a1, -128
vst vr1, a0, 0
vst vr1, a0, 16
vst vr1, a0, 32
vst vr1, a0, 48
vst vr1, a0, 64
vst vr1, a0, 80
vst vr1, a0, 96
vst vr1, a0, 112
addi.d a0, a0, 128
blt zero, a1, .loop_memzero
endfunc_x264
// Half-pel filter reduction on halfword lanes:
//   s1 = ((((s1 - s2) >> 2) - s2 + s3) >> 2) + s3
// i.e. the classic (a - 5b + 20c)-style combination factored into shifts,
// matching the x86 FILT_H macro in the x264 mc code.
.macro FILT_H_LSX s1, s2, s3
vsub.h \s1, \s1, \s2
vsrai.h \s1, \s1, 2
vsub.h \s1, \s1, \s2
vadd.h \s1, \s1, \s3
vsrai.h \s1, \s1, 2
vadd.h \s1, \s1, \s3
.endm
// Center (hv) filter stage for hpel_filter. Operates on a sliding window of
// three 32-byte column groups, each held as a register pair:
//   s1: s1.0, s2: s2.0, s3: s3.0, s4: s1.1, s5: s2.1, s6: s3.1
// vr23 (shuf_12) and vr26 (shuf_1) are shuffle bases set up by hpel_filter;
// vaddi.bu derives shifted variants of them (offsets noted as //vrNN match
// the register names used by the reference x86 implementation).
// Results are reduced with FILT_H_LSX; the window is shifted left by one
// group (s1<-s3, s4<-s6) for the next iteration.
.macro FILT_C_LSX s1, s2, s3, s4, s5, s6
vaddi.bu vr17, vr23, 2 //vr24
vaddi.bu vr19, vr26, 1 //vr27
vaddi.bu vr18, vr26, 3 //vr29
vshuf.b vr1, \s2, \s4, vr23
vshuf.b vr2, \s2, \s4, vr17
vshuf.b vr3, \s5, \s2, vr18
vshuf.b vr4, \s5, \s2, vr19
vadd.h vr3, vr2, vr3          // pair taps symmetric around the center
vshuf.b vr16, \s5, \s2, vr23
vshuf.b vr17, \s5, \s2, vr17
vshuf.b vr18, \s3, \s5, vr18
vshuf.b vr19, \s3, \s5, vr19
vadd.h vr18, vr17, vr18
vmov vr2, \s5                 // save inputs before they are overwritten
vmov \s1, \s3                 // slide the 3-group window left
vmov vr20, \s3
vmov \s4, \s6
vaddi.bu vr17, vr26, 5 //vr30
vshuf.b \s3, vr2, \s2, vr17
vshuf.b \s6, vr20, \s5, vr17
vadd.h vr4, vr4, \s2
vadd.h \s3, \s3, vr1
vadd.h vr19, vr19, \s5
vadd.h \s6, \s6, vr16
FILT_H_LSX \s3, vr3, vr4      // final 6-tap reduction, both halves
FILT_H_LSX \s6, vr18, vr19
.endm
// Scale two halfword vectors by \s3 (a fixed-point rounding constant such
// as pw_1024), narrow with rounding (>>15), clamp to the unsigned pixel
// range and pack even/odd results back into byte lanes of \s1.
// Clobbers vr16-vr19.
.macro FILT_PACK_LSX s1, s2, s3
vmulwev.w.h vr16, \s1, \s3    // even lanes, widened product
vmulwev.w.h vr17, \s2, \s3
vsrarni.h.w vr17, vr16, 15    // rounding narrow back to halfwords
vmaxi.h vr17, vr17, 0         // clamp negative to 0
vsat.hu vr17, vr17, 7         // clamp to byte range
vmulwod.w.h vr18, \s1, \s3    // odd lanes, same treatment
vmulwod.w.h vr19, \s2, \s3
vsrarni.h.w vr19, vr18, 15
vmaxi.h vr19, vr19, 0
vsat.hu vr19, vr19, 7
vpackev.b \s1, vr19, vr17     // merge even/odd into final byte order
.endm
// Produce one 32-byte row of the center (hv) half-pel plane:
//   s1: s1.0, s2: s2.0, s3: s3.0, s4: s4.0
//   s5: s1.1, s6: s2.1, s7: s3.1, s8: s4.1
// Two FILT_C_LSX passes filter both column groups, FILT_PACK_LSX (with
// vr15 halved by the caller to compensate double scaling) packs to bytes,
// and the result is stored at dstc (a5) + current column offset (a4).
.macro DO_FILT_C_LSX s1, s2, s3, s4, s5, s6, s7, s8
FILT_C_LSX \s1, \s2, \s3, \s5, \s6, \s7
FILT_C_LSX \s2, \s1, \s4, \s6, \s5, \s8
FILT_PACK_LSX \s3, \s4, vr15
FILT_PACK_LSX \s7, \s8, vr15
vilvl.d vr16, \s7, \s3        // reassemble the two halves into 2x16B
vilvh.d vr17, \s7, \s3
addi.d t3, a5, 16
vstx vr16, a5, a4
vstx vr17, t3, a4
.endm
// Produce one 32-byte row of the horizontal half-pel plane (dsth, a0).
// Inputs are a sliding window of three column groups (s1..s3 low halves,
// s4..s6 high halves). The 6-tap filter is evaluated with vdp2 dot
// products against the filt_mul coefficient vectors (vr12/vr14/vr0 loaded
// by hpel_filter); vr3 (hpel_shuf from t5) restores pixel order after
// FILT_PACK_LSX. The window is shifted (s1<-s2<-s3, s4<-s5<-s6) at the end.
.macro DO_FILT_H_LSX s1, s2, s3, s4, s5, s6
vaddi.bu vr16, vr23, 2 //vr24
vaddi.bu vr17, vr23, 3 //vr25
vaddi.bu vr18, vr26, 1 //vr27
vaddi.bu vr19, vr26, 2 //vr28
vld vr3, t5, 0                // hpel_shuf reorder pattern
vshuf.b vr1, \s2, \s4, vr16   // taps at offsets -2/-1
vshuf.b vr2, \s2, \s4, vr17
vshuf.b vr4, \s5, \s2, vr26   // taps at offsets +1/+2/+3
vshuf.b vr5, \s5, \s2, vr18
vshuf.b vr6, \s5, \s2, vr19
vdp2.h.bu.b vr16, vr1, vr12   // dot products with 6-tap coefficients
vdp2.h.bu.b vr17, vr2, vr12
vdp2.h.bu.b vr18, \s2, vr14
vdp2.h.bu.b vr19, vr4, vr14
vdp2.h.bu.b vr20, vr5, vr0
vdp2.h.bu.b vr21, vr6, vr0
vadd.h vr1, vr16, vr18        // accumulate the three partial sums
vadd.h vr2, vr17, vr19
vadd.h vr1, vr1, vr20
vadd.h vr2, vr2, vr21
FILT_PACK_LSX vr1, vr2, vr15
vshuf.b vr1, vr1, vr1, vr3    // restore pixel order
vstx vr1, a0, a4              // first 16 output bytes
vaddi.bu vr16, vr23, 2 //vr24 -- rebuild patterns (clobbered above)
vaddi.bu vr17, vr23, 3 //vr25
vaddi.bu vr18, vr26, 1 //vr27
vaddi.bu vr19, vr26, 2 //vr28
vshuf.b vr1, \s5, \s2, vr16   // same filter on the next column group
vshuf.b vr2, \s5, \s2, vr17
vshuf.b vr4, \s3, \s5, vr26
vshuf.b vr5, \s3, \s5, vr18
vshuf.b vr6, \s3, \s5, vr19
vdp2.h.bu.b vr16, vr1, vr12
vdp2.h.bu.b vr17, vr2, vr12
vdp2.h.bu.b vr18, \s5, vr14
vdp2.h.bu.b vr19, vr4, vr14
vdp2.h.bu.b vr20, vr5, vr0
vdp2.h.bu.b vr21, vr6, vr0
vadd.h vr1, vr16, vr18
vadd.h vr2, vr17, vr19
vadd.h vr1, vr1, vr20
vadd.h vr2, vr2, vr21
FILT_PACK_LSX vr1, vr2, vr15
vshuf.b vr1, vr1, vr1, vr3
addi.d a0, a0, 16             // second 16 output bytes
vstx vr1, a0, a4
addi.d a0, a0, -16
vmov \s1, \s2                 // slide the window for the next column
vmov \s2, \s3
vmov \s4, \s5
vmov \s5, \s6
.endm
/* Vertical 6-tap filter over 6 source rows, first 16-byte column.
 * s1/s2 receive the unpacked halfword sums (reused later by the center
 * filter), s3 is a temp, s4 is UNUSED, s5 is the column byte offset (imm).
 * Rows come from a1/a3 (three rows each via stride a2); coefficients
 * vr12/vr0/vr14 are the filt_mul vectors loaded by hpel_filter.
 * The packed byte result goes to dstv (t0) + a4 + s5. */
.macro DO_FILT_V0_LSX s1, s2, s3, s4, s5
alsl.d t1, a2, a1, 1 /* t1 = a1 + 2 * a2 */
alsl.d t2, a2, a3, 1 /* t2 = a3 + 2 * a2 */
vld vr1, a3, 0                // rows -2..0 (outer taps)
vldx vr2, a3, a2
vld \s3, t2, 0
vld vr3, a1, 0                // rows +1..+3
vldx \s1, a1, a2
vld \s2, t1, 0
vilvh.b vr16, vr2, vr1        // pair rows for the vdp2 dot products
vilvl.b vr17, vr2, vr1
vilvh.b vr18, \s2, \s1
vilvl.b vr19, \s2, \s1
vilvh.b vr20, \s3, vr3
vilvl.b vr21, \s3, vr3
vdp2.h.bu.b vr1, vr17, vr12   // partial sums per row pair
vdp2.h.bu.b vr4, vr16, vr12
vdp2.h.bu.b \s1, vr19, vr0
vdp2.h.bu.b vr2, vr18, vr0
vdp2.h.bu.b vr3, vr21, vr14
vdp2.h.bu.b \s2, vr20, vr14
vadd.h vr1, vr1, \s1          // total 6-tap sums (low/high halves)
vadd.h vr4, vr4, vr2
vadd.h vr1, vr1, vr3
vadd.h vr4, vr4, \s2
vmov \s1, vr1                 // keep the halfword sums for DO_FILT_C
vmov \s2, vr4
addi.d a3, a3, 16
addi.d a1, a1, 16
FILT_PACK_LSX vr1, vr4, vr15  // pack to bytes for the dstv plane
addi.d t3, a4, \s5
vstx vr1, t0, t3
.endm
// Second 16-byte column of the vertical filter; identical to
// DO_FILT_V0_LSX except the t1/t2 row pointers are reused (set by V0)
// with a +16 byte offset, and the store lands 16 bytes further on.
.macro DO_FILT_V1_LSX s1, s2, s3, s4, s5
vld vr1, a3, 0
vldx vr2, a3, a2
vld \s3, t2, 16               // +16: second column of the V0 rows
vld vr3, a1, 0
vldx \s1, a1, a2
vld \s2, t1, 16
vilvh.b vr16, vr2, vr1
vilvl.b vr17, vr2, vr1
vilvh.b vr18, \s2, \s1
vilvl.b vr19, \s2, \s1
vilvh.b vr20, \s3, vr3
vilvl.b vr21, \s3, vr3
vdp2.h.bu.b vr1, vr17, vr12
vdp2.h.bu.b vr4, vr16, vr12
vdp2.h.bu.b \s1, vr19, vr0
vdp2.h.bu.b vr2, vr18, vr0
vdp2.h.bu.b vr3, vr21, vr14
vdp2.h.bu.b \s2, vr20, vr14
vadd.h vr1, vr1, \s1
vadd.h vr4, vr4, vr2
vadd.h vr1, vr1, vr3
vadd.h vr4, vr4, \s2
vmov \s1, vr1                 // keep halfword sums for the center filter
vmov \s2, vr4
addi.d a3, a3, 16
addi.d a1, a1, 16
FILT_PACK_LSX vr1, vr4, vr15
addi.d t3, a4, \s5
addi.d t3, t3, 16             // second 16-byte column of dstv
vstx vr1, t0, t3
.endm
/*
 * void hpel_filter( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc,
 *                   uint8_t *src, intptr_t stride, int width, int height )
 * Generates the three half-pel interpolation planes. Structure mirrors the
 * x86 implementation: per row, DO_FILT_V0/V1 produce vertical results (and
 * keep halfword sums in registers), DO_FILT_C combines them into the
 * center plane, DO_FILT_H produces the horizontal plane, all in 32-pixel
 * column steps driven by a negative offset a4 counting up to 0.
 * vr15 (pw_1024) is temporarily halved around DO_FILT_C to compensate for
 * the center path's extra scaling.
 */
function_x264 hpel_filter_lsx
addi.d sp, sp, -64            // save callee-saved FP/vector low halves
fst.d f24, sp, 0
fst.d f25, sp, 8
fst.d f26, sp, 16
fst.d f27, sp, 24
fst.d f28, sp, 32
fst.d f29, sp, 40
fst.d f30, sp, 48
fst.d f31, sp, 56
move a7, a3
addi.d a5, a5, -32
move t0, a1                   // t0 = dstv base
andi a7, a7, 31               // src alignment remainder
sub.d a3, a3, a7              // align src down to 32
add.d a0, a0, a5              // point dst pointers at the row end;
add.d t0, t0, a5              // columns are addressed as end + a4 (a4 < 0)
add.d a7, a7, a5
add.d a5, a5, a2              // a5 = dstc + width-32
move a2, a4                   // a2 = stride
sub.d a7, zero, a7            // a7 = initial (negative) column offset
add.d a1, a3, a2              // a1 = src + stride
sub.d a3, a3, a2              // a3 = src - 2*stride
sub.d a3, a3, a2
move a4, a7
la.local t1, filt_mul51       // 6-tap coefficient vectors for vdp2
vld vr0, t1, 0
la.local t2, filt_mul15
vld vr12, t2, 0
la.local t3, filt_mul20
vld vr14, t3, 0
la.local t4, pw_1024          // rounding/scale constant for FILT_PACK
vld vr15, t4, 0
la.local t5, hpel_shuf        // t5 kept as pointer; loaded in DO_FILT_H
la.local t2, shuf_12          // shuffle bases used by FILT_C/DO_FILT_H
vld vr23, t2, 0
la.local t3, shuf_1
vld vr26, t3, 0
vxor.v vr9, vr9, vr9          // zero the sliding-window history registers
vxor.v vr10, vr10, vr10
vxor.v vr11, vr11, vr11
vxor.v vr13, vr13, vr13
.LOOPY_LSX:
DO_FILT_V0_LSX vr24, vr25, vr31, vr12, 0   // prime first column group
DO_FILT_V1_LSX vr8, vr7, vr22, vr12, 0
.LOOPX_LSX:
DO_FILT_V0_LSX vr27, vr28, vr29, vr12, 32  // next column group (lookahead)
DO_FILT_V1_LSX vr6, vr5, vr30, vr12, 32
.LSTX:
vsrli.h vr15, vr15, 1         // halve scale for the center filter
DO_FILT_C_LSX vr9, vr24, vr8, vr27, vr10, vr25, vr7, vr28
vadd.h vr15, vr15, vr15       // restore scale
vmov vr8, vr6                 // shift vertical-sum window
vmov vr7, vr5
DO_FILT_H_LSX vr11, vr31, vr29, vr13, vr22, vr30
addi.d a4, a4, 32             // next 32-pixel column
blt a4, zero, .LOOPX_LSX
addi.d t1, a4, -32            // last column runs .LSTX without lookahead
blt t1, zero, .LSTX
//setup regs for next y
sub.d a4, a4, a7
sub.d a4, a4, a2
sub.d a1, a1, a4              // rewind src pointers to row start + stride
sub.d a3, a3, a4
add.d a0, a0, a2              // advance all dst pointers one row
add.d t0, t0, a2
add.d a5, a5, a2
move a4, a7                   // reset column offset
addi.d a6, a6, -1
blt zero, a6, .LOOPY_LSX
fld.d f24, sp, 0              // restore callee-saved registers
fld.d f25, sp, 8
fld.d f26, sp, 16
fld.d f27, sp, 24
fld.d f28, sp, 32
fld.d f29, sp, 40
fld.d f30, sp, 48
fld.d f31, sp, 56
addi.d sp, sp, 64
endfunc_x264
/*
 * void frame_init_lowres_core(pixel *src0, pixel *dst0, pixel *dsth,
 *                             pixel *dstv, pixel *dstc, intptr_t src_stride,
 *                             intptr_t dst_stride, int width, int height)
 * Builds the 4 half-resolution planes used for lookahead: each output
 * pixel is a 2x2 rounded average of the source; dsth/dstv/dstc are the
 * same downsample shifted by half a pel horizontally / vertically / both.
 * Height (9th argument) is read from the caller's stack before the local
 * frame is pushed. Each outer iteration consumes 2 source rows; the inner
 * width is handled by progressively narrower loops (16/8/4/2/1 outputs).
 */
function_x264 frame_init_lowres_core_lsx
addi.d t0, zero, 15           // width-remainder thresholds for each loop
addi.d t1, zero, 7
addi.d t2, zero, 3
addi.d t3, zero, 1
ld.d t4, sp, 0                // t4 = height (8th stack argument)
addi.d sp, sp, -16
st.d s0, sp, 0                // s0/s1 are callee-saved
st.d s1, sp, 8
slli.d s0, a5, 1              // s0 = 2*src_stride (row advance per output row)
.LOOPH:
bge zero, t4, .ENDLOOPH
addi.d t4, t4, -1
add.d t5, a0, a5              // t5 = src row 1
add.d t7, t5, a5              // t7 = src row 2
move t6, a7                   // t6 = remaining output width
.LOOPW16:
bge t0, t6, .LOOPW8           // need > 15 outputs for the 16-wide path
vld vr0, a0, 0                // rows 0/1/2 at x and x+1, two 16B chunks
vld vr1, t5, 0
vld vr2, t7, 0
vld vr3, a0, 1
vld vr4, t5, 1
vld vr5, t7, 1
vld vr6, a0, 16
vld vr7, t5, 16
vld vr8, t7, 16
vld vr9, a0, 17
vld vr10, t5, 17
vld vr11, t7, 17
// Calculate dst0, dsth, dstv and dstc
vavgr.bu vr12, vr0, vr1       // vertical averages: rows 0+1 -> dst0/dsth
vavgr.bu vr13, vr1, vr2       // rows 1+2 -> dstv/dstc
vavgr.bu vr14, vr3, vr4       // same, shifted one pixel right
vavgr.bu vr15, vr4, vr5
vavgr.bu vr16, vr6, vr7
vavgr.bu vr17, vr7, vr8
vavgr.bu vr18, vr9, vr10
vavgr.bu vr19, vr10, vr11
vhaddw.hu.bu vr12, vr12, vr12 // horizontal pair sums
vhaddw.hu.bu vr13, vr13, vr13
vhaddw.hu.bu vr14, vr14, vr14
vhaddw.hu.bu vr15, vr15, vr15
vhaddw.hu.bu vr16, vr16, vr16
vhaddw.hu.bu vr17, vr17, vr17
vhaddw.hu.bu vr18, vr18, vr18
vhaddw.hu.bu vr19, vr19, vr19
vssrarni.bu.h vr13, vr12, 1   // (sum+1)>>1, pack plane pairs
vssrarni.bu.h vr15, vr14, 1
vssrarni.bu.h vr17, vr16, 1
vssrarni.bu.h vr19, vr18, 1
vilvl.d vr12, vr17, vr13      // assemble 16 bytes per plane
vilvl.d vr14, vr19, vr15
vilvh.d vr13, vr17, vr13
vilvh.d vr15, vr19, vr15
vst vr12, a1, 0               // dst0
vst vr14, a2, 0               // dsth
vst vr13, a3, 0               // dstv
vst vr15, a4, 0               // dstc
addi.d a1, a1, 16
addi.d a2, a2, 16
addi.d a3, a3, 16
addi.d a4, a4, 16
addi.d a0, a0, 32             // 32 src pixels consumed per 16 outputs
addi.d t5, t5, 32
addi.d t7, t7, 32
addi.d t6, t6, -16
b .LOOPW16
.LOOPW8:
bge t1, t6, .LOOPW4           // need > 7 outputs
vld vr0, a0, 0
vld vr1, t5, 0
vld vr2, t7, 0
vld vr3, a0, 1
vld vr4, t5, 1
vld vr5, t7, 1
// Calculate dst0, dsth, dstv and dstc
vavgr.bu vr12, vr0, vr1
vavgr.bu vr13, vr1, vr2
vavgr.bu vr14, vr3, vr4
vavgr.bu vr15, vr4, vr5
vhaddw.hu.bu vr12, vr12, vr12
vhaddw.hu.bu vr13, vr13, vr13
vhaddw.hu.bu vr14, vr14, vr14
vhaddw.hu.bu vr15, vr15, vr15
vssrarni.bu.h vr13, vr12, 1
vssrarni.bu.h vr15, vr14, 1
vstelm.d vr13, a1, 0, 0       // dst0: 8 bytes
vstelm.d vr15, a2, 0, 0       // dsth
vstelm.d vr13, a3, 0, 1       // dstv
vstelm.d vr15, a4, 0, 1       // dstc
addi.d a1, a1, 8
addi.d a2, a2, 8
addi.d a3, a3, 8
addi.d a4, a4, 8
addi.d a0, a0, 16
addi.d t5, t5, 16
addi.d t7, t7, 16
addi.d t6, t6, -8
b .LOOPW8
.LOOPW4:
bge t2, t6, .LOOPW2           // need > 3 outputs
vld vr0, a0, 0
vld vr1, t5, 0
vld vr2, t7, 0
vld vr3, a0, 1
vld vr4, t5, 1
vld vr5, t7, 1
// Calculate dst0, dsth, dstv and dstc
vavgr.bu vr12, vr0, vr1
vavgr.bu vr13, vr1, vr2
vavgr.bu vr14, vr3, vr4
vavgr.bu vr15, vr4, vr5
vhaddw.hu.bu vr12, vr12, vr12
vhaddw.hu.bu vr13, vr13, vr13
vhaddw.hu.bu vr14, vr14, vr14
vhaddw.hu.bu vr15, vr15, vr15
vssrarni.bu.h vr13, vr12, 1
vssrarni.bu.h vr15, vr14, 1
vstelm.w vr13, a1, 0, 0       // 4 bytes per plane
vstelm.w vr15, a2, 0, 0
vstelm.w vr13, a3, 0, 2
vstelm.w vr15, a4, 0, 2
addi.d a1, a1, 4
addi.d a2, a2, 4
addi.d a3, a3, 4
addi.d a4, a4, 4
addi.d a0, a0, 8
addi.d t5, t5, 8
addi.d t7, t7, 8
addi.d t6, t6, -4
b .LOOPW4
.LOOPW2:
bge t3, t6, .LOOPW1           // need > 1 output
vld vr0, a0, 0
vld vr1, t5, 0
vld vr2, t7, 0
vld vr3, a0, 1
vld vr4, t5, 1
vld vr5, t7, 1
// Calculate dst0, dsth, dstv and dstc
vavgr.bu vr12, vr0, vr1
vavgr.bu vr13, vr1, vr2
vavgr.bu vr14, vr3, vr4
vavgr.bu vr15, vr4, vr5
vhaddw.hu.bu vr12, vr12, vr12
vhaddw.hu.bu vr13, vr13, vr13
vhaddw.hu.bu vr14, vr14, vr14
vhaddw.hu.bu vr15, vr15, vr15
vssrarni.bu.h vr13, vr12, 1
vssrarni.bu.h vr15, vr14, 1
vstelm.h vr13, a1, 0, 0       // 2 bytes per plane
vstelm.h vr15, a2, 0, 0
vstelm.h vr13, a3, 0, 4
vstelm.h vr15, a4, 0, 4
addi.d a1, a1, 2
addi.d a2, a2, 2
addi.d a3, a3, 2
addi.d a4, a4, 2
addi.d a0, a0, 4
addi.d t5, t5, 4
addi.d t7, t7, 4
addi.d t6, t6, -2
b .LOOPW2
.LOOPW1:
bge zero, t6, .ENDLOOPW1      // single trailing output, if any
vld vr0, a0, 0
vld vr1, t5, 0
vld vr2, t7, 0
vld vr3, a0, 1
vld vr4, t5, 1
vld vr5, t7, 1
// Calculate dst0, dsth, dstv and dstc
vavgr.bu vr12, vr0, vr1
vavgr.bu vr13, vr1, vr2
vavgr.bu vr14, vr3, vr4
vavgr.bu vr15, vr4, vr5
vhaddw.hu.bu vr12, vr12, vr12
vhaddw.hu.bu vr13, vr13, vr13
vhaddw.hu.bu vr14, vr14, vr14
vhaddw.hu.bu vr15, vr15, vr15
vssrarni.bu.h vr13, vr12, 1
vssrarni.bu.h vr15, vr14, 1
vstelm.b vr13, a1, 0, 0       // 1 byte per plane
vstelm.b vr15, a2, 0, 0
vstelm.b vr13, a3, 0, 8
vstelm.b vr15, a4, 0, 8
.ENDLOOPW1:
sub.d s1, a7, t6              // s1 = outputs produced this row
sub.d a0, a0, s1              // rewind src by 2*s1 (2 px per output),
sub.d a0, a0, s1              // then advance two source rows
add.d a0, a0, s0
sub.d a1, a1, s1              // rewind each dst and advance one dst row
add.d a1, a1, a6
sub.d a2, a2, s1
add.d a2, a2, a6
sub.d a3, a3, s1
add.d a3, a3, a6
sub.d a4, a4, s1
add.d a4, a4, a6
b .LOOPH
.ENDLOOPH:
ld.d s0, sp, 0                // restore callee-saved registers
ld.d s1, sp, 8
addi.d sp, sp, 16
endfunc_x264
#endif /* !HIGH_BIT_DEPTH */
|
aestream/faery
| 57,891
|
src/mp4/x264/common/loongarch/deblock-a.S
|
/*****************************************************************************
* deblock-a.S: loongarch deblock functions
*****************************************************************************
* Copyright (C) 2023-2024 x264 project
*
* Authors: Hao Chen <chenhao@loongson.cn>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "loongson_asm.S"
#include "loongson_util.S"
#if !HIGH_BIT_DEPTH
// Byte-shuffle patterns for the deblock functions below (indices >= 16
// select from the second source operand of vshuf.b).
const shuf_loc_locn
.byte 1, 9, 17, 25, 2, 10, 18, 26, 3, 11, 19, 27, 4, 12, 20, 28
.byte 16, 24, 0, 8, 17, 25, 1, 9, 18, 26, 2, 10, 19, 27, 3, 11
endconst
const shuf_locn
.byte 0, 8, 16, 24, 1, 9, 17, 25, 2, 10, 18, 26, 3, 11, 19, 27
endconst
/* Transpose a 16x6 block of byte elements held across 16 LASX input
 * registers (one pixel row each, low 128 bits used) into 6 output
 * registers of 16 transposed columns. tmp0-7 are scratch; out4/out5 are
 * lane-swapped copies of out0/out1 (xvpermi 0x4E swaps the 128-bit
 * halves) giving the remaining two columns. */
.macro LASX_TRANSPOSE in0, in1, in2, in3, in4, in5, in6, in7, \
in8, in9, in10, in11, in12, in13, in14, in15,\
tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7,\
out0, out1, out2, out3, out4, out5
xvilvl.b \tmp0, \in1, \in0
xvilvl.b \tmp1, \in3, \in2
xvilvl.b \tmp2, \in5, \in4
xvilvl.b \tmp3, \in7, \in6
xvilvl.b \tmp4, \in9, \in8
xvilvl.b \tmp5, \in11, \in10
xvilvl.b \tmp6, \in13, \in12
xvilvl.b \tmp7, \in15, \in14
xvpermi.d \tmp0, \tmp0, 0xD8  // gather interleave results per lane
xvpermi.d \tmp1, \tmp1, 0xD8
xvpermi.d \tmp2, \tmp2, 0xD8
xvpermi.d \tmp3, \tmp3, 0xD8
xvpermi.d \tmp4, \tmp4, 0xD8
xvpermi.d \tmp5, \tmp5, 0xD8
xvpermi.d \tmp6, \tmp6, 0xD8
xvpermi.d \tmp7, \tmp7, 0xD8
xvilvl.h \out0, \tmp1, \tmp0  // widen the interleave: bytes -> halfwords
xvilvl.h \out1, \tmp3, \tmp2
xvilvl.h \out2, \tmp5, \tmp4
xvilvl.h \out3, \tmp7, \tmp6
xvilvl.w \tmp0, \out1, \out0  // -> words
xvilvh.w \tmp1, \out1, \out0
xvilvl.w \tmp2, \out3, \out2
xvilvh.w \tmp3, \out3, \out2
xvilvl.d \out0, \tmp2, \tmp0  // -> doublewords: transposed columns
xvilvh.d \out1, \tmp2, \tmp0
xvilvl.d \out2, \tmp3, \tmp1
xvilvh.d \out3, \tmp3, \tmp1
xvpermi.d \out4, \out0, 0x4E  // swap 128-bit halves for last two outputs
xvpermi.d \out5, \out1, 0x4E
.endm
/*
 * void deblock_h_luma_lasx(Pixel *pix, intptr_t stride, int alpha,
 *                          int beta, int8_t *tc0)
 * Filter a vertical luma edge (horizontal filtering across columns).
 * In:  a0 = pix, a1 = stride, a2 = alpha, a3 = beta, a4 = tc0[4]
 * Clobbers t0-t6 and xr0-xr31; callee-saved f24-f31 are saved/restored
 * on a 64-byte stack frame.
 */
function_x264 deblock_h_luma_lasx
// t0 = 2*stride, t2 = 4*stride, t1 = 3*stride
slli.d t0, a1, 1
slli.d t2, a1, 2
// Broadcast the four tc0 bytes; xr2 = beta splat.
xvldrepl.w xr1, a4, 0
add.d t1, t0, a1
xvreplgr2vr.b xr2, a3
xvilvl.b xr1, xr1, xr1
// Store registers to the stack
addi.d sp, sp, -64
fst.d f24, sp, 0
fst.d f25, sp, 8
fst.d f26, sp, 16
fst.d f27, sp, 24
fst.d f28, sp, 32
fst.d f29, sp, 40
fst.d f30, sp, 48
fst.d f31, sp, 56
// Load data from pix
addi.d t4, a0, -3
FLDD_LOADX_4 t4, a1, t0, t1, f10, f11, f12, f13
add.d t5, t4, t2
FLDD_LOADX_4 t5, a1, t0, t1, f14, f15, f16, f17
add.d t5, t5, t2
FLDD_LOADX_4 t5, a1, t0, t1, f20, f21, f22, f23
add.d t6, t5, t2
FLDD_LOADX_4 t6, a1, t0, t1, f24, f25, f26, f27
// Transpose the 16 rows of 8 bytes (columns pix[-3..+4]) so that each
// output row is one pixel column; xr10..xr15 presumably hold the
// p2,p1,p0,q0,q1,q2 columns — TODO confirm against the C reference.
LASX_TRANSPOSE xr10, xr11, xr12, xr13, xr14, xr15, xr16, xr17, \
xr20, xr21, xr22, xr23, xr24, xr25, xr26, xr27, \
xr8, xr9, xr18, xr19, xr28, xr29, xr30, xr31, \
xr10, xr11, xr12, xr13, xr14, xr15
xvilvl.h xr1, xr1, xr1
// Widen the six rows to unsigned halfwords; xr3 = sign-extended tc.
vext2xv.hu.bu xr20, xr10
vext2xv.hu.bu xr21, xr11
vext2xv.hu.bu xr22, xr12
vext2xv.hu.bu xr23, xr13
vext2xv.hu.bu xr24, xr14
vext2xv.hu.bu xr25, xr15
vext2xv.h.b xr3, xr1
xvadd.h xr26, xr22, xr23
xvsrari.h xr26, xr26, 1
xvneg.h xr4, xr3
xvadd.h xr27, xr20, xr26
xvadd.h xr28, xr25, xr26
xvsub.h xr29, xr23, xr22
xvsrai.h xr27, xr27, 1
xvsrai.h xr28, xr28, 1
xvslli.h xr29, xr29, 2
xvsub.h xr30, xr21, xr24
xvsub.h xr27, xr27, xr21
xvsub.h xr28, xr28, xr24
xvadd.h xr29, xr29, xr30
// Clamp the filter delta terms to [-tc, tc].
xvclip.h xr27, xr27, xr4, xr3
xvclip.h xr28, xr28, xr4, xr3
xvpickev.b xr16, xr25, xr20
xvpickev.b xr17, xr23, xr22
xvabsd.bu xr5, xr16, xr17
// Widen tc where the |p2-p0| / |q2-q0| < beta tests pass (tc++ per side).
xvaddi.hu xr6, xr3, 1
xvslt.bu xr5, xr5, xr2
xvilvl.b xr30, xr5, xr5
xvilvh.b xr31, xr5, xr5
xvbitsel.v xr3, xr3, xr6, xr30
xvsrari.h xr29, xr29, 3
xvaddi.hu xr6, xr3, 1
xvbitsel.v xr3, xr3, xr6, xr31
xvneg.h xr4, xr3
xvclip.h xr29, xr29, xr4, xr3
xvadd.h xr30, xr21, xr27
xvadd.h xr18, xr24, xr28
xvadd.h xr19, xr22, xr29
xvsub.h xr26, xr23, xr29
xvssrarni.bu.h xr26, xr19, 0
xvpickev.b xr25, xr18, xr30
xvpickev.b xr27, xr24, xr21
xvpickev.b xr28, xr23, xr22
xvpickev.b xr18, xr22, xr21
// Build the enable mask from the alpha/beta edge tests.
xvabsd.bu xr19, xr18, xr17
xvreplgr2vr.b xr30, a2
xvilvl.d xr31, xr30, xr2
xvabsd.bu xr20, xr14, xr13
xvslt.bu xr19, xr19, xr31
xvslt.bu xr20, xr20, xr2
xvbitsel.v xr25, xr27, xr25, xr5
xvpermi.d xr20, xr20, 0x50
xvand.v xr21, xr20, xr19
xvpermi.d xr7, xr21, 0xB1
xvand.v xr21, xr21, xr7
xvbitsel.v xr25, xr27, xr25, xr21
xvpermi.d xr1, xr1, 0x50
xvbitsel.v xr26, xr28, xr26, xr21
// tc0 < 0 means "leave this 4-pixel group untouched".
xvslti.b xr30, xr1, 0
xvbitsel.v xr25, xr25, xr27, xr30
xvbitsel.v xr26, xr26, xr28, xr30
// Re-interleave the filtered p1,p0,q0,q1 columns back into row order.
xvilvl.b xr10, xr26, xr25
xvilvh.b xr20, xr25, xr26
xvilvl.h xr21, xr20, xr10
xvilvh.h xr22, xr20, xr10
// Store data to pix
addi.d t5, a0, -2
xvstelm.w xr21, t5, 0, 0
add.d t5, t5, a1
xvstelm.w xr21, t5, 0, 1
add.d t5, t5, a1
xvstelm.w xr21, t5, 0, 2
add.d t5, t5, a1
xvstelm.w xr21, t5, 0, 3
add.d t5, t5, a1
xvstelm.w xr22, t5, 0, 0
add.d t5, t5, a1
xvstelm.w xr22, t5, 0, 1
add.d t5, t5, a1
xvstelm.w xr22, t5, 0, 2
add.d t5, t5, a1
xvstelm.w xr22, t5, 0, 3
add.d t5, t5, a1
xvstelm.w xr21, t5, 0, 4
add.d t5, t5, a1
xvstelm.w xr21, t5, 0, 5
add.d t5, t5, a1
xvstelm.w xr21, t5, 0, 6
add.d t5, t5, a1
xvstelm.w xr21, t5, 0, 7
add.d t5, t5, a1
xvstelm.w xr22, t5, 0, 4
add.d t5, t5, a1
xvstelm.w xr22, t5, 0, 5
add.d t5, t5, a1
xvstelm.w xr22, t5, 0, 6
add.d t5, t5, a1
xvstelm.w xr22, t5, 0, 7
// Restore callee-saved FP registers and release the frame.
fld.d f24, sp, 0
fld.d f25, sp, 8
fld.d f26, sp, 16
fld.d f27, sp, 24
fld.d f28, sp, 32
fld.d f29, sp, 40
fld.d f30, sp, 48
fld.d f31, sp, 56
addi.d sp, sp, 64
endfunc_x264
/*
 * void deblock_v_luma_lasx(Pixel *pix, intptr_t stride,
 *                          int alpha, int beta, int8_t *tc0)
 * Filter a horizontal luma edge (vertical filtering down rows).
 * In:  a0 = pix, a1 = stride, a2 = alpha, a3 = beta, a4 = tc0[4]
 * Clobbers t0, t1, t5 and xr0-xr31; callee-saved f24-f31 are
 * saved/restored on a 64-byte stack frame.
 */
function_x264 deblock_v_luma_lasx
// t0 = 2*stride, t1 = 3*stride
slli.d t0, a1, 1
// Load data from tc0
xvldrepl.w xr1, a4, 0
add.d t1, t0, a1
xvreplgr2vr.b xr2, a3
xvilvl.b xr1, xr1, xr1
// Load data from pix
// Rows p2,p1,p0 above the edge, q0,q1,q2 below.
sub.d t5, a0, t1
vld vr10, t5, 0
vldx vr11, t5, a1
vldx vr12, t5, t0
vld vr13, a0, 0
vldx vr14, a0, a1
vldx vr15, a0, t0
// Store registers to the stack
addi.d sp, sp, -64
fst.d f24, sp, 0
fst.d f25, sp, 8
fst.d f26, sp, 16
fst.d f27, sp, 24
fst.d f28, sp, 32
fst.d f29, sp, 40
fst.d f30, sp, 48
fst.d f31, sp, 56
xvilvl.h xr1, xr1, xr1
// Widen p2..q2 to unsigned halfwords; xr3 = sign-extended tc.
vext2xv.hu.bu xr20, xr10
vext2xv.hu.bu xr21, xr11
vext2xv.hu.bu xr22, xr12
vext2xv.hu.bu xr23, xr13
vext2xv.hu.bu xr24, xr14
vext2xv.hu.bu xr25, xr15
vext2xv.h.b xr3, xr1
xvadd.h xr26, xr22, xr23
xvsrari.h xr26, xr26, 1
xvneg.h xr4, xr3
xvadd.h xr27, xr20, xr26
xvadd.h xr28, xr25, xr26
xvsub.h xr29, xr23, xr22
xvsrai.h xr27, xr27, 1
xvsrai.h xr28, xr28, 1
xvslli.h xr29, xr29, 2
xvsub.h xr30, xr21, xr24
xvsub.h xr27, xr27, xr21
xvsub.h xr28, xr28, xr24
xvadd.h xr29, xr29, xr30
// Clamp the filter delta terms to [-tc, tc].
xvclip.h xr27, xr27, xr4, xr3
xvclip.h xr28, xr28, xr4, xr3
xvpickev.b xr16, xr25, xr20
xvpickev.b xr17, xr23, xr22
xvabsd.bu xr5, xr16, xr17
// Widen tc where the |p2-p0| / |q2-q0| < beta tests pass.
xvaddi.hu xr6, xr3, 1
xvslt.bu xr5, xr5, xr2
xvilvl.b xr30, xr5, xr5
xvilvh.b xr31, xr5, xr5
xvbitsel.v xr3, xr3, xr6, xr30
xvsrari.h xr29, xr29, 3
xvaddi.hu xr6, xr3, 1
xvbitsel.v xr3, xr3, xr6, xr31
xvneg.h xr4, xr3
xvclip.h xr29, xr29, xr4, xr3
xvadd.h xr30, xr21, xr27
xvadd.h xr18, xr24, xr28
xvadd.h xr19, xr22, xr29
xvsub.h xr26, xr23, xr29
xvssrarni.bu.h xr26, xr19, 0
xvpickev.b xr25, xr18, xr30
xvpickev.b xr27, xr24, xr21
xvpickev.b xr28, xr23, xr22
xvpickev.b xr18, xr22, xr21
// Build the enable mask from the alpha/beta edge tests.
xvabsd.bu xr19, xr18, xr17
xvreplgr2vr.b xr30, a2
xvilvl.d xr31, xr30, xr2
xvabsd.bu xr20, xr14, xr13
xvslt.bu xr19, xr19, xr31
xvslt.bu xr20, xr20, xr2
xvbitsel.v xr25, xr27, xr25, xr5
xvpermi.d xr20, xr20, 0x50
xvand.v xr21, xr20, xr19
xvpermi.d xr7, xr21, 0xB1
xvand.v xr21, xr21, xr7
xvbitsel.v xr25, xr27, xr25, xr21
xvpermi.d xr1, xr1, 0x50
xvbitsel.v xr26, xr28, xr26, xr21
// tc0 < 0 means "leave this 4-pixel group untouched".
xvslti.b xr30, xr1, 0
xvbitsel.v xr25, xr25, xr27, xr30
xvbitsel.v xr26, xr26, xr28, xr30
// t5 = pix - 2*stride; split lanes back into the p1,p0,q0,q1 rows.
sub.d t5, a0, t0
xvpermi.d xr0, xr25, 0xd8
xvpermi.d xr1, xr26, 0xd8
xvpermi.d xr2, xr26, 0x8D
xvpermi.d xr3, xr25, 0x8D
// Store data to pix
vst vr0, t5, 0
vstx vr1, t5, a1
vst vr2, a0, 0
vstx vr3, a0, a1
// Restore callee-saved FP registers and release the frame.
fld.d f24, sp, 0
fld.d f25, sp, 8
fld.d f26, sp, 16
fld.d f27, sp, 24
fld.d f28, sp, 32
fld.d f29, sp, 40
fld.d f30, sp, 48
fld.d f31, sp, 56
addi.d sp, sp, 64
endfunc_x264
/*
 * void deblock_v_luma_intra_lasx(Pixel *pix, intptr_t stride,
 *                                int alpha, int beta)
 * Intra (strong) filter for a horizontal luma edge.
 * In:  a0 = pix, a1 = stride, a2 = alpha, a3 = beta
 * Clobbers t0-t2, t5 and xr0-xr31; callee-saved f24-f31 are
 * saved/restored on a 64-byte stack frame.
 */
function_x264 deblock_v_luma_intra_lasx
// t0 = 2*stride, t2 = 4*stride, t1 = 3*stride
slli.d t0, a1, 1
slli.d t2, a1, 2
add.d t1, t0, a1
// Load data from pix
// Rows p3..p0 above the edge, q0..q3 below.
sub.d t5, a0, t2
vld vr9, t5, 0
vldx vr10, t5, a1
vldx vr11, t5, t0
vldx vr12, t5, t1
vld vr13, a0, 0
vldx vr14, a0, a1
vldx vr15, a0, t0
vldx vr16, a0, t1
// Store registers to the stack
addi.d sp, sp, -64
fst.d f24, sp, 0
fst.d f25, sp, 8
fst.d f26, sp, 16
fst.d f27, sp, 24
fst.d f28, sp, 32
fst.d f29, sp, 40
fst.d f30, sp, 48
fst.d f31, sp, 56
xvreplgr2vr.b xr1, a2
xvreplgr2vr.b xr2, a3
// Widen p3..q3 to unsigned halfwords.
vext2xv.hu.bu xr19, xr9
vext2xv.hu.bu xr20, xr10
vext2xv.hu.bu xr21, xr11
vext2xv.hu.bu xr22, xr12
vext2xv.hu.bu xr23, xr13
vext2xv.hu.bu xr24, xr14
vext2xv.hu.bu xr25, xr15
vext2xv.hu.bu xr26, xr16
// Strong-filter candidate values (p0'..p2' and q0'..q2').
xvadd.h xr27, xr21, xr22
xvadd.h xr29, xr19, xr20
xvadd.h xr3, xr27, xr23
xvadd.h xr6, xr27, xr24
xvadd.h xr4, xr3, xr20
xvslli.h xr29, xr29, 1
xvadd.h xr5, xr6, xr4
xvadd.h xr6, xr6, xr21
xvadd.h xr5, xr5, xr23
xvadd.h xr7, xr29, xr4
xvsrari.h xr3, xr4, 2
xvsrari.h xr6, xr6, 2
xvsrari.h xr4, xr5, 3
xvadd.h xr27, xr24, xr23
xvadd.h xr28, xr26, xr25
xvsrari.h xr5, xr7, 3
xvadd.h xr29, xr22, xr27
xvslli.h xr28, xr28, 1
xvadd.h xr7, xr29, xr25
xvadd.h xr17, xr27, xr21
xvadd.h xr8, xr7, xr28
xvadd.h xr18, xr17, xr7
xvadd.h xr17, xr17, xr24
xvadd.h xr18, xr18, xr22
xvsrari.h xr7, xr7, 2
xvsrari.h xr8, xr8, 3
xvsrari.h xr18, xr18, 3
xvsrari.h xr17, xr17, 2
xvpickev.b xr27, xr25, xr20
xvpickev.b xr28, xr24, xr21
xvpickev.b xr29, xr23, xr22
xvpickev.b xr9, xr8, xr5
xvpickev.b xr16, xr7, xr3
// Build the filter-enable masks from the alpha/beta thresholds.
xvabsd.bu xr30, xr27, xr29
xvpickev.b xr19, xr18, xr4
xvpickev.b xr26, xr17, xr6
xvslt.bu xr31, xr30, xr2
xvabsd.bu xr20, xr12, xr13
xvabsd.bu xr21, xr11, xr12
xvabsd.bu xr22, xr14, xr13
// Strong-filter threshold: alpha/4 + 2 (|p0-q0| < alpha/4 + 2).
xvsrli.b xr0, xr1, 2
xvbitsel.v xr19, xr26, xr19, xr31
xvbitsel.v xr9, xr27, xr9, xr31
xvbitsel.v xr16, xr28, xr16, xr31
xvaddi.bu xr0, xr0, 2
xvpermi.d xr20, xr20, 0x50
xvpermi.d xr21, xr21, 0x50
xvpermi.d xr22, xr22, 0x50
xvslt.bu xr10, xr20, xr0
xvslt.bu xr11, xr20, xr1
xvslt.bu xr12, xr21, xr2
xvslt.bu xr13, xr22, xr2
xvand.v xr30, xr11, xr12
xvand.v xr30, xr30, xr13
xvbitsel.v xr9, xr27, xr9, xr10
xvbitsel.v xr16, xr28, xr16, xr10
xvbitsel.v xr19, xr26, xr19, xr10
xvbitsel.v xr9, xr27, xr9, xr30
xvbitsel.v xr16, xr28, xr16, xr30
xvbitsel.v xr19, xr29, xr19, xr30
// Split lanes back into the six output rows p2..q2.
xvpermi.d xr1, xr9, 0xD8
xvpermi.d xr2, xr16, 0xD8
xvpermi.d xr3, xr19, 0xD8
xvpermi.d xr4, xr19, 0x8D
xvpermi.d xr5, xr16, 0x8D
xvpermi.d xr6, xr9, 0x8D
// Store data to pix
vstx vr1, t5, a1
vstx vr2, t5, t0
vstx vr3, t5, t1
vst vr4, a0, 0
vstx vr5, a0, a1
vstx vr6, a0, t0
// Restore register values
fld.d f24, sp, 0
fld.d f25, sp, 8
fld.d f26, sp, 16
fld.d f27, sp, 24
fld.d f28, sp, 32
fld.d f29, sp, 40
fld.d f30, sp, 48
fld.d f31, sp, 56
addi.d sp, sp, 64
endfunc_x264
/*
 * void deblock_h_luma_intra_lasx(Pixel *pix, intptr_t stride,
 *                                int alpha, int beta)
 * Intra (strong) filter for a vertical luma edge.
 * In:  a0 = pix, a1 = stride, a2 = alpha, a3 = beta
 * Clobbers t0-t2, t5 and xr0-xr31; callee-saved f24-f31 are
 * saved/restored on a 64-byte stack frame.
 */
function_x264 deblock_h_luma_intra_lasx
// t0 = 2*stride, t2 = 4*stride, t1 = 3*stride
slli.d t0, a1, 1
slli.d t2, a1, 2
addi.d t5, a0, -4
add.d t1, t0, a1
// Store registers to the stack
addi.d sp, sp, -64
fst.d f24, sp, 0
fst.d f25, sp, 8
fst.d f26, sp, 16
fst.d f27, sp, 24
fst.d f28, sp, 32
fst.d f29, sp, 40
fst.d f30, sp, 48
fst.d f31, sp, 56
// Load data from pix
// 16 rows of 8 bytes starting at pix-4 (columns p3..q3).
FLDD_LOADX_4 t5, a1, t0, t1, f10, f11, f12, f13
add.d t5, t5, t2
FLDD_LOADX_4 t5, a1, t0, t1, f14, f15, f16, f17
add.d t5, t5, t2
FLDD_LOADX_4 t5, a1, t0, t1, f20, f21, f22, f23
add.d t5, t5, t2
FLDD_LOADX_4 t5, a1, t0, t1, f24, f25, f26, f27
// 16x8 byte transpose; LASX_TRANSPOSE16X8_B presumably comes from
// loongson_asm.S — rows xr9..xr16 then hold columns p3..q3 (verify).
LASX_TRANSPOSE16X8_B xr10, xr11, xr12, xr13, xr14, xr15, xr16, xr17, \
xr20, xr21, xr22, xr23, xr24, xr25, xr26, xr27, \
xr9, xr10, xr11, xr12, xr13, xr14, xr15, xr16, \
xr0, xr1, xr2, xr3, xr4, xr5, xr6, xr7
xvreplgr2vr.b xr1, a2
xvreplgr2vr.b xr2, a3
// Widen p3..q3 to unsigned halfwords.
vext2xv.hu.bu xr19, xr9
vext2xv.hu.bu xr20, xr10
vext2xv.hu.bu xr21, xr11
vext2xv.hu.bu xr22, xr12
vext2xv.hu.bu xr23, xr13
vext2xv.hu.bu xr24, xr14
vext2xv.hu.bu xr25, xr15
vext2xv.hu.bu xr26, xr16
// Strong-filter candidate values (p0'..p2' and q0'..q2').
xvadd.h xr27, xr21, xr22
xvadd.h xr29, xr19, xr20
xvadd.h xr3, xr27, xr23
xvadd.h xr6, xr27, xr24
xvadd.h xr4, xr3, xr20
xvslli.h xr29, xr29, 1
xvadd.h xr5, xr6, xr4
xvadd.h xr6, xr6, xr21
xvadd.h xr5, xr5, xr23
xvadd.h xr7, xr29, xr4
xvsrari.h xr3, xr4, 2
xvsrari.h xr6, xr6, 2
xvsrari.h xr4, xr5, 3
xvadd.h xr27, xr24, xr23
xvadd.h xr28, xr26, xr25
xvsrari.h xr5, xr7, 3
xvadd.h xr29, xr22, xr27
xvslli.h xr28, xr28, 1
xvadd.h xr7, xr29, xr25
xvadd.h xr17, xr27, xr21
xvadd.h xr8, xr7, xr28
xvadd.h xr18, xr17, xr7
xvadd.h xr17, xr17, xr24
xvadd.h xr18, xr18, xr22
xvsrari.h xr7, xr7, 2
xvsrari.h xr8, xr8, 3
xvsrari.h xr18, xr18, 3
xvsrari.h xr17, xr17, 2
xvpickev.b xr27, xr25, xr20
xvpickev.b xr28, xr24, xr21
xvpickev.b xr29, xr23, xr22
xvpickev.b xr9, xr8, xr5
xvpickev.b xr16, xr7, xr3
// Build the filter-enable masks from the alpha/beta thresholds.
xvabsd.bu xr30, xr27, xr29
xvpickev.b xr19, xr18, xr4
xvpickev.b xr26, xr17, xr6
xvslt.bu xr31, xr30, xr2
xvabsd.bu xr20, xr12, xr13
xvabsd.bu xr21, xr11, xr12
xvabsd.bu xr22, xr14, xr13
// Strong-filter threshold: alpha/4 + 2 (|p0-q0| < alpha/4 + 2).
xvsrli.b xr0, xr1, 2
xvbitsel.v xr19, xr26, xr19, xr31
xvbitsel.v xr9, xr27, xr9, xr31
xvbitsel.v xr16, xr28, xr16, xr31
xvaddi.bu xr0, xr0, 2
xvpermi.d xr20, xr20, 0x50
xvpermi.d xr21, xr21, 0x50
xvpermi.d xr22, xr22, 0x50
xvslt.bu xr10, xr20, xr0
xvslt.bu xr11, xr20, xr1
xvslt.bu xr12, xr21, xr2
xvslt.bu xr13, xr22, xr2
xvand.v xr30, xr11, xr12
xvand.v xr30, xr30, xr13
xvbitsel.v xr9, xr27, xr9, xr10
xvbitsel.v xr16, xr28, xr16, xr10
xvbitsel.v xr19, xr26, xr19, xr10
xvbitsel.v xr9, xr27, xr9, xr30
xvbitsel.v xr16, xr28, xr16, xr30
xvbitsel.v xr19, xr29, xr19, xr30
// Re-interleave the filtered columns for per-row element stores.
xvilvl.b xr0, xr16, xr9
xvpermi.d xr18, xr19, 0xB1
xvilvh.b xr1, xr9, xr16
xvilvl.b xr2, xr18, xr19
addi.d t5, a0, -3
xvilvl.h xr3, xr2, xr0
xvilvh.h xr4, xr2, xr0
// Store data to pix
// Each row: 4 bytes at pix-3 plus 2 bytes at pix+1 (p2..q2).
xvstelm.w xr3, t5, 0, 0
xvstelm.h xr1, t5, 4, 0
add.d t5, t5, a1
xvstelm.w xr3, t5, 0, 1
xvstelm.h xr1, t5, 4, 1
add.d t5, t5, a1
xvstelm.w xr3, t5, 0, 2
xvstelm.h xr1, t5, 4, 2
add.d t5, t5, a1
xvstelm.w xr3, t5, 0, 3
xvstelm.h xr1, t5, 4, 3
add.d t5, t5, a1
xvstelm.w xr4, t5, 0, 0
xvstelm.h xr1, t5, 4, 4
add.d t5, t5, a1
xvstelm.w xr4, t5, 0, 1
xvstelm.h xr1, t5, 4, 5
add.d t5, t5, a1
xvstelm.w xr4, t5, 0, 2
xvstelm.h xr1, t5, 4, 6
add.d t5, t5, a1
xvstelm.w xr4, t5, 0, 3
xvstelm.h xr1, t5, 4, 7
add.d t5, t5, a1
xvstelm.w xr3, t5, 0, 4
xvstelm.h xr1, t5, 4, 8
add.d t5, t5, a1
xvstelm.w xr3, t5, 0, 5
xvstelm.h xr1, t5, 4, 9
add.d t5, t5, a1
xvstelm.w xr3, t5, 0, 6
xvstelm.h xr1, t5, 4, 10
add.d t5, t5, a1
xvstelm.w xr3, t5, 0, 7
xvstelm.h xr1, t5, 4, 11
add.d t5, t5, a1
xvstelm.w xr4, t5, 0, 4
xvstelm.h xr1, t5, 4, 12
add.d t5, t5, a1
xvstelm.w xr4, t5, 0, 5
xvstelm.h xr1, t5, 4, 13
add.d t5, t5, a1
xvstelm.w xr4, t5, 0, 6
xvstelm.h xr1, t5, 4, 14
add.d t5, t5, a1
xvstelm.w xr4, t5, 0, 7
xvstelm.h xr1, t5, 4, 15
// Restore callee-saved FP registers and release the frame.
fld.d f24, sp, 0
fld.d f25, sp, 8
fld.d f26, sp, 16
fld.d f27, sp, 24
fld.d f28, sp, 32
fld.d f29, sp, 40
fld.d f30, sp, 48
fld.d f31, sp, 56
addi.d sp, sp, 64
endfunc_x264
/*
 * void deblock_strength_lasx( uint8_t nnz[X264_SCAN8_SIZE], int8_t ref[2][X264_SCAN8_LUMA_SIZE],
 *                             int16_t mv[2][X264_SCAN8_LUMA_SIZE][2], uint8_t bs[2][8][4],
 *                             int mvy_limit, int bframe )
 * Compute boundary strengths for both edge directions.
 * In: a0 = nnz, a1 = ref, a2 = mv, a3 = bs, a4 = mvy_limit, a5 = bframe.
 * bs[0] (dir 0) is stored at a3+0, bs[1] (dir 1) at a3+32.
 * (Header previously named the C reference deblock_strength_c; this is
 * the LASX implementation of that function.)
 */
function_x264 deblock_strength_lasx
// dir = 0 s1 = 8 s2 = 1
// vr18/vr19 = splatted bs values 2 and 1; xr20 = mvx limit 4;
// xr21 = splatted mvy_limit.
vldi vr18, 2
vldi vr19, 1
addi.d t0, zero, 4
xvreplgr2vr.h xr20, t0
xvreplgr2vr.h xr21, a4
// nnz test: bs = 2 where nnz[loc] | nnz[locn] is non-zero.
xvld xr0, a0, 11
xvpermi.q xr1, xr0, 0x01
la.local t0, shuf_loc_locn
xvld xr23, t0, 0
xvshuf.b xr4, xr1, xr0, xr23
xvpermi.q xr5, xr4, 0x01
vor.v vr6, vr4, vr5
vseqi.b vr6, vr6, 0
vmov vr15, vr6
vxor.v vr8, vr8, vr8
vbitsel.v vr8, vr18, vr8, vr6
// ref[0] mismatch test.
xvld xr0, a1, 11
xvpermi.q xr1, xr0, 0x01
xvshuf.b xr4, xr1, xr0, xr23
xvpermi.q xr5, xr4, 0x01
vseq.b vr4, vr4, vr5
vseqi.b vr4, vr4, 0
// Gather mv[0] components for loc/locn and compare against the limits.
vld vr0, a2, 44
vld vr1, a2, 76
vld vr5, a2, 108
vld vr6, a2, 140
vilvl.h vr9, vr1, vr0
vilvl.h vr10, vr6, vr5
vilvl.w vr11, vr10, vr9
vilvh.w vr12, vr10, vr9
vilvh.h vr9, vr1, vr0
vilvh.h vr10, vr6, vr5
vilvl.w vr13, vr10, vr9
vilvh.w vr14, vr10, vr9
vilvl.d vr0, vr13, vr12
ld.h t0, a2, 60
ld.h t1, a2, 92
ld.h t2, a2, 124
ld.h t3, a2, 156
vmov vr6, vr14
vinsgr2vr.h vr6, t0, 4
vinsgr2vr.h vr6, t1, 5
vinsgr2vr.h vr6, t2, 6
vinsgr2vr.h vr6, t3, 7
vilvl.d vr1, vr12, vr11
vilvl.d vr5, vr14, vr13
xvpermi.q xr0, xr6, 0x02 // mv[0][loc][0]
xvpermi.q xr5, xr1, 0x20 // mv[0][locn][0]
xvabsd.h xr5, xr0, xr5
xvsle.h xr5, xr20, xr5
vilvh.d vr0, vr13, vr12
ld.h t0, a2, 62
ld.h t1, a2, 94
ld.h t2, a2, 126
ld.h t3, a2, 158
vbsrl.v vr7, vr14, 8
vinsgr2vr.h vr7, t0, 4
vinsgr2vr.h vr7, t1, 5
vinsgr2vr.h vr7, t2, 6
vinsgr2vr.h vr7, t3, 7
vilvh.d vr1, vr12, vr11
vilvh.d vr6, vr14, vr13
xvpermi.q xr0, xr7, 0x02 // mv[0][loc][1]
xvpermi.q xr6, xr1, 0x20 // mv[0][locn][1]
xvabsd.h xr6, xr0, xr6
xvsle.h xr6, xr21, xr6
xvor.v xr5, xr5, xr6
xvpickev.b xr5, xr5, xr5
xvpermi.d xr5, xr5, 0xd8
vor.v vr17, vr4, vr5
beqz a5, .bframe_iszero_0
// bframe != 0
// Repeat the ref/mv tests for list 1 and merge into the bs=1 mask.
xvld xr0, a1, 51
xvpermi.q xr1, xr0, 0x01
xvshuf.b xr4, xr1, xr0, xr23
xvpermi.q xr5, xr4, 0x01
vseq.b vr4, vr4, vr5
vseqi.b vr4, vr4, 0
vld vr0, a2, 204
vld vr1, a2, 236
vld vr5, a2, 268
vld vr6, a2, 300
vilvl.h vr9, vr1, vr0
vilvl.h vr10, vr6, vr5
vilvl.w vr11, vr10, vr9
vilvh.w vr12, vr10, vr9
vilvh.h vr9, vr1, vr0
vilvh.h vr10, vr6, vr5
vilvl.w vr13, vr10, vr9
vilvh.w vr14, vr10, vr9
vilvl.d vr0, vr13, vr12
ld.h t0, a2, 220
ld.h t1, a2, 252
ld.h t2, a2, 284
ld.h t3, a2, 316
vmov vr6, vr14
vinsgr2vr.h vr6, t0, 4
vinsgr2vr.h vr6, t1, 5
vinsgr2vr.h vr6, t2, 6
vinsgr2vr.h vr6, t3, 7
vilvl.d vr1, vr12, vr11
vilvl.d vr5, vr14, vr13
xvpermi.q xr0, xr6, 0x02 // mv[1][loc][0]
xvpermi.q xr5, xr1, 0x20 // mv[1][locn][0]
xvabsd.h xr5, xr0, xr5
xvsle.h xr5, xr20, xr5
vilvh.d vr0, vr13, vr12
ld.h t0, a2, 222
ld.h t1, a2, 254
ld.h t2, a2, 286
ld.h t3, a2, 318
vbsrl.v vr7, vr14, 8
vinsgr2vr.h vr7, t0, 4
vinsgr2vr.h vr7, t1, 5
vinsgr2vr.h vr7, t2, 6
vinsgr2vr.h vr7, t3, 7
vilvh.d vr1, vr12, vr11
vilvh.d vr6, vr14, vr13
xvpermi.q xr0, xr7, 0x02 // mv[1][loc][1]
xvpermi.q xr6, xr1, 0x20 // mv[1][locn][1]
xvabsd.h xr6, xr0, xr6
xvsle.h xr6, xr21, xr6
xvor.v xr5, xr5, xr6
xvpickev.b xr5, xr5, xr5
xvpermi.d xr5, xr5, 0xd8
vor.v vr5, vr5, vr4
vor.v vr17, vr5, vr17
.bframe_iszero_0:
// Select bs: 2 (nnz), else 1 (ref/mv mismatch), else 0; store bs[0].
vxor.v vr22, vr22, vr22
vbitsel.v vr22, vr22, vr19, vr17
vbitsel.v vr22, vr8, vr22, vr15
vst vr22, a3, 0
// dir = 1 s1 = 1 s2 = 8
vld vr0, a0, 4
vld vr1, a0, 20
ld.wu t0, a0, 36
vpickev.w vr2, vr1, vr0
vbsrl.v vr3, vr2, 4
vinsgr2vr.w vr3, t0, 3
vor.v vr2, vr3, vr2
vseqi.b vr2, vr2, 0
vmov vr15, vr2
vxor.v vr3, vr3, vr3
vbitsel.v vr3, vr18, vr3, vr2
vld vr0, a1, 4
vld vr1, a1, 20
ld.w t0, a1, 36
vpickev.w vr2, vr1, vr0
vbsrl.v vr4, vr2, 4
vinsgr2vr.w vr4, t0, 3
vseq.b vr2, vr4, vr2
vseqi.b vr2, vr2, 0
vld vr0, a2, 16
vld vr1, a2, 48
vld vr12, a2, 80
vld vr13, a2, 112
vld vr4, a2, 144
vpickev.h vr5, vr1, vr0
vpickev.h vr14, vr13, vr12
xvpermi.q xr5, xr14, 0x02 // mv[0][locn][0]
vpickev.h vr7, vr4, vr4
xvpermi.d xr6, xr5, 0x39
xvinsve0.d xr6, xr7, 3 // mv[0][loc][0]
xvabsd.h xr5, xr6, xr5
xvsle.h xr5, xr20, xr5
vpickod.h vr6, vr1, vr0
vpickod.h vr14, vr13, vr12
xvpermi.q xr6, xr14, 0x02 // mv[0][locn][1]
vpickod.h vr7, vr4, vr4
xvpermi.d xr8, xr6, 0x39
xvinsve0.d xr8, xr7, 3 // mv[0][loc][1]
xvabsd.h xr6, xr8, xr6
xvsle.h xr6, xr21, xr6
xvor.v xr5, xr6, xr5
xvpickev.b xr6, xr5, xr5
xvpermi.d xr6, xr6, 0xd8
vor.v vr2, vr6, vr2
beqz a5, .bframe_iszero_1
// bframe != 0 ref[1]
vld vr0, a1, 44
vld vr1, a1, 60
ld.w t0, a1, 76
vpickev.w vr0, vr1, vr0
vbsrl.v vr1, vr0, 4
vinsgr2vr.w vr1, t0, 3
vseq.b vr11, vr1, vr0
vseqi.b vr11, vr11, 0
vld vr0, a2, 176
vld vr1, a2, 208
vld vr12, a2, 240
vld vr13, a2, 272
vld vr4, a2, 304
vpickev.h vr5, vr1, vr0
vpickev.h vr14, vr13, vr12
xvpermi.q xr5, xr14, 0x02 // mv[1][locn][0]
vpickev.h vr7, vr4, vr4
xvpermi.d xr6, xr5, 0x39
xvinsve0.d xr6, xr7, 3 // mv[1][loc][0]
xvabsd.h xr5, xr6, xr5
xvsle.h xr5, xr20, xr5
vpickod.h vr6, vr1, vr0
vpickod.h vr14, vr13, vr12
xvpermi.q xr6, xr14, 0x02 // mv[1][locn][1]
vpickod.h vr7, vr4, vr4
xvpermi.d xr8, xr6, 0x39
xvinsve0.d xr8, xr7, 3 // mv[1][loc][1]
xvabsd.h xr6, xr8, xr6
xvsle.h xr6, xr21, xr6
xvor.v xr5, xr6, xr5
xvpickev.b xr6, xr5, xr5
xvpermi.d xr6, xr6, 0xd8
vor.v vr6, vr6, vr11
vor.v vr2, vr6, vr2
.bframe_iszero_1:
// Select bs for dir 1 and store bs[1].
vxor.v vr22, vr22, vr22
vbitsel.v vr22, vr22, vr19, vr2
vbitsel.v vr22, vr3, vr22, vr15
vst vr22, a3, 32
endfunc_x264
/*
 * void deblock_strength_lsx( uint8_t nnz[X264_SCAN8_SIZE], int8_t ref[2][X264_SCAN8_LUMA_SIZE],
 *                            int16_t mv[2][X264_SCAN8_LUMA_SIZE][2], uint8_t bs[2][8][4],
 *                            int mvy_limit, int bframe )
 * 128-bit LSX variant of deblock_strength_lasx; same contract.
 * In: a0 = nnz, a1 = ref, a2 = mv, a3 = bs, a4 = mvy_limit, a5 = bframe.
 * bs[0] (dir 0) is stored at a3+0, bs[1] (dir 1) at a3+32.
 * (Header previously named the C reference deblock_strength_c.)
 */
function_x264 deblock_strength_lsx
// dir = 0 s1 = 8 s2 = 1
// vr18/vr19 = splatted bs values 2 and 1; vr20 = mvx limit 4;
// vr21 = splatted mvy_limit.
vldi vr18, 2
vldi vr19, 1
addi.d t0, zero, 4
vreplgr2vr.h vr20, t0
vreplgr2vr.h vr21, a4
// nnz test: bs = 2 where nnz[loc] | nnz[locn] is non-zero.
vld vr0, a0, 11
vld vr1, a0, 27
la.local t0, shuf_loc_locn
la.local t1, shuf_locn
vld vr2, t0, 0
vld vr3, t1, 0
vshuf.b vr4, vr1, vr0, vr2
vshuf.b vr5, vr1, vr0, vr3
vor.v vr6, vr4, vr5
vseqi.b vr6, vr6, 0
vmov vr15, vr6
vxor.v vr8, vr8, vr8
vbitsel.v vr8, vr18, vr8, vr6
// ref[0] mismatch test.
vld vr0, a1, 11
vld vr1, a1, 27
vshuf.b vr4, vr1, vr0, vr2
vshuf.b vr5, vr1, vr0, vr3
vseq.b vr4, vr4, vr5
vseqi.b vr4, vr4, 0
// Gather mv[0] components for loc/locn and compare against the limits.
vld vr0, a2, 44
vld vr1, a2, 76
vld vr5, a2, 108
vld vr6, a2, 140
vilvl.h vr9, vr1, vr0
vilvl.h vr10, vr6, vr5
vilvl.w vr11, vr10, vr9
vilvh.w vr12, vr10, vr9
vilvh.h vr9, vr1, vr0
vilvh.h vr10, vr6, vr5
vilvl.w vr13, vr10, vr9
vilvh.w vr14, vr10, vr9
vilvl.d vr0, vr13, vr12
ld.h t0, a2, 60
ld.h t1, a2, 92
ld.h t2, a2, 124
ld.h t3, a2, 156
vmov vr6, vr14
vinsgr2vr.h vr6, t0, 4
vinsgr2vr.h vr6, t1, 5
vinsgr2vr.h vr6, t2, 6
vinsgr2vr.h vr6, t3, 7
vilvl.d vr1, vr12, vr11
vilvl.d vr5, vr14, vr13
vabsd.h vr9, vr0, vr1
vabsd.h vr5, vr6, vr5
vsle.h vr9, vr20, vr9
vsle.h vr5, vr20, vr5
vilvh.d vr0, vr13, vr12
ld.h t0, a2, 62
ld.h t1, a2, 94
ld.h t2, a2, 126
ld.h t3, a2, 158
vbsrl.v vr7, vr14, 8
vinsgr2vr.h vr7, t0, 4
vinsgr2vr.h vr7, t1, 5
vinsgr2vr.h vr7, t2, 6
vinsgr2vr.h vr7, t3, 7
vilvh.d vr1, vr12, vr11
vilvh.d vr6, vr14, vr13
vabsd.h vr0, vr0, vr1
vabsd.h vr6, vr7, vr6
vsle.h vr0, vr21, vr0
vsle.h vr6, vr21, vr6
vor.v vr9, vr9, vr0
vor.v vr5, vr5, vr6
vpickev.b vr5, vr5, vr9
vor.v vr17, vr4, vr5
beqz a5, .bframeiszero_0_lsx
// bframe != 0
// Repeat the ref/mv tests for list 1 and merge into the bs=1 mask.
vld vr0, a1, 51
vld vr1, a1, 67
vshuf.b vr4, vr1, vr0, vr2
vshuf.b vr5, vr1, vr0, vr3
vseq.b vr4, vr4, vr5
vseqi.b vr4, vr4, 0
vld vr0, a2, 204
vld vr1, a2, 236
vld vr5, a2, 268
vld vr6, a2, 300
vilvl.h vr9, vr1, vr0
vilvl.h vr10, vr6, vr5
vilvl.w vr11, vr10, vr9
vilvh.w vr12, vr10, vr9
vilvh.h vr9, vr1, vr0
vilvh.h vr10, vr6, vr5
vilvl.w vr13, vr10, vr9
vilvh.w vr14, vr10, vr9
vilvl.d vr0, vr13, vr12
ld.h t0, a2, 220
ld.h t1, a2, 252
ld.h t2, a2, 284
ld.h t3, a2, 316
vmov vr6, vr14
vinsgr2vr.h vr6, t0, 4
vinsgr2vr.h vr6, t1, 5
vinsgr2vr.h vr6, t2, 6
vinsgr2vr.h vr6, t3, 7
vilvl.d vr1, vr12, vr11
vilvl.d vr5, vr14, vr13
vabsd.h vr9, vr0, vr1
vabsd.h vr5, vr6, vr5
vsle.h vr9, vr20, vr9
vsle.h vr5, vr20, vr5
vilvh.d vr0, vr13, vr12
ld.h t0, a2, 222
ld.h t1, a2, 254
ld.h t2, a2, 286
ld.h t3, a2, 318
vbsrl.v vr7, vr14, 8
vinsgr2vr.h vr7, t0, 4
vinsgr2vr.h vr7, t1, 5
vinsgr2vr.h vr7, t2, 6
vinsgr2vr.h vr7, t3, 7
vilvh.d vr1, vr12, vr11
vilvh.d vr6, vr14, vr13
vabsd.h vr0, vr0, vr1
vabsd.h vr6, vr7, vr6
vsle.h vr0, vr21, vr0
vsle.h vr6, vr21, vr6
vor.v vr9, vr9, vr0
vor.v vr5, vr5, vr6
vpickev.b vr5, vr5, vr9
vor.v vr5, vr5, vr4
vor.v vr17, vr5, vr17
.bframeiszero_0_lsx:
// Select bs: 2 (nnz), else 1 (ref/mv mismatch), else 0; store bs[0].
vxor.v vr22, vr22, vr22
vbitsel.v vr22, vr22, vr19, vr17
vbitsel.v vr22, vr8, vr22, vr15
vst vr22, a3, 0
// dir = 1 s1 = 1 s2 = 8
vld vr0, a0, 4
vld vr1, a0, 20
ld.wu t0, a0, 36
vpickev.w vr2, vr1, vr0
vbsrl.v vr3, vr2, 4
vinsgr2vr.w vr3, t0, 3
vor.v vr2, vr3, vr2
vseqi.b vr2, vr2, 0
vmov vr15, vr2
vxor.v vr3, vr3, vr3
vbitsel.v vr3, vr18, vr3, vr2
vld vr0, a1, 4
vld vr1, a1, 20
ld.w t0, a1, 36
vpickev.w vr2, vr1, vr0
vbsrl.v vr4, vr2, 4
vinsgr2vr.w vr4, t0, 3
vseq.b vr2, vr4, vr2
vseqi.b vr2, vr2, 0
vld vr0, a2, 16
vld vr1, a2, 48
vld vr12, a2, 80
vld vr13, a2, 112
vld vr4, a2, 144
vpickev.h vr5, vr1, vr0
vpickev.h vr14, vr13, vr12
vpickev.h vr7, vr4, vr4
vbsrl.v vr6, vr5, 8
vilvl.d vr6, vr14, vr6
vilvh.d vr9, vr7, vr14
vabsd.h vr5, vr6, vr5
vabsd.h vr9, vr9, vr14
vsle.h vr5, vr20, vr5
vsle.h vr9, vr20, vr9
vpickod.h vr6, vr1, vr0
vpickod.h vr14, vr13, vr12
vpickod.h vr7, vr4, vr4
vbsrl.v vr8, vr6, 8
vilvl.d vr8, vr14, vr8
vilvh.d vr7, vr7, vr14
vabsd.h vr8, vr8, vr6
vabsd.h vr7, vr7, vr14
vsle.h vr8, vr21, vr8
vsle.h vr6, vr21, vr7
vor.v vr5, vr5, vr8
vor.v vr6, vr9, vr6
vpickev.b vr6, vr6, vr5
vor.v vr2, vr6, vr2
beqz a5, .bframeiszero_1_lsx
// bframe != 0 ref[1]
vld vr0, a1, 44
vld vr1, a1, 60
ld.w t0, a1, 76
vpickev.w vr0, vr1, vr0
vbsrl.v vr1, vr0, 4
vinsgr2vr.w vr1, t0, 3
vseq.b vr11, vr1, vr0
vseqi.b vr11, vr11, 0
vld vr0, a2, 176
vld vr1, a2, 208
vld vr12, a2, 240
vld vr13, a2, 272
vld vr4, a2, 304
vpickev.h vr5, vr1, vr0
vpickev.h vr14, vr13, vr12
vpickev.h vr7, vr4, vr4
vbsrl.v vr6, vr5, 8
vilvl.d vr6, vr14, vr6
vilvh.d vr9, vr7, vr14
vabsd.h vr5, vr6, vr5
vabsd.h vr9, vr9, vr14
vsle.h vr5, vr20, vr5
vsle.h vr9, vr20, vr9
vpickod.h vr6, vr1, vr0
vpickod.h vr14, vr13, vr12
vpickod.h vr7, vr4, vr4
vbsrl.v vr8, vr6, 8
vilvl.d vr8, vr14, vr8
vilvh.d vr7, vr7, vr14
vabsd.h vr8, vr8, vr6
vabsd.h vr6, vr7, vr14
vsle.h vr8, vr21, vr8
vsle.h vr6, vr21, vr6
vor.v vr5, vr5, vr8
vor.v vr7, vr9, vr6
vpickev.b vr6, vr7, vr5
vor.v vr6, vr6, vr11
vor.v vr2, vr6, vr2
.bframeiszero_1_lsx:
// Select bs for dir 1 and store bs[1].
vxor.v vr22, vr22, vr22
vbitsel.v vr22, vr22, vr19, vr2
vbitsel.v vr22, vr3, vr22, vr15
vst vr22, a3, 32
endfunc_x264
/*
 * void deblock_v_luma_intra_lsx( pixel *pix, intptr_t stride, int alpha, int beta )
 * Intra (strong) filter for a horizontal luma edge, 128-bit LSX variant.
 * In:  a0 = pix, a1 = stride, a2 = alpha, a3 = beta
 * Clobbers t0-t3 and vr0-vr31; callee-saved f24-f31 are saved/restored
 * on a 64-byte stack frame.
 */
function_x264 deblock_v_luma_intra_lsx
// t0 = 2*stride, t1 = 3*stride, t2 = 4*stride
slli.d t0, a1, 1
add.d t1, t0, a1
slli.d t2, a1, 2
// Store registers to the stack
addi.d sp, sp, -64
fst.d f24, sp, 0
fst.d f25, sp, 8
fst.d f26, sp, 16
fst.d f27, sp, 24
fst.d f28, sp, 32
fst.d f29, sp, 40
fst.d f30, sp, 48
fst.d f31, sp, 56
// Load data from pix
sub.d t3, a0, t2 // t3 = a0 - 4 * stride
vld vr3, t3, 0 // p3
vldx vr2, t3, a1 // p2
vldx vr1, t3, t0 // p1
vldx vr0, t3, t1 // p0
vld vr10, a0, 0 // q0
vldx vr11, a0, a1 // q1
vldx vr12, a0, t0 // q2
vldx vr13, a0, t1 // q3
// Widen the low 8 pixels of each row to unsigned halfwords.
vsllwil.hu.bu vr7, vr3, 0
vsllwil.hu.bu vr6, vr2, 0
vsllwil.hu.bu vr5, vr1, 0
vsllwil.hu.bu vr4, vr0, 0
vsllwil.hu.bu vr14, vr10, 0
vsllwil.hu.bu vr15, vr11, 0
vsllwil.hu.bu vr16, vr12, 0
vsllwil.hu.bu vr17, vr13, 0
/* p0', p1', p2' */
vadd.h vr8, vr5, vr4
vadd.h vr9, vr8, vr14
vadd.h vr19, vr7, vr6
vadd.h vr18, vr6, vr9 // pix[-2*xstride]
vslli.h vr19, vr19, 1
vadd.h vr20, vr9, vr18
vadd.h vr19, vr19, vr18 // pix[-3*xstride]
vadd.h vr20, vr20, vr15 // pix[-1*xstride]
/* p0' */
vadd.h vr8, vr8, vr15
vadd.h vr21, vr8, vr5 // pix[-1*xstride]
// /* q0', q1', q2' */
vadd.h vr8, vr15, vr14
vadd.h vr9, vr8, vr4
vadd.h vr23, vr17, vr16
vadd.h vr22, vr9, vr16 // pix[1*xstride]
vslli.h vr23, vr23, 1
vadd.h vr24, vr9, vr22
vadd.h vr23, vr23, vr22 // pix[2*xstride]
vadd.h vr24, vr24, vr5 // pix[0*xstride]
/* q0' */
vadd.h vr8, vr8, vr5
vadd.h vr25, vr8, vr15 // pix[0*xstride]
// Repeat for the high 8 pixels of each row.
vexth.hu.bu vr7, vr3
vexth.hu.bu vr6, vr2
vexth.hu.bu vr5, vr1
vexth.hu.bu vr4, vr0
vexth.hu.bu vr14, vr10
vexth.hu.bu vr15, vr11
vexth.hu.bu vr16, vr12
vexth.hu.bu vr17, vr13
/* p0', p1', p2' */
vadd.h vr8, vr5, vr4
vadd.h vr9, vr8, vr14
vadd.h vr27, vr6, vr9 // pix[-2*xstride]
vadd.h vr28, vr7, vr6
vslli.h vr28, vr28, 1
vadd.h vr29, vr9, vr27
vadd.h vr28, vr28, vr27 // pix[-3*xstride]
vadd.h vr29, vr29, vr15 // pix[-1*xstride]
/* p0' */
vadd.h vr8, vr8, vr15
vadd.h vr30, vr8, vr5 // pix[-1*xstride]
/* q0', q1', q2' */
vadd.h vr8, vr15, vr14
vadd.h vr9, vr8, vr4
vadd.h vr3, vr17, vr16
vadd.h vr31, vr9, vr16 // pix[1*xstride]
vslli.h vr3, vr3, 1
vadd.h vr13, vr9, vr31
vadd.h vr3, vr3, vr31 // pix[2*xstride]
vadd.h vr13, vr13, vr5 // pix[0*xstride]
/* q0' */
vadd.h vr8, vr8, vr5
vadd.h vr9, vr8, vr15 // pix[0*xstride]
// Round/narrow both halves back to bytes.
vsrarni.b.h vr28, vr19, 3 // pix[-3*xstride]
vsrarni.b.h vr27, vr18, 2 // pix[-2*xstride]
vsrarni.b.h vr29, vr20, 3 // pix[-1*xstride]
vsrarni.b.h vr30, vr21, 2 // pix[-1*xstride] p0'
vsrarni.b.h vr13, vr24, 3 // pix[ 0*xstride]
vsrarni.b.h vr31, vr22, 2 // pix[ 1*xstride]
vsrarni.b.h vr3, vr23, 3 // pix[ 2*xstride]
vsrarni.b.h vr9, vr25, 2 // pix[ 0*xstride] q0'
vreplgr2vr.b vr18, a2 // alpha
vreplgr2vr.b vr19, a3 // beta
vabsd.bu vr26, vr0, vr10
vabsd.bu vr8, vr1, vr0
vabsd.bu vr16, vr11, vr10
vslt.bu vr20, vr26, vr18
vslt.bu vr21, vr8, vr19
vslt.bu vr22, vr16, vr19
vand.v vr20, vr20, vr21
vand.v vr20, vr20, vr22 // if_1
// if_2 threshold: |p0-q0| < alpha/4 + 2.
vsrli.b vr18, vr18, 2
vaddi.bu vr18, vr18, 2
vslt.bu vr26, vr26, vr18 // if_2
vabsd.bu vr23, vr2, vr0
vslt.bu vr23, vr23, vr19 // if_3
vand.v vr16, vr23, vr26 // if_2 && if_3
vnor.v vr24, vr16, vr16 // !(if_2 && if_3)
vand.v vr24, vr24, vr20 // if_1 && !(if_2 && if_3)
vand.v vr16, vr16, vr20 // if_1 && if_2 && if_3
vbitsel.v vr4, vr2, vr28, vr16 // pix[-3*xstride]
vbitsel.v vr5, vr1, vr27, vr16 // pix[-2*xstride]
vbitsel.v vr6, vr0, vr30, vr24
vbitsel.v vr6, vr6, vr29, vr16 // pix[-1*xstride]
vabsd.bu vr7, vr12, vr10
vslt.bu vr7, vr7, vr19 // if_4
vand.v vr17, vr7, vr26 // if_2 && if_4
vnor.v vr14, vr17, vr17 // !(if_2 && if_4)
vand.v vr14, vr14, vr20 // if_1 && !(if_2 && if_4)
vand.v vr17, vr17, vr20 // if_1 && if_2 && if_4
vbitsel.v vr15, vr10, vr9, vr14
vbitsel.v vr15, vr15, vr13, vr17 // pix[ 0*xstride]
vbitsel.v vr9, vr11, vr31, vr17 // pix[ 1*xstride]
vbitsel.v vr13, vr12, vr3, vr17 // pix[ 2*xstride]
// Store the six filtered rows p2..q2.
vstx vr4, t3, a1
vstx vr5, t3, t0
vstx vr6, t3, t1
vst vr15, a0, 0
vstx vr9, a0, a1
vstx vr13, a0, t0
// Restore callee-saved FP registers and release the frame.
fld.d f24, sp, 0
fld.d f25, sp, 8
fld.d f26, sp, 16
fld.d f27, sp, 24
fld.d f28, sp, 32
fld.d f29, sp, 40
fld.d f30, sp, 48
fld.d f31, sp, 56
addi.d sp, sp, 64
endfunc_x264
/*
 * void deblock_h_luma_intra_lsx( pixel *pix, intptr_t stride, int alpha, int beta )
 * Intra (strong) filter for a vertical luma edge, 128-bit LSX variant.
 * (Header previously said deblock_h_luma_intra_c; this is the LSX symbol.)
 * In:  a0 = pix, a1 = stride, a2 = alpha, a3 = beta
 * Clobbers t0-t2, t5, t6 and vr0-vr31; callee-saved f24-f31 are
 * saved/restored on a 64-byte stack frame.
 */
function_x264 deblock_h_luma_intra_lsx
// t0 = 2*stride, t2 = 4*stride, t1 = 3*stride
slli.d t0, a1, 1
slli.d t2, a1, 2
addi.d t5, a0, -4
add.d t1, t0, a1
// Store registers to the stack
addi.d sp, sp, -64
fst.d f24, sp, 0
fst.d f25, sp, 8
fst.d f26, sp, 16
fst.d f27, sp, 24
fst.d f28, sp, 32
fst.d f29, sp, 40
fst.d f30, sp, 48
fst.d f31, sp, 56
// Load data from pix
// 16 rows of 8 bytes starting at pix-4 (columns p3..q3).
FLDD_LOADX_4 t5, a1, t0, t1, f10, f11, f12, f13
add.d t5, t5, t2
FLDD_LOADX_4 t5, a1, t0, t1, f14, f15, f16, f17
add.d t5, t5, t2
FLDD_LOADX_4 t5, a1, t0, t1, f20, f21, f22, f23
add.d t5, t5, t2
FLDD_LOADX_4 t5, a1, t0, t1, f24, f25, f26, f27
// In-register 16x8 byte transpose: rows -> columns p3..q3.
vilvl.b vr11, vr11, vr10
vilvl.b vr13, vr13, vr12
vilvl.b vr15, vr15, vr14
vilvl.b vr17, vr17, vr16
vilvl.h vr0, vr13, vr11
vilvl.h vr1, vr17, vr15
vilvh.h vr2, vr13, vr11
vilvh.h vr3, vr17, vr15
vilvl.w vr4, vr1, vr0
vilvl.w vr6, vr3, vr2
vilvh.w vr5, vr1, vr0
vilvh.w vr7, vr3, vr2
vilvl.b vr11, vr21, vr20
vilvl.b vr13, vr23, vr22
vilvl.b vr15, vr25, vr24
vilvl.b vr17, vr27, vr26
vilvl.h vr0, vr13, vr11
vilvl.h vr1, vr17, vr15
vilvh.h vr2, vr13, vr11
vilvh.h vr3, vr17, vr15
vilvl.w vr24, vr1, vr0
vilvl.w vr26, vr3, vr2
vilvh.w vr25, vr1, vr0
vilvh.w vr27, vr3, vr2
vilvl.d vr3, vr24, vr4 // p3
vilvh.d vr2, vr24, vr4 // p2
vilvl.d vr1, vr25, vr5 // p1
vilvh.d vr0, vr25, vr5 // p0
vilvl.d vr10, vr26, vr6 // q0
vilvh.d vr11, vr26, vr6 // q1
vilvl.d vr12, vr27, vr7 // q2
vilvh.d vr13, vr27, vr7 // q3
// Widen the low 8 pixels of each column to unsigned halfwords.
vsllwil.hu.bu vr7, vr3, 0
vsllwil.hu.bu vr6, vr2, 0
vsllwil.hu.bu vr5, vr1, 0
vsllwil.hu.bu vr4, vr0, 0
vsllwil.hu.bu vr14, vr10, 0
vsllwil.hu.bu vr15, vr11, 0
vsllwil.hu.bu vr16, vr12, 0
vsllwil.hu.bu vr17, vr13, 0
/* p0', p1', p2' */
vadd.h vr8, vr5, vr4
vadd.h vr9, vr8, vr14
vadd.h vr19, vr7, vr6
vadd.h vr18, vr6, vr9 // pix[-2*xstride]
vslli.h vr19, vr19, 1
vadd.h vr20, vr9, vr18
vadd.h vr19, vr19, vr18 // pix[-3*xstride]
vadd.h vr20, vr20, vr15 // pix[-1*xstride]
/* p0' */
vadd.h vr8, vr8, vr15
vadd.h vr21, vr8, vr5 // pix[-1*xstride]
/* q0', q1', q2' */
vadd.h vr8, vr15, vr14
vadd.h vr9, vr8, vr4
vadd.h vr23, vr17, vr16
vadd.h vr22, vr9, vr16 // pix[1*xstride]
vslli.h vr23, vr23, 1
vadd.h vr24, vr9, vr22
vadd.h vr23, vr23, vr22 // pix[2*xstride]
vadd.h vr24, vr24, vr5 // pix[0*xstride]
/* q0' */
vadd.h vr8, vr8, vr5
vadd.h vr25, vr8, vr15 // pix[0*xstride]
// Repeat for the high 8 pixels of each column.
vexth.hu.bu vr7, vr3
vexth.hu.bu vr6, vr2
vexth.hu.bu vr5, vr1
vexth.hu.bu vr4, vr0
vexth.hu.bu vr14, vr10
vexth.hu.bu vr15, vr11
vexth.hu.bu vr16, vr12
vexth.hu.bu vr17, vr13
/* p0', p1', p2' */
vadd.h vr8, vr5, vr4
vadd.h vr9, vr8, vr14
vadd.h vr27, vr6, vr9 // pix[-2*xstride]
vadd.h vr28, vr7, vr6
vslli.h vr28, vr28, 1
vadd.h vr29, vr9, vr27
vadd.h vr28, vr28, vr27 // pix[-3*xstride]
vadd.h vr29, vr29, vr15 // pix[-1*xstride]
/* p0' */
vadd.h vr8, vr8, vr15
vadd.h vr30, vr8, vr5 // pix[-1*xstride]
/* q0', q1', q2' */
vadd.h vr8, vr15, vr14
vadd.h vr9, vr8, vr4
vadd.h vr3, vr17, vr16
vadd.h vr31, vr9, vr16 // pix[1*xstride]
vslli.h vr3, vr3, 1
vadd.h vr13, vr9, vr31
vadd.h vr3, vr3, vr31 // pix[2*xstride]
vadd.h vr13, vr13, vr5 // pix[0*xstride]
/* q0' */
vadd.h vr8, vr8, vr5
vadd.h vr9, vr8, vr15 // pix[0*xstride]
// Round/narrow both halves back to bytes.
vsrarni.b.h vr28, vr19, 3 // pix[-3*xstride]
vsrarni.b.h vr27, vr18, 2 // pix[-2*xstride]
vsrarni.b.h vr29, vr20, 3 // pix[-1*xstride]
vsrarni.b.h vr30, vr21, 2 // pix[-1*xstride] p0'
vsrarni.b.h vr13, vr24, 3 // pix[ 0*xstride]
vsrarni.b.h vr31, vr22, 2 // pix[ 1*xstride]
vsrarni.b.h vr3, vr23, 3 // pix[ 2*xstride]
vsrarni.b.h vr9, vr25, 2 // pix[ 0*xstride] q0'
vreplgr2vr.b vr18, a2 // alpha
vreplgr2vr.b vr19, a3 // beta
vabsd.bu vr26, vr0, vr10
vabsd.bu vr8, vr1, vr0
vabsd.bu vr16, vr11, vr10
vslt.bu vr20, vr26, vr18
vslt.bu vr21, vr8, vr19
vslt.bu vr22, vr16, vr19
vand.v vr20, vr20, vr21
vand.v vr20, vr20, vr22 // if_1
// if_2 threshold: |p0-q0| < alpha/4 + 2.
vsrli.b vr18, vr18, 2
vaddi.bu vr18, vr18, 2
vslt.bu vr26, vr26, vr18 // if_2
vabsd.bu vr23, vr2, vr0
vslt.bu vr23, vr23, vr19 // if_3
vand.v vr16, vr23, vr26 // if_2 && if_3
vnor.v vr24, vr16, vr16 // !(if_2 && if_3)
vand.v vr24, vr24, vr20 // if_1 && !(if_2 && if_3)
vand.v vr16, vr16, vr20 // if_1 && if_2 && if_3
vbitsel.v vr4, vr2, vr28, vr16 // pix[-3*xstride]
vbitsel.v vr5, vr1, vr27, vr16 // pix[-2*xstride]
vbitsel.v vr6, vr0, vr30, vr24
vbitsel.v vr6, vr6, vr29, vr16 // pix[-1*xstride]
vabsd.bu vr7, vr12, vr10
vslt.bu vr7, vr7, vr19 // if_4
vand.v vr17, vr7, vr26 // if_2 && if_4
vnor.v vr14, vr17, vr17 // !(if_2 && if_4)
vand.v vr14, vr14, vr20 // if_1 && !(if_2 && if_4)
vand.v vr17, vr17, vr20 // if_1 && if_2 && if_4
vbitsel.v vr15, vr10, vr9, vr14
vbitsel.v vr15, vr15, vr13, vr17 // pix[ 0*xstride]
vbitsel.v vr9, vr11, vr31, vr17 // pix[ 1*xstride]
vbitsel.v vr13, vr12, vr3, vr17 // pix[ 2*xstride]
// Transpose the six filtered columns back to row order for storing.
vilvl.b vr16, vr5, vr4
vilvl.b vr17, vr15, vr6
vilvl.b vr18, vr13, vr9
vilvh.b vr19, vr5, vr4
vilvh.b vr20, vr15, vr6
vilvh.b vr21, vr13, vr9
vilvl.h vr0, vr17, vr16
vilvh.h vr1, vr17, vr16
vilvl.h vr2, vr20, vr19
vilvh.h vr3, vr20, vr19
addi.d t6, a0, -3 // t6 = a0 -3
// Each row: 4 bytes at pix-3 plus 2 bytes at pix+1 (p2..q2).
vstelm.w vr0, t6, 0, 0
vstelm.h vr18, t6, 4, 0
add.d t6, t6, a1
vstelm.w vr0, t6, 0, 1
vstelm.h vr18, t6, 4, 1
add.d t6, t6, a1
vstelm.w vr0, t6, 0, 2
vstelm.h vr18, t6, 4, 2
add.d t6, t6, a1
vstelm.w vr0, t6, 0, 3
vstelm.h vr18, t6, 4, 3
add.d t6, t6, a1
vstelm.w vr1, t6, 0, 0
vstelm.h vr18, t6, 4, 4
add.d t6, t6, a1
vstelm.w vr1, t6, 0, 1
vstelm.h vr18, t6, 4, 5
add.d t6, t6, a1
vstelm.w vr1, t6, 0, 2
vstelm.h vr18, t6, 4, 6
add.d t6, t6, a1
vstelm.w vr1, t6, 0, 3
vstelm.h vr18, t6, 4, 7
add.d t6, t6, a1
vstelm.w vr2, t6, 0, 0
vstelm.h vr21, t6, 4, 0
add.d t6, t6, a1
vstelm.w vr2, t6, 0, 1
vstelm.h vr21, t6, 4, 1
add.d t6, t6, a1
vstelm.w vr2, t6, 0, 2
vstelm.h vr21, t6, 4, 2
add.d t6, t6, a1
vstelm.w vr2, t6, 0, 3
vstelm.h vr21, t6, 4, 3
add.d t6, t6, a1
vstelm.w vr3, t6, 0, 0
vstelm.h vr21, t6, 4, 4
add.d t6, t6, a1
vstelm.w vr3, t6, 0, 1
vstelm.h vr21, t6, 4, 5
add.d t6, t6, a1
vstelm.w vr3, t6, 0, 2
vstelm.h vr21, t6, 4, 6
add.d t6, t6, a1
vstelm.w vr3, t6, 0, 3
vstelm.h vr21, t6, 4, 7
// Restore callee-saved FP registers and release the frame.
fld.d f24, sp, 0
fld.d f25, sp, 8
fld.d f26, sp, 16
fld.d f27, sp, 24
fld.d f28, sp, 32
fld.d f29, sp, 40
fld.d f30, sp, 48
fld.d f31, sp, 56
addi.d sp, sp, 64
endfunc_x264
#endif /* !HIGH_BIT_DEPTH */
/* NOTE(review): the six lines that originally stood here were dataset-join
 * residue, not source code ("aestream/faery | 98,510 |
 * src/mp4/x264/common/loongarch/sad-a.S").  They are preserved in this
 * comment so no information is lost while keeping the file assemblable.
 * Everything below is the start of x264's common/loongarch/sad-a.S. */
/*****************************************************************************
* sad-a.S: loongarch sad functions
*****************************************************************************
* Copyright (C) 2023-2024 x264 project
*
* Authors: Lu Wang <wanglu@loongson.cn>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "loongson_asm.S"
#include "loongson_util.S"
#if !HIGH_BIT_DEPTH
/* void x264_pixel_sad_x4_16x16_lasx(uint8_t *p_src, uint8_t *p_ref0,
* uint8_t *p_ref1, uint8_t *p_ref2,
* uint8_t *p_ref3, intptr_t i_ref_stride,
* int32_t p_sad_array[4])
*/
function_x264 pixel_sad_x4_16x16_lasx
// SAD of one 16x16 source block against four 16x16 reference blocks (LASX).
// In:  a0 = p_src (rows packed at a fixed 16-byte pitch: offsets 0..224, so
//      each 32-byte xvld covers two consecutive rows -- presumably the
//      encoder's packed fenc buffer; confirm against caller),
//      a1..a4 = p_ref0..p_ref3, a5 = i_ref_stride,
//      a6 = int32_t p_sad_array[4] (output).
// Scheme: per 4-row group, two rows of each ref are packed into one 256-bit
// register (low/high 128-bit lane = row n / row n+1); absolute differences
// are pair-summed to 16-bit and accumulated in xr12..xr15, one accumulator
// per reference.
slli.d t1, a5, 1 // t1 = 2 * i_ref_stride
add.d t2, a5, t1 // t2 = 3 * i_ref_stride
slli.d t3, a5, 2 // t3 = 4 * i_ref_stride
// Load data from p_src, p_ref0, p_ref1, p_ref2 and p_ref3
xvld xr3, a0, 0 // src rows 0-1
xvld xr16, a0, 32 // src rows 2-3
vld vr4, a1, 0
vldx vr8, a1, a5
vld vr5, a2, 0
vldx vr9, a2, a5
vld vr6, a3, 0
vldx vr10, a3, a5
vld vr7, a4, 0
vldx vr11, a4, a5
// Merge row n+1 into the high 128-bit lane of each ref register.
xvpermi.q xr4, xr8, 0x02
xvpermi.q xr5, xr9, 0x02
xvpermi.q xr6, xr10, 0x02
xvpermi.q xr7, xr11, 0x02
// Calculate the absolute value of the difference
xvabsd.bu xr8, xr3, xr4
xvabsd.bu xr9, xr3, xr5
xvabsd.bu xr10, xr3, xr6
xvabsd.bu xr11, xr3, xr7
// Pair-sum bytes to 16-bit; initialize the four accumulators.
xvhaddw.hu.bu xr12, xr8, xr8
xvhaddw.hu.bu xr13, xr9, xr9
xvhaddw.hu.bu xr14, xr10, xr10
xvhaddw.hu.bu xr15, xr11, xr11
// Load data from p_src, p_ref0, p_ref1, p_ref2 and p_ref3
vldx vr4, a1, t1
vldx vr8, a1, t2
vldx vr5, a2, t1
vldx vr9, a2, t2
vldx vr6, a3, t1
vldx vr10, a3, t2
vldx vr7, a4, t1
vldx vr11, a4, t2
xvpermi.q xr4, xr8, 0x02
xvpermi.q xr5, xr9, 0x02
xvpermi.q xr6, xr10, 0x02
xvpermi.q xr7, xr11, 0x02
// Calculate the absolute value of the difference
xvabsd.bu xr8, xr16, xr4
xvabsd.bu xr9, xr16, xr5
xvabsd.bu xr10, xr16, xr6
xvabsd.bu xr11, xr16, xr7
xvhaddw.hu.bu xr8, xr8, xr8
xvhaddw.hu.bu xr9, xr9, xr9
xvhaddw.hu.bu xr10, xr10, xr10
xvhaddw.hu.bu xr11, xr11, xr11
xvadd.h xr12, xr12, xr8
xvadd.h xr13, xr13, xr9
xvadd.h xr14, xr14, xr10
xvadd.h xr15, xr15, xr11
// Advance the four ref pointers by 4 rows; src advances by offset only.
add.d a1, a1, t3
add.d a2, a2, t3
add.d a3, a3, t3
add.d a4, a4, t3
// Load data from p_src, p_ref0, p_ref1, p_ref2 and p_ref3
xvld xr3, a0, 64 // src rows 4-5
xvld xr16, a0, 96 // src rows 6-7
vld vr4, a1, 0
vldx vr8, a1, a5
vld vr5, a2, 0
vldx vr9, a2, a5
vld vr6, a3, 0
vldx vr10, a3, a5
vld vr7, a4, 0
vldx vr11, a4, a5
xvpermi.q xr4, xr8, 0x02
xvpermi.q xr5, xr9, 0x02
xvpermi.q xr6, xr10, 0x02
xvpermi.q xr7, xr11, 0x02
// Calculate the absolute value of the difference
xvabsd.bu xr8, xr3, xr4
xvabsd.bu xr9, xr3, xr5
xvabsd.bu xr10, xr3, xr6
xvabsd.bu xr11, xr3, xr7
xvhaddw.hu.bu xr8, xr8, xr8
xvhaddw.hu.bu xr9, xr9, xr9
xvhaddw.hu.bu xr10, xr10, xr10
xvhaddw.hu.bu xr11, xr11, xr11
xvadd.h xr12, xr12, xr8
xvadd.h xr13, xr13, xr9
xvadd.h xr14, xr14, xr10
xvadd.h xr15, xr15, xr11
// Load data from p_src, p_ref0, p_ref1, p_ref2 and p_ref3
vldx vr4, a1, t1
vldx vr8, a1, t2
vldx vr5, a2, t1
vldx vr9, a2, t2
vldx vr6, a3, t1
vldx vr10, a3, t2
vldx vr7, a4, t1
vldx vr11, a4, t2
xvpermi.q xr4, xr8, 0x02
xvpermi.q xr5, xr9, 0x02
xvpermi.q xr6, xr10, 0x02
xvpermi.q xr7, xr11, 0x02
// Calculate the absolute value of the difference
xvabsd.bu xr8, xr16, xr4
xvabsd.bu xr9, xr16, xr5
xvabsd.bu xr10, xr16, xr6
xvabsd.bu xr11, xr16, xr7
xvhaddw.hu.bu xr8, xr8, xr8
xvhaddw.hu.bu xr9, xr9, xr9
xvhaddw.hu.bu xr10, xr10, xr10
xvhaddw.hu.bu xr11, xr11, xr11
xvadd.h xr12, xr12, xr8
xvadd.h xr13, xr13, xr9
xvadd.h xr14, xr14, xr10
xvadd.h xr15, xr15, xr11
add.d a1, a1, t3
add.d a2, a2, t3
add.d a3, a3, t3
add.d a4, a4, t3
// Load data from p_src, p_ref0, p_ref1, p_ref2 and p_ref3
xvld xr3, a0, 128 // src rows 8-9
xvld xr16, a0, 160 // src rows 10-11
vld vr4, a1, 0
vldx vr8, a1, a5
vld vr5, a2, 0
vldx vr9, a2, a5
vld vr6, a3, 0
vldx vr10, a3, a5
vld vr7, a4, 0
vldx vr11, a4, a5
xvpermi.q xr4, xr8, 0x02
xvpermi.q xr5, xr9, 0x02
xvpermi.q xr6, xr10, 0x02
xvpermi.q xr7, xr11, 0x02
// Calculate the absolute value of the difference
xvabsd.bu xr8, xr3, xr4
xvabsd.bu xr9, xr3, xr5
xvabsd.bu xr10, xr3, xr6
xvabsd.bu xr11, xr3, xr7
xvhaddw.hu.bu xr8, xr8, xr8
xvhaddw.hu.bu xr9, xr9, xr9
xvhaddw.hu.bu xr10, xr10, xr10
xvhaddw.hu.bu xr11, xr11, xr11
xvadd.h xr12, xr12, xr8
xvadd.h xr13, xr13, xr9
xvadd.h xr14, xr14, xr10
xvadd.h xr15, xr15, xr11
// Load data from p_src, p_ref0, p_ref1, p_ref2 and p_ref3
vldx vr4, a1, t1
vldx vr8, a1, t2
vldx vr5, a2, t1
vldx vr9, a2, t2
vldx vr6, a3, t1
vldx vr10, a3, t2
vldx vr7, a4, t1
vldx vr11, a4, t2
xvpermi.q xr4, xr8, 0x02
xvpermi.q xr5, xr9, 0x02
xvpermi.q xr6, xr10, 0x02
xvpermi.q xr7, xr11, 0x02
// Calculate the absolute value of the difference
xvabsd.bu xr8, xr16, xr4
xvabsd.bu xr9, xr16, xr5
xvabsd.bu xr10, xr16, xr6
xvabsd.bu xr11, xr16, xr7
xvhaddw.hu.bu xr8, xr8, xr8
xvhaddw.hu.bu xr9, xr9, xr9
xvhaddw.hu.bu xr10, xr10, xr10
xvhaddw.hu.bu xr11, xr11, xr11
xvadd.h xr12, xr12, xr8
xvadd.h xr13, xr13, xr9
xvadd.h xr14, xr14, xr10
xvadd.h xr15, xr15, xr11
add.d a1, a1, t3
add.d a2, a2, t3
add.d a3, a3, t3
add.d a4, a4, t3
// Load data from p_src, p_ref0, p_ref1, p_ref2 and p_ref3
xvld xr3, a0, 192 // src rows 12-13
xvld xr16, a0, 224 // src rows 14-15
vld vr4, a1, 0
vldx vr8, a1, a5
vld vr5, a2, 0
vldx vr9, a2, a5
vld vr6, a3, 0
vldx vr10, a3, a5
vld vr7, a4, 0
vldx vr11, a4, a5
xvpermi.q xr4, xr8, 0x02
xvpermi.q xr5, xr9, 0x02
xvpermi.q xr6, xr10, 0x02
xvpermi.q xr7, xr11, 0x02
// Calculate the absolute value of the difference
xvabsd.bu xr8, xr3, xr4
xvabsd.bu xr9, xr3, xr5
xvabsd.bu xr10, xr3, xr6
xvabsd.bu xr11, xr3, xr7
xvhaddw.hu.bu xr8, xr8, xr8
xvhaddw.hu.bu xr9, xr9, xr9
xvhaddw.hu.bu xr10, xr10, xr10
xvhaddw.hu.bu xr11, xr11, xr11
xvadd.h xr12, xr12, xr8
xvadd.h xr13, xr13, xr9
xvadd.h xr14, xr14, xr10
xvadd.h xr15, xr15, xr11
// Load data from p_src, p_ref0, p_ref1, p_ref2 and p_ref3
vldx vr4, a1, t1
vldx vr8, a1, t2
vldx vr5, a2, t1
vldx vr9, a2, t2
vldx vr6, a3, t1
vldx vr10, a3, t2
vldx vr7, a4, t1
vldx vr11, a4, t2
xvpermi.q xr4, xr8, 0x02
xvpermi.q xr5, xr9, 0x02
xvpermi.q xr6, xr10, 0x02
xvpermi.q xr7, xr11, 0x02
// Calculate the absolute value of the difference
xvabsd.bu xr8, xr16, xr4
xvabsd.bu xr9, xr16, xr5
xvabsd.bu xr10, xr16, xr6
xvabsd.bu xr11, xr16, xr7
xvhaddw.hu.bu xr8, xr8, xr8
xvhaddw.hu.bu xr9, xr9, xr9
xvhaddw.hu.bu xr10, xr10, xr10
xvhaddw.hu.bu xr11, xr11, xr11
xvadd.h xr12, xr12, xr8
xvadd.h xr13, xr13, xr9
xvadd.h xr14, xr14, xr10
xvadd.h xr15, xr15, xr11
// Final reduction: pair accumulators across 128-bit lanes (xr12 with xr14,
// xr13 with xr15), then widen-sum h->w->d->q so each 128-bit lane holds one
// 32-bit SAD total.
xvori.b xr17, xr12, 0
xvori.b xr18, xr13, 0
xvpermi.q xr12, xr14, 0x02
xvpermi.q xr14, xr17, 0x31
xvpermi.q xr13, xr15, 0x02
xvpermi.q xr15, xr18, 0x31
xvadd.h xr12, xr12, xr14
xvadd.h xr13, xr13, xr15
xvhaddw.w.h xr12, xr12, xr12
xvhaddw.w.h xr13, xr13, xr13
xvhaddw.d.w xr12, xr12, xr12
xvhaddw.d.w xr13, xr13, xr13
xvhaddw.q.d xr12, xr12, xr12
xvhaddw.q.d xr13, xr13, xr13
// Interleave so xr13 holds {sad0,sad1,...,sad2,sad3,...}.
xvpackev.w xr13, xr13, xr12
// Store data to p_sad_array
xvstelm.d xr13, a6, 0, 0 // p_sad_array[0..1]
xvstelm.d xr13, a6, 8, 2 // p_sad_array[2..3]
endfunc_x264
/* void x264_pixel_sad_x4_16x8_lasx(uint8_t *p_src, uint8_t *p_ref0,
* uint8_t *p_ref1, uint8_t *p_ref2,
* uint8_t *p_ref3, intptr_t i_ref_stride,
* int32_t p_sad_array[4])
*/
function_x264 pixel_sad_x4_16x8_lasx
// SAD of one 16x8 source block against four 16x8 reference blocks (LASX).
// In:  a0 = p_src (rows packed at a fixed 16-byte pitch; one 32-byte xvld
//      covers two rows), a1..a4 = p_ref0..p_ref3, a5 = i_ref_stride,
//      a6 = int32_t p_sad_array[4] (output).
// Same accumulation scheme as pixel_sad_x4_16x16_lasx, over 8 rows:
// xr12..xr15 hold 16-bit partial sums, one accumulator per reference.
slli.d t1, a5, 1 // t1 = 2 * i_ref_stride
add.d t2, a5, t1 // t2 = 3 * i_ref_stride
slli.d t3, a5, 2 // t3 = 4 * i_ref_stride
// Load data from p_src, p_ref0, p_ref1, p_ref2 and p_ref3
xvld xr3, a0, 0 // src rows 0-1
vld vr4, a1, 0
vldx vr8, a1, a5
vld vr5, a2, 0
vldx vr9, a2, a5
vld vr6, a3, 0
vldx vr10, a3, a5
vld vr7, a4, 0
vldx vr11, a4, a5
// Merge row n+1 into the high 128-bit lane of each ref register.
xvpermi.q xr4, xr8, 0x02
xvpermi.q xr5, xr9, 0x02
xvpermi.q xr6, xr10, 0x02
xvpermi.q xr7, xr11, 0x02
// Calculate the absolute value of the difference
xvabsd.bu xr8, xr3, xr4
xvabsd.bu xr9, xr3, xr5
xvabsd.bu xr10, xr3, xr6
xvabsd.bu xr11, xr3, xr7
// Pair-sum bytes to 16-bit; initialize the four accumulators.
xvhaddw.hu.bu xr12, xr8, xr8
xvhaddw.hu.bu xr13, xr9, xr9
xvhaddw.hu.bu xr14, xr10, xr10
xvhaddw.hu.bu xr15, xr11, xr11
// Load data from p_src, p_ref0, p_ref1, p_ref2 and p_ref3
xvld xr3, a0, 32 // src rows 2-3
vldx vr4, a1, t1
vldx vr8, a1, t2
vldx vr5, a2, t1
vldx vr9, a2, t2
vldx vr6, a3, t1
vldx vr10, a3, t2
vldx vr7, a4, t1
vldx vr11, a4, t2
xvpermi.q xr4, xr8, 0x02
xvpermi.q xr5, xr9, 0x02
xvpermi.q xr6, xr10, 0x02
xvpermi.q xr7, xr11, 0x02
// Calculate the absolute value of the difference
xvabsd.bu xr8, xr3, xr4
xvabsd.bu xr9, xr3, xr5
xvabsd.bu xr10, xr3, xr6
xvabsd.bu xr11, xr3, xr7
xvhaddw.hu.bu xr8, xr8, xr8
xvhaddw.hu.bu xr9, xr9, xr9
xvhaddw.hu.bu xr10, xr10, xr10
xvhaddw.hu.bu xr11, xr11, xr11
xvadd.h xr12, xr12, xr8
xvadd.h xr13, xr13, xr9
xvadd.h xr14, xr14, xr10
xvadd.h xr15, xr15, xr11
// Advance the four ref pointers by 4 rows.
add.d a1, a1, t3
add.d a2, a2, t3
add.d a3, a3, t3
add.d a4, a4, t3
// Load data from p_src, p_ref0, p_ref1, p_ref2 and p_ref3
xvld xr3, a0, 64 // src rows 4-5
vld vr4, a1, 0
vldx vr8, a1, a5
vld vr5, a2, 0
vldx vr9, a2, a5
vld vr6, a3, 0
vldx vr10, a3, a5
vld vr7, a4, 0
vldx vr11, a4, a5
xvpermi.q xr4, xr8, 0x02
xvpermi.q xr5, xr9, 0x02
xvpermi.q xr6, xr10, 0x02
xvpermi.q xr7, xr11, 0x02
// Calculate the absolute value of the difference
xvabsd.bu xr8, xr3, xr4
xvabsd.bu xr9, xr3, xr5
xvabsd.bu xr10, xr3, xr6
xvabsd.bu xr11, xr3, xr7
xvhaddw.hu.bu xr8, xr8, xr8
xvhaddw.hu.bu xr9, xr9, xr9
xvhaddw.hu.bu xr10, xr10, xr10
xvhaddw.hu.bu xr11, xr11, xr11
xvadd.h xr12, xr12, xr8
xvadd.h xr13, xr13, xr9
xvadd.h xr14, xr14, xr10
xvadd.h xr15, xr15, xr11
// Load data from p_src, p_ref0, p_ref1, p_ref2 and p_ref3
xvld xr3, a0, 96 // src rows 6-7
vldx vr4, a1, t1
vldx vr8, a1, t2
vldx vr5, a2, t1
vldx vr9, a2, t2
vldx vr6, a3, t1
vldx vr10, a3, t2
vldx vr7, a4, t1
vldx vr11, a4, t2
xvpermi.q xr4, xr8, 0x02
xvpermi.q xr5, xr9, 0x02
xvpermi.q xr6, xr10, 0x02
xvpermi.q xr7, xr11, 0x02
// Calculate the absolute value of the difference
xvabsd.bu xr8, xr3, xr4
xvabsd.bu xr9, xr3, xr5
xvabsd.bu xr10, xr3, xr6
xvabsd.bu xr11, xr3, xr7
xvhaddw.hu.bu xr8, xr8, xr8
xvhaddw.hu.bu xr9, xr9, xr9
xvhaddw.hu.bu xr10, xr10, xr10
xvhaddw.hu.bu xr11, xr11, xr11
xvadd.h xr12, xr12, xr8
xvadd.h xr13, xr13, xr9
xvadd.h xr14, xr14, xr10
xvadd.h xr15, xr15, xr11
// Final reduction: pair accumulators across 128-bit lanes, then widen-sum
// h->w->d->q so each 128-bit lane holds one 32-bit SAD total.
xvori.b xr17, xr12, 0
xvori.b xr18, xr13, 0
xvpermi.q xr12, xr14, 0x02
xvpermi.q xr14, xr17, 0x31
xvpermi.q xr13, xr15, 0x02
xvpermi.q xr15, xr18, 0x31
xvadd.h xr12, xr12, xr14
xvadd.h xr13, xr13, xr15
xvhaddw.w.h xr12, xr12, xr12
xvhaddw.w.h xr13, xr13, xr13
xvhaddw.d.w xr12, xr12, xr12
xvhaddw.d.w xr13, xr13, xr13
xvhaddw.q.d xr12, xr12, xr12
xvhaddw.q.d xr13, xr13, xr13
// Interleave so xr13 holds {sad0,sad1,...,sad2,sad3,...}.
xvpackev.w xr13, xr13, xr12
// Store data to p_sad_array
xvstelm.d xr13, a6, 0, 0 // p_sad_array[0..1]
xvstelm.d xr13, a6, 8, 2 // p_sad_array[2..3]
endfunc_x264
/* void x264_pixel_sad_x4_8x8_lasx(uint8_t *p_src, uint8_t *p_ref0,
* uint8_t *p_ref1, uint8_t *p_ref2,
* uint8_t *p_ref3, intptr_t i_ref_stride,
* int32_t p_sad_array[4])
*/
function_x264 pixel_sad_x4_8x8_lasx
// SAD of one 8x8 source block against four 8x8 reference blocks (LASX).
// In:  a0 = p_src (rows presumably packed at a 16-byte pitch -- src offsets
//      step by 16; confirm against caller), a1..a4 = p_ref0..p_ref3,
//      a5 = i_ref_stride, a6 = int32_t p_sad_array[4] (output).
// Rows are 8 bytes wide, so they are loaded with FP double loads
// (FLDD_LOADX_4 loads base[0], base[s], base[2s], base[3s]) and four rows
// of two refs are packed per 256-bit register.
slli.d t1, a5, 1 // t1 = 2 * i_ref_stride
add.d t2, t1, a5 // t2 = 3 * i_ref_stride
slli.d t3, a5, 2 // t3 = 4 * i_ref_stride
// Load data from p_src, p_ref0, p_ref1, p_ref2 and p_ref3
FLDD_LOADX_4 a1, a5, t1, t2, f4, f8, f14, f18
FLDD_LOADX_4 a2, a5, t1, t2, f5, f9, f15, f19
FLDD_LOADX_4 a3, a5, t1, t2, f6, f10, f16, f20
FLDD_LOADX_4 a4, a5, t1, t2, f7, f11, f17, f21
// Pack pairs of 8-byte rows into full 128-bit registers.
vilvl.d vr4, vr5, vr4
vilvl.d vr6, vr7, vr6
vilvl.d vr8, vr9, vr8
vilvl.d vr10, vr11, vr10
vilvl.d vr14, vr15, vr14
vilvl.d vr16, vr17, vr16
vilvl.d vr18, vr19, vr18
vilvl.d vr20, vr21, vr20
xvpermi.q xr4, xr6, 0x02
xvpermi.q xr8, xr10, 0x02
xvpermi.q xr14, xr16, 0x02
xvpermi.q xr18, xr20, 0x02
// Calculate the absolute value of the difference
// (xvldrepl.d broadcasts one 8-byte src row into both lanes so it can be
// compared against the packed multi-ref rows at once).
xvldrepl.d xr3, a0, 0
xvabsd.bu xr5, xr3, xr4
xvldrepl.d xr3, a0, 16
xvabsd.bu xr9, xr3, xr8
xvldrepl.d xr3, a0, 32
xvabsd.bu xr10, xr3, xr14
xvldrepl.d xr3, a0, 48
xvabsd.bu xr11, xr3, xr18
// Widening even/odd adds accumulate byte diffs as 16-bit sums.
xvaddwev.h.bu xr0, xr5, xr9
xvaddwod.h.bu xr1, xr5, xr9
xvaddwev.h.bu xr2, xr10, xr11
xvaddwod.h.bu xr22, xr10, xr11
// Advance ref pointers by 4 rows.
add.d a1, a1, t3
add.d a2, a2, t3
add.d a3, a3, t3
add.d a4, a4, t3
// Load data from p_src, p_ref0, p_ref1, p_ref2 and p_ref3
FLDD_LOADX_4 a1, a5, t1, t2, f4, f8, f14, f18
FLDD_LOADX_4 a2, a5, t1, t2, f5, f9, f15, f19
FLDD_LOADX_4 a3, a5, t1, t2, f6, f10, f16, f20
FLDD_LOADX_4 a4, a5, t1, t2, f7, f11, f17, f21
vilvl.d vr4, vr5, vr4
vilvl.d vr6, vr7, vr6
vilvl.d vr8, vr9, vr8
vilvl.d vr10, vr11, vr10
vilvl.d vr14, vr15, vr14
vilvl.d vr16, vr17, vr16
vilvl.d vr18, vr19, vr18
vilvl.d vr20, vr21, vr20
xvpermi.q xr4, xr6, 0x02
xvpermi.q xr8, xr10, 0x02
xvpermi.q xr14, xr16, 0x02
xvpermi.q xr18, xr20, 0x02
// Calculate the absolute value of the difference
xvldrepl.d xr3, a0, 64
xvabsd.bu xr5, xr3, xr4
xvldrepl.d xr3, a0, 80
xvabsd.bu xr9, xr3, xr8
xvldrepl.d xr3, a0, 96
xvabsd.bu xr10, xr3, xr14
xvldrepl.d xr3, a0, 112
xvabsd.bu xr11, xr3, xr18
xvaddwev.h.bu xr12, xr5, xr9
xvaddwod.h.bu xr13, xr5, xr9
xvaddwev.h.bu xr14, xr10, xr11
xvaddwod.h.bu xr15, xr10, xr11
// Merge the two 4-row groups and reduce to one 32-bit sum per 64-bit slot.
xvadd.h xr5, xr0, xr12
xvadd.h xr9, xr1, xr13
xvadd.h xr10, xr2, xr14
xvadd.h xr11, xr22, xr15
xvadd.h xr5, xr5, xr9
xvadd.h xr10, xr10, xr11
xvadd.h xr10, xr10, xr5
xvhaddw.wu.hu xr10, xr10, xr10
xvhaddw.du.wu xr10, xr10, xr10
// Gather the four per-ref sums from both lanes into one 128-bit vector.
xvpermi.q xr5, xr10, 0x01
xvpickev.w xr10, xr5, xr10
// Store data to p_sad_array
vst vr10, a6, 0
endfunc_x264
/* void x264_pixel_sad_x4_8x4_lasx(uint8_t *p_src, uint8_t *p_ref0,
* uint8_t *p_ref1, uint8_t *p_ref2,
* uint8_t *p_ref3, intptr_t i_ref_stride,
* int32_t p_sad_array[4])
*/
function_x264 pixel_sad_x4_8x4_lasx
// SAD of one 8x4 source block against four 8x4 reference blocks (LASX).
// In:  a0 = p_src (row offsets 0/16/32/48 -- packed 16-byte pitch assumed),
//      a1..a4 = p_ref0..p_ref3, a5 = i_ref_stride,
//      a6 = int32_t p_sad_array[4] (output).
slli.d t1, a5, 1 // t1 = 2 * i_ref_stride
add.d t2, t1, a5 // t2 = 3 * i_ref_stride
// Load data from p_src, p_ref0, p_ref1, p_ref2 and p_ref3
fld.d f2, a0, 0
fld.d f3, a0, 16
fld.d f12, a0, 32
fld.d f13, a0, 48
FLDD_LOADX_4 a1, a5, t1, t2, f4, f8, f14, f18
FLDD_LOADX_4 a2, a5, t1, t2, f5, f9, f15, f19
FLDD_LOADX_4 a3, a5, t1, t2, f6, f10, f16, f20
FLDD_LOADX_4 a4, a5, t1, t2, f7, f11, f17, f21
// Pack row pairs: vrN = {row n+1 | row n} for src and each ref.
vilvl.d vr3, vr3, vr2
vilvl.d vr4, vr8, vr4
vilvl.d vr5, vr9, vr5
vilvl.d vr6, vr10, vr6
vilvl.d vr7, vr11, vr7
vilvl.d vr13, vr13, vr12
vilvl.d vr14, vr18, vr14
vilvl.d vr15, vr19, vr15
vilvl.d vr16, vr20, vr16
vilvl.d vr17, vr21, vr17
// xr3 = {src rows 0-1 | src rows 2-3}.  The ref registers are deliberately
// cross-paired (e.g. xr4 = ref0 rows 0-1 | ref2 rows 2-3); the lane swap
// via xvpermi.d 0x4e below re-unites each ref's two halves.
xvpermi.q xr3, xr13, 0x02
xvpermi.q xr4, xr16, 0x02
xvpermi.q xr5, xr17, 0x02
xvpermi.q xr6, xr14, 0x02
xvpermi.q xr7, xr15, 0x02
// Calculate the absolute value of the difference
xvabsd.bu xr8, xr3, xr4
xvabsd.bu xr9, xr3, xr5
xvabsd.bu xr10, xr3, xr6
xvabsd.bu xr11, xr3, xr7
xvhaddw.hu.bu xr8, xr8, xr8
xvhaddw.hu.bu xr9, xr9, xr9
xvhaddw.hu.bu xr10, xr10, xr10
xvhaddw.hu.bu xr11, xr11, xr11
// Swap 128-bit lanes, then add so each lane holds one complete ref sum.
xvpermi.d xr10, xr10, 0x4e
xvpermi.d xr11, xr11, 0x4e
xvadd.h xr8, xr8, xr10
xvadd.h xr9, xr9, xr11
// Widen-sum h->w->d->q: one 32-bit SAD per 128-bit lane.
xvhaddw.w.h xr8, xr8, xr8
xvhaddw.w.h xr9, xr9, xr9
xvhaddw.d.w xr8, xr8, xr8
xvhaddw.d.w xr9, xr9, xr9
xvhaddw.q.d xr8, xr8, xr8
xvhaddw.q.d xr9, xr9, xr9
xvpackev.w xr9, xr9, xr8
// Store data to p_sad_array
xvstelm.d xr9, a6, 0, 0 // p_sad_array[0..1]
xvstelm.d xr9, a6, 8, 2 // p_sad_array[2..3]
endfunc_x264
/* void x264_pixel_sad_x4_4x4_lsx(uint8_t *p_src, uint8_t *p_ref0,
* uint8_t *p_ref1, uint8_t *p_ref2,
* uint8_t *p_ref3, intptr_t i_ref_stride,
* int32_t p_sad_array[4])
*/
function_x264 pixel_sad_x4_4x4_lsx
// SAD of one 4x4 source block against four 4x4 reference blocks (LSX).
// In:  a0 = p_src (row offsets 0/16/32/48 -- packed 16-byte pitch assumed),
//      a1..a4 = p_ref0..p_ref3, a5 = i_ref_stride,
//      a6 = int32_t p_sad_array[4] (output).
// All 16 bytes of each block fit in one 128-bit register, so each ref is
// reduced independently with a full widening-add chain.
slli.d t0, a5, 1 // t0 = 2 * i_ref_stride
add.d t1, a5, t0 // t1 = 3 * i_ref_stride
slli.d t2, a5, 2 // t2 = 4 * i_ref_stride (unused below)
// Load data from p_src, p_ref0, p_ref1, p_ref2 and p_ref3
fld.s f2, a0, 0
fld.s f3, a0, 16
fld.s f4, a1, 0
fldx.s f8, a1, a5
fld.s f5, a2, 0
fldx.s f9, a2, a5
fld.s f6, a3, 0
fldx.s f10, a3, a5
fld.s f7, a4, 0
fldx.s f11, a4, a5
// Interleave rows 0 and 1 of src and each ref.
vilvl.w vr3, vr3, vr2
vilvl.w vr4, vr8, vr4
vilvl.w vr5, vr9, vr5
vilvl.w vr6, vr10, vr6
vilvl.w vr7, vr11, vr7
fld.s f2, a0, 32
fld.s f0, a0, 48
fldx.s f8, a1, t0
fldx.s f12, a1, t1
fldx.s f9, a2, t0
fldx.s f13, a2, t1
fldx.s f10, a3, t0
fldx.s f14, a3, t1
fldx.s f11, a4, t0
fldx.s f15, a4, t1
// Interleave rows 2 and 3, then combine all four rows per register.
vilvl.w vr2, vr0, vr2
vilvl.w vr8, vr12, vr8
vilvl.w vr9, vr13, vr9
vilvl.w vr10, vr14, vr10
vilvl.w vr11, vr15, vr11
vilvl.d vr3, vr2, vr3
vilvl.d vr4, vr8, vr4
vilvl.d vr5, vr9, vr5
vilvl.d vr6, vr10, vr6
vilvl.d vr7, vr11, vr7
// Calculate the absolute value of the difference
vabsd.bu vr8, vr3, vr4
vabsd.bu vr9, vr3, vr5
vabsd.bu vr10, vr3, vr6
vabsd.bu vr11, vr3, vr7
// Widen-sum bu->h->w->d->q: full horizontal reduction per reference.
vhaddw.hu.bu vr8, vr8, vr8
vhaddw.hu.bu vr9, vr9, vr9
vhaddw.hu.bu vr10, vr10, vr10
vhaddw.hu.bu vr11, vr11, vr11
vhaddw.wu.hu vr8, vr8, vr8
vhaddw.wu.hu vr9, vr9, vr9
vhaddw.wu.hu vr10, vr10, vr10
vhaddw.wu.hu vr11, vr11, vr11
vhaddw.du.wu vr8, vr8, vr8
vhaddw.du.wu vr9, vr9, vr9
vhaddw.du.wu vr10, vr10, vr10
vhaddw.du.wu vr11, vr11, vr11
vhaddw.qu.du vr8, vr8, vr8
vhaddw.qu.du vr9, vr9, vr9
vhaddw.qu.du vr10, vr10, vr10
vhaddw.qu.du vr11, vr11, vr11
// Store data to p_sad_array
vstelm.w vr8, a6, 0, 0
vstelm.w vr9, a6, 4, 0
vstelm.w vr10, a6, 8, 0
vstelm.w vr11, a6, 12, 0
endfunc_x264
/* void x264_pixel_sad_x3_16x16_lasx(uint8_t *p_src, uint8_t *p_ref0,
* uint8_t *p_ref1, uint8_t *p_ref2,
* intptr_t i_ref_stride,
* int32_t p_sad_array[3])
*/
function_x264 pixel_sad_x3_16x16_lasx
// SAD of one 16x16 source block against three 16x16 reference blocks (LASX).
// In:  a0 = p_src (packed 16-byte pitch, offsets 0..224),
//      a1..a3 = p_ref0..p_ref2, a4 = i_ref_stride,
//      a5 = int32_t p_sad_array[3] (output).
// Per 4-row group: LSX_LOADX_4 loads four ref rows, xvpermi.q packs pairs
// of rows into 256-bit registers, and six 16-bit accumulators xr16..xr21
// (two per reference: rows n/n+1 and rows n+2/n+3) collect the sums.
// Load data from p_src, p_ref0, p_ref1 and p_ref2
slli.d t1, a4, 1 // t1 = 2 * i_ref_stride
add.d t2, a4, t1 // t2 = 3 * i_ref_stride
slli.d t3, a4, 2 // t3 = 4 * i_ref_stride
xvld xr2, a0, 0 // src rows 0-1
xvld xr3, a0, 32 // src rows 2-3
LSX_LOADX_4 a1, a4, t1, t2, vr4, vr7, vr10, vr13
LSX_LOADX_4 a2, a4, t1, t2, vr5, vr8, vr11, vr14
LSX_LOADX_4 a3, a4, t1, t2, vr6, vr9, vr12, vr15
xvpermi.q xr4, xr7, 0x02
xvpermi.q xr5, xr8, 0x02
xvpermi.q xr6, xr9, 0x02
xvpermi.q xr10, xr13, 0x02
xvpermi.q xr11, xr14, 0x02
xvpermi.q xr12, xr15, 0x02
// Calculate the absolute value of the difference
xvabsd.bu xr7, xr2, xr4
xvabsd.bu xr8, xr2, xr5
xvabsd.bu xr9, xr2, xr6
xvabsd.bu xr10, xr3, xr10
xvabsd.bu xr11, xr3, xr11
xvabsd.bu xr12, xr3, xr12
// Initialize the six 16-bit accumulators.
xvhaddw.hu.bu xr16, xr7, xr7
xvhaddw.hu.bu xr17, xr8, xr8
xvhaddw.hu.bu xr18, xr9, xr9
xvhaddw.hu.bu xr19, xr10, xr10
xvhaddw.hu.bu xr20, xr11, xr11
xvhaddw.hu.bu xr21, xr12, xr12
// Advance ref pointers by 4 rows.
add.d a1, a1, t3
add.d a2, a2, t3
add.d a3, a3, t3
xvld xr2, a0, 64 // src rows 4-5
xvld xr3, a0, 96 // src rows 6-7
LSX_LOADX_4 a1, a4, t1, t2, vr4, vr7, vr10, vr13
LSX_LOADX_4 a2, a4, t1, t2, vr5, vr8, vr11, vr14
LSX_LOADX_4 a3, a4, t1, t2, vr6, vr9, vr12, vr15
xvpermi.q xr4, xr7, 0x02
xvpermi.q xr5, xr8, 0x02
xvpermi.q xr6, xr9, 0x02
xvpermi.q xr10, xr13, 0x02
xvpermi.q xr11, xr14, 0x02
xvpermi.q xr12, xr15, 0x02
// Calculate the absolute value of the difference
xvabsd.bu xr7, xr2, xr4
xvabsd.bu xr8, xr2, xr5
xvabsd.bu xr9, xr2, xr6
xvabsd.bu xr10, xr3, xr10
xvabsd.bu xr11, xr3, xr11
xvabsd.bu xr12, xr3, xr12
xvhaddw.hu.bu xr7, xr7, xr7
xvhaddw.hu.bu xr8, xr8, xr8
xvhaddw.hu.bu xr9, xr9, xr9
xvhaddw.hu.bu xr10, xr10, xr10
xvhaddw.hu.bu xr11, xr11, xr11
xvhaddw.hu.bu xr12, xr12, xr12
xvadd.h xr16, xr16, xr7
xvadd.h xr17, xr17, xr8
xvadd.h xr18, xr18, xr9
xvadd.h xr19, xr19, xr10
xvadd.h xr20, xr20, xr11
xvadd.h xr21, xr21, xr12
add.d a1, a1, t3
add.d a2, a2, t3
add.d a3, a3, t3
xvld xr2, a0, 128 // src rows 8-9
xvld xr3, a0, 160 // src rows 10-11
LSX_LOADX_4 a1, a4, t1, t2, vr4, vr7, vr10, vr13
LSX_LOADX_4 a2, a4, t1, t2, vr5, vr8, vr11, vr14
LSX_LOADX_4 a3, a4, t1, t2, vr6, vr9, vr12, vr15
xvpermi.q xr4, xr7, 0x02
xvpermi.q xr5, xr8, 0x02
xvpermi.q xr6, xr9, 0x02
xvpermi.q xr10, xr13, 0x02
xvpermi.q xr11, xr14, 0x02
xvpermi.q xr12, xr15, 0x02
// Calculate the absolute value of the difference
xvabsd.bu xr7, xr2, xr4
xvabsd.bu xr8, xr2, xr5
xvabsd.bu xr9, xr2, xr6
xvabsd.bu xr10, xr3, xr10
xvabsd.bu xr11, xr3, xr11
xvabsd.bu xr12, xr3, xr12
xvhaddw.hu.bu xr7, xr7, xr7
xvhaddw.hu.bu xr8, xr8, xr8
xvhaddw.hu.bu xr9, xr9, xr9
xvhaddw.hu.bu xr10, xr10, xr10
xvhaddw.hu.bu xr11, xr11, xr11
xvhaddw.hu.bu xr12, xr12, xr12
xvadd.h xr16, xr16, xr7
xvadd.h xr17, xr17, xr8
xvadd.h xr18, xr18, xr9
xvadd.h xr19, xr19, xr10
xvadd.h xr20, xr20, xr11
xvadd.h xr21, xr21, xr12
add.d a1, a1, t3
add.d a2, a2, t3
add.d a3, a3, t3
xvld xr2, a0, 192 // src rows 12-13
xvld xr3, a0, 224 // src rows 14-15
LSX_LOADX_4 a1, a4, t1, t2, vr4, vr7, vr10, vr13
LSX_LOADX_4 a2, a4, t1, t2, vr5, vr8, vr11, vr14
LSX_LOADX_4 a3, a4, t1, t2, vr6, vr9, vr12, vr15
xvpermi.q xr4, xr7, 0x02
xvpermi.q xr5, xr8, 0x02
xvpermi.q xr6, xr9, 0x02
xvpermi.q xr10, xr13, 0x02
xvpermi.q xr11, xr14, 0x02
xvpermi.q xr12, xr15, 0x02
// Calculate the absolute value of the difference
xvabsd.bu xr7, xr2, xr4
xvabsd.bu xr8, xr2, xr5
xvabsd.bu xr9, xr2, xr6
xvabsd.bu xr10, xr3, xr10
xvabsd.bu xr11, xr3, xr11
xvabsd.bu xr12, xr3, xr12
xvhaddw.hu.bu xr7, xr7, xr7
xvhaddw.hu.bu xr8, xr8, xr8
xvhaddw.hu.bu xr9, xr9, xr9
xvhaddw.hu.bu xr10, xr10, xr10
xvhaddw.hu.bu xr11, xr11, xr11
xvhaddw.hu.bu xr12, xr12, xr12
xvadd.h xr16, xr16, xr7
xvadd.h xr17, xr17, xr8
xvadd.h xr18, xr18, xr9
xvadd.h xr19, xr19, xr10
xvadd.h xr20, xr20, xr11
xvadd.h xr21, xr21, xr12
// Final reduction: merge the two accumulators of each ref, widen-sum
// h->w->d->q per 128-bit lane, then add the two lane totals
// (element 0 + element 4) to get one 32-bit SAD per reference.
xvadd.h xr11, xr16, xr19
xvadd.h xr12, xr17, xr20
xvadd.h xr13, xr18, xr21
xvhaddw.wu.hu xr11, xr11, xr11
xvhaddw.wu.hu xr12, xr12, xr12
xvhaddw.wu.hu xr13, xr13, xr13
xvhaddw.du.wu xr11, xr11, xr11
xvhaddw.du.wu xr12, xr12, xr12
xvhaddw.du.wu xr13, xr13, xr13
xvhaddw.qu.du xr11, xr11, xr11
xvhaddw.qu.du xr12, xr12, xr12
xvhaddw.qu.du xr13, xr13, xr13
xvpickve.w xr17, xr11, 4
xvpickve.w xr18, xr12, 4
xvpickve.w xr19, xr13, 4
xvadd.w xr11, xr11, xr17
xvadd.w xr12, xr12, xr18
xvadd.w xr13, xr13, xr19
// Store data to p_sad_array
vstelm.w vr11, a5, 0, 0
vstelm.w vr12, a5, 4, 0
vstelm.w vr13, a5, 8, 0
endfunc_x264
/* void x264_pixel_sad_x3_16x8_lasx(uint8_t *p_src, uint8_t *p_ref0,
* uint8_t *p_ref1, uint8_t *p_ref2,
* intptr_t i_ref_stride,
* int32_t p_sad_array[3])
*/
function_x264 pixel_sad_x3_16x8_lasx
// SAD of one 16x8 source block against three 16x8 reference blocks (LASX).
// In:  a0 = p_src (packed 16-byte pitch), a1..a3 = p_ref0..p_ref2,
//      a4 = i_ref_stride, a5 = int32_t p_sad_array[3] (output).
// Same scheme as pixel_sad_x3_16x16_lasx with two 4-row groups:
// six 16-bit accumulators xr16..xr21 (two per reference).
// Load data from p_src, p_ref0, p_ref1 and p_ref2
slli.d t1, a4, 1 // t1 = 2 * i_ref_stride
add.d t2, a4, t1 // t2 = 3 * i_ref_stride
slli.d t3, a4, 2 // t3 = 4 * i_ref_stride
xvld xr2, a0, 0 // src rows 0-1
xvld xr3, a0, 32 // src rows 2-3
LSX_LOADX_4 a1, a4, t1, t2, vr4, vr7, vr10, vr13
LSX_LOADX_4 a2, a4, t1, t2, vr5, vr8, vr11, vr14
LSX_LOADX_4 a3, a4, t1, t2, vr6, vr9, vr12, vr15
xvpermi.q xr4, xr7, 0x02
xvpermi.q xr5, xr8, 0x02
xvpermi.q xr6, xr9, 0x02
xvpermi.q xr10, xr13, 0x02
xvpermi.q xr11, xr14, 0x02
xvpermi.q xr12, xr15, 0x02
// Calculate the absolute value of the difference
xvabsd.bu xr7, xr2, xr4
xvabsd.bu xr8, xr2, xr5
xvabsd.bu xr9, xr2, xr6
xvabsd.bu xr10, xr3, xr10
xvabsd.bu xr11, xr3, xr11
xvabsd.bu xr12, xr3, xr12
// Initialize the six 16-bit accumulators.
xvhaddw.hu.bu xr16, xr7, xr7
xvhaddw.hu.bu xr17, xr8, xr8
xvhaddw.hu.bu xr18, xr9, xr9
xvhaddw.hu.bu xr19, xr10, xr10
xvhaddw.hu.bu xr20, xr11, xr11
xvhaddw.hu.bu xr21, xr12, xr12
// Advance ref pointers by 4 rows.
add.d a1, a1, t3
add.d a2, a2, t3
add.d a3, a3, t3
xvld xr2, a0, 64 // src rows 4-5
xvld xr3, a0, 96 // src rows 6-7
LSX_LOADX_4 a1, a4, t1, t2, vr4, vr7, vr10, vr13
LSX_LOADX_4 a2, a4, t1, t2, vr5, vr8, vr11, vr14
LSX_LOADX_4 a3, a4, t1, t2, vr6, vr9, vr12, vr15
xvpermi.q xr4, xr7, 0x02
xvpermi.q xr5, xr8, 0x02
xvpermi.q xr6, xr9, 0x02
xvpermi.q xr10, xr13, 0x02
xvpermi.q xr11, xr14, 0x02
xvpermi.q xr12, xr15, 0x02
// Calculate the absolute value of the difference
xvabsd.bu xr7, xr2, xr4
xvabsd.bu xr8, xr2, xr5
xvabsd.bu xr9, xr2, xr6
xvabsd.bu xr10, xr3, xr10
xvabsd.bu xr11, xr3, xr11
xvabsd.bu xr12, xr3, xr12
xvhaddw.hu.bu xr7, xr7, xr7
xvhaddw.hu.bu xr8, xr8, xr8
xvhaddw.hu.bu xr9, xr9, xr9
xvhaddw.hu.bu xr10, xr10, xr10
xvhaddw.hu.bu xr11, xr11, xr11
xvhaddw.hu.bu xr12, xr12, xr12
xvadd.h xr16, xr16, xr7
xvadd.h xr17, xr17, xr8
xvadd.h xr18, xr18, xr9
xvadd.h xr19, xr19, xr10
xvadd.h xr20, xr20, xr11
xvadd.h xr21, xr21, xr12
// Final reduction: merge each ref's two accumulators, widen-sum
// h->w->d->q per lane, then add element 0 + element 4.
xvadd.h xr11, xr16, xr19
xvadd.h xr12, xr17, xr20
xvadd.h xr13, xr18, xr21
xvhaddw.wu.hu xr11, xr11, xr11
xvhaddw.wu.hu xr12, xr12, xr12
xvhaddw.wu.hu xr13, xr13, xr13
xvhaddw.du.wu xr11, xr11, xr11
xvhaddw.du.wu xr12, xr12, xr12
xvhaddw.du.wu xr13, xr13, xr13
xvhaddw.qu.du xr11, xr11, xr11
xvhaddw.qu.du xr12, xr12, xr12
xvhaddw.qu.du xr13, xr13, xr13
xvpickve.w xr17, xr11, 4
xvpickve.w xr18, xr12, 4
xvpickve.w xr19, xr13, 4
xvadd.w xr11, xr11, xr17
xvadd.w xr12, xr12, xr18
xvadd.w xr13, xr13, xr19
// Store data to p_sad_array
vstelm.w vr11, a5, 0, 0
vstelm.w vr12, a5, 4, 0
vstelm.w vr13, a5, 8, 0
endfunc_x264
/* void x264_pixel_sad_x3_4x4_lsx(uint8_t *p_src, uint8_t *p_ref0,
* uint8_t *p_ref1, uint8_t *p_ref2,
* intptr_t i_ref_stride,
* int32_t p_sad_array[3])
*/
function_x264 pixel_sad_x3_4x4_lsx
// SAD of one 4x4 source block against three 4x4 reference blocks (LSX).
// In:  a0 = p_src (row offsets 0/16/32/48 -- packed 16-byte pitch assumed),
//      a1..a3 = p_ref0..p_ref2, a4 = i_ref_stride,
//      a5 = int32_t p_sad_array[3] (output).
// Each 4x4 block (16 bytes) is gathered into one 128-bit register and
// reduced independently.
slli.d t1, a4, 1 // t1 = 2 * i_ref_stride
add.d t2, a4, t1 // t2 = 3 * i_ref_stride
// Load data from p_src, p_ref0, p_ref1 and p_ref2
fld.s f3, a0, 0
fld.s f7, a0, 16
fld.s f11, a0, 32
fld.s f15, a0, 48
FLDS_LOADX_4 a1, a4, t1, t2, f4, f8, f12, f16
FLDS_LOADX_4 a2, a4, t1, t2, f5, f9, f13, f17
FLDS_LOADX_4 a3, a4, t1, t2, f6, f10, f14, f18
// Interleave rows 0/1 and rows 2/3, then combine to 16-byte registers.
vilvl.w vr3, vr7, vr3
vilvl.w vr4, vr8, vr4
vilvl.w vr5, vr9, vr5
vilvl.w vr6, vr10, vr6
vilvl.w vr11, vr15, vr11
vilvl.w vr12, vr16, vr12
vilvl.w vr13, vr17, vr13
vilvl.w vr14, vr18, vr14
vilvl.d vr3, vr11, vr3
vilvl.d vr4, vr12, vr4
vilvl.d vr5, vr13, vr5
vilvl.d vr6, vr14, vr6
// Calculate the absolute value of the difference
vabsd.bu vr7, vr3, vr4
vabsd.bu vr8, vr3, vr5
vabsd.bu vr9, vr3, vr6
// Widen-sum bu->h->w->d->q: full horizontal reduction per reference.
vhaddw.hu.bu vr7, vr7, vr7
vhaddw.hu.bu vr8, vr8, vr8
vhaddw.hu.bu vr9, vr9, vr9
vhaddw.wu.hu vr7, vr7, vr7
vhaddw.wu.hu vr8, vr8, vr8
vhaddw.wu.hu vr9, vr9, vr9
vhaddw.du.wu vr7, vr7, vr7
vhaddw.du.wu vr8, vr8, vr8
vhaddw.du.wu vr9, vr9, vr9
vhaddw.qu.du vr7, vr7, vr7
vhaddw.qu.du vr8, vr8, vr8
vhaddw.qu.du vr9, vr9, vr9
// Store data to p_sad_array
vstelm.w vr7, a5, 0, 0
vstelm.w vr8, a5, 4, 0
vstelm.w vr9, a5, 8, 0
endfunc_x264
/* int32_t x264_pixel_sad_8x4_lasx(uint8_t *p_src, intptr_t i_src_stride,
* uint8_t *p_ref, intptr_t i_ref_stride)
*/
function_x264 pixel_sad_8x4_lasx
// int32_t pixel_sad_8x4_lasx(p_src, i_src_stride, p_ref, i_ref_stride)
// Returns the 8x4 SAD in a0.  Unlike the x3/x4 kernels, both src and ref
// are strided buffers here (a1 = src stride, a3 = ref stride).
slli.d t1, a1, 1 // t1 = 2 * i_src_stride
slli.d t2, a3, 1 // t2 = 2 * i_ref_stride
add.d t3, a1, t1 // t3 = 3 * i_src_stride
add.d t4, a3, t2 // t4 = 3 * i_ref_stride
// Load data from p_src and p_ref
FLDD_LOADX_4 a0, a1, t1, t3, f3, f5, f7, f9
FLDD_LOADX_4 a2, a3, t2, t4, f4, f6, f8, f10
// Pack rows 0-1 and 2-3 of src/ref, then all four rows into one 256-bit reg.
vilvl.d vr3, vr5, vr3
vilvl.d vr4, vr6, vr4
vilvl.d vr7, vr9, vr7
vilvl.d vr8, vr10, vr8
xvpermi.q xr3, xr7, 0x02
xvpermi.q xr4, xr8, 0x02
// Calculate the absolute value of the difference
xvabsd.bu xr5, xr3, xr4
// Widen-sum bu->h->w->d->q: one 32-bit sum per 128-bit lane.
xvhaddw.hu.bu xr6, xr5, xr5
xvhaddw.wu.hu xr6, xr6, xr6
xvhaddw.du.wu xr6, xr6, xr6
xvhaddw.qu.du xr6, xr6, xr6
xvpickve2gr.wu t2, xr6, 0 // low-lane total
xvpickve2gr.wu t3, xr6, 4 // high-lane total
add.d a0, t2, t3 // return value
endfunc_x264
/* int32_t x264_pixel_sad_4x4_lsx(uint8_t *p_src, intptr_t i_src_stride,
* uint8_t *p_ref, intptr_t i_ref_stride)
*/
function_x264 pixel_sad_4x4_lsx
// int32_t pixel_sad_4x4_lsx(p_src, i_src_stride, p_ref, i_ref_stride)
// Returns the 4x4 SAD in a0.  Both buffers are strided.
slli.d t1, a1, 1 // t1 = 2 * i_src_stride
slli.d t2, a3, 1 // t2 = 2 * i_ref_stride
add.d t3, a1, t1 // t3 = 3 * i_src_stride
add.d t4, a3, t2 // t4 = 3 * i_ref_stride
// Load data from p_src and p_ref
FLDS_LOADX_4 a0, a1, t1, t3, f3, f5, f7, f9
FLDS_LOADX_4 a2, a3, t2, t4, f4, f6, f8, f10
// Gather the four 4-byte rows of each block into one 128-bit register.
vilvl.w vr3, vr5, vr3
vilvl.w vr4, vr6, vr4
vilvl.w vr7, vr9, vr7
vilvl.w vr8, vr10, vr8
vilvl.d vr3, vr7, vr3
vilvl.d vr4, vr8, vr4
// Calculate the absolute value of the difference
vabsd.bu vr5, vr3, vr4
// Widen-sum bu->h->w->d->q: full horizontal reduction.
vhaddw.hu.bu vr6, vr5, vr5
vhaddw.wu.hu vr6, vr6, vr6
vhaddw.du.wu vr6, vr6, vr6
vhaddw.qu.du vr6, vr6, vr6
vpickve2gr.wu a0, vr6, 0 // return value
endfunc_x264
/* int32_t x264_pixel_sad_4x8_lsx(uint8_t *p_src, intptr_t i_src_stride,
* uint8_t *p_ref, intptr_t i_ref_stride)
*/
function_x264 pixel_sad_4x8_lsx
// int32_t pixel_sad_4x8_lsx(p_src, i_src_stride, p_ref, i_ref_stride)
// Returns the 4x8 SAD in a0: two 4x4 row groups, accumulated as 16-bit
// sums in vr11 and reduced once at the end.
slli.d t1, a1, 1 // t1 = 2 * i_src_stride
slli.d t2, a3, 1 // t2 = 2 * i_ref_stride
add.d t3, a1, t1 // t3 = 3 * i_src_stride
add.d t4, a3, t2 // t4 = 3 * i_ref_stride
// Load data from p_src and p_ref
FLDS_LOADX_4 a0, a1, t1, t3, f3, f5, f7, f9
FLDS_LOADX_4 a2, a3, t2, t4, f4, f6, f8, f10
// Gather rows 0-3 of each block into one 128-bit register.
vilvl.w vr3, vr5, vr3
vilvl.w vr4, vr6, vr4
vilvl.w vr7, vr9, vr7
vilvl.w vr8, vr10, vr8
vilvl.d vr3, vr7, vr3
vilvl.d vr4, vr8, vr4
vabsd.bu vr11, vr3, vr4
vhaddw.hu.bu vr11, vr11, vr11 // 16-bit partial sums for rows 0-3
// Advance both pointers by 4 rows (ptr += 4 * stride).
alsl.d a0, a1, a0, 2
alsl.d a2, a3, a2, 2
FLDS_LOADX_4 a0, a1, t1, t3, f3, f5, f7, f9
FLDS_LOADX_4 a2, a3, t2, t4, f4, f6, f8, f10
vilvl.w vr3, vr5, vr3
vilvl.w vr4, vr6, vr4
vilvl.w vr7, vr9, vr7
vilvl.w vr8, vr10, vr8
vilvl.d vr3, vr7, vr3
vilvl.d vr4, vr8, vr4
vabsd.bu vr5, vr3, vr4
vhaddw.hu.bu vr5, vr5, vr5 // 16-bit partial sums for rows 4-7
vadd.h vr6, vr11, vr5
// Widen-sum h->w->d->q: full horizontal reduction.
vhaddw.wu.hu vr6, vr6, vr6
vhaddw.du.wu vr6, vr6, vr6
vhaddw.qu.du vr6, vr6, vr6
vpickve2gr.wu a0, vr6, 0 // return value
endfunc_x264
/* int32_t x264_pixel_sad_4x16_lsx(uint8_t *p_src, intptr_t i_src_stride,
* uint8_t *p_ref, intptr_t i_ref_stride)
*/
function_x264 pixel_sad_4x16_lsx
// int32_t pixel_sad_4x16_lsx(p_src, i_src_stride, p_ref, i_ref_stride)
// Returns the 4x16 SAD in a0: four 4x4 row groups (first one unrolled,
// three more via .rept), accumulated as 16-bit sums in vr11.
slli.d t1, a1, 1 // t1 = 2 * i_src_stride
slli.d t2, a3, 1 // t2 = 2 * i_ref_stride
add.d t3, a1, t1 // t3 = 3 * i_src_stride
add.d t4, a3, t2 // t4 = 3 * i_ref_stride
// Load data from p_src and p_ref
FLDS_LOADX_4 a0, a1, t1, t3, f3, f5, f7, f9
FLDS_LOADX_4 a2, a3, t2, t4, f4, f6, f8, f10
// Gather rows 0-3 of each block into one 128-bit register.
vilvl.w vr3, vr5, vr3
vilvl.w vr4, vr6, vr4
vilvl.w vr7, vr9, vr7
vilvl.w vr8, vr10, vr8
vilvl.d vr3, vr7, vr3
vilvl.d vr4, vr8, vr4
vabsd.bu vr11, vr3, vr4
vhaddw.hu.bu vr11, vr11, vr11 // running 16-bit accumulator
// Three more 4-row groups.
.rept 3
alsl.d a0, a1, a0, 2 // p_src += 4 * i_src_stride
alsl.d a2, a3, a2, 2 // p_ref += 4 * i_ref_stride
FLDS_LOADX_4 a0, a1, t1, t3, f3, f5, f7, f9
FLDS_LOADX_4 a2, a3, t2, t4, f4, f6, f8, f10
vilvl.w vr3, vr5, vr3
vilvl.w vr4, vr6, vr4
vilvl.w vr7, vr9, vr7
vilvl.w vr8, vr10, vr8
vilvl.d vr3, vr7, vr3
vilvl.d vr4, vr8, vr4
vabsd.bu vr12, vr3, vr4
vhaddw.hu.bu vr12, vr12, vr12
vadd.h vr11, vr11, vr12
.endr
// Widen-sum h->w->d->q: full horizontal reduction.
vhaddw.wu.hu vr11, vr11, vr11
vhaddw.du.wu vr11, vr11, vr11
vhaddw.qu.du vr11, vr11, vr11
vpickve2gr.wu a0, vr11, 0 // return value
endfunc_x264
/* int32_t x264_pixel_sad_8x4_lsx(uint8_t *p_src, intptr_t i_src_stride,
* uint8_t *p_ref, intptr_t i_ref_stride)
*/
function_x264 pixel_sad_8x4_lsx
// int32_t pixel_sad_8x4_lsx(p_src, i_src_stride, p_ref, i_ref_stride)
// Returns the 8x4 SAD in a0 (128-bit LSX variant of pixel_sad_8x4_lasx).
slli.d t1, a1, 1 // t1 = 2 * i_src_stride
slli.d t2, a3, 1 // t2 = 2 * i_ref_stride
add.d t3, a1, t1 // t3 = 3 * i_src_stride
add.d t4, a3, t2 // t4 = 3 * i_ref_stride
FLDD_LOADX_4 a0, a1, t1, t3, f3, f5, f7, f9
FLDD_LOADX_4 a2, a3, t2, t4, f4, f6, f8, f10
// Pack 8-byte rows in pairs: vr3/vr7 = src rows 0-1/2-3, vr4/vr8 = ref.
vilvl.d vr3, vr5, vr3
vilvl.d vr7, vr9, vr7
vilvl.d vr4, vr6, vr4
vilvl.d vr8, vr10, vr8
vabsd.bu vr11, vr3, vr4
vabsd.bu vr12, vr7, vr8
vhaddw.hu.bu vr11, vr11, vr11
vhaddw.hu.bu vr12, vr12, vr12
vadd.h vr6, vr11, vr12
// Widen-sum h->w->d->q: full horizontal reduction.
vhaddw.wu.hu vr6, vr6, vr6
vhaddw.du.wu vr6, vr6, vr6
vhaddw.qu.du vr6, vr6, vr6
vpickve2gr.wu a0, vr6, 0 // return value
endfunc_x264
/* int32_t x264_pixel_sad_8x8_lsx(uint8_t *p_src, intptr_t i_src_stride,
* uint8_t *p_ref, intptr_t i_ref_stride)
*/
function_x264 pixel_sad_8x8_lsx
// int32_t pixel_sad_8x8_lsx(p_src, i_src_stride, p_ref, i_ref_stride)
// Returns the 8x8 SAD in a0: two 8x4 groups, 16-bit partial sums carried
// in vr13 across the groups.
slli.d t1, a1, 1 // t1 = 2 * i_src_stride
slli.d t2, a3, 1 // t2 = 2 * i_ref_stride
add.d t3, a1, t1 // t3 = 3 * i_src_stride
add.d t4, a3, t2 // t4 = 3 * i_ref_stride
FLDD_LOADX_4 a0, a1, t1, t3, f3, f5, f7, f9
FLDD_LOADX_4 a2, a3, t2, t4, f4, f6, f8, f10
// Pack 8-byte rows in pairs.
vilvl.d vr3, vr5, vr3
vilvl.d vr7, vr9, vr7
vilvl.d vr4, vr6, vr4
vilvl.d vr8, vr10, vr8
vabsd.bu vr11, vr3, vr4
vabsd.bu vr12, vr7, vr8
vhaddw.hu.bu vr11, vr11, vr11
vhaddw.hu.bu vr12, vr12, vr12
vadd.h vr13, vr11, vr12 // partial sums, rows 0-3
// Advance both pointers by 4 rows (ptr += 4 * stride).
alsl.d a0, a1, a0, 2
alsl.d a2, a3, a2, 2
FLDD_LOADX_4 a0, a1, t1, t3, f3, f5, f7, f9
FLDD_LOADX_4 a2, a3, t2, t4, f4, f6, f8, f10
vilvl.d vr3, vr5, vr3
vilvl.d vr7, vr9, vr7
vilvl.d vr4, vr6, vr4
vilvl.d vr8, vr10, vr8
vabsd.bu vr11, vr3, vr4
vabsd.bu vr12, vr7, vr8
vhaddw.hu.bu vr11, vr11, vr11
vhaddw.hu.bu vr12, vr12, vr12
vadd.h vr6, vr11, vr12 // partial sums, rows 4-7
vadd.h vr6, vr6, vr13
// Widen-sum h->w->d->q: full horizontal reduction.
vhaddw.wu.hu vr6, vr6, vr6
vhaddw.du.wu vr6, vr6, vr6
vhaddw.qu.du vr6, vr6, vr6
vpickve2gr.wu a0, vr6, 0 // return value
endfunc_x264
/* int32_t x264_pixel_sad_8x16_lsx(uint8_t *p_src, intptr_t i_src_stride,
* uint8_t *p_ref, intptr_t i_ref_stride)
*/
function_x264 pixel_sad_8x16_lsx
// int32_t pixel_sad_8x16_lsx(p_src, i_src_stride, p_ref, i_ref_stride)
// Returns the 8x16 SAD in a0: four 8x4 groups (first unrolled, three via
// .rept), 16-bit partial sums carried in vr13.
slli.d t1, a1, 1 // t1 = 2 * i_src_stride
slli.d t2, a3, 1 // t2 = 2 * i_ref_stride
add.d t3, a1, t1 // t3 = 3 * i_src_stride
add.d t4, a3, t2 // t4 = 3 * i_ref_stride
FLDD_LOADX_4 a0, a1, t1, t3, f3, f5, f7, f9
FLDD_LOADX_4 a2, a3, t2, t4, f4, f6, f8, f10
// Pack 8-byte rows in pairs.
vilvl.d vr3, vr5, vr3
vilvl.d vr7, vr9, vr7
vilvl.d vr4, vr6, vr4
vilvl.d vr8, vr10, vr8
vabsd.bu vr11, vr3, vr4
vabsd.bu vr12, vr7, vr8
vhaddw.hu.bu vr11, vr11, vr11
vhaddw.hu.bu vr12, vr12, vr12
vadd.h vr13, vr11, vr12 // running 16-bit accumulator
// Three more 4-row groups.
.rept 3
alsl.d a0, a1, a0, 2 // p_src += 4 * i_src_stride
alsl.d a2, a3, a2, 2 // p_ref += 4 * i_ref_stride
FLDD_LOADX_4 a0, a1, t1, t3, f3, f5, f7, f9
FLDD_LOADX_4 a2, a3, t2, t4, f4, f6, f8, f10
vilvl.d vr3, vr5, vr3
vilvl.d vr7, vr9, vr7
vilvl.d vr4, vr6, vr4
vilvl.d vr8, vr10, vr8
vabsd.bu vr11, vr3, vr4
vabsd.bu vr12, vr7, vr8
vhaddw.hu.bu vr11, vr11, vr11
vhaddw.hu.bu vr12, vr12, vr12
vadd.h vr14, vr11, vr12
vadd.h vr13, vr13, vr14
.endr
// Widen-sum h->w->d->q: full horizontal reduction.
vhaddw.wu.hu vr13, vr13, vr13
vhaddw.du.wu vr13, vr13, vr13
vhaddw.qu.du vr13, vr13, vr13
vpickve2gr.wu a0, vr13, 0 // return value
endfunc_x264
/* int32_t x264_pixel_sad_16x8_lsx(uint8_t *p_src, intptr_t i_src_stride,
* uint8_t *p_ref, intptr_t i_ref_stride)
*/
function_x264 pixel_sad_16x8_lsx
// int32_t pixel_sad_16x8_lsx(p_src, i_src_stride, p_ref, i_ref_stride)
// Returns the 16x8 SAD in a0: two groups of four full 16-byte rows,
// 16-bit partial sums carried in vr14.
slli.d t1, a1, 1 // t1 = 2 * i_src_stride
slli.d t2, a3, 1 // t2 = 2 * i_ref_stride
add.d t3, a1, t1 // t3 = 3 * i_src_stride
add.d t4, a3, t2 // t4 = 3 * i_ref_stride
LSX_LOADX_4 a0, a1, t1, t3, vr0, vr1, vr2, vr3
LSX_LOADX_4 a2, a3, t2, t4, vr4, vr5, vr6, vr7
vabsd.bu vr8, vr0, vr4
vabsd.bu vr9, vr1, vr5
vabsd.bu vr10, vr2, vr6
vabsd.bu vr11, vr3, vr7
vhaddw.hu.bu vr8, vr8, vr8
vhaddw.hu.bu vr9, vr9, vr9
vhaddw.hu.bu vr10, vr10, vr10
vhaddw.hu.bu vr11, vr11, vr11
vadd.h vr8, vr8, vr9
vadd.h vr9, vr10, vr11
vadd.h vr14, vr8, vr9 // partial sums, rows 0-3
// Advance both pointers by 4 rows (ptr += 4 * stride).
alsl.d a0, a1, a0, 2
alsl.d a2, a3, a2, 2
LSX_LOADX_4 a0, a1, t1, t3, vr0, vr1, vr2, vr3
LSX_LOADX_4 a2, a3, t2, t4, vr4, vr5, vr6, vr7
vabsd.bu vr8, vr0, vr4
vabsd.bu vr9, vr1, vr5
vabsd.bu vr10, vr2, vr6
vabsd.bu vr11, vr3, vr7
vhaddw.hu.bu vr8, vr8, vr8
vhaddw.hu.bu vr9, vr9, vr9
vhaddw.hu.bu vr10, vr10, vr10
vhaddw.hu.bu vr11, vr11, vr11
vadd.h vr8, vr8, vr9
vadd.h vr9, vr10, vr11
vadd.h vr12, vr8, vr9 // partial sums, rows 4-7
vadd.h vr13, vr12, vr14
// Widen-sum h->w->d->q: full horizontal reduction.
vhaddw.wu.hu vr13, vr13, vr13
vhaddw.du.wu vr13, vr13, vr13
vhaddw.qu.du vr13, vr13, vr13
vpickve2gr.wu a0, vr13, 0 // return value
endfunc_x264
/* int32_t x264_pixel_sad_16x16_lsx(uint8_t *p_src, intptr_t i_src_stride,
* uint8_t *p_ref, intptr_t i_ref_stride)
*/
function_x264 pixel_sad_16x16_lsx
// int32_t pixel_sad_16x16_lsx(p_src, i_src_stride, p_ref, i_ref_stride)
// Returns the 16x16 SAD in a0: four groups of four full 16-byte rows
// (first unrolled, three via .rept), 16-bit partial sums carried in vr13.
slli.d t1, a1, 1 // t1 = 2 * i_src_stride
slli.d t2, a3, 1 // t2 = 2 * i_ref_stride
add.d t3, a1, t1 // t3 = 3 * i_src_stride
add.d t4, a3, t2 // t4 = 3 * i_ref_stride
LSX_LOADX_4 a0, a1, t1, t3, vr0, vr1, vr2, vr3
LSX_LOADX_4 a2, a3, t2, t4, vr4, vr5, vr6, vr7
vabsd.bu vr8, vr0, vr4
vabsd.bu vr9, vr1, vr5
vabsd.bu vr10, vr2, vr6
vabsd.bu vr11, vr3, vr7
vhaddw.hu.bu vr8, vr8, vr8
vhaddw.hu.bu vr9, vr9, vr9
vhaddw.hu.bu vr10, vr10, vr10
vhaddw.hu.bu vr11, vr11, vr11
vadd.h vr8, vr8, vr9
vadd.h vr9, vr10, vr11
vadd.h vr13, vr8, vr9 // running 16-bit accumulator
// Three more 4-row groups.
.rept 3
alsl.d a0, a1, a0, 2 // p_src += 4 * i_src_stride
alsl.d a2, a3, a2, 2 // p_ref += 4 * i_ref_stride
LSX_LOADX_4 a0, a1, t1, t3, vr0, vr1, vr2, vr3
LSX_LOADX_4 a2, a3, t2, t4, vr4, vr5, vr6, vr7
vabsd.bu vr8, vr0, vr4
vabsd.bu vr9, vr1, vr5
vabsd.bu vr10, vr2, vr6
vabsd.bu vr11, vr3, vr7
vhaddw.hu.bu vr8, vr8, vr8
vhaddw.hu.bu vr9, vr9, vr9
vhaddw.hu.bu vr10, vr10, vr10
vhaddw.hu.bu vr11, vr11, vr11
vadd.h vr8, vr8, vr9
vadd.h vr9, vr10, vr11
vadd.h vr12, vr8, vr9
vadd.h vr13, vr12, vr13
.endr
// Widen-sum h->w->d->q: full horizontal reduction.
vhaddw.wu.hu vr13, vr13, vr13
vhaddw.du.wu vr13, vr13, vr13
vhaddw.qu.du vr13, vr13, vr13
vpickve2gr.wu a0, vr13, 0 // return value
endfunc_x264
/*
* void x264_pixel_sad_x3_4x8_lsx(uint8_t *p_src, uint8_t *p_ref0,
* uint8_t *p_ref1, uint8_t *p_ref2,
* intptr_t i_ref_stride,
* int32_t p_sad_array[3])
*/
function_x264 pixel_sad_x3_4x8_lsx
// SAD of one 4x8 src block against three references in a single pass.
// a0 = p_src (rows 16 bytes apart — offsets 0/16/32/48 below; presumably
// x264's packed encode buffer stride, confirm against caller),
// a1..a3 = p_ref0..2, a4 = i_ref_stride, a5 = p_sad_array[3].
// t1 = 2*stride, t2 = 3*stride.
slli.d t1, a4, 1
add.d t2, a4, t1
// Load data from p_src, p_ref0, p_ref1 and p_ref2
fld.s f3, a0, 0
fld.s f7, a0, 16
fld.s f11, a0, 32
fld.s f15, a0, 48
FLDS_LOADX_4 a1, a4, t1, t2, f4, f8, f12, f16
FLDS_LOADX_4 a2, a4, t1, t2, f5, f9, f13, f17
FLDS_LOADX_4 a3, a4, t1, t2, f6, f10, f14, f18
// Interleave the four 4-byte rows of each block into one 128-bit vector
// (words first, then doublewords): vr3 = src, vr4..vr6 = ref0..2.
vilvl.w vr3, vr7, vr3
vilvl.w vr4, vr8, vr4
vilvl.w vr5, vr9, vr5
vilvl.w vr6, vr10, vr6
vilvl.w vr11, vr15, vr11
vilvl.w vr12, vr16, vr12
vilvl.w vr13, vr17, vr13
vilvl.w vr14, vr18, vr14
vilvl.d vr3, vr11, vr3
vilvl.d vr4, vr12, vr4
vilvl.d vr5, vr13, vr5
vilvl.d vr6, vr14, vr6
// Byte |src-ref| for rows 0..3 of each reference.
vabsd.bu vr0, vr3, vr4
vabsd.bu vr1, vr3, vr5
vabsd.bu vr2, vr3, vr6
// Advance the three ref pointers by 4 rows; src uses fixed offsets.
alsl.d a1, a4, a1, 2
alsl.d a2, a4, a2, 2
alsl.d a3, a4, a3, 2
fld.s f3, a0, 64
fld.s f7, a0, 80
fld.s f11, a0, 96
fld.s f15, a0, 112
FLDS_LOADX_4 a1, a4, t1, t2, f4, f8, f12, f16
FLDS_LOADX_4 a2, a4, t1, t2, f5, f9, f13, f17
FLDS_LOADX_4 a3, a4, t1, t2, f6, f10, f14, f18
vilvl.w vr3, vr7, vr3
vilvl.w vr4, vr8, vr4
vilvl.w vr5, vr9, vr5
vilvl.w vr6, vr10, vr6
vilvl.w vr11, vr15, vr11
vilvl.w vr12, vr16, vr12
vilvl.w vr13, vr17, vr13
vilvl.w vr14, vr18, vr14
vilvl.d vr3, vr11, vr3
vilvl.d vr4, vr12, vr4
vilvl.d vr5, vr13, vr5
vilvl.d vr6, vr14, vr6
// Byte |src-ref| for rows 4..7 of each reference.
vabsd.bu vr7, vr3, vr4
vabsd.bu vr8, vr3, vr5
vabsd.bu vr9, vr3, vr6
// Widen both halves to halfwords and accumulate per reference.
vhaddw.hu.bu vr0, vr0, vr0
vhaddw.hu.bu vr1, vr1, vr1
vhaddw.hu.bu vr2, vr2, vr2
vhaddw.hu.bu vr7, vr7, vr7
vhaddw.hu.bu vr8, vr8, vr8
vhaddw.hu.bu vr9, vr9, vr9
vadd.h vr7, vr7, vr0
vadd.h vr8, vr8, vr1
vadd.h vr9, vr9, vr2
// Full horizontal reduction h->w->d->q for each of the three sums.
vhaddw.wu.hu vr7, vr7, vr7
vhaddw.wu.hu vr8, vr8, vr8
vhaddw.wu.hu vr9, vr9, vr9
vhaddw.du.wu vr7, vr7, vr7
vhaddw.du.wu vr8, vr8, vr8
vhaddw.du.wu vr9, vr9, vr9
vhaddw.qu.du vr7, vr7, vr7
vhaddw.qu.du vr8, vr8, vr8
vhaddw.qu.du vr9, vr9, vr9
// Store data to p_sad_array
vstelm.w vr7, a5, 0, 0
vstelm.w vr8, a5, 4, 0
vstelm.w vr9, a5, 8, 0
endfunc_x264
/*
* void x264_pixel_sad_x3_8x4_lsx(uint8_t *p_src, uint8_t *p_ref0,
* uint8_t *p_ref1, uint8_t *p_ref2,
* intptr_t i_ref_stride,
* int32_t p_sad_array[3])
*/
function_x264 pixel_sad_x3_8x4_lsx
// SAD of one 8x4 src block against three references.
// a0 = p_src (rows at fixed offsets 0/16/32/48), a1..a3 = p_ref0..2,
// a4 = i_ref_stride, a5 = p_sad_array[3]. t1 = 2*stride, t2 = 3*stride.
slli.d t1, a4, 1
add.d t2, a4, t1
// Load data from p_src, p_ref0, p_ref1 and p_ref2
fld.d f3, a0, 0
fld.d f7, a0, 16
fld.d f11, a0, 32
fld.d f15, a0, 48
FLDD_LOADX_4 a1, a4, t1, t2, f4, f8, f12, f16
FLDD_LOADX_4 a2, a4, t1, t2, f5, f9, f13, f17
FLDD_LOADX_4 a3, a4, t1, t2, f6, f10, f14, f18
// Pair the 8-byte rows: vr3..vr6 hold rows 0-1, vr11..vr14 rows 2-3.
vilvl.d vr3, vr7, vr3
vilvl.d vr4, vr8, vr4
vilvl.d vr5, vr9, vr5
vilvl.d vr6, vr10, vr6
vilvl.d vr11, vr15, vr11
vilvl.d vr12, vr16, vr12
vilvl.d vr13, vr17, vr13
vilvl.d vr14, vr18, vr14
// |src-ref| per reference, for each row pair.
vabsd.bu vr0, vr3, vr4
vabsd.bu vr1, vr3, vr5
vabsd.bu vr2, vr3, vr6
vabsd.bu vr3, vr11, vr12
vabsd.bu vr4, vr11, vr13
vabsd.bu vr5, vr11, vr14
// Widen bytes to halfwords, then combine the two row pairs.
vhaddw.hu.bu vr0, vr0, vr0
vhaddw.hu.bu vr1, vr1, vr1
vhaddw.hu.bu vr2, vr2, vr2
vhaddw.hu.bu vr3, vr3, vr3
vhaddw.hu.bu vr4, vr4, vr4
vhaddw.hu.bu vr5, vr5, vr5
vadd.h vr7, vr0, vr3
vadd.h vr8, vr1, vr4
vadd.h vr9, vr2, vr5
// Horizontal reduction h->w->d->q per reference.
vhaddw.wu.hu vr7, vr7, vr7
vhaddw.wu.hu vr8, vr8, vr8
vhaddw.wu.hu vr9, vr9, vr9
vhaddw.du.wu vr7, vr7, vr7
vhaddw.du.wu vr8, vr8, vr8
vhaddw.du.wu vr9, vr9, vr9
vhaddw.qu.du vr7, vr7, vr7
vhaddw.qu.du vr8, vr8, vr8
vhaddw.qu.du vr9, vr9, vr9
// Store data to p_sad_array
vstelm.w vr7, a5, 0, 0
vstelm.w vr8, a5, 4, 0
vstelm.w vr9, a5, 8, 0
endfunc_x264
/*
* void x264_pixel_sad_x3_8x8_lsx(uint8_t *p_src, uint8_t *p_ref0,
* uint8_t *p_ref1, uint8_t *p_ref2,
* intptr_t i_ref_stride,
* int32_t p_sad_array[3])
*/
function_x264 pixel_sad_x3_8x8_lsx
// SAD of one 8x8 src block against three references, 4 rows per half.
// a0 = p_src (rows at fixed 16-byte offsets), a1..a3 = p_ref0..2,
// a4 = i_ref_stride, a5 = p_sad_array[3]. t1 = 2*stride, t2 = 3*stride.
slli.d t1, a4, 1
add.d t2, a4, t1
// Load data from p_src, p_ref0, p_ref1 and p_ref2
fld.d f3, a0, 0
fld.d f7, a0, 16
fld.d f11, a0, 32
fld.d f15, a0, 48
FLDD_LOADX_4 a1, a4, t1, t2, f4, f8, f12, f16
FLDD_LOADX_4 a2, a4, t1, t2, f5, f9, f13, f17
FLDD_LOADX_4 a3, a4, t1, t2, f6, f10, f14, f18
// Pair 8-byte rows into 128-bit vectors (rows 0-1 and rows 2-3).
vilvl.d vr3, vr7, vr3
vilvl.d vr4, vr8, vr4
vilvl.d vr5, vr9, vr5
vilvl.d vr6, vr10, vr6
vilvl.d vr11, vr15, vr11
vilvl.d vr12, vr16, vr12
vilvl.d vr13, vr17, vr13
vilvl.d vr14, vr18, vr14
// |src-ref| per reference for both row pairs.
vabsd.bu vr7, vr3, vr4
vabsd.bu vr8, vr3, vr5
vabsd.bu vr9, vr3, vr6
vabsd.bu vr10, vr11, vr12
vabsd.bu vr15, vr11, vr13
vabsd.bu vr16, vr11, vr14
// Widen to halfwords; vr0..vr2 accumulate rows 0..3 per reference.
vhaddw.hu.bu vr7, vr7, vr7
vhaddw.hu.bu vr8, vr8, vr8
vhaddw.hu.bu vr9, vr9, vr9
vhaddw.hu.bu vr10, vr10, vr10
vhaddw.hu.bu vr15, vr15, vr15
vhaddw.hu.bu vr16, vr16, vr16
vadd.h vr0, vr7, vr10
vadd.h vr1, vr8, vr15
vadd.h vr2, vr9, vr16
// Advance refs by 4 rows; src second half is at offsets 64..112.
alsl.d a1, a4, a1, 2
alsl.d a2, a4, a2, 2
alsl.d a3, a4, a3, 2
fld.d f3, a0, 64
fld.d f7, a0, 80
fld.d f11, a0, 96
fld.d f15, a0, 112
FLDD_LOADX_4 a1, a4, t1, t2, f4, f8, f12, f16
FLDD_LOADX_4 a2, a4, t1, t2, f5, f9, f13, f17
FLDD_LOADX_4 a3, a4, t1, t2, f6, f10, f14, f18
vilvl.d vr3, vr7, vr3
vilvl.d vr4, vr8, vr4
vilvl.d vr5, vr9, vr5
vilvl.d vr6, vr10, vr6
vilvl.d vr11, vr15, vr11
vilvl.d vr12, vr16, vr12
vilvl.d vr13, vr17, vr13
vilvl.d vr14, vr18, vr14
vabsd.bu vr7, vr3, vr4
vabsd.bu vr8, vr3, vr5
vabsd.bu vr9, vr3, vr6
vabsd.bu vr10, vr11, vr12
vabsd.bu vr15, vr11, vr13
vabsd.bu vr16, vr11, vr14
vhaddw.hu.bu vr7, vr7, vr7
vhaddw.hu.bu vr8, vr8, vr8
vhaddw.hu.bu vr9, vr9, vr9
vhaddw.hu.bu vr10, vr10, vr10
vhaddw.hu.bu vr15, vr15, vr15
vhaddw.hu.bu vr16, vr16, vr16
vadd.h vr7, vr7, vr10
vadd.h vr8, vr8, vr15
vadd.h vr9, vr9, vr16
// Combine the two halves of the block.
vadd.h vr7, vr7, vr0
vadd.h vr8, vr8, vr1
vadd.h vr9, vr9, vr2
// Horizontal reduction h->w->d->q per reference.
vhaddw.wu.hu vr7, vr7, vr7
vhaddw.wu.hu vr8, vr8, vr8
vhaddw.wu.hu vr9, vr9, vr9
vhaddw.du.wu vr7, vr7, vr7
vhaddw.du.wu vr8, vr8, vr8
vhaddw.du.wu vr9, vr9, vr9
vhaddw.qu.du vr7, vr7, vr7
vhaddw.qu.du vr8, vr8, vr8
vhaddw.qu.du vr9, vr9, vr9
// Store data to p_sad_array
vstelm.w vr7, a5, 0, 0
vstelm.w vr8, a5, 4, 0
vstelm.w vr9, a5, 8, 0
endfunc_x264
/*
* void x264_pixel_sad_x3_8x16_lsx(uint8_t *p_src, uint8_t *p_ref0,
* uint8_t *p_ref1, uint8_t *p_ref2,
* intptr_t i_ref_stride,
* int32_t p_sad_array[3])
*/
function_x264 pixel_sad_x3_8x16_lsx
// SAD of one 8x16 src block against three references: one 4-row
// prologue plus three unrolled 4-row iterations (.rept 3).
// a0 = p_src (rows at fixed 16-byte offsets), a1..a3 = p_ref0..2,
// a4 = i_ref_stride, a5 = p_sad_array[3]. t1 = 2*stride, t2 = 3*stride.
slli.d t1, a4, 1
add.d t2, a4, t1
// Load data from p_src, p_ref0, p_ref1 and p_ref2
fld.d f3, a0, 0
fld.d f7, a0, 16
fld.d f11, a0, 32
fld.d f15, a0, 48
FLDD_LOADX_4 a1, a4, t1, t2, f4, f8, f12, f16
FLDD_LOADX_4 a2, a4, t1, t2, f5, f9, f13, f17
FLDD_LOADX_4 a3, a4, t1, t2, f6, f10, f14, f18
// Pair 8-byte rows into 128-bit vectors.
vilvl.d vr3, vr7, vr3
vilvl.d vr4, vr8, vr4
vilvl.d vr5, vr9, vr5
vilvl.d vr6, vr10, vr6
vilvl.d vr11, vr15, vr11
vilvl.d vr12, vr16, vr12
vilvl.d vr13, vr17, vr13
vilvl.d vr14, vr18, vr14
vabsd.bu vr7, vr3, vr4
vabsd.bu vr8, vr3, vr5
vabsd.bu vr9, vr3, vr6
vabsd.bu vr10, vr11, vr12
vabsd.bu vr15, vr11, vr13
vabsd.bu vr16, vr11, vr14
vhaddw.hu.bu vr7, vr7, vr7
vhaddw.hu.bu vr8, vr8, vr8
vhaddw.hu.bu vr9, vr9, vr9
vhaddw.hu.bu vr10, vr10, vr10
vhaddw.hu.bu vr15, vr15, vr15
vhaddw.hu.bu vr16, vr16, vr16
// vr0..vr2 are the running halfword accumulators per reference.
vadd.h vr0, vr7, vr10
vadd.h vr1, vr8, vr15
vadd.h vr2, vr9, vr16
.rept 3
// Next 4 rows: refs advance by 4*stride, src by 64 bytes.
alsl.d a1, a4, a1, 2
alsl.d a2, a4, a2, 2
alsl.d a3, a4, a3, 2
addi.d a0, a0, 64
fld.d f3, a0, 0
fld.d f7, a0, 16
fld.d f11, a0, 32
fld.d f15, a0, 48
FLDD_LOADX_4 a1, a4, t1, t2, f4, f8, f12, f16
FLDD_LOADX_4 a2, a4, t1, t2, f5, f9, f13, f17
FLDD_LOADX_4 a3, a4, t1, t2, f6, f10, f14, f18
vilvl.d vr3, vr7, vr3
vilvl.d vr4, vr8, vr4
vilvl.d vr5, vr9, vr5
vilvl.d vr6, vr10, vr6
vilvl.d vr11, vr15, vr11
vilvl.d vr12, vr16, vr12
vilvl.d vr13, vr17, vr13
vilvl.d vr14, vr18, vr14
vabsd.bu vr7, vr3, vr4
vabsd.bu vr8, vr3, vr5
vabsd.bu vr9, vr3, vr6
vabsd.bu vr10, vr11, vr12
vabsd.bu vr15, vr11, vr13
vabsd.bu vr16, vr11, vr14
vhaddw.hu.bu vr7, vr7, vr7
vhaddw.hu.bu vr8, vr8, vr8
vhaddw.hu.bu vr9, vr9, vr9
vhaddw.hu.bu vr10, vr10, vr10
vhaddw.hu.bu vr15, vr15, vr15
vhaddw.hu.bu vr16, vr16, vr16
vadd.h vr7, vr7, vr10
vadd.h vr8, vr8, vr15
vadd.h vr9, vr9, vr16
vadd.h vr0, vr7, vr0
vadd.h vr1, vr8, vr1
vadd.h vr2, vr9, vr2
.endr
// Horizontal reduction h->w->d->q per reference.
vhaddw.wu.hu vr0, vr0, vr0
vhaddw.wu.hu vr1, vr1, vr1
vhaddw.wu.hu vr2, vr2, vr2
vhaddw.du.wu vr0, vr0, vr0
vhaddw.du.wu vr1, vr1, vr1
vhaddw.du.wu vr2, vr2, vr2
vhaddw.qu.du vr0, vr0, vr0
vhaddw.qu.du vr1, vr1, vr1
vhaddw.qu.du vr2, vr2, vr2
// Store data to p_sad_array
vstelm.w vr0, a5, 0, 0
vstelm.w vr1, a5, 4, 0
vstelm.w vr2, a5, 8, 0
endfunc_x264
/*
* void x264_pixel_sad_x3_16x8_lsx(uint8_t *p_src, uint8_t *p_ref0,
* uint8_t *p_ref1, uint8_t *p_ref2,
* intptr_t i_ref_stride,
* int32_t p_sad_array[3])
*/
function_x264 pixel_sad_x3_16x8_lsx
// SAD of one 16x8 src block against three references, two 4-row halves.
// a0 = p_src (full 16-byte rows at offsets 0/16/32/48), a1..a3 = p_ref0..2,
// a4 = i_ref_stride, a5 = p_sad_array[3]. t1 = 2*stride, t2 = 3*stride.
slli.d t1, a4, 1
add.d t2, a4, t1
vld vr0, a0, 0
vld vr1, a0, 16
vld vr2, a0, 32
vld vr3, a0, 48
// Rows 0..3 of each reference: vr4..vr6 = row0, vr7..vr9 = row1, etc.
LSX_LOADX_4 a1, a4, t1, t2, vr4, vr7, vr10, vr13
LSX_LOADX_4 a2, a4, t1, t2, vr5, vr8, vr11, vr14
LSX_LOADX_4 a3, a4, t1, t2, vr6, vr9, vr12, vr15
// |src-ref| per row, three references interleaved.
vabsd.bu vr4, vr0, vr4
vabsd.bu vr5, vr0, vr5
vabsd.bu vr6, vr0, vr6
vabsd.bu vr7, vr1, vr7
vabsd.bu vr8, vr1, vr8
vabsd.bu vr9, vr1, vr9
vabsd.bu vr10, vr2, vr10
vabsd.bu vr11, vr2, vr11
vabsd.bu vr12, vr2, vr12
vabsd.bu vr13, vr3, vr13
vabsd.bu vr14, vr3, vr14
vabsd.bu vr15, vr3, vr15
// Widen byte diffs to halfword partial sums.
vhaddw.hu.bu vr4, vr4, vr4
vhaddw.hu.bu vr5, vr5, vr5
vhaddw.hu.bu vr6, vr6, vr6
vhaddw.hu.bu vr7, vr7, vr7
vhaddw.hu.bu vr8, vr8, vr8
vhaddw.hu.bu vr9, vr9, vr9
vhaddw.hu.bu vr10, vr10, vr10
vhaddw.hu.bu vr11, vr11, vr11
vhaddw.hu.bu vr12, vr12, vr12
vhaddw.hu.bu vr13, vr13, vr13
vhaddw.hu.bu vr14, vr14, vr14
vhaddw.hu.bu vr15, vr15, vr15
// Accumulate rows 0..3 per reference into vr16/vr17/vr18.
vadd.h vr0, vr7, vr4
vadd.h vr1, vr13, vr10
vadd.h vr16, vr1, vr0
vadd.h vr0, vr8, vr5
vadd.h vr1, vr14, vr11
vadd.h vr17, vr1, vr0
vadd.h vr0, vr9, vr6
vadd.h vr1, vr15, vr12
vadd.h vr18, vr1, vr0
// vr16, vr17, vr18
// Second half: refs advance by 4 rows; src rows at offsets 64..112.
alsl.d a1, a4, a1, 2
alsl.d a2, a4, a2, 2
alsl.d a3, a4, a3, 2
vld vr0, a0, 64
vld vr1, a0, 80
vld vr2, a0, 96
vld vr3, a0, 112
LSX_LOADX_4 a1, a4, t1, t2, vr4, vr7, vr10, vr13
LSX_LOADX_4 a2, a4, t1, t2, vr5, vr8, vr11, vr14
LSX_LOADX_4 a3, a4, t1, t2, vr6, vr9, vr12, vr15
vabsd.bu vr4, vr0, vr4
vabsd.bu vr5, vr0, vr5
vabsd.bu vr6, vr0, vr6
vabsd.bu vr7, vr1, vr7
vabsd.bu vr8, vr1, vr8
vabsd.bu vr9, vr1, vr9
vabsd.bu vr10, vr2, vr10
vabsd.bu vr11, vr2, vr11
vabsd.bu vr12, vr2, vr12
vabsd.bu vr13, vr3, vr13
vabsd.bu vr14, vr3, vr14
vabsd.bu vr15, vr3, vr15
vhaddw.hu.bu vr4, vr4, vr4
vhaddw.hu.bu vr5, vr5, vr5
vhaddw.hu.bu vr6, vr6, vr6
vhaddw.hu.bu vr7, vr7, vr7
vhaddw.hu.bu vr8, vr8, vr8
vhaddw.hu.bu vr9, vr9, vr9
vhaddw.hu.bu vr10, vr10, vr10
vhaddw.hu.bu vr11, vr11, vr11
vhaddw.hu.bu vr12, vr12, vr12
vhaddw.hu.bu vr13, vr13, vr13
vhaddw.hu.bu vr14, vr14, vr14
vhaddw.hu.bu vr15, vr15, vr15
vadd.h vr0, vr7, vr4
vadd.h vr1, vr13, vr10
vadd.h vr2, vr1, vr0
vadd.h vr0, vr8, vr5
vadd.h vr1, vr14, vr11
vadd.h vr3, vr1, vr0
vadd.h vr0, vr9, vr6
vadd.h vr1, vr15, vr12
vadd.h vr4, vr1, vr0
// Combine the two halves, then reduce h->w->d->q per reference.
vadd.h vr0, vr16, vr2
vadd.h vr1, vr17, vr3
vadd.h vr2, vr18, vr4
vhaddw.wu.hu vr0, vr0, vr0
vhaddw.wu.hu vr1, vr1, vr1
vhaddw.wu.hu vr2, vr2, vr2
vhaddw.du.wu vr0, vr0, vr0
vhaddw.du.wu vr1, vr1, vr1
vhaddw.du.wu vr2, vr2, vr2
vhaddw.qu.du vr0, vr0, vr0
vhaddw.qu.du vr1, vr1, vr1
vhaddw.qu.du vr2, vr2, vr2
// Store data to p_sad_array
vstelm.w vr0, a5, 0, 0
vstelm.w vr1, a5, 4, 0
vstelm.w vr2, a5, 8, 0
endfunc_x264
/*
* void x264_pixel_sad_x3_16x16_lsx(uint8_t *p_src, uint8_t *p_ref0,
* uint8_t *p_ref1, uint8_t *p_ref2,
* intptr_t i_ref_stride,
* int32_t p_sad_array[3])
*/
function_x264 pixel_sad_x3_16x16_lsx
// SAD of one 16x16 src block against three references: 4-row prologue
// plus three unrolled 4-row iterations (.rept 3).
// a0 = p_src (full 16-byte rows at fixed offsets), a1..a3 = p_ref0..2,
// a4 = i_ref_stride, a5 = p_sad_array[3]. t1 = 2*stride, t2 = 3*stride.
slli.d t1, a4, 1
add.d t2, a4, t1
vld vr0, a0, 0
vld vr1, a0, 16
vld vr2, a0, 32
vld vr3, a0, 48
LSX_LOADX_4 a1, a4, t1, t2, vr4, vr7, vr10, vr13
LSX_LOADX_4 a2, a4, t1, t2, vr5, vr8, vr11, vr14
LSX_LOADX_4 a3, a4, t1, t2, vr6, vr9, vr12, vr15
// |src-ref| per row for each of the three references.
vabsd.bu vr4, vr0, vr4
vabsd.bu vr5, vr0, vr5
vabsd.bu vr6, vr0, vr6
vabsd.bu vr7, vr1, vr7
vabsd.bu vr8, vr1, vr8
vabsd.bu vr9, vr1, vr9
vabsd.bu vr10, vr2, vr10
vabsd.bu vr11, vr2, vr11
vabsd.bu vr12, vr2, vr12
vabsd.bu vr13, vr3, vr13
vabsd.bu vr14, vr3, vr14
vabsd.bu vr15, vr3, vr15
vhaddw.hu.bu vr4, vr4, vr4
vhaddw.hu.bu vr5, vr5, vr5
vhaddw.hu.bu vr6, vr6, vr6
vhaddw.hu.bu vr7, vr7, vr7
vhaddw.hu.bu vr8, vr8, vr8
vhaddw.hu.bu vr9, vr9, vr9
vhaddw.hu.bu vr10, vr10, vr10
vhaddw.hu.bu vr11, vr11, vr11
vhaddw.hu.bu vr12, vr12, vr12
vhaddw.hu.bu vr13, vr13, vr13
vhaddw.hu.bu vr14, vr14, vr14
vhaddw.hu.bu vr15, vr15, vr15
// vr16/vr17/vr18 are the running accumulators per reference.
vadd.h vr0, vr7, vr4
vadd.h vr1, vr13, vr10
vadd.h vr16, vr1, vr0
vadd.h vr0, vr8, vr5
vadd.h vr1, vr14, vr11
vadd.h vr17, vr1, vr0
vadd.h vr0, vr9, vr6
vadd.h vr1, vr15, vr12
vadd.h vr18, vr1, vr0
.rept 3
// Next 4 rows: refs advance by 4*stride, src by 64 bytes.
alsl.d a1, a4, a1, 2
alsl.d a2, a4, a2, 2
alsl.d a3, a4, a3, 2
addi.d a0, a0, 64
vld vr0, a0, 0
vld vr1, a0, 16
vld vr2, a0, 32
vld vr3, a0, 48
LSX_LOADX_4 a1, a4, t1, t2, vr4, vr7, vr10, vr13
LSX_LOADX_4 a2, a4, t1, t2, vr5, vr8, vr11, vr14
LSX_LOADX_4 a3, a4, t1, t2, vr6, vr9, vr12, vr15
vabsd.bu vr4, vr0, vr4
vabsd.bu vr5, vr0, vr5
vabsd.bu vr6, vr0, vr6
vabsd.bu vr7, vr1, vr7
vabsd.bu vr8, vr1, vr8
vabsd.bu vr9, vr1, vr9
vabsd.bu vr10, vr2, vr10
vabsd.bu vr11, vr2, vr11
vabsd.bu vr12, vr2, vr12
vabsd.bu vr13, vr3, vr13
vabsd.bu vr14, vr3, vr14
vabsd.bu vr15, vr3, vr15
vhaddw.hu.bu vr4, vr4, vr4
vhaddw.hu.bu vr5, vr5, vr5
vhaddw.hu.bu vr6, vr6, vr6
vhaddw.hu.bu vr7, vr7, vr7
vhaddw.hu.bu vr8, vr8, vr8
vhaddw.hu.bu vr9, vr9, vr9
vhaddw.hu.bu vr10, vr10, vr10
vhaddw.hu.bu vr11, vr11, vr11
vhaddw.hu.bu vr12, vr12, vr12
vhaddw.hu.bu vr13, vr13, vr13
vhaddw.hu.bu vr14, vr14, vr14
vhaddw.hu.bu vr15, vr15, vr15
vadd.h vr0, vr7, vr4
vadd.h vr1, vr13, vr10
vadd.h vr2, vr1, vr0
vadd.h vr0, vr8, vr5
vadd.h vr1, vr14, vr11
vadd.h vr3, vr1, vr0
vadd.h vr0, vr9, vr6
vadd.h vr1, vr15, vr12
vadd.h vr4, vr1, vr0
vadd.h vr16, vr16, vr2
vadd.h vr17, vr17, vr3
vadd.h vr18, vr18, vr4
.endr
// Horizontal reduction h->w->d->q per reference.
vhaddw.wu.hu vr16, vr16, vr16
vhaddw.wu.hu vr17, vr17, vr17
vhaddw.wu.hu vr18, vr18, vr18
vhaddw.du.wu vr16, vr16, vr16
vhaddw.du.wu vr17, vr17, vr17
vhaddw.du.wu vr18, vr18, vr18
vhaddw.qu.du vr16, vr16, vr16
vhaddw.qu.du vr17, vr17, vr17
vhaddw.qu.du vr18, vr18, vr18
// Store data to p_sad_array
vstelm.w vr16, a5, 0, 0
vstelm.w vr17, a5, 4, 0
vstelm.w vr18, a5, 8, 0
endfunc_x264
/*
* void x264_pixel_sad_x4_4x8_lsx(uint8_t *p_src, uint8_t *p_ref0,
* uint8_t *p_ref1, uint8_t *p_ref2,
* uint8_t *p_ref3, intptr_t i_ref_stride,
* int32_t p_sad_array[4])
*/
function_x264 pixel_sad_x4_4x8_lsx
// SAD of one 4x8 src block against FOUR references.
// a0 = p_src (rows at fixed 16-byte offsets), a1..a4 = p_ref0..3,
// a5 = i_ref_stride, a6 = p_sad_array[4]. t1 = 2*stride, t2 = 3*stride.
slli.d t1, a5, 1
add.d t2, a5, t1
fld.s f0, a0, 0
fld.s f1, a0, 16
fld.s f2, a0, 32
fld.s f3, a0, 48
FLDS_LOADX_4 a1, a5, t1, t2, f4, f8, f12, f16
FLDS_LOADX_4 a2, a5, t1, t2, f5, f9, f13, f17
FLDS_LOADX_4 a3, a5, t1, t2, f6, f10, f14, f18
FLDS_LOADX_4 a4, a5, t1, t2, f7, f11, f15, f19
// Interleave four 4-byte rows into one vector: vr0 = src, vr1..vr4 = refs.
vilvl.w vr0, vr1, vr0
vilvl.w vr2, vr3, vr2
vilvl.d vr0, vr2, vr0
vilvl.w vr4, vr8, vr4
vilvl.w vr12, vr16, vr12
vilvl.d vr1, vr12, vr4
vilvl.w vr5, vr9, vr5
vilvl.w vr13, vr17, vr13
vilvl.d vr2, vr13, vr5
vilvl.w vr6, vr10, vr6
vilvl.w vr14, vr18, vr14
vilvl.d vr3, vr14, vr6
vilvl.w vr7, vr11, vr7
vilvl.w vr15, vr19, vr15
vilvl.d vr4, vr15, vr7
// |src-ref| for rows 0..3, widened to halfword accumulators vr20..vr23.
vabsd.bu vr1, vr0, vr1
vabsd.bu vr2, vr0, vr2
vabsd.bu vr3, vr0, vr3
vabsd.bu vr4, vr0, vr4
vhaddw.hu.bu vr20, vr1, vr1
vhaddw.hu.bu vr21, vr2, vr2
vhaddw.hu.bu vr22, vr3, vr3
vhaddw.hu.bu vr23, vr4, vr4
// Advance the four ref pointers by 4 rows; src uses offsets 64..112.
alsl.d a1, a5, a1, 2
alsl.d a2, a5, a2, 2
alsl.d a3, a5, a3, 2
alsl.d a4, a5, a4, 2
fld.s f0, a0, 64
fld.s f1, a0, 80
fld.s f2, a0, 96
fld.s f3, a0, 112
FLDS_LOADX_4 a1, a5, t1, t2, f4, f8, f12, f16
FLDS_LOADX_4 a2, a5, t1, t2, f5, f9, f13, f17
FLDS_LOADX_4 a3, a5, t1, t2, f6, f10, f14, f18
FLDS_LOADX_4 a4, a5, t1, t2, f7, f11, f15, f19
vilvl.w vr0, vr1, vr0
vilvl.w vr2, vr3, vr2
vilvl.d vr0, vr2, vr0
vilvl.w vr4, vr8, vr4
vilvl.w vr12, vr16, vr12
vilvl.d vr1, vr12, vr4
vilvl.w vr5, vr9, vr5
vilvl.w vr13, vr17, vr13
vilvl.d vr2, vr13, vr5
vilvl.w vr6, vr10, vr6
vilvl.w vr14, vr18, vr14
vilvl.d vr3, vr14, vr6
vilvl.w vr7, vr11, vr7
vilvl.w vr15, vr19, vr15
vilvl.d vr4, vr15, vr7
vabsd.bu vr1, vr0, vr1
vabsd.bu vr2, vr0, vr2
vabsd.bu vr3, vr0, vr3
vabsd.bu vr4, vr0, vr4
vhaddw.hu.bu vr1, vr1, vr1
vhaddw.hu.bu vr2, vr2, vr2
vhaddw.hu.bu vr3, vr3, vr3
vhaddw.hu.bu vr4, vr4, vr4
// Combine the two halves, then reduce h->w->d->q per reference.
vadd.h vr16, vr20, vr1
vadd.h vr17, vr21, vr2
vadd.h vr18, vr22, vr3
vadd.h vr19, vr23, vr4
vhaddw.wu.hu vr16, vr16, vr16
vhaddw.wu.hu vr17, vr17, vr17
vhaddw.wu.hu vr18, vr18, vr18
vhaddw.wu.hu vr19, vr19, vr19
vhaddw.du.wu vr16, vr16, vr16
vhaddw.du.wu vr17, vr17, vr17
vhaddw.du.wu vr18, vr18, vr18
vhaddw.du.wu vr19, vr19, vr19
vhaddw.qu.du vr16, vr16, vr16
vhaddw.qu.du vr17, vr17, vr17
vhaddw.qu.du vr18, vr18, vr18
vhaddw.qu.du vr19, vr19, vr19
// Store data to p_sad_array
vstelm.w vr16, a6, 0, 0
vstelm.w vr17, a6, 4, 0
vstelm.w vr18, a6, 8, 0
vstelm.w vr19, a6, 12, 0
endfunc_x264
/*
* void x264_pixel_sad_x4_8x4_lsx(uint8_t *p_src, uint8_t *p_ref0,
* uint8_t *p_ref1, uint8_t *p_ref2,
* uint8_t *p_ref3, intptr_t i_ref_stride,
* int32_t p_sad_array[4])
*/
function_x264 pixel_sad_x4_8x4_lsx
// SAD of one 8x4 src block against four references.
// a0 = p_src (rows at fixed 16-byte offsets), a1..a4 = p_ref0..3,
// a5 = i_ref_stride, a6 = p_sad_array[4]. t1 = 2*stride, t2 = 3*stride.
slli.d t1, a5, 1
add.d t2, a5, t1
// Load data from p_src, p_ref0, p_ref1 and p_ref2
fld.d f0, a0, 0
fld.d f1, a0, 16
fld.d f2, a0, 32
fld.d f3, a0, 48
FLDD_LOADX_4 a1, a5, t1, t2, f4, f8, f12, f16
FLDD_LOADX_4 a2, a5, t1, t2, f5, f9, f13, f17
FLDD_LOADX_4 a3, a5, t1, t2, f6, f10, f14, f18
FLDD_LOADX_4 a4, a5, t1, t2, f7, f11, f15, f19
// Pair 8-byte rows: vr0/vr2 = src rows 0-1 / 2-3; refs likewise.
vilvl.d vr0, vr1, vr0
vilvl.d vr2, vr3, vr2
vilvl.d vr4, vr8, vr4
vilvl.d vr12, vr16, vr12
vilvl.d vr5, vr9, vr5
vilvl.d vr13, vr17, vr13
vilvl.d vr6, vr10, vr6
vilvl.d vr14, vr18, vr14
vilvl.d vr7, vr11, vr7
vilvl.d vr15, vr19, vr15
// |src-ref| per reference for both row pairs.
vabsd.bu vr4, vr0, vr4
vabsd.bu vr5, vr0, vr5
vabsd.bu vr6, vr0, vr6
vabsd.bu vr7, vr0, vr7
vabsd.bu vr12, vr2, vr12
vabsd.bu vr13, vr2, vr13
vabsd.bu vr14, vr2, vr14
vabsd.bu vr15, vr2, vr15
vhaddw.hu.bu vr4, vr4, vr4
vhaddw.hu.bu vr5, vr5, vr5
vhaddw.hu.bu vr6, vr6, vr6
vhaddw.hu.bu vr7, vr7, vr7
vhaddw.hu.bu vr12, vr12, vr12
vhaddw.hu.bu vr13, vr13, vr13
vhaddw.hu.bu vr14, vr14, vr14
vhaddw.hu.bu vr15, vr15, vr15
// Combine row pairs, then reduce h->w->d->q per reference.
vadd.h vr16, vr4, vr12
vadd.h vr17, vr5, vr13
vadd.h vr18, vr6, vr14
vadd.h vr19, vr7, vr15
vhaddw.wu.hu vr16, vr16, vr16
vhaddw.wu.hu vr17, vr17, vr17
vhaddw.wu.hu vr18, vr18, vr18
vhaddw.wu.hu vr19, vr19, vr19
vhaddw.du.wu vr16, vr16, vr16
vhaddw.du.wu vr17, vr17, vr17
vhaddw.du.wu vr18, vr18, vr18
vhaddw.du.wu vr19, vr19, vr19
vhaddw.qu.du vr16, vr16, vr16
vhaddw.qu.du vr17, vr17, vr17
vhaddw.qu.du vr18, vr18, vr18
vhaddw.qu.du vr19, vr19, vr19
// Store data to p_sad_array
vstelm.w vr16, a6, 0, 0
vstelm.w vr17, a6, 4, 0
vstelm.w vr18, a6, 8, 0
vstelm.w vr19, a6, 12, 0
endfunc_x264
/*
* void x264_pixel_sad_x4_8x8_lsx(uint8_t *p_src, uint8_t *p_ref0,
* uint8_t *p_ref1, uint8_t *p_ref2,
* uint8_t *p_ref3, intptr_t i_ref_stride,
* int32_t p_sad_array[4])
*/
function_x264 pixel_sad_x4_8x8_lsx
// SAD of one 8x8 src block against four references, two 4-row halves.
// a0 = p_src (rows at fixed 16-byte offsets), a1..a4 = p_ref0..3,
// a5 = i_ref_stride, a6 = p_sad_array[4]. t1 = 2*stride, t2 = 3*stride.
slli.d t1, a5, 1
add.d t2, a5, t1
// Load data from p_src, p_ref0, p_ref1 and p_ref2
fld.d f0, a0, 0
fld.d f1, a0, 16
fld.d f2, a0, 32
fld.d f3, a0, 48
FLDD_LOADX_4 a1, a5, t1, t2, f4, f8, f12, f16
FLDD_LOADX_4 a2, a5, t1, t2, f5, f9, f13, f17
FLDD_LOADX_4 a3, a5, t1, t2, f6, f10, f14, f18
FLDD_LOADX_4 a4, a5, t1, t2, f7, f11, f15, f19
// Pair 8-byte rows into 128-bit vectors (rows 0-1, rows 2-3).
vilvl.d vr0, vr1, vr0
vilvl.d vr2, vr3, vr2
vilvl.d vr4, vr8, vr4
vilvl.d vr12, vr16, vr12
vilvl.d vr5, vr9, vr5
vilvl.d vr13, vr17, vr13
vilvl.d vr6, vr10, vr6
vilvl.d vr14, vr18, vr14
vilvl.d vr7, vr11, vr7
vilvl.d vr15, vr19, vr15
vabsd.bu vr4, vr0, vr4
vabsd.bu vr5, vr0, vr5
vabsd.bu vr6, vr0, vr6
vabsd.bu vr7, vr0, vr7
vabsd.bu vr12, vr2, vr12
vabsd.bu vr13, vr2, vr13
vabsd.bu vr14, vr2, vr14
vabsd.bu vr15, vr2, vr15
vhaddw.hu.bu vr4, vr4, vr4
vhaddw.hu.bu vr5, vr5, vr5
vhaddw.hu.bu vr6, vr6, vr6
vhaddw.hu.bu vr7, vr7, vr7
vhaddw.hu.bu vr12, vr12, vr12
vhaddw.hu.bu vr13, vr13, vr13
vhaddw.hu.bu vr14, vr14, vr14
vhaddw.hu.bu vr15, vr15, vr15
// vr20..vr23 accumulate rows 0..3 per reference.
vadd.h vr20, vr4, vr12
vadd.h vr21, vr5, vr13
vadd.h vr22, vr6, vr14
vadd.h vr23, vr7, vr15
// Second half: refs advance by 4 rows, src rows at offsets 64..112.
alsl.d a1, a5, a1, 2
alsl.d a2, a5, a2, 2
alsl.d a3, a5, a3, 2
alsl.d a4, a5, a4, 2
fld.d f0, a0, 64
fld.d f1, a0, 80
fld.d f2, a0, 96
fld.d f3, a0, 112
FLDD_LOADX_4 a1, a5, t1, t2, f4, f8, f12, f16
FLDD_LOADX_4 a2, a5, t1, t2, f5, f9, f13, f17
FLDD_LOADX_4 a3, a5, t1, t2, f6, f10, f14, f18
FLDD_LOADX_4 a4, a5, t1, t2, f7, f11, f15, f19
vilvl.d vr0, vr1, vr0
vilvl.d vr2, vr3, vr2
vilvl.d vr4, vr8, vr4
vilvl.d vr12, vr16, vr12
vilvl.d vr5, vr9, vr5
vilvl.d vr13, vr17, vr13
vilvl.d vr6, vr10, vr6
vilvl.d vr14, vr18, vr14
vilvl.d vr7, vr11, vr7
vilvl.d vr15, vr19, vr15
vabsd.bu vr4, vr0, vr4
vabsd.bu vr5, vr0, vr5
vabsd.bu vr6, vr0, vr6
vabsd.bu vr7, vr0, vr7
vabsd.bu vr12, vr2, vr12
vabsd.bu vr13, vr2, vr13
vabsd.bu vr14, vr2, vr14
vabsd.bu vr15, vr2, vr15
vhaddw.hu.bu vr4, vr4, vr4
vhaddw.hu.bu vr5, vr5, vr5
vhaddw.hu.bu vr6, vr6, vr6
vhaddw.hu.bu vr7, vr7, vr7
vhaddw.hu.bu vr12, vr12, vr12
vhaddw.hu.bu vr13, vr13, vr13
vhaddw.hu.bu vr14, vr14, vr14
vhaddw.hu.bu vr15, vr15, vr15
vadd.h vr16, vr4, vr12
vadd.h vr17, vr5, vr13
vadd.h vr18, vr6, vr14
vadd.h vr19, vr7, vr15
// Combine the two halves, then reduce h->w->d->q per reference.
vadd.h vr16, vr16, vr20
vadd.h vr17, vr17, vr21
vadd.h vr18, vr18, vr22
vadd.h vr19, vr19, vr23
vhaddw.wu.hu vr16, vr16, vr16
vhaddw.wu.hu vr17, vr17, vr17
vhaddw.wu.hu vr18, vr18, vr18
vhaddw.wu.hu vr19, vr19, vr19
vhaddw.du.wu vr16, vr16, vr16
vhaddw.du.wu vr17, vr17, vr17
vhaddw.du.wu vr18, vr18, vr18
vhaddw.du.wu vr19, vr19, vr19
vhaddw.qu.du vr16, vr16, vr16
vhaddw.qu.du vr17, vr17, vr17
vhaddw.qu.du vr18, vr18, vr18
vhaddw.qu.du vr19, vr19, vr19
// Store data to p_sad_array
vstelm.w vr16, a6, 0, 0
vstelm.w vr17, a6, 4, 0
vstelm.w vr18, a6, 8, 0
vstelm.w vr19, a6, 12, 0
endfunc_x264
/*
* void x264_pixel_sad_x4_8x16_lsx(uint8_t *p_src, uint8_t *p_ref0,
* uint8_t *p_ref1, uint8_t *p_ref2,
* uint8_t *p_ref3, intptr_t i_ref_stride,
* int32_t p_sad_array[4])
*/
function_x264 pixel_sad_x4_8x16_lsx
// SAD of one 8x16 src block against four references: 4-row prologue
// plus three unrolled 4-row iterations (.rept 3).
// a0 = p_src (rows at fixed 16-byte offsets), a1..a4 = p_ref0..3,
// a5 = i_ref_stride, a6 = p_sad_array[4]. t1 = 2*stride, t2 = 3*stride.
slli.d t1, a5, 1
add.d t2, a5, t1
// Load data from p_src, p_ref0, p_ref1 and p_ref2
fld.d f0, a0, 0
fld.d f1, a0, 16
fld.d f2, a0, 32
fld.d f3, a0, 48
FLDD_LOADX_4 a1, a5, t1, t2, f4, f8, f12, f16
FLDD_LOADX_4 a2, a5, t1, t2, f5, f9, f13, f17
FLDD_LOADX_4 a3, a5, t1, t2, f6, f10, f14, f18
FLDD_LOADX_4 a4, a5, t1, t2, f7, f11, f15, f19
// Pair 8-byte rows into 128-bit vectors (rows 0-1, rows 2-3).
vilvl.d vr0, vr1, vr0
vilvl.d vr2, vr3, vr2
vilvl.d vr4, vr8, vr4
vilvl.d vr12, vr16, vr12
vilvl.d vr5, vr9, vr5
vilvl.d vr13, vr17, vr13
vilvl.d vr6, vr10, vr6
vilvl.d vr14, vr18, vr14
vilvl.d vr7, vr11, vr7
vilvl.d vr15, vr19, vr15
vabsd.bu vr4, vr0, vr4
vabsd.bu vr5, vr0, vr5
vabsd.bu vr6, vr0, vr6
vabsd.bu vr7, vr0, vr7
vabsd.bu vr12, vr2, vr12
vabsd.bu vr13, vr2, vr13
vabsd.bu vr14, vr2, vr14
vabsd.bu vr15, vr2, vr15
vhaddw.hu.bu vr4, vr4, vr4
vhaddw.hu.bu vr5, vr5, vr5
vhaddw.hu.bu vr6, vr6, vr6
vhaddw.hu.bu vr7, vr7, vr7
vhaddw.hu.bu vr12, vr12, vr12
vhaddw.hu.bu vr13, vr13, vr13
vhaddw.hu.bu vr14, vr14, vr14
vhaddw.hu.bu vr15, vr15, vr15
// vr20..vr23 are the running accumulators per reference.
vadd.h vr20, vr4, vr12
vadd.h vr21, vr5, vr13
vadd.h vr22, vr6, vr14
vadd.h vr23, vr7, vr15
.rept 3
// Next 4 rows: refs advance by 4*stride, src by 64 bytes.
alsl.d a1, a5, a1, 2
alsl.d a2, a5, a2, 2
alsl.d a3, a5, a3, 2
alsl.d a4, a5, a4, 2
addi.d a0, a0, 64
fld.d f0, a0, 0
fld.d f1, a0, 16
fld.d f2, a0, 32
fld.d f3, a0, 48
FLDD_LOADX_4 a1, a5, t1, t2, f4, f8, f12, f16
FLDD_LOADX_4 a2, a5, t1, t2, f5, f9, f13, f17
FLDD_LOADX_4 a3, a5, t1, t2, f6, f10, f14, f18
FLDD_LOADX_4 a4, a5, t1, t2, f7, f11, f15, f19
vilvl.d vr0, vr1, vr0
vilvl.d vr2, vr3, vr2
vilvl.d vr4, vr8, vr4
vilvl.d vr12, vr16, vr12
vilvl.d vr5, vr9, vr5
vilvl.d vr13, vr17, vr13
vilvl.d vr6, vr10, vr6
vilvl.d vr14, vr18, vr14
vilvl.d vr7, vr11, vr7
vilvl.d vr15, vr19, vr15
vabsd.bu vr4, vr0, vr4
vabsd.bu vr5, vr0, vr5
vabsd.bu vr6, vr0, vr6
vabsd.bu vr7, vr0, vr7
vabsd.bu vr12, vr2, vr12
vabsd.bu vr13, vr2, vr13
vabsd.bu vr14, vr2, vr14
vabsd.bu vr15, vr2, vr15
vhaddw.hu.bu vr4, vr4, vr4
vhaddw.hu.bu vr5, vr5, vr5
vhaddw.hu.bu vr6, vr6, vr6
vhaddw.hu.bu vr7, vr7, vr7
vhaddw.hu.bu vr12, vr12, vr12
vhaddw.hu.bu vr13, vr13, vr13
vhaddw.hu.bu vr14, vr14, vr14
vhaddw.hu.bu vr15, vr15, vr15
vadd.h vr16, vr4, vr12
vadd.h vr17, vr5, vr13
vadd.h vr18, vr6, vr14
vadd.h vr19, vr7, vr15
vadd.h vr20, vr16, vr20
vadd.h vr21, vr17, vr21
vadd.h vr22, vr18, vr22
vadd.h vr23, vr19, vr23
.endr
// Horizontal reduction h->w->d->q per reference.
vhaddw.wu.hu vr20, vr20, vr20
vhaddw.wu.hu vr21, vr21, vr21
vhaddw.wu.hu vr22, vr22, vr22
vhaddw.wu.hu vr23, vr23, vr23
vhaddw.du.wu vr20, vr20, vr20
vhaddw.du.wu vr21, vr21, vr21
vhaddw.du.wu vr22, vr22, vr22
vhaddw.du.wu vr23, vr23, vr23
vhaddw.qu.du vr20, vr20, vr20
vhaddw.qu.du vr21, vr21, vr21
vhaddw.qu.du vr22, vr22, vr22
vhaddw.qu.du vr23, vr23, vr23
// Store data to p_sad_array
vstelm.w vr20, a6, 0, 0
vstelm.w vr21, a6, 4, 0
vstelm.w vr22, a6, 8, 0
vstelm.w vr23, a6, 12, 0
endfunc_x264
/*
* void x264_pixel_sad_x4_16x8_lsx(uint8_t *p_src, uint8_t *p_ref0,
* uint8_t *p_ref1, uint8_t *p_ref2,
* uint8_t *p_ref3, intptr_t i_ref_stride,
* int32_t p_sad_array[4])
*/
function_x264 pixel_sad_x4_16x8_lsx
// SAD of one 16x8 src block against four references, two 4-row halves.
// a0 = p_src (full 16-byte rows at fixed offsets), a1..a4 = p_ref0..3,
// a5 = i_ref_stride, a6 = p_sad_array[4]. t1 = 2*stride, t2 = 3*stride.
slli.d t1, a5, 1
add.d t2, a5, t1
vld vr0, a0, 0
vld vr1, a0, 16
vld vr2, a0, 32
vld vr3, a0, 48
LSX_LOADX_4 a1, a5, t1, t2, vr4, vr8, vr12, vr16
LSX_LOADX_4 a2, a5, t1, t2, vr5, vr9, vr13, vr17
LSX_LOADX_4 a3, a5, t1, t2, vr6, vr10, vr14, vr18
LSX_LOADX_4 a4, a5, t1, t2, vr7, vr11, vr15, vr19
// |src-ref| per row for each of the four references.
vabsd.bu vr4, vr0, vr4
vabsd.bu vr5, vr0, vr5
vabsd.bu vr6, vr0, vr6
vabsd.bu vr7, vr0, vr7
vabsd.bu vr8, vr1, vr8
vabsd.bu vr9, vr1, vr9
vabsd.bu vr10, vr1, vr10
vabsd.bu vr11, vr1, vr11
vabsd.bu vr12, vr2, vr12
vabsd.bu vr13, vr2, vr13
vabsd.bu vr14, vr2, vr14
vabsd.bu vr15, vr2, vr15
vabsd.bu vr16, vr3, vr16
vabsd.bu vr17, vr3, vr17
vabsd.bu vr18, vr3, vr18
vabsd.bu vr19, vr3, vr19
vhaddw.hu.bu vr4, vr4, vr4
vhaddw.hu.bu vr5, vr5, vr5
vhaddw.hu.bu vr6, vr6, vr6
vhaddw.hu.bu vr7, vr7, vr7
vhaddw.hu.bu vr8, vr8, vr8
vhaddw.hu.bu vr9, vr9, vr9
vhaddw.hu.bu vr10, vr10, vr10
vhaddw.hu.bu vr11, vr11, vr11
vhaddw.hu.bu vr12, vr12, vr12
vhaddw.hu.bu vr13, vr13, vr13
vhaddw.hu.bu vr14, vr14, vr14
vhaddw.hu.bu vr15, vr15, vr15
vhaddw.hu.bu vr16, vr16, vr16
vhaddw.hu.bu vr17, vr17, vr17
vhaddw.hu.bu vr18, vr18, vr18
vhaddw.hu.bu vr19, vr19, vr19
// Accumulate rows 0..3 per reference into vr20..vr23.
vadd.h vr0, vr4, vr8
vadd.h vr1, vr12, vr16
vadd.h vr20, vr0, vr1
vadd.h vr0, vr5, vr9
vadd.h vr1, vr13, vr17
vadd.h vr21, vr0, vr1
vadd.h vr0, vr6, vr10
vadd.h vr1, vr14, vr18
vadd.h vr22, vr0, vr1
vadd.h vr0, vr7, vr11
vadd.h vr1, vr15, vr19
vadd.h vr23, vr0, vr1
// Second half: refs advance by 4 rows, src rows at offsets 64..112.
alsl.d a1, a5, a1, 2
alsl.d a2, a5, a2, 2
alsl.d a3, a5, a3, 2
alsl.d a4, a5, a4, 2
vld vr0, a0, 64
vld vr1, a0, 80
vld vr2, a0, 96
vld vr3, a0, 112
LSX_LOADX_4 a1, a5, t1, t2, vr4, vr8, vr12, vr16
LSX_LOADX_4 a2, a5, t1, t2, vr5, vr9, vr13, vr17
LSX_LOADX_4 a3, a5, t1, t2, vr6, vr10, vr14, vr18
LSX_LOADX_4 a4, a5, t1, t2, vr7, vr11, vr15, vr19
vabsd.bu vr4, vr0, vr4
vabsd.bu vr5, vr0, vr5
vabsd.bu vr6, vr0, vr6
vabsd.bu vr7, vr0, vr7
vabsd.bu vr8, vr1, vr8
vabsd.bu vr9, vr1, vr9
vabsd.bu vr10, vr1, vr10
vabsd.bu vr11, vr1, vr11
vabsd.bu vr12, vr2, vr12
vabsd.bu vr13, vr2, vr13
vabsd.bu vr14, vr2, vr14
vabsd.bu vr15, vr2, vr15
vabsd.bu vr16, vr3, vr16
vabsd.bu vr17, vr3, vr17
vabsd.bu vr18, vr3, vr18
vabsd.bu vr19, vr3, vr19
vhaddw.hu.bu vr4, vr4, vr4
vhaddw.hu.bu vr5, vr5, vr5
vhaddw.hu.bu vr6, vr6, vr6
vhaddw.hu.bu vr7, vr7, vr7
vhaddw.hu.bu vr8, vr8, vr8
vhaddw.hu.bu vr9, vr9, vr9
vhaddw.hu.bu vr10, vr10, vr10
vhaddw.hu.bu vr11, vr11, vr11
vhaddw.hu.bu vr12, vr12, vr12
vhaddw.hu.bu vr13, vr13, vr13
vhaddw.hu.bu vr14, vr14, vr14
vhaddw.hu.bu vr15, vr15, vr15
vhaddw.hu.bu vr16, vr16, vr16
vhaddw.hu.bu vr17, vr17, vr17
vhaddw.hu.bu vr18, vr18, vr18
vhaddw.hu.bu vr19, vr19, vr19
vadd.h vr0, vr4, vr8
vadd.h vr1, vr12, vr16
vadd.h vr16, vr0, vr1
vadd.h vr0, vr5, vr9
vadd.h vr1, vr13, vr17
vadd.h vr17, vr0, vr1
vadd.h vr0, vr6, vr10
vadd.h vr1, vr14, vr18
vadd.h vr18, vr0, vr1
vadd.h vr0, vr7, vr11
vadd.h vr1, vr15, vr19
vadd.h vr19, vr0, vr1
// Combine the two halves, then reduce h->w->d->q per reference.
vadd.h vr20, vr16, vr20
vadd.h vr21, vr17, vr21
vadd.h vr22, vr18, vr22
vadd.h vr23, vr19, vr23
vhaddw.wu.hu vr20, vr20, vr20
vhaddw.wu.hu vr21, vr21, vr21
vhaddw.wu.hu vr22, vr22, vr22
vhaddw.wu.hu vr23, vr23, vr23
vhaddw.du.wu vr20, vr20, vr20
vhaddw.du.wu vr21, vr21, vr21
vhaddw.du.wu vr22, vr22, vr22
vhaddw.du.wu vr23, vr23, vr23
vhaddw.qu.du vr20, vr20, vr20
vhaddw.qu.du vr21, vr21, vr21
vhaddw.qu.du vr22, vr22, vr22
vhaddw.qu.du vr23, vr23, vr23
// Store data to p_sad_array
vstelm.w vr20, a6, 0, 0
vstelm.w vr21, a6, 4, 0
vstelm.w vr22, a6, 8, 0
vstelm.w vr23, a6, 12, 0
endfunc_x264
/*
* void x264_pixel_sad_x4_16x16_lsx(uint8_t *p_src, uint8_t *p_ref0,
* uint8_t *p_ref1, uint8_t *p_ref2,
* uint8_t *p_ref3, intptr_t i_ref_stride,
* int32_t p_sad_array[4])
*/
function_x264 pixel_sad_x4_16x16_lsx
// SAD of one 16x16 src block against four references: 4-row prologue
// plus three unrolled 4-row iterations (.rept 3).
// a0 = p_src (full 16-byte rows at fixed offsets), a1..a4 = p_ref0..3,
// a5 = i_ref_stride, a6 = p_sad_array[4]. t1 = 2*stride, t2 = 3*stride.
slli.d t1, a5, 1
add.d t2, a5, t1
vld vr0, a0, 0
vld vr1, a0, 16
vld vr2, a0, 32
vld vr3, a0, 48
LSX_LOADX_4 a1, a5, t1, t2, vr4, vr8, vr12, vr16
LSX_LOADX_4 a2, a5, t1, t2, vr5, vr9, vr13, vr17
LSX_LOADX_4 a3, a5, t1, t2, vr6, vr10, vr14, vr18
LSX_LOADX_4 a4, a5, t1, t2, vr7, vr11, vr15, vr19
// |src-ref| per row for each of the four references.
vabsd.bu vr4, vr0, vr4
vabsd.bu vr5, vr0, vr5
vabsd.bu vr6, vr0, vr6
vabsd.bu vr7, vr0, vr7
vabsd.bu vr8, vr1, vr8
vabsd.bu vr9, vr1, vr9
vabsd.bu vr10, vr1, vr10
vabsd.bu vr11, vr1, vr11
vabsd.bu vr12, vr2, vr12
vabsd.bu vr13, vr2, vr13
vabsd.bu vr14, vr2, vr14
vabsd.bu vr15, vr2, vr15
vabsd.bu vr16, vr3, vr16
vabsd.bu vr17, vr3, vr17
vabsd.bu vr18, vr3, vr18
vabsd.bu vr19, vr3, vr19
vhaddw.hu.bu vr4, vr4, vr4
vhaddw.hu.bu vr5, vr5, vr5
vhaddw.hu.bu vr6, vr6, vr6
vhaddw.hu.bu vr7, vr7, vr7
vhaddw.hu.bu vr8, vr8, vr8
vhaddw.hu.bu vr9, vr9, vr9
vhaddw.hu.bu vr10, vr10, vr10
vhaddw.hu.bu vr11, vr11, vr11
vhaddw.hu.bu vr12, vr12, vr12
vhaddw.hu.bu vr13, vr13, vr13
vhaddw.hu.bu vr14, vr14, vr14
vhaddw.hu.bu vr15, vr15, vr15
vhaddw.hu.bu vr16, vr16, vr16
vhaddw.hu.bu vr17, vr17, vr17
vhaddw.hu.bu vr18, vr18, vr18
vhaddw.hu.bu vr19, vr19, vr19
// vr20..vr23 are the running accumulators per reference.
vadd.h vr0, vr4, vr8
vadd.h vr1, vr12, vr16
vadd.h vr20, vr0, vr1
vadd.h vr0, vr5, vr9
vadd.h vr1, vr13, vr17
vadd.h vr21, vr0, vr1
vadd.h vr0, vr6, vr10
vadd.h vr1, vr14, vr18
vadd.h vr22, vr0, vr1
vadd.h vr0, vr7, vr11
vadd.h vr1, vr15, vr19
vadd.h vr23, vr0, vr1
.rept 3
// Next 4 rows: refs advance by 4*stride, src by 64 bytes.
alsl.d a1, a5, a1, 2
alsl.d a2, a5, a2, 2
alsl.d a3, a5, a3, 2
alsl.d a4, a5, a4, 2
addi.d a0, a0, 64
vld vr0, a0, 0
vld vr1, a0, 16
vld vr2, a0, 32
vld vr3, a0, 48
LSX_LOADX_4 a1, a5, t1, t2, vr4, vr8, vr12, vr16
LSX_LOADX_4 a2, a5, t1, t2, vr5, vr9, vr13, vr17
LSX_LOADX_4 a3, a5, t1, t2, vr6, vr10, vr14, vr18
LSX_LOADX_4 a4, a5, t1, t2, vr7, vr11, vr15, vr19
vabsd.bu vr4, vr0, vr4
vabsd.bu vr5, vr0, vr5
vabsd.bu vr6, vr0, vr6
vabsd.bu vr7, vr0, vr7
vabsd.bu vr8, vr1, vr8
vabsd.bu vr9, vr1, vr9
vabsd.bu vr10, vr1, vr10
vabsd.bu vr11, vr1, vr11
vabsd.bu vr12, vr2, vr12
vabsd.bu vr13, vr2, vr13
vabsd.bu vr14, vr2, vr14
vabsd.bu vr15, vr2, vr15
vabsd.bu vr16, vr3, vr16
vabsd.bu vr17, vr3, vr17
vabsd.bu vr18, vr3, vr18
vabsd.bu vr19, vr3, vr19
vhaddw.hu.bu vr4, vr4, vr4
vhaddw.hu.bu vr5, vr5, vr5
vhaddw.hu.bu vr6, vr6, vr6
vhaddw.hu.bu vr7, vr7, vr7
vhaddw.hu.bu vr8, vr8, vr8
vhaddw.hu.bu vr9, vr9, vr9
vhaddw.hu.bu vr10, vr10, vr10
vhaddw.hu.bu vr11, vr11, vr11
vhaddw.hu.bu vr12, vr12, vr12
vhaddw.hu.bu vr13, vr13, vr13
vhaddw.hu.bu vr14, vr14, vr14
vhaddw.hu.bu vr15, vr15, vr15
vhaddw.hu.bu vr16, vr16, vr16
vhaddw.hu.bu vr17, vr17, vr17
vhaddw.hu.bu vr18, vr18, vr18
vhaddw.hu.bu vr19, vr19, vr19
vadd.h vr0, vr4, vr8
vadd.h vr1, vr12, vr16
vadd.h vr16, vr0, vr1
vadd.h vr0, vr5, vr9
vadd.h vr1, vr13, vr17
vadd.h vr17, vr0, vr1
vadd.h vr0, vr6, vr10
vadd.h vr1, vr14, vr18
vadd.h vr18, vr0, vr1
vadd.h vr0, vr7, vr11
vadd.h vr1, vr15, vr19
vadd.h vr19, vr0, vr1
vadd.h vr20, vr16, vr20
vadd.h vr21, vr17, vr21
vadd.h vr22, vr18, vr22
vadd.h vr23, vr19, vr23
.endr
// Horizontal reduction h->w->d->q per reference.
vhaddw.wu.hu vr20, vr20, vr20
vhaddw.wu.hu vr21, vr21, vr21
vhaddw.wu.hu vr22, vr22, vr22
vhaddw.wu.hu vr23, vr23, vr23
vhaddw.du.wu vr20, vr20, vr20
vhaddw.du.wu vr21, vr21, vr21
vhaddw.du.wu vr22, vr22, vr22
vhaddw.du.wu vr23, vr23, vr23
vhaddw.qu.du vr20, vr20, vr20
vhaddw.qu.du vr21, vr21, vr21
vhaddw.qu.du vr22, vr22, vr22
vhaddw.qu.du vr23, vr23, vr23
// Store data to p_sad_array
vstelm.w vr20, a6, 0, 0
vstelm.w vr21, a6, 4, 0
vstelm.w vr22, a6, 8, 0
vstelm.w vr23, a6, 12, 0
endfunc_x264
#endif /* !HIGH_BIT_DEPTH */
|
aestream/faery
| 51,573
|
src/mp4/x264/common/loongarch/predict-a.S
|
/*****************************************************************************
* predict-a.S: loongarch predict functions
*****************************************************************************
* Copyright (C) 2023-2024 x264 project
*
* Authors: Xiwei Gu <guxiwei-hf@loongson.cn>
* Lu Wang <wanglu@loongson.cn>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "loongson_asm.S"
#include "loongson_util.S"
#if !HIGH_BIT_DEPTH
/****************************************************************************
* 4x4 prediction for intra luma block
****************************************************************************/
/* void x264_predict_4x4_v_c( pixel *src )
*/
function_x264 predict_4x4_v_lsx
// Vertical 4x4 intra prediction.
// a0 = src; rows are FDEC_STRIDE bytes apart.
// Copies the 4 pixels directly above the block into each of the 4 rows.
ld.wu t0, a0, -FDEC_STRIDE
st.w t0, a0, 0
st.w t0, a0, FDEC_STRIDE
st.w t0, a0, FDEC_STRIDE * 2
st.w t0, a0, FDEC_STRIDE * 3
endfunc_x264
/* void x264_predict_4x4_h_c( pixel *src )
*/
function_x264 predict_4x4_h_lsx
// Horizontal 4x4 intra prediction.
// Each row is filled with a broadcast of its left-neighbour pixel
// (the byte at column -1 of the same row).
vldrepl.b vr0, a0, -1
vldrepl.b vr1, a0, FDEC_STRIDE - 1
vldrepl.b vr2, a0, FDEC_STRIDE * 2 - 1
vldrepl.b vr3, a0, FDEC_STRIDE * 3 - 1
fst.s f0, a0, 0
fst.s f1, a0, FDEC_STRIDE
fst.s f2, a0, FDEC_STRIDE * 2
fst.s f3, a0, FDEC_STRIDE * 3
endfunc_x264
/* void x264_predict_4x4_dc_c( pixel *src )
*/
function_x264 predict_4x4_dc_lsx
// DC 4x4 intra prediction: fill with the rounded average of the 4 top
// and 4 left neighbour pixels, i.e. (sum + 4) >> 3.
fld.s f0, a0, -FDEC_STRIDE             // 4 top neighbours
ld.bu t0, a0, -1                       // 4 left neighbours
ld.bu t1, a0, FDEC_STRIDE - 1
ld.bu t2, a0, FDEC_STRIDE * 2 - 1
ld.bu t3, a0, FDEC_STRIDE * 3 - 1
vhaddw.hu.bu vr1, vr0, vr0             // horizontal sum of the 4 top bytes
vhaddw.wu.hu vr2, vr1, vr1
vpickve2gr.w t4, vr2, 0
add.w t0, t0, t1
add.w t0, t0, t2
add.w t0, t0, t3
add.w t0, t0, t4                       // top sum + left sum
addi.w t0, t0, 4
srai.w t0, t0, 3                       // (sum + 4) >> 3
vreplgr2vr.b vr0, t0                   // broadcast DC value to all lanes
vstelm.w vr0, a0, 0, 0
vstelm.w vr0, a0, FDEC_STRIDE, 0
vstelm.w vr0, a0, FDEC_STRIDE * 2, 0
vstelm.w vr0, a0, FDEC_STRIDE * 3, 0
endfunc_x264
/* void predict_4x4_dc_top_c( pixel *src )
*/
function_x264 predict_4x4_dc_top_lsx
// DC-top 4x4 prediction: average of the 4 top neighbours only,
// rounded as (sum + 2) >> 2 (vsrari.w performs the rounding shift).
fld.s f0, a0, -FDEC_STRIDE
vhaddw.hu.bu vr1, vr0, vr0
vhaddw.wu.hu vr2, vr1, vr1
vsrari.w vr2, vr2, 2
vreplvei.b vr3, vr2, 0                 // broadcast DC byte
fst.s f3, a0, 0
fst.s f3, a0, FDEC_STRIDE
fst.s f3, a0, FDEC_STRIDE * 2
fst.s f3, a0, FDEC_STRIDE * 3
endfunc_x264
/* void predict_4x4_dc_left_c( pixel *src )
*/
function_x264 predict_4x4_dc_left_lsx
// DC-left 4x4 prediction: average of the 4 left neighbours only,
// rounded as (sum + 2) >> 2.
ld.bu t0, a0, -1
ld.bu t1, a0, FDEC_STRIDE - 1
ld.bu t2, a0, FDEC_STRIDE * 2 - 1
ld.bu t3, a0, FDEC_STRIDE * 3 - 1
add.w t0, t0, t1
add.w t0, t0, t2
add.w t0, t0, t3
addi.w t0, t0, 2
srai.w t0, t0, 2
vreplgr2vr.b vr3, t0
fst.s f3, a0, 0
fst.s f3, a0, FDEC_STRIDE
fst.s f3, a0, FDEC_STRIDE * 2
fst.s f3, a0, FDEC_STRIDE * 3
endfunc_x264
/* void predict_4x4_dc_128_c( pixel *src )
*/
function_x264 predict_4x4_dc_128_lsx
// DC-128 4x4 prediction: no neighbours available, fill with the
// mid-scale value 1 << (BIT_DEPTH - 1) (128 for 8-bit).
addi.w t0, zero, 1
slli.w t0, t0, BIT_DEPTH - 1
vreplgr2vr.b vr3, t0
fst.s f3, a0, 0
fst.s f3, a0, FDEC_STRIDE
fst.s f3, a0, FDEC_STRIDE * 2
fst.s f3, a0, FDEC_STRIDE * 3
endfunc_x264
/* void predict_4x4_ddl_c( pixel *src )
*/
function_x264 predict_4x4_ddl_lsx
// Diagonal-down-left 4x4 prediction.
// Loads 8 top neighbours, widens to u16 and applies the 3-tap
// (1,2,1)/4 lowpass: out[i] = (t[i] + 2*t[i+1] + t[i+2] + 2) >> 2.
// Each successive row reads the filtered sequence shifted by one.
fld.d f0, a0, -FDEC_STRIDE
vxor.v vr10, vr10, vr10
vilvl.b vr0, vr10, vr0                 // zero-extend bytes to halfwords
vbsrl.v vr1, vr0, 2                    // t[i+1]
vbsrl.v vr2, vr0, 4                    // t[i+2]
// t7
vextrins.h vr2, vr0, 0x67              // duplicate last top pixel past the edge
vslli.h vr1, vr1, 1                    // 2*t[i+1]
vadd.h vr0, vr0, vr1
vadd.h vr2, vr0, vr2
vssrarni.bu.h vr3, vr2, 2              // rounded >>2, saturate back to u8
fst.s f3, a0, 0
vbsrl.v vr4, vr3, 1
fst.s f4, a0, FDEC_STRIDE
vbsrl.v vr4, vr4, 1
fst.s f4, a0, FDEC_STRIDE * 2
vbsrl.v vr4, vr4, 1
fst.s f4, a0, FDEC_STRIDE * 3
endfunc_x264
/****************************************************************************
* 8x8 prediction for intra chroma block (4:2:0)
****************************************************************************/
/* void x264_predict_8x8c_p_lsx( pixel *src )
*/
// Multiplier tables for the 8x8 chroma plane predictor below.
const mula
.short 1, 2, 3, 4, 0, 0, 0, 0          // weights for the H/V gradient sums
endconst
const mulb
.short 0, 1, 2, 3, 4, 5, 6, 7          // per-column ramp 0..7
endconst
function_x264 predict_8x8c_p_lsx
// Plane (gradient) prediction for the 8x8 chroma block.
// Computes horizontal gradient H from the top row, vertical gradient V
// from the left column, then fills row y with clip((i00 + b*x + c*y) >> 5)
// where b, c are scaled gradients and a is from the corner neighbours.
la.local t0, mula
fld.d f3, t0, 0                        // weights {1,2,3,4}
fld.s f4, a0, 4 - FDEC_STRIDE          // top[4..7]
fld.s f5, a0, -1 - FDEC_STRIDE         // top[-1..2]
vxor.v vr0, vr0, vr0
vilvl.b vr4, vr0, vr4                  // widen to u16
vilvl.b vr5, vr0, vr5
vshuf4i.h vr5, vr5, 0x1b               // reverse: top[2..-1]
vsub.h vr4, vr4, vr5                   // top[4+i] - top[2-i]
vmul.h vr4, vr4, vr3                   // weight by i+1
vhaddw.w.h vr4, vr4, vr4
vhaddw.d.w vr4, vr4, vr4
vpickve2gr.w t0, vr4, 0 /* H */
fld.s f6, a0, FDEC_STRIDE * 4 - 1      // left-column samples below centre
fld.s f7, a0, FDEC_STRIDE * 5 - 1
fld.s f8, a0, FDEC_STRIDE * 6 - 1
fld.s f9, a0, FDEC_STRIDE * 7 - 1
fld.s f10, a0, FDEC_STRIDE * 2 - 1     // left-column samples above centre
fld.s f11, a0, FDEC_STRIDE - 1
fld.s f12, a0, -1
fld.s f13, a0, -1 - FDEC_STRIDE        // top-left corner
vilvl.b vr6, vr7, vr6
vilvl.b vr9, vr9, vr8
vilvl.h vr6, vr9, vr6
vilvl.b vr10, vr11, vr10
vilvl.b vr12, vr13, vr12
vilvl.h vr10, vr12, vr10
vilvl.b vr6, vr0, vr6                  // widen to u16
vilvl.b vr10, vr0, vr10
vsub.h vr6, vr6, vr10                  // below - above differences
vmul.h vr6, vr6, vr3                   // weight by row distance
vhaddw.w.h vr6, vr6, vr6
vhaddw.d.w vr6, vr6, vr6
vpickve2gr.w t1, vr6, 0 /* V */
ld.bu t2, a0, FDEC_STRIDE * 7 - 1      // src[-1 + 7*stride]
ld.bu t3, a0, 7 - FDEC_STRIDE          // src[7 - stride]
add.w t2, t2, t3
slli.w t2, t2, 4 /* a */
slli.w t3, t0, 4
add.w t0, t0, t3                       // 17 * H
addi.w t0, t0, 16
srai.w t0, t0, 5 /* b */
slli.w t3, t1, 4
add.w t1, t1, t3                       // 17 * V
addi.w t1, t1, 16
srai.w t1, t1, 5 /* c */
add.w t3, t0, t1
slli.w t4, t3, 1
add.w t4, t4, t3                       // 3 * (b + c)
sub.w t5, t2, t4
addi.w t5, t5, 16 /* i00 */
la.local t3, mulb
vld vr14, t3, 0                        // column ramp 0..7
vreplgr2vr.h vr12, t0
vmul.h vr12, vr12, vr14                // b * x per column
// Build per-row base values i00 + c*y for y = 0..7.
vreplgr2vr.h vr14, t5
add.w t5, t5, t1
vreplgr2vr.h vr15, t5
add.w t5, t5, t1
vreplgr2vr.h vr16, t5
add.w t5, t5, t1
vreplgr2vr.h vr17, t5
add.w t5, t5, t1
vreplgr2vr.h vr18, t5
add.w t5, t5, t1
vreplgr2vr.h vr19, t5
add.w t5, t5, t1
vreplgr2vr.h vr20, t5
add.w t5, t5, t1
vreplgr2vr.h vr21, t5
vadd.h vr14, vr12, vr14                // add column term
vadd.h vr15, vr12, vr15
vadd.h vr16, vr12, vr16
vadd.h vr17, vr12, vr17
vadd.h vr18, vr12, vr18
vadd.h vr19, vr12, vr19
vadd.h vr20, vr12, vr20
vadd.h vr21, vr12, vr21
vssrani.bu.h vr14, vr14, 5             // >>5, saturate to u8
vssrani.bu.h vr15, vr15, 5
vssrani.bu.h vr16, vr16, 5
vssrani.bu.h vr17, vr17, 5
vssrani.bu.h vr18, vr18, 5
vssrani.bu.h vr19, vr19, 5
vssrani.bu.h vr20, vr20, 5
vssrani.bu.h vr21, vr21, 5
fst.d f14, a0, 0
fst.d f15, a0, FDEC_STRIDE
fst.d f16, a0, FDEC_STRIDE * 2
fst.d f17, a0, FDEC_STRIDE * 3
fst.d f18, a0, FDEC_STRIDE * 4
fst.d f19, a0, FDEC_STRIDE * 5
fst.d f20, a0, FDEC_STRIDE * 6
fst.d f21, a0, FDEC_STRIDE * 7
endfunc_x264
/* void x264_predict_8x8c_v_lsx( pixel *src )
*/
function_x264 predict_8x8c_v_lsx
// Vertical 8x8 chroma prediction: copy the 8 pixels above the block
// into each of the 8 rows.
fld.d f0, a0, -FDEC_STRIDE
fst.d f0, a0, 0
fst.d f0, a0, FDEC_STRIDE
fst.d f0, a0, FDEC_STRIDE * 2
fst.d f0, a0, FDEC_STRIDE * 3
fst.d f0, a0, FDEC_STRIDE * 4
fst.d f0, a0, FDEC_STRIDE * 5
fst.d f0, a0, FDEC_STRIDE * 6
fst.d f0, a0, FDEC_STRIDE * 7
endfunc_x264
/* void x264_predict_8x8c_h_lsx( pixel *src )
*/
function_x264 predict_8x8c_h_lsx
// Horizontal 8x8 chroma prediction: each row is a broadcast of its
// left-neighbour pixel.
vldrepl.b vr0, a0, -1
vldrepl.b vr1, a0, FDEC_STRIDE - 1
vldrepl.b vr2, a0, FDEC_STRIDE * 2 - 1
vldrepl.b vr3, a0, FDEC_STRIDE * 3 - 1
vldrepl.b vr4, a0, FDEC_STRIDE * 4 - 1
vldrepl.b vr5, a0, FDEC_STRIDE * 5 - 1
vldrepl.b vr6, a0, FDEC_STRIDE * 6 - 1
vldrepl.b vr7, a0, FDEC_STRIDE * 7 - 1
fst.d f0, a0, 0
fst.d f1, a0, FDEC_STRIDE
fst.d f2, a0, FDEC_STRIDE * 2
fst.d f3, a0, FDEC_STRIDE * 3
fst.d f4, a0, FDEC_STRIDE * 4
fst.d f5, a0, FDEC_STRIDE * 5
fst.d f6, a0, FDEC_STRIDE * 6
fst.d f7, a0, FDEC_STRIDE * 7
endfunc_x264
/* void x264_predict_8x8c_dc_lsx( pixel *src )
*/
function_x264 predict_8x8c_dc_lsx
// DC 8x8 chroma prediction with four independent 4x4 quadrant DCs:
//   s0 = sum top[0..3], s1 = sum top[4..7],
//   s2 = sum left[0..3], s3 = sum left[4..7].
// Top-left quadrant uses (s0+s2+4)>>3, top-right (s1+2)>>2,
// bottom-left (s3+2)>>2, bottom-right (s1+s3+4)>>3.
fld.s f0, a0, -FDEC_STRIDE
fld.s f1, a0, 4 - FDEC_STRIDE
vhaddw.hu.bu vr2, vr0, vr0
vhaddw.wu.hu vr2, vr2, vr2
vhaddw.hu.bu vr3, vr1, vr1
vhaddw.wu.hu vr3, vr3, vr3
vpickve2gr.w t0, vr2, 0 /* s0 */
vpickve2gr.w t1, vr3, 0 /* s1 */
ld.bu t2, a0, -1
ld.bu t3, a0, FDEC_STRIDE - 1
ld.bu t4, a0, FDEC_STRIDE * 2 - 1
ld.bu t5, a0, FDEC_STRIDE * 3 - 1
add.w t2, t2, t3
add.w t2, t2, t4
add.w t2, t2, t5 /* s2 */
ld.bu t3, a0, FDEC_STRIDE * 4 - 1
ld.bu t4, a0, FDEC_STRIDE * 5 - 1
ld.bu t5, a0, FDEC_STRIDE * 6 - 1
ld.bu t6, a0, FDEC_STRIDE * 7 - 1
add.w t3, t3, t4
add.w t3, t3, t5
add.w t3, t3, t6 /* s3 */
add.w t4, t0, t2
addi.w t4, t4, 4
srai.w t4, t4, 3 /* ( s0 + s2 + 4 ) >> 3 */
addi.w t5, t1, 2
srai.w t5, t5, 2 /* ( s1 + 2 ) >> 2 */
addi.w t6, t3, 2
srai.w t6, t6, 2 /* ( s3 + 2 ) >> 2 */
add.w t7, t1, t3
addi.w t7, t7, 4
srai.w t7, t7, 3 /* ( s1 + s3 + 4 ) >> 3 */
vreplgr2vr.b vr4, t4
vreplgr2vr.b vr5, t5
vreplgr2vr.b vr6, t6
vreplgr2vr.b vr7, t7
vpackev.w vr4, vr5, vr4                // top row: left DC | right DC
vpackev.w vr6, vr7, vr6                // bottom row: left DC | right DC
fst.d f4, a0, 0
fst.d f4, a0, FDEC_STRIDE
fst.d f4, a0, FDEC_STRIDE * 2
fst.d f4, a0, FDEC_STRIDE * 3
fst.d f6, a0, FDEC_STRIDE * 4
fst.d f6, a0, FDEC_STRIDE * 5
fst.d f6, a0, FDEC_STRIDE * 6
fst.d f6, a0, FDEC_STRIDE * 7
endfunc_x264
/* void x264_predict_8x8c_dc_128_lsx( pixel *src )
*/
function_x264 predict_8x8c_dc_128_lsx
// DC-128 8x8 chroma prediction: no neighbours available, fill the block
// with the mid-scale value 1 << (BIT_DEPTH - 1) (128 for 8-bit depth).
// Fix: the original did "ori t1, t0, 1", OR-ing against an uninitialized
// t0, so garbage bits survived the shift and corrupted the fill value.
// Build the constant from the zero register instead, matching
// predict_4x4_dc_128_lsx / predict_8x8_dc_128_lsx.
addi.w t0, zero, 1
slli.d t1, t0, BIT_DEPTH - 1
vreplgr2vr.b vr4, t1
fst.d f4, a0, 0
fst.d f4, a0, FDEC_STRIDE
fst.d f4, a0, FDEC_STRIDE * 2
fst.d f4, a0, FDEC_STRIDE * 3
fst.d f4, a0, FDEC_STRIDE * 4
fst.d f4, a0, FDEC_STRIDE * 5
fst.d f4, a0, FDEC_STRIDE * 6
fst.d f4, a0, FDEC_STRIDE * 7
endfunc_x264
/* void x264_predict_8x8c_dc_top_lsx( pixel *src )
*/
function_x264 predict_8x8c_dc_top_lsx
// DC-top 8x8 chroma prediction: left half uses the average of
// top[0..3], right half the average of top[4..7], each (sum+2)>>2.
fld.s f0, a0, -FDEC_STRIDE
fld.s f1, a0, 4 - FDEC_STRIDE
vhaddw.hu.bu vr0, vr0, vr0
vhaddw.wu.hu vr0, vr0, vr0
vhaddw.hu.bu vr1, vr1, vr1
vhaddw.wu.hu vr1, vr1, vr1
vpickve2gr.w t0, vr0, 0 /* dc0 */
vpickve2gr.w t1, vr1, 0 /* dc1 */
addi.w t0, t0, 2
srai.w t0, t0, 2
addi.w t1, t1, 2
srai.w t1, t1, 2
vreplgr2vr.b vr4, t0
vreplgr2vr.b vr5, t1
vpackev.w vr4, vr5, vr4                // dc0 in cols 0-3, dc1 in cols 4-7
fst.d f4, a0, 0
fst.d f4, a0, FDEC_STRIDE
fst.d f4, a0, FDEC_STRIDE * 2
fst.d f4, a0, FDEC_STRIDE * 3
fst.d f4, a0, FDEC_STRIDE * 4
fst.d f4, a0, FDEC_STRIDE * 5
fst.d f4, a0, FDEC_STRIDE * 6
fst.d f4, a0, FDEC_STRIDE * 7
endfunc_x264
/* void x264_predict_8x8c_dc_left_lsx( pixel *src )
*/
function_x264 predict_8x8c_dc_left_lsx
// DC-left 8x8 chroma prediction: top 4 rows use the average of
// left[0..3], bottom 4 rows the average of left[4..7], each (sum+2)>>2.
ld.bu t0, a0, -1
ld.bu t1, a0, FDEC_STRIDE - 1
ld.bu t2, a0, FDEC_STRIDE * 2 - 1
ld.bu t3, a0, FDEC_STRIDE * 3 - 1
add.w t0, t0, t1
add.w t0, t0, t2
add.w t0, t0, t3
ld.bu t1, a0, FDEC_STRIDE * 4 - 1
ld.bu t2, a0, FDEC_STRIDE * 5 - 1
ld.bu t3, a0, FDEC_STRIDE * 6 - 1
ld.bu t4, a0, FDEC_STRIDE * 7 - 1
add.w t1, t1, t2
add.w t1, t1, t3
add.w t1, t1, t4
addi.w t0, t0, 2
srai.w t0, t0, 2
addi.w t1, t1, 2
srai.w t1, t1, 2
vreplgr2vr.b vr4, t0 /* ( dc0 + 2 ) >> 2 */
vreplgr2vr.b vr5, t1 /* ( dc1 + 2 ) >> 2 */
fst.d f4, a0, 0
fst.d f4, a0, FDEC_STRIDE
fst.d f4, a0, FDEC_STRIDE * 2
fst.d f4, a0, FDEC_STRIDE * 3
fst.d f5, a0, FDEC_STRIDE * 4
fst.d f5, a0, FDEC_STRIDE * 5
fst.d f5, a0, FDEC_STRIDE * 6
fst.d f5, a0, FDEC_STRIDE * 7
endfunc_x264
/****************************************************************************
* 8x8 prediction for intra luma block
****************************************************************************/
/* void predict_8x8_v_c( pixel *src, pixel edge[36] )
*/
function_x264 predict_8x8_v_lsx
// Vertical 8x8 luma prediction.
// a0 = src, a1 = filtered edge array; the top row lives at edge[16..23]
// (as loaded here) and is copied into all 8 destination rows.
fld.d f0, a1, 16
fst.d f0, a0, 0
fst.d f0, a0, FDEC_STRIDE
fst.d f0, a0, FDEC_STRIDE * 2
fst.d f0, a0, FDEC_STRIDE * 3
fst.d f0, a0, FDEC_STRIDE * 4
fst.d f0, a0, FDEC_STRIDE * 5
fst.d f0, a0, FDEC_STRIDE * 6
fst.d f0, a0, FDEC_STRIDE * 7
endfunc_x264
/* void predict_8x8_h_c( pixel *src, pixel edge[36] )
*/
function_x264 predict_8x8_h_lasx
// Horizontal 8x8 luma prediction (LASX variant).
// Left neighbours are read from edge[7..14]; each row is filled with a
// broadcast of one of those bytes. The 8 bytes are mirrored into the
// high 128-bit lane so both lanes can supply four rows each.
fld.d f0, a1, 7
xvinsve0.w xr0, xr0, 5
xvrepl128vei.b xr4, xr0, 7
xvrepl128vei.b xr3, xr0, 6
xvrepl128vei.b xr2, xr0, 5
xvrepl128vei.b xr1, xr0, 4
fst.d f4, a0, 0
fst.d f3, a0, FDEC_STRIDE
fst.d f2, a0, FDEC_STRIDE * 2
fst.d f1, a0, FDEC_STRIDE * 3
xvstelm.d xr4, a0, FDEC_STRIDE * 4, 2
xvstelm.d xr3, a0, FDEC_STRIDE * 5, 2
xvstelm.d xr2, a0, FDEC_STRIDE * 6, 2
xvstelm.d xr1, a0, FDEC_STRIDE * 7, 2
endfunc_x264
function_x264 predict_8x8_h_lsx
// Horizontal 8x8 luma prediction (LSX variant).
// edge[7..14] holds the left neighbours; vr0 keeps bytes 4..7 for the
// top four rows while vr1 (low word replicated) supplies bytes 0..3
// for the bottom four rows.
fld.d f0, a1, 7
vreplvei.w vr1, vr0, 0
vreplvei.b vr4, vr0, 7
vreplvei.b vr5, vr1, 7
vreplvei.b vr6, vr0, 6
vreplvei.b vr7, vr1, 6
vreplvei.b vr8, vr0, 5
vreplvei.b vr9, vr1, 5
vreplvei.b vr10, vr0, 4
vreplvei.b vr11, vr1, 4
fst.d f4, a0, 0
fst.d f6, a0, FDEC_STRIDE
fst.d f8, a0, FDEC_STRIDE * 2
fst.d f10, a0, FDEC_STRIDE * 3
vstelm.d vr5, a0, FDEC_STRIDE * 4, 0
vstelm.d vr7, a0, FDEC_STRIDE * 5, 0
vstelm.d vr9, a0, FDEC_STRIDE * 6, 0
vstelm.d vr11, a0, FDEC_STRIDE * 7, 0
endfunc_x264
/* void predict_8x8_dc_c( pixel *src, pixel edge[36] )
*/
function_x264 predict_8x8_dc_lsx
// DC 8x8 luma prediction: average of the 8 left (edge[7..14]) and
// 8 top (edge[16..23]) neighbours, rounded as (sum + 8) >> 4
// (vsrari.w performs the rounding shift by 4).
fld.d f0, a1, 7
fld.d f1, a1, 16
vilvl.d vr0, vr1, vr0                  // concatenate left | top bytes
vhaddw.hu.bu vr1, vr0, vr0             // tree-sum all 16 bytes
vhaddw.wu.hu vr2, vr1, vr1
vhaddw.du.wu vr3, vr2, vr2
vhaddw.qu.du vr4, vr3, vr3
vsrari.w vr4, vr4, 4
vreplvei.b vr5, vr4, 0
fst.d f5, a0, 0
fst.d f5, a0, FDEC_STRIDE
fst.d f5, a0, FDEC_STRIDE * 2
fst.d f5, a0, FDEC_STRIDE * 3
fst.d f5, a0, FDEC_STRIDE * 4
fst.d f5, a0, FDEC_STRIDE * 5
fst.d f5, a0, FDEC_STRIDE * 6
fst.d f5, a0, FDEC_STRIDE * 7
endfunc_x264
/* void predict_8x8_dc_left_c( pixel *src, pixel edge[36] )
*/
function_x264 predict_8x8_dc_left_lsx
// DC-left 8x8 luma prediction: average of the 8 left neighbours
// (edge[7..14]) only, rounded as (sum + 4) >> 3.
fld.d f0, a1, 7
vhaddw.hu.bu vr1, vr0, vr0
vhaddw.wu.hu vr2, vr1, vr1
vhaddw.du.wu vr3, vr2, vr2
vsrari.w vr3, vr3, 3
vreplvei.b vr5, vr3, 0
fst.d f5, a0, 0
fst.d f5, a0, FDEC_STRIDE
fst.d f5, a0, FDEC_STRIDE * 2
fst.d f5, a0, FDEC_STRIDE * 3
fst.d f5, a0, FDEC_STRIDE * 4
fst.d f5, a0, FDEC_STRIDE * 5
fst.d f5, a0, FDEC_STRIDE * 6
fst.d f5, a0, FDEC_STRIDE * 7
endfunc_x264
/* void predict_8x8_dc_top_c( pixel *src, pixel edge[36] )
*/
function_x264 predict_8x8_dc_top_lsx
// DC-top 8x8 luma prediction: average of the 8 top neighbours
// (edge[16..23]) only, rounded as (sum + 4) >> 3.
fld.d f0, a1, 16
vhaddw.hu.bu vr1, vr0, vr0
vhaddw.wu.hu vr2, vr1, vr1
vhaddw.du.wu vr3, vr2, vr2
vsrari.w vr3, vr3, 3
vreplvei.b vr5, vr3, 0
fst.d f5, a0, 0
fst.d f5, a0, FDEC_STRIDE
fst.d f5, a0, FDEC_STRIDE * 2
fst.d f5, a0, FDEC_STRIDE * 3
fst.d f5, a0, FDEC_STRIDE * 4
fst.d f5, a0, FDEC_STRIDE * 5
fst.d f5, a0, FDEC_STRIDE * 6
fst.d f5, a0, FDEC_STRIDE * 7
endfunc_x264
/* void predict_8x8_dc_128_c( pixel *src, pixel edge[36] )
*/
function_x264 predict_8x8_dc_128_lsx
// DC-128 8x8 luma prediction: fill with the mid value
// 1 << (BIT_DEPTH - 1); neighbours are unused.
addi.w t0, zero, 1
slli.d t1, t0, (BIT_DEPTH-1)
vreplgr2vr.b vr5, t1
fst.d f5, a0, 0
fst.d f5, a0, FDEC_STRIDE
fst.d f5, a0, FDEC_STRIDE * 2
fst.d f5, a0, FDEC_STRIDE * 3
fst.d f5, a0, FDEC_STRIDE * 4
fst.d f5, a0, FDEC_STRIDE * 5
fst.d f5, a0, FDEC_STRIDE * 6
fst.d f5, a0, FDEC_STRIDE * 7
endfunc_x264
/* void predict_8x8_ddl_c( pixel *src, pixel edge[36] )
*/
function_x264 predict_8x8_ddl_lasx
// Diagonal-down-left 8x8 prediction (LASX).
// Applies the (1,2,1)/4 lowpass over edge[16..31] (top + top-right),
// then row y reads the filtered sequence shifted left by y bytes.
vld vr1, a1, 16
vbsrl.v vr2, vr1, 1                    // t[i+1]
vbsrl.v vr3, vr1, 2                    // t[i+2]
vextrins.b vr3, vr1, 0xef              // replicate last edge byte at the end
vext2xv.hu.bu xr5, xr1                 // widen to u16
vext2xv.hu.bu xr6, xr2
vext2xv.hu.bu xr7, xr3
xvslli.h xr6, xr6, 1                   // 2*t[i+1]
xvadd.h xr8, xr5, xr6
xvadd.h xr9, xr8, xr7
xvssrarni.bu.h xr9, xr9, 2             // rounded >>2, back to u8
xvpermi.d xr9, xr9, 0x08               // compact result into low 128 bits
vbsrl.v vr10, vr9, 1
vbsrl.v vr11, vr9, 2
vbsrl.v vr12, vr9, 3
vbsrl.v vr13, vr9, 4
vbsrl.v vr14, vr9, 5
vbsrl.v vr15, vr9, 6
vbsrl.v vr16, vr9, 7
fst.d f9, a0, 0
fst.d f10, a0, FDEC_STRIDE
fst.d f11, a0, FDEC_STRIDE * 2
fst.d f12, a0, FDEC_STRIDE * 3
fst.d f13, a0, FDEC_STRIDE * 4
fst.d f14, a0, FDEC_STRIDE * 5
fst.d f15, a0, FDEC_STRIDE * 6
fst.d f16, a0, FDEC_STRIDE * 7
endfunc_x264
function_x264 predict_8x8_ddl_lsx
// Diagonal-down-left 8x8 prediction (LSX).
// Same filter as the LASX version; the 16 u16 lanes are processed as
// low/high 8-lane halves and re-packed by vssrarni.
vld vr1, a1, 16
vbsrl.v vr2, vr1, 1
vbsrl.v vr3, vr1, 2
vextrins.b vr3, vr1, 0xef              // replicate last edge byte
vsllwil.hu.bu vr5, vr1, 0              // widen low half
vexth.hu.bu vr15, vr1                  // widen high half
vsllwil.hu.bu vr6, vr2, 0
vexth.hu.bu vr16, vr2
vsllwil.hu.bu vr7, vr3, 0
vexth.hu.bu vr17, vr3
vslli.h vr6, vr6, 1                    // 2*t[i+1]
vslli.h vr16, vr16, 1
vadd.h vr8, vr5, vr6
vadd.h vr18, vr15, vr16
vadd.h vr19, vr8, vr7
vadd.h vr9, vr18, vr17
vssrarni.bu.h vr9, vr19, 2             // merge halves, rounded >>2
vbsrl.v vr10, vr9, 1
vbsrl.v vr11, vr9, 2
vbsrl.v vr12, vr9, 3
vbsrl.v vr13, vr9, 4
vbsrl.v vr14, vr9, 5
vbsrl.v vr15, vr9, 6
vbsrl.v vr16, vr9, 7
fst.d f9, a0, 0
fst.d f10, a0, FDEC_STRIDE
fst.d f11, a0, FDEC_STRIDE * 2
fst.d f12, a0, FDEC_STRIDE * 3
fst.d f13, a0, FDEC_STRIDE * 4
fst.d f14, a0, FDEC_STRIDE * 5
fst.d f15, a0, FDEC_STRIDE * 6
fst.d f16, a0, FDEC_STRIDE * 7
endfunc_x264
/* void predict_8x8_ddr_c( pixel *src, pixel edge[36] )
*/
function_x264 predict_8x8_ddr_lasx
// Diagonal-down-right 8x8 prediction (LASX).
// Filters edge[7..23] (left + corner + top) with the (1,2,1)/4 lowpass;
// row y reads the filtered sequence shifted right as y grows.
vld vr1, a1, 7
vbsrl.v vr2, vr1, 1
vbsrl.v vr3, vr1, 2
// edge[23]
ld.bu t0, a1, 23                       // top byte beyond the 16-byte load
vinsgr2vr.b vr3, t0, 0xe
vext2xv.hu.bu xr1, xr1                 // widen all three taps to u16
vext2xv.hu.bu xr2, xr2
vext2xv.hu.bu xr3, xr3
xvslli.h xr2, xr2, 1                   // 2*centre tap
xvadd.h xr4, xr1, xr2
xvadd.h xr5, xr4, xr3
xvssrarni.bu.h xr5, xr5, 2             // rounded >>2, back to u8
xvpermi.d xr6, xr5, 0x08               // compact to low 128 bits
vbsrl.v vr7, vr6, 7
vbsrl.v vr8, vr6, 6
vbsrl.v vr9, vr6, 5
vbsrl.v vr10, vr6, 4
vbsrl.v vr11, vr6, 3
vbsrl.v vr12, vr6, 2
vbsrl.v vr13, vr6, 1
fst.d f7, a0, 0
fst.d f8, a0, FDEC_STRIDE
fst.d f9, a0, FDEC_STRIDE * 2
fst.d f10, a0, FDEC_STRIDE * 3
fst.d f11, a0, FDEC_STRIDE * 4
fst.d f12, a0, FDEC_STRIDE * 5
fst.d f13, a0, FDEC_STRIDE * 6
fst.d f6, a0, FDEC_STRIDE * 7
endfunc_x264
function_x264 predict_8x8_ddr_lsx
// Diagonal-down-right 8x8 prediction (LSX).
// Same computation as the LASX variant, split into low/high u16 halves.
vld vr1, a1, 7
vbsrl.v vr2, vr1, 1
vbsrl.v vr3, vr1, 2
// edge[23]
ld.bu t0, a1, 23
vinsgr2vr.b vr3, t0, 0xe
vexth.hu.bu vr11, vr1                  // high halves widened
vsllwil.hu.bu vr1, vr1, 0              // low halves widened
vexth.hu.bu vr12, vr2
vsllwil.hu.bu vr2, vr2, 0
vexth.hu.bu vr13, vr3
vsllwil.hu.bu vr3, vr3, 0
vslli.h vr2, vr2, 1
vslli.h vr12, vr12, 1
vadd.h vr4, vr1, vr2
vadd.h vr14, vr11, vr12
vadd.h vr5, vr4, vr3
vadd.h vr15, vr14, vr13
vssrarni.bu.h vr15, vr5, 2             // merge halves, rounded >>2
vbsrl.v vr7, vr15, 7
vbsrl.v vr8, vr15, 6
vbsrl.v vr9, vr15, 5
vbsrl.v vr10, vr15, 4
vbsrl.v vr11, vr15, 3
vbsrl.v vr12, vr15, 2
vbsrl.v vr13, vr15, 1
fst.d f7, a0, 0
fst.d f8, a0, FDEC_STRIDE
fst.d f9, a0, FDEC_STRIDE * 2
fst.d f10, a0, FDEC_STRIDE * 3
fst.d f11, a0, FDEC_STRIDE * 4
fst.d f12, a0, FDEC_STRIDE * 5
fst.d f13, a0, FDEC_STRIDE * 6
fst.d f15, a0, FDEC_STRIDE * 7
endfunc_x264
/* void predict_8x8_vr_c( pixel *src, pixel edge[36] )
*/
function_x264 predict_8x8_vr_lasx
// Vertical-right 8x8 prediction (LASX).
// Builds two filtered sequences from edge[8..]: a 3-tap (1,2,1)/4 set
// (xr12) for odd rows and a 2-tap (1,1)/2 set (xr10) for even rows;
// successive rows shift and splice in extra filtered left pixels.
vld vr0, a1, 8
vbsrl.v vr1, vr0, 1
vbsrl.v vr2, vr0, 2
vext2xv.hu.bu xr5, xr0
vext2xv.hu.bu xr6, xr1
vext2xv.hu.bu xr7, xr2
xvadd.h xr10, xr5, xr6                 // t[i] + t[i+1]
xvadd.h xr11, xr10, xr6                // t[i] + 2*t[i+1]
xvadd.h xr12, xr11, xr7                // + t[i+2]
xvssrarni.bu.h xr12, xr12, 2           // 3-tap, rounded >>2
xvssrarni.bu.h xr10, xr10, 1           // 2-tap, rounded >>1
xvpermi.d xr13, xr12, 0x08
xvpermi.d xr14, xr10, 0x08
vbsrl.v vr15, vr13, 6
vbsll.v vr16, vr15, 1
vextrins.b vr16, vr13, 0x04            // prepend filtered left pixel
vbsll.v vr17, vr16, 1
vextrins.b vr17, vr13, 0x02
vbsll.v vr18, vr17, 1
vextrins.b vr18, vr13, 0x00
fst.d f15, a0, FDEC_STRIDE
fst.d f16, a0, FDEC_STRIDE * 3
fst.d f17, a0, FDEC_STRIDE * 5
fst.d f18, a0, FDEC_STRIDE * 7
vbsrl.v vr16, vr14, 7
vbsll.v vr17, vr16, 1
vextrins.b vr17, vr13, 0x05
vbsll.v vr18, vr17, 1
vextrins.b vr18, vr13, 0x03
vbsll.v vr19, vr18, 1
vextrins.b vr19, vr13, 0x01
fst.d f16, a0, 0
fst.d f17, a0, FDEC_STRIDE * 2
fst.d f18, a0, FDEC_STRIDE * 4
fst.d f19, a0, FDEC_STRIDE * 6
endfunc_x264
function_x264 predict_8x8_vr_lsx
// Vertical-right 8x8 prediction (LSX).
// Same two filtered sequences as the LASX variant, computed on
// low/high u16 halves and merged with vssrarni.
vld vr0, a1, 8
vbsrl.v vr1, vr0, 1
vbsrl.v vr2, vr0, 2
vexth.hu.bu vr5, vr0
vsllwil.hu.bu vr0, vr0, 0
vexth.hu.bu vr6, vr1
vsllwil.hu.bu vr1, vr1, 0
vexth.hu.bu vr7, vr2
vsllwil.hu.bu vr2, vr2, 0
vadd.h vr9, vr0, vr1                   // 2-tap sums (low/high)
vadd.h vr10, vr5, vr6
vadd.h vr11, vr9, vr1                  // t + 2*t[i+1]
vadd.h vr12, vr10, vr6
vadd.h vr13, vr11, vr2                 // 3-tap sums
vadd.h vr14, vr12, vr7
vssrarni.bu.h vr14, vr13, 2            // 3-tap, rounded >>2
vssrarni.bu.h vr10, vr9, 1             // 2-tap, rounded >>1
vbsrl.v vr15, vr14, 6
vbsll.v vr16, vr15, 1
vextrins.b vr16, vr14, 0x04            // prepend filtered left pixel
vbsll.v vr17, vr16, 1
vextrins.b vr17, vr14, 0x02
vbsll.v vr18, vr17, 1
vextrins.b vr18, vr14, 0x00
fst.d f15, a0, FDEC_STRIDE
fst.d f16, a0, FDEC_STRIDE * 3
fst.d f17, a0, FDEC_STRIDE * 5
fst.d f18, a0, FDEC_STRIDE * 7
vbsrl.v vr16, vr10, 7
vbsll.v vr17, vr16, 1
vextrins.b vr17, vr14, 0x05
vbsll.v vr18, vr17, 1
vextrins.b vr18, vr14, 0x03
vbsll.v vr19, vr18, 1
vextrins.b vr19, vr14, 0x01
fst.d f16, a0, 0
fst.d f17, a0, FDEC_STRIDE * 2
fst.d f18, a0, FDEC_STRIDE * 4
fst.d f19, a0, FDEC_STRIDE * 6
endfunc_x264
/* void predict_8x8_vl_c( pixel *src, pixel edge[36] );
*/
function_x264 predict_8x8_vl_lasx
// Vertical-left 8x8 prediction (LASX).
// From edge[16..]: even rows use the 2-tap (1,1)/2 filter (xr3),
// odd rows the 3-tap (1,2,1)/4 filter (xr5); each later row pair
// shifts one byte further along the filtered sequences.
vld vr0, a1, 16
vbsrl.v vr1, vr0, 1
vbsrl.v vr2, vr0, 2
vext2xv.hu.bu xr0, xr0
vext2xv.hu.bu xr1, xr1
vext2xv.hu.bu xr2, xr2
xvadd.h xr3, xr0, xr1                  // 2-tap sum
xvadd.h xr4, xr3, xr1                  // t + 2*t[i+1]
xvadd.h xr5, xr4, xr2                  // 3-tap sum
xvssrarni.bu.h xr3, xr3, 1             // rounded >>1
xvssrarni.bu.h xr5, xr5, 2             // rounded >>2
xvpermi.d xr6, xr3, 0x8
xvpermi.d xr7, xr5, 0x8
vbsrl.v vr8, vr6, 1
vbsrl.v vr9, vr7, 1
fst.d f6, a0, 0
fst.d f7, a0, FDEC_STRIDE
fst.d f8, a0, FDEC_STRIDE * 2
fst.d f9, a0, FDEC_STRIDE * 3
vbsrl.v vr10, vr8, 1
vbsrl.v vr11, vr9, 1
vbsrl.v vr12, vr10, 1
vbsrl.v vr13, vr11, 1
fst.d f10, a0, FDEC_STRIDE * 4
fst.d f11, a0, FDEC_STRIDE * 5
fst.d f12, a0, FDEC_STRIDE * 6
fst.d f13, a0, FDEC_STRIDE * 7
endfunc_x264
function_x264 predict_8x8_vl_lsx
// Vertical-left 8x8 prediction (LSX).
// Same 2-tap / 3-tap filtered sequences as the LASX variant, computed
// on low/high u16 halves and merged with vssrarni.
vld vr0, a1, 16
vbsrl.v vr1, vr0, 1
vbsrl.v vr2, vr0, 2
vexth.hu.bu vr5, vr0
vsllwil.hu.bu vr0, vr0, 0
vexth.hu.bu vr6, vr1
vsllwil.hu.bu vr1, vr1, 0
vexth.hu.bu vr7, vr2
vsllwil.hu.bu vr2, vr2, 0
vadd.h vr3, vr0, vr1                   // 2-tap sums (low/high)
vadd.h vr13, vr5, vr6
vadd.h vr4, vr3, vr1                   // t + 2*t[i+1]
vadd.h vr14, vr13, vr6
vadd.h vr5, vr4, vr2                   // 3-tap sums
vadd.h vr15, vr14, vr7
vssrarni.bu.h vr13, vr3, 1             // 2-tap, rounded >>1
vssrarni.bu.h vr15, vr5, 2             // 3-tap, rounded >>2
vbsrl.v vr8, vr13, 1
vbsrl.v vr9, vr15, 1
fst.d f13, a0, 0
fst.d f15, a0, FDEC_STRIDE
fst.d f8, a0, FDEC_STRIDE * 2
fst.d f9, a0, FDEC_STRIDE * 3
vbsrl.v vr8, vr8, 1
vbsrl.v vr9, vr9, 1
vbsrl.v vr10, vr8, 1
vbsrl.v vr11, vr9, 1
fst.d f8, a0, FDEC_STRIDE * 4
fst.d f9, a0, FDEC_STRIDE * 5
fst.d f10, a0, FDEC_STRIDE * 6
fst.d f11, a0, FDEC_STRIDE * 7
endfunc_x264
/****************************************************************************
* 16x16 prediction for intra luma block
****************************************************************************/
/* void x264_predict_16x16_dc_lsx( pixel *src )
*/
function_x264 predict_16x16_dc_lsx
// DC 16x16 luma prediction: average of the 16 left and 16 top
// neighbours, rounded as (sum + 16) >> 5, broadcast to all 16 rows.
// Left pixels are accumulated scalar (t4); the top row is summed with
// a vector horizontal-add tree.
ld.bu t4, a0, -1
ld.bu t5, a0, FDEC_STRIDE - 1
add.d t4, t4, t5
ld.bu t5, a0, FDEC_STRIDE * 2 - 1
add.d t4, t4, t5
ld.bu t5, a0, FDEC_STRIDE * 3 - 1
add.d t4, t4, t5
ld.bu t5, a0, FDEC_STRIDE * 4 - 1
add.d t4, t4, t5
ld.bu t5, a0, FDEC_STRIDE * 5 - 1
add.d t4, t4, t5
ld.bu t5, a0, FDEC_STRIDE * 6 - 1
add.d t4, t4, t5
ld.bu t5, a0, FDEC_STRIDE * 7 - 1
add.d t4, t4, t5
ld.bu t5, a0, FDEC_STRIDE * 8 - 1
add.d t4, t4, t5
ld.bu t5, a0, FDEC_STRIDE * 9 - 1
add.d t4, t4, t5
ld.bu t5, a0, FDEC_STRIDE * 10 - 1
add.d t4, t4, t5
ld.bu t5, a0, FDEC_STRIDE * 11 - 1
add.d t4, t4, t5
ld.bu t5, a0, FDEC_STRIDE * 12 - 1
add.d t4, t4, t5
ld.bu t5, a0, FDEC_STRIDE * 13 - 1
add.d t4, t4, t5
ld.bu t5, a0, FDEC_STRIDE * 14 - 1
add.d t4, t4, t5
ld.bu t5, a0, FDEC_STRIDE * 15 - 1
add.d t4, t4, t5                       // t4 = sum of 16 left pixels
vld vr4, a0, -FDEC_STRIDE              // 16 top pixels
vhaddw.hu.bu vr4, vr4, vr4
vhaddw.wu.hu vr4, vr4, vr4
vhaddw.du.wu vr4, vr4, vr4
vhaddw.qu.du vr4, vr4, vr4
vpickve2gr.wu t5, vr4, 0
add.d t4, t4, t5
addi.d t5, t4, 16
srai.w t5, t5, 5                       // (sum + 16) >> 5
vreplgr2vr.b vr5, t5
vst vr5, a0, 0
vst vr5, a0, FDEC_STRIDE
vst vr5, a0, FDEC_STRIDE * 2
vst vr5, a0, FDEC_STRIDE * 3
vst vr5, a0, FDEC_STRIDE * 4
vst vr5, a0, FDEC_STRIDE * 5
vst vr5, a0, FDEC_STRIDE * 6
vst vr5, a0, FDEC_STRIDE * 7
vst vr5, a0, FDEC_STRIDE * 8
vst vr5, a0, FDEC_STRIDE * 9
vst vr5, a0, FDEC_STRIDE * 10
vst vr5, a0, FDEC_STRIDE * 11
vst vr5, a0, FDEC_STRIDE * 12
vst vr5, a0, FDEC_STRIDE * 13
vst vr5, a0, FDEC_STRIDE * 14
vst vr5, a0, FDEC_STRIDE * 15
endfunc_x264
/* void x264_predict_16x16_dc_left_lsx( pixel *src )
*/
function_x264 predict_16x16_dc_left_lsx
// DC-left 16x16 luma prediction: average of the 16 left neighbours
// only, rounded as (sum + 8) >> 4.
ld.bu t4, a0, -1
ld.bu t5, a0, FDEC_STRIDE - 1
add.d t4, t4, t5
ld.bu t5, a0, FDEC_STRIDE * 2 - 1
add.d t4, t4, t5
ld.bu t5, a0, FDEC_STRIDE * 3 - 1
add.d t4, t4, t5
ld.bu t5, a0, FDEC_STRIDE * 4 - 1
add.d t4, t4, t5
ld.bu t5, a0, FDEC_STRIDE * 5 - 1
add.d t4, t4, t5
ld.bu t5, a0, FDEC_STRIDE * 6 - 1
add.d t4, t4, t5
ld.bu t5, a0, FDEC_STRIDE * 7 - 1
add.d t4, t4, t5
ld.bu t5, a0, FDEC_STRIDE * 8 - 1
add.d t4, t4, t5
ld.bu t5, a0, FDEC_STRIDE * 9 - 1
add.d t4, t4, t5
ld.bu t5, a0, FDEC_STRIDE * 10 - 1
add.d t4, t4, t5
ld.bu t5, a0, FDEC_STRIDE * 11 - 1
add.d t4, t4, t5
ld.bu t5, a0, FDEC_STRIDE * 12 - 1
add.d t4, t4, t5
ld.bu t5, a0, FDEC_STRIDE * 13 - 1
add.d t4, t4, t5
ld.bu t5, a0, FDEC_STRIDE * 14 - 1
add.d t4, t4, t5
ld.bu t5, a0, FDEC_STRIDE * 15 - 1
add.d t4, t4, t5                       // t4 = sum of 16 left pixels
addi.d t5, t4, 8
srai.w t5, t5, 4                       // (sum + 8) >> 4
vreplgr2vr.b vr5, t5
vst vr5, a0, 0
vst vr5, a0, FDEC_STRIDE
vst vr5, a0, FDEC_STRIDE * 2
vst vr5, a0, FDEC_STRIDE * 3
vst vr5, a0, FDEC_STRIDE * 4
vst vr5, a0, FDEC_STRIDE * 5
vst vr5, a0, FDEC_STRIDE * 6
vst vr5, a0, FDEC_STRIDE * 7
vst vr5, a0, FDEC_STRIDE * 8
vst vr5, a0, FDEC_STRIDE * 9
vst vr5, a0, FDEC_STRIDE * 10
vst vr5, a0, FDEC_STRIDE * 11
vst vr5, a0, FDEC_STRIDE * 12
vst vr5, a0, FDEC_STRIDE * 13
vst vr5, a0, FDEC_STRIDE * 14
vst vr5, a0, FDEC_STRIDE * 15
endfunc_x264
/* void x264_predict_16x16_dc_top_lsx( pixel *src )
*/
function_x264 predict_16x16_dc_top_lsx
// DC-top 16x16 luma prediction: average of the 16 top neighbours
// only, rounded as (sum + 8) >> 4.
vld vr4, a0, -FDEC_STRIDE
vhaddw.hu.bu vr4, vr4, vr4             // tree-sum the 16 bytes
vhaddw.wu.hu vr4, vr4, vr4
vhaddw.du.wu vr4, vr4, vr4
vhaddw.qu.du vr4, vr4, vr4
vpickve2gr.wu t5, vr4, 0
addi.d t5, t5, 8
srai.w t5, t5, 4
vreplgr2vr.b vr5, t5
vst vr5, a0, 0
vst vr5, a0, FDEC_STRIDE
vst vr5, a0, FDEC_STRIDE * 2
vst vr5, a0, FDEC_STRIDE * 3
vst vr5, a0, FDEC_STRIDE * 4
vst vr5, a0, FDEC_STRIDE * 5
vst vr5, a0, FDEC_STRIDE * 6
vst vr5, a0, FDEC_STRIDE * 7
vst vr5, a0, FDEC_STRIDE * 8
vst vr5, a0, FDEC_STRIDE * 9
vst vr5, a0, FDEC_STRIDE * 10
vst vr5, a0, FDEC_STRIDE * 11
vst vr5, a0, FDEC_STRIDE * 12
vst vr5, a0, FDEC_STRIDE * 13
vst vr5, a0, FDEC_STRIDE * 14
vst vr5, a0, FDEC_STRIDE * 15
endfunc_x264
/* void x264_predict_16x16_dc_128_lsx( pixel *src )
*/
function_x264 predict_16x16_dc_128_lsx
// DC-128 16x16 luma prediction: no neighbours available, fill the
// block with the mid-scale value 1 << (BIT_DEPTH - 1) (128 for 8-bit).
// Fix: the original did "ori t1, t0, 1" against an uninitialized t0,
// so garbage bits survived the shift and corrupted the fill value.
// Build the constant from the zero register, as predict_8x8_dc_128_lsx
// does.
addi.w t0, zero, 1
slli.d t1, t0, BIT_DEPTH - 1
vreplgr2vr.b vr5, t1
vst vr5, a0, 0
vst vr5, a0, FDEC_STRIDE
vst vr5, a0, FDEC_STRIDE * 2
vst vr5, a0, FDEC_STRIDE * 3
vst vr5, a0, FDEC_STRIDE * 4
vst vr5, a0, FDEC_STRIDE * 5
vst vr5, a0, FDEC_STRIDE * 6
vst vr5, a0, FDEC_STRIDE * 7
vst vr5, a0, FDEC_STRIDE * 8
vst vr5, a0, FDEC_STRIDE * 9
vst vr5, a0, FDEC_STRIDE * 10
vst vr5, a0, FDEC_STRIDE * 11
vst vr5, a0, FDEC_STRIDE * 12
vst vr5, a0, FDEC_STRIDE * 13
vst vr5, a0, FDEC_STRIDE * 14
vst vr5, a0, FDEC_STRIDE * 15
endfunc_x264
/* void x264_predict_16x16_h_lsx( pixel *src )
*/
function_x264 predict_16x16_h_lsx
// Horizontal 16x16 luma prediction: each row is a broadcast of its
// left-neighbour pixel; processed in two batches of 8 rows.
ld.bu t0, a0, -1
ld.bu t1, a0, FDEC_STRIDE - 1
ld.bu t2, a0, FDEC_STRIDE * 2 - 1
ld.bu t3, a0, FDEC_STRIDE * 3 - 1
ld.bu t4, a0, FDEC_STRIDE * 4 - 1
ld.bu t5, a0, FDEC_STRIDE * 5 - 1
ld.bu t6, a0, FDEC_STRIDE * 6 - 1
ld.bu t7, a0, FDEC_STRIDE * 7 - 1
vreplgr2vr.b vr0, t0
vreplgr2vr.b vr1, t1
vreplgr2vr.b vr2, t2
vreplgr2vr.b vr3, t3
vreplgr2vr.b vr4, t4
vreplgr2vr.b vr5, t5
vreplgr2vr.b vr6, t6
vreplgr2vr.b vr7, t7
vst vr0, a0, 0
vst vr1, a0, FDEC_STRIDE
vst vr2, a0, FDEC_STRIDE * 2
vst vr3, a0, FDEC_STRIDE * 3
vst vr4, a0, FDEC_STRIDE * 4
vst vr5, a0, FDEC_STRIDE * 5
vst vr6, a0, FDEC_STRIDE * 6
vst vr7, a0, FDEC_STRIDE * 7
// Rows 8..15.
ld.bu t0, a0, FDEC_STRIDE * 8 - 1
ld.bu t1, a0, FDEC_STRIDE * 9 - 1
ld.bu t2, a0, FDEC_STRIDE * 10 - 1
ld.bu t3, a0, FDEC_STRIDE * 11 - 1
ld.bu t4, a0, FDEC_STRIDE * 12 - 1
ld.bu t5, a0, FDEC_STRIDE * 13 - 1
ld.bu t6, a0, FDEC_STRIDE * 14 - 1
ld.bu t7, a0, FDEC_STRIDE * 15 - 1
vreplgr2vr.b vr0, t0
vreplgr2vr.b vr1, t1
vreplgr2vr.b vr2, t2
vreplgr2vr.b vr3, t3
vreplgr2vr.b vr4, t4
vreplgr2vr.b vr5, t5
vreplgr2vr.b vr6, t6
vreplgr2vr.b vr7, t7
vst vr0, a0, FDEC_STRIDE * 8
vst vr1, a0, FDEC_STRIDE * 9
vst vr2, a0, FDEC_STRIDE * 10
vst vr3, a0, FDEC_STRIDE * 11
vst vr4, a0, FDEC_STRIDE * 12
vst vr5, a0, FDEC_STRIDE * 13
vst vr6, a0, FDEC_STRIDE * 14
vst vr7, a0, FDEC_STRIDE * 15
endfunc_x264
/* void x264_predict_16x16_v_lsx( pixel *src )
*/
function_x264 predict_16x16_v_lsx
// Vertical 16x16 luma prediction: assemble the 16 top-neighbour pixels
// from four 4-byte loads and copy them to every row.
fld.d f4, a0, -FDEC_STRIDE
fld.d f5, a0, 4 - FDEC_STRIDE
fld.d f6, a0, 8 - FDEC_STRIDE
fld.d f7, a0, 12 - FDEC_STRIDE
vilvl.w vr4, vr5, vr4                  // merge words 0 and 1
vilvl.w vr6, vr7, vr6                  // merge words 2 and 3
vilvl.d vr4, vr6, vr4                  // full 16-byte top row
vst vr4, a0, 0
vst vr4, a0, FDEC_STRIDE
vst vr4, a0, FDEC_STRIDE * 2
vst vr4, a0, FDEC_STRIDE * 3
vst vr4, a0, FDEC_STRIDE * 4
vst vr4, a0, FDEC_STRIDE * 5
vst vr4, a0, FDEC_STRIDE * 6
vst vr4, a0, FDEC_STRIDE * 7
vst vr4, a0, FDEC_STRIDE * 8
vst vr4, a0, FDEC_STRIDE * 9
vst vr4, a0, FDEC_STRIDE * 10
vst vr4, a0, FDEC_STRIDE * 11
vst vr4, a0, FDEC_STRIDE * 12
vst vr4, a0, FDEC_STRIDE * 13
vst vr4, a0, FDEC_STRIDE * 14
vst vr4, a0, FDEC_STRIDE * 15
endfunc_x264
/* void x264_predict_16x16_p_lasx( pixel *src )
*/
// Multiplier tables for the 16x16 plane predictor below.
const mulc
.short 1, 2, 3, 4, 5, 6, 7, 8          // weights for the H/V gradient sums
endconst
const muld
.short 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15   // column ramp
endconst
function_x264 predict_16x16_p_lasx
// Plane (gradient) prediction for the 16x16 luma block (LASX).
// Computes horizontal gradient H from the top row, vertical gradient V
// from the left column, then fills row y with clip((i00 + b*x) >> 5),
// advancing i00 by c each row.
la.local t0, mulc
vld vr3, t0, 0                         // weights {1..8}
fld.d f4, a0, 8 - FDEC_STRIDE          // top[8..15]
fld.d f5, a0, -1 - FDEC_STRIDE         // top[-1..6]
vxor.v vr0, vr0, vr0
vilvl.b vr4, vr0, vr4                  // widen to u16
vilvl.b vr5, vr0, vr5
vshuf4i.h vr5, vr5, 0x1b               // reverse within 64-bit halves
vbsll.v vr6, vr5, 8
vpackod.d vr5, vr6, vr5                // complete the 8-lane reversal
vsub.h vr4, vr4, vr5                   // top[8+i] - top[6-i]
vmul.h vr4, vr4, vr3                   // weight by i+1
vhaddw.w.h vr4, vr4, vr4
vhaddw.d.w vr4, vr4, vr4
vhaddw.q.d vr4, vr4, vr4
vpickve2gr.w t0, vr4, 0 /* H */
fld.d f6, a0, FDEC_STRIDE * 8 - 1      // left pixels below centre
fld.d f7, a0, FDEC_STRIDE * 9 - 1
fld.d f8, a0, FDEC_STRIDE * 10 - 1
fld.d f9, a0, FDEC_STRIDE * 11 - 1
fld.d f10, a0, FDEC_STRIDE * 12 - 1
fld.d f11, a0, FDEC_STRIDE * 13 - 1
fld.d f12, a0, FDEC_STRIDE * 14 - 1
fld.d f13, a0, FDEC_STRIDE * 15 - 1
vilvl.b vr6, vr7, vr6
vilvl.b vr8, vr9, vr8
vilvl.b vr10, vr11, vr10
vilvl.b vr12, vr13, vr12
vilvl.h vr6, vr8, vr6
vilvl.h vr10, vr12, vr10
vilvl.w vr6, vr10, vr6
fld.d f7, a0, FDEC_STRIDE * 6 - 1      // left pixels above centre
fld.d f8, a0, FDEC_STRIDE * 5 - 1
fld.d f9, a0, FDEC_STRIDE * 4 - 1
fld.d f10, a0, FDEC_STRIDE * 3 - 1
fld.d f11, a0, FDEC_STRIDE * 2 - 1
fld.d f12, a0, FDEC_STRIDE - 1
fld.d f13, a0, -1
fld.d f14, a0, -FDEC_STRIDE - 1        // top-left corner
vilvl.b vr7, vr8, vr7
vilvl.b vr9, vr10, vr9
vilvl.b vr11, vr12, vr11
vilvl.b vr13, vr14, vr13
vilvl.h vr7, vr9, vr7
vilvl.h vr11, vr13, vr11
vilvl.w vr7, vr11, vr7
vilvl.b vr6, vr0, vr6                  // widen to u16
vilvl.b vr7, vr0, vr7
vsub.h vr6, vr6, vr7                   // below - above differences
vmul.h vr6, vr6, vr3
vhaddw.w.h vr6, vr6, vr6
vhaddw.d.w vr6, vr6, vr6
vhaddw.q.d vr6, vr6, vr6
vpickve2gr.w t1, vr6, 0 /* V */
ld.bu t2, a0, FDEC_STRIDE * 15 - 1
ld.bu t3, a0, 15 - FDEC_STRIDE
add.w t2, t2, t3
slli.w t2, t2, 4 /* a */
slli.w t3, t0, 2
add.w t0, t0, t3                       // 5 * H
addi.w t0, t0, 32
srai.w t0, t0, 6 /* b */
slli.w t3, t1, 2
add.w t1, t1, t3                       // 5 * V
addi.w t1, t1, 32
srai.w t1, t1, 6 /* c */
add.w t3, t0, t1
slli.w t4, t3, 3
sub.w t4, t4, t3                       // 7 * (b + c)
sub.w t5, t2, t4
addi.w t5, t5, 16 /* i00 */
la.local t3, muld
xvld xr14, t3, 0                       // column ramp 0..15
xvreplgr2vr.h xr12, t0
xvmul.h xr12, xr12, xr14               // b * x per column
.rept 16
xvreplgr2vr.h xr14, t5                 // row base i00 + c*y
xvadd.h xr13, xr12, xr14
xvssrani.bu.h xr15, xr13, 5            // >>5, saturate to u8
xvstelm.d xr15, a0, 0, 0
xvstelm.d xr15, a0, 8, 2
addi.d a0, a0, FDEC_STRIDE
add.w t5, t5, t1                       // advance base by c
.endr
endfunc_x264
function_x264 predict_16x16_p_lsx
// Plane (gradient) prediction for the 16x16 luma block (LSX).
// Identical math to the LASX version; the 16-column b*x ramp is split
// across two 128-bit vectors (vr22/vr23) and re-packed per row.
la.local t0, mulc
vld vr3, t0, 0                         // weights {1..8}
fld.d f4, a0, 8 - FDEC_STRIDE          // top[8..15]
fld.d f5, a0, -1 - FDEC_STRIDE         // top[-1..6]
vxor.v vr0, vr0, vr0
vilvl.b vr4, vr0, vr4
vilvl.b vr5, vr0, vr5
vshuf4i.h vr5, vr5, 0x1b               // reverse within 64-bit halves
vbsll.v vr6, vr5, 8
vpackod.d vr5, vr6, vr5                // complete the 8-lane reversal
vsub.h vr4, vr4, vr5
vmul.h vr4, vr4, vr3
vhaddw.w.h vr4, vr4, vr4
vhaddw.d.w vr4, vr4, vr4
vhaddw.q.d vr4, vr4, vr4
vpickve2gr.w t0, vr4, 0 /* H */
fld.d f6, a0, FDEC_STRIDE * 8 - 1      // left pixels below centre
fld.d f7, a0, FDEC_STRIDE * 9 - 1
fld.d f8, a0, FDEC_STRIDE * 10 - 1
fld.d f9, a0, FDEC_STRIDE * 11 - 1
fld.d f10, a0, FDEC_STRIDE * 12 - 1
fld.d f11, a0, FDEC_STRIDE * 13 - 1
fld.d f12, a0, FDEC_STRIDE * 14 - 1
fld.d f13, a0, FDEC_STRIDE * 15 - 1
vilvl.b vr6, vr7, vr6
vilvl.b vr8, vr9, vr8
vilvl.b vr10, vr11, vr10
vilvl.b vr12, vr13, vr12
vilvl.h vr6, vr8, vr6
vilvl.h vr10, vr12, vr10
vilvl.w vr6, vr10, vr6
fld.d f7, a0, FDEC_STRIDE * 6 - 1      // left pixels above centre
fld.d f8, a0, FDEC_STRIDE * 5 - 1
fld.d f9, a0, FDEC_STRIDE * 4 - 1
fld.d f10, a0, FDEC_STRIDE * 3 - 1
fld.d f11, a0, FDEC_STRIDE * 2 - 1
fld.d f12, a0, FDEC_STRIDE - 1
fld.d f13, a0, -1
fld.d f14, a0, -FDEC_STRIDE - 1        // top-left corner
vilvl.b vr7, vr8, vr7
vilvl.b vr9, vr10, vr9
vilvl.b vr11, vr12, vr11
vilvl.b vr13, vr14, vr13
vilvl.h vr7, vr9, vr7
vilvl.h vr11, vr13, vr11
vilvl.w vr7, vr11, vr7
vilvl.b vr6, vr0, vr6
vilvl.b vr7, vr0, vr7
vsub.h vr6, vr6, vr7
vmul.h vr6, vr6, vr3
vhaddw.w.h vr6, vr6, vr6
vhaddw.d.w vr6, vr6, vr6
vhaddw.q.d vr6, vr6, vr6
vpickve2gr.w t1, vr6, 0 /* V */
ld.bu t2, a0, FDEC_STRIDE * 15 - 1
ld.bu t3, a0, 15 - FDEC_STRIDE
add.w t2, t2, t3
slli.w t2, t2, 4 /* a */
slli.w t3, t0, 2
add.w t0, t0, t3                       // 5 * H
addi.w t0, t0, 32
srai.w t0, t0, 6 /* b */
slli.w t3, t1, 2
add.w t1, t1, t3                       // 5 * V
addi.w t1, t1, 32
srai.w t1, t1, 6 /* c */
add.w t3, t0, t1
slli.w t4, t3, 3
sub.w t4, t4, t3                       // 7 * (b + c)
sub.w t5, t2, t4
addi.w t5, t5, 16 /* i00 */
la.local t3, muld
vld vr14, t3, 0                        // ramp 0..7
vld vr20, t3, 16                       // ramp 8..15
vreplgr2vr.h vr12, t0
vmul.h vr22, vr12, vr14                // b * x, columns 0..7
vmul.h vr23, vr12, vr20                // b * x, columns 8..15
.rept 16
vreplgr2vr.h vr14, t5                  // row base i00 + c*y
vadd.h vr13, vr22, vr14
vadd.h vr16, vr23, vr14
vssrani.bu.h vr15, vr13, 5             // >>5, saturate; low 8 columns
vssrani.bu.h vr17, vr16, 5             // high 8 columns
vpermi.w vr17, vr15, 0x44              // splice the two halves into one row
vst vr17, a0, 0
addi.d a0, a0, FDEC_STRIDE
add.w t5, t5, t1                       // advance base by c
.endr
endfunc_x264
#endif /* !HIGH_BIT_DEPTH */
|
aestream/faery
| 1,734
|
src/mp4/x264/common/loongarch/loongson_util.S
|
/*****************************************************************************
* loongson_util.S: loongson utility macros
*****************************************************************************
* Copyright (C) 2023-2024 x264 project
*
* Authors: Shiyou Yin <yinshiyou-hf@loongson.cn>
* Xiwei Gu <guxiwei-hf@loongson.cn>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
/* Token-pasting helpers used to build the per-bit-depth symbol prefix. */
#define GLUE(a, b) a ## b
#define JOIN(a, b) GLUE(a, b)
/* Set prefix as needed. */
/* ASM_REF expands to the exported-symbol prefix, e.g. "x264_8_" when
 * BIT_DEPTH is 8, so each function becomes x264_<depth>_<name>. */
#define ASM_REF JOIN(JOIN(x264_, BIT_DEPTH), _)
/* Fixed row strides (in bytes) of x264's fenc/fdec pixel buffers. */
#define FENC_STRIDE 16
#define FDEC_STRIDE 32
/* function_x264 name [, align]
 * Opens an exported function labelled ASM_REF\name in .text.
 * It also (re)defines the paired endfunc_x264 macro, which emits the
 * return (jirl $r0, $r1, 0 == "jr ra"), sets the symbol size, and then
 * purges itself so the next function_x264 can define it afresh. */
.macro function_x264 name, align=DEFAULT_ALIGN
.macro endfunc_x264
jirl $r0, $r1, 0x0
.size ASM_REF\name, . - ASM_REF\name
.purgem endfunc_x264
.endm
.text ;
.align \align ;
.globl ASM_REF\name ;
.type ASM_REF\name, @function ;
ASM_REF\name: ;
.endm
|
aestream/faery
| 41,598
|
src/mp4/x264/common/loongarch/quant-a.S
|
/*****************************************************************************
* quant-a.S: LoongArch quantization and level-run
*****************************************************************************
* Copyright (C) 2023-2024 x264 project
*
* Authors: Shiyou Yin <yinshiyou-hf@loongson.cn>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "loongson_asm.S"
#include "loongson_util.S"
/* Word-permutation indices (0,4,1,5,...) used by coeff_last64_lasx to
 * interleave the two 128-bit lanes of an LASX register. */
const last64_shuf
.int 0, 4, 1, 5, 2, 6, 3, 7
endconst
/*
* int quant_4x4x4( dctcoef dct[4][16], udctcoef mf[16], udctcoef bias[16] )
*/
/* Quantize one 256-bit vector (16 int16 coefficients) in place:
 *   dct[i] = sign(dct[i]) * (((|dct[i]| + bias[i]) * mf[i]) >> 16)
 * \s1 = dct pointer, \s2 = mf vector, \s3 = bias vector,
 * \s4 = destination register (result is also stored back to \s1). */
.macro QUANT_ONE_LASX s1, s2, s3, s4
xvld xr1, \s1, 0 /* Load dctcoef */
xvadda.h \s4, xr1, \s3 // |dct| + bias
xvmuh.hu \s4, \s4, \s2 // high half of u16 multiply == >> 16
xvsigncov.h \s4, xr1, \s4 // restore the sign of the original coeff
xvst \s4, \s1, 0
.endm
/* int quant_4x4x4( dctcoef dct[4][16], udctcoef mf[16], udctcoef bias[16] )
 * Quantizes four 4x4 sub-blocks with shared mf/bias tables.
 * Returns a 4-bit mask in a0: bit b is set when sub-block b contains at
 * least one nonzero quantized coefficient (matching the C reference). */
function_x264 quant_4x4x4_lasx
xvld xr2, a1, 0 // mf
xvld xr3, a2, 0 // bias
QUANT_ONE_LASX a0, xr2, xr3, xr4
addi.d a0, a0, 32
QUANT_ONE_LASX a0, xr2, xr3, xr0
xvssrlni.h.w xr0, xr4, 0
addi.d a0, a0, 32
QUANT_ONE_LASX a0, xr2, xr3, xr4
addi.d a0, a0, 32
QUANT_ONE_LASX a0, xr2, xr3, xr5
// Collapse the four per-block results into per-block zero flags and
// derive the nonzero mask (xori 0xf inverts the "all zero" bits).
// NOTE(review): the alsl/and combination of the two lane masks is
// intricate — presumed correct as shipped upstream; verify if touched.
xvssrlni.h.w xr5, xr4, 0
xvssrlni.h.w xr5, xr0, 0
xvseqi.w xr5, xr5, 0
xvmskltz.w xr5, xr5
xvpickve2gr.w t0, xr5, 0
xvpickve2gr.w t1, xr5, 4
alsl.d t0, t1, t0, 4
and t0, t0, t1
xori a0, t0, 0xf
endfunc_x264
/* LSX (128-bit) variant of QUANT_ONE: quantizes 16 int16 coefficients
 * held in two 128-bit vectors, in place at \tmp1:
 *   dct[i] = sign(dct[i]) * (((|dct[i]| + bias[i]) * mf[i]) >> 16)
 * \tmp2/\tmp3 = mf (low/high), \tmp4/\tmp5 = bias (low/high),
 * \tmp6/\tmp7 = result registers (also stored back). */
.macro QUANT_ONE_LSX tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7
vld vr0, \tmp1, 0
vld vr1, \tmp1, 16
vadda.h \tmp6, vr0, \tmp4 // |dct| + bias (low 8)
vadda.h \tmp7, vr1, \tmp5 // |dct| + bias (high 8)
vmuh.hu \tmp6, \tmp6, \tmp2 // * mf >> 16
vmuh.hu \tmp7, \tmp7, \tmp3
vsigncov.h \tmp6, vr0, \tmp6 // restore signs
vsigncov.h \tmp7, vr1, \tmp7
vst \tmp6, \tmp1, 0
vst \tmp7, \tmp1, 16
.endm
/* LSX variant of quant_4x4x4: quantizes four 4x4 sub-blocks and returns
 * the 4-bit "sub-block has nonzero coeffs" mask in a0. */
function_x264 quant_4x4x4_lsx
vld vr2, a1, 0 // mf low/high
vld vr3, a1, 16
vld vr4, a2, 0 // bias low/high
vld vr5, a2, 16
QUANT_ONE_LSX a0, vr2, vr3, vr4, vr5, vr6, vr7
addi.d a0, a0, 32
QUANT_ONE_LSX a0, vr2, vr3, vr4, vr5, vr8, vr9
vssrlni.h.w vr8, vr6, 0
vssrlni.h.w vr9, vr7, 0
addi.d a0, a0, 32
QUANT_ONE_LSX a0, vr2, vr3, vr4, vr5, vr10, vr11
addi.d a0, a0, 32
QUANT_ONE_LSX a0, vr2, vr3, vr4, vr5, vr12, vr13
// Narrow all four block results, test words for zero and gather the
// sign bits into per-block flags; xori 0xf turns "all zero" into the
// nonzero mask expected by callers.
vssrlni.h.w vr12, vr10, 0
vssrlni.h.w vr13, vr11, 0
vssrlni.h.w vr12, vr8, 0
vssrlni.h.w vr13, vr9, 0
vseqi.w vr12, vr12, 0
vseqi.w vr13, vr13, 0
vmskltz.w vr12, vr12
vmskltz.w vr13, vr13
vpickve2gr.w t0, vr12, 0
vpickve2gr.w t1, vr13, 0
alsl.d t0, t1, t0, 4
and t0, t0, t1
xori a0, t0, 0xf
endfunc_x264
/* int quant_4x4( dctcoef dct[16], udctcoef mf[16], udctcoef bias[16] )
 * Quantizes one 4x4 block in place; returns 1 in a0 if any quantized
 * coefficient is nonzero, else 0 (maskeqz: a0 = t2 ? 1 : 0). */
function_x264 quant_4x4_lsx
vld vr2, a1, 0
vld vr3, a1, 16
vld vr4, a2, 0
vld vr5, a2, 16
QUANT_ONE_LSX a0, vr2, vr3, vr4, vr5, vr10, vr11
vor.v vr22, vr10, vr11 // OR both halves: nonzero iff any coeff nonzero
vpickve2gr.d t0, vr22, 0
vpickve2gr.d t1, vr22, 1
or t2, t0, t1
addi.w t3, zero, 1
maskeqz a0, t3, t2 // a0 = (t2 != 0) ? 1 : 0
endfunc_x264
/* int quant_8x8( dctcoef dct[64], udctcoef mf[64], udctcoef bias[64] )
 * Quantizes one 8x8 block (four 16-coeff groups, each with its own
 * mf/bias slice) in place; returns 1 if any coefficient is nonzero. */
function_x264 quant_8x8_lsx
vld vr2, a1, 0
vld vr3, a1, 16
vld vr4, a2, 0
vld vr5, a2, 16
QUANT_ONE_LSX a0, vr2, vr3, vr4, vr5, vr12, vr13
addi.d a0, a0, 32
vld vr2, a1, 32
vld vr3, a1, 48
vld vr4, a2, 32
vld vr5, a2, 48
QUANT_ONE_LSX a0, vr2, vr3, vr4, vr5, vr14, vr15
addi.d a0, a0, 32
vld vr2, a1, 64
vld vr3, a1, 80
vld vr4, a2, 64
vld vr5, a2, 80
QUANT_ONE_LSX a0, vr2, vr3, vr4, vr5, vr22, vr23
addi.d a0, a0, 32
vld vr2, a1, 96
vld vr3, a1, 112
vld vr4, a2, 96
vld vr5, a2, 112
QUANT_ONE_LSX a0, vr2, vr3, vr4, vr5, vr7, vr8
// OR-reduce all eight result vectors down to a single scalar.
vor.v vr12, vr12, vr14
vor.v vr13, vr13, vr15
vor.v vr22, vr22, vr7
vor.v vr23, vr23, vr8
vor.v vr12, vr12, vr22
vor.v vr13, vr13, vr23
vor.v vr11, vr12, vr13
vpickve2gr.d t0, vr11, 0
vpickve2gr.d t1, vr11, 1
or t2, t0, t1
addi.w t3, zero, 1
maskeqz a0, t3, t2 // a0 = (t2 != 0) ? 1 : 0
endfunc_x264
/* int quant_4x4_dc( dctcoef dct[16], int mf, int bias )
 * DC quantization: scalar mf/bias are broadcast, the product is done in
 * 32-bit precision and narrowed with an arithmetic >>16.
 * Returns 1 if any output coefficient is nonzero. */
function_x264 quant_4x4_dc_lsx
vld vr0, a0, 0
vld vr1, a0, 16
vreplgr2vr.w vr2, a1 // broadcast mf
vreplgr2vr.w vr3, a2 // broadcast bias
vslei.h vr4, vr0, 0 // sign masks of the inputs (<= 0)
vslei.h vr5, vr1, 0
vexth.w.h vr7, vr0 // widen int16 -> int32
vsllwil.w.h vr6, vr0, 0
vexth.w.h vr9, vr1
vsllwil.w.h vr8, vr1, 0
vadda.w vr6, vr3, vr6 // |dct| + bias
vadda.w vr7, vr3, vr7
vadda.w vr8, vr3, vr8
vadda.w vr9, vr3, vr9
vmul.w vr6, vr6, vr2 // * mf
vmul.w vr7, vr7, vr2
vmul.w vr8, vr8, vr2
vmul.w vr9, vr9, vr2
vsrani.h.w vr8, vr6, 16 // >> 16 and narrow back to int16
vsrani.h.w vr9, vr7, 16
// Re-interleave the narrowed halves back into source order.
vpermi.w vr10, vr9, 0x0E
vpermi.w vr9, vr8, 0x44
vpermi.w vr10, vr8, 0x4E
vneg.h vr11, vr9
vneg.h vr12, vr10
vbitsel.v vr13, vr9, vr11, vr4 // pick negated value where input was <= 0
vbitsel.v vr14, vr10, vr12, vr5
vst vr13, a0, 0
vst vr14, a0, 16
// Nonzero test uses the negated copies; -x == 0 iff x == 0, so this is
// equivalent to testing the stored results.
vor.v vr15, vr11, vr12
vpickve2gr.d t0, vr15, 0
vpickve2gr.d t1, vr15, 1
or t2, t0, t1
addi.w t3, zero, 1
maskeqz a0, t3, t2
endfunc_x264
/*
* int quant_2x2_dc( dctcoef dct[4], int mf, int bias )
*/
/* Quantizes the four chroma-DC coefficients in place with a broadcast
 * scalar mf/bias (32-bit multiply, arithmetic >>16 narrow).
 * Returns 1 if any output coefficient is nonzero. */
function_x264 quant_2x2_dc_lsx
fld.d f0, a0, 0 // load the 4 int16 coeffs (64 bits)
vreplgr2vr.w vr1, a1 // broadcast mf
vreplgr2vr.w vr2, a2 // broadcast bias
vslei.h vr3, vr0, 0 // sign mask
vsllwil.w.h vr4, vr0, 0 // widen to int32
vadda.w vr4, vr4, vr2 // |dct| + bias
vmul.w vr4, vr4, vr1 // * mf
vsrani.h.w vr4, vr4, 16 // >> 16, narrow
vneg.h vr8, vr4
vbitsel.v vr9, vr4, vr8, vr3 // restore signs
vstelm.d vr9, a0, 0, 0
vpickve2gr.w t0, vr9, 0
vpickve2gr.w t1, vr9, 1
or t2, t0, t1
addi.w t3, zero, 1
maskeqz a0, t3, t2 // a0 = any nonzero ? 1 : 0
endfunc_x264
/*
* int coeff_last64_c(dctcoef *l)
*/
/* int coeff_last64( dctcoef *l )
 * Returns the index of the last (highest-index) nonzero coefficient of
 * a 64-entry block. Strategy: narrow coeffs to bytes, build a per-coeff
 * nonzero bitmask, then count leading zeros to locate the last set bit.
 * NOTE(review): behavior for an all-zero block mirrors the C reference
 * only through the clz arithmetic — confirm before relying on it. */
function_x264 coeff_last64_lasx
addi.w t0, zero, 63
xvxor.v xr20, xr0, xr0 // NOTE(review): xr20 appears unused below
xvld xr0, a0, 0
xvld xr1, a0, 32
xvld xr2, a0, 64
xvld xr3, a0, 96
xvldi xr4, 1
la.local t1, last64_shuf
xvld xr7, t1, 0
xvldi xr9, 0x408
xvldi xr10, 0x401
xvssrlni.bu.h xr1, xr0, 0 // saturate-narrow int16 -> u8
xvssrlni.bu.h xr3, xr2, 0
xvsle.bu xr5, xr4, xr1 // 0xFF where coeff != 0
xvsle.bu xr6, xr4, xr3
xvssrlni.bu.h xr6, xr5, 4 // compress flags to nibbles
xvperm.w xr6, xr6, xr7 // fix lane order (see last64_shuf)
xvclz.w xr7, xr6
xvssrlni.hu.w xr7, xr7, 2
xvpermi.d xr8, xr7, 0xd8
xvsub.h xr9, xr9, xr8
xvsll.h xr10, xr10, xr9
xvssrlni.bu.h xr10, xr10, 1
xvclz.d xr11, xr10
xvpickve2gr.w t3, xr11, 0
sub.w a0, t0, t3 // 63 - leading-zero count = last index
endfunc_x264
/* LSX variant of coeff_last64: same algorithm as the LASX version, but
 * the 64 coefficients are processed as eight 128-bit vectors and the
 * lane reshuffling is done with vpermi.w sequences. */
function_x264 coeff_last64_lsx
addi.w t0, zero, 63
vxor.v vr20, vr0, vr0 // NOTE(review): vr20 appears unused below
vld vr0, a0, 0
vld vr1, a0, 16
vld vr2, a0, 32
vld vr3, a0, 48
vld vr4, a0, 64
vld vr5, a0, 80
vld vr6, a0, 96
vld vr7, a0, 112
vldi vr8, 1
vldi vr9, 0x408
vldi vr10, 0x401
// Saturate-narrow all int16 coeffs to u8.
vssrlni.bu.h vr0, vr0, 0
vssrlni.bu.h vr1, vr1, 0
vssrlni.bu.h vr2, vr2, 0
vssrlni.bu.h vr3, vr3, 0
vssrlni.bu.h vr4, vr4, 0
vssrlni.bu.h vr5, vr5, 0
vssrlni.bu.h vr6, vr6, 0
vssrlni.bu.h vr7, vr7, 0
vpermi.w vr2, vr0, 0x44
vpermi.w vr3, vr1, 0x44
vpermi.w vr6, vr4, 0x44
vpermi.w vr7, vr5, 0x44
// Per-coefficient nonzero flags (0xFF where |coeff| >= 1).
vsle.bu vr2, vr8, vr2
vsle.bu vr3, vr8, vr3
vsle.bu vr6, vr8, vr6
vsle.bu vr7, vr8, vr7
vssrlni.bu.h vr2, vr2, 4
vssrlni.bu.h vr3, vr3, 4
vssrlni.bu.h vr6, vr6, 4
vssrlni.bu.h vr7, vr7, 4
vpermi.w vr6, vr2, 0x44
vpermi.w vr7, vr3, 0x44
vpermi.w vr11, vr7, 0x0E
vpermi.w vr7, vr6, 0x44
vpermi.w vr7, vr7, 0xD8
vpermi.w vr11, vr6, 0x4E
vpermi.w vr11, vr11, 0xD8
// clz per word, then fold into a single bitmask whose leading-zero
// count gives the distance from index 63 to the last nonzero coeff.
vclz.w vr7, vr7
vclz.w vr11, vr11
vssrlni.hu.w vr7, vr7, 2
vssrlni.hu.w vr11, vr11, 2
vpermi.w vr12, vr11, 0x0E
vpermi.w vr11, vr7, 0x44
vpermi.w vr12, vr7, 0x4E
vsub.h vr11, vr9, vr11
vsub.h vr12, vr9, vr12
vsll.h vr13, vr10, vr11
vsll.h vr14, vr10, vr12
vssrlni.bu.h vr13, vr13, 1
vssrlni.bu.h vr14, vr14, 1
vclz.d vr15, vr14
vpickve2gr.w t1, vr15, 0
sub.w a0, t0, t1
endfunc_x264
/*
* int coeff_last16_c(dctcoef *l)
*/
/* int coeff_last16( dctcoef *l )
 * Index of the last nonzero coefficient in a 16-entry block:
 * narrow to bytes, flag nonzeros, clz over the packed flag nibbles. */
function_x264 coeff_last16_lasx
addi.w t0, zero, 15
xvld xr0, a0, 0
xvldi xr2, 1
xvssrlni.bu.h xr0, xr0, 0 // int16 -> u8 (saturating)
xvpermi.d xr1, xr0, 0xd8
xvsle.bu xr3, xr2, xr1 // 0xFF where coeff != 0
xvssrlni.bu.h xr3, xr3, 4 // flags -> one nibble per coeff
xvclz.d xr4, xr3
xvpickve2gr.w t1, xr4, 0
srai.w t1, t1, 2 // clz/4: nibbles -> coefficient count
sub.w a0, t0, t1
endfunc_x264
/* LSX variant of coeff_last16 (see coeff_last16_lasx for the scheme). */
function_x264 coeff_last16_lsx
addi.w t0, zero, 15
vld vr0, a0, 0
vld vr1, a0, 16
vldi vr2, 1
vssrlni.bu.h vr0, vr0, 0 // int16 -> u8 (saturating)
vssrlni.bu.h vr1, vr1, 0
vpermi.w vr1, vr0, 0x44 // merge both halves, source order
vsle.bu vr3, vr2, vr1 // nonzero flags
vssrlni.bu.h vr3, vr3, 4 // one nibble per coeff
vclz.d vr4, vr3
vpickve2gr.w t1, vr4, 0
srai.w t1, t1, 2 // nibble count of leading zeros
sub.w a0, t0, t1
endfunc_x264
/*
* int coeff_last15_c(dctcoef *l)
*/
/* int coeff_last15( dctcoef *l )
 * Same as coeff_last16 but the 16th slot is not part of the block:
 * it is zeroed (vinsgr2vr.h ... 7) before the search so it can never
 * be reported as the last coefficient. */
function_x264 coeff_last15_lasx
addi.w t0, zero, 15
vld vr0, a0, 0
vld vr1, a0, 16
xvldi xr3, 1
vinsgr2vr.h vr1, zero, 7 // mask out element 15
xvpermi.q xr1, xr0, 0x20
xvssrlni.bu.h xr1, xr1, 0
xvpermi.d xr2, xr1, 0xd8
xvsle.bu xr4, xr3, xr2
xvssrlni.bu.h xr4, xr4, 4
xvclz.d xr5, xr4
xvpickve2gr.w t1, xr5, 0
srai.w t1, t1, 2
sub.w a0, t0, t1
endfunc_x264
/* LSX variant of coeff_last15: element 15 is zeroed before the search
 * (see coeff_last15_lasx). */
function_x264 coeff_last15_lsx
addi.w t0, zero, 15
vld vr0, a0, 0
vld vr1, a0, 16
vldi vr2, 1
vinsgr2vr.h vr1, zero, 7 // mask out element 15
vssrlni.bu.h vr0, vr0, 0
vssrlni.bu.h vr1, vr1, 0
vpermi.w vr1, vr0, 0x44
vsle.bu vr3, vr2, vr1
vssrlni.bu.h vr3, vr3, 4
vclz.d vr4, vr3
vpickve2gr.w t1, vr4, 0
srai.w t1, t1, 2
sub.w a0, t0, t1
endfunc_x264
/*
* int coeff_last8_c(dctcoef *l)
*/
/* int coeff_last8( dctcoef *l )
 * The 8 int16 coeffs fill both 64-bit halves of vr0. clz of each half
 * locates the last nonzero 16-bit lane; if the high half (elements
 * 4..7) is entirely zero (clz == 64), fall back to the low half and
 * bias the base index by -4. Final index = base - clz/16. */
function_x264 coeff_last8_lsx
addi.w t0, zero, 7
vld vr0, a0, 0
vclz.d vr1, vr0
vpickve2gr.w t1, vr1, 0 // clz of low 64 bits (elements 0..3)
vpickve2gr.w t2, vr1, 2 // clz of high 64 bits (elements 4..7)
li.d t3, 64
bne t2, t3, .LAST8_LOW_LSX // high half has a nonzero coeff
addi.d t4, t1, 0 // high half empty: search low half
addi.d t0, t0, -4 // base index becomes 3
b .LAST8_END_LSX
.LAST8_LOW_LSX:
addi.d t4, t2, 0
.LAST8_END_LSX:
srai.w t4, t4, 4 // clz/16 = number of zero lanes
sub.w a0, t0, t4
endfunc_x264
/*
* int coeff_last4_c(dctcoef *l)
*/
/* int coeff_last4( dctcoef *l )
 * All 4 int16 coeffs fit in the low 64 bits: index = 3 - clz/16. */
function_x264 coeff_last4_lsx
addi.w t0, zero, 3
vld vr0, a0, 0
vclz.d vr1, vr0
vpickve2gr.w t1, vr1, 0
srai.w t1, t1, 4 // clz/16 = zero 16-bit lanes before the last coeff
sub.w a0, t0, t1
endfunc_x264
// (dct[i] * dequant_mf[i]) << (i_qbits)
// (dct[i] * dequant_mf[i]) << (i_qbits)
/* Dequant helper for i_qbits >= 0: widen 16 int16 coeffs to int32,
 * multiply by the int32 dequant table, shift left by \in0.
 * \a0 = dct ptr, \a1 = dequant_mf ptr, \out0/\out1 = 8 results each. */
.macro DCT_MF a0, a1, in0, out0, out1
vld vr1, \a0, 0
xvld xr2, \a1, 0
vext2xv.w.h xr5, xr1 // sign-extend int16 -> int32
xvmul.w xr5, xr5, xr2
xvsll.w \out0, xr5, \in0
vld vr1, \a0, 16
xvld xr2, \a1, 32
vext2xv.w.h xr5, xr1
xvmul.w xr5, xr5, xr2
xvsll.w \out1, xr5, \in0
.endm
// (dct[i] * dequant_mf[i] + f) >> (-i_qbits)
// (dct[i] * dequant_mf[i] + f) >> (-i_qbits)
/* Dequant helper for i_qbits < 0: same as DCT_MF but the shift is a
 * rounding arithmetic right shift (vsrar rounds to nearest, supplying
 * the "+ f" term of the reference formula). \in0 = -i_qbits. */
.macro DCT_MF_F a0, a1, in0, out0, out1
vld vr1, \a0, 0
xvld xr2, \a1, 0
vext2xv.w.h xr5, xr1
xvmul.w xr5, xr5, xr2
xvsrar.w \out0, xr5, \in0 // rounding shift right
vld vr1, \a0, 16
xvld xr2, \a1, 32
vext2xv.w.h xr5, xr1
xvmul.w xr5, xr5, xr2
xvsrar.w \out1, xr5, \in0
.endm
/*
* void dequant_4x4( dctcoef dct[16], int dequant_mf[6][16], int i_qp )
*/
/* void dequant_4x4( dctcoef dct[16], int dequant_mf[6][16], int i_qp )
 * i_qbits = i_qp/6 - 4; table row = dequant_mf[i_qp%6] (64 bytes/row).
 * Positive i_qbits -> shift left; negative -> rounding shift right. */
function_x264 dequant_4x4_lasx
addi.w t1, zero, 6
addi.w t2, zero, 4
div.w t0, a2, t1
sub.w t0, t0, t2 // i_qp/6 - 4
mod.w t1, a2, t1 // i_qp%6
slli.w t1, t1, 6 // row offset: (i_qp%6) * 16 * sizeof(int)
add.d a1, a1, t1
blt t0, zero, .DQ4x4_DEQUANT_SHR
// i_qbits >= 0
xvreplgr2vr.w xr0, t0
DCT_MF a0, a1, xr0, xr6, xr7
b .DQ4x4_END
.DQ4x4_DEQUANT_SHR:
sub.w t4, zero, t0 // shift amount = -i_qbits
xvreplgr2vr.w xr4, t4
DCT_MF_F a0, a1, xr4, xr6, xr7
.DQ4x4_END:
xvpickev.h xr8, xr7, xr6 // narrow int32 results back to int16
xvpermi.d xr8, xr8, 0xd8 // restore source lane order
xvst xr8, a0, 0
endfunc_x264
/* LSX variant of DCT_MF (i_qbits >= 0): 16 coeffs widened to int32 in
 * four 128-bit vectors, multiplied by the table, shifted left by \in0. */
.macro DCT_MF_LSX tmp0, tmp1, in0, out0, out1, out2, out3
vld vr0, \tmp0, 0
vld vr1, \tmp1, 0
vld vr2, \tmp1, 16
vexth.w.h vr4, vr0 // upper 4 coeffs -> int32
vsllwil.w.h vr3, vr0, 0 // lower 4 coeffs -> int32
vmul.w vr3, vr3, vr1
vmul.w vr4, vr4, vr2
vsll.w \out0, vr3, \in0
vsll.w \out1, vr4, \in0
vld vr0, \tmp0, 16
vld vr1, \tmp1, 32
vld vr2, \tmp1, 48
vsllwil.w.h vr3, vr0, 0
vpermi.w vr4, vr0, 0x0E // move upper half down, then widen
vsllwil.w.h vr4, vr4, 0
vmul.w vr3, vr3, vr1
vmul.w vr4, vr4, vr2
vsll.w \out2, vr3, \in0
vsll.w \out3, vr4, \in0
.endm
/* LSX variant of DCT_MF_F (i_qbits < 0): as DCT_MF_LSX but with a
 * rounding arithmetic right shift by \in0 = -i_qbits. */
.macro DCT_MF_F_LSX tmp0, tmp1, in0, out0, out1, out2, out3
vld vr0, \tmp0, 0
vld vr1, \tmp1, 0
vld vr2, \tmp1, 16
vexth.w.h vr4, vr0
vsllwil.w.h vr3, vr0, 0
vmul.w vr3, vr3, vr1
vmul.w vr4, vr4, vr2
vsrar.w \out0, vr3, \in0 // rounding shift right
vsrar.w \out1, vr4, \in0
vld vr0, \tmp0, 16
vld vr1, \tmp1, 32
vld vr2, \tmp1, 48
vexth.w.h vr4, vr0
vsllwil.w.h vr3, vr0, 0
vmul.w vr3, vr3, vr1
vmul.w vr4, vr4, vr2
vsrar.w \out2, vr3, \in0
vsrar.w \out3, vr4, \in0
.endm
/* LSX variant of dequant_4x4 (see dequant_4x4_lasx for the formula). */
function_x264 dequant_4x4_lsx
addi.w t1, zero, 6
addi.w t2, zero, 4
div.w t0, a2, t1 // i_qp/6
sub.w t0, t0, t2 // i_qbits = i_qp/6 - 4
mod.w t1, a2, t1 // i_qp%6
slli.w t1, t1, 6 // row offset into dequant_mf
add.d a1, a1, t1
blt t0, zero, .DQ4x4_DEQUANT_SHR_LSX
vreplgr2vr.w vr6, t0
DCT_MF_LSX a0, a1, vr6, vr7, vr8, vr9, vr10
b .DQ4x4_END_LSX
.DQ4x4_DEQUANT_SHR_LSX:
sub.w t4, zero, t0
vreplgr2vr.w vr6, t4
DCT_MF_F_LSX a0, a1, vr6, vr7, vr8, vr9, vr10
.DQ4x4_END_LSX:
// Narrow int32 results to int16 and restore source ordering.
vpickev.h vr11, vr9, vr7
vpickev.h vr12, vr10, vr8
vpermi.w vr13, vr12, 0x0E
vpermi.w vr12, vr11, 0x44
vpermi.w vr13, vr11, 0x4E
vst vr12, a0, 0
vst vr13, a0, 16
endfunc_x264
/*
* void dequant_8x8( dctcoef dct[64], int dequant_mf[6][64], int i_qp )
*/
/* void dequant_8x8( dctcoef dct[64], int dequant_mf[6][64], int i_qp )
 * i_qbits = i_qp/6 - 6; table row = dequant_mf[i_qp%6] (256 bytes/row).
 * Processes 16 coefficients per iteration, 4 iterations total. */
function_x264 dequant_8x8_lasx
addi.w t1, zero, 6
div.w t0, a2, t1
sub.w t0, t0, t1 // i_qbits = i_qp/6 - 6
mod.w t1, a2, t1 // i_qp%6
slli.w t1, t1, 8 // row offset: (i_qp%6) * 64 * sizeof(int)
add.d a1, a1, t1
blt t0, zero, .DQ8x8_DEQUANT_SHR
// i_qbits >= 0
xvreplgr2vr.w xr0, t0
DCT_MF a0, a1, xr0, xr6, xr7
xvpickev.h xr8, xr7, xr6
xvpermi.d xr8, xr8, 0xd8
xvst xr8, a0, 0
.rept 3
addi.d a0, a0, 32
addi.d a1, a1, 64
DCT_MF a0, a1, xr0, xr6, xr7
xvpickev.h xr8, xr7, xr6
xvpermi.d xr8, xr8, 0xd8
xvst xr8, a0, 0
.endr
b .DQ8x8_END
// i_qbits < 0
.DQ8x8_DEQUANT_SHR:
sub.w t4, zero, t0
xvreplgr2vr.w xr4, t4
DCT_MF_F a0, a1, xr4, xr6, xr7
xvpickev.h xr8, xr7, xr6
xvpermi.d xr8, xr8, 0xd8
xvst xr8, a0, 0
.rept 3
addi.d a0, a0, 32
addi.d a1, a1, 64
DCT_MF_F a0, a1, xr4, xr6, xr7
xvpickev.h xr8, xr7, xr6
xvpermi.d xr8, xr8, 0xd8
xvst xr8, a0, 0
.endr
.DQ8x8_END:
endfunc_x264
/* LSX variant of dequant_8x8 (see dequant_8x8_lasx for the formula). */
function_x264 dequant_8x8_lsx
addi.w t1, zero, 6
div.w t0, a2, t1
sub.w t0, t0, t1 // i_qbits = i_qp/6 - 6
mod.w t1, a2, t1 // i_qp%6
slli.w t1, t1, 8 // row offset into dequant_mf
add.d a1, a1, t1
blt t0, zero, .DQ8x8_DEQUANT_SHR_LSX
vreplgr2vr.w vr6, t0
DCT_MF_LSX a0, a1, vr6, vr7, vr8, vr9, vr10
vpickev.h vr11, vr9, vr7
vpickev.h vr12, vr10, vr8
vpermi.w vr13, vr12, 0x0E
vpermi.w vr12, vr11, 0x44
vpermi.w vr13, vr11, 0x4E
vst vr12, a0, 0
vst vr13, a0, 16
.rept 3
addi.d a0, a0, 32
addi.d a1, a1, 64
DCT_MF_LSX a0, a1, vr6, vr7, vr8, vr9, vr10
vpickev.h vr11, vr9, vr7
vpickev.h vr12, vr10, vr8
vpermi.w vr13, vr12, 0x0E
vpermi.w vr12, vr11, 0x44
vpermi.w vr13, vr11, 0x4E
vst vr12, a0, 0
vst vr13, a0, 16
.endr
b .DQ8x8_END_LSX
.DQ8x8_DEQUANT_SHR_LSX:
sub.w t4, zero, t0
vreplgr2vr.w vr6, t4
DCT_MF_F_LSX a0, a1, vr6, vr7, vr8, vr9, vr10
vpickev.h vr11, vr9, vr7
vpickev.h vr12, vr10, vr8
vpermi.w vr13, vr12, 0x0E
vpermi.w vr12, vr11, 0x44
vpermi.w vr13, vr11, 0x4E
vst vr12, a0, 0
vst vr13, a0, 16
.rept 3
addi.d a0, a0, 32
addi.d a1, a1, 64
DCT_MF_F_LSX a0, a1, vr6, vr7, vr8, vr9, vr10
vpickev.h vr11, vr9, vr7
vpickev.h vr12, vr10, vr8
vpermi.w vr13, vr12, 0x0E
vpermi.w vr12, vr11, 0x44
vpermi.w vr13, vr11, 0x4E
vst vr12, a0, 0
vst vr13, a0, 16
.endr
.DQ8x8_END_LSX:
endfunc_x264
/*
* void dequant_4x4_dc( dctcoef dct[16], int dequant_mf[6][16], int i_qp )
*/
/* void dequant_4x4_dc( dctcoef dct[16], int dequant_mf[6][16], int i_qp )
 * DC dequantization: all 16 coefficients are scaled by the single entry
 * dequant_mf[i_qp%6][0]; i_qbits = i_qp/6 - 6. */
function_x264 dequant_4x4_dc_lasx
addi.w t0, zero, 6
div.w t1, a2, t0
sub.w t1, t1, t0 // i_qbits = i_qp/6 - 6
blt t1, zero, .DQ4x4DC_LT_ZERO
// i_qbits >= 0
mod.w t2, a2, t0
slli.w t2, t2, 6
ldx.w t0, a1, t2 // dequant_mf[i_qp%6][0]
sll.w t0, t0, t1 // fold the left shift into the scalar factor
vld vr1, a0, 0
vld vr10, a0, 16
xvreplgr2vr.w xr2, t0
vext2xv.w.h xr3, xr1 // widen int16 -> int32
xvmul.w xr6, xr3, xr2
vext2xv.w.h xr3, xr10
xvmul.w xr7, xr3, xr2
b .DQ4x4DC_END
// i_qbits < 0
.DQ4x4DC_LT_ZERO:
mod.w t2, a2, t0
slli.w t2, t2, 6
ldx.w t0, a1, t2
sub.w t3, zero, t1 // shift amount = -i_qbits
vld vr1, a0, 0
vld vr10, a0, 16
xvreplgr2vr.w xr2, t0
xvreplgr2vr.w xr4, t3
vext2xv.w.h xr5, xr1
xvmul.w xr5, xr5, xr2
xvsrar.w xr6, xr5, xr4 // rounding shift right
vext2xv.w.h xr5, xr10
xvmul.w xr5, xr5, xr2
xvsrar.w xr7, xr5, xr4
.DQ4x4DC_END:
xvpickev.h xr8, xr7, xr6 // narrow back to int16, restore order
xvpermi.d xr8, xr8, 0xd8
xvst xr8, a0, 0
endfunc_x264
/* LSX variant of dequant_4x4_dc (see dequant_4x4_dc_lasx). */
function_x264 dequant_4x4_dc_lsx
addi.w t0, zero, 6
div.w t1, a2, t0
sub.w t1, t1, t0 // i_qbits = i_qp/6 - 6
blt t1, zero, .DQ4x4DC_LT_ZERO_LSX
mod.w t2, a2, t0
slli.w t2, t2, 6
ldx.w t0, a1, t2 // dequant_mf[i_qp%6][0]
sll.w t0, t0, t1 // pre-shift the scalar factor
vld vr1, a0, 0
vld vr2, a0, 16
vreplgr2vr.w vr3, t0
vexth.w.h vr6, vr1 // widen int16 -> int32
vsllwil.w.h vr5, vr1, 0
vmul.w vr5, vr5, vr3
vmul.w vr6, vr6, vr3
vexth.w.h vr8, vr2
vsllwil.w.h vr7, vr2, 0
vmul.w vr7, vr7, vr3
vmul.w vr8, vr8, vr3
b .DQ4x4DC_END_LSX
.DQ4x4DC_LT_ZERO_LSX:
mod.w t2, a2, t0
slli.w t2, t2, 6
ldx.w t0, a1, t2
sub.w t3, zero, t1 // shift amount = -i_qbits
vld vr1, a0, 0
vld vr2, a0, 16
vreplgr2vr.w vr3, t0
vreplgr2vr.w vr4, t3
vexth.w.h vr6, vr1
vsllwil.w.h vr5, vr1, 0
vexth.w.h vr8, vr2
vsllwil.w.h vr7, vr2, 0
vmul.w vr5, vr5, vr3
vmul.w vr6, vr6, vr3
vmul.w vr7, vr7, vr3
vmul.w vr8, vr8, vr3
vsrar.w vr5, vr5, vr4 // rounding shift right
vsrar.w vr6, vr6, vr4
vsrar.w vr7, vr7, vr4
vsrar.w vr8, vr8, vr4
.DQ4x4DC_END_LSX:
// Narrow to int16 and restore source ordering.
vpickev.h vr9, vr7, vr5
vpickev.h vr10, vr8, vr6
vpermi.w vr11, vr10, 0x0E
vpermi.w vr10, vr9, 0x44
vpermi.w vr11, vr9, 0x4E
vst vr10, a0, 0
vst vr11, a0, 16
endfunc_x264
/*
* int decimate_score15( dctcoef *dct )
*/
/* int decimate_score15( dctcoef *dct )
 * Scores dct[1..15] (DC skipped via the +2 byte offset): finds the last
 * nonzero coefficient with the coeff_last vector scheme, then walks
 * backwards scalar-wise, adding x264_decimate_table4[run_of_zeros] per
 * nonzero coeff. Any coeff with |value| > 1 returns 9 immediately. */
function_x264 decimate_score15_lsx
addi.w t0, zero, 15
la.local t3, x264_decimate_table4
addi.d t4, a0, 2 // skip the DC coefficient
vld vr0, t4, 0
vld vr1, t4, 16
vldi vr3, 1
vinsgr2vr.h vr1, zero, 7 // mask out the element past the block
vssrlni.bu.h vr0, vr0, 0
vssrlni.bu.h vr1, vr1, 0
vpermi.w vr2, vr1, 0x0E
vpermi.w vr1, vr0, 0x44
vpermi.w vr2, vr0, 0x4E
vsle.bu vr4, vr3, vr1 // nonzero flags
vsle.bu vr5, vr3, vr2
vssrlni.bu.h vr4, vr4, 4
vssrlni.bu.h vr5, vr5, 4
vclz.d vr4, vr4
vclz.d vr5, vr5
vpickve2gr.w t1, vr4, 0
srai.w t1, t1, 2
sub.w t2, t0, t1 // index of the last nonzero coefficient
addi.w t0, zero, 2
move a0, zero // score accumulator
slli.d t2, t2, 1 // byte offset of the current coefficient
.LOOP_SCORE_15_LSX:
blt t2, zero, .END_SCORE_15_LSX
ldx.h t5, t4, t2
addi.d t6, t5, 1
bltu t0, t6, .RET_SCORE_15_1_LSX // |coeff| > 1 -> score 9
addi.d t2, t2, -2
move t5, zero // run length of zeros
.WHILE_SCORE_15_LSX:
blt t2, zero, .END_WHILE_15_LSX
ldx.h t1, t4, t2
bnez t1, .END_WHILE_15_LSX
addi.d t2, t2, -2
addi.d t5, t5, 1
b .WHILE_SCORE_15_LSX
.END_WHILE_15_LSX:
ldx.b t1, t3, t5 // score += decimate_table4[run]
add.d a0, a0, t1
b .LOOP_SCORE_15_LSX
.RET_SCORE_15_1_LSX:
addi.d a0, zero, 9
jirl $r0, $r1, 0x0
.END_SCORE_15_LSX:
endfunc_x264
/*
* int decimate_score16( dctcoef *dct )
*/
/* int decimate_score16( dctcoef *dct )
 * Like decimate_score15 but includes the DC coefficient (no +2 offset).
 * Vector part finds the last nonzero coefficient; the scalar loop then
 * accumulates x264_decimate_table4[run_of_zeros] per nonzero coeff and
 * bails out with 9 as soon as a coefficient with |value| > 1 is seen.
 * Fix: the original emitted "addi.w t0, zero, 15" twice in a row
 * (nothing modifies t0 in between); the duplicate is removed. */
function_x264 decimate_score16_lsx
addi.w t0, zero, 15
la.local t3, x264_decimate_table4
vld vr0, a0, 0
vld vr1, a0, 16
vldi vr2, 1
vssrlni.bu.h vr0, vr0, 0
vssrlni.bu.h vr1, vr1, 0
vpermi.w vr3, vr1, 0x0E
vpermi.w vr1, vr0, 0x44
vpermi.w vr3, vr0, 0x4E
vsle.bu vr4, vr2, vr1 // nonzero flags
vsle.bu vr5, vr2, vr3
vssrlni.bu.h vr4, vr4, 4
vssrlni.bu.h vr5, vr5, 4
vclz.d vr4, vr4
vclz.d vr5, vr5
vpickve2gr.w t1, vr4, 0
srai.w t1, t1, 2
sub.w t2, t0, t1 // index of the last nonzero coefficient
move t4, a0
addi.d t0, zero, 2
move a0, zero // score accumulator
slli.d t2, t2, 1 // byte offset of the current coefficient
.LOOP_SCORE_16_LSX:
blt t2, zero, .END_SCORE_16_LSX
ldx.h t5, t4, t2
addi.d t6, t5, 1
bltu t0, t6, .RET_SCORE_16_1_LSX // |coeff| > 1 -> score 9
addi.d t2, t2, -2
move t5, zero // run length of zeros
.WHILE_SCORE_16_LSX:
blt t2, zero, .END_WHILE_16_LSX
ldx.h t1, t4, t2
bnez t1, .END_WHILE_16_LSX
addi.d t2, t2, -2
addi.d t5, t5, 1
b .WHILE_SCORE_16_LSX
.END_WHILE_16_LSX:
ldx.b t1, t3, t5 // score += decimate_table4[run]
add.d a0, a0, t1
b .LOOP_SCORE_16_LSX
.RET_SCORE_16_1_LSX:
addi.d a0, zero, 9
jirl $r0, $r1, 0x0
.END_SCORE_16_LSX:
endfunc_x264
/*
* int decimate_score64( dctcoef *dct )
*/
/* int decimate_score64( dctcoef *dct )
 * 8x8 variant: the vector prologue is the coeff_last64_lsx scheme to
 * find the last nonzero coefficient; the scalar loop then accumulates
 * x264_decimate_table8[run_of_zeros] per nonzero coefficient and
 * returns 9 immediately on any coefficient with |value| > 1. */
function_x264 decimate_score64_lsx
addi.w t0, zero, 63
la.local t3, x264_decimate_table8
vxor.v vr20, vr0, vr0 // NOTE(review): vr20 appears unused below
vld vr0, a0, 0
vld vr1, a0, 16
vld vr2, a0, 32
vld vr3, a0, 48
vld vr4, a0, 64
vld vr5, a0, 80
vld vr6, a0, 96
vld vr7, a0, 112
vldi vr8, 1
vldi vr9, 0x408
vldi vr10, 0x401
// Narrow to bytes, flag nonzeros, fold into a mask, clz -> last index
// (identical to coeff_last64_lsx).
vssrlni.bu.h vr0, vr0, 0
vssrlni.bu.h vr1, vr1, 0
vssrlni.bu.h vr2, vr2, 0
vssrlni.bu.h vr3, vr3, 0
vssrlni.bu.h vr4, vr4, 0
vssrlni.bu.h vr5, vr5, 0
vssrlni.bu.h vr6, vr6, 0
vssrlni.bu.h vr7, vr7, 0
vpermi.w vr2, vr0, 0x44
vpermi.w vr3, vr1, 0x44
vpermi.w vr6, vr4, 0x44
vpermi.w vr7, vr5, 0x44
vsle.bu vr2, vr8, vr2
vsle.bu vr3, vr8, vr3
vsle.bu vr6, vr8, vr6
vsle.bu vr7, vr8, vr7
vssrlni.bu.h vr2, vr2, 4
vssrlni.bu.h vr3, vr3, 4
vssrlni.bu.h vr6, vr6, 4
vssrlni.bu.h vr7, vr7, 4
vpermi.w vr6, vr2, 0x44
vpermi.w vr7, vr3, 0x44
vpermi.w vr11, vr7, 0x0E
vpermi.w vr7, vr6, 0x44
vpermi.w vr7, vr7, 0xD8
vpermi.w vr11, vr6, 0x4E
vpermi.w vr11, vr11, 0xD8
vclz.w vr7, vr7
vclz.w vr11, vr11
vssrlni.hu.w vr7, vr7, 2
vssrlni.hu.w vr11, vr11, 2
vpermi.w vr12, vr11, 0x0E
vpermi.w vr11, vr7, 0x44
vpermi.w vr12, vr7, 0x4E
vsub.h vr11, vr9, vr11
vsub.h vr12, vr9, vr12
vsll.h vr13, vr10, vr11
vsll.h vr14, vr10, vr12
vssrlni.bu.h vr13, vr13, 1
vssrlni.bu.h vr14, vr14, 1
vclz.d vr15, vr14
vpickve2gr.w t1, vr15, 0
sub.w t2, t0, t1 // index of the last nonzero coefficient
move t4, a0
addi.d t0, zero, 2
slli.d t2, t2, 1 // byte offset of the current coefficient
move a0, zero // score accumulator
.LOOP_SCORE_64_LSX:
blt t2, zero, .END_SCORE_64_LSX
ldx.h t5, t4, t2
addi.d t6, t5, 1
bltu t0, t6, .RET_SCORE_64_1_LSX // |coeff| > 1 -> score 9
addi.d t2, t2, -2
move t5, zero // run length of zeros
.WHILE_SCORE_64_LSX:
blt t2, zero, .END_WHILE_64_LSX
ldx.h t1, t4, t2
bnez t1, .END_WHILE_64_LSX
addi.d t2, t2, -2
addi.d t5, t5, 1
b .WHILE_SCORE_64_LSX
.END_WHILE_64_LSX:
ldx.b t1, t3, t5 // score += decimate_table8[run]
add.d a0, a0, t1
b .LOOP_SCORE_64_LSX
.RET_SCORE_64_1_LSX:
addi.d a0, zero, 9
jirl $r0, $r1, 0x0
.END_SCORE_64_LSX:
endfunc_x264
/*
* int coeff_level_run16( dctcoef *dct, x264_run_level_t *runlevel )
*/
/* int coeff_level_run16( dctcoef *dct, x264_run_level_t *runlevel )
 * Builds the (level, run) representation: stores runlevel->last, writes
 * the nonzero levels (last to first) into the 16-byte-aligned level
 * array inside *runlevel, builds a bitmask of their positions, and
 * returns the number of nonzero coefficients.
 * NOTE(review): the level pointer is computed as (a1 + 23) & ~15 —
 * presumably the aligned runlevel->level field; verify the struct
 * layout against x264's run_level_t if it changes. */
function_x264 coeff_level_run16_lasx
addi.w t0, zero, 15
xvld xr0, a0, 0
xvldi xr2, 1
xvssrlni.bu.h xr0, xr0, 0 // narrow int16 -> u8
xvpermi.d xr1, xr0, 0xd8
xvsle.bu xr3, xr2, xr1 // nonzero flags
xvsrlni.b.h xr3, xr3, 4 // flags -> one nibble per coeff
xvpickve2gr.du t8, xr3, 0 // t8 = nibble flag word, MSB = coeff 15
clz.d t1, t8
srai.w t1, t1, 2
sub.w t0, t0, t1 // Index of the first non-zero element starting from the highest bit
st.w t0, a1, 0x00 // Store runlevel->last
addi.d t3, a1, 23
nor t2, zero, zero
addi.d t2, t2, -15 // t2 = ~15: 16-byte alignment mask
and t3, t3, t2 // runlevel->level
xor t4, t4, t4 // mask
xor t5, t5, t5 // total: number of non-zero elements
addi.w t6, zero, 1 // const 1
.LOOP_COEFF_LEVEL_RUN16_LASX:
slli.w t7, t0, 1
ldx.h t2, a0, t7 // fetch dct[t0]
st.h t2, t3, 0 // append to level[]
addi.d t3, t3, 2
addi.w t5, t5, 1
sll.w t2, t6, t0
or t4, t4, t2 // set bit t0 in the mask
bge zero, t4, .END_COEFF_LEVEL_RUN16_LASX
addi.w t0, t0, -1
// Shift the consumed nibbles out of t8 and re-run clz to find the
// next nonzero coefficient below the current index.
slli.w t1, t1, 2
addi.w t1, t1, 4
sll.d t8, t8, t1
clz.d t1, t8
srai.w t1, t1, 2
sub.w t0, t0, t1 // Index of the first non-zero element starting from the highest bit
bge t0, zero, .LOOP_COEFF_LEVEL_RUN16_LASX
.END_COEFF_LEVEL_RUN16_LASX:
st.w t4, a1, 4 // store runlevel->mask
move a0, t5 // return total
endfunc_x264
/* 15-coefficient variant of coeff_level_run16_lasx: element 15 is
 * zeroed before the scan so only dct[0..14] contribute. Same output
 * contract (runlevel->last, level[], mask, returns total). */
function_x264 coeff_level_run15_lasx
addi.w t0, zero, 15
vld vr0, a0, 0
vld vr1, a0, 16
xvldi xr3, 1
vinsgr2vr.h vr1, zero, 7 // mask out element 15
xvpermi.q xr1, xr0, 0x20
xvssrlni.bu.h xr1, xr1, 0
xvpermi.d xr2, xr1, 0xd8
xvsle.bu xr4, xr3, xr2 // nonzero flags
xvsrlni.b.h xr4, xr4, 4
xvpickve2gr.du t8, xr4, 0 // nibble flag word
clz.d t1, t8
srai.w t1, t1, 2
sub.w t0, t0, t1 // Index of the first non-zero element starting from the highest bit
st.w t0, a1, 0x00 // Store runlevel->last
addi.d t3, a1, 23
nor t2, zero, zero
addi.d t2, t2, -15 // t2 = ~15: 16-byte alignment mask
and t3, t3, t2 // runlevel->level
xor t4, t4, t4 // mask
xor t5, t5, t5 // total: number of non-zero elements
addi.w t6, zero, 1 // const 1
.LOOP_COEFF_LEVEL_RUN15_LASX:
slli.w t7, t0, 1
ldx.h t2, a0, t7
st.h t2, t3, 0 // append level
addi.d t3, t3, 2
addi.w t5, t5, 1
sll.w t2, t6, t0
or t4, t4, t2 // set bit t0 in the mask
bge zero, t4, .END_COEFF_LEVEL_RUN15_LASX
addi.w t0, t0, -1
slli.w t1, t1, 2 // shift consumed nibbles out, rescan
addi.w t1, t1, 4
sll.d t8, t8, t1
clz.d t1, t8
srai.w t1, t1, 2
sub.w t0, t0, t1 // Index of the first non-zero element starting from the highest bit
bge t0, zero, .LOOP_COEFF_LEVEL_RUN15_LASX
.END_COEFF_LEVEL_RUN15_LASX:
st.w t4, a1, 4 // store runlevel->mask
move a0, t5 // return total
endfunc_x264
/* LSX variant of coeff_level_run16 (see coeff_level_run16_lasx for the
 * algorithm and the runlevel layout note). */
function_x264 coeff_level_run16_lsx
addi.w t0, zero, 15
vld vr0, a0, 0
vld vr1, a0, 16
vldi vr2, 1
vssrlni.bu.h vr0, vr0, 0 // narrow int16 -> u8
vssrlni.bu.h vr1, vr1, 0
vpermi.w vr1, vr0, 0x44
vsle.bu vr3, vr2, vr1 // nonzero flags
vsrlni.b.h vr3, vr3, 4
vpickve2gr.du t8, vr3, 0 // nibble flag word
clz.d t1, t8
srai.w t1, t1, 2
sub.w t0, t0, t1 // Index of the first non-zero element starting from the highest bit
st.w t0, a1, 0x00 // Store runlevel->last
addi.d t3, a1, 23
nor t2, zero, zero
addi.d t2, t2, -15 // t2 = ~15: 16-byte alignment mask
and t3, t3, t2 // runlevel->level
xor t4, t4, t4 // mask
xor t5, t5, t5 // total: number of non-zero elements
addi.w t6, zero, 1 // const 1
.LOOP_COEFF_LEVEL_RUN16_LSX:
slli.w t7, t0, 1
ldx.h t2, a0, t7
st.h t2, t3, 0 // append level
addi.d t3, t3, 2
addi.w t5, t5, 1
sll.w t2, t6, t0
or t4, t4, t2 // set bit t0 in the mask
bge zero, t4, .END_COEFF_LEVEL_RUN16_LSX
addi.w t0, t0, -1
slli.w t1, t1, 2 // shift consumed nibbles out, rescan
addi.w t1, t1, 4
sll.d t8, t8, t1
clz.d t1, t8
srai.w t1, t1, 2
sub.w t0, t0, t1 // Index of the first non-zero element starting from the highest bit
bge t0, zero, .LOOP_COEFF_LEVEL_RUN16_LSX
.END_COEFF_LEVEL_RUN16_LSX:
st.w t4, a1, 4 // store runlevel->mask
move a0, t5 // return total
endfunc_x264
/* LSX variant of coeff_level_run15: element 15 is zeroed before the
 * scan; otherwise identical to coeff_level_run16_lsx. */
function_x264 coeff_level_run15_lsx
addi.w t0, zero, 15
vld vr0, a0, 0
vld vr1, a0, 16
vldi vr2, 1
vinsgr2vr.h vr1, zero, 7 // mask out element 15
vssrlni.bu.h vr0, vr0, 0
vssrlni.bu.h vr1, vr1, 0
vpermi.w vr1, vr0, 0x44
vsle.bu vr3, vr2, vr1 // nonzero flags
vsrlni.b.h vr3, vr3, 4
vpickve2gr.du t8, vr3, 0 // nibble flag word
clz.d t1, t8
srai.w t1, t1, 2
sub.w t0, t0, t1 // Index of the first non-zero element starting from the highest bit
st.w t0, a1, 0x00 // Store runlevel->last
addi.d t3, a1, 23
nor t2, zero, zero
addi.d t2, t2, -15 // t2 = ~15: 16-byte alignment mask
and t3, t3, t2 // runlevel->level
xor t4, t4, t4 // mask
xor t5, t5, t5 // total: number of non-zero elements
addi.w t6, zero, 1 // const 1
.LOOP_COEFF_LEVEL_RUN15_LSX:
slli.w t7, t0, 1
ldx.h t2, a0, t7
st.h t2, t3, 0 // append level
addi.d t3, t3, 2
addi.w t5, t5, 1
sll.w t2, t6, t0
or t4, t4, t2 // set bit t0 in the mask
bge zero, t4, .END_COEFF_LEVEL_RUN15_LSX
addi.w t0, t0, -1
slli.w t1, t1, 2 // shift consumed nibbles out, rescan
addi.w t1, t1, 4
sll.d t8, t8, t1
clz.d t1, t8
srai.w t1, t1, 2
sub.w t0, t0, t1 // Index of the first non-zero element starting from the highest bit
bge t0, zero, .LOOP_COEFF_LEVEL_RUN15_LSX
.END_COEFF_LEVEL_RUN15_LSX:
st.w t4, a1, 4 // store runlevel->mask
move a0, t5 // return total
endfunc_x264
/* 8-coefficient variant of coeff_level_run: only one 128-bit load; the
 * upper flag half is forced to zero (vr1 cleared) so the scan starts
 * from coefficient 7 at most. Same runlevel output contract. */
function_x264 coeff_level_run8_lsx
addi.w t0, zero, 15
vld vr0, a0, 0
vxor.v vr1, vr1, vr1 // upper 8 flags = 0 (block has only 8 coeffs)
vldi vr2, 1
vssrlni.bu.h vr0, vr0, 0
vpermi.w vr1, vr0, 0x44
vsle.bu vr3, vr2, vr1 // nonzero flags
vsrlni.b.h vr3, vr3, 4
vpickve2gr.du t8, vr3, 0 // nibble flag word
clz.d t1, t8
srai.w t1, t1, 2
sub.w t0, t0, t1 // Index of the first non-zero element starting from the highest bit
st.w t0, a1, 0x00 // Store runlevel->last
addi.d t3, a1, 23
nor t2, zero, zero
addi.d t2, t2, -15 // t2 = ~15: 16-byte alignment mask
and t3, t3, t2 // runlevel->level
xor t4, t4, t4 // mask
xor t5, t5, t5 // total: number of non-zero elements
addi.w t6, zero, 1 // const 1
.LOOP_COEFF_LEVEL_RUN8_LSX:
slli.w t7, t0, 1
ldx.h t2, a0, t7
st.h t2, t3, 0 // append level
addi.d t3, t3, 2
addi.w t5, t5, 1
sll.w t2, t6, t0
or t4, t4, t2 // set bit t0 in the mask
bge zero, t4, .END_COEFF_LEVEL_RUN8_LSX
addi.w t0, t0, -1
slli.w t1, t1, 2 // shift consumed nibbles out, rescan
addi.w t1, t1, 4
sll.d t8, t8, t1
clz.d t1, t8
srai.w t1, t1, 2
sub.w t0, t0, t1 // Index of the first non-zero element starting from the highest bit
bge t0, zero, .LOOP_COEFF_LEVEL_RUN8_LSX
.END_COEFF_LEVEL_RUN8_LSX:
st.w t4, a1, 4 // store runlevel->mask
move a0, t5 // return total
endfunc_x264
|
aestream/faery
| 128,184
|
src/mp4/x264/common/loongarch/pixel-a.S
|
/*****************************************************************************
* pixel-a.S: LoongArch pixel metrics
*****************************************************************************
* Copyright (C) 2023-2024 x264 project
*
* Authors: Hecai Yuan <yuanhecai@loongson.cn>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "loongson_asm.S"
#include "loongson_util.S"
#if !HIGH_BIT_DEPTH
/* +1/-1 multiplier pattern used by the dp2 (dot-product) Hadamard
 * loads: first 8 bytes all +1, second 8 alternate +1/-1. */
const hmul_8p
.byte 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1
.byte 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1
endconst
/* Mask clearing the two DC positions of the 4x4 sub-block sums
 * (hadamard_ac excludes DC terms). */
const mask_ac4b
.short 0, -1, 0, -1, -1, -1, -1, -1
.short 0, -1, 0, -1, -1, -1, -1, -1
endconst
/* Mask clearing the single 8x8 DC position. */
const mask_ac8
.short 0, -1, -1, -1, -1, -1, -1, -1
.short 0, -1, -1, -1, -1, -1, -1, -1
endconst
/* Load four 16-pixel rows from a0 (strides a1, t0, t1; a0 advances by
 * t2) and apply the \n5 (hmul_8p) +/-1 dot-product, producing widened
 * int16 rows in registers \n1..\n4. */
.macro LOAD_INC_8x4W n1, n2, n3, n4, n5
vld $vr\n1, a0, 0
vldx $vr\n2, a0, a1
vldx $vr\n3, a0, t0
vldx $vr\n4, a0, t1
xvpermi.d xr18, $xr\n1, 0x05 // duplicate the row across both lanes
xvpermi.d xr19, $xr\n2, 0x05
xvpermi.d xr20, $xr\n3, 0x05
xvpermi.d xr21, $xr\n4, 0x05
add.d a0, a0, t2 // advance source by 4 rows
xvdp2.h.bu.b $xr\n1, xr18, $xr\n5 // pairwise u8*±1 dot products -> h
xvdp2.h.bu.b $xr\n2, xr19, $xr\n5
xvdp2.h.bu.b $xr\n3, xr20, $xr\n5
xvdp2.h.bu.b $xr\n4, xr21, $xr\n5
.endm
/* Two butterflies without a temp register:
 *   a' = a + b,  b' = b - a   (via b = 2b - (a+b))
 *   c' = c + d,  d' = d - c */
.macro SUMSUB_BADC a, b, c, d
xvadd.h \a, \a, \b
xvadd.h \c, \c, \d
xvadd.h \b, \b, \b // b = 2*b_old
xvadd.h \d, \d, \d
xvsub.h \b, \b, \a // 2b - (a+b) = b_old - a_old
xvsub.h \d, \d, \c
.endm
/* 4-point Hadamard transform across the four registers (two butterfly
 * stages built from SUMSUB_BADC). */
.macro HADAMARD4_V a, b, c, d
SUMSUB_BADC \a, \b, \c, \d
SUMSUB_BADC \a, \c, \b, \d
.endm
/* Butterfly between the interleaved even/odd 16-bit lanes of \a and \b:
 * packs odd lanes into \a, even lanes into \b, then a' = a+b, b' = b-a.
 * \tmp is clobbered. */
.macro HADAMARD_1 a, b, tmp
xmov \tmp, \a
xvpackod.h \a, \b, \a // odd lanes of (b,a)
xvpackev.h \b, \b, \tmp // even lanes of (b, a_old)
xvadd.h \tmp, \a, \b
xvsub.h \b, \b, \a
xmov \a, \tmp
.endm
/* Word-level pick of the even/odd 32-bit lanes of \a/\b, absolute-sum
 * against xr17 (zeroed by the caller), keeping the lane-wise maximum in
 * \a. \c is clobbered. */
.macro HADAMARD_2 a, b, c
xvpickod.w \c, \b, \a
xvpickev.w \a, \b, \a
xvadda.h \a, \a, xr17 // |a| (xr17 is zero)
xvadda.h \c, \c, xr17
xvmax.h \a, \a, \c
.endm
/* Instantiates pixel_hadamard_ac_WxH: calls the 16x8 worker once (or
 * twice for h == 16, accumulating xr8/xr9), then reduces the two
 * accumulators and packs the pair into the uint64 return value
 * (upper 32 bits: sum>>2, lower 32 bits: sum>>1).
 * NOTE(review): the bl target is spelled with the x264_8_ prefix, so
 * this macro assumes BIT_DEPTH == 8 (file is under !HIGH_BIT_DEPTH). */
.macro HADAMARD_AC_WXH_LASX w, h
function_x264 pixel_hadamard_ac_\w\()x\h\()_lasx
add.d t0, a1, a1 // t0 = 2*stride
add.d t1, a1, t0 // t1 = 3*stride
add.d t2, t1, a1 // t2 = 4*stride
xvxor.v xr17, xr17, xr17 // zero reg used by HADAMARD_2
move t4, ra // save return address around bl
bl x264_8_hadamard_ac_16x8_lasx
.if \h == 16
xmov xr11, xr9
xmov xr10, xr8
bl x264_8_hadamard_ac_16x8_lasx
xvadd.h xr9, xr9, xr11 // accumulate both 16x8 halves
xvadd.h xr8, xr8, xr10
.endif
move ra, t4 // restore return address
// Horizontal reduction of xr8 (sa) and xr9 (ac) to scalars.
xvhaddw.wu.hu xr8, xr8, xr8
xvhaddw.du.wu xr8, xr8, xr8
xvhaddw.qu.du xr8, xr8, xr8
xvpickve2gr.wu t0, xr8, 0
xvpickve2gr.wu t1, xr8, 4
add.d t0, t0, t1
xvhaddw.wu.hu xr9, xr9, xr9
xvhaddw.du.wu xr9, xr9, xr9
xvhaddw.qu.du xr9, xr9, xr9
xvpickve2gr.wu t1, xr9, 0
xvpickve2gr.wu t2, xr9, 4
add.d t1, t1, t2
srli.d t0, t0, 2
srli.d t1, t1, 1
slli.d t0, t0, 32 // pack: (t0>>2) << 32 | (t1>>1)
add.d a0, t0, t1
endfunc_x264
.endm
/* Worker for pixel_hadamard_ac_16x8: loads 8 rows of 16 pixels through
 * the hmul_8p dot-product, runs the Hadamard stages, masks out the DC
 * terms (mask_ac4b / mask_ac8) and leaves the two SATD-style absolute
 * sums in xr9 (4x4 ac) and xr8 (8x8 ac) for the wrapper macro.
 * Clobbers xr0-xr9, xr17-xr21; expects xr17 == 0 and the stride
 * multiples in a1/t0/t1/t2 as set up by HADAMARD_AC_WXH_LASX. */
function_x264 hadamard_ac_16x8_lasx
/* Load intermediate variable */
la.local t3, hmul_8p
xvld xr8, t3, 0
LOAD_INC_8x4W 0, 1, 2, 3, 8
HADAMARD4_V xr0, xr1, xr2, xr3
LOAD_INC_8x4W 4, 5, 6, 7, 8
HADAMARD4_V xr4, xr5, xr6, xr7
HADAMARD_1 xr0, xr1, xr8
HADAMARD_1 xr2, xr3, xr8
xmov xr18, xr1
HADAMARD_1 xr4, xr5, xr8
HADAMARD_1 xr6, xr7, xr8
xmov xr19, xr2
xmov xr20, xr3
xvadda.h xr1, xr0, xr4
xvsub.h xr21, xr4, xr0
xvadd.h xr0, xr4, xr0
// 4x4 ac sum: drop the per-4x4 DC terms, then accumulate |sums|.
la.local t3, mask_ac4b
xvld xr8, t3, 0
xvand.v xr1, xr1, xr8
xvadda.h xr1, xr1, xr5
xvadda.h xr1, xr1, xr18
xvadda.h xr1, xr1, xr19
xvadda.h xr1, xr1, xr20
xvadda.h xr1, xr1, xr6
xvadda.h xr9, xr1, xr7 // xr9 = 4x4 ac accumulator
xvadd.h xr3, xr7, xr20
xvsub.h xr7, xr7, xr20
xvadd.h xr2, xr6, xr19
xvsub.h xr6, xr6, xr19
xvadd.h xr1, xr5, xr18
xvsub.h xr5, xr5, xr18
HADAMARD_2 xr3, xr7, xr18
HADAMARD_2 xr2, xr6, xr19
HADAMARD_2 xr1, xr5, xr20
xvpickod.w xr5, xr21, xr0
xvpickev.w xr0, xr21, xr0
xmov xr4, xr5
xvadd.h xr5, xr0, xr4
xvsub.h xr4, xr4, xr0
xvadd.h xr2, xr2, xr3
xvadd.h xr2, xr2, xr1
xvadd.h xr2, xr2, xr2
// 8x8 ac sum: mask the 8x8 DC term, accumulate into xr8.
la.local t3, mask_ac8
xvld xr8, t3, 0
xvand.v xr0, xr5, xr8
xvadda.h xr2, xr2, xr4
xvadda.h xr8, xr2, xr0 // xr8 = 8x8 ac accumulator
endfunc_x264
/* Instantiate pixel_hadamard_ac for the 16x8 and 16x16 block sizes. */
HADAMARD_AC_WXH_LASX 16, 8
HADAMARD_AC_WXH_LASX 16, 16
/* uint64_t hadamard_ac_8x8_lasx(uint8_t *p_pix,
* int32_t i_stride)
*/
/* uint64_t hadamard_ac_8x8_lasx( uint8_t *p_pix, int32_t i_stride )
 * Self-contained 8x8 hadamard_ac: loads 8 rows of 8 pixels, computes
 * the 4x4-stage absolute sum (minus the four 4x4 DC terms, t3) and the
 * 8x8-stage absolute sum, and returns them packed as
 * ((ac8 - dc) << 32) | (ac4 - dc) in a0. */
function_x264 hadamard_ac_8x8_lasx
/* Load intermediate variable */
slli.d t0, a1, 1 // 2*stride
add.d t1, a1, t0 // 3*stride
slli.d t2, a1, 2 // 4*stride
LSX_LOADX_4 a0, a1, t0, t1, vr0, vr1, vr2, vr3
add.d a0, a0, t2
LSX_LOADX_4 a0, a1, t0, t1, vr4, vr5, vr6, vr7
// Pack the eight 8-byte rows into two 256-bit registers.
vilvl.d vr8, vr1, vr0
vilvl.d vr9, vr3, vr2
vilvl.d vr10, vr5, vr4
vilvl.d vr11, vr7, vr6
xvpermi.q xr8, xr10, 0x02
xvpermi.q xr9, xr11, 0x02
// Horizontal butterfly stages on even/odd bytes.
xvpickev.b xr12, xr9, xr8
xvpickod.b xr13, xr9, xr8
xvaddwev.h.bu xr8, xr12, xr13
xvaddwod.h.bu xr9, xr12, xr13
xvsubwev.h.bu xr10, xr12, xr13
xvsubwod.h.bu xr11, xr12, xr13
xvadd.h xr12, xr8, xr9
xvadd.h xr13, xr10, xr11
xvsub.h xr14, xr8, xr9
xvsub.h xr15, xr10, xr11
// Transpose via interleaves, then vertical butterflies.
xvilvl.h xr8, xr13, xr12
xvilvh.h xr9, xr13, xr12
xvilvl.h xr10, xr15, xr14
xvilvh.h xr11, xr15, xr14
xvilvl.w xr12, xr10, xr8
xvilvh.w xr13, xr10, xr8
xvilvl.w xr14, xr11, xr9
xvilvh.w xr15, xr11, xr9
xvadd.h xr8, xr12, xr13
xvadd.h xr9, xr14, xr15
xvsub.h xr10, xr12, xr13
xvsub.h xr11, xr14, xr15
xvadd.h xr12, xr8, xr9
xvadd.h xr13, xr10, xr11
xvsub.h xr14, xr8, xr9
xvsub.h xr15, xr10, xr11
// t3 = sum of the four 4x4 DC terms (to be subtracted from both sums).
vpickve2gr.hu t3, vr12, 0
vpickve2gr.hu t4, vr12, 4
xvor.v xr16, xr12, xr12
xvpermi.q xr16, xr16, 0x31
vpickve2gr.hu t5, vr16, 0
vpickve2gr.hu t6, vr16, 4
add.d t3, t3, t4
add.d t5, t5, t6
add.d t3, t3, t5
// t4 = absolute sum after the 4x4 stages.
xvadda.h xr16, xr12, xr13
xvadda.h xr18, xr14, xr15
xvadd.h xr16, xr16, xr18
xvpermi.d xr17, xr16, 0x4e
xvadd.h xr18, xr16, xr17
xvhaddw.wu.hu xr18, xr18, xr18
xvhaddw.du.wu xr18, xr18, xr18
xvhaddw.qu.du xr18, xr18, xr18
xvpickve2gr.wu t4, xr18, 0
// Continue the transform to the full 8x8 stage.
xvpackev.h xr8, xr13, xr12
xvpackev.h xr9, xr15, xr14
xvpackod.h xr10, xr13, xr12
xvpackod.h xr11, xr15, xr14
xvilvl.d xr12, xr9, xr8
xvilvh.d xr13, xr9, xr8
xvilvl.d xr14, xr11, xr10
xvilvh.d xr15, xr11, xr10
xvor.v xr16, xr12, xr12
xvor.v xr17, xr13, xr13
xvpermi.q xr12, xr14, 0x02
xvpermi.q xr13, xr14, 0x12
xvpermi.q xr16, xr15, 0x03
xvpermi.q xr17, xr15, 0x13
xvadd.h xr8, xr12, xr13
xvsub.h xr9, xr12, xr13
xvadd.h xr10, xr16, xr17
xvsub.h xr11, xr16, xr17
xvadd.h xr12, xr8, xr10
xvadd.h xr13, xr9, xr11
xvsub.h xr14, xr8, xr10
xvsub.h xr15, xr9, xr11
// t5 = absolute sum after the 8x8 stage.
xvadda.h xr16, xr12, xr13
xvadda.h xr17, xr14, xr15
xvadd.h xr18, xr16, xr17
xvpermi.d xr19, xr18, 0x4e
xvadd.d xr19, xr18, xr19
xvhaddw.wu.hu xr19, xr19, xr19
xvhaddw.du.wu xr19, xr19, xr19
xvhaddw.qu.du xr19, xr19, xr19
xvpickve2gr.wu t5, xr19, 0
sub.d t4, t4, t3 // ac4 = sum4 - DC terms
sub.d t5, t5, t3 // ac8 = sum8 - DC terms
slli.d t5, t5, 32
add.d a0, t5, t4 // pack (ac8 << 32) | ac4
endfunc_x264
/* int x264_pixel_satd_16x16_lasx(pixel *pix1, intptr_t i_pix1,
* pixel *pix2, intptr_t i_pix2)
*/
function_x264 pixel_satd_16x16_lasx
// SATD of a 16x16 block, processed as two 16x8 halves.
// In:  a0 = pix1, a1 = i_pix1 (stride), a2 = pix2, a3 = i_pix2 (stride)
// Out: a0 = SATD value (accumulated absolute transformed differences >> 1)
// Clobbers: t0-t7, xr0-xr16 (caller-saved per LoongArch psABI).
// Stride multiples for row addressing: t2/t3 = 2*stride, t4/t5 = 4*stride,
// t6/t7 = 3*stride (pix1/pix2 respectively).
slli.d t2, a1, 1
slli.d t3, a3, 1
slli.d t4, a1, 2
slli.d t5, a3, 2
add.d t6, a1, t2
add.d t7, a3, t3
// Load data from pix1 and pix2
// First 16x8 half: 8 rows of 16 bytes from each picture.
LSX_LOADX_4 a0, a1, t2, t6, vr0, vr1, vr2, vr3
add.d a0, a0, t4
LSX_LOADX_4 a0, a1, t2, t6, vr4, vr5, vr6, vr7
LSX_LOADX_4 a2, a3, t3, t7, vr8, vr9, vr10, vr11
add.d a2, a2, t5
LSX_LOADX_4 a2, a3, t3, t7, vr12, vr13, vr14, vr15
// Pack two 128-bit rows into each 256-bit register (low/high lanes).
xvpermi.q xr0, xr4, 0x02
xvpermi.q xr1, xr5, 0x02
xvpermi.q xr2, xr6, 0x02
xvpermi.q xr3, xr7, 0x02
xvpermi.q xr8, xr12, 0x02
xvpermi.q xr9, xr13, 0x02
xvpermi.q xr10, xr14, 0x02
xvpermi.q xr11, xr15, 0x02
// HADAMARD4
// Byte differences pix1-pix2, widened to halfwords: even lanes in one
// register, odd lanes in the other.
xvsubwev.h.bu xr4, xr0, xr8
xvsubwod.h.bu xr5, xr0, xr8
xvsubwev.h.bu xr6, xr1, xr9
xvsubwod.h.bu xr7, xr1, xr9
xvsubwev.h.bu xr8, xr2, xr10
xvsubwod.h.bu xr9, xr2, xr10
xvsubwev.h.bu xr12, xr3, xr11
xvsubwod.h.bu xr13, xr3, xr11
// Horizontal butterfly stage 1: sums and differences of adjacent pixels.
xvadd.h xr0, xr4, xr5
xvsub.h xr1, xr4, xr5
xvadd.h xr2, xr6, xr7
xvsub.h xr3, xr6, xr7
xvadd.h xr4, xr8, xr9
xvsub.h xr5, xr8, xr9
xvadd.h xr6, xr12, xr13
xvsub.h xr7, xr12, xr13
// Re-interleave so the second butterfly pairs the right coefficients.
xvpackev.h xr8, xr5, xr4
xvpackod.h xr9, xr5, xr4
xvpackev.h xr10, xr7, xr6
xvpackod.h xr11, xr7, xr6
xvpackev.h xr4, xr1, xr0
xvpackod.h xr5, xr1, xr0
xvpackev.h xr6, xr3, xr2
xvpackod.h xr7, xr3, xr2
// Horizontal butterfly stage 2.
xvadd.h xr0, xr4, xr5
xvsub.h xr1, xr4, xr5
xvadd.h xr2, xr6, xr7
xvsub.h xr3, xr6, xr7
xvadd.h xr4, xr8, xr9
xvsub.h xr5, xr8, xr9
xvadd.h xr6, xr10, xr11
xvsub.h xr7, xr10, xr11
// Transpose via interleaves so the vertical butterflies can run on rows.
xvilvl.h xr8, xr1, xr0
xvilvl.h xr9, xr3, xr2
xvilvl.h xr10, xr5, xr4
xvilvl.h xr11, xr7, xr6
xvilvh.h xr0, xr1, xr0
xvilvh.h xr1, xr3, xr2
xvilvh.h xr2, xr5, xr4
xvilvh.h xr3, xr7, xr6
// Vertical butterfly stages on the low-interleave group...
xvadd.h xr4, xr8, xr9
xvadd.h xr6, xr10, xr11
xvsub.h xr5, xr8, xr9
xvsub.h xr7, xr10, xr11
xvadd.h xr8, xr4, xr6
xvadd.h xr9, xr5, xr7
xvsub.h xr10, xr4, xr6
xvsub.h xr11, xr5, xr7
// ...and on the high-interleave group.
xvadd.h xr4, xr0, xr1
xvadd.h xr6, xr2, xr3
xvsub.h xr5, xr0, xr1
xvsub.h xr7, xr2, xr3
xvadd.h xr0, xr4, xr6
xvadd.h xr1, xr5, xr7
xvsub.h xr2, xr4, xr6
xvsub.h xr3, xr5, xr7
// Accumulate absolute values of the transform coefficients.
xvadda.h xr8, xr8, xr9
xvadda.h xr9, xr10, xr11
xvadda.h xr0, xr0, xr1
xvadda.h xr1, xr2, xr3
xvadd.h xr8, xr8, xr9
xvadd.h xr0, xr0, xr1
// xr16 holds the per-lane totals of the first 16x8 half.
xvadd.h xr16, xr0, xr8
add.d a0, a0, t4
add.d a2, a2, t5
// Load data from pix1 and pix2
// Second 16x8 half: identical pipeline, result merged into xr16's totals.
LSX_LOADX_4 a0, a1, t2, t6, vr0, vr1, vr2, vr3
add.d a0, a0, t4
LSX_LOADX_4 a0, a1, t2, t6, vr4, vr5, vr6, vr7
LSX_LOADX_4 a2, a3, t3, t7, vr8, vr9, vr10, vr11
add.d a2, a2, t5
LSX_LOADX_4 a2, a3, t3, t7, vr12, vr13, vr14, vr15
xvpermi.q xr0, xr4, 0x02
xvpermi.q xr1, xr5, 0x02
xvpermi.q xr2, xr6, 0x02
xvpermi.q xr3, xr7, 0x02
xvpermi.q xr8, xr12, 0x02
xvpermi.q xr9, xr13, 0x02
xvpermi.q xr10, xr14, 0x02
xvpermi.q xr11, xr15, 0x02
// HADAMARD4
xvsubwev.h.bu xr4, xr0, xr8
xvsubwod.h.bu xr5, xr0, xr8
xvsubwev.h.bu xr6, xr1, xr9
xvsubwod.h.bu xr7, xr1, xr9
xvsubwev.h.bu xr8, xr2, xr10
xvsubwod.h.bu xr9, xr2, xr10
xvsubwev.h.bu xr12, xr3, xr11
xvsubwod.h.bu xr13, xr3, xr11
xvadd.h xr0, xr4, xr5
xvsub.h xr1, xr4, xr5
xvadd.h xr2, xr6, xr7
xvsub.h xr3, xr6, xr7
xvadd.h xr4, xr8, xr9
xvsub.h xr5, xr8, xr9
xvadd.h xr6, xr12, xr13
xvsub.h xr7, xr12, xr13
xvpackev.h xr8, xr5, xr4
xvpackod.h xr9, xr5, xr4
xvpackev.h xr10, xr7, xr6
xvpackod.h xr11, xr7, xr6
xvpackev.h xr4, xr1, xr0
xvpackod.h xr5, xr1, xr0
xvpackev.h xr6, xr3, xr2
xvpackod.h xr7, xr3, xr2
xvadd.h xr0, xr4, xr5
xvsub.h xr1, xr4, xr5
xvadd.h xr2, xr6, xr7
xvsub.h xr3, xr6, xr7
xvadd.h xr4, xr8, xr9
xvsub.h xr5, xr8, xr9
xvadd.h xr6, xr10, xr11
xvsub.h xr7, xr10, xr11
xvilvl.h xr8, xr1, xr0
xvilvl.h xr9, xr3, xr2
xvilvl.h xr10, xr5, xr4
xvilvl.h xr11, xr7, xr6
xvilvh.h xr0, xr1, xr0
xvilvh.h xr1, xr3, xr2
xvilvh.h xr2, xr5, xr4
xvilvh.h xr3, xr7, xr6
xvadd.h xr4, xr8, xr9
xvadd.h xr6, xr10, xr11
xvsub.h xr5, xr8, xr9
xvsub.h xr7, xr10, xr11
xvadd.h xr8, xr4, xr6
xvadd.h xr9, xr5, xr7
xvsub.h xr10, xr4, xr6
xvsub.h xr11, xr5, xr7
xvadd.h xr4, xr0, xr1
xvadd.h xr6, xr2, xr3
xvsub.h xr5, xr0, xr1
xvsub.h xr7, xr2, xr3
xvadd.h xr0, xr4, xr6
xvadd.h xr1, xr5, xr7
xvsub.h xr2, xr4, xr6
xvsub.h xr3, xr5, xr7
xvadda.h xr8, xr8, xr9
xvadda.h xr9, xr10, xr11
xvadda.h xr0, xr0, xr1
xvadda.h xr1, xr2, xr3
xvadd.h xr8, xr8, xr9
xvadd.h xr0, xr0, xr1
xvadd.h xr0, xr0, xr8
// Merge both halves, then reduce the halfword lanes to one scalar:
// widening horizontal adds h->w->d->q, then pick the two 128-bit
// lane totals and combine.
xvadd.h xr0, xr0, xr16
xvhaddw.wu.hu xr0, xr0, xr0
xvhaddw.du.wu xr0, xr0, xr0
xvhaddw.qu.du xr0, xr0, xr0
xvpickve2gr.wu t0, xr0, 0
xvpickve2gr.wu t1, xr0, 4
add.w t0, t0, t1
// Return sum >> 1 (SATD halving convention used throughout this file).
srli.d a0, t0, 1
endfunc_x264
/* int x264_pixel_satd_16x8_lasx(pixel *pix1, intptr_t i_pix1,
* pixel *pix2, intptr_t i_pix2)
*/
function_x264 pixel_satd_16x8_lasx
// SATD of a 16x8 block (single pass of the 16x8 pipeline used by 16x16).
// In:  a0 = pix1, a1 = i_pix1 (stride), a2 = pix2, a3 = i_pix2 (stride)
// Out: a0 = SATD value (sum of absolute transformed differences >> 1)
// Clobbers: t0-t7, xr0-xr15.
// t2/t3 = 2*stride, t4/t5 = 4*stride, t6/t7 = 3*stride (pix1/pix2).
slli.d t2, a1, 1
slli.d t3, a3, 1
slli.d t4, t2, 1
slli.d t5, t3, 1
add.d t6, a1, t2
add.d t7, a3, t3
// Load data from pix1 and pix2
// 8 rows of 16 bytes from each picture.
LSX_LOADX_4 a0, a1, t2, t6, vr0, vr1, vr2, vr3
add.d a0, a0, t4
LSX_LOADX_4 a0, a1, t2, t6, vr4, vr5, vr6, vr7
LSX_LOADX_4 a2, a3, t3, t7, vr8, vr9, vr10, vr11
add.d a2, a2, t5
LSX_LOADX_4 a2, a3, t3, t7, vr12, vr13, vr14, vr15
// Pack two 128-bit rows per 256-bit register.
xvpermi.q xr0, xr4, 0x02
xvpermi.q xr1, xr5, 0x02
xvpermi.q xr2, xr6, 0x02
xvpermi.q xr3, xr7, 0x02
xvpermi.q xr8, xr12, 0x02
xvpermi.q xr9, xr13, 0x02
xvpermi.q xr10, xr14, 0x02
xvpermi.q xr11, xr15, 0x02
// HADAMARD4
// Widened byte differences: even lanes / odd lanes per register pair.
xvsubwev.h.bu xr4, xr0, xr8
xvsubwod.h.bu xr5, xr0, xr8
xvsubwev.h.bu xr6, xr1, xr9
xvsubwod.h.bu xr7, xr1, xr9
xvsubwev.h.bu xr8, xr2, xr10
xvsubwod.h.bu xr9, xr2, xr10
xvsubwev.h.bu xr12, xr3, xr11
xvsubwod.h.bu xr13, xr3, xr11
// Horizontal butterfly stage 1.
xvadd.h xr0, xr4, xr5
xvsub.h xr1, xr4, xr5
xvadd.h xr2, xr6, xr7
xvsub.h xr3, xr6, xr7
xvadd.h xr4, xr8, xr9
xvsub.h xr5, xr8, xr9
xvadd.h xr6, xr12, xr13
xvsub.h xr7, xr12, xr13
// Re-pair coefficients for stage 2.
xvpackev.h xr8, xr5, xr4
xvpackod.h xr9, xr5, xr4
xvpackev.h xr10, xr7, xr6
xvpackod.h xr11, xr7, xr6
xvpackev.h xr4, xr1, xr0
xvpackod.h xr5, xr1, xr0
xvpackev.h xr6, xr3, xr2
xvpackod.h xr7, xr3, xr2
// Horizontal butterfly stage 2.
xvadd.h xr0, xr4, xr5
xvsub.h xr1, xr4, xr5
xvadd.h xr2, xr6, xr7
xvsub.h xr3, xr6, xr7
xvadd.h xr4, xr8, xr9
xvsub.h xr5, xr8, xr9
xvadd.h xr6, xr10, xr11
xvsub.h xr7, xr10, xr11
// Transpose via interleaves, then vertical butterflies on both groups.
xvilvl.h xr8, xr1, xr0
xvilvl.h xr9, xr3, xr2
xvilvl.h xr10, xr5, xr4
xvilvl.h xr11, xr7, xr6
xvilvh.h xr0, xr1, xr0
xvilvh.h xr1, xr3, xr2
xvilvh.h xr2, xr5, xr4
xvilvh.h xr3, xr7, xr6
xvadd.h xr4, xr8, xr9
xvadd.h xr6, xr10, xr11
xvsub.h xr5, xr8, xr9
xvsub.h xr7, xr10, xr11
xvadd.h xr8, xr4, xr6
xvadd.h xr9, xr5, xr7
xvsub.h xr10, xr4, xr6
xvsub.h xr11, xr5, xr7
xvadd.h xr4, xr0, xr1
xvadd.h xr6, xr2, xr3
xvsub.h xr5, xr0, xr1
xvsub.h xr7, xr2, xr3
xvadd.h xr0, xr4, xr6
xvadd.h xr1, xr5, xr7
xvsub.h xr2, xr4, xr6
xvsub.h xr3, xr5, xr7
// Absolute-value accumulation of transform coefficients.
xvadda.h xr8, xr8, xr9
xvadda.h xr9, xr10, xr11
xvadda.h xr0, xr0, xr1
xvadda.h xr1, xr2, xr3
xvadd.h xr8, xr8, xr9
xvadd.h xr0, xr0, xr1
xvadd.h xr0, xr0, xr8
// Widening horizontal reduction h->w->d->q, combine the two lane totals.
xvhaddw.wu.hu xr0, xr0, xr0
xvhaddw.du.wu xr0, xr0, xr0
xvhaddw.qu.du xr0, xr0, xr0
xvpickve2gr.wu t0, xr0, 0
xvpickve2gr.wu t1, xr0, 4
add.w t0, t0, t1
// Return sum >> 1.
srli.d a0, t0, 1
endfunc_x264
/* int x264_pixel_satd_8x16_lasx(pixel *pix1, intptr_t i_pix1,
* pixel *pix2, intptr_t i_pix2)
*/
function_x264 pixel_satd_8x16_lasx
// SATD of an 8x16 block, processed as two 8x8 halves; the running
// per-lane total of the first half is kept in xr16.
// In:  a0 = pix1, a1 = i_pix1 (stride), a2 = pix2, a3 = i_pix2 (stride)
// Out: a0 = SATD value (sum of absolute transformed differences >> 1)
// Clobbers: t0-t7, xr0-xr16.
// t2 = 2*i_pix1, t3 = 3*i_pix1, t4 = 4*i_pix1;
// t5 = 2*i_pix2, t6 = 3*i_pix2, t7 = 4*i_pix2.
slli.d t2, a1, 1
add.d t3, a1, t2
slli.d t4, a1, 2
slli.d t5, a3, 1
add.d t6, a3, t5
slli.d t7, a3, 2
// Load data from pix1 and pix2
// First 8x8 half: 8 rows of 8 bytes from each picture.
LSX_LOADX_4 a0, a1, t2, t3, vr0, vr1, vr2, vr3
add.d a0, a0, t4
LSX_LOADX_4 a0, a1, t2, t3, vr4, vr5, vr6, vr7
LSX_LOADX_4 a2, a3, t5, t6, vr8, vr9, vr10, vr11
add.d a2, a2, t7
LSX_LOADX_4 a2, a3, t5, t6, vr12, vr13, vr14, vr15
// Pair 8-byte rows into 128-bit halves, then 256-bit registers.
vilvl.d vr0, vr1, vr0
vilvl.d vr1, vr3, vr2
vilvl.d vr2, vr5, vr4
vilvl.d vr3, vr7, vr6
xvpermi.q xr0, xr2, 0x02
xvpermi.q xr1, xr3, 0x02
vilvl.d vr2, vr9, vr8
vilvl.d vr3, vr11, vr10
vilvl.d vr4, vr13, vr12
vilvl.d vr5, vr15, vr14
xvpermi.q xr2, xr4, 0x02
xvpermi.q xr3, xr5, 0x02
// HADAMARD4
// Widened byte differences (even/odd lanes), then butterfly stages.
xvsubwev.h.bu xr4, xr0, xr2
xvsubwod.h.bu xr5, xr0, xr2
xvsubwev.h.bu xr6, xr1, xr3
xvsubwod.h.bu xr7, xr1, xr3
xvadd.h xr0, xr4, xr5
xvsub.h xr1, xr4, xr5
xvadd.h xr2, xr6, xr7
xvsub.h xr3, xr6, xr7
xvpackev.h xr4, xr1, xr0
xvpackod.h xr5, xr1, xr0
xvpackev.h xr6, xr3, xr2
xvpackod.h xr7, xr3, xr2
xvadd.h xr0, xr4, xr5
xvsub.h xr1, xr4, xr5
xvadd.h xr2, xr6, xr7
xvsub.h xr3, xr6, xr7
// Transpose (interleave) then vertical butterflies.
xvilvl.h xr4, xr1, xr0
xvilvh.h xr5, xr1, xr0
xvilvl.h xr6, xr3, xr2
xvilvh.h xr7, xr3, xr2
xvadd.h xr0, xr4, xr5
xvadd.h xr2, xr6, xr7
xvsub.h xr1, xr4, xr5
xvsub.h xr3, xr6, xr7
xvadd.h xr4, xr0, xr2
xvadd.h xr5, xr1, xr3
xvsub.h xr6, xr0, xr2
xvsub.h xr7, xr1, xr3
// Absolute-value accumulation; xr16 = first-half per-lane totals.
xvadda.h xr0, xr4, xr5
xvadda.h xr1, xr6, xr7
xvadd.h xr16, xr0, xr1
add.d a0, a0, t4
add.d a2, a2, t7
// Load data from pix1 and pix2
// Second 8x8 half: identical pipeline.
LSX_LOADX_4 a0, a1, t2, t3, vr0, vr1, vr2, vr3
add.d a0, a0, t4
LSX_LOADX_4 a0, a1, t2, t3, vr4, vr5, vr6, vr7
LSX_LOADX_4 a2, a3, t5, t6, vr8, vr9, vr10, vr11
add.d a2, a2, t7
LSX_LOADX_4 a2, a3, t5, t6, vr12, vr13, vr14, vr15
vilvl.d vr0, vr1, vr0
vilvl.d vr1, vr3, vr2
vilvl.d vr2, vr5, vr4
vilvl.d vr3, vr7, vr6
xvpermi.q xr0, xr2, 0x02
xvpermi.q xr1, xr3, 0x02
vilvl.d vr2, vr9, vr8
vilvl.d vr3, vr11, vr10
vilvl.d vr4, vr13, vr12
vilvl.d vr5, vr15, vr14
xvpermi.q xr2, xr4, 0x02
xvpermi.q xr3, xr5, 0x02
// HADAMARD4
xvsubwev.h.bu xr4, xr0, xr2
xvsubwod.h.bu xr5, xr0, xr2
xvsubwev.h.bu xr6, xr1, xr3
xvsubwod.h.bu xr7, xr1, xr3
xvadd.h xr0, xr4, xr5
xvsub.h xr1, xr4, xr5
xvadd.h xr2, xr6, xr7
xvsub.h xr3, xr6, xr7
xvpackev.h xr4, xr1, xr0
xvpackod.h xr5, xr1, xr0
xvpackev.h xr6, xr3, xr2
xvpackod.h xr7, xr3, xr2
xvadd.h xr0, xr4, xr5
xvsub.h xr1, xr4, xr5
xvadd.h xr2, xr6, xr7
xvsub.h xr3, xr6, xr7
xvilvl.h xr4, xr1, xr0
xvilvh.h xr5, xr1, xr0
xvilvl.h xr6, xr3, xr2
xvilvh.h xr7, xr3, xr2
xvadd.h xr0, xr4, xr5
xvadd.h xr2, xr6, xr7
xvsub.h xr1, xr4, xr5
xvsub.h xr3, xr6, xr7
xvadd.h xr4, xr0, xr2
xvadd.h xr5, xr1, xr3
xvsub.h xr6, xr0, xr2
xvsub.h xr7, xr1, xr3
xvadda.h xr0, xr4, xr5
xvadda.h xr1, xr6, xr7
xvadd.h xr0, xr0, xr1
// Merge both halves, reduce h->w->d->q, combine the two lane totals.
xvadd.h xr0, xr0, xr16
xvhaddw.wu.hu xr0, xr0, xr0
xvhaddw.du.wu xr0, xr0, xr0
xvhaddw.qu.du xr0, xr0, xr0
xvpickve2gr.wu t0, xr0, 0
xvpickve2gr.wu t1, xr0, 4
add.w t0, t0, t1
// Return sum >> 1.
srli.d a0, t0, 1
endfunc_x264
/* int x264_pixel_satd_8x8_lasx(pixel *pix1, intptr_t i_pix1,
* pixel *pix2, intptr_t i_pix2)
*/
function_x264 pixel_satd_8x8_lasx
// SATD of an 8x8 block (one pass of the 8x8 pipeline used by 8x16).
// In:  a0 = pix1, a1 = i_pix1 (stride), a2 = pix2, a3 = i_pix2 (stride)
// Out: a0 = SATD value (sum of absolute transformed differences >> 1)
// Clobbers: t0-t7, xr0-xr15.
// t2/t5 = 2*stride, t3/t6 = 3*stride, t4/t7 = 4*stride (pix1/pix2).
slli.d t2, a1, 1
slli.d t5, a3, 1
add.d t3, a1, t2
add.d t6, a3, t5
slli.d t4, t2, 1
slli.d t7, t5, 1
// Load data from pix1 and pix2
// 8 rows of 8 bytes from each picture.
LSX_LOADX_4 a0, a1, t2, t3, vr0, vr1, vr2, vr3
add.d a0, a0, t4
LSX_LOADX_4 a0, a1, t2, t3, vr4, vr5, vr6, vr7
LSX_LOADX_4 a2, a3, t5, t6, vr8, vr9, vr10, vr11
add.d a2, a2, t7
LSX_LOADX_4 a2, a3, t5, t6, vr12, vr13, vr14, vr15
// Pair 8-byte rows into 128-bit halves, then 256-bit registers.
vilvl.d vr0, vr1, vr0
vilvl.d vr1, vr3, vr2
vilvl.d vr2, vr5, vr4
vilvl.d vr3, vr7, vr6
xvpermi.q xr0, xr2, 0x02
xvpermi.q xr1, xr3, 0x02
vilvl.d vr2, vr9, vr8
vilvl.d vr3, vr11, vr10
vilvl.d vr4, vr13, vr12
vilvl.d vr5, vr15, vr14
xvpermi.q xr2, xr4, 0x02
xvpermi.q xr3, xr5, 0x02
// HADAMARD4
// Widened byte differences (even/odd lanes), horizontal butterflies.
xvsubwev.h.bu xr4, xr0, xr2
xvsubwod.h.bu xr5, xr0, xr2
xvsubwev.h.bu xr6, xr1, xr3
xvsubwod.h.bu xr7, xr1, xr3
xvadd.h xr0, xr4, xr5
xvsub.h xr1, xr4, xr5
xvadd.h xr2, xr6, xr7
xvsub.h xr3, xr6, xr7
xvpackev.h xr4, xr1, xr0
xvpackod.h xr5, xr1, xr0
xvpackev.h xr6, xr3, xr2
xvpackod.h xr7, xr3, xr2
xvadd.h xr0, xr4, xr5
xvsub.h xr1, xr4, xr5
xvadd.h xr2, xr6, xr7
xvsub.h xr3, xr6, xr7
// Transpose (interleave) then vertical butterflies.
xvilvl.h xr4, xr1, xr0
xvilvh.h xr5, xr1, xr0
xvilvl.h xr6, xr3, xr2
xvilvh.h xr7, xr3, xr2
xvadd.h xr0, xr4, xr5
xvadd.h xr2, xr6, xr7
xvsub.h xr1, xr4, xr5
xvsub.h xr3, xr6, xr7
xvadd.h xr4, xr0, xr2
xvadd.h xr5, xr1, xr3
xvsub.h xr6, xr0, xr2
xvsub.h xr7, xr1, xr3
// Absolute-value accumulation, then widening reduction h->w->d->q.
xvadda.h xr0, xr4, xr5
xvadda.h xr1, xr6, xr7
xvadd.h xr0, xr0, xr1
xvhaddw.wu.hu xr0, xr0, xr0
xvhaddw.du.wu xr0, xr0, xr0
xvhaddw.qu.du xr0, xr0, xr0
xvpickve2gr.wu t0, xr0, 0
xvpickve2gr.wu t1, xr0, 4
add.w t0, t0, t1
// Return sum >> 1.
srli.d a0, t0, 1
endfunc_x264
/* int x264_pixel_satd_8x4_lasx(pixel *pix1, intptr_t i_pix1,
* pixel *pix2, intptr_t i_pix2)
*/
function_x264 pixel_satd_8x4_lasx
// SATD of an 8x4 block.
// In:  a0 = pix1, a1 = i_pix1 (stride), a2 = pix2, a3 = i_pix2 (stride)
// Out: a0 = SATD value (sum of absolute transformed differences >> 1)
// Clobbers: t2-t5, xr1-xr13.
// t2/t3 = 2*stride, t4/t5 = 3*stride (pix1/pix2).
slli.d t2, a1, 1
slli.d t3, a3, 1
add.d t4, a1, t2
add.d t5, a3, t3
// Load data from pix1 and pix2
// 4 rows of 8 bytes from each picture; pairs of rows are joined into
// 128-bit registers, then both row-pairs into one 256-bit register.
LSX_LOADX_4 a0, a1, t2, t4, vr1, vr2, vr3, vr4
LSX_LOADX_4 a2, a3, t3, t5, vr5, vr6, vr7, vr8
vilvl.d vr1, vr2, vr1
vilvl.d vr3, vr4, vr3
vilvl.d vr5, vr6, vr5
vilvl.d vr7, vr8, vr7
xvpermi.q xr1, xr3, 0x02
xvpermi.q xr5, xr7, 0x02
// Widened byte differences (even/odd lanes), then butterfly stages.
xvsubwev.h.bu xr9, xr1, xr5
xvsubwod.h.bu xr10, xr1, xr5
xvadd.h xr11, xr9, xr10 /* HADAMARD4 */
xvsub.h xr12, xr9, xr10
xvpackev.h xr9, xr12, xr11
xvpackod.h xr10, xr12, xr11
xvadd.h xr11, xr9, xr10
xvsub.h xr12, xr9, xr10
// Regroup by 64-bit lanes for the vertical (cross-row) butterflies.
xvpackev.d xr9, xr12, xr11
xvpackod.d xr10, xr12, xr11
xvadd.h xr11, xr9, xr10 /* HADAMARD4 */
xvsub.h xr12, xr9, xr10
// Cross-128-bit-lane butterfly: combine low/high lane halves.
xvor.v xr13, xr11, xr11
xvpermi.q xr11, xr12, 0x02
xvpermi.q xr13, xr12, 0x13
xvadd.h xr9, xr11, xr13
xvsub.h xr10, xr11, xr13
xvpackev.d xr11, xr10, xr9
xvpackod.d xr12, xr10, xr9
// Absolute-value accumulation, widening reduction h->w->d->q.
xvadda.h xr11, xr11, xr12
xvhaddw.wu.hu xr11, xr11, xr11
xvhaddw.du.wu xr11, xr11, xr11
xvhaddw.qu.du xr11, xr11, xr11
xvpickve2gr.wu t4, xr11, 0
xvpickve2gr.wu t5, xr11, 4
add.d t4, t4, t5
// Return sum >> 1.
srli.d a0, t4, 1
endfunc_x264
/* int x264_pixel_satd_4x16_lasx(pixel *pix1, intptr_t i_pix1,
* pixel *pix2, intptr_t i_pix2)
*/
function_x264 pixel_satd_4x16_lasx
// SATD of a 4x16 block, processed as two 4x8 halves; the scalar total
// of the first half is kept in t7 and added to the second half's total.
// In:  a0 = pix1, a1 = i_pix1 (stride), a2 = pix2, a3 = i_pix2 (stride)
// Out: a0 = SATD value (sum of absolute transformed differences >> 1)
// Clobbers: t0-t7, xr1-xr12.
// t2/t3 = 2*stride, t4/t5 = 3*stride (pix1/pix2).
slli.d t2, a1, 1
slli.d t3, a3, 1
add.d t4, a1, t2
add.d t5, a3, t3
// Load data from pix1 and pix2
// First 4 rows of 4 bytes each; interleave words then doublewords so
// one 128-bit register holds a whole 4x4 tile.
LSX_LOADX_4 a0, a1, t2, t4, vr1, vr2, vr3, vr4
LSX_LOADX_4 a2, a3, t3, t5, vr5, vr6, vr7, vr8
vilvl.w vr1, vr2, vr1
vilvl.w vr3, vr4, vr3
vilvl.d vr9, vr3, vr1
vilvl.w vr5, vr6, vr5
vilvl.w vr7, vr8, vr7
vilvl.d vr10, vr7, vr5
// t0/t1 = 4*stride, used to advance 4 rows at a time.
slli.d t0, a1, 2
slli.d t1, a3, 2
// Load data from pix1 and pix2
// Rows 4-7; combined with rows 0-3 into 256-bit registers below.
add.d a0, a0, t0
LSX_LOADX_4 a0, a1, t2, t4, vr1, vr2, vr3, vr4
add.d a2, a2, t1
LSX_LOADX_4 a2, a3, t3, t5, vr5, vr6, vr7, vr8
vilvl.w vr1, vr2, vr1
vilvl.w vr3, vr4, vr3
vilvl.d vr1, vr3, vr1
vilvl.w vr5, vr6, vr5
vilvl.w vr7, vr8, vr7
vilvl.d vr5, vr7, vr5
xvpermi.q xr1, xr9, 0x20
xvpermi.q xr5, xr10, 0x20
// Widened byte differences, then the 4x4 Hadamard butterfly ladder:
// halfword, word, and doubleword pack stages.
xvsubwev.h.bu xr9, xr1, xr5
xvsubwod.h.bu xr10, xr1, xr5
xvadd.h xr11, xr9, xr10 /* a0 + a1 */
xvsub.h xr12, xr9, xr10 /* a0 - a1 */
xvpackev.h xr9, xr12, xr11
xvpackod.h xr10, xr12, xr11
xvadd.h xr11, xr9, xr10 /* b0 + b1 */
xvsub.h xr12, xr9, xr10 /* b0 - b1 */
xvpackev.w xr9, xr12, xr11
xvpackod.w xr10, xr12, xr11
xvadd.h xr11, xr9, xr10 /* HADAMARD4 */
xvsub.h xr12, xr9, xr10
xvpackev.d xr9, xr12, xr11
xvpackod.d xr10, xr12, xr11
xvadd.h xr11, xr9, xr10
xvsub.h xr12, xr9, xr10
xvpackev.d xr9, xr12, xr11
xvpackod.d xr10, xr12, xr11
// Absolute-value accumulation and widening reduction h->w->d->q;
// t7 = scalar total for the first 4x8 half.
xvadda.h xr9, xr9, xr10
xvhaddw.wu.hu xr9, xr9, xr9
xvhaddw.du.wu xr9, xr9, xr9
xvhaddw.qu.du xr9, xr9, xr9
xvpickve2gr.wu t6, xr9, 0
xvpickve2gr.wu t7, xr9, 4
add.d t7, t6, t7
// Load data from pix1 and pix2
// Second 4x8 half (rows 8-15): identical pipeline.
add.d a0, a0, t0
LSX_LOADX_4 a0, a1, t2, t4, vr1, vr2, vr3, vr4
add.d a2, a2, t1
LSX_LOADX_4 a2, a3, t3, t5, vr5, vr6, vr7, vr8
vilvl.w vr1, vr2, vr1
vilvl.w vr3, vr4, vr3
vilvl.d vr9, vr3, vr1
vilvl.w vr5, vr6, vr5
vilvl.w vr7, vr8, vr7
vilvl.d vr10, vr7, vr5
// Load data from pix1 and pix2
add.d a0, a0, t0
LSX_LOADX_4 a0, a1, t2, t4, vr1, vr2, vr3, vr4
add.d a2, a2, t1
LSX_LOADX_4 a2, a3, t3, t5, vr5, vr6, vr7, vr8
vilvl.w vr1, vr2, vr1
vilvl.w vr3, vr4, vr3
vilvl.d vr1, vr3, vr1
vilvl.w vr5, vr6, vr5
vilvl.w vr7, vr8, vr7
vilvl.d vr5, vr7, vr5
xvpermi.q xr1, xr9, 0x20
xvpermi.q xr5, xr10, 0x20
xvsubwev.h.bu xr9, xr1, xr5
xvsubwod.h.bu xr10, xr1, xr5
xvadd.h xr11, xr9, xr10 /* a0 + a1 */
xvsub.h xr12, xr9, xr10 /* a0 - a1 */
xvpackev.h xr9, xr12, xr11
xvpackod.h xr10, xr12, xr11
xvadd.h xr11, xr9, xr10 /* b0 + b1 */
xvsub.h xr12, xr9, xr10 /* b0 - b1 */
xvpackev.w xr9, xr12, xr11
xvpackod.w xr10, xr12, xr11
xvadd.h xr11, xr9, xr10 /* HADAMARD4 */
xvsub.h xr12, xr9, xr10
xvpackev.d xr9, xr12, xr11
xvpackod.d xr10, xr12, xr11
xvadd.h xr11, xr9, xr10
xvsub.h xr12, xr9, xr10
xvpackev.d xr9, xr12, xr11
xvpackod.d xr10, xr12, xr11
xvadda.h xr9, xr9, xr10
xvhaddw.wu.hu xr9, xr9, xr9
xvhaddw.du.wu xr9, xr9, xr9
xvhaddw.qu.du xr9, xr9, xr9
xvpickve2gr.wu t6, xr9, 0
xvpickve2gr.wu t5, xr9, 4
add.d t6, t5, t6
// Combine the two halves and return sum >> 1.
add.d t7, t6, t7
srli.d a0, t7, 1
endfunc_x264
/* int x264_pixel_satd_4x8_lasx(pixel *pix1, intptr_t i_pix1,
* pixel *pix2, intptr_t i_pix2)
*/
function_x264 pixel_satd_4x8_lasx
// SATD of a 4x8 block (one pass of the 4x8 pipeline used by 4x16).
// In:  a0 = pix1, a1 = i_pix1 (stride), a2 = pix2, a3 = i_pix2 (stride)
// Out: a0 = SATD value (sum of absolute transformed differences >> 1)
// Clobbers: t0-t7, xr1-xr12.
// t2/t3 = 2*stride, t4/t5 = 3*stride (pix1/pix2).
slli.d t2, a1, 1
slli.d t3, a3, 1
add.d t4, a1, t2
add.d t5, a3, t3
// Load data from pix1 and pix2
// Rows 0-3 (4 bytes each), packed into one 128-bit tile per picture.
LSX_LOADX_4 a0, a1, t2, t4, vr1, vr2, vr3, vr4
LSX_LOADX_4 a2, a3, t3, t5, vr5, vr6, vr7, vr8
vilvl.w vr1, vr2, vr1
vilvl.w vr3, vr4, vr3
vilvl.d vr9, vr3, vr1
vilvl.w vr5, vr6, vr5
vilvl.w vr7, vr8, vr7
vilvl.d vr10, vr7, vr5
// t0/t1 = 4*stride: advance to rows 4-7.
slli.d t0, a1, 2
slli.d t1, a3, 2
add.d a0, a0, t0
add.d a2, a2, t1
// Load data from pix1 and pix2
LSX_LOADX_4 a0, a1, t2, t4, vr1, vr2, vr3, vr4
LSX_LOADX_4 a2, a3, t3, t5, vr5, vr6, vr7, vr8
vilvl.w vr1, vr2, vr1
vilvl.w vr3, vr4, vr3
vilvl.d vr1, vr3, vr1
vilvl.w vr5, vr6, vr5
vilvl.w vr7, vr8, vr7
vilvl.d vr5, vr7, vr5
// Both 4x4 tiles per picture merged into one 256-bit register.
xvpermi.q xr1, xr9, 0x20
xvpermi.q xr5, xr10, 0x20
// Widened byte differences, then the 4x4 Hadamard butterfly ladder.
xvsubwev.h.bu xr9, xr1, xr5
xvsubwod.h.bu xr10, xr1, xr5
xvadd.h xr11, xr9, xr10 /* a0 + a1 */
xvsub.h xr12, xr9, xr10 /* a0 - a1 */
xvpackev.h xr9, xr12, xr11
xvpackod.h xr10, xr12, xr11
xvadd.h xr11, xr9, xr10 /* b0 + b1 */
xvsub.h xr12, xr9, xr10 /* b0 - b1 */
xvpackev.w xr9, xr12, xr11
xvpackod.w xr10, xr12, xr11
xvadd.h xr11, xr9, xr10 /* HADAMARD4 */
xvsub.h xr12, xr9, xr10
xvpackev.d xr9, xr12, xr11
xvpackod.d xr10, xr12, xr11
xvadd.h xr11, xr9, xr10
xvsub.h xr12, xr9, xr10
xvpackev.d xr9, xr12, xr11
xvpackod.d xr10, xr12, xr11
// Absolute-value accumulation, widening reduction h->w->d->q.
xvadda.h xr9, xr9, xr10
xvhaddw.wu.hu xr9, xr9, xr9
xvhaddw.du.wu xr9, xr9, xr9
xvhaddw.qu.du xr9, xr9, xr9
xvpickve2gr.wu t6, xr9, 0
xvpickve2gr.wu t7, xr9, 4
add.d t6, t6, t7
// Return sum >> 1.
srli.d a0, t6, 1
endfunc_x264
/* int x264_pixel_satd_4x4_lsx(pixel *pix1, intptr_t i_pix1,
* pixel *pix2, intptr_t i_pix2)
*/
// Core of the 4x4 SATD using 128-bit LSX only.
// In:  vr1-vr4 = four 4-byte rows of pix1, vr5-vr8 = four 4-byte rows
//      of pix2 (loaded by the caller, e.g. via FLDS_LOADX_4).
// Out: \out = per-lane absolute 4x4 transformed differences (halfwords);
//      the caller performs the final horizontal reduction and >>1.
// Clobbers: vr1, vr3, vr5, vr7, vr9-vr12.
.macro pixel_satd_4x4_lsx_core out
// Pack the 4 rows of each picture into a single 128-bit tile.
vilvl.w vr1, vr2, vr1
vilvl.w vr3, vr4, vr3
vilvl.d vr1, vr3, vr1
vilvl.w vr5, vr6, vr5
vilvl.w vr7, vr8, vr7
vilvl.d vr5, vr7, vr5
// Widened byte differences: even lanes / odd lanes.
vsubwev.h.bu vr9, vr1, vr5
vsubwod.h.bu vr10, vr1, vr5
// Butterfly ladder over halfword, word, and doubleword groupings.
vadd.h vr11, vr9, vr10 /* a0 + a1 */
vsub.h vr12, vr9, vr10 /* a0 - a1 */
vpackev.h vr9, vr12, vr11
vpackod.h vr10, vr12, vr11
vadd.h vr11, vr9, vr10 /* b0 + b1 */
vsub.h vr12, vr9, vr10 /* b0 - b1 */
vpackev.w vr9, vr12, vr11
vpackod.w vr10, vr12, vr11
vadd.h vr11, vr9, vr10 /* HADAMARD4 */
vsub.h vr12, vr9, vr10
vpackev.d vr9, vr12, vr11
vpackod.d vr10, vr12, vr11
vadd.h vr11, vr9, vr10
vsub.h vr12, vr9, vr10
vpackev.d vr9, vr12, vr11
vpackod.d vr10, vr12, vr11
// Per-lane absolute-value accumulation into the output register.
vadda.h \out, vr9, vr10
.endm
function_x264 pixel_satd_4x4_lsx
// SATD of a 4x4 block (LSX, 128-bit).
// In:  a0 = pix1, a1 = i_pix1 (stride), a2 = pix2, a3 = i_pix2 (stride)
// Out: a0 = SATD value (sum of absolute transformed differences >> 1)
// Clobbers: t2-t5, vr1-vr13.
// t2/t3 = 2*stride, t4/t5 = 3*stride (pix1/pix2).
slli.d t2, a1, 1
slli.d t3, a3, 1
add.d t4, a1, t2
add.d t5, a3, t3
// Load data from pix1 and pix2
// Four 32-bit row loads per picture into f1-f4 / f5-f8 (= vr1-vr8).
FLDS_LOADX_4 a0, a1, t2, t4, f1, f2, f3, f4
FLDS_LOADX_4 a2, a3, t3, t5, f5, f6, f7, f8
// Transform + per-lane absolute accumulation (see macro above).
pixel_satd_4x4_lsx_core vr13
// Widening horizontal reduction h->w->d->q to a single scalar.
vhaddw.wu.hu vr13, vr13, vr13
vhaddw.du.wu vr13, vr13, vr13
vhaddw.qu.du vr13, vr13, vr13
vpickve2gr.wu t5, vr13, 0
// Return sum >> 1.
srli.d a0, t5, 1
endfunc_x264
/*
* int pixel_ssd_16x16_lasx(const Pixel *pix1, intptr_t stride_pix1,
* const Pixel *pix2, intptr_t stride_pix2)
*/
function_x264 pixel_ssd_16x16_lasx
// Sum of squared differences of a 16x16 block, in two 16x8 halves;
// the first half's 32-bit per-lane totals are kept in xr16.
// In:  a0 = pix1, a1 = stride_pix1, a2 = pix2, a3 = stride_pix2
// Out: a0 = SSD value
// Clobbers: t0-t5, xr0-xr19.
// t0/t3 = 2*stride, t1/t4 = 3*stride, t2/t5 = 4*stride (pix1/pix2).
slli.d t0, a1, 1
add.d t1, a1, t0
add.d t2, a1, t1
slli.d t3, a3, 1
add.d t4, a3, t3
add.d t5, a3, t4
// Load data from pix1 and pix2
// First 16x8 half: 8 rows of 16 bytes per picture.
LSX_LOADX_4 a0, a1, t0, t1, vr0, vr1, vr2, vr3
add.d a0, a0, t2
LSX_LOADX_4 a0, a1, t0, t1, vr4, vr5, vr6, vr7
LSX_LOADX_4 a2, a3, t3, t4, vr8, vr9, vr10, vr11
add.d a2, a2, t5
LSX_LOADX_4 a2, a3, t3, t4, vr12, vr13, vr14, vr15
// Zero-extend each 16-byte row to 16 halfwords.
vext2xv.hu.bu xr0, xr0
vext2xv.hu.bu xr1, xr1
vext2xv.hu.bu xr2, xr2
vext2xv.hu.bu xr3, xr3
vext2xv.hu.bu xr4, xr4
vext2xv.hu.bu xr5, xr5
vext2xv.hu.bu xr6, xr6
vext2xv.hu.bu xr7, xr7
vext2xv.hu.bu xr8, xr8
vext2xv.hu.bu xr9, xr9
vext2xv.hu.bu xr10, xr10
vext2xv.hu.bu xr11, xr11
vext2xv.hu.bu xr12, xr12
vext2xv.hu.bu xr13, xr13
vext2xv.hu.bu xr14, xr14
vext2xv.hu.bu xr15, xr15
// Calculate the square of the difference
xvsub.h xr0, xr0, xr8
xvsub.h xr1, xr1, xr9
xvsub.h xr2, xr2, xr10
xvsub.h xr3, xr3, xr11
xvsub.h xr4, xr4, xr12
xvsub.h xr5, xr5, xr13
xvsub.h xr6, xr6, xr14
xvsub.h xr7, xr7, xr15
xvmul.h xr0, xr0, xr0
xvmul.h xr1, xr1, xr1
xvmul.h xr2, xr2, xr2
xvmul.h xr3, xr3, xr3
xvmul.h xr4, xr4, xr4
xvmul.h xr5, xr5, xr5
xvmul.h xr6, xr6, xr6
xvmul.h xr7, xr7, xr7
// Pairwise-widen the halfword squares to 32-bit lanes and accumulate.
xvhaddw.wu.hu xr0, xr0, xr0
xvhaddw.wu.hu xr1, xr1, xr1
xvhaddw.wu.hu xr2, xr2, xr2
xvhaddw.wu.hu xr3, xr3, xr3
xvhaddw.wu.hu xr4, xr4, xr4
xvhaddw.wu.hu xr5, xr5, xr5
xvhaddw.wu.hu xr6, xr6, xr6
xvhaddw.wu.hu xr7, xr7, xr7
xvadd.w xr16, xr0, xr1
xvadd.w xr17, xr2, xr3
xvadd.w xr18, xr4, xr5
xvadd.w xr19, xr6, xr7
xvadd.w xr16, xr16, xr17
xvadd.w xr18, xr18, xr19
// xr16 = 32-bit per-lane totals of the first half.
xvadd.w xr16, xr16, xr18
// Load data from pix1 and pix2
// Second 16x8 half: identical pipeline.
add.d a0, a0, t2
LSX_LOADX_4 a0, a1, t0, t1, vr0, vr1, vr2, vr3
add.d a0, a0, t2
LSX_LOADX_4 a0, a1, t0, t1, vr4, vr5, vr6, vr7
add.d a2, a2, t5
LSX_LOADX_4 a2, a3, t3, t4, vr8, vr9, vr10, vr11
add.d a2, a2, t5
LSX_LOADX_4 a2, a3, t3, t4, vr12, vr13, vr14, vr15
vext2xv.hu.bu xr0, xr0
vext2xv.hu.bu xr1, xr1
vext2xv.hu.bu xr2, xr2
vext2xv.hu.bu xr3, xr3
vext2xv.hu.bu xr4, xr4
vext2xv.hu.bu xr5, xr5
vext2xv.hu.bu xr6, xr6
vext2xv.hu.bu xr7, xr7
vext2xv.hu.bu xr8, xr8
vext2xv.hu.bu xr9, xr9
vext2xv.hu.bu xr10, xr10
vext2xv.hu.bu xr11, xr11
vext2xv.hu.bu xr12, xr12
vext2xv.hu.bu xr13, xr13
vext2xv.hu.bu xr14, xr14
vext2xv.hu.bu xr15, xr15
// Calculate the square of the difference
xvsub.h xr0, xr0, xr8
xvsub.h xr1, xr1, xr9
xvsub.h xr2, xr2, xr10
xvsub.h xr3, xr3, xr11
xvsub.h xr4, xr4, xr12
xvsub.h xr5, xr5, xr13
xvsub.h xr6, xr6, xr14
xvsub.h xr7, xr7, xr15
xvmul.h xr0, xr0, xr0
xvmul.h xr1, xr1, xr1
xvmul.h xr2, xr2, xr2
xvmul.h xr3, xr3, xr3
xvmul.h xr4, xr4, xr4
xvmul.h xr5, xr5, xr5
xvmul.h xr6, xr6, xr6
xvmul.h xr7, xr7, xr7
xvhaddw.wu.hu xr0, xr0, xr0
xvhaddw.wu.hu xr1, xr1, xr1
xvhaddw.wu.hu xr2, xr2, xr2
xvhaddw.wu.hu xr3, xr3, xr3
xvhaddw.wu.hu xr4, xr4, xr4
xvhaddw.wu.hu xr5, xr5, xr5
xvhaddw.wu.hu xr6, xr6, xr6
xvhaddw.wu.hu xr7, xr7, xr7
xvadd.w xr0, xr0, xr1
xvadd.w xr2, xr2, xr3
xvadd.w xr4, xr4, xr5
xvadd.w xr6, xr6, xr7
xvadd.w xr0, xr0, xr2
xvadd.w xr4, xr4, xr6
xvadd.w xr0, xr0, xr4
// Merge both halves' 32-bit lane totals.
xvadd.w xr0, xr0, xr16
// Calculate the sum
// Widening reduction w->d->q, then combine the two 128-bit lane totals.
xvhaddw.d.w xr0, xr0, xr0
xvhaddw.q.d xr0, xr0, xr0
xvpickve2gr.w t2, xr0, 0
xvpickve2gr.w t3, xr0, 4
add.d a0, t2, t3
endfunc_x264
/*
* int pixel_ssd_16x8_lasx(const Pixel *pix1, intptr_t stride_pix1,
* const Pixel *pix2, intptr_t stride_pix2)
*/
function_x264 pixel_ssd_16x8_lasx
// Sum of squared differences of a 16x8 block.
// In:  a0 = pix1, a1 = stride_pix1, a2 = pix2, a3 = stride_pix2
// Out: a0 = SSD value
// Clobbers: t0-t5, xr0-xr15.
// t0/t3 = 2*stride, t1/t4 = 3*stride, t2/t5 = 4*stride (pix1/pix2).
slli.d t0, a1, 1
add.d t1, a1, t0
add.d t2, a1, t1
slli.d t3, a3, 1
add.d t4, a3, t3
add.d t5, a3, t4
// Load data from pix1 and pix2
// 8 rows of 16 bytes per picture.
LSX_LOADX_4 a0, a1, t0, t1, vr0, vr1, vr2, vr3
add.d a0, a0, t2
LSX_LOADX_4 a0, a1, t0, t1, vr4, vr5, vr6, vr7
LSX_LOADX_4 a2, a3, t3, t4, vr8, vr9, vr10, vr11
add.d a2, a2, t5
LSX_LOADX_4 a2, a3, t3, t4, vr12, vr13, vr14, vr15
// Zero-extend each 16-byte row to 16 halfwords.
vext2xv.hu.bu xr0, xr0
vext2xv.hu.bu xr1, xr1
vext2xv.hu.bu xr2, xr2
vext2xv.hu.bu xr3, xr3
vext2xv.hu.bu xr4, xr4
vext2xv.hu.bu xr5, xr5
vext2xv.hu.bu xr6, xr6
vext2xv.hu.bu xr7, xr7
vext2xv.hu.bu xr8, xr8
vext2xv.hu.bu xr9, xr9
vext2xv.hu.bu xr10, xr10
vext2xv.hu.bu xr11, xr11
vext2xv.hu.bu xr12, xr12
vext2xv.hu.bu xr13, xr13
vext2xv.hu.bu xr14, xr14
vext2xv.hu.bu xr15, xr15
// Calculate the square of the difference
xvsub.h xr0, xr0, xr8
xvsub.h xr1, xr1, xr9
xvsub.h xr2, xr2, xr10
xvsub.h xr3, xr3, xr11
xvsub.h xr4, xr4, xr12
xvsub.h xr5, xr5, xr13
xvsub.h xr6, xr6, xr14
xvsub.h xr7, xr7, xr15
xvmul.h xr0, xr0, xr0
xvmul.h xr1, xr1, xr1
xvmul.h xr2, xr2, xr2
xvmul.h xr3, xr3, xr3
xvmul.h xr4, xr4, xr4
xvmul.h xr5, xr5, xr5
xvmul.h xr6, xr6, xr6
xvmul.h xr7, xr7, xr7
// Pairwise-widen the halfword squares to 32-bit lanes and accumulate.
xvhaddw.wu.hu xr0, xr0, xr0
xvhaddw.wu.hu xr1, xr1, xr1
xvhaddw.wu.hu xr2, xr2, xr2
xvhaddw.wu.hu xr3, xr3, xr3
xvhaddw.wu.hu xr4, xr4, xr4
xvhaddw.wu.hu xr5, xr5, xr5
xvhaddw.wu.hu xr6, xr6, xr6
xvhaddw.wu.hu xr7, xr7, xr7
xvadd.w xr0, xr0, xr1
xvadd.w xr2, xr2, xr3
xvadd.w xr4, xr4, xr5
xvadd.w xr6, xr6, xr7
xvadd.w xr0, xr0, xr2
xvadd.w xr4, xr4, xr6
xvadd.w xr0, xr0, xr4
// Calculate the sum
// Widening reduction w->d->q, then combine the two 128-bit lane totals.
xvhaddw.d.w xr0, xr0, xr0
xvhaddw.q.d xr0, xr0, xr0
xvpickve2gr.w t2, xr0, 0
xvpickve2gr.w t3, xr0, 4
add.d a0, t2, t3
endfunc_x264
/*
* int pixel_ssd_8x16_lasx(const Pixel *pix1, intptr_t stride_pix1,
* const Pixel *pix2, intptr_t stride_pix2)
*/
function_x264 pixel_ssd_8x16_lasx
// Sum of squared differences of an 8x16 block, in two 8x8 halves;
// the first half's 32-bit per-lane totals are kept in xr16.
// In:  a0 = pix1, a1 = stride_pix1, a2 = pix2, a3 = stride_pix2
// Out: a0 = SSD value
// Clobbers: t0-t5, xr0-xr16.
// t0/t3 = 2*stride, t1/t4 = 3*stride, t2/t5 = 4*stride (pix1/pix2).
slli.d t0, a1, 1
add.d t1, a1, t0
add.d t2, a1, t1
slli.d t3, a3, 1
add.d t4, a3, t3
add.d t5, a3, t4
// Load data from pix1 and pix2
// First 8x8 half: 8 rows of 8 bytes per picture.
LSX_LOADX_4 a0, a1, t0, t1, vr0, vr1, vr2, vr3
add.d a0, a0, t2
LSX_LOADX_4 a0, a1, t0, t1, vr4, vr5, vr6, vr7
LSX_LOADX_4 a2, a3, t3, t4, vr8, vr9, vr10, vr11
add.d a2, a2, t5
LSX_LOADX_4 a2, a3, t3, t4, vr12, vr13, vr14, vr15
// Pair 8-byte rows into 128-bit registers, then widen to halfwords.
vilvl.d vr0, vr4, vr0
vilvl.d vr1, vr5, vr1
vilvl.d vr2, vr6, vr2
vilvl.d vr3, vr7, vr3
vilvl.d vr8, vr12, vr8
vilvl.d vr9, vr13, vr9
vilvl.d vr10, vr14, vr10
vilvl.d vr11, vr15, vr11
vext2xv.hu.bu xr0, xr0
vext2xv.hu.bu xr1, xr1
vext2xv.hu.bu xr2, xr2
vext2xv.hu.bu xr3, xr3
vext2xv.hu.bu xr8, xr8
vext2xv.hu.bu xr9, xr9
vext2xv.hu.bu xr10, xr10
vext2xv.hu.bu xr11, xr11
// Calculate the square of the difference
xvsub.h xr0, xr0, xr8
xvsub.h xr1, xr1, xr9
xvsub.h xr2, xr2, xr10
xvsub.h xr3, xr3, xr11
xvmul.h xr0, xr0, xr0
xvmul.h xr1, xr1, xr1
xvmul.h xr2, xr2, xr2
xvmul.h xr3, xr3, xr3
// Pairwise-widen the halfword squares to 32-bit lanes and accumulate.
xvhaddw.wu.hu xr0, xr0, xr0
xvhaddw.wu.hu xr1, xr1, xr1
xvhaddw.wu.hu xr2, xr2, xr2
xvhaddw.wu.hu xr3, xr3, xr3
xvadd.w xr0, xr0, xr1
xvadd.w xr2, xr2, xr3
// xr16 = 32-bit per-lane totals of the first half.
xvadd.w xr16, xr0, xr2
// Load data from pix1 and pix2
// Second 8x8 half: identical pipeline.
add.d a0, a0, t2
LSX_LOADX_4 a0, a1, t0, t1, vr0, vr1, vr2, vr3
add.d a0, a0, t2
LSX_LOADX_4 a0, a1, t0, t1, vr4, vr5, vr6, vr7
add.d a2, a2, t5
LSX_LOADX_4 a2, a3, t3, t4, vr8, vr9, vr10, vr11
add.d a2, a2, t5
LSX_LOADX_4 a2, a3, t3, t4, vr12, vr13, vr14, vr15
vilvl.d vr0, vr4, vr0
vilvl.d vr1, vr5, vr1
vilvl.d vr2, vr6, vr2
vilvl.d vr3, vr7, vr3
vilvl.d vr8, vr12, vr8
vilvl.d vr9, vr13, vr9
vilvl.d vr10, vr14, vr10
vilvl.d vr11, vr15, vr11
vext2xv.hu.bu xr0, xr0
vext2xv.hu.bu xr1, xr1
vext2xv.hu.bu xr2, xr2
vext2xv.hu.bu xr3, xr3
vext2xv.hu.bu xr8, xr8
vext2xv.hu.bu xr9, xr9
vext2xv.hu.bu xr10, xr10
vext2xv.hu.bu xr11, xr11
// Calculate the square of the difference
xvsub.h xr0, xr0, xr8
xvsub.h xr1, xr1, xr9
xvsub.h xr2, xr2, xr10
xvsub.h xr3, xr3, xr11
xvmul.h xr0, xr0, xr0
xvmul.h xr1, xr1, xr1
xvmul.h xr2, xr2, xr2
xvmul.h xr3, xr3, xr3
xvhaddw.wu.hu xr0, xr0, xr0
xvhaddw.wu.hu xr1, xr1, xr1
xvhaddw.wu.hu xr2, xr2, xr2
xvhaddw.wu.hu xr3, xr3, xr3
xvadd.w xr0, xr0, xr1
xvadd.w xr2, xr2, xr3
xvadd.w xr0, xr0, xr2
// Merge both halves' 32-bit lane totals.
xvadd.w xr0, xr0, xr16
// Calculate the sum
// Widening reduction w->d->q, then combine the two 128-bit lane totals.
xvhaddw.d.w xr0, xr0, xr0
xvhaddw.q.d xr0, xr0, xr0
xvpickve2gr.w t2, xr0, 0
xvpickve2gr.w t3, xr0, 4
add.d a0, t2, t3
endfunc_x264
/*
* int pixel_ssd_8x8_lasx(const Pixel *pix1, intptr_t stride_pix1,
* const Pixel *pix2, intptr_t stride_pix2)
*/
function_x264 pixel_ssd_8x8_lasx
// Sum of squared differences of an 8x8 block.
// In:  a0 = pix1, a1 = stride_pix1, a2 = pix2, a3 = stride_pix2
// Out: a0 = SSD value
// Clobbers: t0-t5, xr0-xr15.
// t0/t3 = 2*stride, t1/t4 = 3*stride, t2/t5 = 4*stride (pix1/pix2).
slli.d t0, a1, 1
add.d t1, a1, t0
add.d t2, a1, t1
slli.d t3, a3, 1
add.d t4, a3, t3
add.d t5, a3, t4
// Load data from pix1 and pix2
// 8 rows of 8 bytes per picture.
LSX_LOADX_4 a0, a1, t0, t1, vr0, vr1, vr2, vr3
add.d a0, a0, t2
LSX_LOADX_4 a0, a1, t0, t1, vr4, vr5, vr6, vr7
LSX_LOADX_4 a2, a3, t3, t4, vr8, vr9, vr10, vr11
add.d a2, a2, t5
LSX_LOADX_4 a2, a3, t3, t4, vr12, vr13, vr14, vr15
// Pair 8-byte rows into 128-bit registers, then widen to halfwords.
vilvl.d vr0, vr4, vr0
vilvl.d vr1, vr5, vr1
vilvl.d vr2, vr6, vr2
vilvl.d vr3, vr7, vr3
vilvl.d vr8, vr12, vr8
vilvl.d vr9, vr13, vr9
vilvl.d vr10, vr14, vr10
vilvl.d vr11, vr15, vr11
vext2xv.hu.bu xr0, xr0
vext2xv.hu.bu xr1, xr1
vext2xv.hu.bu xr2, xr2
vext2xv.hu.bu xr3, xr3
vext2xv.hu.bu xr8, xr8
vext2xv.hu.bu xr9, xr9
vext2xv.hu.bu xr10, xr10
vext2xv.hu.bu xr11, xr11
// Calculate the square of the difference
xvsub.h xr0, xr0, xr8
xvsub.h xr1, xr1, xr9
xvsub.h xr2, xr2, xr10
xvsub.h xr3, xr3, xr11
xvmul.h xr0, xr0, xr0
xvmul.h xr1, xr1, xr1
xvmul.h xr2, xr2, xr2
xvmul.h xr3, xr3, xr3
// Pairwise-widen the halfword squares to 32-bit lanes and accumulate.
xvhaddw.wu.hu xr0, xr0, xr0
xvhaddw.wu.hu xr1, xr1, xr1
xvhaddw.wu.hu xr2, xr2, xr2
xvhaddw.wu.hu xr3, xr3, xr3
xvadd.w xr0, xr0, xr1
xvadd.w xr2, xr2, xr3
xvadd.w xr0, xr0, xr2
// Calculate the sum
// Widening reduction w->d->q, then combine the two 128-bit lane totals.
xvhaddw.d.w xr0, xr0, xr0
xvhaddw.q.d xr0, xr0, xr0
xvpickve2gr.w t2, xr0, 0
xvpickve2gr.w t3, xr0, 4
add.d a0, t2, t3
endfunc_x264
/*
* int pixel_sa8d_16x16_lasx(const Pixel *pix1, intptr_t i_pix1,
* const Pixel *pix2, intptr_t i_pix2)
*/
function_x264 pixel_sa8d_16x16_lasx
addi.d sp, sp, -8
fst.d f24, sp, 0
slli.d t2, a1, 1
slli.d t3, a3, 1
add.d t4, a1, t2
add.d t5, a3, t3
slli.d t6, a1, 2
slli.d t7, a3, 2
slli.d t0, a1, 3
slli.d t1, a3, 3
// Load data from pix1 and pix2
FLDD_LOADX_4 a0, a1, t2, t4, f1, f2, f3, f4
FLDD_LOADX_4 a2, a3, t3, t5, f5, f6, f7, f8
vilvl.d vr1, vr2, vr1
vilvl.d vr3, vr4, vr3
vilvl.d vr5, vr6, vr5
vilvl.d vr7, vr8, vr7
xvpermi.q xr1, xr3, 0x02
xvpermi.q xr5, xr7, 0x02
xvsubwev.h.bu xr9, xr1, xr5
xvsubwod.h.bu xr10, xr1, xr5
xvadd.h xr11, xr9, xr10 /* a0 + a1 */
xvsub.h xr12, xr9, xr10 /* a0 - a1 */
xvpackev.h xr9, xr12, xr11
xvpackod.h xr10, xr12, xr11
xvadd.h xr11, xr9, xr10 /* HADAMARD4 */
xvsub.h xr12, xr9, xr10
xvpackev.w xr9, xr12, xr11
xvpackod.w xr10, xr12, xr11
xvadd.h xr11, xr9, xr10
xvsub.h xr12, xr9, xr10
xvpackev.d xr9, xr12, xr11
xvpackod.d xr10, xr12, xr11
xvadd.h xr11, xr9, xr10 /* HADAMARD4 */
xvsub.h xr12, xr9, xr10
xvor.v xr13, xr11, xr11
xvpermi.q xr11, xr12, 0x02
xvpermi.q xr13, xr12, 0x13
xvadd.h xr15, xr11, xr13
xvsub.h xr16, xr11, xr13
add.d a0, a0, t6
add.d a2, a2, t7
// Load data from pix1 and pix2
FLDD_LOADX_4 a0, a1, t2, t4, f1, f2, f3, f4
FLDD_LOADX_4 a2, a3, t3, t5, f5, f6, f7, f8
vilvl.d vr1, vr2, vr1
vilvl.d vr3, vr4, vr3
vilvl.d vr5, vr6, vr5
vilvl.d vr7, vr8, vr7
xvpermi.q xr1, xr3, 0x02
xvpermi.q xr5, xr7, 0x02
xvsubwev.h.bu xr9, xr1, xr5
xvsubwod.h.bu xr10, xr1, xr5
xvadd.h xr11, xr9, xr10 /* a0 + a1 */
xvsub.h xr12, xr9, xr10 /* a0 - a1 */
xvpackev.h xr9, xr12, xr11
xvpackod.h xr10, xr12, xr11
xvadd.h xr11, xr9, xr10 /* HADAMARD4 */
xvsub.h xr12, xr9, xr10
xvpackev.w xr9, xr12, xr11
xvpackod.w xr10, xr12, xr11
xvadd.h xr11, xr9, xr10
xvsub.h xr12, xr9, xr10
xvpackev.d xr9, xr12, xr11
xvpackod.d xr10, xr12, xr11
xvadd.h xr11, xr9, xr10 /* HADAMARD4 */
xvsub.h xr12, xr9, xr10
xvor.v xr13, xr11, xr11
xvpermi.q xr11, xr12, 0x02
xvpermi.q xr13, xr12, 0x13
xvadd.h xr9, xr11, xr13
xvsub.h xr10, xr11, xr13
xvadd.h xr17, xr15, xr9
xvadd.h xr18, xr16, xr10
xvsub.h xr19, xr15, xr9
xvsub.h xr20, xr16, xr10
xvadda.h xr17, xr17, xr18
xvadda.h xr19, xr19, xr20
xvadd.h xr21, xr17, xr19
add.d a0, a0, t6
add.d a2, a2, t7
// Load data from pix1 and pix2
FLDD_LOADX_4 a0, a1, t2, t4, f1, f2, f3, f4
FLDD_LOADX_4 a2, a3, t3, t5, f5, f6, f7, f8
vilvl.d vr1, vr2, vr1
vilvl.d vr3, vr4, vr3
vilvl.d vr5, vr6, vr5
vilvl.d vr7, vr8, vr7
xvpermi.q xr1, xr3, 0x02
xvpermi.q xr5, xr7, 0x02
xvsubwev.h.bu xr9, xr1, xr5
xvsubwod.h.bu xr10, xr1, xr5
xvadd.h xr11, xr9, xr10 /* a0 + a1 */
xvsub.h xr12, xr9, xr10 /* a0 - a1 */
xvpackev.h xr9, xr12, xr11
xvpackod.h xr10, xr12, xr11
xvadd.h xr11, xr9, xr10 /* HADAMARD4 */
xvsub.h xr12, xr9, xr10
xvpackev.w xr9, xr12, xr11
xvpackod.w xr10, xr12, xr11
xvadd.h xr11, xr9, xr10
xvsub.h xr12, xr9, xr10
xvpackev.d xr9, xr12, xr11
xvpackod.d xr10, xr12, xr11
xvadd.h xr11, xr9, xr10 /* HADAMARD4 */
xvsub.h xr12, xr9, xr10
xvor.v xr13, xr11, xr11
xvpermi.q xr11, xr12, 0x02
xvpermi.q xr13, xr12, 0x13
xvadd.h xr15, xr11, xr13
xvsub.h xr16, xr11, xr13
add.d a0, a0, t6
add.d a2, a2, t7
// Load data from pix1 and pix2
FLDD_LOADX_4 a0, a1, t2, t4, f1, f2, f3, f4
FLDD_LOADX_4 a2, a3, t3, t5, f5, f6, f7, f8
vilvl.d vr1, vr2, vr1
vilvl.d vr3, vr4, vr3
vilvl.d vr5, vr6, vr5
vilvl.d vr7, vr8, vr7
xvpermi.q xr1, xr3, 0x02
xvpermi.q xr5, xr7, 0x02
xvsubwev.h.bu xr9, xr1, xr5
xvsubwod.h.bu xr10, xr1, xr5
xvadd.h xr11, xr9, xr10 /* a0 + a1 */
xvsub.h xr12, xr9, xr10 /* a0 - a1 */
xvpackev.h xr9, xr12, xr11
xvpackod.h xr10, xr12, xr11
xvadd.h xr11, xr9, xr10 /* HADAMARD4 */
xvsub.h xr12, xr9, xr10
xvpackev.w xr9, xr12, xr11
xvpackod.w xr10, xr12, xr11
xvadd.h xr11, xr9, xr10
xvsub.h xr12, xr9, xr10
xvpackev.d xr9, xr12, xr11
xvpackod.d xr10, xr12, xr11
xvadd.h xr11, xr9, xr10 /* HADAMARD4 */
xvsub.h xr12, xr9, xr10
xvor.v xr13, xr11, xr11
xvpermi.q xr11, xr12, 0x02
xvpermi.q xr13, xr12, 0x13
xvadd.h xr9, xr11, xr13
xvsub.h xr10, xr11, xr13
xvadd.h xr17, xr15, xr9
xvadd.h xr18, xr16, xr10
xvsub.h xr19, xr15, xr9
xvsub.h xr20, xr16, xr10
xvadda.h xr17, xr17, xr18
xvadda.h xr19, xr19, xr20
xvadd.h xr22, xr17, xr19
sub.d a0, a0, t6
sub.d a2, a2, t7
addi.d a0, a0, 8
addi.d a2, a2, 8
// Load data from pix1 and pix2
FLDD_LOADX_4 a0, a1, t2, t4, f1, f2, f3, f4
FLDD_LOADX_4 a2, a3, t3, t5, f5, f6, f7, f8
vilvl.d vr1, vr2, vr1
vilvl.d vr3, vr4, vr3
vilvl.d vr5, vr6, vr5
vilvl.d vr7, vr8, vr7
xvpermi.q xr1, xr3, 0x02
xvpermi.q xr5, xr7, 0x02
xvsubwev.h.bu xr9, xr1, xr5
xvsubwod.h.bu xr10, xr1, xr5
xvadd.h xr11, xr9, xr10 /* a0 + a1 */
xvsub.h xr12, xr9, xr10 /* a0 - a1 */
xvpackev.h xr9, xr12, xr11
xvpackod.h xr10, xr12, xr11
xvadd.h xr11, xr9, xr10 /* HADAMARD4 */
xvsub.h xr12, xr9, xr10
xvpackev.w xr9, xr12, xr11
xvpackod.w xr10, xr12, xr11
xvadd.h xr11, xr9, xr10
xvsub.h xr12, xr9, xr10
xvpackev.d xr9, xr12, xr11
xvpackod.d xr10, xr12, xr11
xvadd.h xr11, xr9, xr10 /* HADAMARD4 */
xvsub.h xr12, xr9, xr10
xvor.v xr13, xr11, xr11
xvpermi.q xr11, xr12, 0x02
xvpermi.q xr13, xr12, 0x13
xvadd.h xr15, xr11, xr13
xvsub.h xr16, xr11, xr13
add.d a0, a0, t6
add.d a2, a2, t7
// Load data from pix1 and pix2
FLDD_LOADX_4 a0, a1, t2, t4, f1, f2, f3, f4
FLDD_LOADX_4 a2, a3, t3, t5, f5, f6, f7, f8
vilvl.d vr1, vr2, vr1
vilvl.d vr3, vr4, vr3
vilvl.d vr5, vr6, vr5
vilvl.d vr7, vr8, vr7
xvpermi.q xr1, xr3, 0x02
xvpermi.q xr5, xr7, 0x02
xvsubwev.h.bu xr9, xr1, xr5
xvsubwod.h.bu xr10, xr1, xr5
xvadd.h xr11, xr9, xr10 /* a0 + a1 */
xvsub.h xr12, xr9, xr10 /* a0 - a1 */
xvpackev.h xr9, xr12, xr11
xvpackod.h xr10, xr12, xr11
xvadd.h xr11, xr9, xr10 /* HADAMARD4 */
xvsub.h xr12, xr9, xr10
xvpackev.w xr9, xr12, xr11
xvpackod.w xr10, xr12, xr11
xvadd.h xr11, xr9, xr10
xvsub.h xr12, xr9, xr10
xvpackev.d xr9, xr12, xr11
xvpackod.d xr10, xr12, xr11
xvadd.h xr11, xr9, xr10 /* HADAMARD4 */
xvsub.h xr12, xr9, xr10
xvor.v xr13, xr11, xr11
xvpermi.q xr11, xr12, 0x02
xvpermi.q xr13, xr12, 0x13
xvadd.h xr9, xr11, xr13
xvsub.h xr10, xr11, xr13
xvadd.h xr17, xr15, xr9
xvadd.h xr18, xr16, xr10
xvsub.h xr19, xr15, xr9
xvsub.h xr20, xr16, xr10
xvadda.h xr17, xr17, xr18
xvadda.h xr19, xr19, xr20
xvadd.h xr23, xr17, xr19
sub.d a0, a0, t0
sub.d a2, a2, t1
sub.d a0, a0, t6
sub.d a2, a2, t7
// Load data from pix1 and pix2
FLDD_LOADX_4 a0, a1, t2, t4, f1, f2, f3, f4
FLDD_LOADX_4 a2, a3, t3, t5, f5, f6, f7, f8
vilvl.d vr1, vr2, vr1
vilvl.d vr3, vr4, vr3
vilvl.d vr5, vr6, vr5
vilvl.d vr7, vr8, vr7
xvpermi.q xr1, xr3, 0x02
xvpermi.q xr5, xr7, 0x02
xvsubwev.h.bu xr9, xr1, xr5
xvsubwod.h.bu xr10, xr1, xr5
xvadd.h xr11, xr9, xr10 /* a0 + a1 */
xvsub.h xr12, xr9, xr10 /* a0 - a1 */
xvpackev.h xr9, xr12, xr11
xvpackod.h xr10, xr12, xr11
xvadd.h xr11, xr9, xr10 /* HADAMARD4 */
xvsub.h xr12, xr9, xr10
xvpackev.w xr9, xr12, xr11
xvpackod.w xr10, xr12, xr11
xvadd.h xr11, xr9, xr10
xvsub.h xr12, xr9, xr10
xvpackev.d xr9, xr12, xr11
xvpackod.d xr10, xr12, xr11
xvadd.h xr11, xr9, xr10 /* HADAMARD4 */
xvsub.h xr12, xr9, xr10
xvor.v xr13, xr11, xr11
xvpermi.q xr11, xr12, 0x02
xvpermi.q xr13, xr12, 0x13
xvadd.h xr15, xr11, xr13
xvsub.h xr16, xr11, xr13
add.d a0, a0, t6
add.d a2, a2, t7
// Load data from pix1 and pix2
FLDD_LOADX_4 a0, a1, t2, t4, f1, f2, f3, f4
FLDD_LOADX_4 a2, a3, t3, t5, f5, f6, f7, f8
vilvl.d vr1, vr2, vr1
vilvl.d vr3, vr4, vr3
vilvl.d vr5, vr6, vr5
vilvl.d vr7, vr8, vr7
xvpermi.q xr1, xr3, 0x02
xvpermi.q xr5, xr7, 0x02
xvsubwev.h.bu xr9, xr1, xr5
xvsubwod.h.bu xr10, xr1, xr5
xvadd.h xr11, xr9, xr10 /* a0 + a1 */
xvsub.h xr12, xr9, xr10 /* a0 - a1 */
xvpackev.h xr9, xr12, xr11
xvpackod.h xr10, xr12, xr11
xvadd.h xr11, xr9, xr10 /* HADAMARD4 */
xvsub.h xr12, xr9, xr10
xvpackev.w xr9, xr12, xr11
xvpackod.w xr10, xr12, xr11
xvadd.h xr11, xr9, xr10
xvsub.h xr12, xr9, xr10
xvpackev.d xr9, xr12, xr11
xvpackod.d xr10, xr12, xr11
xvadd.h xr11, xr9, xr10 /* HADAMARD4 */
xvsub.h xr12, xr9, xr10
xvor.v xr13, xr11, xr11
xvpermi.q xr11, xr12, 0x02
xvpermi.q xr13, xr12, 0x13
xvadd.h xr9, xr11, xr13
xvsub.h xr10, xr11, xr13
xvadd.h xr17, xr15, xr9
xvadd.h xr18, xr16, xr10
xvsub.h xr19, xr15, xr9
xvsub.h xr20, xr16, xr10
xvadda.h xr17, xr17, xr18
xvadda.h xr19, xr19, xr20
xvadd.h xr24, xr17, xr19
xvadd.h xr21, xr21, xr22
xvadd.h xr23, xr23, xr24
xvhaddw.wu.hu xr21, xr21, xr21
xvhaddw.wu.hu xr23, xr23, xr23
xvadd.w xr21, xr21, xr23
xvhaddw.du.wu xr21, xr21, xr21
xvhaddw.qu.du xr21, xr21, xr21
xvpickve2gr.du t4, xr21, 0
xvpickve2gr.du t5, xr21, 2
add.d t4, t4, t5
addi.d t4, t4, 2
srli.d a0, t4, 2
fld.d f24, sp, 0
addi.d sp, sp, 8
endfunc_x264
/*
* int pixel_sa8d_8x8_lasx(const Pixel *pix1, intptr_t i_pix1,
* const Pixel *pix2, intptr_t i_pix2)
*/
function_x264 pixel_sa8d_8x8_lasx
// SA8D (sum of absolute 8x8 Hadamard-transformed differences) of one 8x8
// block of 8-bit pixels, LASX version.
// In:  a0 = pix1, a1 = i_pix1 (stride), a2 = pix2, a3 = i_pix2 (stride)
// Out: a0 = (sum + 2) >> 2
// Clobbers t2-t7, f1-f8 / vr1-vr8, xr9-xr20 (no callee-saved state touched).
slli.d t2, a1, 1
slli.d t3, a3, 1
add.d t4, a1, t2
add.d t5, a3, t3
slli.d t6, a1, 2
slli.d t7, a3, 2
// t2/t3 = 2*stride, t4/t5 = 3*stride, t6/t7 = 4*stride for the two planes.
// Load data from pix1 and pix2 (rows 0-3, 8 bytes each)
FLDD_LOADX_4 a0, a1, t2, t4, f1, f2, f3, f4
FLDD_LOADX_4 a2, a3, t3, t5, f5, f6, f7, f8
// Pack the four 8-byte rows of each plane into one 256-bit register.
vilvl.d vr1, vr2, vr1
vilvl.d vr3, vr4, vr3
vilvl.d vr5, vr6, vr5
vilvl.d vr7, vr8, vr7
xvpermi.q xr1, xr3, 0x02
xvpermi.q xr5, xr7, 0x02
// Widen to 16-bit and take pix1 - pix2 (even/odd byte lanes separately).
xvsubwev.h.bu xr9, xr1, xr5
xvsubwod.h.bu xr10, xr1, xr5
xvadd.h xr11, xr9, xr10 /* a0 + a1 */
xvsub.h xr12, xr9, xr10 /* a0 - a1 */
xvpackev.h xr9, xr12, xr11
xvpackod.h xr10, xr12, xr11
xvadd.h xr11, xr9, xr10 /* HADAMARD4 */
xvsub.h xr12, xr9, xr10
xvpackev.w xr9, xr12, xr11
xvpackod.w xr10, xr12, xr11
xvadd.h xr11, xr9, xr10
xvsub.h xr12, xr9, xr10
xvpackev.d xr9, xr12, xr11
xvpackod.d xr10, xr12, xr11
xvadd.h xr11, xr9, xr10 /* HADAMARD4 */
xvsub.h xr12, xr9, xr10
xvor.v xr13, xr11, xr11
xvor.v xr14, xr12, xr12
xvpermi.q xr11, xr12, 0x02
xvpermi.q xr13, xr14, 0x13
// xr15/xr16 hold the partially transformed top half (rows 0-3).
xvadd.h xr15, xr11, xr13
xvsub.h xr16, xr11, xr13
// Advance both planes by 4 rows and repeat for rows 4-7.
add.d a0, a0, t6
add.d a2, a2, t7
// Load data from pix1 and pix2
FLDD_LOADX_4 a0, a1, t2, t4, f1, f2, f3, f4
FLDD_LOADX_4 a2, a3, t3, t5, f5, f6, f7, f8
vilvl.d vr1, vr2, vr1
vilvl.d vr3, vr4, vr3
vilvl.d vr5, vr6, vr5
vilvl.d vr7, vr8, vr7
xvpermi.q xr1, xr3, 0x02
xvpermi.q xr5, xr7, 0x02
xvsubwev.h.bu xr9, xr1, xr5
xvsubwod.h.bu xr10, xr1, xr5
xvadd.h xr11, xr9, xr10 /* a0 + a1 */
xvsub.h xr12, xr9, xr10 /* a0 - a1 */
xvpackev.h xr9, xr12, xr11
xvpackod.h xr10, xr12, xr11
xvadd.h xr11, xr9, xr10 /* HADAMARD4 */
xvsub.h xr12, xr9, xr10
xvpackev.w xr9, xr12, xr11
xvpackod.w xr10, xr12, xr11
xvadd.h xr11, xr9, xr10
xvsub.h xr12, xr9, xr10
xvpackev.d xr9, xr12, xr11
xvpackod.d xr10, xr12, xr11
xvadd.h xr11, xr9, xr10 /* HADAMARD4 */
xvsub.h xr12, xr9, xr10
xvor.v xr13, xr11, xr11
xvor.v xr14, xr12, xr12
xvpermi.q xr11, xr12, 0x02
xvpermi.q xr13, xr14, 0x13
xvadd.h xr9, xr11, xr13
xvsub.h xr10, xr11, xr13
// Final vertical butterfly combining top (xr15/xr16) and bottom (xr9/xr10)
// halves, then accumulate absolute values.
xvadd.h xr17, xr15, xr9
xvadd.h xr18, xr16, xr10
xvsub.h xr19, xr15, xr9
xvsub.h xr20, xr16, xr10
xvadda.h xr17, xr17, xr18
xvadda.h xr19, xr19, xr20
xvadd.h xr17, xr17, xr19
// Horizontal reduction of the 16 halfword partial sums to one scalar.
xvhaddw.wu.hu xr17, xr17, xr17
xvhaddw.du.wu xr17, xr17, xr17
xvhaddw.qu.du xr17, xr17, xr17
xvpickve2gr.wu t4, xr17, 0
xvpickve2gr.wu t5, xr17, 4
add.d t4, t4, t5
// Round: result = (sum + 2) >> 2.
addi.d t4, t4, 2
srli.d a0, t4, 2
endfunc_x264
.macro sse_diff_8width_lasx in0, in1
// Process four 8-pixel rows: \in0 walks an FENC_STRIDE-spaced buffer,
// \in1 an FDEC_STRIDE-spaced buffer (both 8-bit pixels).
// Accumulators (caller must zero them before the first use):
//   xr8 += per-lane sums of squared differences (32-bit lanes, via dot product)
//   xr9 += per-lane signed differences          (16-bit lanes)
fld.d f0, \in0, 0
fld.d f1, \in0, FENC_STRIDE
fld.d f2, \in0, FENC_STRIDE * 2
fld.d f3, \in0, FENC_STRIDE * 3
fld.d f4, \in1, 0
fld.d f5, \in1, FDEC_STRIDE
fld.d f6, \in1, FDEC_STRIDE * 2
fld.d f7, \in1, FDEC_STRIDE * 3
// Merge the four 8-byte rows of each source into one 256-bit register.
vilvl.d vr0, vr1, vr0
vilvl.d vr1, vr3, vr2
vilvl.d vr4, vr5, vr4
vilvl.d vr5, vr7, vr6
xvpermi.q xr1, xr0, 0x20
xvpermi.q xr5, xr4, 0x20
// Interleave the two sources byte-wise, then widen and subtract
// (pix1 - pix2) within each halfword pair.
xvilvl.b xr2, xr5, xr1
xvilvh.b xr6, xr5, xr1
xvhsubw.hu.bu xr3, xr2, xr2
xvhsubw.hu.bu xr4, xr6, xr6
// xr8 += diff*diff (dot-product accumulate); xr9 += diff.
xvdp2add.w.h xr8, xr3, xr3
xvdp2add.w.h xr8, xr4, xr4
xvadd.h xr9, xr9, xr3
xvadd.h xr9, xr9, xr4
.endm
/*
* int32_t x264_pixel_var2_8x16_lasx( uint8_t *p_pix1, uint8_t *p_pix2,
* int32_t ssd[2] )
*/
function_x264 pixel_var2_8x16_lasx
// Variance of the difference of two interleaved 8x16 plane halves.
// In:  a0 = p_pix1 (FENC_STRIDE-spaced), a1 = p_pix2 (FDEC_STRIDE-spaced),
//      a2 = int32_t ssd[2] (output array)
// Out: a0 = var0 + var1, where var = ssd - (sum*sum >> 7)  (128 pixels/half)
//      ssd[0]/ssd[1] also stored to memory.
add.d t0, a0, zero
add.d t1, a1, zero
// Zero the SSD (xr8) and sum (xr9) accumulators for the first half.
xvxor.v xr8, xr8, xr8
xvxor.v xr9, xr9, xr9
sse_diff_8width_lasx a0, a1
addi.d a0, a0, FENC_STRIDE * 4
addi.d a1, a1, FDEC_STRIDE * 4
sse_diff_8width_lasx a0, a1
addi.d a0, a0, FENC_STRIDE * 4
addi.d a1, a1, FDEC_STRIDE * 4
sse_diff_8width_lasx a0, a1
addi.d a0, a0, FENC_STRIDE * 4
addi.d a1, a1, FDEC_STRIDE * 4
sse_diff_8width_lasx a0, a1
// Reduce the signed-difference accumulator to t2 = sum of diffs.
xvhaddw.w.h xr9, xr9, xr9
xvhaddw.d.w xr9, xr9, xr9
xvhaddw.q.d xr9, xr9, xr9
xvpickve2gr.wu t2, xr9, 0
xvpickve2gr.wu t3, xr9, 4
add.w t2, t2, t3
// Reduce the squared-difference accumulator to t3 = ssd[0].
xvhaddw.d.w xr8, xr8, xr8
xvhaddw.q.d xr8, xr8, xr8
xvpickve2gr.wu t3, xr8, 0
xvpickve2gr.wu t4, xr8, 4
add.w t3, t4, t3
st.w t3, a2, 0
// var0 = ssd - sum^2 / 128
mul.w t2, t2, t2
srai.w t2, t2, 7
sub.w t3, t3, t2
// Second half: same computation starting half a row in.
xvxor.v xr8, xr8, xr8
xvxor.v xr9, xr9, xr9
addi.d a0, t0, FENC_STRIDE / 2
addi.d a1, t1, FDEC_STRIDE / 2
sse_diff_8width_lasx a0, a1
addi.d a0, a0, FENC_STRIDE * 4
addi.d a1, a1, FDEC_STRIDE * 4
sse_diff_8width_lasx a0, a1
addi.d a0, a0, FENC_STRIDE * 4
addi.d a1, a1, FDEC_STRIDE * 4
sse_diff_8width_lasx a0, a1
addi.d a0, a0, FENC_STRIDE * 4
addi.d a1, a1, FDEC_STRIDE * 4
sse_diff_8width_lasx a0, a1
xvhaddw.w.h xr9, xr9, xr9
xvhaddw.d.w xr9, xr9, xr9
xvhaddw.q.d xr9, xr9, xr9
xvpickve2gr.wu t4, xr9, 0
xvpickve2gr.wu t5, xr9, 4
add.w t4, t4, t5
xvhaddw.d.w xr8, xr8, xr8
xvhaddw.q.d xr8, xr8, xr8
xvpickve2gr.wu t5, xr8, 0
xvpickve2gr.wu t6, xr8, 4
add.w t5, t6, t5
st.w t5, a2, 4
mul.w t4, t4, t4
srai.w t4, t4, 7
sub.w t5, t5, t4
// Return var0 + var1.
add.w a0, t3, t5
endfunc_x264
/*
* int32_t x264_pixel_var2_8x8_lasx( uint8_t *p_pix1, uint8_t *p_pix2,
* int32_t ssd[2] )
*/
function_x264 pixel_var2_8x8_lasx
// Variance of the difference of two interleaved 8x8 plane halves.
// In:  a0 = p_pix1 (FENC_STRIDE-spaced), a1 = p_pix2 (FDEC_STRIDE-spaced),
//      a2 = int32_t ssd[2] (output array)
// Out: a0 = var0 + var1, where var = ssd - (sum*sum >> 6)  (64 pixels/half)
add.d t0, a0, zero
add.d t1, a1, zero
// Zero the SSD (xr8) and sum (xr9) accumulators for the first half.
xvxor.v xr8, xr8, xr8
xvxor.v xr9, xr9, xr9
sse_diff_8width_lasx a0, a1
addi.d a0, a0, FENC_STRIDE * 4
addi.d a1, a1, FDEC_STRIDE * 4
sse_diff_8width_lasx a0, a1
// Reduce sum accumulator -> t2.
xvhaddw.w.h xr9, xr9, xr9
xvhaddw.d.w xr9, xr9, xr9
xvhaddw.q.d xr9, xr9, xr9
xvpickve2gr.wu t2, xr9, 0
xvpickve2gr.wu t3, xr9, 4
add.w t2, t2, t3
// Reduce SSD accumulator -> t3 = ssd[0].
xvhaddw.d.w xr8, xr8, xr8
xvhaddw.q.d xr8, xr8, xr8
xvpickve2gr.wu t3, xr8, 0
xvpickve2gr.wu t4, xr8, 4
add.w t3, t4, t3
st.w t3, a2, 0
// var0 = ssd - sum^2 / 64
mul.w t2, t2, t2
srai.w t2, t2, 6
sub.w t3, t3, t2
// Second half: restart half a row in.
xvxor.v xr8, xr8, xr8
xvxor.v xr9, xr9, xr9
addi.d a0, t0, FENC_STRIDE / 2
addi.d a1, t1, FDEC_STRIDE / 2
sse_diff_8width_lasx a0, a1
addi.d a0, a0, FENC_STRIDE * 4
addi.d a1, a1, FDEC_STRIDE * 4
sse_diff_8width_lasx a0, a1
xvhaddw.w.h xr9, xr9, xr9
xvhaddw.d.w xr9, xr9, xr9
xvhaddw.q.d xr9, xr9, xr9
xvpickve2gr.wu t4, xr9, 0
xvpickve2gr.wu t5, xr9, 4
add.w t4, t4, t5
xvhaddw.d.w xr8, xr8, xr8
xvhaddw.q.d xr8, xr8, xr8
xvpickve2gr.wu t5, xr8, 0
xvpickve2gr.wu t6, xr8, 4
add.w t5, t6, t5
st.w t5, a2, 4
mul.w t4, t4, t4
srai.w t4, t4, 6
sub.w t5, t5, t4
// Return var0 + var1.
add.w a0, t3, t5
endfunc_x264
/*
* uint64_t x264_pixel_hadamard_ac_8x8( pixel *pix, intptr_t stride )
*/
function_x264 hadamard_ac_8x8_lsx
// Hadamard AC energy of an 8x8 block of 8-bit pixels, LSX version.
// In:  a0 = pix, a1 = stride
// Out: a0 = ((sum8 - dc) << 32) | (sum4 - dc), packed into one 64-bit value
//      (sum4 from the 4x4 stage, sum8 from the full 8x8 stage, dc removed).
slli.d t0, a1, 1
add.d t1, t0, a1
// Load rows 0-3, then advance 4 rows and load rows 4-7 (8 bytes each).
FLDD_LOADX_4 a0, a1, t0, t1, f0, f1, f2, f3
alsl.d a0, a1, a0, 2
FLDD_LOADX_4 a0, a1, t0, t1, f4, f5, f6, f7
vilvl.d vr0, vr1, vr0
vilvl.d vr1, vr3, vr2
vilvl.d vr4, vr5, vr4
vilvl.d vr5, vr7, vr6
// --- 4x4 Hadamard of the top half (rows 0-3) ---
vpickev.b vr2, vr1, vr0
vpickod.b vr3, vr1, vr0
vaddwev.h.bu vr6, vr2, vr3
vaddwod.h.bu vr7, vr2, vr3
vsubwev.h.bu vr8, vr2, vr3
vsubwod.h.bu vr9, vr2, vr3
vadd.h vr10, vr6, vr7
vadd.h vr11, vr8, vr9
vsub.h vr12, vr6, vr7
vsub.h vr13, vr8, vr9
// Transpose 16-bit lanes, then 32-bit lanes, between butterfly stages.
vilvl.h vr6, vr11, vr10
vilvh.h vr7, vr11, vr10
vilvl.h vr8, vr13, vr12
vilvh.h vr9, vr13, vr12
vilvl.w vr10, vr8, vr6
vilvh.w vr11, vr8, vr6
vilvl.w vr12, vr9, vr7
vilvh.w vr13, vr9, vr7
vadd.h vr6, vr10, vr11
vadd.h vr7, vr12, vr13
vsub.h vr8, vr10, vr11
vsub.h vr9, vr12, vr13
// vr10-vr13: transformed top half.
vadd.h vr10, vr6, vr7
vadd.h vr11, vr8, vr9
vsub.h vr12, vr6, vr7
vsub.h vr13, vr8, vr9
// --- 4x4 Hadamard of the bottom half (rows 4-7), same schedule ---
vpickev.b vr2, vr5, vr4
vpickod.b vr3, vr5, vr4
vaddwev.h.bu vr6, vr2, vr3
vaddwod.h.bu vr7, vr2, vr3
vsubwev.h.bu vr8, vr2, vr3
vsubwod.h.bu vr9, vr2, vr3
vadd.h vr14, vr6, vr7
vadd.h vr15, vr8, vr9
vsub.h vr16, vr6, vr7
vsub.h vr17, vr8, vr9
vilvl.h vr6, vr15, vr14
vilvh.h vr7, vr15, vr14
vilvl.h vr8, vr17, vr16
vilvh.h vr9, vr17, vr16
vilvl.w vr14, vr8, vr6
vilvh.w vr15, vr8, vr6
vilvl.w vr16, vr9, vr7
vilvh.w vr17, vr9, vr7
vadd.h vr6, vr14, vr15
vadd.h vr7, vr16, vr17
vsub.h vr8, vr14, vr15
vsub.h vr9, vr16, vr17
// vr14-vr17: transformed bottom half.
vadd.h vr14, vr6, vr7
vadd.h vr15, vr8, vr9
vsub.h vr16, vr6, vr7
vsub.h vr17, vr8, vr9
// DC term: sum of the two 4x4 DC coefficients.
vadd.h vr18, vr10, vr14
vpickve2gr.hu t0, vr18, 0
vpickve2gr.hu t1, vr18, 4
add.d t1, t0, t1 // dc
// sum4: total absolute energy of the eight 4x4 transforms.
vadda.h vr4, vr11, vr10
vadda.h vr5, vr13, vr12
vadda.h vr6, vr15, vr14
vadda.h vr7, vr17, vr16
vadd.h vr4, vr5, vr4
vadd.h vr6, vr7, vr6
vadd.h vr4, vr4, vr6
vhaddw.wu.hu vr4, vr4, vr4
vhaddw.du.wu vr4, vr4, vr4
vhaddw.qu.du vr4, vr4, vr4
vpickve2gr.wu t0, vr4, 0 // sum4
// --- Extend to 8x8: combine the 4x4 results with two more butterfly
// stages (horizontal across halves, then vertical) ---
vpackev.h vr0, vr11, vr10
vpackev.h vr1, vr13, vr12
vpackev.h vr2, vr15, vr14
vpackev.h vr3, vr17, vr16
vpackod.h vr4, vr11, vr10
vpackod.h vr5, vr13, vr12
vpackod.h vr6, vr15, vr14
vpackod.h vr7, vr17, vr16
vilvl.d vr10, vr1, vr0
vilvh.d vr11, vr1, vr0
vilvl.d vr12, vr3, vr2
vilvh.d vr13, vr3, vr2
vilvl.d vr14, vr5, vr4
vilvh.d vr15, vr5, vr4
vilvl.d vr16, vr7, vr6
vilvh.d vr17, vr7, vr6
vadd.h vr0, vr10, vr11
vadd.h vr1, vr12, vr13
vadd.h vr2, vr14, vr16
vadd.h vr3, vr15, vr17
vsub.h vr4, vr10, vr11
vsub.h vr5, vr12, vr13
vsub.h vr6, vr14, vr16
vsub.h vr7, vr15, vr17
vadd.h vr10, vr0, vr1
vadd.h vr11, vr2, vr3
vadd.h vr12, vr4, vr5
vadd.h vr13, vr6, vr7
vsub.h vr14, vr0, vr1
vsub.h vr15, vr2, vr3
vsub.h vr16, vr4, vr5
vsub.h vr17, vr6, vr7
// sum8: absolute energy of the full 8x8 transform.
vadda.h vr10, vr10, vr11
vadda.h vr11, vr12, vr13
vadda.h vr12, vr14, vr15
vadda.h vr13, vr16, vr17
vadd.h vr10, vr10, vr11
vadd.h vr11, vr12, vr13
vadd.h vr10, vr10, vr11
vhaddw.wu.hu vr10, vr10, vr10
vhaddw.du.wu vr10, vr10, vr10
vhaddw.qu.du vr10, vr10, vr10
vpickve2gr.wu t2, vr10, 0 // sum8
// Remove the DC contribution from both sums and pack the two results:
// high 32 bits = sum8 - dc, low 32 bits = sum4 - dc.
sub.d t0, t0, t1
sub.d t2, t2, t1
slli.d t2, t2, 32
add.d a0, t2, t0
endfunc_x264
/*
* int x264_pixel_satd_4x8( pixel *pix1, intptr_t i_pix1,
* pixel *pix2, intptr_t i_pix2 )
*/
function_x264 pixel_satd_4x8_lsx
// SATD of a 4x8 block (two stacked 4x4 SATDs), LSX version.
// In:  a0 = pix1, a1 = i_pix1, a2 = pix2, a3 = i_pix2
// Out: a0 = satd sum >> 1
slli.d t2, a1, 1
slli.d t3, a3, 1
add.d t4, a1, t2
add.d t5, a3, t3
// Load data from pix1 and pix2 (rows 0-3, 4 bytes each)
FLDS_LOADX_4 a0, a1, t2, t4, f1, f2, f3, f4
FLDS_LOADX_4 a2, a3, t3, t5, f5, f6, f7, f8
pixel_satd_4x4_lsx_core vr13
// Advance both planes by 4 rows and do the second 4x4.
alsl.d a0, a1, a0, 2
alsl.d a2, a3, a2, 2
FLDS_LOADX_4 a0, a1, t2, t4, f1, f2, f3, f4
FLDS_LOADX_4 a2, a3, t3, t5, f5, f6, f7, f8
pixel_satd_4x4_lsx_core vr14
// Combine both partial results and reduce to a scalar.
vadd.h vr13, vr14, vr13
vhaddw.wu.hu vr13, vr13, vr13
vhaddw.du.wu vr13, vr13, vr13
vhaddw.qu.du vr13, vr13, vr13
vpickve2gr.wu t5, vr13, 0
srli.d a0, t5, 1
endfunc_x264
/*
* int x264_pixel_satd_4x16( uint8_t *p_pix1, intptr_t i_stride,
* uint8_t *p_pix2, intptr_t i_stride2 )
*/
function_x264 pixel_satd_4x16_lsx
// SATD of a 4x16 block (four stacked 4x4 SATDs), LSX version.
// In:  a0 = p_pix1, a1 = i_stride, a2 = p_pix2, a3 = i_stride2
// Out: a0 = satd sum >> 1
slli.d t2, a1, 1
slli.d t3, a3, 1
add.d t4, a1, t2
add.d t5, a3, t3
// Load data from pix1 and pix2; one pixel_satd_4x4_lsx_core call per
// 4-row strip, partial sums kept in vr13..vr16.
FLDS_LOADX_4 a0, a1, t2, t4, f1, f2, f3, f4
FLDS_LOADX_4 a2, a3, t3, t5, f5, f6, f7, f8
pixel_satd_4x4_lsx_core vr13
alsl.d a0, a1, a0, 2
alsl.d a2, a3, a2, 2
FLDS_LOADX_4 a0, a1, t2, t4, f1, f2, f3, f4
FLDS_LOADX_4 a2, a3, t3, t5, f5, f6, f7, f8
pixel_satd_4x4_lsx_core vr14
alsl.d a0, a1, a0, 2
alsl.d a2, a3, a2, 2
FLDS_LOADX_4 a0, a1, t2, t4, f1, f2, f3, f4
FLDS_LOADX_4 a2, a3, t3, t5, f5, f6, f7, f8
pixel_satd_4x4_lsx_core vr15
alsl.d a0, a1, a0, 2
alsl.d a2, a3, a2, 2
FLDS_LOADX_4 a0, a1, t2, t4, f1, f2, f3, f4
FLDS_LOADX_4 a2, a3, t3, t5, f5, f6, f7, f8
pixel_satd_4x4_lsx_core vr16
// Sum the four partials and reduce to a scalar.
vadd.h vr13, vr14, vr13
vadd.h vr15, vr16, vr15
vadd.h vr13, vr15, vr13
vhaddw.wu.hu vr13, vr13, vr13
vhaddw.du.wu vr13, vr13, vr13
vhaddw.qu.du vr13, vr13, vr13
vpickve2gr.wu t5, vr13, 0
srli.d a0, t5, 1
endfunc_x264
.macro pixel_satd_8x4_lsx_core out0, out1, out2, out3
// 8x4 SATD core: expects four 8-byte rows of pix1 in f0-f3/vr0-vr3 and
// four 8-byte rows of pix2 in f4-f7/vr4-vr7 (already loaded by the caller).
// Performs the 4x4 Hadamard butterflies on the widened differences and
// leaves four 16-bit partial-result vectors in \out0..\out3; the caller
// takes absolute values (vadda.h) and reduces.
// Pack rows pairwise into 128-bit registers.
vilvl.d vr0, vr1, vr0
vilvl.d vr1, vr3, vr2
vilvl.d vr2, vr5, vr4
vilvl.d vr3, vr7, vr6
// Widen to 16-bit differences (even/odd byte lanes).
vsubwev.h.bu vr4, vr0, vr2
vsubwod.h.bu vr5, vr0, vr2
vsubwev.h.bu vr6, vr1, vr3
vsubwod.h.bu vr7, vr1, vr3
// Stage 1 butterflies.
vadd.h vr0, vr4, vr5
vsub.h vr1, vr4, vr5
vadd.h vr2, vr6, vr7
vsub.h vr3, vr6, vr7
// Transpose at 16-bit granularity, stage 2.
vpackev.h vr4, vr1, vr0
vpackod.h vr5, vr1, vr0
vpackev.h vr6, vr3, vr2
vpackod.h vr7, vr3, vr2
vadd.h vr8, vr4, vr5
vsub.h vr9, vr4, vr5
vadd.h vr10, vr6, vr7
vsub.h vr11, vr6, vr7
// Transpose at 64-bit granularity, stage 3.
vilvl.d vr4, vr9, vr8
vilvh.d vr5, vr9, vr8
vilvl.d vr6, vr11, vr10
vilvh.d vr7, vr11, vr10
vadd.h vr8, vr4, vr5
vsub.h vr9, vr4, vr5
vadd.h vr10, vr6, vr7
vsub.h vr11, vr6, vr7
// Final cross-combination into the caller's output registers.
vadd.h \out0, vr8, vr10
vsub.h \out1, vr8, vr10
vadd.h \out2, vr9, vr11
vsub.h \out3, vr9, vr11
.endm
/*
* int x264_pixel_satd_8x4( uint8_t *p_pix1, intptr_t i_stride,
* uint8_t *p_pix2, intptr_t i_stride2 )
*/
function_x264 pixel_satd_8x4_lsx
// SATD of an 8x4 block, LSX version.
// In:  a0 = p_pix1, a1 = i_stride, a2 = p_pix2, a3 = i_stride2
// Out: a0 = satd sum >> 1
slli.d t0, a1, 1
add.d t1, t0, a1
slli.d t2, a3, 1
add.d t3, t2, a3
// Load 4 rows of each plane and run the shared 8x4 core.
FLDD_LOADX_4 a0, a1, t0, t1, f0, f1, f2, f3
FLDD_LOADX_4 a2, a3, t2, t3, f4, f5, f6, f7
pixel_satd_8x4_lsx_core vr12, vr13, vr14, vr15
// Accumulate absolute values of the four outputs and reduce.
vadda.h vr12, vr13, vr12
vadda.h vr13, vr15, vr14
vadd.h vr12, vr13, vr12
vhaddw.wu.hu vr12, vr12, vr12
vhaddw.du.wu vr12, vr12, vr12
vhaddw.qu.du vr12, vr12, vr12
vpickve2gr.wu t4, vr12, 0
srli.d a0, t4, 1
endfunc_x264
/*
* int x264_pixel_satd_8x8( uint8_t *p_pix1, intptr_t i_stride,
* uint8_t *p_pix2, intptr_t i_stride2 )
*/
function_x264 pixel_satd_8x8_lsx
// SATD of an 8x8 block (two stacked 8x4 SATDs), LSX version.
// In:  a0 = p_pix1, a1 = i_stride, a2 = p_pix2, a3 = i_stride2
// Out: a0 = satd sum >> 1
slli.d t0, a1, 1
add.d t1, t0, a1
slli.d t2, a3, 1
add.d t3, t2, a3
// First 8x4 strip.
FLDD_LOADX_4 a0, a1, t0, t1, f0, f1, f2, f3
FLDD_LOADX_4 a2, a3, t2, t3, f4, f5, f6, f7
pixel_satd_8x4_lsx_core vr12, vr13, vr14, vr15
vadda.h vr12, vr13, vr12
vadda.h vr13, vr15, vr14
vadd.h vr12, vr13, vr12
// Advance 4 rows; second 8x4 strip.
alsl.d a0, a1, a0, 2
alsl.d a2, a3, a2, 2
FLDD_LOADX_4 a0, a1, t0, t1, f0, f1, f2, f3
FLDD_LOADX_4 a2, a3, t2, t3, f4, f5, f6, f7
pixel_satd_8x4_lsx_core vr13, vr14, vr15, vr16
vadda.h vr13, vr14, vr13
vadda.h vr14, vr16, vr15
vadd.h vr13, vr14, vr13
// Combine both strips and reduce to a scalar.
vadd.h vr12, vr13, vr12
vhaddw.wu.hu vr12, vr12, vr12
vhaddw.du.wu vr12, vr12, vr12
vhaddw.qu.du vr12, vr12, vr12
vpickve2gr.wu t4, vr12, 0
srli.d a0, t4, 1
endfunc_x264
/*
* int x264_pixel_satd_8x16( uint8_t *p_pix1, intptr_t i_stride,
* uint8_t *p_pix2, intptr_t i_stride2 )
*/
function_x264 pixel_satd_8x16_lsx
// SATD of an 8x16 block (four stacked 8x4 SATDs), LSX version.
// In:  a0 = p_pix1, a1 = i_stride, a2 = p_pix2, a3 = i_stride2
// Out: a0 = satd sum >> 1
slli.d t0, a1, 1
add.d t1, t0, a1
slli.d t2, a3, 1
add.d t3, t2, a3
// Strip 0 (rows 0-3): partial sum in vr12.
FLDD_LOADX_4 a0, a1, t0, t1, f0, f1, f2, f3
FLDD_LOADX_4 a2, a3, t2, t3, f4, f5, f6, f7
pixel_satd_8x4_lsx_core vr12, vr13, vr14, vr15
vadda.h vr12, vr13, vr12
vadda.h vr13, vr15, vr14
vadd.h vr12, vr13, vr12
// Strip 1 (rows 4-7): partial sum in vr13.
alsl.d a0, a1, a0, 2
alsl.d a2, a3, a2, 2
FLDD_LOADX_4 a0, a1, t0, t1, f0, f1, f2, f3
FLDD_LOADX_4 a2, a3, t2, t3, f4, f5, f6, f7
pixel_satd_8x4_lsx_core vr13, vr14, vr15, vr16
vadda.h vr13, vr14, vr13
vadda.h vr14, vr16, vr15
vadd.h vr13, vr14, vr13
// Strip 2 (rows 8-11): partial sum in vr14.
alsl.d a0, a1, a0, 2
alsl.d a2, a3, a2, 2
FLDD_LOADX_4 a0, a1, t0, t1, f0, f1, f2, f3
FLDD_LOADX_4 a2, a3, t2, t3, f4, f5, f6, f7
pixel_satd_8x4_lsx_core vr14, vr15, vr16, vr17
vadda.h vr14, vr15, vr14
vadda.h vr15, vr17, vr16
vadd.h vr14, vr15, vr14
// Strip 3 (rows 12-15): partial sum in vr15.
alsl.d a0, a1, a0, 2
alsl.d a2, a3, a2, 2
FLDD_LOADX_4 a0, a1, t0, t1, f0, f1, f2, f3
FLDD_LOADX_4 a2, a3, t2, t3, f4, f5, f6, f7
pixel_satd_8x4_lsx_core vr15, vr16, vr17, vr18
vadda.h vr15, vr16, vr15
vadda.h vr16, vr18, vr17
vadd.h vr15, vr16, vr15
// Combine all four strips and reduce to a scalar.
vadd.h vr12, vr12, vr13
vadd.h vr14, vr14, vr15
vadd.h vr12, vr12, vr14
vhaddw.wu.hu vr12, vr12, vr12
vhaddw.du.wu vr12, vr12, vr12
vhaddw.qu.du vr12, vr12, vr12
vpickve2gr.wu t4, vr12, 0
srli.d a0, t4, 1
endfunc_x264
/*
* int x264_pixel_satd_16x8( uint8_t *p_pix1, intptr_t i_stride,
* uint8_t *p_pix2, intptr_t i_stride2 )
*/
function_x264 pixel_satd_16x8_lsx
// SATD of a 16x8 block, LSX version: four 8x4 sub-blocks in a 2x2 grid
// (left/right via +8 byte offset, top/bottom via +4 rows).
// In:  a0 = p_pix1, a1 = i_stride, a2 = p_pix2, a3 = i_stride2
// Out: a0 = satd sum >> 1
slli.d t0, a1, 1
add.d t1, t0, a1
slli.d t2, a3, 1
add.d t3, t2, a3
// Top-left 8x4: partial sum in vr12.
FLDD_LOADX_4 a0, a1, t0, t1, f0, f1, f2, f3
FLDD_LOADX_4 a2, a3, t2, t3, f4, f5, f6, f7
pixel_satd_8x4_lsx_core vr12, vr13, vr14, vr15
vadda.h vr12, vr13, vr12
vadda.h vr13, vr15, vr14
vadd.h vr12, vr13, vr12
// Top-right 8x4 (columns 8-15): partial sum in vr13.
addi.d t5, a0, 8
addi.d t6, a2, 8
FLDD_LOADX_4 t5, a1, t0, t1, f0, f1, f2, f3
FLDD_LOADX_4 t6, a3, t2, t3, f4, f5, f6, f7
pixel_satd_8x4_lsx_core vr13, vr14, vr15, vr16
vadda.h vr13, vr14, vr13
vadda.h vr14, vr16, vr15
vadd.h vr13, vr14, vr13
// Bottom-left 8x4 (rows 4-7): partial sum in vr14.
alsl.d a0, a1, a0, 2
alsl.d a2, a3, a2, 2
FLDD_LOADX_4 a0, a1, t0, t1, f0, f1, f2, f3
FLDD_LOADX_4 a2, a3, t2, t3, f4, f5, f6, f7
pixel_satd_8x4_lsx_core vr14, vr15, vr16, vr17
vadda.h vr14, vr15, vr14
vadda.h vr15, vr17, vr16
vadd.h vr14, vr15, vr14
// Bottom-right 8x4: partial sum in vr15.
addi.d t5, a0, 8
addi.d t6, a2, 8
FLDD_LOADX_4 t5, a1, t0, t1, f0, f1, f2, f3
FLDD_LOADX_4 t6, a3, t2, t3, f4, f5, f6, f7
pixel_satd_8x4_lsx_core vr15, vr16, vr17, vr18
vadda.h vr15, vr16, vr15
vadda.h vr16, vr18, vr17
vadd.h vr15, vr16, vr15
// Combine the four quadrants and reduce to a scalar.
vadd.h vr12, vr13, vr12
vadd.h vr14, vr15, vr14
vadd.h vr12, vr14, vr12
vhaddw.wu.hu vr12, vr12, vr12
vhaddw.du.wu vr12, vr12, vr12
vhaddw.qu.du vr12, vr12, vr12
vpickve2gr.wu t4, vr12, 0
srli.d a0, t4, 1
endfunc_x264
/*
* int x264_pixel_satd_16x16( uint8_t *p_pix1, intptr_t i_stride,
* uint8_t *p_pix2, intptr_t i_stride2 )
*/
function_x264 pixel_satd_16x16_lsx
// SATD of a 16x16 block, LSX version: eight 8x4 sub-blocks, processed as
// two 16x8 halves; the first half's total is parked in vr19.
// In:  a0 = p_pix1, a1 = i_stride, a2 = p_pix2, a3 = i_stride2
// Out: a0 = satd sum >> 1
slli.d t0, a1, 1
add.d t1, t0, a1
slli.d t2, a3, 1
add.d t3, t2, a3
// --- Top 16x8 half ---
FLDD_LOADX_4 a0, a1, t0, t1, f0, f1, f2, f3
FLDD_LOADX_4 a2, a3, t2, t3, f4, f5, f6, f7
pixel_satd_8x4_lsx_core vr12, vr13, vr14, vr15
vadda.h vr12, vr13, vr12
vadda.h vr13, vr15, vr14
vadd.h vr12, vr13, vr12
// Right 8 columns of the same 4 rows.
addi.d t5, a0, 8
addi.d t6, a2, 8
FLDD_LOADX_4 t5, a1, t0, t1, f0, f1, f2, f3
FLDD_LOADX_4 t6, a3, t2, t3, f4, f5, f6, f7
pixel_satd_8x4_lsx_core vr13, vr14, vr15, vr16
vadda.h vr13, vr14, vr13
vadda.h vr14, vr16, vr15
vadd.h vr13, vr14, vr13
// Next 4 rows, left then right.
alsl.d a0, a1, a0, 2
alsl.d a2, a3, a2, 2
FLDD_LOADX_4 a0, a1, t0, t1, f0, f1, f2, f3
FLDD_LOADX_4 a2, a3, t2, t3, f4, f5, f6, f7
pixel_satd_8x4_lsx_core vr14, vr15, vr16, vr17
vadda.h vr14, vr15, vr14
vadda.h vr15, vr17, vr16
vadd.h vr14, vr15, vr14
addi.d t5, a0, 8
addi.d t6, a2, 8
FLDD_LOADX_4 t5, a1, t0, t1, f0, f1, f2, f3
FLDD_LOADX_4 t6, a3, t2, t3, f4, f5, f6, f7
pixel_satd_8x4_lsx_core vr15, vr16, vr17, vr18
vadda.h vr15, vr16, vr15
vadda.h vr16, vr18, vr17
vadd.h vr15, vr16, vr15
// Park the top half's running sum in vr19.
vadd.h vr12, vr13, vr12
vadd.h vr14, vr15, vr14
vadd.h vr19, vr14, vr12
// --- Bottom 16x8 half (same schedule) ---
alsl.d a0, a1, a0, 2
alsl.d a2, a3, a2, 2
FLDD_LOADX_4 a0, a1, t0, t1, f0, f1, f2, f3
FLDD_LOADX_4 a2, a3, t2, t3, f4, f5, f6, f7
pixel_satd_8x4_lsx_core vr12, vr13, vr14, vr15
vadda.h vr12, vr13, vr12
vadda.h vr13, vr15, vr14
vadd.h vr12, vr13, vr12
addi.d t5, a0, 8
addi.d t6, a2, 8
FLDD_LOADX_4 t5, a1, t0, t1, f0, f1, f2, f3
FLDD_LOADX_4 t6, a3, t2, t3, f4, f5, f6, f7
pixel_satd_8x4_lsx_core vr13, vr14, vr15, vr16
vadda.h vr13, vr14, vr13
vadda.h vr14, vr16, vr15
vadd.h vr13, vr14, vr13
alsl.d a0, a1, a0, 2
alsl.d a2, a3, a2, 2
FLDD_LOADX_4 a0, a1, t0, t1, f0, f1, f2, f3
FLDD_LOADX_4 a2, a3, t2, t3, f4, f5, f6, f7
pixel_satd_8x4_lsx_core vr14, vr15, vr16, vr17
vadda.h vr14, vr15, vr14
vadda.h vr15, vr17, vr16
vadd.h vr14, vr15, vr14
addi.d t5, a0, 8
addi.d t6, a2, 8
FLDD_LOADX_4 t5, a1, t0, t1, f0, f1, f2, f3
FLDD_LOADX_4 t6, a3, t2, t3, f4, f5, f6, f7
pixel_satd_8x4_lsx_core vr15, vr16, vr17, vr18
vadda.h vr15, vr16, vr15
vadda.h vr16, vr18, vr17
vadd.h vr15, vr16, vr15
// Combine both halves (vr19 = top) and reduce to a scalar.
vadd.h vr12, vr13, vr12
vadd.h vr14, vr15, vr14
vadd.h vr12, vr14, vr12
vadd.h vr12, vr19, vr12
vhaddw.wu.hu vr12, vr12, vr12
vhaddw.du.wu vr12, vr12, vr12
vhaddw.qu.du vr12, vr12, vr12
vpickve2gr.wu t4, vr12, 0
srli.d a0, t4, 1
endfunc_x264
/*
* int x264_pixel_ssd_4x4( pixel *pix1, intptr_t i_stride_pix1,
* pixel *pix2, intptr_t i_stride_pix2 )
*/
function_x264 pixel_ssd_4x4_lsx
// Sum of squared differences of a 4x4 block of 8-bit pixels, LSX version.
// In:  a0 = pix1, a1 = i_stride_pix1, a2 = pix2, a3 = i_stride_pix2
// Out: a0 = SSD
slli.d t0, a1, 1
add.d t1, a1, t0
slli.d t2, a3, 1
add.d t3, a3, t2
// Load 4 rows of 4 bytes from each plane.
FLDS_LOADX_4 a0, a1, t0, t1, f0, f1, f2, f3
FLDS_LOADX_4 a2, a3, t2, t3, f4, f5, f6, f7
// Gather the 16 bytes of each plane into one vector.
vilvl.w vr0, vr1, vr0
vilvl.w vr1, vr3, vr2
vilvl.w vr4, vr5, vr4
vilvl.w vr5, vr7, vr6
vilvl.d vr0, vr1, vr0
vilvl.d vr4, vr5, vr4
// Widen to 16-bit diffs (even/odd lanes), square, and reduce.
vsubwev.h.bu vr1, vr0, vr4
vsubwod.h.bu vr2, vr0, vr4
vmul.h vr5, vr1, vr1
vmul.h vr6, vr2, vr2
vhaddw.wu.hu vr5, vr5, vr5
vhaddw.wu.hu vr6, vr6, vr6
vadd.w vr5, vr5, vr6
vhaddw.d.w vr5, vr5, vr5
vhaddw.q.d vr5, vr5, vr5
vpickve2gr.w a0, vr5, 0
endfunc_x264
/*
* int x264_pixel_ssd_4x8( pixel *pix1, intptr_t i_stride_pix1,
* pixel *pix2, intptr_t i_stride_pix2 )
*/
function_x264 pixel_ssd_4x8_lsx
// Sum of squared differences of a 4x8 block (two stacked 4x4s), LSX version.
// In:  a0 = pix1, a1 = i_stride_pix1, a2 = pix2, a3 = i_stride_pix2
// Out: a0 = SSD
slli.d t0, a1, 1
add.d t1, a1, t0
slli.d t2, a3, 1
add.d t3, a3, t2
// First 4x4: per-lane squared sums accumulate into vr10.
FLDS_LOADX_4 a0, a1, t0, t1, f0, f1, f2, f3
FLDS_LOADX_4 a2, a3, t2, t3, f4, f5, f6, f7
vilvl.w vr0, vr1, vr0
vilvl.w vr1, vr3, vr2
vilvl.w vr4, vr5, vr4
vilvl.w vr5, vr7, vr6
vilvl.d vr0, vr1, vr0
vilvl.d vr4, vr5, vr4
vsubwev.h.bu vr1, vr0, vr4
vsubwod.h.bu vr2, vr0, vr4
vmul.h vr5, vr1, vr1
vmul.h vr6, vr2, vr2
vhaddw.wu.hu vr5, vr5, vr5
vhaddw.wu.hu vr6, vr6, vr6
vadd.w vr10, vr5, vr6
// Second 4x4, 4 rows further down.
alsl.d a0, a1, a0, 2
alsl.d a2, a3, a2, 2
FLDS_LOADX_4 a0, a1, t0, t1, f0, f1, f2, f3
FLDS_LOADX_4 a2, a3, t2, t3, f4, f5, f6, f7
vilvl.w vr0, vr1, vr0
vilvl.w vr1, vr3, vr2
vilvl.w vr4, vr5, vr4
vilvl.w vr5, vr7, vr6
vilvl.d vr0, vr1, vr0
vilvl.d vr4, vr5, vr4
vsubwev.h.bu vr1, vr0, vr4
vsubwod.h.bu vr2, vr0, vr4
vmul.h vr5, vr1, vr1
vmul.h vr6, vr2, vr2
vhaddw.wu.hu vr5, vr5, vr5
vhaddw.wu.hu vr6, vr6, vr6
vadd.w vr5, vr5, vr6
// Combine both halves and reduce to a scalar.
vadd.w vr5, vr5, vr10
vhaddw.d.w vr5, vr5, vr5
vhaddw.q.d vr5, vr5, vr5
vpickve2gr.w a0, vr5, 0
endfunc_x264
/*
* int x264_pixel_ssd_4x16( pixel *pix1, intptr_t i_stride_pix1,
* pixel *pix2, intptr_t i_stride_pix2 )
*/
function_x264 pixel_ssd_4x16_lsx
// Sum of squared differences of a 4x16 block (four stacked 4x4s), LSX version.
// In:  a0 = pix1, a1 = i_stride_pix1, a2 = pix2, a3 = i_stride_pix2
// Out: a0 = SSD
slli.d t0, a1, 1
add.d t1, a1, t0
slli.d t2, a3, 1
add.d t3, a3, t2
// First 4x4: initialize per-lane accumulator vr10.
FLDS_LOADX_4 a0, a1, t0, t1, f0, f1, f2, f3
FLDS_LOADX_4 a2, a3, t2, t3, f4, f5, f6, f7
vilvl.w vr0, vr1, vr0
vilvl.w vr1, vr3, vr2
vilvl.w vr4, vr5, vr4
vilvl.w vr5, vr7, vr6
vilvl.d vr0, vr1, vr0
vilvl.d vr4, vr5, vr4
vsubwev.h.bu vr1, vr0, vr4
vsubwod.h.bu vr2, vr0, vr4
vmul.h vr5, vr1, vr1
vmul.h vr6, vr2, vr2
vhaddw.wu.hu vr5, vr5, vr5
vhaddw.wu.hu vr6, vr6, vr6
vadd.w vr10, vr5, vr6
// Remaining three 4x4 strips, 4 rows apart, accumulated into vr10.
.rept 3
alsl.d a0, a1, a0, 2
alsl.d a2, a3, a2, 2
FLDS_LOADX_4 a0, a1, t0, t1, f0, f1, f2, f3
FLDS_LOADX_4 a2, a3, t2, t3, f4, f5, f6, f7
vilvl.w vr0, vr1, vr0
vilvl.w vr1, vr3, vr2
vilvl.w vr4, vr5, vr4
vilvl.w vr5, vr7, vr6
vilvl.d vr0, vr1, vr0
vilvl.d vr4, vr5, vr4
vsubwev.h.bu vr1, vr0, vr4
vsubwod.h.bu vr2, vr0, vr4
vmul.h vr5, vr1, vr1
vmul.h vr6, vr2, vr2
vhaddw.wu.hu vr5, vr5, vr5
vhaddw.wu.hu vr6, vr6, vr6
vadd.w vr5, vr5, vr6
vadd.w vr10, vr5, vr10
.endr
// Final horizontal reduction to a scalar.
vhaddw.d.w vr10, vr10, vr10
vhaddw.q.d vr10, vr10, vr10
vpickve2gr.w a0, vr10, 0
endfunc_x264
/*
* int x264_pixel_ssd_8x4( pixel *pix1, intptr_t i_stride_pix1,
* pixel *pix2, intptr_t i_stride_pix2 )
*/
function_x264 pixel_ssd_8x4_lsx
// Sum of squared differences of an 8x4 block of 8-bit pixels, LSX version.
// In:  a0 = pix1, a1 = i_stride_pix1, a2 = pix2, a3 = i_stride_pix2
// Out: a0 = SSD
slli.d t0, a1, 1
add.d t1, a1, t0
slli.d t2, a3, 1
add.d t3, a3, t2
// Load 4 rows of 8 bytes from each plane; pack rows pairwise.
FLDD_LOADX_4 a0, a1, t0, t1, f0, f1, f2, f3
FLDD_LOADX_4 a2, a3, t2, t3, f4, f5, f6, f7
vilvl.d vr0, vr1, vr0
vilvl.d vr1, vr3, vr2
vilvl.d vr4, vr5, vr4
vilvl.d vr5, vr7, vr6
// Widen to 16-bit diffs (even/odd lanes of each row pair).
vsubwev.h.bu vr2, vr0, vr4
vsubwod.h.bu vr3, vr0, vr4
vsubwev.h.bu vr6, vr1, vr5
vsubwod.h.bu vr7, vr1, vr5
// Square, widen to 32-bit, and reduce.
vmul.h vr2, vr2, vr2
vmul.h vr3, vr3, vr3
vmul.h vr6, vr6, vr6
vmul.h vr7, vr7, vr7
vhaddw.wu.hu vr2, vr2, vr2
vhaddw.wu.hu vr3, vr3, vr3
vhaddw.wu.hu vr6, vr6, vr6
vhaddw.wu.hu vr7, vr7, vr7
vadd.w vr2, vr2, vr3
vadd.w vr6, vr6, vr7
vadd.w vr2, vr2, vr6
vhaddw.d.w vr2, vr2, vr2
vhaddw.q.d vr2, vr2, vr2
vpickve2gr.w a0, vr2, 0
endfunc_x264
/*
* int x264_pixel_ssd_8x8( pixel *pix1, intptr_t i_stride_pix1,
* pixel *pix2, intptr_t i_stride_pix2 )
*/
function_x264 pixel_ssd_8x8_lsx
// Sum of squared differences of an 8x8 block (two stacked 8x4s), LSX version.
// In:  a0 = pix1, a1 = i_stride_pix1, a2 = pix2, a3 = i_stride_pix2
// Out: a0 = SSD
slli.d t0, a1, 1
add.d t1, a1, t0
slli.d t2, a3, 1
add.d t3, a3, t2
// First 8x4 strip: per-lane squared sums into vr10.
FLDD_LOADX_4 a0, a1, t0, t1, f0, f1, f2, f3
FLDD_LOADX_4 a2, a3, t2, t3, f4, f5, f6, f7
vilvl.d vr0, vr1, vr0
vilvl.d vr1, vr3, vr2
vilvl.d vr4, vr5, vr4
vilvl.d vr5, vr7, vr6
vsubwev.h.bu vr2, vr0, vr4
vsubwod.h.bu vr3, vr0, vr4
vsubwev.h.bu vr6, vr1, vr5
vsubwod.h.bu vr7, vr1, vr5
vmul.h vr2, vr2, vr2
vmul.h vr3, vr3, vr3
vmul.h vr6, vr6, vr6
vmul.h vr7, vr7, vr7
vhaddw.wu.hu vr2, vr2, vr2
vhaddw.wu.hu vr3, vr3, vr3
vhaddw.wu.hu vr6, vr6, vr6
vhaddw.wu.hu vr7, vr7, vr7
vadd.w vr2, vr2, vr3
vadd.w vr6, vr6, vr7
vadd.w vr10, vr2, vr6
// Second 8x4 strip, 4 rows further down: sums into vr11.
alsl.d a0, a1, a0, 2
alsl.d a2, a3, a2, 2
FLDD_LOADX_4 a0, a1, t0, t1, f0, f1, f2, f3
FLDD_LOADX_4 a2, a3, t2, t3, f4, f5, f6, f7
vilvl.d vr0, vr1, vr0
vilvl.d vr1, vr3, vr2
vilvl.d vr4, vr5, vr4
vilvl.d vr5, vr7, vr6
vsubwev.h.bu vr2, vr0, vr4
vsubwod.h.bu vr3, vr0, vr4
vsubwev.h.bu vr6, vr1, vr5
vsubwod.h.bu vr7, vr1, vr5
vmul.h vr2, vr2, vr2
vmul.h vr3, vr3, vr3
vmul.h vr6, vr6, vr6
vmul.h vr7, vr7, vr7
vhaddw.wu.hu vr2, vr2, vr2
vhaddw.wu.hu vr3, vr3, vr3
vhaddw.wu.hu vr6, vr6, vr6
vhaddw.wu.hu vr7, vr7, vr7
vadd.w vr2, vr2, vr3
vadd.w vr6, vr6, vr7
vadd.w vr11, vr2, vr6
// Combine both strips and reduce to a scalar.
vadd.w vr10, vr10, vr11
vhaddw.d.w vr10, vr10, vr10
vhaddw.q.d vr10, vr10, vr10
vpickve2gr.w a0, vr10, 0
endfunc_x264
/*
* int x264_pixel_ssd_8x16( pixel *pix1, intptr_t i_stride_pix1,
* pixel *pix2, intptr_t i_stride_pix2 )
*/
function_x264 pixel_ssd_8x16_lsx
// Sum of squared differences of an 8x16 block (four stacked 8x4s), LSX version.
// In:  a0 = pix1, a1 = i_stride_pix1, a2 = pix2, a3 = i_stride_pix2
// Out: a0 = SSD
slli.d t0, a1, 1
add.d t1, a1, t0
slli.d t2, a3, 1
add.d t3, a3, t2
// First 8x4 strip: initialize per-lane accumulator vr10.
FLDD_LOADX_4 a0, a1, t0, t1, f0, f1, f2, f3
FLDD_LOADX_4 a2, a3, t2, t3, f4, f5, f6, f7
vilvl.d vr0, vr1, vr0
vilvl.d vr1, vr3, vr2
vilvl.d vr4, vr5, vr4
vilvl.d vr5, vr7, vr6
vsubwev.h.bu vr2, vr0, vr4
vsubwod.h.bu vr3, vr0, vr4
vsubwev.h.bu vr6, vr1, vr5
vsubwod.h.bu vr7, vr1, vr5
vmul.h vr2, vr2, vr2
vmul.h vr3, vr3, vr3
vmul.h vr6, vr6, vr6
vmul.h vr7, vr7, vr7
vhaddw.wu.hu vr2, vr2, vr2
vhaddw.wu.hu vr3, vr3, vr3
vhaddw.wu.hu vr6, vr6, vr6
vhaddw.wu.hu vr7, vr7, vr7
vadd.w vr2, vr2, vr3
vadd.w vr6, vr6, vr7
vadd.w vr10, vr2, vr6
// Remaining three 8x4 strips, 4 rows apart, accumulated into vr10.
.rept 3
alsl.d a0, a1, a0, 2
alsl.d a2, a3, a2, 2
FLDD_LOADX_4 a0, a1, t0, t1, f0, f1, f2, f3
FLDD_LOADX_4 a2, a3, t2, t3, f4, f5, f6, f7
vilvl.d vr0, vr1, vr0
vilvl.d vr1, vr3, vr2
vilvl.d vr4, vr5, vr4
vilvl.d vr5, vr7, vr6
vsubwev.h.bu vr2, vr0, vr4
vsubwod.h.bu vr3, vr0, vr4
vsubwev.h.bu vr6, vr1, vr5
vsubwod.h.bu vr7, vr1, vr5
vmul.h vr2, vr2, vr2
vmul.h vr3, vr3, vr3
vmul.h vr6, vr6, vr6
vmul.h vr7, vr7, vr7
vhaddw.wu.hu vr2, vr2, vr2
vhaddw.wu.hu vr3, vr3, vr3
vhaddw.wu.hu vr6, vr6, vr6
vhaddw.wu.hu vr7, vr7, vr7
vadd.w vr2, vr2, vr3
vadd.w vr6, vr6, vr7
vadd.w vr11, vr2, vr6
vadd.w vr10, vr10, vr11
.endr
// Final horizontal reduction to a scalar.
vhaddw.d.w vr10, vr10, vr10
vhaddw.q.d vr10, vr10, vr10
vpickve2gr.w a0, vr10, 0
endfunc_x264
/*
* int x264_pixel_ssd_16x8( pixel *pix1, intptr_t i_stride_pix1,
* pixel *pix2, intptr_t i_stride_pix2 )
*/
function_x264 pixel_ssd_16x8_lsx
// Sum of squared differences of a 16x8 block (two stacked 16x4s), LSX version.
// In:  a0 = pix1, a1 = i_stride_pix1, a2 = pix2, a3 = i_stride_pix2
// Out: a0 = SSD
slli.d t0, a1, 1
add.d t1, a1, t0
slli.d t2, a3, 1
add.d t3, a3, t2
// First 16x4 strip: full 16-byte rows, per-lane squared sums into vr16.
LSX_LOADX_4 a0, a1, t0, t1, vr0, vr1, vr2, vr3
LSX_LOADX_4 a2, a3, t2, t3, vr4, vr5, vr6, vr7
vsubwev.h.bu vr8, vr0, vr4
vsubwod.h.bu vr9, vr0, vr4
vsubwev.h.bu vr10, vr1, vr5
vsubwod.h.bu vr11, vr1, vr5
vsubwev.h.bu vr12, vr2, vr6
vsubwod.h.bu vr13, vr2, vr6
vsubwev.h.bu vr14, vr3, vr7
vsubwod.h.bu vr15, vr3, vr7
vmul.h vr8, vr8, vr8
vmul.h vr9, vr9, vr9
vmul.h vr10, vr10, vr10
vmul.h vr11, vr11, vr11
vmul.h vr12, vr12, vr12
vmul.h vr13, vr13, vr13
vmul.h vr14, vr14, vr14
vmul.h vr15, vr15, vr15
vhaddw.wu.hu vr8, vr8, vr8
vhaddw.wu.hu vr9, vr9, vr9
vhaddw.wu.hu vr10, vr10, vr10
vhaddw.wu.hu vr11, vr11, vr11
vhaddw.wu.hu vr12, vr12, vr12
vhaddw.wu.hu vr13, vr13, vr13
vhaddw.wu.hu vr14, vr14, vr14
vhaddw.wu.hu vr15, vr15, vr15
vadd.w vr8, vr8, vr9
vadd.w vr9, vr10, vr11
vadd.w vr10, vr12, vr13
vadd.w vr11, vr14, vr15
vadd.w vr8, vr8, vr9
vadd.w vr9, vr10, vr11
vadd.w vr16, vr8, vr9
// Second 16x4 strip, 4 rows further down: sums into vr17.
alsl.d a0, a1, a0, 2
alsl.d a2, a3, a2, 2
LSX_LOADX_4 a0, a1, t0, t1, vr0, vr1, vr2, vr3
LSX_LOADX_4 a2, a3, t2, t3, vr4, vr5, vr6, vr7
vsubwev.h.bu vr8, vr0, vr4
vsubwod.h.bu vr9, vr0, vr4
vsubwev.h.bu vr10, vr1, vr5
vsubwod.h.bu vr11, vr1, vr5
vsubwev.h.bu vr12, vr2, vr6
vsubwod.h.bu vr13, vr2, vr6
vsubwev.h.bu vr14, vr3, vr7
vsubwod.h.bu vr15, vr3, vr7
vmul.h vr8, vr8, vr8
vmul.h vr9, vr9, vr9
vmul.h vr10, vr10, vr10
vmul.h vr11, vr11, vr11
vmul.h vr12, vr12, vr12
vmul.h vr13, vr13, vr13
vmul.h vr14, vr14, vr14
vmul.h vr15, vr15, vr15
vhaddw.wu.hu vr8, vr8, vr8
vhaddw.wu.hu vr9, vr9, vr9
vhaddw.wu.hu vr10, vr10, vr10
vhaddw.wu.hu vr11, vr11, vr11
vhaddw.wu.hu vr12, vr12, vr12
vhaddw.wu.hu vr13, vr13, vr13
vhaddw.wu.hu vr14, vr14, vr14
vhaddw.wu.hu vr15, vr15, vr15
vadd.w vr8, vr8, vr9
vadd.w vr9, vr10, vr11
vadd.w vr10, vr12, vr13
vadd.w vr11, vr14, vr15
vadd.w vr8, vr8, vr9
vadd.w vr9, vr10, vr11
vadd.w vr17, vr8, vr9
// Combine both strips and reduce to a scalar.
vadd.w vr10, vr16, vr17
vhaddw.d.w vr10, vr10, vr10
vhaddw.q.d vr10, vr10, vr10
vpickve2gr.w a0, vr10, 0
endfunc_x264
/*
* int x264_pixel_ssd_16x16( pixel *pix1, intptr_t i_stride_pix1,
* pixel *pix2, intptr_t i_stride_pix2 )
*/
function_x264 pixel_ssd_16x16_lsx
// Sum of squared differences of a 16x16 block (four stacked 16x4s), LSX version.
// In:  a0 = pix1, a1 = i_stride_pix1, a2 = pix2, a3 = i_stride_pix2
// Out: a0 = SSD
slli.d t0, a1, 1
add.d t1, a1, t0
slli.d t2, a3, 1
add.d t3, a3, t2
// First 16x4 strip: initialize per-lane accumulator vr16.
LSX_LOADX_4 a0, a1, t0, t1, vr0, vr1, vr2, vr3
LSX_LOADX_4 a2, a3, t2, t3, vr4, vr5, vr6, vr7
vsubwev.h.bu vr8, vr0, vr4
vsubwod.h.bu vr9, vr0, vr4
vsubwev.h.bu vr10, vr1, vr5
vsubwod.h.bu vr11, vr1, vr5
vsubwev.h.bu vr12, vr2, vr6
vsubwod.h.bu vr13, vr2, vr6
vsubwev.h.bu vr14, vr3, vr7
vsubwod.h.bu vr15, vr3, vr7
vmul.h vr8, vr8, vr8
vmul.h vr9, vr9, vr9
vmul.h vr10, vr10, vr10
vmul.h vr11, vr11, vr11
vmul.h vr12, vr12, vr12
vmul.h vr13, vr13, vr13
vmul.h vr14, vr14, vr14
vmul.h vr15, vr15, vr15
vhaddw.wu.hu vr8, vr8, vr8
vhaddw.wu.hu vr9, vr9, vr9
vhaddw.wu.hu vr10, vr10, vr10
vhaddw.wu.hu vr11, vr11, vr11
vhaddw.wu.hu vr12, vr12, vr12
vhaddw.wu.hu vr13, vr13, vr13
vhaddw.wu.hu vr14, vr14, vr14
vhaddw.wu.hu vr15, vr15, vr15
vadd.w vr8, vr8, vr9
vadd.w vr9, vr10, vr11
vadd.w vr10, vr12, vr13
vadd.w vr11, vr14, vr15
vadd.w vr8, vr8, vr9
vadd.w vr9, vr10, vr11
vadd.w vr16, vr8, vr9
// Remaining three 16x4 strips, 4 rows apart, accumulated into vr16.
.rept 3
alsl.d a0, a1, a0, 2
alsl.d a2, a3, a2, 2
LSX_LOADX_4 a0, a1, t0, t1, vr0, vr1, vr2, vr3
LSX_LOADX_4 a2, a3, t2, t3, vr4, vr5, vr6, vr7
vsubwev.h.bu vr8, vr0, vr4
vsubwod.h.bu vr9, vr0, vr4
vsubwev.h.bu vr10, vr1, vr5
vsubwod.h.bu vr11, vr1, vr5
vsubwev.h.bu vr12, vr2, vr6
vsubwod.h.bu vr13, vr2, vr6
vsubwev.h.bu vr14, vr3, vr7
vsubwod.h.bu vr15, vr3, vr7
vmul.h vr8, vr8, vr8
vmul.h vr9, vr9, vr9
vmul.h vr10, vr10, vr10
vmul.h vr11, vr11, vr11
vmul.h vr12, vr12, vr12
vmul.h vr13, vr13, vr13
vmul.h vr14, vr14, vr14
vmul.h vr15, vr15, vr15
vhaddw.wu.hu vr8, vr8, vr8
vhaddw.wu.hu vr9, vr9, vr9
vhaddw.wu.hu vr10, vr10, vr10
vhaddw.wu.hu vr11, vr11, vr11
vhaddw.wu.hu vr12, vr12, vr12
vhaddw.wu.hu vr13, vr13, vr13
vhaddw.wu.hu vr14, vr14, vr14
vhaddw.wu.hu vr15, vr15, vr15
vadd.w vr8, vr8, vr9
vadd.w vr9, vr10, vr11
vadd.w vr10, vr12, vr13
vadd.w vr11, vr14, vr15
vadd.w vr8, vr8, vr9
vadd.w vr9, vr10, vr11
vadd.w vr17, vr8, vr9
vadd.w vr16, vr16, vr17
.endr
// Final horizontal reduction to a scalar.
vhaddw.d.w vr16, vr16, vr16
vhaddw.q.d vr16, vr16, vr16
vpickve2gr.w a0, vr16, 0
endfunc_x264
/*
* int x264_pixel_sa8d_8x8( pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2 )
*/
.macro pixel_sa8d_8x8_lsx_core out0, out1, out2, out3
// SA8D 8x8 core: computes the absolute 8x8 Hadamard-transformed difference
// of pix1 (a0, stride a1) and pix2 (a2, stride a3).
// Expects t0/t1 = 2*/3* a1 and t2/t3 = 2*/3* a3 (set up by the caller).
// Leaves four 16-bit absolute-sum vectors in \out0..\out3; the caller
// adds and horizontally reduces them. Does not modify a0/a2 (uses t4/t5
// for the second 4-row group). Clobbers vr0-vr15, f0-f7, t4, t5.
// Rows 0-3 of both planes.
FLDD_LOADX_4 a0, a1, t0, t1, f0, f1, f2, f3
FLDD_LOADX_4 a2, a3, t2, t3, f4, f5, f6, f7
vilvl.d vr0, vr1, vr0
vilvl.d vr1, vr3, vr2
vilvl.d vr4, vr5, vr4
vilvl.d vr5, vr7, vr6
// 16-bit differences, then the 4x4 butterfly/transpose ladder.
vsubwev.h.bu vr2, vr0, vr4
vsubwod.h.bu vr3, vr0, vr4
vsubwev.h.bu vr6, vr1, vr5
vsubwod.h.bu vr7, vr1, vr5
vadd.h vr8, vr2, vr3
vsub.h vr9, vr2, vr3
vadd.h vr10, vr6, vr7
vsub.h vr11, vr6, vr7
vpackev.h vr0, vr9, vr8
vpackod.h vr1, vr9, vr8
vpackev.h vr2, vr11, vr10
vpackod.h vr3, vr11, vr10
vadd.h vr4, vr0, vr1
vsub.h vr5, vr0, vr1
vadd.h vr6, vr2, vr3
vsub.h vr7, vr2, vr3
vilvl.d vr0, vr5, vr4
vilvh.d vr1, vr5, vr4
vilvl.d vr2, vr7, vr6
vilvh.d vr3, vr7, vr6
// vr12-vr15: partially transformed top half.
vadd.h vr12, vr0, vr1
vsub.h vr13, vr0, vr1
vadd.h vr14, vr2, vr3
vsub.h vr15, vr2, vr3
// Rows 4-7 (t4/t5 = base + 4*stride, a0/a2 left untouched).
alsl.d t4, a1, a0, 2
alsl.d t5, a3, a2, 2
FLDD_LOADX_4 t4, a1, t0, t1, f0, f1, f2, f3
FLDD_LOADX_4 t5, a3, t2, t3, f4, f5, f6, f7
vilvl.d vr0, vr1, vr0
vilvl.d vr1, vr3, vr2
vilvl.d vr4, vr5, vr4
vilvl.d vr5, vr7, vr6
vsubwev.h.bu vr2, vr0, vr4
vsubwod.h.bu vr3, vr0, vr4
vsubwev.h.bu vr6, vr1, vr5
vsubwod.h.bu vr7, vr1, vr5
vadd.h vr8, vr2, vr3
vsub.h vr9, vr2, vr3
vadd.h vr10, vr6, vr7
vsub.h vr11, vr6, vr7
vpackev.h vr0, vr9, vr8
vpackod.h vr1, vr9, vr8
vpackev.h vr2, vr11, vr10
vpackod.h vr3, vr11, vr10
vadd.h vr4, vr0, vr1
vsub.h vr5, vr0, vr1
vadd.h vr6, vr2, vr3
vsub.h vr7, vr2, vr3
vilvl.d vr0, vr5, vr4
vilvh.d vr1, vr5, vr4
vilvl.d vr2, vr7, vr6
vilvh.d vr3, vr7, vr6
// vr4-vr7: partially transformed bottom half.
vadd.h vr4, vr0, vr1
vsub.h vr5, vr0, vr1
vadd.h vr6, vr2, vr3
vsub.h vr7, vr2, vr3
// Extend the top half to 8-wide: word-granularity shuffle + butterflies.
// vr12 vr13 vr14 vr15
vpickev.w vr0, vr13, vr12
vpickod.w vr1, vr13, vr12
vpickev.w vr2, vr15, vr14
vpickod.w vr3, vr15, vr14
vadd.h vr8, vr0, vr1
vsub.h vr9, vr0, vr1
vadd.h vr10, vr2, vr3
vsub.h vr11, vr2, vr3
vadd.h vr12, vr8, vr10
vadd.h vr13, vr9, vr11
vsub.h vr14, vr8, vr10
vsub.h vr15, vr9, vr11
// Same extension for the bottom half.
// vr4 vr5 vr6 vr7
vpickev.w vr0, vr5, vr4
vpickod.w vr1, vr5, vr4
vpickev.w vr2, vr7, vr6
vpickod.w vr3, vr7, vr6
vadd.h vr8, vr0, vr1
vsub.h vr9, vr0, vr1
vadd.h vr10, vr2, vr3
vsub.h vr11, vr2, vr3
vadd.h vr4, vr8, vr10
vadd.h vr5, vr9, vr11
vsub.h vr6, vr8, vr10
vsub.h vr7, vr9, vr11
// Final vertical butterfly between halves; take absolute sums.
vadd.h vr0, vr12, vr4
vadd.h vr1, vr13, vr5
vadd.h vr2, vr14, vr6
vadd.h vr3, vr15, vr7
vsub.h vr8, vr12, vr4
vsub.h vr9, vr13, vr5
vsub.h vr10, vr14, vr6
vsub.h vr11, vr15, vr7
vadda.h \out0, vr0, vr8
vadda.h \out1, vr1, vr9
vadda.h \out2, vr2, vr10
vadda.h \out3, vr3, vr11
.endm
// int x264_pixel_sa8d_8x8( pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2 )
// In:  a0 = pix1, a1 = i_pix1, a2 = pix2, a3 = i_pix2
// Out: a0 = (sum_of_abs_transformed_diffs + 2) >> 2
function_x264 pixel_sa8d_8x8_lsx
// Precompute stride multiples required by pixel_sa8d_8x8_lsx_core.
slli.d t0, a1, 1
add.d t1, t0, a1
slli.d t2, a3, 1
add.d t3, t2, a3
pixel_sa8d_8x8_lsx_core vr0, vr1, vr2, vr3
// Reduce the four per-lane sums to a single scalar.
vadd.h vr0, vr0, vr1
vadd.h vr1, vr2, vr3
vadd.h vr17, vr0, vr1
vhaddw.wu.hu vr17, vr17, vr17
vhaddw.du.wu vr17, vr17, vr17
vhaddw.qu.du vr17, vr17, vr17
vpickve2gr.wu t5, vr17, 0
// Final normalization: (sum + 2) >> 2.
addi.d t5, t5, 2
srli.d a0, t5, 2
endfunc_x264
/*
* int x264_pixel_sa8d_16x16( pixel *pix1, intptr_t i_pix1,
* pixel *pix2, intptr_t i_pix2 )
*/
// int x264_pixel_sa8d_16x16( pixel *pix1, intptr_t i_pix1,
//                            pixel *pix2, intptr_t i_pix2 )
// Computes SA8D over a 16x16 block as four 8x8 sub-blocks
// (top-left, top-right, bottom-left, bottom-right), then
// returns (total + 2) >> 2 in a0.
function_x264 pixel_sa8d_16x16_lsx
// Stride multiples for the core macro.
slli.d t0, a1, 1
add.d t1, t0, a1
slli.d t2, a3, 1
add.d t3, t2, a3
// Save the original block bases; a0/a2 are re-derived per sub-block.
add.d t6, a0, zero
add.d t7, a2, zero
// Top-left 8x8 -> vr16
pixel_sa8d_8x8_lsx_core vr0, vr1, vr2, vr3
vadd.h vr0, vr0, vr1
vadd.h vr1, vr2, vr3
vadd.h vr16, vr0, vr1
// Top-right 8x8 (base + 8) -> vr17
addi.d a0, t6, 8
addi.d a2, t7, 8
pixel_sa8d_8x8_lsx_core vr0, vr1, vr2, vr3
vadd.h vr0, vr0, vr1
vadd.h vr1, vr2, vr3
vadd.h vr17, vr0, vr1
// Bottom-left 8x8 (base + 8*stride) -> vr18
alsl.d a0, a1, t6, 3
alsl.d a2, a3, t7, 3
pixel_sa8d_8x8_lsx_core vr0, vr1, vr2, vr3
vadd.h vr0, vr0, vr1
vadd.h vr1, vr2, vr3
vadd.h vr18, vr0, vr1
// Bottom-right 8x8 (base + 8*stride + 8) -> vr19
addi.d a0, a0, 8
addi.d a2, a2, 8
pixel_sa8d_8x8_lsx_core vr0, vr1, vr2, vr3
vadd.h vr0, vr0, vr1
vadd.h vr1, vr2, vr3
vadd.h vr19, vr0, vr1
// Widen each partial sum to 32-bit before combining to avoid
// 16-bit overflow, then reduce to a scalar.
vhaddw.wu.hu vr16, vr16, vr16
vhaddw.wu.hu vr17, vr17, vr17
vhaddw.wu.hu vr18, vr18, vr18
vhaddw.wu.hu vr19, vr19, vr19
vadd.w vr16, vr17, vr16
vadd.w vr18, vr19, vr18
vadd.w vr17, vr18, vr16
vhaddw.du.wu vr17, vr17, vr17
vhaddw.qu.du vr17, vr17, vr17
vpickve2gr.wu t5, vr17, 0
// Final normalization: (sum + 2) >> 2.
addi.d t5, t5, 2
srli.d a0, t5, 2
endfunc_x264
/*
* uint64_t pixel_var_8x8( pixel *pix, intptr_t i_stride )
*/
// uint64_t pixel_var_8x8( pixel *pix, intptr_t i_stride )
// In:  a0 = pix, a1 = i_stride
// Out: a0 = (sum_of_squares << 32) | sum   (packed 64-bit result)
function_x264 pixel_var_8x8_lsx
// t0 = 2*stride, t1 = 3*stride for the 4-row load helper.
slli.d t0, a1, 1
add.d t1, a1, t0
FLDD_LOADX_4 a0, a1, t0, t1, f0, f1, f2, f3
alsl.d a0, a1, a0, 2
FLDD_LOADX_4 a0, a1, t0, t1, f4, f5, f6, f7
// Pack pairs of 8-byte rows into full 16-byte vectors.
vilvl.d vr0, vr1, vr0
vilvl.d vr1, vr3, vr2
vilvl.d vr4, vr5, vr4
vilvl.d vr5, vr7, vr6
// --- sum of pixels: widen u8 pairs to u16, then reduce ---
vhaddw.hu.bu vr2, vr0, vr0
vhaddw.hu.bu vr3, vr1, vr1
vhaddw.hu.bu vr6, vr4, vr4
vhaddw.hu.bu vr7, vr5, vr5
vadd.h vr2, vr2, vr3
vadd.h vr6, vr6, vr7
vadd.h vr2, vr2, vr6
vhaddw.wu.hu vr2, vr2, vr2
vhaddw.du.wu vr2, vr2, vr2
vhaddw.qu.du vr2, vr2, vr2
vpickve2gr.wu t5, vr2, 0 // sum
// --- sum of squares: even/odd u8*u8 products, widen and reduce ---
vmulwev.h.bu vr2, vr0, vr0
vmulwod.h.bu vr3, vr0, vr0
vmulwev.h.bu vr6, vr1, vr1
vmulwod.h.bu vr7, vr1, vr1
vmulwev.h.bu vr8, vr4, vr4
vmulwod.h.bu vr9, vr4, vr4
vmulwev.h.bu vr10, vr5, vr5
vmulwod.h.bu vr11, vr5, vr5
vhaddw.wu.hu vr2, vr2, vr2
vhaddw.wu.hu vr3, vr3, vr3
vhaddw.wu.hu vr6, vr6, vr6
vhaddw.wu.hu vr7, vr7, vr7
vhaddw.wu.hu vr8, vr8, vr8
vhaddw.wu.hu vr9, vr9, vr9
vhaddw.wu.hu vr10, vr10, vr10
vhaddw.wu.hu vr11, vr11, vr11
vadd.w vr2, vr2, vr3
vadd.w vr6, vr6, vr7
vadd.w vr8, vr8, vr9
vadd.w vr10, vr10, vr11
vadd.w vr2, vr2, vr6
vadd.w vr8, vr8, vr10
vadd.w vr2, vr2, vr8
vhaddw.du.wu vr2, vr2, vr2
vhaddw.qu.du vr2, vr2, vr2
vpickve2gr.du t6, vr2, 0 // sqr
// Pack result: sqr in the high 32 bits, sum in the low 32 bits.
slli.d t4, t6, 32
add.d a0, t4, t5
endfunc_x264
/*
* uint64_t pixel_var_8x16( pixel *pix, intptr_t i_stride )
*/
// uint64_t pixel_var_8x16( pixel *pix, intptr_t i_stride )
// Same scheme as pixel_var_8x8 but over 16 rows, processed as two
// 8x8 halves; partial sum kept in vr16 (16-bit) and partial sum of
// squares in vr12 (32-bit) across the halves.
// Out: a0 = (sum_of_squares << 32) | sum
function_x264 pixel_var_8x16_lsx
slli.d t0, a1, 1
add.d t1, a1, t0
// ---- rows 0-7 ----
FLDD_LOADX_4 a0, a1, t0, t1, f0, f1, f2, f3
alsl.d a0, a1, a0, 2
FLDD_LOADX_4 a0, a1, t0, t1, f4, f5, f6, f7
vilvl.d vr0, vr1, vr0
vilvl.d vr1, vr3, vr2
vilvl.d vr4, vr5, vr4
vilvl.d vr5, vr7, vr6
// partial pixel sum of the first half -> vr16
vhaddw.hu.bu vr2, vr0, vr0
vhaddw.hu.bu vr3, vr1, vr1
vhaddw.hu.bu vr6, vr4, vr4
vhaddw.hu.bu vr7, vr5, vr5
vadd.h vr2, vr2, vr3
vadd.h vr6, vr6, vr7
vadd.h vr16, vr2, vr6
// partial sum of squares of the first half -> vr12
vmulwev.h.bu vr2, vr0, vr0
vmulwod.h.bu vr3, vr0, vr0
vmulwev.h.bu vr6, vr1, vr1
vmulwod.h.bu vr7, vr1, vr1
vmulwev.h.bu vr8, vr4, vr4
vmulwod.h.bu vr9, vr4, vr4
vmulwev.h.bu vr10, vr5, vr5
vmulwod.h.bu vr11, vr5, vr5
vhaddw.wu.hu vr2, vr2, vr2
vhaddw.wu.hu vr3, vr3, vr3
vhaddw.wu.hu vr6, vr6, vr6
vhaddw.wu.hu vr7, vr7, vr7
vhaddw.wu.hu vr8, vr8, vr8
vhaddw.wu.hu vr9, vr9, vr9
vhaddw.wu.hu vr10, vr10, vr10
vhaddw.wu.hu vr11, vr11, vr11
vadd.w vr12, vr2, vr3
vadd.w vr13, vr6, vr7
vadd.w vr14, vr8, vr9
vadd.w vr15, vr10, vr11
vadd.w vr12, vr12, vr13
vadd.w vr14, vr14, vr15
vadd.w vr12, vr12, vr14
// ---- rows 8-15 ----
alsl.d a0, a1, a0, 2
FLDD_LOADX_4 a0, a1, t0, t1, f0, f1, f2, f3
alsl.d a0, a1, a0, 2
FLDD_LOADX_4 a0, a1, t0, t1, f4, f5, f6, f7
vilvl.d vr0, vr1, vr0
vilvl.d vr1, vr3, vr2
vilvl.d vr4, vr5, vr4
vilvl.d vr5, vr7, vr6
// second-half pixel sum, combined with vr16, reduced to scalar
vhaddw.hu.bu vr2, vr0, vr0
vhaddw.hu.bu vr3, vr1, vr1
vhaddw.hu.bu vr6, vr4, vr4
vhaddw.hu.bu vr7, vr5, vr5
vadd.h vr2, vr2, vr3
vadd.h vr6, vr6, vr7
vadd.h vr2, vr2, vr6
vadd.h vr2, vr2, vr16
vhaddw.wu.hu vr2, vr2, vr2
vhaddw.du.wu vr2, vr2, vr2
vhaddw.qu.du vr2, vr2, vr2
vpickve2gr.wu t5, vr2, 0 // sum
// second-half sum of squares, combined with vr12, reduced to scalar
vmulwev.h.bu vr2, vr0, vr0
vmulwod.h.bu vr3, vr0, vr0
vmulwev.h.bu vr6, vr1, vr1
vmulwod.h.bu vr7, vr1, vr1
vmulwev.h.bu vr8, vr4, vr4
vmulwod.h.bu vr9, vr4, vr4
vmulwev.h.bu vr10, vr5, vr5
vmulwod.h.bu vr11, vr5, vr5
vhaddw.wu.hu vr2, vr2, vr2
vhaddw.wu.hu vr3, vr3, vr3
vhaddw.wu.hu vr6, vr6, vr6
vhaddw.wu.hu vr7, vr7, vr7
vhaddw.wu.hu vr8, vr8, vr8
vhaddw.wu.hu vr9, vr9, vr9
vhaddw.wu.hu vr10, vr10, vr10
vhaddw.wu.hu vr11, vr11, vr11
vadd.w vr2, vr2, vr3
vadd.w vr6, vr6, vr7
vadd.w vr8, vr8, vr9
vadd.w vr10, vr10, vr11
vadd.w vr2, vr2, vr6
vadd.w vr8, vr8, vr10
vadd.w vr2, vr2, vr8
vadd.w vr2, vr2, vr12
vhaddw.du.wu vr2, vr2, vr2
vhaddw.qu.du vr2, vr2, vr2
vpickve2gr.du t6, vr2, 0 // sqr
// Pack result: sqr in the high 32 bits, sum in the low 32 bits.
slli.d t4, t6, 32
add.d a0, t4, t5
endfunc_x264
/*
* uint64_t pixel_var_16x16( pixel *pix, intptr_t i_stride )
*/
// uint64_t pixel_var_16x16( pixel *pix, intptr_t i_stride )
// Processes 16 rows of 16 pixels in four 4-row batches (first batch
// unrolled, remaining three via .rept). Running pixel sum in vr13
// (16-bit lanes), running sum of squares in vr14 (32-bit lanes).
// Out: a0 = (sum_of_squares << 32) | sum
// LSX_LOADX_4 is a 4-row full-vector load helper defined elsewhere
// in this file.
function_x264 pixel_var_16x16_lsx
slli.d t0, a1, 1
add.d t1, t0, a1
// ---- rows 0-3 ----
LSX_LOADX_4 a0, a1, t0, t1, vr0, vr1, vr2, vr3
vhaddw.hu.bu vr4, vr0, vr0
vhaddw.hu.bu vr5, vr1, vr1
vhaddw.hu.bu vr6, vr2, vr2
vhaddw.hu.bu vr7, vr3, vr3
vadd.h vr4, vr5, vr4
vadd.h vr5, vr7, vr6
vadd.h vr13, vr5, vr4
vmulwev.h.bu vr5, vr0, vr0
vmulwod.h.bu vr6, vr0, vr0
vmulwev.h.bu vr7, vr1, vr1
vmulwod.h.bu vr8, vr1, vr1
vmulwev.h.bu vr9, vr2, vr2
vmulwod.h.bu vr10, vr2, vr2
vmulwev.h.bu vr11, vr3, vr3
vmulwod.h.bu vr12, vr3, vr3
vhaddw.wu.hu vr5, vr5, vr5
vhaddw.wu.hu vr6, vr6, vr6
vhaddw.wu.hu vr7, vr7, vr7
vhaddw.wu.hu vr8, vr8, vr8
vhaddw.wu.hu vr9, vr9, vr9
vhaddw.wu.hu vr10, vr10, vr10
vhaddw.wu.hu vr11, vr11, vr11
vhaddw.wu.hu vr12, vr12, vr12
vadd.w vr5, vr5, vr6
vadd.w vr6, vr8, vr7
vadd.w vr7, vr10, vr9
vadd.w vr8, vr12, vr11
vadd.w vr0, vr5, vr6
vadd.w vr1, vr8, vr7
vadd.w vr14, vr1, vr0
// ---- rows 4-15: three more 4-row batches, accumulating ----
.rept 3
alsl.d a0, a1, a0, 2
LSX_LOADX_4 a0, a1, t0, t1, vr0, vr1, vr2, vr3
vhaddw.hu.bu vr4, vr0, vr0
vhaddw.hu.bu vr5, vr1, vr1
vhaddw.hu.bu vr6, vr2, vr2
vhaddw.hu.bu vr7, vr3, vr3
vadd.h vr4, vr5, vr4
vadd.h vr5, vr7, vr6
vadd.h vr4, vr5, vr4
vadd.h vr13, vr4, vr13
vmulwev.h.bu vr5, vr0, vr0
vmulwod.h.bu vr6, vr0, vr0
vmulwev.h.bu vr7, vr1, vr1
vmulwod.h.bu vr8, vr1, vr1
vmulwev.h.bu vr9, vr2, vr2
vmulwod.h.bu vr10, vr2, vr2
vmulwev.h.bu vr11, vr3, vr3
vmulwod.h.bu vr12, vr3, vr3
vhaddw.wu.hu vr5, vr5, vr5
vhaddw.wu.hu vr6, vr6, vr6
vhaddw.wu.hu vr7, vr7, vr7
vhaddw.wu.hu vr8, vr8, vr8
vhaddw.wu.hu vr9, vr9, vr9
vhaddw.wu.hu vr10, vr10, vr10
vhaddw.wu.hu vr11, vr11, vr11
vhaddw.wu.hu vr12, vr12, vr12
vadd.w vr5, vr5, vr6
vadd.w vr6, vr8, vr7
vadd.w vr7, vr10, vr9
vadd.w vr8, vr12, vr11
vadd.w vr0, vr5, vr6
vadd.w vr1, vr8, vr7
vadd.w vr0, vr1, vr0
vadd.w vr14, vr0, vr14
.endr
// Reduce pixel sum (vr13) and sum of squares (vr14) to scalars.
vhaddw.wu.hu vr13, vr13, vr13
vhaddw.du.wu vr13, vr13, vr13
vhaddw.qu.du vr13, vr13, vr13
vpickve2gr.wu t4, vr13, 0
vhaddw.du.wu vr14, vr14, vr14
vhaddw.qu.du vr14, vr14, vr14
vpickve2gr.du t6, vr14, 0 // sqr
// Pack result: sqr in the high 32 bits, sum in the low 32 bits.
slli.d t5, t6, 32
add.d a0, t4, t5
endfunc_x264
// Accumulates SSD and per-lane sum of differences over an 8x4 strip.
//   \in0 = fenc base (rows at FENC_STRIDE), \in1 = fdec base (rows at
//   FDEC_STRIDE), \in2 = 32-bit SSD accumulator (updated in place),
//   \in3 = output: 16-bit per-lane sums of the signed differences.
// Clobbers: vr0-vr7, f0-f7.
.macro sse_diff_8width_lsx in0, in1, in2, in3
fld.d f0, \in0, 0
fld.d f1, \in0, FENC_STRIDE
fld.d f2, \in0, FENC_STRIDE * 2
fld.d f3, \in0, FENC_STRIDE * 3
fld.d f4, \in1, 0
fld.d f5, \in1, FDEC_STRIDE
fld.d f6, \in1, FDEC_STRIDE * 2
fld.d f7, \in1, FDEC_STRIDE * 3
// Merge the four 8-byte rows into two 16-byte vectors per source.
vilvl.d vr0, vr1, vr0
vilvl.d vr1, vr3, vr2
vilvl.d vr2, vr5, vr4
vilvl.d vr3, vr7, vr6
// Signed 16-bit differences fenc - fdec (even/odd byte lanes).
vsubwev.h.bu vr4, vr0, vr2
vsubwod.h.bu vr5, vr0, vr2
vsubwev.h.bu vr6, vr1, vr3
vsubwod.h.bu vr7, vr1, vr3
// sqr: dot-product accumulate diff*diff into \in2
vdp2add.w.h \in2, vr4, vr4
vdp2add.w.h \in2, vr5, vr5
vdp2add.w.h \in2, vr6, vr6
vdp2add.w.h \in2, vr7, vr7
// sum: per-lane sum of signed differences into \in3
vadd.h vr4, vr4, vr5
vadd.h vr6, vr6, vr7
vadd.h \in3, vr4, vr6
.endm
/*
* int pixel_var2_8x8( pixel *fenc, pixel *fdec, int ssd[2] )
*/
// int pixel_var2_8x8( pixel *fenc, pixel *fdec, int ssd[2] )
// Computes SSD and sum of differences for the U plane (first 8
// columns) and V plane (offset by half a stride), stores the two SSDs
// in ssd[0]/ssd[1], and returns
//   (ssd_u - sum_u*sum_u/64) + (ssd_v - sum_v*sum_v/64)   (>>6 == /64)
function_x264 pixel_var2_8x8_lsx
// ---- U plane: two 8x4 strips ----
vxor.v vr8, vr8, vr8
sse_diff_8width_lsx a0, a1, vr8, vr9
addi.d t0, a0, FENC_STRIDE * 4
addi.d t1, a1, FDEC_STRIDE * 4
sse_diff_8width_lsx t0, t1, vr8, vr10
vhaddw.d.w vr8, vr8, vr8
vhaddw.q.d vr8, vr8, vr8
vpickve2gr.w t2, vr8, 0 // sqr_u
vadd.h vr8, vr10, vr9
vhaddw.w.h vr8, vr8, vr8
vhaddw.d.w vr8, vr8, vr8
vhaddw.q.d vr8, vr8, vr8
vpickve2gr.w t3, vr8, 0 // sum_u
// ---- V plane: advance by half a stride ----
addi.d a0, a0, FENC_STRIDE / 2
addi.d a1, a1, FDEC_STRIDE / 2
vxor.v vr8, vr8, vr8
sse_diff_8width_lsx a0, a1, vr8, vr9
addi.d t0, a0, FENC_STRIDE * 4
addi.d t1, a1, FDEC_STRIDE * 4
sse_diff_8width_lsx t0, t1, vr8, vr10
vhaddw.d.w vr8, vr8, vr8
vhaddw.q.d vr8, vr8, vr8
vpickve2gr.w t4, vr8, 0 // sqr_v
vadd.h vr8, vr10, vr9
vhaddw.w.h vr8, vr8, vr8
vhaddw.d.w vr8, vr8, vr8
vhaddw.q.d vr8, vr8, vr8
vpickve2gr.w t5, vr8, 0 // sum_v
// ssd[0] = sqr_u, ssd[1] = sqr_v
st.w t2, a2, 0
st.w t4, a2, 4
// variance numerators: sqr - sum*sum/64  (8x8 = 64 pixels)
mul.w t3, t3, t3
mul.w t5, t5, t5
srai.w t3, t3, 6
srai.w t5, t5, 6
sub.w t2, t2, t3
sub.w t4, t4, t5
add.w a0, t2, t4
endfunc_x264
/*
* int pixel_var2_8x16( pixel *fenc, pixel *fdec, int ssd[2] )
*/
// int pixel_var2_8x16( pixel *fenc, pixel *fdec, int ssd[2] )
// Same as pixel_var2_8x8 but over 16 rows (four 8x4 strips per
// plane); the sum*sum term is divided by 128 (8x16 = 128 pixels).
function_x264 pixel_var2_8x16_lsx
// ---- U plane: four 8x4 strips ----
vxor.v vr8, vr8, vr8
sse_diff_8width_lsx a0, a1, vr8, vr9
addi.d t0, a0, FENC_STRIDE * 4
addi.d t1, a1, FDEC_STRIDE * 4
sse_diff_8width_lsx t0, t1, vr8, vr10
addi.d t0, t0, FENC_STRIDE * 4
addi.d t1, t1, FDEC_STRIDE * 4
sse_diff_8width_lsx t0, t1, vr8, vr11
addi.d t0, t0, FENC_STRIDE * 4
addi.d t1, t1, FDEC_STRIDE * 4
sse_diff_8width_lsx t0, t1, vr8, vr12
vhaddw.d.w vr8, vr8, vr8
vhaddw.q.d vr8, vr8, vr8
vpickve2gr.w t2, vr8, 0 // sqr_u
vadd.h vr8, vr10, vr9
vadd.h vr8, vr11, vr8
vadd.h vr8, vr12, vr8
vhaddw.w.h vr8, vr8, vr8
vhaddw.d.w vr8, vr8, vr8
vhaddw.q.d vr8, vr8, vr8
vpickve2gr.w t3, vr8, 0 // sum_u
// ---- V plane: advance by half a stride ----
addi.d a0, a0, FENC_STRIDE / 2
addi.d a1, a1, FDEC_STRIDE / 2
vxor.v vr8, vr8, vr8
sse_diff_8width_lsx a0, a1, vr8, vr9
addi.d t0, a0, FENC_STRIDE * 4
addi.d t1, a1, FDEC_STRIDE * 4
sse_diff_8width_lsx t0, t1, vr8, vr10
addi.d t0, t0, FENC_STRIDE * 4
addi.d t1, t1, FDEC_STRIDE * 4
sse_diff_8width_lsx t0, t1, vr8, vr11
addi.d t0, t0, FENC_STRIDE * 4
addi.d t1, t1, FDEC_STRIDE * 4
sse_diff_8width_lsx t0, t1, vr8, vr12
vhaddw.d.w vr8, vr8, vr8
vhaddw.q.d vr8, vr8, vr8
vpickve2gr.w t4, vr8, 0 // sqr_v
vadd.h vr8, vr10, vr9
vadd.h vr8, vr11, vr8
vadd.h vr8, vr12, vr8
vhaddw.w.h vr8, vr8, vr8
vhaddw.d.w vr8, vr8, vr8
vhaddw.q.d vr8, vr8, vr8
vpickve2gr.w t5, vr8, 0 // sum_v
// ssd[0] = sqr_u, ssd[1] = sqr_v
st.w t2, a2, 0
st.w t4, a2, 4
// variance numerators: sqr - sum*sum/128  (8x16 = 128 pixels)
mul.w t3, t3, t3
mul.w t5, t5, t5
srai.w t3, t3, 7
srai.w t5, t5, 7
sub.w t2, t2, t3
sub.w t4, t4, t5
add.w a0, t2, t4
endfunc_x264
#endif /* !HIGH_BIT_DEPTH */
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,445
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/ADC/ADC_SingleConversion_TriggerTimer_DMA/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400
; Uninitialized RAM region for the main stack; __initial_sp marks its
; top (stacks grow downward on Cortex-M).
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200
; Uninitialized RAM region for the C library heap, delimited by
; __heap_base / __heap_limit.
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
; Require 8-byte stack alignment (AAPCS) and Thumb instruction set.
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
; Entry 0 is the initial stack pointer, entry 1 the reset vector;
; the remaining entries are the Cortex-M0 system exceptions followed
; by the STM32F072 peripheral interrupts in IRQ-number order.
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TS
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine:
; calls SystemInit (clock/system configuration), then jumps to the
; C library entry __main, which initializes RW/ZI data and calls main().
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0 ; no return; __main never comes back
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
; Each is exported WEAK so the application can override it by defining
; a function of the same name.
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
; Catch-all IRQ handler: every peripheral interrupt label below is a
; weak alias of this infinite loop so unhandled interrupts hang here
; (state preserved for a debugger) instead of jumping to garbage.
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
; All labels below share the single B . instruction.
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
; With MicroLib the linker uses the exported symbols directly; otherwise
; __user_initial_stackheap returns the heap/stack bounds to the ARM C
; library in R0-R3 (R0=heap base, R1=stack top, R2=heap limit, R3=stack base).
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/ADC/ADC_SingleConversion_TriggerTimer_DMA/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
/* Target/assembler configuration: unified ARM syntax, Cortex-M0
   (ARMv6-M, Thumb-only), software floating point. */
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
/* Reset entry point: sets SP, copies .data initializers from flash to
   SRAM, zero-fills .bss, then calls SystemInit, C++ static
   constructors (__libc_init_array) and finally main(). */
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM.
   r1 = byte offset into .data; loop test precedes the copy so an
   empty .data section is handled. */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1] /* load word from flash image */
str r3, [r0, r1] /* store to SRAM (.data) */
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function.*/
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
/* main() should not return; trap here if it does. */
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
/* Infinite loop for unexpected interrupts; preserves system state
   for inspection by a debugger. All unimplemented handlers are
   weak-aliased to this symbol below. */
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
/* NOTE(review): this .size directive appears BEFORE the g_pfnVectors
   label, so ".-g_pfnVectors" does not cover the table; this oddity is
   present in the original ST template — confirm before relying on the
   symbol size. */
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
/* Each handler is declared weak and thumb-aliased to Default_Handler;
   defining a function with the same name anywhere in the application
   overrides the alias. */
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aestream/faery
| 2,598
|
src/mp4/x264/common/arm/bitstream-a.S
|
/*****************************************************************************
* bitstream-a.S: arm bitstream functions
*****************************************************************************
* Copyright (C) 2014-2024 x264 project
*
* Authors: Janne Grunau <janne-x264@jannau.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "asm.S"
@ uint8_t *x264_nal_escape( uint8_t *dst, uint8_t *src, uint8_t *end )
@ Copies src..end to dst, inserting an emulation-prevention byte 0x03
@ whenever two consecutive zero bytes are followed by a byte < 4.
@ In:  r0 = dst, r1 = src, r2 = end.  Out: r0 = new end of dst.
@ 16-byte chunks with no candidate bytes are copied with NEON; chunks
@ containing candidates fall back to a byte loop that tracks the last
@ two output bytes in r5.
function nal_escape_neon
push {r4-r5,lr}
vmov.u8 q0, #0xff @ q0 = previous chunk (init: no zeros)
vmov.u8 q8, #4
mov r3, #3 @ the escape byte to insert
subs lr, r1, r2 @ lr = src - end (negative count)
beq 99f
0:
cmn lr, #15
blt 16f
mov r1, r2 @ tail (< 16 bytes left): byte loop to end
b 100f
16:
vld1.8 {q1}, [r1]!
@ q2/q3 = chunk shifted by 2/1 bytes (carrying in prior chunk's tail)
vext.8 q2, q0, q1, #14
vext.8 q3, q0, q1, #15
vcgt.u8 q11, q8, q1 @ current byte < 4 ?
vceq.u8 q9, q2, #0 @ byte two back == 0 ?
vceq.u8 q10, q3, #0 @ previous byte == 0 ?
vand q9, q9, q11
vand q9, q9, q10 @ nonzero lane => escape needed
vshrn.u16 d22, q9, #4
vmov ip, lr, d22
orrs ip, ip, lr
beq 16f @ no candidates: fast NEON copy below
mov lr, #-16 @ rewind: re-process this chunk bytewise
100:
@ seed r5 with the last two bytes already written to dst
vmov.u8 r5, d1[6]
vmov.u8 r4, d1[7]
orr r5, r4, r5, lsl #8
101:
ldrb r4, [r1, lr]
orr ip, r4, r5, lsl #16 @ ip = last two out-bytes : current byte
cmp ip, #3
bhi 102f
strb r3, [r0], #1 @ 00 00 0x: insert 0x03 first
orr r5, r3, r5, lsl #8
102:
adds lr, lr, #1
strb r4, [r0], #1
orr r5, r4, r5, lsl #8
blt 101b
subs lr, r1, r2
@ stash the last two output bytes back into q0's tail for the
@ cross-chunk vext comparisons
lsr ip, r5, #8
vmov.u8 d1[6], ip
vmov.u8 d1[7], r5
blt 0b
pop {r4-r5,pc}
16:
@ fast path: whole chunk is clean, store it and roll the window
subs lr, r1, r2
vst1.8 {q1}, [r0]!
vmov q0, q1
blt 0b
99:
pop {r4-r5,pc}
endfunc
|
aestream/faery
| 24,178
|
src/mp4/x264/common/arm/dct-a.S
|
/****************************************************************************
* dct-a.S: arm transform and zigzag
*****************************************************************************
* Copyright (C) 2009-2024 x264 project
*
* Authors: David Conrad <lessen42@gmail.com>
* Martin Storsjo <martin@martin.st>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "asm.S"
@ Byte-pair shuffle indices for the 4x4 frame (zigzag) scan, consumed
@ by the NEON zigzag routines; each pair addresses one int16
@ coefficient.
const scan4x4_frame, align=4
.byte 0,1, 8,9, 2,3, 4,5
.byte 2,3, 8,9, 16,17, 10,11
.byte 12,13, 6,7, 14,15, 20,21
.byte 10,11, 12,13, 6,7, 14,15
endconst
.text
// sum = a + (b>>shift)     sub = (a>>shift) - b
// \t0/\t1 are scratch; \a and \b are read only. Signed 16-bit lanes.
.macro SUMSUB_SHR shift sum sub a b t0 t1
vshr.s16 \t0, \b, #\shift
vshr.s16 \t1, \a, #\shift
vadd.s16 \sum, \a, \t0
vsub.s16 \sub, \t1, \b
.endm
// sum = (a>>shift) + b sub = a - (b>>shift)
// Mirror of SUMSUB_SHR: the shift is applied to the other operand of each
// result (see comment above).  \t0/\t1 are clobbered scratch.
.macro SUMSUB_SHR2 shift sum sub a b t0 t1
vshr.s16 \t0, \a, #\shift
vshr.s16 \t1, \b, #\shift
vadd.s16 \sum, \t0, \b
vsub.s16 \sub, \a, \t1
.endm
// a += 1.5*ma b -= 1.5*mb
// \a += (\ma>>1) + \ma ;  \b -= (\mb>>1) + \mb
// i.e. +/- 1.5x with the half truncated by the arithmetic shift.
// \t0/\t1 are clobbered scratch.
.macro SUMSUB_15 a b ma mb t0 t1
vshr.s16 \t0, \ma, #1
vshr.s16 \t1, \mb, #1
vadd.s16 \t0, \t0, \ma
vadd.s16 \t1, \t1, \mb
vadd.s16 \a, \a, \t0
vsub.s16 \b, \b, \t1
.endm
// void dct4x4dc( int16_t d[16] )
// In-place 4x4 Hadamard transform of the luma DC coefficients.
// r0 = 16 int16 values, 16-byte aligned (the :128 hint enforces this).
// The vmov #1 plus vrhadd/vhsub pair implements the reference rounding
// ((x+1)>>1 on the subtracted halves) of the second pass.
function dct4x4dc_neon
vld1.64 {d0-d3}, [r0,:128]
SUMSUB_ABCD d4, d5, d6, d7, d0, d1, d2, d3
SUMSUB_ABCD d0, d2, d3, d1, d4, d6, d5, d7
vmov.s16 d31, #1
HADAMARD 1, sumsub, q2, q3, q0, q1
vtrn.32 d4, d5
// d16/d17 = sums biased by +1 so vhsub below rounds like (a-b+1)>>1
vadd.s16 d16, d4, d31
vtrn.32 d6, d7
vadd.s16 d17, d6, d31
vrhadd.s16 d0, d4, d5
vhsub.s16 d1, d16, d5
vhsub.s16 d2, d17, d7
vrhadd.s16 d3, d6, d7
vst1.64 {d0-d3}, [r0,:128]
bx lr
endfunc
// void idct4x4dc( int16_t d[16] )
// In-place inverse 4x4 Hadamard of the DC coefficients (no rounding,
// unlike the forward dct4x4dc above).  r0 = 16 int16, 16-byte aligned.
function idct4x4dc_neon
vld1.64 {d0-d3}, [r0,:128]
SUMSUB_ABCD d4, d5, d6, d7, d0, d1, d2, d3
SUMSUB_ABCD d0, d2, d3, d1, d4, d6, d5, d7
HADAMARD 1, sumsub, q2, q3, q0, q1
HADAMARD 2, sumsub, d0, d1, d4, d5
HADAMARD 2, sumsub, d3, d2, d6, d7
vst1.64 {d0-d3}, [r0,:128]
bx lr
endfunc
// One dimension of the 4x4 DCT: outputs \d0-\d3 from inputs \d4-\d7
// (the inputs double as scratch and are clobbered).  The doubled terms
// (\d4+\d4, \d6+\d6) provide the 2x weights of the H.264 core transform.
.macro DCT_1D d0 d1 d2 d3 d4 d5 d6 d7
SUMSUB_AB \d1, \d6, \d5, \d6
SUMSUB_AB \d3, \d7, \d4, \d7
vadd.s16 \d0, \d3, \d1
vadd.s16 \d4, \d7, \d7
vadd.s16 \d5, \d6, \d6
vsub.s16 \d2, \d3, \d1
vadd.s16 \d1, \d4, \d6
vsub.s16 \d3, \d7, \d5
.endm
// void sub4x4_dct( int16_t dct[16], uint8_t *pix1, uint8_t *pix2 )
// r0 = output coefficients, r1 = fenc pixels (stride FENC_STRIDE),
// r2 = fdec pixels (stride FDEC_STRIDE).
// Computes the 4x4 DCT of (pix1 - pix2): widen-subtract each row,
// then row transform, transpose, column transform.
function sub4x4_dct_neon
mov r3, #FENC_STRIDE
mov ip, #FDEC_STRIDE
vld1.32 {d0[]}, [r1,:32], r3
vld1.32 {d1[]}, [r2,:32], ip
vld1.32 {d2[]}, [r1,:32], r3
// q8..q11 = per-row 16-bit differences fenc - fdec
vsubl.u8 q8, d0, d1
vld1.32 {d3[]}, [r2,:32], ip
vld1.32 {d4[]}, [r1,:32], r3
vsubl.u8 q9, d2, d3
vld1.32 {d5[]}, [r2,:32], ip
vld1.32 {d6[]}, [r1,:32], r3
vsubl.u8 q10, d4, d5
vld1.32 {d7[]}, [r2,:32], ip
vsubl.u8 q11, d6, d7
DCT_1D d0, d1, d2, d3, d16, d18, d20, d22
TRANSPOSE4x4_16 d0, d1, d2, d3
DCT_1D d4, d5, d6, d7, d0, d1, d2, d3
vst1.64 {d4-d7}, [r0,:128]
bx lr
endfunc
// Internal helper (export=0): DCT of one 8x4 strip, emitting two 4x4
// coefficient blocks.  Callers must preset r3 = FENC_STRIDE and
// ip = FDEC_STRIDE; r0 = output (advanced by 64 bytes), r1/r2 = fenc/fdec
// pixel pointers (advanced by 4 rows).
function sub8x4_dct_neon, export=0
vld1.64 {d0}, [r1,:64], r3
vld1.64 {d1}, [r2,:64], ip
vsubl.u8 q8, d0, d1
vld1.64 {d2}, [r1,:64], r3
vld1.64 {d3}, [r2,:64], ip
vsubl.u8 q9, d2, d3
vld1.64 {d4}, [r1,:64], r3
vld1.64 {d5}, [r2,:64], ip
vsubl.u8 q10, d4, d5
vld1.64 {d6}, [r1,:64], r3
vld1.64 {d7}, [r2,:64], ip
vsubl.u8 q11, d6, d7
DCT_1D q0, q1, q2, q3, q8, q9, q10, q11
TRANSPOSE4x4_16 q0, q1, q2, q3
// Second (column) pass done inline so the two 4x4 halves can be
// interleaved straight into the output layout.
SUMSUB_AB q8, q12, q0, q3
SUMSUB_AB q9, q10, q1, q2
vadd.i16 q13, q12, q12
vadd.i16 q11, q10, q10
vadd.i16 d0, d16, d18
vadd.i16 d1, d26, d20
vsub.i16 d2, d16, d18
vsub.i16 d3, d24, d22
vst1.64 {d0-d1}, [r0,:128]!
vadd.i16 d4, d17, d19
vadd.i16 d5, d27, d21
vst1.64 {d2-d3}, [r0,:128]!
vsub.i16 d6, d17, d19
vsub.i16 d7, d25, d23
vst1.64 {d4-d5}, [r0,:128]!
vst1.64 {d6-d7}, [r0,:128]!
bx lr
endfunc
// void sub8x8_dct( int16_t dct[4][16], uint8_t *pix1, uint8_t *pix2 )
// Two 8x4 strips; the second call is a tail call (b) so the helper's
// bx lr returns directly to our caller.
function sub8x8_dct_neon
push {lr}
mov r3, #FENC_STRIDE
mov ip, #FDEC_STRIDE
bl sub8x4_dct_neon
pop {lr}
b sub8x4_dct_neon
endfunc
// void sub16x16_dct( int16_t dct[16][16], uint8_t *pix1, uint8_t *pix2 )
// Eight 8x4 strips in 8x8-block order; the pointer rewinds step between
// left/right and top/bottom halves (-8*stride+8, then -8).  The final
// strip is a tail call.
function sub16x16_dct_neon
push {lr}
mov r3, #FENC_STRIDE
mov ip, #FDEC_STRIDE
bl sub8x4_dct_neon
bl sub8x4_dct_neon
sub r1, r1, #8*FENC_STRIDE-8
sub r2, r2, #8*FDEC_STRIDE-8
bl sub8x4_dct_neon
bl sub8x4_dct_neon
sub r1, r1, #8
sub r2, r2, #8
bl sub8x4_dct_neon
bl sub8x4_dct_neon
sub r1, r1, #8*FENC_STRIDE-8
sub r2, r2, #8*FDEC_STRIDE-8
bl sub8x4_dct_neon
pop {lr}
b sub8x4_dct_neon
endfunc
// One dimension of the 8x8 DCT over rows held in q8-q15 (in place).
// NOTE: the \type argument is not referenced in this body; it is kept
// for symmetry with IDCT8_1D, which does specialize on row/col.
// q0-q3 are clobbered as scratch.
.macro DCT8_1D type
SUMSUB_AB q2, q1, q11, q12 // s34/d34
SUMSUB_AB q3, q11, q10, q13 // s25/d25
SUMSUB_AB q13, q10, q9, q14 // s16/d16
SUMSUB_AB q14, q8, q8, q15 // s07/d07
SUMSUB_AB q9, q2, q14, q2 // a0/a2
SUMSUB_AB q12, q14, q13, q3 // a1/a3
SUMSUB_AB q3, q13, q8, q1 // a6/a5
vshr.s16 q0, q10, #1
vshr.s16 q15, q11, #1
vadd.s16 q0, q0, q10
vadd.s16 q15, q15, q11
vsub.s16 q3, q3, q0
vsub.s16 q13, q13, q15
SUMSUB_AB q0, q15, q10, q11 // a4/a7
vshr.s16 q10, q8, #1
vshr.s16 q11, q1, #1
vadd.s16 q10, q10, q8
vadd.s16 q11, q11, q1
vadd.s16 q10, q0, q10
vadd.s16 q15, q15, q11
SUMSUB_AB q8, q12, q9, q12
SUMSUB_SHR 2, q9, q15, q10, q15, q0, q1
SUMSUB_SHR 1, q10, q14, q2, q14, q0, q1
SUMSUB_SHR2 2, q11, q13, q3, q13, q0, q1
.endm
// void sub8x8_dct8( int16_t dct[64], uint8_t *pix1, uint8_t *pix2 )
// 8x8 DCT of (fenc - fdec): widen-subtract eight rows into q8-q15,
// row transform, full 8x8 transpose via vswp/vtrn, column transform,
// then store the 64 coefficients.
function sub8x8_dct8_neon
mov r3, #FENC_STRIDE
mov ip, #FDEC_STRIDE
vld1.64 {d16}, [r1,:64], r3
vld1.64 {d17}, [r2,:64], ip
vsubl.u8 q8, d16, d17
vld1.64 {d18}, [r1,:64], r3
vld1.64 {d19}, [r2,:64], ip
vsubl.u8 q9, d18, d19
vld1.64 {d20}, [r1,:64], r3
vld1.64 {d21}, [r2,:64], ip
vsubl.u8 q10, d20, d21
vld1.64 {d22}, [r1,:64], r3
vld1.64 {d23}, [r2,:64], ip
vsubl.u8 q11, d22, d23
vld1.64 {d24}, [r1,:64], r3
vld1.64 {d25}, [r2,:64], ip
vsubl.u8 q12, d24, d25
vld1.64 {d26}, [r1,:64], r3
vld1.64 {d27}, [r2,:64], ip
vsubl.u8 q13, d26, d27
vld1.64 {d28}, [r1,:64], r3
vld1.64 {d29}, [r2,:64], ip
vsubl.u8 q14, d28, d29
vld1.64 {d30}, [r1,:64], r3
vld1.64 {d31}, [r2,:64], ip
vsubl.u8 q15, d30, d31
DCT8_1D row
// 8x8 transpose of q8-q15: 64-bit swaps, then 32-bit and 16-bit trn.
vswp d17, d24 // 8, 12
vswp d21, d28 // 10,14
vtrn.32 q8, q10
vtrn.32 q12, q14
vswp d19, d26 // 9, 13
vswp d23, d30 // 11,15
vtrn.32 q9, q11
vtrn.32 q13, q15
vtrn.16 q10, q11
vtrn.16 q12, q13
vtrn.16 q8, q9
vtrn.16 q14, q15
DCT8_1D col
vst1.64 {d16-d19}, [r0,:128]!
vst1.64 {d20-d23}, [r0,:128]!
vst1.64 {d24-d27}, [r0,:128]!
vst1.64 {d28-d31}, [r0,:128]!
bx lr
endfunc
// void sub16x16_dct8( int16_t dct[4][64], uint8_t *pix1, uint8_t *pix2 )
// Four 8x8 DCTs; X() applies the platform symbol prefix.  The last call
// is a tail call, so lr is restored before the final pointer rewind.
function sub16x16_dct8_neon
push {lr}
bl X(sub8x8_dct8_neon)
sub r1, r1, #FENC_STRIDE*8 - 8
sub r2, r2, #FDEC_STRIDE*8 - 8
bl X(sub8x8_dct8_neon)
sub r1, r1, #8
sub r2, r2, #8
bl X(sub8x8_dct8_neon)
pop {lr}
sub r1, r1, #FENC_STRIDE*8 - 8
sub r2, r2, #FDEC_STRIDE*8 - 8
b X(sub8x8_dct8_neon)
endfunc
// First part of IDCT (minus final SUMSUB_BA)
// First part of one 4-point IDCT dimension (see comment above): produces
// the even pair (\d4/\d5) and the half-weighted odd pair (\d6/\d7); the
// caller finishes with SUMSUB_AB on the results.
.macro IDCT_1D d4 d5 d6 d7 d0 d1 d2 d3
SUMSUB_AB \d4, \d5, \d0, \d2
vshr.s16 \d7, \d1, #1
vshr.s16 \d6, \d3, #1
vsub.s16 \d7, \d7, \d3
vadd.s16 \d6, \d6, \d1
.endm
// void add4x4_idct( uint8_t *dst, int16_t dct[16] )
// r0 = destination pixels (stride FDEC_STRIDE), r1 = coefficients.
// IDCT with >>6 rounding, added saturating onto the existing pixels.
// Destination rows are loaded into d30/d31 interleaved with the math;
// note rows 2/3 are swapped (d3,d2 / d31[1],d31[0]) to match the
// transposed row order produced by the first pass.
function add4x4_idct_neon
mov r2, #FDEC_STRIDE
vld1.64 {d0-d3}, [r1,:128]
IDCT_1D d4, d5, d6, d7, d0, d1, d2, d3
vld1.32 {d30[0]}, [r0,:32], r2
SUMSUB_AB q0, q1, q2, q3
TRANSPOSE4x4_16 d0, d1, d3, d2
IDCT_1D d4, d5, d6, d7, d0, d1, d3, d2
vld1.32 {d30[1]}, [r0,:32], r2
SUMSUB_AB q0, q1, q2, q3
vrshr.s16 q0, q0, #6
vld1.32 {d31[1]}, [r0,:32], r2
vrshr.s16 q1, q1, #6
vld1.32 {d31[0]}, [r0,:32], r2
// rewind r0 by the 4 rows just read
sub r0, r0, r2, lsl #2
vaddw.u8 q0, q0, d30
vaddw.u8 q1, q1, d31
vqmovun.s16 d0, q0
vqmovun.s16 d2, q1
vst1.32 {d0[0]}, [r0,:32], r2
vst1.32 {d0[1]}, [r0,:32], r2
vst1.32 {d2[1]}, [r0,:32], r2
vst1.32 {d2[0]}, [r0,:32], r2
bx lr
endfunc
// Internal helper (export=0): inverse-transform two adjacent 4x4 blocks
// (an 8x4 strip) and add onto dst.  Callers must preset r2 = FDEC_STRIDE;
// r1 = coefficients (advanced by 64 bytes), r0 = dst (advanced 4 rows
// then rewound for the stores).
function add8x4_idct_neon, export=0
vld1.64 {d0-d3}, [r1,:128]!
IDCT_1D d16, d18, d20, d22, d0, d1, d2, d3
vld1.64 {d4-d7}, [r1,:128]!
IDCT_1D d17, d19, d21, d23, d4, d5, d6, d7
SUMSUB_AB q0, q3, q8, q10
SUMSUB_AB q1, q2, q9, q11
TRANSPOSE4x4_16 q0, q1, q2, q3
IDCT_1D q8, q9, q10, q11, q0, q1, q2, q3
SUMSUB_AB q0, q3, q8, q10
SUMSUB_AB q1, q2, q9, q11
// >>6 rounding interleaved with destination row loads
vrshr.s16 q0, q0, #6
vld1.32 {d28}, [r0,:64], r2
vrshr.s16 q1, q1, #6
vld1.32 {d29}, [r0,:64], r2
vrshr.s16 q2, q2, #6
vld1.32 {d30}, [r0,:64], r2
vrshr.s16 q3, q3, #6
vld1.32 {d31}, [r0,:64], r2
sub r0, r0, r2, lsl #2
vaddw.u8 q0, q0, d28
vaddw.u8 q1, q1, d29
vaddw.u8 q2, q2, d30
vaddw.u8 q3, q3, d31
vqmovun.s16 d0, q0
vqmovun.s16 d1, q1
vst1.32 {d0}, [r0,:64], r2
vqmovun.s16 d2, q2
vst1.32 {d1}, [r0,:64], r2
vqmovun.s16 d3, q3
vst1.32 {d2}, [r0,:64], r2
vst1.32 {d3}, [r0,:64], r2
bx lr
endfunc
// void add8x8_idct( uint8_t *dst, int16_t dct[4][16] )
// Two 8x4 strips; lr is parked in ip (the helper doesn't touch ip)
// so the second call can be a tail call.
function add8x8_idct_neon
mov r2, #FDEC_STRIDE
mov ip, lr
bl add8x4_idct_neon
mov lr, ip
b add8x4_idct_neon
endfunc
// void add16x16_idct( uint8_t *dst, int16_t dct[16][16] )
// Eight 8x4 strips in 8x8-block order, with the same dst rewind pattern
// as sub16x16_dct; lr parked in ip, last strip tail-called.
function add16x16_idct_neon
mov r2, #FDEC_STRIDE
mov ip, lr
bl add8x4_idct_neon
bl add8x4_idct_neon
sub r0, r0, #8*FDEC_STRIDE-8
bl add8x4_idct_neon
bl add8x4_idct_neon
sub r0, r0, #8
bl add8x4_idct_neon
bl add8x4_idct_neon
sub r0, r0, #8*FDEC_STRIDE-8
bl add8x4_idct_neon
mov lr, ip
b add8x4_idct_neon
endfunc
// One dimension of the 8x8 IDCT over q8-q15 (in place).
// \type row: interleaves the load of the last four input rows and a
//   final 16-bit transpose step of q8/q9.
// \type col: instead performs the 64-bit swaps that complete the
//   transpose started by the caller.
// q0-q3 are clobbered as scratch.
.macro IDCT8_1D type
.ifc \type, col
vswp d21, d28
.endif
SUMSUB_AB q0, q1, q8, q12 // a0/a2
.ifc \type, row
vld1.64 {d28-d31}, [r1,:128]!
.else
vswp d19, d26
.endif
SUMSUB_SHR 1, q2, q3, q10, q14, q8, q12 // a6/a4
.ifc \type, col
vswp d23, d30
.endif
SUMSUB_AB q8, q10, q13, q11
SUMSUB_15 q8, q10, q9, q15, q12, q14 // a7/a1
SUMSUB_AB q14, q15, q15, q9
SUMSUB_15 q15, q14, q13, q11, q12, q9 // a5/a3
SUMSUB_SHR 2, q13, q14, q14, q15, q11, q9 // b3/b5
SUMSUB_SHR2 2, q12, q15, q8, q10, q11, q9 // b1/b7
SUMSUB_AB q10, q2, q0, q2 // b0/b6
SUMSUB_AB q11, q3, q1, q3 // b2/b4
SUMSUB_AB q8, q15, q10, q15
SUMSUB_AB q9, q14, q11, q14
SUMSUB_AB q10, q13, q3, q13
.ifc \type, row
vtrn.16 q8, q9
.endif
SUMSUB_AB q11, q12, q2, q12
.endm
// void add8x8_idct8( uint8_t *dst, int16_t dct[64] )
// 8x8 IDCT of the coefficients at r1, rounded with >>6 and added
// saturating onto dst (stride FDEC_STRIDE).  Only the first three
// coefficient quads are loaded here; IDCT8_1D row loads the fourth.
// The transpose is split between this function and IDCT8_1D's
// row/col specializations.
function add8x8_idct8_neon
mov r2, #FDEC_STRIDE
vld1.64 {d16-d19}, [r1,:128]!
vld1.64 {d20-d23}, [r1,:128]!
vld1.64 {d24-d27}, [r1,:128]!
IDCT8_1D row
vtrn.16 q10, q11
vtrn.16 q12, q13
vtrn.16 q14, q15
vtrn.32 q8, q10
vtrn.32 q9, q11
vtrn.32 q12, q14
vtrn.32 q13, q15
vswp d17, d24
IDCT8_1D col
// dst rows interleaved with the >>6 rounding, then rewound 8 rows
vld1.64 {d0}, [r0,:64], r2
vrshr.s16 q8, q8, #6
vld1.64 {d1}, [r0,:64], r2
vrshr.s16 q9, q9, #6
vld1.64 {d2}, [r0,:64], r2
vrshr.s16 q10, q10, #6
vld1.64 {d3}, [r0,:64], r2
vrshr.s16 q11, q11, #6
vld1.64 {d4}, [r0,:64], r2
vrshr.s16 q12, q12, #6
vld1.64 {d5}, [r0,:64], r2
vrshr.s16 q13, q13, #6
vld1.64 {d6}, [r0,:64], r2
vrshr.s16 q14, q14, #6
vld1.64 {d7}, [r0,:64], r2
vrshr.s16 q15, q15, #6
sub r0, r0, r2, lsl #3
vaddw.u8 q8, q8, d0
vaddw.u8 q9, q9, d1
vaddw.u8 q10, q10, d2
vqmovun.s16 d0, q8
vqmovun.s16 d1, q9
vqmovun.s16 d2, q10
vaddw.u8 q11, q11, d3
vst1.64 {d0}, [r0,:64], r2
vaddw.u8 q12, q12, d4
vst1.64 {d1}, [r0,:64], r2
vaddw.u8 q13, q13, d5
vst1.64 {d2}, [r0,:64], r2
vqmovun.s16 d3, q11
vqmovun.s16 d4, q12
vaddw.u8 q14, q14, d6
vaddw.u8 q15, q15, d7
vst1.64 {d3}, [r0,:64], r2
vqmovun.s16 d5, q13
vst1.64 {d4}, [r0,:64], r2
vqmovun.s16 d6, q14
vqmovun.s16 d7, q15
vst1.64 {d5}, [r0,:64], r2
vst1.64 {d6}, [r0,:64], r2
vst1.64 {d7}, [r0,:64], r2
bx lr
endfunc
// void add16x16_idct8( uint8_t *dst, int16_t dct[4][64] )
// Four 8x8 IDCTs; lr parked in ip, final call tail-called via X().
function add16x16_idct8_neon
mov ip, lr
bl X(add8x8_idct8_neon)
sub r0, r0, #8*FDEC_STRIDE-8
bl X(add8x8_idct8_neon)
sub r0, r0, #8
bl X(add8x8_idct8_neon)
sub r0, r0, #8*FDEC_STRIDE-8
mov lr, ip
b X(add8x8_idct8_neon)
endfunc
// void add8x8_idct_dc( uint8_t *dst, int16_t dc[4] )
// Adds one rounded DC value per 4x4 quadrant onto an 8x8 pixel block.
// Signed-add-with-saturation trick: clamp +dc and -dc separately to u8
// (vqmovun), then vqadd the positive part and vqsub the negative part,
// so a signed DC works with unsigned saturating pixel math.
function add8x8_idct_dc_neon
mov r2, #FDEC_STRIDE
vld1.64 {d16}, [r1,:64]
vrshr.s16 d16, d16, #6
vld1.64 {d0}, [r0,:64], r2
vmov.i16 q15, #0
vld1.64 {d1}, [r0,:64], r2
vld1.64 {d2}, [r0,:64], r2
vdup.16 d20, d16[0]
vld1.64 {d3}, [r0,:64], r2
vdup.16 d21, d16[1]
vld1.64 {d4}, [r0,:64], r2
vdup.16 d22, d16[2]
vld1.64 {d5}, [r0,:64], r2
vdup.16 d23, d16[3]
vld1.64 {d6}, [r0,:64], r2
// q12/q13 = negated DCs for the vqsub path
vsub.s16 q12, q15, q10
vld1.64 {d7}, [r0,:64], r2
vsub.s16 q13, q15, q11
sub r0, r0, #8*FDEC_STRIDE
vqmovun.s16 d20, q10
vqmovun.s16 d22, q11
vqmovun.s16 d24, q12
vqmovun.s16 d26, q13
vmov d21, d20
vqadd.u8 q0, q0, q10
vmov d23, d22
vqadd.u8 q1, q1, q10
vmov d25, d24
vqadd.u8 q2, q2, q11
vmov d27, d26
vqadd.u8 q3, q3, q11
vqsub.u8 q0, q0, q12
vqsub.u8 q1, q1, q12
vqsub.u8 q2, q2, q13
vst1.64 {d0}, [r0,:64], r2
vqsub.u8 q3, q3, q13
vst1.64 {d1}, [r0,:64], r2
vst1.64 {d2}, [r0,:64], r2
vst1.64 {d3}, [r0,:64], r2
vst1.64 {d4}, [r0,:64], r2
vst1.64 {d5}, [r0,:64], r2
vst1.64 {d6}, [r0,:64], r2
vst1.64 {d7}, [r0,:64], r2
bx lr
endfunc
// Add four DC values (\dc lanes 0-3, one per 4x4 quadrant) onto a 16x4
// pixel strip.  r0 = read pointer, r2 = write pointer (both advanced by
// r3 = FDEC_STRIDE); q15 must be zero.  Uses the same clamp-positive /
// clamp-negative vqadd/vqsub trick as add8x8_idct_dc_neon.
.macro ADD16x4_IDCT_DC dc
vld1.64 {d16-d17}, [r0,:128], r3
vld1.64 {d18-d19}, [r0,:128], r3
vdup.16 d4, \dc[0]
vdup.16 d5, \dc[1]
vld1.64 {d20-d21}, [r0,:128], r3
vdup.16 d6, \dc[2]
vdup.16 d7, \dc[3]
vld1.64 {d22-d23}, [r0,:128], r3
vsub.s16 q12, q15, q2
vsub.s16 q13, q15, q3
vqmovun.s16 d4, q2
vqmovun.s16 d5, q3
vqmovun.s16 d6, q12
vqmovun.s16 d7, q13
vqadd.u8 q8, q8, q2
vqadd.u8 q9, q9, q2
vqadd.u8 q10, q10, q2
vqadd.u8 q11, q11, q2
vqsub.u8 q8, q8, q3
vqsub.u8 q9, q9, q3
vqsub.u8 q10, q10, q3
vst1.64 {d16-d17}, [r2,:128], r3
vqsub.u8 q11, q11, q3
vst1.64 {d18-d19}, [r2,:128], r3
vst1.64 {d20-d21}, [r2,:128], r3
vst1.64 {d22-d23}, [r2,:128], r3
.endm
// void add16x16_idct_dc( uint8_t *dst, int16_t dc[16] )
// Rounds 16 DC values (>>6) and adds one per 4x4 quadrant onto a 16x16
// block, four 16x4 strips at a time via ADD16x4_IDCT_DC.
function add16x16_idct_dc_neon
mov r2, r0
mov r3, #FDEC_STRIDE
vmov.i16 q15, #0
vld1.64 {d0-d3}, [r1,:64]
vrshr.s16 q0, #6
vrshr.s16 q1, #6
ADD16x4_IDCT_DC d0
ADD16x4_IDCT_DC d1
ADD16x4_IDCT_DC d2
ADD16x4_IDCT_DC d3
bx lr
endfunc
// void sub8x8_dct_dc( int16_t dct[4], uint8_t *pix1, uint8_t *pix2 )
// Sums the residual (fenc - fdec) of each 4x4 quadrant of an 8x8 block,
// then applies a 2x2 Hadamard to the four sums.  Loads are interleaved
// with the accumulation; q0 gathers rows 0-3, q1 rows 4-7.
function sub8x8_dct_dc_neon
mov r3, #FENC_STRIDE
mov ip, #FDEC_STRIDE
vld1.64 {d16}, [r1,:64], r3
vld1.64 {d17}, [r2,:64], ip
vsubl.u8 q8, d16, d17
vld1.64 {d18}, [r1,:64], r3
vld1.64 {d19}, [r2,:64], ip
vsubl.u8 q9, d18, d19
vld1.64 {d20}, [r1,:64], r3
vld1.64 {d21}, [r2,:64], ip
vsubl.u8 q10, d20, d21
vld1.64 {d22}, [r1,:64], r3
vadd.s16 q0, q8, q9
vld1.64 {d23}, [r2,:64], ip
vsubl.u8 q11, d22, d23
vld1.64 {d24}, [r1,:64], r3
vadd.s16 q0, q0, q10
vld1.64 {d25}, [r2,:64], ip
vsubl.u8 q12, d24, d25
vld1.64 {d26}, [r1,:64], r3
vadd.s16 q0, q0, q11
vld1.64 {d27}, [r2,:64], ip
vsubl.u8 q13, d26, d27
vld1.64 {d28}, [r1,:64], r3
vld1.64 {d29}, [r2,:64], ip
vsubl.u8 q14, d28, d29
vld1.64 {d30}, [r1,:64], r3
vadd.s16 q1, q12, q13
vld1.64 {d31}, [r2,:64], ip
vsubl.u8 q15, d30, d31
vadd.s16 q1, q1, q14
// 2x2 Hadamard across the quadrant sums, reduced with pairwise adds
vadd.s16 d4, d0, d1
vadd.s16 q1, q1, q15
vsub.s16 d5, d0, d1
vadd.s16 d6, d2, d3
vsub.s16 d7, d2, d3
vadd.s16 q0, q2, q3
vsub.s16 q1, q2, q3
vpadd.s16 d0, d0, d2
vpadd.s16 d1, d1, d3
vpadd.s16 d0, d0, d1
vst1.64 {d0}, [r0,:64]
bx lr
endfunc
// void sub8x16_dct_dc( int16_t dct[8], uint8_t *pix1, uint8_t *pix2 )
// Like sub8x8_dct_dc but over an 8x16 block: q0-q3 accumulate the
// residual sum of each of the eight 4x4 quadrants (q0/q1 top half,
// q2/q3 bottom half), then a Hadamard over the eight sums produces
// the 8 output coefficients.
function sub8x16_dct_dc_neon
mov r3, #FENC_STRIDE
mov ip, #FDEC_STRIDE
vld1.64 {d16}, [r1,:64], r3
vld1.64 {d17}, [r2,:64], ip
vsubl.u8 q8, d16, d17
vld1.64 {d18}, [r1,:64], r3
vld1.64 {d19}, [r2,:64], ip
vsubl.u8 q9, d18, d19
vld1.64 {d20}, [r1,:64], r3
vld1.64 {d21}, [r2,:64], ip
vsubl.u8 q10, d20, d21
vld1.64 {d22}, [r1,:64], r3
vadd.s16 q0, q8, q9
vld1.64 {d23}, [r2,:64], ip
vsubl.u8 q11, d22, d23
vld1.64 {d24}, [r1,:64], r3
vadd.s16 q0, q0, q10
vld1.64 {d25}, [r2,:64], ip
vsubl.u8 q12, d24, d25
vld1.64 {d26}, [r1,:64], r3
vadd.s16 q0, q0, q11
vld1.64 {d27}, [r2,:64], ip
vsubl.u8 q13, d26, d27
vld1.64 {d28}, [r1,:64], r3
vld1.64 {d29}, [r2,:64], ip
vsubl.u8 q14, d28, d29
vld1.64 {d30}, [r1,:64], r3
vadd.s16 q1, q12, q13
vld1.64 {d31}, [r2,:64], ip
vsubl.u8 q15, d30, d31
// second 8x8 half: q8-q15 reused for rows 8-15
vld1.64 {d16}, [r1,:64], r3
vadd.s16 q1, q1, q14
vld1.64 {d17}, [r2,:64], ip
vadd.s16 q1, q1, q15
vld1.64 {d18}, [r1,:64], r3
vsubl.u8 q8, d16, d17
vld1.64 {d19}, [r2,:64], ip
vsubl.u8 q9, d18, d19
vld1.64 {d20}, [r1,:64], r3
vld1.64 {d21}, [r2,:64], ip
vsubl.u8 q10, d20, d21
vld1.64 {d22}, [r1,:64], r3
vadd.s16 q2, q8, q9
vld1.64 {d23}, [r2,:64], ip
vsubl.u8 q11, d22, d23
vld1.64 {d24}, [r1,:64], r3
vadd.s16 q2, q2, q10
vld1.64 {d25}, [r2,:64], ip
vsubl.u8 q12, d24, d25
vld1.64 {d26}, [r1,:64], r3
vadd.s16 q2, q2, q11
vld1.64 {d27}, [r2,:64], ip
vsubl.u8 q13, d26, d27
vld1.64 {d28}, [r1,:64], r3
vld1.64 {d29}, [r2,:64], ip
vsubl.u8 q14, d28, d29
vld1.64 {d30}, [r1,:64], r3
vadd.s16 q3, q12, q13
vld1.64 {d31}, [r2,:64], ip
vsubl.u8 q15, d30, d31
vadd.s16 q3, q3, q14
vadd.s16 d16, d0, d1 @ b0
vadd.s16 q3, q3, q15
vsub.s16 d17, d0, d1 @ b4
vadd.s16 d18, d2, d3 @ b1
vsub.s16 d19, d2, d3 @ b5
vadd.s16 d20, d4, d5 @ b2
vsub.s16 d21, d4, d5 @ b6
vadd.s16 d22, d6, d7 @ b3
vsub.s16 d23, d6, d7 @ b7
vadd.s16 q0, q8, q9 @ b0 + b1, b4 + b5; a0, a2
vsub.s16 q1, q8, q9 @ b0 - b1, b4 - b5; a4, a6
vadd.s16 q2, q10, q11 @ b2 + b3, b6 + b7; a1, a3
vsub.s16 q3, q10, q11 @ b2 - b3, b6 - b7; a5, a7
vadd.s16 q8, q0, q2 @ a0 + a1, a2 + a3
vsub.s16 q9, q0, q2 @ a0 - a1, a2 - a3
vsub.s16 q10, q1, q3 @ a4 - a5, a6 - a7
vadd.s16 q11, q1, q3 @ a4 + a5, a6 + a7
vpadd.s16 d0, d16, d17
vpadd.s16 d1, d18, d19
vpadd.s16 d2, d20, d21
vpadd.s16 d3, d22, d23
vpadd.s16 d0, d0, d1
vpadd.s16 d1, d2, d3
vst1.64 {q0}, [r0,:64]
bx lr
endfunc
// void zigzag_scan_4x4_frame( int16_t level[16], int16_t dct[16] )
// Reorders the 16 coefficients at r1 into frame zigzag order at r0
// using byte-wise table lookups (vtbl) driven by scan4x4_frame.
function zigzag_scan_4x4_frame_neon
movrel r2, scan4x4_frame
vld1.64 {d0-d3}, [r1,:128]
vld1.64 {d16-d19}, [r2,:128]
vtbl.8 d4, {d0-d1}, d16
vtbl.8 d5, {d1-d3}, d17
vtbl.8 d6, {d0-d2}, d18
vtbl.8 d7, {d2-d3}, d19
vst1.64 {d4-d7}, [r0,:128]
bx lr
endfunc
|
aestream/faery
| 55,663
|
src/mp4/x264/common/arm/mc-a.S
|
/*****************************************************************************
* mc.S: arm motion compensation
*****************************************************************************
* Copyright (C) 2009-2024 x264 project
*
* Authors: David Conrad <lessen42@gmail.com>
* Mans Rullgard <mans@mansr.com>
* Stefan Groenroos <stefan.gronroos@gmail.com>
* Janne Grunau <janne-x264@jannau.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "asm.S"
// Table of int16 constants 0..15 (presumably a lane-index ramp for MC
// routines defined later in this file — TODO confirm against the full file).
const pw_0to15, align=4
.short 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
endconst
.text
// note: prefetch stuff assumes 64-byte cacheline, true for the Cortex-A8
// They also use nothing above armv5te, but we don't care about pre-armv6
// void prefetch_ref( uint8_t *pix, intptr_t stride, int parity )
// void prefetch_ref( uint8_t *pix, intptr_t stride, int parity )
// r0 = pix, r1 = stride, r2 = parity (1 or 2 per the caller convention).
// Prefetches 8 rows of the reference picture starting 64 bytes in,
// offset by 8*stride when (parity-1) & stride selects the odd field.
function prefetch_ref_arm
sub r2, r2, #1
add r0, r0, #64
and r2, r2, r1
add r0, r0, r2, lsl #3
// r2 = 3*stride so [r0, r2] hits row 3 without extra adds
add r2, r1, r1, lsl #1
pld [r0]
pld [r0, r1]
pld [r0, r1, lsl #1]
add r3, r0, r1, lsl #2
pld [r0, r2]
pld [r3]
pld [r3, r1]
pld [r3, r1, lsl #1]
pld [r3, r2]
bx lr
endfunc
// void prefetch_fenc( uint8_t *pix_y, intptr_t stride_y,
// uint8_t *pix_uv, intptr_t stride_uv, int mb_x )
// void prefetch_fenc( uint8_t *pix_y, intptr_t stride_y,
//                     uint8_t *pix_uv, intptr_t stride_uv, int mb_x )
// 5th arg (mb_x) comes from the stack.  Prefetches a few luma rows at
// pix_y + (mb_x&3)*4*stride_y + 64 and chroma rows at
// pix_uv + (mb_x&6)*4*stride_uv + 64.
function prefetch_fenc_arm
ldr ip, [sp]
push {lr}
and lr, ip, #3
smulbb lr, lr, r1 // note: this assumes stride_y is <= 16 bits signed
and ip, ip, #6
smulbb ip, ip, r3
add r0, r0, #64
add r2, r2, #64
add r0, r0, lr, lsl #2
pld [r0]
add lr, r0, r1, lsl #1
pld [r0, r1]
pld [lr]
add r2, r2, ip, lsl #2
pld [lr, r1]
pld [r2]
add ip, r2, r3, lsl #1
pld [r2, r3]
pld [ip]
pld [ip, r3]
pop {pc}
endfunc
// void *memcpy_aligned( void *dst, const void *src, size_t n )
// void *memcpy_aligned( void *dst, const void *src, size_t n )
// Dispatcher: folds dst bit 3 and src bit 3 (shifted into bit 2) into a
// 0/4/8/12 word offset and tail-jumps through memcpy_table to the
// variant specialized for that (dst,src) 8/16-byte alignment pair.
function memcpy_aligned_neon
orr r3, r0, r1, lsr #1
movrel ip, memcpy_table
and r3, r3, #0xc
ldr pc, [ip, r3]
endfunc
// Emits memcpy_aligned_<dst>_<src>_neon for one alignment pair.
// n must be a multiple of 16.  For the 8/8 case, one leading and one
// trailing 8-byte copy re-align both pointers to 16 so the bulk loop
// can use :128 hints.  r3 is the working dst; r0 (return value) is
// preserved.
.macro MEMCPY_ALIGNED srcalign dstalign
function memcpy_aligned_\dstalign\()_\srcalign\()_neon, export=0
mov r3, r0
.if \srcalign == 8 && \dstalign == 8
sub r2, #16
vld1.64 {d0}, [r1,:64]!
vst1.64 {d0}, [r3,:64]!
.set r1align, 128
.set r3align, 128
.else
.set r1align, \srcalign * 8
.set r3align, \dstalign * 8
.endif
tst r2, #16
beq 32f
sub r2, #16
vld1.64 {d0-d1}, [r1,:r1align]!
vst1.64 {d0-d1}, [r3,:r3align]!
32: // n is a multiple of 32
tst r2, #32
beq 640f
sub r2, #32
vld1.64 {d0-d3}, [r1,:r1align]!
vst1.64 {d0-d3}, [r3,:r3align]!
640: // n is a multiple of 64
cmp r2, #0
beq 1f
64:
subs r2, #64
vld1.64 {d0-d3}, [r1,:r1align]!
vld1.64 {d4-d7}, [r1,:r1align]!
vst1.64 {d0-d3}, [r3,:r3align]!
vst1.64 {d4-d7}, [r3,:r3align]!
bgt 64b
1: // end
.if \srcalign == 8 && \dstalign == 8
vld1.64 {d0}, [r1,:64]!
vst1.64 {d0}, [r3,:64]!
.endif
bx lr
endfunc
.endm
// Instantiate all four alignment variants; table entry order must match
// the 0/4/8/12 index computed in memcpy_aligned_neon (dst bit 3 -> bit 3,
// src bit 3 -> bit 2).
MEMCPY_ALIGNED 16, 16
MEMCPY_ALIGNED 16, 8
MEMCPY_ALIGNED 8, 16
MEMCPY_ALIGNED 8, 8
const memcpy_table, align=2, relocate=1
.word memcpy_aligned_16_16_neon
.word memcpy_aligned_16_8_neon
.word memcpy_aligned_8_16_neon
.word memcpy_aligned_8_8_neon
endconst
.text
.ltorg
// void memzero_aligned( void *dst, size_t n )
// void memzero_aligned( void *dst, size_t n )
// r0 = 16-byte-aligned dst, r1 = n (must be a multiple of 128: each
// iteration stores 4 x 32 zero bytes).
function memzero_aligned_neon
vmov.i8 q0, #0
vmov.i8 q1, #0
memzero_loop:
subs r1, #128
.rept 4
vst1.64 {d0-d3}, [r0,:128]!
.endr
bgt memzero_loop
bx lr
endfunc
// void pixel_avg( uint8_t *dst, intptr_t dst_stride,
// uint8_t *src1, intptr_t src1_stride,
// uint8_t *src2, intptr_t src2_stride, int weight );
// Emits pixel_avg_WxH: loads weight (7th arg) and src2/src2_stride from
// the stack, sets lr = height, then dispatches:
//   weight == 32      -> plain rounded average (pixel_avg_wW)
//   0 <= weight <= 64 -> weight*src1 + (64-weight)*src2   (add_add)
//   weight > 64       -> weight*src1 - (weight-64)*src2   (add_sub)
//   weight < 0        -> (64-weight)*src2 - (-weight)*src1 (sub_add)
// The dispatched body pops {r4-r6,pc}, returning for this function.
.macro AVGH w h
function pixel_avg_\w\()x\h\()_neon
ldr ip, [sp, #8]
push {r4-r6,lr}
cmp ip, #32
ldrd r4, r5, [sp, #16]
mov lr, #\h
beq pixel_avg_w\w\()_neon
rsbs r6, ip, #64 // r6 = 64 - weight, flags set
blt pixel_avg_weight_w\w\()_add_sub_neon // weight > 64
cmp ip, #0
bge pixel_avg_weight_w\w\()_add_add_neon
b pixel_avg_weight_w\w\()_sub_add_neon // weight < 0
endfunc
.endm
// Instantiate pixel_avg for every partition size used by the MC code.
AVGH 4, 2
AVGH 4, 4
AVGH 4, 8
AVGH 4, 16
AVGH 8, 4
AVGH 8, 8
AVGH 8, 16
AVGH 16, 8
AVGH 16, 16
// 0 < weight < 64
// Helper macro triplets for the three weighted-average modes selected by
// AVGH.  Each mode provides: load_weights_* (broadcast |weight| into d30
// and |64-weight| into d31), load_* (fetch one row from each source),
// and weight_* (combine into a widened 16-bit accumulator).
.macro load_weights_add_add
vdup.8 d30, ip
vdup.8 d31, r6
.endm
.macro load_add_add d1 d2
vld1.32 {\d1}, [r2], r3
vld1.32 {\d2}, [r4], r5
.endm
.macro weight_add_add dst s1 s2
vmull.u8 \dst, \s1, d30
vmlal.u8 \dst, \s2, d31
.endm
// weight > 64: r6 = 64-weight is negative; negate it and subtract.
.macro load_weights_add_sub
rsb r6, #0
vdup.8 d30, ip
vdup.8 d31, r6
.endm
.macro load_add_sub d1 d2
vld1.32 {\d1}, [r2], r3
vld1.32 {\d2}, [r4], r5
.endm
.macro weight_add_sub dst s1 s2
vmull.u8 \dst, \s1, d30
vmlsl.u8 \dst, \s2, d31
.endm
// weight < 0: negate the weight and subtract the src1 term instead.
.macro load_weights_sub_add
rsb ip, #0
vdup.8 d31, r6
vdup.8 d30, ip
.endm
.macro load_sub_add d1 d2
vld1.32 {\d2}, [r4], r5
vld1.32 {\d1}, [r2], r3
.endm
.macro weight_sub_add dst s1 s2
vmull.u8 \dst, \s2, d31
vmlsl.u8 \dst, \s1, d30
.endm
// Emits the weighted-average bodies for widths 4/8/16 for one mode \ext
// (add_add / add_sub / sub_add).  Entered from AVGH with:
//   r0/r1 = dst/stride, r2/r3 = src1/stride, r4/r5 = src2/stride,
//   ip = weight, r6 = 64-weight, lr = height.
// Each result is narrowed with vqrshrun #6 (the weights are x64 scaled).
.macro AVG_WEIGHT ext
function pixel_avg_weight_w4_\ext\()_neon, export=0
load_weights_\ext
1: // height loop
subs lr, lr, #2
load_\ext d0[], d1[]
weight_\ext q8, d0, d1
load_\ext d2[], d3[]
vqrshrun.s16 d0, q8, #6
weight_\ext q9, d2, d3
vst1.32 {d0[0]}, [r0,:32], r1
vqrshrun.s16 d1, q9, #6
vst1.32 {d1[0]}, [r0,:32], r1
bgt 1b
pop {r4-r6,pc}
endfunc
function pixel_avg_weight_w8_\ext\()_neon, export=0
load_weights_\ext
1: // height loop
subs lr, lr, #4
load_\ext d0, d1
weight_\ext q8, d0, d1
load_\ext d2, d3
weight_\ext q9, d2, d3
load_\ext d4, d5
weight_\ext q10, d4, d5
load_\ext d6, d7
weight_\ext q11, d6, d7
vqrshrun.s16 d0, q8, #6
vqrshrun.s16 d1, q9, #6
vqrshrun.s16 d2, q10, #6
vqrshrun.s16 d3, q11, #6
vst1.64 {d0}, [r0,:64], r1
vst1.64 {d1}, [r0,:64], r1
vst1.64 {d2}, [r0,:64], r1
vst1.64 {d3}, [r0,:64], r1
bgt 1b
pop {r4-r6,pc}
endfunc
function pixel_avg_weight_w16_\ext\()_neon, export=0
load_weights_\ext
1: // height loop
subs lr, lr, #2
load_\ext d0-d1, d2-d3
weight_\ext q8, d0, d2
weight_\ext q9, d1, d3
load_\ext d4-d5, d6-d7
weight_\ext q10, d4, d6
weight_\ext q11, d5, d7
vqrshrun.s16 d0, q8, #6
vqrshrun.s16 d1, q9, #6
vqrshrun.s16 d2, q10, #6
vqrshrun.s16 d3, q11, #6
vst1.64 {d0-d1}, [r0,:128], r1
vst1.64 {d2-d3}, [r0,:128], r1
bgt 1b
pop {r4-r6,pc}
endfunc
.endm
// Instantiate the three weighting modes (see AVGH dispatch above).
AVG_WEIGHT add_add
AVG_WEIGHT add_sub
AVG_WEIGHT sub_add
// Plain rounded average (weight == 32), width 4.  Entered from AVGH:
// r0/r1 dst, r2/r3 src1, r4/r5 src2, lr = height (multiple of 2).
function pixel_avg_w4_neon, export=0
subs lr, lr, #2
vld1.32 {d0[]}, [r2], r3
vld1.32 {d2[]}, [r4], r5
vrhadd.u8 d0, d0, d2
vld1.32 {d1[]}, [r2], r3
vld1.32 {d3[]}, [r4], r5
vrhadd.u8 d1, d1, d3
vst1.32 {d0[0]}, [r0,:32], r1
vst1.32 {d1[0]}, [r0,:32], r1
bgt pixel_avg_w4_neon
pop {r4-r6,pc}
endfunc
// Plain rounded average, width 8, 4 rows per iteration (height is a
// multiple of 4).  Same register contract as pixel_avg_w4_neon.
function pixel_avg_w8_neon, export=0
subs lr, lr, #4
vld1.64 {d0}, [r2], r3
vld1.64 {d2}, [r4], r5
vrhadd.u8 d0, d0, d2
vld1.64 {d1}, [r2], r3
vld1.64 {d3}, [r4], r5
vrhadd.u8 d1, d1, d3
vst1.64 {d0}, [r0,:64], r1
vld1.64 {d2}, [r2], r3
vld1.64 {d4}, [r4], r5
vrhadd.u8 d2, d2, d4
vst1.64 {d1}, [r0,:64], r1
vld1.64 {d3}, [r2], r3
vld1.64 {d5}, [r4], r5
vrhadd.u8 d3, d3, d5
vst1.64 {d2}, [r0,:64], r1
vst1.64 {d3}, [r0,:64], r1
bgt pixel_avg_w8_neon
pop {r4-r6,pc}
endfunc
// Plain rounded average, width 16, 4 rows per iteration.
// Same register contract as pixel_avg_w4_neon.
function pixel_avg_w16_neon, export=0
subs lr, lr, #4
vld1.64 {d0-d1}, [r2], r3
vld1.64 {d2-d3}, [r4], r5
vrhadd.u8 q0, q0, q1
vld1.64 {d2-d3}, [r2], r3
vld1.64 {d4-d5}, [r4], r5
vrhadd.u8 q1, q1, q2
vst1.64 {d0-d1}, [r0,:128], r1
vld1.64 {d4-d5}, [r2], r3
vld1.64 {d6-d7}, [r4], r5
vrhadd.u8 q2, q2, q3
vst1.64 {d2-d3}, [r0,:128], r1
vld1.64 {d6-d7}, [r2], r3
vld1.64 {d0-d1}, [r4], r5
vrhadd.u8 q3, q3, q0
vst1.64 {d4-d5}, [r0,:128], r1
vst1.64 {d6-d7}, [r0,:128], r1
bgt pixel_avg_w16_neon
pop {r4-r6,pc}
endfunc
// void pixel_avg2_w4( uint8_t *dst, intptr_t dst_stride,
//                     uint8_t *src1, intptr_t src_stride,
//                     uint8_t *src2, int height )
// Rounded average of two sources that share one stride (r3).
// ip = height (5th arg pre-push), lr = src2 (read after push: sp+4).
function pixel_avg2_w4_neon
ldr ip, [sp, #4]
push {lr}
ldr lr, [sp, #4]
avg2_w4_loop:
subs ip, ip, #2
vld1.32 {d0[]}, [r2], r3
vld1.32 {d2[]}, [lr], r3
vrhadd.u8 d0, d0, d2
vld1.32 {d1[]}, [r2], r3
vld1.32 {d3[]}, [lr], r3
vrhadd.u8 d1, d1, d3
vst1.32 {d0[0]}, [r0,:32], r1
vst1.32 {d1[0]}, [r0,:32], r1
bgt avg2_w4_loop
pop {pc}
endfunc
// pixel_avg2, width 8 — same argument contract as pixel_avg2_w4_neon.
function pixel_avg2_w8_neon
ldr ip, [sp, #4]
push {lr}
ldr lr, [sp, #4]
avg2_w8_loop:
subs ip, ip, #2
vld1.64 {d0}, [r2], r3
vld1.64 {d2}, [lr], r3
vrhadd.u8 d0, d0, d2
vld1.64 {d1}, [r2], r3
vld1.64 {d3}, [lr], r3
vrhadd.u8 d1, d1, d3
vst1.64 {d0}, [r0,:64], r1
vst1.64 {d1}, [r0,:64], r1
bgt avg2_w8_loop
pop {pc}
endfunc
// pixel_avg2, width 16 — same argument contract as pixel_avg2_w4_neon.
function pixel_avg2_w16_neon
ldr ip, [sp, #4]
push {lr}
ldr lr, [sp, #4]
avg2_w16_loop:
subs ip, ip, #2
vld1.64 {d0-d1}, [r2], r3
vld1.64 {d2-d3}, [lr], r3
vrhadd.u8 q0, q0, q1
vld1.64 {d4-d5}, [r2], r3
vld1.64 {d6-d7}, [lr], r3
vrhadd.u8 q2, q2, q3
vst1.64 {d0-d1}, [r0,:128], r1
vst1.64 {d4-d5}, [r0,:128], r1
bgt avg2_w16_loop
pop {pc}
endfunc
// pixel_avg2, width 20: a 16-byte vector store plus a 4-byte tail per
// row, so dst_stride is pre-reduced by 16 for the post-indexed tail
// store.  Same argument contract as pixel_avg2_w4_neon.
function pixel_avg2_w20_neon
ldr ip, [sp, #4]
push {lr}
sub r1, r1, #16
ldr lr, [sp, #4]
avg2_w20_loop:
subs ip, ip, #2
vld1.64 {d0-d2}, [r2], r3
vld1.64 {d4-d6}, [lr], r3
vrhadd.u8 q0, q0, q2
vrhadd.u8 d2, d2, d6
vld1.64 {d4-d6}, [r2], r3
vld1.64 {d16-d18},[lr], r3
vrhadd.u8 q2, q2, q8
vst1.64 {d0-d1}, [r0,:128]!
vrhadd.u8 d6, d6, d18
vst1.32 {d2[0]}, [r0,:32], r1
vst1.64 {d4-d5}, [r0,:128]!
vst1.32 {d6[0]}, [r0,:32], r1
bgt avg2_w20_loop
pop {pc}
endfunc
// Shared prologue for the mc_weight_* functions: loads the weight_t
// pointer and height from the stack (after push {r4-r5,lr} they sit at
// sp+12/sp+16), then broadcasts scale into d0, offset into q1 and, for
// \type == full, the negated denom into q2 (as a vrshl shift count).
// Field offsets 32/36/40 into weight_t are per the comments below —
// TODO confirm against the x264 weight_t layout if it changes.
.macro weight_prologue type
push {r4-r5,lr}
ldr r4, [sp, #4*3] // weight_t
ldr ip, [sp, #4*3+4] // h
.ifc \type, full
ldr lr, [r4, #32] // denom
.endif
ldrd r4, r5, [r4, #32+4] // scale, offset
vdup.8 d0, r4
vdup.16 q1, r5
.ifc \type, full
rsb lr, lr, #0
vdup.16 q2, lr
.endif
.endm
// void mc_weight( uint8_t *src, intptr_t src_stride, uint8_t *dst, intptr_t dst_stride,
// const x264_weight_t *weight, int height )
// mc_weight, width 20, full (with-denominator) path:
// out = clip(((pix*scale) >> denom) + offset), two rows per iteration.
// The two rows' 4-byte tails are paired into d19 (vtrn.32) so one
// multiply covers both.  dst_stride pre-reduced by 16 for the tails.
function mc_weight_w20_neon
weight_prologue full
sub r1, #16
weight20_loop:
subs ip, #2
vld1.8 {d17-d19}, [r2], r3
vmull.u8 q10, d17, d0
vmull.u8 q11, d18, d0
vld1.8 {d16-d18}, [r2], r3
vmull.u8 q12, d16, d0
vmull.u8 q13, d17, d0
vtrn.32 d19, d18
vmull.u8 q14, d19, d0
// vrshl by the negated denom == rounded right shift
vrshl.s16 q10, q10, q2
vrshl.s16 q11, q11, q2
vrshl.s16 q12, q12, q2
vrshl.s16 q13, q13, q2
vrshl.s16 q14, q14, q2
vadd.s16 q10, q10, q1
vadd.s16 q11, q11, q1
vadd.s16 q12, q12, q1
vadd.s16 q13, q13, q1
vadd.s16 q14, q14, q1
vqmovun.s16 d16, q10
vqmovun.s16 d17, q11
vqmovun.s16 d18, q12
vqmovun.s16 d19, q13
vqmovun.s16 d20, q14
vst1.8 {d16-d17}, [r0,:128]!
vst1.32 {d20[0]}, [r0,:32], r1
vst1.8 {d18-d19}, [r0,:128]!
vst1.32 {d20[1]}, [r0,:32], r1
bgt weight20_loop
pop {r4-r5,pc}
endfunc
// mc_weight, width 16, full path (scale, >>denom via vrshl, +offset,
// clamp to u8).  Two rows per iteration.
function mc_weight_w16_neon
weight_prologue full
weight16_loop:
subs ip, #2
vld1.8 {d16-d17}, [r2], r3
vld1.8 {d18-d19}, [r2], r3
vmull.u8 q10, d16, d0
vmull.u8 q11, d17, d0
vmull.u8 q12, d18, d0
vmull.u8 q13, d19, d0
vrshl.s16 q10, q10, q2
vrshl.s16 q11, q11, q2
vrshl.s16 q12, q12, q2
vrshl.s16 q13, q13, q2
vadd.s16 q10, q10, q1
vadd.s16 q11, q11, q1
vadd.s16 q12, q12, q1
vadd.s16 q13, q13, q1
vqmovun.s16 d16, q10
vqmovun.s16 d17, q11
vqmovun.s16 d18, q12
vqmovun.s16 d19, q13
vst1.8 {d16-d17}, [r0,:128], r1
vst1.8 {d18-d19}, [r0,:128], r1
bgt weight16_loop
pop {r4-r5,pc}
endfunc
// mc_weight, width 8, full path.  Two rows per iteration.
function mc_weight_w8_neon
weight_prologue full
weight8_loop:
subs ip, #2
vld1.8 {d16}, [r2], r3
vld1.8 {d18}, [r2], r3
vmull.u8 q8, d16, d0
vmull.u8 q9, d18, d0
vrshl.s16 q8, q8, q2
vrshl.s16 q9, q9, q2
vadd.s16 q8, q8, q1
vadd.s16 q9, q9, q1
vqmovun.s16 d16, q8
vqmovun.s16 d18, q9
vst1.8 {d16}, [r0,:64], r1
vst1.8 {d18}, [r0,:64], r1
bgt weight8_loop
pop {r4-r5,pc}
endfunc
// mc_weight, width 4, full path — two rows packed into one d register.
function mc_weight_w4_neon
weight_prologue full
weight4_loop:
subs ip, #2
vld1.32 {d16[0]}, [r2], r3
vld1.32 {d16[1]}, [r2], r3
vmull.u8 q8, d16, d0
vrshl.s16 q8, q8, q2
vadd.s16 q8, q8, q1
vqmovun.s16 d16, q8
vst1.32 {d16[0]}, [r0], r1
vst1.32 {d16[1]}, [r0], r1
bgt weight4_loop
pop {r4-r5,pc}
endfunc
// mc_weight, width 20, denom == 0 path: out = clip(pix*scale + offset).
// The offset (q1) is used as the multiply-accumulate base, saving the
// separate shift/add of the full path.
function mc_weight_w20_nodenom_neon
weight_prologue nodenom
sub r1, #16
weight20_nodenom_loop:
subs ip, #2
vld1.8 {d26-d28}, [r2], r3
vmov q8, q1
vmov q9, q1
vld1.8 {d29-d31}, [r2], r3
vmov q10, q1
vmov q11, q1
vmov q12, q1
// pair the two 4-byte row tails into d28 for one vmlal
vtrn.32 d28, d31
vmlal.u8 q8, d26, d0
vmlal.u8 q9, d27, d0
vmlal.u8 q10, d29, d0
vmlal.u8 q11, d30, d0
vmlal.u8 q12, d28, d0
vqmovun.s16 d16, q8
vqmovun.s16 d17, q9
vqmovun.s16 d18, q10
vqmovun.s16 d19, q11
vqmovun.s16 d20, q12
vst1.8 {d16-d17}, [r0,:128]!
vst1.32 {d20[0]}, [r0,:32], r1
vst1.8 {d18-d19}, [r0,:128]!
vst1.32 {d20[1]}, [r0,:32], r1
bgt weight20_nodenom_loop
pop {r4-r5,pc}
endfunc
// mc_weight, width 16, denom == 0 path (multiply-accumulate on offset).
function mc_weight_w16_nodenom_neon
weight_prologue nodenom
weight16_nodenom_loop:
subs ip, #2
vld1.8 {d16-d17}, [r2], r3
vld1.8 {d18-d19}, [r2], r3
vmov q12, q1
vmov q13, q1
vmov q14, q1
vmov q15, q1
vmlal.u8 q12, d16, d0
vmlal.u8 q13, d17, d0
vmlal.u8 q14, d18, d0
vmlal.u8 q15, d19, d0
vqmovun.s16 d16, q12
vqmovun.s16 d17, q13
vqmovun.s16 d18, q14
vqmovun.s16 d19, q15
vst1.8 {d16-d17}, [r0,:128], r1
vst1.8 {d18-d19}, [r0,:128], r1
bgt weight16_nodenom_loop
pop {r4-r5,pc}
endfunc
// mc_weight, width 8, denom == 0 path.
function mc_weight_w8_nodenom_neon
weight_prologue nodenom
weight8_nodenom_loop:
subs ip, #2
vld1.8 {d16}, [r2], r3
vld1.8 {d18}, [r2], r3
vmov q10, q1
vmov q11, q1
vmlal.u8 q10, d16, d0
vmlal.u8 q11, d18, d0
vqmovun.s16 d16, q10
vqmovun.s16 d17, q11
vst1.8 {d16}, [r0,:64], r1
vst1.8 {d17}, [r0,:64], r1
bgt weight8_nodenom_loop
pop {r4-r5,pc}
endfunc
// mc_weight, width 4, denom == 0 path — two rows packed per iteration.
function mc_weight_w4_nodenom_neon
weight_prologue nodenom
weight4_nodenom_loop:
subs ip, #2
vld1.32 {d16[0]}, [r2], r3
vld1.32 {d16[1]}, [r2], r3
vmov q10, q1
vmlal.u8 q10, d16, d0
vqmovun.s16 d16, q10
vst1.32 {d16[0]}, [r0], r1
vst1.32 {d16[1]}, [r0], r1
bgt weight4_nodenom_loop
pop {r4-r5,pc}
endfunc
// Prologue for the offset-only weight paths: after push {lr}, weight_t
// and h are at sp+4/sp+8; broadcasts the first weight_t field
// (offset, per the comment) into q1 and leaves ip = height.
.macro weight_simple_prologue
push {lr}
ldr lr, [sp, #4] // weight_t
ldr ip, [sp, #8] // h
ldr lr, [lr] // offset
vdup.8 q1, lr
.endm
// Emits mc_weight_wN_<name> for widths 20/16/8/4 where the weighting is
// a single saturating byte op \op (vqadd.u8 for offsetadd, vqsub.u8 for
// offsetsub) with the broadcast offset in q1.  Two rows per iteration.
.macro weight_simple name op
function mc_weight_w20_\name\()_neon
weight_simple_prologue
weight20_\name\()_loop:
subs ip, #2
vld1.8 {d16-d18}, [r2], r3
vld1.8 {d19-d21}, [r2], r3
\op q8, q8, q1
\op q9, q9, q1
\op q10, q10, q1
vst1.8 {d16-d18}, [r0,:64], r1
vst1.8 {d19-d21}, [r0,:64], r1
bgt weight20_\name\()_loop
pop {pc}
endfunc
function mc_weight_w16_\name\()_neon
weight_simple_prologue
weight16_\name\()_loop:
subs ip, #2
vld1.8 {d16-d17}, [r2], r3
vld1.8 {d18-d19}, [r2], r3
\op q8, q8, q1
\op q9, q9, q1
vst1.8 {d16-d17}, [r0,:128], r1
vst1.8 {d18-d19}, [r0,:128], r1
bgt weight16_\name\()_loop
pop {pc}
endfunc
function mc_weight_w8_\name\()_neon
weight_simple_prologue
weight8_\name\()_loop:
subs ip, #2
vld1.8 {d16}, [r2], r3
vld1.8 {d17}, [r2], r3
\op q8, q8, q1
vst1.8 {d16}, [r0,:64], r1
vst1.8 {d17}, [r0,:64], r1
bgt weight8_\name\()_loop
pop {pc}
endfunc
function mc_weight_w4_\name\()_neon
weight_simple_prologue
weight4_\name\()_loop:
subs ip, #2
vld1.32 {d16[]}, [r2], r3
vld1.32 {d17[]}, [r2], r3
\op q8, q8, q1
vst1.32 {d16[0]}, [r0], r1
vst1.32 {d17[0]}, [r0], r1
bgt weight4_\name\()_loop
pop {pc}
endfunc
.endm
// Instantiate the two offset-only variants.
weight_simple offsetadd, vqadd.u8
weight_simple offsetsub, vqsub.u8
// void mc_copy( uint8_t *dst, intptr_t dst_stride, uint8_t *src, intptr_t src_stride, int height )
// mc_copy, width 4 (see prototype comment above): ip = height from the
// stack, 4 rows copied per iteration.
function mc_copy_w4_neon
ldr ip, [sp]
copy_w4_loop:
subs ip, ip, #4
vld1.32 {d0[]}, [r2], r3
vld1.32 {d1[]}, [r2], r3
vld1.32 {d2[]}, [r2], r3
vld1.32 {d3[]}, [r2], r3
vst1.32 {d0[0]}, [r0,:32], r1
vst1.32 {d1[0]}, [r0,:32], r1
vst1.32 {d2[0]}, [r0,:32], r1
vst1.32 {d3[0]}, [r0,:32], r1
bgt copy_w4_loop
bx lr
endfunc
// mc_copy, width 8 — 4 rows per iteration.
function mc_copy_w8_neon
ldr ip, [sp]
copy_w8_loop:
subs ip, ip, #4
vld1.32 {d0}, [r2], r3
vld1.32 {d1}, [r2], r3
vld1.32 {d2}, [r2], r3
vld1.32 {d3}, [r2], r3
vst1.32 {d0}, [r0,:64], r1
vst1.32 {d1}, [r0,:64], r1
vst1.32 {d2}, [r0,:64], r1
vst1.32 {d3}, [r0,:64], r1
bgt copy_w8_loop
bx lr
endfunc
// mc_copy, width 16 — source may be unaligned (no :128 hint on loads).
function mc_copy_w16_neon
ldr ip, [sp]
copy_w16_loop:
subs ip, ip, #4
vld1.32 {d0-d1}, [r2], r3
vld1.32 {d2-d3}, [r2], r3
vld1.32 {d4-d5}, [r2], r3
vld1.32 {d6-d7}, [r2], r3
vst1.32 {d0-d1}, [r0,:128], r1
vst1.32 {d2-d3}, [r0,:128], r1
vst1.32 {d4-d5}, [r0,:128], r1
vst1.32 {d6-d7}, [r0,:128], r1
bgt copy_w16_loop
bx lr
endfunc
// mc_copy, width 16, 16-byte-aligned source (loads carry the :128 hint).
function mc_copy_w16_aligned_neon
ldr ip, [sp]
copy_w16_aligned_loop:
subs ip, ip, #4
vld1.32 {d0-d1}, [r2,:128], r3
vld1.32 {d2-d3}, [r2,:128], r3
vld1.32 {d4-d5}, [r2,:128], r3
vld1.32 {d6-d7}, [r2,:128], r3
vst1.32 {d0-d1}, [r0,:128], r1
vst1.32 {d2-d3}, [r0,:128], r1
vst1.32 {d4-d5}, [r0,:128], r1
vst1.32 {d6-d7}, [r0,:128], r1
bgt copy_w16_aligned_loop
bx lr
endfunc
// void mc_chroma( uint8_t *dst, intptr_t i_dst_stride,
// uint8_t *src, intptr_t i_src_stride,
// int dx, int dy, int i_width, int i_height );
// Chroma motion compensation with bilinear interpolation.
// NOTE(review): the register usage below (two destination pointers r0/r1
// sharing one stride r2, vld2 deinterleaving loads of uv-interleaved source)
// indicates the NV12-style variant
//   mc_chroma( dstu, dstv, dst_stride, src, src_stride, dx, dy, width, height )
// rather than the single-dst prototype in the comment above — confirm against
// the C caller.
// Observed roles after the prologue:
//   r0 = dstu, r1 = dstv, r2 = dst stride, r3 = src (advanced to the
//   full-pel position), r4 = src stride, r5/r6 = dx&7 / dy&7 (subpel
//   fractions), r7 = width, [sp, #72] = height (loaded into r5 later).
// Bilinear weights: d0 = (8-dx)*(8-dy), d1 = dx*(8-dy), d2 = (8-dx)*dy,
// d3 = dx*dy; each output = (sum of 4 weighted taps + 32) >> 6 (vrshrn #6).
function mc_chroma_neon
push {r4-r8, lr}
vpush {d8-d11}
ldrd r4, r5, [sp, #56]
ldrd r6, r7, [sp, #64]
// Advance src to the integer-pel start: src += (dy>>3)*stride + (dx>>2),
// then clear bit 0 so the pointer stays uv-pair aligned.
asr lr, r6, #3
mul lr, r4, lr
add r3, r3, r5, asr #2
cmp r7, #4
and r5, r5, #7
and r6, r6, #7
add r3, r3, lr
bic r3, r3, #0x1
pld [r3]
pld [r3, r4]
// Dispatch on width: >4 -> w8 path, ==4 -> w4, else fall through to w2.
bgt mc_chroma_w8
beq mc_chroma_w4
// Shared prologue for all width paths: computes the four bilinear weights
// (d0-d3), loads the first two deinterleaved source rows, and falls through
// to label 2 when dx*dy == 0 (pure horizontal or vertical case).
.macro CHROMA_MC_START r00, r01, r10, r11
muls lr, r5, r6
rsb r7, lr, r6, lsl #3
rsb ip, lr, r5, lsl #3
sub r5, lr, r5, lsl #3
sub r5, r5, r6, lsl #3
add r5, r5, #64
beq 2f
vld2.8 {\r00-\r01}, [r3], r4
vdup.8 d0, r5
vdup.8 d1, ip
vdup.8 d2, r7
vld2.8 {\r10-\r11}, [r3], r4
vdup.8 d3, lr
ldr r5, [sp, #72]               @ r5 = height
.endm
// Narrow-width (2/4 pixel) interpolation body; \align selects the store
// element size (16-bit pairs for w2, 32-bit for w4).
.macro CHROMA_MC width, align
mc_chroma_w\width:
CHROMA_MC_START d4, d5, d8, d9
vext.8 d6, d4, d6, #1
vext.8 d7, d5, d7, #1
vext.8 d10, d8, d10, #1
vext.8 d11, d9, d11, #1
// since the element size varies, there's a different index for the 2nd store
.if \width == 4
.set st2, 1
.else
.set st2, 2
.endif
vtrn.32 d4, d6
vtrn.32 d5, d7
vtrn.32 d8, d10
vtrn.32 d9, d11
vtrn.32 d0, d1
vtrn.32 d2, d3
1: // height loop, interpolate xy
vmull.u8 q8, d4, d0
vmlal.u8 q8, d8, d2
vmull.u8 q9, d5, d0
vmlal.u8 q9, d9, d2
vld2.8 {d4-d5}, [r3], r4
vext.8 d6, d4, d6, #1
vext.8 d7, d5, d7, #1
vadd.i16 d16, d16, d17
vadd.i16 d17, d18, d19
vtrn.32 d4, d6
vtrn.32 d5, d7
vmull.u8 q10, d8, d0
vmlal.u8 q10, d4, d2
vmull.u8 q11, d9, d0
vmlal.u8 q11, d5, d2
vld2.8 {d8-d9}, [r3], r4
vrshrn.u16 d16, q8, #6
vext.8 d10, d8, d10, #1
vext.8 d11, d9, d11, #1
vadd.i16 d18, d20, d21
vadd.i16 d19, d22, d23
vtrn.32 d8, d10
vtrn.32 d9, d11
vrshrn.u16 d18, q9, #6
subs r5, r5, #2
pld [r3]
pld [r3, r4]
vst1.\align {d16[0]}, [r0,:\align], r2
vst1.\align {d16[st2]}, [r1,:\align], r2
vst1.\align {d18[0]}, [r0,:\align], r2
vst1.\align {d18[st2]}, [r1,:\align], r2
bgt 1b
vpop {d8-d11}
pop {r4-r8, pc}
2: // dx or dy are 0
// Collapse the four weights into two (d0, d1); branch to 4f when dy == 0.
tst r7, r7
add ip, ip, r7
vdup.8 d0, r5
ldr r5, [sp, #72]
vdup.8 d1, ip
beq 4f
vld1.64 {d4}, [r3], r4
vld1.64 {d6}, [r3], r4
3: // vertical interpolation loop
vmull.u8 q8, d4, d0
vmlal.u8 q8, d6, d1
vmull.u8 q9, d6, d0
vld1.64 {d4}, [r3], r4
vmlal.u8 q9, d4, d1
vld1.64 {d6}, [r3], r4
vrshrn.u16 d16, q8, #6 // uvuvuvuv
vrshrn.u16 d17, q9, #6 // uvuvuvuv
subs r5, r5, #2
vuzp.8 d16, d17 // d16=uuuu|uuuu, d17=vvvv|vvvv
pld [r3]
pld [r3, r4]
vst1.\align {d16[0]}, [r0,:\align], r2
vst1.\align {d16[st2]}, [r0,:\align], r2
vst1.\align {d17[0]}, [r1,:\align], r2
vst1.\align {d17[st2]}, [r1,:\align], r2
bgt 3b
vpop {d8-d11}
pop {r4-r8, pc}
4: // dy is 0
vld1.64 {d4-d5}, [r3], r4
vld1.64 {d6-d7}, [r3], r4
vext.8 d5, d4, d5, #2
vext.8 d7, d6, d7, #2
5: // horizontal interpolation loop
vmull.u8 q8, d4, d0
vmlal.u8 q8, d5, d1
vmull.u8 q9, d6, d0
vmlal.u8 q9, d7, d1
subs r5, r5, #2
vld1.64 {d4-d5}, [r3], r4
vld1.64 {d6-d7}, [r3], r4
vext.8 d5, d4, d5, #2
vrshrn.u16 d16, q8, #6
vrshrn.u16 d17, q9, #6
vext.8 d7, d6, d7, #2
vuzp.8 d16, d17
pld [r3]
pld [r3, r4]
vst1.\align {d16[0]}, [r0,:\align], r2
vst1.\align {d16[st2]}, [r0,:\align], r2
vst1.\align {d17[0]}, [r1,:\align], r2
vst1.\align {d17[st2]}, [r1,:\align], r2
bgt 5b
vpop {d8-d11}
pop {r4-r8, pc}
.endm
// Instantiate the 2-wide (16-bit stores) and 4-wide (32-bit stores) paths.
CHROMA_MC 2, 16
CHROMA_MC 4, 32
// 8-pixel-wide path: same structure as the macro body above, but processes a
// full 8 u/v pairs per row, so no lane trickery is needed for the stores.
mc_chroma_w8:
CHROMA_MC_START d4, d7, d8, d11
vext.8 d5, d4, d5, #1
vext.8 d9, d8, d9, #1
vext.8 d7, d6, d7, #1
vext.8 d11, d10, d11, #1
1: // height loop, interpolate xy
vmull.u8 q8, d4, d0
vmlal.u8 q8, d5, d1
vmlal.u8 q8, d8, d2
vmlal.u8 q8, d9, d3
vmull.u8 q9, d6, d0
vmlal.u8 q9, d7, d1
vmlal.u8 q9, d10, d2
vmlal.u8 q9, d11, d3
vld2.8 {d4-d7}, [r3], r4
vext.8 d5, d4, d5, #1
vext.8 d7, d6, d7, #1
vmull.u8 q10, d8, d0
vmlal.u8 q10, d9, d1
vmlal.u8 q10, d4, d2
vmlal.u8 q10, d5, d3
vmull.u8 q11, d10, d0
vmlal.u8 q11, d11, d1
vmlal.u8 q11, d6, d2
vmlal.u8 q11, d7, d3
subs r5, r5, #2
vld2.8 {d8-d11}, [r3], r4
vrshrn.u16 d16, q8, #6
vrshrn.u16 d17, q9, #6
vrshrn.u16 d18, q10, #6
vext.8 d9, d8, d9, #1
vrshrn.u16 d19, q11, #6
vext.8 d11, d10, d11, #1
pld [r3]
pld [r3, r4]
vst1.64 {d16}, [r0,:64], r2
vst1.64 {d17}, [r1,:64], r2
vst1.64 {d18}, [r0,:64], r2
vst1.64 {d19}, [r1,:64], r2
bgt 1b
vpop {d8-d11}
pop {r4-r8, pc}
2: // dx or dy are 0
tst r7, r7
add ip, ip, r7
vdup.8 d0, r5
ldr r5, [sp, #72]
vdup.8 d1, ip
beq 4f
vld2.8 {d4-d5}, [r3], r4
vld2.8 {d6-d7}, [r3], r4
3: // vertical interpolation loop
vmull.u8 q8, d4, d0 //U
vmlal.u8 q8, d6, d1
vmull.u8 q9, d5, d0 //V
vmlal.u8 q9, d7, d1
vld2.8 {d4-d5}, [r3], r4
vmull.u8 q10, d6, d0
vmlal.u8 q10, d4, d1
vmull.u8 q11, d7, d0
vmlal.u8 q11, d5, d1
vld2.8 {d6-d7}, [r3], r4
vrshrn.u16 d16, q8, #6
vrshrn.u16 d17, q9, #6
vrshrn.u16 d18, q10, #6
vrshrn.u16 d19, q11, #6
subs r5, r5, #2
pld [r3]
pld [r3, r4]
vst1.64 {d16}, [r0,:64], r2
vst1.64 {d17}, [r1,:64], r2
vst1.64 {d18}, [r0,:64], r2
vst1.64 {d19}, [r1,:64], r2
bgt 3b
vpop {d8-d11}
pop {r4-r8, pc}
4: // dy is 0
vld2.8 {d4-d7}, [r3], r4
vld2.8 {d8-d11}, [r3], r4
vext.8 d5, d4, d5, #1
vext.8 d7, d6, d7, #1
vext.8 d9, d8, d9, #1
vext.8 d11, d10, d11, #1
5: // horizontal interpolation loop
subs r5, r5, #2
vmull.u8 q8, d4, d0 //U
vmlal.u8 q8, d5, d1
vmull.u8 q9, d6, d0 //V
vmlal.u8 q9, d7, d1
vld2.8 {d4-d7}, [r3], r4
vmull.u8 q10, d8, d0
vmlal.u8 q10, d9, d1
vmull.u8 q11, d10, d0
vmlal.u8 q11, d11, d1
vld2.8 {d8-d11}, [r3], r4
vext.8 d5, d4, d5, #1
vrshrn.u16 d16, q8, #6
vext.8 d7, d6, d7, #1
vrshrn.u16 d17, q9, #6
vext.8 d9, d8, d9, #1
vrshrn.u16 d18, q10, #6
vext.8 d11, d10, d11, #1
vrshrn.u16 d19, q11, #6
pld [r3]
pld [r3, r4]
vst1.64 {d16}, [r0,:64], r2
vst1.64 {d17}, [r1,:64], r2
vst1.64 {d18}, [r0,:64], r2
vst1.64 {d19}, [r1,:64], r2
bgt 5b
vpop {d8-d11}
pop {r4-r8, pc}
endfunc
// hpel_filter_v( uint8_t *dst, uint8_t *src, int16_t *buf, intptr_t stride, int width )
// Vertical half-pel 6-tap filter (taps 1,-5,20,20,-5,1).
// r0 = dst (u8), r1 = src, r2 = buf (i16 intermediates), r3 = stride,
// [sp] = width.  Processes 16 columns per iteration over 6 source rows.
// Writes the raw 16-bit filter output to buf (for the later center filter)
// and the rounded, clipped 8-bit result ((v+16)>>5) to dst.
function hpel_filter_v_neon
ldr ip, [sp]                    // ip = width counter
sub r1, r1, r3, lsl #1          // start 2 rows above: src -= 2*stride
push {lr}
add lr, r1, ip                  // lr = end-of-row anchor for re-seeking src
vmov.u8 d30, #5                 // filter tap constants
vmov.u8 d31, #20
filter_v_loop:
subs ip, ip, #16
// Load the 6 vertically adjacent 16-pixel rows contributing to this column.
vld1.64 {d0-d1}, [r1,:128], r3
vld1.64 {d2-d3}, [r1,:128], r3
vld1.64 {d4-d5}, [r1,:128], r3
vld1.64 {d6-d7}, [r1,:128], r3
vld1.64 {d16-d17}, [r1,:128], r3
vld1.64 {d18-d19}, [r1,:128], r3
sub r1, lr, ip                  // rewind src to the next 16-column strip
// q10/q11 = p0 - 5*p1 + 20*p2 + 20*p3 - 5*p4 + p5 (widened to 16 bit)
vaddl.u8 q10, d0, d18
vmlsl.u8 q10, d2, d30
vmlal.u8 q10, d4, d31
vmlal.u8 q10, d6, d31
vmlsl.u8 q10, d16, d30
vaddl.u8 q11, d1, d19
vmlsl.u8 q11, d3, d30
vmlal.u8 q11, d5, d31
vmlal.u8 q11, d7, d31
vmlsl.u8 q11, d17, d30
vqrshrun.s16 d0, q10, #5
vst1.64 {d20-d21}, [r2,:128]!   // raw 16-bit output -> buf
vqrshrun.s16 d1, q11, #5
vst1.64 {d22-d23}, [r2,:128]!
vst1.64 {d0-d1}, [r0,:128]!     // rounded 8-bit output -> dst
bgt filter_v_loop
pop {pc}
endfunc
// hpel_filter_c( uint8_t *dst, int16_t *buf, int width );
// Center half-pel filter: applies the horizontal 6-tap filter to the 16-bit
// intermediates produced by hpel_filter_v.
// r0 = dst (u8), r1 = buf (i16, pre-offset by -16), r2 = width.
// Uses the identity a-5b+20c = 16*(((a-b)/4 - b + c)/4 + c) to keep the
// arithmetic inside 16 bits; final output is (v + 512) >> 10 via
// vqrshrun #6 on the /16-scaled value.
function hpel_filter_c_neon
sub r1, #16
vld1.64 {d0-d3}, [r1,:128]!
// unrolled 2x: 4% faster
filter_c_loop:
subs r2, r2, #16
vld1.64 {d4-d7}, [r1,:128]!
// a = t[-2]+t[3], b = t[-1]+t[2], c = t[0]+t[1] built from shifted windows
vext.16 q8, q0, q1, #6
vext.16 q12, q1, q2, #3
vadd.s16 q8, q8, q12
vext.16 q9, q0, q1, #7
vext.16 q11, q1, q2, #2
vadd.s16 q9, q9, q11
vext.16 q10, q1, q2, #1
vext.16 q11, q1, q2, #6
vadd.s16 q10, q1, q10
vsub.s16 q8, q8, q9 // a-b
vext.16 q15, q2, q3, #3
vsub.s16 q9, q9, q10 // b-c
vext.16 q12, q1, q2, #7
vshr.s16 q8, q8, #2 // (a-b)/4
vadd.s16 q11, q11, q15
vext.16 q14, q2, q3, #2
vsub.s16 q8, q8, q9 // (a-b)/4-b+c
vadd.s16 q12, q12, q14
vext.16 q13, q2, q3, #1
vshr.s16 q8, q8, #2 // ((a-b)/4-b+c)/4
vadd.s16 q13, q2, q13
vadd.s16 q8, q8, q10 // ((a-b)/4-b+c)/4+c = (a-5*b+20*c)/16
vsub.s16 q11, q11, q12 // a-b
vsub.s16 q12, q12, q13 // b-c
vshr.s16 q11, q11, #2 // (a-b)/4
vqrshrun.s16 d30, q8, #6
vsub.s16 q11, q11, q12 // (a-b)/4-b+c
vshr.s16 q11, q11, #2 // ((a-b)/4-b+c)/4
vld1.64 {d0-d3}, [r1,:128]!
vadd.s16 q11, q11, q13 // ((a-b)/4-b+c)/4+c = (a-5*b+20*c)/16
vext.16 q8, q2, q3, #6
vqrshrun.s16 d31, q11, #6
vext.16 q12, q3, q0, #3
vadd.s16 q8, q8, q12
vext.16 q9, q2, q3, #7
vst1.64 {d30-d31}, [r0,:128]!
bxle lr                         // width exhausted after the first half
// Second, identical half of the 2x unroll with the register roles rotated.
subs r2, r2, #16
vext.16 q11, q3, q0, #2
vadd.s16 q9, q9, q11
vext.16 q10, q3, q0, #1
vext.16 q11, q3, q0, #6
vadd.s16 q10, q3, q10
vsub.s16 q8, q8, q9 // a-b
vext.16 q15, q0, q1, #3
vsub.s16 q9, q9, q10 // b-c
vext.16 q12, q3, q0, #7
vshr.s16 q8, q8, #2 // (a-b)/4
vadd.s16 q11, q11, q15
vext.16 q14, q0, q1, #2
vsub.s16 q8, q8, q9 // (a-b)/4-b+c
vadd.s16 q12, q12, q14
vext.16 q13, q0, q1, #1
vshr.s16 q8, q8, #2 // ((a-b)/4-b+c)/4
vadd.s16 q13, q0, q13
vadd.s16 q8, q8, q10 // ((a-b)/4-b+c)/4+c = (a-5*b+20*c)/16
vsub.s16 q11, q11, q12 // a-b
vsub.s16 q12, q12, q13 // b-c
vshr.s16 q11, q11, #2 // (a-b)/4
vqrshrun.s16 d30, q8, #6
vsub.s16 q11, q11, q12 // (a-b)/4-b+c
vshr.s16 q11, q11, #2 // ((a-b)/4-b+c)/4
vadd.s16 q11, q11, q13 // ((a-b)/4-b+c)/4+c = (a-5*b+20*c)/16
vqrshrun.s16 d31, q11, #6
vst1.64 {d30-d31}, [r0,:128]!
bgt filter_c_loop
bx lr
endfunc
// hpel_filter_h( uint8_t *dst, uint8_t *src, int width );
// Horizontal half-pel 6-tap filter (taps 1,-5,20,20,-5,1) on 8-bit pixels.
// r0 = dst, r1 = src (pre-offset by -16), r2 = width.
// Windows are built with vext over adjacent 16-byte loads; each output is
// (sum + 16) >> 5 saturated to u8 (vqrshrun #5).
function hpel_filter_h_neon
sub r1, #16
vmov.u8 d30, #5                 // filter tap constants
vld1.64 {d0-d3}, [r1,:128]!
vmov.u8 d31, #20
// unrolled 3x because it's 5% faster, due to mitigating
// the high latency of multiplication and vqrshrun
filter_h_loop:
subs r2, r2, #16
vld1.64 {d4-d5}, [r1,:128]!
// q8/q9/q10/q11 = the -2/-1/+1/+2 shifted windows around the center (q1)
vext.8 q8, q0, q1, #14
vext.8 q12, q1, q2, #3
vaddl.u8 q13, d16, d24
vext.8 q9, q0, q1, #15
vaddl.u8 q14, d17, d25
vext.8 q10, q1, q2, #1
vmlal.u8 q13, d2, d31
vmlsl.u8 q13, d18, d30
vext.8 q11, q1, q2, #2
vmlal.u8 q13, d20, d31
vmlsl.u8 q13, d22, d30
vmlsl.u8 q14, d19, d30
vmlal.u8 q14, d3, d31
vmlal.u8 q14, d21, d31
vmlsl.u8 q14, d23, d30
vqrshrun.s16 d6, q13, #5
vld1.64 {d0-d1}, [r1,:128]!
vext.8 q8, q1, q2, #14
vext.8 q12, q2, q0, #3
vaddl.u8 q13, d16, d24
vqrshrun.s16 d7, q14, #5
vext.8 q9, q1, q2, #15
vaddl.u8 q14, d17, d25
vst1.64 {d6-d7}, [r0,:128]!
bxle lr                         // width exhausted after first 16 pixels
// Second unroll step: center is now q2.
subs r2, r2, #16
vext.8 q10, q2, q0, #1
vmlal.u8 q13, d4, d31
vmlsl.u8 q13, d18, d30
vext.8 q11, q2, q0, #2
vmlal.u8 q13, d20, d31
vmlsl.u8 q13, d22, d30
vmlsl.u8 q14, d19, d30
vmlal.u8 q14, d5, d31
vmlal.u8 q14, d21, d31
vmlsl.u8 q14, d23, d30
vqrshrun.s16 d6, q13, #5
vld1.64 {d2-d3}, [r1,:128]!
vext.8 q8, q2, q0, #14
vext.8 q12, q0, q1, #3
vaddl.u8 q13, d16, d24
vqrshrun.s16 d7, q14, #5
vext.8 q9, q2, q0, #15
vaddl.u8 q14, d17, d25
vst1.64 {d6-d7}, [r0,:128]!
bxle lr
// Third unroll step: center is now q0.
subs r2, r2, #16
vext.8 q10, q0, q1, #1
vmlal.u8 q13, d0, d31
vmlsl.u8 q13, d18, d30
vext.8 q11, q0, q1, #2
vmlal.u8 q13, d20, d31
vmlsl.u8 q13, d22, d30
vmlsl.u8 q14, d19, d30
vmlal.u8 q14, d1, d31
vmlal.u8 q14, d21, d31
vmlsl.u8 q14, d23, d30
vqrshrun.s16 d6, q13, #5
vqrshrun.s16 d7, q14, #5
vst1.64 {d6-d7}, [r0,:128]!
bgt filter_h_loop
bx lr
endfunc
// frame_init_lowres_core( uint8_t *src0, uint8_t *dst0, uint8_t *dsth, uint8_t *dstv,
// uint8_t *dstc, intptr_t src_stride, intptr_t dst_stride, int width,
// int height )
// Builds the four quarter-resolution (half in each dimension) planes used for
// lookahead: dst0 (even phase), dsth (+1/2 pel x), dstv (+1/2 pel y),
// dstc (+1/2 pel x and y), each via rounding averages (vrhadd) of 2x2 input
// neighborhoods across three source rows.
// r0 = src0, r1 = dst0, r2 = dsth, r3 = dstv, stack: r4 = dstc,
// r5 = src_stride, r6 = dst_stride, r7 = width, lr = height.
function frame_init_lowres_core_neon
push {r4-r10,lr}
vpush {d8-d15}
ldrd r4, r5, [sp, #96]
ldrd r6, r7, [sp, #104]
ldr lr, [sp, #112]
sub r10, r6, r7 // dst_stride - width
and r10, r10, #~15              // per-row dst advance, rounded to 16
lowres_yloop:
mov ip, r7 // width
mov r6, r0 // src0
add r8, r0, r5 // src1 = src0 + src_stride
add r9, r0, r5, lsl #1 // src2 = src1 + src_stride
// vld2 splits even/odd source columns into separate d-registers.
vld2.8 {d8, d10}, [r6,:128]!
vld2.8 {d12,d14}, [r8,:128]!
vld2.8 {d16,d18}, [r9,:128]!
lowres_xloop:
subs ip, ip, #16
// First 16 output columns of this iteration (registers q4-q9 current,
// q10-q15 become the "next" set for software pipelining).
vld2.8 {d9, d11}, [r6,:128]!
vld2.8 {d13,d15}, [r8,:128]!
vrhadd.u8 q0, q4, q6
vld2.8 {d17,d19}, [r9,:128]!
vrhadd.u8 q5, q5, q7
vld2.8 {d20,d22}, [r6,:128]!
vrhadd.u8 q1, q6, q8
vld2.8 {d24,d26}, [r8,:128]!
vrhadd.u8 q7, q7, q9
vext.8 q4, q4, q10, #1
vrhadd.u8 q0, q0, q5
vext.8 q6, q6, q12, #1
vrhadd.u8 q1, q1, q7
vld2.8 {d28,d30}, [r9,:128]!
vrhadd.u8 q4, q4, q6
vext.8 q8, q8, q14, #1
vrhadd.u8 q6, q6, q8
vst1.64 {d0-d1}, [r1,:128]!     // dst0
vrhadd.u8 q2, q4, q5
vst1.64 {d2-d3}, [r3,:128]!     // dstv
vrhadd.u8 q3, q6, q7
vst1.64 {d4-d5}, [r2,:128]!     // dsth
vst1.64 {d6-d7}, [r4,:128]!     // dstc
ble lowres_xloop_end
// Second, register-rotated copy of the body (2x software pipeline).
subs ip, ip, #16
vld2.8 {d21,d23}, [r6,:128]!
vld2.8 {d25,d27}, [r8,:128]!
vrhadd.u8 q0, q10, q12
vld2.8 {d29,d31}, [r9,:128]!
vrhadd.u8 q11, q11, q13
vld2.8 {d8, d10}, [r6,:128]!
vrhadd.u8 q1, q12, q14
vld2.8 {d12,d14}, [r8,:128]!
vrhadd.u8 q13, q13, q15
vext.8 q10, q10, q4, #1
vrhadd.u8 q0, q0, q11
vext.8 q12, q12, q6, #1
vrhadd.u8 q1, q1, q13
vld2.8 {d16,d18}, [r9,:128]!
vrhadd.u8 q10, q10, q12
vext.8 q14, q14, q8, #1
vrhadd.u8 q12, q12, q14
vst1.64 {d0-d1}, [r1,:128]!
vrhadd.u8 q2, q10, q11
vst1.64 {d2-d3}, [r3,:128]!
vrhadd.u8 q3, q12, q13
vst1.64 {d4-d5}, [r2,:128]!
vst1.64 {d6-d7}, [r4,:128]!
bgt lowres_xloop
lowres_xloop_end:
subs lr, lr, #1
add r0, r0, r5, lsl #1          // advance src by 2 rows
add r1, r1, r10
add r2, r2, r10
add r3, r3, r10
add r4, r4, r10
bgt lowres_yloop
vpop {d8-d15}
pop {r4-r10,pc}
endfunc
// Deinterleave NV12 chroma (uvuv...) into the fdec buffer's split u/v halves.
// r0 = dst (u rows followed FDEC_STRIDE/2 apart by v rows), r1 = src,
// r2 = src stride, r3 = height.  One 16-byte row per iteration.
function load_deinterleave_chroma_fdec_neon
mov ip, #FDEC_STRIDE/2
1:
vld2.8 {d0-d1}, [r1,:128], r2   // d0 = u bytes, d1 = v bytes
subs r3, r3, #1
pld [r1]
vst1.8 {d0}, [r0,:64], ip
vst1.8 {d1}, [r0,:64], ip
bgt 1b
bx lr
endfunc
// Same as the fdec variant above, but targets the fenc buffer layout
// (FENC_STRIDE/2 between the u and v rows).
// r0 = dst, r1 = src (uv interleaved), r2 = src stride, r3 = height.
function load_deinterleave_chroma_fenc_neon
mov ip, #FENC_STRIDE/2
1:
vld2.8 {d0-d1}, [r1,:128], r2   // d0 = u bytes, d1 = v bytes
subs r3, r3, #1
pld [r1]
vst1.8 {d0}, [r0,:64], ip
vst1.8 {d1}, [r0,:64], ip
bgt 1b
bx lr
endfunc
// Plane copy, width rounded up to a multiple of 16.
// r0 = dst, r1 = dst_stride, r2 = src, r3 = src_stride,
// [sp+8] = width, [sp+12] = height.
// Strides are pre-adjusted by -width so the post-row adds land on the next row.
function plane_copy_core_neon
push {r4,lr}
ldr r4, [sp, #8]
ldr lr, [sp, #12]
add r12, r4, #15
bic r4, r12, #15                // r4 = width rounded up to 16
sub r1, r1, r4
sub r3, r3, r4
1:
mov r12, r4                     // r12 = bytes left in this row
16:
// Peel one 16-byte chunk when the count is an odd multiple of 16, so the
// main loop below can run in 32-byte steps.
tst r12, #16
beq 32f
subs r12, r12, #16
vld1.8 {q0}, [r2]!
vst1.8 {q0}, [r0]!
beq 0f
32:
subs r12, r12, #32
vld1.8 {q0, q1}, [r2]!
vst1.8 {q0, q1}, [r0]!
bgt 32b
0:
subs lr, lr, #1
add r2, r2, r3
add r0, r0, r1
bgt 1b
pop {r4,pc}
endfunc
// Split an interleaved plane (e.g. NV12 uvuv...) into two planar outputs.
// r0 = dsta, r1 = dsta_stride, r2 = dstb, r3 = dstb_stride,
// [sp+20] = src, [sp+24] = src_stride, [sp+28] = width, [sp+32] = height.
// Width is rounded up to 16; strides are pre-adjusted by the processed width.
function plane_copy_deinterleave_neon
push {r4-r7, lr}
ldrd r6, r7, [sp, #28]          // r6 = width, r7 = height
ldrd r4, r5, [sp, #20]          // r4 = src, r5 = src_stride
add lr, r6, #15
bic lr, lr, #15                 // lr = width rounded up to 16
sub r1, r1, lr
sub r3, r3, lr
sub r5, r5, lr, lsl #1          // src consumes 2 bytes per output pixel
block:
vld2.8 {d0-d3}, [r4,:128]!      // q0 = even bytes, q1 = odd bytes
subs lr, lr, #16
vst1.8 {q0}, [r0]!
vst1.8 {q1}, [r2]!
bgt block
add r4, r4, r5
subs r7, r7, #1
add r0, r0, r1
add r2, r2, r3
mov lr, r6                      // reset the column counter for the next row
bgt block
pop {r4-r7, pc}
endfunc
// Split packed RGB(A) into three planar outputs, supporting both 3-byte
// (RGB) and 4-byte (RGBX) source pixels; the 4th source channel is discarded.
// r0/r1 = dsta/stride, r2/r3 = dstb/stride, stack: dstc/stride, src,
// src_stride, pw (pixel width: 3 or 4), width, height.
// r8 holds pw-3: zero selects the vld3 path, nonzero the vld4 path.
function plane_copy_deinterleave_rgb_neon
push {r4-r8, r10, r11, lr}
ldrd r4, r5, [sp, #32]          // r4 = dstc, r5 = dstc_stride
ldrd r6, r7, [sp, #40]          // r6 = src, r7 = src_stride
ldr r8, [sp, #48]               // r8 = bytes per source pixel
ldrd r10, r11, [sp, #52]        // r10 = width, r11 = height
add lr, r10, #7
subs r8, r8, #3                 // Z set for 3-byte pixels
bic lr, lr, #7                  // lr = width rounded up to 8
sub r7, r7, lr, lsl #1
sub r1, r1, lr
sub r3, r3, lr
sub r5, r5, lr
// src advances pw bytes per pixel: total adjust is lr*4 (vld4) or lr*3 (vld3).
subne r7, r7, lr, lsl #1
subeq r7, r7, lr
bne block4
block3:
vld3.8 {d0,d1,d2}, [r6]!        // deinterleave 8 RGB triplets
subs lr, lr, #8
vst1.8 {d0}, [r0]!
vst1.8 {d1}, [r2]!
vst1.8 {d2}, [r4]!
bgt block3
subs r11, r11, #1
add r0, r0, r1
add r2, r2, r3
add r4, r4, r5
add r6, r6, r7
mov lr, r10
bgt block3
pop {r4-r8, r10, r11, pc}
block4:
vld4.8 {d0,d1,d2,d3}, [r6]!     // deinterleave 8 RGBX quads; d3 unused
subs lr, lr, #8
vst1.8 {d0}, [r0]!
vst1.8 {d1}, [r2]!
vst1.8 {d2}, [r4]!
bgt block4
subs r11, r11, #1
add r0, r0, r1
add r2, r2, r3
add r4, r4, r5
add r6, r6, r7
mov lr, r10
bgt block4
pop {r4-r8, r10, r11, pc}
endfunc
// Interleave two planar inputs into one uvuv... output plane.
// r0 = dst, r1 = dst_stride, r2 = srca, r3 = srca_stride,
// [sp+20] = srcb, [sp+24] = srcb_stride, [sp+28] = width, [sp+32] = height.
// Width is rounded up to 16; dst advances 2 bytes per input pixel.
function plane_copy_interleave_core_neon
push {r4-r7, lr}
ldrd r6, r7, [sp, #28]          // r6 = width, r7 = height
ldrd r4, r5, [sp, #20]          // r4 = srcb, r5 = srcb_stride
add lr, r6, #15
bic lr, lr, #15                 // lr = width rounded up to 16
sub r1, r1, lr, lsl #1
sub r3, r3, lr
sub r5, r5, lr
blocki:
vld1.8 {q0}, [r2]!
vld1.8 {q1}, [r4]!
subs lr, lr, #16
vst2.8 {d0,d2}, [r0]!           // interleave bytes of the two sources
vst2.8 {d1,d3}, [r0]!
bgt blocki
subs r7, r7, #1
add r0, r0, r1
add r2, r2, r3
add r4, r4, r5
mov lr, r6                      // reset column counter for the next row
bgt blocki
pop {r4-r7, pc}
endfunc
// Copy a plane while swapping each byte pair (e.g. UV <-> VU) via vrev16.
// r0 = dst, r1 = dst_stride, r2 = src, r3 = src_stride,
// [sp+12] = width (in pairs), [sp+16] = height.
// Each iteration handles 16 pairs (32 bytes).
function plane_copy_swap_core_neon
push {r4-r5, lr}
ldrd r4, r5, [sp, #12]          // r4 = width, r5 = height
add lr, r4, #15
bic lr, lr, #15                 // lr = width rounded up to 16 pairs
sub r1, r1, lr, lsl #1          // strides are in bytes: 2 bytes per pair
sub r3, r3, lr, lsl #1
1:
vld1.8 {q0, q1}, [r2]!
subs lr, lr, #16
vrev16.8 q0, q0                 // swap the bytes inside each 16-bit lane
vrev16.8 q1, q1
vst1.8 {q0, q1}, [r0]!
bgt 1b
subs r5, r5, #1
add r0, r0, r1
add r2, r2, r3
mov lr, r4                      // reset column counter for the next row
bgt 1b
pop {r4-r5, pc}
endfunc
// Interleave the split fdec u/v rows back into an NV12 (uvuv...) plane.
// r0 = dst, r1 = dst_stride, r2 = srcu, r3 = srcv, [sp+4] = height.
// Source rows are read at FDEC_STRIDE spacing.
function store_interleave_chroma_neon
push {lr}
ldr lr, [sp, #4]                // lr = height
mov ip, #FDEC_STRIDE
1:
vld1.8 {d0}, [r2], ip
vld1.8 {d1}, [r3], ip
subs lr, lr, #1
vst2.8 {d0,d1}, [r0,:128], r1   // interleaved u0 v0 u1 v1 ...
bgt 1b
pop {pc}
endfunc
// For each of 8 positions, compute the horizontal sum of 4 consecutive
// pixels starting at \p1 (using \p2 for the overlap) and add the
// previous-row running total held in q2.  Result left in q0 (u16).
.macro integral4h p1, p2
vext.8 d1, \p1, \p2, #1
vext.8 d2, \p1, \p2, #2
vext.8 d3, \p1, \p2, #3
vaddl.u8 q0, \p1, d1
vaddl.u8 q1, d2, d3
vadd.u16 q0, q0, q1
vadd.u16 q0, q0, q2             @ accumulate the row above
.endm
// Build one row of the 4x4 integral-sum table:
// sum[x] = pix[x]+pix[x+1]+pix[x+2]+pix[x+3] + sum_above[x].
// r0 = sum row (u16), r1 = pix, r2 = stride/width counter;
// r3 = r0 - 2*stride points at the previous sum row (u16 elements).
// Processes 16 outputs per iteration, ping-ponging d6/d7 as the pixel window.
function integral_init4h_neon
sub r3, r0, r2, lsl #1          // r3 = previous row of the sum table
vld1.8 {d6, d7}, [r1, :128]!
1:
subs r2, r2, #16
vld1.16 {q2}, [r3, :128]!       // q2 = sums from the row above
integral4h d6, d7
vld1.8 {d6}, [r1, :64]!
vld1.16 {q2}, [r3, :128]!
vst1.16 {q0}, [r0, :128]!
integral4h d7, d6
vld1.8 {d7}, [r1, :64]!
vst1.16 {q0}, [r0, :128]!
bgt 1b
bx lr
endfunc
// For each of 8 positions, compute the horizontal sum of 8 consecutive
// pixels starting at \p1 (using \p2 for the overlap) and add the
// previous-row running total \s.  Result left in q0 (u16).
.macro integral8h p1, p2, s
vext.8 d1, \p1, \p2, #1
vext.8 d2, \p1, \p2, #2
vext.8 d3, \p1, \p2, #3
vext.8 d4, \p1, \p2, #4
vext.8 d5, \p1, \p2, #5
vext.8 d6, \p1, \p2, #6
vext.8 d7, \p1, \p2, #7
vaddl.u8 q0, \p1, d1
vaddl.u8 q1, d2, d3
vaddl.u8 q2, d4, d5
vaddl.u8 q3, d6, d7
vadd.u16 q0, q0, q1
vadd.u16 q2, q2, q3
vadd.u16 q0, q0, q2
vadd.u16 q0, q0, \s             @ accumulate the row above
.endm
// Build one row of the 8x8 integral-sum table:
// sum[x] = pix[x]+...+pix[x+7] + sum_above[x].
// r0 = sum row (u16), r1 = pix, r2 = stride/width counter;
// r3 = r0 - 2*stride points at the previous sum row.
// Same 16-outputs-per-iteration ping-pong structure as integral_init4h.
function integral_init8h_neon
sub r3, r0, r2, lsl #1          // r3 = previous row of the sum table
vld1.8 {d16, d17}, [r1, :128]!
1:
subs r2, r2, #16
vld1.16 {q9}, [r3, :128]!       // q9 = sums from the row above
integral8h d16, d17, q9
vld1.8 {d16}, [r1, :64]!
vld1.16 {q9}, [r3, :128]!
vst1.16 {q0}, [r0, :128]!
integral8h d17, d16, q9
vld1.8 {d17}, [r1, :64]!
vst1.16 {q0}, [r0, :128]!
bgt 1b
bx lr
endfunc
// Vertical pass of the 4x4/8x8 integral tables.
// r0 = sum4 base, r1 = sum8 output, r2 = stride.
// r3 walks the current row, r4 = row +4 below, r5 = row +8 below.
// Writes sum4[x] differences into r1 (4-row vertical sums) and combines
// shifted horizontal windows (vext #8 = 4 u16 elements) to produce the
// 8x8 sums written back through r0.
function integral_init4v_neon
push {r4-r5}
mov r3, r0
add r4, r0, r2, lsl #3          // r4 = 4 rows below (stride*8 bytes of u16)
add r5, r0, r2, lsl #4          // r5 = 8 rows below
sub r2, r2, #8
vld1.16 {q11, q12}, [r3]!
vld1.16 {q8, q9}, [r5]!
vld1.16 {q13}, [r3]!
vld1.16 {q10}, [r5]!
1:
subs r2, r2, #16
vld1.16 {q14, q15}, [r4]!
// Shift each row window left by 4 u16 elements for the x+4 terms.
vext.8 q0, q11, q12, #8
vext.8 q1, q12, q13, #8
vext.8 q2, q8, q9, #8
vext.8 q3, q9, q10, #8
vsub.u16 q14, q14, q11          // sum4 vertical difference (rows +4 .. 0)
vsub.u16 q15, q15, q12
vadd.u16 q0, q0, q11
vadd.u16 q1, q1, q12
vadd.u16 q2, q2, q8
vadd.u16 q3, q3, q9
vst1.16 {q14}, [r1]!
vst1.16 {q15}, [r1]!
vmov q11, q13                   // slide the row windows forward
vmov q8, q10
vsub.u16 q0, q2, q0             // 8x8 sum = bottom pair - top pair
vsub.u16 q1, q3, q1
vld1.16 {q12, q13}, [r3]!
vld1.16 {q9, q10}, [r5]!
vst1.16 {q0}, [r0]!
vst1.16 {q1}, [r0]!
bgt 1b
2:
pop {r4-r5}
bx lr
endfunc
// Vertical pass of the 8x8 integral table: sum8[x] -= sum8_8rows_below[x]
// written back in place (difference of rows 8 apart).
// r0 = sum8 row (u16, updated in place), r1 = stride/width counter,
// r2 = r0 + 8 rows.  Peels one 8-element chunk if the count is not a
// multiple of 16, then runs 16 elements per iteration.
function integral_init8v_neon
add r2, r0, r1, lsl #4          // r2 = 8 rows below (stride*16 bytes of u16)
sub r1, r1, #8
ands r3, r1, #16 - 1
beq 1f
// Odd leading chunk of 8 elements.
subs r1, r1, #8
vld1.16 {q0}, [r0]
vld1.16 {q2}, [r2]!
vsub.u16 q8, q2, q0
vst1.16 {q8}, [r0]!
ble 2f
1:
subs r1, r1, #16
vld1.16 {q0, q1}, [r0]
vld1.16 {q2, q3}, [r2]!
vsub.u16 q8, q2, q0
vsub.u16 q9, q3, q1
vst1.16 {q8}, [r0]!
vst1.16 {q9}, [r0]!
bgt 1b
2:
bx lr
endfunc
// Macroblock-tree cost propagation, 8 blocks per iteration.
// r0 = dst (u16), r1 = propagate_in, r2 = intra_costs, r3 = inter_costs,
// [sp+12] = inv_qscales... actually r4 = 4th array, r5 = pointer to a float
// scale (broadcast into q3), lr = length.
// NOTE(review): exact meanings of r1-r4 follow the C prototype of
// mbtree_propagate_cost — confirm argument order against the caller.
// Computes, per lane (all math in f32 via Newton-Raphson reciprocal):
//   out = (intra + propagate_in*scale) * (intra - min(inter&~0xc000,intra))
//         / intra
function mbtree_propagate_cost_neon
push {r4-r5,lr}
ldrd r4, r5, [sp, #12]
ldr lr, [sp, #20]
vld1.32 {d6[], d7[]}, [r5]      @ q3 = broadcast float weight
8:
subs lr, lr, #8
vld1.16 {q8}, [r1]!
vld1.16 {q9}, [r2]!
vld1.16 {q10}, [r3]!
vld1.16 {q11}, [r4]!
vbic.u16 q10, #0xc000           @ strip the list-used flag bits
vmin.u16 q10, q9, q10           @ clamp inter cost to intra cost
vmull.u16 q12, d18, d22 @ propagate_intra
vmull.u16 q13, d19, d23 @ propagate_intra
vsubl.u16 q14, d18, d20 @ propagate_num
vsubl.u16 q15, d19, d21 @ propagate_num
vmovl.u16 q10, d18 @ propagate_denom
vmovl.u16 q11, d19 @ propagate_denom
vmovl.u16 q9, d17
vmovl.u16 q8, d16
vcvt.f32.s32 q12, q12
vcvt.f32.s32 q13, q13
vcvt.f32.s32 q14, q14
vcvt.f32.s32 q15, q15
vcvt.f32.s32 q10, q10
vcvt.f32.s32 q11, q11
vrecpe.f32 q0, q10              @ 1/denom estimate ...
vrecpe.f32 q1, q11
vcvt.f32.s32 q8, q8
vcvt.f32.s32 q9, q9
vrecps.f32 q10, q0, q10         @ ... refined by one Newton-Raphson step
vrecps.f32 q11, q1, q11
vmla.f32 q8, q12, q3 @ propagate_amount
vmla.f32 q9, q13, q3 @ propagate_amount
vmul.f32 q0, q0, q10
vmul.f32 q1, q1, q11
vmul.f32 q8, q8, q14
vmul.f32 q9, q9, q15
vmul.f32 q0, q8, q0
vmul.f32 q1, q9, q1
vcvt.s32.f32 q0, q0
vcvt.s32.f32 q1, q1
vqmovn.s32 d0, q0               @ saturate back to u16 lanes
vqmovn.s32 d1, q1
vst1.16 {q0}, [r0]!
bgt 8b
pop {r4-r5,pc}
endfunc
// Inner kernel of mbtree_propagate_list: for 8 motion vectors at a time,
// computes the target macroblock coordinates and the four bilinear spill
// weights (x*y, x*(32-y), (32-x)*y, (32-x)*(32-y)) scaled by
// propagate_amount, interleaved into the output buffer at r3.
// r0 = mvs (s16 pairs), r1 = propagate_amount, r2 = lowres_costs,
// r3 = output buffer; stack: bipred_weight, mb_y, len.
// When lists_used == 3 (both flag bits set in lowres_cost), the amount is
// rescaled by bipred_weight: (amount*bipred_weight + 32) >> 6.
function mbtree_propagate_list_internal_neon
vld1.16 {d4[]}, [sp] @ bipred_weight
movrel r12, pw_0to15
vmov.u16 q10, #0xc000           @ lists_used flag mask
vld1.16 {q0}, [r12, :128] @h->mb.i_mb_x,h->mb.i_mb_y
ldrh r12, [sp, #4]
vmov.u32 q11, #4                @ mb_x advances 4 per half-iteration
vmov.u8 q3, #32
vdup.u16 q8, r12 @ mb_y
vzip.u16 q0, q8                 @ q0 = interleaved (mb_x, mb_y) pairs
ldr r12, [sp, #8]               @ r12 = element count
8:
subs r12, r12, #8
vld1.16 {q14}, [r1, :128]! @ propagate_amount
vld1.16 {q15}, [r2]! @ lowres_cost
vld1.16 {q8, q9}, [r0]!         @ 8 motion vectors (x,y interleaved)
vand q15, q15, q10
vceq.u16 q1, q15, q10           @ lanes where lists_used == 3
vmull.u16 q12, d28, d4
vmull.u16 q13, d29, d4
vrshrn.u32 d30, q12, #6
vrshrn.u32 d31, q13, #6
vbsl q1, q15, q14 @ if( lists_used == 3 )
@ propagate_amount = (propagate_amount * bipred_weight + 32) >> 6
vshr.s16 q12, q8, #5            @ mv >> 5: whole-MB displacement
vshr.s16 q13, q9, #5
vuzp.16 q8, q9 @ x & 31, y & 31
vadd.s16 q12, q12, q0           @ target MB coordinates
vadd.s16 q0, q0, q11
vmovn.i16 d16, q8
vmovn.i16 d17, q9
vadd.s16 q13, q13, q0
vbic.i16 q8, #128+64+32         @ keep only the low 5 fractional bits
vadd.s16 q0, q0, q11
vbic.i16 q8, #(128+64+32)<<8
vst1.16 {q12, q13}, [r3, :128]! @ target (x,y) pairs out
vsub.i8 q9, q3, q8              @ 32 - frac
vmull.u8 q12, d17, d16 @ idx3weight = y*x
vmull.u8 q14, d19, d16 @ idx1weight = (32-y)*x
vmull.u8 q15, d19, d18 @ idx0weight = (32-y)*(32-x)
vmull.u8 q13, d17, d18 @ idx2weight = y*(32-x)
vmull.u16 q9, d28, d2 @ idx1weight
vmull.u16 q8, d29, d3
vmull.u16 q14, d30, d2 @ idx0weight
vmull.u16 q15, d31, d3
vrshrn.u32 d18, q9, #10 @ idx1weight
vrshrn.u32 d19, q8, #10
vrshrn.u32 d16, q14, #10 @ idx0weight
vrshrn.u32 d17, q15, #10
vmull.u16 q14, d24, d2 @ idx3weight
vmull.u16 q15, d25, d3
vzip.16 q8, q9                  @ interleave idx0/idx1 per element
vmull.u16 q12, d26, d2 @ idx2weight
vmull.u16 q13, d27, d3
vst1.16 {q8, q9}, [r3, :128]!
vrshrn.u32 d19, q15, #10 @ idx3weight
vrshrn.u32 d18, q14, #10
vrshrn.u32 d16, q12, #10 @ idx2weight
vrshrn.u32 d17, q13, #10
vzip.16 q8, q9                  @ interleave idx2/idx3 per element
vst1.16 {q8, q9}, [r3, :128]!
bge 8b
bx lr
endfunc
@ void mbtree_fix8_pack( int16_t *dst, float *src, int count )
@ Convert an array of floats to byte-swapped Q8.8 fixed point.
@ r0 = dst (int16_t), r1 = src (float), r2 = count.
@ Main loop handles 8 values at a time; the tail loop finishes one at a time.
@ vcvt ... #8 scales by 256 during conversion; vrev16.8 swaps the two bytes
@ of each 16-bit lane (endianness of the packed format).
function mbtree_fix8_pack_neon, export=1
subs r3, r2, #8
blt 2f                          @ fewer than 8 -> straight to the tail
1:
subs r3, r3, #8
vld1.32 {q0,q1}, [r1,:128]!
vcvt.s32.f32 q0, q0, #8         @ float -> s32 with 8 fractional bits
vcvt.s32.f32 q1, q1, #8
vqmovn.s32 d4, q0               @ saturate to s16
vqmovn.s32 d5, q1
vrev16.8 q3, q2                 @ byte-swap each 16-bit value
vst1.16 {q3}, [r0,:128]!
bge 1b
2:
adds r3, r3, #8
bxeq lr                         @ no remainder
3: @ scalar tail loop
subs r3, r3, #1
vld1.32 {d0[0]}, [r1]!
vcvt.s32.f32 s0, s0, #8
vrev16.8 d0, d0
vst1.16 {d0[0]}, [r0]!
bgt 3b
bx lr
endfunc
@ void mbtree_fix8_unpack( float *dst, int16_t *src, int count )
@ Inverse of mbtree_fix8_pack: byte-swapped Q8.8 fixed point back to floats.
@ r0 = dst (float), r1 = src (int16_t), r2 = count.
@ Main loop handles 8 values; tail loop finishes one at a time.
function mbtree_fix8_unpack_neon, export=1
subs r3, r2, #8
blt 2f                          @ fewer than 8 -> straight to the tail
1:
subs r3, r3, #8
vld1.16 {q0}, [r1,:128]!
vrev16.8 q1, q0                 @ undo the byte swap
vmovl.s16 q0, d2                @ widen to s32
vmovl.s16 q1, d3
vcvt.f32.s32 q0, q0, #8         @ s32 Q8.8 -> float (divide by 256)
vcvt.f32.s32 q1, q1, #8
vst1.32 {q0,q1}, [r0,:128]!
bge 1b
2:
adds r3, r3, #8
bxeq lr                         @ no remainder
3: @ scalar tail loop
subs r3, r3, #1
vld1.16 {d0[0]}, [r1]!
vrev16.8 d0, d0
vmovl.s16 q0, d0
vcvt.f32.s32 d0, d0, #8
vst1.32 {d0[0]}, [r0]!
bgt 3b
bx lr
endfunc
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/ADC/ADC_SingleConversion_TriggerTimer_DMA/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
; IAR cstartup module for STM32F072x8/xB: Cortex-M0 vector table plus weak
; default handlers.  Every handler is PUBWEAK so application code can
; override it by simply defining a function of the same name; the defaults
; below spin in place (B <self>) so a stray interrupt halts visibly under
; a debugger instead of executing random code.
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
; Vector table placed at the start of flash.  Entry 0 is the initial stack
; pointer (end of CSTACK); entry 1 is the reset vector; the remaining
; entries follow the Cortex-M0 / STM32F072 interrupt map.
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
; Reset: run SystemInit (clock/system setup) first, then hand control to the
; IAR C runtime entry, which initializes data/bss and calls main().
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aestream/faery
| 25,097
|
src/mp4/x264/common/arm/deblock-a.S
|
/*****************************************************************************
* deblock.S: arm deblocking
*****************************************************************************
* Copyright (C) 2009-2024 x264 project
*
* Authors: Mans Rullgard <mans@mansr.com>
* Martin Storsjo <martin@martin.st>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "asm.S"
@ Common prologue for the bS<4 deblock entry points.
@ Loads the packed 4-byte tc0 array (5th C argument, pointer on the stack)
@ and broadcasts it to d24. The shifted AND ladder folds the sign bits of
@ all four tc0 bytes together so bxlt returns early when every tc0 value
@ is negative, i.e. the whole edge needs no filtering.
.macro h264_loop_filter_start
ldr ip, [sp]
ldr ip, [ip] @ ip = *(int32_t*)tc0 (4 packed tc0 bytes)
vdup.32 d24, ip
and ip, ip, ip, lsl #16
ands ip, ip, ip, lsl #8
bxlt lr @ all tc0 < 0: nothing to filter
.endm
@ Spill the callee-saved NEON registers q4-q7 (d8-d15) to a freshly
@ 16-byte-aligned stack area; ip keeps the alignment pad so that
@ align_pop_regs can restore sp exactly. ip must stay live in between.
.macro align_push_regs
and ip, sp, #15
add ip, ip, #32
sub sp, sp, ip
vst1.64 {d12-d15}, [sp,:128]
sub sp, sp, #32
vst1.64 {d8-d11}, [sp,:128]
.endm
@ Restore d8-d15 and drop the alignment pad recorded in ip.
.macro align_pop_regs
vld1.64 {d8-d11}, [sp,:128]!
vld1.64 {d12-d15}, [sp,:128], ip
.endm
@ Normal (bS < 4) luma deblock filter across 16 columns at once.
@ In:  one pixel row per q register: q10=p2 q9=p1 q8=p0, q0=q0 q1=q1 q2=q2
@      r2 = alpha, r3 = beta, d24 = tc0 (4 bytes, one per 4-pixel group)
@ Out: q8 = p0', q0 = q0'; q4/q5 end up holding p1'/q1' (selected via vbsl
@      by the |p2-p0|<beta / |q2-q0|<beta masks).
@ Instruction order interleaves independent chains for dual-issue; do not
@ reorder without re-checking register lifetimes.
.macro h264_loop_filter_luma
vdup.8 q11, r2 @ alpha
vmovl.u8 q12, d24
vabd.u8 q6, q8, q0 @ abs(p0 - q0)
vmovl.u16 q12, d24
vabd.u8 q14, q9, q8 @ abs(p1 - p0)
vsli.16 q12, q12, #8 @ replicate each tc0 byte across its 4-pixel group
vabd.u8 q15, q1, q0 @ abs(q1 - q0)
vsli.32 q12, q12, #16
vclt.u8 q6, q6, q11 @ < alpha
vdup.8 q11, r3 @ beta
vclt.s8 q7, q12, #0 @ tc0 < 0: group disabled
vclt.u8 q14, q14, q11 @ < beta
vclt.u8 q15, q15, q11 @ < beta
vbic q6, q6, q7
vabd.u8 q4, q10, q8 @ abs(p2 - p0)
vand q6, q6, q14
vabd.u8 q5, q2, q0 @ abs(q2 - q0)
vclt.u8 q4, q4, q11 @ < beta
vand q6, q6, q15 @ q6 = overall filter mask
vclt.u8 q5, q5, q11 @ < beta
vand q4, q4, q6 @ p-side strong-ish mask
vand q5, q5, q6 @ q-side strong-ish mask
vand q12, q12, q6 @ tc0 masked to active pixels
vrhadd.u8 q14, q8, q0 @ avg(p0, q0), rounded
vsub.i8 q6, q12, q4 @ tc = tc0 (+1 per extra side below)
vqadd.u8 q7, q9, q12 @ p1 + tc0 clip hi
vhadd.u8 q10, q10, q14
vsub.i8 q6, q6, q5
vhadd.u8 q14, q2, q14
vmin.u8 q7, q7, q10 @ p1' candidate, clipped
vqsub.u8 q11, q9, q12 @ p1 - tc0 clip lo
vqadd.u8 q2, q1, q12
vmax.u8 q7, q7, q11
vqsub.u8 q11, q1, q12
vmin.u8 q14, q2, q14 @ q1' candidate, clipped
vmovl.u8 q2, d0
vmax.u8 q14, q14, q11
vmovl.u8 q10, d1
vsubw.u8 q2, q2, d16 @ delta = (q0 - p0) * 4 ...
vsubw.u8 q10, q10, d17
vshl.i16 q2, q2, #2
vshl.i16 q10, q10, #2
vaddw.u8 q2, q2, d18 @ ... + p1
vaddw.u8 q10, q10, d19
vsubw.u8 q2, q2, d2 @ ... - q1
vsubw.u8 q10, q10, d3
vrshrn.i16 d4, q2, #3 @ (... + 4) >> 3
vrshrn.i16 d5, q10, #3
vbsl q4, q7, q9 @ select filtered/original p1
vbsl q5, q14, q1 @ select filtered/original q1
vneg.s8 q7, q6 @ -tc
vmovl.u8 q14, d16
vmin.s8 q2, q2, q6 @ clamp delta to [-tc, tc]
vmovl.u8 q6, d17
vmax.s8 q2, q2, q7
vmovl.u8 q11, d0
vmovl.u8 q12, d1
vaddw.s8 q14, q14, d4 @ p0 + delta
vaddw.s8 q6, q6, d5
vsubw.s8 q11, q11, d4 @ q0 - delta
vsubw.s8 q12, q12, d5
vqmovun.s16 d16, q14 @ saturate back to u8
vqmovun.s16 d17, q6
vqmovun.s16 d0, q11
vqmovun.s16 d1, q12
.endm
@ void deblock_v_luma( uint8_t *pix, intptr_t stride, int alpha, int beta,
@                      int8_t *tc0 )
@ Vertical-edge luma filter; r0 = pix (points at the q0 row), r1 = stride.
@ Loads q0..q2 going down, rewinds 6 rows to reach p2, loads p2..p0.
function deblock_v_luma_neon
h264_loop_filter_start
vld1.64 {d0, d1}, [r0,:128], r1 @ q0 row
vld1.64 {d2, d3}, [r0,:128], r1 @ q1 row
vld1.64 {d4, d5}, [r0,:128], r1 @ q2 row
sub r0, r0, r1, lsl #2 @ r0 -= 6*stride -> p2 row
sub r0, r0, r1, lsl #1
vld1.64 {d20,d21}, [r0,:128], r1 @ p2 row
vld1.64 {d18,d19}, [r0,:128], r1 @ p1 row
vld1.64 {d16,d17}, [r0,:128], r1 @ p0 row
align_push_regs
h264_loop_filter_luma
sub r0, r0, r1, lsl #1 @ back to p1 row
vst1.64 {d8, d9}, [r0,:128], r1 @ p1'
vst1.64 {d16,d17}, [r0,:128], r1 @ p0'
vst1.64 {d0, d1}, [r0,:128], r1 @ q0'
vst1.64 {d10,d11}, [r0,:128] @ q1'
align_pop_regs
bx lr
endfunc
@ void deblock_h_luma( uint8_t *pix, intptr_t stride, int alpha, int beta,
@                      int8_t *tc0 )
@ Horizontal-edge luma filter: loads 16 rows of 8 bytes starting 4 pixels
@ left of the edge, transposes so the shared vertical filter applies, then
@ transposes the 4 modified middle columns back and stores them.
function deblock_h_luma_neon
h264_loop_filter_start
sub r0, r0, #4
vld1.64 {d6}, [r0], r1
vld1.64 {d20}, [r0], r1
vld1.64 {d18}, [r0], r1
vld1.64 {d16}, [r0], r1
vld1.64 {d0}, [r0], r1
vld1.64 {d2}, [r0], r1
vld1.64 {d4}, [r0], r1
vld1.64 {d26}, [r0], r1
vld1.64 {d7}, [r0], r1
vld1.64 {d21}, [r0], r1
vld1.64 {d19}, [r0], r1
vld1.64 {d17}, [r0], r1
vld1.64 {d1}, [r0], r1
vld1.64 {d3}, [r0], r1
vld1.64 {d5}, [r0], r1
vld1.64 {d27}, [r0], r1
TRANSPOSE8x8 q3, q10, q9, q8, q0, q1, q2, q13 @ rows -> p3..q3 columns
align_push_regs
h264_loop_filter_luma
TRANSPOSE4x4 q4, q8, q0, q5 @ p1',p0',q0',q1' back to row order
sub r0, r0, r1, lsl #4 @ rewind 16 rows
add r0, r0, #2 @ skip p3,p2: store only the 4 middle columns
vst1.32 {d8[0]}, [r0], r1
vst1.32 {d16[0]}, [r0], r1
vst1.32 {d0[0]}, [r0], r1
vst1.32 {d10[0]}, [r0], r1
vst1.32 {d8[1]}, [r0], r1
vst1.32 {d16[1]}, [r0], r1
vst1.32 {d0[1]}, [r0], r1
vst1.32 {d10[1]}, [r0], r1
vst1.32 {d9[0]}, [r0], r1
vst1.32 {d17[0]}, [r0], r1
vst1.32 {d1[0]}, [r0], r1
vst1.32 {d11[0]}, [r0], r1
vst1.32 {d9[1]}, [r0], r1
vst1.32 {d17[1]}, [r0], r1
vst1.32 {d1[1]}, [r0], r1
vst1.32 {d11[1]}, [r0], r1
align_pop_regs
bx lr
endfunc
@ Strong (bS == 4, intra) luma deblock filter across 16 columns.
@ In:  q11=p3 q10=p2 q9=p1 q8=p0, q0=q0 q1=q1 q2=q2 q3=q3 (one row each),
@      r2 = alpha, r3 = beta.
@ Out: filtered p2..q2 in their input registers. Branches to label "9:"
@ in the caller (early exit) when no pixel passes the alpha/beta test.
@ Temporarily spills the if_1/if_2 masks to 32 bytes of stack.
.macro h264_loop_filter_luma_intra
vdup.8 q14, r2 @ alpha
vabd.u8 q4, q8, q0 @ abs(p0 - q0)
vabd.u8 q5, q9, q8 @ abs(p1 - p0)
vabd.u8 q6, q1, q0 @ abs(q1 - q0)
vdup.8 q15, r3 @ beta
vmov.u8 q13, #2
vclt.u8 q7, q4, q14 @ < alpha
vshr.u8 q14, q14, #2 @ alpha >> 2
vclt.u8 q5, q5, q15 @ < beta
vadd.u8 q14, q14, q13 @ (alpha >> 2) + 2
vand q7, q7, q5
vclt.u8 q6, q6, q15 @ < beta
vclt.u8 q13, q4, q14 @ < (alpha >> 2) + 2 if_2
vand q12, q7, q6 @ if_1
vshrn.u16 d28, q12, #4 @ compress mask for the scalar zero test
vmov r2, lr, d28
orrs r2, r2, lr
beq 9f @ nothing to filter on this edge
sub sp, sp, #32
vst1.8 {q12-q13}, [sp,:128] @ spill if_1/if_2 for the q-side pass
@ ---- p-side strong filter ----
vshll.u8 q4, d18, #1 @ 2*p1
vshll.u8 q5, d19, #1
vaddw.u8 q4, q4, d16 @ 2*p1 + p0
vaddw.u8 q5, q5, d17
vaddw.u8 q4, q4, d2 @ 2*p1 + p0 + q1
vaddw.u8 q5, q5, d3
vrshrn.u16 d24, q4, #2 @ p0'_1 (weak variant)
vrshrn.u16 d25, q5, #2
vaddl.u8 q6, d20, d16 @ p2 + p0
vaddl.u8 q7, d21, d17
vaddw.u8 q6, q6, d0 @ p2 + p0 + q0
vaddw.u8 q7, q7, d1
vadd.u16 q4, q4, q6 @ p2 + 2*p1 + 2*p0 + q0 + q1
vadd.u16 q5, q5, q7
vaddw.u8 q4, q4, d0 @ p2 + 2*p1 + 2*p0 + 2*q0 + q1
vaddw.u8 q5, q5, d1
vrshrn.u16 d26, q4, #3 @ p0'_2
vrshrn.u16 d27, q5, #3
vaddw.u8 q6, q6, d18 @ p2 + p1 + p0 + q0
vaddw.u8 q7, q7, d19
vrshrn.u16 d28, q6, #2 @ p1'_2
vrshrn.u16 d29, q7, #2
vaddl.u8 q4, d22, d20 @ p3 + p2
vaddl.u8 q5, d23, d21
vshl.u16 q4, q4, #1 @ 2*p3 + 2*p2
vshl.u16 q5, q5, #1
vadd.u16 q4, q4, q6 @ 2*p3 + 3*p2 + p1 + p0 + q0
vadd.u16 q5, q5, q7
vrshrn.u16 d30, q4, #3 @ p2'_2
vrshrn.u16 d31, q5, #3
vdup.8 q4, r3 @ beta
vabd.u8 q5, q10, q8 @ abs(p2 - p0)
vld1.8 {q6-q7}, [sp,:128] @ if_1, if_2
vclt.u8 q5, q5, q4 @ < beta if_3
vand q7, q7, q5 @ if_2 && if_3
vmvn q4, q7
vand q7, q7, q6 @ if_1 && if_2 && if_3
vand q6, q4, q6 @ if_1 && !(if_2 && if_3)
@ copy p0 to q15 so it can be clobbered
vbit q10, q15, q7 @ p2 <- p2'_2 where strong
vmov q15, q8 @ q15 = original p0 for the q-side sums
vbit q8, q12, q6 @ p0 <- p0'_1 where weak
@ wait for q9 to clobber
@ ---- q-side strong filter (mirror of the above) ----
vshll.u8 q4, d2, #1 @ 2*q1
vshll.u8 q5, d3, #1
vbit q8, q12, q6 @ NOTE(review): repeats the vbit above with identical
@ operands - appears redundant; confirm against upstream before removing
vaddw.u8 q4, q4, d0 @ 2*q1 + q0
vaddw.u8 q5, q5, d1
vbit q8, q13, q7 @ p0 <- p0'_2 where strong
vaddw.u8 q4, q4, d18 @ 2*q1 + q0 + p1
vaddw.u8 q5, q5, d19
vbit q9, q14, q7 @ p1 <- p1'_2 where strong
vrshrn.u16 d24, q4, #2 @ q0'_1 (weak variant)
vrshrn.u16 d25, q5, #2
vaddl.u8 q6, d4, d0 @ q2 + q0
vaddl.u8 q7, d5, d1
vaddw.u8 q6, q6, d30 @ q2 + q0 + p0
vaddw.u8 q7, q7, d31
vadd.u16 q4, q4, q6 @ q2 + 2*q1 + 2*q0 + p0 + p1
vadd.u16 q5, q5, q7
vaddw.u8 q4, q4, d30 @ q2 + 2*q1 + 2*q0 + 2*p0 + p1
vaddw.u8 q5, q5, d31
vrshrn.u16 d26, q4, #3 @ q0'_2
vrshrn.u16 d27, q5, #3
vaddw.u8 q6, q6, d2 @ q2 + q1 + q0 + p0
vaddw.u8 q7, q7, d3
vrshrn.u16 d28, q6, #2 @ q1'_2
vrshrn.u16 d29, q7, #2
vaddl.u8 q4, d6, d4 @ q3 + q2
vaddl.u8 q5, d7, d5
vshl.u16 q4, q4, #1 @ 2*q3 + 2*q2
vshl.u16 q5, q5, #1
vadd.u16 q4, q4, q6 @ 2*q3 + 3*q2 + q1 + q0 + p0
vadd.u16 q5, q5, q7
vrshrn.u16 d30, q4, #3 @ q2'_2
vrshrn.u16 d31, q5, #3
vdup.8 q4, r3 @ beta
vabd.u8 q5, q2, q0 @ abs(q2 - q0)
vld1.8 {q6-q7}, [sp,:128]! @ if_1, if_2 (and release the spill area)
vclt.u8 q5, q5, q4 @ < beta if_4
vand q7, q7, q5 @ if_2 && if_4
vmvn q4, q7
vand q7, q6, q7 @ if_1 && if_2 && if_4
vand q6, q6, q4 @ if_1 && !(if_2 && if_4)
vbit q0, q12, q6 @ q0 <- q0'_1 where weak
vbit q1, q14, q7 @ q1 <- q1'_2 where strong
vbit q0, q13, q7 @ q0 <- q0'_2 where strong
vbit q2, q15, q7 @ q2 <- q2'_2 where strong
.endm
@ void deblock_v_luma_intra( uint8_t *pix, intptr_t stride, int alpha,
@                            int beta )
@ Vertical-edge strong luma filter; r0 = pix (q0 row), r1 = stride.
@ Label "9:" is the early-exit target of h264_loop_filter_luma_intra.
function deblock_v_luma_intra_neon
push {lr}
vld1.64 {d0, d1}, [r0,:128], r1 @ q0 row
vld1.64 {d2, d3}, [r0,:128], r1 @ q1 row
vld1.64 {d4, d5}, [r0,:128], r1 @ q2 row
vld1.64 {d6, d7}, [r0,:128], r1 @ q3 row
sub r0, r0, r1, lsl #3 @ r0 -= 8*stride -> p3 row
vld1.64 {d22,d23}, [r0,:128], r1 @ p3 row
vld1.64 {d20,d21}, [r0,:128], r1 @ p2 row
vld1.64 {d18,d19}, [r0,:128], r1 @ p1 row
vld1.64 {d16,d17}, [r0,:128] @ p0 row
align_push_regs
h264_loop_filter_luma_intra
sub r0, r0, r1, lsl #1 @ back to p2 row
vst1.64 {d20,d21}, [r0,:128], r1 @ p2'
vst1.64 {d18,d19}, [r0,:128], r1 @ p1'
vst1.64 {d16,d17}, [r0,:128], r1 @ p0'
vst1.64 {d0, d1}, [r0,:128], r1 @ q0'
vst1.64 {d2, d3}, [r0,:128], r1 @ q1'
vst1.64 {d4, d5}, [r0,:128] @ q2'
9:
align_pop_regs
pop {pc}
endfunc
@ void deblock_h_luma_intra( uint8_t *pix, intptr_t stride, int alpha,
@                            int beta )
@ Horizontal-edge strong luma filter: transpose 16x8 in, filter, transpose
@ back and store all 8 columns. "9:" is the macro's early-exit target;
@ note it skips the stores but still restores registers.
function deblock_h_luma_intra_neon
push {lr}
sub r0, r0, #4 @ start 4 pixels left of the edge
vld1.64 {d22}, [r0], r1
vld1.64 {d20}, [r0], r1
vld1.64 {d18}, [r0], r1
vld1.64 {d16}, [r0], r1
vld1.64 {d0}, [r0], r1
vld1.64 {d2}, [r0], r1
vld1.64 {d4}, [r0], r1
vld1.64 {d6}, [r0], r1
vld1.64 {d23}, [r0], r1
vld1.64 {d21}, [r0], r1
vld1.64 {d19}, [r0], r1
vld1.64 {d17}, [r0], r1
vld1.64 {d1}, [r0], r1
vld1.64 {d3}, [r0], r1
vld1.64 {d5}, [r0], r1
vld1.64 {d7}, [r0], r1
TRANSPOSE8x8 q11, q10, q9, q8, q0, q1, q2, q3 @ rows -> p3..q3
align_push_regs
h264_loop_filter_luma_intra
TRANSPOSE8x8 q11, q10, q9, q8, q0, q1, q2, q3 @ back to row order
sub r0, r0, r1, lsl #4 @ rewind 16 rows
vst1.64 {d22}, [r0], r1
vst1.64 {d20}, [r0], r1
vst1.64 {d18}, [r0], r1
vst1.64 {d16}, [r0], r1
vst1.64 {d0}, [r0], r1
vst1.64 {d2}, [r0], r1
vst1.64 {d4}, [r0], r1
vst1.64 {d6}, [r0], r1
vst1.64 {d23}, [r0], r1
vst1.64 {d21}, [r0], r1
vst1.64 {d19}, [r0], r1
vst1.64 {d17}, [r0], r1
vst1.64 {d1}, [r0], r1
vst1.64 {d3}, [r0], r1
vst1.64 {d5}, [r0], r1
vst1.64 {d7}, [r0], r1
9:
align_pop_regs
pop {pc}
endfunc
// Normal (bS < 4) chroma deblock filter across 16 interleaved bytes.
// In:  q9=p1 q8=p0 q0=q0 q1=q1 rows, r2=alpha, r3=beta, d24=tc0.
// Out: q8=p0', q0=q0' (p1/q1 are not modified for chroma).
.macro h264_loop_filter_chroma
vdup.8 q11, r2 // alpha
vmovl.u8 q12, d24
vabd.u8 q13, q8, q0 // abs(p0 - q0)
vabd.u8 q14, q9, q8 // abs(p1 - p0)
vsubl.u8 q2, d0, d16 // q0 - p0, widened
vsubl.u8 q3, d1, d17
vsli.16 q12, q12, #8 // spread tc0 bytes across pixel pairs
vshl.i16 q2, q2, #2 // (q0 - p0) * 4
vshl.i16 q3, q3, #2
vabd.u8 q15, q1, q0 // abs(q1 - q0)
vmovl.u8 q12, d24
vaddw.u8 q2, q2, d18 // ... + p1
vaddw.u8 q3, q3, d19
vclt.u8 q13, q13, q11 // < alpha
vsubw.u8 q2, q2, d2 // ... - q1
vsubw.u8 q3, q3, d3
vsli.16 q12, q12, #8
vdup.8 q11, r3 // beta
vclt.s8 q10, q12, #0 // tc0 < 0: disabled
vrshrn.i16 d4, q2, #3 // delta = (... + 4) >> 3
vrshrn.i16 d5, q3, #3
vclt.u8 q14, q14, q11 // < beta
vbic q13, q13, q10
vclt.u8 q15, q15, q11 // < beta
vand q13, q13, q14
vneg.s8 q10, q12 // -tc
vand q13, q13, q15 // q13 = filter mask
vmin.s8 q2, q2, q12 // clamp delta to [-tc, tc]
vmovl.u8 q14, d16
vand q2, q2, q13 // zero delta where masked out
vmovl.u8 q15, d17
vmax.s8 q2, q2, q10
vmovl.u8 q11, d0
vmovl.u8 q12, d1
vaddw.s8 q14, q14, d4 // p0 + delta
vaddw.s8 q15, q15, d5
vsubw.s8 q11, q11, d4 // q0 - delta
vsubw.s8 q12, q12, d5
vqmovun.s16 d16, q14 // saturate back to u8
vqmovun.s16 d17, q15
vqmovun.s16 d0, q11
vqmovun.s16 d1, q12
.endm
@ void deblock_v_chroma( uint8_t *pix, intptr_t stride, int alpha,
@                        int beta, int8_t *tc0 )
@ Vertical-edge chroma filter on 16 interleaved bytes; r0 = pix (q0 row).
function deblock_v_chroma_neon
h264_loop_filter_start
sub r0, r0, r1, lsl #1 @ up to p1 row
vld1.8 {d18,d19}, [r0,:128], r1 @ p1
vld1.8 {d16,d17}, [r0,:128], r1 @ p0
vld1.8 {d0, d1}, [r0,:128], r1 @ q0
vld1.8 {d2, d3}, [r0,:128] @ q1
h264_loop_filter_chroma
sub r0, r0, r1, lsl #1 @ back to p0 row
vst1.8 {d16,d17}, [r0,:128], r1 @ p0'
vst1.8 {d0, d1}, [r0,:128], r1 @ q0'
bx lr
endfunc
@ void deblock_h_chroma( uint8_t *pix, intptr_t stride, int alpha,
@                        int beta, int8_t *tc0 )
@ Horizontal-edge chroma filter. The inner label deblock_h_chroma is also
@ tail-called twice by deblock_h_chroma_422_neon below.
function deblock_h_chroma_neon
h264_loop_filter_start
sub r0, r0, #4 @ start 4 bytes left of the edge
deblock_h_chroma:
vld1.8 {d18}, [r0], r1
vld1.8 {d16}, [r0], r1
vld1.8 {d0}, [r0], r1
vld1.8 {d2}, [r0], r1
vld1.8 {d19}, [r0], r1
vld1.8 {d17}, [r0], r1
vld1.8 {d1}, [r0], r1
vld1.8 {d3}, [r0], r1
TRANSPOSE4x4_16 q9, q8, q0, q1 @ 16-bit lane transpose (cb/cr pairs)
h264_loop_filter_chroma
vtrn.16 q8, q0 @ p0'/q0' back to row order
sub r0, r0, r1, lsl #3 @ rewind 8 rows
add r0, r0, #2 @ store only the 2 middle pixel pairs
vst1.32 {d16[0]}, [r0], r1
vst1.32 {d0[0]}, [r0], r1
vst1.32 {d16[1]}, [r0], r1
vst1.32 {d0[1]}, [r0], r1
vst1.32 {d17[0]}, [r0], r1
vst1.32 {d1[0]}, [r0], r1
vst1.32 {d17[1]}, [r0], r1
vst1.32 {d1[1]}, [r0], r1
bx lr
endfunc
@ 4:2:2 horizontal chroma filter: runs the 8-row deblock_h_chroma body
@ twice at doubled stride, offset by one original row the second time.
function deblock_h_chroma_422_neon
h264_loop_filter_start
push {lr}
sub r0, r0, #4
add r1, r1, r1 @ stride *= 2: filter even rows first
bl deblock_h_chroma
ldr ip, [sp, #4] @ reload tc0 ptr (5th arg, past pushed lr)
ldr ip, [ip]
vdup.32 d24, ip
sub r0, r0, r1, lsl #3 @ rewind the 8 doubled-stride rows
add r0, r0, r1, lsr #1 @ advance one original row (odd rows)
sub r0, r0, #2 @ undo the store-side +2 column offset
pop {lr}
b deblock_h_chroma @ tail call for the odd rows
endfunc
@ 8-byte-wide variant of h264_loop_filter_chroma, used for MBAFF edges.
@ In:  d18=p1 d16=p0 d0=q0 d2=q1, r2=alpha, r3=beta, d24=tc0.
@ Out: d16=p0', d0=q0'.
.macro h264_loop_filter_chroma8
vdup.8 d22, r2 @ alpha
vmovl.u8 q12, d24
vabd.u8 d26, d16, d0 @ abs(p0 - q0)
vabd.u8 d28, d18, d16 @ abs(p1 - p0)
vsubl.u8 q2, d0, d16 @ q0 - p0, widened
vsli.16 d24, d24, #8 @ spread tc0 across pixel pairs
vshl.i16 q2, q2, #2 @ (q0 - p0) * 4
vabd.u8 d30, d2, d0 @ abs(q1 - q0)
vaddw.u8 q2, q2, d18 @ ... + p1
vclt.u8 d26, d26, d22 @ < alpha
vsubw.u8 q2, q2, d2 @ ... - q1
vdup.8 d22, r3 @ beta
vclt.s8 d20, d24, #0 @ tc0 < 0: disabled
vrshrn.i16 d4, q2, #3 @ delta = (... + 4) >> 3
vclt.u8 d28, d28, d22 @ < beta
vbic d26, d26, d20
vclt.u8 d30, d30, d22 @ < beta
vand d26, d26, d28
vneg.s8 d20, d24 @ -tc
vand d26, d26, d30 @ d26 = filter mask
vmin.s8 d4, d4, d24 @ clamp delta to [-tc, tc]
vmovl.u8 q14, d16
vand d4, d4, d26 @ zero delta where masked out
vmax.s8 d4, d4, d20
vmovl.u8 q11, d0
vaddw.s8 q14, q14, d4 @ p0 + delta
vsubw.s8 q11, q11, d4 @ q0 - delta
vqmovun.s16 d16, q14 @ saturate back to u8
vqmovun.s16 d0, q11
.endm
@ MBAFF horizontal chroma filter: 4 rows only, via the 8-byte-wide macro.
function deblock_h_chroma_mbaff_neon
h264_loop_filter_start
sub r0, r0, #4
vld1.8 {d18}, [r0], r1
vld1.8 {d16}, [r0], r1
vld1.8 {d0}, [r0], r1
vld1.8 {d2}, [r0], r1
TRANSPOSE4x4_16 d18, d16, d0, d2 @ rows -> p1,p0,q0,q1
h264_loop_filter_chroma8
vtrn.16 d16, d0 @ p0'/q0' back to row order
sub r0, r0, r1, lsl #2 @ rewind 4 rows
add r0, r0, #2 @ store only the middle pixel pairs
vst1.32 {d16[0]}, [r0], r1
vst1.32 {d0[0]}, [r0], r1
vst1.32 {d16[1]}, [r0], r1
vst1.32 {d0[1]}, [r0]
bx lr
endfunc
@ Strong (intra) chroma filter; \width selects 16- or 8-byte operation
@ (width=8 uses only the low d registers of each q).
@ In:  q9=p1 q8=p0 q0=q0 q1=q1, r2=alpha, r3=beta.
@ Out: q8=p0' = (2*p1 + p0 + q1 + 2) >> 2, q0=q0' = (2*q1 + q0 + p1 + 2) >> 2,
@      applied only where the alpha/beta mask (q13) is set.
.macro h264_loop_filter_chroma_intra, width=16
vdup.8 q11, r2 @ alpha
vabd.u8 q13, q8, q0 @ abs(p0 - q0)
vabd.u8 q14, q9, q8 @ abs(p1 - p0)
vabd.u8 q15, q1, q0 @ abs(q1 - q0)
vclt.u8 q13, q13, q11 @ < alpha
vdup.8 q11, r3 @ beta
vclt.u8 q14, q14, q11 @ < beta
vclt.u8 q15, q15, q11 @ < beta
vand q13, q13, q14
vand q13, q13, q15 @ q13 = filter mask
vshll.u8 q14, d18, #1 @ 2*p1
vshll.u8 q2, d2, #1 @ 2*q1
.ifc \width, 16
vshll.u8 q15, d19, #1
vshll.u8 q3, d3, #1
vaddl.u8 q12, d17, d3
vaddl.u8 q10, d1, d19
.endif
vaddl.u8 q11, d16, d2 @ p0 + q1
vaddl.u8 q1, d18, d0 @ or vaddw q2, to not clobber q1
vadd.u16 q14, q14, q11 @ 2*p1 + p0 + q1
vadd.u16 q2, q2, q1 @ 2*q1 + q0 + p1
.ifc \width, 16
vadd.u16 q15, q15, q12
vadd.u16 q3, q3, q10
.endif
vqrshrn.u16 d28, q14, #2
vqrshrn.u16 d4, q2, #2
.ifc \width, 16
vqrshrn.u16 d29, q15, #2
vqrshrn.u16 d5, q3, #2
.endif
vbit q8, q14, q13 @ select filtered p0 where masked
vbit q0, q2, q13 @ select filtered q0 where masked
.endm
@ void deblock_v_chroma_intra( uint8_t *pix, intptr_t stride, int alpha,
@                              int beta )
@ Vertical-edge strong chroma filter. vld2/vst2 de/re-interleave the
@ cb/cr bytes; the per-lane vertical filter is unaffected by that layout.
function deblock_v_chroma_intra_neon
sub r0, r0, r1, lsl #1 @ up to p1 row
vld2.8 {d18,d19}, [r0,:128], r1 @ p1
vld2.8 {d16,d17}, [r0,:128], r1 @ p0
vld2.8 {d0, d1}, [r0,:128], r1 @ q0
vld2.8 {d2, d3}, [r0,:128] @ q1
h264_loop_filter_chroma_intra
sub r0, r0, r1, lsl #1 @ back to p0 row
vst2.8 {d16,d17}, [r0,:128], r1 @ p0'
vst2.8 {d0, d1}, [r0,:128], r1 @ q0'
bx lr
endfunc
@ void deblock_h_chroma_intra( uint8_t *pix, intptr_t stride, int alpha,
@                              int beta )
@ Horizontal-edge strong chroma filter over 8 rows; also reused (twice)
@ by deblock_h_chroma_422_intra_neon below.
function deblock_h_chroma_intra_neon
sub r0, r0, #4 @ start 4 bytes left of the edge
vld1.8 {d18}, [r0], r1
vld1.8 {d16}, [r0], r1
vld1.8 {d0}, [r0], r1
vld1.8 {d2}, [r0], r1
vld1.8 {d19}, [r0], r1
vld1.8 {d17}, [r0], r1
vld1.8 {d1}, [r0], r1
vld1.8 {d3}, [r0], r1
TRANSPOSE4x4_16 q9, q8, q0, q1 @ rows -> p1,p0,q0,q1
h264_loop_filter_chroma_intra
vtrn.16 q8, q0 @ p0'/q0' back to row order
sub r0, r0, r1, lsl #3 @ rewind 8 rows
add r0, r0, #2 @ store only the middle pixel pairs
vst1.32 {d16[0]}, [r0], r1
vst1.32 {d0[0]}, [r0], r1
vst1.32 {d16[1]}, [r0], r1
vst1.32 {d0[1]}, [r0], r1
vst1.32 {d17[0]}, [r0], r1
vst1.32 {d1[0]}, [r0], r1
vst1.32 {d17[1]}, [r0], r1
vst1.32 {d1[1]}, [r0], r1
bx lr
endfunc
@ 4:2:2 strong horizontal chroma filter: run the 8-row intra filter on
@ the top half, then tail-call it again for the bottom half. The called
@ function's net pointer movement plus the +2 lands on the next 8 rows.
function deblock_h_chroma_422_intra_neon
push {lr}
bl X(deblock_h_chroma_intra_neon)
add r0, r0, #2
pop {lr}
b X(deblock_h_chroma_intra_neon)
endfunc
@ MBAFF strong horizontal chroma filter: 4 rows, 8-byte-wide macro path.
function deblock_h_chroma_intra_mbaff_neon
sub r0, r0, #4
vld1.8 {d18}, [r0], r1
vld1.8 {d16}, [r0], r1
vld1.8 {d0}, [r0], r1
vld1.8 {d2}, [r0], r1
TRANSPOSE4x4_16 d18, d16, d0, d2 @ rows -> p1,p0,q0,q1
h264_loop_filter_chroma_intra width=8
vtrn.16 d16, d0 @ p0'/q0' back to row order
sub r0, r0, r1, lsl #2 @ rewind 4 rows
add r0, r0, #2
vst1.32 {d16[0]}, [r0], r1
vst1.32 {d0[0]}, [r0], r1
vst1.32 {d16[1]}, [r0], r1
vst1.32 {d0[1]}, [r0]
bx lr
endfunc
@ void deblock_strength( uint8_t nnz[], int8_t ref[2][..], int16_t mv[2][..][2],
@                        uint8_t bs[2][8][4], int mvy_limit, int bframe )
@ Computes boundary strengths for both edge directions.
@ r0=nnz r1=ref r2=mv r3=bs, [sp]=mvy_limit, [sp+4]=bframe.
@ q8/q9 accumulate the "mv or ref differs" masks for the two directions;
@ the lists loop runs once more when bframe is set (second ref list).
function deblock_strength_neon
ldr ip, [sp] @ mvy_limit
vmov.i8 q8, #0
lsl ip, ip, #8
add r3, r3, #32 @ point at bs[1] first (stored with ip = -32 below)
sub ip, ip, #(1<<8)-3 @ pack per-component mv thresholds into one u16
vmov.i8 q9, #0
vdup.16 q10, ip
ldr ip, [sp, #4] @ bframe: loop count - 1
lists:
@ load bytes ref
vld1.8 {d31}, [r1]!
add r2, r2, #16
vld1.8 {q1}, [r1]!
vmov.i8 q0, #0
vld1.8 {q2}, [r1]!
vext.8 q3, q0, q1, #15 @ refs shifted by one row (top neighbours)
vext.8 q0, q0, q2, #15
vuzp.32 q1, q2
vuzp.32 q3, q0
vext.8 q1, q15, q2, #12 @ refs shifted by one column (left neighbours)
veor q0, q0, q2 @ ref differs vs top?
veor q1, q1, q2 @ ref differs vs left?
vorr q8, q8, q0
vorr q9, q9, q1
vld1.16 {q11}, [r2,:128]! @ mv + 0x10
vld1.16 {q3}, [r2,:128]! @ mv + 0x20
vld1.16 {q12}, [r2,:128]! @ mv + 0x30
vld1.16 {q2}, [r2,:128]! @ mv + 0x40
vld1.16 {q13}, [r2,:128]! @ mv + 0x50
vext.8 q3, q3, q12, #12 @ left-neighbour mvs
vext.8 q2, q2, q13, #12
vabd.s16 q0, q12, q3 @ |mv - mv_left|
vld1.16 {q3}, [r2,:128]! @ mv + 0x60
vabd.s16 q1, q13, q2
vld1.16 {q14}, [r2,:128]! @ mv + 0x70
vqmovn.u16 d0, q0
vld1.16 {q2}, [r2,:128]! @ mv + 0x80
vld1.16 {q15}, [r2,:128]! @ mv + 0x90
vqmovn.u16 d1, q1
vext.8 q3, q3, q14, #12
vext.8 q2, q2, q15, #12
vabd.s16 q3, q14, q3
vabd.s16 q2, q15, q2
vqmovn.u16 d2, q3
vqmovn.u16 d3, q2
vqsub.u8 q0, q0, q10 @ nonzero iff any component over its limit
vqsub.u8 q1, q1, q10
vqmovn.u16 d0, q0
vqmovn.u16 d1, q1
vabd.s16 q1, q12, q13 @ |mv - mv_top| for the other direction
vorr q8, q8, q0
vabd.s16 q0, q11, q12
vabd.s16 q2, q13, q14
vabd.s16 q3, q14, q15
vqmovn.u16 d0, q0
vqmovn.u16 d1, q1
vqmovn.u16 d2, q2
vqmovn.u16 d3, q3
vqsub.u8 q0, q0, q10
vqsub.u8 q1, q1, q10
vqmovn.u16 d0, q0
vqmovn.u16 d1, q1
subs ip, ip, #1
vorr q9, q9, q0
beq lists @ taken exactly once when bframe != 0 (list 1 pass)
mov ip, #-32
@ load bytes nnz
vld1.8 {d31}, [r0]!
vld1.8 {q1}, [r0]!
vmov.i8 q0, #0
vld1.8 {q2}, [r0]
vext.8 q3, q0, q1, #15 @ nnz shifted by one row
vext.8 q0, q0, q2, #15
vuzp.32 q1, q2
vuzp.32 q3, q0
vext.8 q1, q15, q2, #12 @ nnz shifted by one column
vorr q0, q0, q2 @ nnz on either side of the edge?
vorr q1, q1, q2
vmov.u8 q10, #1
vmin.u8 q0, q0, q10
vmin.u8 q1, q1, q10
vmin.u8 q8, q8, q10 @ mv ? 1 : 0
vmin.u8 q9, q9, q10
vadd.u8 q0, q0, q0 @ nnz ? 2 : 0
vadd.u8 q1, q1, q1
vmax.u8 q8, q8, q0 @ bs = max(nnz term, mv/ref term)
vmax.u8 q9, q9, q1
vzip.16 d16, d17
vst1.8 {q9}, [r3,:128], ip @ bs[1]
vtrn.8 d16, d17
vtrn.32 d16, d17
vst1.8 {q8}, [r3,:128] @ bs[0]
bx lr
endfunc
|
aestream/faery
| 3,492
|
src/mp4/x264/common/arm/cpu-a.S
|
/*****************************************************************************
* cpu-a.S: arm cpu detection
*****************************************************************************
* Copyright (C) 2009-2024 x264 project
*
* Authors: David Conrad <lessen42@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "asm.S"
.align 2
// done in gas because .fpu neon overrides the refusal to assemble
// instructions the selected -march/-mcpu doesn't support
// Probe for NEON: executes one NEON instruction; the caller catches
// SIGILL if the CPU lacks NEON.
function cpu_neon_test
vadd.i16 q0, q0, q0
bx lr
endfunc
// Enable the ARMv7 PMU cycle counter at full resolution.
// return: 0 on success
// 1 if counters were already enabled
// 9 if lo-res counters were already enabled
function cpu_enable_armv7_counter, export=0
mrc p15, 0, r2, c9, c12, 0 // read PMNC
ands r0, r2, #1 // bit 0: counters already on?
andne r0, r2, #9 // if so, also report the divider (bit 3)
orr r2, r2, #1 // enable counters
bic r2, r2, #8 // full resolution
mcreq p15, 0, r2, c9, c12, 0 // write PMNC (only if we weren't enabled)
mov r2, #1 << 31 // enable cycle counter
mcr p15, 0, r2, c9, c12, 1 // write CNTENS
bx lr
endfunc
// Turn the ARMv7 PMU counters back off (undo cpu_enable_armv7_counter).
function cpu_disable_armv7_counter, export=0
mrc p15, 0, r0, c9, c12, 0 // read PMNC
bic r0, r0, #1 // disable counters
mcr p15, 0, r0, c9, c12, 0 // write PMNC
bx lr
endfunc
// Read the PMU cycle counter (PMCCNTR) into register \r.
.macro READ_TIME r
mrc p15, 0, \r, c9, c13, 0
.endm
// Benchmark NEON->ARM register transfer latency using the cycle counter.
// return: 0 if transfers neon -> arm transfers take more than 10 cycles
// nonzero otherwise
// Requires user-mode access to the PMU (checked via PMUSERENR); returns
// 0 immediately if not available.
function cpu_fast_neon_mrc_test
// check for user access to performance counters
mrc p15, 0, r0, c9, c14, 0
cmp r0, #0
bxeq lr
push {r4-r6,lr}
bl cpu_enable_armv7_counter
ands r1, r0, #8 // was the 1/64 cycle divider active?
mov r3, #0 // r3 = accumulated cycles
mov ip, #4 // ip = valid samples still wanted
mov r6, #4 // r6 = max attempts
moveq r5, #1 // iterations per sample (scaled for divider)
movne r5, #64
average_loop:
mov r4, r5
READ_TIME r1
1: subs r4, r4, #1
.rept 8
vmov.u32 lr, d0[0] // the NEON->ARM transfer being measured
add lr, lr, lr // serialize on the transferred value
.endr
bgt 1b
READ_TIME r2
subs r6, r6, #1
sub r2, r2, r1 // elapsed cycles for this sample
cmpgt r2, #30 << 3 // assume context switch if it took over 30 cycles
addle r3, r3, r2 // keep only plausible samples
subsle ip, ip, #1
bgt average_loop
// disable counters if we enabled them
ands r0, r0, #1
bleq cpu_disable_armv7_counter
lsr r0, r3, #5 // average cycles per transfer
cmp r0, #10
movgt r0, #0
pop {r4-r6,pc}
endfunc
|
aestream/faery
| 5,886
|
src/mp4/x264/common/arm/asm.S
|
/*****************************************************************************
* asm.S: arm utility macros
*****************************************************************************
* Copyright (C) 2008-2024 x264 project
*
* Authors: Mans Rullgard <mans@mansr.com>
* David Conrad <lessen42@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "config.h"
.syntax unified
#ifdef __ELF__
.arch armv7-a
.fpu neon
#endif
#define GLUE(a, b) a ## b
#define JOIN(a, b) GLUE(a, b)
#ifdef PREFIX
# define BASE _x264_
# define SYM_PREFIX _
#else
# define BASE x264_
# define SYM_PREFIX
#endif
#ifdef BIT_DEPTH
# define EXTERN_ASM JOIN(JOIN(BASE, BIT_DEPTH), _)
#else
# define EXTERN_ASM BASE
#endif
#define X(s) JOIN(EXTERN_ASM, s)
#define X264(s) JOIN(BASE, s)
#define EXT(s) JOIN(SYM_PREFIX, s)
#ifdef __ELF__
# define ELF
#else
# define ELF @
#endif
#ifdef __MACH__
# define MACH
# define NONMACH @
#else
# define MACH @
# define NONMACH
#endif
#if HAVE_AS_FUNC
# define FUNC
#else
# define FUNC @
#endif
#if SYS_LINUX || SYS_OPENBSD
#define HAVE_SECTION_DATA_REL_RO 1
#else
#define HAVE_SECTION_DATA_REL_RO 0
#endif
@ Emit the EABI "requires 8-byte stack alignment" build attribute (Tag 24).
.macro require8, val=1
ELF .eabi_attribute 24, \val
.endm
@ Emit the EABI "preserves 8-byte stack alignment" build attribute (Tag 25).
.macro preserve8, val=1
ELF .eabi_attribute 25, \val
.endm
@ Declare a function. export=1 prefixes the symbol with EXTERN_ASM and
@ makes it global; export=0 keeps it file-internal. Defines a matching
@ one-shot endfunc macro that emits the ELF size/.endfunc bookkeeping.
.macro function name, export=1
.macro endfunc
.if \export
ELF .size EXTERN_ASM\name, . - EXTERN_ASM\name
.else
ELF .size \name, . - \name
.endif
FUNC .endfunc
.purgem endfunc
.endm
.text
.align 2
.if \export == 1
.global EXTERN_ASM\name
ELF .hidden EXTERN_ASM\name
ELF .type EXTERN_ASM\name, %function
FUNC .func EXTERN_ASM\name
EXTERN_ASM\name:
.else
ELF .hidden \name
ELF .type \name, %function
FUNC .func \name
\name:
.endif
.endm
@ Declare a read-only data object in .rodata (or .data.rel.ro when it
@ needs relocation and the platform supports that section). Defines a
@ matching one-shot endconst macro for the ELF size directive.
.macro const name, align=2, relocate=0
.macro endconst
ELF .size \name, . - \name
.purgem endconst
.endm
.if HAVE_SECTION_DATA_REL_RO && \relocate
.section .data.rel.ro
.else
NONMACH .section .rodata
MACH .const_data
.endif
.align \align
\name:
.endm
@ Load the address of \val into \rd, PC-relative under PIC, movw/movt on
@ ARMv6T2+, literal-pool load otherwise.
.macro movrel rd, val
#if defined(PIC)
ldr \rd, 1f
b 2f
1:
@ FIXME: thumb
.word \val - (2f + 8) @ offset from the add below (+8 = ARM pipeline)
2:
add \rd, \rd, pc
#elif HAVE_ARMV6T2
movw \rd, #:lower16:\val
movt \rd, #:upper16:\val
#else
ldr \rd, =\val
#endif
.endm
@ Like movrel but for symbols that may live in another module: goes
@ through the GOT on ELF PIC, a non-lazy pointer on Mach-O PIC.
@ \got is a scratch register (ELF PIC only).
.macro movrelx rd, val, got
#if defined(PIC) && defined(__ELF__)
ldr \got, 2f
ldr \rd, 1f
b 3f
1:
@ FIXME: thumb
.word \val(GOT)
2:
.word _GLOBAL_OFFSET_TABLE_ - (3f + 8)
3:
add \got, \got, pc
ldr \rd, [\got, \rd]
#elif defined(PIC) && defined(__APPLE__)
ldr \rd, 1f
b 2f
1:
@ FIXME: thumb
.word 3f - (2f + 8)
2:
ldr \rd, [pc, \rd]
.non_lazy_symbol_pointer
3:
.indirect_symbol \val
.word 0
.text
#else
movrel \rd, \val
#endif
.endm
@ Load the integer constant \val into \rd; skips the movt when the
@ constant fits in 16 bits.
.macro movconst rd, val
#if HAVE_ARMV6T2
movw \rd, #:lower16:\val
.if \val >> 16
movt \rd, #:upper16:\val
.endif
#else
ldr \rd, =\val
#endif
.endm
/* Row strides of the encode/decode pixel buffers used throughout x264. */
#define FENC_STRIDE 16
#define FDEC_STRIDE 32
@ Horizontal reduction: sum all u16 lanes of \a (plus \b if given) into
@ a single scalar in \dest.
.macro HORIZ_ADD dest, a, b
.ifnb \b
vadd.u16 \a, \a, \b
.endif
vpaddl.u16 \a, \a
vpaddl.u32 \dest, \a
.endm
@ Butterfly: \sum = a + b, \diff = a - b (s16 lanes).
.macro SUMSUB_AB sum, diff, a, b
vadd.s16 \sum, \a, \b
vsub.s16 \diff, \a, \b
.endm
@ Two butterflies at once.
.macro SUMSUB_ABCD s1, d1, s2, d2, a, b, c, d
SUMSUB_AB \s1, \d1, \a, \b
SUMSUB_AB \s2, \d2, \c, \d
.endm
@ In-place absolute value of two s16 vectors.
.macro ABS2 a b
vabs.s16 \a, \a
vabs.s16 \b, \b
.endm
// One Hadamard-transform step combined with a lane interleave.
// dist = distance in elements (0 for vertical pass, 1/2 for horizontal passes)
// op = sumsub/amax (sum and diff / maximum of absolutes)
// d1/2 = destination registers
// s1/2 = source registers
.macro HADAMARD dist, op, d1, d2, s1, s2
.if \dist == 1
vtrn.16 \s1, \s2
.else
vtrn.32 \s1, \s2
.endif
.ifc \op, sumsub
SUMSUB_AB \d1, \d2, \s1, \s2
.else
vabs.s16 \s1, \s1
vabs.s16 \s2, \s2
vmax.s16 \d1, \s1, \s2
.endif
.endm
@ In-register 8x8 byte-matrix transpose across eight q registers
@ (three rounds of 32/16/8-bit trn). Self-inverse.
.macro TRANSPOSE8x8 r0 r1 r2 r3 r4 r5 r6 r7
vtrn.32 \r0, \r4
vtrn.32 \r1, \r5
vtrn.32 \r2, \r6
vtrn.32 \r3, \r7
vtrn.16 \r0, \r2
vtrn.16 \r1, \r3
vtrn.16 \r4, \r6
vtrn.16 \r5, \r7
vtrn.8 \r0, \r1
vtrn.8 \r2, \r3
vtrn.8 \r4, \r5
vtrn.8 \r6, \r7
.endm
@ 4x4 byte transpose (two rounds of trn).
.macro TRANSPOSE4x4 r0 r1 r2 r3
vtrn.16 \r0, \r2
vtrn.16 \r1, \r3
vtrn.8 \r0, \r1
vtrn.8 \r2, \r3
.endm
@ 4x4 transpose of 16-bit elements.
.macro TRANSPOSE4x4_16 d0 d1 d2 d3
vtrn.32 \d0, \d2
vtrn.32 \d1, \d3
vtrn.16 \d0, \d1
vtrn.16 \d2, \d3
.endm
|
aestream/faery
| 22,113
|
src/mp4/x264/common/arm/predict-a.S
|
/*****************************************************************************
* predict.S: arm intra prediction
*****************************************************************************
* Copyright (C) 2009-2024 x264 project
*
* Authors: David Conrad <lessen42@gmail.com>
* Mans Rullgard <mans@mansr.com>
* Martin Storsjo <martin@martin.st>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "asm.S"
@ Ascending 16-bit weights 1..8, used for plane-prediction dot products.
const p16weight, align=4
.short 1,2,3,4,5,6,7,8
endconst
.text
@ Load a column of \n bytes (stride \rt) from [\rs] into the lanes of \rd.
@ hi selects which half when n < 8. \rs is advanced past the column.
.macro ldcol.8 rd, rs, rt, n=8, hi=0
.if \n == 8 || \hi == 0
vld1.8 {\rd[0]}, [\rs], \rt
vld1.8 {\rd[1]}, [\rs], \rt
vld1.8 {\rd[2]}, [\rs], \rt
vld1.8 {\rd[3]}, [\rs], \rt
.endif
.if \n == 8 || \hi == 1
vld1.8 {\rd[4]}, [\rs], \rt
vld1.8 {\rd[5]}, [\rs], \rt
vld1.8 {\rd[6]}, [\rs], \rt
vld1.8 {\rd[7]}, [\rs], \rt
.endif
.endm
@ Load a 16-byte column into \rd1 (rows 0-7) and \rd2 (rows 8-15),
@ using \ru as a second pointer to interleave the two halves' loads.
.macro ldcol.16 rd1, rd2, rs, rt, ru
add \ru, \rs, \rt, lsl #3
vld1.8 {\rd1[0]}, [\rs], \rt
vld1.8 {\rd2[0]}, [\ru], \rt
vld1.8 {\rd1[1]}, [\rs], \rt
vld1.8 {\rd2[1]}, [\ru], \rt
vld1.8 {\rd1[2]}, [\rs], \rt
vld1.8 {\rd2[2]}, [\ru], \rt
vld1.8 {\rd1[3]}, [\rs], \rt
vld1.8 {\rd2[3]}, [\ru], \rt
vld1.8 {\rd1[4]}, [\rs], \rt
vld1.8 {\rd2[4]}, [\ru], \rt
vld1.8 {\rd1[5]}, [\rs], \rt
vld1.8 {\rd2[5]}, [\ru], \rt
vld1.8 {\rd1[6]}, [\rs], \rt
vld1.8 {\rd2[6]}, [\ru], \rt
vld1.8 {\rd1[7]}, [\rs], \rt
vld1.8 {\rd2[7]}, [\ru], \rt
.endm
@ Widen-add two 8-byte vectors and horizontally reduce the u16 sums
@ into every lane of \dl.
.macro add16x8 dq, dl, dh, rl, rh
vaddl.u8 \dq, \rl, \rh
vadd.u16 \dl, \dl, \dh
vpadd.u16 \dl, \dl, \dl
vpadd.u16 \dl, \dl, \dl
.endm
// because gcc doesn't believe in using the free shift in add
// 4x4 horizontal prediction: replicate the left-neighbour byte of each
// row across the row. r0 = dst (FDEC_STRIDE apart, left pixels at -1).
function predict_4x4_h_armv6
ldrb r1, [r0, #0*FDEC_STRIDE-1]
ldrb r2, [r0, #1*FDEC_STRIDE-1]
ldrb r3, [r0, #2*FDEC_STRIDE-1]
ldrb ip, [r0, #3*FDEC_STRIDE-1]
add r1, r1, r1, lsl #8 @ splat byte -> halfword
add r2, r2, r2, lsl #8
add r3, r3, r3, lsl #8
add ip, ip, ip, lsl #8
add r1, r1, r1, lsl #16 @ halfword -> word
str r1, [r0, #0*FDEC_STRIDE]
add r2, r2, r2, lsl #16
str r2, [r0, #1*FDEC_STRIDE]
add r3, r3, r3, lsl #16
str r3, [r0, #2*FDEC_STRIDE]
add ip, ip, ip, lsl #16
str ip, [r0, #3*FDEC_STRIDE]
bx lr
endfunc
// 4x4 vertical prediction: copy the 4 top-neighbour bytes to every row.
function predict_4x4_v_armv6
ldr r1, [r0, #0 - 1 * FDEC_STRIDE]
str r1, [r0, #0 + 0 * FDEC_STRIDE]
str r1, [r0, #0 + 1 * FDEC_STRIDE]
str r1, [r0, #0 + 2 * FDEC_STRIDE]
str r1, [r0, #0 + 3 * FDEC_STRIDE]
bx lr
endfunc
// 4x4 DC prediction: fill with (sum of 4 top + 4 left neighbours + 4) >> 3.
function predict_4x4_dc_armv6
mov ip, #0
ldr r1, [r0, #-FDEC_STRIDE] @ 4 top bytes
ldrb r2, [r0, #0*FDEC_STRIDE-1]
ldrb r3, [r0, #1*FDEC_STRIDE-1]
usad8 r1, r1, ip @ sum the 4 top bytes
add r2, r2, #4 @ fold in the rounding term
ldrb ip, [r0, #2*FDEC_STRIDE-1]
add r2, r2, r3
ldrb r3, [r0, #3*FDEC_STRIDE-1]
add r2, r2, ip
add r2, r2, r3
add r1, r1, r2
lsr r1, r1, #3
add r1, r1, r1, lsl #8 @ splat dc byte across a word
add r1, r1, r1, lsl #16
str r1, [r0, #0*FDEC_STRIDE]
str r1, [r0, #1*FDEC_STRIDE]
str r1, [r0, #2*FDEC_STRIDE]
str r1, [r0, #3*FDEC_STRIDE]
bx lr
endfunc
// 4x4 DC-top prediction: fill with (sum of 4 top neighbours + 2) >> 2.
function predict_4x4_dc_top_neon
mov r12, #FDEC_STRIDE
sub r1, r0, #FDEC_STRIDE
vld1.32 d1[], [r1,:32] @ 4 top bytes
vpaddl.u8 d1, d1
vpadd.u16 d1, d1, d1 @ horizontal sum
vrshr.u16 d1, d1, #2 @ rounded average
vdup.8 d1, d1[0]
vst1.32 d1[0], [r0,:32], r12
vst1.32 d1[0], [r0,:32], r12
vst1.32 d1[0], [r0,:32], r12
vst1.32 d1[0], [r0,:32], r12
bx lr
endfunc
// SIMD-within-a-register lowpass on packed bytes using ARMv6 halving adds.
// return a1 = (a1+2*b1+c1+2)>>2 a2 = (a2+2*b2+c2+2)>>2
// \pb_1 must hold 0x01010101 (per-byte rounding correction).
.macro PRED4x4_LOWPASS a1 b1 c1 a2 b2 c2 pb_1
uhadd8 \a1, \a1, \c1
uhadd8 \a2, \a2, \c2
uhadd8 \c1, \a1, \b1
uhadd8 \c2, \a2, \b2
eor \a1, \a1, \b1
eor \a2, \a2, \b2
and \a1, \a1, \pb_1
and \a2, \a2, \pb_1
uadd8 \a1, \a1, \c1
uadd8 \a2, \a2, \c2
.endm
// 4x4 down-right diagonal prediction: lowpass the top/top-left/left
// neighbours packed into registers, then store shifted copies per row.
function predict_4x4_ddr_armv6
ldr r1, [r0, # -FDEC_STRIDE] @ 4 top bytes
ldrb r2, [r0, # -FDEC_STRIDE-1] @ top-left corner
ldrb r3, [r0, #0*FDEC_STRIDE-1] @ left column bytes
push {r4-r6,lr}
add r2, r2, r1, lsl #8 @ pack neighbours into overlapping words
ldrb r4, [r0, #1*FDEC_STRIDE-1]
add r3, r3, r2, lsl #8
ldrb r5, [r0, #2*FDEC_STRIDE-1]
ldrb r6, [r0, #3*FDEC_STRIDE-1]
add r4, r4, r3, lsl #8
add r5, r5, r4, lsl #8
add r6, r6, r5, lsl #8
ldr ip, =0x01010101 @ pb_1 for the lowpass rounding
PRED4x4_LOWPASS r1, r2, r3, r4, r5, r6, ip
str r1, [r0, #0*FDEC_STRIDE]
lsl r2, r1, #8 @ each row shifts the diagonal by one byte
lsl r3, r1, #16
lsl r4, r4, #8
lsl r5, r1, #24
add r2, r2, r4, lsr #24
str r2, [r0, #1*FDEC_STRIDE]
add r3, r3, r4, lsr #16
str r3, [r0, #2*FDEC_STRIDE]
add r5, r5, r4, lsr #8
str r5, [r0, #3*FDEC_STRIDE]
pop {r4-r6,pc}
endfunc
// 4x4 down-left diagonal prediction from the 8 top neighbours:
// lowpass (t[i] + 2*t[i+1] + t[i+2] + 2) >> 2, then store 4-byte
// windows shifted one byte per row.
function predict_4x4_ddl_neon
sub r0, #FDEC_STRIDE
mov ip, #FDEC_STRIDE
vld1.64 {d0}, [r0], ip @ 8 top bytes
vdup.8 d3, d0[7] @ replicate last byte past the edge
vext.8 d1, d0, d0, #1
vext.8 d2, d0, d3, #2
vhadd.u8 d0, d0, d2
vrhadd.u8 d0, d0, d1 @ lowpass result
vst1.32 {d0[0]}, [r0,:32], ip
vext.8 d1, d0, d0, #1
vext.8 d2, d0, d0, #2
vst1.32 {d1[0]}, [r0,:32], ip
vext.8 d3, d0, d0, #3
vst1.32 {d2[0]}, [r0,:32], ip
vst1.32 {d3[0]}, [r0,:32], ip
bx lr
endfunc
// 8x8 DC prediction. r0 = dst, r1 = edge array (left/topleft/top
// neighbours prepacked by the caller); fills with the rounded average
// of the 16 edge bytes.
function predict_8x8_dc_neon
mov ip, #0
ldrd r2, r3, [r1, #8]
push {r4-r5,lr}
ldrd r4, r5, [r1, #16]
lsl r3, r3, #8 @ drop the top-left corner byte from the sum
ldrb lr, [r1, #7]
usad8 r2, r2, ip @ byte sums via sum-of-absolute-differences vs 0
usad8 r3, r3, ip
usada8 r2, r4, ip, r2
add lr, lr, #8 @ rounding term
usada8 r3, r5, ip, r3
add r2, r2, lr
mov ip, #FDEC_STRIDE
add r2, r2, r3
lsr r2, r2, #4
vdup.8 d0, r2 @ splat dc value
.rept 8
vst1.64 {d0}, [r0,:64], ip
.endr
pop {r4-r5,pc}
endfunc
// 8x8 horizontal prediction: each row filled with its left neighbour,
// read from the packed edge array at r1 (stored bottom-to-top).
function predict_8x8_h_neon
add r1, r1, #7
mov ip, #FDEC_STRIDE
vld1.64 {d16}, [r1] @ 8 left-edge bytes
vdup.8 d0, d16[7]
vdup.8 d1, d16[6]
vst1.64 {d0}, [r0,:64], ip
vdup.8 d2, d16[5]
vst1.64 {d1}, [r0,:64], ip
vdup.8 d3, d16[4]
vst1.64 {d2}, [r0,:64], ip
vdup.8 d4, d16[3]
vst1.64 {d3}, [r0,:64], ip
vdup.8 d5, d16[2]
vst1.64 {d4}, [r0,:64], ip
vdup.8 d6, d16[1]
vst1.64 {d5}, [r0,:64], ip
vdup.8 d7, d16[0]
vst1.64 {d6}, [r0,:64], ip
vst1.64 {d7}, [r0,:64], ip
bx lr
endfunc
// 8x8 vertical prediction: copy the 8 top-edge bytes to every row.
function predict_8x8_v_neon
add r1, r1, #16 @ top edge lives at edge[16..23]
mov r12, #FDEC_STRIDE
vld1.8 {d0}, [r1,:64]
.rept 8
vst1.8 {d0}, [r0,:64], r12
.endr
bx lr
endfunc
// 8x8 down-left diagonal prediction: lowpass the 16 top bytes, then
// store 8-byte windows advancing one byte per row.
function predict_8x8_ddl_neon
add r1, #16 @ top edge at edge[16..31]
vld1.8 {d0, d1}, [r1,:128]
vmov.i8 q3, #0
vrev64.8 d2, d1
vext.8 q8, q3, q0, #15 @ shifted-left neighbour
vext.8 q2, q0, q1, #1 @ shifted-right neighbour
vhadd.u8 q8, q2
mov r12, #FDEC_STRIDE
vrhadd.u8 q0, q8 @ lowpass result
vext.8 d2, d0, d1, #1
vext.8 d3, d0, d1, #2
vst1.8 d2, [r0,:64], r12
vext.8 d2, d0, d1, #3
vst1.8 d3, [r0,:64], r12
vext.8 d3, d0, d1, #4
vst1.8 d2, [r0,:64], r12
vext.8 d2, d0, d1, #5
vst1.8 d3, [r0,:64], r12
vext.8 d3, d0, d1, #6
vst1.8 d2, [r0,:64], r12
vext.8 d2, d0, d1, #7
vst1.8 d3, [r0,:64], r12
vst1.8 d2, [r0,:64], r12
vst1.8 d1, [r0,:64], r12
bx lr
endfunc
// 8x8 down-right diagonal prediction from the 32-byte packed edge:
// lowpass, then store 8-byte windows bottom row first (negative stride).
function predict_8x8_ddr_neon
vld1.8 {d0-d3}, [r1,:128] @ whole edge: left+corner+top
vext.8 q2, q0, q1, #7
vext.8 q3, q0, q1, #9
vhadd.u8 q2, q2, q3
vrhadd.u8 d0, d1, d4 @ lowpass halves
vrhadd.u8 d1, d2, d5
add r0, #7*FDEC_STRIDE @ write bottom-up
mov r12, #-1*FDEC_STRIDE
vext.8 d2, d0, d1, #1
vst1.8 {d0}, [r0,:64], r12
vext.8 d4, d0, d1, #2
vst1.8 {d2}, [r0,:64], r12
vext.8 d5, d0, d1, #3
vst1.8 {d4}, [r0,:64], r12
vext.8 d4, d0, d1, #4
vst1.8 {d5}, [r0,:64], r12
vext.8 d5, d0, d1, #5
vst1.8 {d4}, [r0,:64], r12
vext.8 d4, d0, d1, #6
vst1.8 {d5}, [r0,:64], r12
vext.8 d5, d0, d1, #7
vst1.8 {d4}, [r0,:64], r12
vst1.8 {d5}, [r0,:64], r12
bx lr
endfunc
// 8x8 vertical-left prediction: even rows use the rounded 2-tap average
// of the top edge (q3), odd rows the 3-tap lowpass (q0), each shifted.
function predict_8x8_vl_neon
add r1, #16 @ top edge at edge[16..31]
mov r12, #FDEC_STRIDE
vld1.8 {d0, d1}, [r1,:128]
vext.8 q1, q1, q0, #15
vext.8 q2, q0, q2, #1
vrhadd.u8 q3, q0, q2 @ 2-tap averages
vhadd.u8 q1, q1, q2
vrhadd.u8 q0, q0, q1 @ 3-tap lowpass
vext.8 d2, d0, d1, #1
vst1.8 {d6}, [r0,:64], r12
vext.8 d3, d6, d7, #1
vst1.8 {d2}, [r0,:64], r12
vext.8 d2, d0, d1, #2
vst1.8 {d3}, [r0,:64], r12
vext.8 d3, d6, d7, #2
vst1.8 {d2}, [r0,:64], r12
vext.8 d2, d0, d1, #3
vst1.8 {d3}, [r0,:64], r12
vext.8 d3, d6, d7, #3
vst1.8 {d2}, [r0,:64], r12
vext.8 d2, d0, d1, #4
vst1.8 {d3}, [r0,:64], r12
vst1.8 {d2}, [r0,:64], r12
bx lr
endfunc
// 8x8 vertical-right prediction from the corner+top part of the edge:
// even rows from the 2-tap average, odd rows from the 3-tap lowpass,
// with left-column pixels spliced in via vuzp/vext.
function predict_8x8_vr_neon
add r1, #8 @ edge[8..23]: left tail + corner + top
mov r12, #FDEC_STRIDE
vld1.8 {d4,d5}, [r1,:64]
vext.8 q1, q2, q2, #14
vext.8 q0, q2, q2, #15
vhadd.u8 q3, q2, q1
vrhadd.u8 q2, q2, q0 @ 2-tap averages
vrhadd.u8 q0, q0, q3 @ 3-tap lowpass
vmov d2, d0
vst1.8 {d5}, [r0,:64], r12
vuzp.8 d2, d0 @ separate left-column values
vst1.8 {d1}, [r0,:64], r12
vext.8 d6, d0, d5, #7
vext.8 d3, d2, d1, #7
vst1.8 {d6}, [r0,:64], r12
vst1.8 {d3}, [r0,:64], r12
vext.8 d6, d0, d5, #6
vext.8 d3, d2, d1, #6
vst1.8 {d6}, [r0,:64], r12
vst1.8 {d3}, [r0,:64], r12
vext.8 d6, d0, d5, #5
vext.8 d3, d2, d1, #5
vst1.8 {d6}, [r0,:64], r12
vst1.8 {d3}, [r0,:64], r12
bx lr
endfunc
@ predict_8x8_hd_neon( pixel *src, pixel edge[36] )
@ 8x8 intra prediction, horizontal-down mode. Builds the averaged and
@ filtered edge values, interleaves them with vzip into the diagonal
@ source row, then emits 8 rows, each shifted by 2 bytes from the last.
function predict_8x8_hd_neon
mov r12, #FDEC_STRIDE
add r1, #7
vld1.8 {d2,d3}, [r1]
vext.8 q3, q1, q1, #1
vext.8 q2, q1, q1, #2
@ q8 = avg(edge, edge+1); q0 = 3-tap filtered edge
vrhadd.u8 q8, q1, q3
vhadd.u8 q1, q2
vrhadd.u8 q0, q1, q3
@ interleave averages and filtered values into the diagonal
vzip.8 d16, d0
vext.8 d2, d0, d1, #6
vext.8 d3, d0, d1, #4
vst1.8 {d2}, [r0,:64], r12
vext.8 d2, d0, d1, #2
vst1.8 {d3}, [r0,:64], r12
vst1.8 {d2}, [r0,:64], r12
vext.8 d2, d16, d0, #6
vst1.8 {d0}, [r0,:64], r12
vext.8 d3, d16, d0, #4
vst1.8 {d2}, [r0,:64], r12
vext.8 d2, d16, d0, #2
vst1.8 {d3}, [r0,:64], r12
vst1.8 {d2}, [r0,:64], r12
vst1.8 {d16}, [r0,:64], r12
bx lr
endfunc
@ predict_8x8_hu_neon( pixel *src, pixel edge[36] )
@ 8x8 intra prediction, horizontal-up mode. Reverses the left edge,
@ filters it, interleaves averages/filtered values with vzip, pads the
@ tail with the last value (vdup of d1[3]) and stores 8 shifted rows.
function predict_8x8_hu_neon
mov r12, #FDEC_STRIDE
add r1, #7
vld1.8 {d7}, [r1]
@ d6 = replicated first edge byte, used to pad past the end
vdup.8 d6, d7[0]
vrev64.8 d7, d7
vext.8 d4, d7, d6, #2
vext.8 d2, d7, d6, #1
vhadd.u8 d16, d7, d4
vrhadd.u8 d0, d2, d7
vrhadd.u8 d1, d16, d2
vzip.8 d0, d1
@ q1 = replicated final predictor, fills rows past the edge
vdup.16 q1, d1[3]
vext.8 q2, q0, q1, #2
vext.8 q3, q0, q1, #4
vext.8 q8, q0, q1, #6
vst1.8 {d0}, [r0,:64], r12
vst1.8 {d4}, [r0,:64], r12
vst1.8 {d6}, [r0,:64], r12
vst1.8 {d16}, [r0,:64], r12
vst1.8 {d1}, [r0,:64], r12
vst1.8 {d5}, [r0,:64], r12
vst1.8 {d7}, [r0,:64], r12
vst1.8 {d17}, [r0,:64]
bx lr
endfunc
@ 8x8 chroma DC prediction, three variants sharing the store tail
@ pred8x8_dc_end (kept in one unit because the first two branch into
@ the third). Each variant computes per-quadrant DC values in d0/d1,
@ then the tail stores 4+4 rows via two interleaved pointers.
@ predict_8x8c_dc_top_neon: DC from the top row only.
function predict_8x8c_dc_top_neon
sub r2, r0, #FDEC_STRIDE
mov r1, #FDEC_STRIDE
vld1.8 {d0}, [r2,:64]
@ sum 4+4 top pixels, round-shift by 2 -> two half-row DCs
vpaddl.u8 d0, d0
vpadd.u16 d0, d0, d0
vrshrn.u16 d0, q0, #2
vdup.8 d1, d0[1]
vdup.8 d0, d0[0]
vtrn.32 d0, d1
b pred8x8_dc_end
endfunc
@ predict_8x8c_dc_left_neon: DC from the left column only.
function predict_8x8c_dc_left_neon
mov r1, #FDEC_STRIDE
sub r2, r0, #1
ldcol.8 d0, r2, r1
vpaddl.u8 d0, d0
vpadd.u16 d0, d0, d0
vrshrn.u16 d0, q0, #2
vdup.8 d1, d0[1]
vdup.8 d0, d0[0]
b pred8x8_dc_end
endfunc
@ predict_8x8c_dc_neon: full DC from top row and left column, with the
@ standard per-quadrant combinations (corner quadrants use both sums,
@ shift #3; edge quadrants use one sum, shift #2).
function predict_8x8c_dc_neon
sub r2, r0, #FDEC_STRIDE
mov r1, #FDEC_STRIDE
vld1.8 {d0}, [r2,:64]
sub r2, r0, #1
ldcol.8 d1, r2, r1
vtrn.32 d0, d1
vpaddl.u8 q0, q0
vpadd.u16 d0, d0, d1
vpadd.u16 d1, d0, d0
vrshrn.u16 d2, q0, #3
vrshrn.u16 d3, q0, #2
vdup.8 d0, d2[4]
vdup.8 d1, d3[3]
vdup.8 d4, d3[2]
vdup.8 d5, d2[5]
vtrn.32 q0, q2
@ shared tail: d0 = top 4 rows' predictor, d1 = bottom 4 rows'
pred8x8_dc_end:
add r2, r0, r1, lsl #2
.rept 4
vst1.8 {d0}, [r0,:64], r1
vst1.8 {d1}, [r2,:64], r1
.endr
bx lr
endfunc
@ predict_8x8c_h_neon( pixel *src )
@ 8x8 chroma horizontal prediction: each row is the pixel to its left
@ (loaded with a lane-replicating vld1) broadcast across the row.
function predict_8x8c_h_neon
sub r1, r0, #1
mov ip, #FDEC_STRIDE
.rept 4
vld1.8 {d0[]}, [r1], ip
vld1.8 {d2[]}, [r1], ip
vst1.64 {d0}, [r0,:64], ip
vst1.64 {d2}, [r0,:64], ip
.endr
bx lr
endfunc
@ predict_8x8c_v_neon( pixel *src )
@ 8x8 chroma vertical prediction: copy the row above into all 8 rows.
function predict_8x8c_v_neon
sub r0, r0, #FDEC_STRIDE
mov ip, #FDEC_STRIDE
vld1.64 {d0}, [r0,:64], ip
.rept 8
vst1.64 {d0}, [r0,:64], ip
.endr
bx lr
endfunc
@ predict_8x8c_p_neon( pixel *src )
@ 8x8 chroma planar prediction. Computes the H/V gradients from the
@ top row and left column weighted by p16weight, derives the plane
@ parameters (a, b, c in H.264 terms), then generates 8 rows by
@ accumulating the per-row step (q3) onto the per-column ramp (q1)
@ and saturating with vqshrun.
function predict_8x8c_p_neon
sub r3, r0, #FDEC_STRIDE
mov r1, #FDEC_STRIDE
add r2, r3, #4
sub r3, r3, #1
vld1.32 {d0[0]}, [r3]
vld1.32 {d2[0]}, [r2,:32], r1
ldcol.8 d0, r3, r1, 4, hi=1
add r3, r3, r1
ldcol.8 d3, r3, r1, 4
vaddl.u8 q8, d2, d3
vrev32.8 d0, d0
vtrn.32 d2, d3
vsubl.u8 q2, d2, d0
movrel r3, p16weight
vld1.16 {q0}, [r3,:128]
@ weighted sums of the border differences -> H and V gradients
vmul.s16 d4, d4, d0
vmul.s16 d5, d5, d0
vpadd.i16 d4, d4, d5
vpaddl.s16 d4, d4
vshl.i32 d5, d4, #4
vadd.s32 d4, d4, d5
@ b = (17*grad + 16) >> 5
vrshrn.s32 d4, q2, #5
mov r3, #0
vtrn.16 d4, d5
vadd.i16 d2, d4, d5
vshl.i16 d3, d2, #2
vrev64.16 d16, d16
vsub.i16 d3, d3, d2
vadd.i16 d16, d16, d0
vshl.i16 d2, d16, #4
@ d2 = i00, the top-left plane sample before rounding
vsub.i16 d2, d2, d3
vext.16 q0, q0, q0, #7
vmov.16 d0[0], r3
vmul.i16 q0, q0, d4[0]
vdup.16 q1, d2[0]
vdup.16 q3, d5[0]
vadd.i16 q1, q1, q0
mov r3, #8
1:
@ per-row: clamp to [0,255] with >>5, then step by c (q3)
vqshrun.s16 d0, q1, #5
vadd.i16 q1, q1, q3
vst1.8 {d0}, [r0,:64], r1
subs r3, r3, #1
bne 1b
bx lr
endfunc
@ predict_8x16c_dc_top_neon( pixel *src )
@ 8x16 chroma DC-top prediction: two 4-pixel DCs from the top row
@ (d0 left half, d1 right half), replicated over all 16 rows, written
@ in two 8-row batches through two interleaved pointers.
function predict_8x16c_dc_top_neon
sub r2, r0, #FDEC_STRIDE
mov r1, #FDEC_STRIDE
vld1.8 {d0}, [r2,:64]
vpaddl.u8 d0, d0
vpadd.u16 d0, d0, d0
vrshrn.u16 d0, q0, #2
vdup.8 d1, d0[1]
vdup.8 d0, d0[0]
vtrn.32 d0, d1
add r2, r0, r1, lsl #2
.rept 4
vst1.8 {d0}, [r0,:64], r1
vst1.8 {d1}, [r2,:64], r1
.endr
@ advance both pointers by 4 rows and repeat for the bottom half
add r2, r2, r1, lsl #2
add r0, r0, r1, lsl #2
.rept 4
vst1.8 {d0}, [r0,:64], r1
vst1.8 {d1}, [r2,:64], r1
.endr
bx lr
endfunc
@ predict_8x16c_h_neon( pixel *src )
@ 8x16 chroma horizontal prediction: broadcast each left-column pixel
@ across its row; 16 rows, two per loop iteration.
function predict_8x16c_h_neon
sub r1, r0, #1
mov ip, #FDEC_STRIDE
.rept 8
vld1.8 {d0[]}, [r1], ip
vld1.8 {d2[]}, [r1], ip
vst1.64 {d0}, [r0,:64], ip
vst1.64 {d2}, [r0,:64], ip
.endr
bx lr
endfunc
@ predict_8x16c_p_neon( pixel *src )
@ 8x16 chroma planar prediction. Same scheme as the 8x8 version but
@ with separate horizontal (H, from the 8-wide top row) and vertical
@ (V, from the 16-tall left column) gradients, different scaling
@ (b from H>>5, c from 5*V>>6), and a 16-row output loop.
function predict_8x16c_p_neon
sub r3, r0, #FDEC_STRIDE
mov r1, #FDEC_STRIDE
add r2, r3, #4
sub r3, r3, #1
vld1.32 {d0[0]}, [r3]
vld1.32 {d2[0]}, [r2,:32], r1
ldcol.8 d1, r3, r1
add r3, r3, r1
ldcol.8 d3, r3, r1
vrev64.32 d16, d3
vaddl.u8 q8, d2, d16
vrev32.8 d0, d0
vsubl.u8 q2, d2, d0
vrev64.8 d1, d1
vsubl.u8 q3, d3, d1
movrel r3, p16weight
vld1.16 {q0}, [r3,:128]
vmul.s16 d4, d4, d0
vmul.s16 q3, q3, q0
vpadd.i16 d4, d4, d5
vpadd.i16 d6, d6, d7
vpaddl.s16 d4, d4 @ d4[0] = H
vpaddl.s16 d6, d6
vpadd.s32 d6, d6 @ d6[0] = V
vshl.i32 d5, d4, #4
vadd.s32 d4, d4, d5 @ d4[0] = 17*H
vshl.i32 d7, d6, #2
vrshrn.s32 d4, q2, #5 @ d4[0] = b
vadd.s32 d6, d6, d7 @ d6[0] = 5*V
vrshrn.s32 d6, q3, #6 @ d6[0] = c
mov r3, #0
vshl.i16 d3, d4, #2
vsub.i16 d3, d3, d4 @ d2[0] = 3 * b
vshl.i16 d2, d6, #3
vadd.i16 d3, d3, d2 @ d2[0] = 3 * b + 8 * c
vsub.i16 d3, d3, d6 @ d2[0] = 3 * b + 7 * c
vrev64.16 d16, d16
vadd.i16 d16, d16, d0 @ d16[0] = src[]+src[] + 1
vshl.i16 d2, d16, #4 @ d3[0] = a + 16
vsub.i16 d2, d2, d3 @ i00
vext.16 q0, q0, q0, #7
vmov.16 d0[0], r3
vmul.i16 q0, q0, d4[0]
vdup.16 q1, d2[0]
vdup.16 q3, d6[0]
vadd.i16 q1, q1, q0
mov r3, #16
1:
@ emit one row, then step the ramp by c for the next row
vqshrun.s16 d0, q1, #5
vadd.i16 q1, q1, q3
vst1.8 {d0}, [r0,:64], r1
subs r3, r3, #1
bne 1b
bx lr
endfunc
@ 16x16 luma DC prediction, three variants sharing the pred16x16_dc_end
@ store tail (one unit because of the internal branches).
@ predict_16x16_dc_top_neon: DC from the 16 top pixels only.
function predict_16x16_dc_top_neon
sub r2, r0, #FDEC_STRIDE
mov r1, #FDEC_STRIDE
vld1.8 {q0}, [r2,:128]
add16x8 q0, d0, d1, d0, d1
vrshrn.u16 d0, q0, #4
vdup.8 q0, d0[0]
b pred16x16_dc_end
endfunc
@ predict_16x16_dc_left_neon: DC from the 16 left-column pixels only.
function predict_16x16_dc_left_neon
mov r1, #FDEC_STRIDE
sub r2, r0, #1
ldcol.8 d0, r2, r1
ldcol.8 d1, r2, r1
add16x8 q0, d0, d1, d0, d1
vrshrn.u16 d0, q0, #4
vdup.8 q0, d0[0]
b pred16x16_dc_end
endfunc
@ predict_16x16_dc_neon: DC from top row (NEON horizontal sum) plus
@ left column (scalar ldrb accumulation in ip), rounded by >>5.
function predict_16x16_dc_neon
sub r3, r0, #FDEC_STRIDE
sub r0, r0, #1
vld1.64 {d0-d1}, [r3,:128]
ldrb ip, [r0], #FDEC_STRIDE
vaddl.u8 q0, d0, d1
ldrb r1, [r0], #FDEC_STRIDE
vadd.u16 d0, d0, d1
vpadd.u16 d0, d0, d0
vpadd.u16 d0, d0, d0
@ accumulate the 16 left-column pixels, 3 loads per unrolled step
.rept 4
ldrb r2, [r0], #FDEC_STRIDE
add ip, ip, r1
ldrb r3, [r0], #FDEC_STRIDE
add ip, ip, r2
ldrb r1, [r0], #FDEC_STRIDE
add ip, ip, r3
.endr
ldrb r2, [r0], #FDEC_STRIDE
add ip, ip, r1
ldrb r3, [r0], #FDEC_STRIDE
add ip, ip, r2
sub r0, r0, #FDEC_STRIDE*16
add ip, ip, r3
vdup.16 d1, ip
vadd.u16 d0, d0, d1
mov r1, #FDEC_STRIDE
add r0, r0, #1
vrshr.u16 d0, d0, #5
vdup.8 q0, d0[0]
@ shared tail: fill all 16 rows with the replicated DC value in q0
pred16x16_dc_end:
.rept 16
vst1.64 {d0-d1}, [r0,:128], r1
.endr
bx lr
endfunc
@ predict_16x16_h_neon( pixel *src )
@ 16x16 horizontal prediction: broadcast each left-column pixel into a
@ full 16-byte row (d0 duplicated into d1), two rows per iteration.
function predict_16x16_h_neon
sub r1, r0, #1
mov ip, #FDEC_STRIDE
.rept 8
vld1.8 {d0[]}, [r1], ip
vmov d1, d0
vld1.8 {d2[]}, [r1], ip
vmov d3, d2
vst1.64 {d0-d1}, [r0,:128], ip
vst1.64 {d2-d3}, [r0,:128], ip
.endr
bx lr
endfunc
@ predict_16x16_v_neon( pixel *src )
@ 16x16 vertical prediction: copy the row above into all 16 rows.
function predict_16x16_v_neon
sub r0, r0, #FDEC_STRIDE
mov ip, #FDEC_STRIDE
vld1.64 {d0-d1}, [r0,:128], ip
.rept 16
vst1.64 {d0-d1}, [r0,:128], ip
.endr
bx lr
endfunc
@ predict_16x16_p_neon( pixel *src )
@ 16x16 planar prediction. Computes H/V gradients from the 16-wide
@ borders weighted by p16weight, derives plane parameters, then
@ generates each 16-byte row as two 8-lane halves (q1 stepped by 8*b
@ via q2 within a row, by the row increment q3 between rows).
function predict_16x16_p_neon
sub r3, r0, #FDEC_STRIDE
mov r1, #FDEC_STRIDE
add r2, r3, #8
sub r3, r3, #1
vld1.8 {d0}, [r3]
vld1.8 {d2}, [r2,:64], r1
ldcol.8 d1, r3, r1
add r3, r3, r1
ldcol.8 d3, r3, r1
vrev64.8 q0, q0
vaddl.u8 q8, d2, d3
vsubl.u8 q2, d2, d0
vsubl.u8 q3, d3, d1
movrel r3, p16weight
vld1.8 {q0}, [r3,:128]
vmul.s16 q2, q2, q0
vmul.s16 q3, q3, q0
vadd.i16 d4, d4, d5
vadd.i16 d5, d6, d7
vpadd.i16 d4, d4, d5
vpadd.i16 d4, d4, d4
@ (grad*4 + grad) then rounded >>6 -> b and c together in d4
vshll.s16 q3, d4, #2
vaddw.s16 q2, q3, d4
vrshrn.s32 d4, q2, #6
mov r3, #0
vtrn.16 d4, d5
vadd.i16 d2, d4, d5
vshl.i16 d3, d2, #3
vrev64.16 d16, d17
vsub.i16 d3, d3, d2
vadd.i16 d16, d16, d0
vshl.i16 d2, d16, #4
@ d2 = i00 starting value for the top-left sample
vsub.i16 d2, d2, d3
vshl.i16 d3, d4, #4
vext.16 q0, q0, q0, #7
vsub.i16 d6, d5, d3
vmov.16 d0[0], r3
vmul.i16 q0, q0, d4[0]
vdup.16 q1, d2[0]
vdup.16 q2, d4[0]
vdup.16 q3, d6[0]
@ q2 = 8*b (step between row halves); q3 = row-to-row step
vshl.i16 q2, q2, #3
vadd.i16 q1, q1, q0
vadd.i16 q3, q3, q2
mov r3, #16
1:
vqshrun.s16 d0, q1, #5
vadd.i16 q1, q1, q2
vqshrun.s16 d1, q1, #5
vadd.i16 q1, q1, q3
vst1.8 {q0}, [r0,:128], r1
subs r3, r3, #1
bne 1b
bx lr
endfunc
@ ==== aestream/faery: src/mp4/x264/common/arm/quant-a.S (15,171 bytes) ====
/****************************************************************************
* quant.S: arm quantization and level-run
*****************************************************************************
* Copyright (C) 2009-2024 x264 project
*
* Authors: David Conrad <lessen42@gmail.com>
* Janne Grunau <janne-x264@jannau.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "asm.S"
@ Bit-selection tables used to compress NEON comparison masks into
@ scalar bitmasks (one bit / two bits per element) for the decimate
@ and coeff_last routines below.
const pmovmskb_byte, align=4
.byte 1,2,4,8,16,32,64,128
.byte 1,2,4,8,16,32,64,128
endconst
const mask_2bit, align=4
.byte 3,12,48,192,3,12,48,192
.byte 3,12,48,192,3,12,48,192
endconst
const mask_1bit, align=4
.byte 128,64,32,16,8,4,2,1
.byte 128,64,32,16,8,4,2,1
endconst
.text
@ QUANT_TWO: quantize 16 coefficients. Inputs: q8/q9 = abs(dct),
@ q14/q15 = original signed dct (for sign restore). Computes
@ (abs+bias)*mf >> 16, re-applies the sign via the arithmetic-shift /
@ xor / sub trick, ORs the results into \mask for the nonzero check,
@ and stores the quantized block at [r0]. Optionally loads mf inline.
.macro QUANT_TWO bias0 bias1 mf0 mf1 mf2 mf3 mask load_mf=no
vadd.u16 q8, q8, \bias0
vadd.u16 q9, q9, \bias1
.ifc \load_mf, yes
vld1.64 {\mf0-\mf3}, [r1,:128]!
.endif
vmull.u16 q10, d16, \mf0
vmull.u16 q11, d17, \mf1
vmull.u16 q12, d18, \mf2
vmull.u16 q13, d19, \mf3
@ q14/q15 -> all-ones lanes where the original coeff was negative
vshr.s16 q14, q14, #15
vshr.s16 q15, q15, #15
vshrn.u32 d16, q10, #16
vshrn.u32 d17, q11, #16
vshrn.u32 d18, q12, #16
vshrn.u32 d19, q13, #16
@ (x ^ sign) - sign == copysign for two's complement
veor q8, q8, q14
veor q9, q9, q15
vsub.s16 q8, q8, q14
vsub.s16 q9, q9, q15
vorr \mask, q8, q9
vst1.64 {d16-d19}, [r0,:128]!
.endm
@ QUANT_END: collapse the nonzero mask in \d to r0 = 0/1.
.macro QUANT_END d
vmov r2, r3, \d
orrs r0, r2, r3
movne r0, #1
bx lr
.endm
// quant_2x2_dc( int16_t dct[4], int mf, int bias )
// Quantize the 2x2 chroma DC block in place with a single scalar
// mf/bias pair; returns nonzero iff any quantized coeff is nonzero.
function quant_2x2_dc_neon
vld1.64 {d0}, [r0,:64]
vabs.s16 d3, d0
vdup.16 d2, r2
vdup.16 d1, r1
vadd.u16 d3, d3, d2
vmull.u16 q3, d3, d1
@ sign mask from the original coefficients
vshr.s16 d0, d0, #15
vshrn.u32 d3, q3, #16
veor d3, d3, d0
vsub.s16 d3, d3, d0
vst1.64 {d3}, [r0,:64]
QUANT_END d3
endfunc
// quant_4x4_dc( int16_t dct[16], int mf, int bias )
// Quantize a 4x4 DC block in place using scalar mf/bias broadcast to
// all lanes; returns nonzero iff any quantized coeff is nonzero.
function quant_4x4_dc_neon
vld1.64 {d28-d31}, [r0,:128]
vabs.s16 q8, q14
vabs.s16 q9, q15
vdup.16 q0, r2
vdup.16 q2, r1
QUANT_TWO q0, q0, d4, d5, d4, d5, q0
vorr d0, d0, d1
QUANT_END d0
endfunc
// quant_4x4( int16_t dct[16], uint16_t mf[16], uint16_t bias[16] )
// Quantize one 4x4 block in place with per-coefficient mf/bias
// tables; returns nonzero iff any quantized coeff is nonzero.
function quant_4x4_neon
vld1.64 {d28-d31}, [r0,:128]
vabs.s16 q8, q14
vabs.s16 q9, q15
vld1.64 {d0-d3}, [r2,:128]
vld1.64 {d4-d7}, [r1,:128]
QUANT_TWO q0, q1, d4, d5, d6, d7, q0
vorr d0, d0, d1
QUANT_END d0
endfunc
// quant_4x4x4( int16_t dct[4][16], uint16_t mf[16], uint16_t bias[16] )
// Quantize four consecutive 4x4 blocks with a shared mf/bias table.
// Returns a 4-bit mask in r0, bit i set iff block i has a nonzero
// coefficient (nonzero masks accumulate in q4-q7, hence the vpush).
function quant_4x4x4_neon
vpush {d8-d15}
vld1.64 {d28-d31}, [r0,:128]
vabs.s16 q8, q14
vabs.s16 q9, q15
vld1.64 {d0-d3}, [r2,:128]
vld1.64 {d4-d7}, [r1,:128]
QUANT_TWO q0, q1, d4, d5, d6, d7, q4
vld1.64 {d28-d31}, [r0,:128]
vabs.s16 q8, q14
vabs.s16 q9, q15
QUANT_TWO q0, q1, d4, d5, d6, d7, q5
vld1.64 {d28-d31}, [r0,:128]
vabs.s16 q8, q14
vabs.s16 q9, q15
QUANT_TWO q0, q1, d4, d5, d6, d7, q6
vld1.64 {d28-d31}, [r0,:128]
vabs.s16 q8, q14
vabs.s16 q9, q15
QUANT_TWO q0, q1, d4, d5, d6, d7, q7
@ fold each block's mask and build the 4-bit result
vorr d8, d8, d9
vorr d10, d10, d11
vorr d12, d12, d13
vorr d14, d14, d15
vmov r0, r1, d8
vmov r2, r3, d10
orrs r0, r1
movne r0, #1
orrs r2, r3
orrne r0, #2
vmov r1, r2, d12
vmov r3, ip, d14
orrs r1, r2
orrne r0, #4
orrs r3, ip
orrne r0, #8
vpop {d8-d15}
bx lr
endfunc
// quant_8x8( int16_t dct[64], uint16_t mf[64], uint16_t bias[64] )
// Quantize an 8x8 block in place, 16 coefficients per QUANT_TWO pass,
// with per-coefficient mf/bias tables streamed from r1/r2.
// Returns nonzero iff any quantized coeff is nonzero.
function quant_8x8_neon
vld1.64 {d28-d31}, [r0,:128]
vabs.s16 q8, q14
vabs.s16 q9, q15
vld1.64 {d0-d3}, [r2,:128]!
vld1.64 {d4-d7}, [r1,:128]!
QUANT_TWO q0, q1, d4, d5, d6, d7, q0
.rept 3
vld1.64 {d28-d31}, [r0,:128]
vabs.s16 q8, q14
vabs.s16 q9, q15
vld1.64 {d2-d5}, [r2,:128]!
QUANT_TWO q1, q2, d4, d5, d6, d7, q1, yes
vorr q0, q0, q1
.endr
vorr d0, d0, d1
QUANT_END d0
endfunc
@ DEQUANT_START: split i_qp (r2) into i_qbits = qp/6 (r3) and
@ i_mf = qp%6, using the multiply-by-0x2b>>8 reciprocal trick.
@ Leaves r1 pointing at dequant_mf[i_mf] (or loads the DC scalar),
@ and sets flags from r3 - \offset so the caller can pick the
@ left-shift vs right-shift path.
.macro DEQUANT_START mf_size offset dc=no
mov r3, #0x2b
mul r3, r3, r2
lsr r3, r3, #8 // i_qbits = i_qp / 6
add ip, r3, r3, lsl #1
sub r2, r2, ip, lsl #1 // i_mf = i_qp % 6
.ifc \dc,no
add r1, r1, r2, lsl #\mf_size // dequant_mf[i_mf]
.else
ldr r1, [r1, r2, lsl #\mf_size] // dequant_mf[i_mf][0][0]
.endif
subs r3, r3, #\offset // 6 for 8x8
.endm
// dequant_4x4( int16_t dct[16], int dequant_mf[6][16], int i_qp )
// DEQUANT: generates dequant_4x4_neon / dequant_8x8_neon. Two paths:
// for qp large enough, multiply then left-shift by i_qbits; otherwise
// multiply-accumulate into a rounding constant (1 << (-qbits-1)) and
// arithmetic right-shift via vshl with a negative count in q15.
// The 8x8 variant loops 4 times (r2 counts 16-coeff chunks).
.macro DEQUANT size bits
function dequant_\size\()_neon
DEQUANT_START \bits+2, \bits
.ifc \size, 8x8
mov r2, #4
.endif
blt dequant_\size\()_rshift
@ left-shift path: qbits >= 0
vdup.16 q15, r3
dequant_\size\()_lshift_loop:
.ifc \size, 8x8
subs r2, r2, #1
.endif
vld1.32 {d16-d17}, [r1,:128]!
vld1.32 {d18-d19}, [r1,:128]!
vmovn.s32 d4, q8
vld1.32 {d20-d21}, [r1,:128]!
vmovn.s32 d5, q9
vld1.32 {d22-d23}, [r1,:128]!
vmovn.s32 d6, q10
vld1.16 {d0-d3}, [r0,:128]
vmovn.s32 d7, q11
vmul.s16 q0, q0, q2
vmul.s16 q1, q1, q3
vshl.s16 q0, q0, q15
vshl.s16 q1, q1, q15
vst1.16 {d0-d3}, [r0,:128]!
.ifc \size, 8x8
bgt dequant_\size\()_lshift_loop
.endif
bx lr
dequant_\size\()_rshift:
@ right-shift path: q15 holds the (negative) shift, ip the rounding
@ constant 1 << (-qbits - 1) used to seed the accumulators
vdup.32 q15, r3
rsb r3, r3, #0
mov ip, #1
sub r3, r3, #1
lsl ip, ip, r3
.ifc \size, 8x8
dequant_\size\()_rshift_loop:
subs r2, r2, #1
.endif
vdup.32 q10, ip
vld1.32 {d16-d17}, [r1,:128]!
vdup.32 q11, ip
vld1.32 {d18-d19}, [r1,:128]!
vmovn.s32 d4, q8
vld1.32 {d16-d17}, [r1,:128]!
vmovn.s32 d5, q9
vld1.32 {d18-d19}, [r1,:128]!
vmovn.s32 d6, q8
vld1.16 {d0-d3}, [r0,:128]
vmovn.s32 d7, q9
vdup.32 q12, ip
vdup.32 q13, ip
vmlal.s16 q10, d0, d4
vmlal.s16 q11, d1, d5
vmlal.s16 q12, d2, d6
vmlal.s16 q13, d3, d7
vshl.s32 q10, q10, q15
vshl.s32 q11, q11, q15
vshl.s32 q12, q12, q15
vshl.s32 q13, q13, q15
vmovn.s32 d0, q10
vmovn.s32 d1, q11
vmovn.s32 d2, q12
vmovn.s32 d3, q13
vst1.16 {d0-d3}, [r0,:128]!
.ifc \size, 8x8
bgt dequant_\size\()_rshift_loop
.endif
bx lr
endfunc
.endm
DEQUANT 4x4, 4
DEQUANT 8x8, 6
// dequant_4x4_dc( int16_t dct[16], int dequant_mf[6][16], int i_qp )
// Dequantize a 4x4 DC block in place with the single scalar
// dequant_mf[i_mf][0][0] (loaded into r1 by DEQUANT_START dc=yes).
// Same two qbits paths as DEQUANT: shift the scalar up front when
// qbits >= 0, otherwise mlal into a rounding constant and shift down.
function dequant_4x4_dc_neon
DEQUANT_START 6, 6, yes
blt dequant_4x4_dc_rshift
@ qbits >= 0: fold the shift into the scalar multiplier
lsl r1, r1, r3
vdup.16 q2, r1
vld1.16 {d0-d3}, [r0,:128]
vdup.16 q15, r3
vmul.s16 q0, q0, q2
vmul.s16 q1, q1, q2
vst1.16 {d0-d3}, [r0,:128]
bx lr
dequant_4x4_dc_rshift:
@ qbits < 0: accumulate into rounding constant, then arithmetic
@ right-shift via vshl with negative count
vdup.16 d4, r1
vdup.32 q15, r3
rsb r3, r3, #0
mov ip, #1
sub r3, r3, #1
lsl ip, ip, r3
vdup.32 q10, ip
vdup.32 q11, ip
vld1.16 {d0-d3}, [r0,:128]
vdup.32 q12, ip
vdup.32 q13, ip
vmlal.s16 q10, d0, d4
vmlal.s16 q11, d1, d4
vmlal.s16 q12, d2, d4
vmlal.s16 q13, d3, d4
vshl.s32 q10, q10, q15
vshl.s32 q11, q11, q15
vshl.s32 q12, q12, q15
vshl.s32 q13, q13, q15
vmovn.s32 d0, q10
vmovn.s32 d1, q11
vmovn.s32 d2, q12
vmovn.s32 d3, q13
vst1.16 {d0-d3}, [r0,:128]
bx lr
endfunc
@ decimate_score_1x: generates decimate_score15/16_neon.
@ int decimate_score( dct_t *dct ) -- returns 9 early if any |coeff|>1,
@ otherwise sums decimate_table4[run-length] over the nonzero coeffs.
@ The zero/nonzero pattern is packed 2 bits per coeff via mask_2bit,
@ then scanned with clz after rbit (bit-reversed so clz walks forward).
.macro decimate_score_1x size
function decimate_score\size\()_neon
vld1.16 {q0, q1}, [r0, :128]
movrel r3, mask_2bit
vmov.s8 q3, #0x01
vqmovn.s16 d0, q0
vqmovn.s16 d1, q1
vqabs.s8 q2, q0
vld1.8 {q8}, [r3, :128]
vceq.s8 q1, q0, #0
vcgt.s8 q2, q2, q3
vand.u8 q1, q1, q8
vshrn.u16 d4, q2, #4
vpadd.u8 d2, d2, d3
vpadd.u8 d4, d4, d4
vpadd.u8 d2, d2, d2
vmov.32 r2, d4[0]
vmov.32 r1, d2[0]
cmp r2, #0
beq 0f
@ some |coeff| > 1: not decimatable
mov r0, #9
bx lr
0:
mvns r1, r1
mov r0, #0
bxeq lr
.ifc \size, 15
lsr r1, r1, #2
.endif
rbit r1, r1
movrelx r3, X264(decimate_table4), r2
1:
@ count leading zero-pairs = run length before next nonzero coeff
clz r2, r1
lsl r1, r1, r2
lsr r12, r2, #1
ldrb r2, [r3, r12]
lsls r1, r1, #2
add r0, r0, r2
bne 1b
bx lr
endfunc
.endm
decimate_score_1x 15
decimate_score_1x 16
@ int decimate_score64( dct_t dct[64] )
@ 8x8 variant of the decimate score: returns 9 early if any |coeff|>1,
@ otherwise scans a 64-bit zero/nonzero bitmask (built 1 bit per coeff
@ via mask_1bit) split across r1 (low word) and r12 (high word),
@ summing decimate_table8[run-length]. lr tracks the bit position
@ carried from the low-word scan into the high-word scan.
function decimate_score64_neon
push {lr}
vld1.16 {q8, q9}, [r0, :128]!
vld1.16 {q10, q11}, [r0, :128]!
vld1.16 {q12, q13}, [r0, :128]!
vld1.16 {q14, q15}, [r0, :128]
movrel r3, mask_1bit
vmov.s8 q3, #0x01
@ narrow to s8; note the swapped d-register order reverses bytes
@ to match the bit layout of mask_1bit
vqmovn.s16 d17, q8
vqmovn.s16 d16, q9
vqmovn.s16 d19, q10
vqmovn.s16 d18, q11
vqmovn.s16 d21, q12
vqmovn.s16 d20, q13
vqmovn.s16 d23, q14
vqmovn.s16 d22, q15
vqabs.s8 q12, q8
vqabs.s8 q13, q9
vqabs.s8 q14, q10
vqabs.s8 q15, q11
vld1.8 {q2}, [r3, :128]
vceq.s8 q8, q8, #0
vceq.s8 q9, q9, #0
vceq.s8 q10, q10, #0
vceq.s8 q11, q11, #0
vmax.s8 q12, q12, q13
vmax.s8 q14, q14, q15
vand.u8 q8, q8, q2
vand.u8 q9, q9, q2
vand.u8 q10, q10, q2
vand.u8 q11, q11, q2
vmax.s8 q12, q12, q14
vpadd.u8 d18, d18, d19
vpadd.u8 d19, d16, d17
vcgt.s8 q12, q12, q3
vpadd.u8 d22, d22, d23
vpadd.u8 d23, d20, d21
vshrn.u16 d24, q12, #4
vpadd.u8 d16, d22, d23
vpadd.u8 d17, d18, d19
vpadd.u8 d24, d24, d24
vpadd.u8 d16, d16, d17
vmov.32 r2, d24[0]
vmov r12, r1, d16
cmp r2, #0
beq 0f
@ some |coeff| > 1: not decimatable
mov r0, #9
pop {pc}
0:
mvns r1, r1
mvn r12, r12
mov r0, #0
mov lr, #32
movrelx r3, X264(decimate_table8), r2
beq 2f
1:
@ scan runs in the low 32 bits; lr = bits remaining
clz r2, r1
lsl r1, r1, r2
sub lr, lr, r2
ldrb r2, [r3, r2]
lsls r1, r1, #1
sub lr, lr, #1
add r0, r0, r2
bne 1b
2:
@ continue into the high 32 bits, offsetting runs by leftover lr
cmp r12, #0
popeq {pc}
clz r2, r12
lsl r1, r12, r2
add r2, r2, lr
ldrb r2, [r3, r2]
lsls r1, r1, #1
add r0, r0, r2
popeq {pc}
3:
clz r2, r1
lsl r1, r1, r2
ldrb r2, [r3, r2]
lsls r1, r1, #1
add r0, r0, r2
bne 3b
pop {pc}
endfunc
// int coeff_last( int16_t *l )
// coeff_last4: index of the last nonzero coeff among 4 int16 values,
// scalar ARM version. Checks the upper pair first, then the upper
// halfword of whichever word is live.
function coeff_last4_arm
ldrd r2, r3, [r0]
subs r0, r3, #0
movne r0, #2
movne r2, r3
lsrs r2, r2, #16
addne r0, r0, #1
bx lr
endfunc
@ int coeff_last8( int16_t *l )
@ Index of the last nonzero coeff among 8 int16 values, scalar ARM
@ version: test the upper 4 coeffs first, fall back to the lower 4.
function coeff_last8_arm
ldrd r2, r3, [r0, #8]
orrs ip, r2, r3
movne r0, #4
ldrdeq r2, r3, [r0]
moveq r0, #0
tst r3, r3
addne r0, #2
movne r2, r3
lsrs r2, r2, #16
addne r0, r0, #1
bx lr
endfunc
@ COEFF_LAST_1x: generates coeff_last15/16_neon.
@ Builds a per-coeff nonzero mask with vtst, narrows it twice to one
@ nibble per coeff, then uses vclz on the packed 64-bit mask to find
@ the last nonzero index. The 15-coeff variant starts one coeff early
@ (r0 -= 2) so the same 16-lane scan applies.
.macro COEFF_LAST_1x size
function coeff_last\size\()_neon
.if \size == 15
sub r0, r0, #2
.endif
vld1.64 {d0-d3}, [r0,:128]
vtst.16 q0, q0
vtst.16 q1, q1
vshrn.u16 d0, q0, #8
vshrn.u16 d1, q1, #8
vshrn.u16 d0, q0, #4
vclz.i32 d0, d0
mov ip, #7
mov r3, #\size - 9
vmov r0, r1, d0
@ convert leading-zero counts (4 bits per coeff) into the index
subs r1, ip, r1, lsr #2
addge r0, r1, #\size - 8
subslt r0, r3, r0, lsr #2
movlt r0, #0
bx lr
endfunc
.endm
COEFF_LAST_1x 15
COEFF_LAST_1x 16
@ int coeff_last64( int16_t *l )
@ Last nonzero index among 64 coeffs. Saturating-narrows each 16-coeff
@ block to bytes, builds nonzero masks with vtst, compresses them to
@ one bit per coeff via the pmovmskb_byte table and pairwise adds,
@ then locates the top set bit with vclz on the packed 64-bit mask.
function coeff_last64_neon
vld1.64 {d16-d19}, [r0,:128]!
vqmovn.u16 d16, q8
vqmovn.u16 d17, q9
vld1.64 {d20-d23}, [r0,:128]!
vqmovn.u16 d18, q10
vqmovn.u16 d19, q11
vld1.64 {d24-d27}, [r0,:128]!
vqmovn.u16 d20, q12
vqmovn.u16 d21, q13
vld1.64 {d28-d31}, [r0,:128]!
vqmovn.u16 d22, q14
vqmovn.u16 d23, q15
movrel r1, pmovmskb_byte
vld1.64 {d0-d1}, [r1,:128]
vtst.8 q8, q8
vtst.8 q9, q9
vtst.8 q10, q10
vtst.8 q11, q11
vand q8, q8, q0
vand q9, q9, q0
vand q10, q10, q0
vand q11, q11, q0
vpadd.u8 d0, d16, d17
vpadd.u8 d1, d18, d19
vpadd.u8 d2, d20, d21
vpadd.u8 d3, d22, d23
vpadd.u8 d0, d0, d1
vpadd.u8 d1, d2, d3
vpadd.u8 d0, d0, d1
vclz.i32 d0, d0
mov ip, #31
vmov r0, r1, d0
@ translate clz results from the two 32-bit halves into the index
subs r1, ip, r1
addge r0, r1, #32
subslt r0, ip, r0
movlt r0, #0
bx lr
endfunc
@ denoise_dct( dct_t *dct, uint32_t *sum, udctcoef *offset, int size )
@ For each coeff: accumulate |coeff| into sum[], subtract offset[]
@ from |coeff| with unsigned saturation (clamping at 0), and restore
@ the original sign via vbsl on the sign mask. Processes 16 coeffs
@ per iteration; r3 is the remaining count.
function denoise_dct_neon
1: subs r3, r3, #16
vld1.16 {q0, q1}, [r0]
vld1.32 {q12, q13}, [r1]!
vld1.32 {q14, q15}, [r1]
@ rewind r1 so the updated sums overwrite what was just read
sub r1, #32
vabs.s16 q8, q0
vabs.s16 q9, q1
vld1.16 {q2, q3}, [r2]!
vclt.s16 q10, q0, #0
vclt.s16 q11, q1, #0
vaddw.u16 q12, q12, d16
vaddw.u16 q13, q13, d17
vqsub.u16 q0, q8, q2
vqsub.u16 q1, q9, q3
vaddw.u16 q14, q14, d18
vaddw.u16 q15, q15, d19
vneg.s16 q8, q0
vneg.s16 q9, q1
@ select negated value where the original coeff was negative
vbsl q10, q8, q0
vbsl q11, q9, q1
vst1.32 {q12, q13}, [r1]!
vst1.32 {q14, q15}, [r1]!
vst1.16 {q10, q11}, [r0]!
bgt 1b
bx lr
endfunc
@ ==== aestream/faery: src/mp4/x264/common/arm/pixel-a.S (40,408 bytes) ====
/*****************************************************************************
* pixel.S: arm pixel metrics
*****************************************************************************
* Copyright (C) 2009-2024 x264 project
*
* Authors: David Conrad <lessen42@gmail.com>
* Janne Grunau <janne-x264@jannau.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "asm.S"
@ mask_array: 16 x 0xff followed by 16 x 0x00; mask_ff points at the
@ boundary so that negative offsets select a run of 0xff bytes.
@ mask_ac4/mask_ac8 zero out the DC term for SATD-AC style sums.
const mask_array, align=4
.rept 16
.byte 0xff
.endr
mask_ff:
.rept 16
.byte 0
.endr
endconst
const mask_ac4, align=4
.short 0, -1, -1, -1, 0, -1, -1, -1
endconst
const mask_ac8, align=4
.short 0, -1, -1, -1, -1, -1, -1, -1
endconst
.text
@ SAD4_ARMV6: generates pixel_sad_4xH for ARMv6 using usad8/usada8
@ (SIMD byte-wise absolute-difference accumulate on 32-bit words).
@ r0/r1 = pix1/stride1, r2/r3 = pix2/stride2; result in r0.
.macro SAD4_ARMV6 h
function pixel_sad_4x\h\()_armv6
push {r4-r6,lr}
ldr r4, [r2], r3
ldr r5, [r0], r1
ldr r6, [r2], r3
ldr lr, [r0], r1
usad8 ip, r4, r5
.rept (\h - 2)/2
ldr r4, [r2], r3
ldr r5, [r0], r1
usada8 ip, r6, lr, ip
ldr r6, [r2], r3
ldr lr, [r0], r1
usada8 ip, r4, r5, ip
.endr
usada8 r0, r6, lr, ip
pop {r4-r6,pc}
endfunc
.endm
SAD4_ARMV6 4
SAD4_ARMV6 8
@ Per-width SAD row macros: the START_* variants initialize the
@ accumulator (q8[, q9]) with vabdl, the plain variants accumulate
@ with vabal. \align is an optional alignment qualifier appended to
@ the second-source address (e.g. ",:64" for aligned loads).
.macro SAD_START_4 align:vararg
vld1.32 {d1[]}, [r2\align], r3
vld1.32 {d0[]}, [r0,:32], r1
vabdl.u8 q8, d0, d1
.endm
.macro SAD_4 align:vararg
vld1.32 {d1[]}, [r2\align], r3
vld1.32 {d0[]}, [r0,:32], r1
vabal.u8 q8, d0, d1
.endm
.macro SAD_START_8 align:vararg
vld1.64 {d1}, [r2\align], r3
vld1.64 {d0}, [r0,:64], r1
vabdl.u8 q8, d0, d1
.endm
.macro SAD_8 align:vararg
vld1.64 {d1}, [r2\align], r3
vld1.64 {d0}, [r0,:64], r1
vabal.u8 q8, d0, d1
.endm
@ 16-wide variants process two rows per invocation (software
@ pipelined: loads for the next row overlap the current vabal).
.macro SAD_START_16 align:vararg
vld1.64 {d2-d3}, [r2\align], r3
vld1.64 {d0-d1}, [r0,:128], r1
vabdl.u8 q8, d0, d2
vld1.64 {d6-d7}, [r2\align], r3
vabdl.u8 q9, d1, d3
vld1.64 {d4-d5}, [r0,:128], r1
.endm
.macro SAD_16 align:vararg
vabal.u8 q8, d4, d6
vld1.64 {d2-d3}, [r2\align], r3
vabal.u8 q9, d5, d7
vld1.64 {d0-d1}, [r0,:128], r1
vabal.u8 q8, d0, d2
vld1.64 {d6-d7}, [r2\align], r3
vabal.u8 q9, d1, d3
vld1.64 {d4-d5}, [r0,:128], r1
.endm
@ SAD_FUNC: generates pixel_sad[_aligned]_WxH_neon from the row
@ macros above, then horizontally reduces the u16 accumulator to a
@ scalar SAD in r0. The 16-wide path finishes the pipelined final
@ row pair before reducing.
.macro SAD_FUNC w, h, name, align:vararg
function pixel_sad\name\()_\w\()x\h\()_neon
SAD_START_\w \align
.if \w == 16
.rept \h / 2 - 1
SAD_\w \align
.endr
.else
.rept \h - 1
SAD_\w \align
.endr
.endif
.if \w > 8
vabal.u8 q8, d4, d6
vabal.u8 q9, d5, d7
vadd.u16 q8, q8, q9
.endif
.if \w > 4
vadd.u16 d16, d16, d17
.endif
vpadd.u16 d0, d16, d16
vpaddl.u16 d0, d0
vmov.u32 r0, d0[0]
bx lr
endfunc
.endm
SAD_FUNC 4, 4
SAD_FUNC 4, 8
SAD_FUNC 8, 4
SAD_FUNC 8, 8
SAD_FUNC 8, 16
SAD_FUNC 16, 8
SAD_FUNC 16, 16
SAD_FUNC 4, 4, _aligned, ,:32
SAD_FUNC 4, 8, _aligned, ,:32
SAD_FUNC 8, 4, _aligned, ,:64
SAD_FUNC 8, 8, _aligned, ,:64
SAD_FUNC 8, 16, _aligned, ,:64
SAD_FUNC 16, 8, _aligned, ,:128
SAD_FUNC 16, 16, _aligned, ,:128
// If dual issue is possible, use additional accumulators to avoid
// stalls from vadal's latency. This only matters for aligned.
@ SAD_DUAL_*: like SAD_START_*/SAD_* but with two (8-wide) or four
@ (16-wide) independent accumulator q-registers, two rows per call.
.macro SAD_DUAL_START_8
SAD_START_8 ,:64
vld1.64 {d3}, [r2,:64], r3
vld1.64 {d2}, [r0,:64], r1
vabdl.u8 q9, d2, d3
.endm
.macro SAD_DUAL_8 align:vararg
vld1.64 {d1}, [r2,:64], r3
vld1.64 {d0}, [r0,:64], r1
vabal.u8 q8, d0, d1
vld1.64 {d3}, [r2,:64], r3
vld1.64 {d2}, [r0,:64], r1
vabal.u8 q9, d2, d3
.endm
.macro SAD_DUAL_START_16
SAD_START_16 ,:128
vabdl.u8 q10, d4, d6
vld1.64 {d2-d3}, [r2,:128], r3
vabdl.u8 q11, d5, d7
vld1.64 {d0-d1}, [r0,:128], r1
.endm
.macro SAD_DUAL_16
vabal.u8 q8, d0, d2
vld1.64 {d6-d7}, [r2,:128], r3
vabal.u8 q9, d1, d3
vld1.64 {d4-d5}, [r0,:128], r1
vabal.u8 q10, d4, d6
vld1.64 {d2-d3}, [r2,:128], r3
vabal.u8 q11, d5, d7
vld1.64 {d0-d1}, [r0,:128], r1
.endm
@ final iteration: same as SAD_DUAL_16 but without the next-row loads
.macro SAD_DUAL_END_16
vabal.u8 q8, d0, d2
vld1.64 {d6-d7}, [r2,:128], r3
vabal.u8 q9, d1, d3
vld1.64 {d4-d5}, [r0,:128], r1
vabal.u8 q10, d4, d6
vabal.u8 q11, d5, d7
.endm
@ SAD_FUNC_DUAL: generates pixel_sad_aligned_WxH_neon_dual using the
@ multi-accumulator SAD_DUAL_* macros, then folds the accumulators
@ and reduces to a scalar SAD in r0.
.macro SAD_FUNC_DUAL w, h
function pixel_sad_aligned_\w\()x\h\()_neon_dual
SAD_DUAL_START_\w
.rept \h / 2 - \w / 8
SAD_DUAL_\w
.endr
.if \w > 8
SAD_DUAL_END_16
vadd.u16 q8, q8, q9
vadd.u16 q9, q10, q11
.endif
.if \w > 4
vadd.u16 q8, q8, q9
vadd.u16 d16, d16, d17
.endif
vpadd.u16 d0, d16, d16
vpaddl.u16 d0, d0
vmov.u32 r0, d0[0]
bx lr
endfunc
.endm
SAD_FUNC_DUAL 8, 4
SAD_FUNC_DUAL 8, 8
SAD_FUNC_DUAL 8, 16
SAD_FUNC_DUAL 16, 8
SAD_FUNC_DUAL 16, 16
@ SAD_X_*: one fenc row (r0, stride lr = FENC_STRIDE) compared
@ against 3 or 4 reference rows (r1/r2/r3[/r12], shared stride r6).
@ START_* variants initialize the per-reference accumulators
@ (q8..q11, plus q12..q15 for 16-wide), plain variants accumulate.
.macro SAD_X_START_4 x
vld1.32 {d0[]}, [r0,:32], lr
vld1.32 {d1[]}, [r1], r6
vabdl.u8 q8, d1, d0
vld1.32 {d2[]}, [r2], r6
vabdl.u8 q9, d2, d0
vld1.32 {d3[]}, [r3], r6
vabdl.u8 q10, d3, d0
.if \x == 4
vld1.32 {d4[]}, [r12], r6
vabdl.u8 q11, d4, d0
.endif
.endm
.macro SAD_X_4 x
vld1.32 {d0[]}, [r0,:32], lr
vld1.32 {d1[]}, [r1], r6
vabal.u8 q8, d1, d0
vld1.32 {d2[]}, [r2], r6
vabal.u8 q9, d2, d0
vld1.32 {d3[]}, [r3], r6
vabal.u8 q10, d3, d0
.if \x == 4
vld1.32 {d4[]}, [r12], r6
vabal.u8 q11, d4, d0
.endif
.endm
.macro SAD_X_START_8 x
vld1.64 {d0}, [r0,:64], lr
vld1.64 {d1}, [r1], r6
vabdl.u8 q8, d1, d0
vld1.64 {d2}, [r2], r6
vabdl.u8 q9, d2, d0
vld1.64 {d3}, [r3], r6
vabdl.u8 q10, d3, d0
.if \x == 4
vld1.64 {d4}, [r12], r6
vabdl.u8 q11, d4, d0
.endif
.endm
.macro SAD_X_8 x
vld1.64 {d0}, [r0,:64], lr
vld1.64 {d1}, [r1], r6
vabal.u8 q8, d1, d0
vld1.64 {d2}, [r2], r6
vabal.u8 q9, d2, d0
vld1.64 {d3}, [r3], r6
vabal.u8 q10, d3, d0
.if \x == 4
vld1.64 {d4}, [r12], r6
vabal.u8 q11, d4, d0
.endif
.endm
.macro SAD_X_START_16 x
vld1.64 {d0-d1}, [r0,:128], lr
vld1.64 {d2-d3}, [r1], r6
vabdl.u8 q8, d2, d0
vabdl.u8 q12, d3, d1
vld1.64 {d4-d5}, [r2], r6
vabdl.u8 q9, d4, d0
vabdl.u8 q13, d5, d1
vld1.64 {d6-d7}, [r3], r6
vabdl.u8 q10, d6, d0
vabdl.u8 q14, d7, d1
.if \x == 4
vld1.64 {d2-d3}, [r12], r6
vabdl.u8 q11, d2, d0
vabdl.u8 q15, d3, d1
.endif
.endm
.macro SAD_X_16 x
vld1.64 {d0-d1}, [r0,:128], lr
vld1.64 {d2-d3}, [r1], r6
vabal.u8 q8, d2, d0
vabal.u8 q12, d3, d1
vld1.64 {d4-d5}, [r2], r6
vabal.u8 q9, d4, d0
vabal.u8 q13, d5, d1
vld1.64 {d6-d7}, [r3], r6
vabal.u8 q10, d6, d0
vabal.u8 q14, d7, d1
.if \x == 4
vld1.64 {d2-d3}, [r12], r6
vabal.u8 q11, d2, d0
vabal.u8 q15, d3, d1
.endif
.endm
@ SAD_X_FUNC: generates pixel_sad_x3/x4_WxH_neon. Stack args supply
@ the extra reference pointer (x4), the shared stride (r6) and the
@ output array pointer (r7). Reduces each reference's accumulator
@ and stores 3 or 4 int32 SADs to [r7].
.macro SAD_X_FUNC x, w, h
function pixel_sad_x\x\()_\w\()x\h\()_neon
push {r6-r7,lr}
.if \x == 3
ldrd r6, r7, [sp, #12]
.else
ldrd r6, r7, [sp, #16]
ldr r12, [sp, #12]
.endif
mov lr, #FENC_STRIDE
SAD_X_START_\w \x
.rept \h - 1
SAD_X_\w \x
.endr
// add up the sads
.if \w > 8
vadd.u16 q8, q8, q12
vadd.u16 q9, q9, q13
vadd.u16 q10, q10, q14
.if \x == 4
vadd.u16 q11, q11, q15
.endif
.endif
.if \w > 4
vadd.u16 d16, d16, d17
vadd.u16 d18, d18, d19
vadd.u16 d20, d20, d21
.if \x == 4
vadd.u16 d22, d22, d23
.endif
.endif
vpadd.u16 d0, d16, d18
vpadd.u16 d1, d20, d22
vpaddl.u16 q0, q0
.if \x == 3
vst1.32 {d0}, [r7]!
vst1.32 {d1[0]}, [r7,:32]
.else
vst1.32 {d0-d1}, [r7]
.endif
pop {r6-r7,pc}
endfunc
.endm
SAD_X_FUNC 3, 4, 4
SAD_X_FUNC 3, 4, 8
SAD_X_FUNC 3, 8, 4
SAD_X_FUNC 3, 8, 8
SAD_X_FUNC 3, 8, 16
SAD_X_FUNC 3, 16, 8
SAD_X_FUNC 3, 16, 16
SAD_X_FUNC 4, 4, 4
SAD_X_FUNC 4, 4, 8
SAD_X_FUNC 4, 8, 4
SAD_X_FUNC 4, 8, 8
SAD_X_FUNC 4, 8, 16
SAD_X_FUNC 4, 16, 8
SAD_X_FUNC 4, 16, 16
@ int pixel_vsad( pixel *src, intptr_t stride, int height )
@ Vertical SAD: sum of |row[i] - row[i+1]| over a 16-wide column of
@ r2 rows. Processes two row-pairs per loop iteration, ping-ponging
@ q0/q1 as "previous row"; HORIZ_ADD reduces the accumulator.
function pixel_vsad_neon
subs r2, r2, #2
vld1.8 {q0}, [r0], r1
vld1.8 {q1}, [r0], r1
vabdl.u8 q2, d0, d2
vabdl.u8 q3, d1, d3
ble 2f
1:
subs r2, r2, #2
vld1.8 {q0}, [r0], r1
vabal.u8 q2, d2, d0
vabal.u8 q3, d3, d1
vld1.8 {q1}, [r0], r1
blt 2f
vabal.u8 q2, d0, d2
vabal.u8 q3, d1, d3
bgt 1b
2:
vadd.u16 q0, q2, q3
HORIZ_ADD d0, d0, d1
vmov.32 r0, d0[0]
bx lr
endfunc
@ int pixel_asd8( pixel *pix1, intptr_t stride1,
@                 pixel *pix2, intptr_t stride2, int height )
@ Absolute sum of (signed) differences over an 8-wide column:
@ accumulates the signed per-pixel differences across all rows, then
@ takes |total|. Height comes from the stack ([sp, #0]).
function pixel_asd8_neon
ldr r12, [sp, #0]
sub r12, r12, #2
vld1.8 {d0}, [r0], r1
vld1.8 {d1}, [r2], r3
vld1.8 {d2}, [r0], r1
vld1.8 {d3}, [r2], r3
vsubl.u8 q8, d0, d1
1:
subs r12, r12, #2
vld1.8 {d4}, [r0], r1
vld1.8 {d5}, [r2], r3
vsubl.u8 q9, d2, d3
vsubl.u8 q10, d4, d5
vadd.s16 q8, q9
vld1.8 {d2}, [r0], r1
vld1.8 {d3}, [r2], r3
vadd.s16 q8, q10
bgt 1b
vsubl.u8 q9, d2, d3
vadd.s16 q8, q9
@ horizontal reduce, then absolute value of the signed total
vpaddl.s16 q8, q8
vpadd.s32 d16, d16, d17
vpadd.s32 d16, d16, d17
vabs.s32 d16, d16
vmov.32 r0, d16[0]
bx lr
endfunc
@ Per-width SSD row macros: START_* initializes the s32 accumulator
@ q0 with vmull of the first row's differences, the plain variants
@ vmlal-accumulate while preloading the next row, END_* finishes the
@ pipelined final row without further loads.
.macro SSD_START_4
vld1.32 {d16[]}, [r0,:32], r1
vld1.32 {d17[]}, [r2,:32], r3
vsubl.u8 q2, d16, d17
vld1.32 {d16[]}, [r0,:32], r1
vmull.s16 q0, d4, d4
vld1.32 {d17[]}, [r2,:32], r3
.endm
.macro SSD_4
vsubl.u8 q2, d16, d17
vld1.32 {d16[]}, [r0,:32], r1
vmlal.s16 q0, d4, d4
vld1.32 {d17[]}, [r2,:32], r3
.endm
.macro SSD_END_4
vsubl.u8 q2, d16, d17
vmlal.s16 q0, d4, d4
.endm
.macro SSD_START_8
vld1.64 {d16}, [r0,:64], r1
vld1.64 {d17}, [r2,:64], r3
vsubl.u8 q2, d16, d17
vld1.64 {d16}, [r0,:64], r1
vmull.s16 q0, d4, d4
vmlal.s16 q0, d5, d5
vld1.64 {d17}, [r2,:64], r3
.endm
.macro SSD_8
vsubl.u8 q2, d16, d17
vld1.64 {d16}, [r0,:64], r1
vmlal.s16 q0, d4, d4
vmlal.s16 q0, d5, d5
vld1.64 {d17}, [r2,:64], r3
.endm
.macro SSD_END_8
vsubl.u8 q2, d16, d17
vmlal.s16 q0, d4, d4
vmlal.s16 q0, d5, d5
.endm
.macro SSD_START_16
vld1.64 {d16-d17}, [r0,:128], r1
vld1.64 {d18-d19}, [r2,:128], r3
vsubl.u8 q2, d16, d18
vsubl.u8 q3, d17, d19
vld1.64 {d16-d17}, [r0,:128], r1
vmull.s16 q0, d4, d4
vmlal.s16 q0, d5, d5
vld1.64 {d18-d19}, [r2,:128], r3
vmlal.s16 q0, d6, d6
vmlal.s16 q0, d7, d7
.endm
.macro SSD_16
vsubl.u8 q2, d16, d18
vsubl.u8 q3, d17, d19
vld1.64 {d16-d17}, [r0,:128], r1
vmlal.s16 q0, d4, d4
vmlal.s16 q0, d5, d5
vld1.64 {d18-d19}, [r2,:128], r3
vmlal.s16 q0, d6, d6
vmlal.s16 q0, d7, d7
.endm
.macro SSD_END_16
vsubl.u8 q2, d16, d18
vsubl.u8 q3, d17, d19
vmlal.s16 q0, d4, d4
vmlal.s16 q0, d5, d5
vmlal.s16 q0, d6, d6
vmlal.s16 q0, d7, d7
.endm
@ SSD_FUNC: generates pixel_ssd_WxH_neon from the row macros above;
@ reduces the s32 accumulator to a scalar sum of squared differences
@ in r0.
.macro SSD_FUNC w h
function pixel_ssd_\w\()x\h\()_neon
SSD_START_\w
.rept \h-2
SSD_\w
.endr
SSD_END_\w
vadd.s32 d0, d0, d1
vpadd.s32 d0, d0, d0
vmov.32 r0, d0[0]
bx lr
endfunc
.endm
SSD_FUNC 4, 4
SSD_FUNC 4, 8
SSD_FUNC 8, 4
SSD_FUNC 8, 8
SSD_FUNC 8, 16
SSD_FUNC 16, 8
SSD_FUNC 16, 16
@ pixel_ssd_nv12_core( pixel *pix1, intptr_t i_pix1,
@                      pixel *pix2, intptr_t i_pix2,
@                      int i_width, int i_height,
@                      uint64_t *ssd_u, uint64_t *ssd_v )
@ SSD over interleaved NV12 chroma: vld2 deinterleaves U/V, squared
@ differences accumulate per plane (q14 = U, q15 = V as s32, widened
@ into u64 accumulators q8/q9 per row). Width is processed 32 bytes
@ (16 U + 16 V samples) at a time; r12 counts remaining width.
function pixel_ssd_nv12_core_neon
push {r4-r5}
ldrd r4, r5, [sp, #8]
add r12, r4, #8
bic r12, r12, #15
vmov.u64 q8, #0
vmov.u64 q9, #0
@ adjust strides for the bytes consumed per row (2 bytes/sample)
sub r1, r1, r12, lsl #1
sub r3, r3, r12, lsl #1
1:
subs r12, r4, #16
vld2.8 {d0,d1}, [r0]!
vld2.8 {d2,d3}, [r2]!
vld2.8 {d4,d5}, [r0]!
vld2.8 {d6,d7}, [r2]!
vsubl.u8 q10, d0, d2
vsubl.u8 q11, d1, d3
vmull.s16 q14, d20, d20
vmull.s16 q15, d22, d22
vsubl.u8 q12, d4, d6
vsubl.u8 q13, d5, d7
vmlal.s16 q14, d21, d21
vmlal.s16 q15, d23, d23
blt 4f
beq 3f
2:
vmlal.s16 q14, d24, d24
vmlal.s16 q15, d26, d26
vld2.8 {d0,d1}, [r0]!
vld2.8 {d2,d3}, [r2]!
vmlal.s16 q14, d25, d25
vmlal.s16 q15, d27, d27
subs r12, r12, #16
vsubl.u8 q10, d0, d2
vsubl.u8 q11, d1, d3
vmlal.s16 q14, d20, d20
vmlal.s16 q15, d22, d22
vld2.8 {d4,d5}, [r0]!
vld2.8 {d6,d7}, [r2]!
vmlal.s16 q14, d21, d21
vmlal.s16 q15, d23, d23
blt 4f
vsubl.u8 q12, d4, d6
vsubl.u8 q13, d5, d7
bgt 2b
3:
@ flush the final pipelined 16-byte chunk
vmlal.s16 q14, d24, d24
vmlal.s16 q15, d26, d26
vmlal.s16 q14, d25, d25
vmlal.s16 q15, d27, d27
4:
@ end of row: widen s32 partials into the u64 per-plane totals
subs r5, r5, #1
vaddw.s32 q8, q8, d28
vaddw.s32 q9, q9, d30
add r0, r0, r1
add r2, r2, r3
vaddw.s32 q8, q8, d29
vaddw.s32 q9, q9, d31
bgt 1b
vadd.u64 d16, d16, d17
vadd.u64 d18, d18, d19
ldrd r4, r5, [sp, #16]
vst1.64 {d16}, [r4]
vst1.64 {d18}, [r5]
pop {r4-r5}
bx lr
endfunc
@ VAR_SQR_SUM: one row of a variance computation -- square \dsrc into
@ \qsqr, add \dsrc into the pixel-sum q0, and fold the previous
@ square register \qsqr_last into the running square-sum \qsqr_sum
@ (vpaddl on the first use, vpadal thereafter).
.macro VAR_SQR_SUM qsqr_sum qsqr_last qsqr dsrc vpadal=vpadal.u16
vmull.u8 \qsqr, \dsrc, \dsrc
vaddw.u8 q0, q0, \dsrc
\vpadal \qsqr_sum, \qsqr_last
.endm
@ uint64_t pixel_var_8x8( pixel *pix, intptr_t stride )
@ Sum and sum-of-squares over an 8x8 block, fully unrolled with two
@ interleaved accumulator chains (q1/q2); falls through to var_end
@ for the final reduction and packed (sum, sqr) return in r0/r1.
function pixel_var_8x8_neon
vld1.64 {d16}, [r0,:64], r1
vmull.u8 q1, d16, d16
vmovl.u8 q0, d16
vld1.64 {d18}, [r0,:64], r1
vmull.u8 q2, d18, d18
vaddw.u8 q0, q0, d18
vld1.64 {d20}, [r0,:64], r1
VAR_SQR_SUM q1, q1, q3, d20, vpaddl.u16
vld1.64 {d22}, [r0,:64], r1
VAR_SQR_SUM q2, q2, q8, d22, vpaddl.u16
vld1.64 {d24}, [r0,:64], r1
VAR_SQR_SUM q1, q3, q9, d24
vld1.64 {d26}, [r0,:64], r1
VAR_SQR_SUM q2, q8, q10, d26
vld1.64 {d24}, [r0,:64], r1
VAR_SQR_SUM q1, q9, q14, d24
vld1.64 {d26}, [r0,:64], r1
VAR_SQR_SUM q2, q10, q15, d26
b var_end
endfunc
@ uint64_t pixel_var_8x16( pixel *pix, intptr_t stride )
@ Sum and sum-of-squares over an 8x16 block: 4 rows of setup, then a
@ 4-row loop (ip counts down from 12), with the last-row square fold
@ duplicated at label 2 because the loop exits before it; joins
@ var_end for the reduction.
function pixel_var_8x16_neon
vld1.64 {d16}, [r0,:64], r1
vld1.64 {d18}, [r0,:64], r1
vmull.u8 q1, d16, d16
vmovl.u8 q0, d16
vld1.64 {d20}, [r0,:64], r1
vmull.u8 q2, d18, d18
vaddw.u8 q0, q0, d18
mov ip, #12
vld1.64 {d22}, [r0,:64], r1
VAR_SQR_SUM q1, q1, q14, d20, vpaddl.u16
vld1.64 {d16}, [r0,:64], r1
VAR_SQR_SUM q2, q2, q15, d22, vpaddl.u16
1: subs ip, ip, #4
vld1.64 {d18}, [r0,:64], r1
VAR_SQR_SUM q1, q14, q12, d16
vld1.64 {d20}, [r0,:64], r1
VAR_SQR_SUM q2, q15, q13, d18
vld1.64 {d22}, [r0,:64], r1
VAR_SQR_SUM q1, q12, q14, d20
beq 2f
vld1.64 {d16}, [r0,:64], r1
VAR_SQR_SUM q2, q13, q15, d22
b 1b
2:
VAR_SQR_SUM q2, q13, q15, d22
b var_end
endfunc
// Variance sums for a 16x16 block; r0 = pixels (16-byte rows), r1 = stride.
// NOTE: there is no branch after the loop -- execution falls through into
// var_end, so var_end must remain placed immediately after this function.
function pixel_var_16x16_neon
vld1.64 {d16-d17}, [r0,:128], r1     @ row 0 (16 pixels)
vmull.u8 q12, d16, d16
vmovl.u8 q0, d16
vmull.u8 q13, d17, d17
vaddw.u8 q0, q0, d17
vld1.64 {d18-d19}, [r0,:128], r1     @ row 1
VAR_SQR_SUM q1, q12, q14, d18, vpaddl.u16
VAR_SQR_SUM q2, q13, q15, d19, vpaddl.u16
mov ip, #7                           @ 7 more pairs of rows
var16_loop:
subs ip, ip, #1
vld1.64 {d16-d17}, [r0,:128], r1
VAR_SQR_SUM q1, q14, q12, d16
VAR_SQR_SUM q2, q15, q13, d17
vld1.64 {d18-d19}, [r0,:128], r1
VAR_SQR_SUM q1, q12, q14, d18
VAR_SQR_SUM q2, q13, q15, d19
bgt var16_loop
endfunc
// Shared reduction tail for the pixel_var_* functions (local symbol).
// In:  q0 = pixel sum (u16 lanes); q1, q2 = accumulated squares;
//      q14, q15 = last rows' squares not yet folded in.
// Out: d0 = { sum, sum_of_squares } returned as the register pair r0, r1.
function var_end, export=0
vpaddl.u16 q8, q14                   @ fold in the pending squares
vpaddl.u16 q9, q15
vadd.u32 q1, q1, q8
vadd.u16 d0, d0, d1
vadd.u32 q1, q1, q9
vadd.u32 q1, q1, q2
vpaddl.u16 d0, d0
vadd.u32 d2, d2, d3
vpadd.u32 d0, d0, d2                 @ d0[0] = sum, d0[1] = sqr
vmov r0, r1, d0
bx lr
endfunc
// Load one 8-byte row from each plane ([r0] post-incremented, [r1] with
// stride r3), twice, and produce the widened u8->s16 difference rows
// \diff1/\diff2.  If \lastdiff1 is supplied, the previous iteration's
// differences are folded into the running sums \acc1/\acc2 first
// (interleaved with the loads to hide latency).
.macro DIFF_SUM diff1 diff2 da1 db1 da2 db2 lastdiff1 lastdiff2 acc1 acc2
vld1.64 {\da1}, [r0,:64]!
vld1.64 {\db1}, [r1,:64], r3
.ifnb \lastdiff1
vadd.s16 \acc1, \acc1, \lastdiff1
vadd.s16 \acc2, \acc2, \lastdiff2
.endif
vld1.64 {\da2}, [r0,:64]!
vld1.64 {\db2}, [r1,:64], r3
vsubl.u8 \diff1, \da1, \db1
vsubl.u8 \diff2, \da2, \db2
.endm
// Accumulate the squares of four s16 d-registers into two q accumulators
// (two d-regs per accumulator).  Callers pass vmull.s16 for the first use
// so the accumulators are initialized instead of added to.
.macro SQR_ACC_DOUBLE acc1 acc2 d0 d1 d2 d3 vmlal=vmlal.s16
\vmlal \acc1, \d0, \d0
vmlal.s16 \acc1, \d1, \d1
\vmlal \acc2, \d2, \d2
vmlal.s16 \acc2, \d3, \d3
.endm
// Accumulate the squares of two s16 d-registers into one q accumulator;
// pass vmull.s16 as \vmlal on the first use to initialize \acc.
.macro SQR_ACC acc d0 d1 vmlal=vmlal.s16
\vmlal \acc, \d0, \d0
vmlal.s16 \acc, \d1, \d1
.endm
// Variance of the difference of two 8x8 blocks.
// In:  r0, r1 = the two pixel pointers (both use the fixed stride 16 set
//      below); r2 = output pointer for the two sums of squares (d1).
// Out: d0 holds var = sqr - sum*sum/64 (the #6 shift divides by the 64
//      pixels of an 8x8 block); returned via the r0/r1 register pair.
function pixel_var2_8x8_neon
mov r3, #16                          @ both planes: stride 16
DIFF_SUM q0, q10, d0, d1, d20, d21
DIFF_SUM q8, q11, d16, d17, d22, d23
SQR_ACC_DOUBLE q1, q13, d0, d1, d20, d21, vmull.s16
DIFF_SUM q9, q12, d18, d19, d24, d25, q8, q11, q0, q10
SQR_ACC_DOUBLE q2, q14, d16, d17, d22, d23, vmull.s16
.rept 2
DIFF_SUM q8, q11, d16, d17, d22, d23, q9, q12, q0, q10
SQR_ACC_DOUBLE q1, q13, d18, d19, d24, d25
DIFF_SUM q9, q12, d18, d19, d24, d25, q8, q11, q0, q10
SQR_ACC_DOUBLE q2, q14, d16, d17, d22, d23
.endr
DIFF_SUM q8, q11, d16, d17, d22, d23, q9, q12, q0, q10
SQR_ACC_DOUBLE q1, q13, d18, d19, d24, d25
vadd.s16 q0, q0, q8                  @ fold in the final differences
vadd.s16 q10, q10, q11
SQR_ACC_DOUBLE q2, q14, d16, d17, d22, d23
vadd.s16 d0, d0, d1
vadd.s16 d20, d20, d21
vadd.s32 q1, q1, q2
vadd.s32 q13, q13, q14
vpaddl.s16 d0, d0
vpaddl.s16 d20, d20
vadd.s32 d1, d2, d3
vadd.s32 d26, d26, d27
vpadd.s32 d0, d0, d20 @ sum
vpadd.s32 d1, d1, d26 @ sqr
vmul.s32 d0, d0, d0 @ sum*sum
vshr.s32 d0, d0, #6                  @ sum*sum / 64
vsub.s32 d0, d1, d0                  @ var = sqr - sum*sum/64
vpadd.s32 d0, d0, d0
vmov r0, r1, d0
vst1.32 {d1}, [r2,:64]               @ store both sqr values for the caller
bx lr
endfunc
// Variance of the difference of two 8x16 blocks; same contract as
// pixel_var2_8x8_neon but with a pipelined 15-iteration row loop and a
// #7 shift (8x16 = 128 pixels).  Stores sqr (d2) to [r2], result via d0.
function pixel_var2_8x16_neon
mov r3, #16                          @ fixed stride for both planes
vld1.64 {d16}, [r0,:64]!
vld1.64 {d17}, [r1,:64], r3
vld1.64 {d18}, [r0,:64]!
vld1.64 {d19}, [r1,:64], r3
vsubl.u8 q0, d16, d17                @ first two difference rows
vsubl.u8 q3, d18, d19
SQR_ACC q1, d0, d1, vmull.s16
vld1.64 {d16}, [r0,:64]!
mov ip, #15                          @ 15 more row pairs
vld1.64 {d17}, [r1,:64], r3
SQR_ACC q2, d6, d7, vmull.s16
1: subs ip, ip, #1
vld1.64 {d18}, [r0,:64]!
vsubl.u8 q10, d16, d17
vld1.64 {d19}, [r1,:64], r3
vadd.s16 q0, q0, q10                 @ running sum of differences
SQR_ACC q1, d20, d21
vsubl.u8 q11, d18, d19
beq 2f                               @ last pair: skip the extra loads
vld1.64 {d16}, [r0,:64]!
vadd.s16 q3, q3, q11
vld1.64 {d17}, [r1,:64], r3
SQR_ACC q2, d22, d23
b 1b
2:
vadd.s16 q3, q3, q11                 @ finish the pair deferred by beq
SQR_ACC q2, d22, d23
vadd.s16 d0, d0, d1
vadd.s16 d6, d6, d7
vpaddl.s16 d0, d0
vpaddl.s16 d6, d6
vadd.s32 d2, d2, d3
vadd.s32 d4, d4, d5
vpadd.s32 d0, d0, d6 @ sum
vpadd.s32 d2, d2, d4 @ sqr
vmul.s32 d0, d0, d0 @ sum*sum
vshr.s32 d0, d0, #7                  @ sum*sum / 128
vsub.s32 d0, d2, d0                  @ var = sqr - sum*sum/128
vpadd.s32 d0, d0, d0
vmov r0, r1, d0
vst1.32 {d2}, [r2,:64]
bx lr
endfunc
// Load four 8-byte rows from [r0] (stride r1) and [r2] (stride r3) and
// produce the four widened u8->s16 difference rows \q0..\q3.
.macro LOAD_DIFF_8x4 q0 q1 q2 q3
vld1.32 {d1}, [r2], r3
vld1.32 {d0}, [r0,:64], r1
vsubl.u8 \q0, d0, d1
vld1.32 {d3}, [r2], r3
vld1.32 {d2}, [r0,:64], r1
vsubl.u8 \q1, d2, d3
vld1.32 {d5}, [r2], r3
vld1.32 {d4}, [r0,:64], r1
vsubl.u8 \q2, d4, d5
vld1.32 {d7}, [r2], r3
vld1.32 {d6}, [r0,:64], r1
vsubl.u8 \q3, d6, d7
.endm
// SATD of a 4x4 block: r0/r1 = pix1/stride1, r2/r3 = pix2/stride2.
// Two 4-pixel rows are packed per d-register; a 4x4 Hadamard transform is
// applied to the differences (SUMSUB_*/HADAMARD/HORIZ_ADD are helper
// macros defined earlier in this file) and the absolute sum is returned
// in r0.
function pixel_satd_4x4_neon
vld1.32 {d1[]}, [r2], r3
vld1.32 {d0[]}, [r0,:32], r1
vld1.32 {d3[]}, [r2], r3
vld1.32 {d2[]}, [r0,:32], r1
vld1.32 {d1[1]}, [r2], r3
vld1.32 {d0[1]}, [r0,:32], r1
vld1.32 {d3[1]}, [r2], r3
vld1.32 {d2[1]}, [r0,:32], r1
vsubl.u8 q0, d0, d1                  @ widened differences
vsubl.u8 q1, d2, d3
SUMSUB_AB q2, q3, q0, q1
SUMSUB_ABCD d0, d2, d1, d3, d4, d5, d6, d7
HADAMARD 1, sumsub, q2, q3, q0, q1
HADAMARD 2, amax, q0,, q2, q3
HORIZ_ADD d0, d0, d1
vmov.32 r0, d0[0]
bx lr
endfunc
// SATD of a 4x8 block: r0/r1 = pix1/stride1, r2/r3 = pix2/stride2.
// Packs pairs of 4-pixel rows into d-registers, performs the first
// vertical sumsub stage, then branches to the tail shared with
// pixel_satd_8x4 (satd_4x8_8x4_end_neon).
function pixel_satd_4x8_neon
vld1.32 {d1[]}, [r2], r3
vld1.32 {d0[]}, [r0,:32], r1
vld1.32 {d3[]}, [r2], r3
vld1.32 {d2[]}, [r0,:32], r1
vld1.32 {d5[]}, [r2], r3
vld1.32 {d4[]}, [r0,:32], r1
vld1.32 {d7[]}, [r2], r3
vld1.32 {d6[]}, [r0,:32], r1
vld1.32 {d1[1]}, [r2], r3
vld1.32 {d0[1]}, [r0,:32], r1
vsubl.u8 q0, d0, d1
vld1.32 {d3[1]}, [r2], r3
vld1.32 {d2[1]}, [r0,:32], r1
vsubl.u8 q1, d2, d3
vld1.32 {d5[1]}, [r2], r3
vld1.32 {d4[1]}, [r0,:32], r1
vsubl.u8 q2, d4, d5
vld1.32 {d7[1]}, [r2], r3
SUMSUB_AB q8, q9, q0, q1             @ first sumsub stage
vld1.32 {d6[1]}, [r0,:32], r1
vsubl.u8 q3, d6, d7
SUMSUB_AB q10, q11, q2, q3
b satd_4x8_8x4_end_neon              @ shared transform/abs/sum tail
endfunc
// SATD of an 8x4 block: r0/r1 = pix1/stride1, r2/r3 = pix2/stride2.
// Loads four 8-pixel rows, performs the first sumsub stage, then FALLS
// THROUGH into satd_4x8_8x4_end_neon (no branch) -- keep that function
// placed immediately after this one.
function pixel_satd_8x4_neon
vld1.64 {d1}, [r2], r3
vld1.64 {d0}, [r0,:64], r1
vsubl.u8 q0, d0, d1
vld1.64 {d3}, [r2], r3
vld1.64 {d2}, [r0,:64], r1
vsubl.u8 q1, d2, d3
vld1.64 {d5}, [r2], r3
vld1.64 {d4}, [r0,:64], r1
vsubl.u8 q2, d4, d5
vld1.64 {d7}, [r2], r3
SUMSUB_AB q8, q9, q0, q1
vld1.64 {d6}, [r0,:64], r1
vsubl.u8 q3, d6, d7
SUMSUB_AB q10, q11, q2, q3
endfunc
// Shared tail for pixel_satd_4x8 / pixel_satd_8x4 (local symbol).
// In: q8..q11 = results of the first sumsub stage.  Completes the
// Hadamard transform via transposed sum/difference stages, takes
// absolute maxima, and returns the horizontal sum in r0.
function satd_4x8_8x4_end_neon, export=0
vadd.s16 q0, q8, q10
vadd.s16 q1, q9, q11
vsub.s16 q2, q8, q10
vsub.s16 q3, q9, q11
vtrn.16 q0, q1
vadd.s16 q8, q0, q1
vtrn.16 q2, q3
vsub.s16 q9, q0, q1
vadd.s16 q10, q2, q3
vsub.s16 q11, q2, q3
vtrn.32 q8, q10
vabs.s16 q8, q8
vtrn.32 q9, q11
vabs.s16 q10, q10
vabs.s16 q9, q9
vabs.s16 q11, q11
vmax.u16 q0, q8, q10                 @ amax stage of the transform
vmax.u16 q1, q9, q11
vadd.u16 q0, q0, q1
HORIZ_ADD d0, d0, d1
vmov.32 r0, d0[0]
bx lr
endfunc
// SATD of an 8x8 block: delegates to satd_8x8_neon (which leaves the four
// partial results in q12..q15), then reduces them to a scalar in r0.
// lr is preserved in ip across the internal call.
function pixel_satd_8x8_neon
mov ip, lr
bl satd_8x8_neon
vadd.u16 q0, q12, q13
vadd.u16 q1, q14, q15
vadd.u16 q0, q0, q1
HORIZ_ADD d0, d0, d1
mov lr, ip
vmov.32 r0, d0[0]
bx lr
endfunc
// SATD of an 8x16 block: two satd_8x8_neon calls, accumulating the
// partial results (q12..q15) into the callee-saved q4/q5, then a final
// horizontal reduction into r0.  q4/q5 are saved per AAPCS (d8-d11).
function pixel_satd_8x16_neon
vpush {d8-d11}
mov ip, lr
bl satd_8x8_neon
vadd.u16 q4, q12, q13
vadd.u16 q5, q14, q15
bl satd_8x8_neon
vadd.u16 q4, q4, q12
vadd.u16 q5, q5, q13
vadd.u16 q4, q4, q14
vadd.u16 q5, q5, q15
vadd.u16 q0, q4, q5
HORIZ_ADD d0, d0, d1
vpop {d8-d11}
mov lr, ip
vmov.32 r0, d0[0]
bx lr
endfunc
// Load an 8x8 block of differences and run the vertical sumsub stages
// (local symbol).  Loads are interleaved with the arithmetic to hide
// latency.  FALLS THROUGH into satd_8x4v_8x8h_neon (no branch) -- keep
// that function placed immediately after this one.
function satd_8x8_neon, export=0
LOAD_DIFF_8x4 q8, q9, q10, q11
vld1.64 {d7}, [r2], r3
SUMSUB_AB q0, q1, q8, q9
vld1.64 {d6}, [r0,:64], r1
vsubl.u8 q12, d6, d7
vld1.64 {d17}, [r2], r3
SUMSUB_AB q2, q3, q10, q11
vld1.64 {d16}, [r0,:64], r1
vsubl.u8 q13, d16, d17
vld1.64 {d19}, [r2], r3
SUMSUB_AB q8, q10, q0, q2
vld1.64 {d18}, [r0,:64], r1
vsubl.u8 q14, d18, d19
vld1.64 {d1}, [r2], r3
SUMSUB_AB q9, q11, q1, q3
vld1.64 {d0}, [r0,:64], r1
vsubl.u8 q15, d0, d1
endfunc
// one vertical hadamard pass and two horizontal
// In: q8..q11 = partially transformed rows, q12..q15 = raw difference
// rows.  Out: q12..q15 = per-lane abs-max results for the caller to
// reduce.  (Local symbol; reached by fall-through from satd_8x8_neon or
// by branch from satd_16x4_neon.)
function satd_8x4v_8x8h_neon, export=0
SUMSUB_ABCD q0, q1, q2, q3, q12, q13, q14, q15
vtrn.16 q8, q9
SUMSUB_AB q12, q14, q0, q2
vtrn.16 q10, q11
SUMSUB_AB q13, q15, q1, q3
SUMSUB_AB q0, q1, q8, q9
vtrn.16 q12, q13
SUMSUB_AB q2, q3, q10, q11
vtrn.16 q14, q15
SUMSUB_AB q8, q9, q12, q13
vtrn.32 q0, q2
SUMSUB_AB q10, q11, q14, q15
vtrn.32 q1, q3
ABS2 q0, q2
vtrn.32 q8, q10
ABS2 q1, q3
vtrn.32 q9, q11
ABS2 q8, q10
ABS2 q9, q11
vmax.s16 q12, q0, q2
vmax.s16 q13, q1, q3
vmax.s16 q14, q8, q10
vmax.s16 q15, q9, q11
bx lr
endfunc
// SATD of a 16x8 block: two satd_16x4_neon calls, accumulating the
// q12..q15 partial results into callee-saved q4/q5, then reducing to r0.
function pixel_satd_16x8_neon
vpush {d8-d11}                       @ q4/q5 are callee-saved (AAPCS)
mov ip, lr
bl satd_16x4_neon
vadd.u16 q4, q12, q13
vadd.u16 q5, q14, q15
bl satd_16x4_neon
vadd.u16 q4, q4, q12
vadd.u16 q5, q5, q13
vadd.u16 q4, q4, q14
vadd.u16 q5, q5, q15
vadd.u16 q0, q4, q5
HORIZ_ADD d0, d0, d1
vpop {d8-d11}
mov lr, ip
vmov.32 r0, d0[0]
bx lr
endfunc
// SATD of a 16x16 block: four satd_16x4_neon calls with the same
// accumulation pattern as pixel_satd_16x8_neon.
function pixel_satd_16x16_neon
vpush {d8-d11}                       @ q4/q5 are callee-saved (AAPCS)
mov ip, lr
bl satd_16x4_neon
vadd.u16 q4, q12, q13
vadd.u16 q5, q14, q15
bl satd_16x4_neon
vadd.u16 q4, q4, q12
vadd.u16 q5, q5, q13
vadd.u16 q4, q4, q14
vadd.u16 q5, q5, q15
bl satd_16x4_neon
vadd.u16 q4, q4, q12
vadd.u16 q5, q5, q13
vadd.u16 q4, q4, q14
vadd.u16 q5, q5, q15
bl satd_16x4_neon
vadd.u16 q4, q4, q12
vadd.u16 q5, q5, q13
vadd.u16 q4, q4, q14
vadd.u16 q5, q5, q15
vadd.u16 q0, q4, q5
HORIZ_ADD d0, d0, d1
vpop {d8-d11}
mov lr, ip
vmov.32 r0, d0[0]
bx lr
endfunc
// Load a 16x4 strip of differences (left 8 columns into q8..q11, right 8
// into q12..q15), do the first vertical sumsub stage, then branch to the
// shared satd_8x4v_8x8h_neon transform (local symbol; results in
// q12..q15 for the caller to accumulate).
function satd_16x4_neon, export=0
vld1.64 {d2-d3}, [r2], r3
vld1.64 {d0-d1}, [r0,:128], r1
vsubl.u8 q8, d0, d2
vld1.64 {d6-d7}, [r2], r3
vsubl.u8 q12, d1, d3
vld1.64 {d4-d5}, [r0,:128], r1
vsubl.u8 q9, d4, d6
vld1.64 {d2-d3}, [r2], r3
vsubl.u8 q13, d5, d7
vld1.64 {d0-d1}, [r0,:128], r1
vsubl.u8 q10, d0, d2
vld1.64 {d6-d7}, [r2], r3
vsubl.u8 q14, d1, d3
vadd.s16 q0, q8, q9                  @ sumsub stage, interleaved with loads
vld1.64 {d4-d5}, [r0,:128], r1
vsub.s16 q1, q8, q9
vsubl.u8 q11, d4, d6
vsubl.u8 q15, d5, d7
SUMSUB_AB q2, q3, q10, q11
SUMSUB_ABCD q8, q10, q9, q11, q0, q2, q1, q3
b satd_8x4v_8x8h_neon
endfunc
// SA8D of an 8x8 block: delegates to sa8d_8x8_neon (results in q8/q9),
// reduces, and applies the (x + 1) >> 1 rounding of the sa8d metric.
function pixel_sa8d_8x8_neon
mov ip, lr
bl sa8d_8x8_neon
vadd.u16 q0, q8, q9
HORIZ_ADD d0, d0, d1
mov lr, ip
vmov.32 r0, d0[0]
add r0, r0, #1                       @ round: (sum + 1) >> 1
lsr r0, r0, #1
bx lr
endfunc
// SA8D of a 16x16 block: four 8x8 sub-blocks.  After the first column
// pair, the pointers are rewound 16 rows (stride << 4) and moved 8
// pixels right.  Partial sums widen into callee-saved q4/q5; final
// result is rounded with (x + 1) >> 1.
function pixel_sa8d_16x16_neon
vpush {d8-d11}                       @ q4/q5 are callee-saved (AAPCS)
mov ip, lr
bl sa8d_8x8_neon
vpaddl.u16 q4, q8
vpaddl.u16 q5, q9
bl sa8d_8x8_neon
vpadal.u16 q4, q8
vpadal.u16 q5, q9
sub r0, r0, r1, lsl #4               @ back up 16 rows
sub r2, r2, r3, lsl #4
add r0, r0, #8                       @ move to the right 8x16 half
add r2, r2, #8
bl sa8d_8x8_neon
vpadal.u16 q4, q8
vpadal.u16 q5, q9
bl sa8d_8x8_neon
vpaddl.u16 q8, q8
vpaddl.u16 q9, q9
vadd.u32 q0, q4, q8
vadd.u32 q1, q5, q9
vadd.u32 q0, q0, q1
vadd.u32 d0, d0, d1
vpadd.u32 d0, d0, d0
vpop {d8-d11}
mov lr, ip
vmov.32 r0, d0[0]
add r0, r0, #1                       @ round: (sum + 1) >> 1
lsr r0, r0, #1
bx lr
endfunc
// 4-point vertical Hadamard butterfly over four q-registers, using
// \t1..\t4 as temporaries (two chained SUMSUB_ABCD stages).
.macro HADAMARD4_V r1, r2, r3, r4, t1, t2, t3, t4
SUMSUB_ABCD \t1, \t2, \t3, \t4, \r1, \r2, \r3, \r4
SUMSUB_ABCD \r1, \r3, \r2, \r4, \t1, \t3, \t2, \t4
.endm
// Compute an 8x4 SATD contribution from \s0..\s3 (copied into q0..q3 so
// the inputs stay intact) and pairwise-accumulate it into \dst.
// Clobbers q0-q3 and q6/q7.
.macro integrated_satd dst, s0, s1, s2, s3
vmov q0, \s0
vmov q1, \s1
vmov q2, \s2
vmov q3, \s3
vtrn.16 q0, q1
vtrn.16 q2, q3
SUMSUB_AB q6, q7, q0, q1
SUMSUB_AB q0, q1, q2, q3
vtrn.32 q6, q0
vtrn.32 q7, q1
vabs.s16 q6, q6
vabs.s16 q0, q0
vabs.s16 q7, q7
vabs.s16 q1, q1
vmax.u16 q6, q6, q0                  @ amax stage
vmax.u16 q7, q7, q1
vadd.i16 q6, q6, q7
vpadal.u16 \dst, q6
.endm
// Generates sa8d_8x8_neon (plain) or sa8d_satd_8x8_neon (\satd = "satd_",
// which additionally accumulates SATD into q4 via integrated_satd and the
// sa8d partials into q5).  Loads an 8x8 difference block, applies the 8x8
// Hadamard transform, and leaves the abs-max sums in q8/q9.
.macro sa8d_satd_8x8 satd=
function sa8d_\satd\()8x8_neon, export=0
LOAD_DIFF_8x4 q8, q9, q10, q11
vld1.64 {d7}, [r2], r3
SUMSUB_AB q0, q1, q8, q9
vld1.64 {d6}, [r0,:64], r1
vsubl.u8 q12, d6, d7
vld1.64 {d17}, [r2], r3
SUMSUB_AB q2, q3, q10, q11
vld1.64 {d16}, [r0,:64], r1
vsubl.u8 q13, d16, d17
vld1.64 {d19}, [r2], r3
SUMSUB_AB q8, q10, q0, q2
vld1.64 {d18}, [r0,:64], r1
vsubl.u8 q14, d18, d19
vld1.64 {d1}, [r2], r3
SUMSUB_AB q9, q11, q1, q3
vld1.64 {d0}, [r0,:64], r1
vsubl.u8 q15, d0, d1
HADAMARD4_V q12, q13, q14, q15, q0, q1, q2, q3
.ifc \satd, satd_
integrated_satd q4, q8, q9, q10, q11
integrated_satd q4, q12, q13, q14, q15
.endif
SUMSUB_ABCD q0, q8, q1, q9, q8, q12, q9, q13
SUMSUB_AB q2, q10, q10, q14
vtrn.16 q8, q9
SUMSUB_AB q3, q11, q11, q15
vtrn.16 q0, q1
SUMSUB_AB q12, q13, q8, q9
vtrn.16 q10, q11
SUMSUB_AB q8, q9, q0, q1
vtrn.16 q2, q3
SUMSUB_AB q14, q15, q10, q11
vadd.i16 q10, q2, q3
vtrn.32 q12, q14
vsub.i16 q11, q2, q3
vtrn.32 q13, q15
SUMSUB_AB q0, q2, q12, q14
vtrn.32 q8, q10
SUMSUB_AB q1, q3, q13, q15
vtrn.32 q9, q11
SUMSUB_AB q12, q14, q8, q10
SUMSUB_AB q13, q15, q9, q11
vswp d1, d24                         @ 64-bit transpose steps
ABS2 q0, q12
vswp d3, d26
ABS2 q1, q13
vswp d5, d28
ABS2 q2, q14
vswp d7, d30
ABS2 q3, q15
vmax.s16 q8, q0, q12
vmax.s16 q9, q1, q13
vmax.s16 q10, q2, q14
vmax.s16 q11, q3, q15
vadd.i16 q8, q8, q9
vadd.i16 q9, q10, q11
.ifc \satd, satd_
vpadal.u16 q5, q8                    @ sa8d accumulator for the combined fn
vpadal.u16 q5, q9
.endif
bx lr
endfunc
.endm
sa8d_satd_8x8
sa8d_satd_8x8 satd_
// Combined SA8D+SATD of a 16x16 block.  sa8d_satd_8x8_neon accumulates
// SATD into q4 and SA8D into q5 across the four 8x8 sub-blocks.
// Returns the pair r0 = rounded sa8d (vrshr #1), r1 = satd -- presumably
// packed as a 64-bit return value; confirm against the C prototype.
function pixel_sa8d_satd_16x16_neon
push {lr}
vpush {q4-q7}                        @ callee-saved NEON regs (AAPCS)
vmov.u32 q4, #0                      @ satd accumulator
vmov.u32 q5, #0                      @ sa8d accumulator
bl sa8d_satd_8x8_neon
bl sa8d_satd_8x8_neon
sub r0, r0, r1, lsl #4               @ rewind 16 rows, step 8 pixels right
sub r2, r2, r3, lsl #4
add r0, r0, #8
add r2, r2, #8
bl sa8d_satd_8x8_neon
bl sa8d_satd_8x8_neon
vadd.u32 d1, d10, d11                @ reduce sa8d
vadd.u32 d0, d8, d9                  @ reduce satd
vpadd.u32 d1, d1, d1
vpadd.u32 d0, d0, d0
vrshr.u32 d1, d1, #1                 @ sa8d rounding: (x + 1) >> 1
vmov.32 r1, d0[0]
vmov.32 r0, d1[0]
vpop {q4-q7}
pop {pc}
endfunc
// Generates pixel_hadamard_ac_WxH_neon for the four instantiated sizes.
// hadamard_ac_8x8_neon accumulates per-8x8 results into q4 (satd) and q5
// (sa8d-like sum) using the AC masks loaded into q6/q7; the final pair is
// scaled by >>1 and >>2 and returned in r0/r1.
.macro HADAMARD_AC w h
function pixel_hadamard_ac_\w\()x\h\()_neon
vpush {d8-d15}                       @ q4-q7 are callee-saved (AAPCS)
movrel ip, mask_ac4
vmov.i8 q4, #0
// note: this assumes mask_ac8 is after mask_ac4 (so don't move it)
vld1.64 {d12-d15}, [ip,:128]         @ q6 = mask_ac4, q7 = mask_ac8
vmov.i8 q5, #0
mov ip, lr
bl hadamard_ac_8x8_neon
.if \h > 8
bl hadamard_ac_8x8_neon
.endif
.if \w > 8
sub r0, r0, r1, lsl #3               @ rewind 8 rows, step 8 pixels right
add r0, r0, #8
bl hadamard_ac_8x8_neon
.endif
.if \w * \h == 256
sub r0, r0, r1, lsl #4               @ 16x16 only: rewind for the 4th block
bl hadamard_ac_8x8_neon
.endif
vadd.s32 d8, d8, d9
vadd.s32 d10, d10, d11
vpadd.s32 d0, d8, d10
vpop {d8-d15}
mov lr, ip
vmov r0, r1, d0
lsr r0, r0, #1                       @ scale the two sums
lsr r1, r1, #2
bx lr
endfunc
.endm
HADAMARD_AC 8, 8
HADAMARD_AC 8, 16
HADAMARD_AC 16, 8
HADAMARD_AC 16, 16
// q4: satd  q5: sa8d  q6: mask_ac4  q7: mask_ac8
// Hadamard AC of one 8x8 source block (local symbol).  r0 = pixels,
// r1 = stride; advances r0 by 8 rows.  Rows are loaded as sum/difference
// pairs (vaddl/vsubl), transformed, masked by q6/q7 to drop the DC
// term(s), and the absolute sums are pairwise-accumulated into q4 and q5.
function hadamard_ac_8x8_neon, export=0
vld1.64 {d2}, [r0,:64], r1
vld1.64 {d3}, [r0,:64], r1
vaddl.u8 q0, d2, d3                  @ row pair: sum ...
vld1.64 {d6}, [r0,:64], r1
vsubl.u8 q1, d2, d3                  @ ... and difference
vld1.64 {d7}, [r0,:64], r1
vaddl.u8 q2, d6, d7
vld1.64 {d18}, [r0,:64], r1
vsubl.u8 q3, d6, d7
vld1.64 {d19}, [r0,:64], r1
vaddl.u8 q8, d18, d19
vld1.64 {d22}, [r0,:64], r1
vsubl.u8 q9, d18, d19
vld1.64 {d23}, [r0,:64], r1
SUMSUB_ABCD q12, q14, q13, q15, q0, q2, q1, q3
vaddl.u8 q10, d22, d23
vsubl.u8 q11, d22, d23
vtrn.16 q12, q13
SUMSUB_ABCD q0, q2, q1, q3, q8, q10, q9, q11
vtrn.16 q14, q15
SUMSUB_AB q8, q9, q12, q13
vtrn.16 q0, q1
SUMSUB_AB q10, q11, q14, q15
vtrn.16 q2, q3
SUMSUB_AB q12, q13, q0, q1
vtrn.32 q8, q10
SUMSUB_AB q14, q15, q2, q3
vtrn.32 q9, q11
SUMSUB_AB q0, q2, q8, q10
vtrn.32 q12, q14
SUMSUB_AB q1, q3, q9, q11
vtrn.32 q13, q15
SUMSUB_ABCD q8, q10, q9, q11, q12, q14, q13, q15
vabs.s16 q12, q0                     @ sum |coeffs| for the 4x4 satd part
vabs.s16 q13, q8
vabs.s16 q15, q1
vadd.s16 q12, q12, q13
vabs.s16 q14, q2
vand.s16 q12, q12, q6                @ mask_ac4: drop the 4x4 DC terms
vabs.s16 q13, q3
vadd.s16 q12, q12, q15
vabs.s16 q15, q9
vadd.s16 q12, q12, q14
vabs.s16 q14, q10
vadd.s16 q12, q12, q13
vabs.s16 q13, q11
vadd.s16 q12, q12, q15
vsub.s16 q15, q11, q3                @ begin the 8x8 (sa8d) combination
vadd.s16 q12, q12, q14
vadd.s16 q14, q11, q3
vadd.s16 q12, q12, q13
vsub.s16 q13, q10, q2
vadd.s16 q2, q10, q2
vpadal.u16 q4, q12                   @ accumulate the satd part
SUMSUB_AB q10, q11, q9, q1
SUMSUB_AB q9, q8, q0, q8
vswp d29, d30
vabs.s16 q14, q14
vabs.s16 q15, q15
vswp d5, d26
vabs.s16 q2, q2
vabs.s16 q13, q13
vswp d21, d22
vabs.s16 q10, q10
vabs.s16 q11, q11
vmax.s16 q3, q14, q15
vmax.s16 q2, q2, q13
vmax.s16 q1, q10, q11
vswp d19, d16
SUMSUB_AB q14, q15, q9, q8
vadd.s16 q2, q2, q3
vadd.s16 q2, q2, q1
vand q14, q14, q7                    @ mask_ac8: drop the 8x8 DC term
vadd.s16 q2, q2, q2
vabs.s16 q15, q15
vabs.s16 q14, q14
vadd.s16 q2, q2, q15
vadd.s16 q2, q2, q14
vpadal.u16 q5, q2                    @ accumulate the sa8d part
bx lr
endfunc
// One row of the SSIM 4x4x2 core: loads the second-plane row \db (and,
// for \n < 3, prefetches the next first-plane row \dnext), squares and
// cross-multiplies the current rows, and folds the *previous* row's
// products (\lastssa/\lasts12/\lastssb) into the q2/q3 accumulators.
// \n == 1 initializes the accumulators (vpaddl/vaddl) instead of adding.
.macro SSIM_ITER n ssa s12 ssb lastssa lasts12 lastssb da db dnext
vld1.64 {\db}, [r2], r3
vmull.u8 \ssa, \da, \da              @ pix1 squares
vmull.u8 \s12, \da, \db              @ pix1 * pix2 cross terms
.if \n == 1
vpaddl.u16 q2, \lastssa
vpaddl.u16 q3, \lasts12
vaddl.u8 q0, d0, \da                 @ start the pix1 sum
.else
vpadal.u16 q2, \lastssa
vpadal.u16 q3, \lasts12
vaddw.u8 q0, q0, \da
.endif
vpadal.u16 q2, \lastssb
.if \n < 3
vld1.64 {\dnext}, [r0], r1
.endif
.if \n == 1
vaddl.u8 q1, d2, \db                 @ start the pix2 sum
.else
vaddw.u8 q1, q1, \db
.endif
vmull.u8 \ssb, \db, \db              @ pix2 squares
.endm
// SSIM core over two adjacent 4x4 blocks.
// In:  r0/r1 = pix1/stride1, r2/r3 = pix2/stride2, [sp] = output pointer.
// Out: stores four interleaved 2-element sums (s1, s2, ss, s12) with
//      vst4.32 -- one set per 4x4 block.
function pixel_ssim_4x4x2_core_neon
ldr ip, [sp]                         @ ip = sums output pointer
vld1.64 {d0}, [r0], r1
vld1.64 {d2}, [r2], r3
vmull.u8 q2, d0, d0                  @ row 0 products, folded in by ITER 1
vmull.u8 q3, d0, d2
vld1.64 {d28}, [r0], r1
vmull.u8 q15, d2, d2
SSIM_ITER 1, q8, q9, q14, q2, q3, q15, d28, d29, d26
SSIM_ITER 2, q10,q11,q13, q8, q9, q14, d26, d27, d28
SSIM_ITER 3, q8, q9, q15, q10,q11,q13, d28, d29
vpadal.u16 q2, q8                    @ fold in the last row's products
vpaddl.u16 q0, q0
vpaddl.u16 q1, q1
vpadal.u16 q2, q15
vpadal.u16 q3, q9
vpadd.u32 d0, d0, d1                 @ per-block reductions
vpadd.u32 d1, d2, d3
vpadd.u32 d2, d4, d5
vpadd.u32 d3, d6, d7
vst4.32 {d0-d3}, [ip]                @ interleaved store: s1, s2, ss, s12
bx lr
endfunc
// FIXME: see about doing 16x16 -> 32 bit multiplies for s1/s2
// Final SSIM combination over up to 4 column sums.
// In:  r0, r1 = the two arrays of per-block (s1, s2, ss, s12) sums,
//      r2 = number of valid results (< 4 masks off the tail via mask_ff).
// Out: r0 = bit pattern of the summed float SSIM terms (returned in d0).
function pixel_ssim_end4_neon
vld1.32 {d16-d19}, [r0,:128]!
vld1.32 {d20-d23}, [r1,:128]!
vadd.s32 q0, q8, q10                 @ sums of adjacent result pairs
vadd.s32 q1, q9, q11
vld1.32 {d24-d27}, [r0,:128]!
vadd.s32 q0, q0, q1
vld1.32 {d28-d31}, [r1,:128]!
vadd.s32 q2, q12, q14
vadd.s32 q3, q13, q15
vld1.32 {d16-d17}, [r0,:128]
vadd.s32 q1, q1, q2
vld1.32 {d18-d19}, [r1,:128]
vadd.s32 q8, q8, q9
vadd.s32 q2, q2, q3
vadd.s32 q3, q3, q8
vtrn.32 q0, q1                       @ transpose to one component per q-reg
vtrn.32 q2, q3
vswp d1, d4
vswp d3, d6
// s1=q0, s2=q1, ss=q2, s12=q3
vmul.s32 q8, q0, q1 // s1*s2
vmul.s32 q0, q0, q0
vmla.s32 q0, q1, q1 // s1*s1 + s2*s2
vshl.s32 q3, q3, #7
vshl.s32 q2, q2, #6
vadd.s32 q1, q8, q8
mov r3, #416 // ssim_c1 = .01*.01*255*255*64
movconst ip, 235963 // ssim_c2 = .03*.03*255*255*64*63
vdup.32 q14, r3
vdup.32 q15, ip
vsub.s32 q2, q2, q0 // vars
vsub.s32 q3, q3, q1 // covar*2
vadd.s32 q0, q0, q14                 @ add the stabilizing constants
vadd.s32 q2, q2, q15
vadd.s32 q1, q1, q14
vadd.s32 q3, q3, q15
vcvt.f32.s32 q0, q0                  @ finish in float for the divisions
vcvt.f32.s32 q2, q2
vcvt.f32.s32 q1, q1
vcvt.f32.s32 q3, q3
vmul.f32 q0, q0, q2                  @ denominators
vmul.f32 q1, q1, q3                  @ numerators
cmp r2, #4
vdiv.f32 s0, s4, s0                  @ per-lane SSIM terms
vdiv.f32 s1, s5, s1
vdiv.f32 s2, s6, s2
vdiv.f32 s3, s7, s3
beq ssim_skip                        @ all 4 results valid: no masking
movrel r3, mask_ff
sub r3, r3, r2, lsl #2
vld1.64 {d6-d7}, [r3]
vand q0, q0, q3                      @ zero the invalid tail lanes
ssim_skip:
vadd.f32 d0, d0, d1
vpadd.f32 d0, d0, d0
vmov.32 r0, d0[0]
bx lr
endfunc
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,445
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/ADC/ADC_MultiChannelSingleConversion/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; --- Stack and heap regions (Keil armasm; sizes in bytes) ---
Stack_Size EQU 0x400 ; 1 KiB main stack
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp ; top of stack (vector 0); stack grows downward
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200 ; 512-byte heap
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8 ; 8-byte stack alignment (AAPCS)
THUMB ; Thumb instruction set (Cortex-M0 is Thumb-only)
; Vector Table Mapped to Address 0 at Reset
; Entry 0 is the initial SP, entry 1 the reset vector; the remaining
; entries follow the STM32F072 interrupt numbering.
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TS
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
; Calls SystemInit (clock/system setup) then jumps to the C library
; entry point __main, which performs C runtime init and calls main().
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0 ; __main does not return
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
; All are exported [WEAK]: defining a non-weak symbol with the same name
; elsewhere overrides the spin loop.
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B . ; spin
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B . ; spin
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B . ; spin
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B . ; spin
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B . ; spin
ENDP
; Default handler for all peripheral interrupts: every IRQ symbol below is
; a weak alias for this single spin loop; an application overrides one by
; defining a non-weak function of the same name.
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B . ; spin for any unhandled interrupt
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
; With MicroLIB the linker uses the exported region symbols directly;
; otherwise __user_initial_stackheap returns the two-region layout
; (R0/R2 = heap base/limit, R1/R3 = stack base/limit) to the C library.
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/ADC/ADC_MultiChannelSingleConversion/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
/* Assembler configuration and linker-script section boundary symbols
   (referenced by Reset_Handler's copy/zero loops below). */
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
/* Reset entry point: set SP, copy .data from flash, zero .bss, run
   SystemInit and the C static constructors, then call main().
   r1 is the running byte offset into .data; r2 walks .bss. */
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1] /* word from the flash image */
str r3, [r0, r1] /* r0 holds _sdata from the loop test below */
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system intitialization function.*/
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever /* main returned: trap here */
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
/* Catch-all for unexpected interrupts: spin so a debugger can inspect
   the system state.  All weak vector symbols alias here (see below). */
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
/* NOTE(review): .size is emitted before the label here, as in ST's
   original file, so the computed size is 0 -- harmless metadata quirk. */
.size g_pfnVectors, .-g_pfnVectors
/* STM32F072 vector table: entry 0 = initial SP, entry 1 = reset vector,
   then the Cortex-M0 system exceptions and device IRQs. */
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
/* Each handler defaults to Default_Handler; a strong definition of the
   same name anywhere in the program overrides the alias. */
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/ADC/ADC_MultiChannelSingleConversion/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
        MODULE  ?cstartup

        ;; Forward declaration of sections.
        SECTION CSTACK:DATA:NOROOT(3)          ; main stack, sized by the linker config
        SECTION .intvec:CODE:NOROOT(2)         ; interrupt vector table section

        EXTERN  __iar_program_start            ; IAR C runtime startup entry
        EXTERN  SystemInit                     ; CMSIS system/clock initialization
        PUBLIC  __vector_table

        DATA
;; Cortex-M0 vector table.  Word 0 is the initial main stack pointer (end of
;; CSTACK), word 1 the reset vector; then the fixed core exceptions followed
;; by the 32 STM32F072 peripheral interrupt vectors, in NVIC position order.
__vector_table
        DCD     sfe(CSTACK)
        DCD     Reset_Handler                  ; Reset Handler
        DCD     NMI_Handler                    ; NMI Handler
        DCD     HardFault_Handler              ; Hard Fault Handler
        DCD     0                              ; Reserved
        DCD     0                              ; Reserved
        DCD     0                              ; Reserved
        DCD     0                              ; Reserved
        DCD     0                              ; Reserved
        DCD     0                              ; Reserved
        DCD     0                              ; Reserved
        DCD     SVC_Handler                    ; SVCall Handler
        DCD     0                              ; Reserved
        DCD     0                              ; Reserved
        DCD     PendSV_Handler                 ; PendSV Handler
        DCD     SysTick_Handler                ; SysTick Handler

        ; External Interrupts
        DCD     WWDG_IRQHandler                ; Window Watchdog
        DCD     PVD_VDDIO2_IRQHandler          ; PVD and VDDIO2 through EXTI Line detect
        DCD     RTC_IRQHandler                 ; RTC through EXTI Line
        DCD     FLASH_IRQHandler               ; FLASH
        DCD     RCC_CRS_IRQHandler             ; RCC and CRS
        DCD     EXTI0_1_IRQHandler             ; EXTI Line 0 and 1
        DCD     EXTI2_3_IRQHandler             ; EXTI Line 2 and 3
        DCD     EXTI4_15_IRQHandler            ; EXTI Line 4 to 15
        DCD     TSC_IRQHandler                 ; TSC
        DCD     DMA1_Channel1_IRQHandler       ; DMA1 Channel 1
        DCD     DMA1_Channel2_3_IRQHandler     ; DMA1 Channel 2 and Channel 3
        DCD     DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
        DCD     ADC1_COMP_IRQHandler           ; ADC1, COMP1 and COMP2
        DCD     TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
        DCD     TIM1_CC_IRQHandler             ; TIM1 Capture Compare
        DCD     TIM2_IRQHandler                ; TIM2
        DCD     TIM3_IRQHandler                ; TIM3
        DCD     TIM6_DAC_IRQHandler            ; TIM6 and DAC
        DCD     TIM7_IRQHandler                ; TIM7
        DCD     TIM14_IRQHandler               ; TIM14
        DCD     TIM15_IRQHandler               ; TIM15
        DCD     TIM16_IRQHandler               ; TIM16
        DCD     TIM17_IRQHandler               ; TIM17
        DCD     I2C1_IRQHandler                ; I2C1
        DCD     I2C2_IRQHandler                ; I2C2
        DCD     SPI1_IRQHandler                ; SPI1
        DCD     SPI2_IRQHandler                ; SPI2
        DCD     USART1_IRQHandler              ; USART1
        DCD     USART2_IRQHandler              ; USART2
        DCD     USART3_4_IRQHandler            ; USART3 and USART4
        DCD     CEC_CAN_IRQHandler             ; CEC and CAN
        DCD     USB_IRQHandler                 ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
        THUMB

;; Reset handler: run SystemInit (clock/system configuration), then jump to
;; the IAR C runtime entry, which initializes data/bss and calls main().
;; Declared PUBWEAK so an application-supplied Reset_Handler wins.
        PUBWEAK Reset_Handler
        SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
        LDR     R0, =SystemInit
        BLX     R0
        LDR     R0, =__iar_program_start
        BX      R0

;; Default exception/interrupt handlers.  Each is a PUBWEAK infinite loop:
;; an application handler with the same name overrides it, and any
;; unexpected interrupt parks the CPU here, preserving state for a debugger.
        PUBWEAK NMI_Handler
        SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
        B NMI_Handler

        PUBWEAK HardFault_Handler
        SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
        B HardFault_Handler

        PUBWEAK SVC_Handler
        SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
        B SVC_Handler

        PUBWEAK PendSV_Handler
        SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
        B PendSV_Handler

        PUBWEAK SysTick_Handler
        SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
        B SysTick_Handler

        PUBWEAK WWDG_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
        B WWDG_IRQHandler

        PUBWEAK PVD_VDDIO2_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
        B PVD_VDDIO2_IRQHandler

        PUBWEAK RTC_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
        B RTC_IRQHandler

        PUBWEAK FLASH_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
        B FLASH_IRQHandler

        PUBWEAK RCC_CRS_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
        B RCC_CRS_IRQHandler

        PUBWEAK EXTI0_1_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
        B EXTI0_1_IRQHandler

        PUBWEAK EXTI2_3_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
        B EXTI2_3_IRQHandler

        PUBWEAK EXTI4_15_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
        B EXTI4_15_IRQHandler

        PUBWEAK TSC_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
        B TSC_IRQHandler

        PUBWEAK DMA1_Channel1_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
        B DMA1_Channel1_IRQHandler

        PUBWEAK DMA1_Channel2_3_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
        B DMA1_Channel2_3_IRQHandler

        PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
        B DMA1_Channel4_5_6_7_IRQHandler

        PUBWEAK ADC1_COMP_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
        B ADC1_COMP_IRQHandler

        PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
        B TIM1_BRK_UP_TRG_COM_IRQHandler

        PUBWEAK TIM1_CC_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
        B TIM1_CC_IRQHandler

        PUBWEAK TIM2_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
        B TIM2_IRQHandler

        PUBWEAK TIM3_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
        B TIM3_IRQHandler

        PUBWEAK TIM6_DAC_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
        B TIM6_DAC_IRQHandler

        PUBWEAK TIM7_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
        B TIM7_IRQHandler

        PUBWEAK TIM14_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
        B TIM14_IRQHandler

        PUBWEAK TIM15_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
        B TIM15_IRQHandler

        PUBWEAK TIM16_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
        B TIM16_IRQHandler

        PUBWEAK TIM17_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
        B TIM17_IRQHandler

        PUBWEAK I2C1_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
        B I2C1_IRQHandler

        PUBWEAK I2C2_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
        B I2C2_IRQHandler

        PUBWEAK SPI1_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
        B SPI1_IRQHandler

        PUBWEAK SPI2_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
        B SPI2_IRQHandler

        PUBWEAK USART1_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
        B USART1_IRQHandler

        PUBWEAK USART2_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
        B USART2_IRQHandler

        PUBWEAK USART3_4_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
        B USART3_4_IRQHandler

        PUBWEAK CEC_CAN_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
        B CEC_CAN_IRQHandler

        PUBWEAK USB_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
        B USB_IRQHandler

        END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,445
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/EXTI/EXTI_ToggleLedOnIT_Init/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size      EQU     0x400                  ; 1 KiB main stack (tailor to application)

                AREA    STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem       SPACE   Stack_Size
__initial_sp                                   ; initial MSP = top of STACK (stack grows down)

; <h> Heap Configuration
;   <o>  Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>

Heap_Size       EQU     0x200                  ; 512 B heap for the C library allocator

                AREA    HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem        SPACE   Heap_Size
__heap_limit

                PRESERVE8                      ; code preserves 8-byte stack alignment (AAPCS)
                THUMB
; Vector Table Mapped to Address 0 at Reset.
; Word 0 is the initial MSP, word 1 the reset vector, followed by the fixed
; Cortex-M0 core exceptions and the 32 STM32F072 peripheral IRQ vectors.
                AREA    RESET, DATA, READONLY
                EXPORT  __Vectors
                EXPORT  __Vectors_End
                EXPORT  __Vectors_Size

__Vectors       DCD     __initial_sp                   ; Top of Stack
                DCD     Reset_Handler                  ; Reset Handler
                DCD     NMI_Handler                    ; NMI Handler
                DCD     HardFault_Handler              ; Hard Fault Handler
                DCD     0                              ; Reserved
                DCD     0                              ; Reserved
                DCD     0                              ; Reserved
                DCD     0                              ; Reserved
                DCD     0                              ; Reserved
                DCD     0                              ; Reserved
                DCD     0                              ; Reserved
                DCD     SVC_Handler                    ; SVCall Handler
                DCD     0                              ; Reserved
                DCD     0                              ; Reserved
                DCD     PendSV_Handler                 ; PendSV Handler
                DCD     SysTick_Handler                ; SysTick Handler

                ; External Interrupts
                DCD     WWDG_IRQHandler                ; Window Watchdog
                DCD     PVD_VDDIO2_IRQHandler          ; PVD through EXTI Line detect
                DCD     RTC_IRQHandler                 ; RTC through EXTI Line
                DCD     FLASH_IRQHandler               ; FLASH
                DCD     RCC_CRS_IRQHandler             ; RCC and CRS
                DCD     EXTI0_1_IRQHandler             ; EXTI Line 0 and 1
                DCD     EXTI2_3_IRQHandler             ; EXTI Line 2 and 3
                DCD     EXTI4_15_IRQHandler            ; EXTI Line 4 to 15
                DCD     TSC_IRQHandler                 ; TS
                DCD     DMA1_Channel1_IRQHandler       ; DMA1 Channel 1
                DCD     DMA1_Channel2_3_IRQHandler     ; DMA1 Channel 2 and Channel 3
                DCD     DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
                DCD     ADC1_COMP_IRQHandler           ; ADC1, COMP1 and COMP2
                DCD     TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
                DCD     TIM1_CC_IRQHandler             ; TIM1 Capture Compare
                DCD     TIM2_IRQHandler                ; TIM2
                DCD     TIM3_IRQHandler                ; TIM3
                DCD     TIM6_DAC_IRQHandler            ; TIM6 and DAC
                DCD     TIM7_IRQHandler                ; TIM7
                DCD     TIM14_IRQHandler               ; TIM14
                DCD     TIM15_IRQHandler               ; TIM15
                DCD     TIM16_IRQHandler               ; TIM16
                DCD     TIM17_IRQHandler               ; TIM17
                DCD     I2C1_IRQHandler                ; I2C1
                DCD     I2C2_IRQHandler                ; I2C2
                DCD     SPI1_IRQHandler                ; SPI1
                DCD     SPI2_IRQHandler                ; SPI2
                DCD     USART1_IRQHandler              ; USART1
                DCD     USART2_IRQHandler              ; USART2
                DCD     USART3_4_IRQHandler            ; USART3 & USART4
                DCD     CEC_CAN_IRQHandler             ; CEC and CAN
                DCD     USB_IRQHandler                 ; USB
__Vectors_End

__Vectors_Size  EQU     __Vectors_End - __Vectors      ; table size in bytes
                AREA    |.text|, CODE, READONLY

; Reset handler routine: call SystemInit (clock/system configuration), then
; branch to the C library entry __main, which initializes RAM (scatter
; loading) and eventually calls main().  [WEAK] lets an application override.
Reset_Handler    PROC
                 EXPORT  Reset_Handler                 [WEAK]
        IMPORT  __main
        IMPORT  SystemInit
                 LDR     R0, =SystemInit
                 BLX     R0
                 LDR     R0, =__main
                 BX      R0
                 ENDP

; Dummy Exception Handlers (infinite loops which can be modified).
; Each spins in place so processor state is preserved for a debugger.
NMI_Handler     PROC
                EXPORT  NMI_Handler                    [WEAK]
                B       .
                ENDP
HardFault_Handler\
                PROC
                EXPORT  HardFault_Handler              [WEAK]
                B       .
                ENDP
SVC_Handler     PROC
                EXPORT  SVC_Handler                    [WEAK]
                B       .
                ENDP
PendSV_Handler  PROC
                EXPORT  PendSV_Handler                 [WEAK]
                B       .
                ENDP
SysTick_Handler PROC
                EXPORT  SysTick_Handler                [WEAK]
                B       .
                ENDP
; Catch-all for every peripheral IRQ: all handler names below are weakly
; exported and share this single infinite loop, so any interrupt without an
; application-defined handler parks the CPU here for debugger inspection.
Default_Handler PROC

                EXPORT  WWDG_IRQHandler                [WEAK]
                EXPORT  PVD_VDDIO2_IRQHandler          [WEAK]
                EXPORT  RTC_IRQHandler                 [WEAK]
                EXPORT  FLASH_IRQHandler               [WEAK]
                EXPORT  RCC_CRS_IRQHandler             [WEAK]
                EXPORT  EXTI0_1_IRQHandler             [WEAK]
                EXPORT  EXTI2_3_IRQHandler             [WEAK]
                EXPORT  EXTI4_15_IRQHandler            [WEAK]
                EXPORT  TSC_IRQHandler                 [WEAK]
                EXPORT  DMA1_Channel1_IRQHandler       [WEAK]
                EXPORT  DMA1_Channel2_3_IRQHandler     [WEAK]
                EXPORT  DMA1_Channel4_5_6_7_IRQHandler [WEAK]
                EXPORT  ADC1_COMP_IRQHandler           [WEAK]
                EXPORT  TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
                EXPORT  TIM1_CC_IRQHandler             [WEAK]
                EXPORT  TIM2_IRQHandler                [WEAK]
                EXPORT  TIM3_IRQHandler                [WEAK]
                EXPORT  TIM6_DAC_IRQHandler            [WEAK]
                EXPORT  TIM7_IRQHandler                [WEAK]
                EXPORT  TIM14_IRQHandler               [WEAK]
                EXPORT  TIM15_IRQHandler               [WEAK]
                EXPORT  TIM16_IRQHandler               [WEAK]
                EXPORT  TIM17_IRQHandler               [WEAK]
                EXPORT  I2C1_IRQHandler                [WEAK]
                EXPORT  I2C2_IRQHandler                [WEAK]
                EXPORT  SPI1_IRQHandler                [WEAK]
                EXPORT  SPI2_IRQHandler                [WEAK]
                EXPORT  USART1_IRQHandler              [WEAK]
                EXPORT  USART2_IRQHandler              [WEAK]
                EXPORT  USART3_4_IRQHandler            [WEAK]
                EXPORT  CEC_CAN_IRQHandler             [WEAK]
                EXPORT  USB_IRQHandler                 [WEAK]

; All labels resolve to the same address: the B . below.
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
                B       .
                ENDP

                ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
                 IF      :DEF:__MICROLIB
; MicroLIB: only export the stack/heap boundary symbols it expects.
                 EXPORT  __initial_sp
                 EXPORT  __heap_base
                 EXPORT  __heap_limit

                 ELSE
; Standard C library (two-region memory model): __user_initial_stackheap
; returns heap base (R0), stack base (R1), heap limit (R2), stack limit (R3).
                 IMPORT  __use_two_region_memory
                 EXPORT  __user_initial_stackheap

__user_initial_stackheap
                 LDR     R0, =  Heap_Mem
                 LDR     R1, =(Stack_Mem + Stack_Size)
                 LDR     R2, = (Heap_Mem +  Heap_Size)
                 LDR     R3, = Stack_Mem
                 BX      LR

                 ALIGN

                 ENDIF

                 END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/EXTI/EXTI_ToggleLedOnIT_Init/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
/* Assembler setup: unified syntax, Cortex-M0 (Thumb-only, software FP). */
  .syntax unified
  .cpu cortex-m0
  .fpu softvfp
  .thumb

.global g_pfnVectors
.global Default_Handler

/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
  .section .text.Reset_Handler
  .weak Reset_Handler
  .type Reset_Handler, %function
/**
 * @brief Reset entry point.  Sets the stack pointer, copies the .data
 *        initializers from flash to SRAM, zero-fills .bss, then calls
 *        SystemInit, the C static constructors, and main().
 */
Reset_Handler:
  ldr   r0, =_estack
  mov   sp, r0          /* set stack pointer */

/* Copy the data segment initializers from flash to SRAM.
   r1 = byte offset into .data, advanced by 4 each iteration. */
  movs r1, #0
  b LoopCopyDataInit

CopyDataInit:
  ldr r3, =_sidata      /* flash image of .data */
  ldr r3, [r3, r1]
  str r3, [r0, r1]      /* r0 = _sdata (reloaded in LoopCopyDataInit) */
  adds r1, r1, #4

LoopCopyDataInit:
  ldr r0, =_sdata
  ldr r3, =_edata
  adds r2, r0, r1
  cmp r2, r3
  bcc CopyDataInit      /* continue while _sdata + offset < _edata */

  ldr r2, =_sbss
  b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
  movs r3, #0
  str r3, [r2]
  adds r2, r2, #4

LoopFillZerobss:
  ldr r3, = _ebss
  cmp r2, r3
  bcc FillZerobss       /* continue while r2 < _ebss */

/* Call the clock system intitialization function.*/
  bl SystemInit
/* Call static constructors */
  bl __libc_init_array
/* Call the application's entry point.*/
  bl main

LoopForever:
  b LoopForever         /* main() returned: spin forever */

.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
  .section .text.Default_Handler,"ax",%progbits
/* Entry for any exception/IRQ without an application override (all handler
   symbols are weakly aliased here).  Spins in place so the full system state
   is preserved for examination by a debugger. */
Default_Handler:
Infinite_Loop:
  b Infinite_Loop
  .size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
/* Each core exception and peripheral IRQ handler is declared weak and
   aliased (as a Thumb symbol) to Default_Handler.  An application overrides
   one simply by defining a function with the same name; anything left
   unhandled parks the CPU in Default_Handler's infinite loop. */
  .weak      NMI_Handler
  .thumb_set NMI_Handler,Default_Handler

  .weak      HardFault_Handler
  .thumb_set HardFault_Handler,Default_Handler

  .weak      SVC_Handler
  .thumb_set SVC_Handler,Default_Handler

  .weak      PendSV_Handler
  .thumb_set PendSV_Handler,Default_Handler

  .weak      SysTick_Handler
  .thumb_set SysTick_Handler,Default_Handler

/* STM32F072 peripheral interrupt handlers. */
  .weak      WWDG_IRQHandler
  .thumb_set WWDG_IRQHandler,Default_Handler

  .weak      PVD_VDDIO2_IRQHandler
  .thumb_set PVD_VDDIO2_IRQHandler,Default_Handler

  .weak      RTC_IRQHandler
  .thumb_set RTC_IRQHandler,Default_Handler

  .weak      FLASH_IRQHandler
  .thumb_set FLASH_IRQHandler,Default_Handler

  .weak      RCC_CRS_IRQHandler
  .thumb_set RCC_CRS_IRQHandler,Default_Handler

  .weak      EXTI0_1_IRQHandler
  .thumb_set EXTI0_1_IRQHandler,Default_Handler

  .weak      EXTI2_3_IRQHandler
  .thumb_set EXTI2_3_IRQHandler,Default_Handler

  .weak      EXTI4_15_IRQHandler
  .thumb_set EXTI4_15_IRQHandler,Default_Handler

  .weak      TSC_IRQHandler
  .thumb_set TSC_IRQHandler,Default_Handler

  .weak      DMA1_Channel1_IRQHandler
  .thumb_set DMA1_Channel1_IRQHandler,Default_Handler

  .weak      DMA1_Channel2_3_IRQHandler
  .thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler

  .weak      DMA1_Channel4_5_6_7_IRQHandler
  .thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler

  .weak      ADC1_COMP_IRQHandler
  .thumb_set ADC1_COMP_IRQHandler,Default_Handler

  .weak      TIM1_BRK_UP_TRG_COM_IRQHandler
  .thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler

  .weak      TIM1_CC_IRQHandler
  .thumb_set TIM1_CC_IRQHandler,Default_Handler

  .weak      TIM2_IRQHandler
  .thumb_set TIM2_IRQHandler,Default_Handler

  .weak      TIM3_IRQHandler
  .thumb_set TIM3_IRQHandler,Default_Handler

  .weak      TIM6_DAC_IRQHandler
  .thumb_set TIM6_DAC_IRQHandler,Default_Handler

  .weak      TIM7_IRQHandler
  .thumb_set TIM7_IRQHandler,Default_Handler

  .weak      TIM14_IRQHandler
  .thumb_set TIM14_IRQHandler,Default_Handler

  .weak      TIM15_IRQHandler
  .thumb_set TIM15_IRQHandler,Default_Handler

  .weak      TIM16_IRQHandler
  .thumb_set TIM16_IRQHandler,Default_Handler

  .weak      TIM17_IRQHandler
  .thumb_set TIM17_IRQHandler,Default_Handler

  .weak      I2C1_IRQHandler
  .thumb_set I2C1_IRQHandler,Default_Handler

  .weak      I2C2_IRQHandler
  .thumb_set I2C2_IRQHandler,Default_Handler

  .weak      SPI1_IRQHandler
  .thumb_set SPI1_IRQHandler,Default_Handler

  .weak      SPI2_IRQHandler
  .thumb_set SPI2_IRQHandler,Default_Handler

  .weak      USART1_IRQHandler
  .thumb_set USART1_IRQHandler,Default_Handler

  .weak      USART2_IRQHandler
  .thumb_set USART2_IRQHandler,Default_Handler

  .weak      USART3_4_IRQHandler
  .thumb_set USART3_4_IRQHandler,Default_Handler

  .weak      CEC_CAN_IRQHandler
  .thumb_set CEC_CAN_IRQHandler,Default_Handler

  .weak      USB_IRQHandler
  .thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/EXTI/EXTI_ToggleLedOnIT_Init/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
        MODULE  ?cstartup

        ;; Forward declaration of sections.
        SECTION CSTACK:DATA:NOROOT(3)          ; main stack, sized by the linker config
        SECTION .intvec:CODE:NOROOT(2)         ; interrupt vector table section

        EXTERN  __iar_program_start            ; IAR C runtime startup entry
        EXTERN  SystemInit                     ; CMSIS system/clock initialization
        PUBLIC  __vector_table

        DATA
;; Cortex-M0 vector table.  Word 0 is the initial main stack pointer (end of
;; CSTACK), word 1 the reset vector; then the fixed core exceptions followed
;; by the 32 STM32F072 peripheral interrupt vectors, in NVIC position order.
__vector_table
        DCD     sfe(CSTACK)
        DCD     Reset_Handler                  ; Reset Handler
        DCD     NMI_Handler                    ; NMI Handler
        DCD     HardFault_Handler              ; Hard Fault Handler
        DCD     0                              ; Reserved
        DCD     0                              ; Reserved
        DCD     0                              ; Reserved
        DCD     0                              ; Reserved
        DCD     0                              ; Reserved
        DCD     0                              ; Reserved
        DCD     0                              ; Reserved
        DCD     SVC_Handler                    ; SVCall Handler
        DCD     0                              ; Reserved
        DCD     0                              ; Reserved
        DCD     PendSV_Handler                 ; PendSV Handler
        DCD     SysTick_Handler                ; SysTick Handler

        ; External Interrupts
        DCD     WWDG_IRQHandler                ; Window Watchdog
        DCD     PVD_VDDIO2_IRQHandler          ; PVD and VDDIO2 through EXTI Line detect
        DCD     RTC_IRQHandler                 ; RTC through EXTI Line
        DCD     FLASH_IRQHandler               ; FLASH
        DCD     RCC_CRS_IRQHandler             ; RCC and CRS
        DCD     EXTI0_1_IRQHandler             ; EXTI Line 0 and 1
        DCD     EXTI2_3_IRQHandler             ; EXTI Line 2 and 3
        DCD     EXTI4_15_IRQHandler            ; EXTI Line 4 to 15
        DCD     TSC_IRQHandler                 ; TSC
        DCD     DMA1_Channel1_IRQHandler       ; DMA1 Channel 1
        DCD     DMA1_Channel2_3_IRQHandler     ; DMA1 Channel 2 and Channel 3
        DCD     DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
        DCD     ADC1_COMP_IRQHandler           ; ADC1, COMP1 and COMP2
        DCD     TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
        DCD     TIM1_CC_IRQHandler             ; TIM1 Capture Compare
        DCD     TIM2_IRQHandler                ; TIM2
        DCD     TIM3_IRQHandler                ; TIM3
        DCD     TIM6_DAC_IRQHandler            ; TIM6 and DAC
        DCD     TIM7_IRQHandler                ; TIM7
        DCD     TIM14_IRQHandler               ; TIM14
        DCD     TIM15_IRQHandler               ; TIM15
        DCD     TIM16_IRQHandler               ; TIM16
        DCD     TIM17_IRQHandler               ; TIM17
        DCD     I2C1_IRQHandler                ; I2C1
        DCD     I2C2_IRQHandler                ; I2C2
        DCD     SPI1_IRQHandler                ; SPI1
        DCD     SPI2_IRQHandler                ; SPI2
        DCD     USART1_IRQHandler              ; USART1
        DCD     USART2_IRQHandler              ; USART2
        DCD     USART3_4_IRQHandler            ; USART3 and USART4
        DCD     CEC_CAN_IRQHandler             ; CEC and CAN
        DCD     USB_IRQHandler                 ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
        THUMB

;; Reset handler: run SystemInit (clock/system configuration), then jump to
;; the IAR C runtime entry, which initializes data/bss and calls main().
;; Declared PUBWEAK so an application-supplied Reset_Handler wins.
        PUBWEAK Reset_Handler
        SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
        LDR     R0, =SystemInit
        BLX     R0
        LDR     R0, =__iar_program_start
        BX      R0

;; Default exception/interrupt handlers.  Each is a PUBWEAK infinite loop:
;; an application handler with the same name overrides it, and any
;; unexpected interrupt parks the CPU here, preserving state for a debugger.
        PUBWEAK NMI_Handler
        SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
        B NMI_Handler

        PUBWEAK HardFault_Handler
        SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
        B HardFault_Handler

        PUBWEAK SVC_Handler
        SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
        B SVC_Handler

        PUBWEAK PendSV_Handler
        SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
        B PendSV_Handler

        PUBWEAK SysTick_Handler
        SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
        B SysTick_Handler

        PUBWEAK WWDG_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
        B WWDG_IRQHandler

        PUBWEAK PVD_VDDIO2_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
        B PVD_VDDIO2_IRQHandler

        PUBWEAK RTC_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
        B RTC_IRQHandler

        PUBWEAK FLASH_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
        B FLASH_IRQHandler

        PUBWEAK RCC_CRS_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
        B RCC_CRS_IRQHandler

        PUBWEAK EXTI0_1_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
        B EXTI0_1_IRQHandler

        PUBWEAK EXTI2_3_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
        B EXTI2_3_IRQHandler

        PUBWEAK EXTI4_15_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
        B EXTI4_15_IRQHandler

        PUBWEAK TSC_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
        B TSC_IRQHandler

        PUBWEAK DMA1_Channel1_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
        B DMA1_Channel1_IRQHandler

        PUBWEAK DMA1_Channel2_3_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
        B DMA1_Channel2_3_IRQHandler

        PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
        B DMA1_Channel4_5_6_7_IRQHandler

        PUBWEAK ADC1_COMP_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
        B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,445
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/EXTI/EXTI_ToggleLedOnIT/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TS
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/EXTI/EXTI_ToggleLedOnIT/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system intitialization function.*/
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/EXTI/EXTI_ToggleLedOnIT/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,445
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/RCC/RCC_UseHSI_PLLasSystemClock/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
;   <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size      EQU     0x400                   ; 1 KiB main stack

                AREA    STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem       SPACE   Stack_Size
__initial_sp                                    ; top of stack (stack grows down); vector table entry 0

; <h> Heap Configuration
;   <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size       EQU     0x200                   ; 512 B C-library heap

                AREA    HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem        SPACE   Heap_Size
__heap_limit

                PRESERVE8
                THUMB

; Vector Table Mapped to Address 0 at Reset
; Cortex-M0 core exception vectors followed by the 32 STM32F072 peripheral
; interrupt vectors. Entry order is fixed by the hardware; do not reorder.
                AREA    RESET, DATA, READONLY
                EXPORT  __Vectors
                EXPORT  __Vectors_End
                EXPORT  __Vectors_Size

__Vectors       DCD     __initial_sp                   ; Top of Stack
                DCD     Reset_Handler                  ; Reset Handler
                DCD     NMI_Handler                    ; NMI Handler
                DCD     HardFault_Handler              ; Hard Fault Handler
                DCD     0                              ; Reserved
                DCD     0                              ; Reserved
                DCD     0                              ; Reserved
                DCD     0                              ; Reserved
                DCD     0                              ; Reserved
                DCD     0                              ; Reserved
                DCD     0                              ; Reserved
                DCD     SVC_Handler                    ; SVCall Handler
                DCD     0                              ; Reserved
                DCD     0                              ; Reserved
                DCD     PendSV_Handler                 ; PendSV Handler
                DCD     SysTick_Handler                ; SysTick Handler

                ; External Interrupts
                DCD     WWDG_IRQHandler                ; Window Watchdog
                DCD     PVD_VDDIO2_IRQHandler          ; PVD and VDDIO2 through EXTI Line detect
                DCD     RTC_IRQHandler                 ; RTC through EXTI Line
                DCD     FLASH_IRQHandler               ; FLASH
                DCD     RCC_CRS_IRQHandler             ; RCC and CRS
                DCD     EXTI0_1_IRQHandler             ; EXTI Line 0 and 1
                DCD     EXTI2_3_IRQHandler             ; EXTI Line 2 and 3
                DCD     EXTI4_15_IRQHandler            ; EXTI Line 4 to 15
                DCD     TSC_IRQHandler                 ; TSC
                DCD     DMA1_Channel1_IRQHandler       ; DMA1 Channel 1
                DCD     DMA1_Channel2_3_IRQHandler     ; DMA1 Channel 2 and Channel 3
                DCD     DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
                DCD     ADC1_COMP_IRQHandler           ; ADC1, COMP1 and COMP2
                DCD     TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
                DCD     TIM1_CC_IRQHandler             ; TIM1 Capture Compare
                DCD     TIM2_IRQHandler                ; TIM2
                DCD     TIM3_IRQHandler                ; TIM3
                DCD     TIM6_DAC_IRQHandler            ; TIM6 and DAC
                DCD     TIM7_IRQHandler                ; TIM7
                DCD     TIM14_IRQHandler               ; TIM14
                DCD     TIM15_IRQHandler               ; TIM15
                DCD     TIM16_IRQHandler               ; TIM16
                DCD     TIM17_IRQHandler               ; TIM17
                DCD     I2C1_IRQHandler                ; I2C1
                DCD     I2C2_IRQHandler                ; I2C2
                DCD     SPI1_IRQHandler                ; SPI1
                DCD     SPI2_IRQHandler                ; SPI2
                DCD     USART1_IRQHandler              ; USART1
                DCD     USART2_IRQHandler              ; USART2
                DCD     USART3_4_IRQHandler            ; USART3 & USART4
                DCD     CEC_CAN_IRQHandler             ; CEC and CAN
                DCD     USB_IRQHandler                 ; USB

__Vectors_End

__Vectors_Size  EQU     __Vectors_End - __Vectors

                AREA    |.text|, CODE, READONLY

; Reset handler routine
; Calls SystemInit() (system clock configuration) and then branches to the
; ARM C library entry point __main, which performs scatter-loading of the
; RW/ZI data regions and finally calls main(). BX to __main never returns.
Reset_Handler   PROC
                EXPORT  Reset_Handler                 [WEAK]
        IMPORT  __main
        IMPORT  SystemInit
                LDR     R0, =SystemInit
                BLX     R0
                LDR     R0, =__main
                BX      R0
                ENDP

; Dummy Exception Handlers (infinite loops which can be modified)
; Each is exported WEAK so an application-defined handler of the same
; name overrides the spin loop.

NMI_Handler     PROC
                EXPORT  NMI_Handler                    [WEAK]
                B       .
                ENDP
HardFault_Handler\
                PROC
                EXPORT  HardFault_Handler              [WEAK]
                B       .
                ENDP
SVC_Handler     PROC
                EXPORT  SVC_Handler                    [WEAK]
                B       .
                ENDP
PendSV_Handler  PROC
                EXPORT  PendSV_Handler                 [WEAK]
                B       .
                ENDP
SysTick_Handler PROC
                EXPORT  SysTick_Handler                [WEAK]
                B       .
                ENDP

; All peripheral interrupts share this single weak handler: an infinite
; loop that preserves machine state for inspection by a debugger.
Default_Handler PROC

                EXPORT  WWDG_IRQHandler                [WEAK]
                EXPORT  PVD_VDDIO2_IRQHandler          [WEAK]
                EXPORT  RTC_IRQHandler                 [WEAK]
                EXPORT  FLASH_IRQHandler               [WEAK]
                EXPORT  RCC_CRS_IRQHandler             [WEAK]
                EXPORT  EXTI0_1_IRQHandler             [WEAK]
                EXPORT  EXTI2_3_IRQHandler             [WEAK]
                EXPORT  EXTI4_15_IRQHandler            [WEAK]
                EXPORT  TSC_IRQHandler                 [WEAK]
                EXPORT  DMA1_Channel1_IRQHandler       [WEAK]
                EXPORT  DMA1_Channel2_3_IRQHandler     [WEAK]
                EXPORT  DMA1_Channel4_5_6_7_IRQHandler [WEAK]
                EXPORT  ADC1_COMP_IRQHandler           [WEAK]
                EXPORT  TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
                EXPORT  TIM1_CC_IRQHandler             [WEAK]
                EXPORT  TIM2_IRQHandler                [WEAK]
                EXPORT  TIM3_IRQHandler                [WEAK]
                EXPORT  TIM6_DAC_IRQHandler            [WEAK]
                EXPORT  TIM7_IRQHandler                [WEAK]
                EXPORT  TIM14_IRQHandler               [WEAK]
                EXPORT  TIM15_IRQHandler               [WEAK]
                EXPORT  TIM16_IRQHandler               [WEAK]
                EXPORT  TIM17_IRQHandler               [WEAK]
                EXPORT  I2C1_IRQHandler                [WEAK]
                EXPORT  I2C2_IRQHandler                [WEAK]
                EXPORT  SPI1_IRQHandler                [WEAK]
                EXPORT  SPI2_IRQHandler                [WEAK]
                EXPORT  USART1_IRQHandler              [WEAK]
                EXPORT  USART2_IRQHandler              [WEAK]
                EXPORT  USART3_4_IRQHandler            [WEAK]
                EXPORT  CEC_CAN_IRQHandler             [WEAK]
                EXPORT  USB_IRQHandler                 [WEAK]

; All of the following labels alias the same spin loop below.
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler

                B       .

                ENDP

                ALIGN

;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
                 IF      :DEF:__MICROLIB

                 EXPORT  __initial_sp
                 EXPORT  __heap_base
                 EXPORT  __heap_limit

                 ELSE

                 IMPORT  __use_two_region_memory
                 EXPORT  __user_initial_stackheap

; Called by the ARM C library start-up code to report the stack/heap bounds:
; R0 = heap base, R1 = stack top, R2 = heap limit, R3 = stack limit.
__user_initial_stackheap

                 LDR     R0, =  Heap_Mem
                 LDR     R1, =(Stack_Mem + Stack_Size)
                 LDR     R2, = (Heap_Mem +  Heap_Size)
                 LDR     R3, = Stack_Mem
                 BX      LR

                 ALIGN

                 ENDIF

                 END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/RCC/RCC_UseHSI_PLLasSystemClock/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
  .syntax unified
  .cpu cortex-m0
  .fpu softvfp
  .thumb

.global g_pfnVectors
.global Default_Handler

/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss

  .section .text.Reset_Handler
  .weak Reset_Handler
  .type Reset_Handler, %function
/* Entry point after reset: set the stack pointer, copy the .data image
   from flash to SRAM, zero-fill .bss, run SystemInit() and the C++
   static constructors, then call main(). Must never return. */
Reset_Handler:
  ldr   r0, =_estack
  mov   sp, r0          /* set stack pointer */

/* Copy the data segment initializers from flash to SRAM */
  movs r1, #0           /* r1 = byte offset into .data */
  b LoopCopyDataInit

CopyDataInit:
  ldr r3, =_sidata
  ldr r3, [r3, r1]      /* load word from the flash image of .data */
  str r3, [r0, r1]      /* store it to its SRAM address (r0 = _sdata) */
  adds r1, r1, #4

LoopCopyDataInit:
  ldr r0, =_sdata
  ldr r3, =_edata
  adds r2, r0, r1
  cmp r2, r3
  bcc CopyDataInit      /* loop while _sdata + offset < _edata */
  ldr r2, =_sbss
  b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
  movs r3, #0
  str r3, [r2]
  adds r2, r2, #4

LoopFillZerobss:
  ldr r3, = _ebss
  cmp r2, r3
  bcc FillZerobss       /* loop while r2 < _ebss */

/* Call the clock system initialization function. */
  bl  SystemInit
/* Call static constructors */
  bl __libc_init_array
/* Call the application's entry point. */
  bl main

LoopForever:
  b LoopForever         /* trap here if main() ever returns */

.size Reset_Handler, .-Reset_Handler

/**
 * @brief  This is the code that gets called when the processor receives an
 *         unexpected interrupt.  This simply enters an infinite loop, preserving
 *         the system state for examination by a debugger.
 *
 * @param  None
 * @retval : None
*/
  .section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
  b Infinite_Loop
  .size Default_Handler, .-Default_Handler

/******************************************************************************
*
* The minimal vector table for a Cortex M0.  Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000. Entry order is fixed by the hardware; do not reorder.
*
******************************************************************************/
  .section .isr_vector,"a",%progbits
  .type g_pfnVectors, %object
  .size g_pfnVectors, .-g_pfnVectors

g_pfnVectors:
  .word _estack
  .word Reset_Handler
  .word NMI_Handler
  .word HardFault_Handler
  .word 0
  .word 0
  .word 0
  .word 0
  .word 0
  .word 0
  .word 0
  .word SVC_Handler
  .word 0
  .word 0
  .word PendSV_Handler
  .word SysTick_Handler
  .word WWDG_IRQHandler                   /* Window WatchDog              */
  .word PVD_VDDIO2_IRQHandler             /* PVD and VDDIO2 through EXTI Line detect */
  .word RTC_IRQHandler                    /* RTC through the EXTI line    */
  .word FLASH_IRQHandler                  /* FLASH                        */
  .word RCC_CRS_IRQHandler                /* RCC and CRS                  */
  .word EXTI0_1_IRQHandler                /* EXTI Line 0 and 1            */
  .word EXTI2_3_IRQHandler                /* EXTI Line 2 and 3            */
  .word EXTI4_15_IRQHandler               /* EXTI Line 4 to 15            */
  .word TSC_IRQHandler                    /* TSC                          */
  .word DMA1_Channel1_IRQHandler          /* DMA1 Channel 1               */
  .word DMA1_Channel2_3_IRQHandler        /* DMA1 Channel 2 and Channel 3 */
  .word DMA1_Channel4_5_6_7_IRQHandler    /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
  .word ADC1_COMP_IRQHandler              /* ADC1, COMP1 and COMP2        */
  .word TIM1_BRK_UP_TRG_COM_IRQHandler    /* TIM1 Break, Update, Trigger and Commutation */
  .word TIM1_CC_IRQHandler                /* TIM1 Capture Compare         */
  .word TIM2_IRQHandler                   /* TIM2                         */
  .word TIM3_IRQHandler                   /* TIM3                         */
  .word TIM6_DAC_IRQHandler               /* TIM6 and DAC                 */
  .word TIM7_IRQHandler                   /* TIM7                         */
  .word TIM14_IRQHandler                  /* TIM14                        */
  .word TIM15_IRQHandler                  /* TIM15                        */
  .word TIM16_IRQHandler                  /* TIM16                        */
  .word TIM17_IRQHandler                  /* TIM17                        */
  .word I2C1_IRQHandler                   /* I2C1                         */
  .word I2C2_IRQHandler                   /* I2C2                         */
  .word SPI1_IRQHandler                   /* SPI1                         */
  .word SPI2_IRQHandler                   /* SPI2                         */
  .word USART1_IRQHandler                 /* USART1                       */
  .word USART2_IRQHandler                 /* USART2                       */
  .word USART3_4_IRQHandler               /* USART3 and USART4            */
  .word CEC_CAN_IRQHandler                /* CEC and CAN                  */
  .word USB_IRQHandler                    /* USB                          */

/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
  .weak NMI_Handler
  .thumb_set NMI_Handler,Default_Handler

  .weak HardFault_Handler
  .thumb_set HardFault_Handler,Default_Handler

  .weak SVC_Handler
  .thumb_set SVC_Handler,Default_Handler

  .weak PendSV_Handler
  .thumb_set PendSV_Handler,Default_Handler

  .weak SysTick_Handler
  .thumb_set SysTick_Handler,Default_Handler

  .weak WWDG_IRQHandler
  .thumb_set WWDG_IRQHandler,Default_Handler

  .weak PVD_VDDIO2_IRQHandler
  .thumb_set PVD_VDDIO2_IRQHandler,Default_Handler

  .weak RTC_IRQHandler
  .thumb_set RTC_IRQHandler,Default_Handler

  .weak FLASH_IRQHandler
  .thumb_set FLASH_IRQHandler,Default_Handler

  .weak RCC_CRS_IRQHandler
  .thumb_set RCC_CRS_IRQHandler,Default_Handler

  .weak EXTI0_1_IRQHandler
  .thumb_set EXTI0_1_IRQHandler,Default_Handler

  .weak EXTI2_3_IRQHandler
  .thumb_set EXTI2_3_IRQHandler,Default_Handler

  .weak EXTI4_15_IRQHandler
  .thumb_set EXTI4_15_IRQHandler,Default_Handler

  .weak TSC_IRQHandler
  .thumb_set TSC_IRQHandler,Default_Handler

  .weak DMA1_Channel1_IRQHandler
  .thumb_set DMA1_Channel1_IRQHandler,Default_Handler

  .weak DMA1_Channel2_3_IRQHandler
  .thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler

  .weak DMA1_Channel4_5_6_7_IRQHandler
  .thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler

  .weak ADC1_COMP_IRQHandler
  .thumb_set ADC1_COMP_IRQHandler,Default_Handler

  .weak TIM1_BRK_UP_TRG_COM_IRQHandler
  .thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler

  .weak TIM1_CC_IRQHandler
  .thumb_set TIM1_CC_IRQHandler,Default_Handler

  .weak TIM2_IRQHandler
  .thumb_set TIM2_IRQHandler,Default_Handler

  .weak TIM3_IRQHandler
  .thumb_set TIM3_IRQHandler,Default_Handler

  .weak TIM6_DAC_IRQHandler
  .thumb_set TIM6_DAC_IRQHandler,Default_Handler

  .weak TIM7_IRQHandler
  .thumb_set TIM7_IRQHandler,Default_Handler

  .weak TIM14_IRQHandler
  .thumb_set TIM14_IRQHandler,Default_Handler

  .weak TIM15_IRQHandler
  .thumb_set TIM15_IRQHandler,Default_Handler

  .weak TIM16_IRQHandler
  .thumb_set TIM16_IRQHandler,Default_Handler

  .weak TIM17_IRQHandler
  .thumb_set TIM17_IRQHandler,Default_Handler

  .weak I2C1_IRQHandler
  .thumb_set I2C1_IRQHandler,Default_Handler

  .weak I2C2_IRQHandler
  .thumb_set I2C2_IRQHandler,Default_Handler

  .weak SPI1_IRQHandler
  .thumb_set SPI1_IRQHandler,Default_Handler

  .weak SPI2_IRQHandler
  .thumb_set SPI2_IRQHandler,Default_Handler

  .weak USART1_IRQHandler
  .thumb_set USART1_IRQHandler,Default_Handler

  .weak USART2_IRQHandler
  .thumb_set USART2_IRQHandler,Default_Handler

  .weak USART3_4_IRQHandler
  .thumb_set USART3_4_IRQHandler,Default_Handler

  .weak CEC_CAN_IRQHandler
  .thumb_set CEC_CAN_IRQHandler,Default_Handler

  .weak USB_IRQHandler
  .thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/RCC/RCC_UseHSI_PLLasSystemClock/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
        MODULE  ?cstartup

        ;; Forward declaration of sections.
        SECTION CSTACK:DATA:NOROOT(3)

        SECTION .intvec:CODE:NOROOT(2)

        EXTERN  __iar_program_start
        EXTERN  SystemInit
        PUBLIC  __vector_table

        DATA
; Cortex-M0 core exception vectors followed by the 32 STM32F072 peripheral
; interrupt vectors. sfe(CSTACK) is the end (top) of the IAR-defined stack.
; Entry order is fixed by the hardware; do not reorder.
__vector_table
        DCD     sfe(CSTACK)
        DCD     Reset_Handler                  ; Reset Handler
        DCD     NMI_Handler                    ; NMI Handler
        DCD     HardFault_Handler              ; Hard Fault Handler
        DCD     0                              ; Reserved
        DCD     0                              ; Reserved
        DCD     0                              ; Reserved
        DCD     0                              ; Reserved
        DCD     0                              ; Reserved
        DCD     0                              ; Reserved
        DCD     0                              ; Reserved
        DCD     SVC_Handler                    ; SVCall Handler
        DCD     0                              ; Reserved
        DCD     0                              ; Reserved
        DCD     PendSV_Handler                 ; PendSV Handler
        DCD     SysTick_Handler                ; SysTick Handler

        ; External Interrupts
        DCD     WWDG_IRQHandler                ; Window Watchdog
        DCD     PVD_VDDIO2_IRQHandler          ; PVD and VDDIO2 through EXTI Line detect
        DCD     RTC_IRQHandler                 ; RTC through EXTI Line
        DCD     FLASH_IRQHandler               ; FLASH
        DCD     RCC_CRS_IRQHandler             ; RCC and CRS
        DCD     EXTI0_1_IRQHandler             ; EXTI Line 0 and 1
        DCD     EXTI2_3_IRQHandler             ; EXTI Line 2 and 3
        DCD     EXTI4_15_IRQHandler            ; EXTI Line 4 to 15
        DCD     TSC_IRQHandler                 ; TSC
        DCD     DMA1_Channel1_IRQHandler       ; DMA1 Channel 1
        DCD     DMA1_Channel2_3_IRQHandler     ; DMA1 Channel 2 and Channel 3
        DCD     DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
        DCD     ADC1_COMP_IRQHandler           ; ADC1, COMP1 and COMP2
        DCD     TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
        DCD     TIM1_CC_IRQHandler             ; TIM1 Capture Compare
        DCD     TIM2_IRQHandler                ; TIM2
        DCD     TIM3_IRQHandler                ; TIM3
        DCD     TIM6_DAC_IRQHandler            ; TIM6 and DAC
        DCD     TIM7_IRQHandler                ; TIM7
        DCD     TIM14_IRQHandler               ; TIM14
        DCD     TIM15_IRQHandler               ; TIM15
        DCD     TIM16_IRQHandler               ; TIM16
        DCD     TIM17_IRQHandler               ; TIM17
        DCD     I2C1_IRQHandler                ; I2C1
        DCD     I2C2_IRQHandler                ; I2C2
        DCD     SPI1_IRQHandler                ; SPI1
        DCD     SPI2_IRQHandler                ; SPI2
        DCD     USART1_IRQHandler              ; USART1
        DCD     USART2_IRQHandler              ; USART2
        DCD     USART3_4_IRQHandler            ; USART3 and USART4
        DCD     CEC_CAN_IRQHandler             ; CEC and CAN
        DCD     USB_IRQHandler                 ; USB

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;; Each handler is PUBWEAK: an application-defined function with the same
;; name overrides the corresponding infinite loop below.
;;
        THUMB

; Reset: run SystemInit (clock configuration) then branch to the IAR C
; runtime entry __iar_program_start, which initialises data and calls main().
        PUBWEAK Reset_Handler
        SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
        LDR     R0, =SystemInit
        BLX     R0
        LDR     R0, =__iar_program_start
        BX      R0

        PUBWEAK NMI_Handler
        SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
        B NMI_Handler

        PUBWEAK HardFault_Handler
        SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
        B HardFault_Handler

        PUBWEAK SVC_Handler
        SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
        B SVC_Handler

        PUBWEAK PendSV_Handler
        SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
        B PendSV_Handler

        PUBWEAK SysTick_Handler
        SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
        B SysTick_Handler

        PUBWEAK WWDG_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
        B WWDG_IRQHandler

        PUBWEAK PVD_VDDIO2_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
        B PVD_VDDIO2_IRQHandler

        PUBWEAK RTC_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
        B RTC_IRQHandler

        PUBWEAK FLASH_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
        B FLASH_IRQHandler

        PUBWEAK RCC_CRS_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
        B RCC_CRS_IRQHandler

        PUBWEAK EXTI0_1_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
        B EXTI0_1_IRQHandler

        PUBWEAK EXTI2_3_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
        B EXTI2_3_IRQHandler

        PUBWEAK EXTI4_15_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
        B EXTI4_15_IRQHandler

        PUBWEAK TSC_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
        B TSC_IRQHandler

        PUBWEAK DMA1_Channel1_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
        B DMA1_Channel1_IRQHandler

        PUBWEAK DMA1_Channel2_3_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
        B DMA1_Channel2_3_IRQHandler

        PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
        B DMA1_Channel4_5_6_7_IRQHandler

        PUBWEAK ADC1_COMP_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
        B ADC1_COMP_IRQHandler

        PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
        B TIM1_BRK_UP_TRG_COM_IRQHandler

        PUBWEAK TIM1_CC_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
        B TIM1_CC_IRQHandler

        PUBWEAK TIM2_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
        B TIM2_IRQHandler

        PUBWEAK TIM3_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
        B TIM3_IRQHandler

        PUBWEAK TIM6_DAC_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
        B TIM6_DAC_IRQHandler

        PUBWEAK TIM7_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
        B TIM7_IRQHandler

        PUBWEAK TIM14_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
        B TIM14_IRQHandler

        PUBWEAK TIM15_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
        B TIM15_IRQHandler

        PUBWEAK TIM16_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
        B TIM16_IRQHandler

        PUBWEAK TIM17_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
        B TIM17_IRQHandler

        PUBWEAK I2C1_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
        B I2C1_IRQHandler

        PUBWEAK I2C2_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
        B I2C2_IRQHandler

        PUBWEAK SPI1_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
        B SPI1_IRQHandler

        PUBWEAK SPI2_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
        B SPI2_IRQHandler

        PUBWEAK USART1_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
        B USART1_IRQHandler

        PUBWEAK USART2_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
        B USART2_IRQHandler

        PUBWEAK USART3_4_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
        B USART3_4_IRQHandler

        PUBWEAK CEC_CAN_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
        B CEC_CAN_IRQHandler

        PUBWEAK USB_IRQHandler
        SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
        B USB_IRQHandler

        END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,445
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/RCC/RCC_OutputSystemClockOnMCO/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;*                      After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
;   <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size      EQU     0x400                   ; 1 KiB main stack

                AREA    STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem       SPACE   Stack_Size
__initial_sp                                    ; top of stack (stack grows down); vector table entry 0

; <h> Heap Configuration
;   <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size       EQU     0x200                   ; 512 B C-library heap

                AREA    HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem        SPACE   Heap_Size
__heap_limit

                PRESERVE8
                THUMB

; Vector Table Mapped to Address 0 at Reset
; Cortex-M0 core exception vectors followed by the 32 STM32F072 peripheral
; interrupt vectors. Entry order is fixed by the hardware; do not reorder.
                AREA    RESET, DATA, READONLY
                EXPORT  __Vectors
                EXPORT  __Vectors_End
                EXPORT  __Vectors_Size

__Vectors       DCD     __initial_sp                   ; Top of Stack
                DCD     Reset_Handler                  ; Reset Handler
                DCD     NMI_Handler                    ; NMI Handler
                DCD     HardFault_Handler              ; Hard Fault Handler
                DCD     0                              ; Reserved
                DCD     0                              ; Reserved
                DCD     0                              ; Reserved
                DCD     0                              ; Reserved
                DCD     0                              ; Reserved
                DCD     0                              ; Reserved
                DCD     0                              ; Reserved
                DCD     SVC_Handler                    ; SVCall Handler
                DCD     0                              ; Reserved
                DCD     0                              ; Reserved
                DCD     PendSV_Handler                 ; PendSV Handler
                DCD     SysTick_Handler                ; SysTick Handler

                ; External Interrupts
                DCD     WWDG_IRQHandler                ; Window Watchdog
                DCD     PVD_VDDIO2_IRQHandler          ; PVD and VDDIO2 through EXTI Line detect
                DCD     RTC_IRQHandler                 ; RTC through EXTI Line
                DCD     FLASH_IRQHandler               ; FLASH
                DCD     RCC_CRS_IRQHandler             ; RCC and CRS
                DCD     EXTI0_1_IRQHandler             ; EXTI Line 0 and 1
                DCD     EXTI2_3_IRQHandler             ; EXTI Line 2 and 3
                DCD     EXTI4_15_IRQHandler            ; EXTI Line 4 to 15
                DCD     TSC_IRQHandler                 ; TSC
                DCD     DMA1_Channel1_IRQHandler       ; DMA1 Channel 1
                DCD     DMA1_Channel2_3_IRQHandler     ; DMA1 Channel 2 and Channel 3
                DCD     DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
                DCD     ADC1_COMP_IRQHandler           ; ADC1, COMP1 and COMP2
                DCD     TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
                DCD     TIM1_CC_IRQHandler             ; TIM1 Capture Compare
                DCD     TIM2_IRQHandler                ; TIM2
                DCD     TIM3_IRQHandler                ; TIM3
                DCD     TIM6_DAC_IRQHandler            ; TIM6 and DAC
                DCD     TIM7_IRQHandler                ; TIM7
                DCD     TIM14_IRQHandler               ; TIM14
                DCD     TIM15_IRQHandler               ; TIM15
                DCD     TIM16_IRQHandler               ; TIM16
                DCD     TIM17_IRQHandler               ; TIM17
                DCD     I2C1_IRQHandler                ; I2C1
                DCD     I2C2_IRQHandler                ; I2C2
                DCD     SPI1_IRQHandler                ; SPI1
                DCD     SPI2_IRQHandler                ; SPI2
                DCD     USART1_IRQHandler              ; USART1
                DCD     USART2_IRQHandler              ; USART2
                DCD     USART3_4_IRQHandler            ; USART3 & USART4
                DCD     CEC_CAN_IRQHandler             ; CEC and CAN
                DCD     USB_IRQHandler                 ; USB

__Vectors_End

__Vectors_Size  EQU     __Vectors_End - __Vectors

                AREA    |.text|, CODE, READONLY

; Reset handler routine
; Calls SystemInit() (system clock configuration) and then branches to the
; ARM C library entry point __main, which performs scatter-loading of the
; RW/ZI data regions and finally calls main(). BX to __main never returns.
Reset_Handler   PROC
                EXPORT  Reset_Handler                 [WEAK]
        IMPORT  __main
        IMPORT  SystemInit
                LDR     R0, =SystemInit
                BLX     R0
                LDR     R0, =__main
                BX      R0
                ENDP

; Dummy Exception Handlers (infinite loops which can be modified)
; Each is exported WEAK so an application-defined handler of the same
; name overrides the spin loop.

NMI_Handler     PROC
                EXPORT  NMI_Handler                    [WEAK]
                B       .
                ENDP
HardFault_Handler\
                PROC
                EXPORT  HardFault_Handler              [WEAK]
                B       .
                ENDP
SVC_Handler     PROC
                EXPORT  SVC_Handler                    [WEAK]
                B       .
                ENDP
PendSV_Handler  PROC
                EXPORT  PendSV_Handler                 [WEAK]
                B       .
                ENDP
SysTick_Handler PROC
                EXPORT  SysTick_Handler                [WEAK]
                B       .
                ENDP

; All peripheral interrupts share this single weak handler: an infinite
; loop that preserves machine state for inspection by a debugger.
Default_Handler PROC

                EXPORT  WWDG_IRQHandler                [WEAK]
                EXPORT  PVD_VDDIO2_IRQHandler          [WEAK]
                EXPORT  RTC_IRQHandler                 [WEAK]
                EXPORT  FLASH_IRQHandler               [WEAK]
                EXPORT  RCC_CRS_IRQHandler             [WEAK]
                EXPORT  EXTI0_1_IRQHandler             [WEAK]
                EXPORT  EXTI2_3_IRQHandler             [WEAK]
                EXPORT  EXTI4_15_IRQHandler            [WEAK]
                EXPORT  TSC_IRQHandler                 [WEAK]
                EXPORT  DMA1_Channel1_IRQHandler       [WEAK]
                EXPORT  DMA1_Channel2_3_IRQHandler     [WEAK]
                EXPORT  DMA1_Channel4_5_6_7_IRQHandler [WEAK]
                EXPORT  ADC1_COMP_IRQHandler           [WEAK]
                EXPORT  TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
                EXPORT  TIM1_CC_IRQHandler             [WEAK]
                EXPORT  TIM2_IRQHandler                [WEAK]
                EXPORT  TIM3_IRQHandler                [WEAK]
                EXPORT  TIM6_DAC_IRQHandler            [WEAK]
                EXPORT  TIM7_IRQHandler                [WEAK]
                EXPORT  TIM14_IRQHandler               [WEAK]
                EXPORT  TIM15_IRQHandler               [WEAK]
                EXPORT  TIM16_IRQHandler               [WEAK]
                EXPORT  TIM17_IRQHandler               [WEAK]
                EXPORT  I2C1_IRQHandler                [WEAK]
                EXPORT  I2C2_IRQHandler                [WEAK]
                EXPORT  SPI1_IRQHandler                [WEAK]
                EXPORT  SPI2_IRQHandler                [WEAK]
                EXPORT  USART1_IRQHandler              [WEAK]
                EXPORT  USART2_IRQHandler              [WEAK]
                EXPORT  USART3_4_IRQHandler            [WEAK]
                EXPORT  CEC_CAN_IRQHandler             [WEAK]
                EXPORT  USB_IRQHandler                 [WEAK]

; All of the following labels alias the same spin loop below.
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler

                B       .

                ENDP

                ALIGN

;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
                 IF      :DEF:__MICROLIB

                 EXPORT  __initial_sp
                 EXPORT  __heap_base
                 EXPORT  __heap_limit

                 ELSE

                 IMPORT  __use_two_region_memory
                 EXPORT  __user_initial_stackheap

; Called by the ARM C library start-up code to report the stack/heap bounds:
; R0 = heap base, R1 = stack top, R2 = heap limit, R3 = stack limit.
__user_initial_stackheap

                 LDR     R0, =  Heap_Mem
                 LDR     R1, =(Stack_Mem + Stack_Size)
                 LDR     R2, = (Heap_Mem +  Heap_Size)
                 LDR     R3, = Stack_Mem
                 BX      LR

                 ALIGN

                 ENDIF

                 END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/RCC/RCC_OutputSystemClockOnMCO/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
/*
 * Reset_Handler: first code executed after reset.
 * Sets SP, copies .data initializers from flash to SRAM, zero-fills .bss,
 * then calls SystemInit, __libc_init_array and main. If main ever
 * returns, execution traps in LoopForever.
 */
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM.
   r1 = byte offset into .data, advanced 4 bytes per iteration. */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1] /* r3 = flash word at _sidata + offset */
str r3, [r0, r1] /* store to RAM at _sdata + offset (r0 set at loop head) */
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1 /* r2 = next destination address */
cmp r2, r3
bcc CopyDataInit /* loop while destination < _edata */
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. r2 walks word-by-word from _sbss to _ebss. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever /* trap here if main returns */
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
/*
 * Default_Handler: entered on any unexpected interrupt; spins forever,
 * preserving system state for examination by a debugger.
 */
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
/*
 * Cortex-M0 vector table: initial SP, reset vector, system exceptions,
 * then the STM32F072 peripheral interrupt vectors. The linker script
 * must place .isr_vector where the core fetches vectors from.
 */
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/* .size is computed here, AFTER the table, so the ELF symbol size of
   g_pfnVectors is the real table length. The original placed this
   directive before the label, which evaluates to a size of 0. */
.size g_pfnVectors, .-g_pfnVectors
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
/* Each handler is declared weak and aliased to Default_Handler via
   .thumb_set (which also marks the alias as a Thumb function), so an
   application overrides any of them simply by defining a function
   with the same name. */
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/RCC/RCC_OutputSystemClockOnMCO/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
; Cortex-M0 vector table. Word 0 holds the initial SP (end of CSTACK);
; the remaining words are exception and peripheral interrupt vectors.
__vector_table
DCD sfe(CSTACK) ; Initial SP = end of CSTACK
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
; Reset: call SystemInit (clock/system setup), then hand control to the
; IAR runtime entry __iar_program_start, which eventually calls main().
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
; Each default handler below is PUBWEAK and simply branches to itself
; (infinite loop); an application overrides one by defining a function
; of the same name.
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,445
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/RCC/RCC_UseHSEasSystemClock/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400 ; 1 KiB stack
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp ; top of stack; loaded as word 0 of the vector table
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200 ; 512 B heap
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
; Calls SystemInit (clock/system setup), then jumps to the C library
; entry __main, which initializes the runtime and calls main().
; Does not return.
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
; Each is exported [WEAK] so an application-defined handler of the same
; name overrides the stub.
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
; Default_Handler: every peripheral IRQ label below resolves to the same
; infinite loop (B .). All are exported [WEAK] so any of them can be
; overridden by defining a function with the same name.
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
; MicroLIB reads the stack/heap bounds directly from these exported symbols.
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
; Standard C library: use the two-region memory model and supply the
; retargeting callback that reports the region bounds.
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
; Called by the C library during startup. Returns region bounds in R0-R3:
; R0 = heap base, R1 = stack top, R2 = heap limit, R3 = stack base.
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/RCC/RCC_UseHSEasSystemClock/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
/*
 * Reset_Handler: first code executed after reset.
 * Sets SP, copies .data initializers from flash to SRAM, zero-fills .bss,
 * then calls SystemInit, __libc_init_array and main. If main ever
 * returns, execution traps in LoopForever.
 */
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM.
   r1 = byte offset into .data, advanced 4 bytes per iteration. */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1] /* r3 = flash word at _sidata + offset */
str r3, [r0, r1] /* store to RAM at _sdata + offset (r0 set at loop head) */
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1 /* r2 = next destination address */
cmp r2, r3
bcc CopyDataInit /* loop while destination < _edata */
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. r2 walks word-by-word from _sbss to _ebss. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever /* trap here if main returns */
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
/*
 * Default_Handler: entered on any unexpected interrupt; spins forever,
 * preserving system state for examination by a debugger.
 */
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
/*
 * Cortex-M0 vector table: initial SP, reset vector, system exceptions,
 * then the STM32F072 peripheral interrupt vectors. The linker script
 * must place .isr_vector where the core fetches vectors from.
 */
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/* .size is computed here, AFTER the table, so the ELF symbol size of
   g_pfnVectors is the real table length. The original placed this
   directive before the label, which evaluates to a size of 0. */
.size g_pfnVectors, .-g_pfnVectors
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
/* Each handler is declared weak and aliased to Default_Handler via
   .thumb_set (which also marks the alias as a Thumb function), so an
   application overrides any of them simply by defining a function
   with the same name. */
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Examples_LL/RCC/RCC_UseHSEasSystemClock/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
;; CSTACK is sized/placed by the linker configuration file; sfe(CSTACK)
;; below yields its end address (initial MSP value, stack grows down).
SECTION CSTACK:DATA:NOROOT(3)
SECTION .intvec:CODE:NOROOT(2)
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
;; Cortex-M0 vector table: word 0 = initial stack pointer, word 1 = reset
;; entry, then the fixed system exceptions followed by the 32 STM32F072
;; peripheral interrupt vectors (IRQ positions 0..31, order per RM0091).
__vector_table
DCD sfe(CSTACK)
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
;; Each handler below is declared PUBWEAK and implemented as an infinite
;; self-branch; an application overrides one simply by defining a strong
;; symbol with the same name. Each lives in its own REORDER section so
;; the linker can drop unreferenced stubs.
THUMB
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
;; Reset entry: run SystemInit (clock/system setup) first, then hand off
;; to the IAR C runtime entry point, which never returns here.
Reset_Handler
LDR R0, =SystemInit
BLX R0
LDR R0, =__iar_program_start
BX R0
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,445
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Templates_LL/MDK-ARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; Main stack (MSP): Stack_Size bytes, uninitialized, 8-byte aligned
; (ALIGN=3 means 2^3). __initial_sp labels the END of the area and is
; placed in vector slot 0 below; the stack grows downward from it.
Stack_Size EQU 0x400
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; C-library heap: Heap_Size bytes between __heap_base and __heap_limit.
Heap_Size EQU 0x200
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
; PRESERVE8: promise 8-byte stack alignment (AAPCS); THUMB: Thumb code.
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
; Word 0 = initial MSP, word 1 = reset entry, then the fixed Cortex-M0
; system exceptions followed by the 32 STM32F072 peripheral IRQ vectors
; (positions 0..31, order per RM0091).
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 & USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
; Runs SystemInit (clock/system setup) first, then jumps to the ARM C
; library entry __main (copies/zeroes data, then calls main); it does
; not return here. Declared [WEAK] so an application may replace it.
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
; Each is exported [WEAK]: defining a strong symbol of the same name
; anywhere in the application overrides the stub below.
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
; All peripheral IRQ handlers share one infinite-loop body: every label
; below refers to the single "B ." at the end of this PROC.
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_VDDIO2_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_4_IRQHandler [WEAK]
EXPORT CEC_CAN_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_VDDIO2_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_4_IRQHandler
CEC_CAN_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
; With MicroLIB the C library locates stack/heap via the three exported
; symbols. Otherwise we implement __user_initial_stackheap per the ARM
; C library's two-region model: on return R0 = heap base, R1 = stack top
; (Stack_Mem + Stack_Size), R2 = heap limit, R3 = stack base.
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 10,877
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Templates_LL/SW4STM32/startup_stm32f072xb.s
|
/**
******************************************************************************
* @file startup_stm32f072xb.s
* @author MCD Application Team
* @brief STM32F072x8/STM32F072xB devices vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
/* Assembler setup: unified ARM/Thumb syntax, Cortex-M0 target (Thumb-only,
   software floating point). g_pfnVectors and Default_Handler are defined
   later in this file and exported for the linker. */
.syntax unified
.cpu cortex-m0
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
/* Reset entry point.
   Sequence: set MSP, copy .data initializers flash->SRAM, zero .bss,
   run SystemInit, run C++/C static constructors, call main.
   Register roles in the copy loop: r0 = .data destination base,
   r1 = byte offset, r2 = current destination address, r3 = scratch. */
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1] /* r3 = word at _sidata + offset */
str r3, [r0, r1] /* store to _sdata + offset */
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit /* unsigned compare: loop while dest < _edata */
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. r2 = cursor, advances by word. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function.*/
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
/* main() should not return on bare metal; trap here if it does. */
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
/* Fallback for every unhandled exception/IRQ: spin forever so the system
   state is preserved for inspection in a debugger. All weak handler
   aliases below resolve here unless overridden by the application. */
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
/* Cortex-M0 vector table. Placed in its own allocatable section so the
   linker script can pin it at the start of flash (aliased to address 0
   at reset). Word 0 = initial MSP, word 1 = reset entry, then the fixed
   system exceptions and the 32 STM32F072 peripheral IRQ vectors. */
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_VDDIO2_IRQHandler /* PVD and VDDIO2 through EXTI Line detect */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_CRS_IRQHandler /* RCC and CRS */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word TSC_IRQHandler /* TSC */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word TIM1_BRK_UP_TRG_COM_IRQHandler /* TIM1 Break, Update, Trigger and Commutation */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC */
.word TIM7_IRQHandler /* TIM7 */
.word TIM14_IRQHandler /* TIM14 */
.word TIM15_IRQHandler /* TIM15 */
.word TIM16_IRQHandler /* TIM16 */
.word TIM17_IRQHandler /* TIM17 */
.word I2C1_IRQHandler /* I2C1 */
.word I2C2_IRQHandler /* I2C2 */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_4_IRQHandler /* USART3 and USART4 */
.word CEC_CAN_IRQHandler /* CEC and CAN */
.word USB_IRQHandler /* USB */
/* Fix: the original template emitted .size BEFORE the g_pfnVectors label,
   so ".-g_pfnVectors" evaluated to 0 and the symbol's ELF size was wrong.
   Emitting it after the table records the true table length (192 bytes). */
.size g_pfnVectors, .-g_pfnVectors
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
/* Each handler referenced by the vector table is a weak alias of
   Default_Handler (.thumb_set keeps the Thumb function attribute on the
   alias); defining a strong function with the same name anywhere in the
   application replaces the stub. */
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_VDDIO2_IRQHandler
.thumb_set PVD_VDDIO2_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_CRS_IRQHandler
.thumb_set RCC_CRS_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak TSC_IRQHandler
.thumb_set TSC_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak TIM1_BRK_UP_TRG_COM_IRQHandler
.thumb_set TIM1_BRK_UP_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak TIM14_IRQHandler
.thumb_set TIM14_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak I2C2_IRQHandler
.thumb_set I2C2_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_4_IRQHandler
.thumb_set USART3_4_IRQHandler,Default_Handler
.weak CEC_CAN_IRQHandler
.thumb_set CEC_CAN_IRQHandler,Default_Handler
.weak USB_IRQHandler
.thumb_set USB_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
aegean-odyssey/mpmd_marlin_1.1.x
| 11,516
|
STM32Cube-1.10.1/Projects/STM32F072RB-Nucleo/Templates_LL/EWARM/startup_stm32f072xb.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f072xb.s
;* Author : MCD Application Team
;* Description : STM32F072x8/STM32F072xB devices vector table for EWARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == __iar_program_start,
;* - Set the vector table entries with the exceptions ISR
;* address,
;* - Branches to main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
;
; The modules in this file are included in the libraries, and may be replaced
; by any user-defined modules that define the PUBLIC symbol _program_start or
; a user defined start symbol.
; To override the cstartup defined in the library, simply add your modified
; version to the workbench project.
;
; The vector table is normally located at address 0.
; When debugging in RAM, it can be located in RAM, aligned to at least 2^6.
; The name "__vector_table" has special meaning for C-SPY:
; it is where the SP start value is found, and the NVIC vector
; table register (VTOR) is initialized to this address if != 0.
;
; Cortex-M version
;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION CSTACK:DATA:NOROOT(3) ; main stack, 8-byte aligned (2^3)
SECTION .intvec:CODE:NOROOT(2) ; vector table section, 4-byte aligned
EXTERN __iar_program_start
EXTERN SystemInit
PUBLIC __vector_table
DATA
; Cortex-M0 vector table: word 0 = initial MSP, word 1 = reset vector,
; then the fixed system exceptions followed by the 32 STM32F072 IRQ vectors.
__vector_table
DCD sfe(CSTACK) ; initial Main Stack Pointer = end of CSTACK
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_VDDIO2_IRQHandler ; PVD and VDDIO2 through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4 to Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD TIM7_IRQHandler ; TIM7
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_4_IRQHandler ; USART3 and USART4
DCD CEC_CAN_IRQHandler ; CEC and CAN
DCD USB_IRQHandler ; USB
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Default interrupt handlers.
;;
THUMB
; Reset entry point: run SystemInit (hardware/clock setup) first, then
; hand off to the IAR C runtime startup, which initializes data/bss and
; eventually calls main(). PUBWEAK allows the application to override it.
PUBWEAK Reset_Handler
SECTION .text:CODE:NOROOT:REORDER(2)
Reset_Handler
LDR R0, =SystemInit
BLX R0 ; call SystemInit before any C-runtime initialization
LDR R0, =__iar_program_start
BX R0 ; tail-jump into the C library startup (never returns)
; Default core exception handlers: each is a weak symbol that spins in an
; infinite loop so a fault is "caught" at a known PC. An application
; overrides one simply by defining a strong symbol with the same name.
PUBWEAK NMI_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
NMI_Handler
B NMI_Handler
PUBWEAK HardFault_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
HardFault_Handler
B HardFault_Handler
PUBWEAK SVC_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SVC_Handler
B SVC_Handler
PUBWEAK PendSV_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
PendSV_Handler
B PendSV_Handler
PUBWEAK SysTick_Handler
SECTION .text:CODE:NOROOT:REORDER(1)
SysTick_Handler
B SysTick_Handler
; Default peripheral IRQ handlers (one per vector-table entry above).
; Same pattern as the core exceptions: weak symbol, infinite self-loop,
; overridable by any strong definition elsewhere in the application.
PUBWEAK WWDG_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
WWDG_IRQHandler
B WWDG_IRQHandler
PUBWEAK PVD_VDDIO2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
PVD_VDDIO2_IRQHandler
B PVD_VDDIO2_IRQHandler
PUBWEAK RTC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RTC_IRQHandler
B RTC_IRQHandler
PUBWEAK FLASH_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
FLASH_IRQHandler
B FLASH_IRQHandler
PUBWEAK RCC_CRS_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
RCC_CRS_IRQHandler
B RCC_CRS_IRQHandler
PUBWEAK EXTI0_1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI0_1_IRQHandler
B EXTI0_1_IRQHandler
PUBWEAK EXTI2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI2_3_IRQHandler
B EXTI2_3_IRQHandler
PUBWEAK EXTI4_15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
EXTI4_15_IRQHandler
B EXTI4_15_IRQHandler
PUBWEAK TSC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TSC_IRQHandler
B TSC_IRQHandler
PUBWEAK DMA1_Channel1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel1_IRQHandler
B DMA1_Channel1_IRQHandler
PUBWEAK DMA1_Channel2_3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel2_3_IRQHandler
B DMA1_Channel2_3_IRQHandler
PUBWEAK DMA1_Channel4_5_6_7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
DMA1_Channel4_5_6_7_IRQHandler
B DMA1_Channel4_5_6_7_IRQHandler
PUBWEAK ADC1_COMP_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
ADC1_COMP_IRQHandler
B ADC1_COMP_IRQHandler
PUBWEAK TIM1_BRK_UP_TRG_COM_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_BRK_UP_TRG_COM_IRQHandler
B TIM1_BRK_UP_TRG_COM_IRQHandler
PUBWEAK TIM1_CC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM1_CC_IRQHandler
B TIM1_CC_IRQHandler
PUBWEAK TIM2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM2_IRQHandler
B TIM2_IRQHandler
PUBWEAK TIM3_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM3_IRQHandler
B TIM3_IRQHandler
PUBWEAK TIM6_DAC_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM6_DAC_IRQHandler
B TIM6_DAC_IRQHandler
PUBWEAK TIM7_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM7_IRQHandler
B TIM7_IRQHandler
PUBWEAK TIM14_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM14_IRQHandler
B TIM14_IRQHandler
PUBWEAK TIM15_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM15_IRQHandler
B TIM15_IRQHandler
PUBWEAK TIM16_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM16_IRQHandler
B TIM16_IRQHandler
PUBWEAK TIM17_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
TIM17_IRQHandler
B TIM17_IRQHandler
PUBWEAK I2C1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C1_IRQHandler
B I2C1_IRQHandler
PUBWEAK I2C2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
I2C2_IRQHandler
B I2C2_IRQHandler
PUBWEAK SPI1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI1_IRQHandler
B SPI1_IRQHandler
PUBWEAK SPI2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
SPI2_IRQHandler
B SPI2_IRQHandler
PUBWEAK USART1_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART1_IRQHandler
B USART1_IRQHandler
PUBWEAK USART2_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART2_IRQHandler
B USART2_IRQHandler
PUBWEAK USART3_4_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USART3_4_IRQHandler
B USART3_4_IRQHandler
PUBWEAK CEC_CAN_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
CEC_CAN_IRQHandler
B CEC_CAN_IRQHandler
PUBWEAK USB_IRQHandler
SECTION .text:CODE:NOROOT:REORDER(1)
USB_IRQHandler
B USB_IRQHandler
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
aegean-odyssey/mpmd_marlin_1.1.x
| 5,366
|
STM32Cube-1.10.1/Middlewares/Third_Party/FreeRTOS/Source/portable/IAR/ARM_CM3/portasm.s
|
/*
FreeRTOS V9.0.0 - Copyright (C) 2016 Real Time Engineers Ltd.
All rights reserved
VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.
This file is part of the FreeRTOS distribution.
FreeRTOS is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License (version 2) as published by the
Free Software Foundation >>>> AND MODIFIED BY <<<< the FreeRTOS exception.
***************************************************************************
>>! NOTE: The modification to the GPL is included to allow you to !<<
>>! distribute a combined work that includes FreeRTOS without being !<<
>>! obliged to provide the source code for proprietary components !<<
>>! outside of the FreeRTOS kernel. !<<
***************************************************************************
FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. Full license text is available on the following
link: http://www.freertos.org/a00114.html
***************************************************************************
* *
* FreeRTOS provides completely free yet professionally developed, *
* robust, strictly quality controlled, supported, and cross *
* platform software that is more than just the market leader, it *
* is the industry's de facto standard. *
* *
* Help yourself get started quickly while simultaneously helping *
* to support the FreeRTOS project by purchasing a FreeRTOS *
* tutorial book, reference manual, or both: *
* http://www.FreeRTOS.org/Documentation *
* *
***************************************************************************
http://www.FreeRTOS.org/FAQHelp.html - Having a problem? Start by reading
the FAQ page "My application does not run, what could be wrong?". Have you
defined configASSERT()?
http://www.FreeRTOS.org/support - In return for receiving this top quality
embedded software for free we request you assist our global community by
participating in the support forum.
http://www.FreeRTOS.org/training - Investing in training allows your team to
be as productive as possible as early as possible. Now you can receive
FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers
Ltd, and the world's leading authority on the world's leading RTOS.
http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
including FreeRTOS+Trace - an indispensable productivity tool, a DOS
compatible FAT file system, and our tiny thread aware UDP/IP stack.
http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate.
Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS.
http://www.OpenRTOS.com - Real Time Engineers ltd. license FreeRTOS to High
Integrity Systems ltd. to sell under the OpenRTOS brand. Low cost OpenRTOS
licenses offer ticketed support, indemnification and commercial middleware.
http://www.SafeRTOS.com - High Integrity Systems also provide a safety
engineered and independently SIL3 certified version for use in safety and
mission critical applications that require provable dependability.
1 tab == 4 spaces!
*/
#include <FreeRTOSConfig.h>
RSEG CODE:CODE(2)
thumb
EXTERN pxCurrentTCB
EXTERN vTaskSwitchContext
PUBLIC xPortPendSVHandler
PUBLIC vPortSVCHandler
PUBLIC vPortStartFirstTask
/*-----------------------------------------------------------*/
/* PendSV handler: performs the FreeRTOS context switch on Cortex-M3.
On entry, hardware exception stacking has already pushed r0-r3, r12, lr,
pc and xPSR onto the outgoing task's process stack (PSP); this handler
saves the remaining r4-r11, picks the next task, and restores its frame. */
xPortPendSVHandler:
mrs r0, psp
isb
ldr r3, =pxCurrentTCB /* Get the location of the current TCB. */
ldr r2, [r3]
stmdb r0!, {r4-r11} /* Save the remaining registers. */
str r0, [r2] /* Save the new top of stack into the first member of the TCB. */
stmdb sp!, {r3, r14} /* Preserve &pxCurrentTCB and EXC_RETURN across the C call. */
mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
msr basepri, r0 /* Mask kernel-level interrupts during the switch. */
dsb
isb
bl vTaskSwitchContext /* Select the task to run next (updates pxCurrentTCB). */
mov r0, #0
msr basepri, r0 /* Unmask interrupts again. */
ldmia sp!, {r3, r14}
ldr r1, [r3]
ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. */
ldmia r0!, {r4-r11} /* Pop the registers. */
msr psp, r0
isb
bx r14 /* Exception return; hardware unstacks the rest of the frame. */
/*-----------------------------------------------------------*/
/* SVC handler: used once, by vPortStartFirstTask, to start the scheduler.
Restores the first task's manually-stacked r4-r11, points PSP at its
hardware frame, and returns to Thread mode using the process stack. */
vPortSVCHandler:
/* Get the location of the current TCB. */
ldr r3, =pxCurrentTCB
ldr r1, [r3]
ldr r0, [r1]
/* Pop the core registers. */
ldmia r0!, {r4-r11}
msr psp, r0
isb
mov r0, #0
msr basepri, r0 /* Ensure no interrupts are masked when the task starts. */
orr r14, r14, #13 /* Force EXC_RETURN = Thread mode, PSP (0xFFFFFFFD). */
bx r14
/*-----------------------------------------------------------*/
/* Start the scheduler: reset MSP to the value stored in the vector table
(the stack used before the scheduler started is abandoned), enable
interrupts, then trigger SVC to launch the first task. Never returns. */
vPortStartFirstTask
/* Use the NVIC offset register to locate the stack. */
ldr r0, =0xE000ED08 /* SCB->VTOR: address of the active vector table. */
ldr r0, [r0]
ldr r0, [r0] /* First vector entry = initial MSP value. */
/* Set the msp back to the start of the stack. */
msr msp, r0
/* Call SVC to start the first task, ensuring interrupts are enabled. */
cpsie i
cpsie f
dsb
isb
svc 0
END
|
aegean-odyssey/mpmd_marlin_1.1.x
| 6,004
|
STM32Cube-1.10.1/Middlewares/Third_Party/FreeRTOS/Source/portable/IAR/ARM_CM4F/portasm.s
|
/*
FreeRTOS V9.0.0 - Copyright (C) 2016 Real Time Engineers Ltd.
All rights reserved
VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.
This file is part of the FreeRTOS distribution.
FreeRTOS is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License (version 2) as published by the
Free Software Foundation >>>> AND MODIFIED BY <<<< the FreeRTOS exception.
***************************************************************************
>>! NOTE: The modification to the GPL is included to allow you to !<<
>>! distribute a combined work that includes FreeRTOS without being !<<
>>! obliged to provide the source code for proprietary components !<<
>>! outside of the FreeRTOS kernel. !<<
***************************************************************************
FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. Full license text is available on the following
link: http://www.freertos.org/a00114.html
***************************************************************************
* *
* FreeRTOS provides completely free yet professionally developed, *
* robust, strictly quality controlled, supported, and cross *
* platform software that is more than just the market leader, it *
* is the industry's de facto standard. *
* *
* Help yourself get started quickly while simultaneously helping *
* to support the FreeRTOS project by purchasing a FreeRTOS *
* tutorial book, reference manual, or both: *
* http://www.FreeRTOS.org/Documentation *
* *
***************************************************************************
http://www.FreeRTOS.org/FAQHelp.html - Having a problem? Start by reading
the FAQ page "My application does not run, what could be wrong?". Have you
defined configASSERT()?
http://www.FreeRTOS.org/support - In return for receiving this top quality
embedded software for free we request you assist our global community by
participating in the support forum.
http://www.FreeRTOS.org/training - Investing in training allows your team to
be as productive as possible as early as possible. Now you can receive
FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers
Ltd, and the world's leading authority on the world's leading RTOS.
http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
including FreeRTOS+Trace - an indispensable productivity tool, a DOS
compatible FAT file system, and our tiny thread aware UDP/IP stack.
http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate.
Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS.
http://www.OpenRTOS.com - Real Time Engineers ltd. license FreeRTOS to High
Integrity Systems ltd. to sell under the OpenRTOS brand. Low cost OpenRTOS
licenses offer ticketed support, indemnification and commercial middleware.
http://www.SafeRTOS.com - High Integrity Systems also provide a safety
engineered and independently SIL3 certified version for use in safety and
mission critical applications that require provable dependability.
1 tab == 4 spaces!
*/
#include <FreeRTOSConfig.h>
RSEG CODE:CODE(2)
thumb
EXTERN pxCurrentTCB
EXTERN vTaskSwitchContext
PUBLIC xPortPendSVHandler
PUBLIC vPortSVCHandler
PUBLIC vPortStartFirstTask
PUBLIC vPortEnableVFP
/*-----------------------------------------------------------*/
/* PendSV handler: FreeRTOS context switch for Cortex-M4F (with FPU).
Bit 4 of EXC_RETURN (r14) is clear when the outgoing task has an active
floating-point context, in which case s16-s31 are stacked as well
(s0-s15 and FPSCR are stacked lazily by hardware). */
xPortPendSVHandler:
mrs r0, psp
isb
/* Get the location of the current TCB. */
ldr r3, =pxCurrentTCB
ldr r2, [r3]
/* Is the task using the FPU context? If so, push high vfp registers. */
tst r14, #0x10
it eq
vstmdbeq r0!, {s16-s31}
/* Save the core registers. */
stmdb r0!, {r4-r11, r14}
/* Save the new top of stack into the first member of the TCB. */
str r0, [r2]
stmdb sp!, {r3} /* Preserve &pxCurrentTCB across the C call. */
mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
msr basepri, r0 /* Mask kernel-level interrupts during the switch. */
dsb
isb
bl vTaskSwitchContext /* Select the next task (updates pxCurrentTCB). */
mov r0, #0
msr basepri, r0
ldmia sp!, {r3}
/* The first item in pxCurrentTCB is the task top of stack. */
ldr r1, [r3]
ldr r0, [r1]
/* Pop the core registers. */
ldmia r0!, {r4-r11, r14}
/* Is the task using the FPU context? If so, pop the high vfp registers
too. */
tst r14, #0x10
it eq
vldmiaeq r0!, {s16-s31}
msr psp, r0
isb
#ifdef WORKAROUND_PMU_CM001 /* XMC4000 specific errata */
#if WORKAROUND_PMU_CM001 == 1
push { r14 }
pop { pc }
#endif
#endif
bx r14 /* Exception return into the incoming task. */
/*-----------------------------------------------------------*/
/* SVC handler: starts the first task. Restores r4-r11 and the saved
EXC_RETURN (r14) from the task's initial stack frame, then performs an
exception return onto the process stack. */
vPortSVCHandler:
/* Get the location of the current TCB. */
ldr r3, =pxCurrentTCB
ldr r1, [r3]
ldr r0, [r1]
/* Pop the core registers. */
ldmia r0!, {r4-r11, r14}
msr psp, r0
isb
mov r0, #0
msr basepri, r0 /* Ensure no interrupts are masked when the task starts. */
bx r14
/*-----------------------------------------------------------*/
/* Start the scheduler: reset MSP from the vector table's first entry,
enable interrupts, then SVC into the first task. Never returns. */
vPortStartFirstTask
/* Use the NVIC offset register to locate the stack. */
ldr r0, =0xE000ED08 /* SCB->VTOR: address of the active vector table. */
ldr r0, [r0]
ldr r0, [r0] /* First vector entry = initial MSP value. */
/* Set the msp back to the start of the stack. */
msr msp, r0
/* Call SVC to start the first task. */
cpsie i
cpsie f
dsb
isb
svc 0
/*-----------------------------------------------------------*/
/* Grant full access to the FPU by enabling coprocessors CP10/CP11 in
CPACR (0xE000ED88). Must run before any FP instruction executes. */
vPortEnableVFP:
/* The FPU enable bits are in the CPACR. */
ldr.w r0, =0xE000ED88
ldr r1, [r0]
/* Enable CP10 and CP11 coprocessors, then save back. */
orr r1, r1, #( 0xf << 20 )
str r1, [r0]
bx r14
END
|
aegean-odyssey/mpmd_marlin_1.1.x
| 6,709
|
STM32Cube-1.10.1/Middlewares/Third_Party/FreeRTOS/Source/portable/IAR/ARM_CM0/portasm.s
|
/*
FreeRTOS V9.0.0 - Copyright (C) 2016 Real Time Engineers Ltd.
All rights reserved
VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.
This file is part of the FreeRTOS distribution.
FreeRTOS is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License (version 2) as published by the
Free Software Foundation >>>> AND MODIFIED BY <<<< the FreeRTOS exception.
***************************************************************************
>>! NOTE: The modification to the GPL is included to allow you to !<<
>>! distribute a combined work that includes FreeRTOS without being !<<
>>! obliged to provide the source code for proprietary components !<<
>>! outside of the FreeRTOS kernel. !<<
***************************************************************************
FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. Full license text is available on the following
link: http://www.freertos.org/a00114.html
***************************************************************************
* *
* FreeRTOS provides completely free yet professionally developed, *
* robust, strictly quality controlled, supported, and cross *
* platform software that is more than just the market leader, it *
* is the industry's de facto standard. *
* *
* Help yourself get started quickly while simultaneously helping *
* to support the FreeRTOS project by purchasing a FreeRTOS *
* tutorial book, reference manual, or both: *
* http://www.FreeRTOS.org/Documentation *
* *
***************************************************************************
http://www.FreeRTOS.org/FAQHelp.html - Having a problem? Start by reading
the FAQ page "My application does not run, what could be wrong?". Have you
defined configASSERT()?
http://www.FreeRTOS.org/support - In return for receiving this top quality
embedded software for free we request you assist our global community by
participating in the support forum.
http://www.FreeRTOS.org/training - Investing in training allows your team to
be as productive as possible as early as possible. Now you can receive
FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers
Ltd, and the world's leading authority on the world's leading RTOS.
http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
including FreeRTOS+Trace - an indispensable productivity tool, a DOS
compatible FAT file system, and our tiny thread aware UDP/IP stack.
http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate.
Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS.
http://www.OpenRTOS.com - Real Time Engineers ltd. license FreeRTOS to High
Integrity Systems ltd. to sell under the OpenRTOS brand. Low cost OpenRTOS
licenses offer ticketed support, indemnification and commercial middleware.
http://www.SafeRTOS.com - High Integrity Systems also provide a safety
engineered and independently SIL3 certified version for use in safety and
mission critical applications that require provable dependability.
1 tab == 4 spaces!
*/
#include <FreeRTOSConfig.h>
RSEG CODE:CODE(2)
thumb
EXTERN vPortYieldFromISR
EXTERN pxCurrentTCB
EXTERN vTaskSwitchContext
PUBLIC vSetMSP
PUBLIC xPortPendSVHandler
PUBLIC vPortSVCHandler
PUBLIC vPortStartFirstTask
PUBLIC ulSetInterruptMaskFromISR
PUBLIC vClearInterruptMaskFromISR
/*-----------------------------------------------------------*/
/* void vSetMSP( uint32_t ulValue ) — set the Main Stack Pointer to r0. */
vSetMSP
msr msp, r0
bx lr
/*-----------------------------------------------------------*/
/* PendSV handler: FreeRTOS context switch for Cortex-M0.
M0 has no stmdb/high-register block stores, so r4-r7 are stored directly
and r8-r11 are moved through r4-r7 first. Interrupt masking uses
PRIMASK (cpsid/cpsie) since M0 has no BASEPRI. */
xPortPendSVHandler:
mrs r0, psp
ldr r3, =pxCurrentTCB /* Get the location of the current TCB. */
ldr r2, [r3]
subs r0, r0, #32 /* Make space for the remaining low registers. */
str r0, [r2] /* Save the new top of stack. */
stmia r0!, {r4-r7} /* Store the low registers that are not saved automatically. */
mov r4, r8 /* Store the high registers. */
mov r5, r9
mov r6, r10
mov r7, r11
stmia r0!, {r4-r7}
push {r3, r14} /* Preserve &pxCurrentTCB and EXC_RETURN across the C call. */
cpsid i
bl vTaskSwitchContext /* Select the next task (updates pxCurrentTCB). */
cpsie i
pop {r2, r3} /* lr goes in r3. r2 now holds tcb pointer. */
ldr r1, [r2]
ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. */
adds r0, r0, #16 /* Move to the high registers. */
ldmia r0!, {r4-r7} /* Pop the high registers. */
mov r8, r4
mov r9, r5
mov r10, r6
mov r11, r7
msr psp, r0 /* Remember the new top of stack for the task. */
subs r0, r0, #32 /* Go back for the low registers that are not automatically restored. */
ldmia r0!, {r4-r7} /* Pop low registers. */
bx r3 /* Exception return via the saved EXC_RETURN value. */
/*-----------------------------------------------------------*/
/* NOTE(review): the trailing ';' starts an IAR-assembler comment, so this
line still defines the label "vPortSVCHandler" — it matches the upstream
FreeRTOS source, but confirm it was not meant to be ':'. */
vPortSVCHandler;
/* This function is no longer used, but retained for backward
compatibility. */
bx lr
/*-----------------------------------------------------------*/
/* Start the scheduler on Cortex-M0: no VTOR on all M0 parts, so MSP is
left alone. The first task's context is popped by hand from its initial
stack frame, CONTROL is switched to use PSP, and execution branches
straight into the task's entry point. Never returns. */
vPortStartFirstTask
/* The MSP stack is not reset as, unlike on M3/4 parts, there is no vector
table offset register that can be used to locate the initial stack value.
Not all M0 parts have the application vector table at address 0. */
ldr r3, =pxCurrentTCB /* Obtain location of pxCurrentTCB. */
ldr r1, [r3]
ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. */
adds r0, #32 /* Discard everything up to r0. */
msr psp, r0 /* This is now the new top of stack to use in the task. */
movs r0, #2 /* Switch to the psp stack. */
msr CONTROL, r0
isb
pop {r0-r5} /* Pop the registers that are saved automatically. */
mov lr, r5 /* lr is now in r5. */
pop {r3} /* The return address is now in r3. */
pop {r2} /* Pop and discard the XPSR. */
cpsie i /* The first task has its context and interrupts can be enabled. */
bx r3 /* Jump to the user defined task code. */
/*-----------------------------------------------------------*/
/* Return the current PRIMASK value in r0, then disable interrupts.
The returned value is later passed back to vClearInterruptMaskFromISR. */
ulSetInterruptMaskFromISR
mrs r0, PRIMASK
cpsid i
bx lr
/*-----------------------------------------------------------*/
/* Restore PRIMASK from r0 (the value previously returned above). */
vClearInterruptMaskFromISR
msr PRIMASK, r0
bx lr
END
|
aegean-odyssey/mpmd_marlin_1.1.x
| 6,022
|
STM32Cube-1.10.1/Middlewares/Third_Party/FreeRTOS/Source/portable/IAR/ARM_CM7/r0p1/portasm.s
|
/*
FreeRTOS V9.0.0 - Copyright (C) 2016 Real Time Engineers Ltd.
All rights reserved
VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.
This file is part of the FreeRTOS distribution.
FreeRTOS is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License (version 2) as published by the
Free Software Foundation >>>> AND MODIFIED BY <<<< the FreeRTOS exception.
***************************************************************************
>>! NOTE: The modification to the GPL is included to allow you to !<<
>>! distribute a combined work that includes FreeRTOS without being !<<
>>! obliged to provide the source code for proprietary components !<<
>>! outside of the FreeRTOS kernel. !<<
***************************************************************************
FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. Full license text is available on the following
link: http://www.freertos.org/a00114.html
***************************************************************************
* *
* FreeRTOS provides completely free yet professionally developed, *
* robust, strictly quality controlled, supported, and cross *
* platform software that is more than just the market leader, it *
* is the industry's de facto standard. *
* *
* Help yourself get started quickly while simultaneously helping *
* to support the FreeRTOS project by purchasing a FreeRTOS *
* tutorial book, reference manual, or both: *
* http://www.FreeRTOS.org/Documentation *
* *
***************************************************************************
http://www.FreeRTOS.org/FAQHelp.html - Having a problem? Start by reading
the FAQ page "My application does not run, what could be wrong?". Have you
defined configASSERT()?
http://www.FreeRTOS.org/support - In return for receiving this top quality
embedded software for free we request you assist our global community by
participating in the support forum.
http://www.FreeRTOS.org/training - Investing in training allows your team to
be as productive as possible as early as possible. Now you can receive
FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers
Ltd, and the world's leading authority on the world's leading RTOS.
http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
including FreeRTOS+Trace - an indispensable productivity tool, a DOS
compatible FAT file system, and our tiny thread aware UDP/IP stack.
http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate.
Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS.
http://www.OpenRTOS.com - Real Time Engineers ltd. license FreeRTOS to High
Integrity Systems ltd. to sell under the OpenRTOS brand. Low cost OpenRTOS
licenses offer ticketed support, indemnification and commercial middleware.
http://www.SafeRTOS.com - High Integrity Systems also provide a safety
engineered and independently SIL3 certified version for use in safety and
mission critical applications that require provable dependability.
1 tab == 4 spaces!
*/
#include <FreeRTOSConfig.h>
RSEG CODE:CODE(2)
thumb
EXTERN pxCurrentTCB
EXTERN vTaskSwitchContext
PUBLIC xPortPendSVHandler
PUBLIC vPortSVCHandler
PUBLIC vPortStartFirstTask
PUBLIC vPortEnableVFP
/*-----------------------------------------------------------*/
/* PendSV handler: FreeRTOS context switch for Cortex-M7 r0p1.
Identical to the CM4F port except that the BASEPRI write is bracketed by
cpsid/cpsie — the documented workaround for the Cortex-M7 r0p1 erratum
where an interrupt can be taken during the MSR to BASEPRI. */
xPortPendSVHandler:
mrs r0, psp
isb
/* Get the location of the current TCB. */
ldr r3, =pxCurrentTCB
ldr r2, [r3]
/* Is the task using the FPU context? If so, push high vfp registers. */
tst r14, #0x10
it eq
vstmdbeq r0!, {s16-s31}
/* Save the core registers. */
stmdb r0!, {r4-r11, r14}
/* Save the new top of stack into the first member of the TCB. */
str r0, [r2]
stmdb sp!, {r3}
mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY
cpsid i /* r0p1 errata workaround: mask globally around the BASEPRI write. */
msr basepri, r0
dsb
isb
cpsie i
bl vTaskSwitchContext /* Select the next task (updates pxCurrentTCB). */
mov r0, #0
msr basepri, r0
ldmia sp!, {r3}
/* The first item in pxCurrentTCB is the task top of stack. */
ldr r1, [r3]
ldr r0, [r1]
/* Pop the core registers. */
ldmia r0!, {r4-r11, r14}
/* Is the task using the FPU context? If so, pop the high vfp registers
too. */
tst r14, #0x10
it eq
vldmiaeq r0!, {s16-s31}
msr psp, r0
isb
#ifdef WORKAROUND_PMU_CM001 /* XMC4000 specific errata */
#if WORKAROUND_PMU_CM001 == 1
push { r14 }
pop { pc }
#endif
#endif
bx r14 /* Exception return into the incoming task. */
/*-----------------------------------------------------------*/
/* SVC handler: starts the first task by restoring r4-r11 and EXC_RETURN
from its initial stack frame and performing an exception return. */
vPortSVCHandler:
/* Get the location of the current TCB. */
ldr r3, =pxCurrentTCB
ldr r1, [r3]
ldr r0, [r1]
/* Pop the core registers. */
ldmia r0!, {r4-r11, r14}
msr psp, r0
isb
mov r0, #0
msr basepri, r0 /* Ensure no interrupts are masked when the task starts. */
bx r14
/*-----------------------------------------------------------*/
/* Start the scheduler: reset MSP from the vector table's first entry,
enable interrupts, then SVC into the first task. Never returns. */
vPortStartFirstTask
/* Use the NVIC offset register to locate the stack. */
ldr r0, =0xE000ED08 /* SCB->VTOR: address of the active vector table. */
ldr r0, [r0]
ldr r0, [r0] /* First vector entry = initial MSP value. */
/* Set the msp back to the start of the stack. */
msr msp, r0
/* Call SVC to start the first task. */
cpsie i
cpsie f
dsb
isb
svc 0
/*-----------------------------------------------------------*/
/* Grant full access to the FPU by enabling coprocessors CP10/CP11 in
CPACR (0xE000ED88). Must run before any FP instruction executes. */
vPortEnableVFP:
/* The FPU enable bits are in the CPACR. */
ldr.w r0, =0xE000ED88
ldr r1, [r0]
/* Enable CP10 and CP11 coprocessors, then save back. */
orr r1, r1, #( 0xf << 20 )
str r1, [r0]
bx r14
END
|
aether-os-studio/naos
| 1,495
|
kernel/src/arch/x64/copy-sse.S
|
.section .text
# void fast_copy_16(void *dest, const void *src, size_t n)
# SysV AMD64 ABI. Copies n bytes from src to dest using unaligned 16-byte
# SSE moves for the bulk, then an 8/4/2/1-byte tail. Pointers advance
# forward, so (like memcpy) overlapping regions are not supported.
# Clobbers: rax, rcx, rdx, xmm0, flags. No return value.
.global fast_copy_16
fast_copy_16:
# Args: %rdi = dest, %rsi = src, %rdx = n
testq %rdx, %rdx # n == 0?
jz .end # nothing to copy
# Small input (n < 16): skip straight to the tail copier.
cmpq $16, %rdx
jb .tail_copy # handle < 16 bytes
# Set up the 16-byte block loop.
movq %rdx, %rcx # rcx = n
shrq $4, %rcx # rcx = n / 16 (iteration count)
andq $0x0F, %rdx # rdx = n % 16 (remaining bytes)
.loop:
movdqu (%rsi), %xmm0 # load 16 bytes from src (unaligned-safe)
movdqu %xmm0, (%rdi) # store 16 bytes to dest (unaligned-safe)
addq $16, %rsi
addq $16, %rdi
decq %rcx
jnz .loop # repeat until all full blocks are copied
# Any remainder (0-15 bytes)?
testq %rdx, %rdx
jz .end # none left — done
.tail_copy:
# Copy the remaining 1-15 bytes in 8/4/2/1-byte pieces, driven by the
# bits of the residual count in %dl.
testb $8, %dl # 8-byte piece needed?
jz .lt8
movq (%rsi), %rax # copy 8 bytes
movq %rax, (%rdi)
addq $8, %rsi
addq $8, %rdi
.lt8:
testb $4, %dl # 4-byte piece needed?
jz .lt4
movl (%rsi), %eax # copy 4 bytes
movl %eax, (%rdi)
addq $4, %rsi
addq $4, %rdi
.lt4:
testb $2, %dl # 2-byte piece needed?
jz .lt2
movw (%rsi), %ax # copy 2 bytes
movw %ax, (%rdi)
addq $2, %rsi
addq $2, %rdi
.lt2:
testb $1, %dl # final byte needed?
jz .end
movb (%rsi), %al # copy 1 byte
movb %al, (%rdi)
.end:
retq # done
|
aether-os-studio/naos
| 6,311
|
kernel/src/arch/aarch64/irq/entry.S
|
#include "settings.h"
// https://gitee.com/BookOS/nxos/blob/master/src/arch/aarch64/kernel/vector.S
// Push the 16 callee-visible 128-bit SIMD/FP registers Q0-Q15 onto the
// descending stack addressed by \reg (16 bytes each, pre-decrement).
// Must be matched exactly by RESTORE_FPU below (reverse order).
.macro SAVE_FPU, reg
STR Q0, [\reg, #-0x10]!
STR Q1, [\reg, #-0x10]!
STR Q2, [\reg, #-0x10]!
STR Q3, [\reg, #-0x10]!
STR Q4, [\reg, #-0x10]!
STR Q5, [\reg, #-0x10]!
STR Q6, [\reg, #-0x10]!
STR Q7, [\reg, #-0x10]!
STR Q8, [\reg, #-0x10]!
STR Q9, [\reg, #-0x10]!
STR Q10, [\reg, #-0x10]!
STR Q11, [\reg, #-0x10]!
STR Q12, [\reg, #-0x10]!
STR Q13, [\reg, #-0x10]!
STR Q14, [\reg, #-0x10]!
STR Q15, [\reg, #-0x10]!
.endm
// Pop Q15 down to Q0 from the stack addressed by \reg (post-increment),
// exactly undoing SAVE_FPU above.
.macro RESTORE_FPU, reg
LDR Q15, [\reg], #0x10
LDR Q14, [\reg], #0x10
LDR Q13, [\reg], #0x10
LDR Q12, [\reg], #0x10
LDR Q11, [\reg], #0x10
LDR Q10, [\reg], #0x10
LDR Q9, [\reg], #0x10
LDR Q8, [\reg], #0x10
LDR Q7, [\reg], #0x10
LDR Q6, [\reg], #0x10
LDR Q5, [\reg], #0x10
LDR Q4, [\reg], #0x10
LDR Q3, [\reg], #0x10
LDR Q2, [\reg], #0x10
LDR Q1, [\reg], #0x10
LDR Q0, [\reg], #0x10
.endm
// Save the full exception context onto the current (EL1) stack:
// FPU regs, X0-X29, FPCR/FPSR, SP_EL0 + X30(LR), then the trap state
// (ELR_EL1 = faulting PC, SPSR_EL1 = saved PSTATE). Layout must match
// RESTORE_CONTEXT exactly. Leaves X0 = pointer to the saved frame.
.macro SAVE_CONTEXT
/* Save the entire context. */
SAVE_FPU SP
STP X0, X1, [SP, #-0x10]!
STP X2, X3, [SP, #-0x10]!
STP X4, X5, [SP, #-0x10]!
STP X6, X7, [SP, #-0x10]!
STP X8, X9, [SP, #-0x10]!
STP X10, X11, [SP, #-0x10]!
STP X12, X13, [SP, #-0x10]!
STP X14, X15, [SP, #-0x10]!
STP X16, X17, [SP, #-0x10]!
STP X18, X19, [SP, #-0x10]!
STP X20, X21, [SP, #-0x10]!
STP X22, X23, [SP, #-0x10]!
STP X24, X25, [SP, #-0x10]!
STP X26, X27, [SP, #-0x10]!
STP X28, X29, [SP, #-0x10]!
MRS X28, FPCR
MRS X29, FPSR
STP X28, X29, [SP, #-0x10]!
MRS X29, SP_EL0
STP X29, X30, [SP, #-0x10]!
MRS X3, SPSR_EL1
MRS X2, ELR_EL1
STP X2, X3, [SP, #-0x10]!
MOV X0, SP /* Move SP into X0 for saving. */
.endm
// Like SAVE_CONTEXT, but used on the voluntary context-switch path where
// no exception occurred: instead of reading ELR/SPSR from hardware, it
// fabricates them — return address = X30 (the caller's LR) and
// PSTATE = EL1h with FIQ/IRQ masked — so RESTORE_CONTEXT can later
// "return" into the caller via ERET. Clobbers X18/X19 as scratch.
.macro SAVE_CONTEXT_FROM_EL1
/* Save the entire context. */
SAVE_FPU SP
STP X0, X1, [SP, #-0x10]!
STP X2, X3, [SP, #-0x10]!
STP X4, X5, [SP, #-0x10]!
STP X6, X7, [SP, #-0x10]!
STP X8, X9, [SP, #-0x10]!
STP X10, X11, [SP, #-0x10]!
STP X12, X13, [SP, #-0x10]!
STP X14, X15, [SP, #-0x10]!
STP X16, X17, [SP, #-0x10]!
STP X18, X19, [SP, #-0x10]!
STP X20, X21, [SP, #-0x10]!
STP X22, X23, [SP, #-0x10]!
STP X24, X25, [SP, #-0x10]!
STP X26, X27, [SP, #-0x10]!
STP X28, X29, [SP, #-0x10]!
MRS X28, FPCR
MRS X29, FPSR
STP X28, X29, [SP, #-0x10]!
MRS X29, SP_EL0
STP X29, X30, [SP, #-0x10]!
MOV X19, #((3 << 6) | 0x4 | 0x1) /* el1h, disable interrupt */
MOV X18, X30 /* fabricated return address = caller's LR */
STP X18, X19, [SP, #-0x10]!
.endm
// Restore the context saved by SAVE_CONTEXT / SAVE_CONTEXT_FROM_EL1 from
// the frame pointed to by X0, then ERET to the saved ELR/SPSR state.
// Must unwind in exactly the reverse order of the save macros.
.macro RESTORE_CONTEXT
/* Set the SP to point to the stack of the task being restored. */
MOV SP, X0
LDP X2, X3, [SP], #0x10 /* SPSR and ELR. */
TST X3, #0x1f /* NOTE(review): flags set here are never consumed — looks like leftover debug code; confirm before removing. */
MSR SPSR_EL1, X3
MSR ELR_EL1, X2
LDP X29, X30, [SP], #0x10
MSR SP_EL0, X29
LDP X28, X29, [SP], #0x10
MSR FPCR, X28
MSR FPSR, X29
LDP X28, X29, [SP], #0x10
LDP X26, X27, [SP], #0x10
LDP X24, X25, [SP], #0x10
LDP X22, X23, [SP], #0x10
LDP X20, X21, [SP], #0x10
LDP X18, X19, [SP], #0x10
LDP X16, X17, [SP], #0x10
LDP X14, X15, [SP], #0x10
LDP X12, X13, [SP], #0x10
LDP X10, X11, [SP], #0x10
LDP X8, X9, [SP], #0x10
LDP X6, X7, [SP], #0x10
LDP X4, X5, [SP], #0x10
LDP X2, X3, [SP], #0x10
LDP X0, X1, [SP], #0x10
RESTORE_FPU SP
ERET
.endm
// Reason codes passed to bad_mode() for unexpected exception classes.
#define BAD_SYNC 0
#define BAD_IRQ 1
#define BAD_FIQ 2
#define BAD_ERROR 3
/*
vector table entry
Each entry is 128 bytes; ".align 7" gives the required 128-byte alignment.
Only a branch fits the style used here — the real handler lives elsewhere.
*/
.macro vtentry label
.align 7
b \label
.endm
/*
Handler body for invalid/unexpected exception vectors: save the full
context, then call bad_mode(frame, reason, esr_el1). Does not return.
*/
.macro inv_entry reason
SAVE_CONTEXT
mov x0, sp
mov x1, #\reason
mrs x2, esr_el1
b bad_mode
.endm
/*
* Vector Table
*
* The AArch64 exception vector table occupies 2048 bytes in total:
* 4 groups (by taken-from state) of 4 entries (Sync/IRQ/FIQ/SError),
* each entry 128 bytes. See the ARMv8-A ARM (v8.6), section D1.10.
* ".align 11" gives the required 2048-byte alignment.
*/
.align 11
.global vectors
vectors:
/* Current EL with SP0
Taken while running at EL1 but using EL0's stack pointer (SP_EL0).
That configuration is treated as an error here.
*/
vtentry el1_sync_invalid
vtentry el1_irq_invalid
vtentry el1_fiq_invalid
vtentry el1_error_invalid
/* Current EL with SPx
Taken at EL1 using EL1's own stack pointer, i.e. an exception raised
in kernel mode.
Note: only IRQ is implemented for now.
*/
vtentry el1_sync_invalid
vtentry el1_irq
vtentry el1_fiq_invalid
vtentry el1_error_invalid
/* Lower EL using AArch64
Exception taken from an AArch64 user-mode (EL0) program.
*/
vtentry el0_sync
vtentry el0_irq
vtentry el0_fiq_invalid
vtentry el0_error_invalid
/* Lower EL using AArch32
Exception taken from an AArch32 user-mode program (unsupported).
*/
vtentry el0_sync_invalid
vtentry el0_irq_invalid
vtentry el0_fiq_invalid
vtentry el0_error_invalid
// Invalid-vector stubs: each expands to SAVE_CONTEXT + a call to
// bad_mode() with the matching reason code.
el1_sync_invalid:
inv_entry BAD_SYNC
el1_irq_invalid:
inv_entry BAD_IRQ
el1_fiq_invalid:
inv_entry BAD_FIQ
el1_error_invalid:
inv_entry BAD_ERROR
el0_sync_invalid:
inv_entry BAD_SYNC
el0_irq_invalid:
inv_entry BAD_IRQ
el0_fiq_invalid:
inv_entry BAD_FIQ
el0_error_invalid:
inv_entry BAD_ERROR
.align 3
// Switch to the task whose saved stack pointer is stored at [X0].
// Used when the outgoing context does not need saving.
.globl arch_context_switch_with_next
arch_context_switch_with_next:
LDR X0, [X0] /* X0 = next task's saved SP */
MOV SP, X0
B arch_context_switch_exit
.align 3
// Save the current context to [X0] (prev task's SP slot), then switch
// to the task whose saved SP is stored at [X1] (next task).
.globl arch_context_switch_with_prev_next
arch_context_switch_with_prev_next:
SAVE_CONTEXT_FROM_EL1
MOV X2, SP
STR X2, [X0] /* store frame pointer into prev's SP slot */
LDR X0, [X1] /* load next task's saved SP */
MOV SP, X0
B arch_context_switch_exit
.align 3
// Common exit path: restore the frame at SP and ERET into it.
// CLREX clears any exclusive-monitor state carried across the switch.
.global arch_context_switch_exit
arch_context_switch_exit:
MOV X0, SP
CLREX
RESTORE_CONTEXT
.align 3
// IRQ taken from EL0 (user mode): save context, dispatch, restore.
el0_irq:
SAVE_CONTEXT
BL aarch64_do_irq
B arch_context_switch_exit
.align 3
// Synchronous exception from EL0 (e.g. SVC): trap_dispatch may return a
// different frame pointer in X0, which becomes the SP to restore from.
el0_sync:
SAVE_CONTEXT
STP X0, X1, [SP, #-0x10]!
BL trap_dispatch
LDP X0, X1, [SP], #0x10
MOV SP, X0
B arch_context_switch_exit
.align 3
// IRQ taken from EL1 (kernel mode).
el1_irq:
SAVE_CONTEXT
BL aarch64_do_irq
B arch_context_switch_exit
// Install the exception vector table (VBAR_EL1 = vectors) and disable
// SIMD/FP trapping via CPACR_EL1 so EL0/EL1 code may use FP registers.
.global setup_vectors
setup_vectors:
/* setup vectors */
ldr x0, =vectors
msr vbar_el1, x0
isb
/* Set CPACR_EL1 (Architecture Feature Access Control Register) to avoid trap from SIMD or float point instruction */
mov x0, #0x00300000 /* Don't trap any SIMD/FP instructions in both EL0 and EL1 */
msr cpacr_el1, x0
ret
|
aether-os-studio/naos
| 6,028
|
kernel/src/arch/x64/irq/entry.S
|
#include "arch/x64/asm.h"
#include "settings.h"
.section .data
.align 16
.global GDT_Table
/* Global Descriptor Table.
 * Selector layout (offset = index * 8):
 *   0x00 null, 0x08 kernel 64-bit code, 0x10 kernel data,
 *   0x18 user data (DPL3), 0x20 user 64-bit code (DPL3),
 *   0x28/0x30 reserved, 0x38/0x40 legacy 32-bit code/data,
 *   then zeroed slots — presumably 16-byte per-CPU TSS descriptors
 *   (two quads each) filled in at runtime; confirm against the
 *   C-side TSS setup.
 */
GDT_Table:
.quad 0x0000000000000000        // 0x00: mandatory null descriptor
.quad 0x0020980000000000        // 0x08: kernel code, 64-bit (L=1), DPL0
.quad 0x0000920000000000        // 0x10: kernel data, DPL0
.quad 0x0000f20000000000        // 0x18: user data, DPL3
.quad 0x0020f80000000000        // 0x20: user code, 64-bit, DPL3
.quad 0x0000000000000000        // 0x28: reserved
.quad 0x0000000000000000        // 0x30: reserved
.quad 0x00cf9a000000ffff        // 0x38: legacy 32-bit code
.quad 0x00cf92000000ffff        // 0x40: legacy 32-bit data
.fill (MAX_CPU_NUM * 2 + 1), 8, 0   // per-CPU descriptor slots
GDT_END:
.global GDT_POINTER
/* Operand for lgdt: 16-bit limit followed by 64-bit linear base. */
GDT_POINTER:
GDT_LIMIT: .word GDT_END - GDT_Table - 1
GDT_BASE: .quad GDT_Table
.align 16
.global IDT_Table
/* Interrupt Descriptor Table: 256 gates x 16 bytes (512 quads),
 * zero-initialised here and populated at runtime. */
IDT_Table:
.fill 512, 8, 0
IDT_END:
.global IDT_POINTER
/* Operand for lidt: 16-bit limit followed by 64-bit linear base. */
IDT_POINTER:
IDT_LIMIT: .word IDT_END - IDT_Table - 1
IDT_BASE: .quad IDT_Table
/* Byte offsets of each slot in the exception stack frame built by
 * Err_Code below.  r15 is pushed last, so it sits at offset 0.
 * FUNC/ERRCODE are pushed by the per-vector entry stubs;
 * RIP..OLDSS are pushed by the CPU itself on gate entry.
 */
R15 = 0x00
R14 = 0x08
R13 = 0x10
R12 = 0x18
R11 = 0x20
R10 = 0x28
R9 = 0x30
R8 = 0x38
RBX = 0x40
RCX = 0x48
RDX = 0x50
RSI = 0x58
RDI = 0x60
RBP = 0x68
DS = 0x70
ES = 0x78
RAX = 0x80
FUNC = 0x88
ERRCODE = 0x90
RIP = 0x98
CS = 0xa0
RFLAGS = 0xa8
OLD_RSP = 0xb0
OLDSS = 0xb8
.section .text
/* Pop the full register frame saved by Err_Code and return to the
 * interrupted context with iretq. */
Restore_all:
// === restore the interrupted register state ===
popq %r15
popq %r14
popq %r13
popq %r12
popq %r11
popq %r10
popq %r9
popq %r8
popq %rbx
popq %rcx
popq %rdx
popq %rsi
popq %rdi
popq %rbp
popq %rax
movq %rax, %ds                  // DS was pushed as a 64-bit word
popq %rax
movq %rax, %es                  // ES likewise
popq %rax
addq $0x10, %rsp // discard the FUNC and ERRCODE slots
iretq
/* Common return path: interrupt and exception handlers both funnel
 * here.  Interrupts are masked before the frame is unwound. */
ENTRY(ret_from_intr)
ENTRY(ret_from_exception)
cli
jmp Restore_all
/* Common exception trampoline.
 * On entry the stack holds (top first): the handler address placed in
 * the FUNC slot by the entry stub, the error code (real or dummy 0),
 * then the CPU-pushed iret frame.  Completes the register frame,
 * switches DS/ES to the kernel data segment, and calls
 * handler(frame, error_code).
 */
Err_Code:
cli
pushq %rax
movq %es, %rax
pushq %rax
movq %ds, %rax
pushq %rax
xorq %rax, %rax
pushq %rbp
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbx
pushq %r8
pushq %r9
pushq %r10
pushq %r11
pushq %r12
pushq %r13
pushq %r14
pushq %r15
cld
movq ERRCODE(%rsp), %rsi // error code -> second C argument
movq FUNC(%rsp), %rdx           // handler address saved by the entry stub
movq $0x10, %rdi // kernel data segment selector
movq %rdi, %ds
movq %rdi, %es
movq %rsp, %rdi // frame pointer -> first C argument
callq *%rdx // indirect call to the service routine ('*' = absolute)
jmp ret_from_exception
/* Per-vector entry stubs for CPU exceptions 0-20.
 * Vectors that push no hardware error code push a dummy 0 so every
 * frame has the same layout; each stub then stashes its C handler's
 * address in the FUNC slot (via push/xchg, which restores rax) and
 * jumps to the common Err_Code trampoline.
 */
// 0 #DE divide error
ENTRY(divide_error)
pushq $0 // #DE pushes no error code; push a dummy 0 for a uniform frame
pushq %rax // save rax first
leaq do_divide_error(%rip), %rax // load the handler address
xchgq %rax, (%rsp) // swap the handler address into the FUNC slot
jmp Err_Code
// 1 #DB debug exception
ENTRY(debug)
pushq $0
pushq %rax
leaq do_debug(%rip), %rax // load the handler address
xchgq %rax, (%rsp) // swap the handler address into the FUNC slot
jmp Err_Code
// 2 non-maskable interrupt
ENTRY(nmi)
// NMI is an external interrupt, not an exception, and pushes no
// error code; it still goes through the common frame-building path.
pushq $0 // placeholder error code
pushq %rax
leaq do_nmi(%rip), %rax
xchgq %rax, (%rsp)
jmp Err_Code
// 3 #BP breakpoint
ENTRY(int3)
pushq $0
pushq %rax
leaq do_int3(%rip), %rax // load the handler address
xchgq %rax, (%rsp) // swap the handler address into the FUNC slot
jmp Err_Code
// 4 #OF overflow
ENTRY(overflow)
pushq $0
pushq %rax
leaq do_overflow(%rip), %rax // load the handler address
xchgq %rax, (%rsp) // swap the handler address into the FUNC slot
jmp Err_Code
// 5 #BR bound range exceeded
ENTRY(bounds)
pushq $0
pushq %rax
leaq do_bounds(%rip), %rax // load the handler address
xchgq %rax, (%rsp) // swap the handler address into the FUNC slot
jmp Err_Code
// 6 #UD invalid/undefined opcode
ENTRY(undefined_opcode)
pushq $0
pushq %rax
leaq do_undefined_opcode(%rip), %rax // load the handler address
xchgq %rax, (%rsp) // swap the handler address into the FUNC slot
jmp Err_Code
// 7 #NM device not available (no FPU)
ENTRY(dev_not_avaliable)
pushq $0
pushq %rax
leaq do_dev_not_avaliable(%rip), %rax // load the handler address
xchgq %rax, (%rsp) // swap the handler address into the FUNC slot
jmp Err_Code
// 8 #DF double fault
ENTRY(double_fault)
// the CPU has already pushed an error code for this vector
pushq %rax
leaq do_double_fault(%rip), %rax // load the handler address
xchgq %rax, (%rsp) // swap the handler address into the FUNC slot
jmp Err_Code
// 9 coprocessor segment overrun (reserved)
ENTRY(coprocessor_segment_overrun)
pushq $0
pushq %rax
leaq do_coprocessor_segment_overrun(%rip), %rax // load the handler address
xchgq %rax, (%rsp) // swap the handler address into the FUNC slot
jmp Err_Code
// 10 #TS invalid TSS
ENTRY(invalid_TSS)
// === invalid task-state segment #TS ===
// has an error code; the CPU has already pushed it onto the stack
pushq %rax
leaq do_invalid_TSS(%rip), %rax
xchgq %rax, (%rsp)
jmp Err_Code
// 11 #NP segment not present
ENTRY(segment_not_exists)
pushq %rax
leaq do_segment_not_exists(%rip), %rax // load the handler address
xchgq %rax, (%rsp) // swap the handler address into the FUNC slot
jmp Err_Code
// 12 #SS stack-segment fault
ENTRY(stack_segment_fault)
pushq %rax
leaq do_stack_segment_fault(%rip), %rax // load the handler address
xchgq %rax, (%rsp) // swap the handler address into the FUNC slot
jmp Err_Code
// 13 #GP general protection fault
ENTRY(general_protection)
pushq %rax
leaq do_general_protection(%rip), %rax // load the handler address
xchgq %rax, (%rsp) // swap the handler address into the FUNC slot
jmp Err_Code
// 14 #PF page fault
ENTRY(page_fault)
// === page fault #PF ===
// has an error code
pushq %rax
leaq do_page_fault(%rip), %rax
xchgq %rax, (%rsp)
jmp Err_Code
// 15 reserved by Intel; do not use
// 16 #MF x87 FPU floating-point error
ENTRY(x87_FPU_error)
pushq $0
pushq %rax
leaq do_x87_FPU_error(%rip), %rax // load the handler address
xchgq %rax, (%rsp) // swap the handler address into the FUNC slot
jmp Err_Code
// 17 #AC alignment check
ENTRY(alignment_check)
pushq %rax
leaq do_alignment_check(%rip), %rax // load the handler address
xchgq %rax, (%rsp) // swap the handler address into the FUNC slot
jmp Err_Code
// 18 #MC machine check
ENTRY(machine_check)
pushq $0
pushq %rax
leaq do_machine_check(%rip), %rax // load the handler address
xchgq %rax, (%rsp) // swap the handler address into the FUNC slot
jmp Err_Code
// 19 #XM SIMD floating-point exception
ENTRY(SIMD_exception)
pushq $0
pushq %rax
leaq do_SIMD_exception(%rip), %rax // load the handler address
xchgq %rax, (%rsp) // swap the handler address into the FUNC slot
jmp Err_Code
// 20 #VE virtualization exception
ENTRY(virtualization_exception)
pushq $0
pushq %rax
leaq do_virtualization_exception(%rip), %rax // load the handler address
xchgq %rax, (%rsp) // swap the handler address into the FUNC slot
jmp Err_Code
/* Load the kernel IDT and GDT, then reload every segment register.
 * The far return (lretq) reloads CS with the 64-bit kernel code
 * selector 0x08; the remaining data selectors are set to 0x10.
 */
ENTRY(gdtidt_setup)
lidt IDT_POINTER(%rip)
lgdt GDT_POINTER(%rip)
movq ready_to_ret(%rip), %rax   // rax = address of to_ret
pushq $0x08                     // new CS selector
pushq %rax                      // new RIP (popped first by lretq)
lretq                           // far return: reloads CS:RIP
ready_to_ret:
.quad to_ret
to_ret:
movq $0x10, %rax                // kernel data selector
movq %rax, %ds
movq %rax, %es
movq %rax, %fs
movq %rax, %gs
movq %rax, %ss
ret
|
aether-os-studio/naos
| 1,122
|
kernel/src/arch/x64/syscall/syscall.S
|
#include "arch/x64/asm.h"
#include "settings.h"
/* Fast-syscall entry point (the syscall instruction's LSTAR target).
 * On entry: rcx = user RIP, r11 = user RFLAGS (set by the CPU),
 * rsp = user stack, GS base = user base until swapgs.
 * Per-CPU data is reached through the kernel GS base; the offsets
 * below are assumptions to confirm against the C-side struct:
 *   gs:0x00 kernel syscall stack, gs:0x08 saved user rsp,
 *   gs:0x10 signal-delivery syscall stack, gs:0x20 "in signal" flag.
 */
ENTRY(syscall_exception)
cli
swapgs                          // switch to the kernel GS base
movq %rsp, %gs:0x8 // stash the user stack pointer (syscall_stack_user)
cmpq $0, %gs:0x20               // currently delivering a signal?
je normal
signal:
movq %gs:0x10, %rsp // use the signal syscall stack (signal_syscall_stack)
jmp next
normal:
movq %gs:0x0, %rsp // use the normal per-CPU syscall stack (syscall_stack)
jmp next
next:
subq $0x38, %rsp                // skip 7 quads so the layout lines up with
                                // the exception frame's FUNC..OLDSS slots
                                // (see addq $0x38 in ret_from_syscall)
pushq %rax
movq %es, %rax
pushq %rax
movq %ds, %rax
pushq %rax
pushq %rbp
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx                      // rcx = user return RIP (syscall ABI)
pushq %rbx
pushq %r8
pushq %r9
pushq %r10
pushq %r11                      // r11 = user RFLAGS (syscall ABI)
pushq %r12
pushq %r13
pushq %r14
pushq %r15
movq %rsp, %rdi                 // arg1: register frame
movq %gs:0x8, %rsi              // arg2: saved user rsp
swapgs                          // NOTE(review): GS is switched back to the
                                // user base BEFORE calling into C — confirm
                                // syscall_handler does not rely on %gs:
call syscall_handler
/* Return to user space after syscall_handler (fallthrough from the
 * entry path above).  Pops the frame in the same order as
 * Restore_all, recovers the user stack pointer from per-CPU data,
 * and returns with sysretq (RIP from rcx, RFLAGS from r11).
 */
ret_from_syscall:
cli
popq %r15
popq %r14
popq %r13
popq %r12
popq %r11
popq %r10
popq %r9
popq %r8
popq %rbx
popq %rcx
popq %rdx
popq %rsi
popq %rdi
popq %rbp
popq %rax
movq %rax, %ds
popq %rax
movq %rax, %es
popq %rax
addq $0x38, %rsp                // drop the unused FUNC..OLDSS slots
swapgs                          // kernel GS base, to read per-CPU data
movq %gs:0x8, %rsp // restore the saved user stack pointer
swapgs                          // back to the user GS base for sysretq
sysretq
|
aether-os-studio/naos
| 2,569
|
kernel/src/arch/riscv64/irq/vector.S
|
.section .text
.globl _start
.globl trap_vector
.globl init_trap_vector
.align 8
/* Supervisor trap vector (direct mode): every trap funnels through
 * trap_handler. */
trap_vector:
j trap_handler
/* Install trap_vector into stvec on the calling hart.
 * The address is aligned, so the low mode bits are 0 = direct mode.
 * Returns 0 in a0.
 */
init_trap_vector:
la t0, trap_vector
csrw stvec, t0
li a0, 0
ret
.align 4
/* Common S-mode trap entry.
 * sscratch holds the kernel trap stack pointer; csrrw swaps it with
 * the interrupted sp, so afterwards sp = trap stack and
 * sscratch = pre-trap sp.  A 280-byte frame (35 x 8) is built:
 *   0..240  x1..x31 (the x2/sp slot at offset 8 holds the pre-trap sp)
 *   248 sepc, 256 scause, 264 stval, 272 sstatus
 * and its address is passed to handle_trap_c(frame).
 */
trap_handler:
csrrw sp, sscratch, sp          // sp <-> sscratch: switch to the trap stack
addi sp, sp, -280
sd x1, 0(sp) // ra
// x2 (sp) is saved later, from sscratch
sd x3, 16(sp) // gp
sd x4, 24(sp) // tp
sd x5, 32(sp) // t0
sd x6, 40(sp) // t1
sd x7, 48(sp) // t2
sd x8, 56(sp) // s0/fp
sd x9, 64(sp) // s1
sd x10, 72(sp) // a0
sd x11, 80(sp) // a1
sd x12, 88(sp) // a2
sd x13, 96(sp) // a3
sd x14, 104(sp) // a4
sd x15, 112(sp) // a5
sd x16, 120(sp) // a6
sd x17, 128(sp) // a7
sd x18, 136(sp) // s2
sd x19, 144(sp) // s3
sd x20, 152(sp) // s4
sd x21, 160(sp) // s5
sd x22, 168(sp) // s6
sd x23, 176(sp) // s7
sd x24, 184(sp) // s8
sd x25, 192(sp) // s9
sd x26, 200(sp) // s10
sd x27, 208(sp) // s11
sd x28, 216(sp) // t3
sd x29, 224(sp) // t4
sd x30, 232(sp) // t5
sd x31, 240(sp) // t6
csrr t0, sscratch
sd t0, 8(sp) // save the real pre-trap sp
csrr t0, sepc
sd t0, 248(sp) // sepc
csrr t0, scause
sd t0, 256(sp) // scause
csrr t0, stval
sd t0, 264(sp) // stval
csrr t0, sstatus
sd t0, 272(sp) // sstatus
mv a0, sp                       // arg1: trap frame
call handle_trap_c
.globl ret_from_trap_handler
/* Unwind the trap frame and sret to the interrupted context.
 * The pre-trap sp is parked in sscratch first; the final csrrw swaps
 * it back into sp while leaving the kernel trap-stack top in
 * sscratch, ready for the next trap.
 * t0 (x5) is used as scratch for the CSR writes, so it is restored
 * last.
 */
ret_from_trap_handler:
ld t0, 248(sp) // sepc
csrw sepc, t0
ld t0, 272(sp) // sstatus
csrw sstatus, t0
ld t0, 8(sp) // pre-trap sp -> sscratch, swapped back in below
csrw sscratch, t0
ld x1, 0(sp) // ra
// x2 (sp) is restored last, via the sscratch swap
ld x3, 16(sp) // gp
ld x4, 24(sp) // tp
ld x6, 40(sp) // t1
ld x7, 48(sp) // t2
ld x8, 56(sp) // s0/fp
ld x9, 64(sp) // s1
ld x10, 72(sp) // a0
ld x11, 80(sp) // a1
ld x12, 88(sp) // a2
ld x13, 96(sp) // a3
ld x14, 104(sp) // a4
ld x15, 112(sp) // a5
ld x16, 120(sp) // a6
ld x17, 128(sp) // a7
ld x18, 136(sp) // s2
ld x19, 144(sp) // s3
ld x20, 152(sp) // s4
ld x21, 160(sp) // s5
ld x22, 168(sp) // s6
ld x23, 176(sp) // s7
ld x24, 184(sp) // s8
ld x25, 192(sp) // s9
ld x26, 200(sp) // s10
ld x27, 208(sp) // s11
ld x28, 216(sp) // t3
ld x29, 224(sp) // t4
ld x30, 232(sp) // t5
ld x31, 240(sp) // t6
ld x5, 32(sp) // t0 — restored last (was the CSR scratch register)
addi sp, sp, 280
csrrw sp, sscratch, sp          // sp = pre-trap sp; sscratch = trap stack
sret
|
aenu1/aps3e
| 15,018
|
app/src/main/cpp/rpcs3/3rdparty/zstd/zstd/lib/decompress/huf_decompress_amd64.S
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
#include "../common/portability_macros.h"
#if defined(__ELF__) && defined(__GNUC__)
/* Stack marking
* ref: https://wiki.gentoo.org/wiki/Hardened/GNU_stack_quickstart
*/
.section .note.GNU-stack,"",%progbits
#if defined(__aarch64__)
/* Mark that this assembly supports BTI & PAC, because it is empty for aarch64.
* See: https://github.com/facebook/zstd/issues/3841
* See: https://gcc.godbolt.org/z/sqr5T4ffK
* See: https://lore.kernel.org/linux-arm-kernel/20200429211641.9279-8-broonie@kernel.org/
* See: https://reviews.llvm.org/D62609
*/
.pushsection .note.gnu.property, "a"
.p2align 3
.long 4 /* size of the name - "GNU\0" */
.long 0x10 /* size of descriptor */
.long 0x5 /* NT_GNU_PROPERTY_TYPE_0 */
.asciz "GNU"
.long 0xc0000000 /* pr_type - GNU_PROPERTY_AARCH64_FEATURE_1_AND */
.long 4 /* pr_datasz - 4 bytes */
.long 3 /* pr_data - GNU_PROPERTY_AARCH64_FEATURE_1_BTI | GNU_PROPERTY_AARCH64_FEATURE_1_PAC */
.p2align 3 /* pr_padding - bring everything to 8 byte alignment */
.popsection
#endif
#endif
#if ZSTD_ENABLE_ASM_X86_64_BMI2
/* Calling convention:
*
* %rdi contains the first argument: HUF_DecompressAsmArgs*.
* %rbp isn't maintained (no frame pointer).
* %rsp contains the stack pointer that grows down.
* No red-zone is assumed, only addresses >= %rsp are used.
* All register contents are preserved.
*
* TODO: Support Windows calling convention.
*/
ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X1_usingDTable_internal_fast_asm_loop)
ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X2_usingDTable_internal_fast_asm_loop)
ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X2_usingDTable_internal_fast_asm_loop)
ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X1_usingDTable_internal_fast_asm_loop)
.global HUF_decompress4X1_usingDTable_internal_fast_asm_loop
.global HUF_decompress4X2_usingDTable_internal_fast_asm_loop
.global _HUF_decompress4X1_usingDTable_internal_fast_asm_loop
.global _HUF_decompress4X2_usingDTable_internal_fast_asm_loop
.text
/* Sets up register mappings for clarity.
* op[], bits[], dtable & ip[0] each get their own register.
* ip[1,2,3] & olimit alias var[].
* %rax is a scratch register.
*/
#define op0 rsi
#define op1 rbx
#define op2 rcx
#define op3 rdi
#define ip0 r8
#define ip1 r9
#define ip2 r10
#define ip3 r11
#define bits0 rbp
#define bits1 rdx
#define bits2 r12
#define bits3 r13
#define dtable r14
#define olimit r15
/* var[] aliases ip[1,2,3] & olimit
* ip[1,2,3] are saved every iteration.
* olimit is only used in compute_olimit.
*/
#define var0 r15
#define var1 r9
#define var2 r10
#define var3 r11
/* 32-bit var registers */
#define vard0 r15d
#define vard1 r9d
#define vard2 r10d
#define vard3 r11d
/* Calls X(N) for each stream 0, 1, 2, 3. */
#define FOR_EACH_STREAM(X) \
X(0); \
X(1); \
X(2); \
X(3)
/* Calls X(N, idx) for each stream 0, 1, 2, 3. */
#define FOR_EACH_STREAM_WITH_INDEX(X, idx) \
X(0, idx); \
X(1, idx); \
X(2, idx); \
X(3, idx)
/* Define both _HUF_* & HUF_* symbols because MacOS
* C symbols are prefixed with '_' & Linux symbols aren't.
*/
_HUF_decompress4X1_usingDTable_internal_fast_asm_loop:
HUF_decompress4X1_usingDTable_internal_fast_asm_loop:
ZSTD_CET_ENDBRANCH
/* Save all registers - even if they are callee saved for simplicity. */
push %rax
push %rbx
push %rcx
push %rdx
push %rbp
push %rsi
push %rdi
push %r8
push %r9
push %r10
push %r11
push %r12
push %r13
push %r14
push %r15
/* Read HUF_DecompressAsmArgs* args from %rax */
movq %rdi, %rax
movq 0(%rax), %ip0
movq 8(%rax), %ip1
movq 16(%rax), %ip2
movq 24(%rax), %ip3
movq 32(%rax), %op0
movq 40(%rax), %op1
movq 48(%rax), %op2
movq 56(%rax), %op3
movq 64(%rax), %bits0
movq 72(%rax), %bits1
movq 80(%rax), %bits2
movq 88(%rax), %bits3
movq 96(%rax), %dtable
push %rax /* argument */
push 104(%rax) /* ilowest */
push 112(%rax) /* oend */
push %olimit /* olimit space */
subq $24, %rsp
.L_4X1_compute_olimit:
/* Computes how many iterations we can do safely
* %r15, %rax may be clobbered
* rbx, rdx must be saved
* op3 & ip0 mustn't be clobbered
*/
movq %rbx, 0(%rsp)
movq %rdx, 8(%rsp)
movq 32(%rsp), %rax /* rax = oend */
subq %op3, %rax /* rax = oend - op3 */
/* r15 = (oend - op3) / 5 */
movabsq $-3689348814741910323, %rdx
mulq %rdx
movq %rdx, %r15
shrq $2, %r15
movq %ip0, %rax /* rax = ip0 */
movq 40(%rsp), %rdx /* rdx = ilowest */
subq %rdx, %rax /* rax = ip0 - ilowest */
movq %rax, %rbx /* rbx = ip0 - ilowest */
/* rdx = (ip0 - ilowest) / 7 */
movabsq $2635249153387078803, %rdx
mulq %rdx
subq %rdx, %rbx
shrq %rbx
addq %rbx, %rdx
shrq $2, %rdx
/* r15 = min(%rdx, %r15) */
cmpq %rdx, %r15
cmova %rdx, %r15
/* r15 = r15 * 5 */
leaq (%r15, %r15, 4), %r15
/* olimit = op3 + r15 */
addq %op3, %olimit
movq 8(%rsp), %rdx
movq 0(%rsp), %rbx
/* If (op3 + 20 > olimit) */
movq %op3, %rax /* rax = op3 */
cmpq %rax, %olimit /* op3 == olimit */
je .L_4X1_exit
/* If (ip1 < ip0) go to exit */
cmpq %ip0, %ip1
jb .L_4X1_exit
/* If (ip2 < ip1) go to exit */
cmpq %ip1, %ip2
jb .L_4X1_exit
/* If (ip3 < ip2) go to exit */
cmpq %ip2, %ip3
jb .L_4X1_exit
/* Reads top 11 bits from bits[n]
* Loads dt[bits[n]] into var[n]
*/
#define GET_NEXT_DELT(n) \
movq $53, %var##n; \
shrxq %var##n, %bits##n, %var##n; \
movzwl (%dtable,%var##n,2),%vard##n
/* var[n] must contain the DTable entry computed with GET_NEXT_DELT
* Moves var[n] to %rax
* bits[n] <<= var[n] & 63
* op[n][idx] = %rax >> 8
* %ah is a way to access bits [8, 16) of %rax
*/
#define DECODE_FROM_DELT(n, idx) \
movq %var##n, %rax; \
shlxq %var##n, %bits##n, %bits##n; \
movb %ah, idx(%op##n)
/* Assumes GET_NEXT_DELT has been called.
* Calls DECODE_FROM_DELT then GET_NEXT_DELT
*/
#define DECODE_AND_GET_NEXT(n, idx) \
DECODE_FROM_DELT(n, idx); \
GET_NEXT_DELT(n) \
/* // ctz & nbBytes is stored in bits[n]
* // nbBits is stored in %rax
* ctz = CTZ[bits[n]]
* nbBits = ctz & 7
* nbBytes = ctz >> 3
* op[n] += 5
* ip[n] -= nbBytes
* // Note: x86-64 is little-endian ==> no bswap
* bits[n] = MEM_readST(ip[n]) | 1
* bits[n] <<= nbBits
*/
#define RELOAD_BITS(n) \
bsfq %bits##n, %bits##n; \
movq %bits##n, %rax; \
andq $7, %rax; \
shrq $3, %bits##n; \
leaq 5(%op##n), %op##n; \
subq %bits##n, %ip##n; \
movq (%ip##n), %bits##n; \
orq $1, %bits##n; \
shlx %rax, %bits##n, %bits##n
/* Store clobbered variables on the stack */
movq %olimit, 24(%rsp)
movq %ip1, 0(%rsp)
movq %ip2, 8(%rsp)
movq %ip3, 16(%rsp)
/* Call GET_NEXT_DELT for each stream */
FOR_EACH_STREAM(GET_NEXT_DELT)
.p2align 6
.L_4X1_loop_body:
/* Decode 5 symbols in each of the 4 streams (20 total)
* Must have called GET_NEXT_DELT for each stream
*/
FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 0)
FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 1)
FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 2)
FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 3)
FOR_EACH_STREAM_WITH_INDEX(DECODE_FROM_DELT, 4)
/* Load ip[1,2,3] from stack (var[] aliases them)
* ip[] is needed for RELOAD_BITS
* Each will be stored back to the stack after RELOAD
*/
movq 0(%rsp), %ip1
movq 8(%rsp), %ip2
movq 16(%rsp), %ip3
/* Reload each stream & fetch the next table entry
* to prepare for the next iteration
*/
RELOAD_BITS(0)
GET_NEXT_DELT(0)
RELOAD_BITS(1)
movq %ip1, 0(%rsp)
GET_NEXT_DELT(1)
RELOAD_BITS(2)
movq %ip2, 8(%rsp)
GET_NEXT_DELT(2)
RELOAD_BITS(3)
movq %ip3, 16(%rsp)
GET_NEXT_DELT(3)
/* If op3 < olimit: continue the loop */
cmp %op3, 24(%rsp)
ja .L_4X1_loop_body
/* Reload ip[1,2,3] from stack */
movq 0(%rsp), %ip1
movq 8(%rsp), %ip2
movq 16(%rsp), %ip3
/* Re-compute olimit */
jmp .L_4X1_compute_olimit
#undef GET_NEXT_DELT
#undef DECODE_FROM_DELT
#undef DECODE
#undef RELOAD_BITS
.L_4X1_exit:
addq $24, %rsp
/* Restore stack (oend & olimit) */
pop %rax /* olimit */
pop %rax /* oend */
pop %rax /* ilowest */
pop %rax /* arg */
/* Save ip / op / bits */
movq %ip0, 0(%rax)
movq %ip1, 8(%rax)
movq %ip2, 16(%rax)
movq %ip3, 24(%rax)
movq %op0, 32(%rax)
movq %op1, 40(%rax)
movq %op2, 48(%rax)
movq %op3, 56(%rax)
movq %bits0, 64(%rax)
movq %bits1, 72(%rax)
movq %bits2, 80(%rax)
movq %bits3, 88(%rax)
/* Restore registers */
pop %r15
pop %r14
pop %r13
pop %r12
pop %r11
pop %r10
pop %r9
pop %r8
pop %rdi
pop %rsi
pop %rbp
pop %rdx
pop %rcx
pop %rbx
pop %rax
ret
_HUF_decompress4X2_usingDTable_internal_fast_asm_loop:
HUF_decompress4X2_usingDTable_internal_fast_asm_loop:
ZSTD_CET_ENDBRANCH
/* Save all registers - even if they are callee saved for simplicity. */
push %rax
push %rbx
push %rcx
push %rdx
push %rbp
push %rsi
push %rdi
push %r8
push %r9
push %r10
push %r11
push %r12
push %r13
push %r14
push %r15
movq %rdi, %rax
movq 0(%rax), %ip0
movq 8(%rax), %ip1
movq 16(%rax), %ip2
movq 24(%rax), %ip3
movq 32(%rax), %op0
movq 40(%rax), %op1
movq 48(%rax), %op2
movq 56(%rax), %op3
movq 64(%rax), %bits0
movq 72(%rax), %bits1
movq 80(%rax), %bits2
movq 88(%rax), %bits3
movq 96(%rax), %dtable
push %rax /* argument */
push %rax /* olimit */
push 104(%rax) /* ilowest */
movq 112(%rax), %rax
push %rax /* oend3 */
movq %op3, %rax
push %rax /* oend2 */
movq %op2, %rax
push %rax /* oend1 */
movq %op1, %rax
push %rax /* oend0 */
/* Scratch space */
subq $8, %rsp
.L_4X2_compute_olimit:
/* Computes how many iterations we can do safely
* %r15, %rax may be clobbered
* rdx must be saved
* op[1,2,3,4] & ip0 mustn't be clobbered
*/
movq %rdx, 0(%rsp)
/* We can consume up to 7 input bytes each iteration. */
movq %ip0, %rax /* rax = ip0 */
movq 40(%rsp), %rdx /* rdx = ilowest */
subq %rdx, %rax /* rax = ip0 - ilowest */
movq %rax, %r15 /* r15 = ip0 - ilowest */
/* rdx = rax / 7 */
movabsq $2635249153387078803, %rdx
mulq %rdx
subq %rdx, %r15
shrq %r15
addq %r15, %rdx
shrq $2, %rdx
/* r15 = (ip0 - ilowest) / 7 */
movq %rdx, %r15
/* r15 = min(r15, min(oend0 - op0, oend1 - op1, oend2 - op2, oend3 - op3) / 10) */
movq 8(%rsp), %rax /* rax = oend0 */
subq %op0, %rax /* rax = oend0 - op0 */
movq 16(%rsp), %rdx /* rdx = oend1 */
subq %op1, %rdx /* rdx = oend1 - op1 */
cmpq %rax, %rdx
cmova %rax, %rdx /* rdx = min(%rdx, %rax) */
movq 24(%rsp), %rax /* rax = oend2 */
subq %op2, %rax /* rax = oend2 - op2 */
cmpq %rax, %rdx
cmova %rax, %rdx /* rdx = min(%rdx, %rax) */
movq 32(%rsp), %rax /* rax = oend3 */
subq %op3, %rax /* rax = oend3 - op3 */
cmpq %rax, %rdx
cmova %rax, %rdx /* rdx = min(%rdx, %rax) */
movabsq $-3689348814741910323, %rax
mulq %rdx
shrq $3, %rdx /* rdx = rdx / 10 */
/* r15 = min(%rdx, %r15) */
cmpq %rdx, %r15
cmova %rdx, %r15
/* olimit = op3 + 5 * r15 */
movq %r15, %rax
leaq (%op3, %rax, 4), %olimit
addq %rax, %olimit
movq 0(%rsp), %rdx
/* If (op3 + 10 > olimit) */
movq %op3, %rax /* rax = op3 */
cmpq %rax, %olimit /* op3 == olimit */
je .L_4X2_exit
/* If (ip1 < ip0) go to exit */
cmpq %ip0, %ip1
jb .L_4X2_exit
/* If (ip2 < ip1) go to exit */
cmpq %ip1, %ip2
jb .L_4X2_exit
/* If (ip3 < ip2) go to exit */
cmpq %ip2, %ip3
jb .L_4X2_exit
#define DECODE(n, idx) \
movq %bits##n, %rax; \
shrq $53, %rax; \
movzwl 0(%dtable,%rax,4),%r8d; \
movzbl 2(%dtable,%rax,4),%r15d; \
movzbl 3(%dtable,%rax,4),%eax; \
movw %r8w, (%op##n); \
shlxq %r15, %bits##n, %bits##n; \
addq %rax, %op##n
#define RELOAD_BITS(n) \
bsfq %bits##n, %bits##n; \
movq %bits##n, %rax; \
shrq $3, %bits##n; \
andq $7, %rax; \
subq %bits##n, %ip##n; \
movq (%ip##n), %bits##n; \
orq $1, %bits##n; \
shlxq %rax, %bits##n, %bits##n
movq %olimit, 48(%rsp)
.p2align 6
.L_4X2_loop_body:
/* We clobber r8, so store it on the stack */
movq %r8, 0(%rsp)
/* Decode 5 symbols from each of the 4 streams (20 symbols total). */
FOR_EACH_STREAM_WITH_INDEX(DECODE, 0)
FOR_EACH_STREAM_WITH_INDEX(DECODE, 1)
FOR_EACH_STREAM_WITH_INDEX(DECODE, 2)
FOR_EACH_STREAM_WITH_INDEX(DECODE, 3)
FOR_EACH_STREAM_WITH_INDEX(DECODE, 4)
/* Reload r8 */
movq 0(%rsp), %r8
FOR_EACH_STREAM(RELOAD_BITS)
cmp %op3, 48(%rsp)
ja .L_4X2_loop_body
jmp .L_4X2_compute_olimit
#undef DECODE
#undef RELOAD_BITS
.L_4X2_exit:
addq $8, %rsp
/* Restore stack (oend & olimit) */
pop %rax /* oend0 */
pop %rax /* oend1 */
pop %rax /* oend2 */
pop %rax /* oend3 */
pop %rax /* ilowest */
pop %rax /* olimit */
pop %rax /* arg */
/* Save ip / op / bits */
movq %ip0, 0(%rax)
movq %ip1, 8(%rax)
movq %ip2, 16(%rax)
movq %ip3, 24(%rax)
movq %op0, 32(%rax)
movq %op1, 40(%rax)
movq %op2, 48(%rax)
movq %op3, 56(%rax)
movq %bits0, 64(%rax)
movq %bits1, 72(%rax)
movq %bits2, 80(%rax)
movq %bits3, 88(%rax)
/* Restore registers */
pop %r15
pop %r14
pop %r13
pop %r12
pop %r11
pop %r10
pop %r9
pop %r8
pop %rdi
pop %rsi
pop %rbp
pop %rdx
pop %rcx
pop %rbx
pop %rax
ret
#endif
|
afiskon/stm32-sdcard
| 21,686
|
startup_stm32f411xe.s
|
/**
******************************************************************************
* @file startup_stm32f411xe.s
* @author MCD Application Team
* @brief STM32F411xExx Devices vector table for GCC based toolchains.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M4 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
* @attention
*
* <h2><center>© COPYRIGHT 2017 STMicroelectronics</center></h2>
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m4
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
/* stack used for SystemInit_ExtMemCtl; always internal RAM used */
/**
* @brief This is the code that gets called when the processor first
* starts execution following a reset event. Only the absolutely
* necessary set is performed, after which the application
* supplied main() routine is called.
* @param None
* @retval : None
*/
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr sp, =_estack /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2], #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system intitialization function.*/
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
bx lr
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
* @param None
* @retval None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M3. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
*******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word MemManage_Handler
.word BusFault_Handler
.word UsageFault_Handler
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word DebugMon_Handler
.word 0
.word PendSV_Handler
.word SysTick_Handler
/* External Interrupts */
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_IRQHandler /* PVD through EXTI Line detection */
.word TAMP_STAMP_IRQHandler /* Tamper and TimeStamps through the EXTI line */
.word RTC_WKUP_IRQHandler /* RTC Wakeup through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_IRQHandler /* RCC */
.word EXTI0_IRQHandler /* EXTI Line0 */
.word EXTI1_IRQHandler /* EXTI Line1 */
.word EXTI2_IRQHandler /* EXTI Line2 */
.word EXTI3_IRQHandler /* EXTI Line3 */
.word EXTI4_IRQHandler /* EXTI Line4 */
.word DMA1_Stream0_IRQHandler /* DMA1 Stream 0 */
.word DMA1_Stream1_IRQHandler /* DMA1 Stream 1 */
.word DMA1_Stream2_IRQHandler /* DMA1 Stream 2 */
.word DMA1_Stream3_IRQHandler /* DMA1 Stream 3 */
.word DMA1_Stream4_IRQHandler /* DMA1 Stream 4 */
.word DMA1_Stream5_IRQHandler /* DMA1 Stream 5 */
.word DMA1_Stream6_IRQHandler /* DMA1 Stream 6 */
.word ADC_IRQHandler /* ADC1, ADC2 and ADC3s */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word EXTI9_5_IRQHandler /* External Line[9:5]s */
.word TIM1_BRK_TIM9_IRQHandler /* TIM1 Break and TIM9 */
.word TIM1_UP_TIM10_IRQHandler /* TIM1 Update and TIM10 */
.word TIM1_TRG_COM_TIM11_IRQHandler /* TIM1 Trigger and Commutation and TIM11 */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM4_IRQHandler /* TIM4 */
.word I2C1_EV_IRQHandler /* I2C1 Event */
.word I2C1_ER_IRQHandler /* I2C1 Error */
.word I2C2_EV_IRQHandler /* I2C2 Event */
.word I2C2_ER_IRQHandler /* I2C2 Error */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word 0 /* Reserved */
.word EXTI15_10_IRQHandler /* External Line[15:10]s */
.word RTC_Alarm_IRQHandler /* RTC Alarm (A and B) through EXTI Line */
.word OTG_FS_WKUP_IRQHandler /* USB OTG FS Wakeup through EXTI line */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word DMA1_Stream7_IRQHandler /* DMA1 Stream7 */
.word 0 /* Reserved */
.word SDIO_IRQHandler /* SDIO */
.word TIM5_IRQHandler /* TIM5 */
.word SPI3_IRQHandler /* SPI3 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word DMA2_Stream0_IRQHandler /* DMA2 Stream 0 */
.word DMA2_Stream1_IRQHandler /* DMA2 Stream 1 */
.word DMA2_Stream2_IRQHandler /* DMA2 Stream 2 */
.word DMA2_Stream3_IRQHandler /* DMA2 Stream 3 */
.word DMA2_Stream4_IRQHandler /* DMA2 Stream 4 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word OTG_FS_IRQHandler /* USB OTG FS */
.word DMA2_Stream5_IRQHandler /* DMA2 Stream 5 */
.word DMA2_Stream6_IRQHandler /* DMA2 Stream 6 */
.word DMA2_Stream7_IRQHandler /* DMA2 Stream 7 */
.word USART6_IRQHandler /* USART6 */
.word I2C3_EV_IRQHandler /* I2C3 event */
.word I2C3_ER_IRQHandler /* I2C3 error */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word FPU_IRQHandler /* FPU */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word SPI4_IRQHandler /* SPI4 */
.word SPI5_IRQHandler /* SPI5 */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak MemManage_Handler
.thumb_set MemManage_Handler,Default_Handler
.weak BusFault_Handler
.thumb_set BusFault_Handler,Default_Handler
.weak UsageFault_Handler
.thumb_set UsageFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak DebugMon_Handler
.thumb_set DebugMon_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_IRQHandler
.thumb_set PVD_IRQHandler,Default_Handler
.weak TAMP_STAMP_IRQHandler
.thumb_set TAMP_STAMP_IRQHandler,Default_Handler
.weak RTC_WKUP_IRQHandler
.thumb_set RTC_WKUP_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_IRQHandler
.thumb_set RCC_IRQHandler,Default_Handler
.weak EXTI0_IRQHandler
.thumb_set EXTI0_IRQHandler,Default_Handler
.weak EXTI1_IRQHandler
.thumb_set EXTI1_IRQHandler,Default_Handler
.weak EXTI2_IRQHandler
.thumb_set EXTI2_IRQHandler,Default_Handler
.weak EXTI3_IRQHandler
.thumb_set EXTI3_IRQHandler,Default_Handler
.weak EXTI4_IRQHandler
.thumb_set EXTI4_IRQHandler,Default_Handler
.weak DMA1_Stream0_IRQHandler
.thumb_set DMA1_Stream0_IRQHandler,Default_Handler
.weak DMA1_Stream1_IRQHandler
.thumb_set DMA1_Stream1_IRQHandler,Default_Handler
.weak DMA1_Stream2_IRQHandler
.thumb_set DMA1_Stream2_IRQHandler,Default_Handler
.weak DMA1_Stream3_IRQHandler
.thumb_set DMA1_Stream3_IRQHandler,Default_Handler
.weak DMA1_Stream4_IRQHandler
.thumb_set DMA1_Stream4_IRQHandler,Default_Handler
.weak DMA1_Stream5_IRQHandler
.thumb_set DMA1_Stream5_IRQHandler,Default_Handler
.weak DMA1_Stream6_IRQHandler
.thumb_set DMA1_Stream6_IRQHandler,Default_Handler
.weak ADC_IRQHandler
.thumb_set ADC_IRQHandler,Default_Handler
.weak EXTI9_5_IRQHandler
.thumb_set EXTI9_5_IRQHandler,Default_Handler
.weak TIM1_BRK_TIM9_IRQHandler
.thumb_set TIM1_BRK_TIM9_IRQHandler,Default_Handler
.weak TIM1_UP_TIM10_IRQHandler
.thumb_set TIM1_UP_TIM10_IRQHandler,Default_Handler
.weak TIM1_TRG_COM_TIM11_IRQHandler
.thumb_set TIM1_TRG_COM_TIM11_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM4_IRQHandler
.thumb_set TIM4_IRQHandler,Default_Handler
.weak I2C1_EV_IRQHandler
.thumb_set I2C1_EV_IRQHandler,Default_Handler
.weak I2C1_ER_IRQHandler
.thumb_set I2C1_ER_IRQHandler,Default_Handler
.weak I2C2_EV_IRQHandler
.thumb_set I2C2_EV_IRQHandler,Default_Handler
.weak I2C2_ER_IRQHandler
.thumb_set I2C2_ER_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak EXTI15_10_IRQHandler
.thumb_set EXTI15_10_IRQHandler,Default_Handler
.weak RTC_Alarm_IRQHandler
.thumb_set RTC_Alarm_IRQHandler,Default_Handler
.weak OTG_FS_WKUP_IRQHandler
.thumb_set OTG_FS_WKUP_IRQHandler,Default_Handler
.weak DMA1_Stream7_IRQHandler
.thumb_set DMA1_Stream7_IRQHandler,Default_Handler
.weak SDIO_IRQHandler
.thumb_set SDIO_IRQHandler,Default_Handler
.weak TIM5_IRQHandler
.thumb_set TIM5_IRQHandler,Default_Handler
.weak SPI3_IRQHandler
.thumb_set SPI3_IRQHandler,Default_Handler
.weak DMA2_Stream0_IRQHandler
.thumb_set DMA2_Stream0_IRQHandler,Default_Handler
.weak DMA2_Stream1_IRQHandler
.thumb_set DMA2_Stream1_IRQHandler,Default_Handler
.weak DMA2_Stream2_IRQHandler
.thumb_set DMA2_Stream2_IRQHandler,Default_Handler
.weak DMA2_Stream3_IRQHandler
.thumb_set DMA2_Stream3_IRQHandler,Default_Handler
.weak DMA2_Stream4_IRQHandler
.thumb_set DMA2_Stream4_IRQHandler,Default_Handler
.weak OTG_FS_IRQHandler
.thumb_set OTG_FS_IRQHandler,Default_Handler
.weak DMA2_Stream5_IRQHandler
.thumb_set DMA2_Stream5_IRQHandler,Default_Handler
.weak DMA2_Stream6_IRQHandler
.thumb_set DMA2_Stream6_IRQHandler,Default_Handler
.weak DMA2_Stream7_IRQHandler
.thumb_set DMA2_Stream7_IRQHandler,Default_Handler
.weak USART6_IRQHandler
.thumb_set USART6_IRQHandler,Default_Handler
.weak I2C3_EV_IRQHandler
.thumb_set I2C3_EV_IRQHandler,Default_Handler
.weak I2C3_ER_IRQHandler
.thumb_set I2C3_ER_IRQHandler,Default_Handler
.weak FPU_IRQHandler
.thumb_set FPU_IRQHandler,Default_Handler
.weak SPI4_IRQHandler
.thumb_set SPI4_IRQHandler,Default_Handler
.weak SPI5_IRQHandler
.thumb_set SPI5_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
afiskon/stm32-si5351
| 11,157
|
examples/signal-generator/startup_stm32f103xb.s
|
/**
*************** (C) COPYRIGHT 2017 STMicroelectronics ************************
* @file startup_stm32f103xb.s
* @author MCD Application Team
* @version V4.2.0
* @date 31-March-2017
* @brief STM32F103xB Devices vector table for Atollic toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Configure the clock system
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M3 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* <h2><center>© COPYRIGHT(c) 2017 STMicroelectronics</center></h2>
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m3
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.equ BootRAM, 0xF108F85F
/**
* @brief This is the code that gets called when the processor first
* starts execution following a reset event. Only the absolutely
* necessary set is performed, after which the application
* supplied main() routine is called.
* @param None
* @retval : None
*/
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2], #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system intitialization function.*/
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
bx lr
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M3. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word MemManage_Handler
.word BusFault_Handler
.word UsageFault_Handler
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word DebugMon_Handler
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler
.word PVD_IRQHandler
.word TAMPER_IRQHandler
.word RTC_IRQHandler
.word FLASH_IRQHandler
.word RCC_IRQHandler
.word EXTI0_IRQHandler
.word EXTI1_IRQHandler
.word EXTI2_IRQHandler
.word EXTI3_IRQHandler
.word EXTI4_IRQHandler
.word DMA1_Channel1_IRQHandler
.word DMA1_Channel2_IRQHandler
.word DMA1_Channel3_IRQHandler
.word DMA1_Channel4_IRQHandler
.word DMA1_Channel5_IRQHandler
.word DMA1_Channel6_IRQHandler
.word DMA1_Channel7_IRQHandler
.word ADC1_2_IRQHandler
.word USB_HP_CAN1_TX_IRQHandler
.word USB_LP_CAN1_RX0_IRQHandler
.word CAN1_RX1_IRQHandler
.word CAN1_SCE_IRQHandler
.word EXTI9_5_IRQHandler
.word TIM1_BRK_IRQHandler
.word TIM1_UP_IRQHandler
.word TIM1_TRG_COM_IRQHandler
.word TIM1_CC_IRQHandler
.word TIM2_IRQHandler
.word TIM3_IRQHandler
.word TIM4_IRQHandler
.word I2C1_EV_IRQHandler
.word I2C1_ER_IRQHandler
.word I2C2_EV_IRQHandler
.word I2C2_ER_IRQHandler
.word SPI1_IRQHandler
.word SPI2_IRQHandler
.word USART1_IRQHandler
.word USART2_IRQHandler
.word USART3_IRQHandler
.word EXTI15_10_IRQHandler
.word RTC_Alarm_IRQHandler
.word USBWakeUp_IRQHandler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word BootRAM /* @0x108. This is for boot in RAM mode for
STM32F10x Medium Density devices. */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak MemManage_Handler
.thumb_set MemManage_Handler,Default_Handler
.weak BusFault_Handler
.thumb_set BusFault_Handler,Default_Handler
.weak UsageFault_Handler
.thumb_set UsageFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak DebugMon_Handler
.thumb_set DebugMon_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_IRQHandler
.thumb_set PVD_IRQHandler,Default_Handler
.weak TAMPER_IRQHandler
.thumb_set TAMPER_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_IRQHandler
.thumb_set RCC_IRQHandler,Default_Handler
.weak EXTI0_IRQHandler
.thumb_set EXTI0_IRQHandler,Default_Handler
.weak EXTI1_IRQHandler
.thumb_set EXTI1_IRQHandler,Default_Handler
.weak EXTI2_IRQHandler
.thumb_set EXTI2_IRQHandler,Default_Handler
.weak EXTI3_IRQHandler
.thumb_set EXTI3_IRQHandler,Default_Handler
.weak EXTI4_IRQHandler
.thumb_set EXTI4_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_IRQHandler
.thumb_set DMA1_Channel2_IRQHandler,Default_Handler
.weak DMA1_Channel3_IRQHandler
.thumb_set DMA1_Channel3_IRQHandler,Default_Handler
.weak DMA1_Channel4_IRQHandler
.thumb_set DMA1_Channel4_IRQHandler,Default_Handler
.weak DMA1_Channel5_IRQHandler
.thumb_set DMA1_Channel5_IRQHandler,Default_Handler
.weak DMA1_Channel6_IRQHandler
.thumb_set DMA1_Channel6_IRQHandler,Default_Handler
.weak DMA1_Channel7_IRQHandler
.thumb_set DMA1_Channel7_IRQHandler,Default_Handler
.weak ADC1_2_IRQHandler
.thumb_set ADC1_2_IRQHandler,Default_Handler
.weak USB_HP_CAN1_TX_IRQHandler
.thumb_set USB_HP_CAN1_TX_IRQHandler,Default_Handler
.weak USB_LP_CAN1_RX0_IRQHandler
.thumb_set USB_LP_CAN1_RX0_IRQHandler,Default_Handler
.weak CAN1_RX1_IRQHandler
.thumb_set CAN1_RX1_IRQHandler,Default_Handler
.weak CAN1_SCE_IRQHandler
.thumb_set CAN1_SCE_IRQHandler,Default_Handler
.weak EXTI9_5_IRQHandler
.thumb_set EXTI9_5_IRQHandler,Default_Handler
.weak TIM1_BRK_IRQHandler
.thumb_set TIM1_BRK_IRQHandler,Default_Handler
.weak TIM1_UP_IRQHandler
.thumb_set TIM1_UP_IRQHandler,Default_Handler
.weak TIM1_TRG_COM_IRQHandler
.thumb_set TIM1_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM4_IRQHandler
.thumb_set TIM4_IRQHandler,Default_Handler
.weak I2C1_EV_IRQHandler
.thumb_set I2C1_EV_IRQHandler,Default_Handler
.weak I2C1_ER_IRQHandler
.thumb_set I2C1_ER_IRQHandler,Default_Handler
.weak I2C2_EV_IRQHandler
.thumb_set I2C2_EV_IRQHandler,Default_Handler
.weak I2C2_ER_IRQHandler
.thumb_set I2C2_ER_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_IRQHandler
.thumb_set USART3_IRQHandler,Default_Handler
.weak EXTI15_10_IRQHandler
.thumb_set EXTI15_10_IRQHandler,Default_Handler
.weak RTC_Alarm_IRQHandler
.thumb_set RTC_Alarm_IRQHandler,Default_Handler
.weak USBWakeUp_IRQHandler
.thumb_set USBWakeUp_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
afiskon/stm32-si5351
| 11,157
|
examples/basic-example/startup_stm32f103xb.s
|
/**
*************** (C) COPYRIGHT 2017 STMicroelectronics ************************
* @file startup_stm32f103xb.s
* @author MCD Application Team
* @version V4.2.0
* @date 31-March-2017
* @brief STM32F103xB Devices vector table for Atollic toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Configure the clock system
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M3 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* <h2><center>© COPYRIGHT(c) 2017 STMicroelectronics</center></h2>
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m3
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.equ BootRAM, 0xF108F85F
/**
* @brief This is the code that gets called when the processor first
* starts execution following a reset event. Only the absolutely
* necessary set is performed, after which the application
* supplied main() routine is called.
* @param None
* @retval : None
*/
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2], #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system intitialization function.*/
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
bx lr
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M3. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word MemManage_Handler
.word BusFault_Handler
.word UsageFault_Handler
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word DebugMon_Handler
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler
.word PVD_IRQHandler
.word TAMPER_IRQHandler
.word RTC_IRQHandler
.word FLASH_IRQHandler
.word RCC_IRQHandler
.word EXTI0_IRQHandler
.word EXTI1_IRQHandler
.word EXTI2_IRQHandler
.word EXTI3_IRQHandler
.word EXTI4_IRQHandler
.word DMA1_Channel1_IRQHandler
.word DMA1_Channel2_IRQHandler
.word DMA1_Channel3_IRQHandler
.word DMA1_Channel4_IRQHandler
.word DMA1_Channel5_IRQHandler
.word DMA1_Channel6_IRQHandler
.word DMA1_Channel7_IRQHandler
.word ADC1_2_IRQHandler
.word USB_HP_CAN1_TX_IRQHandler
.word USB_LP_CAN1_RX0_IRQHandler
.word CAN1_RX1_IRQHandler
.word CAN1_SCE_IRQHandler
.word EXTI9_5_IRQHandler
.word TIM1_BRK_IRQHandler
.word TIM1_UP_IRQHandler
.word TIM1_TRG_COM_IRQHandler
.word TIM1_CC_IRQHandler
.word TIM2_IRQHandler
.word TIM3_IRQHandler
.word TIM4_IRQHandler
.word I2C1_EV_IRQHandler
.word I2C1_ER_IRQHandler
.word I2C2_EV_IRQHandler
.word I2C2_ER_IRQHandler
.word SPI1_IRQHandler
.word SPI2_IRQHandler
.word USART1_IRQHandler
.word USART2_IRQHandler
.word USART3_IRQHandler
.word EXTI15_10_IRQHandler
.word RTC_Alarm_IRQHandler
.word USBWakeUp_IRQHandler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word BootRAM /* @0x108. This is for boot in RAM mode for
STM32F10x Medium Density devices. */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak MemManage_Handler
.thumb_set MemManage_Handler,Default_Handler
.weak BusFault_Handler
.thumb_set BusFault_Handler,Default_Handler
.weak UsageFault_Handler
.thumb_set UsageFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak DebugMon_Handler
.thumb_set DebugMon_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_IRQHandler
.thumb_set PVD_IRQHandler,Default_Handler
.weak TAMPER_IRQHandler
.thumb_set TAMPER_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_IRQHandler
.thumb_set RCC_IRQHandler,Default_Handler
.weak EXTI0_IRQHandler
.thumb_set EXTI0_IRQHandler,Default_Handler
.weak EXTI1_IRQHandler
.thumb_set EXTI1_IRQHandler,Default_Handler
.weak EXTI2_IRQHandler
.thumb_set EXTI2_IRQHandler,Default_Handler
.weak EXTI3_IRQHandler
.thumb_set EXTI3_IRQHandler,Default_Handler
.weak EXTI4_IRQHandler
.thumb_set EXTI4_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_IRQHandler
.thumb_set DMA1_Channel2_IRQHandler,Default_Handler
.weak DMA1_Channel3_IRQHandler
.thumb_set DMA1_Channel3_IRQHandler,Default_Handler
.weak DMA1_Channel4_IRQHandler
.thumb_set DMA1_Channel4_IRQHandler,Default_Handler
.weak DMA1_Channel5_IRQHandler
.thumb_set DMA1_Channel5_IRQHandler,Default_Handler
.weak DMA1_Channel6_IRQHandler
.thumb_set DMA1_Channel6_IRQHandler,Default_Handler
.weak DMA1_Channel7_IRQHandler
.thumb_set DMA1_Channel7_IRQHandler,Default_Handler
.weak ADC1_2_IRQHandler
.thumb_set ADC1_2_IRQHandler,Default_Handler
.weak USB_HP_CAN1_TX_IRQHandler
.thumb_set USB_HP_CAN1_TX_IRQHandler,Default_Handler
.weak USB_LP_CAN1_RX0_IRQHandler
.thumb_set USB_LP_CAN1_RX0_IRQHandler,Default_Handler
.weak CAN1_RX1_IRQHandler
.thumb_set CAN1_RX1_IRQHandler,Default_Handler
.weak CAN1_SCE_IRQHandler
.thumb_set CAN1_SCE_IRQHandler,Default_Handler
.weak EXTI9_5_IRQHandler
.thumb_set EXTI9_5_IRQHandler,Default_Handler
.weak TIM1_BRK_IRQHandler
.thumb_set TIM1_BRK_IRQHandler,Default_Handler
.weak TIM1_UP_IRQHandler
.thumb_set TIM1_UP_IRQHandler,Default_Handler
.weak TIM1_TRG_COM_IRQHandler
.thumb_set TIM1_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM4_IRQHandler
.thumb_set TIM4_IRQHandler,Default_Handler
.weak I2C1_EV_IRQHandler
.thumb_set I2C1_EV_IRQHandler,Default_Handler
.weak I2C1_ER_IRQHandler
.thumb_set I2C1_ER_IRQHandler,Default_Handler
.weak I2C2_EV_IRQHandler
.thumb_set I2C2_EV_IRQHandler,Default_Handler
.weak I2C2_ER_IRQHandler
.thumb_set I2C2_ER_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_IRQHandler
.thumb_set USART3_IRQHandler,Default_Handler
.weak EXTI15_10_IRQHandler
.thumb_set EXTI15_10_IRQHandler,Default_Handler
.weak RTC_Alarm_IRQHandler
.thumb_set RTC_Alarm_IRQHandler,Default_Handler
.weak USBWakeUp_IRQHandler
.thumb_set USBWakeUp_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
afiskon/stm32-ili9341
| 21,686
|
startup_stm32f411xe.s
|
/**
******************************************************************************
* @file startup_stm32f411xe.s
* @author MCD Application Team
* @brief STM32F411xExx Devices vector table for GCC based toolchains.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M4 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
* @attention
*
* <h2><center>© COPYRIGHT 2017 STMicroelectronics</center></h2>
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m4
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
/* stack used for SystemInit_ExtMemCtl; always internal RAM used */
/**
* @brief This is the code that gets called when the processor first
* starts execution following a reset event. Only the absolutely
* necessary set is performed, after which the application
* supplied main() routine is called.
* @param None
* @retval : None
*/
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr sp, =_estack /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2], #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system intitialization function.*/
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
bx lr
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
* @param None
* @retval None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M3. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
*******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word MemManage_Handler
.word BusFault_Handler
.word UsageFault_Handler
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word DebugMon_Handler
.word 0
.word PendSV_Handler
.word SysTick_Handler
/* External Interrupts */
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_IRQHandler /* PVD through EXTI Line detection */
.word TAMP_STAMP_IRQHandler /* Tamper and TimeStamps through the EXTI line */
.word RTC_WKUP_IRQHandler /* RTC Wakeup through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_IRQHandler /* RCC */
.word EXTI0_IRQHandler /* EXTI Line0 */
.word EXTI1_IRQHandler /* EXTI Line1 */
.word EXTI2_IRQHandler /* EXTI Line2 */
.word EXTI3_IRQHandler /* EXTI Line3 */
.word EXTI4_IRQHandler /* EXTI Line4 */
.word DMA1_Stream0_IRQHandler /* DMA1 Stream 0 */
.word DMA1_Stream1_IRQHandler /* DMA1 Stream 1 */
.word DMA1_Stream2_IRQHandler /* DMA1 Stream 2 */
.word DMA1_Stream3_IRQHandler /* DMA1 Stream 3 */
.word DMA1_Stream4_IRQHandler /* DMA1 Stream 4 */
.word DMA1_Stream5_IRQHandler /* DMA1 Stream 5 */
.word DMA1_Stream6_IRQHandler /* DMA1 Stream 6 */
.word ADC_IRQHandler /* ADC1, ADC2 and ADC3s */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word EXTI9_5_IRQHandler /* External Line[9:5]s */
.word TIM1_BRK_TIM9_IRQHandler /* TIM1 Break and TIM9 */
.word TIM1_UP_TIM10_IRQHandler /* TIM1 Update and TIM10 */
.word TIM1_TRG_COM_TIM11_IRQHandler /* TIM1 Trigger and Commutation and TIM11 */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM4_IRQHandler /* TIM4 */
.word I2C1_EV_IRQHandler /* I2C1 Event */
.word I2C1_ER_IRQHandler /* I2C1 Error */
.word I2C2_EV_IRQHandler /* I2C2 Event */
.word I2C2_ER_IRQHandler /* I2C2 Error */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word 0 /* Reserved */
.word EXTI15_10_IRQHandler /* External Line[15:10]s */
.word RTC_Alarm_IRQHandler /* RTC Alarm (A and B) through EXTI Line */
.word OTG_FS_WKUP_IRQHandler /* USB OTG FS Wakeup through EXTI line */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word DMA1_Stream7_IRQHandler /* DMA1 Stream7 */
.word 0 /* Reserved */
.word SDIO_IRQHandler /* SDIO */
.word TIM5_IRQHandler /* TIM5 */
.word SPI3_IRQHandler /* SPI3 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word DMA2_Stream0_IRQHandler /* DMA2 Stream 0 */
.word DMA2_Stream1_IRQHandler /* DMA2 Stream 1 */
.word DMA2_Stream2_IRQHandler /* DMA2 Stream 2 */
.word DMA2_Stream3_IRQHandler /* DMA2 Stream 3 */
.word DMA2_Stream4_IRQHandler /* DMA2 Stream 4 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word OTG_FS_IRQHandler /* USB OTG FS */
.word DMA2_Stream5_IRQHandler /* DMA2 Stream 5 */
.word DMA2_Stream6_IRQHandler /* DMA2 Stream 6 */
.word DMA2_Stream7_IRQHandler /* DMA2 Stream 7 */
.word USART6_IRQHandler /* USART6 */
.word I2C3_EV_IRQHandler /* I2C3 event */
.word I2C3_ER_IRQHandler /* I2C3 error */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word FPU_IRQHandler /* FPU */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word SPI4_IRQHandler /* SPI4 */
.word SPI5_IRQHandler /* SPI5 */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* Each symbol below is declared .weak and bound to Default_Handler with
* .thumb_set (which also marks the alias as a Thumb function, as required
* for Cortex-M vector entries). Because the aliases are weak, any non-weak
* function with the same name defined anywhere in the application overrides
* the alias; unimplemented interrupts fall through to Default_Handler's
* trap loop instead of jumping to garbage.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak MemManage_Handler
.thumb_set MemManage_Handler,Default_Handler
.weak BusFault_Handler
.thumb_set BusFault_Handler,Default_Handler
.weak UsageFault_Handler
.thumb_set UsageFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak DebugMon_Handler
.thumb_set DebugMon_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_IRQHandler
.thumb_set PVD_IRQHandler,Default_Handler
.weak TAMP_STAMP_IRQHandler
.thumb_set TAMP_STAMP_IRQHandler,Default_Handler
.weak RTC_WKUP_IRQHandler
.thumb_set RTC_WKUP_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_IRQHandler
.thumb_set RCC_IRQHandler,Default_Handler
.weak EXTI0_IRQHandler
.thumb_set EXTI0_IRQHandler,Default_Handler
.weak EXTI1_IRQHandler
.thumb_set EXTI1_IRQHandler,Default_Handler
.weak EXTI2_IRQHandler
.thumb_set EXTI2_IRQHandler,Default_Handler
.weak EXTI3_IRQHandler
.thumb_set EXTI3_IRQHandler,Default_Handler
.weak EXTI4_IRQHandler
.thumb_set EXTI4_IRQHandler,Default_Handler
.weak DMA1_Stream0_IRQHandler
.thumb_set DMA1_Stream0_IRQHandler,Default_Handler
.weak DMA1_Stream1_IRQHandler
.thumb_set DMA1_Stream1_IRQHandler,Default_Handler
.weak DMA1_Stream2_IRQHandler
.thumb_set DMA1_Stream2_IRQHandler,Default_Handler
.weak DMA1_Stream3_IRQHandler
.thumb_set DMA1_Stream3_IRQHandler,Default_Handler
.weak DMA1_Stream4_IRQHandler
.thumb_set DMA1_Stream4_IRQHandler,Default_Handler
.weak DMA1_Stream5_IRQHandler
.thumb_set DMA1_Stream5_IRQHandler,Default_Handler
.weak DMA1_Stream6_IRQHandler
.thumb_set DMA1_Stream6_IRQHandler,Default_Handler
.weak ADC_IRQHandler
.thumb_set ADC_IRQHandler,Default_Handler
.weak EXTI9_5_IRQHandler
.thumb_set EXTI9_5_IRQHandler,Default_Handler
.weak TIM1_BRK_TIM9_IRQHandler
.thumb_set TIM1_BRK_TIM9_IRQHandler,Default_Handler
.weak TIM1_UP_TIM10_IRQHandler
.thumb_set TIM1_UP_TIM10_IRQHandler,Default_Handler
.weak TIM1_TRG_COM_TIM11_IRQHandler
.thumb_set TIM1_TRG_COM_TIM11_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM4_IRQHandler
.thumb_set TIM4_IRQHandler,Default_Handler
.weak I2C1_EV_IRQHandler
.thumb_set I2C1_EV_IRQHandler,Default_Handler
.weak I2C1_ER_IRQHandler
.thumb_set I2C1_ER_IRQHandler,Default_Handler
.weak I2C2_EV_IRQHandler
.thumb_set I2C2_EV_IRQHandler,Default_Handler
.weak I2C2_ER_IRQHandler
.thumb_set I2C2_ER_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak EXTI15_10_IRQHandler
.thumb_set EXTI15_10_IRQHandler,Default_Handler
.weak RTC_Alarm_IRQHandler
.thumb_set RTC_Alarm_IRQHandler,Default_Handler
.weak OTG_FS_WKUP_IRQHandler
.thumb_set OTG_FS_WKUP_IRQHandler,Default_Handler
.weak DMA1_Stream7_IRQHandler
.thumb_set DMA1_Stream7_IRQHandler,Default_Handler
.weak SDIO_IRQHandler
.thumb_set SDIO_IRQHandler,Default_Handler
.weak TIM5_IRQHandler
.thumb_set TIM5_IRQHandler,Default_Handler
.weak SPI3_IRQHandler
.thumb_set SPI3_IRQHandler,Default_Handler
.weak DMA2_Stream0_IRQHandler
.thumb_set DMA2_Stream0_IRQHandler,Default_Handler
.weak DMA2_Stream1_IRQHandler
.thumb_set DMA2_Stream1_IRQHandler,Default_Handler
.weak DMA2_Stream2_IRQHandler
.thumb_set DMA2_Stream2_IRQHandler,Default_Handler
.weak DMA2_Stream3_IRQHandler
.thumb_set DMA2_Stream3_IRQHandler,Default_Handler
.weak DMA2_Stream4_IRQHandler
.thumb_set DMA2_Stream4_IRQHandler,Default_Handler
.weak OTG_FS_IRQHandler
.thumb_set OTG_FS_IRQHandler,Default_Handler
.weak DMA2_Stream5_IRQHandler
.thumb_set DMA2_Stream5_IRQHandler,Default_Handler
.weak DMA2_Stream6_IRQHandler
.thumb_set DMA2_Stream6_IRQHandler,Default_Handler
.weak DMA2_Stream7_IRQHandler
.thumb_set DMA2_Stream7_IRQHandler,Default_Handler
.weak USART6_IRQHandler
.thumb_set USART6_IRQHandler,Default_Handler
.weak I2C3_EV_IRQHandler
.thumb_set I2C3_EV_IRQHandler,Default_Handler
.weak I2C3_ER_IRQHandler
.thumb_set I2C3_ER_IRQHandler,Default_Handler
.weak FPU_IRQHandler
.thumb_set FPU_IRQHandler,Default_Handler
.weak SPI4_IRQHandler
.thumb_set SPI4_IRQHandler,Default_Handler
.weak SPI5_IRQHandler
.thumb_set SPI5_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
afiskon/stm32-st7735
| 21,686
|
startup_stm32f411xe.s
|
/**
******************************************************************************
* @file startup_stm32f411xe.s
* @author MCD Application Team
* @brief STM32F411xExx Devices vector table for GCC based toolchains.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M4 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
* @attention
*
* <h2><center>© COPYRIGHT 2017 STMicroelectronics</center></h2>
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
/* Assembler setup: unified ARM/Thumb syntax, Cortex-M4 core, and softvfp
(software floating point ABI) so this object links with FP-less builds. */
.syntax unified
.cpu cortex-m4
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
/* stack used for SystemInit_ExtMemCtl; always internal RAM used */
/* NOTE(review): the .word directives above emit the linker symbols into the
current section, keeping them referenced; they are not read at run time. */
/**
* @brief This is the code that gets called when the processor first
* starts execution following a reset event. Only the absolutely
* necessary set is performed (stack pointer, .data copy, .bss zero,
* SystemInit, C runtime constructors), after which the application
* supplied main() routine is called.
* @param None
* @retval : None
*/
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr sp, =_estack /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM.
Register roles: r0 = _sdata (RAM base), r1 = byte offset,
r2 = current RAM address, r3 = scratch word. */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1] /* load init word from flash image */
str r3, [r0, r1] /* store it to .data in RAM */
adds r1, r1, #4 /* advance one word */
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3 /* reached _edata? (unsigned compare) */
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. r2 walks from _sbss to _ebss. */
FillZerobss:
movs r3, #0
str r3, [r2], #4 /* *r2++ = 0 (post-increment store) */
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
bx lr /* only reached if main() ever returns */
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
* @param None
* @retval None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop /* spin forever; attach a debugger to inspect the fault */
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex-M4 (file declares .cpu cortex-m4;
* the original "Cortex M3" wording was a copy/paste leftover). The proper
* section placement must be used so that the table ends up at physical
* address 0x0000.0000: initial MSP, reset vector, system exceptions, then
* the device IRQ entries in NVIC position order. Entry order is part of
* the hardware contract — do not reorder or remove Reserved slots.
*
*******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
/* NOTE(review): the .size above is evaluated before the table is emitted,
so it records 0; harmless, but moving it after the table would be correct. */
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word MemManage_Handler
.word BusFault_Handler
.word UsageFault_Handler
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word DebugMon_Handler
.word 0
.word PendSV_Handler
.word SysTick_Handler
/* External Interrupts */
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_IRQHandler /* PVD through EXTI Line detection */
.word TAMP_STAMP_IRQHandler /* Tamper and TimeStamps through the EXTI line */
.word RTC_WKUP_IRQHandler /* RTC Wakeup through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_IRQHandler /* RCC */
.word EXTI0_IRQHandler /* EXTI Line0 */
.word EXTI1_IRQHandler /* EXTI Line1 */
.word EXTI2_IRQHandler /* EXTI Line2 */
.word EXTI3_IRQHandler /* EXTI Line3 */
.word EXTI4_IRQHandler /* EXTI Line4 */
.word DMA1_Stream0_IRQHandler /* DMA1 Stream 0 */
.word DMA1_Stream1_IRQHandler /* DMA1 Stream 1 */
.word DMA1_Stream2_IRQHandler /* DMA1 Stream 2 */
.word DMA1_Stream3_IRQHandler /* DMA1 Stream 3 */
.word DMA1_Stream4_IRQHandler /* DMA1 Stream 4 */
.word DMA1_Stream5_IRQHandler /* DMA1 Stream 5 */
.word DMA1_Stream6_IRQHandler /* DMA1 Stream 6 */
.word ADC_IRQHandler /* ADC1, ADC2 and ADC3s */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word EXTI9_5_IRQHandler /* External Line[9:5]s */
.word TIM1_BRK_TIM9_IRQHandler /* TIM1 Break and TIM9 */
.word TIM1_UP_TIM10_IRQHandler /* TIM1 Update and TIM10 */
.word TIM1_TRG_COM_TIM11_IRQHandler /* TIM1 Trigger and Commutation and TIM11 */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM4_IRQHandler /* TIM4 */
.word I2C1_EV_IRQHandler /* I2C1 Event */
.word I2C1_ER_IRQHandler /* I2C1 Error */
.word I2C2_EV_IRQHandler /* I2C2 Event */
.word I2C2_ER_IRQHandler /* I2C2 Error */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word 0 /* Reserved */
.word EXTI15_10_IRQHandler /* External Line[15:10]s */
.word RTC_Alarm_IRQHandler /* RTC Alarm (A and B) through EXTI Line */
.word OTG_FS_WKUP_IRQHandler /* USB OTG FS Wakeup through EXTI line */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word DMA1_Stream7_IRQHandler /* DMA1 Stream7 */
.word 0 /* Reserved */
.word SDIO_IRQHandler /* SDIO */
.word TIM5_IRQHandler /* TIM5 */
.word SPI3_IRQHandler /* SPI3 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word DMA2_Stream0_IRQHandler /* DMA2 Stream 0 */
.word DMA2_Stream1_IRQHandler /* DMA2 Stream 1 */
.word DMA2_Stream2_IRQHandler /* DMA2 Stream 2 */
.word DMA2_Stream3_IRQHandler /* DMA2 Stream 3 */
.word DMA2_Stream4_IRQHandler /* DMA2 Stream 4 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word OTG_FS_IRQHandler /* USB OTG FS */
.word DMA2_Stream5_IRQHandler /* DMA2 Stream 5 */
.word DMA2_Stream6_IRQHandler /* DMA2 Stream 6 */
.word DMA2_Stream7_IRQHandler /* DMA2 Stream 7 */
.word USART6_IRQHandler /* USART6 */
.word I2C3_EV_IRQHandler /* I2C3 event */
.word I2C3_ER_IRQHandler /* I2C3 error */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word FPU_IRQHandler /* FPU */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word SPI4_IRQHandler /* SPI4 */
.word SPI5_IRQHandler /* SPI5 */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* Each symbol below is declared .weak and bound to Default_Handler with
* .thumb_set (which also marks the alias as a Thumb function, as required
* for Cortex-M vector entries). Because the aliases are weak, any non-weak
* function with the same name defined anywhere in the application overrides
* the alias; unimplemented interrupts fall through to Default_Handler's
* trap loop instead of jumping to garbage.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak MemManage_Handler
.thumb_set MemManage_Handler,Default_Handler
.weak BusFault_Handler
.thumb_set BusFault_Handler,Default_Handler
.weak UsageFault_Handler
.thumb_set UsageFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak DebugMon_Handler
.thumb_set DebugMon_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_IRQHandler
.thumb_set PVD_IRQHandler,Default_Handler
.weak TAMP_STAMP_IRQHandler
.thumb_set TAMP_STAMP_IRQHandler,Default_Handler
.weak RTC_WKUP_IRQHandler
.thumb_set RTC_WKUP_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_IRQHandler
.thumb_set RCC_IRQHandler,Default_Handler
.weak EXTI0_IRQHandler
.thumb_set EXTI0_IRQHandler,Default_Handler
.weak EXTI1_IRQHandler
.thumb_set EXTI1_IRQHandler,Default_Handler
.weak EXTI2_IRQHandler
.thumb_set EXTI2_IRQHandler,Default_Handler
.weak EXTI3_IRQHandler
.thumb_set EXTI3_IRQHandler,Default_Handler
.weak EXTI4_IRQHandler
.thumb_set EXTI4_IRQHandler,Default_Handler
.weak DMA1_Stream0_IRQHandler
.thumb_set DMA1_Stream0_IRQHandler,Default_Handler
.weak DMA1_Stream1_IRQHandler
.thumb_set DMA1_Stream1_IRQHandler,Default_Handler
.weak DMA1_Stream2_IRQHandler
.thumb_set DMA1_Stream2_IRQHandler,Default_Handler
.weak DMA1_Stream3_IRQHandler
.thumb_set DMA1_Stream3_IRQHandler,Default_Handler
.weak DMA1_Stream4_IRQHandler
.thumb_set DMA1_Stream4_IRQHandler,Default_Handler
.weak DMA1_Stream5_IRQHandler
.thumb_set DMA1_Stream5_IRQHandler,Default_Handler
.weak DMA1_Stream6_IRQHandler
.thumb_set DMA1_Stream6_IRQHandler,Default_Handler
.weak ADC_IRQHandler
.thumb_set ADC_IRQHandler,Default_Handler
.weak EXTI9_5_IRQHandler
.thumb_set EXTI9_5_IRQHandler,Default_Handler
.weak TIM1_BRK_TIM9_IRQHandler
.thumb_set TIM1_BRK_TIM9_IRQHandler,Default_Handler
.weak TIM1_UP_TIM10_IRQHandler
.thumb_set TIM1_UP_TIM10_IRQHandler,Default_Handler
.weak TIM1_TRG_COM_TIM11_IRQHandler
.thumb_set TIM1_TRG_COM_TIM11_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM4_IRQHandler
.thumb_set TIM4_IRQHandler,Default_Handler
.weak I2C1_EV_IRQHandler
.thumb_set I2C1_EV_IRQHandler,Default_Handler
.weak I2C1_ER_IRQHandler
.thumb_set I2C1_ER_IRQHandler,Default_Handler
.weak I2C2_EV_IRQHandler
.thumb_set I2C2_EV_IRQHandler,Default_Handler
.weak I2C2_ER_IRQHandler
.thumb_set I2C2_ER_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak EXTI15_10_IRQHandler
.thumb_set EXTI15_10_IRQHandler,Default_Handler
.weak RTC_Alarm_IRQHandler
.thumb_set RTC_Alarm_IRQHandler,Default_Handler
.weak OTG_FS_WKUP_IRQHandler
.thumb_set OTG_FS_WKUP_IRQHandler,Default_Handler
.weak DMA1_Stream7_IRQHandler
.thumb_set DMA1_Stream7_IRQHandler,Default_Handler
.weak SDIO_IRQHandler
.thumb_set SDIO_IRQHandler,Default_Handler
.weak TIM5_IRQHandler
.thumb_set TIM5_IRQHandler,Default_Handler
.weak SPI3_IRQHandler
.thumb_set SPI3_IRQHandler,Default_Handler
.weak DMA2_Stream0_IRQHandler
.thumb_set DMA2_Stream0_IRQHandler,Default_Handler
.weak DMA2_Stream1_IRQHandler
.thumb_set DMA2_Stream1_IRQHandler,Default_Handler
.weak DMA2_Stream2_IRQHandler
.thumb_set DMA2_Stream2_IRQHandler,Default_Handler
.weak DMA2_Stream3_IRQHandler
.thumb_set DMA2_Stream3_IRQHandler,Default_Handler
.weak DMA2_Stream4_IRQHandler
.thumb_set DMA2_Stream4_IRQHandler,Default_Handler
.weak OTG_FS_IRQHandler
.thumb_set OTG_FS_IRQHandler,Default_Handler
.weak DMA2_Stream5_IRQHandler
.thumb_set DMA2_Stream5_IRQHandler,Default_Handler
.weak DMA2_Stream6_IRQHandler
.thumb_set DMA2_Stream6_IRQHandler,Default_Handler
.weak DMA2_Stream7_IRQHandler
.thumb_set DMA2_Stream7_IRQHandler,Default_Handler
.weak USART6_IRQHandler
.thumb_set USART6_IRQHandler,Default_Handler
.weak I2C3_EV_IRQHandler
.thumb_set I2C3_EV_IRQHandler,Default_Handler
.weak I2C3_ER_IRQHandler
.thumb_set I2C3_ER_IRQHandler,Default_Handler
.weak FPU_IRQHandler
.thumb_set FPU_IRQHandler,Default_Handler
.weak SPI4_IRQHandler
.thumb_set SPI4_IRQHandler,Default_Handler
.weak SPI5_IRQHandler
.thumb_set SPI5_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
afiskon/stm32-w5500
| 23,950
|
startup_stm32f405xx.s
|
/**
******************************************************************************
* @file startup_stm32f405xx.s
* @author MCD Application Team
* @brief STM32F405xx Devices vector table for GCC based toolchains.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M4 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
* @attention
*
* <h2><center>© COPYRIGHT 2017 STMicroelectronics</center></h2>
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
/* Assembler setup: unified ARM/Thumb syntax, Cortex-M4 core, and softvfp
(software floating point ABI) so this object links with FP-less builds. */
.syntax unified
.cpu cortex-m4
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
/* stack used for SystemInit_ExtMemCtl; always internal RAM used */
/* NOTE(review): the .word directives above emit the linker symbols into the
current section, keeping them referenced; they are not read at run time. */
/**
* @brief This is the code that gets called when the processor first
* starts execution following a reset event. Only the absolutely
* necessary set is performed (stack pointer, .data copy, .bss zero,
* SystemInit, C runtime constructors), after which the application
* supplied main() routine is called.
* @param None
* @retval : None
*/
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr sp, =_estack /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM.
Register roles: r0 = _sdata (RAM base), r1 = byte offset,
r2 = current RAM address, r3 = scratch word. */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1] /* load init word from flash image */
str r3, [r0, r1] /* store it to .data in RAM */
adds r1, r1, #4 /* advance one word */
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3 /* reached _edata? (unsigned compare) */
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. r2 walks from _sbss to _ebss. */
FillZerobss:
movs r3, #0
str r3, [r2], #4 /* *r2++ = 0 (post-increment store) */
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
bx lr /* only reached if main() ever returns */
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
* @param None
* @retval None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop /* spin forever; attach a debugger to inspect the fault */
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex-M4 (file declares .cpu cortex-m4;
* the original "Cortex M3" wording was a copy/paste leftover). The proper
* section placement must be used so that the table ends up at physical
* address 0x0000.0000: initial MSP, reset vector, system exceptions, then
* the device IRQ entries in NVIC position order. Entry order is part of
* the hardware contract — do not reorder or remove Reserved slots.
*
*******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
/* NOTE(review): the .size above is evaluated before the table is emitted,
so it records 0; harmless, but moving it after the table would be correct. */
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word MemManage_Handler
.word BusFault_Handler
.word UsageFault_Handler
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word DebugMon_Handler
.word 0
.word PendSV_Handler
.word SysTick_Handler
/* External Interrupts */
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_IRQHandler /* PVD through EXTI Line detection */
.word TAMP_STAMP_IRQHandler /* Tamper and TimeStamps through the EXTI line */
.word RTC_WKUP_IRQHandler /* RTC Wakeup through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_IRQHandler /* RCC */
.word EXTI0_IRQHandler /* EXTI Line0 */
.word EXTI1_IRQHandler /* EXTI Line1 */
.word EXTI2_IRQHandler /* EXTI Line2 */
.word EXTI3_IRQHandler /* EXTI Line3 */
.word EXTI4_IRQHandler /* EXTI Line4 */
.word DMA1_Stream0_IRQHandler /* DMA1 Stream 0 */
.word DMA1_Stream1_IRQHandler /* DMA1 Stream 1 */
.word DMA1_Stream2_IRQHandler /* DMA1 Stream 2 */
.word DMA1_Stream3_IRQHandler /* DMA1 Stream 3 */
.word DMA1_Stream4_IRQHandler /* DMA1 Stream 4 */
.word DMA1_Stream5_IRQHandler /* DMA1 Stream 5 */
.word DMA1_Stream6_IRQHandler /* DMA1 Stream 6 */
.word ADC_IRQHandler /* ADC1, ADC2 and ADC3s */
.word CAN1_TX_IRQHandler /* CAN1 TX */
.word CAN1_RX0_IRQHandler /* CAN1 RX0 */
.word CAN1_RX1_IRQHandler /* CAN1 RX1 */
.word CAN1_SCE_IRQHandler /* CAN1 SCE */
.word EXTI9_5_IRQHandler /* External Line[9:5]s */
.word TIM1_BRK_TIM9_IRQHandler /* TIM1 Break and TIM9 */
.word TIM1_UP_TIM10_IRQHandler /* TIM1 Update and TIM10 */
.word TIM1_TRG_COM_TIM11_IRQHandler /* TIM1 Trigger and Commutation and TIM11 */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM4_IRQHandler /* TIM4 */
.word I2C1_EV_IRQHandler /* I2C1 Event */
.word I2C1_ER_IRQHandler /* I2C1 Error */
.word I2C2_EV_IRQHandler /* I2C2 Event */
.word I2C2_ER_IRQHandler /* I2C2 Error */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_IRQHandler /* USART3 */
.word EXTI15_10_IRQHandler /* External Line[15:10]s */
.word RTC_Alarm_IRQHandler /* RTC Alarm (A and B) through EXTI Line */
.word OTG_FS_WKUP_IRQHandler /* USB OTG FS Wakeup through EXTI line */
.word TIM8_BRK_TIM12_IRQHandler /* TIM8 Break and TIM12 */
.word TIM8_UP_TIM13_IRQHandler /* TIM8 Update and TIM13 */
.word TIM8_TRG_COM_TIM14_IRQHandler /* TIM8 Trigger and Commutation and TIM14 */
.word TIM8_CC_IRQHandler /* TIM8 Capture Compare */
.word DMA1_Stream7_IRQHandler /* DMA1 Stream7 */
.word FSMC_IRQHandler /* FSMC */
.word SDIO_IRQHandler /* SDIO */
.word TIM5_IRQHandler /* TIM5 */
.word SPI3_IRQHandler /* SPI3 */
.word UART4_IRQHandler /* UART4 */
.word UART5_IRQHandler /* UART5 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC1&2 underrun errors */
.word TIM7_IRQHandler /* TIM7 */
.word DMA2_Stream0_IRQHandler /* DMA2 Stream 0 */
.word DMA2_Stream1_IRQHandler /* DMA2 Stream 1 */
.word DMA2_Stream2_IRQHandler /* DMA2 Stream 2 */
.word DMA2_Stream3_IRQHandler /* DMA2 Stream 3 */
.word DMA2_Stream4_IRQHandler /* DMA2 Stream 4 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word CAN2_TX_IRQHandler /* CAN2 TX */
.word CAN2_RX0_IRQHandler /* CAN2 RX0 */
.word CAN2_RX1_IRQHandler /* CAN2 RX1 */
.word CAN2_SCE_IRQHandler /* CAN2 SCE */
.word OTG_FS_IRQHandler /* USB OTG FS */
.word DMA2_Stream5_IRQHandler /* DMA2 Stream 5 */
.word DMA2_Stream6_IRQHandler /* DMA2 Stream 6 */
.word DMA2_Stream7_IRQHandler /* DMA2 Stream 7 */
.word USART6_IRQHandler /* USART6 */
.word I2C3_EV_IRQHandler /* I2C3 event */
.word I2C3_ER_IRQHandler /* I2C3 error */
.word OTG_HS_EP1_OUT_IRQHandler /* USB OTG HS End Point 1 Out */
.word OTG_HS_EP1_IN_IRQHandler /* USB OTG HS End Point 1 In */
.word OTG_HS_WKUP_IRQHandler /* USB OTG HS Wakeup through EXTI */
.word OTG_HS_IRQHandler /* USB OTG HS */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word HASH_RNG_IRQHandler /* Hash and Rng */
.word FPU_IRQHandler /* FPU */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* Each symbol below is declared .weak and bound to Default_Handler with
* .thumb_set (which also marks the alias as a Thumb function, as required
* for Cortex-M vector entries). Because the aliases are weak, any non-weak
* function with the same name defined anywhere in the application overrides
* the alias; unimplemented interrupts fall through to Default_Handler's
* trap loop instead of jumping to garbage.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak MemManage_Handler
.thumb_set MemManage_Handler,Default_Handler
.weak BusFault_Handler
.thumb_set BusFault_Handler,Default_Handler
.weak UsageFault_Handler
.thumb_set UsageFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak DebugMon_Handler
.thumb_set DebugMon_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_IRQHandler
.thumb_set PVD_IRQHandler,Default_Handler
.weak TAMP_STAMP_IRQHandler
.thumb_set TAMP_STAMP_IRQHandler,Default_Handler
.weak RTC_WKUP_IRQHandler
.thumb_set RTC_WKUP_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_IRQHandler
.thumb_set RCC_IRQHandler,Default_Handler
.weak EXTI0_IRQHandler
.thumb_set EXTI0_IRQHandler,Default_Handler
.weak EXTI1_IRQHandler
.thumb_set EXTI1_IRQHandler,Default_Handler
.weak EXTI2_IRQHandler
.thumb_set EXTI2_IRQHandler,Default_Handler
.weak EXTI3_IRQHandler
.thumb_set EXTI3_IRQHandler,Default_Handler
.weak EXTI4_IRQHandler
.thumb_set EXTI4_IRQHandler,Default_Handler
.weak DMA1_Stream0_IRQHandler
.thumb_set DMA1_Stream0_IRQHandler,Default_Handler
.weak DMA1_Stream1_IRQHandler
.thumb_set DMA1_Stream1_IRQHandler,Default_Handler
.weak DMA1_Stream2_IRQHandler
.thumb_set DMA1_Stream2_IRQHandler,Default_Handler
.weak DMA1_Stream3_IRQHandler
.thumb_set DMA1_Stream3_IRQHandler,Default_Handler
.weak DMA1_Stream4_IRQHandler
.thumb_set DMA1_Stream4_IRQHandler,Default_Handler
.weak DMA1_Stream5_IRQHandler
.thumb_set DMA1_Stream5_IRQHandler,Default_Handler
.weak DMA1_Stream6_IRQHandler
.thumb_set DMA1_Stream6_IRQHandler,Default_Handler
.weak ADC_IRQHandler
.thumb_set ADC_IRQHandler,Default_Handler
.weak CAN1_TX_IRQHandler
.thumb_set CAN1_TX_IRQHandler,Default_Handler
.weak CAN1_RX0_IRQHandler
.thumb_set CAN1_RX0_IRQHandler,Default_Handler
.weak CAN1_RX1_IRQHandler
.thumb_set CAN1_RX1_IRQHandler,Default_Handler
.weak CAN1_SCE_IRQHandler
.thumb_set CAN1_SCE_IRQHandler,Default_Handler
.weak EXTI9_5_IRQHandler
.thumb_set EXTI9_5_IRQHandler,Default_Handler
.weak TIM1_BRK_TIM9_IRQHandler
.thumb_set TIM1_BRK_TIM9_IRQHandler,Default_Handler
.weak TIM1_UP_TIM10_IRQHandler
.thumb_set TIM1_UP_TIM10_IRQHandler,Default_Handler
.weak TIM1_TRG_COM_TIM11_IRQHandler
.thumb_set TIM1_TRG_COM_TIM11_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM4_IRQHandler
.thumb_set TIM4_IRQHandler,Default_Handler
.weak I2C1_EV_IRQHandler
.thumb_set I2C1_EV_IRQHandler,Default_Handler
.weak I2C1_ER_IRQHandler
.thumb_set I2C1_ER_IRQHandler,Default_Handler
.weak I2C2_EV_IRQHandler
.thumb_set I2C2_EV_IRQHandler,Default_Handler
.weak I2C2_ER_IRQHandler
.thumb_set I2C2_ER_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_IRQHandler
.thumb_set USART3_IRQHandler,Default_Handler
.weak EXTI15_10_IRQHandler
.thumb_set EXTI15_10_IRQHandler,Default_Handler
.weak RTC_Alarm_IRQHandler
.thumb_set RTC_Alarm_IRQHandler,Default_Handler
.weak OTG_FS_WKUP_IRQHandler
.thumb_set OTG_FS_WKUP_IRQHandler,Default_Handler
.weak TIM8_BRK_TIM12_IRQHandler
.thumb_set TIM8_BRK_TIM12_IRQHandler,Default_Handler
.weak TIM8_UP_TIM13_IRQHandler
.thumb_set TIM8_UP_TIM13_IRQHandler,Default_Handler
.weak TIM8_TRG_COM_TIM14_IRQHandler
.thumb_set TIM8_TRG_COM_TIM14_IRQHandler,Default_Handler
.weak TIM8_CC_IRQHandler
.thumb_set TIM8_CC_IRQHandler,Default_Handler
.weak DMA1_Stream7_IRQHandler
.thumb_set DMA1_Stream7_IRQHandler,Default_Handler
.weak FSMC_IRQHandler
.thumb_set FSMC_IRQHandler,Default_Handler
.weak SDIO_IRQHandler
.thumb_set SDIO_IRQHandler,Default_Handler
.weak TIM5_IRQHandler
.thumb_set TIM5_IRQHandler,Default_Handler
.weak SPI3_IRQHandler
.thumb_set SPI3_IRQHandler,Default_Handler
.weak UART4_IRQHandler
.thumb_set UART4_IRQHandler,Default_Handler
.weak UART5_IRQHandler
.thumb_set UART5_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak DMA2_Stream0_IRQHandler
.thumb_set DMA2_Stream0_IRQHandler,Default_Handler
.weak DMA2_Stream1_IRQHandler
.thumb_set DMA2_Stream1_IRQHandler,Default_Handler
.weak DMA2_Stream2_IRQHandler
.thumb_set DMA2_Stream2_IRQHandler,Default_Handler
.weak DMA2_Stream3_IRQHandler
.thumb_set DMA2_Stream3_IRQHandler,Default_Handler
.weak DMA2_Stream4_IRQHandler
.thumb_set DMA2_Stream4_IRQHandler,Default_Handler
.weak CAN2_TX_IRQHandler
.thumb_set CAN2_TX_IRQHandler,Default_Handler
.weak CAN2_RX0_IRQHandler
.thumb_set CAN2_RX0_IRQHandler,Default_Handler
.weak CAN2_RX1_IRQHandler
.thumb_set CAN2_RX1_IRQHandler,Default_Handler
.weak CAN2_SCE_IRQHandler
.thumb_set CAN2_SCE_IRQHandler,Default_Handler
.weak OTG_FS_IRQHandler
.thumb_set OTG_FS_IRQHandler,Default_Handler
.weak DMA2_Stream5_IRQHandler
.thumb_set DMA2_Stream5_IRQHandler,Default_Handler
.weak DMA2_Stream6_IRQHandler
.thumb_set DMA2_Stream6_IRQHandler,Default_Handler
.weak DMA2_Stream7_IRQHandler
.thumb_set DMA2_Stream7_IRQHandler,Default_Handler
.weak USART6_IRQHandler
.thumb_set USART6_IRQHandler,Default_Handler
.weak I2C3_EV_IRQHandler
.thumb_set I2C3_EV_IRQHandler,Default_Handler
.weak I2C3_ER_IRQHandler
.thumb_set I2C3_ER_IRQHandler,Default_Handler
.weak OTG_HS_EP1_OUT_IRQHandler
.thumb_set OTG_HS_EP1_OUT_IRQHandler,Default_Handler
.weak OTG_HS_EP1_IN_IRQHandler
.thumb_set OTG_HS_EP1_IN_IRQHandler,Default_Handler
.weak OTG_HS_WKUP_IRQHandler
.thumb_set OTG_HS_WKUP_IRQHandler,Default_Handler
.weak OTG_HS_IRQHandler
.thumb_set OTG_HS_IRQHandler,Default_Handler
.weak HASH_RNG_IRQHandler
.thumb_set HASH_RNG_IRQHandler,Default_Handler
.weak FPU_IRQHandler
.thumb_set FPU_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
afiskon/stm32-ssd1306
| 21,686
|
examples/oled-tester/firmware/i2c/startup_stm32f411xe.s
|
/**
******************************************************************************
* @file startup_stm32f411xe.s
* @author MCD Application Team
* @brief STM32F411xExx Devices vector table for GCC based toolchains.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M4 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
* @attention
*
* <h2><center>© COPYRIGHT 2017 STMicroelectronics</center></h2>
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
/* Assembler/ABI setup for the whole file. */
.syntax unified /* unified ARM/Thumb syntax */
.cpu cortex-m4
.fpu softvfp /* soft-float ABI: FP arguments are passed in core registers */
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
/* stack used for SystemInit_ExtMemCtl; always internal RAM used */
/**
* @brief This is the code that gets called when the processor first
* starts execution following a reset event. Only the absolutely
* necessary set is performed, after which the application
* supplied main() routine is called.
* @param None
* @retval : None
*/
/* Reset_Handler: first code executed after reset.
   Sets sp, copies .data init values from flash to SRAM, zero-fills
   .bss, then calls SystemInit, C runtime static constructors
   (__libc_init_array) and finally main().
   Register use: r0 = .data SRAM base, r1 = byte offset, r2 = cursor,
   r3 = scratch/limit. */
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr sp, =_estack /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0 /* r1 = byte offset into .data, starts at 0 */
b LoopCopyDataInit /* enter loop at the test (r0/r3 loaded there) */
CopyDataInit:
ldr r3, =_sidata /* r3 = flash address of .data init image */
ldr r3, [r3, r1] /* load one word from flash */
str r3, [r0, r1] /* store it into SRAM .data */
adds r1, r1, #4 /* advance one word */
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1 /* r2 = current SRAM address */
cmp r2, r3
bcc CopyDataInit /* unsigned: loop while r2 < _edata */
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2], #4 /* *r2 = 0, then r2 += 4 (post-indexed store) */
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss /* unsigned: loop while r2 < _ebss */
/* Call the clock system initialization function.*/
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
bx lr /* NOTE(review): lr is never initialized here; if main()
         returns, this branches to an undefined address */
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
* @param None
* @retval None
*/
/* Default_Handler: catch-all for unexpected interrupts.  Spins forever
   so the system state is preserved for inspection with a debugger. */
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex-M4 (STM32F411).  The linker script
* places the .isr_vector section so that this table ends up at physical
* address 0x0000.0000: word 0 is the initial MSP, word 1 the reset vector.
*
*******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word MemManage_Handler
.word BusFault_Handler
.word UsageFault_Handler
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word DebugMon_Handler
.word 0
.word PendSV_Handler
.word SysTick_Handler
/* External Interrupts */
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_IRQHandler /* PVD through EXTI Line detection */
.word TAMP_STAMP_IRQHandler /* Tamper and TimeStamps through the EXTI line */
.word RTC_WKUP_IRQHandler /* RTC Wakeup through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_IRQHandler /* RCC */
.word EXTI0_IRQHandler /* EXTI Line0 */
.word EXTI1_IRQHandler /* EXTI Line1 */
.word EXTI2_IRQHandler /* EXTI Line2 */
.word EXTI3_IRQHandler /* EXTI Line3 */
.word EXTI4_IRQHandler /* EXTI Line4 */
.word DMA1_Stream0_IRQHandler /* DMA1 Stream 0 */
.word DMA1_Stream1_IRQHandler /* DMA1 Stream 1 */
.word DMA1_Stream2_IRQHandler /* DMA1 Stream 2 */
.word DMA1_Stream3_IRQHandler /* DMA1 Stream 3 */
.word DMA1_Stream4_IRQHandler /* DMA1 Stream 4 */
.word DMA1_Stream5_IRQHandler /* DMA1 Stream 5 */
.word DMA1_Stream6_IRQHandler /* DMA1 Stream 6 */
.word ADC_IRQHandler /* ADC1, ADC2 and ADC3s */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word EXTI9_5_IRQHandler /* External Line[9:5]s */
.word TIM1_BRK_TIM9_IRQHandler /* TIM1 Break and TIM9 */
.word TIM1_UP_TIM10_IRQHandler /* TIM1 Update and TIM10 */
.word TIM1_TRG_COM_TIM11_IRQHandler /* TIM1 Trigger and Commutation and TIM11 */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM4_IRQHandler /* TIM4 */
.word I2C1_EV_IRQHandler /* I2C1 Event */
.word I2C1_ER_IRQHandler /* I2C1 Error */
.word I2C2_EV_IRQHandler /* I2C2 Event */
.word I2C2_ER_IRQHandler /* I2C2 Error */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word 0 /* Reserved */
.word EXTI15_10_IRQHandler /* External Line[15:10]s */
.word RTC_Alarm_IRQHandler /* RTC Alarm (A and B) through EXTI Line */
.word OTG_FS_WKUP_IRQHandler /* USB OTG FS Wakeup through EXTI line */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word DMA1_Stream7_IRQHandler /* DMA1 Stream7 */
.word 0 /* Reserved */
.word SDIO_IRQHandler /* SDIO */
.word TIM5_IRQHandler /* TIM5 */
.word SPI3_IRQHandler /* SPI3 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word DMA2_Stream0_IRQHandler /* DMA2 Stream 0 */
.word DMA2_Stream1_IRQHandler /* DMA2 Stream 1 */
.word DMA2_Stream2_IRQHandler /* DMA2 Stream 2 */
.word DMA2_Stream3_IRQHandler /* DMA2 Stream 3 */
.word DMA2_Stream4_IRQHandler /* DMA2 Stream 4 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word OTG_FS_IRQHandler /* USB OTG FS */
.word DMA2_Stream5_IRQHandler /* DMA2 Stream 5 */
.word DMA2_Stream6_IRQHandler /* DMA2 Stream 6 */
.word DMA2_Stream7_IRQHandler /* DMA2 Stream 7 */
.word USART6_IRQHandler /* USART6 */
.word I2C3_EV_IRQHandler /* I2C3 event */
.word I2C3_ER_IRQHandler /* I2C3 error */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word FPU_IRQHandler /* FPU */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word SPI4_IRQHandler /* SPI4 */
.word SPI5_IRQHandler /* SPI5 */
/* .size must come AFTER the table: the original placed it before the
   g_pfnVectors label, so ".-g_pfnVectors" evaluated to 0 and the ELF
   symbol size was recorded as zero. */
.size g_pfnVectors, .-g_pfnVectors
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
/* .thumb_set both creates the alias and marks it as a Thumb function,
   so bit 0 is set when the address is placed in the vector table. */
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak MemManage_Handler
.thumb_set MemManage_Handler,Default_Handler
.weak BusFault_Handler
.thumb_set BusFault_Handler,Default_Handler
.weak UsageFault_Handler
.thumb_set UsageFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak DebugMon_Handler
.thumb_set DebugMon_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_IRQHandler
.thumb_set PVD_IRQHandler,Default_Handler
.weak TAMP_STAMP_IRQHandler
.thumb_set TAMP_STAMP_IRQHandler,Default_Handler
.weak RTC_WKUP_IRQHandler
.thumb_set RTC_WKUP_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_IRQHandler
.thumb_set RCC_IRQHandler,Default_Handler
.weak EXTI0_IRQHandler
.thumb_set EXTI0_IRQHandler,Default_Handler
.weak EXTI1_IRQHandler
.thumb_set EXTI1_IRQHandler,Default_Handler
.weak EXTI2_IRQHandler
.thumb_set EXTI2_IRQHandler,Default_Handler
.weak EXTI3_IRQHandler
.thumb_set EXTI3_IRQHandler,Default_Handler
.weak EXTI4_IRQHandler
.thumb_set EXTI4_IRQHandler,Default_Handler
.weak DMA1_Stream0_IRQHandler
.thumb_set DMA1_Stream0_IRQHandler,Default_Handler
.weak DMA1_Stream1_IRQHandler
.thumb_set DMA1_Stream1_IRQHandler,Default_Handler
.weak DMA1_Stream2_IRQHandler
.thumb_set DMA1_Stream2_IRQHandler,Default_Handler
.weak DMA1_Stream3_IRQHandler
.thumb_set DMA1_Stream3_IRQHandler,Default_Handler
.weak DMA1_Stream4_IRQHandler
.thumb_set DMA1_Stream4_IRQHandler,Default_Handler
.weak DMA1_Stream5_IRQHandler
.thumb_set DMA1_Stream5_IRQHandler,Default_Handler
.weak DMA1_Stream6_IRQHandler
.thumb_set DMA1_Stream6_IRQHandler,Default_Handler
.weak ADC_IRQHandler
.thumb_set ADC_IRQHandler,Default_Handler
.weak EXTI9_5_IRQHandler
.thumb_set EXTI9_5_IRQHandler,Default_Handler
.weak TIM1_BRK_TIM9_IRQHandler
.thumb_set TIM1_BRK_TIM9_IRQHandler,Default_Handler
.weak TIM1_UP_TIM10_IRQHandler
.thumb_set TIM1_UP_TIM10_IRQHandler,Default_Handler
.weak TIM1_TRG_COM_TIM11_IRQHandler
.thumb_set TIM1_TRG_COM_TIM11_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM4_IRQHandler
.thumb_set TIM4_IRQHandler,Default_Handler
.weak I2C1_EV_IRQHandler
.thumb_set I2C1_EV_IRQHandler,Default_Handler
.weak I2C1_ER_IRQHandler
.thumb_set I2C1_ER_IRQHandler,Default_Handler
.weak I2C2_EV_IRQHandler
.thumb_set I2C2_EV_IRQHandler,Default_Handler
.weak I2C2_ER_IRQHandler
.thumb_set I2C2_ER_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak EXTI15_10_IRQHandler
.thumb_set EXTI15_10_IRQHandler,Default_Handler
.weak RTC_Alarm_IRQHandler
.thumb_set RTC_Alarm_IRQHandler,Default_Handler
.weak OTG_FS_WKUP_IRQHandler
.thumb_set OTG_FS_WKUP_IRQHandler,Default_Handler
.weak DMA1_Stream7_IRQHandler
.thumb_set DMA1_Stream7_IRQHandler,Default_Handler
.weak SDIO_IRQHandler
.thumb_set SDIO_IRQHandler,Default_Handler
.weak TIM5_IRQHandler
.thumb_set TIM5_IRQHandler,Default_Handler
.weak SPI3_IRQHandler
.thumb_set SPI3_IRQHandler,Default_Handler
.weak DMA2_Stream0_IRQHandler
.thumb_set DMA2_Stream0_IRQHandler,Default_Handler
.weak DMA2_Stream1_IRQHandler
.thumb_set DMA2_Stream1_IRQHandler,Default_Handler
.weak DMA2_Stream2_IRQHandler
.thumb_set DMA2_Stream2_IRQHandler,Default_Handler
.weak DMA2_Stream3_IRQHandler
.thumb_set DMA2_Stream3_IRQHandler,Default_Handler
.weak DMA2_Stream4_IRQHandler
.thumb_set DMA2_Stream4_IRQHandler,Default_Handler
.weak OTG_FS_IRQHandler
.thumb_set OTG_FS_IRQHandler,Default_Handler
.weak DMA2_Stream5_IRQHandler
.thumb_set DMA2_Stream5_IRQHandler,Default_Handler
.weak DMA2_Stream6_IRQHandler
.thumb_set DMA2_Stream6_IRQHandler,Default_Handler
.weak DMA2_Stream7_IRQHandler
.thumb_set DMA2_Stream7_IRQHandler,Default_Handler
.weak USART6_IRQHandler
.thumb_set USART6_IRQHandler,Default_Handler
.weak I2C3_EV_IRQHandler
.thumb_set I2C3_EV_IRQHandler,Default_Handler
.weak I2C3_ER_IRQHandler
.thumb_set I2C3_ER_IRQHandler,Default_Handler
.weak FPU_IRQHandler
.thumb_set FPU_IRQHandler,Default_Handler
.weak SPI4_IRQHandler
.thumb_set SPI4_IRQHandler,Default_Handler
.weak SPI5_IRQHandler
.thumb_set SPI5_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
afiskon/stm32-ssd1306
| 21,686
|
examples/oled-tester/firmware/spi/startup_stm32f411xe.s
|
/**
******************************************************************************
* @file startup_stm32f411xe.s
* @author MCD Application Team
* @brief STM32F411xExx Devices vector table for GCC based toolchains.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M4 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
* @attention
*
* <h2><center>© COPYRIGHT 2017 STMicroelectronics</center></h2>
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
/* Assembler/ABI setup for the whole file. */
.syntax unified /* unified ARM/Thumb syntax */
.cpu cortex-m4
.fpu softvfp /* soft-float ABI: FP arguments are passed in core registers */
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
/* stack used for SystemInit_ExtMemCtl; always internal RAM used */
/**
* @brief This is the code that gets called when the processor first
* starts execution following a reset event. Only the absolutely
* necessary set is performed, after which the application
* supplied main() routine is called.
* @param None
* @retval : None
*/
/* Reset_Handler: first code executed after reset.
   Sets sp, copies .data init values from flash to SRAM, zero-fills
   .bss, then calls SystemInit, C runtime static constructors
   (__libc_init_array) and finally main().
   Register use: r0 = .data SRAM base, r1 = byte offset, r2 = cursor,
   r3 = scratch/limit. */
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr sp, =_estack /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0 /* r1 = byte offset into .data, starts at 0 */
b LoopCopyDataInit /* enter loop at the test (r0/r3 loaded there) */
CopyDataInit:
ldr r3, =_sidata /* r3 = flash address of .data init image */
ldr r3, [r3, r1] /* load one word from flash */
str r3, [r0, r1] /* store it into SRAM .data */
adds r1, r1, #4 /* advance one word */
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1 /* r2 = current SRAM address */
cmp r2, r3
bcc CopyDataInit /* unsigned: loop while r2 < _edata */
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2], #4 /* *r2 = 0, then r2 += 4 (post-indexed store) */
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss /* unsigned: loop while r2 < _ebss */
/* Call the clock system initialization function.*/
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
bx lr /* NOTE(review): lr is never initialized here; if main()
         returns, this branches to an undefined address */
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
* @param None
* @retval None
*/
/* Default_Handler: catch-all for unexpected interrupts.  Spins forever
   so the system state is preserved for inspection with a debugger. */
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex-M4 (STM32F411).  The linker script
* places the .isr_vector section so that this table ends up at physical
* address 0x0000.0000: word 0 is the initial MSP, word 1 the reset vector.
*
*******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word MemManage_Handler
.word BusFault_Handler
.word UsageFault_Handler
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word DebugMon_Handler
.word 0
.word PendSV_Handler
.word SysTick_Handler
/* External Interrupts */
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_IRQHandler /* PVD through EXTI Line detection */
.word TAMP_STAMP_IRQHandler /* Tamper and TimeStamps through the EXTI line */
.word RTC_WKUP_IRQHandler /* RTC Wakeup through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_IRQHandler /* RCC */
.word EXTI0_IRQHandler /* EXTI Line0 */
.word EXTI1_IRQHandler /* EXTI Line1 */
.word EXTI2_IRQHandler /* EXTI Line2 */
.word EXTI3_IRQHandler /* EXTI Line3 */
.word EXTI4_IRQHandler /* EXTI Line4 */
.word DMA1_Stream0_IRQHandler /* DMA1 Stream 0 */
.word DMA1_Stream1_IRQHandler /* DMA1 Stream 1 */
.word DMA1_Stream2_IRQHandler /* DMA1 Stream 2 */
.word DMA1_Stream3_IRQHandler /* DMA1 Stream 3 */
.word DMA1_Stream4_IRQHandler /* DMA1 Stream 4 */
.word DMA1_Stream5_IRQHandler /* DMA1 Stream 5 */
.word DMA1_Stream6_IRQHandler /* DMA1 Stream 6 */
.word ADC_IRQHandler /* ADC1, ADC2 and ADC3s */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word EXTI9_5_IRQHandler /* External Line[9:5]s */
.word TIM1_BRK_TIM9_IRQHandler /* TIM1 Break and TIM9 */
.word TIM1_UP_TIM10_IRQHandler /* TIM1 Update and TIM10 */
.word TIM1_TRG_COM_TIM11_IRQHandler /* TIM1 Trigger and Commutation and TIM11 */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM4_IRQHandler /* TIM4 */
.word I2C1_EV_IRQHandler /* I2C1 Event */
.word I2C1_ER_IRQHandler /* I2C1 Error */
.word I2C2_EV_IRQHandler /* I2C2 Event */
.word I2C2_ER_IRQHandler /* I2C2 Error */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word 0 /* Reserved */
.word EXTI15_10_IRQHandler /* External Line[15:10]s */
.word RTC_Alarm_IRQHandler /* RTC Alarm (A and B) through EXTI Line */
.word OTG_FS_WKUP_IRQHandler /* USB OTG FS Wakeup through EXTI line */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word DMA1_Stream7_IRQHandler /* DMA1 Stream7 */
.word 0 /* Reserved */
.word SDIO_IRQHandler /* SDIO */
.word TIM5_IRQHandler /* TIM5 */
.word SPI3_IRQHandler /* SPI3 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word DMA2_Stream0_IRQHandler /* DMA2 Stream 0 */
.word DMA2_Stream1_IRQHandler /* DMA2 Stream 1 */
.word DMA2_Stream2_IRQHandler /* DMA2 Stream 2 */
.word DMA2_Stream3_IRQHandler /* DMA2 Stream 3 */
.word DMA2_Stream4_IRQHandler /* DMA2 Stream 4 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word OTG_FS_IRQHandler /* USB OTG FS */
.word DMA2_Stream5_IRQHandler /* DMA2 Stream 5 */
.word DMA2_Stream6_IRQHandler /* DMA2 Stream 6 */
.word DMA2_Stream7_IRQHandler /* DMA2 Stream 7 */
.word USART6_IRQHandler /* USART6 */
.word I2C3_EV_IRQHandler /* I2C3 event */
.word I2C3_ER_IRQHandler /* I2C3 error */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word FPU_IRQHandler /* FPU */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word SPI4_IRQHandler /* SPI4 */
.word SPI5_IRQHandler /* SPI5 */
/* .size must come AFTER the table: the original placed it before the
   g_pfnVectors label, so ".-g_pfnVectors" evaluated to 0 and the ELF
   symbol size was recorded as zero. */
.size g_pfnVectors, .-g_pfnVectors
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
/* .thumb_set both creates the alias and marks it as a Thumb function,
   so bit 0 is set when the address is placed in the vector table. */
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak MemManage_Handler
.thumb_set MemManage_Handler,Default_Handler
.weak BusFault_Handler
.thumb_set BusFault_Handler,Default_Handler
.weak UsageFault_Handler
.thumb_set UsageFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak DebugMon_Handler
.thumb_set DebugMon_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_IRQHandler
.thumb_set PVD_IRQHandler,Default_Handler
.weak TAMP_STAMP_IRQHandler
.thumb_set TAMP_STAMP_IRQHandler,Default_Handler
.weak RTC_WKUP_IRQHandler
.thumb_set RTC_WKUP_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_IRQHandler
.thumb_set RCC_IRQHandler,Default_Handler
.weak EXTI0_IRQHandler
.thumb_set EXTI0_IRQHandler,Default_Handler
.weak EXTI1_IRQHandler
.thumb_set EXTI1_IRQHandler,Default_Handler
.weak EXTI2_IRQHandler
.thumb_set EXTI2_IRQHandler,Default_Handler
.weak EXTI3_IRQHandler
.thumb_set EXTI3_IRQHandler,Default_Handler
.weak EXTI4_IRQHandler
.thumb_set EXTI4_IRQHandler,Default_Handler
.weak DMA1_Stream0_IRQHandler
.thumb_set DMA1_Stream0_IRQHandler,Default_Handler
.weak DMA1_Stream1_IRQHandler
.thumb_set DMA1_Stream1_IRQHandler,Default_Handler
.weak DMA1_Stream2_IRQHandler
.thumb_set DMA1_Stream2_IRQHandler,Default_Handler
.weak DMA1_Stream3_IRQHandler
.thumb_set DMA1_Stream3_IRQHandler,Default_Handler
.weak DMA1_Stream4_IRQHandler
.thumb_set DMA1_Stream4_IRQHandler,Default_Handler
.weak DMA1_Stream5_IRQHandler
.thumb_set DMA1_Stream5_IRQHandler,Default_Handler
.weak DMA1_Stream6_IRQHandler
.thumb_set DMA1_Stream6_IRQHandler,Default_Handler
.weak ADC_IRQHandler
.thumb_set ADC_IRQHandler,Default_Handler
.weak EXTI9_5_IRQHandler
.thumb_set EXTI9_5_IRQHandler,Default_Handler
.weak TIM1_BRK_TIM9_IRQHandler
.thumb_set TIM1_BRK_TIM9_IRQHandler,Default_Handler
.weak TIM1_UP_TIM10_IRQHandler
.thumb_set TIM1_UP_TIM10_IRQHandler,Default_Handler
.weak TIM1_TRG_COM_TIM11_IRQHandler
.thumb_set TIM1_TRG_COM_TIM11_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM4_IRQHandler
.thumb_set TIM4_IRQHandler,Default_Handler
.weak I2C1_EV_IRQHandler
.thumb_set I2C1_EV_IRQHandler,Default_Handler
.weak I2C1_ER_IRQHandler
.thumb_set I2C1_ER_IRQHandler,Default_Handler
.weak I2C2_EV_IRQHandler
.thumb_set I2C2_EV_IRQHandler,Default_Handler
.weak I2C2_ER_IRQHandler
.thumb_set I2C2_ER_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak EXTI15_10_IRQHandler
.thumb_set EXTI15_10_IRQHandler,Default_Handler
.weak RTC_Alarm_IRQHandler
.thumb_set RTC_Alarm_IRQHandler,Default_Handler
.weak OTG_FS_WKUP_IRQHandler
.thumb_set OTG_FS_WKUP_IRQHandler,Default_Handler
.weak DMA1_Stream7_IRQHandler
.thumb_set DMA1_Stream7_IRQHandler,Default_Handler
.weak SDIO_IRQHandler
.thumb_set SDIO_IRQHandler,Default_Handler
.weak TIM5_IRQHandler
.thumb_set TIM5_IRQHandler,Default_Handler
.weak SPI3_IRQHandler
.thumb_set SPI3_IRQHandler,Default_Handler
.weak DMA2_Stream0_IRQHandler
.thumb_set DMA2_Stream0_IRQHandler,Default_Handler
.weak DMA2_Stream1_IRQHandler
.thumb_set DMA2_Stream1_IRQHandler,Default_Handler
.weak DMA2_Stream2_IRQHandler
.thumb_set DMA2_Stream2_IRQHandler,Default_Handler
.weak DMA2_Stream3_IRQHandler
.thumb_set DMA2_Stream3_IRQHandler,Default_Handler
.weak DMA2_Stream4_IRQHandler
.thumb_set DMA2_Stream4_IRQHandler,Default_Handler
.weak OTG_FS_IRQHandler
.thumb_set OTG_FS_IRQHandler,Default_Handler
.weak DMA2_Stream5_IRQHandler
.thumb_set DMA2_Stream5_IRQHandler,Default_Handler
.weak DMA2_Stream6_IRQHandler
.thumb_set DMA2_Stream6_IRQHandler,Default_Handler
.weak DMA2_Stream7_IRQHandler
.thumb_set DMA2_Stream7_IRQHandler,Default_Handler
.weak USART6_IRQHandler
.thumb_set USART6_IRQHandler,Default_Handler
.weak I2C3_EV_IRQHandler
.thumb_set I2C3_EV_IRQHandler,Default_Handler
.weak I2C3_ER_IRQHandler
.thumb_set I2C3_ER_IRQHandler,Default_Handler
.weak FPU_IRQHandler
.thumb_set FPU_IRQHandler,Default_Handler
.weak SPI4_IRQHandler
.thumb_set SPI4_IRQHandler,Default_Handler
.weak SPI5_IRQHandler
.thumb_set SPI5_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
afiskon/hbr-mk2
| 11,157
|
firmware/startup_stm32f103xb.s
|
/**
*************** (C) COPYRIGHT 2017 STMicroelectronics ************************
* @file startup_stm32f103xb.s
* @author MCD Application Team
* @version V4.2.0
* @date 31-March-2017
* @brief STM32F103xB Devices vector table for Atollic toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Configure the clock system
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M3 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* <h2><center>© COPYRIGHT(c) 2017 STMicroelectronics</center></h2>
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
/* Assembler/ABI setup for the whole file. */
.syntax unified /* unified ARM/Thumb syntax */
.cpu cortex-m3
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
/* NOTE(review): magic constant carried over from ST startup templates,
   used as a vector-table marker when booting from SRAM; it is not
   referenced in the code visible here -- confirm against the full file. */
.equ BootRAM, 0xF108F85F
/**
* @brief This is the code that gets called when the processor first
* starts execution following a reset event. Only the absolutely
* necessary set is performed, after which the application
* supplied main() routine is called.
* @param None
* @retval : None
*/
/* Reset_Handler: first code executed after reset.
   Copies .data init values from flash to SRAM, zero-fills .bss, then
   calls SystemInit, C runtime static constructors (__libc_init_array)
   and finally main().
   Unlike the F4 variants, sp is NOT set in software here: the Cortex-M
   core loads the initial MSP from vector-table word 0 (_estack -- see
   g_pfnVectors below).
   Register use: r0 = .data SRAM base, r1 = byte offset, r2 = cursor,
   r3 = scratch/limit. */
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0 /* r1 = byte offset into .data, starts at 0 */
b LoopCopyDataInit /* enter loop at the test (r0/r3 loaded there) */
CopyDataInit:
ldr r3, =_sidata /* r3 = flash address of .data init image */
ldr r3, [r3, r1] /* load one word from flash */
str r3, [r0, r1] /* store it into SRAM .data */
adds r1, r1, #4 /* advance one word */
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1 /* r2 = current SRAM address */
cmp r2, r3
bcc CopyDataInit /* unsigned: loop while r2 < _edata */
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2], #4 /* *r2 = 0, then r2 += 4 (post-indexed store) */
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss /* unsigned: loop while r2 < _ebss */
/* Call the clock system initialization function.*/
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
bx lr /* NOTE(review): lr is never initialized here; if main()
         returns, this branches to an undefined address */
.size Reset_Handler, .-Reset_Handler
/**
* @brief Fallback handler for any exception or interrupt that has no
*        dedicated handler: park the CPU in a tight loop so the full
*        system state stays intact for inspection under a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
.Lhang:
b .Lhang                        /* spin forever */
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M3. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
* FIX: the .size directive for g_pfnVectors was previously emitted BEFORE
* the label and its data, so the symbol size evaluated to 0. It now follows
* the table, so nm/objdump/gdb report the table's real size.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
g_pfnVectors:
/* ----- Cortex-M core exceptions ----- */
.word _estack                   /* 0x00: initial main stack pointer */
.word Reset_Handler             /* 0x04: reset entry point */
.word NMI_Handler
.word HardFault_Handler
.word MemManage_Handler
.word BusFault_Handler
.word UsageFault_Handler
.word 0                         /* reserved */
.word 0
.word 0
.word 0
.word SVC_Handler
.word DebugMon_Handler
.word 0
.word PendSV_Handler
.word SysTick_Handler
/* ----- External (NVIC) interrupts, STM32F10x peripheral set ----- */
.word WWDG_IRQHandler
.word PVD_IRQHandler
.word TAMPER_IRQHandler
.word RTC_IRQHandler
.word FLASH_IRQHandler
.word RCC_IRQHandler
.word EXTI0_IRQHandler
.word EXTI1_IRQHandler
.word EXTI2_IRQHandler
.word EXTI3_IRQHandler
.word EXTI4_IRQHandler
.word DMA1_Channel1_IRQHandler
.word DMA1_Channel2_IRQHandler
.word DMA1_Channel3_IRQHandler
.word DMA1_Channel4_IRQHandler
.word DMA1_Channel5_IRQHandler
.word DMA1_Channel6_IRQHandler
.word DMA1_Channel7_IRQHandler
.word ADC1_2_IRQHandler
.word USB_HP_CAN1_TX_IRQHandler
.word USB_LP_CAN1_RX0_IRQHandler
.word CAN1_RX1_IRQHandler
.word CAN1_SCE_IRQHandler
.word EXTI9_5_IRQHandler
.word TIM1_BRK_IRQHandler
.word TIM1_UP_IRQHandler
.word TIM1_TRG_COM_IRQHandler
.word TIM1_CC_IRQHandler
.word TIM2_IRQHandler
.word TIM3_IRQHandler
.word TIM4_IRQHandler
.word I2C1_EV_IRQHandler
.word I2C1_ER_IRQHandler
.word I2C2_EV_IRQHandler
.word I2C2_ER_IRQHandler
.word SPI1_IRQHandler
.word SPI2_IRQHandler
.word USART1_IRQHandler
.word USART2_IRQHandler
.word USART3_IRQHandler
.word EXTI15_10_IRQHandler
.word RTC_Alarm_IRQHandler
.word USBWakeUp_IRQHandler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word BootRAM /* @0x108. This is for boot in RAM mode for
STM32F10x Medium Density devices. */
.size g_pfnVectors, .-g_pfnVectors
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
* The alias list mirrors the g_pfnVectors table order above; .thumb_set
* also marks each alias as a Thumb function so the vector entries get
* bit 0 set for correct Thumb dispatch.
*
* NOTE(review): MemManage/BusFault/UsageFault/DebugMon do not exist on
* Cortex-M0 parts such as the STM32F072 this file's header claims --
* further evidence this is really an F10x/Cortex-M3 startup file.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak MemManage_Handler
.thumb_set MemManage_Handler,Default_Handler
.weak BusFault_Handler
.thumb_set BusFault_Handler,Default_Handler
.weak UsageFault_Handler
.thumb_set UsageFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak DebugMon_Handler
.thumb_set DebugMon_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_IRQHandler
.thumb_set PVD_IRQHandler,Default_Handler
.weak TAMPER_IRQHandler
.thumb_set TAMPER_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_IRQHandler
.thumb_set RCC_IRQHandler,Default_Handler
.weak EXTI0_IRQHandler
.thumb_set EXTI0_IRQHandler,Default_Handler
.weak EXTI1_IRQHandler
.thumb_set EXTI1_IRQHandler,Default_Handler
.weak EXTI2_IRQHandler
.thumb_set EXTI2_IRQHandler,Default_Handler
.weak EXTI3_IRQHandler
.thumb_set EXTI3_IRQHandler,Default_Handler
.weak EXTI4_IRQHandler
.thumb_set EXTI4_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_IRQHandler
.thumb_set DMA1_Channel2_IRQHandler,Default_Handler
.weak DMA1_Channel3_IRQHandler
.thumb_set DMA1_Channel3_IRQHandler,Default_Handler
.weak DMA1_Channel4_IRQHandler
.thumb_set DMA1_Channel4_IRQHandler,Default_Handler
.weak DMA1_Channel5_IRQHandler
.thumb_set DMA1_Channel5_IRQHandler,Default_Handler
.weak DMA1_Channel6_IRQHandler
.thumb_set DMA1_Channel6_IRQHandler,Default_Handler
.weak DMA1_Channel7_IRQHandler
.thumb_set DMA1_Channel7_IRQHandler,Default_Handler
.weak ADC1_2_IRQHandler
.thumb_set ADC1_2_IRQHandler,Default_Handler
.weak USB_HP_CAN1_TX_IRQHandler
.thumb_set USB_HP_CAN1_TX_IRQHandler,Default_Handler
.weak USB_LP_CAN1_RX0_IRQHandler
.thumb_set USB_LP_CAN1_RX0_IRQHandler,Default_Handler
.weak CAN1_RX1_IRQHandler
.thumb_set CAN1_RX1_IRQHandler,Default_Handler
.weak CAN1_SCE_IRQHandler
.thumb_set CAN1_SCE_IRQHandler,Default_Handler
.weak EXTI9_5_IRQHandler
.thumb_set EXTI9_5_IRQHandler,Default_Handler
.weak TIM1_BRK_IRQHandler
.thumb_set TIM1_BRK_IRQHandler,Default_Handler
.weak TIM1_UP_IRQHandler
.thumb_set TIM1_UP_IRQHandler,Default_Handler
.weak TIM1_TRG_COM_IRQHandler
.thumb_set TIM1_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM4_IRQHandler
.thumb_set TIM4_IRQHandler,Default_Handler
.weak I2C1_EV_IRQHandler
.thumb_set I2C1_EV_IRQHandler,Default_Handler
.weak I2C1_ER_IRQHandler
.thumb_set I2C1_ER_IRQHandler,Default_Handler
.weak I2C2_EV_IRQHandler
.thumb_set I2C2_EV_IRQHandler,Default_Handler
.weak I2C2_ER_IRQHandler
.thumb_set I2C2_ER_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_IRQHandler
.thumb_set USART3_IRQHandler,Default_Handler
.weak EXTI15_10_IRQHandler
.thumb_set EXTI15_10_IRQHandler,Default_Handler
.weak RTC_Alarm_IRQHandler
.thumb_set RTC_Alarm_IRQHandler,Default_Handler
.weak USBWakeUp_IRQHandler
.thumb_set USBWakeUp_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
afska/gba-link-connection
| 1,129
|
examples/LinkCard_demo/#loader/crt0.s
|
/**********************************/
/* NINTENDO E-READER STARTUP CODE */
/**********************************/
/* Author : Tim Schuerewegen */
/* Version : 1.0 */
/**********************************/
/* Entry point: switches the ARM7TDMI from ARM to Thumb state, zero-fills
   .bss, then tail-jumps to main() with the e-reader's return address
   restored in LR. NOTE(review): .data is never copied/initialized here --
   presumably the loader already places it in RAM; confirm for this target. */
.GLOBAL _start
.TEXT
.ARM
_start:
@ enter thumb mode
LDR R0, =(_start_thumb+1)       @ bit 0 set => BX switches to Thumb state
BX R0
@ For some reason the usa e-reader subtracts 0x0001610C from the value at
@ address 0x02000008 if it is not "valid". This is only the case when
@ running as dot code, not when running from flash. However, it is
@ recommended to put a "valid" value at that address because the jap
@ e-reader does not have this kind of "protection".
@ 0x02000000 <= valid value < 0x020000E4
.POOL                           @ literal pool for the LDR= above
.THUMB
_start_thumb:
@ save return address
PUSH {LR}
@ clear bss section (byte-wise, so no alignment assumption on __bss_start/__bss_end)
_bss_clear:
LDR R0, =__bss_start            @ R0 = current address, R1 = end, R2 = 0
LDR R1, =__bss_end
MOV R2, #0
_bss_clear_loop:
CMP R0, R1
BEQ _bss_clear_exit             @ done when R0 reaches __bss_end
STRB R2, [R0]
ADD R0, #1
B _bss_clear_loop
_bss_clear_exit:
@ restore return address
POP {R3}                        @ Thumb-1 POP cannot write LR directly,
MOV LR, R3                      @ so round-trip through R3
@ jump to main
LDR R3, =main
BX R3                           @ tail-jump: main() returns straight to the e-reader via LR
.ALIGN
.POOL
.END
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.